From 692e5dbf12901edacf14812a6fae25462920af42 Mon Sep 17 00:00:00 2001 From: Steve Block Date: Tue, 27 Apr 2010 16:23:55 +0100 Subject: Merge webkit.org at r55033 : Initial merge by git Change-Id: I98a4af828067cc243ec3dc5e5826154dd88074b5 --- JavaScriptCore/jit/JIT.cpp | 7 +- JavaScriptCore/jit/JITOpcodes.cpp | 4 +- JavaScriptCore/jit/JITPropertyAccess.cpp | 1010 +----------------------- JavaScriptCore/jit/JITPropertyAccess32_64.cpp | 1026 +++++++++++++++++++++++++ JavaScriptCore/jit/JITStubs.cpp | 21 +- 5 files changed, 1065 insertions(+), 1003 deletions(-) create mode 100644 JavaScriptCore/jit/JITPropertyAccess32_64.cpp (limited to 'JavaScriptCore/jit') diff --git a/JavaScriptCore/jit/JIT.cpp b/JavaScriptCore/jit/JIT.cpp index c0da66d..78c5153 100644 --- a/JavaScriptCore/jit/JIT.cpp +++ b/JavaScriptCore/jit/JIT.cpp @@ -322,6 +322,11 @@ void JIT::privateCompileMainPass() case op_get_by_id_proto_list: case op_get_by_id_self: case op_get_by_id_self_list: + case op_get_by_id_getter_chain: + case op_get_by_id_getter_proto: + case op_get_by_id_getter_proto_list: + case op_get_by_id_getter_self: + case op_get_by_id_getter_self_list: case op_get_string_length: case op_put_by_id_generic: case op_put_by_id_replace: @@ -582,7 +587,7 @@ void JIT::unlinkCall(CallLinkInfo* callLinkInfo) // When the JSFunction is deleted the pointer embedded in the instruction stream will no longer be valid // (and, if a new JSFunction happened to be constructed at the same location, we could get a false positive // match). Reset the check so it no longer matches. - RepatchBuffer repatchBuffer(callLinkInfo->ownerCodeBlock.get()); + RepatchBuffer repatchBuffer(callLinkInfo->ownerCodeBlock); #if USE(JSVALUE32_64) repatchBuffer.repatch(callLinkInfo->hotPathBegin, 0); #else diff --git a/JavaScriptCore/jit/JITOpcodes.cpp b/JavaScriptCore/jit/JITOpcodes.cpp index c3f20f1..c470495 100644 --- a/JavaScriptCore/jit/JITOpcodes.cpp +++ b/JavaScriptCore/jit/JITOpcodes.cpp @@ -56,7 +56,7 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr* executable Jump string_failureCases2 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr)); // Checks out okay! - get the length from the Ustring. - load32(Address(regT0, OBJECT_OFFSETOF(JSString, m_stringLength)), regT2); + load32(Address(regT0, OBJECT_OFFSETOF(JSString, m_length)), regT2); Jump string_failureCases3 = branch32(Above, regT2, Imm32(INT_MAX)); move(regT2, regT0); @@ -1517,7 +1517,7 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr* executable Jump string_failureCases2 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr)); // Checks out okay! - get the length from the Ustring. 
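This hunk and the matching one below rename the field the string-length trampoline reads (m_stringLength to m_length, tracking the JSString rename merged here); the fast path itself is unchanged. As a rough C++ model of what the generated trampoline checks (the structs below are simplified stand-ins, not the real JSC classes):

    #include <climits>
    #include <cstdint>

    struct VTableTag {};                     // stand-in: the real check compares vptrs
    static VTableTag jsStringVPtrTag;        // the JIT compares against m_globalData->jsStringVPtr

    struct StringCell {                      // simplified stand-in for JSString
        VTableTag* vptr;                     // first word of the cell
        uint32_t m_length;                   // renamed from m_stringLength in this merge
    };

    // Returns true and yields the length when the fast path applies; on any
    // failure the trampoline branches back to the generic get_by_id slow case.
    bool stringLengthFastPath(StringCell* cell, int32_t& result)
    {
        if (cell->vptr != &jsStringVPtrTag)  // string_failureCases2
            return false;
        if (cell->m_length > INT_MAX)        // string_failureCases3 (INT_MAX here;
            return false;                    // maxImmediateInt in the JSVALUE64 build)
        result = static_cast<int32_t>(cell->m_length);
        return true;
    }
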
- load32(Address(regT0, OBJECT_OFFSETOF(JSString, m_stringLength)), regT0); + load32(Address(regT0, OBJECT_OFFSETOF(JSString, m_length)), regT0); Jump string_failureCases3 = branch32(Above, regT0, Imm32(JSImmediate::maxImmediateInt)); diff --git a/JavaScriptCore/jit/JITPropertyAccess.cpp b/JavaScriptCore/jit/JITPropertyAccess.cpp index ef95f99..151bb03 100644 --- a/JavaScriptCore/jit/JITPropertyAccess.cpp +++ b/JavaScriptCore/jit/JITPropertyAccess.cpp @@ -24,1001 +24,32 @@ */ #include "config.h" -#include "JIT.h" - -#if ENABLE(JIT) - -#include "CodeBlock.h" -#include "JITInlineMethods.h" -#include "JITStubCall.h" -#include "JSArray.h" -#include "JSFunction.h" -#include "JSPropertyNameIterator.h" -#include "Interpreter.h" -#include "LinkBuffer.h" -#include "RepatchBuffer.h" -#include "ResultType.h" -#include "SamplingTool.h" - -#ifndef NDEBUG -#include -#endif - -using namespace std; - -namespace JSC { - -#if USE(JSVALUE32_64) - -void JIT::emit_op_put_by_index(Instruction* currentInstruction) -{ - unsigned base = currentInstruction[1].u.operand; - unsigned property = currentInstruction[2].u.operand; - unsigned value = currentInstruction[3].u.operand; - - JITStubCall stubCall(this, cti_op_put_by_index); - stubCall.addArgument(base); - stubCall.addArgument(Imm32(property)); - stubCall.addArgument(value); - stubCall.call(); -} - -void JIT::emit_op_put_getter(Instruction* currentInstruction) -{ - unsigned base = currentInstruction[1].u.operand; - unsigned property = currentInstruction[2].u.operand; - unsigned function = currentInstruction[3].u.operand; - - JITStubCall stubCall(this, cti_op_put_getter); - stubCall.addArgument(base); - stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(property))); - stubCall.addArgument(function); - stubCall.call(); -} - -void JIT::emit_op_put_setter(Instruction* currentInstruction) -{ - unsigned base = currentInstruction[1].u.operand; - unsigned property = currentInstruction[2].u.operand; - unsigned function = currentInstruction[3].u.operand; - - JITStubCall stubCall(this, cti_op_put_setter); - stubCall.addArgument(base); - stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(property))); - stubCall.addArgument(function); - stubCall.call(); -} - -void JIT::emit_op_del_by_id(Instruction* currentInstruction) -{ - unsigned dst = currentInstruction[1].u.operand; - unsigned base = currentInstruction[2].u.operand; - unsigned property = currentInstruction[3].u.operand; - - JITStubCall stubCall(this, cti_op_del_by_id); - stubCall.addArgument(base); - stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(property))); - stubCall.call(dst); -} - - -#if !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) - -/* ------------------------------ BEGIN: !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */ - -// Treat these as nops - the call will be handed as a regular get_by_id/op_call pair. 
-void JIT::emit_op_method_check(Instruction*) {} -void JIT::emitSlow_op_method_check(Instruction*, Vector::iterator&) { ASSERT_NOT_REACHED(); } -#if ENABLE(JIT_OPTIMIZE_METHOD_CALLS) -#error "JIT_OPTIMIZE_METHOD_CALLS requires JIT_OPTIMIZE_PROPERTY_ACCESS" -#endif - -void JIT::emit_op_get_by_val(Instruction* currentInstruction) -{ - unsigned dst = currentInstruction[1].u.operand; - unsigned base = currentInstruction[2].u.operand; - unsigned property = currentInstruction[3].u.operand; - - JITStubCall stubCall(this, cti_op_get_by_val); - stubCall.addArgument(base); - stubCall.addArgument(property); - stubCall.call(dst); -} - -void JIT::emitSlow_op_get_by_val(Instruction*, Vector::iterator&) -{ - ASSERT_NOT_REACHED(); -} - -void JIT::emit_op_put_by_val(Instruction* currentInstruction) -{ - unsigned base = currentInstruction[1].u.operand; - unsigned property = currentInstruction[2].u.operand; - unsigned value = currentInstruction[3].u.operand; - - JITStubCall stubCall(this, cti_op_put_by_val); - stubCall.addArgument(base); - stubCall.addArgument(property); - stubCall.addArgument(value); - stubCall.call(); -} - -void JIT::emitSlow_op_put_by_val(Instruction*, Vector::iterator&) -{ - ASSERT_NOT_REACHED(); -} - -void JIT::emit_op_get_by_id(Instruction* currentInstruction) -{ - int dst = currentInstruction[1].u.operand; - int base = currentInstruction[2].u.operand; - int ident = currentInstruction[3].u.operand; - - JITStubCall stubCall(this, cti_op_get_by_id_generic); - stubCall.addArgument(base); - stubCall.addArgument(ImmPtr(&(m_codeBlock->identifier(ident)))); - stubCall.call(dst); - - m_propertyAccessInstructionIndex++; -} - -void JIT::emitSlow_op_get_by_id(Instruction*, Vector::iterator&) -{ - m_propertyAccessInstructionIndex++; - ASSERT_NOT_REACHED(); -} - -void JIT::emit_op_put_by_id(Instruction* currentInstruction) -{ - int base = currentInstruction[1].u.operand; - int ident = currentInstruction[2].u.operand; - int value = currentInstruction[3].u.operand; - - JITStubCall stubCall(this, cti_op_put_by_id_generic); - stubCall.addArgument(base); - stubCall.addArgument(ImmPtr(&(m_codeBlock->identifier(ident)))); - stubCall.addArgument(value); - stubCall.call(); - - m_propertyAccessInstructionIndex++; -} - -void JIT::emitSlow_op_put_by_id(Instruction*, Vector::iterator&) -{ - m_propertyAccessInstructionIndex++; - ASSERT_NOT_REACHED(); -} - -#else // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) - -/* ------------------------------ BEGIN: ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */ - -#if ENABLE(JIT_OPTIMIZE_METHOD_CALLS) - -void JIT::emit_op_method_check(Instruction* currentInstruction) -{ - // Assert that the following instruction is a get_by_id. - ASSERT(m_interpreter->getOpcodeID((currentInstruction + OPCODE_LENGTH(op_method_check))->u.opcode) == op_get_by_id); - - currentInstruction += OPCODE_LENGTH(op_method_check); - - // Do the method check - check the object & its prototype's structure inline (this is the common case). 
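The inline sequence planted here compiles down to two structure compares and one constant load. Read as C++, the fast path that patchMethodCallProto later fills in looks roughly like this (the names are simplified stand-ins; the real values are patched directly into the instruction stream):

    struct Structure;
    struct Cell { Structure* m_structure; };

    // Patched-in constants: initially the never-matching defaults planted above.
    static Structure* cachedStructure = 0;        // info.structureToCompare
    static Cell*      cachedPrototype = 0;        // protoObj (moveWithPatch)
    static Structure* cachedProtoStructure = 0;   // protoStructureToCompare
    static void*      cachedFunction = 0;         // putFunction

    // Returns the method as a constant when both shapes still match; otherwise
    // control falls through to the regular(ish) get_by_id path compiled below.
    void* methodCheckFastPath(Cell* base, bool& matched)
    {
        matched = base->m_structure == cachedStructure
            && cachedPrototype->m_structure == cachedProtoStructure;
        return matched ? cachedFunction : 0;
    }
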
- m_methodCallCompilationInfo.append(MethodCallCompilationInfo(m_propertyAccessInstructionIndex)); - MethodCallCompilationInfo& info = m_methodCallCompilationInfo.last(); - - int dst = currentInstruction[1].u.operand; - int base = currentInstruction[2].u.operand; - - emitLoad(base, regT1, regT0); - emitJumpSlowCaseIfNotJSCell(base, regT1); - - BEGIN_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck); - - Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), info.structureToCompare, ImmPtr(reinterpret_cast(patchGetByIdDefaultStructure))); - DataLabelPtr protoStructureToCompare, protoObj = moveWithPatch(ImmPtr(0), regT2); - Jump protoStructureCheck = branchPtrWithPatch(NotEqual, Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), protoStructureToCompare, ImmPtr(reinterpret_cast(patchGetByIdDefaultStructure))); - - // This will be relinked to load the function without doing a load. - DataLabelPtr putFunction = moveWithPatch(ImmPtr(0), regT0); - - END_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck); - - move(Imm32(JSValue::CellTag), regT1); - Jump match = jump(); - - ASSERT(differenceBetween(info.structureToCompare, protoObj) == patchOffsetMethodCheckProtoObj); - ASSERT(differenceBetween(info.structureToCompare, protoStructureToCompare) == patchOffsetMethodCheckProtoStruct); - ASSERT(differenceBetween(info.structureToCompare, putFunction) == patchOffsetMethodCheckPutFunction); - - // Link the failure cases here. - structureCheck.link(this); - protoStructureCheck.link(this); - - // Do a regular(ish) get_by_id (the slow case will be link to - // cti_op_get_by_id_method_check instead of cti_op_get_by_id. - compileGetByIdHotPath(); - - match.link(this); - emitStore(dst, regT1, regT0); - map(m_bytecodeIndex + OPCODE_LENGTH(op_method_check), dst, regT1, regT0); - - // We've already generated the following get_by_id, so make sure it's skipped over. - m_bytecodeIndex += OPCODE_LENGTH(op_get_by_id); -} - -void JIT::emitSlow_op_method_check(Instruction* currentInstruction, Vector::iterator& iter) -{ - currentInstruction += OPCODE_LENGTH(op_method_check); - - int dst = currentInstruction[1].u.operand; - int base = currentInstruction[2].u.operand; - int ident = currentInstruction[3].u.operand; - - compileGetByIdSlowCase(dst, base, &(m_codeBlock->identifier(ident)), iter, true); - - // We've already generated the following get_by_id, so make sure it's skipped over. - m_bytecodeIndex += OPCODE_LENGTH(op_get_by_id); -} - -#else //!ENABLE(JIT_OPTIMIZE_METHOD_CALLS) - -// Treat these as nops - the call will be handed as a regular get_by_id/op_call pair. 
-void JIT::emit_op_method_check(Instruction*) {} -void JIT::emitSlow_op_method_check(Instruction*, Vector::iterator&) { ASSERT_NOT_REACHED(); } - -#endif - -void JIT::emit_op_get_by_val(Instruction* currentInstruction) -{ - unsigned dst = currentInstruction[1].u.operand; - unsigned base = currentInstruction[2].u.operand; - unsigned property = currentInstruction[3].u.operand; - - emitLoad2(base, regT1, regT0, property, regT3, regT2); - - addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag))); - emitJumpSlowCaseIfNotJSCell(base, regT1); - addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr))); - - loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT3); - addSlowCase(branch32(AboveOrEqual, regT2, Address(regT0, OBJECT_OFFSETOF(JSArray, m_vectorLength)))); - - load32(BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + 4), regT1); // tag - load32(BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), regT0); // payload - addSlowCase(branch32(Equal, regT1, Imm32(JSValue::EmptyValueTag))); - - emitStore(dst, regT1, regT0); - map(m_bytecodeIndex + OPCODE_LENGTH(op_get_by_val), dst, regT1, regT0); -} - -void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector::iterator& iter) -{ - unsigned dst = currentInstruction[1].u.operand; - unsigned base = currentInstruction[2].u.operand; - unsigned property = currentInstruction[3].u.operand; - - linkSlowCase(iter); // property int32 check - linkSlowCaseIfNotJSCell(iter, base); // base cell check - linkSlowCase(iter); // base array check - linkSlowCase(iter); // vector length check - linkSlowCase(iter); // empty value - - JITStubCall stubCall(this, cti_op_get_by_val); - stubCall.addArgument(base); - stubCall.addArgument(property); - stubCall.call(dst); -} - -void JIT::emit_op_put_by_val(Instruction* currentInstruction) -{ - unsigned base = currentInstruction[1].u.operand; - unsigned property = currentInstruction[2].u.operand; - unsigned value = currentInstruction[3].u.operand; - - emitLoad2(base, regT1, regT0, property, regT3, regT2); - - addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag))); - emitJumpSlowCaseIfNotJSCell(base, regT1); - addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr))); - addSlowCase(branch32(AboveOrEqual, regT2, Address(regT0, OBJECT_OFFSETOF(JSArray, m_vectorLength)))); - - loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT3); - - Jump empty = branch32(Equal, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + 4), Imm32(JSValue::EmptyValueTag)); - - Label storeResult(this); - emitLoad(value, regT1, regT0); - store32(regT0, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]))); // payload - store32(regT1, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + 4)); // tag - Jump end = jump(); - - empty.link(this); - add32(Imm32(1), Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector))); - branch32(Below, regT2, Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_length))).linkTo(storeResult, this); - - add32(Imm32(1), regT2, regT0); - store32(regT0, Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_length))); - jump().linkTo(storeResult, this); - - end.link(this); -} - -void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector::iterator& iter) -{ - unsigned base = currentInstruction[1].u.operand; - unsigned property = currentInstruction[2].u.operand; - unsigned 
value = currentInstruction[3].u.operand; - - linkSlowCase(iter); // property int32 check - linkSlowCaseIfNotJSCell(iter, base); // base cell check - linkSlowCase(iter); // base not array check - linkSlowCase(iter); // in vector check - - JITStubCall stubPutByValCall(this, cti_op_put_by_val); - stubPutByValCall.addArgument(base); - stubPutByValCall.addArgument(property); - stubPutByValCall.addArgument(value); - stubPutByValCall.call(); -} - -void JIT::emit_op_get_by_id(Instruction* currentInstruction) -{ - int dst = currentInstruction[1].u.operand; - int base = currentInstruction[2].u.operand; - - emitLoad(base, regT1, regT0); - emitJumpSlowCaseIfNotJSCell(base, regT1); - compileGetByIdHotPath(); - emitStore(dst, regT1, regT0); - map(m_bytecodeIndex + OPCODE_LENGTH(op_get_by_id), dst, regT1, regT0); -} - -void JIT::compileGetByIdHotPath() -{ - // As for put_by_id, get_by_id requires the offset of the Structure and the offset of the access to be patched. - // Additionally, for get_by_id we need patch the offset of the branch to the slow case (we patch this to jump - // to array-length / prototype access tranpolines, and finally we also the the property-map access offset as a label - // to jump back to if one of these trampolies finds a match. - - BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath); - - Label hotPathBegin(this); - m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].hotPathBegin = hotPathBegin; - m_propertyAccessInstructionIndex++; - - DataLabelPtr structureToCompare; - Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast(patchGetByIdDefaultStructure))); - addSlowCase(structureCheck); - ASSERT(differenceBetween(hotPathBegin, structureToCompare) == patchOffsetGetByIdStructure); - ASSERT(differenceBetween(hotPathBegin, structureCheck) == patchOffsetGetByIdBranchToSlowCase); - - Label externalLoad = loadPtrWithPatchToLEA(Address(regT0, OBJECT_OFFSETOF(JSObject, m_externalStorage)), regT2); - Label externalLoadComplete(this); - ASSERT(differenceBetween(hotPathBegin, externalLoad) == patchOffsetGetByIdExternalLoad); - ASSERT(differenceBetween(externalLoad, externalLoadComplete) == patchLengthGetByIdExternalLoad); - - DataLabel32 displacementLabel1 = loadPtrWithAddressOffsetPatch(Address(regT2, patchGetByIdDefaultOffset), regT0); // payload - ASSERT(differenceBetween(hotPathBegin, displacementLabel1) == patchOffsetGetByIdPropertyMapOffset1); - DataLabel32 displacementLabel2 = loadPtrWithAddressOffsetPatch(Address(regT2, patchGetByIdDefaultOffset), regT1); // tag - ASSERT(differenceBetween(hotPathBegin, displacementLabel2) == patchOffsetGetByIdPropertyMapOffset2); - - Label putResult(this); - ASSERT(differenceBetween(hotPathBegin, putResult) == patchOffsetGetByIdPutResult); - - END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath); -} - -void JIT::emitSlow_op_get_by_id(Instruction* currentInstruction, Vector::iterator& iter) -{ - int dst = currentInstruction[1].u.operand; - int base = currentInstruction[2].u.operand; - int ident = currentInstruction[3].u.operand; - - compileGetByIdSlowCase(dst, base, &(m_codeBlock->identifier(ident)), iter); -} - -void JIT::compileGetByIdSlowCase(int dst, int base, Identifier* ident, Vector::iterator& iter, bool isMethodCheck) -{ - // As for the hot path of get_by_id, above, we ensure that we can use an architecture specific offset - // so that we only need track one pointer into the slow case code - we track a pointer to the location - // of 
the call (which we can use to look up the patch information), but should a array-length or - // prototype access trampoline fail we want to bail out back to here. To do so we can subtract back - // the distance from the call to the head of the slow case. - linkSlowCaseIfNotJSCell(iter, base); - linkSlowCase(iter); - - BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase); - -#ifndef NDEBUG - Label coldPathBegin(this); -#endif - JITStubCall stubCall(this, isMethodCheck ? cti_op_get_by_id_method_check : cti_op_get_by_id); - stubCall.addArgument(regT1, regT0); - stubCall.addArgument(ImmPtr(ident)); - Call call = stubCall.call(dst); - - END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase); - - ASSERT(differenceBetween(coldPathBegin, call) == patchOffsetGetByIdSlowCaseCall); - - // Track the location of the call; this will be used to recover patch information. - m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].callReturnLocation = call; - m_propertyAccessInstructionIndex++; -} - -void JIT::emit_op_put_by_id(Instruction* currentInstruction) -{ - // In order to be able to patch both the Structure, and the object offset, we store one pointer, - // to just after the arguments have been loaded into registers 'hotPathBegin', and we generate code - // such that the Structure & offset are always at the same distance from this. - - int base = currentInstruction[1].u.operand; - int value = currentInstruction[3].u.operand; - - emitLoad2(base, regT1, regT0, value, regT3, regT2); - - emitJumpSlowCaseIfNotJSCell(base, regT1); - - BEGIN_UNINTERRUPTED_SEQUENCE(sequencePutById); - - Label hotPathBegin(this); - m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].hotPathBegin = hotPathBegin; - m_propertyAccessInstructionIndex++; - - // It is important that the following instruction plants a 32bit immediate, in order that it can be patched over. - DataLabelPtr structureToCompare; - addSlowCase(branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast(patchGetByIdDefaultStructure)))); - ASSERT(differenceBetween(hotPathBegin, structureToCompare) == patchOffsetPutByIdStructure); - - // Plant a load from a bogus ofset in the object's property map; we will patch this later, if it is to be used. 
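Until the first successful cache fill, the structure constant and the two displacement labels planted in this hot path hold the dummy patchGetByIdDefaultStructure / patchGetByIdDefaultOffset values, so the compare always fails and execution reaches the slow case; patchPutByIdReplace then rewrites them in place. Once primed, the hot path behaves like this sketch (stand-in types; JSVALUE32_64 boxing with a 4-byte payload followed by a 4-byte tag):

    #include <cstdint>

    struct Structure;
    struct ObjectCell { Structure* m_structure; };

    static Structure* patchedStructure = 0;  // rewritten from patchGetByIdDefaultStructure
    static int32_t payloadDisplacement = 0;  // rewritten from patchGetByIdDefaultOffset
    static int32_t tagDisplacement = 0;      // always payloadDisplacement + 4

    // Sketch of the primed put_by_id hot path: one compare, two 32-bit stores.
    bool putByIdFastPath(ObjectCell* base, char* propertyStorage,
                         int32_t payload, int32_t tag)
    {
        if (base->m_structure != patchedStructure)
            return false;                    // the addSlowCase branch
        *reinterpret_cast<int32_t*>(propertyStorage + payloadDisplacement) = payload;
        *reinterpret_cast<int32_t*>(propertyStorage + tagDisplacement) = tag;
        return true;
    }
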
- Label externalLoad = loadPtrWithPatchToLEA(Address(regT0, OBJECT_OFFSETOF(JSObject, m_externalStorage)), regT0); - Label externalLoadComplete(this); - ASSERT(differenceBetween(hotPathBegin, externalLoad) == patchOffsetPutByIdExternalLoad); - ASSERT(differenceBetween(externalLoad, externalLoadComplete) == patchLengthPutByIdExternalLoad); - - DataLabel32 displacementLabel1 = storePtrWithAddressOffsetPatch(regT2, Address(regT0, patchGetByIdDefaultOffset)); // payload - DataLabel32 displacementLabel2 = storePtrWithAddressOffsetPatch(regT3, Address(regT0, patchGetByIdDefaultOffset)); // tag - - END_UNINTERRUPTED_SEQUENCE(sequencePutById); - - ASSERT(differenceBetween(hotPathBegin, displacementLabel1) == patchOffsetPutByIdPropertyMapOffset1); - ASSERT(differenceBetween(hotPathBegin, displacementLabel2) == patchOffsetPutByIdPropertyMapOffset2); -} - -void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector::iterator& iter) -{ - int base = currentInstruction[1].u.operand; - int ident = currentInstruction[2].u.operand; - - linkSlowCaseIfNotJSCell(iter, base); - linkSlowCase(iter); - - JITStubCall stubCall(this, cti_op_put_by_id); - stubCall.addArgument(regT1, regT0); - stubCall.addArgument(ImmPtr(&(m_codeBlock->identifier(ident)))); - stubCall.addArgument(regT3, regT2); - Call call = stubCall.call(); - - // Track the location of the call; this will be used to recover patch information. - m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].callReturnLocation = call; - m_propertyAccessInstructionIndex++; -} - -// Compile a store into an object's property storage. May overwrite base. -void JIT::compilePutDirectOffset(RegisterID base, RegisterID valueTag, RegisterID valuePayload, Structure* structure, size_t cachedOffset) -{ - int offset = cachedOffset; - if (structure->isUsingInlineStorage()) - offset += OBJECT_OFFSETOF(JSObject, m_inlineStorage) / sizeof(Register); - else - loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), base); - emitStore(offset, valueTag, valuePayload, base); -} - -// Compile a load from an object's property storage. May overwrite base. 
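The store above and the load that follows resolve a cached offset identically: inline storage is addressed relative to the cell itself, while external storage costs one extra pointer load. A compact C++ restatement (stand-in types, not the real JSObject):

    #include <cstddef>
    #include <cstdint>

    struct Slot { int32_t payload; int32_t tag; };     // one 8-byte JSValue slot (32_64)

    struct PropertyHolder {                            // stand-in for JSObject
        Slot  m_inlineStorage[4];                      // small objects: slots live in the cell
        Slot* m_externalStorage;                       // larger objects: slots live out of line
        bool  usingInlineStorage;
    };

    // How compilePutDirectOffset / compileGetDirectOffset address a slot.
    Slot* resolveDirectOffset(PropertyHolder* base, size_t cachedOffset)
    {
        if (base->usingInlineStorage)                  // offset biased by m_inlineStorage
            return &base->m_inlineStorage[cachedOffset];
        return &base->m_externalStorage[cachedOffset]; // extra load, then index
    }
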
-void JIT::compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, Structure* structure, size_t cachedOffset) -{ - int offset = cachedOffset; - if (structure->isUsingInlineStorage()) - offset += OBJECT_OFFSETOF(JSObject, m_inlineStorage) / sizeof(Register); - else - loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), base); - emitLoad(offset, resultTag, resultPayload, base); -} - -void JIT::compileGetDirectOffset(JSObject* base, RegisterID temp, RegisterID resultTag, RegisterID resultPayload, size_t cachedOffset) -{ - if (base->isUsingInlineStorage()) { - load32(reinterpret_cast(&base->m_inlineStorage[cachedOffset]), resultPayload); - load32(reinterpret_cast(&base->m_inlineStorage[cachedOffset]) + 4, resultTag); - return; - } - - size_t offset = cachedOffset * sizeof(JSValue); - - PropertyStorage* protoPropertyStorage = &base->m_externalStorage; - loadPtr(static_cast(protoPropertyStorage), temp); - load32(Address(temp, offset), resultPayload); - load32(Address(temp, offset + 4), resultTag); -} - -void JIT::testPrototype(Structure* structure, JumpList& failureCases) -{ - if (structure->m_prototype.isNull()) - return; - - failureCases.append(branchPtr(NotEqual, AbsoluteAddress(&asCell(structure->m_prototype)->m_structure), ImmPtr(asCell(structure->m_prototype)->m_structure))); -} - -void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress) -{ - // It is assumed that regT0 contains the basePayload and regT1 contains the baseTag. The value can be found on the stack. - - JumpList failureCases; - failureCases.append(branch32(NotEqual, regT1, Imm32(JSValue::CellTag))); - failureCases.append(branchPtr(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), ImmPtr(oldStructure))); - testPrototype(oldStructure, failureCases); - - // Verify that nothing in the prototype chain has a setter for this property. - for (RefPtr* it = chain->head(); *it; ++it) - testPrototype(it->get(), failureCases); - - // Reallocate property storage if needed. - Call callTarget; - bool willNeedStorageRealloc = oldStructure->propertyStorageCapacity() != newStructure->propertyStorageCapacity(); - if (willNeedStorageRealloc) { - // This trampoline was called to like a JIT stub; before we can can call again we need to - // remove the return address from the stack, to prevent the stack from becoming misaligned. 
- preserveReturnAddressAfterCall(regT3); - - JITStubCall stubCall(this, cti_op_put_by_id_transition_realloc); - stubCall.skipArgument(); // base - stubCall.skipArgument(); // ident - stubCall.skipArgument(); // value - stubCall.addArgument(Imm32(oldStructure->propertyStorageCapacity())); - stubCall.addArgument(Imm32(newStructure->propertyStorageCapacity())); - stubCall.call(regT0); - - restoreReturnAddressBeforeReturn(regT3); - } - - sub32(Imm32(1), AbsoluteAddress(oldStructure->addressOfCount())); - add32(Imm32(1), AbsoluteAddress(newStructure->addressOfCount())); - storePtr(ImmPtr(newStructure), Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure))); - - load32(Address(stackPointerRegister, offsetof(struct JITStackFrame, args[2]) + sizeof(void*)), regT3); - load32(Address(stackPointerRegister, offsetof(struct JITStackFrame, args[2]) + sizeof(void*) + 4), regT2); - - // Write the value - compilePutDirectOffset(regT0, regT2, regT3, newStructure, cachedOffset); - - ret(); - - ASSERT(!failureCases.empty()); - failureCases.link(this); - restoreArgumentReferenceForTrampoline(); - Call failureCall = tailRecursiveCall(); - - LinkBuffer patchBuffer(this, m_codeBlock->executablePool()); - - patchBuffer.link(failureCall, FunctionPtr(cti_op_put_by_id_fail)); - - if (willNeedStorageRealloc) { - ASSERT(m_calls.size() == 1); - patchBuffer.link(m_calls[0].from, FunctionPtr(cti_op_put_by_id_transition_realloc)); - } - - CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum(); - stubInfo->stubRoutine = entryLabel; - RepatchBuffer repatchBuffer(m_codeBlock); - repatchBuffer.relinkCallerToTrampoline(returnAddress, entryLabel); -} - -void JIT::patchGetByIdSelf(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress) -{ - RepatchBuffer repatchBuffer(codeBlock); - - // We don't want to patch more than once - in future go to cti_op_get_by_id_generic. - // Should probably go to JITStubs::cti_op_get_by_id_fail, but that doesn't do anything interesting right now. - repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_self_fail)); - - int offset = sizeof(JSValue) * cachedOffset; - - // If we're patching to use inline storage, convert the initial load to a lea; this avoids the extra load - // and makes the subsequent load's offset automatically correct - if (structure->isUsingInlineStorage()) - repatchBuffer.repatchLoadPtrToLEA(stubInfo->hotPathBegin.instructionAtOffset(patchOffsetGetByIdExternalLoad)); - - // Patch the offset into the propoerty map to load from, then patch the Structure to look for. 
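The three repatch calls that follow write, in order, the new Structure to compare against and the two 32-bit displacements for the slot. With 8-byte boxed values the arithmetic the patcher performs is simply (a sketch):

    #include <cstddef>
    #include <cstdint>

    // sizeof(JSValue) == 8 under JSVALUE32_64: payload first, tag 4 bytes later.
    void patchOffsets(size_t cachedOffset, int32_t& payloadOffset, int32_t& tagOffset)
    {
        payloadOffset = static_cast<int32_t>(8 * cachedOffset); // ...PropertyMapOffset1
        tagOffset = payloadOffset + 4;                          // ...PropertyMapOffset2
    }
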
- repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetGetByIdStructure), structure); - repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetGetByIdPropertyMapOffset1), offset); // payload - repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetGetByIdPropertyMapOffset2), offset + 4); // tag -} - -void JIT::patchMethodCallProto(CodeBlock* codeBlock, MethodCallLinkInfo& methodCallLinkInfo, JSFunction* callee, Structure* structure, JSObject* proto, ReturnAddressPtr returnAddress) -{ - RepatchBuffer repatchBuffer(codeBlock); - - ASSERT(!methodCallLinkInfo.cachedStructure); - methodCallLinkInfo.cachedStructure = structure; - structure->ref(); - - Structure* prototypeStructure = proto->structure(); - ASSERT(!methodCallLinkInfo.cachedPrototypeStructure); - methodCallLinkInfo.cachedPrototypeStructure = prototypeStructure; - prototypeStructure->ref(); - - repatchBuffer.repatch(methodCallLinkInfo.structureLabel, structure); - repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoObj), proto); - repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoStruct), prototypeStructure); - repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckPutFunction), callee); - - repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id)); -} - -void JIT::patchPutByIdReplace(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress) -{ - RepatchBuffer repatchBuffer(codeBlock); - - // We don't want to patch more than once - in future go to cti_op_put_by_id_generic. - // Should probably go to cti_op_put_by_id_fail, but that doesn't do anything interesting right now. - repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_put_by_id_generic)); - - int offset = sizeof(JSValue) * cachedOffset; - - // If we're patching to use inline storage, convert the initial load to a lea; this avoids the extra load - // and makes the subsequent load's offset automatically correct - if (structure->isUsingInlineStorage()) - repatchBuffer.repatchLoadPtrToLEA(stubInfo->hotPathBegin.instructionAtOffset(patchOffsetPutByIdExternalLoad)); - - // Patch the offset into the propoerty map to load from, then patch the Structure to look for. - repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetPutByIdStructure), structure); - repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset1), offset); // payload - repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset2), offset + 4); // tag -} - -void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress) -{ - StructureStubInfo* stubInfo = &m_codeBlock->getStubInfo(returnAddress); - - // regT0 holds a JSCell* - - // Check for array - Jump failureCases1 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)); - - // Checks out okay! 
- get the length from the storage - loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT2); - load32(Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length)), regT2); - - Jump failureCases2 = branch32(Above, regT2, Imm32(INT_MAX)); - move(regT2, regT0); - move(Imm32(JSValue::Int32Tag), regT1); - Jump success = jump(); - - LinkBuffer patchBuffer(this, m_codeBlock->executablePool()); - - // Use the patch information to link the failure cases back to the original slow case routine. - CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall); - patchBuffer.link(failureCases1, slowCaseBegin); - patchBuffer.link(failureCases2, slowCaseBegin); - - // On success return back to the hot patch code, at a point it will perform the store to dest for us. - patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult)); - - // Track the stub we have created so that it will be deleted later. - CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum(); - stubInfo->stubRoutine = entryLabel; - - // Finally patch the jump to slow case back in the hot path to jump here instead. - CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase); - RepatchBuffer repatchBuffer(m_codeBlock); - repatchBuffer.relink(jumpLocation, entryLabel); - - // We don't want to patch more than once - in future go to cti_op_put_by_id_generic. - repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_array_fail)); -} - -void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame) -{ - // regT0 holds a JSCell* - - // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is - // referencing the prototype object - let's speculatively load it's table nice and early!) - JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame)); - - Jump failureCases1 = checkStructure(regT0, structure); - - // Check the prototype object's Structure had not changed. - Structure** prototypeStructureAddress = &(protoObject->m_structure); -#if CPU(X86_64) - move(ImmPtr(prototypeStructure), regT3); - Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3); -#else - Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure)); -#endif - - // Checks out okay! - getDirectOffset - compileGetDirectOffset(protoObject, regT2, regT1, regT0, cachedOffset); - - Jump success = jump(); - - LinkBuffer patchBuffer(this, m_codeBlock->executablePool()); - - // Use the patch information to link the failure cases back to the original slow case routine. - CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall); - patchBuffer.link(failureCases1, slowCaseBegin); - patchBuffer.link(failureCases2, slowCaseBegin); - - // On success return back to the hot patch code, at a point it will perform the store to dest for us. - patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult)); - - // Track the stub we have created so that it will be deleted later. - CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum(); - stubInfo->stubRoutine = entryLabel; - - // Finally patch the jump to slow case back in the hot path to jump here instead. 
- CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase); - RepatchBuffer repatchBuffer(m_codeBlock); - repatchBuffer.relink(jumpLocation, entryLabel); - - // We don't want to patch more than once - in future go to cti_op_put_by_id_generic. - repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list)); -} - - -void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, size_t cachedOffset) -{ - // regT0 holds a JSCell* - - Jump failureCase = checkStructure(regT0, structure); - compileGetDirectOffset(regT0, regT1, regT0, structure, cachedOffset); - Jump success = jump(); - - LinkBuffer patchBuffer(this, m_codeBlock->executablePool()); - - // Use the patch information to link the failure cases back to the original slow case routine. - CodeLocationLabel lastProtoBegin = polymorphicStructures->list[currentIndex - 1].stubRoutine; - if (!lastProtoBegin) - lastProtoBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall); - - patchBuffer.link(failureCase, lastProtoBegin); - - // On success return back to the hot patch code, at a point it will perform the store to dest for us. - patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult)); - - CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum(); - - structure->ref(); - polymorphicStructures->list[currentIndex].set(entryLabel, structure); - - // Finally patch the jump to slow case back in the hot path to jump here instead. - CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase); - RepatchBuffer repatchBuffer(m_codeBlock); - repatchBuffer.relink(jumpLocation, entryLabel); -} - -void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, CallFrame* callFrame) -{ - // regT0 holds a JSCell* - - // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is - // referencing the prototype object - let's speculatively load it's table nice and early!) - JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame)); - - // Check eax is an object of the right Structure. - Jump failureCases1 = checkStructure(regT0, structure); - - // Check the prototype object's Structure had not changed. - Structure** prototypeStructureAddress = &(protoObject->m_structure); -#if CPU(X86_64) - move(ImmPtr(prototypeStructure), regT3); - Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3); -#else - Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure)); -#endif - - compileGetDirectOffset(protoObject, regT2, regT1, regT0, cachedOffset); - - Jump success = jump(); - - LinkBuffer patchBuffer(this, m_codeBlock->executablePool()); - - // Use the patch information to link the failure cases back to the original slow case routine. - CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine; - patchBuffer.link(failureCases1, lastProtoBegin); - patchBuffer.link(failureCases2, lastProtoBegin); - - // On success return back to the hot patch code, at a point it will perform the store to dest for us. 
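Each stub added to one of these polymorphic lists links its failure case to lastProtoBegin, the previously compiled stub (or, for the first entry, the original slow-case call), while the hot path's slow-case jump is relinked to the newest stub. The effective dispatch is newest-first (a control-flow sketch only, not JSC code):

    struct PolymorphicStub {
        // Each compiled stub guards one Structure (or chain) and, on a match,
        // loads the slot and jumps back to patchOffsetGetByIdPutResult.
        bool (*tryFastPath)(void* cell);
    };

    // Relinked jump -> list[count - 1]; each failure edge -> the previous stub.
    bool dispatchPolymorphicGetById(PolymorphicStub* list, int count, void* cell)
    {
        for (int i = count - 1; i >= 0; --i) {
            if (list[i].tryFastPath(cell))
                return true;
        }
        return false;   // the first stub's failure edge: the original slow case
    }
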
- patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult)); - - CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum(); - - structure->ref(); - prototypeStructure->ref(); - prototypeStructures->list[currentIndex].set(entryLabel, structure, prototypeStructure); - - // Finally patch the jump to slow case back in the hot path to jump here instead. - CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase); - RepatchBuffer repatchBuffer(m_codeBlock); - repatchBuffer.relink(jumpLocation, entryLabel); -} - -void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, CallFrame* callFrame) -{ - // regT0 holds a JSCell* - - ASSERT(count); - - JumpList bucketsOfFail; - - // Check eax is an object of the right Structure. - bucketsOfFail.append(checkStructure(regT0, structure)); - - Structure* currStructure = structure; - RefPtr* chainEntries = chain->head(); - JSObject* protoObject = 0; - for (unsigned i = 0; i < count; ++i) { - protoObject = asObject(currStructure->prototypeForLookup(callFrame)); - currStructure = chainEntries[i].get(); - - // Check the prototype object's Structure had not changed. - Structure** prototypeStructureAddress = &(protoObject->m_structure); -#if CPU(X86_64) - move(ImmPtr(currStructure), regT3); - bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3)); -#else - bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure))); -#endif - } - ASSERT(protoObject); - - compileGetDirectOffset(protoObject, regT2, regT1, regT0, cachedOffset); - Jump success = jump(); - - LinkBuffer patchBuffer(this, m_codeBlock->executablePool()); - - // Use the patch information to link the failure cases back to the original slow case routine. - CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine; - - patchBuffer.link(bucketsOfFail, lastProtoBegin); - - // On success return back to the hot patch code, at a point it will perform the store to dest for us. - patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult)); - - CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum(); - // Track the stub we have created so that it will be deleted later. - structure->ref(); - chain->ref(); - prototypeStructures->list[currentIndex].set(entryLabel, structure, chain); - - // Finally patch the jump to slow case back in the hot path to jump here instead. - CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase); - RepatchBuffer repatchBuffer(m_codeBlock); - repatchBuffer.relink(jumpLocation, entryLabel); -} +#if !USE(JSVALUE32_64) -void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame) -{ - // regT0 holds a JSCell* - - ASSERT(count); - - JumpList bucketsOfFail; +#include "JIT.h" - // Check eax is an object of the right Structure. 
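privateCompileGetByIdChainList (above) and privateCompileGetByIdChain (below) emit one structure compare per prototype link before loading from the final holder. In the generated code each prototype's structure address is baked in as an absolute address; the loop below models the same guards with explicit pointers instead (a sketch under that simplification):

    struct Structure;
    struct ChainCell { Structure* m_structure; ChainCell* prototype; };

    // expected[0] guards the base; expected[1..links] guard each prototype.
    bool chainGuardsHold(ChainCell* base, Structure* const* expected, unsigned links)
    {
        if (base->m_structure != expected[0])      // checkStructure(regT0, structure)
            return false;
        ChainCell* proto = base->prototype;
        for (unsigned i = 0; i < links; ++i) {
            if (proto->m_structure != expected[i + 1])
                return false;                      // bucketsOfFail
            proto = proto->prototype;
        }
        return true;                               // safe to read cachedOffset
    }
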
- bucketsOfFail.append(checkStructure(regT0, structure)); +#if ENABLE(JIT) - Structure* currStructure = structure; - RefPtr* chainEntries = chain->head(); - JSObject* protoObject = 0; - for (unsigned i = 0; i < count; ++i) { - protoObject = asObject(currStructure->prototypeForLookup(callFrame)); - currStructure = chainEntries[i].get(); +#include "CodeBlock.h" +#include "JITInlineMethods.h" +#include "JITStubCall.h" +#include "JSArray.h" +#include "JSFunction.h" +#include "JSPropertyNameIterator.h" +#include "Interpreter.h" +#include "LinkBuffer.h" +#include "RepatchBuffer.h" +#include "ResultType.h" +#include "SamplingTool.h" - // Check the prototype object's Structure had not changed. - Structure** prototypeStructureAddress = &(protoObject->m_structure); -#if CPU(X86_64) - move(ImmPtr(currStructure), regT3); - bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3)); -#else - bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure))); +#ifndef NDEBUG +#include #endif - } - ASSERT(protoObject); - - compileGetDirectOffset(protoObject, regT2, regT1, regT0, cachedOffset); - Jump success = jump(); - - LinkBuffer patchBuffer(this, m_codeBlock->executablePool()); - - // Use the patch information to link the failure cases back to the original slow case routine. - patchBuffer.link(bucketsOfFail, stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall)); - - // On success return back to the hot patch code, at a point it will perform the store to dest for us. - patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult)); - - // Track the stub we have created so that it will be deleted later. - CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum(); - stubInfo->stubRoutine = entryLabel; - - // Finally patch the jump to slow case back in the hot path to jump here instead. - CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase); - RepatchBuffer repatchBuffer(m_codeBlock); - repatchBuffer.relink(jumpLocation, entryLabel); - - // We don't want to patch more than once - in future go to cti_op_put_by_id_generic. 
- repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list)); -} - -/* ------------------------------ END: !ENABLE / ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */ - -#endif // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) - -void JIT::compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, RegisterID structure, RegisterID offset) -{ - ASSERT(sizeof(((Structure*)0)->m_propertyStorageCapacity) == sizeof(int32_t)); - ASSERT(sizeof(JSObject::inlineStorageCapacity) == sizeof(int32_t)); - ASSERT(sizeof(JSValue) == 8); - - Jump notUsingInlineStorage = branch32(NotEqual, Address(structure, OBJECT_OFFSETOF(Structure, m_propertyStorageCapacity)), Imm32(JSObject::inlineStorageCapacity)); - loadPtr(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSObject, m_inlineStorage)+OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayload); - loadPtr(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSObject, m_inlineStorage)+OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTag); - Jump finishedLoad = jump(); - notUsingInlineStorage.link(this); - loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), base); - loadPtr(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayload); - loadPtr(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTag); - finishedLoad.link(this); -} - -void JIT::emit_op_get_by_pname(Instruction* currentInstruction) -{ - unsigned dst = currentInstruction[1].u.operand; - unsigned base = currentInstruction[2].u.operand; - unsigned property = currentInstruction[3].u.operand; - unsigned expected = currentInstruction[4].u.operand; - unsigned iter = currentInstruction[5].u.operand; - unsigned i = currentInstruction[6].u.operand; - - emitLoad2(property, regT1, regT0, base, regT3, regT2); - emitJumpSlowCaseIfNotJSCell(property, regT1); - addSlowCase(branchPtr(NotEqual, regT0, payloadFor(expected))); - // Property registers are now available as the property is known - emitJumpSlowCaseIfNotJSCell(base, regT3); - emitLoadPayload(iter, regT1); - - // Test base's structure - loadPtr(Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), regT0); - addSlowCase(branchPtr(NotEqual, regT0, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructure)))); - load32(addressFor(i), regT3); - sub32(Imm32(1), regT3); - addSlowCase(branch32(AboveOrEqual, regT3, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_numCacheableSlots)))); - compileGetDirectOffset(regT2, regT1, regT0, regT0, regT3); - emitStore(dst, regT1, regT0); - map(m_bytecodeIndex + OPCODE_LENGTH(op_get_by_pname), dst, regT1, regT0); -} - -void JIT::emitSlow_op_get_by_pname(Instruction* currentInstruction, Vector::iterator& iter) -{ - unsigned dst = currentInstruction[1].u.operand; - unsigned base = currentInstruction[2].u.operand; - unsigned property = currentInstruction[3].u.operand; - - linkSlowCaseIfNotJSCell(iter, property); - linkSlowCase(iter); - linkSlowCaseIfNotJSCell(iter, base); - linkSlowCase(iter); - linkSlowCase(iter); - - JITStubCall stubCall(this, cti_op_get_by_val); - stubCall.addArgument(base); - stubCall.addArgument(property); - stubCall.call(dst); -} +using namespace std; -#else // USE(JSVALUE32_64) +namespace JSC { void JIT::emit_op_get_by_val(Instruction* currentInstruction) { @@ -1594,7 +625,6 @@ void JIT::patchMethodCallProto(CodeBlock* codeBlock, MethodCallLinkInfo& methodC structure->ref(); Structure* prototypeStructure = 
proto->structure(); - ASSERT(!methodCallLinkInfo.cachedPrototypeStructure); methodCallLinkInfo.cachedPrototypeStructure = prototypeStructure; prototypeStructure->ref(); @@ -1894,8 +924,8 @@ void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* str #endif // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) -#endif // USE(JSVALUE32_64) - } // namespace JSC #endif // ENABLE(JIT) + +#endif // !USE(JSVALUE32_64) diff --git a/JavaScriptCore/jit/JITPropertyAccess32_64.cpp b/JavaScriptCore/jit/JITPropertyAccess32_64.cpp new file mode 100644 index 0000000..f9e323d --- /dev/null +++ b/JavaScriptCore/jit/JITPropertyAccess32_64.cpp @@ -0,0 +1,1026 @@ +/* + * Copyright (C) 2008, 2009 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "config.h" + +#if USE(JSVALUE32_64) + +#include "JIT.h" + +#if ENABLE(JIT) + +#include "CodeBlock.h" +#include "JITInlineMethods.h" +#include "JITStubCall.h" +#include "JSArray.h" +#include "JSFunction.h" +#include "JSPropertyNameIterator.h" +#include "Interpreter.h" +#include "LinkBuffer.h" +#include "RepatchBuffer.h" +#include "ResultType.h" +#include "SamplingTool.h" + +#ifndef NDEBUG +#include +#endif + +using namespace std; + +namespace JSC { + +void JIT::emit_op_put_by_index(Instruction* currentInstruction) +{ + unsigned base = currentInstruction[1].u.operand; + unsigned property = currentInstruction[2].u.operand; + unsigned value = currentInstruction[3].u.operand; + + JITStubCall stubCall(this, cti_op_put_by_index); + stubCall.addArgument(base); + stubCall.addArgument(Imm32(property)); + stubCall.addArgument(value); + stubCall.call(); +} + +void JIT::emit_op_put_getter(Instruction* currentInstruction) +{ + unsigned base = currentInstruction[1].u.operand; + unsigned property = currentInstruction[2].u.operand; + unsigned function = currentInstruction[3].u.operand; + + JITStubCall stubCall(this, cti_op_put_getter); + stubCall.addArgument(base); + stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(property))); + stubCall.addArgument(function); + stubCall.call(); +} + +void JIT::emit_op_put_setter(Instruction* currentInstruction) +{ + unsigned base = currentInstruction[1].u.operand; + unsigned property = currentInstruction[2].u.operand; + unsigned function = currentInstruction[3].u.operand; + + JITStubCall stubCall(this, cti_op_put_setter); + stubCall.addArgument(base); + stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(property))); + stubCall.addArgument(function); + stubCall.call(); +} + +void JIT::emit_op_del_by_id(Instruction* currentInstruction) +{ + unsigned dst = currentInstruction[1].u.operand; + unsigned base = currentInstruction[2].u.operand; + unsigned property = currentInstruction[3].u.operand; + + JITStubCall stubCall(this, cti_op_del_by_id); + stubCall.addArgument(base); + stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(property))); + stubCall.call(dst); +} + + +#if !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) + +/* ------------------------------ BEGIN: !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */ + +// Treat these as nops - the call will be handed as a regular get_by_id/op_call pair. 
+void JIT::emit_op_method_check(Instruction*) {} +void JIT::emitSlow_op_method_check(Instruction*, Vector::iterator&) { ASSERT_NOT_REACHED(); } +#if ENABLE(JIT_OPTIMIZE_METHOD_CALLS) +#error "JIT_OPTIMIZE_METHOD_CALLS requires JIT_OPTIMIZE_PROPERTY_ACCESS" +#endif + +void JIT::emit_op_get_by_val(Instruction* currentInstruction) +{ + unsigned dst = currentInstruction[1].u.operand; + unsigned base = currentInstruction[2].u.operand; + unsigned property = currentInstruction[3].u.operand; + + JITStubCall stubCall(this, cti_op_get_by_val); + stubCall.addArgument(base); + stubCall.addArgument(property); + stubCall.call(dst); +} + +void JIT::emitSlow_op_get_by_val(Instruction*, Vector::iterator&) +{ + ASSERT_NOT_REACHED(); +} + +void JIT::emit_op_put_by_val(Instruction* currentInstruction) +{ + unsigned base = currentInstruction[1].u.operand; + unsigned property = currentInstruction[2].u.operand; + unsigned value = currentInstruction[3].u.operand; + + JITStubCall stubCall(this, cti_op_put_by_val); + stubCall.addArgument(base); + stubCall.addArgument(property); + stubCall.addArgument(value); + stubCall.call(); +} + +void JIT::emitSlow_op_put_by_val(Instruction*, Vector::iterator&) +{ + ASSERT_NOT_REACHED(); +} + +void JIT::emit_op_get_by_id(Instruction* currentInstruction) +{ + int dst = currentInstruction[1].u.operand; + int base = currentInstruction[2].u.operand; + int ident = currentInstruction[3].u.operand; + + JITStubCall stubCall(this, cti_op_get_by_id_generic); + stubCall.addArgument(base); + stubCall.addArgument(ImmPtr(&(m_codeBlock->identifier(ident)))); + stubCall.call(dst); + + m_propertyAccessInstructionIndex++; +} + +void JIT::emitSlow_op_get_by_id(Instruction*, Vector::iterator&) +{ + m_propertyAccessInstructionIndex++; + ASSERT_NOT_REACHED(); +} + +void JIT::emit_op_put_by_id(Instruction* currentInstruction) +{ + int base = currentInstruction[1].u.operand; + int ident = currentInstruction[2].u.operand; + int value = currentInstruction[3].u.operand; + + JITStubCall stubCall(this, cti_op_put_by_id_generic); + stubCall.addArgument(base); + stubCall.addArgument(ImmPtr(&(m_codeBlock->identifier(ident)))); + stubCall.addArgument(value); + stubCall.call(); + + m_propertyAccessInstructionIndex++; +} + +void JIT::emitSlow_op_put_by_id(Instruction*, Vector::iterator&) +{ + m_propertyAccessInstructionIndex++; + ASSERT_NOT_REACHED(); +} + +#else // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) + +/* ------------------------------ BEGIN: ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */ + +#if ENABLE(JIT_OPTIMIZE_METHOD_CALLS) + +void JIT::emit_op_method_check(Instruction* currentInstruction) +{ + // Assert that the following instruction is a get_by_id. + ASSERT(m_interpreter->getOpcodeID((currentInstruction + OPCODE_LENGTH(op_method_check))->u.opcode) == op_get_by_id); + + currentInstruction += OPCODE_LENGTH(op_method_check); + + // Do the method check - check the object & its prototype's structure inline (this is the common case). 
+ m_methodCallCompilationInfo.append(MethodCallCompilationInfo(m_propertyAccessInstructionIndex)); + MethodCallCompilationInfo& info = m_methodCallCompilationInfo.last(); + + int dst = currentInstruction[1].u.operand; + int base = currentInstruction[2].u.operand; + + emitLoad(base, regT1, regT0); + emitJumpSlowCaseIfNotJSCell(base, regT1); + + BEGIN_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck); + + Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), info.structureToCompare, ImmPtr(reinterpret_cast(patchGetByIdDefaultStructure))); + DataLabelPtr protoStructureToCompare, protoObj = moveWithPatch(ImmPtr(0), regT2); + Jump protoStructureCheck = branchPtrWithPatch(NotEqual, Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), protoStructureToCompare, ImmPtr(reinterpret_cast(patchGetByIdDefaultStructure))); + + // This will be relinked to load the function without doing a load. + DataLabelPtr putFunction = moveWithPatch(ImmPtr(0), regT0); + + END_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck); + + move(Imm32(JSValue::CellTag), regT1); + Jump match = jump(); + + ASSERT(differenceBetween(info.structureToCompare, protoObj) == patchOffsetMethodCheckProtoObj); + ASSERT(differenceBetween(info.structureToCompare, protoStructureToCompare) == patchOffsetMethodCheckProtoStruct); + ASSERT(differenceBetween(info.structureToCompare, putFunction) == patchOffsetMethodCheckPutFunction); + + // Link the failure cases here. + structureCheck.link(this); + protoStructureCheck.link(this); + + // Do a regular(ish) get_by_id (the slow case will be link to + // cti_op_get_by_id_method_check instead of cti_op_get_by_id. + compileGetByIdHotPath(); + + match.link(this); + emitStore(dst, regT1, regT0); + map(m_bytecodeIndex + OPCODE_LENGTH(op_method_check), dst, regT1, regT0); + + // We've already generated the following get_by_id, so make sure it's skipped over. + m_bytecodeIndex += OPCODE_LENGTH(op_get_by_id); +} + +void JIT::emitSlow_op_method_check(Instruction* currentInstruction, Vector::iterator& iter) +{ + currentInstruction += OPCODE_LENGTH(op_method_check); + + int dst = currentInstruction[1].u.operand; + int base = currentInstruction[2].u.operand; + int ident = currentInstruction[3].u.operand; + + compileGetByIdSlowCase(dst, base, &(m_codeBlock->identifier(ident)), iter, true); + + // We've already generated the following get_by_id, so make sure it's skipped over. + m_bytecodeIndex += OPCODE_LENGTH(op_get_by_id); +} + +#else //!ENABLE(JIT_OPTIMIZE_METHOD_CALLS) + +// Treat these as nops - the call will be handed as a regular get_by_id/op_call pair. 
+void JIT::emit_op_method_check(Instruction*) {}
+void JIT::emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&) { ASSERT_NOT_REACHED(); }
+
+#endif
+
+void JIT::emit_op_get_by_val(Instruction* currentInstruction)
+{
+    unsigned dst = currentInstruction[1].u.operand;
+    unsigned base = currentInstruction[2].u.operand;
+    unsigned property = currentInstruction[3].u.operand;
+
+    emitLoad2(base, regT1, regT0, property, regT3, regT2);
+
+    addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+    emitJumpSlowCaseIfNotJSCell(base, regT1);
+    addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));
+
+    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT3);
+    addSlowCase(branch32(AboveOrEqual, regT2, Address(regT0, OBJECT_OFFSETOF(JSArray, m_vectorLength))));
+
+    load32(BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + 4), regT1); // tag
+    load32(BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), regT0); // payload
+    addSlowCase(branch32(Equal, regT1, Imm32(JSValue::EmptyValueTag)));
+
+    emitStore(dst, regT1, regT0);
+    map(m_bytecodeIndex + OPCODE_LENGTH(op_get_by_val), dst, regT1, regT0);
+}
+
+void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+    unsigned dst = currentInstruction[1].u.operand;
+    unsigned base = currentInstruction[2].u.operand;
+    unsigned property = currentInstruction[3].u.operand;
+
+    linkSlowCase(iter); // property int32 check
+    linkSlowCaseIfNotJSCell(iter, base); // base cell check
+    linkSlowCase(iter); // base array check
+    linkSlowCase(iter); // vector length check
+    linkSlowCase(iter); // empty value
+
+    JITStubCall stubCall(this, cti_op_get_by_val);
+    stubCall.addArgument(base);
+    stubCall.addArgument(property);
+    stubCall.call(dst);
+}
+
+void JIT::emit_op_put_by_val(Instruction* currentInstruction)
+{
+    unsigned base = currentInstruction[1].u.operand;
+    unsigned property = currentInstruction[2].u.operand;
+    unsigned value = currentInstruction[3].u.operand;
+
+    emitLoad2(base, regT1, regT0, property, regT3, regT2);
+
+    addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+    emitJumpSlowCaseIfNotJSCell(base, regT1);
+    addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));
+    addSlowCase(branch32(AboveOrEqual, regT2, Address(regT0, OBJECT_OFFSETOF(JSArray, m_vectorLength))));
+
+    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT3);
+
+    Jump empty = branch32(Equal, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + 4), Imm32(JSValue::EmptyValueTag));
+
+    Label storeResult(this);
+    emitLoad(value, regT1, regT0);
+    store32(regT0, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]))); // payload
+    store32(regT1, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + 4)); // tag
+    Jump end = jump();
+
+    empty.link(this);
+    add32(Imm32(1), Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
+    branch32(Below, regT2, Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_length))).linkTo(storeResult, this);
+
+    add32(Imm32(1), regT2, regT0);
+    store32(regT0, Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_length)));
+    jump().linkTo(storeResult, this);
+
+    end.link(this);
+}
+
+void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+    unsigned base = currentInstruction[1].u.operand;
+    unsigned property = currentInstruction[2].u.operand;
+    unsigned value = currentInstruction[3].u.operand;
+
+    linkSlowCase(iter); // property int32 check
+    linkSlowCaseIfNotJSCell(iter, base); // base cell check
+    linkSlowCase(iter); // base not array check
+    linkSlowCase(iter); // in vector check
+
+    JITStubCall stubPutByValCall(this, cti_op_put_by_val);
+    stubPutByValCall.addArgument(base);
+    stubPutByValCall.addArgument(property);
+    stubPutByValCall.addArgument(value);
+    stubPutByValCall.call();
+}
+
+void JIT::emit_op_get_by_id(Instruction* currentInstruction)
+{
+    int dst = currentInstruction[1].u.operand;
+    int base = currentInstruction[2].u.operand;
+
+    emitLoad(base, regT1, regT0);
+    emitJumpSlowCaseIfNotJSCell(base, regT1);
+    compileGetByIdHotPath();
+    emitStore(dst, regT1, regT0);
+    map(m_bytecodeIndex + OPCODE_LENGTH(op_get_by_id), dst, regT1, regT0);
+}
+
+void JIT::compileGetByIdHotPath()
+{
+    // As for put_by_id, get_by_id requires the offset of the Structure and the offset of the access to be patched.
+    // Additionally, for get_by_id we need to patch the offset of the branch to the slow case (we patch this to jump
+    // to array-length / prototype access trampolines), and finally we also use the property-map access offset as a
+    // label to jump back to if one of these trampolines finds a match.
+
+    BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);
+
+    Label hotPathBegin(this);
+    m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;
+    m_propertyAccessInstructionIndex++;
+
+    DataLabelPtr structureToCompare;
+    Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
+    addSlowCase(structureCheck);
+    ASSERT(differenceBetween(hotPathBegin, structureToCompare) == patchOffsetGetByIdStructure);
+    ASSERT(differenceBetween(hotPathBegin, structureCheck) == patchOffsetGetByIdBranchToSlowCase);
+
+    Label externalLoad = loadPtrWithPatchToLEA(Address(regT0, OBJECT_OFFSETOF(JSObject, m_externalStorage)), regT2);
+    Label externalLoadComplete(this);
+    ASSERT(differenceBetween(hotPathBegin, externalLoad) == patchOffsetGetByIdExternalLoad);
+    ASSERT(differenceBetween(externalLoad, externalLoadComplete) == patchLengthGetByIdExternalLoad);
+
+    DataLabel32 displacementLabel1 = loadPtrWithAddressOffsetPatch(Address(regT2, patchGetByIdDefaultOffset), regT0); // payload
+    ASSERT(differenceBetween(hotPathBegin, displacementLabel1) == patchOffsetGetByIdPropertyMapOffset1);
+    DataLabel32 displacementLabel2 = loadPtrWithAddressOffsetPatch(Address(regT2, patchGetByIdDefaultOffset), regT1); // tag
+    ASSERT(differenceBetween(hotPathBegin, displacementLabel2) == patchOffsetGetByIdPropertyMapOffset2);
+
+    Label putResult(this);
+    ASSERT(differenceBetween(hotPathBegin, putResult) == patchOffsetGetByIdPutResult);
+
+    END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);
+}
+
+void JIT::emitSlow_op_get_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+    int dst = currentInstruction[1].u.operand;
+    int base = currentInstruction[2].u.operand;
+    int ident = currentInstruction[3].u.operand;
+
+    compileGetByIdSlowCase(dst, base, &(m_codeBlock->identifier(ident)), iter);
+}
+
+void JIT::compileGetByIdSlowCase(int dst, int base, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, bool isMethodCheck)
+{
+    // As for the hot path of get_by_id, above, we ensure that we can use an architecture specific offset
+    // so that we only need to track one pointer into the slow case code - we track a pointer to the location
+    // of the call (which we can use to look up the patch information), but should an array-length or
+    // prototype access trampoline fail we want to bail out back to here. To do so we can subtract back
+    // the distance from the call to the head of the slow case.
+    linkSlowCaseIfNotJSCell(iter, base);
+    linkSlowCase(iter);
+
+    BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase);
+
+#ifndef NDEBUG
+    Label coldPathBegin(this);
+#endif
+    JITStubCall stubCall(this, isMethodCheck ? cti_op_get_by_id_method_check : cti_op_get_by_id);
+    stubCall.addArgument(regT1, regT0);
+    stubCall.addArgument(ImmPtr(ident));
+    Call call = stubCall.call(dst);
+
+    END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase);
+
+    ASSERT(differenceBetween(coldPathBegin, call) == patchOffsetGetByIdSlowCaseCall);
+
+    // Track the location of the call; this will be used to recover patch information.
+    m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].callReturnLocation = call;
+    m_propertyAccessInstructionIndex++;
+}
+
+void JIT::emit_op_put_by_id(Instruction* currentInstruction)
+{
+    // In order to be able to patch both the Structure and the object offset, we store one pointer,
+    // 'hotPathBegin', to just after the point where the arguments have been loaded into registers,
+    // and we generate code such that the Structure & offset are always at the same distance from it.
+
+    int base = currentInstruction[1].u.operand;
+    int value = currentInstruction[3].u.operand;
+
+    emitLoad2(base, regT1, regT0, value, regT3, regT2);
+
+    emitJumpSlowCaseIfNotJSCell(base, regT1);
+
+    BEGIN_UNINTERRUPTED_SEQUENCE(sequencePutById);
+
+    Label hotPathBegin(this);
+    m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;
+    m_propertyAccessInstructionIndex++;
+
+    // It is important that the following instruction plants a 32-bit immediate, in order that it can be patched over.
+    DataLabelPtr structureToCompare;
+    addSlowCase(branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))));
+    ASSERT(differenceBetween(hotPathBegin, structureToCompare) == patchOffsetPutByIdStructure);
+
+    // Plant a load from a bogus offset in the object's property map; we will patch this later, if it is to be used.
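+    // Storage note (informal): a property lives either inline in the object
+    // (JSObject::m_inlineStorage) or in a separately allocated array reached
+    // via JSObject::m_externalStorage. The patchable load below fetches the
+    // external storage pointer; if the property turns out to be inline, the
+    // load is later converted to a LEA of the inline storage, so the same
+    // displacement patching covers both layouts.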
+    Label externalLoad = loadPtrWithPatchToLEA(Address(regT0, OBJECT_OFFSETOF(JSObject, m_externalStorage)), regT0);
+    Label externalLoadComplete(this);
+    ASSERT(differenceBetween(hotPathBegin, externalLoad) == patchOffsetPutByIdExternalLoad);
+    ASSERT(differenceBetween(externalLoad, externalLoadComplete) == patchLengthPutByIdExternalLoad);
+
+    DataLabel32 displacementLabel1 = storePtrWithAddressOffsetPatch(regT2, Address(regT0, patchGetByIdDefaultOffset)); // payload
+    DataLabel32 displacementLabel2 = storePtrWithAddressOffsetPatch(regT3, Address(regT0, patchGetByIdDefaultOffset)); // tag
+
+    END_UNINTERRUPTED_SEQUENCE(sequencePutById);
+
+    ASSERT(differenceBetween(hotPathBegin, displacementLabel1) == patchOffsetPutByIdPropertyMapOffset1);
+    ASSERT(differenceBetween(hotPathBegin, displacementLabel2) == patchOffsetPutByIdPropertyMapOffset2);
+}
+
+void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+    int base = currentInstruction[1].u.operand;
+    int ident = currentInstruction[2].u.operand;
+
+    linkSlowCaseIfNotJSCell(iter, base);
+    linkSlowCase(iter);
+
+    JITStubCall stubCall(this, cti_op_put_by_id);
+    stubCall.addArgument(regT1, regT0);
+    stubCall.addArgument(ImmPtr(&(m_codeBlock->identifier(ident))));
+    stubCall.addArgument(regT3, regT2);
+    Call call = stubCall.call();
+
+    // Track the location of the call; this will be used to recover patch information.
+    m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].callReturnLocation = call;
+    m_propertyAccessInstructionIndex++;
+}
+
+// Compile a store into an object's property storage. May overwrite base.
+void JIT::compilePutDirectOffset(RegisterID base, RegisterID valueTag, RegisterID valuePayload, Structure* structure, size_t cachedOffset)
+{
+    int offset = cachedOffset;
+    if (structure->isUsingInlineStorage())
+        offset += OBJECT_OFFSETOF(JSObject, m_inlineStorage) / sizeof(Register);
+    else
+        loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), base);
+    emitStore(offset, valueTag, valuePayload, base);
+}
+
+// Compile a load from an object's property storage. May overwrite base.
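+// (Note, informal: 'offset' here counts Register-sized slots. For inline
+// properties the slot offset is biased by m_inlineStorage's position within
+// JSObject, while external properties need one extra load to fetch the
+// storage pointer first; emitLoad/emitStore then split each JSValue into its
+// 32-bit payload and tag halves, as everywhere else in JSVALUE32_64.)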
+void JIT::compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, Structure* structure, size_t cachedOffset)
+{
+    int offset = cachedOffset;
+    if (structure->isUsingInlineStorage())
+        offset += OBJECT_OFFSETOF(JSObject, m_inlineStorage) / sizeof(Register);
+    else
+        loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), base);
+    emitLoad(offset, resultTag, resultPayload, base);
+}
+
+void JIT::compileGetDirectOffset(JSObject* base, RegisterID temp, RegisterID resultTag, RegisterID resultPayload, size_t cachedOffset)
+{
+    if (base->isUsingInlineStorage()) {
+        load32(reinterpret_cast<char*>(&base->m_inlineStorage[cachedOffset]), resultPayload);
+        load32(reinterpret_cast<char*>(&base->m_inlineStorage[cachedOffset]) + 4, resultTag);
+        return;
+    }
+
+    size_t offset = cachedOffset * sizeof(JSValue);
+
+    PropertyStorage* protoPropertyStorage = &base->m_externalStorage;
+    loadPtr(static_cast<void*>(protoPropertyStorage), temp);
+    load32(Address(temp, offset), resultPayload);
+    load32(Address(temp, offset + 4), resultTag);
+}
+
+void JIT::testPrototype(Structure* structure, JumpList& failureCases)
+{
+    if (structure->m_prototype.isNull())
+        return;
+
+    failureCases.append(branchPtr(NotEqual, AbsoluteAddress(&asCell(structure->m_prototype)->m_structure), ImmPtr(asCell(structure->m_prototype)->m_structure)));
+}
+
+void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress)
+{
+    // It is assumed that regT0 contains the basePayload and regT1 contains the baseTag. The value can be found on the stack.
+
+    JumpList failureCases;
+    failureCases.append(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));
+    failureCases.append(branchPtr(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), ImmPtr(oldStructure)));
+    testPrototype(oldStructure, failureCases);
+
+    // Verify that nothing in the prototype chain has a setter for this property.
+    for (RefPtr<Structure>* it = chain->head(); *it; ++it)
+        testPrototype(it->get(), failureCases);
+
+    // Reallocate property storage if needed.
+    Call callTarget;
+    bool willNeedStorageRealloc = oldStructure->propertyStorageCapacity() != newStructure->propertyStorageCapacity();
+    if (willNeedStorageRealloc) {
+        // This trampoline is called like a JIT stub; before we can call again we need to
+        // remove the return address from the stack, to prevent the stack from becoming misaligned.
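+        // (In outline: stash the return address in regT3, call the realloc stub
+        // with the stack realigned, then restore the return address before
+        // falling through - see the preserve/restore pair below.)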
+        preserveReturnAddressAfterCall(regT3);
+
+        JITStubCall stubCall(this, cti_op_put_by_id_transition_realloc);
+        stubCall.skipArgument(); // base
+        stubCall.skipArgument(); // ident
+        stubCall.skipArgument(); // value
+        stubCall.addArgument(Imm32(oldStructure->propertyStorageCapacity()));
+        stubCall.addArgument(Imm32(newStructure->propertyStorageCapacity()));
+        stubCall.call(regT0);
+
+        restoreReturnAddressBeforeReturn(regT3);
+    }
+
+    sub32(Imm32(1), AbsoluteAddress(oldStructure->addressOfCount()));
+    add32(Imm32(1), AbsoluteAddress(newStructure->addressOfCount()));
+    storePtr(ImmPtr(newStructure), Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)));
+
+    load32(Address(stackPointerRegister, offsetof(struct JITStackFrame, args[2]) + sizeof(void*)), regT3);
+    load32(Address(stackPointerRegister, offsetof(struct JITStackFrame, args[2]) + sizeof(void*) + 4), regT2);
+
+    // Write the value
+    compilePutDirectOffset(regT0, regT2, regT3, newStructure, cachedOffset);
+
+    ret();
+
+    ASSERT(!failureCases.empty());
+    failureCases.link(this);
+    restoreArgumentReferenceForTrampoline();
+    Call failureCall = tailRecursiveCall();
+
+    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+
+    patchBuffer.link(failureCall, FunctionPtr(cti_op_put_by_id_fail));
+
+    if (willNeedStorageRealloc) {
+        ASSERT(m_calls.size() == 1);
+        patchBuffer.link(m_calls[0].from, FunctionPtr(cti_op_put_by_id_transition_realloc));
+    }
+
+    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
+    stubInfo->stubRoutine = entryLabel;
+    RepatchBuffer repatchBuffer(m_codeBlock);
+    repatchBuffer.relinkCallerToTrampoline(returnAddress, entryLabel);
+}
+
+void JIT::patchGetByIdSelf(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress)
+{
+    RepatchBuffer repatchBuffer(codeBlock);
+
+    // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
+    // Should probably go to JITStubs::cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
+    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_self_fail));
+
+    int offset = sizeof(JSValue) * cachedOffset;
+
+    // If we're patching to use inline storage, convert the initial load to a lea; this avoids the extra load
+    // and makes the subsequent load's offset automatically correct.
+    if (structure->isUsingInlineStorage())
+        repatchBuffer.repatchLoadPtrToLEA(stubInfo->hotPathBegin.instructionAtOffset(patchOffsetGetByIdExternalLoad));
+
+    // Patch the offset into the property map to load from, then patch the Structure to look for.
+    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetGetByIdStructure), structure);
+    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetGetByIdPropertyMapOffset1), offset); // payload
+    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetGetByIdPropertyMapOffset2), offset + 4); // tag
+}
+
+void JIT::patchMethodCallProto(CodeBlock* codeBlock, MethodCallLinkInfo& methodCallLinkInfo, JSFunction* callee, Structure* structure, JSObject* proto, ReturnAddressPtr returnAddress)
+{
+    RepatchBuffer repatchBuffer(codeBlock);
+
+    ASSERT(!methodCallLinkInfo.cachedStructure);
+    methodCallLinkInfo.cachedStructure = structure;
+    structure->ref();
+
+    Structure* prototypeStructure = proto->structure();
+    methodCallLinkInfo.cachedPrototypeStructure = prototypeStructure;
+    prototypeStructure->ref();
+
+    repatchBuffer.repatch(methodCallLinkInfo.structureLabel, structure);
+    repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoObj), proto);
+    repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoStruct), prototypeStructure);
+    repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckPutFunction), callee);
+
+    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id));
+}
+
+void JIT::patchPutByIdReplace(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress)
+{
+    RepatchBuffer repatchBuffer(codeBlock);
+
+    // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
+    // Should probably go to cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
+    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_put_by_id_generic));
+
+    int offset = sizeof(JSValue) * cachedOffset;
+
+    // If we're patching to use inline storage, convert the initial load to a lea; this avoids the extra load
+    // and makes the subsequent load's offset automatically correct.
+    if (structure->isUsingInlineStorage())
+        repatchBuffer.repatchLoadPtrToLEA(stubInfo->hotPathBegin.instructionAtOffset(patchOffsetPutByIdExternalLoad));
+
+    // Patch the offset into the property map to load from, then patch the Structure to look for.
+    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetPutByIdStructure), structure);
+    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset1), offset); // payload
+    repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset2), offset + 4); // tag
+}
+
+void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress)
+{
+    StructureStubInfo* stubInfo = &m_codeBlock->getStubInfo(returnAddress);
+
+    // regT0 holds a JSCell*
+
+    // Check for array
+    Jump failureCases1 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr));
+
+    // Checks out okay! - get the length from the storage
+    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT2);
+    load32(Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length)), regT2);
+
+    Jump failureCases2 = branch32(Above, regT2, Imm32(INT_MAX));
+    move(regT2, regT0);
+    move(Imm32(JSValue::Int32Tag), regT1);
+    Jump success = jump();
+
+    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+
+    // Use the patch information to link the failure cases back to the original slow case routine.
+    CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
+    patchBuffer.link(failureCases1, slowCaseBegin);
+    patchBuffer.link(failureCases2, slowCaseBegin);
+
+    // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
+    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
+
+    // Track the stub we have created so that it will be deleted later.
+    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
+    stubInfo->stubRoutine = entryLabel;
+
+    // Finally patch the jump to slow case back in the hot path to jump here instead.
+    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+    RepatchBuffer repatchBuffer(m_codeBlock);
+    repatchBuffer.relink(jumpLocation, entryLabel);
+
+    // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
+    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_array_fail));
+}
+
+void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
+{
+    // regT0 holds a JSCell*
+
+    // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
+    // referencing the prototype object - let's speculatively load its table nice and early!)
+    JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
+
+    Jump failureCases1 = checkStructure(regT0, structure);
+
+    // Check the prototype object's Structure has not changed.
+    Structure** prototypeStructureAddress = &(protoObject->m_structure);
+#if CPU(X86_64)
+    move(ImmPtr(prototypeStructure), regT3);
+    Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
+#else
+    Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
+#endif
+
+    // Checks out okay! - getDirectOffset
+    compileGetDirectOffset(protoObject, regT2, regT1, regT0, cachedOffset);
+
+    Jump success = jump();
+
+    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+
+    // Use the patch information to link the failure cases back to the original slow case routine.
+    CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
+    patchBuffer.link(failureCases1, slowCaseBegin);
+    patchBuffer.link(failureCases2, slowCaseBegin);
+
+    // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
+    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
+
+    // Track the stub we have created so that it will be deleted later.
+    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
+    stubInfo->stubRoutine = entryLabel;
+
+    // Finally patch the jump to slow case back in the hot path to jump here instead.
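+    // (After this relink, a rough sketch of the resulting control flow:
+    //     fast path --structure mismatch--> stub --hit--> putResult in fast path
+    //                                         \--miss--> original slow case.)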
+    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+    RepatchBuffer repatchBuffer(m_codeBlock);
+    repatchBuffer.relink(jumpLocation, entryLabel);
+
+    // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
+    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
+}
+
+void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, size_t cachedOffset)
+{
+    // regT0 holds a JSCell*
+
+    Jump failureCase = checkStructure(regT0, structure);
+    compileGetDirectOffset(regT0, regT1, regT0, structure, cachedOffset);
+    Jump success = jump();
+
+    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+
+    // Use the patch information to link the failure cases back to the original slow case routine.
+    CodeLocationLabel lastProtoBegin = polymorphicStructures->list[currentIndex - 1].stubRoutine;
+    if (!lastProtoBegin)
+        lastProtoBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
+
+    patchBuffer.link(failureCase, lastProtoBegin);
+
+    // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
+    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
+
+    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
+
+    structure->ref();
+    polymorphicStructures->list[currentIndex].set(entryLabel, structure);
+
+    // Finally patch the jump to slow case back in the hot path to jump here instead.
+    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+    RepatchBuffer repatchBuffer(m_codeBlock);
+    repatchBuffer.relink(jumpLocation, entryLabel);
+}
+
+void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, CallFrame* callFrame)
+{
+    // regT0 holds a JSCell*
+
+    // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
+    // referencing the prototype object - let's speculatively load its table nice and early!)
+    JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
+
+    // Check that regT0 is an object of the right Structure.
+    Jump failureCases1 = checkStructure(regT0, structure);
+
+    // Check the prototype object's Structure has not changed.
+    Structure** prototypeStructureAddress = &(protoObject->m_structure);
+#if CPU(X86_64)
+    move(ImmPtr(prototypeStructure), regT3);
+    Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
+#else
+    Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
+#endif
+
+    compileGetDirectOffset(protoObject, regT2, regT1, regT0, cachedOffset);
+
+    Jump success = jump();
+
+    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+
+    // Use the patch information to link the failure cases back to the original slow case routine.
+    CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
+    patchBuffer.link(failureCases1, lastProtoBegin);
+    patchBuffer.link(failureCases2, lastProtoBegin);
+
+    // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
+    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
+
+    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
+
+    structure->ref();
+    prototypeStructure->ref();
+    prototypeStructures->list[currentIndex].set(entryLabel, structure, prototypeStructure);
+
+    // Finally patch the jump to slow case back in the hot path to jump here instead.
+    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+    RepatchBuffer repatchBuffer(m_codeBlock);
+    repatchBuffer.relink(jumpLocation, entryLabel);
+}
+
+void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, CallFrame* callFrame)
+{
+    // regT0 holds a JSCell*
+
+    ASSERT(count);
+
+    JumpList bucketsOfFail;
+
+    // Check that regT0 is an object of the right Structure.
+    bucketsOfFail.append(checkStructure(regT0, structure));
+
+    Structure* currStructure = structure;
+    RefPtr<Structure>* chainEntries = chain->head();
+    JSObject* protoObject = 0;
+    for (unsigned i = 0; i < count; ++i) {
+        protoObject = asObject(currStructure->prototypeForLookup(callFrame));
+        currStructure = chainEntries[i].get();
+
+        // Check the prototype object's Structure has not changed.
+        Structure** prototypeStructureAddress = &(protoObject->m_structure);
+#if CPU(X86_64)
+        move(ImmPtr(currStructure), regT3);
+        bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3));
+#else
+        bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
+#endif
+    }
+    ASSERT(protoObject);
+
+    compileGetDirectOffset(protoObject, regT2, regT1, regT0, cachedOffset);
+    Jump success = jump();
+
+    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+
+    // Use the patch information to link the failure cases back to the original slow case routine.
+    CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
+
+    patchBuffer.link(bucketsOfFail, lastProtoBegin);
+
+    // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
+    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
+
+    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
+
+    // Track the stub we have created so that it will be deleted later.
+    structure->ref();
+    chain->ref();
+    prototypeStructures->list[currentIndex].set(entryLabel, structure, chain);
+
+    // Finally patch the jump to slow case back in the hot path to jump here instead.
+    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+    RepatchBuffer repatchBuffer(m_codeBlock);
+    repatchBuffer.relink(jumpLocation, entryLabel);
+}
+
+void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
+{
+    // regT0 holds a JSCell*
+
+    ASSERT(count);
+
+    JumpList bucketsOfFail;
+
+    // Check that regT0 is an object of the right Structure.
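+    // (Chain walk, in outline: each iteration below compares one prototype's
+    // Structure pointer against the value cached in the chain; any mismatch
+    // joins 'bucketsOfFail', which is later linked back to the slow case.)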
+    bucketsOfFail.append(checkStructure(regT0, structure));
+
+    Structure* currStructure = structure;
+    RefPtr<Structure>* chainEntries = chain->head();
+    JSObject* protoObject = 0;
+    for (unsigned i = 0; i < count; ++i) {
+        protoObject = asObject(currStructure->prototypeForLookup(callFrame));
+        currStructure = chainEntries[i].get();
+
+        // Check the prototype object's Structure has not changed.
+        Structure** prototypeStructureAddress = &(protoObject->m_structure);
+#if CPU(X86_64)
+        move(ImmPtr(currStructure), regT3);
+        bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3));
+#else
+        bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
+#endif
+    }
+    ASSERT(protoObject);
+
+    compileGetDirectOffset(protoObject, regT2, regT1, regT0, cachedOffset);
+    Jump success = jump();
+
+    LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+
+    // Use the patch information to link the failure cases back to the original slow case routine.
+    patchBuffer.link(bucketsOfFail, stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall));
+
+    // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
+    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
+
+    // Track the stub we have created so that it will be deleted later.
+    CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
+    stubInfo->stubRoutine = entryLabel;
+
+    // Finally patch the jump to slow case back in the hot path to jump here instead.
+    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+    RepatchBuffer repatchBuffer(m_codeBlock);
+    repatchBuffer.relink(jumpLocation, entryLabel);
+
+    // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
+    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
+}
+
+/* ------------------------------ END: !ENABLE / ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
+
+#endif // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+
+void JIT::compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, RegisterID structure, RegisterID offset)
+{
+    ASSERT(sizeof(((Structure*)0)->m_propertyStorageCapacity) == sizeof(int32_t));
+    ASSERT(sizeof(JSObject::inlineStorageCapacity) == sizeof(int32_t));
+    ASSERT(sizeof(JSValue) == 8);
+
+    Jump notUsingInlineStorage = branch32(NotEqual, Address(structure, OBJECT_OFFSETOF(Structure, m_propertyStorageCapacity)), Imm32(JSObject::inlineStorageCapacity));
+    loadPtr(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSObject, m_inlineStorage) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayload);
+    loadPtr(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSObject, m_inlineStorage) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTag);
+    Jump finishedLoad = jump();
+    notUsingInlineStorage.link(this);
+    loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), base);
+    loadPtr(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayload);
+    loadPtr(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTag);
+    finishedLoad.link(this);
+}
+
+void JIT::emit_op_get_by_pname(Instruction* currentInstruction)
+{
+    unsigned dst = currentInstruction[1].u.operand;
+    unsigned base = currentInstruction[2].u.operand;
+    unsigned property = currentInstruction[3].u.operand;
+    unsigned expected = currentInstruction[4].u.operand;
+    unsigned iter = currentInstruction[5].u.operand;
+    unsigned i = currentInstruction[6].u.operand;
+
+    emitLoad2(property, regT1, regT0, base, regT3, regT2);
+    emitJumpSlowCaseIfNotJSCell(property, regT1);
+    addSlowCase(branchPtr(NotEqual, regT0, payloadFor(expected)));
+    // Property registers are now available as the property is known
+    emitJumpSlowCaseIfNotJSCell(base, regT3);
+    emitLoadPayload(iter, regT1);
+
+    // Test base's structure
+    loadPtr(Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), regT0);
+    addSlowCase(branchPtr(NotEqual, regT0, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructure))));
+    load32(addressFor(i), regT3);
+    sub32(Imm32(1), regT3);
+    addSlowCase(branch32(AboveOrEqual, regT3, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_numCacheableSlots))));
+    compileGetDirectOffset(regT2, regT1, regT0, regT0, regT3);
+
+    emitStore(dst, regT1, regT0);
+    map(m_bytecodeIndex + OPCODE_LENGTH(op_get_by_pname), dst, regT1, regT0);
+}
+
+void JIT::emitSlow_op_get_by_pname(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+    unsigned dst = currentInstruction[1].u.operand;
+    unsigned base = currentInstruction[2].u.operand;
+    unsigned property = currentInstruction[3].u.operand;
+
+    linkSlowCaseIfNotJSCell(iter, property);
+    linkSlowCase(iter);
+    linkSlowCaseIfNotJSCell(iter, base);
+    linkSlowCase(iter);
+    linkSlowCase(iter);
+
+    JITStubCall stubCall(this, cti_op_get_by_val);
+    stubCall.addArgument(base);
+    stubCall.addArgument(property);
+    stubCall.call(dst);
+}
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
+
+#endif // ENABLE(JSVALUE32_64)
+
diff --git a/JavaScriptCore/jit/JITStubs.cpp b/JavaScriptCore/jit/JITStubs.cpp
index 85471de..bf430a6 100644
--- a/JavaScriptCore/jit/JITStubs.cpp
+++ b/JavaScriptCore/jit/JITStubs.cpp
@@ -600,7 +600,7 @@
 SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
     "ldr r5, [sp, #0x28]" "\n"
     "ldr r4, [sp, #0x24]" "\n"
     "ldr lr, [sp, #0x20]" "\n"
-    "add sp, sp, #0x3c" "\n"
+    "add sp, sp, #0x40" "\n"
     "bx lr" "\n"
 );
@@ -856,10 +856,11 @@ NEVER_INLINE void JITThunks::tryCacheGetByID(CallFrame* callFrame, CodeBlock* co
     }
 
     // Uncacheable: give up.
-    if (!slot.isCacheable()) {
+    if (!slot.isCacheableValue()) {
         ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(cti_op_get_by_id_generic));
         return;
     }
+    ASSERT(!slot.isGetter());
 
     JSCell* baseCell = asCell(baseValue);
     Structure* structure = baseCell->structure();
@@ -1070,9 +1071,9 @@ RVCT(__asm #rtype# cti_#op#(STUB_ARGS_DECLARATION))
 RVCT({)
 RVCT(    ARM)
 RVCT(    IMPORT JITStubThunked_#op#)
-RVCT(    str lr, [sp, #32])
+RVCT(    str lr, [sp, ##offset#])
 RVCT(    bl JITStubThunked_#op#)
-RVCT(    ldr lr, [sp, #32])
+RVCT(    ldr lr, [sp, ##offset#])
 RVCT(    bx lr)
 RVCT(})
 RVCT()
@@ -1289,7 +1290,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_method_check)
     // If we successfully got something, then the base from which it is being accessed must
     // be an object. (Assertion to ensure asObject() call below is safe, which comes after
     // an isCacheable() check.)
-    ASSERT(!slot.isCacheable() || slot.slotBase().isObject());
+    ASSERT(!slot.isCacheableValue() || slot.slotBase().isObject());
 
     // Check that:
     //   * We're dealing with a JSCell,
@@ -1300,7 +1301,7 @@
     JSCell* specific;
     JSObject* slotBaseObject;
     if (baseValue.isCell()
-        && slot.isCacheable()
+        && slot.isCacheableValue()
         && !(structure = asCell(baseValue)->structure())->isUncacheableDictionary()
         && (slotBaseObject = asObject(slot.slotBase()))->getPropertySpecificValue(callFrame, ident, specific)
        && specific
@@ -1374,7 +1375,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_self_fail)
     CHECK_FOR_EXCEPTION();
 
     if (baseValue.isCell()
-        && slot.isCacheable()
+        && slot.isCacheableValue()
         && !asCell(baseValue)->structure()->isUncacheableDictionary()
         && slot.slotBase() == baseValue) {
@@ -1447,7 +1448,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_proto_list)
 
     CHECK_FOR_EXCEPTION();
 
-    if (!baseValue.isCell() || !slot.isCacheable() || asCell(baseValue)->structure()->isDictionary()) {
+    if (!baseValue.isCell() || !slot.isCacheableValue() || asCell(baseValue)->structure()->isDictionary()) {
         ctiPatchCallByReturnAddress(callFrame->codeBlock(), STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_proto_fail));
         return JSValue::encode(result);
     }
@@ -2303,7 +2304,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_resolve_global)
     PropertySlot slot(globalObject);
     if (globalObject->getPropertySlot(callFrame, ident, slot)) {
         JSValue result = slot.getValue(callFrame, ident);
-        if (slot.isCacheable() && !globalObject->structure()->isUncacheableDictionary() && slot.slotBase() == globalObject) {
+        if (slot.isCacheableValue() && !globalObject->structure()->isUncacheableDictionary() && slot.slotBase() == globalObject) {
             GlobalResolveInfo& globalResolveInfo = callFrame->codeBlock()->globalResolveInfo(globalResolveInfoIndex);
             if (globalResolveInfo.structure)
                 globalResolveInfo.structure->deref();
@@ -3023,7 +3024,7 @@ DEFINE_STUB_FUNCTION(void*, op_switch_char)
 
     if (scrutinee.isString()) {
         UString::Rep* value = asString(scrutinee)->value(callFrame).rep();
-        if (value->size() == 1)
+        if (value->length() == 1)
             result = codeBlock->characterSwitchJumpTable(tableIndex).ctiForValue(value->data()[0]).executableAddress();
     }
--
cgit v1.1