author    | Feng Qian <> | 2009-04-10 18:11:29 -0700
committer | The Android Open Source Project <initial-contribution@android.com> | 2009-04-10 18:11:29 -0700
commit    | 8f72e70a9fd78eec56623b3a62e68f16b7b27e28 (patch)
tree      | 181bf9a400c30a1bf34ea6d72560e8d00111d549 /JavaScriptCore/jit/JITPropertyAccess.cpp
parent    | 7ed56f225e0ade046e1c2178977f72b2d896f196 (diff)
AI 145796: Land the WebKit merge @r42026.
Automated import of CL 145796
Diffstat (limited to 'JavaScriptCore/jit/JITPropertyAccess.cpp')
-rw-r--r-- | JavaScriptCore/jit/JITPropertyAccess.cpp | 373
1 file changed, 186 insertions, 187 deletions
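The diff below is largely mechanical: call sites that named x86 registers directly (X86::eax, X86::edx, X86::ecx, X86::ebx) now use the JIT's abstract temporaries (regT0, regT1, regT2, regT3), condition-specific branch helpers such as jnePtr/jne32/ja32 become branchPtr/branch32 with an explicit condition, CTI stubs move from Interpreter:: to JITStubs::, and raw void* patch addresses give way to typed values (Call, ProcessorReturnAddress, CodeLocationLabel, CodeLocationJump). A minimal sketch of the register-alias idea, using simplified, assumed definitions rather than the real ones from JSC's JIT.h:

```cpp
// Illustrative sketch only; the real aliases live in JSC's JIT.h and vary per platform.
// It shows why the diff can say regT0/regT1/regT2/regT3 instead of X86::eax/edx/ecx/ebx:
// the temporaries are named constants bound to machine registers in one place.
namespace X86 { enum RegisterID { eax, ecx, edx, ebx, esi, edi }; }

class JIT {
public:
    typedef X86::RegisterID RegisterID;

    // Abstract temporaries; on x86 they happen to map onto eax, edx, ecx and ebx,
    // which is exactly the substitution visible throughout the diff below.
    static const RegisterID regT0 = X86::eax;
    static const RegisterID regT1 = X86::edx;
    static const RegisterID regT2 = X86::ecx;
    static const RegisterID regT3 = X86::ebx;
};
```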
diff --git a/JavaScriptCore/jit/JITPropertyAccess.cpp b/JavaScriptCore/jit/JITPropertyAccess.cpp
index 6740bec..ce90ee4 100644
--- a/JavaScriptCore/jit/JITPropertyAccess.cpp
+++ b/JavaScriptCore/jit/JITPropertyAccess.cpp
@@ -53,11 +53,11 @@ void JIT::compileGetByIdHotPath(int resultVReg, int baseVReg, Identifier* ident,
     // to array-length / prototype access tranpolines, and finally we also the the property-map access offset as a label
     // to jump back to if one of these trampolies finds a match.

-    emitGetVirtualRegister(baseVReg, X86::eax);
+    emitGetVirtualRegister(baseVReg, regT0);

-    emitPutJITStubArg(X86::eax, 1);
+    emitPutJITStubArg(regT0, 1);
     emitPutJITStubArgConstant(ident, 2);
-    emitCTICall(Interpreter::cti_op_get_by_id_generic);
+    emitCTICall(JITStubs::cti_op_get_by_id_generic);
     emitPutVirtualRegister(resultVReg);
 }

@@ -73,12 +73,12 @@ void JIT::compilePutByIdHotPath(int baseVReg, Identifier* ident, int valueVReg,
     // to just after the arguments have been loaded into registers 'hotPathBegin', and we generate code
     // such that the Structure & offset are always at the same distance from this.

-    emitGetVirtualRegisters(baseVReg, X86::eax, valueVReg, X86::edx);
+    emitGetVirtualRegisters(baseVReg, regT0, valueVReg, regT1);

     emitPutJITStubArgConstant(ident, 2);
-    emitPutJITStubArg(X86::eax, 1);
-    emitPutJITStubArg(X86::edx, 3);
-    emitCTICall(Interpreter::cti_op_put_by_id_generic);
+    emitPutJITStubArg(regT0, 1);
+    emitPutJITStubArg(regT1, 3);
+    emitCTICall(JITStubs::cti_op_put_by_id_generic);
 }

 void JIT::compilePutByIdSlowCase(int, Identifier*, int, Vector<SlowCaseEntry>::iterator&, unsigned)
@@ -95,21 +95,21 @@ void JIT::compileGetByIdHotPath(int resultVReg, int baseVReg, Identifier*, unsig
     // to array-length / prototype access tranpolines, and finally we also the the property-map access offset as a label
     // to jump back to if one of these trampolies finds a match.

-    emitGetVirtualRegister(baseVReg, X86::eax);
+    emitGetVirtualRegister(baseVReg, regT0);

-    emitJumpSlowCaseIfNotJSCell(X86::eax, baseVReg);
+    emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);

     Label hotPathBegin(this);
     m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;

     DataLabelPtr structureToCompare;
-    Jump structureCheck = jnePtrWithPatch(Address(X86::eax, FIELD_OFFSET(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
+    Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, FIELD_OFFSET(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
     addSlowCase(structureCheck);
     ASSERT(differenceBetween(hotPathBegin, structureToCompare) == patchOffsetGetByIdStructure);
     ASSERT(differenceBetween(hotPathBegin, structureCheck) == patchOffsetGetByIdBranchToSlowCase);

-    loadPtr(Address(X86::eax, FIELD_OFFSET(JSObject, m_propertyStorage)), X86::eax);
-    DataLabel32 displacementLabel = loadPtrWithAddressOffsetPatch(Address(X86::eax, patchGetByIdDefaultOffset), X86::eax);
+    loadPtr(Address(regT0, FIELD_OFFSET(JSObject, m_propertyStorage)), regT0);
+    DataLabel32 displacementLabel = loadPtrWithAddressOffsetPatch(Address(regT0, patchGetByIdDefaultOffset), regT0);
     ASSERT(differenceBetween(hotPathBegin, displacementLabel) == patchOffsetGetByIdPropertyMapOffset);

     Label putResult(this);
@@ -132,9 +132,9 @@ void JIT::compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident
 #ifndef NDEBUG
     Label coldPathBegin(this);
 #endif
-    emitPutJITStubArg(X86::eax, 1);
+    emitPutJITStubArg(regT0, 1);
     emitPutJITStubArgConstant(ident, 2);
-    Jump call = emitCTICall(Interpreter::cti_op_get_by_id);
+    Call call = emitCTICall(JITStubs::cti_op_get_by_id);
     emitPutVirtualRegister(resultVReg);

     ASSERT(differenceBetween(coldPathBegin, call) == patchOffsetGetByIdSlowCaseCall);
@@ -149,22 +149,22 @@ void JIT::compilePutByIdHotPath(int baseVReg, Identifier*, int valueVReg, unsign
     // to just after the arguments have been loaded into registers 'hotPathBegin', and we generate code
     // such that the Structure & offset are always at the same distance from this.

-    emitGetVirtualRegisters(baseVReg, X86::eax, valueVReg, X86::edx);
+    emitGetVirtualRegisters(baseVReg, regT0, valueVReg, regT1);

     // Jump to a slow case if either the base object is an immediate, or if the Structure does not match.
-    emitJumpSlowCaseIfNotJSCell(X86::eax, baseVReg);
+    emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);

     Label hotPathBegin(this);
     m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;

     // It is important that the following instruction plants a 32bit immediate, in order that it can be patched over.
     DataLabelPtr structureToCompare;
-    addSlowCase(jnePtrWithPatch(Address(X86::eax, FIELD_OFFSET(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))));
+    addSlowCase(branchPtrWithPatch(NotEqual, Address(regT0, FIELD_OFFSET(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))));
     ASSERT(differenceBetween(hotPathBegin, structureToCompare) == patchOffsetPutByIdStructure);

     // Plant a load from a bogus ofset in the object's property map; we will patch this later, if it is to be used.
-    loadPtr(Address(X86::eax, FIELD_OFFSET(JSObject, m_propertyStorage)), X86::eax);
-    DataLabel32 displacementLabel = storePtrWithAddressOffsetPatch(X86::edx, Address(X86::eax, patchGetByIdDefaultOffset));
+    loadPtr(Address(regT0, FIELD_OFFSET(JSObject, m_propertyStorage)), regT0);
+    DataLabel32 displacementLabel = storePtrWithAddressOffsetPatch(regT1, Address(regT0, patchGetByIdDefaultOffset));
     ASSERT(differenceBetween(hotPathBegin, displacementLabel) == patchOffsetPutByIdPropertyMapOffset);
 }

@@ -174,9 +174,9 @@ void JIT::compilePutByIdSlowCase(int baseVReg, Identifier* ident, int, Vector<Sl
     linkSlowCase(iter);

     emitPutJITStubArgConstant(ident, 2);
-    emitPutJITStubArg(X86::eax, 1);
-    emitPutJITStubArg(X86::edx, 3);
-    Jump call = emitCTICall(Interpreter::cti_op_put_by_id);
+    emitPutJITStubArg(regT0, 1);
+    emitPutJITStubArg(regT1, 3);
+    Call call = emitCTICall(JITStubs::cti_op_put_by_id);

     // Track the location of the call; this will be used to recover patch information.
     m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].callReturnLocation = call;
@@ -193,54 +193,55 @@ static inline bool transitionWillNeedStorageRealloc(Structure* oldStructure, Str
     return oldStructure->propertyStorageCapacity() != newStructure->propertyStorageCapacity();
 }

-void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, void* returnAddress)
+void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ProcessorReturnAddress returnAddress)
 {
     JumpList failureCases;
     // Check eax is an object of the right Structure.
-    failureCases.append(emitJumpIfNotJSCell(X86::eax));
-    failureCases.append(jnePtr(Address(X86::eax, FIELD_OFFSET(JSCell, m_structure)), ImmPtr(oldStructure)));
+    failureCases.append(emitJumpIfNotJSCell(regT0));
+    failureCases.append(branchPtr(NotEqual, Address(regT0, FIELD_OFFSET(JSCell, m_structure)), ImmPtr(oldStructure)));
     JumpList successCases;

     // ecx = baseObject
-    loadPtr(Address(X86::eax, FIELD_OFFSET(JSCell, m_structure)), X86::ecx);
+    loadPtr(Address(regT0, FIELD_OFFSET(JSCell, m_structure)), regT2);
     // proto(ecx) = baseObject->structure()->prototype()
-    failureCases.append(jne32(Address(X86::ecx, FIELD_OFFSET(Structure, m_typeInfo) + FIELD_OFFSET(TypeInfo, m_type)), Imm32(ObjectType)));
+    failureCases.append(branch32(NotEqual, Address(regT2, FIELD_OFFSET(Structure, m_typeInfo) + FIELD_OFFSET(TypeInfo, m_type)), Imm32(ObjectType)));

-    loadPtr(Address(X86::ecx, FIELD_OFFSET(Structure, m_prototype)), X86::ecx);
+    loadPtr(Address(regT2, FIELD_OFFSET(Structure, m_prototype)), regT2);

     // ecx = baseObject->m_structure
     for (RefPtr<Structure>* it = chain->head(); *it; ++it) {
         // null check the prototype
-        successCases.append(jePtr(X86::ecx, ImmPtr(JSValuePtr::encode(jsNull()))));
+        successCases.append(branchPtr(Equal, regT2, ImmPtr(JSValuePtr::encode(jsNull()))));

         // Check the structure id
-        failureCases.append(jnePtr(Address(X86::ecx, FIELD_OFFSET(JSCell, m_structure)), ImmPtr(it->get())));
+        failureCases.append(branchPtr(NotEqual, Address(regT2, FIELD_OFFSET(JSCell, m_structure)), ImmPtr(it->get())));

-        loadPtr(Address(X86::ecx, FIELD_OFFSET(JSCell, m_structure)), X86::ecx);
-        failureCases.append(jne32(Address(X86::ecx, FIELD_OFFSET(Structure, m_typeInfo) + FIELD_OFFSET(TypeInfo, m_type)), Imm32(ObjectType)));
-        loadPtr(Address(X86::ecx, FIELD_OFFSET(Structure, m_prototype)), X86::ecx);
+        loadPtr(Address(regT2, FIELD_OFFSET(JSCell, m_structure)), regT2);
+        failureCases.append(branch32(NotEqual, Address(regT2, FIELD_OFFSET(Structure, m_typeInfo) + FIELD_OFFSET(TypeInfo, m_type)), Imm32(ObjectType)));
+        loadPtr(Address(regT2, FIELD_OFFSET(Structure, m_prototype)), regT2);
     }

     successCases.link(this);

-    Jump callTarget;
+    Call callTarget;

     // emit a call only if storage realloc is needed
-    if (transitionWillNeedStorageRealloc(oldStructure, newStructure)) {
+    bool willNeedStorageRealloc = transitionWillNeedStorageRealloc(oldStructure, newStructure);
+    if (willNeedStorageRealloc) {
         pop(X86::ebx);
 #if PLATFORM(X86_64)
-        move(Imm32(newStructure->propertyStorageCapacity()), X86::edx);
+        move(Imm32(newStructure->propertyStorageCapacity()), regT1);
         move(Imm32(oldStructure->propertyStorageCapacity()), X86::esi);
-        move(X86::eax, X86::edi);
+        move(regT0, X86::edi);
         callTarget = call();
 #else
         push(Imm32(newStructure->propertyStorageCapacity()));
         push(Imm32(oldStructure->propertyStorageCapacity()));
-        push(X86::eax);
+        push(regT0);
         callTarget = call();
         addPtr(Imm32(3 * sizeof(void*)), X86::esp);
 #endif
-        emitGetJITStubArg(3, X86::edx);
+        emitGetJITStubArg(3, regT1);
         push(X86::ebx);
     }
@@ -248,150 +249,144 @@ void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure
     // codeblock should ensure oldStructure->m_refCount > 0
     sub32(Imm32(1), AbsoluteAddress(oldStructure->addressOfCount()));
     add32(Imm32(1), AbsoluteAddress(newStructure->addressOfCount()));
-    storePtr(ImmPtr(newStructure), Address(X86::eax, FIELD_OFFSET(JSCell, m_structure)));
+    storePtr(ImmPtr(newStructure), Address(regT0, FIELD_OFFSET(JSCell, m_structure)));

     // write the value
-    loadPtr(Address(X86::eax, FIELD_OFFSET(JSObject, m_propertyStorage)), X86::eax);
-    storePtr(X86::edx, Address(X86::eax, cachedOffset * sizeof(JSValuePtr)));
+    loadPtr(Address(regT0, FIELD_OFFSET(JSObject, m_propertyStorage)), regT0);
+    storePtr(regT1, Address(regT0, cachedOffset * sizeof(JSValuePtr)));

     ret();

-    Jump failureJump;
-    bool plantedFailureJump = false;
-    if (!failureCases.empty()) {
-        failureCases.link(this);
-        restoreArgumentReferenceForTrampoline();
-        failureJump = jump();
-        plantedFailureJump = true;
-    }
+    ASSERT(!failureCases.empty());
+    failureCases.link(this);
+    restoreArgumentReferenceForTrampoline();
+    Call failureCall = tailRecursiveCall();

     void* code = m_assembler.executableCopy(m_codeBlock->executablePool());
     PatchBuffer patchBuffer(code);

-    if (plantedFailureJump)
-        patchBuffer.link(failureJump, reinterpret_cast<void*>(Interpreter::cti_op_put_by_id_fail));
+    patchBuffer.link(failureCall, JITStubs::cti_op_put_by_id_fail);

-    if (transitionWillNeedStorageRealloc(oldStructure, newStructure))
-        patchBuffer.link(callTarget, reinterpret_cast<void*>(resizePropertyStorage));
-
-    stubInfo->stubRoutine = code;
+    if (willNeedStorageRealloc)
+        patchBuffer.link(callTarget, resizePropertyStorage);

-    Jump::patch(returnAddress, code);
+    stubInfo->stubRoutine = patchBuffer.entry();
+
+    returnAddress.relinkCallerToFunction(code);
 }

-void JIT::patchGetByIdSelf(StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, void* returnAddress)
+void JIT::patchGetByIdSelf(StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ProcessorReturnAddress returnAddress)
 {
     // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
-    // Should probably go to Interpreter::cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
-    Jump::patch(returnAddress, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_self_fail));
+    // Should probably go to JITStubs::cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
+    returnAddress.relinkCallerToFunction(JITStubs::cti_op_get_by_id_self_fail);

     // Patch the offset into the propoerty map to load from, then patch the Structure to look for.
-    void* structureAddress = reinterpret_cast<void*>(reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + patchOffsetGetByIdStructure);
-    void* displacementAddress = reinterpret_cast<void*>(reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + patchOffsetGetByIdPropertyMapOffset);
-    DataLabelPtr::patch(structureAddress, structure);
-    DataLabel32::patch(displacementAddress, cachedOffset * sizeof(JSValuePtr));
+    stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetGetByIdStructure).repatch(structure);
+    stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetGetByIdPropertyMapOffset).repatch(cachedOffset * sizeof(JSValuePtr));
 }

-void JIT::patchPutByIdReplace(StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, void* returnAddress)
+void JIT::patchPutByIdReplace(StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ProcessorReturnAddress returnAddress)
 {
     // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
-    // Should probably go to Interpreter::cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
-    Jump::patch(returnAddress, reinterpret_cast<void*>(Interpreter::cti_op_put_by_id_generic));
+    // Should probably go to JITStubs::cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
+    returnAddress.relinkCallerToFunction(JITStubs::cti_op_put_by_id_generic);

     // Patch the offset into the propoerty map to load from, then patch the Structure to look for.
-    void* structureAddress = reinterpret_cast<char*>(stubInfo->hotPathBegin) + patchOffsetPutByIdStructure;
-    void* displacementAddress = reinterpret_cast<char*>(stubInfo->hotPathBegin) + patchOffsetPutByIdPropertyMapOffset;
-    DataLabelPtr::patch(structureAddress, structure);
-    DataLabel32::patch(displacementAddress, cachedOffset * sizeof(JSValuePtr));
+    stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetPutByIdStructure).repatch(structure);
+    stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset).repatch(cachedOffset * sizeof(JSValuePtr));
 }

-void JIT::privateCompilePatchGetArrayLength(void* returnAddress)
+void JIT::privateCompilePatchGetArrayLength(ProcessorReturnAddress returnAddress)
 {
     StructureStubInfo* stubInfo = &m_codeBlock->getStubInfo(returnAddress);

     // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
-    Jump::patch(returnAddress, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_array_fail));
+    returnAddress.relinkCallerToFunction(JITStubs::cti_op_get_by_id_array_fail);

     // Check eax is an array
-    Jump failureCases1 = jnePtr(Address(X86::eax), ImmPtr(m_interpreter->m_jsArrayVptr));
+    Jump failureCases1 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr));

     // Checks out okay! - get the length from the storage
-    loadPtr(Address(X86::eax, FIELD_OFFSET(JSArray, m_storage)), X86::ecx);
-    load32(Address(X86::ecx, FIELD_OFFSET(ArrayStorage, m_length)), X86::ecx);
+    loadPtr(Address(regT0, FIELD_OFFSET(JSArray, m_storage)), regT2);
+    load32(Address(regT2, FIELD_OFFSET(ArrayStorage, m_length)), regT2);

-    Jump failureCases2 = ja32(X86::ecx, Imm32(JSImmediate::maxImmediateInt));
+    Jump failureCases2 = branch32(Above, regT2, Imm32(JSImmediate::maxImmediateInt));

-    emitFastArithIntToImmNoCheck(X86::ecx, X86::eax);
+    emitFastArithIntToImmNoCheck(regT2, regT0);
     Jump success = jump();

     void* code = m_assembler.executableCopy(m_codeBlock->executablePool());
     PatchBuffer patchBuffer(code);

     // Use the patch information to link the failure cases back to the original slow case routine.
-    void* slowCaseBegin = reinterpret_cast<char*>(stubInfo->callReturnLocation) - patchOffsetGetByIdSlowCaseCall;
+    CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
     patchBuffer.link(failureCases1, slowCaseBegin);
     patchBuffer.link(failureCases2, slowCaseBegin);

     // On success return back to the hot patch code, at a point it will perform the store to dest for us.
-    void* hotPathPutResult = reinterpret_cast<char*>(stubInfo->hotPathBegin) + patchOffsetGetByIdPutResult;
-    patchBuffer.link(success, hotPathPutResult);
+    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

     // Track the stub we have created so that it will be deleted later.
-    stubInfo->stubRoutine = code;
+    CodeLocationLabel entryLabel = patchBuffer.entry();
+    stubInfo->stubRoutine = entryLabel;

-    // Finally patch the jump to sow case back in the hot path to jump here instead.
-    void* jumpLocation = reinterpret_cast<char*>(stubInfo->hotPathBegin) + patchOffsetGetByIdBranchToSlowCase;
-    Jump::patch(jumpLocation, code);
+    // Finally patch the jump to slow case back in the hot path to jump here instead.
+    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+    jumpLocation.relink(entryLabel);
 }

-void JIT::privateCompileGetByIdSelf(StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, void* returnAddress)
+void JIT::privateCompileGetByIdSelf(StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ProcessorReturnAddress returnAddress)
 {
     // Check eax is an object of the right Structure.
-    Jump failureCases1 = emitJumpIfNotJSCell(X86::eax);
-    Jump failureCases2 = checkStructure(X86::eax, structure);
+    Jump failureCases1 = emitJumpIfNotJSCell(regT0);
+    Jump failureCases2 = checkStructure(regT0, structure);

     // Checks out okay! - getDirectOffset
-    loadPtr(Address(X86::eax, FIELD_OFFSET(JSObject, m_propertyStorage)), X86::eax);
-    loadPtr(Address(X86::eax, cachedOffset * sizeof(JSValuePtr)), X86::eax);
+    loadPtr(Address(regT0, FIELD_OFFSET(JSObject, m_propertyStorage)), regT0);
+    loadPtr(Address(regT0, cachedOffset * sizeof(JSValuePtr)), regT0);
     ret();

+    Call failureCases1Call = makeTailRecursiveCall(failureCases1);
+    Call failureCases2Call = makeTailRecursiveCall(failureCases2);
+
     void* code = m_assembler.executableCopy(m_codeBlock->executablePool());
     PatchBuffer patchBuffer(code);

-    patchBuffer.link(failureCases1, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_self_fail));
-    patchBuffer.link(failureCases2, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_self_fail));
+    patchBuffer.link(failureCases1Call, JITStubs::cti_op_get_by_id_self_fail);
+    patchBuffer.link(failureCases2Call, JITStubs::cti_op_get_by_id_self_fail);

-    stubInfo->stubRoutine = code;
+    stubInfo->stubRoutine = patchBuffer.entry();

-    Jump::patch(returnAddress, code);
+    returnAddress.relinkCallerToFunction(code);
 }

-void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, void* returnAddress, CallFrame* callFrame)
+void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, ProcessorReturnAddress returnAddress, CallFrame* callFrame)
 {
 #if USE(CTI_REPATCH_PIC)
     // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
-    Jump::patch(returnAddress, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_list));
+    returnAddress.relinkCallerToFunction(JITStubs::cti_op_get_by_id_proto_list);

     // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
     // referencing the prototype object - let's speculatively load it's table nice and early!)
     JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
     PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
-    loadPtr(static_cast<void*>(protoPropertyStorage), X86::edx);
+    loadPtr(static_cast<void*>(protoPropertyStorage), regT1);

     // Check eax is an object of the right Structure.
-    Jump failureCases1 = checkStructure(X86::eax, structure);
+    Jump failureCases1 = checkStructure(regT0, structure);

     // Check the prototype object's Structure had not changed.
     Structure** prototypeStructureAddress = &(protoObject->m_structure);
 #if PLATFORM(X86_64)
-    move(ImmPtr(prototypeStructure), X86::ebx);
-    Jump failureCases2 = jnePtr(X86::ebx, AbsoluteAddress(prototypeStructureAddress));
+    move(ImmPtr(prototypeStructure), regT3);
+    Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
 #else
-    Jump failureCases2 = jnePtr(AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
+    Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
 #endif

     // Checks out okay! - getDirectOffset
-    loadPtr(Address(X86::edx, cachedOffset * sizeof(JSValuePtr)), X86::eax);
+    loadPtr(Address(regT1, cachedOffset * sizeof(JSValuePtr)), regT0);

     Jump success = jump();

@@ -399,59 +394,59 @@ void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* str
     PatchBuffer patchBuffer(code);

     // Use the patch information to link the failure cases back to the original slow case routine.
-    void* slowCaseBegin = reinterpret_cast<char*>(stubInfo->callReturnLocation) - patchOffsetGetByIdSlowCaseCall;
+    CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
     patchBuffer.link(failureCases1, slowCaseBegin);
     patchBuffer.link(failureCases2, slowCaseBegin);

     // On success return back to the hot patch code, at a point it will perform the store to dest for us.
-    intptr_t successDest = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + patchOffsetGetByIdPutResult;
-    patchBuffer.link(success, reinterpret_cast<void*>(successDest));
+    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

     // Track the stub we have created so that it will be deleted later.
-    stubInfo->stubRoutine = code;
+    CodeLocationLabel entryLabel = patchBuffer.entry();
+    stubInfo->stubRoutine = entryLabel;

     // Finally patch the jump to slow case back in the hot path to jump here instead.
-    void* jumpLocation = reinterpret_cast<char*>(stubInfo->hotPathBegin) + patchOffsetGetByIdBranchToSlowCase;
-    Jump::patch(jumpLocation, code);
+    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+    jumpLocation.relink(entryLabel);
 #else
     // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
     // referencing the prototype object - let's speculatively load it's table nice and early!)
     JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
     PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
-    loadPtr(protoPropertyStorage, X86::edx);
+    loadPtr(protoPropertyStorage, regT1);

     // Check eax is an object of the right Structure.
-    Jump failureCases1 = emitJumpIfNotJSCell(X86::eax);
-    Jump failureCases2 = checkStructure(X86::eax, structure);
+    Jump failureCases1 = emitJumpIfNotJSCell(regT0);
+    Jump failureCases2 = checkStructure(regT0, structure);

     // Check the prototype object's Structure had not changed.
     Structure** prototypeStructureAddress = &(protoObject->m_structure);
-    Jump failureCases3 = jnePtr(AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
+    Jump failureCases3 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));

     // Checks out okay! - getDirectOffset
-    loadPtr(Address(X86::edx, cachedOffset * sizeof(JSValuePtr)), X86::eax);
+    loadPtr(Address(regT1, cachedOffset * sizeof(JSValuePtr)), regT0);

     ret();

     void* code = m_assembler.executableCopy(m_codeBlock->executablePool());
     PatchBuffer patchBuffer(code);

-    patchBuffer.link(failureCases1, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_fail));
-    patchBuffer.link(failureCases2, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_fail));
-    patchBuffer.link(failureCases3, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_fail));
+    patchBuffer.link(failureCases1, JITStubs::cti_op_get_by_id_proto_fail);
+    patchBuffer.link(failureCases2, JITStubs::cti_op_get_by_id_proto_fail);
+    patchBuffer.link(failureCases3, JITStubs::cti_op_get_by_id_proto_fail);

-    stubInfo->stubRoutine = code;
+    stubInfo->stubRoutine = patchBuffer.entry();

-    Jump::patch(returnAddress, code);
+    returnAddress.relinkCallerToFunction(code);
 #endif
 }

 #if USE(CTI_REPATCH_PIC)
 void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, size_t cachedOffset)
 {
-    Jump failureCase = checkStructure(X86::eax, structure);
-    loadPtr(Address(X86::eax, FIELD_OFFSET(JSObject, m_propertyStorage)), X86::eax);
-    loadPtr(Address(X86::eax, cachedOffset * sizeof(JSValuePtr)), X86::eax);
+    Jump failureCase = checkStructure(regT0, structure);
+    loadPtr(Address(regT0, FIELD_OFFSET(JSObject, m_propertyStorage)), regT0);
+    loadPtr(Address(regT0, cachedOffset * sizeof(JSValuePtr)), regT0);
     Jump success = jump();

     void* code = m_assembler.executableCopy(m_codeBlock->executablePool());
@@ -459,22 +454,23 @@ void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, Polymorphic
     PatchBuffer patchBuffer(code);

     // Use the patch information to link the failure cases back to the original slow case routine.
-    void* lastProtoBegin = polymorphicStructures->list[currentIndex - 1].stubRoutine;
+    CodeLocationLabel lastProtoBegin = polymorphicStructures->list[currentIndex - 1].stubRoutine;
     if (!lastProtoBegin)
-        lastProtoBegin = reinterpret_cast<char*>(stubInfo->callReturnLocation) - patchOffsetGetByIdSlowCaseCall;
+        lastProtoBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);

     patchBuffer.link(failureCase, lastProtoBegin);

     // On success return back to the hot patch code, at a point it will perform the store to dest for us.
-    intptr_t successDest = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + patchOffsetGetByIdPutResult;
-    patchBuffer.link(success, reinterpret_cast<void*>(successDest));
+    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
+
+    CodeLocationLabel entryLabel = patchBuffer.entry();

     structure->ref();
-    polymorphicStructures->list[currentIndex].set(code, structure);
+    polymorphicStructures->list[currentIndex].set(entryLabel, structure);

     // Finally patch the jump to slow case back in the hot path to jump here instead.
-    void* jumpLocation = reinterpret_cast<char*>(stubInfo->hotPathBegin) + patchOffsetGetByIdBranchToSlowCase;
-    Jump::patch(jumpLocation, code);
+    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+    jumpLocation.relink(entryLabel);
 }

 void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, CallFrame* callFrame)
@@ -483,22 +479,22 @@ void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, Polymorphi
     // referencing the prototype object - let's speculatively load it's table nice and early!)
     JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
     PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
-    loadPtr(protoPropertyStorage, X86::edx);
+    loadPtr(protoPropertyStorage, regT1);

     // Check eax is an object of the right Structure.
-    Jump failureCases1 = checkStructure(X86::eax, structure);
+    Jump failureCases1 = checkStructure(regT0, structure);

     // Check the prototype object's Structure had not changed.
     Structure** prototypeStructureAddress = &(protoObject->m_structure);
 #if PLATFORM(X86_64)
-    move(ImmPtr(prototypeStructure), X86::ebx);
-    Jump failureCases2 = jnePtr(X86::ebx, AbsoluteAddress(prototypeStructureAddress));
+    move(ImmPtr(prototypeStructure), regT3);
+    Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
 #else
-    Jump failureCases2 = jnePtr(AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
+    Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
 #endif

     // Checks out okay! - getDirectOffset
-    loadPtr(Address(X86::edx, cachedOffset * sizeof(JSValuePtr)), X86::eax);
+    loadPtr(Address(regT1, cachedOffset * sizeof(JSValuePtr)), regT0);

     Jump success = jump();

@@ -506,21 +502,22 @@ void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, Polymorphi
     PatchBuffer patchBuffer(code);

     // Use the patch information to link the failure cases back to the original slow case routine.
-    void* lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
+    CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
     patchBuffer.link(failureCases1, lastProtoBegin);
     patchBuffer.link(failureCases2, lastProtoBegin);

     // On success return back to the hot patch code, at a point it will perform the store to dest for us.
-    intptr_t successDest = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + patchOffsetGetByIdPutResult;
-    patchBuffer.link(success, reinterpret_cast<void*>(successDest));
+    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
+
+    CodeLocationLabel entryLabel = patchBuffer.entry();

     structure->ref();
     prototypeStructure->ref();
-    prototypeStructures->list[currentIndex].set(code, structure, prototypeStructure);
+    prototypeStructures->list[currentIndex].set(entryLabel, structure, prototypeStructure);

     // Finally patch the jump to slow case back in the hot path to jump here instead.
-    void* jumpLocation = reinterpret_cast<char*>(stubInfo->hotPathBegin) + patchOffsetGetByIdBranchToSlowCase;
-    Jump::patch(jumpLocation, code);
+    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+    jumpLocation.relink(entryLabel);
 }

 void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, CallFrame* callFrame)
@@ -530,7 +527,7 @@ void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, Polymorphi
     JumpList bucketsOfFail;

     // Check eax is an object of the right Structure.
-    Jump baseObjectCheck = checkStructure(X86::eax, structure);
+    Jump baseObjectCheck = checkStructure(regT0, structure);
     bucketsOfFail.append(baseObjectCheck);

     Structure* currStructure = structure;
@@ -543,54 +540,55 @@ void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, Polymorphi
         // Check the prototype object's Structure had not changed.
         Structure** prototypeStructureAddress = &(protoObject->m_structure);
 #if PLATFORM(X86_64)
-        move(ImmPtr(currStructure), X86::ebx);
-        bucketsOfFail.append(jnePtr(X86::ebx, AbsoluteAddress(prototypeStructureAddress)));
+        move(ImmPtr(currStructure), regT3);
+        bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3));
 #else
-        bucketsOfFail.append(jnePtr(AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
+        bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
 #endif
     }

     ASSERT(protoObject);

     PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
-    loadPtr(protoPropertyStorage, X86::edx);
-    loadPtr(Address(X86::edx, cachedOffset * sizeof(JSValuePtr)), X86::eax);
+    loadPtr(protoPropertyStorage, regT1);
+    loadPtr(Address(regT1, cachedOffset * sizeof(JSValuePtr)), regT0);
     Jump success = jump();

     void* code = m_assembler.executableCopy(m_codeBlock->executablePool());
     PatchBuffer patchBuffer(code);

     // Use the patch information to link the failure cases back to the original slow case routine.
-    void* lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
+    CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;

     patchBuffer.link(bucketsOfFail, lastProtoBegin);

     // On success return back to the hot patch code, at a point it will perform the store to dest for us.
-    intptr_t successDest = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + patchOffsetGetByIdPutResult;
-    patchBuffer.link(success, reinterpret_cast<void*>(successDest));
+    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
+
+    CodeLocationLabel entryLabel = patchBuffer.entry();

     // Track the stub we have created so that it will be deleted later.
     structure->ref();
     chain->ref();
-    prototypeStructures->list[currentIndex].set(code, structure, chain);
+    prototypeStructures->list[currentIndex].set(entryLabel, structure, chain);

     // Finally patch the jump to slow case back in the hot path to jump here instead.
-    void* jumpLocation = reinterpret_cast<char*>(stubInfo->hotPathBegin) + patchOffsetGetByIdBranchToSlowCase;
-    Jump::patch(jumpLocation, code);
+    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+    jumpLocation.relink(entryLabel);
 }
 #endif

-void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, void* returnAddress, CallFrame* callFrame)
+void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, ProcessorReturnAddress returnAddress, CallFrame* callFrame)
 {
 #if USE(CTI_REPATCH_PIC)
     // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
-    Jump::patch(returnAddress, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_list));
+    returnAddress.relinkCallerToFunction(JITStubs::cti_op_get_by_id_proto_list);

     ASSERT(count);

     JumpList bucketsOfFail;

     // Check eax is an object of the right Structure.
-    bucketsOfFail.append(checkStructure(X86::eax, structure));
+    bucketsOfFail.append(checkStructure(regT0, structure));

     Structure* currStructure = structure;
     RefPtr<Structure>* chainEntries = chain->head();
@@ -602,45 +600,43 @@ void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* str
         // Check the prototype object's Structure had not changed.
         Structure** prototypeStructureAddress = &(protoObject->m_structure);
 #if PLATFORM(X86_64)
-        move(ImmPtr(currStructure), X86::ebx);
-        bucketsOfFail.append(jnePtr(X86::ebx, AbsoluteAddress(prototypeStructureAddress)));
+        move(ImmPtr(currStructure), regT3);
+        bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3));
 #else
-        bucketsOfFail.append(jnePtr(AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
+        bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
 #endif
     }

     ASSERT(protoObject);

     PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
-    loadPtr(protoPropertyStorage, X86::edx);
-    loadPtr(Address(X86::edx, cachedOffset * sizeof(JSValuePtr)), X86::eax);
+    loadPtr(protoPropertyStorage, regT1);
+    loadPtr(Address(regT1, cachedOffset * sizeof(JSValuePtr)), regT0);
     Jump success = jump();

     void* code = m_assembler.executableCopy(m_codeBlock->executablePool());
     PatchBuffer patchBuffer(code);

     // Use the patch information to link the failure cases back to the original slow case routine.
-    void* slowCaseBegin = reinterpret_cast<char*>(stubInfo->callReturnLocation) - patchOffsetGetByIdSlowCaseCall;
-
-    patchBuffer.link(bucketsOfFail, slowCaseBegin);
+    patchBuffer.link(bucketsOfFail, stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall));

     // On success return back to the hot patch code, at a point it will perform the store to dest for us.
-    intptr_t successDest = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + patchOffsetGetByIdPutResult;
-    patchBuffer.link(success, reinterpret_cast<void*>(successDest));
+    patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));

     // Track the stub we have created so that it will be deleted later.
-    stubInfo->stubRoutine = code;
+    CodeLocationLabel entryLabel = patchBuffer.entry();
+    stubInfo->stubRoutine = entryLabel;

     // Finally patch the jump to slow case back in the hot path to jump here instead.
-    void* jumpLocation = reinterpret_cast<char*>(stubInfo->hotPathBegin) + patchOffsetGetByIdBranchToSlowCase;
-    Jump::patch(jumpLocation, code);
+    CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+    jumpLocation.relink(entryLabel);
 #else
     ASSERT(count);

     JumpList bucketsOfFail;

     // Check eax is an object of the right Structure.
-    bucketsOfFail.append(emitJumpIfNotJSCell(X86::eax));
-    bucketsOfFail.append(checkStructure(X86::eax, structure));
+    bucketsOfFail.append(emitJumpIfNotJSCell(regT0));
+    bucketsOfFail.append(checkStructure(regT0, structure));

     Structure* currStructure = structure;
     RefPtr<Structure>* chainEntries = chain->head();
@@ -652,49 +648,52 @@ void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* str
         // Check the prototype object's Structure had not changed.
         Structure** prototypeStructureAddress = &(protoObject->m_structure);
 #if PLATFORM(X86_64)
-        move(ImmPtr(currStructure), X86::ebx);
-        bucketsOfFail.append(jnePtr(X86::ebx, AbsoluteAddress(prototypeStructureAddress)));
+        move(ImmPtr(currStructure), regT3);
+        bucketsOfFail.append(branchPtr(NotEqual, regT3, AbsoluteAddress(prototypeStructureAddress)));
 #else
-        bucketsOfFail.append(jnePtr(AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
+        bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
 #endif
     }

     ASSERT(protoObject);

     PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
-    loadPtr(protoPropertyStorage, X86::edx);
-    loadPtr(Address(X86::edx, cachedOffset * sizeof(JSValuePtr)), X86::eax);
+    loadPtr(protoPropertyStorage, regT1);
+    loadPtr(Address(regT1, cachedOffset * sizeof(JSValuePtr)), regT0);
     ret();

     void* code = m_assembler.executableCopy(m_codeBlock->executablePool());

-    patchBuffer.link(bucketsOfFail, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_fail));
+    patchBuffer.link(bucketsOfFail, JITStubs::cti_op_get_by_id_proto_fail);

-    stubInfo->stubRoutine = code;
+    stubInfo->stubRoutine = patchBuffer.entry();

-    Jump::patch(returnAddress, code);
+    returnAddress.relinkCallerToFunction(code);
 #endif
 }

-void JIT::privateCompilePutByIdReplace(StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, void* returnAddress)
+void JIT::privateCompilePutByIdReplace(StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ProcessorReturnAddress returnAddress)
 {
     // Check eax is an object of the right Structure.
-    Jump failureCases1 = emitJumpIfNotJSCell(X86::eax);
-    Jump failureCases2 = checkStructure(X86::eax, structure);
+    Jump failureCases1 = emitJumpIfNotJSCell(regT0);
+    Jump failureCases2 = checkStructure(regT0, structure);

     // checks out okay! - putDirectOffset
-    loadPtr(Address(X86::eax, FIELD_OFFSET(JSObject, m_propertyStorage)), X86::eax);
-    storePtr(X86::edx, Address(X86::eax, cachedOffset * sizeof(JSValuePtr)));
+    loadPtr(Address(regT0, FIELD_OFFSET(JSObject, m_propertyStorage)), regT0);
+    storePtr(regT1, Address(regT0, cachedOffset * sizeof(JSValuePtr)));
     ret();

+    Call failureCases1Call = makeTailRecursiveCall(failureCases1);
+    Call failureCases2Call = makeTailRecursiveCall(failureCases2);
+
     void* code = m_assembler.executableCopy(m_codeBlock->executablePool());
     PatchBuffer patchBuffer(code);

-    patchBuffer.link(failureCases1, reinterpret_cast<void*>(Interpreter::cti_op_put_by_id_fail));
-    patchBuffer.link(failureCases2, reinterpret_cast<void*>(Interpreter::cti_op_put_by_id_fail));
+    patchBuffer.link(failureCases1Call, JITStubs::cti_op_put_by_id_fail);
+    patchBuffer.link(failureCases2Call, JITStubs::cti_op_put_by_id_fail);

-    stubInfo->stubRoutine = code;
+    stubInfo->stubRoutine = patchBuffer.entry();

-    Jump::patch(returnAddress, code);
+    returnAddress.relinkCallerToFunction(code);
 }

 #endif
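The other recurring change above is how patch targets are addressed: instead of computing raw void* locations with reinterpret_cast arithmetic and calling Jump::patch / DataLabelPtr::patch, the new code asks typed location objects (stubInfo->hotPathBegin, stubInfo->callReturnLocation) for labels at an offset and then repatches or relinks them. A minimal sketch of that typing idea, with assumed class shapes rather than the real JSC assembler types:

```cpp
// Sketch of the typed code-location idea (assumed shape, not the real JSC class).
// The point is that offset arithmetic happens behind one small, typed helper instead
// of ad-hoc reinterpret_cast<char*> math at every call site, as in the removed lines above.
class CodeLocationLabel {
public:
    explicit CodeLocationLabel(void* location = 0) : m_location(location) {}

    // e.g. callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall)
    CodeLocationLabel labelAtOffset(int offset) const {
        return CodeLocationLabel(static_cast<char*>(m_location) + offset);
    }

    void* address() const { return m_location; }

private:
    void* m_location;
};
```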