Diffstat (limited to 'JavaScriptCore/jit/JITInlineMethods.h')
-rw-r--r--  JavaScriptCore/jit/JITInlineMethods.h  |  220
1 file changed, 81 insertions, 139 deletions
diff --git a/JavaScriptCore/jit/JITInlineMethods.h b/JavaScriptCore/jit/JITInlineMethods.h
index 5af7565..39ca4a5 100644
--- a/JavaScriptCore/jit/JITInlineMethods.h
+++ b/JavaScriptCore/jit/JITInlineMethods.h
@@ -26,7 +26,6 @@
#ifndef JITInlineMethods_h
#define JITInlineMethods_h
-#include <wtf/Platform.h>
#if ENABLE(JIT)
@@ -34,31 +33,6 @@ namespace JSC {
/* Deprecated: Please use JITStubCall instead. */
-// puts an arg onto the stack, as an arg to a context threaded function.
-ALWAYS_INLINE void JIT::emitPutJITStubArg(RegisterID src, unsigned argumentNumber)
-{
- unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + JITSTACKFRAME_ARGS_INDEX;
- poke(src, argumentStackOffset);
-}
-
-/* Deprecated: Please use JITStubCall instead. */
-
-ALWAYS_INLINE void JIT::emitPutJITStubArgConstant(unsigned value, unsigned argumentNumber)
-{
- unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + JITSTACKFRAME_ARGS_INDEX;
- poke(Imm32(value), argumentStackOffset);
-}
-
-/* Deprecated: Please use JITStubCall instead. */
-
-ALWAYS_INLINE void JIT::emitPutJITStubArgConstant(void* value, unsigned argumentNumber)
-{
- unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + JITSTACKFRAME_ARGS_INDEX;
- poke(ImmPtr(value), argumentStackOffset);
-}
-
-/* Deprecated: Please use JITStubCall instead. */
-
ALWAYS_INLINE void JIT::emitGetJITStubArg(unsigned argumentNumber, RegisterID dst)
{
unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + JITSTACKFRAME_ARGS_INDEX;
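
The deleted helpers poked stub arguments onto the JIT stack frame by hand; as the surviving comment notes, call sites are expected to go through JITStubCall instead. A minimal sketch of that replacement pattern, assuming the JITStubCall interface declared in JITStubCall.h in this tree (the stub cti_op_add, the registers regT0/regT1, and the operand dst are illustrative, not part of this patch):

    // Inside a JIT::emit_op_* method: construct the call, queue the arguments
    // (JITStubCall computes the stack offsets itself), then emit the call.
    JITStubCall stubCall(this, cti_op_add);
    stubCall.addArgument(regT0); // first stub argument
    stubCall.addArgument(regT1); // second stub argument
    stubCall.call(dst);          // emits the call and stores the result in virtual register dst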
@@ -89,25 +63,35 @@ ALWAYS_INLINE void JIT::emitPutImmediateToCallFrameHeader(void* value, RegisterF
ALWAYS_INLINE void JIT::emitGetFromCallFrameHeaderPtr(RegisterFile::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
{
loadPtr(Address(from, entry * sizeof(Register)), to);
-#if !USE(JSVALUE32_64)
+#if USE(JSVALUE64)
killLastResultRegister();
#endif
}
+ALWAYS_INLINE void JIT::emitLoadCharacterString(RegisterID src, RegisterID dst, JumpList& failures)
+{
+ failures.append(branchPtr(NotEqual, Address(src), ImmPtr(m_globalData->jsStringVPtr)));
+ failures.append(branchTest32(NonZero, Address(src, OBJECT_OFFSETOF(JSString, m_fiberCount))));
+ failures.append(branch32(NotEqual, MacroAssembler::Address(src, ThunkHelpers::jsStringLengthOffset()), Imm32(1)));
+ loadPtr(MacroAssembler::Address(src, ThunkHelpers::jsStringValueOffset()), dst);
+ loadPtr(MacroAssembler::Address(dst, ThunkHelpers::stringImplDataOffset()), dst);
+ load16(MacroAssembler::Address(dst, 0), dst);
+}
+
ALWAYS_INLINE void JIT::emitGetFromCallFrameHeader32(RegisterFile::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
{
load32(Address(from, entry * sizeof(Register)), to);
-#if !USE(JSVALUE32_64)
+#if USE(JSVALUE64)
killLastResultRegister();
#endif
}
ALWAYS_INLINE JIT::Call JIT::emitNakedCall(CodePtr function)
{
- ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
+ ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
Call nakedCall = nearCall();
- m_calls.append(CallRecord(nakedCall, m_bytecodeIndex, function.executableAddress()));
+ m_calls.append(CallRecord(nakedCall, m_bytecodeOffset, function.executableAddress()));
return nakedCall;
}
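
emitLoadCharacterString, added above, appends to the failure list unless src holds a non-rope JSString of length 1, then loads that single 16-bit character into dst. A sketch of how a caller might use it, assuming regT0 already holds a cell and that the slow path handles everything the fast path rejects (register choice and surrounding opcode are assumptions, not taken from this patch):

    JumpList failures;
    emitLoadCharacterString(regT0, regT0, failures); // regT0 now holds the character code
    // ... compare or otherwise use the 16-bit character value in regT0 ...
    addSlowCase(failures);                           // any failed check falls back to the stub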
@@ -115,6 +99,7 @@ ALWAYS_INLINE JIT::Call JIT::emitNakedCall(CodePtr function)
ALWAYS_INLINE void JIT::beginUninterruptedSequence(int insnSpace, int constSpace)
{
+ JSInterfaceJIT::beginUninterruptedSequence();
#if CPU(ARM_TRADITIONAL)
#ifndef NDEBUG
// Ensure the label after the sequence can also fit
@@ -137,9 +122,17 @@ ALWAYS_INLINE void JIT::beginUninterruptedSequence(int insnSpace, int constSpace
ALWAYS_INLINE void JIT::endUninterruptedSequence(int insnSpace, int constSpace)
{
#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL
- ASSERT(differenceBetween(m_uninterruptedInstructionSequenceBegin, label()) == insnSpace);
- ASSERT(sizeOfConstantPool() - m_uninterruptedConstantSequenceBegin == constSpace);
+ /* There are several cases in which the uninterrupted sequence is larger than
+  * the maximum offset required for patching that same sequence. For example, if
+  * the last macroassembler instruction in an uninterrupted sequence is a stub
+  * call, it emits store instruction(s) that should not be counted towards the
+  * length of the uninterrupted sequence. So insnSpace and constSpace should be
+  * treated as an upper limit rather than a hard limit.
+  */
+ ASSERT(differenceBetween(m_uninterruptedInstructionSequenceBegin, label()) <= insnSpace);
+ ASSERT(sizeOfConstantPool() - m_uninterruptedConstantSequenceBegin <= constSpace);
#endif
+ JSInterfaceJIT::endUninterruptedSequence();
}
#endif
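
The relaxed assertions above still bound a patchable sequence: insnSpace and constSpace now only need to be large enough, not exact. The usage pattern they guard looks roughly like this (the constant names are illustrative of the per-sequence *InstructionSpace/*ConstantSpace constants in JIT.h and are not introduced by this patch):

    // Emit a fast path that will later be repatched; its size must not
    // exceed the reserved instruction/constant-pool space.
    beginUninterruptedSequence(sequenceGetByIdHotPathInstructionSpace, sequenceGetByIdHotPathConstantSpace);
    // ... emit the patchable get_by_id fast path ...
    endUninterruptedSequence(sequenceGetByIdHotPathInstructionSpace, sequenceGetByIdHotPathConstantSpace);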
@@ -161,6 +154,23 @@ ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
loadPtr(address, linkRegister);
}
+#elif CPU(MIPS)
+
+ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg)
+{
+ move(returnAddressRegister, reg);
+}
+
+ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(RegisterID reg)
+{
+ move(reg, returnAddressRegister);
+}
+
+ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
+{
+ loadPtr(address, returnAddressRegister);
+}
+
#else // CPU(X86) || CPU(X86_64)
ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg)
@@ -180,18 +190,12 @@ ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
#endif
-#if USE(JIT_STUB_ARGUMENT_VA_LIST)
-ALWAYS_INLINE void JIT::restoreArgumentReference()
-{
- poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof (void*));
-}
-ALWAYS_INLINE void JIT::restoreArgumentReferenceForTrampoline() {}
-#else
ALWAYS_INLINE void JIT::restoreArgumentReference()
{
move(stackPointerRegister, firstArgumentRegister);
- poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof (void*));
+ poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
}
+
ALWAYS_INLINE void JIT::restoreArgumentReferenceForTrampoline()
{
#if CPU(X86)
@@ -202,7 +206,6 @@ ALWAYS_INLINE void JIT::restoreArgumentReferenceForTrampoline()
#endif
// In the trampoline on x86-64, the first argument register is not overwritten.
}
-#endif
ALWAYS_INLINE JIT::Jump JIT::checkStructure(RegisterID reg, Structure* structure)
{
@@ -217,33 +220,33 @@ ALWAYS_INLINE void JIT::linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator&
ALWAYS_INLINE void JIT::addSlowCase(Jump jump)
{
- ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
+ ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
- m_slowCases.append(SlowCaseEntry(jump, m_bytecodeIndex));
+ m_slowCases.append(SlowCaseEntry(jump, m_bytecodeOffset));
}
ALWAYS_INLINE void JIT::addSlowCase(JumpList jumpList)
{
- ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
+ ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
const JumpList::JumpVector& jumpVector = jumpList.jumps();
size_t size = jumpVector.size();
for (size_t i = 0; i < size; ++i)
- m_slowCases.append(SlowCaseEntry(jumpVector[i], m_bytecodeIndex));
+ m_slowCases.append(SlowCaseEntry(jumpVector[i], m_bytecodeOffset));
}
ALWAYS_INLINE void JIT::addJump(Jump jump, int relativeOffset)
{
- ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
+ ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
- m_jmpTable.append(JumpTable(jump, m_bytecodeIndex + relativeOffset));
+ m_jmpTable.append(JumpTable(jump, m_bytecodeOffset + relativeOffset));
}
ALWAYS_INLINE void JIT::emitJumpSlowToHot(Jump jump, int relativeOffset)
{
- ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
+ ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
- jump.linkTo(m_labels[m_bytecodeIndex + relativeOffset], this);
+ jump.linkTo(m_labels[m_bytecodeOffset + relativeOffset], this);
}
#if ENABLE(SAMPLING_FLAGS)
@@ -307,23 +310,13 @@ ALWAYS_INLINE void JIT::sampleCodeBlock(CodeBlock* codeBlock)
#endif
#endif
-inline JIT::Address JIT::addressFor(unsigned index, RegisterID base)
+ALWAYS_INLINE bool JIT::isOperandConstantImmediateChar(unsigned src)
{
- return Address(base, (index * sizeof(Register)));
+ return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isString() && asString(getConstantOperand(src).asCell())->length() == 1;
}
#if USE(JSVALUE32_64)
-inline JIT::Address JIT::tagFor(unsigned index, RegisterID base)
-{
- return Address(base, (index * sizeof(Register)) + OBJECT_OFFSETOF(JSValue, u.asBits.tag));
-}
-
-inline JIT::Address JIT::payloadFor(unsigned index, RegisterID base)
-{
- return Address(base, (index * sizeof(Register)) + OBJECT_OFFSETOF(JSValue, u.asBits.payload));
-}
-
inline void JIT::emitLoadTag(unsigned index, RegisterID tag)
{
RegisterID mappedTag;
@@ -469,24 +462,24 @@ ALWAYS_INLINE void JIT::emitInitRegister(unsigned dst)
emitStore(dst, jsUndefined());
}
-inline bool JIT::isLabeled(unsigned bytecodeIndex)
+inline bool JIT::isLabeled(unsigned bytecodeOffset)
{
for (size_t numberOfJumpTargets = m_codeBlock->numberOfJumpTargets(); m_jumpTargetIndex != numberOfJumpTargets; ++m_jumpTargetIndex) {
unsigned jumpTarget = m_codeBlock->jumpTarget(m_jumpTargetIndex);
- if (jumpTarget == bytecodeIndex)
+ if (jumpTarget == bytecodeOffset)
return true;
- if (jumpTarget > bytecodeIndex)
+ if (jumpTarget > bytecodeOffset)
return false;
}
return false;
}
-inline void JIT::map(unsigned bytecodeIndex, unsigned virtualRegisterIndex, RegisterID tag, RegisterID payload)
+inline void JIT::map(unsigned bytecodeOffset, unsigned virtualRegisterIndex, RegisterID tag, RegisterID payload)
{
- if (isLabeled(bytecodeIndex))
+ if (isLabeled(bytecodeOffset))
return;
- m_mappedBytecodeIndex = bytecodeIndex;
+ m_mappedBytecodeOffset = bytecodeOffset;
m_mappedVirtualRegisterIndex = virtualRegisterIndex;
m_mappedTag = tag;
m_mappedPayload = payload;
@@ -502,7 +495,7 @@ inline void JIT::unmap(RegisterID registerID)
inline void JIT::unmap()
{
- m_mappedBytecodeIndex = (unsigned)-1;
+ m_mappedBytecodeOffset = (unsigned)-1;
m_mappedVirtualRegisterIndex = (unsigned)-1;
m_mappedTag = (RegisterID)-1;
m_mappedPayload = (RegisterID)-1;
@@ -510,7 +503,7 @@ inline void JIT::unmap()
inline bool JIT::isMapped(unsigned virtualRegisterIndex)
{
- if (m_mappedBytecodeIndex != m_bytecodeIndex)
+ if (m_mappedBytecodeOffset != m_bytecodeOffset)
return false;
if (m_mappedVirtualRegisterIndex != virtualRegisterIndex)
return false;
@@ -519,7 +512,7 @@ inline bool JIT::isMapped(unsigned virtualRegisterIndex)
inline bool JIT::getMappedPayload(unsigned virtualRegisterIndex, RegisterID& payload)
{
- if (m_mappedBytecodeIndex != m_bytecodeIndex)
+ if (m_mappedBytecodeOffset != m_bytecodeOffset)
return false;
if (m_mappedVirtualRegisterIndex != virtualRegisterIndex)
return false;
@@ -531,7 +524,7 @@ inline bool JIT::getMappedPayload(unsigned virtualRegisterIndex, RegisterID& pay
inline bool JIT::getMappedTag(unsigned virtualRegisterIndex, RegisterID& tag)
{
- if (m_mappedBytecodeIndex != m_bytecodeIndex)
+ if (m_mappedBytecodeOffset != m_bytecodeOffset)
return false;
if (m_mappedVirtualRegisterIndex != virtualRegisterIndex)
return false;
@@ -543,14 +536,22 @@ inline bool JIT::getMappedTag(unsigned virtualRegisterIndex, RegisterID& tag)
inline void JIT::emitJumpSlowCaseIfNotJSCell(unsigned virtualRegisterIndex)
{
- if (!m_codeBlock->isKnownNotImmediate(virtualRegisterIndex))
- addSlowCase(branch32(NotEqual, tagFor(virtualRegisterIndex), Imm32(JSValue::CellTag)));
+ if (!m_codeBlock->isKnownNotImmediate(virtualRegisterIndex)) {
+ if (m_codeBlock->isConstantRegisterIndex(virtualRegisterIndex))
+ addSlowCase(jump());
+ else
+ addSlowCase(emitJumpIfNotJSCell(virtualRegisterIndex));
+ }
}
inline void JIT::emitJumpSlowCaseIfNotJSCell(unsigned virtualRegisterIndex, RegisterID tag)
{
- if (!m_codeBlock->isKnownNotImmediate(virtualRegisterIndex))
- addSlowCase(branch32(NotEqual, tag, Imm32(JSValue::CellTag)));
+ if (!m_codeBlock->isKnownNotImmediate(virtualRegisterIndex)) {
+ if (m_codeBlock->isConstantRegisterIndex(virtualRegisterIndex))
+ addSlowCase(jump());
+ else
+ addSlowCase(branch32(NotEqual, tag, Imm32(JSValue::CellTag)));
+ }
}
inline void JIT::linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator& iter, unsigned virtualRegisterIndex)
@@ -581,31 +582,6 @@ ALWAYS_INLINE bool JIT::getOperandConstantImmediateInt(unsigned op1, unsigned op
return false;
}
-/* Deprecated: Please use JITStubCall instead. */
-
-ALWAYS_INLINE void JIT::emitPutJITStubArg(RegisterID tag, RegisterID payload, unsigned argumentNumber)
-{
- unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + JITSTACKFRAME_ARGS_INDEX;
- poke(payload, argumentStackOffset);
- poke(tag, argumentStackOffset + 1);
-}
-
-/* Deprecated: Please use JITStubCall instead. */
-
-ALWAYS_INLINE void JIT::emitPutJITStubArgFromVirtualRegister(unsigned src, unsigned argumentNumber, RegisterID scratch1, RegisterID scratch2)
-{
- unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + JITSTACKFRAME_ARGS_INDEX;
- if (m_codeBlock->isConstantRegisterIndex(src)) {
- JSValue constant = m_codeBlock->getConstant(src);
- poke(Imm32(constant.payload()), argumentStackOffset);
- poke(Imm32(constant.tag()), argumentStackOffset + 1);
- } else {
- emitLoad(src, scratch1, scratch2);
- poke(scratch2, argumentStackOffset);
- poke(scratch1, argumentStackOffset + 1);
- }
-}
-
#else // USE(JSVALUE32_64)
ALWAYS_INLINE void JIT::killLastResultRegister()
@@ -616,7 +592,7 @@ ALWAYS_INLINE void JIT::killLastResultRegister()
// get arg puts an arg from the SF register array into a h/w register
ALWAYS_INLINE void JIT::emitGetVirtualRegister(int src, RegisterID dst)
{
- ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
+ ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
// TODO: we want to reuse values that are already in registers if we can - add a register allocator!
if (m_codeBlock->isConstantRegisterIndex(src)) {
@@ -628,8 +604,8 @@ ALWAYS_INLINE void JIT::emitGetVirtualRegister(int src, RegisterID dst)
if (src == m_lastResultBytecodeRegister && m_codeBlock->isTemporaryRegisterIndex(src)) {
bool atJumpTarget = false;
- while (m_jumpTargetsPosition < m_codeBlock->numberOfJumpTargets() && m_codeBlock->jumpTarget(m_jumpTargetsPosition) <= m_bytecodeIndex) {
- if (m_codeBlock->jumpTarget(m_jumpTargetsPosition) == m_bytecodeIndex)
+ while (m_jumpTargetsPosition < m_codeBlock->numberOfJumpTargets() && m_codeBlock->jumpTarget(m_jumpTargetsPosition) <= m_bytecodeOffset) {
+ if (m_codeBlock->jumpTarget(m_jumpTargetsPosition) == m_bytecodeOffset)
atJumpTarget = true;
++m_jumpTargetsPosition;
}
@@ -671,7 +647,7 @@ ALWAYS_INLINE bool JIT::isOperandConstantImmediateInt(unsigned src)
ALWAYS_INLINE void JIT::emitPutVirtualRegister(unsigned dst, RegisterID from)
{
storePtr(from, Address(callFrameRegister, dst * sizeof(Register)));
- m_lastResultBytecodeRegister = (from == cachedResultRegister) ? dst : std::numeric_limits<int>::max();
+ m_lastResultBytecodeRegister = (from == cachedResultRegister) ? static_cast<int>(dst) : std::numeric_limits<int>::max();
}
ALWAYS_INLINE void JIT::emitInitRegister(unsigned dst)
@@ -721,14 +697,6 @@ ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotJSCell(RegisterID reg, int vReg)
}
#if USE(JSVALUE64)
-ALWAYS_INLINE JIT::Jump JIT::emitJumpIfImmediateNumber(RegisterID reg)
-{
- return branchTestPtr(NonZero, reg, tagTypeNumberRegister);
-}
-ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotImmediateNumber(RegisterID reg)
-{
- return branchTestPtr(Zero, reg, tagTypeNumberRegister);
-}
inline void JIT::emitLoadDouble(unsigned index, FPRegisterID value)
{
@@ -789,7 +757,7 @@ ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateNumber(RegisterID reg)
addSlowCase(emitJumpIfNotImmediateNumber(reg));
}
-#if !USE(JSVALUE64)
+#if USE(JSVALUE32_64)
ALWAYS_INLINE void JIT::emitFastArithDeTagImmediate(RegisterID reg)
{
subPtr(Imm32(JSImmediate::TagTypeNumber), reg);
@@ -812,15 +780,6 @@ ALWAYS_INLINE void JIT::emitFastArithReTagImmediate(RegisterID src, RegisterID d
#endif
}
-ALWAYS_INLINE void JIT::emitFastArithImmToInt(RegisterID reg)
-{
-#if USE(JSVALUE64)
- UNUSED_PARAM(reg);
-#else
- rshift32(Imm32(JSImmediate::IntegerPayloadShift), reg);
-#endif
-}
-
// operand is int32_t, must have been zero-extended if register is 64-bit.
ALWAYS_INLINE void JIT::emitFastArithIntToImmNoCheck(RegisterID src, RegisterID dest)
{
@@ -841,23 +800,6 @@ ALWAYS_INLINE void JIT::emitTagAsBoolImmediate(RegisterID reg)
or32(Imm32(static_cast<int32_t>(JSImmediate::FullTagTypeBool)), reg);
}
-/* Deprecated: Please use JITStubCall instead. */
-
-// get arg puts an arg from the SF register array onto the stack, as an arg to a context threaded function.
-ALWAYS_INLINE void JIT::emitPutJITStubArgFromVirtualRegister(unsigned src, unsigned argumentNumber, RegisterID scratch)
-{
- unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + JITSTACKFRAME_ARGS_INDEX;
- if (m_codeBlock->isConstantRegisterIndex(src)) {
- JSValue value = m_codeBlock->getConstant(src);
- poke(ImmPtr(JSValue::encode(value)), argumentStackOffset);
- } else {
- loadPtr(Address(callFrameRegister, src * sizeof(Register)), scratch);
- poke(scratch, argumentStackOffset);
- }
-
- killLastResultRegister();
-}
-
#endif // USE(JSVALUE32_64)
} // namespace JSC