author    Steve Block <steveblock@google.com>  2011-05-25 19:08:45 +0100
committer Steve Block <steveblock@google.com>  2011-06-08 13:51:31 +0100
commit    2bde8e466a4451c7319e3a072d118917957d6554 (patch)
tree      28f4a1b869a513e565c7760d0e6a06e7cf1fe95a /Source/JavaScriptCore/jit
parent    6939c99b71d9372d14a0c74a772108052e8c48c8 (diff)
download  external_webkit-2bde8e466a4451c7319e3a072d118917957d6554.zip
          external_webkit-2bde8e466a4451c7319e3a072d118917957d6554.tar.gz
          external_webkit-2bde8e466a4451c7319e3a072d118917957d6554.tar.bz2
Merge WebKit at r82507: Initial merge by git
Change-Id: I60ce9d780725b58b45e54165733a8ffee23b683e
Diffstat (limited to 'Source/JavaScriptCore/jit')
-rw-r--r--  Source/JavaScriptCore/jit/JIT.cpp                    |  31
-rw-r--r--  Source/JavaScriptCore/jit/JIT.h                      |   5
-rw-r--r--  Source/JavaScriptCore/jit/JITArithmetic.cpp          |  18
-rw-r--r--  Source/JavaScriptCore/jit/JITArithmetic32_64.cpp     | 160
-rw-r--r--  Source/JavaScriptCore/jit/JITCall.cpp                |  29
-rw-r--r--  Source/JavaScriptCore/jit/JITCall32_64.cpp           |  49
-rw-r--r--  Source/JavaScriptCore/jit/JITInlineMethods.h         |  92
-rw-r--r--  Source/JavaScriptCore/jit/JITOpcodes.cpp             | 235
-rw-r--r--  Source/JavaScriptCore/jit/JITOpcodes32_64.cpp        | 334
-rw-r--r--  Source/JavaScriptCore/jit/JITPropertyAccess.cpp      | 145
-rw-r--r--  Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp | 159
-rw-r--r--  Source/JavaScriptCore/jit/JITStubCall.h              |   4
-rw-r--r--  Source/JavaScriptCore/jit/JITStubs.cpp               |  57
-rw-r--r--  Source/JavaScriptCore/jit/JITStubs.h                 |  13
-rw-r--r--  Source/JavaScriptCore/jit/JSInterfaceJIT.h           |  67
-rw-r--r--  Source/JavaScriptCore/jit/SpecializedThunkJIT.h      |  16
-rw-r--r--  Source/JavaScriptCore/jit/ThunkGenerators.cpp        |  10
17 files changed, 739 insertions, 685 deletions
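
The bulk of this merge mechanically replaces Imm32/ImmPtr with TrustedImm32/TrustedImmPtr wherever the immediate is a constant the JIT itself chose (type tags, vptr checks, fixed increments), keeping the untrusted variants for script-derived values. Upstream split the immediate types so that hardening such as constant blinding can target only the untrusted ones. A minimal sketch of the distinction, using hypothetical stand-in types rather than the real MacroAssembler interface:

#include <stdint.h>

// Hypothetical stand-ins for the MacroAssembler immediate types. A
// TrustedImm32 carries a compiler-controlled constant and can be
// emitted verbatim; an Imm32 may carry script-derived bits, so a
// hardened assembler is free to obscure it (e.g. emit value ^ key and
// re-xor at runtime) before it reaches the instruction stream.
struct TrustedImm32 {
    explicit TrustedImm32(int32_t value) : m_value(value) { }
    int32_t m_value;
};

struct Imm32 : TrustedImm32 {
    explicit Imm32(int32_t value) : TrustedImm32(value) { }
};
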
diff --git a/Source/JavaScriptCore/jit/JIT.cpp b/Source/JavaScriptCore/jit/JIT.cpp
index 29e3778..063ae8c 100644
--- a/Source/JavaScriptCore/jit/JIT.cpp
+++ b/Source/JavaScriptCore/jit/JIT.cpp
@@ -92,7 +92,7 @@ JIT::JIT(JSGlobalData* globalData, CodeBlock* codeBlock, void* linkerOffset)
#if USE(JSVALUE32_64)
void JIT::emitTimeoutCheck()
{
- Jump skipTimeout = branchSub32(NonZero, Imm32(1), timeoutCheckRegister);
+ Jump skipTimeout = branchSub32(NonZero, TrustedImm32(1), timeoutCheckRegister);
JITStubCall stubCall(this, cti_timeout_check);
stubCall.addArgument(regT1, regT0); // save last result registers.
stubCall.call(timeoutCheckRegister);
@@ -102,7 +102,7 @@ void JIT::emitTimeoutCheck()
#else
void JIT::emitTimeoutCheck()
{
- Jump skipTimeout = branchSub32(NonZero, Imm32(1), timeoutCheckRegister);
+ Jump skipTimeout = branchSub32(NonZero, TrustedImm32(1), timeoutCheckRegister);
JITStubCall(this, cti_timeout_check).call(timeoutCheckRegister);
skipTimeout.link(this);
@@ -475,7 +475,7 @@ JITCode JIT::privateCompile(CodePtr* functionEntryArityCheck)
emitPutImmediateToCallFrameHeader(m_codeBlock, RegisterFile::CodeBlock);
addPtr(Imm32(m_codeBlock->m_numCalleeRegisters * sizeof(Register)), callFrameRegister, regT1);
- registerFileCheck = branchPtr(Below, AbsoluteAddress(&m_globalData->interpreter->registerFile().m_end), regT1);
+ registerFileCheck = branchPtr(Below, AbsoluteAddress(m_globalData->interpreter->registerFile().addressOfEnd()), regT1);
}
Label functionBody = label();
@@ -497,7 +497,7 @@ JITCode JIT::privateCompile(CodePtr* functionEntryArityCheck)
arityCheck = label();
preserveReturnAddressAfterCall(regT2);
emitPutToCallFrameHeader(regT2, RegisterFile::ReturnPC);
- branch32(Equal, regT1, Imm32(m_codeBlock->m_numParameters)).linkTo(beginLabel, this);
+ branch32(Equal, regT1, TrustedImm32(m_codeBlock->m_numParameters)).linkTo(beginLabel, this);
restoreArgumentReference();
JITStubCall(this, m_codeBlock->m_isConstructor ? cti_op_construct_arityCheck : cti_op_call_arityCheck).call(callFrameRegister);
@@ -567,7 +567,6 @@ JITCode JIT::privateCompile(CodePtr* functionEntryArityCheck)
#if ENABLE(JIT_OPTIMIZE_CALL)
for (unsigned i = 0; i < m_codeBlock->numberOfCallLinkInfos(); ++i) {
CallLinkInfo& info = m_codeBlock->callLinkInfo(i);
- info.ownerCodeBlock = m_codeBlock;
info.callReturnLocation = patchBuffer.locationOfNearCall(m_callStructureStubCompilationInfo[i].callReturnLocation);
info.hotPathBegin = patchBuffer.locationOf(m_callStructureStubCompilationInfo[i].hotPathBegin);
info.hotPathOther = patchBuffer.locationOfNearCall(m_callStructureStubCompilationInfo[i].hotPathOther);
@@ -588,18 +587,6 @@ JITCode JIT::privateCompile(CodePtr* functionEntryArityCheck)
}
#if ENABLE(JIT_OPTIMIZE_CALL)
-void JIT::unlinkCallOrConstruct(CallLinkInfo* callLinkInfo)
-{
- // When the JSFunction is deleted the pointer embedded in the instruction stream will no longer be valid
- // (and, if a new JSFunction happened to be constructed at the same location, we could get a false positive
- // match). Reset the check so it no longer matches.
- RepatchBuffer repatchBuffer(callLinkInfo->ownerCodeBlock);
-#if USE(JSVALUE32_64)
- repatchBuffer.repatch(callLinkInfo->hotPathBegin, 0);
-#else
- repatchBuffer.repatch(callLinkInfo->hotPathBegin, JSValue::encode(JSValue()));
-#endif
-}
void JIT::linkCall(JSFunction* callee, CodeBlock* callerCodeBlock, CodeBlock* calleeCodeBlock, JIT::CodePtr code, CallLinkInfo* callLinkInfo, int callerArgCount, JSGlobalData* globalData)
{
@@ -609,10 +596,7 @@ void JIT::linkCall(JSFunction* callee, CodeBlock* callerCodeBlock, CodeBlock* ca
// If this is a native call calleeCodeBlock is null so the number of parameters is unimportant
if (!calleeCodeBlock || (callerArgCount == calleeCodeBlock->m_numParameters)) {
ASSERT(!callLinkInfo->isLinked());
-
- if (calleeCodeBlock)
- calleeCodeBlock->addCaller(callLinkInfo);
-
+ callLinkInfo->callee.set(*globalData, callerCodeBlock->ownerExecutable(), callee);
repatchBuffer.repatch(callLinkInfo->hotPathBegin, callee);
repatchBuffer.relink(callLinkInfo->hotPathOther, code);
}
@@ -629,10 +613,7 @@ void JIT::linkConstruct(JSFunction* callee, CodeBlock* callerCodeBlock, CodeBloc
// If this is a native call calleeCodeBlock is null so the number of parameters is unimportant
if (!calleeCodeBlock || (callerArgCount == calleeCodeBlock->m_numParameters)) {
ASSERT(!callLinkInfo->isLinked());
-
- if (calleeCodeBlock)
- calleeCodeBlock->addCaller(callLinkInfo);
-
+ callLinkInfo->callee.set(*globalData, callerCodeBlock->ownerExecutable(), callee);
repatchBuffer.repatch(callLinkInfo->hotPathBegin, callee);
repatchBuffer.relink(callLinkInfo->hotPathOther, code);
}
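
The JIT.cpp hunks above also rework call linking: the old scheme registered the caller with the callee's CodeBlock (addCaller) and relied on unlinkCallOrConstruct to repatch when the JSFunction died; the new scheme stores the callee in a GC-visible slot on CallLinkInfo, so the collector itself keeps the pointer embedded in the instruction stream valid. A simplified sketch of that slot, assuming the shape suggested by the callee.set(...) call above rather than the real WriteBarrier API:

class JSGlobalData;
class JSCell;

// Simplified stand-in for the WriteBarrier slot written by
// callLinkInfo->callee.set(*globalData, owner, callee). The real class
// also tells the garbage collector that 'owner' now references 'value'.
template<typename T>
class WriteBarrierSketch {
public:
    WriteBarrierSketch() : m_cell(0) { }
    void set(JSGlobalData& globalData, JSCell* owner, T* value)
    {
        (void)globalData; // real code may use this for heap bookkeeping
        (void)owner;      // real code records the owner for marking
        m_cell = value;
    }
    T* get() const { return m_cell; }
    bool isLinked() const { return !!m_cell; }
private:
    T* m_cell;
};
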
diff --git a/Source/JavaScriptCore/jit/JIT.h b/Source/JavaScriptCore/jit/JIT.h
index f98158c..61bd2ab 100644
--- a/Source/JavaScriptCore/jit/JIT.h
+++ b/Source/JavaScriptCore/jit/JIT.h
@@ -245,7 +245,6 @@ namespace JSC {
static void linkCall(JSFunction* callee, CodeBlock* callerCodeBlock, CodeBlock* calleeCodeBlock, CodePtr, CallLinkInfo*, int callerArgCount, JSGlobalData*);
static void linkConstruct(JSFunction* callee, CodeBlock* callerCodeBlock, CodeBlock* calleeCodeBlock, CodePtr, CallLinkInfo*, int callerArgCount, JSGlobalData*);
- static void unlinkCallOrConstruct(CallLinkInfo*);
private:
struct JSRInfo {
@@ -310,7 +309,7 @@ namespace JSC {
void emitStore(unsigned index, RegisterID tag, RegisterID payload, RegisterID base = callFrameRegister);
void emitStore(unsigned index, const JSValue constant, RegisterID base = callFrameRegister);
void emitStoreInt32(unsigned index, RegisterID payload, bool indexIsInt32 = false);
- void emitStoreInt32(unsigned index, Imm32 payload, bool indexIsInt32 = false);
+ void emitStoreInt32(unsigned index, TrustedImm32 payload, bool indexIsInt32 = false);
void emitStoreCell(unsigned index, RegisterID payload, bool indexIsCell = false);
void emitStoreBool(unsigned index, RegisterID tag, bool indexIsBool = false);
void emitStoreDouble(unsigned index, FPRegisterID value);
@@ -854,6 +853,8 @@ namespace JSC {
void emitInitRegister(unsigned dst);
void emitPutToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry entry);
+ void emitPutCellToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry);
+ void emitPutIntToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry);
void emitPutImmediateToCallFrameHeader(void* value, RegisterFile::CallFrameHeaderEntry entry);
void emitGetFromCallFrameHeaderPtr(RegisterFile::CallFrameHeaderEntry entry, RegisterID to, RegisterID from = callFrameRegister);
void emitGetFromCallFrameHeader32(RegisterFile::CallFrameHeaderEntry entry, RegisterID to, RegisterID from = callFrameRegister);
diff --git a/Source/JavaScriptCore/jit/JITArithmetic.cpp b/Source/JavaScriptCore/jit/JITArithmetic.cpp
index cd05f51..edf2290 100644
--- a/Source/JavaScriptCore/jit/JITArithmetic.cpp
+++ b/Source/JavaScriptCore/jit/JITArithmetic.cpp
@@ -167,7 +167,7 @@ void JIT::emit_op_urshift(Instruction* currentInstruction)
// a toUint conversion, which can result in a value we can represent
// as an immediate int.
if (shift < 0 || !(shift & 31))
- addSlowCase(branch32(LessThan, regT0, Imm32(0)));
+ addSlowCase(branch32(LessThan, regT0, TrustedImm32(0)));
emitFastArithReTagImmediate(regT0, regT0);
emitPutVirtualRegister(dst, regT0);
return;
@@ -179,7 +179,7 @@ void JIT::emit_op_urshift(Instruction* currentInstruction)
emitFastArithImmToInt(regT0);
emitFastArithImmToInt(regT1);
urshift32(regT1, regT0);
- addSlowCase(branch32(LessThan, regT0, Imm32(0)));
+ addSlowCase(branch32(LessThan, regT0, TrustedImm32(0)));
emitFastArithReTagImmediate(regT0, regT0);
emitPutVirtualRegister(dst, regT0);
}
@@ -202,7 +202,7 @@ void JIT::emitSlow_op_urshift(Instruction* currentInstruction, Vector<SlowCaseEn
if (shift)
urshift32(Imm32(shift & 0x1f), regT0);
if (shift < 0 || !(shift & 31))
- failures.append(branch32(LessThan, regT0, Imm32(0)));
+ failures.append(branch32(LessThan, regT0, TrustedImm32(0)));
emitFastArithReTagImmediate(regT0, regT0);
emitPutVirtualRegister(dst, regT0);
emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_rshift));
@@ -224,7 +224,7 @@ void JIT::emitSlow_op_urshift(Instruction* currentInstruction, Vector<SlowCaseEn
failures.append(emitJumpIfNotImmediateInteger(regT1)); // op2 is not an int
emitFastArithImmToInt(regT1);
urshift32(regT1, regT0);
- failures.append(branch32(LessThan, regT0, Imm32(0)));
+ failures.append(branch32(LessThan, regT0, TrustedImm32(0)));
emitFastArithReTagImmediate(regT0, regT0);
emitPutVirtualRegister(dst, regT0);
emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_rshift));
@@ -773,7 +773,7 @@ void JIT::emit_op_post_inc(Instruction* currentInstruction)
emitGetVirtualRegister(srcDst, regT0);
move(regT0, regT1);
emitJumpSlowCaseIfNotImmediateInteger(regT0);
- addSlowCase(branchAdd32(Overflow, Imm32(1), regT1));
+ addSlowCase(branchAdd32(Overflow, TrustedImm32(1), regT1));
emitFastArithIntToImmNoCheck(regT1, regT1);
emitPutVirtualRegister(srcDst, regT1);
emitPutVirtualRegister(result);
@@ -800,7 +800,7 @@ void JIT::emit_op_post_dec(Instruction* currentInstruction)
emitGetVirtualRegister(srcDst, regT0);
move(regT0, regT1);
emitJumpSlowCaseIfNotImmediateInteger(regT0);
- addSlowCase(branchSub32(Zero, Imm32(1), regT1));
+ addSlowCase(branchSub32(Zero, TrustedImm32(1), regT1));
emitFastArithIntToImmNoCheck(regT1, regT1);
emitPutVirtualRegister(srcDst, regT1);
emitPutVirtualRegister(result);
@@ -825,7 +825,7 @@ void JIT::emit_op_pre_inc(Instruction* currentInstruction)
emitGetVirtualRegister(srcDst, regT0);
emitJumpSlowCaseIfNotImmediateInteger(regT0);
- addSlowCase(branchAdd32(Overflow, Imm32(1), regT0));
+ addSlowCase(branchAdd32(Overflow, TrustedImm32(1), regT0));
emitFastArithIntToImmNoCheck(regT0, regT0);
emitPutVirtualRegister(srcDst);
}
@@ -849,7 +849,7 @@ void JIT::emit_op_pre_dec(Instruction* currentInstruction)
emitGetVirtualRegister(srcDst, regT0);
emitJumpSlowCaseIfNotImmediateInteger(regT0);
- addSlowCase(branchSub32(Zero, Imm32(1), regT0));
+ addSlowCase(branchSub32(Zero, TrustedImm32(1), regT0));
emitFastArithIntToImmNoCheck(regT0, regT0);
emitPutVirtualRegister(srcDst);
}
@@ -888,7 +888,7 @@ void JIT::emit_op_mod(Instruction* currentInstruction)
emitJumpSlowCaseIfNotImmediateInteger(regT0);
emitJumpSlowCaseIfNotImmediateInteger(regT2);
- addSlowCase(branchPtr(Equal, regT2, ImmPtr(JSValue::encode(jsNumber(0)))));
+ addSlowCase(branchPtr(Equal, regT2, TrustedImmPtr(JSValue::encode(jsNumber(0)))));
m_assembler.cdq();
m_assembler.idivl_r(regT2);
emitFastArithReTagImmediate(regT1, regT0);
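
The LessThan-zero slow cases kept throughout op_urshift above exist because JavaScript's >>> yields an unsigned 32-bit result while the fast path retags the register as a signed immediate int; any result with the top bit set cannot be encoded that way. A small illustration in plain C++:

#include <stdint.h>

// (0xFFFFFFFF >>> 0) is 4294967295 in JavaScript, but the same bit
// pattern reads as -1 once retagged as a signed 32-bit immediate, so
// the JIT falls back whenever the shifted result looks negative.
bool urshiftNeedsSlowCase(uint32_t result)
{
    return static_cast<int32_t>(result) < 0; // mirrors branch32(LessThan, regT0, 0)
}
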
diff --git a/Source/JavaScriptCore/jit/JITArithmetic32_64.cpp b/Source/JavaScriptCore/jit/JITArithmetic32_64.cpp
index e0b31f0..6865489 100644
--- a/Source/JavaScriptCore/jit/JITArithmetic32_64.cpp
+++ b/Source/JavaScriptCore/jit/JITArithmetic32_64.cpp
@@ -54,17 +54,17 @@ void JIT::emit_op_negate(Instruction* currentInstruction)
emitLoad(src, regT1, regT0);
- Jump srcNotInt = branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag));
- addSlowCase(branchTest32(Zero, regT0, Imm32(0x7fffffff)));
+ Jump srcNotInt = branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag));
+ addSlowCase(branchTest32(Zero, regT0, TrustedImm32(0x7fffffff)));
neg32(regT0);
emitStoreInt32(dst, regT0, (dst == src));
Jump end = jump();
srcNotInt.link(this);
- addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));
+ addSlowCase(branch32(Above, regT1, TrustedImm32(JSValue::LowestTag)));
- xor32(Imm32(1 << 31), regT1);
+ xor32(TrustedImm32(1 << 31), regT1);
store32(regT1, tagFor(dst));
if (dst != src)
store32(regT0, payloadFor(dst));
@@ -96,7 +96,7 @@ void JIT::emit_op_jnless(Instruction* currentInstruction)
// Character less.
if (isOperandConstantImmediateChar(op1)) {
emitLoad(op2, regT1, regT0);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
JumpList failures;
emitLoadCharacterString(regT0, regT0, failures);
addSlowCase(failures);
@@ -105,7 +105,7 @@ void JIT::emit_op_jnless(Instruction* currentInstruction)
}
if (isOperandConstantImmediateChar(op2)) {
emitLoad(op1, regT1, regT0);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
JumpList failures;
emitLoadCharacterString(regT0, regT0, failures);
addSlowCase(failures);
@@ -115,16 +115,16 @@ void JIT::emit_op_jnless(Instruction* currentInstruction)
if (isOperandConstantImmediateInt(op1)) {
// Int32 less.
emitLoad(op2, regT3, regT2);
- notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+ notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
addJump(branch32(LessThanOrEqual, regT2, Imm32(getConstantOperand(op1).asInt32())), target);
} else if (isOperandConstantImmediateInt(op2)) {
emitLoad(op1, regT1, regT0);
- notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
addJump(branch32(GreaterThanOrEqual, regT0, Imm32(getConstantOperand(op2).asInt32())), target);
} else {
emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
- notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+ notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+ notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
addJump(branch32(GreaterThanOrEqual, regT0, regT2), target);
}
@@ -185,7 +185,7 @@ void JIT::emit_op_jless(Instruction* currentInstruction)
// Character less.
if (isOperandConstantImmediateChar(op1)) {
emitLoad(op2, regT1, regT0);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
JumpList failures;
emitLoadCharacterString(regT0, regT0, failures);
addSlowCase(failures);
@@ -194,7 +194,7 @@ void JIT::emit_op_jless(Instruction* currentInstruction)
}
if (isOperandConstantImmediateChar(op2)) {
emitLoad(op1, regT1, regT0);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
JumpList failures;
emitLoadCharacterString(regT0, regT0, failures);
addSlowCase(failures);
@@ -203,16 +203,16 @@ void JIT::emit_op_jless(Instruction* currentInstruction)
}
if (isOperandConstantImmediateInt(op1)) {
emitLoad(op2, regT3, regT2);
- notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+ notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
addJump(branch32(GreaterThan, regT2, Imm32(getConstantOperand(op1).asInt32())), target);
} else if (isOperandConstantImmediateInt(op2)) {
emitLoad(op1, regT1, regT0);
- notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
addJump(branch32(LessThan, regT0, Imm32(getConstantOperand(op2).asInt32())), target);
} else {
emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
- notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+ notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+ notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
addJump(branch32(LessThan, regT0, regT2), target);
}
@@ -272,7 +272,7 @@ void JIT::emit_op_jlesseq(Instruction* currentInstruction, bool invert)
// Character less.
if (isOperandConstantImmediateChar(op1)) {
emitLoad(op2, regT1, regT0);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
JumpList failures;
emitLoadCharacterString(regT0, regT0, failures);
addSlowCase(failures);
@@ -281,7 +281,7 @@ void JIT::emit_op_jlesseq(Instruction* currentInstruction, bool invert)
}
if (isOperandConstantImmediateChar(op2)) {
emitLoad(op1, regT1, regT0);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
JumpList failures;
emitLoadCharacterString(regT0, regT0, failures);
addSlowCase(failures);
@@ -290,16 +290,16 @@ void JIT::emit_op_jlesseq(Instruction* currentInstruction, bool invert)
}
if (isOperandConstantImmediateInt(op1)) {
emitLoad(op2, regT3, regT2);
- notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+ notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
addJump(branch32(invert ? LessThan : GreaterThanOrEqual, regT2, Imm32(getConstantOperand(op1).asInt32())), target);
} else if (isOperandConstantImmediateInt(op2)) {
emitLoad(op1, regT1, regT0);
- notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
addJump(branch32(invert ? GreaterThan : LessThanOrEqual, regT0, Imm32(getConstantOperand(op2).asInt32())), target);
} else {
emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
- notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+ notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+ notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
addJump(branch32(invert ? GreaterThan : LessThanOrEqual, regT0, regT2), target);
}
@@ -368,7 +368,7 @@ void JIT::emit_op_lshift(Instruction* currentInstruction)
if (isOperandConstantImmediateInt(op2)) {
emitLoad(op1, regT1, regT0);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
lshift32(Imm32(getConstantOperand(op2).asInt32()), regT0);
emitStoreInt32(dst, regT0, dst == op1);
return;
@@ -376,8 +376,8 @@ void JIT::emit_op_lshift(Instruction* currentInstruction)
emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
if (!isOperandConstantImmediateInt(op1))
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+ addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
lshift32(regT2, regT0);
emitStoreInt32(dst, regT0, dst == op1 || dst == op2);
}
@@ -410,7 +410,7 @@ void JIT::emitRightShift(Instruction* currentInstruction, bool isUnsigned)
// shift arguments, so any changes must be updated there as well.
if (isOperandConstantImmediateInt(op2)) {
emitLoad(op1, regT1, regT0);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
int shift = getConstantOperand(op2).asInt32();
if (isUnsigned) {
if (shift)
@@ -419,7 +419,7 @@ void JIT::emitRightShift(Instruction* currentInstruction, bool isUnsigned)
// a toUint conversion, which can result in a value we can represent
// as an immediate int.
if (shift < 0 || !(shift & 31))
- addSlowCase(branch32(LessThan, regT0, Imm32(0)));
+ addSlowCase(branch32(LessThan, regT0, TrustedImm32(0)));
} else if (shift) { // signed right shift by zero is simply toInt conversion
rshift32(Imm32(shift & 0x1f), regT0);
}
@@ -429,11 +429,11 @@ void JIT::emitRightShift(Instruction* currentInstruction, bool isUnsigned)
emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
if (!isOperandConstantImmediateInt(op1))
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+ addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
if (isUnsigned) {
urshift32(regT2, regT0);
- addSlowCase(branch32(LessThan, regT0, Imm32(0)));
+ addSlowCase(branch32(LessThan, regT0, TrustedImm32(0)));
} else
rshift32(regT2, regT0);
emitStoreInt32(dst, regT0, dst == op1 || dst == op2);
@@ -450,14 +450,14 @@ void JIT::emitRightShiftSlowCase(Instruction* currentInstruction, Vector<SlowCas
linkSlowCase(iter); // int32 check
if (supportsFloatingPointTruncate()) {
JumpList failures;
- failures.append(branch32(AboveOrEqual, regT1, Imm32(JSValue::LowestTag)));
+ failures.append(branch32(AboveOrEqual, regT1, TrustedImm32(JSValue::LowestTag)));
emitLoadDouble(op1, fpRegT0);
failures.append(branchTruncateDoubleToInt32(fpRegT0, regT0));
if (isUnsigned) {
if (shift)
urshift32(Imm32(shift & 0x1f), regT0);
if (shift < 0 || !(shift & 31))
- failures.append(branch32(LessThan, regT0, Imm32(0)));
+ failures.append(branch32(LessThan, regT0, TrustedImm32(0)));
} else if (shift)
rshift32(Imm32(shift & 0x1f), regT0);
emitStoreInt32(dst, regT0, false);
@@ -472,9 +472,9 @@ void JIT::emitRightShiftSlowCase(Instruction* currentInstruction, Vector<SlowCas
if (!isOperandConstantImmediateInt(op1)) {
linkSlowCase(iter); // int32 check -- op1 is not an int
if (supportsFloatingPointTruncate()) {
- Jump notDouble = branch32(Above, regT1, Imm32(JSValue::LowestTag)); // op1 is not a double
+ Jump notDouble = branch32(Above, regT1, TrustedImm32(JSValue::LowestTag)); // op1 is not a double
emitLoadDouble(op1, fpRegT0);
- Jump notInt = branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)); // op2 is not an int
+ Jump notInt = branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)); // op2 is not an int
Jump cantTruncate = branchTruncateDoubleToInt32(fpRegT0, regT0);
if (isUnsigned)
urshift32(regT2, regT0);
@@ -535,15 +535,15 @@ void JIT::emit_op_bitand(Instruction* currentInstruction)
int32_t constant;
if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
emitLoad(op, regT1, regT0);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
and32(Imm32(constant), regT0);
emitStoreInt32(dst, regT0, (op == dst));
return;
}
emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+ addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
and32(regT2, regT0);
emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
}
@@ -576,15 +576,15 @@ void JIT::emit_op_bitor(Instruction* currentInstruction)
int32_t constant;
if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
emitLoad(op, regT1, regT0);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
or32(Imm32(constant), regT0);
emitStoreInt32(dst, regT0, (op == dst));
return;
}
emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+ addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
or32(regT2, regT0);
emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
}
@@ -617,15 +617,15 @@ void JIT::emit_op_bitxor(Instruction* currentInstruction)
int32_t constant;
if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
emitLoad(op, regT1, regT0);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
xor32(Imm32(constant), regT0);
emitStoreInt32(dst, regT0, (op == dst));
return;
}
emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+ addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
xor32(regT2, regT0);
emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
}
@@ -654,7 +654,7 @@ void JIT::emit_op_bitnot(Instruction* currentInstruction)
unsigned src = currentInstruction[2].u.operand;
emitLoad(src, regT1, regT0);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
not32(regT0);
emitStoreInt32(dst, regT0, (dst == src));
@@ -679,14 +679,14 @@ void JIT::emit_op_post_inc(Instruction* currentInstruction)
unsigned srcDst = currentInstruction[2].u.operand;
emitLoad(srcDst, regT1, regT0);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
if (dst == srcDst) // x = x++ is a noop for ints.
return;
emitStoreInt32(dst, regT0);
- addSlowCase(branchAdd32(Overflow, Imm32(1), regT0));
+ addSlowCase(branchAdd32(Overflow, TrustedImm32(1), regT0));
emitStoreInt32(srcDst, regT0, true);
}
@@ -713,14 +713,14 @@ void JIT::emit_op_post_dec(Instruction* currentInstruction)
unsigned srcDst = currentInstruction[2].u.operand;
emitLoad(srcDst, regT1, regT0);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
if (dst == srcDst) // x = x-- is a noop for ints.
return;
emitStoreInt32(dst, regT0);
- addSlowCase(branchSub32(Overflow, Imm32(1), regT0));
+ addSlowCase(branchSub32(Overflow, TrustedImm32(1), regT0));
emitStoreInt32(srcDst, regT0, true);
}
@@ -735,7 +735,7 @@ void JIT::emitSlow_op_post_dec(Instruction* currentInstruction, Vector<SlowCaseE
JITStubCall stubCall(this, cti_op_post_dec);
stubCall.addArgument(srcDst);
- stubCall.addArgument(Imm32(srcDst));
+ stubCall.addArgument(TrustedImm32(srcDst));
stubCall.call(dst);
}
@@ -747,8 +747,8 @@ void JIT::emit_op_pre_inc(Instruction* currentInstruction)
emitLoad(srcDst, regT1, regT0);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- addSlowCase(branchAdd32(Overflow, Imm32(1), regT0));
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+ addSlowCase(branchAdd32(Overflow, TrustedImm32(1), regT0));
emitStoreInt32(srcDst, regT0, true);
}
@@ -772,8 +772,8 @@ void JIT::emit_op_pre_dec(Instruction* currentInstruction)
emitLoad(srcDst, regT1, regT0);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- addSlowCase(branchSub32(Overflow, Imm32(1), regT0));
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+ addSlowCase(branchSub32(Overflow, TrustedImm32(1), regT0));
emitStoreInt32(srcDst, regT0, true);
}
@@ -817,8 +817,8 @@ void JIT::emit_op_add(Instruction* currentInstruction)
}
emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
- notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+ notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+ notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
// Int32 case.
addSlowCase(branchAdd32(Overflow, regT2, regT0));
@@ -840,7 +840,7 @@ void JIT::emitAdd32Constant(unsigned dst, unsigned op, int32_t constant, ResultT
{
// Int32 case.
emitLoad(op, regT1, regT0);
- Jump notInt32 = branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag));
+ Jump notInt32 = branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag));
addSlowCase(branchAdd32(Overflow, Imm32(constant), regT0));
emitStoreInt32(dst, regT0, (op == dst));
@@ -853,7 +853,7 @@ void JIT::emitAdd32Constant(unsigned dst, unsigned op, int32_t constant, ResultT
notInt32.link(this);
if (!opType.definitelyIsNumber())
- addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));
+ addSlowCase(branch32(Above, regT1, TrustedImm32(JSValue::LowestTag)));
move(Imm32(constant), regT2);
convertInt32ToDouble(regT2, fpRegT0);
emitLoadDouble(op, fpRegT1);
@@ -926,8 +926,8 @@ void JIT::emit_op_sub(Instruction* currentInstruction)
}
emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
- notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+ notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+ notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
// Int32 case.
addSlowCase(branchSub32(Overflow, regT2, regT0));
@@ -949,7 +949,7 @@ void JIT::emitSub32Constant(unsigned dst, unsigned op, int32_t constant, ResultT
{
// Int32 case.
emitLoad(op, regT1, regT0);
- Jump notInt32 = branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag));
+ Jump notInt32 = branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag));
addSlowCase(branchSub32(Overflow, Imm32(constant), regT0));
emitStoreInt32(dst, regT0, (op == dst));
@@ -962,7 +962,7 @@ void JIT::emitSub32Constant(unsigned dst, unsigned op, int32_t constant, ResultT
notInt32.link(this);
if (!opType.definitelyIsNumber())
- addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));
+ addSlowCase(branch32(Above, regT1, TrustedImm32(JSValue::LowestTag)));
move(Imm32(constant), regT2);
convertInt32ToDouble(regT2, fpRegT0);
emitLoadDouble(op, fpRegT1);
@@ -1019,15 +1019,15 @@ void JIT::emitBinaryDoubleOp(OpcodeID opcodeID, unsigned dst, unsigned op1, unsi
// Verify Op1 is double.
if (!types.first().definitelyIsNumber())
- addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));
+ addSlowCase(branch32(Above, regT1, TrustedImm32(JSValue::LowestTag)));
if (!op2IsInRegisters)
emitLoad(op2, regT3, regT2);
- Jump doubleOp2 = branch32(Below, regT3, Imm32(JSValue::LowestTag));
+ Jump doubleOp2 = branch32(Below, regT3, TrustedImm32(JSValue::LowestTag));
if (!types.second().definitelyIsNumber())
- addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+ addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
convertInt32ToDouble(regT2, fpRegT0);
Jump doTheMath = jump();
@@ -1096,7 +1096,7 @@ void JIT::emitBinaryDoubleOp(OpcodeID opcodeID, unsigned dst, unsigned op1, unsi
// Verify op2 is double.
if (!types.second().definitelyIsNumber())
- addSlowCase(branch32(Above, regT3, Imm32(JSValue::LowestTag)));
+ addSlowCase(branch32(Above, regT3, TrustedImm32(JSValue::LowestTag)));
// Do the math.
switch (opcodeID) {
@@ -1157,8 +1157,8 @@ void JIT::emit_op_mul(Instruction* currentInstruction)
JumpList notInt32Op2;
emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
- notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+ notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+ notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
// Int32 case.
move(regT0, regT3);
@@ -1189,7 +1189,7 @@ void JIT::emitSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>
linkSlowCase(iter); // zero result check
Jump negZero = branchOr32(Signed, regT2, regT3);
- emitStoreInt32(dst, Imm32(0), (op1 == dst || op2 == dst));
+ emitStoreInt32(dst, TrustedImm32(0), (op1 == dst || op2 == dst));
emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_mul));
@@ -1240,8 +1240,8 @@ void JIT::emit_op_div(Instruction* currentInstruction)
emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
- notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+ notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+ notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
convertInt32ToDouble(regT0, fpRegT0);
convertInt32ToDouble(regT2, fpRegT1);
@@ -1312,16 +1312,16 @@ void JIT::emit_op_mod(Instruction* currentInstruction)
if (isOperandConstantImmediateInt(op2) && getConstantOperand(op2).asInt32() != 0) {
emitLoad(op1, regT1, regT0);
move(Imm32(getConstantOperand(op2).asInt32()), regT2);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
if (getConstantOperand(op2).asInt32() == -1)
- addSlowCase(branch32(Equal, regT0, Imm32(0x80000000))); // -2147483648 / -1 => EXC_ARITHMETIC
+ addSlowCase(branch32(Equal, regT0, TrustedImm32(0x80000000))); // -2147483648 / -1 => EXC_ARITHMETIC
} else {
emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+ addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
- addSlowCase(branch32(Equal, regT0, Imm32(0x80000000))); // -2147483648 / -1 => EXC_ARITHMETIC
- addSlowCase(branch32(Equal, regT2, Imm32(0))); // divide by 0
+ addSlowCase(branch32(Equal, regT0, TrustedImm32(0x80000000))); // -2147483648 / -1 => EXC_ARITHMETIC
+ addSlowCase(branch32(Equal, regT2, TrustedImm32(0))); // divide by 0
}
move(regT0, regT3); // Save dividend payload, in case of 0.
@@ -1335,7 +1335,7 @@ void JIT::emit_op_mod(Instruction* currentInstruction)
// If the remainder is zero and the dividend is negative, the result is -0.
Jump storeResult1 = branchTest32(NonZero, regT1);
- Jump storeResult2 = branchTest32(Zero, regT3, Imm32(0x80000000)); // not negative
+ Jump storeResult2 = branchTest32(Zero, regT3, TrustedImm32(0x80000000)); // not negative
emitStore(dst, jsNumber(-0.0));
Jump end = jump();
@@ -1378,10 +1378,10 @@ void JIT::emit_op_mod(Instruction* currentInstruction)
#if ENABLE(JIT_USE_SOFT_MODULO)
emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+ addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
- addSlowCase(branch32(Equal, regT2, Imm32(0)));
+ addSlowCase(branch32(Equal, regT2, TrustedImm32(0)));
emitNakedCall(m_globalData->jitStubs->ctiSoftModulo());
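
The op_mod guards above (0x80000000 against a divisor of -1, plus a zero-divisor check) match x86 idiv semantics: the instruction raises a hardware exception both on division by zero and when the quotient overflows 32 bits, and INT32_MIN / -1 is exactly the overflow case noted in the EXC_ARITHMETIC comments. The separate remainder-of-zero path exists because JavaScript requires a negative dividend to produce -0, which an integer register cannot hold. In C++ terms the slow-case test is:

#include <stdint.h>

// Inputs on which the fast idiv path would trap; the JIT routes these
// to a stub instead. INT32_MIN is the 0x80000000 tested above.
bool modNeedsSlowCase(int32_t dividend, int32_t divisor)
{
    return !divisor || (dividend == INT32_MIN && divisor == -1);
}
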
diff --git a/Source/JavaScriptCore/jit/JITCall.cpp b/Source/JavaScriptCore/jit/JITCall.cpp
index 524e576..77c2a69 100644
--- a/Source/JavaScriptCore/jit/JITCall.cpp
+++ b/Source/JavaScriptCore/jit/JITCall.cpp
@@ -48,10 +48,11 @@ namespace JSC {
void JIT::compileOpCallInitializeCallFrame()
{
- store32(regT1, Address(callFrameRegister, RegisterFile::ArgumentCount * static_cast<int>(sizeof(Register))));
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_scopeChain)), regT3); // newScopeChain
- storePtr(regT0, Address(callFrameRegister, RegisterFile::Callee * static_cast<int>(sizeof(Register))));
- storePtr(regT3, Address(callFrameRegister, RegisterFile::ScopeChain * static_cast<int>(sizeof(Register))));
+ // regT0 holds callee, regT1 holds argCount
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_scopeChain)), regT3); // scopeChain
+ emitPutIntToCallFrameHeader(regT1, RegisterFile::ArgumentCount);
+ emitPutCellToCallFrameHeader(regT0, RegisterFile::Callee);
+ emitPutCellToCallFrameHeader(regT3, RegisterFile::ScopeChain);
}
void JIT::emit_op_call_put_result(Instruction* instruction)
@@ -67,15 +68,16 @@ void JIT::compileOpCallVarargs(Instruction* instruction)
int registerOffset = instruction[3].u.operand;
emitGetVirtualRegister(argCountRegister, regT1);
+ emitFastArithImmToInt(regT1);
emitGetVirtualRegister(callee, regT0);
addPtr(Imm32(registerOffset), regT1, regT2);
// Check for JSFunctions.
emitJumpSlowCaseIfNotJSCell(regT0);
- addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsFunctionVPtr)));
+ addSlowCase(branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsFunctionVPtr)));
// Speculatively roll the callframe, assuming argCount will match the arity.
- mul32(Imm32(sizeof(Register)), regT2, regT2);
+ mul32(TrustedImm32(sizeof(Register)), regT2, regT2);
intptr_t offset = (intptr_t)sizeof(Register) * (intptr_t)RegisterFile::CallerFrame;
addPtr(Imm32((int32_t)offset), regT2, regT3);
addPtr(callFrameRegister, regT3);
@@ -118,14 +120,14 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned)
stubCall.addArgument(JIT::Imm32(registerOffset));
stubCall.addArgument(JIT::Imm32(argCount));
stubCall.call();
- wasEval = branchPtr(NotEqual, regT0, ImmPtr(JSValue::encode(JSValue())));
+ wasEval = branchPtr(NotEqual, regT0, TrustedImmPtr(JSValue::encode(JSValue())));
}
emitGetVirtualRegister(callee, regT0);
// Check for JSFunctions.
emitJumpSlowCaseIfNotJSCell(regT0);
- addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsFunctionVPtr)));
+ addSlowCase(branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsFunctionVPtr)));
// Speculatively roll the callframe, assuming argCount will match the arity.
storePtr(callFrameRegister, Address(callFrameRegister, (RegisterFile::CallerFrame + registerOffset) * static_cast<int>(sizeof(Register))));
@@ -175,7 +177,7 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned ca
stubCall.addArgument(JIT::Imm32(registerOffset));
stubCall.addArgument(JIT::Imm32(argCount));
stubCall.call();
- wasEval = branchPtr(NotEqual, regT0, ImmPtr(JSValue::encode(JSValue())));
+ wasEval = branchPtr(NotEqual, regT0, TrustedImmPtr(JSValue::encode(JSValue())));
}
// This plants a check for a cached JSFunction value, so we can plant a fast link to the callee.
@@ -185,7 +187,7 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned ca
BEGIN_UNINTERRUPTED_SEQUENCE(sequenceOpCall);
- Jump jumpToSlow = branchPtrWithPatch(NotEqual, regT0, addressOfLinkedFunctionCheck, ImmPtr(JSValue::encode(JSValue())));
+ Jump jumpToSlow = branchPtrWithPatch(NotEqual, regT0, addressOfLinkedFunctionCheck, TrustedImmPtr(JSValue::encode(JSValue())));
END_UNINTERRUPTED_SEQUENCE(sequenceOpCall);
@@ -199,8 +201,9 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned ca
// Note that this omits to set up RegisterFile::CodeBlock, which is set in the callee
loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_scopeChain)), regT1); // newScopeChain
-
- store32(Imm32(argCount), Address(callFrameRegister, (registerOffset + RegisterFile::ArgumentCount) * static_cast<int>(sizeof(Register))));
+
+ store32(TrustedImm32(Int32Tag), intTagFor(registerOffset + RegisterFile::ArgumentCount));
+ store32(Imm32(argCount), intPayloadFor(registerOffset + RegisterFile::ArgumentCount));
storePtr(callFrameRegister, Address(callFrameRegister, (registerOffset + RegisterFile::CallerFrame) * static_cast<int>(sizeof(Register))));
storePtr(regT0, Address(callFrameRegister, (registerOffset + RegisterFile::Callee) * static_cast<int>(sizeof(Register))));
storePtr(regT1, Address(callFrameRegister, (registerOffset + RegisterFile::ScopeChain) * static_cast<int>(sizeof(Register))));
@@ -224,7 +227,7 @@ void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>:
// Fast check for JS function.
Jump callLinkFailNotObject = emitJumpIfNotJSCell(regT0);
- Jump callLinkFailNotJSFunction = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsFunctionVPtr));
+ Jump callLinkFailNotJSFunction = branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsFunctionVPtr));
// Speculatively roll the callframe, assuming argCount will match the arity.
storePtr(callFrameRegister, Address(callFrameRegister, (RegisterFile::CallerFrame + registerOffset) * static_cast<int>(sizeof(Register))));
diff --git a/Source/JavaScriptCore/jit/JITCall32_64.cpp b/Source/JavaScriptCore/jit/JITCall32_64.cpp
index ac231a9..9ffa495 100644
--- a/Source/JavaScriptCore/jit/JITCall32_64.cpp
+++ b/Source/JavaScriptCore/jit/JITCall32_64.cpp
@@ -49,10 +49,10 @@ namespace JSC {
void JIT::compileOpCallInitializeCallFrame()
{
// regT0 holds callee, regT1 holds argCount
- store32(regT1, Address(callFrameRegister, RegisterFile::ArgumentCount * static_cast<int>(sizeof(Register))));
loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_scopeChain)), regT3); // scopeChain
- storePtr(regT0, Address(callFrameRegister, RegisterFile::Callee * static_cast<int>(sizeof(Register)))); // callee
- storePtr(regT3, Address(callFrameRegister, RegisterFile::ScopeChain * static_cast<int>(sizeof(Register)))); // scopeChain
+ emitPutIntToCallFrameHeader(regT1, RegisterFile::ArgumentCount);
+ emitPutCellToCallFrameHeader(regT0, RegisterFile::Callee);
+ emitPutCellToCallFrameHeader(regT3, RegisterFile::ScopeChain);
}
void JIT::emit_op_call_put_result(Instruction* instruction)
@@ -72,12 +72,13 @@ void JIT::compileOpCallVarargs(Instruction* instruction)
addPtr(Imm32(registerOffset), regT2, regT3); // registerOffset
emitJumpSlowCaseIfNotJSCell(callee, regT1);
- addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsFunctionVPtr)));
+ addSlowCase(branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsFunctionVPtr)));
// Speculatively roll the callframe, assuming argCount will match the arity.
- mul32(Imm32(sizeof(Register)), regT3, regT3);
+ mul32(TrustedImm32(sizeof(Register)), regT3, regT3);
addPtr(callFrameRegister, regT3);
- storePtr(callFrameRegister, Address(regT3, RegisterFile::CallerFrame * static_cast<int>(sizeof(Register))));
+ store32(TrustedImm32(JSValue::CellTag), tagFor(RegisterFile::CallerFrame, regT3));
+ storePtr(callFrameRegister, payloadFor(RegisterFile::CallerFrame, regT3));
move(regT3, callFrameRegister);
move(regT2, regT1); // argCount
@@ -121,9 +122,9 @@ void JIT::emit_op_ret_object_or_this(Instruction* currentInstruction)
unsigned thisReg = currentInstruction[2].u.operand;
emitLoad(result, regT1, regT0);
- Jump notJSCell = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
- Jump notObject = branch8(NotEqual, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo) + OBJECT_OFFSETOF(TypeInfo, m_type)), Imm32(ObjectType));
+ Jump notJSCell = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag));
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
+ Jump notObject = branch8(NotEqual, Address(regT2, Structure::typeInfoTypeOffset()), TrustedImm32(ObjectType));
emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT2);
emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
@@ -199,18 +200,19 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned)
stubCall.addArgument(JIT::Imm32(registerOffset));
stubCall.addArgument(JIT::Imm32(argCount));
stubCall.call();
- wasEval = branch32(NotEqual, regT1, Imm32(JSValue::EmptyValueTag));
+ wasEval = branch32(NotEqual, regT1, TrustedImm32(JSValue::EmptyValueTag));
}
emitLoad(callee, regT1, regT0);
emitJumpSlowCaseIfNotJSCell(callee, regT1);
- addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsFunctionVPtr)));
+ addSlowCase(branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsFunctionVPtr)));
// Speculatively roll the callframe, assuming argCount will match the arity.
- storePtr(callFrameRegister, Address(callFrameRegister, (RegisterFile::CallerFrame + registerOffset) * static_cast<int>(sizeof(Register))));
+ store32(TrustedImm32(JSValue::CellTag), tagFor(RegisterFile::CallerFrame + registerOffset, callFrameRegister));
+ storePtr(callFrameRegister, payloadFor(RegisterFile::CallerFrame + registerOffset, callFrameRegister));
addPtr(Imm32(registerOffset * static_cast<int>(sizeof(Register))), callFrameRegister);
- move(Imm32(argCount), regT1);
+ move(TrustedImm32(argCount), regT1);
emitNakedCall(opcodeID == op_construct ? m_globalData->jitStubs->ctiVirtualConstruct() : m_globalData->jitStubs->ctiVirtualCall());
@@ -255,7 +257,7 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned ca
stubCall.addArgument(JIT::Imm32(registerOffset));
stubCall.addArgument(JIT::Imm32(argCount));
stubCall.call();
- wasEval = branch32(NotEqual, regT1, Imm32(JSValue::EmptyValueTag));
+ wasEval = branch32(NotEqual, regT1, TrustedImm32(JSValue::EmptyValueTag));
}
emitLoad(callee, regT1, regT0);
@@ -264,7 +266,7 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned ca
BEGIN_UNINTERRUPTED_SEQUENCE(sequenceOpCall);
- Jump jumpToSlow = branchPtrWithPatch(NotEqual, regT0, addressOfLinkedFunctionCheck, ImmPtr(0));
+ Jump jumpToSlow = branchPtrWithPatch(NotEqual, regT0, addressOfLinkedFunctionCheck, TrustedImmPtr(0));
END_UNINTERRUPTED_SEQUENCE(sequenceOpCall);
@@ -272,7 +274,7 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned ca
ASSERT_JIT_OFFSET(differenceBetween(addressOfLinkedFunctionCheck, jumpToSlow), patchOffsetOpCallCompareToJump);
m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck;
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
// The following is the fast case, only used when a callee can be linked.
@@ -280,10 +282,12 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned ca
// Note that this omits to set up RegisterFile::CodeBlock, which is set in the callee
loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_scopeChain)), regT2);
- store32(Imm32(argCount), Address(callFrameRegister, (registerOffset + RegisterFile::ArgumentCount) * static_cast<int>(sizeof(Register))));
- storePtr(callFrameRegister, Address(callFrameRegister, (registerOffset + RegisterFile::CallerFrame) * static_cast<int>(sizeof(Register))));
+ store32(TrustedImm32(JSValue::Int32Tag), tagFor(registerOffset + RegisterFile::ArgumentCount));
+ store32(Imm32(argCount), payloadFor(registerOffset + RegisterFile::ArgumentCount));
+ storePtr(callFrameRegister, payloadFor(RegisterFile::CallerFrame + registerOffset, callFrameRegister));
emitStore(registerOffset + RegisterFile::Callee, regT1, regT0);
- storePtr(regT2, Address(callFrameRegister, (registerOffset + RegisterFile::ScopeChain) * static_cast<int>(sizeof(Register))));
+ store32(TrustedImm32(JSValue::CellTag), tagFor(registerOffset + RegisterFile::ScopeChain));
+ store32(regT2, payloadFor(registerOffset + RegisterFile::ScopeChain));
addPtr(Imm32(registerOffset * sizeof(Register)), callFrameRegister);
// Call to the callee
@@ -305,11 +309,12 @@ void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>:
linkSlowCase(iter);
// Fast check for JS function.
- Jump callLinkFailNotObject = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));
- Jump callLinkFailNotJSFunction = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsFunctionVPtr));
+ Jump callLinkFailNotObject = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag));
+ Jump callLinkFailNotJSFunction = branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsFunctionVPtr));
// Speculatively roll the callframe, assuming argCount will match the arity.
- storePtr(callFrameRegister, Address(callFrameRegister, (RegisterFile::CallerFrame + registerOffset) * static_cast<int>(sizeof(Register))));
+ store32(TrustedImm32(JSValue::CellTag), tagFor(RegisterFile::CallerFrame + registerOffset, callFrameRegister));
+ storePtr(callFrameRegister, payloadFor(RegisterFile::CallerFrame + registerOffset, callFrameRegister));
addPtr(Imm32(registerOffset * static_cast<int>(sizeof(Register))), callFrameRegister);
move(Imm32(argCount), regT1);
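
Several hunks in JITCall.cpp and JITCall32_64.cpp stop writing bare machine words into call-frame header slots and instead store an explicit tag next to the payload. Under JSVALUE32_64 every Register slot is a tag/payload pair, so a bare store32 of an argument count would leave a stale tag behind and the slot would not decode as a well-formed JSValue. A sketch of the layout being maintained, assuming the little-endian word order the 32-bit JIT targets:

#include <stdint.h>

// One JSVALUE32_64 call-frame slot: payload in the low word, tag in
// the high word on little-endian targets.
struct Slot {
    int32_t payload;
    int32_t tag;
};

// What emitPutIntToCallFrameHeader now does, expressed in C++: write
// both halves so the slot decodes as a proper int32 JSValue.
void putIntToSlot(Slot& slot, int32_t argCount, int32_t int32Tag)
{
    slot.payload = argCount;
    slot.tag = int32Tag;
}
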
diff --git a/Source/JavaScriptCore/jit/JITInlineMethods.h b/Source/JavaScriptCore/jit/JITInlineMethods.h
index 39ca4a5..16c2335 100644
--- a/Source/JavaScriptCore/jit/JITInlineMethods.h
+++ b/Source/JavaScriptCore/jit/JITInlineMethods.h
@@ -52,12 +52,26 @@ ALWAYS_INLINE JSValue JIT::getConstantOperand(unsigned src)
ALWAYS_INLINE void JIT::emitPutToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry entry)
{
- storePtr(from, Address(callFrameRegister, entry * sizeof(Register)));
+ storePtr(from, payloadFor(entry, callFrameRegister));
+}
+
+ALWAYS_INLINE void JIT::emitPutCellToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry entry)
+{
+#if USE(JSVALUE32_64)
+ store32(TrustedImm32(JSValue::CellTag), tagFor(entry, callFrameRegister));
+#endif
+ storePtr(from, payloadFor(entry, callFrameRegister));
+}
+
+ALWAYS_INLINE void JIT::emitPutIntToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry entry)
+{
+ store32(TrustedImm32(Int32Tag), intTagFor(entry, callFrameRegister));
+ store32(from, intPayloadFor(entry, callFrameRegister));
}
ALWAYS_INLINE void JIT::emitPutImmediateToCallFrameHeader(void* value, RegisterFile::CallFrameHeaderEntry entry)
{
- storePtr(ImmPtr(value), Address(callFrameRegister, entry * sizeof(Register)));
+ storePtr(TrustedImmPtr(value), Address(callFrameRegister, entry * sizeof(Register)));
}
ALWAYS_INLINE void JIT::emitGetFromCallFrameHeaderPtr(RegisterFile::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
@@ -70,9 +84,9 @@ ALWAYS_INLINE void JIT::emitGetFromCallFrameHeaderPtr(RegisterFile::CallFrameHea
ALWAYS_INLINE void JIT::emitLoadCharacterString(RegisterID src, RegisterID dst, JumpList& failures)
{
- failures.append(branchPtr(NotEqual, Address(src), ImmPtr(m_globalData->jsStringVPtr)));
+ failures.append(branchPtr(NotEqual, Address(src), TrustedImmPtr(m_globalData->jsStringVPtr)));
failures.append(branchTest32(NonZero, Address(src, OBJECT_OFFSETOF(JSString, m_fiberCount))));
- failures.append(branch32(NotEqual, MacroAssembler::Address(src, ThunkHelpers::jsStringLengthOffset()), Imm32(1)));
+ failures.append(branch32(NotEqual, MacroAssembler::Address(src, ThunkHelpers::jsStringLengthOffset()), TrustedImm32(1)));
loadPtr(MacroAssembler::Address(src, ThunkHelpers::jsStringValueOffset()), dst);
loadPtr(MacroAssembler::Address(dst, ThunkHelpers::stringImplDataOffset()), dst);
load16(MacroAssembler::Address(dst, 0), dst);
@@ -200,7 +214,7 @@ ALWAYS_INLINE void JIT::restoreArgumentReferenceForTrampoline()
{
#if CPU(X86)
// Within a trampoline the return address will be on the stack at this point.
- addPtr(Imm32(sizeof(void*)), stackPointerRegister, firstArgumentRegister);
+ addPtr(TrustedImm32(sizeof(void*)), stackPointerRegister, firstArgumentRegister);
#elif CPU(ARM)
move(stackPointerRegister, firstArgumentRegister);
#endif
@@ -209,7 +223,7 @@ ALWAYS_INLINE void JIT::restoreArgumentReferenceForTrampoline()
ALWAYS_INLINE JIT::Jump JIT::checkStructure(RegisterID reg, Structure* structure)
{
- return branchPtr(NotEqual, Address(reg, OBJECT_OFFSETOF(JSCell, m_structure)), ImmPtr(structure));
+ return branchPtr(NotEqual, Address(reg, JSCell::structureOffset()), TrustedImmPtr(structure));
}
ALWAYS_INLINE void JIT::linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator& iter, int vReg)
@@ -254,14 +268,14 @@ ALWAYS_INLINE void JIT::setSamplingFlag(int32_t flag)
{
ASSERT(flag >= 1);
ASSERT(flag <= 32);
- or32(Imm32(1u << (flag - 1)), AbsoluteAddress(&SamplingFlags::s_flags));
+ or32(TrustedImm32(1u << (flag - 1)), AbsoluteAddress(&SamplingFlags::s_flags));
}
ALWAYS_INLINE void JIT::clearSamplingFlag(int32_t flag)
{
ASSERT(flag >= 1);
ASSERT(flag <= 32);
- and32(Imm32(~(1u << (flag - 1))), AbsoluteAddress(&SamplingFlags::s_flags));
+ and32(TrustedImm32(~(1u << (flag - 1))), AbsoluteAddress(&SamplingFlags::s_flags));
}
#endif
@@ -269,11 +283,11 @@ ALWAYS_INLINE void JIT::clearSamplingFlag(int32_t flag)
ALWAYS_INLINE void JIT::emitCount(AbstractSamplingCounter& counter, uint32_t count)
{
#if CPU(X86_64) // Or any other 64-bit platform.
- addPtr(Imm32(count), AbsoluteAddress(&counter.m_counter));
+ addPtr(TrustedImm32(count), AbsoluteAddress(counter.addressOfCounter()));
#elif CPU(X86) // Or any other little-endian 32-bit platform.
- intptr_t hiWord = reinterpret_cast<intptr_t>(&counter.m_counter) + sizeof(int32_t);
- add32(Imm32(count), AbsoluteAddress(&counter.m_counter));
- addWithCarry32(Imm32(0), AbsoluteAddress(reinterpret_cast<void*>(hiWord)));
+ intptr_t hiWord = reinterpret_cast<intptr_t>(counter.addressOfCounter()) + sizeof(int32_t);
+ add32(TrustedImm32(count), AbsoluteAddress(counter.addressOfCounter()));
+ addWithCarry32(TrustedImm32(0), AbsoluteAddress(reinterpret_cast<void*>(hiWord)));
#else
#error "SAMPLING_FLAGS not implemented on this platform."
#endif
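
The emitCount hunk above keeps a 64-bit sampling counter correct on 32-bit x86 by pairing a low-word add with an add-with-carry into the adjacent high word. The same operation in portable C++, assuming the little-endian word order the CPU(X86) branch implies:

#include <stdint.h>

// Equivalent of the emitted add32 + addWithCarry32 pair, on a 64-bit
// counter stored as two little-endian 32-bit words.
void addToCounter(uint32_t words[2], uint32_t count)
{
    uint64_t low = static_cast<uint64_t>(words[0]) + count;
    words[0] = static_cast<uint32_t>(low);
    words[1] += static_cast<uint32_t>(low >> 32); // propagate the carry
}
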
@@ -284,13 +298,13 @@ ALWAYS_INLINE void JIT::emitCount(AbstractSamplingCounter& counter, uint32_t cou
#if CPU(X86_64)
ALWAYS_INLINE void JIT::sampleInstruction(Instruction* instruction, bool inHostFunction)
{
- move(ImmPtr(m_interpreter->sampler()->sampleSlot()), X86Registers::ecx);
- storePtr(ImmPtr(m_interpreter->sampler()->encodeSample(instruction, inHostFunction)), X86Registers::ecx);
+ move(TrustedImmPtr(m_interpreter->sampler()->sampleSlot()), X86Registers::ecx);
+ storePtr(TrustedImmPtr(m_interpreter->sampler()->encodeSample(instruction, inHostFunction)), X86Registers::ecx);
}
#else
ALWAYS_INLINE void JIT::sampleInstruction(Instruction* instruction, bool inHostFunction)
{
- storePtr(ImmPtr(m_interpreter->sampler()->encodeSample(instruction, inHostFunction)), m_interpreter->sampler()->sampleSlot());
+ storePtr(TrustedImmPtr(m_interpreter->sampler()->encodeSample(instruction, inHostFunction)), m_interpreter->sampler()->sampleSlot());
}
#endif
#endif
@@ -299,13 +313,13 @@ ALWAYS_INLINE void JIT::sampleInstruction(Instruction* instruction, bool inHostF
#if CPU(X86_64)
ALWAYS_INLINE void JIT::sampleCodeBlock(CodeBlock* codeBlock)
{
- move(ImmPtr(m_interpreter->sampler()->codeBlockSlot()), X86Registers::ecx);
- storePtr(ImmPtr(codeBlock), X86Registers::ecx);
+ move(TrustedImmPtr(m_interpreter->sampler()->codeBlockSlot()), X86Registers::ecx);
+ storePtr(TrustedImmPtr(codeBlock), X86Registers::ecx);
}
#else
ALWAYS_INLINE void JIT::sampleCodeBlock(CodeBlock* codeBlock)
{
- storePtr(ImmPtr(codeBlock), m_interpreter->sampler()->codeBlockSlot());
+ storePtr(TrustedImmPtr(codeBlock), m_interpreter->sampler()->codeBlockSlot());
}
#endif
#endif
@@ -396,7 +410,7 @@ inline void JIT::emitLoad2(unsigned index1, RegisterID tag1, RegisterID payload1
inline void JIT::emitLoadDouble(unsigned index, FPRegisterID value)
{
if (m_codeBlock->isConstantRegisterIndex(index)) {
- Register& inConstantPool = m_codeBlock->constantRegister(index);
+ WriteBarrier<Unknown>& inConstantPool = m_codeBlock->constantRegister(index);
loadDouble(&inConstantPool, value);
} else
loadDouble(addressFor(index), value);
@@ -405,7 +419,7 @@ inline void JIT::emitLoadDouble(unsigned index, FPRegisterID value)
inline void JIT::emitLoadInt32ToDouble(unsigned index, FPRegisterID value)
{
if (m_codeBlock->isConstantRegisterIndex(index)) {
- Register& inConstantPool = m_codeBlock->constantRegister(index);
+ WriteBarrier<Unknown>& inConstantPool = m_codeBlock->constantRegister(index);
char* bytePointer = reinterpret_cast<char*>(&inConstantPool);
convertInt32ToDouble(AbsoluteAddress(bytePointer + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), value);
} else
@@ -422,27 +436,27 @@ inline void JIT::emitStoreInt32(unsigned index, RegisterID payload, bool indexIs
{
store32(payload, payloadFor(index, callFrameRegister));
if (!indexIsInt32)
- store32(Imm32(JSValue::Int32Tag), tagFor(index, callFrameRegister));
+ store32(TrustedImm32(JSValue::Int32Tag), tagFor(index, callFrameRegister));
}
-inline void JIT::emitStoreInt32(unsigned index, Imm32 payload, bool indexIsInt32)
+inline void JIT::emitStoreInt32(unsigned index, TrustedImm32 payload, bool indexIsInt32)
{
store32(payload, payloadFor(index, callFrameRegister));
if (!indexIsInt32)
- store32(Imm32(JSValue::Int32Tag), tagFor(index, callFrameRegister));
+ store32(TrustedImm32(JSValue::Int32Tag), tagFor(index, callFrameRegister));
}
inline void JIT::emitStoreCell(unsigned index, RegisterID payload, bool indexIsCell)
{
store32(payload, payloadFor(index, callFrameRegister));
if (!indexIsCell)
- store32(Imm32(JSValue::CellTag), tagFor(index, callFrameRegister));
+ store32(TrustedImm32(JSValue::CellTag), tagFor(index, callFrameRegister));
}
inline void JIT::emitStoreBool(unsigned index, RegisterID tag, bool indexIsBool)
{
if (!indexIsBool)
- store32(Imm32(0), payloadFor(index, callFrameRegister));
+ store32(TrustedImm32(0), payloadFor(index, callFrameRegister));
store32(tag, tagFor(index, callFrameRegister));
}
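The Imm32 → TrustedImm32 renames that dominate this patch are not cosmetic: they split immediates the compiler fully controls (tags, sizes, offsets, as in the stores above) from ones that may carry script-derived values, which appear to remain candidates for constant blinding. A hedged sketch of the type split (not the real WebKit definitions):

    #include <stdint.h>
    struct TrustedImm32 {                     // compiler-chosen constant;
        explicit TrustedImm32(int32_t value)  // safe to emit verbatim
            : m_value(value) { }
        int32_t m_value;
    };
    struct Imm32 : TrustedImm32 {             // possibly attacker-influenced;
        using TrustedImm32::TrustedImm32;     // eligible for blinding later
    };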
@@ -550,7 +564,7 @@ inline void JIT::emitJumpSlowCaseIfNotJSCell(unsigned virtualRegisterIndex, Regi
if (m_codeBlock->isConstantRegisterIndex(virtualRegisterIndex))
addSlowCase(jump());
else
- addSlowCase(branch32(NotEqual, tag, Imm32(JSValue::CellTag)));
+ addSlowCase(branch32(NotEqual, tag, TrustedImm32(JSValue::CellTag)));
}
}
@@ -652,7 +666,7 @@ ALWAYS_INLINE void JIT::emitPutVirtualRegister(unsigned dst, RegisterID from)
ALWAYS_INLINE void JIT::emitInitRegister(unsigned dst)
{
- storePtr(ImmPtr(JSValue::encode(jsUndefined())), Address(callFrameRegister, dst * sizeof(Register)));
+ storePtr(TrustedImmPtr(JSValue::encode(jsUndefined())), Address(callFrameRegister, dst * sizeof(Register)));
}
ALWAYS_INLINE JIT::Jump JIT::emitJumpIfJSCell(RegisterID reg)
@@ -660,7 +674,7 @@ ALWAYS_INLINE JIT::Jump JIT::emitJumpIfJSCell(RegisterID reg)
#if USE(JSVALUE64)
return branchTestPtr(Zero, reg, tagMaskRegister);
#else
- return branchTest32(Zero, reg, Imm32(JSImmediate::TagMask));
+ return branchTest32(Zero, reg, TrustedImm32(JSImmediate::TagMask));
#endif
}
@@ -681,7 +695,7 @@ ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotJSCell(RegisterID reg)
#if USE(JSVALUE64)
return branchTestPtr(NonZero, reg, tagMaskRegister);
#else
- return branchTest32(NonZero, reg, Imm32(JSImmediate::TagMask));
+ return branchTest32(NonZero, reg, TrustedImm32(JSImmediate::TagMask));
#endif
}
@@ -701,7 +715,7 @@ ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotJSCell(RegisterID reg, int vReg)
inline void JIT::emitLoadDouble(unsigned index, FPRegisterID value)
{
if (m_codeBlock->isConstantRegisterIndex(index)) {
- Register& inConstantPool = m_codeBlock->constantRegister(index);
+ WriteBarrier<Unknown>& inConstantPool = m_codeBlock->constantRegister(index);
loadDouble(&inConstantPool, value);
} else
loadDouble(addressFor(index), value);
@@ -710,8 +724,8 @@ inline void JIT::emitLoadDouble(unsigned index, FPRegisterID value)
inline void JIT::emitLoadInt32ToDouble(unsigned index, FPRegisterID value)
{
if (m_codeBlock->isConstantRegisterIndex(index)) {
- Register& inConstantPool = m_codeBlock->constantRegister(index);
- convertInt32ToDouble(AbsoluteAddress(&inConstantPool), value);
+ ASSERT(isOperandConstantImmediateInt(index));
+ convertInt32ToDouble(Imm32(getConstantOperand(index).asInt32()), value);
} else
convertInt32ToDouble(addressFor(index), value);
}
@@ -722,7 +736,7 @@ ALWAYS_INLINE JIT::Jump JIT::emitJumpIfImmediateInteger(RegisterID reg)
#if USE(JSVALUE64)
return branchPtr(AboveOrEqual, reg, tagTypeNumberRegister);
#else
- return branchTest32(NonZero, reg, Imm32(JSImmediate::TagTypeNumber));
+ return branchTest32(NonZero, reg, TrustedImm32(JSImmediate::TagTypeNumber));
#endif
}
@@ -731,7 +745,7 @@ ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotImmediateInteger(RegisterID reg)
#if USE(JSVALUE64)
return branchPtr(Below, reg, tagTypeNumberRegister);
#else
- return branchTest32(Zero, reg, Imm32(JSImmediate::TagTypeNumber));
+ return branchTest32(Zero, reg, TrustedImm32(JSImmediate::TagTypeNumber));
#endif
}
@@ -760,12 +774,12 @@ ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateNumber(RegisterID reg)
#if USE(JSVALUE32_64)
ALWAYS_INLINE void JIT::emitFastArithDeTagImmediate(RegisterID reg)
{
- subPtr(Imm32(JSImmediate::TagTypeNumber), reg);
+ subPtr(TrustedImm32(JSImmediate::TagTypeNumber), reg);
}
ALWAYS_INLINE JIT::Jump JIT::emitFastArithDeTagImmediateJumpIfZero(RegisterID reg)
{
- return branchSubPtr(Zero, Imm32(JSImmediate::TagTypeNumber), reg);
+ return branchSubPtr(Zero, TrustedImm32(JSImmediate::TagTypeNumber), reg);
}
#endif
@@ -776,7 +790,7 @@ ALWAYS_INLINE void JIT::emitFastArithReTagImmediate(RegisterID src, RegisterID d
#else
if (src != dest)
move(src, dest);
- addPtr(Imm32(JSImmediate::TagTypeNumber), dest);
+ addPtr(TrustedImm32(JSImmediate::TagTypeNumber), dest);
#endif
}
@@ -796,8 +810,8 @@ ALWAYS_INLINE void JIT::emitFastArithIntToImmNoCheck(RegisterID src, RegisterID
ALWAYS_INLINE void JIT::emitTagAsBoolImmediate(RegisterID reg)
{
- lshift32(Imm32(JSImmediate::ExtendedPayloadShift), reg);
- or32(Imm32(static_cast<int32_t>(JSImmediate::FullTagTypeBool)), reg);
+ lshift32(TrustedImm32(JSImmediate::ExtendedPayloadShift), reg);
+ or32(TrustedImm32(static_cast<int32_t>(JSImmediate::FullTagTypeBool)), reg);
}
#endif // USE(JSVALUE32_64)
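The fast-arith helpers above all manipulate the JSImmediate encoding, in which small integers and booleans live in a tagged machine word. A tiny model of emitTagAsBoolImmediate, with the bit constants treated as assumptions taken from the names (see JSImmediate.h for the real values):

    #include <stdint.h>
    static intptr_t tagAsBool(intptr_t reg, int extendedPayloadShift, intptr_t fullTagTypeBool)
    {
        reg <<= extendedPayloadShift;  // lshift32(TrustedImm32(ExtendedPayloadShift), reg)
        reg |= fullTagTypeBool;        // or32(TrustedImm32(FullTagTypeBool), reg)
        return reg;                    // reg now holds a boxed true/false
    }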
diff --git a/Source/JavaScriptCore/jit/JITOpcodes.cpp b/Source/JavaScriptCore/jit/JITOpcodes.cpp
index fc769db..daceea6 100644
--- a/Source/JavaScriptCore/jit/JITOpcodes.cpp
+++ b/Source/JavaScriptCore/jit/JITOpcodes.cpp
@@ -52,12 +52,12 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
// Check eax is a string
Jump string_failureCases1 = emitJumpIfNotJSCell(regT0);
- Jump string_failureCases2 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr));
+ Jump string_failureCases2 = branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsStringVPtr));
// Checks out okay! - get the length from the UString.
load32(Address(regT0, OBJECT_OFFSETOF(JSString, m_length)), regT0);
- Jump string_failureCases3 = branch32(Above, regT0, Imm32(JSImmediate::maxImmediateInt));
+ Jump string_failureCases3 = branch32(Above, regT0, TrustedImm32(JSImmediate::maxImmediateInt));
// regT0 contains a 64-bit value (it is positive and zero-extended), so we don't need to sign extend here.
emitFastArithIntToImmNoCheck(regT0, regT0);
@@ -102,7 +102,7 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
- Jump hasCodeBlock3 = branch32(GreaterThanOrEqual, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParametersForCall)), Imm32(0));
+ Jump hasCodeBlock3 = branch32(GreaterThanOrEqual, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParametersForCall)), TrustedImm32(0));
preserveReturnAddressAfterCall(regT3);
restoreArgumentReference();
Call callCompileCall = call();
@@ -122,7 +122,7 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
- Jump hasCodeBlock4 = branch32(GreaterThanOrEqual, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParametersForConstruct)), Imm32(0));
+ Jump hasCodeBlock4 = branch32(GreaterThanOrEqual, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParametersForConstruct)), TrustedImm32(0));
preserveReturnAddressAfterCall(regT3);
restoreArgumentReference();
Call callCompileConstruct = call();
@@ -141,10 +141,10 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT1);
emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
restoreReturnAddressBeforeReturn(regT1);
- move(ImmPtr(&globalData->exceptionLocation), regT2);
+ move(TrustedImmPtr(&globalData->exceptionLocation), regT2);
storePtr(regT1, regT2);
poke(callFrameRegister, 1 + OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
- poke(ImmPtr(FunctionPtr(ctiVMThrowTrampoline).value()));
+ poke(TrustedImmPtr(FunctionPtr(ctiVMThrowTrampoline).value()));
ret();
// NativeCall Trampoline
@@ -199,7 +199,7 @@ JIT::Label JIT::privateCompileCTINativeCall(JSGlobalData* globalData, bool isCon
// get to its global data.
emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT0);
emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT0);
- emitPutToCallFrameHeader(regT1, RegisterFile::ScopeChain);
+ emitPutCellToCallFrameHeader(regT1, RegisterFile::ScopeChain);
peek(regT1);
emitPutToCallFrameHeader(regT1, RegisterFile::ReturnPC);
@@ -208,21 +208,21 @@ JIT::Label JIT::privateCompileCTINativeCall(JSGlobalData* globalData, bool isCon
// Host function signature: f(ExecState*);
move(callFrameRegister, X86Registers::edi);
- subPtr(Imm32(16 - sizeof(void*)), stackPointerRegister); // Align stack after call.
+ subPtr(TrustedImm32(16 - sizeof(void*)), stackPointerRegister); // Align stack after call.
emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, X86Registers::esi);
loadPtr(Address(X86Registers::esi, OBJECT_OFFSETOF(JSFunction, m_executable)), X86Registers::r9);
move(regT0, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
call(Address(X86Registers::r9, executableOffsetToFunction));
- addPtr(Imm32(16 - sizeof(void*)), stackPointerRegister);
+ addPtr(TrustedImm32(16 - sizeof(void*)), stackPointerRegister);
#elif CPU(ARM)
// Load caller frame's scope chain into this callframe so that whatever we call can
// get to its global data.
emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT2);
emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT2);
- emitPutToCallFrameHeader(regT1, RegisterFile::ScopeChain);
+ emitPutCellToCallFrameHeader(regT1, RegisterFile::ScopeChain);
preserveReturnAddressAfterCall(regT3); // Callee preserved
emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
@@ -243,7 +243,7 @@ JIT::Label JIT::privateCompileCTINativeCall(JSGlobalData* globalData, bool isCon
// get to its global data.
emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT0);
emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT0);
- emitPutToCallFrameHeader(regT1, RegisterFile::ScopeChain);
+ emitPutCellToCallFrameHeader(regT1, RegisterFile::ScopeChain);
preserveReturnAddressAfterCall(regT3); // Callee preserved
emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
@@ -253,7 +253,7 @@ JIT::Label JIT::privateCompileCTINativeCall(JSGlobalData* globalData, bool isCon
// Allocate stack space for 16 bytes (8-byte aligned)
// 16 bytes (unused) for 4 arguments
- subPtr(Imm32(16), stackPointerRegister);
+ subPtr(TrustedImm32(16), stackPointerRegister);
// Setup arg0
move(callFrameRegister, MIPSRegisters::a0);
@@ -265,7 +265,7 @@ JIT::Label JIT::privateCompileCTINativeCall(JSGlobalData* globalData, bool isCon
call(Address(regT2, executableOffsetToFunction));
// Restore stack space
- addPtr(Imm32(16), stackPointerRegister);
+ addPtr(TrustedImm32(16), stackPointerRegister);
restoreReturnAddressBeforeReturn(regT3);
@@ -289,12 +289,12 @@ JIT::Label JIT::privateCompileCTINativeCall(JSGlobalData* globalData, bool isCon
// Grab the return address.
preserveReturnAddressAfterCall(regT1);
- move(ImmPtr(&globalData->exceptionLocation), regT2);
+ move(TrustedImmPtr(&globalData->exceptionLocation), regT2);
storePtr(regT1, regT2);
poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
// Set the return address.
- move(ImmPtr(FunctionPtr(ctiVMThrowTrampoline).value()), regT1);
+ move(TrustedImmPtr(FunctionPtr(ctiVMThrowTrampoline).value()), regT1);
restoreReturnAddressBeforeReturn(regT1);
ret();
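This exception epilogue saves the faulting return address for the handler and then installs a new return address, so that the final ret() "returns" into ctiVMThrowTrampoline instead of back into JIT code. Conceptual effect, with hypothetical names for the slots the pokes touch:

    // Sketch only; the real code manipulates raw stack slots via poke().
    static void* vmThrowEpilogue(JSGlobalData* globalData, void* returnAddress)
    {
        globalData->exceptionLocation = returnAddress; // move + storePtr above
        return (void*)ctiVMThrowTrampoline;            // installed as the new return PC
    }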
@@ -378,8 +378,8 @@ void JIT::emit_op_check_has_instance(Instruction* currentInstruction)
emitJumpSlowCaseIfNotJSCell(regT0, baseVal);
// Check that baseVal 'ImplementsHasInstance'.
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT0);
- addSlowCase(branchTest8(Zero, Address(regT0, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(ImplementsHasInstance)));
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT0);
+ addSlowCase(branchTest8(Zero, Address(regT0, Structure::typeInfoFlagsOffset()), TrustedImm32(ImplementsHasInstance)));
}
void JIT::emit_op_instanceof(Instruction* currentInstruction)
@@ -400,29 +400,29 @@ void JIT::emit_op_instanceof(Instruction* currentInstruction)
emitJumpSlowCaseIfNotJSCell(regT1, proto);
// Check that prototype is an object
- loadPtr(Address(regT1, OBJECT_OFFSETOF(JSCell, m_structure)), regT3);
- addSlowCase(branch8(NotEqual, Address(regT3, OBJECT_OFFSETOF(Structure, m_typeInfo.m_type)), Imm32(ObjectType)));
+ loadPtr(Address(regT1, JSCell::structureOffset()), regT3);
+ addSlowCase(branch8(NotEqual, Address(regT3, Structure::typeInfoTypeOffset()), TrustedImm32(ObjectType)));
// FIXME: this check is only needed because the JSC API allows HasInstance to be overridden; we should deprecate this.
// Check that baseVal 'ImplementsDefaultHasInstance'.
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT0);
- addSlowCase(branchTest8(Zero, Address(regT0, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(ImplementsDefaultHasInstance)));
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT0);
+ addSlowCase(branchTest8(Zero, Address(regT0, Structure::typeInfoFlagsOffset()), TrustedImm32(ImplementsDefaultHasInstance)));
// Optimistically load the result true, and start looping.
// Initially, regT1 still contains proto and regT2 still contains value.
// As we loop regT2 will be updated with its prototype, recursively walking the prototype chain.
- move(ImmPtr(JSValue::encode(jsBoolean(true))), regT0);
+ move(TrustedImmPtr(JSValue::encode(jsBoolean(true))), regT0);
Label loop(this);
// Load the prototype of the object in regT2. If this is equal to regT1 - WIN!
// Otherwise, check if we've hit null - if we have then drop out of the loop, if not go again.
- loadPtr(Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
- loadPtr(Address(regT2, OBJECT_OFFSETOF(Structure, m_prototype)), regT2);
+ loadPtr(Address(regT2, JSCell::structureOffset()), regT2);
+ loadPtr(Address(regT2, Structure::prototypeOffset()), regT2);
Jump isInstance = branchPtr(Equal, regT2, regT1);
emitJumpIfJSCell(regT2).linkTo(loop, this);
// We get here either by dropping out of the loop, or if value was not an Object. Result is false.
- move(ImmPtr(JSValue::encode(jsBoolean(false))), regT0);
+ move(TrustedImmPtr(JSValue::encode(jsBoolean(false))), regT0);
// isInstance jumps right down to here, to skip setting the result to false (it has already set true).
isInstance.link(this);
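The loop above is the instanceof fast path: starting from value (regT2), load its Structure, load the stored prototype, and compare against proto (regT1) until the chain runs out. As plain C++ (helper names are assumptions; the emitted code reads the offsets directly):

    static bool fastInstanceOf(JSCell* value, JSCell* proto)
    {
        for (JSValue p = value->structure()->storedPrototype(); p.isCell();
             p = asCell(p)->structure()->storedPrototype()) {
            if (asCell(p) == proto)
                return true;   // isInstance: result stays jsBoolean(true)
        }
        return false;          // fell off the chain: jsBoolean(false)
    }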
@@ -452,7 +452,7 @@ void JIT::emit_op_construct(Instruction* currentInstruction)
void JIT::emit_op_get_global_var(Instruction* currentInstruction)
{
JSVariableObject* globalObject = m_codeBlock->globalObject();
- loadPtr(&globalObject->d->registers, regT0);
+ loadPtr(&globalObject->m_registers, regT0);
loadPtr(Address(regT0, currentInstruction[2].u.operand * sizeof(Register)), regT0);
emitPutVirtualRegister(currentInstruction[1].u.operand);
}
@@ -461,7 +461,7 @@ void JIT::emit_op_put_global_var(Instruction* currentInstruction)
{
emitGetVirtualRegister(currentInstruction[2].u.operand, regT1);
JSVariableObject* globalObject = m_codeBlock->globalObject();
- loadPtr(&globalObject->d->registers, regT0);
+ loadPtr(&globalObject->m_registers, regT0);
storePtr(regT1, Address(regT0, currentInstruction[1].u.operand * sizeof(Register)));
}
@@ -483,8 +483,7 @@ void JIT::emit_op_get_scoped_var(Instruction* currentInstruction)
loadPtr(Address(regT0, OBJECT_OFFSETOF(ScopeChainNode, next)), regT0);
loadPtr(Address(regT0, OBJECT_OFFSETOF(ScopeChainNode, object)), regT0);
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSVariableObject, d)), regT0);
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSVariableObject::JSVariableObjectData, registers)), regT0);
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSVariableObject, m_registers)), regT0);
loadPtr(Address(regT0, currentInstruction[2].u.operand * sizeof(Register)), regT0);
emitPutVirtualRegister(currentInstruction[1].u.operand);
}
@@ -508,8 +507,7 @@ void JIT::emit_op_put_scoped_var(Instruction* currentInstruction)
loadPtr(Address(regT1, OBJECT_OFFSETOF(ScopeChainNode, next)), regT1);
loadPtr(Address(regT1, OBJECT_OFFSETOF(ScopeChainNode, object)), regT1);
- loadPtr(Address(regT1, OBJECT_OFFSETOF(JSVariableObject, d)), regT1);
- loadPtr(Address(regT1, OBJECT_OFFSETOF(JSVariableObject::JSVariableObjectData, registers)), regT1);
+ loadPtr(Address(regT1, OBJECT_OFFSETOF(JSVariableObject, m_registers)), regT1);
storePtr(regT0, Address(regT1, currentInstruction[1].u.operand * sizeof(Register)));
}
@@ -567,8 +565,8 @@ void JIT::emit_op_ret_object_or_this(Instruction* currentInstruction)
// Return the result in %eax.
emitGetVirtualRegister(currentInstruction[1].u.operand, returnValueRegister);
Jump notJSCell = emitJumpIfNotJSCell(returnValueRegister);
- loadPtr(Address(returnValueRegister, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
- Jump notObject = branch8(NotEqual, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo) + OBJECT_OFFSETOF(TypeInfo, m_type)), Imm32(ObjectType));
+ loadPtr(Address(returnValueRegister, JSCell::structureOffset()), regT2);
+ Jump notObject = branch8(NotEqual, Address(regT2, Structure::typeInfoTypeOffset()), TrustedImm32(ObjectType));
// Grab the return address.
emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT1);
@@ -607,7 +605,7 @@ void JIT::emit_op_new_array(Instruction* currentInstruction)
void JIT::emit_op_resolve(Instruction* currentInstruction)
{
JITStubCall stubCall(this, cti_op_resolve);
- stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
+ stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
stubCall.call(currentInstruction[1].u.operand);
}
@@ -619,7 +617,7 @@ void JIT::emit_op_to_primitive(Instruction* currentInstruction)
emitGetVirtualRegister(src, regT0);
Jump isImm = emitJumpIfNotJSCell(regT0);
- addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr)));
+ addSlowCase(branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsStringVPtr)));
isImm.link(this);
if (dst != src)
@@ -638,7 +636,7 @@ void JIT::emit_op_strcat(Instruction* currentInstruction)
void JIT::emit_op_resolve_base(Instruction* currentInstruction)
{
JITStubCall stubCall(this, currentInstruction[3].u.operand ? cti_op_resolve_base_strict_put : cti_op_resolve_base);
- stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
+ stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
stubCall.call(currentInstruction[1].u.operand);
}
@@ -646,14 +644,14 @@ void JIT::emit_op_ensure_property_exists(Instruction* currentInstruction)
{
JITStubCall stubCall(this, cti_op_ensure_property_exists);
stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
- stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
+ stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
stubCall.call(currentInstruction[1].u.operand);
}
void JIT::emit_op_resolve_skip(Instruction* currentInstruction)
{
JITStubCall stubCall(this, cti_op_resolve_skip);
- stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
+ stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
stubCall.call(currentInstruction[1].u.operand);
}
@@ -667,9 +665,9 @@ void JIT::emit_op_resolve_global(Instruction* currentInstruction, bool)
void* offsetAddr = &(m_codeBlock->globalResolveInfo(currentIndex).offset);
// Check Structure of global object
- move(ImmPtr(globalObject), regT0);
+ move(TrustedImmPtr(globalObject), regT0);
loadPtr(structureAddress, regT1);
- addSlowCase(branchPtr(NotEqual, regT1, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)))); // Structures don't match
+ addSlowCase(branchPtr(NotEqual, regT1, Address(regT0, JSCell::structureOffset()))); // Structures don't match
// Load cached property
// Assume that the global object always uses external storage.
@@ -688,7 +686,7 @@ void JIT::emitSlow_op_resolve_global(Instruction* currentInstruction, Vector<Slo
linkSlowCase(iter);
JITStubCall stubCall(this, cti_op_resolve_global);
- stubCall.addArgument(ImmPtr(ident));
+ stubCall.addArgument(TrustedImmPtr(ident));
stubCall.addArgument(Imm32(currentIndex));
stubCall.addArgument(regT0);
stubCall.call(dst);
@@ -697,9 +695,9 @@ void JIT::emitSlow_op_resolve_global(Instruction* currentInstruction, Vector<Slo
void JIT::emit_op_not(Instruction* currentInstruction)
{
emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);
- xorPtr(Imm32(static_cast<int32_t>(JSImmediate::FullTagTypeBool)), regT0);
- addSlowCase(branchTestPtr(NonZero, regT0, Imm32(static_cast<int32_t>(~JSImmediate::ExtendedPayloadBitBoolValue))));
- xorPtr(Imm32(static_cast<int32_t>(JSImmediate::FullTagTypeBool | JSImmediate::ExtendedPayloadBitBoolValue)), regT0);
+ xorPtr(TrustedImm32(static_cast<int32_t>(JSImmediate::FullTagTypeBool)), regT0);
+ addSlowCase(branchTestPtr(NonZero, regT0, TrustedImm32(static_cast<int32_t>(~JSImmediate::ExtendedPayloadBitBoolValue))));
+ xorPtr(TrustedImm32(static_cast<int32_t>(JSImmediate::FullTagTypeBool | JSImmediate::ExtendedPayloadBitBoolValue)), regT0);
emitPutVirtualRegister(currentInstruction[1].u.operand);
}
@@ -708,11 +706,11 @@ void JIT::emit_op_jfalse(Instruction* currentInstruction)
unsigned target = currentInstruction[2].u.operand;
emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
- addJump(branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsNumber(0)))), target);
+ addJump(branchPtr(Equal, regT0, TrustedImmPtr(JSValue::encode(jsNumber(0)))), target);
Jump isNonZero = emitJumpIfImmediateInteger(regT0);
- addJump(branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsBoolean(false)))), target);
- addSlowCase(branchPtr(NotEqual, regT0, ImmPtr(JSValue::encode(jsBoolean(true)))));
+ addJump(branchPtr(Equal, regT0, TrustedImmPtr(JSValue::encode(jsBoolean(false)))), target);
+ addSlowCase(branchPtr(NotEqual, regT0, TrustedImmPtr(JSValue::encode(jsBoolean(true)))));
isNonZero.link(this);
RECORD_JUMP_TARGET(target);
@@ -727,14 +725,14 @@ void JIT::emit_op_jeq_null(Instruction* currentInstruction)
Jump isImmediate = emitJumpIfNotJSCell(regT0);
// First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
- addJump(branchTest8(NonZero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined)), target);
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
+ addJump(branchTest8(NonZero, Address(regT2, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)), target);
Jump wasNotImmediate = jump();
// Now handle the immediate cases - undefined & null
isImmediate.link(this);
- andPtr(Imm32(~JSImmediate::ExtendedTagBitUndefined), regT0);
- addJump(branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsNull()))), target);
+ andPtr(TrustedImm32(~JSImmediate::ExtendedTagBitUndefined), regT0);
+ addJump(branchPtr(Equal, regT0, TrustedImmPtr(JSValue::encode(jsNull()))), target);
wasNotImmediate.link(this);
RECORD_JUMP_TARGET(target);
@@ -748,14 +746,14 @@ void JIT::emit_op_jneq_null(Instruction* currentInstruction)
Jump isImmediate = emitJumpIfNotJSCell(regT0);
// First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
- addJump(branchTest8(Zero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined)), target);
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
+ addJump(branchTest8(Zero, Address(regT2, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)), target);
Jump wasNotImmediate = jump();
// Now handle the immediate cases - undefined & null
isImmediate.link(this);
- andPtr(Imm32(~JSImmediate::ExtendedTagBitUndefined), regT0);
- addJump(branchPtr(NotEqual, regT0, ImmPtr(JSValue::encode(jsNull()))), target);
+ andPtr(TrustedImm32(~JSImmediate::ExtendedTagBitUndefined), regT0);
+ addJump(branchPtr(NotEqual, regT0, TrustedImmPtr(JSValue::encode(jsNull()))), target);
wasNotImmediate.link(this);
RECORD_JUMP_TARGET(target);
@@ -764,11 +762,11 @@ void JIT::emit_op_jneq_null(Instruction* currentInstruction)
void JIT::emit_op_jneq_ptr(Instruction* currentInstruction)
{
unsigned src = currentInstruction[1].u.operand;
- JSCell* ptr = currentInstruction[2].u.jsCell;
+ JSCell* ptr = currentInstruction[2].u.jsCell.get();
unsigned target = currentInstruction[3].u.operand;
emitGetVirtualRegister(src, regT0);
- addJump(branchPtr(NotEqual, regT0, ImmPtr(JSValue::encode(JSValue(ptr)))), target);
+ addJump(branchPtr(NotEqual, regT0, TrustedImmPtr(JSValue::encode(JSValue(ptr)))), target);
RECORD_JUMP_TARGET(target);
}
@@ -777,7 +775,7 @@ void JIT::emit_op_jsr(Instruction* currentInstruction)
{
int retAddrDst = currentInstruction[1].u.operand;
int target = currentInstruction[2].u.operand;
- DataLabelPtr storeLocation = storePtrWithPatch(ImmPtr(0), Address(callFrameRegister, sizeof(Register) * retAddrDst));
+ DataLabelPtr storeLocation = storePtrWithPatch(TrustedImmPtr(0), Address(callFrameRegister, sizeof(Register) * retAddrDst));
addJump(jump(), target);
m_jsrSites.append(JSRInfo(storeLocation, label()));
killLastResultRegister();
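op_jsr writes a placeholder return address into the register file with storePtrWithPatch, jumps to the subroutine, and records both the store site and the label just past the jump in m_jsrSites; at link time each placeholder is patched to that label's real address so op_sret can jump back. Sketch of the fixup, with an assumed LinkBuffer-style API:

    // Hypothetical shape of the link-time pass over m_jsrSites:
    for (size_t i = 0; i < m_jsrSites.size(); ++i)
        patchBuffer.patch(m_jsrSites[i].storeLocation,
                          patchBuffer.locationOf(m_jsrSites[i].target));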
@@ -811,7 +809,7 @@ void JIT::emit_op_bitnot(Instruction* currentInstruction)
void JIT::emit_op_resolve_with_base(Instruction* currentInstruction)
{
JITStubCall stubCall(this, cti_op_resolve_with_base);
- stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
+ stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
stubCall.call(currentInstruction[2].u.operand);
}
@@ -819,7 +817,7 @@ void JIT::emit_op_resolve_with_base(Instruction* currentInstruction)
void JIT::emit_op_new_func_exp(Instruction* currentInstruction)
{
JITStubCall stubCall(this, cti_op_new_func_exp);
- stubCall.addArgument(ImmPtr(m_codeBlock->functionExpr(currentInstruction[2].u.operand)));
+ stubCall.addArgument(TrustedImmPtr(m_codeBlock->functionExpr(currentInstruction[2].u.operand)));
stubCall.call(currentInstruction[1].u.operand);
}
@@ -828,11 +826,11 @@ void JIT::emit_op_jtrue(Instruction* currentInstruction)
unsigned target = currentInstruction[2].u.operand;
emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
- Jump isZero = branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsNumber(0))));
+ Jump isZero = branchPtr(Equal, regT0, TrustedImmPtr(JSValue::encode(jsNumber(0))));
addJump(emitJumpIfImmediateInteger(regT0), target);
- addJump(branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsBoolean(true)))), target);
- addSlowCase(branchPtr(NotEqual, regT0, ImmPtr(JSValue::encode(jsBoolean(false)))));
+ addJump(branchPtr(Equal, regT0, TrustedImmPtr(JSValue::encode(jsBoolean(true)))), target);
+ addSlowCase(branchPtr(NotEqual, regT0, TrustedImmPtr(JSValue::encode(jsBoolean(false)))));
isZero.link(this);
RECORD_JUMP_TARGET(target);
@@ -893,8 +891,8 @@ void JIT::emit_op_get_pnames(Instruction* currentInstruction)
if (!m_codeBlock->isKnownNotImmediate(base))
isNotObject.append(emitJumpIfNotJSCell(regT0));
if (base != m_codeBlock->thisRegister() || m_codeBlock->isStrictMode()) {
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
- isNotObject.append(branch8(NotEqual, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_type)), Imm32(ObjectType)));
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
+ isNotObject.append(branch8(NotEqual, Address(regT2, Structure::typeInfoTypeOffset()), TrustedImm32(ObjectType)));
}
// We could inline the case where you have a valid cache, but
@@ -904,14 +902,15 @@ void JIT::emit_op_get_pnames(Instruction* currentInstruction)
getPnamesStubCall.addArgument(regT0);
getPnamesStubCall.call(dst);
load32(Address(regT0, OBJECT_OFFSETOF(JSPropertyNameIterator, m_jsStringsSize)), regT3);
- store32(Imm32(0), addressFor(i));
- store32(regT3, addressFor(size));
+ storePtr(tagTypeNumberRegister, payloadFor(i));
+ store32(TrustedImm32(Int32Tag), intTagFor(size));
+ store32(regT3, intPayloadFor(size));
Jump end = jump();
isNotObject.link(this);
move(regT0, regT1);
- and32(Imm32(~JSImmediate::ExtendedTagBitUndefined), regT1);
- addJump(branch32(Equal, regT1, Imm32(JSImmediate::FullTagTypeNull)), breakTarget);
+ and32(TrustedImm32(~JSImmediate::ExtendedTagBitUndefined), regT1);
+ addJump(branch32(Equal, regT1, TrustedImm32(JSImmediate::FullTagTypeNull)), breakTarget);
JITStubCall toObjectStubCall(this, cti_to_object);
toObjectStubCall.addArgument(regT0);
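Note the JSVALUE64 trick in the new get_pnames code: storePtr(tagTypeNumberRegister, payloadFor(i)) initializes i to the boxed integer 0 in one instruction, because under the JSVALUE64 encoding jsNumber(0) is exactly the TagTypeNumber bits, while size is kept as a raw int32 under an explicit Int32Tag. A sketch of that invariant (TagTypeNumber value assumed from the JSVALUE64 scheme):

    #include <stdint.h>
    static const uint64_t TagTypeNumber = 0xffff000000000000ull; // assumed
    static uint64_t boxInt32(int32_t n)
    {
        return TagTypeNumber | static_cast<uint32_t>(n); // boxInt32(0) == TagTypeNumber
    }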
@@ -933,8 +932,8 @@ void JIT::emit_op_next_pname(Instruction* currentInstruction)
JumpList callHasProperty;
Label begin(this);
- load32(addressFor(i), regT0);
- Jump end = branch32(Equal, regT0, addressFor(size));
+ load32(intPayloadFor(i), regT0);
+ Jump end = branch32(Equal, regT0, intPayloadFor(size));
// Grab key @ i
loadPtr(addressFor(it), regT1);
@@ -945,14 +944,14 @@ void JIT::emit_op_next_pname(Instruction* currentInstruction)
emitPutVirtualRegister(dst, regT2);
// Increment i
- add32(Imm32(1), regT0);
- store32(regT0, addressFor(i));
+ add32(TrustedImm32(1), regT0);
+ store32(regT0, intPayloadFor(i));
// Verify that i is valid:
emitGetVirtualRegister(base, regT0);
// Test base's structure
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
callHasProperty.append(branchPtr(NotEqual, regT2, Address(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructure)))));
// Test base's prototype chain
@@ -961,11 +960,11 @@ void JIT::emit_op_next_pname(Instruction* currentInstruction)
addJump(branchTestPtr(Zero, Address(regT3)), target);
Label checkPrototype(this);
- loadPtr(Address(regT2, OBJECT_OFFSETOF(Structure, m_prototype)), regT2);
+ loadPtr(Address(regT2, Structure::prototypeOffset()), regT2);
callHasProperty.append(emitJumpIfNotJSCell(regT2));
- loadPtr(Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
+ loadPtr(Address(regT2, JSCell::structureOffset()), regT2);
callHasProperty.append(branchPtr(NotEqual, regT2, Address(regT3)));
- addPtr(Imm32(sizeof(Structure*)), regT3);
+ addPtr(TrustedImm32(sizeof(Structure*)), regT3);
branchTestPtr(NonZero, Address(regT3)).linkTo(checkPrototype, this);
// Continue loop.
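The checkPrototype loop revalidates the iterator's cached prototype chain: regT3 walks a zero-terminated array of cached Structure* while regT2 follows the live chain, and any mismatch (or a non-cell prototype) bails to cti_has_property. In C++ terms (types assumed from the offsets used):

    static bool cachedChainStillValid(JSCell* cell, Structure** cachedChain)
    {
        for (Structure** cached = cachedChain; *cached; ++cached) {
            JSValue proto = cell->structure()->storedPrototype();
            if (!proto.isCell())
                return false;                  // callHasProperty slow path
            cell = asCell(proto);
            if (cell->structure() != *cached)
                return false;                  // chain mutated: revalidate
        }
        return true;                           // addJump(...Zero...) taken
    }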
@@ -1040,8 +1039,8 @@ void JIT::emit_op_to_jsnumber(Instruction* currentInstruction)
Jump wasImmediate = emitJumpIfImmediateInteger(regT0);
emitJumpSlowCaseIfNotJSCell(regT0, srcVReg);
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
- addSlowCase(branch8(NotEqual, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_type)), Imm32(NumberType)));
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
+ addSlowCase(branch8(NotEqual, Address(regT2, Structure::typeInfoTypeOffset()), TrustedImm32(NumberType)));
wasImmediate.link(this);
@@ -1051,7 +1050,7 @@ void JIT::emit_op_to_jsnumber(Instruction* currentInstruction)
void JIT::emit_op_push_new_scope(Instruction* currentInstruction)
{
JITStubCall stubCall(this, cti_op_push_new_scope);
- stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
+ stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
stubCall.addArgument(currentInstruction[3].u.operand, regT2);
stubCall.call(currentInstruction[1].u.operand);
}
@@ -1062,7 +1061,7 @@ void JIT::emit_op_catch(Instruction* currentInstruction)
move(regT0, callFrameRegister);
peek(regT3, OBJECT_OFFSETOF(struct JITStackFrame, globalData) / sizeof(void*));
loadPtr(Address(regT3, OBJECT_OFFSETOF(JSGlobalData, exception)), regT0);
- storePtr(ImmPtr(JSValue::encode(JSValue())), Address(regT3, OBJECT_OFFSETOF(JSGlobalData, exception)));
+ storePtr(TrustedImmPtr(JSValue::encode(JSValue())), Address(regT3, OBJECT_OFFSETOF(JSGlobalData, exception)));
emitPutVirtualRegister(currentInstruction[1].u.operand);
}
@@ -1157,15 +1156,15 @@ void JIT::emit_op_eq_null(Instruction* currentInstruction)
emitGetVirtualRegister(src1, regT0);
Jump isImmediate = emitJumpIfNotJSCell(regT0);
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
- set32Test8(NonZero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined), regT0);
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
+ set32Test8(NonZero, Address(regT2, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined), regT0);
Jump wasNotImmediate = jump();
isImmediate.link(this);
- andPtr(Imm32(~JSImmediate::ExtendedTagBitUndefined), regT0);
- setPtr(Equal, regT0, Imm32(JSImmediate::FullTagTypeNull), regT0);
+ andPtr(TrustedImm32(~JSImmediate::ExtendedTagBitUndefined), regT0);
+ setPtr(Equal, regT0, TrustedImm32(JSImmediate::FullTagTypeNull), regT0);
wasNotImmediate.link(this);
@@ -1182,15 +1181,15 @@ void JIT::emit_op_neq_null(Instruction* currentInstruction)
emitGetVirtualRegister(src1, regT0);
Jump isImmediate = emitJumpIfNotJSCell(regT0);
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
- set32Test8(Zero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined), regT0);
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
+ set32Test8(Zero, Address(regT2, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined), regT0);
Jump wasNotImmediate = jump();
isImmediate.link(this);
- andPtr(Imm32(~JSImmediate::ExtendedTagBitUndefined), regT0);
- setPtr(NotEqual, regT0, Imm32(JSImmediate::FullTagTypeNull), regT0);
+ andPtr(TrustedImm32(~JSImmediate::ExtendedTagBitUndefined), regT0);
+ setPtr(NotEqual, regT0, TrustedImm32(JSImmediate::FullTagTypeNull), regT0);
wasNotImmediate.link(this);
@@ -1237,7 +1236,7 @@ void JIT::emit_op_init_lazy_reg(Instruction* currentInstruction)
{
unsigned dst = currentInstruction[1].u.operand;
- storePtr(ImmPtr(0), Address(callFrameRegister, sizeof(Register) * dst));
+ storePtr(TrustedImmPtr(0), Address(callFrameRegister, sizeof(Register) * dst));
}
void JIT::emit_op_convert_this(Instruction* currentInstruction)
@@ -1245,22 +1244,22 @@ void JIT::emit_op_convert_this(Instruction* currentInstruction)
emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
emitJumpSlowCaseIfNotJSCell(regT0);
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT1);
- addSlowCase(branchTest8(NonZero, Address(regT1, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(NeedsThisConversion)));
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT1);
+ addSlowCase(branchTest8(NonZero, Address(regT1, Structure::typeInfoFlagsOffset()), TrustedImm32(NeedsThisConversion)));
}
void JIT::emit_op_convert_this_strict(Instruction* currentInstruction)
{
emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
Jump notNull = branchTestPtr(NonZero, regT0);
- move(ImmPtr(JSValue::encode(jsNull())), regT0);
+ move(TrustedImmPtr(JSValue::encode(jsNull())), regT0);
emitPutVirtualRegister(currentInstruction[1].u.operand, regT0);
Jump setThis = jump();
notNull.link(this);
Jump isImmediate = emitJumpIfNotJSCell(regT0);
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT1);
- Jump notAnObject = branch8(NotEqual, Address(regT1, OBJECT_OFFSETOF(Structure, m_typeInfo.m_type)), Imm32(ObjectType));
- addSlowCase(branchTest8(NonZero, Address(regT1, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(NeedsThisConversion)));
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT1);
+ Jump notAnObject = branch8(NotEqual, Address(regT1, Structure::typeInfoTypeOffset()), TrustedImm32(ObjectType));
+ addSlowCase(branchTest8(NonZero, Address(regT1, Structure::typeInfoFlagsOffset()), TrustedImm32(NeedsThisConversion)));
isImmediate.link(this);
notAnObject.link(this);
setThis.link(this);
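Unlike op_convert_this, the strict variant never coerces this to an object: the fast path only normalizes the empty encoding (an absent this) to null, passes immediates and ordinary objects through untouched, and leaves just the NeedsThisConversion cells to the slow case. Its value-level behavior, sketched:

    // Sketch; JSValue() encodes as all-zero bits, which is what the
    // branchTestPtr(NonZero, regT0) above tests for.
    static JSValue convertThisStrictFast(JSValue thisValue)
    {
        if (!thisValue)          // empty: no |this| was provided
            return jsNull();     // move(TrustedImmPtr(encode(jsNull())), regT0)
        return thisValue;        // immediates and plain objects unchanged
    }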
@@ -1375,7 +1374,7 @@ void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCas
void JIT::emitSlow_op_not(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
linkSlowCase(iter);
- xorPtr(Imm32(static_cast<int32_t>(JSImmediate::FullTagTypeBool)), regT0);
+ xorPtr(TrustedImm32(static_cast<int32_t>(JSImmediate::FullTagTypeBool)), regT0);
JITStubCall stubCall(this, cti_op_not);
stubCall.addArgument(regT0);
stubCall.call(currentInstruction[1].u.operand);
@@ -1443,7 +1442,7 @@ void JIT::emitSlow_op_neq(Instruction* currentInstruction, Vector<SlowCaseEntry>
stubCall.addArgument(regT0);
stubCall.addArgument(regT1);
stubCall.call();
- xor32(Imm32(0x1), regT0);
+ xor32(TrustedImm32(0x1), regT0);
emitTagAsBoolImmediate(regT0);
emitPutVirtualRegister(currentInstruction[1].u.operand);
}
@@ -1533,7 +1532,7 @@ void JIT::emit_op_get_arguments_length(Instruction* currentInstruction)
int argumentsRegister = currentInstruction[2].u.operand;
addSlowCase(branchTestPtr(NonZero, addressFor(argumentsRegister)));
emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT0);
- sub32(Imm32(1), regT0);
+ sub32(TrustedImm32(1), regT0);
emitFastArithReTagImmediate(regT0, regT0);
emitPutVirtualRegister(dst, regT0);
}
@@ -1548,7 +1547,7 @@ void JIT::emitSlow_op_get_arguments_length(Instruction* currentInstruction, Vect
emitGetVirtualRegister(base, regT0);
JITStubCall stubCall(this, cti_op_get_by_id_generic);
stubCall.addArgument(regT0);
- stubCall.addArgument(ImmPtr(ident));
+ stubCall.addArgument(TrustedImmPtr(ident));
stubCall.call(dst);
}
@@ -1560,7 +1559,7 @@ void JIT::emit_op_get_argument_by_val(Instruction* currentInstruction)
addSlowCase(branchTestPtr(NonZero, addressFor(argumentsRegister)));
emitGetVirtualRegister(property, regT1);
addSlowCase(emitJumpIfNotImmediateInteger(regT1));
- add32(Imm32(1), regT1);
+ add32(TrustedImm32(1), regT1);
// regT1 now contains the integer index of the argument we want, including this
emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT2);
addSlowCase(branch32(AboveOrEqual, regT1, regT2));
@@ -1576,7 +1575,7 @@ void JIT::emit_op_get_argument_by_val(Instruction* currentInstruction)
}
addPtr(Imm32(static_cast<unsigned>(-(RegisterFile::CallFrameHeaderSize + numArgs) * sizeof(Register))), callFrameRegister, regT0);
- mul32(Imm32(sizeof(Register)), regT2, regT2);
+ mul32(TrustedImm32(sizeof(Register)), regT2, regT2);
subPtr(regT2, regT0);
loadPtr(BaseIndex(regT0, regT1, TimesEight, 0), regT0);
if (numArgs)
@@ -1644,7 +1643,7 @@ void JIT::emitSlow_op_resolve_global_dynamic(Instruction* currentInstruction, Ve
while (skip--)
linkSlowCase(iter);
JITStubCall resolveStubCall(this, cti_op_resolve);
- resolveStubCall.addArgument(ImmPtr(ident));
+ resolveStubCall.addArgument(TrustedImmPtr(ident));
resolveStubCall.call(dst);
emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_resolve_global_dynamic));
@@ -1652,7 +1651,7 @@ void JIT::emitSlow_op_resolve_global_dynamic(Instruction* currentInstruction, Ve
linkSlowCase(iter); // We managed to skip all the nodes in the scope chain, but the cache missed.
JITStubCall stubCall(this, cti_op_resolve_global);
- stubCall.addArgument(ImmPtr(ident));
+ stubCall.addArgument(TrustedImmPtr(ident));
stubCall.addArgument(Imm32(currentIndex));
stubCall.addArgument(regT0);
stubCall.call(dst);
@@ -1661,7 +1660,7 @@ void JIT::emitSlow_op_resolve_global_dynamic(Instruction* currentInstruction, Ve
void JIT::emit_op_new_regexp(Instruction* currentInstruction)
{
JITStubCall stubCall(this, cti_op_new_regexp);
- stubCall.addArgument(ImmPtr(m_codeBlock->regexp(currentInstruction[2].u.operand)));
+ stubCall.addArgument(TrustedImmPtr(m_codeBlock->regexp(currentInstruction[2].u.operand)));
stubCall.call(currentInstruction[1].u.operand);
}
@@ -1685,26 +1684,27 @@ void JIT::emit_op_load_varargs(Instruction* currentInstruction)
}
#if USE(JSVALUE32_64)
- addSlowCase(branch32(NotEqual, tagFor(argsOffset), Imm32(JSValue::EmptyValueTag)));
+ addSlowCase(branch32(NotEqual, tagFor(argsOffset), TrustedImm32(JSValue::EmptyValueTag)));
#else
addSlowCase(branchTestPtr(NonZero, addressFor(argsOffset)));
#endif
// Load arg count into regT0
emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT0);
- storePtr(regT0, addressFor(argCountDst));
- Jump endBranch = branch32(Equal, regT0, Imm32(1));
+ store32(TrustedImm32(Int32Tag), intTagFor(argCountDst));
+ store32(regT0, intPayloadFor(argCountDst));
+ Jump endBranch = branch32(Equal, regT0, TrustedImm32(1));
- mul32(Imm32(sizeof(Register)), regT0, regT3);
- addPtr(Imm32(static_cast<unsigned>(sizeof(Register) - RegisterFile::CallFrameHeaderSize * sizeof(Register))), callFrameRegister, regT1);
+ mul32(TrustedImm32(sizeof(Register)), regT0, regT3);
+ addPtr(TrustedImm32(static_cast<unsigned>(sizeof(Register) - RegisterFile::CallFrameHeaderSize * sizeof(Register))), callFrameRegister, regT1);
subPtr(regT3, regT1); // regT1 is now the start of the out of line arguments
addPtr(Imm32(argsOffset * sizeof(Register)), callFrameRegister, regT2); // regT2 is the target buffer
// Bounds check the registerfile
addPtr(regT2, regT3);
addPtr(Imm32((registerOffset - argsOffset) * sizeof(Register)), regT3);
- addSlowCase(branchPtr(Below, AbsoluteAddress(&m_globalData->interpreter->registerFile().m_end), regT3));
+ addSlowCase(branchPtr(Below, AbsoluteAddress(m_globalData->interpreter->registerFile().addressOfEnd()), regT3));
- sub32(Imm32(1), regT0);
+ sub32(TrustedImm32(1), regT0);
Label loopStart = label();
loadPtr(BaseIndex(regT1, regT0, TimesEight, static_cast<unsigned>(0 - 2 * sizeof(Register))), regT3);
storePtr(regT3, BaseIndex(regT2, regT0, TimesEight, static_cast<unsigned>(0 - sizeof(Register))));
@@ -1712,7 +1712,7 @@ void JIT::emit_op_load_varargs(Instruction* currentInstruction)
loadPtr(BaseIndex(regT1, regT0, TimesEight, static_cast<unsigned>(sizeof(void*) - 2 * sizeof(Register))), regT3);
storePtr(regT3, BaseIndex(regT2, regT0, TimesEight, static_cast<unsigned>(sizeof(void*) - sizeof(Register))));
#endif
- branchSubPtr(NonZero, Imm32(1), regT0).linkTo(loopStart, this);
+ branchSubPtr(NonZero, TrustedImm32(1), regT0).linkTo(loopStart, this);
endBranch.link(this);
}
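The copy loop transfers argCount - 1 Register slots from the caller's out-of-line argument area into the target buffer, counting regT0 down to zero; on 32-bit builds each Register moves as two word-sized halves, which is what the second load/store pair under the #endif handles. A rough equivalent, with the indexing deliberately simplified (the emitted code bakes -1/-2 slot offsets into its BaseIndex operands):

    static void copyVarargs(Register* target, const Register* source, uint32_t argCount)
    {
        for (uint32_t i = argCount - 1; i; --i)  // sub32 ... branchSubPtr loop
            target[i] = source[i];
    }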
@@ -1729,8 +1729,9 @@ void JIT::emitSlow_op_load_varargs(Instruction* currentInstruction, Vector<SlowC
JITStubCall stubCall(this, cti_op_load_varargs);
stubCall.addArgument(Imm32(argsOffset));
stubCall.call();
- // Stores a naked int32 in the register file.
- store32(returnValueRegister, Address(callFrameRegister, argCountDst * sizeof(Register)));
+
+ store32(TrustedImm32(Int32Tag), intTagFor(argCountDst));
+ store32(returnValueRegister, intPayloadFor(argCountDst));
}
void JIT::emit_op_new_func(Instruction* currentInstruction)
@@ -1739,13 +1740,13 @@ void JIT::emit_op_new_func(Instruction* currentInstruction)
int dst = currentInstruction[1].u.operand;
if (currentInstruction[3].u.operand) {
#if USE(JSVALUE32_64)
- lazyJump = branch32(NotEqual, tagFor(dst), Imm32(JSValue::EmptyValueTag));
+ lazyJump = branch32(NotEqual, tagFor(dst), TrustedImm32(JSValue::EmptyValueTag));
#else
lazyJump = branchTestPtr(NonZero, addressFor(dst));
#endif
}
JITStubCall stubCall(this, cti_op_new_func);
- stubCall.addArgument(ImmPtr(m_codeBlock->functionDecl(currentInstruction[2].u.operand)));
+ stubCall.addArgument(TrustedImmPtr(m_codeBlock->functionDecl(currentInstruction[2].u.operand)));
stubCall.call(currentInstruction[1].u.operand);
if (currentInstruction[3].u.operand)
lazyJump.link(this);
diff --git a/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp b/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp
index 0c8402b..bc0b2cb 100644
--- a/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp
+++ b/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp
@@ -52,15 +52,15 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
// regT0 holds payload, regT1 holds tag
- Jump string_failureCases1 = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));
- Jump string_failureCases2 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr));
+ Jump string_failureCases1 = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag));
+ Jump string_failureCases2 = branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsStringVPtr));
// Checks out okay! - get the length from the UString.
load32(Address(regT0, OBJECT_OFFSETOF(JSString, m_length)), regT2);
- Jump string_failureCases3 = branch32(Above, regT2, Imm32(INT_MAX));
+ Jump string_failureCases3 = branch32(Above, regT2, TrustedImm32(INT_MAX));
move(regT2, regT0);
- move(Imm32(JSValue::Int32Tag), regT1);
+ move(TrustedImm32(JSValue::Int32Tag), regT1);
ret();
#endif
@@ -103,7 +103,7 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
- Jump hasCodeBlock3 = branch32(GreaterThanOrEqual, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParametersForCall)), Imm32(0));
+ Jump hasCodeBlock3 = branch32(GreaterThanOrEqual, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParametersForCall)), TrustedImm32(0));
preserveReturnAddressAfterCall(regT3);
restoreArgumentReference();
Call callCompileCall = call();
@@ -123,7 +123,7 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
- Jump hasCodeBlock4 = branch32(GreaterThanOrEqual, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParametersForConstruct)), Imm32(0));
+ Jump hasCodeBlock4 = branch32(GreaterThanOrEqual, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParametersForConstruct)), TrustedImm32(0));
preserveReturnAddressAfterCall(regT3);
restoreArgumentReference();
Call callCompileConstruct = call();
@@ -142,10 +142,10 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT1);
emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
restoreReturnAddressBeforeReturn(regT1);
- move(ImmPtr(&globalData->exceptionLocation), regT2);
+ move(TrustedImmPtr(&globalData->exceptionLocation), regT2);
storePtr(regT1, regT2);
poke(callFrameRegister, 1 + OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
- poke(ImmPtr(FunctionPtr(ctiVMThrowTrampoline).value()));
+ poke(TrustedImmPtr(FunctionPtr(ctiVMThrowTrampoline).value()));
ret();
// NativeCall Trampoline
@@ -205,7 +205,7 @@ JIT::Label JIT::privateCompileCTINativeCall(JSGlobalData* globalData, bool isCon
// get to its global data.
emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT0);
emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT0);
- emitPutToCallFrameHeader(regT1, RegisterFile::ScopeChain);
+ emitPutCellToCallFrameHeader(regT1, RegisterFile::ScopeChain);
peek(regT1);
emitPutToCallFrameHeader(regT1, RegisterFile::ReturnPC);
@@ -214,7 +214,7 @@ JIT::Label JIT::privateCompileCTINativeCall(JSGlobalData* globalData, bool isCon
// Host function signature: f(ExecState*);
move(callFrameRegister, X86Registers::ecx);
- subPtr(Imm32(16 - sizeof(void*)), stackPointerRegister); // Align stack after call.
+ subPtr(TrustedImm32(16 - sizeof(void*)), stackPointerRegister); // Align stack after call.
// call the function
emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, regT1);
@@ -222,14 +222,14 @@ JIT::Label JIT::privateCompileCTINativeCall(JSGlobalData* globalData, bool isCon
move(regT0, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
call(Address(regT1, executableOffsetToFunction));
- addPtr(Imm32(16 - sizeof(void*)), stackPointerRegister);
+ addPtr(TrustedImm32(16 - sizeof(void*)), stackPointerRegister);
#elif CPU(ARM)
// Load caller frame's scope chain into this callframe so that whatever we call can
// get to its global data.
emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT2);
emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT2);
- emitPutToCallFrameHeader(regT1, RegisterFile::ScopeChain);
+ emitPutCellToCallFrameHeader(regT1, RegisterFile::ScopeChain);
preserveReturnAddressAfterCall(regT3); // Callee preserved
emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
@@ -251,7 +251,7 @@ JIT::Label JIT::privateCompileCTINativeCall(JSGlobalData* globalData, bool isCon
// get to its global data.
emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT0);
emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT0);
- emitPutToCallFrameHeader(regT1, RegisterFile::ScopeChain);
+ emitPutCellToCallFrameHeader(regT1, RegisterFile::ScopeChain);
preserveReturnAddressAfterCall(regT3); // Callee preserved
emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
@@ -261,7 +261,7 @@ JIT::Label JIT::privateCompileCTINativeCall(JSGlobalData* globalData, bool isCon
// Allocate stack space for 16 bytes (8-byte aligned)
// 16 bytes (unused) for 4 arguments
- subPtr(Imm32(16), stackPointerRegister);
+ subPtr(TrustedImm32(16), stackPointerRegister);
// Setup arg0
move(callFrameRegister, MIPSRegisters::a0);
@@ -273,7 +273,7 @@ JIT::Label JIT::privateCompileCTINativeCall(JSGlobalData* globalData, bool isCon
call(Address(regT2, executableOffsetToFunction));
// Restore stack space
- addPtr(Imm32(16), stackPointerRegister);
+ addPtr(TrustedImm32(16), stackPointerRegister);
restoreReturnAddressBeforeReturn(regT3);
@@ -285,7 +285,7 @@ JIT::Label JIT::privateCompileCTINativeCall(JSGlobalData* globalData, bool isCon
#endif // CPU(X86)
// Check for an exception
- Jump sawException = branch32(NotEqual, AbsoluteAddress(reinterpret_cast<char*>(&globalData->exception) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), Imm32(JSValue::EmptyValueTag));
+ Jump sawException = branch32(NotEqual, AbsoluteAddress(reinterpret_cast<char*>(&globalData->exception) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), TrustedImm32(JSValue::EmptyValueTag));
// Return.
ret();
@@ -296,12 +296,12 @@ JIT::Label JIT::privateCompileCTINativeCall(JSGlobalData* globalData, bool isCon
// Grab the return address.
preserveReturnAddressAfterCall(regT1);
- move(ImmPtr(&globalData->exceptionLocation), regT2);
+ move(TrustedImmPtr(&globalData->exceptionLocation), regT2);
storePtr(regT1, regT2);
poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
// Set the return address.
- move(ImmPtr(FunctionPtr(ctiVMThrowTrampoline).value()), regT1);
+ move(TrustedImmPtr(FunctionPtr(ctiVMThrowTrampoline).value()), regT1);
restoreReturnAddressBeforeReturn(regT1);
ret();
@@ -321,7 +321,7 @@ JIT::CodePtr JIT::privateCompileCTINativeCall(PassRefPtr<ExecutablePool> executa
// get to its global data.
emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT0);
emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT0);
- emitPutToCallFrameHeader(regT1, RegisterFile::ScopeChain);
+ emitPutCellToCallFrameHeader(regT1, RegisterFile::ScopeChain);
peek(regT1);
emitPutToCallFrameHeader(regT1, RegisterFile::ReturnPC);
@@ -330,21 +330,21 @@ JIT::CodePtr JIT::privateCompileCTINativeCall(PassRefPtr<ExecutablePool> executa
// Host function signature: f(ExecState*);
move(callFrameRegister, X86Registers::ecx);
- subPtr(Imm32(16 - sizeof(void*)), stackPointerRegister); // Align stack after call.
+ subPtr(TrustedImm32(16 - sizeof(void*)), stackPointerRegister); // Align stack after call.
move(regT0, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
// call the function
nativeCall = call();
- addPtr(Imm32(16 - sizeof(void*)), stackPointerRegister);
+ addPtr(TrustedImm32(16 - sizeof(void*)), stackPointerRegister);
#elif CPU(ARM)
// Load caller frame's scope chain into this callframe so that whatever we call can
// get to its global data.
emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT2);
emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT2);
- emitPutToCallFrameHeader(regT1, RegisterFile::ScopeChain);
+ emitPutCellToCallFrameHeader(regT1, RegisterFile::ScopeChain);
preserveReturnAddressAfterCall(regT3); // Callee preserved
emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
@@ -367,7 +367,7 @@ JIT::CodePtr JIT::privateCompileCTINativeCall(PassRefPtr<ExecutablePool> executa
// get to its global data.
emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT0);
emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT0);
- emitPutToCallFrameHeader(regT1, RegisterFile::ScopeChain);
+ emitPutCellToCallFrameHeader(regT1, RegisterFile::ScopeChain);
preserveReturnAddressAfterCall(regT3); // Callee preserved
emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
@@ -377,7 +377,7 @@ JIT::CodePtr JIT::privateCompileCTINativeCall(PassRefPtr<ExecutablePool> executa
// Allocate stack space for 16 bytes (8-byte aligned)
// 16 bytes (unused) for 4 arguments
- subPtr(Imm32(16), stackPointerRegister);
+ subPtr(TrustedImm32(16), stackPointerRegister);
// Setup arg0
move(callFrameRegister, MIPSRegisters::a0);
@@ -391,7 +391,7 @@ JIT::CodePtr JIT::privateCompileCTINativeCall(PassRefPtr<ExecutablePool> executa
nativeCall = call();
// Restore stack space
- addPtr(Imm32(16), stackPointerRegister);
+ addPtr(TrustedImm32(16), stackPointerRegister);
restoreReturnAddressBeforeReturn(regT3);
@@ -402,7 +402,7 @@ JIT::CodePtr JIT::privateCompileCTINativeCall(PassRefPtr<ExecutablePool> executa
#endif // CPU(X86)
// Check for an exception
- Jump sawException = branch32(NotEqual, AbsoluteAddress(reinterpret_cast<char*>(&globalData->exception) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), Imm32(JSValue::EmptyValueTag));
+ Jump sawException = branch32(NotEqual, AbsoluteAddress(reinterpret_cast<char*>(&globalData->exception) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), TrustedImm32(JSValue::EmptyValueTag));
// Return.
ret();
@@ -413,12 +413,12 @@ JIT::CodePtr JIT::privateCompileCTINativeCall(PassRefPtr<ExecutablePool> executa
// Grab the return address.
preserveReturnAddressAfterCall(regT1);
- move(ImmPtr(&globalData->exceptionLocation), regT2);
+ move(TrustedImmPtr(&globalData->exceptionLocation), regT2);
storePtr(regT1, regT2);
poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
// Set the return address.
- move(ImmPtr(FunctionPtr(ctiVMThrowTrampoline).value()), regT1);
+ move(TrustedImmPtr(FunctionPtr(ctiVMThrowTrampoline).value()), regT1);
restoreReturnAddressBeforeReturn(regT1);
ret();
@@ -470,21 +470,21 @@ void JIT::emit_op_loop_if_lesseq(Instruction* currentInstruction)
if (isOperandConstantImmediateInt(op1)) {
emitLoad(op2, regT1, regT0);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
addJump(branch32(GreaterThanOrEqual, regT0, Imm32(getConstantOperand(op1).asInt32())), target);
return;
}
if (isOperandConstantImmediateInt(op2)) {
emitLoad(op1, regT1, regT0);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
addJump(branch32(LessThanOrEqual, regT0, Imm32(getConstantOperand(op2).asInt32())), target);
return;
}
emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+ addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
addJump(branch32(LessThanOrEqual, regT0, regT2), target);
}
@@ -520,8 +520,8 @@ void JIT::emit_op_check_has_instance(Instruction* currentInstruction)
emitJumpSlowCaseIfNotJSCell(baseVal);
// Check that baseVal 'ImplementsHasInstance'.
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT0);
- addSlowCase(branchTest8(Zero, Address(regT0, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(ImplementsHasInstance)));
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT0);
+ addSlowCase(branchTest8(Zero, Address(regT0, Structure::typeInfoFlagsOffset()), TrustedImm32(ImplementsHasInstance)));
}
void JIT::emit_op_instanceof(Instruction* currentInstruction)
@@ -542,29 +542,29 @@ void JIT::emit_op_instanceof(Instruction* currentInstruction)
emitJumpSlowCaseIfNotJSCell(proto);
// Check that prototype is an object
- loadPtr(Address(regT1, OBJECT_OFFSETOF(JSCell, m_structure)), regT3);
- addSlowCase(branch8(NotEqual, Address(regT3, OBJECT_OFFSETOF(Structure, m_typeInfo.m_type)), Imm32(ObjectType)));
+ loadPtr(Address(regT1, JSCell::structureOffset()), regT3);
+ addSlowCase(branch8(NotEqual, Address(regT3, Structure::typeInfoTypeOffset()), TrustedImm32(ObjectType)));
// FIXME: this check is only needed because the JSC API allows HasInstance to be overridden; we should deprecate this.
// Check that baseVal 'ImplementsDefaultHasInstance'.
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT0);
- addSlowCase(branchTest8(Zero, Address(regT0, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(ImplementsDefaultHasInstance)));
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT0);
+ addSlowCase(branchTest8(Zero, Address(regT0, Structure::typeInfoFlagsOffset()), TrustedImm32(ImplementsDefaultHasInstance)));
// Optimistically load the result true, and start looping.
// Initially, regT1 still contains proto and regT2 still contains value.
// As we loop regT2 will be updated with its prototype, recursively walking the prototype chain.
- move(Imm32(JSValue::TrueTag), regT0);
+ move(TrustedImm32(JSValue::TrueTag), regT0);
Label loop(this);
// Load the prototype of the cell in regT2. If this is equal to regT1 - WIN!
// Otherwise, check if we've hit null - if we have then drop out of the loop, if not go again.
- loadPtr(Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
- load32(Address(regT2, OBJECT_OFFSETOF(Structure, m_prototype) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT2);
+ loadPtr(Address(regT2, JSCell::structureOffset()), regT2);
+ load32(Address(regT2, Structure::prototypeOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT2);
Jump isInstance = branchPtr(Equal, regT2, regT1);
branchTest32(NonZero, regT2).linkTo(loop, this);
// We get here either by dropping out of the loop, or if value was not an Object. Result is false.
- move(Imm32(JSValue::FalseTag), regT0);
+ move(TrustedImm32(JSValue::FalseTag), regT0);
// isInstance jumps right down to here, to skip setting the result to false (it has already set true).
isInstance.link(this);
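
For reference, a minimal C++ sketch of the semantics this emitted loop implements, using a hypothetical Obj type rather than JSC's real classes: assume true optimistically, then walk the value's prototype chain until we either find proto or fall off the end at null.

    #include <cstdio>
    struct Obj { const Obj* prototype; }; // hypothetical stand-in for JSCell/Structure
    static bool instanceOf(const Obj* value, const Obj* proto)
    {
        // Mirrors the emitted loop: regT2 repeatedly becomes its own prototype.
        for (const Obj* p = value->prototype; p; p = p->prototype) {
            if (p == proto)
                return true; // the 'isInstance' early exit
        }
        return false; // dropped out of the loop: result is false
    }
    int main()
    {
        Obj base = { nullptr }, derived = { &base };
        std::printf("%d %d\n", instanceOf(&derived, &base), instanceOf(&base, &derived)); // 1 0
    }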
@@ -609,7 +609,7 @@ void JIT::emit_op_get_global_var(Instruction* currentInstruction)
ASSERT(globalObject->isGlobalObject());
int index = currentInstruction[2].u.operand;
- loadPtr(&globalObject->d()->registers, regT2);
+ loadPtr(&globalObject->m_registers, regT2);
emitLoad(index, regT1, regT0, regT2);
emitStore(dst, regT1, regT0);
@@ -625,7 +625,7 @@ void JIT::emit_op_put_global_var(Instruction* currentInstruction)
emitLoad(value, regT1, regT0);
- loadPtr(&globalObject->d()->registers, regT2);
+ loadPtr(&globalObject->m_registers, regT2);
emitStore(index, regT1, regT0, regT2);
map(m_bytecodeOffset + OPCODE_LENGTH(op_put_global_var), value, regT1, regT0);
}
@@ -642,7 +642,7 @@ void JIT::emit_op_get_scoped_var(Instruction* currentInstruction)
if (checkTopLevel && skip--) {
Jump activationNotCreated;
if (checkTopLevel)
- activationNotCreated = branch32(Equal, tagFor(m_codeBlock->activationRegister()), Imm32(JSValue::EmptyValueTag));
+ activationNotCreated = branch32(Equal, tagFor(m_codeBlock->activationRegister()), TrustedImm32(JSValue::EmptyValueTag));
loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, next)), regT2);
activationNotCreated.link(this);
}
@@ -650,8 +650,7 @@ void JIT::emit_op_get_scoped_var(Instruction* currentInstruction)
loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, next)), regT2);
loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, object)), regT2);
- loadPtr(Address(regT2, OBJECT_OFFSETOF(JSVariableObject, d)), regT2);
- loadPtr(Address(regT2, OBJECT_OFFSETOF(JSVariableObject::JSVariableObjectData, registers)), regT2);
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(JSVariableObject, m_registers)), regT2);
emitLoad(index, regT1, regT0, regT2);
emitStore(dst, regT1, regT0);
@@ -672,7 +671,7 @@ void JIT::emit_op_put_scoped_var(Instruction* currentInstruction)
if (checkTopLevel && skip--) {
Jump activationNotCreated;
if (checkTopLevel)
- activationNotCreated = branch32(Equal, tagFor(m_codeBlock->activationRegister()), Imm32(JSValue::EmptyValueTag));
+ activationNotCreated = branch32(Equal, tagFor(m_codeBlock->activationRegister()), TrustedImm32(JSValue::EmptyValueTag));
loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, next)), regT2);
activationNotCreated.link(this);
}
@@ -680,8 +679,7 @@ void JIT::emit_op_put_scoped_var(Instruction* currentInstruction)
loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, next)), regT2);
loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, object)), regT2);
- loadPtr(Address(regT2, OBJECT_OFFSETOF(JSVariableObject, d)), regT2);
- loadPtr(Address(regT2, OBJECT_OFFSETOF(JSVariableObject::JSVariableObjectData, registers)), regT2);
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(JSVariableObject, m_registers)), regT2);
emitStore(index, regT1, regT0, regT2);
map(m_bytecodeOffset + OPCODE_LENGTH(op_put_scoped_var), value, regT1, regT0);
@@ -691,8 +689,8 @@ void JIT::emit_op_tear_off_activation(Instruction* currentInstruction)
{
unsigned activation = currentInstruction[1].u.operand;
unsigned arguments = currentInstruction[2].u.operand;
- Jump activationCreated = branch32(NotEqual, tagFor(activation), Imm32(JSValue::EmptyValueTag));
- Jump argumentsNotCreated = branch32(Equal, tagFor(arguments), Imm32(JSValue::EmptyValueTag));
+ Jump activationCreated = branch32(NotEqual, tagFor(activation), TrustedImm32(JSValue::EmptyValueTag));
+ Jump argumentsNotCreated = branch32(Equal, tagFor(arguments), TrustedImm32(JSValue::EmptyValueTag));
activationCreated.link(this);
JITStubCall stubCall(this, cti_op_tear_off_activation);
stubCall.addArgument(currentInstruction[1].u.operand);
@@ -705,7 +703,7 @@ void JIT::emit_op_tear_off_arguments(Instruction* currentInstruction)
{
int dst = currentInstruction[1].u.operand;
- Jump argsNotCreated = branch32(Equal, tagFor(unmodifiedArgumentsRegister(dst)), Imm32(JSValue::EmptyValueTag));
+ Jump argsNotCreated = branch32(Equal, tagFor(unmodifiedArgumentsRegister(dst)), TrustedImm32(JSValue::EmptyValueTag));
JITStubCall stubCall(this, cti_op_tear_off_arguments);
stubCall.addArgument(unmodifiedArgumentsRegister(dst));
stubCall.call();
@@ -723,7 +721,7 @@ void JIT::emit_op_new_array(Instruction* currentInstruction)
void JIT::emit_op_resolve(Instruction* currentInstruction)
{
JITStubCall stubCall(this, cti_op_resolve);
- stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
+ stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
stubCall.call(currentInstruction[1].u.operand);
}
@@ -734,8 +732,8 @@ void JIT::emit_op_to_primitive(Instruction* currentInstruction)
emitLoad(src, regT1, regT0);
- Jump isImm = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));
- addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr)));
+ Jump isImm = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag));
+ addSlowCase(branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsStringVPtr)));
isImm.link(this);
if (dst != src)
@@ -765,7 +763,7 @@ void JIT::emit_op_strcat(Instruction* currentInstruction)
void JIT::emit_op_resolve_base(Instruction* currentInstruction)
{
JITStubCall stubCall(this, currentInstruction[3].u.operand ? cti_op_resolve_base_strict_put : cti_op_resolve_base);
- stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
+ stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
stubCall.call(currentInstruction[1].u.operand);
}
@@ -773,14 +771,14 @@ void JIT::emit_op_ensure_property_exists(Instruction* currentInstruction)
{
JITStubCall stubCall(this, cti_op_ensure_property_exists);
stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
- stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
+ stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
stubCall.call(currentInstruction[1].u.operand);
}
void JIT::emit_op_resolve_skip(Instruction* currentInstruction)
{
JITStubCall stubCall(this, cti_op_resolve_skip);
- stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
+ stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
stubCall.call(currentInstruction[1].u.operand);
}
@@ -797,9 +795,9 @@ void JIT::emit_op_resolve_global(Instruction* currentInstruction, bool dynamic)
void* offsetAddr = &(m_codeBlock->globalResolveInfo(currentIndex).offset);
// Verify structure.
- move(ImmPtr(globalObject), regT0);
+ move(TrustedImmPtr(globalObject), regT0);
loadPtr(structureAddress, regT1);
- addSlowCase(branchPtr(NotEqual, regT1, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure))));
+ addSlowCase(branchPtr(NotEqual, regT1, Address(regT0, JSCell::structureOffset())));
// Load property.
loadPtr(Address(regT0, OBJECT_OFFSETOF(JSGlobalObject, m_propertyStorage)), regT2);
@@ -819,7 +817,7 @@ void JIT::emitSlow_op_resolve_global(Instruction* currentInstruction, Vector<Slo
linkSlowCase(iter);
JITStubCall stubCall(this, cti_op_resolve_global);
- stubCall.addArgument(ImmPtr(ident));
+ stubCall.addArgument(TrustedImmPtr(ident));
stubCall.addArgument(Imm32(currentIndex));
stubCall.call(dst);
}
@@ -831,9 +829,9 @@ void JIT::emit_op_not(Instruction* currentInstruction)
emitLoadTag(src, regT0);
- xor32(Imm32(JSValue::FalseTag), regT0);
- addSlowCase(branchTest32(NonZero, regT0, Imm32(~1)));
- xor32(Imm32(JSValue::TrueTag), regT0);
+ xor32(TrustedImm32(JSValue::FalseTag), regT0);
+ addSlowCase(branchTest32(NonZero, regT0, TrustedImm32(~1)));
+ xor32(TrustedImm32(JSValue::TrueTag), regT0);
emitStoreBool(dst, regT0, (dst == src));
}
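
The three xor instructions above rely on TrueTag and FalseTag differing only in bit 0. A hedged sketch with illustrative tag values; only the TrueTag == (FalseTag ^ 1) relation matters, not the exact constants.

    #include <cassert>
    #include <cstdint>
    enum : uint32_t { FalseTag = 0xfffffffa, TrueTag = 0xfffffffb }; // illustrative values
    static uint32_t notBooleanTag(uint32_t tag, bool& slowCase)
    {
        tag ^= FalseTag;               // FalseTag -> 0, TrueTag -> 1, others -> noise
        slowCase = (tag & ~1u) != 0;   // any bit above bit 0 means "not a boolean"
        return tag ^ TrueTag;          // 0 -> TrueTag, 1 -> FalseTag
    }
    int main()
    {
        bool slow = true;
        assert(notBooleanTag(TrueTag, slow) == FalseTag && !slow);
        assert(notBooleanTag(FalseTag, slow) == TrueTag && !slow);
    }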
@@ -857,17 +855,17 @@ void JIT::emit_op_jfalse(Instruction* currentInstruction)
emitLoad(cond, regT1, regT0);
- Jump isTrue = branch32(Equal, regT1, Imm32(JSValue::TrueTag));
- addJump(branch32(Equal, regT1, Imm32(JSValue::FalseTag)), target);
+ Jump isTrue = branch32(Equal, regT1, TrustedImm32(JSValue::TrueTag));
+ addJump(branch32(Equal, regT1, TrustedImm32(JSValue::FalseTag)), target);
- Jump isNotInteger = branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag));
- Jump isTrue2 = branch32(NotEqual, regT0, Imm32(0));
+ Jump isNotInteger = branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag));
+ Jump isTrue2 = branch32(NotEqual, regT0, TrustedImm32(0));
addJump(jump(), target);
if (supportsFloatingPoint()) {
isNotInteger.link(this);
- addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));
+ addSlowCase(branch32(Above, regT1, TrustedImm32(JSValue::LowestTag)));
emitLoadDouble(cond, fpRegT0);
addJump(branchDoubleZeroOrNaN(fpRegT0, fpRegT1), target);
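
A hedged sketch of the dispatch order the fast path above follows; the tag values are illustrative, and in this encoding any tag at or below LowestTag marks a double.

    #include <cmath>
    #include <cstdint>
    enum : uint32_t { LowestTag = 0xfffffff8, FalseTag = 0xfffffffa,
                      TrueTag = 0xfffffffb, Int32Tag = 0xfffffffd };
    // Returns whether the jfalse branch is taken; cells/strings defer to the slow case.
    static bool jfalseTaken(uint32_t tag, uint32_t payload, double d, bool& slowCase)
    {
        slowCase = false;
        if (tag == TrueTag) return false;                     // definitely truthy
        if (tag == FalseTag) return true;                     // definitely falsy
        if (tag == Int32Tag) return payload == 0;             // only 0 is falsy
        if (tag <= LowestTag) return d == 0 || std::isnan(d); // the double path
        slowCase = true;
        return false;
    }
    int main() { bool s; return jfalseTaken(Int32Tag, 0, 0.0, s) ? 0 : 1; }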
@@ -897,17 +895,17 @@ void JIT::emit_op_jtrue(Instruction* currentInstruction)
emitLoad(cond, regT1, regT0);
- Jump isFalse = branch32(Equal, regT1, Imm32(JSValue::FalseTag));
- addJump(branch32(Equal, regT1, Imm32(JSValue::TrueTag)), target);
+ Jump isFalse = branch32(Equal, regT1, TrustedImm32(JSValue::FalseTag));
+ addJump(branch32(Equal, regT1, TrustedImm32(JSValue::TrueTag)), target);
- Jump isNotInteger = branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag));
- Jump isFalse2 = branch32(Equal, regT0, Imm32(0));
+ Jump isNotInteger = branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag));
+ Jump isFalse2 = branch32(Equal, regT0, TrustedImm32(0));
addJump(jump(), target);
if (supportsFloatingPoint()) {
isNotInteger.link(this);
- addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));
+ addSlowCase(branch32(Above, regT1, TrustedImm32(JSValue::LowestTag)));
emitLoadDouble(cond, fpRegT0);
addJump(branchDoubleNonZero(fpRegT0, fpRegT1), target);
@@ -937,11 +935,11 @@ void JIT::emit_op_jeq_null(Instruction* currentInstruction)
emitLoad(src, regT1, regT0);
- Jump isImmediate = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));
+ Jump isImmediate = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag));
// First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
- addJump(branchTest8(NonZero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined)), target);
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
+ addJump(branchTest8(NonZero, Address(regT2, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)), target);
Jump wasNotImmediate = jump();
@@ -949,7 +947,7 @@ void JIT::emit_op_jeq_null(Instruction* currentInstruction)
isImmediate.link(this);
ASSERT((JSValue::UndefinedTag + 1 == JSValue::NullTag) && !(JSValue::NullTag + 1));
- addJump(branch32(AboveOrEqual, regT1, Imm32(JSValue::UndefinedTag)), target);
+ addJump(branch32(AboveOrEqual, regT1, TrustedImm32(JSValue::UndefinedTag)), target);
wasNotImmediate.link(this);
}
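
The ASSERT documents the trick this relies on. A hedged sketch with tag values matching the asserted relations, NullTag == 0xffffffff and UndefinedTag == NullTag - 1, so a single unsigned compare classifies a non-cell as null-or-undefined.

    #include <cassert>
    #include <cstdint>
    enum : uint32_t { Int32Tag = 0xfffffffd, UndefinedTag = 0xfffffffe, NullTag = 0xffffffff };
    static bool isNullOrUndefined(uint32_t tag)
    {
        return tag >= UndefinedTag; // the branch32(AboveOrEqual, ...) above
    }
    int main()
    {
        assert(isNullOrUndefined(NullTag) && isNullOrUndefined(UndefinedTag));
        assert(!isNullOrUndefined(Int32Tag));
    }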
@@ -961,11 +959,11 @@ void JIT::emit_op_jneq_null(Instruction* currentInstruction)
emitLoad(src, regT1, regT0);
- Jump isImmediate = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));
+ Jump isImmediate = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag));
// First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
- addJump(branchTest8(Zero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined)), target);
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
+ addJump(branchTest8(Zero, Address(regT2, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)), target);
Jump wasNotImmediate = jump();
@@ -973,7 +971,7 @@ void JIT::emit_op_jneq_null(Instruction* currentInstruction)
isImmediate.link(this);
ASSERT((JSValue::UndefinedTag + 1 == JSValue::NullTag) && !(JSValue::NullTag + 1));
- addJump(branch32(Below, regT1, Imm32(JSValue::UndefinedTag)), target);
+ addJump(branch32(Below, regT1, TrustedImm32(JSValue::UndefinedTag)), target);
wasNotImmediate.link(this);
}
@@ -981,19 +979,19 @@ void JIT::emit_op_jneq_null(Instruction* currentInstruction)
void JIT::emit_op_jneq_ptr(Instruction* currentInstruction)
{
unsigned src = currentInstruction[1].u.operand;
- JSCell* ptr = currentInstruction[2].u.jsCell;
+ JSCell* ptr = currentInstruction[2].u.jsCell.get();
unsigned target = currentInstruction[3].u.operand;
emitLoad(src, regT1, regT0);
- addJump(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)), target);
- addJump(branchPtr(NotEqual, regT0, ImmPtr(ptr)), target);
+ addJump(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)), target);
+ addJump(branchPtr(NotEqual, regT0, TrustedImmPtr(ptr)), target);
}
void JIT::emit_op_jsr(Instruction* currentInstruction)
{
int retAddrDst = currentInstruction[1].u.operand;
int target = currentInstruction[2].u.operand;
- DataLabelPtr storeLocation = storePtrWithPatch(ImmPtr(0), Address(callFrameRegister, sizeof(Register) * retAddrDst));
+ DataLabelPtr storeLocation = storePtrWithPatch(TrustedImmPtr(0), Address(callFrameRegister, sizeof(Register) * retAddrDst));
addJump(jump(), target);
m_jsrSites.append(JSRInfo(storeLocation, label()));
}
@@ -1011,11 +1009,11 @@ void JIT::emit_op_eq(Instruction* currentInstruction)
emitLoad2(src1, regT1, regT0, src2, regT3, regT2);
addSlowCase(branch32(NotEqual, regT1, regT3));
- addSlowCase(branch32(Equal, regT1, Imm32(JSValue::CellTag)));
- addSlowCase(branch32(Below, regT1, Imm32(JSValue::LowestTag)));
+ addSlowCase(branch32(Equal, regT1, TrustedImm32(JSValue::CellTag)));
+ addSlowCase(branch32(Below, regT1, TrustedImm32(JSValue::LowestTag)));
set8Compare32(Equal, regT0, regT2, regT0);
- or32(Imm32(JSValue::FalseTag), regT0);
+ or32(TrustedImm32(JSValue::FalseTag), regT0);
emitStoreBool(dst, regT0);
}
@@ -1032,8 +1030,8 @@ void JIT::emitSlow_op_eq(Instruction* currentInstruction, Vector<SlowCaseEntry>:
genericCase.append(getSlowCase(iter)); // tags not equal
linkSlowCase(iter); // tags equal and JSCell
- genericCase.append(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr)));
- genericCase.append(branchPtr(NotEqual, Address(regT2), ImmPtr(m_globalData->jsStringVPtr)));
+ genericCase.append(branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsStringVPtr)));
+ genericCase.append(branchPtr(NotEqual, Address(regT2), TrustedImmPtr(m_globalData->jsStringVPtr)));
// String case.
JITStubCall stubCallEqStrings(this, cti_op_eq_strings);
@@ -1051,7 +1049,7 @@ void JIT::emitSlow_op_eq(Instruction* currentInstruction, Vector<SlowCaseEntry>:
stubCallEq.call(regT0);
storeResult.link(this);
- or32(Imm32(JSValue::FalseTag), regT0);
+ or32(TrustedImm32(JSValue::FalseTag), regT0);
emitStoreBool(dst, regT0);
}
@@ -1063,11 +1061,11 @@ void JIT::emit_op_neq(Instruction* currentInstruction)
emitLoad2(src1, regT1, regT0, src2, regT3, regT2);
addSlowCase(branch32(NotEqual, regT1, regT3));
- addSlowCase(branch32(Equal, regT1, Imm32(JSValue::CellTag)));
- addSlowCase(branch32(Below, regT1, Imm32(JSValue::LowestTag)));
+ addSlowCase(branch32(Equal, regT1, TrustedImm32(JSValue::CellTag)));
+ addSlowCase(branch32(Below, regT1, TrustedImm32(JSValue::LowestTag)));
set8Compare32(NotEqual, regT0, regT2, regT0);
- or32(Imm32(JSValue::FalseTag), regT0);
+ or32(TrustedImm32(JSValue::FalseTag), regT0);
emitStoreBool(dst, regT0);
}
@@ -1082,8 +1080,8 @@ void JIT::emitSlow_op_neq(Instruction* currentInstruction, Vector<SlowCaseEntry>
genericCase.append(getSlowCase(iter)); // tags not equal
linkSlowCase(iter); // tags equal and JSCell
- genericCase.append(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr)));
- genericCase.append(branchPtr(NotEqual, Address(regT2), ImmPtr(m_globalData->jsStringVPtr)));
+ genericCase.append(branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsStringVPtr)));
+ genericCase.append(branchPtr(NotEqual, Address(regT2), TrustedImmPtr(m_globalData->jsStringVPtr)));
// String case.
JITStubCall stubCallEqStrings(this, cti_op_eq_strings);
@@ -1101,8 +1099,8 @@ void JIT::emitSlow_op_neq(Instruction* currentInstruction, Vector<SlowCaseEntry>
stubCallEq.call(regT0);
storeResult.link(this);
- xor32(Imm32(0x1), regT0);
- or32(Imm32(JSValue::FalseTag), regT0);
+ xor32(TrustedImm32(0x1), regT0);
+ or32(TrustedImm32(JSValue::FalseTag), regT0);
emitStoreBool(dst, regT0);
}
@@ -1119,15 +1117,15 @@ void JIT::compileOpStrictEq(Instruction* currentInstruction, CompileOpStrictEqTy
// cells and/or Int32s.
move(regT0, regT2);
and32(regT1, regT2);
- addSlowCase(branch32(Below, regT2, Imm32(JSValue::LowestTag)));
- addSlowCase(branch32(AboveOrEqual, regT2, Imm32(JSValue::CellTag)));
+ addSlowCase(branch32(Below, regT2, TrustedImm32(JSValue::LowestTag)));
+ addSlowCase(branch32(AboveOrEqual, regT2, TrustedImm32(JSValue::CellTag)));
if (type == OpStrictEq)
set8Compare32(Equal, regT0, regT1, regT0);
else
set8Compare32(NotEqual, regT0, regT1, regT0);
- or32(Imm32(JSValue::FalseTag), regT0);
+ or32(TrustedImm32(JSValue::FalseTag), regT0);
emitStoreBool(dst, regT0);
}
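
A hedged sketch of the tag filter above, with illustrative values. AND only clears bits, so the result is at most either tag: if either operand is a double (tagged below LowestTag), the AND also lands below LowestTag; and the AND reaches CellTag or above only when both tags sit in the top range, which this fast path leaves to the slow case.

    #include <cstdint>
    enum : uint32_t { LowestTag = 0xfffffff8, CellTag = 0xfffffffc }; // illustrative values
    static bool strictEqNeedsSlowCase(uint32_t tag1, uint32_t tag2)
    {
        uint32_t combined = tag1 & tag2;
        return combined < LowestTag  // at least one operand is a double
            || combined >= CellTag;  // both operands need more than a tag compare
    }
    int main() { return strictEqNeedsSlowCase(CellTag, CellTag) ? 0 : 1; }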
@@ -1178,22 +1176,22 @@ void JIT::emit_op_eq_null(Instruction* currentInstruction)
unsigned src = currentInstruction[2].u.operand;
emitLoad(src, regT1, regT0);
- Jump isImmediate = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));
+ Jump isImmediate = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag));
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT1);
- set32Test8(NonZero, Address(regT1, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined), regT1);
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT1);
+ set32Test8(NonZero, Address(regT1, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined), regT1);
Jump wasNotImmediate = jump();
isImmediate.link(this);
- set8Compare32(Equal, regT1, Imm32(JSValue::NullTag), regT2);
- set8Compare32(Equal, regT1, Imm32(JSValue::UndefinedTag), regT1);
+ set8Compare32(Equal, regT1, TrustedImm32(JSValue::NullTag), regT2);
+ set8Compare32(Equal, regT1, TrustedImm32(JSValue::UndefinedTag), regT1);
or32(regT2, regT1);
wasNotImmediate.link(this);
- or32(Imm32(JSValue::FalseTag), regT1);
+ or32(TrustedImm32(JSValue::FalseTag), regT1);
emitStoreBool(dst, regT1);
}
@@ -1204,22 +1202,22 @@ void JIT::emit_op_neq_null(Instruction* currentInstruction)
unsigned src = currentInstruction[2].u.operand;
emitLoad(src, regT1, regT0);
- Jump isImmediate = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));
+ Jump isImmediate = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag));
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT1);
- set32Test8(Zero, Address(regT1, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined), regT1);
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT1);
+ set32Test8(Zero, Address(regT1, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined), regT1);
Jump wasNotImmediate = jump();
isImmediate.link(this);
- set8Compare32(NotEqual, regT1, Imm32(JSValue::NullTag), regT2);
- set8Compare32(NotEqual, regT1, Imm32(JSValue::UndefinedTag), regT1);
+ set8Compare32(NotEqual, regT1, TrustedImm32(JSValue::NullTag), regT2);
+ set8Compare32(NotEqual, regT1, TrustedImm32(JSValue::UndefinedTag), regT1);
and32(regT2, regT1);
wasNotImmediate.link(this);
- or32(Imm32(JSValue::FalseTag), regT1);
+ or32(TrustedImm32(JSValue::FalseTag), regT1);
emitStoreBool(dst, regT1);
}
@@ -1227,7 +1225,7 @@ void JIT::emit_op_neq_null(Instruction* currentInstruction)
void JIT::emit_op_resolve_with_base(Instruction* currentInstruction)
{
JITStubCall stubCall(this, cti_op_resolve_with_base);
- stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
+ stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
stubCall.call(currentInstruction[2].u.operand);
}
@@ -1235,7 +1233,7 @@ void JIT::emit_op_resolve_with_base(Instruction* currentInstruction)
void JIT::emit_op_new_func_exp(Instruction* currentInstruction)
{
JITStubCall stubCall(this, cti_op_new_func_exp);
- stubCall.addArgument(ImmPtr(m_codeBlock->functionExpr(currentInstruction[2].u.operand)));
+ stubCall.addArgument(TrustedImmPtr(m_codeBlock->functionExpr(currentInstruction[2].u.operand)));
stubCall.call(currentInstruction[1].u.operand);
}
@@ -1265,10 +1263,10 @@ void JIT::emit_op_get_pnames(Instruction* currentInstruction)
emitLoad(base, regT1, regT0);
if (!m_codeBlock->isKnownNotImmediate(base))
- isNotObject.append(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));
+ isNotObject.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
if (base != m_codeBlock->thisRegister() || m_codeBlock->isStrictMode()) {
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
- isNotObject.append(branch8(NotEqual, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_type)), Imm32(ObjectType)));
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
+ isNotObject.append(branch8(NotEqual, Address(regT2, Structure::typeInfoTypeOffset()), TrustedImm32(ObjectType)));
}
// We could inline the case where you have a valid cache, but
@@ -1278,13 +1276,15 @@ void JIT::emit_op_get_pnames(Instruction* currentInstruction)
getPnamesStubCall.addArgument(regT0);
getPnamesStubCall.call(dst);
load32(Address(regT0, OBJECT_OFFSETOF(JSPropertyNameIterator, m_jsStringsSize)), regT3);
- store32(Imm32(0), addressFor(i));
- store32(regT3, addressFor(size));
+ store32(TrustedImm32(Int32Tag), intTagFor(i));
+ store32(TrustedImm32(0), intPayloadFor(i));
+ store32(TrustedImm32(Int32Tag), intTagFor(size));
+ store32(regT3, payloadFor(size));
Jump end = jump();
isNotObject.link(this);
- addJump(branch32(Equal, regT1, Imm32(JSValue::NullTag)), breakTarget);
- addJump(branch32(Equal, regT1, Imm32(JSValue::UndefinedTag)), breakTarget);
+ addJump(branch32(Equal, regT1, TrustedImm32(JSValue::NullTag)), breakTarget);
+ addJump(branch32(Equal, regT1, TrustedImm32(JSValue::UndefinedTag)), breakTarget);
JITStubCall toObjectStubCall(this, cti_to_object);
toObjectStubCall.addArgument(regT1, regT0);
toObjectStubCall.call(base);
@@ -1305,25 +1305,25 @@ void JIT::emit_op_next_pname(Instruction* currentInstruction)
JumpList callHasProperty;
Label begin(this);
- load32(addressFor(i), regT0);
- Jump end = branch32(Equal, regT0, addressFor(size));
+ load32(intPayloadFor(i), regT0);
+ Jump end = branch32(Equal, regT0, intPayloadFor(size));
// Grab key @ i
- loadPtr(addressFor(it), regT1);
+ loadPtr(payloadFor(it), regT1);
loadPtr(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_jsStrings)), regT2);
load32(BaseIndex(regT2, regT0, TimesEight), regT2);
- store32(Imm32(JSValue::CellTag), tagFor(dst));
+ store32(TrustedImm32(JSValue::CellTag), tagFor(dst));
store32(regT2, payloadFor(dst));
// Increment i
- add32(Imm32(1), regT0);
- store32(regT0, addressFor(i));
+ add32(TrustedImm32(1), regT0);
+ store32(regT0, intPayloadFor(i));
// Verify that i is valid:
- loadPtr(addressFor(base), regT0);
+ loadPtr(payloadFor(base), regT0);
// Test base's structure
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
callHasProperty.append(branchPtr(NotEqual, regT2, Address(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructure)))));
// Test base's prototype chain
@@ -1332,11 +1332,11 @@ void JIT::emit_op_next_pname(Instruction* currentInstruction)
addJump(branchTestPtr(Zero, Address(regT3)), target);
Label checkPrototype(this);
- callHasProperty.append(branch32(Equal, Address(regT2, OBJECT_OFFSETOF(Structure, m_prototype) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), Imm32(JSValue::NullTag)));
- loadPtr(Address(regT2, OBJECT_OFFSETOF(Structure, m_prototype) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT2);
- loadPtr(Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
+ callHasProperty.append(branch32(Equal, Address(regT2, Structure::prototypeOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), TrustedImm32(JSValue::NullTag)));
+ loadPtr(Address(regT2, Structure::prototypeOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT2);
+ loadPtr(Address(regT2, JSCell::structureOffset()), regT2);
callHasProperty.append(branchPtr(NotEqual, regT2, Address(regT3)));
- addPtr(Imm32(sizeof(Structure*)), regT3);
+ addPtr(TrustedImm32(sizeof(Structure*)), regT3);
branchTestPtr(NonZero, Address(regT3)).linkTo(checkPrototype, this);
// Continue loop.
@@ -1377,8 +1377,8 @@ void JIT::emit_op_to_jsnumber(Instruction* currentInstruction)
emitLoad(src, regT1, regT0);
- Jump isInt32 = branch32(Equal, regT1, Imm32(JSValue::Int32Tag));
- addSlowCase(branch32(AboveOrEqual, regT1, Imm32(JSValue::EmptyValueTag)));
+ Jump isInt32 = branch32(Equal, regT1, TrustedImm32(JSValue::Int32Tag));
+ addSlowCase(branch32(AboveOrEqual, regT1, TrustedImm32(JSValue::EmptyValueTag)));
isInt32.link(this);
if (src != dst)
@@ -1400,7 +1400,7 @@ void JIT::emitSlow_op_to_jsnumber(Instruction* currentInstruction, Vector<SlowCa
void JIT::emit_op_push_new_scope(Instruction* currentInstruction)
{
JITStubCall stubCall(this, cti_op_push_new_scope);
- stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
+ stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
stubCall.addArgument(currentInstruction[3].u.operand);
stubCall.call(currentInstruction[1].u.operand);
}
@@ -1414,8 +1414,8 @@ void JIT::emit_op_catch(Instruction* currentInstruction)
loadPtr(Address(stackPointerRegister, OBJECT_OFFSETOF(struct JITStackFrame, globalData)), regT3);
load32(Address(regT3, OBJECT_OFFSETOF(JSGlobalData, exception) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
load32(Address(regT3, OBJECT_OFFSETOF(JSGlobalData, exception) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
- store32(Imm32(JSValue().payload()), Address(regT3, OBJECT_OFFSETOF(JSGlobalData, exception) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
- store32(Imm32(JSValue().tag()), Address(regT3, OBJECT_OFFSETOF(JSGlobalData, exception) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
+ store32(TrustedImm32(JSValue().payload()), Address(regT3, OBJECT_OFFSETOF(JSGlobalData, exception) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
+ store32(TrustedImm32(JSValue().tag()), Address(regT3, OBJECT_OFFSETOF(JSGlobalData, exception) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
unsigned exception = currentInstruction[1].u.operand;
emitStore(exception, regT1, regT0);
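
A hedged sketch of what the loads and stores above accomplish: the handler takes the pending exception out of the global data and resets the slot to the empty value so it no longer reads as pending.

    #include <cstdint>
    struct ValueBits { uint32_t payload, tag; };      // u.asBits in the real JSValue
    struct GlobalDataSketch { ValueBits exception; };
    static ValueBits takeException(GlobalDataSketch& g, uint32_t emptyValueTag)
    {
        ValueBits pending = g.exception;
        g.exception = { 0, emptyValueTag };           // JSValue(), the empty value
        return pending;                               // stored to the catch register
    }
    int main() { GlobalDataSketch g = { { 7, 1 } }; return takeException(g, 0).payload == 7 ? 0 : 1; }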
@@ -1520,7 +1520,7 @@ void JIT::emit_op_create_activation(Instruction* currentInstruction)
{
unsigned activation = currentInstruction[1].u.operand;
- Jump activationCreated = branch32(NotEqual, tagFor(activation), Imm32(JSValue::EmptyValueTag));
+ Jump activationCreated = branch32(NotEqual, tagFor(activation), TrustedImm32(JSValue::EmptyValueTag));
JITStubCall(this, cti_op_push_activation).call(activation);
activationCreated.link(this);
}
@@ -1529,7 +1529,7 @@ void JIT::emit_op_create_arguments(Instruction* currentInstruction)
{
unsigned dst = currentInstruction[1].u.operand;
- Jump argsCreated = branch32(NotEqual, tagFor(dst), Imm32(JSValue::EmptyValueTag));
+ Jump argsCreated = branch32(NotEqual, tagFor(dst), TrustedImm32(JSValue::EmptyValueTag));
if (m_codeBlock->m_numParameters == 1)
JITStubCall(this, cti_op_create_arguments_no_params).call();
@@ -1571,10 +1571,10 @@ void JIT::emit_op_convert_this(Instruction* currentInstruction)
emitLoad(thisRegister, regT1, regT0);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
- addSlowCase(branchTest8(NonZero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(NeedsThisConversion)));
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
+ addSlowCase(branchTest8(NonZero, Address(regT2, Structure::typeInfoFlagsOffset()), TrustedImm32(NeedsThisConversion)));
map(m_bytecodeOffset + OPCODE_LENGTH(op_convert_this), thisRegister, regT1, regT0);
}
@@ -1585,14 +1585,14 @@ void JIT::emit_op_convert_this_strict(Instruction* currentInstruction)
emitLoad(thisRegister, regT1, regT0);
- Jump notNull = branch32(NotEqual, regT1, Imm32(JSValue::EmptyValueTag));
+ Jump notNull = branch32(NotEqual, regT1, TrustedImm32(JSValue::EmptyValueTag));
emitStore(thisRegister, jsNull());
Jump setThis = jump();
notNull.link(this);
- Jump isImmediate = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
- Jump notAnObject = branch8(NotEqual, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_type)), Imm32(ObjectType));
- addSlowCase(branchTest8(NonZero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(NeedsThisConversion)));
+ Jump isImmediate = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag));
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
+ Jump notAnObject = branch8(NotEqual, Address(regT2, Structure::typeInfoTypeOffset()), TrustedImm32(ObjectType));
+ addSlowCase(branchTest8(NonZero, Address(regT2, Structure::typeInfoFlagsOffset()), TrustedImm32(NeedsThisConversion)));
isImmediate.link(this);
notAnObject.link(this);
setThis.link(this);
@@ -1648,9 +1648,9 @@ void JIT::emit_op_get_arguments_length(Instruction* currentInstruction)
{
int dst = currentInstruction[1].u.operand;
int argumentsRegister = currentInstruction[2].u.operand;
- addSlowCase(branch32(NotEqual, tagFor(argumentsRegister), Imm32(JSValue::EmptyValueTag)));
+ addSlowCase(branch32(NotEqual, tagFor(argumentsRegister), TrustedImm32(JSValue::EmptyValueTag)));
emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT0);
- sub32(Imm32(1), regT0);
+ sub32(TrustedImm32(1), regT0);
emitStoreInt32(dst, regT0);
}
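
A hedged sketch: the call frame's ArgumentCount includes the 'this' slot, so once the slow case (a materialized arguments object) is ruled out, arguments.length is just a load and a decrement.

    static int argumentsLength(int argumentCountIncludingThis)
    {
        return argumentCountIncludingThis - 1; // the sub32(TrustedImm32(1), ...) above
    }
    int main() { return argumentsLength(3) == 2 ? 0 : 1; }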
@@ -1663,7 +1663,7 @@ void JIT::emitSlow_op_get_arguments_length(Instruction* currentInstruction, Vect
JITStubCall stubCall(this, cti_op_get_by_id_generic);
stubCall.addArgument(base);
- stubCall.addArgument(ImmPtr(&(m_codeBlock->identifier(ident))));
+ stubCall.addArgument(TrustedImmPtr(&(m_codeBlock->identifier(ident))));
stubCall.call(dst);
}
@@ -1672,10 +1672,10 @@ void JIT::emit_op_get_argument_by_val(Instruction* currentInstruction)
int dst = currentInstruction[1].u.operand;
int argumentsRegister = currentInstruction[2].u.operand;
int property = currentInstruction[3].u.operand;
- addSlowCase(branch32(NotEqual, tagFor(argumentsRegister), Imm32(JSValue::EmptyValueTag)));
+ addSlowCase(branch32(NotEqual, tagFor(argumentsRegister), TrustedImm32(JSValue::EmptyValueTag)));
emitLoad(property, regT1, regT2);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- add32(Imm32(1), regT2);
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+ add32(TrustedImm32(1), regT2);
// regT2 now contains the integer index of the argument we want, with the 'this' slot counted at index 0
emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT3);
addSlowCase(branch32(AboveOrEqual, regT2, regT3));
@@ -1692,7 +1692,7 @@ void JIT::emit_op_get_argument_by_val(Instruction* currentInstruction)
}
addPtr(Imm32(static_cast<unsigned>(-(RegisterFile::CallFrameHeaderSize + numArgs) * sizeof(Register))), callFrameRegister, regT1);
- mul32(Imm32(sizeof(Register)), regT3, regT3);
+ mul32(TrustedImm32(sizeof(Register)), regT3, regT3);
subPtr(regT3, regT1);
loadPtr(BaseIndex(regT1, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
loadPtr(BaseIndex(regT1, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
@@ -1734,17 +1734,17 @@ void JIT::softModulo()
push(regT3);
move(regT2, regT3);
move(regT0, regT2);
- move(Imm32(0), regT1);
+ move(TrustedImm32(0), regT1);
// Check for negative result remainder
- Jump positiveRegT3 = branch32(GreaterThanOrEqual, regT3, Imm32(0));
+ Jump positiveRegT3 = branch32(GreaterThanOrEqual, regT3, TrustedImm32(0));
neg32(regT3);
- xor32(Imm32(1), regT1);
+ xor32(TrustedImm32(1), regT1);
positiveRegT3.link(this);
- Jump positiveRegT2 = branch32(GreaterThanOrEqual, regT2, Imm32(0));
+ Jump positiveRegT2 = branch32(GreaterThanOrEqual, regT2, TrustedImm32(0));
neg32(regT2);
- xor32(Imm32(2), regT1);
+ xor32(TrustedImm32(2), regT1);
positiveRegT2.link(this);
// Save the condition for negative remainder
@@ -1754,7 +1754,7 @@ void JIT::softModulo()
// Power of two fast case
move(regT3, regT0);
- sub32(Imm32(1), regT0);
+ sub32(TrustedImm32(1), regT0);
Jump powerOfTwo = branchTest32(NotEqual, regT0, regT3);
and32(regT0, regT2);
powerOfTwo.link(this);
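
A hedged sketch of this fast case: once both operands have been made non-negative, a divisor n with a single bit set means (n - 1) is an all-ones mask and x % n == x & (n - 1); the earlier xor32(1)/xor32(2) bookkeeping records which operands were negated so the remainder's sign can be restored at the end.

    #include <cassert>
    static int moduloPowerOfTwoFastCase(int x, int n) // both already non-negative
    {
        if ((n & (n - 1)) == 0)  // the branchTest32 above, inverted
            return x & (n - 1);
        return x % n;            // stand-in for the shift-and-subtract slow path
    }
    int main() { assert(moduloPowerOfTwoFastCase(29, 8) == 5); }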
@@ -1767,10 +1767,10 @@ void JIT::softModulo()
countLeadingZeros32(regT3, regT1);
sub32(regT0, regT1);
- Jump useFullTable = branch32(Equal, regT1, Imm32(31));
+ Jump useFullTable = branch32(Equal, regT1, TrustedImm32(31));
neg32(regT1);
- add32(Imm32(31), regT1);
+ add32(TrustedImm32(31), regT1);
int elementSizeByShift = -1;
#if CPU(ARM)
@@ -1805,7 +1805,7 @@ void JIT::softModulo()
// Check for negative remainder
pop(regT1);
- Jump positiveResult = branch32(Equal, regT1, Imm32(0));
+ Jump positiveResult = branch32(Equal, regT1, TrustedImm32(0));
neg32(regT2);
positiveResult.link(this);
diff --git a/Source/JavaScriptCore/jit/JITPropertyAccess.cpp b/Source/JavaScriptCore/jit/JITPropertyAccess.cpp
index b497319..68f8dda 100644
--- a/Source/JavaScriptCore/jit/JITPropertyAccess.cpp
+++ b/Source/JavaScriptCore/jit/JITPropertyAccess.cpp
@@ -54,7 +54,7 @@ JIT::CodePtr JIT::stringGetByValStubGenerator(JSGlobalData* globalData, Executab
{
JSInterfaceJIT jit;
JumpList failures;
- failures.append(jit.branchPtr(NotEqual, Address(regT0), ImmPtr(globalData->jsStringVPtr)));
+ failures.append(jit.branchPtr(NotEqual, Address(regT0), TrustedImmPtr(globalData->jsStringVPtr)));
failures.append(jit.branchTest32(NonZero, Address(regT0, OBJECT_OFFSETOF(JSString, m_fiberCount))));
// Load string length to regT1, and start the process of loading the data pointer into regT0
@@ -68,13 +68,13 @@ JIT::CodePtr JIT::stringGetByValStubGenerator(JSGlobalData* globalData, Executab
// Load the character
jit.load16(BaseIndex(regT0, regT1, TimesTwo, 0), regT0);
- failures.append(jit.branch32(AboveOrEqual, regT0, Imm32(0x100)));
- jit.move(ImmPtr(globalData->smallStrings.singleCharacterStrings()), regT1);
+ failures.append(jit.branch32(AboveOrEqual, regT0, TrustedImm32(0x100)));
+ jit.move(TrustedImmPtr(globalData->smallStrings.singleCharacterStrings()), regT1);
jit.loadPtr(BaseIndex(regT1, regT0, ScalePtr, 0), regT0);
jit.ret();
failures.link(&jit);
- jit.move(Imm32(0), regT0);
+ jit.move(TrustedImm32(0), regT0);
jit.ret();
LinkBuffer patchBuffer(&jit, pool, 0);
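
A hedged sketch of the stub's fast path: after the vptr and fiber-count checks rule out ropes, a character below 0x100 is served from the prebuilt single-character string table, and anything else returns 0 so the caller takes the slow case.

    #include <cassert>
    #include <cstdint>
    static const char* singleCharacterLookup(uint16_t c, const char* const table[256])
    {
        return c < 0x100 ? table[c] : nullptr; // nullptr => fall back to the stub call
    }
    int main()
    {
        static const char* table[256] = {};    // stand-in for smallStrings' table
        table['a'] = "a";
        assert(singleCharacterLookup('a', table));
        assert(!singleCharacterLookup(0x263a, table));
    }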
@@ -99,10 +99,10 @@ void JIT::emit_op_get_by_val(Instruction* currentInstruction)
zeroExtend32ToPtr(regT1, regT1);
emitJumpSlowCaseIfNotJSCell(regT0, base);
- addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));
+ addSlowCase(branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsArrayVPtr)));
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT2);
- addSlowCase(branch32(AboveOrEqual, regT1, Address(regT0, OBJECT_OFFSETOF(JSArray, m_vectorLength))));
+ loadPtr(Address(regT0, JSArray::storageOffset()), regT2);
+ addSlowCase(branch32(AboveOrEqual, regT1, Address(regT0, JSArray::vectorLengthOffset())));
loadPtr(BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), regT0);
addSlowCase(branchTestPtr(Zero, regT0));
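
A hedged sketch of the fast-path checks above, in order: the base must really be a JSArray (the vptr compare), the index must be inside the allocated vector, and the slot must not be a hole; a null entry defers to the slow case.

    struct ArraySketch { unsigned vectorLength; void** vector; }; // hypothetical layout
    static void* getByValFastCase(const ArraySketch& array, unsigned index, bool& slowCase)
    {
        slowCase = index >= array.vectorLength || !array.vector[index];
        return slowCase ? nullptr : array.vector[index];
    }
    int main()
    {
        void* slots[2] = { slots, nullptr }; // slots[1] is a hole
        ArraySketch a = { 2, slots };
        bool slow = false;
        getByValFastCase(a, 1, slow);
        return slow ? 0 : 1;                 // the hole takes the slow case
    }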
@@ -120,7 +120,7 @@ void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCas
linkSlowCaseIfNotJSCell(iter, base); // base cell check
Jump nonCell = jump();
linkSlowCase(iter); // base array check
- Jump notString = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr));
+ Jump notString = branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsStringVPtr));
emitNakedCall(m_globalData->getCTIStub(stringGetByValStubGenerator));
Jump failed = branchTestPtr(Zero, regT0);
emitPutVirtualRegister(dst, regT0);
@@ -159,10 +159,10 @@ void JIT::emit_op_get_by_pname(Instruction* currentInstruction)
emitJumpSlowCaseIfNotJSCell(regT0, base);
// Test base's structure
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
addSlowCase(branchPtr(NotEqual, regT2, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructure))));
load32(addressFor(i), regT3);
- sub32(Imm32(1), regT3);
+ sub32(TrustedImm32(1), regT3);
addSlowCase(branch32(AboveOrEqual, regT3, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_numCacheableSlots))));
compileGetDirectOffset(regT0, regT0, regT3, regT1);
@@ -197,10 +197,10 @@ void JIT::emit_op_put_by_val(Instruction* currentInstruction)
// See comment in op_get_by_val.
zeroExtend32ToPtr(regT1, regT1);
emitJumpSlowCaseIfNotJSCell(regT0, base);
- addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));
- addSlowCase(branch32(AboveOrEqual, regT1, Address(regT0, OBJECT_OFFSETOF(JSArray, m_vectorLength))));
+ addSlowCase(branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsArrayVPtr)));
+ addSlowCase(branch32(AboveOrEqual, regT1, Address(regT0, JSArray::vectorLengthOffset())));
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT2);
+ loadPtr(Address(regT0, JSArray::storageOffset()), regT2);
Jump empty = branchTestPtr(Zero, BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
Label storeResult(this);
@@ -209,11 +209,11 @@ void JIT::emit_op_put_by_val(Instruction* currentInstruction)
Jump end = jump();
empty.link(this);
- add32(Imm32(1), Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
+ add32(TrustedImm32(1), Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
branch32(Below, regT1, Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length))).linkTo(storeResult, this);
move(regT1, regT0);
- add32(Imm32(1), regT0);
+ add32(TrustedImm32(1), regT0);
store32(regT0, Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length)));
jump().linkTo(storeResult, this);
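
A hedged sketch of the hole-filling path above, assuming the index has already passed the vectorLength bounds check: writing into an empty slot bumps the live-value count, and writing at or past the current length also extends m_length to index + 1 before the store.

    struct StorageSketch { unsigned length, numValuesInVector; void* vector[8]; };
    static void putByValFastCase(StorageSketch& s, unsigned index, void* value)
    {
        if (!s.vector[index]) {       // the 'empty' branch
            ++s.numValuesInVector;
            if (index >= s.length)
                s.length = index + 1; // a store past the end grows the length
        }
        s.vector[index] = value;      // the shared storeResult label
    }
    int main()
    {
        StorageSketch s = {};
        putByValFastCase(s, 3, &s);
        return s.length == 4 && s.numValuesInVector == 1 ? 0 : 1;
    }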
@@ -224,7 +224,7 @@ void JIT::emit_op_put_by_index(Instruction* currentInstruction)
{
JITStubCall stubCall(this, cti_op_put_by_index);
stubCall.addArgument(currentInstruction[1].u.operand, regT2);
- stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
+ stubCall.addArgument(TrustedImm32(currentInstruction[2].u.operand));
stubCall.addArgument(currentInstruction[3].u.operand, regT2);
stubCall.call();
}
@@ -233,7 +233,7 @@ void JIT::emit_op_put_getter(Instruction* currentInstruction)
{
JITStubCall stubCall(this, cti_op_put_getter);
stubCall.addArgument(currentInstruction[1].u.operand, regT2);
- stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
+ stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
stubCall.addArgument(currentInstruction[3].u.operand, regT2);
stubCall.call();
}
@@ -242,7 +242,7 @@ void JIT::emit_op_put_setter(Instruction* currentInstruction)
{
JITStubCall stubCall(this, cti_op_put_setter);
stubCall.addArgument(currentInstruction[1].u.operand, regT2);
- stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
+ stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
stubCall.addArgument(currentInstruction[3].u.operand, regT2);
stubCall.call();
}
@@ -251,7 +251,7 @@ void JIT::emit_op_del_by_id(Instruction* currentInstruction)
{
JITStubCall stubCall(this, cti_op_del_by_id);
stubCall.addArgument(currentInstruction[2].u.operand, regT2);
- stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
+ stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
stubCall.call(currentInstruction[1].u.operand);
}
@@ -276,7 +276,7 @@ void JIT::emit_op_get_by_id(Instruction* currentInstruction)
emitGetVirtualRegister(baseVReg, regT0);
JITStubCall stubCall(this, cti_op_get_by_id_generic);
stubCall.addArgument(regT0);
- stubCall.addArgument(ImmPtr(ident));
+ stubCall.addArgument(TrustedImmPtr(ident));
stubCall.call(resultVReg);
m_propertyAccessInstructionIndex++;
@@ -298,7 +298,7 @@ void JIT::emit_op_put_by_id(Instruction* currentInstruction)
JITStubCall stubCall(this, direct ? cti_op_put_by_id_direct_generic : cti_op_put_by_id_generic);
stubCall.addArgument(regT0);
- stubCall.addArgument(ImmPtr(ident));
+ stubCall.addArgument(TrustedImmPtr(ident));
stubCall.addArgument(regT1);
stubCall.call();
@@ -336,12 +336,12 @@ void JIT::emit_op_method_check(Instruction* currentInstruction)
BEGIN_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck);
- Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), info.structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
- DataLabelPtr protoStructureToCompare, protoObj = moveWithPatch(ImmPtr(0), regT1);
- Jump protoStructureCheck = branchPtrWithPatch(NotEqual, Address(regT1, OBJECT_OFFSETOF(JSCell, m_structure)), protoStructureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
+ Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), info.structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
+ DataLabelPtr protoStructureToCompare, protoObj = moveWithPatch(TrustedImmPtr(0), regT1);
+ Jump protoStructureCheck = branchPtrWithPatch(NotEqual, Address(regT1, JSCell::structureOffset()), protoStructureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
// This will be relinked to load the function without doing a load.
- DataLabelPtr putFunction = moveWithPatch(ImmPtr(0), regT0);
+ DataLabelPtr putFunction = moveWithPatch(TrustedImmPtr(0), regT0);
END_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck);
@@ -414,7 +414,7 @@ void JIT::compileGetByIdHotPath(int, int baseVReg, Identifier*, unsigned propert
m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;
DataLabelPtr structureToCompare;
- Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
+ Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
addSlowCase(structureCheck);
ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, structureToCompare), patchOffsetGetByIdStructure);
ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, structureCheck), patchOffsetGetByIdBranchToSlowCase)
@@ -457,7 +457,7 @@ void JIT::compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident
#endif
JITStubCall stubCall(this, isMethodCheck ? cti_op_get_by_id_method_check : cti_op_get_by_id);
stubCall.addArgument(regT0);
- stubCall.addArgument(ImmPtr(ident));
+ stubCall.addArgument(TrustedImmPtr(ident));
Call call = stubCall.call(resultVReg);
END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase);
@@ -492,7 +492,7 @@ void JIT::emit_op_put_by_id(Instruction* currentInstruction)
// It is important that the following instruction plants a 32bit immediate, in order that it can be patched over.
DataLabelPtr structureToCompare;
- addSlowCase(branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))));
+ addSlowCase(branchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))));
ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, structureToCompare), patchOffsetPutByIdStructure);
loadPtr(Address(regT0, OBJECT_OFFSETOF(JSObject, m_propertyStorage)), regT0);
@@ -516,7 +516,7 @@ void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCase
JITStubCall stubCall(this, direct ? cti_op_put_by_id_direct : cti_op_put_by_id);
stubCall.addArgument(regT0);
- stubCall.addArgument(ImmPtr(ident));
+ stubCall.addArgument(TrustedImmPtr(ident));
stubCall.addArgument(regT1);
Call call = stubCall.call();
@@ -564,10 +564,10 @@ void JIT::testPrototype(JSValue prototype, JumpList& failureCases)
// values. In the non X86_64 case, the generated code is slightly more efficient because it uses
// two fewer instructions and doesn't require any scratch registers.
#if CPU(X86_64)
- move(ImmPtr(prototype.asCell()->structure()), regT3);
- failureCases.append(branchPtr(NotEqual, AbsoluteAddress(&prototype.asCell()->m_structure), regT3));
+ move(TrustedImmPtr(prototype.asCell()->structure()), regT3);
+ failureCases.append(branchPtr(NotEqual, AbsoluteAddress(prototype.asCell()->addressOfStructure()), regT3));
#else
- failureCases.append(branchPtr(NotEqual, AbsoluteAddress(&prototype.asCell()->m_structure), ImmPtr(prototype.asCell()->structure())));
+ failureCases.append(branchPtr(NotEqual, AbsoluteAddress(prototype.asCell()->addressOfStructure()), TrustedImmPtr(prototype.asCell()->structure())));
#endif
}
@@ -576,7 +576,7 @@ void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure
JumpList failureCases;
// Check eax is an object of the right Structure.
failureCases.append(emitJumpIfNotJSCell(regT0));
- failureCases.append(branchPtr(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), ImmPtr(oldStructure)));
+ failureCases.append(branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(oldStructure)));
testPrototype(oldStructure->storedPrototype(), failureCases);
// ecx = baseObject->m_structure
@@ -598,8 +598,8 @@ void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure
stubCall.skipArgument(); // base
stubCall.skipArgument(); // ident
stubCall.skipArgument(); // value
- stubCall.addArgument(Imm32(oldStructure->propertyStorageCapacity()));
- stubCall.addArgument(Imm32(newStructure->propertyStorageCapacity()));
+ stubCall.addArgument(TrustedImm32(oldStructure->propertyStorageCapacity()));
+ stubCall.addArgument(TrustedImm32(newStructure->propertyStorageCapacity()));
stubCall.call(regT0);
emitGetJITStubArg(2, regT1);
@@ -608,9 +608,9 @@ void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure
// Assumes m_refCount can be decremented easily; the decrement is safe because
// the codeblock should ensure oldStructure->m_refCount > 0
- sub32(Imm32(1), AbsoluteAddress(oldStructure->addressOfCount()));
- add32(Imm32(1), AbsoluteAddress(newStructure->addressOfCount()));
- storePtr(ImmPtr(newStructure), Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)));
+ sub32(TrustedImm32(1), AbsoluteAddress(oldStructure->addressOfCount()));
+ add32(TrustedImm32(1), AbsoluteAddress(newStructure->addressOfCount()));
+ storePtr(TrustedImmPtr(newStructure), Address(regT0, JSCell::structureOffset()));
// write the value
compilePutDirectOffset(regT0, regT1, newStructure, cachedOffset);
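
A hedged sketch of what the transition commits once any storage reallocation is done: rebalance the two structures' refcounts, repoint the cell at the new structure, then write the value at its cached offset.

    struct StructureSketch { int refCount; };
    struct CellSketch { StructureSketch* structure; void* storage[8]; };
    static void commitTransition(CellSketch& cell, StructureSketch& oldStructure,
                                 StructureSketch& newStructure, unsigned cachedOffset, void* value)
    {
        --oldStructure.refCount;            // safe: the codeblock keeps this > 0
        ++newStructure.refCount;
        cell.structure = &newStructure;
        cell.storage[cachedOffset] = value; // compilePutDirectOffset's job
    }
    int main()
    {
        StructureSketch oldS = { 2 }, newS = { 1 };
        CellSketch cell = { &oldS, {} };
        commitTransition(cell, oldS, newS, 0, &cell);
        return cell.structure == &newS ? 0 : 1;
    }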
@@ -692,12 +692,12 @@ void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress)
StructureStubInfo* stubInfo = &m_codeBlock->getStubInfo(returnAddress);
// Check eax is an array
- Jump failureCases1 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr));
+ Jump failureCases1 = branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsArrayVPtr));
// Checks out okay! - get the length from the storage
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT3);
+ loadPtr(Address(regT0, JSArray::storageOffset()), regT3);
load32(Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_length)), regT2);
- Jump failureCases2 = branch32(Above, regT2, Imm32(JSImmediate::maxImmediateInt));
+ Jump failureCases2 = branch32(Above, regT2, TrustedImm32(JSImmediate::maxImmediateInt));
emitFastArithIntToImmNoCheck(regT2, regT0);
Jump success = jump();
@@ -735,12 +735,12 @@ void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* str
Jump failureCases1 = checkStructure(regT0, structure);
// Check the prototype object's Structure had not changed.
- Structure** prototypeStructureAddress = &(protoObject->m_structure);
+ Structure* const * prototypeStructureAddress = protoObject->addressOfStructure();
#if CPU(X86_64)
- move(ImmPtr(prototypeStructure), regT3);
+ move(TrustedImmPtr(prototypeStructure), regT3);
Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
#else
- Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
+ Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), TrustedImmPtr(prototypeStructure));
#endif
bool needsStubLink = false;
@@ -752,15 +752,15 @@ void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* str
JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
stubCall.addArgument(regT1);
stubCall.addArgument(regT0);
- stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
stubCall.call();
} else if (slot.cachedPropertyType() == PropertySlot::Custom) {
needsStubLink = true;
JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
- stubCall.addArgument(ImmPtr(protoObject));
- stubCall.addArgument(ImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
- stubCall.addArgument(ImmPtr(const_cast<Identifier*>(&ident)));
- stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.addArgument(TrustedImmPtr(protoObject));
+ stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
+ stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
+ stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
stubCall.call();
} else
compileGetDirectOffset(protoObject, regT0, cachedOffset);
@@ -804,15 +804,15 @@ void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, Polymorphic
JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
stubCall.addArgument(regT1);
stubCall.addArgument(regT0);
- stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
stubCall.call();
} else if (slot.cachedPropertyType() == PropertySlot::Custom) {
needsStubLink = true;
JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
stubCall.addArgument(regT0);
- stubCall.addArgument(ImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
- stubCall.addArgument(ImmPtr(const_cast<Identifier*>(&ident)));
- stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
+ stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
+ stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
stubCall.call();
} else
compileGetDirectOffset(regT0, regT0, structure, cachedOffset);
@@ -858,12 +858,12 @@ void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, Polymorphi
Jump failureCases1 = checkStructure(regT0, structure);
// Check the prototype object's Structure had not changed.
- Structure** prototypeStructureAddress = &(protoObject->m_structure);
+ Structure* const * prototypeStructureAddress = protoObject->addressOfStructure();
#if CPU(X86_64)
- move(ImmPtr(prototypeStructure), regT3);
+ move(TrustedImmPtr(prototypeStructure), regT3);
Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
#else
- Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
+ Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), TrustedImmPtr(prototypeStructure));
#endif
// Checks out okay!
@@ -874,15 +874,15 @@ void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, Polymorphi
JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
stubCall.addArgument(regT1);
stubCall.addArgument(regT0);
- stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
stubCall.call();
} else if (slot.cachedPropertyType() == PropertySlot::Custom) {
needsStubLink = true;
JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
- stubCall.addArgument(ImmPtr(protoObject));
- stubCall.addArgument(ImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
- stubCall.addArgument(ImmPtr(const_cast<Identifier*>(&ident)));
- stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.addArgument(TrustedImmPtr(protoObject));
+ stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
+ stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
+ stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
stubCall.call();
} else
compileGetDirectOffset(protoObject, regT0, cachedOffset);
@@ -944,15 +944,15 @@ void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, Polymorphi
JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
stubCall.addArgument(regT1);
stubCall.addArgument(regT0);
- stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
stubCall.call();
} else if (slot.cachedPropertyType() == PropertySlot::Custom) {
needsStubLink = true;
JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
- stubCall.addArgument(ImmPtr(protoObject));
- stubCall.addArgument(ImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
- stubCall.addArgument(ImmPtr(const_cast<Identifier*>(&ident)));
- stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.addArgument(TrustedImmPtr(protoObject));
+ stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
+ stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
+ stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
stubCall.call();
} else
compileGetDirectOffset(protoObject, regT0, cachedOffset);
@@ -979,8 +979,7 @@ void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, Polymorphi
// Track the stub we have created so that it will be deleted later.
structure->ref();
- chain->ref();
- prototypeStructures->list[currentIndex].set(entryLabel, structure, chain);
+ prototypeStructures->list[currentIndex].set(callFrame->globalData(), m_codeBlock->ownerExecutable(), entryLabel, structure, chain);
// Finally patch the jump to slow case back in the hot path to jump here instead.
CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
@@ -1014,15 +1013,15 @@ void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* str
JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
stubCall.addArgument(regT1);
stubCall.addArgument(regT0);
- stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
stubCall.call();
} else if (slot.cachedPropertyType() == PropertySlot::Custom) {
needsStubLink = true;
JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
- stubCall.addArgument(ImmPtr(protoObject));
- stubCall.addArgument(ImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
- stubCall.addArgument(ImmPtr(const_cast<Identifier*>(&ident)));
- stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.addArgument(TrustedImmPtr(protoObject));
+ stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
+ stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
+ stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
stubCall.call();
} else
compileGetDirectOffset(protoObject, regT0, cachedOffset);
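
The ImmPtr-to-TrustedImmPtr and Imm32-to-TrustedImm32 renames that dominate these property-access hunks split the MacroAssembler's immediate types in two: constants the compiler computed itself (Structure pointers, tag values, stub return addresses) are marked trusted, while anything that could be influenced by script stays plain Imm32/ImmPtr. That gives later hardening work a single choke point at which untrusted immediates can be blinded before they are planted in executable memory. A minimal standalone sketch of the idea, with an illustrative XOR-blinding emitter that is not WebKit's actual code generator:

    // Illustrative only: models the trusted/untrusted immediate split.
    #include <cstdint>
    #include <cstdio>

    struct TrustedImm32 { int32_t value; };   // compiler-controlled constant
    struct Imm32 { int32_t value; };          // potentially script-influenced

    // Trusted immediates may be emitted verbatim.
    static void emitMove(TrustedImm32 imm) {
        std::printf("mov eax, %d\n", imm.value);
    }

    // Untrusted immediates can be blinded so their raw bytes never appear
    // as-is in JIT-generated code (the CPU recomputes value = blinded ^ key).
    static void emitMove(Imm32 imm, uint32_t key) {
        std::printf("mov eax, %u\n", static_cast<uint32_t>(imm.value) ^ key);
        std::printf("xor eax, %u\n", key);
    }

    int main() {
        emitMove(TrustedImm32{42});               // trusted: plain encoding
        emitMove(Imm32{0x41414141}, 0xdeadbeefu); // untrusted: blinded
        return 0;
    }

At this revision the split is still a type-level annotation; its value is that, once blinding is introduced, the trusted/untrusted distinction is enforced by the compiler at every call site touched above.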
diff --git a/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp b/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp
index 3562200..2a47e5c 100644
--- a/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp
+++ b/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp
@@ -70,7 +70,7 @@ void JIT::emit_op_put_getter(Instruction* currentInstruction)
JITStubCall stubCall(this, cti_op_put_getter);
stubCall.addArgument(base);
- stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(property)));
+ stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(property)));
stubCall.addArgument(function);
stubCall.call();
}
@@ -83,7 +83,7 @@ void JIT::emit_op_put_setter(Instruction* currentInstruction)
JITStubCall stubCall(this, cti_op_put_setter);
stubCall.addArgument(base);
- stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(property)));
+ stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(property)));
stubCall.addArgument(function);
stubCall.call();
}
@@ -96,7 +96,7 @@ void JIT::emit_op_del_by_id(Instruction* currentInstruction)
JITStubCall stubCall(this, cti_op_del_by_id);
stubCall.addArgument(base);
- stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(property)));
+ stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(property)));
stubCall.call(dst);
}
@@ -155,7 +155,7 @@ void JIT::emit_op_get_by_id(Instruction* currentInstruction)
JITStubCall stubCall(this, cti_op_get_by_id_generic);
stubCall.addArgument(base);
- stubCall.addArgument(ImmPtr(&(m_codeBlock->identifier(ident))));
+ stubCall.addArgument(TrustedImmPtr(&(m_codeBlock->identifier(ident))));
stubCall.call(dst);
m_propertyAccessInstructionIndex++;
@@ -175,7 +175,7 @@ void JIT::emit_op_put_by_id(Instruction* currentInstruction)
JITStubCall stubCall(this, cti_op_put_by_id_generic);
stubCall.addArgument(base);
- stubCall.addArgument(ImmPtr(&(m_codeBlock->identifier(ident))));
+ stubCall.addArgument(TrustedImmPtr(&(m_codeBlock->identifier(ident))));
stubCall.addArgument(value);
stubCall.call();
@@ -213,16 +213,16 @@ void JIT::emit_op_method_check(Instruction* currentInstruction)
BEGIN_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck);
- Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), info.structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
- DataLabelPtr protoStructureToCompare, protoObj = moveWithPatch(ImmPtr(0), regT2);
- Jump protoStructureCheck = branchPtrWithPatch(NotEqual, Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), protoStructureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
+ Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), info.structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
+ DataLabelPtr protoStructureToCompare, protoObj = moveWithPatch(TrustedImmPtr(0), regT2);
+ Jump protoStructureCheck = branchPtrWithPatch(NotEqual, Address(regT2, JSCell::structureOffset()), protoStructureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
// This will be relinked to load the function directly as an immediate, without doing a memory load.
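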
- DataLabelPtr putFunction = moveWithPatch(ImmPtr(0), regT0);
+ DataLabelPtr putFunction = moveWithPatch(TrustedImmPtr(0), regT0);
END_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck);
- move(Imm32(JSValue::CellTag), regT1);
+ move(TrustedImm32(JSValue::CellTag), regT1);
Jump match = jump();
ASSERT_JIT_OFFSET(differenceBetween(info.structureToCompare, protoObj), patchOffsetMethodCheckProtoObj);
@@ -271,7 +271,7 @@ JIT::CodePtr JIT::stringGetByValStubGenerator(JSGlobalData* globalData, Executab
{
JSInterfaceJIT jit;
JumpList failures;
- failures.append(jit.branchPtr(NotEqual, Address(regT0), ImmPtr(globalData->jsStringVPtr)));
+ failures.append(jit.branchPtr(NotEqual, Address(regT0), TrustedImmPtr(globalData->jsStringVPtr)));
failures.append(jit.branchTest32(NonZero, Address(regT0, OBJECT_OFFSETOF(JSString, m_fiberCount))));
// Load string length to regT1, and start the process of loading the data pointer into regT0
@@ -285,14 +285,14 @@ JIT::CodePtr JIT::stringGetByValStubGenerator(JSGlobalData* globalData, Executab
// Load the character
jit.load16(BaseIndex(regT0, regT2, TimesTwo, 0), regT0);
- failures.append(jit.branch32(AboveOrEqual, regT0, Imm32(0x100)));
- jit.move(ImmPtr(globalData->smallStrings.singleCharacterStrings()), regT1);
+ failures.append(jit.branch32(AboveOrEqual, regT0, TrustedImm32(0x100)));
+ jit.move(TrustedImmPtr(globalData->smallStrings.singleCharacterStrings()), regT1);
jit.loadPtr(BaseIndex(regT1, regT0, ScalePtr, 0), regT0);
- jit.move(Imm32(JSValue::CellTag), regT1); // We null check regT0 on return so this is safe
+ jit.move(TrustedImm32(JSValue::CellTag), regT1); // We null check regT0 on return so this is safe
jit.ret();
failures.link(&jit);
- jit.move(Imm32(0), regT0);
+ jit.move(TrustedImm32(0), regT0);
jit.ret();
LinkBuffer patchBuffer(&jit, pool, 0);
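
The fast path above works because the VM keeps the 256 single-character strings pre-interned, so a get_by_val on a string can hand back a shared cell instead of allocating. A rough standalone model of that cache (WebKit's SmallStrings stores GC-managed JSString* cells; this sketch is not GC-aware):

    #include <array>
    #include <cstdint>
    #include <memory>
    #include <string>

    class SmallStringsSketch {
    public:
        // Mirrors the stub's bailout: characters >= 0x100 take the slow path.
        const std::string* singleCharacterString(uint16_t ch) {
            if (ch >= 0x100)
                return nullptr;
            auto& slot = m_strings[ch];
            if (!slot)
                slot = std::make_unique<std::string>(1, static_cast<char>(ch));
            return slot.get();
        }
    private:
        std::array<std::unique_ptr<std::string>, 0x100> m_strings;
    };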
@@ -307,16 +307,16 @@ void JIT::emit_op_get_by_val(Instruction* currentInstruction)
emitLoad2(base, regT1, regT0, property, regT3, regT2);
- addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+ addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
emitJumpSlowCaseIfNotJSCell(base, regT1);
- addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));
+ addSlowCase(branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsArrayVPtr)));
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT3);
- addSlowCase(branch32(AboveOrEqual, regT2, Address(regT0, OBJECT_OFFSETOF(JSArray, m_vectorLength))));
+ loadPtr(Address(regT0, JSArray::storageOffset()), regT3);
+ addSlowCase(branch32(AboveOrEqual, regT2, Address(regT0, JSArray::vectorLengthOffset())));
load32(BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1); // tag
load32(BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0); // payload
- addSlowCase(branch32(Equal, regT1, Imm32(JSValue::EmptyValueTag)));
+ addSlowCase(branch32(Equal, regT1, TrustedImm32(JSValue::EmptyValueTag)));
emitStore(dst, regT1, regT0);
map(m_bytecodeOffset + OPCODE_LENGTH(op_get_by_val), dst, regT1, regT0);
@@ -333,7 +333,7 @@ void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCas
Jump nonCell = jump();
linkSlowCase(iter); // base array check
- Jump notString = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr));
+ Jump notString = branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsStringVPtr));
emitNakedCall(m_globalData->getCTIStub(stringGetByValStubGenerator));
Jump failed = branchTestPtr(Zero, regT0);
emitStore(dst, regT1, regT0);
@@ -359,14 +359,14 @@ void JIT::emit_op_put_by_val(Instruction* currentInstruction)
emitLoad2(base, regT1, regT0, property, regT3, regT2);
- addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+ addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
emitJumpSlowCaseIfNotJSCell(base, regT1);
- addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));
- addSlowCase(branch32(AboveOrEqual, regT2, Address(regT0, OBJECT_OFFSETOF(JSArray, m_vectorLength))));
+ addSlowCase(branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsArrayVPtr)));
+ addSlowCase(branch32(AboveOrEqual, regT2, Address(regT0, JSArray::vectorLengthOffset())));
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT3);
+ loadPtr(Address(regT0, JSArray::storageOffset()), regT3);
- Jump empty = branch32(Equal, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), Imm32(JSValue::EmptyValueTag));
+ Jump empty = branch32(Equal, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), TrustedImm32(JSValue::EmptyValueTag));
Label storeResult(this);
emitLoad(value, regT1, regT0);
@@ -375,10 +375,10 @@ void JIT::emit_op_put_by_val(Instruction* currentInstruction)
Jump end = jump();
empty.link(this);
- add32(Imm32(1), Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
+ add32(TrustedImm32(1), Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
branch32(Below, regT2, Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_length))).linkTo(storeResult, this);
- add32(Imm32(1), regT2, regT0);
+ add32(TrustedImm32(1), regT2, regT0);
store32(regT0, Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_length)));
jump().linkTo(storeResult, this);
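
The hole-store path above has a straightforward C-level reading: filling a hole bumps the occupancy count, and a store at or past the current length also extends the length before falling back to the shared store. A sketch under that reading (field names follow the diff; the value store itself is elided):

    struct ArrayStorageSketch {
        unsigned m_length;            // array length as seen by script
        unsigned m_numValuesInVector; // occupied (non-hole) slots
        // JSValue m_vector[]; value store elided
    };

    // Assumes the index already passed the vectorLength bounds check,
    // as it has by the time the JIT reaches the 'empty' label.
    static void storeToHole(ArrayStorageSketch& storage, unsigned index) {
        ++storage.m_numValuesInVector;       // add32(TrustedImm32(1), ...)
        if (index >= storage.m_length)       // branch32(Below, ...) back-edge
            storage.m_length = index + 1;    // add32(TrustedImm32(1), regT2, regT0)
        // fall through to storeResult: write the tag/payload pair
    }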
@@ -429,7 +429,7 @@ void JIT::compileGetByIdHotPath()
m_propertyAccessInstructionIndex++;
DataLabelPtr structureToCompare;
- Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
+ Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
addSlowCase(structureCheck);
ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, structureToCompare), patchOffsetGetByIdStructure);
ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, structureCheck), patchOffsetGetByIdBranchToSlowCase);
@@ -472,7 +472,7 @@ void JIT::compileGetByIdSlowCase(int dst, int base, Identifier* ident, Vector<Sl
#endif
JITStubCall stubCall(this, isMethodCheck ? cti_op_get_by_id_method_check : cti_op_get_by_id);
stubCall.addArgument(regT1, regT0);
- stubCall.addArgument(ImmPtr(ident));
+ stubCall.addArgument(TrustedImmPtr(ident));
Call call = stubCall.call(dst);
END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase);
@@ -505,7 +505,7 @@ void JIT::emit_op_put_by_id(Instruction* currentInstruction)
// It is important that the following instruction plants a 32-bit immediate, so that it can be patched over.
DataLabelPtr structureToCompare;
- addSlowCase(branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))));
+ addSlowCase(branchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))));
ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, structureToCompare), patchOffsetPutByIdStructure);
loadPtr(Address(regT0, OBJECT_OFFSETOF(JSObject, m_propertyStorage)), regT0);
@@ -529,7 +529,7 @@ void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCase
JITStubCall stubCall(this, direct ? cti_op_put_by_id_direct : cti_op_put_by_id);
stubCall.addArgument(regT1, regT0);
- stubCall.addArgument(ImmPtr(&(m_codeBlock->identifier(ident))));
+ stubCall.addArgument(TrustedImmPtr(&(m_codeBlock->identifier(ident))));
stubCall.addArgument(regT3, regT2);
Call call = stubCall.call();
@@ -579,10 +579,10 @@ void JIT::testPrototype(JSValue prototype, JumpList& failureCases)
// values. In the non-X86_64 case, the generated code is slightly more efficient because it uses
// two fewer instructions and doesn't require any scratch registers.
#if CPU(X86_64)
- move(ImmPtr(prototype.asCell()->structure()), regT3);
- failureCases.append(branchPtr(NotEqual, AbsoluteAddress(&prototype.asCell()->m_structure), regT3));
+ move(TrustedImmPtr(prototype.asCell()->structure()), regT3);
+ failureCases.append(branchPtr(NotEqual, AbsoluteAddress(prototype.asCell()->addressOfStructure()), regT3));
#else
- failureCases.append(branchPtr(NotEqual, AbsoluteAddress(&prototype.asCell()->m_structure), ImmPtr(prototype.asCell()->structure())));
+ failureCases.append(branchPtr(NotEqual, AbsoluteAddress(prototype.asCell()->addressOfStructure()), TrustedImmPtr(prototype.asCell()->structure())));
#endif
}
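
The comment above the #if CPU(X86_64) fork describes an instruction-encoding limit rather than a stylistic choice: x86-64 CMP has no form that takes both a memory operand and a 64-bit immediate, so the Structure* must be staged through a scratch register, while 32-bit targets encode the pointer directly in the compare. Roughly, with illustrative encodings rather than the assembler's actual output:

    // x86-64: two instructions and a scratch register (regT3)
    //   movabs r11, structure          ; move(TrustedImmPtr(structure), regT3)
    //   cmp    qword ptr [addr], r11   ; branchPtr(NotEqual, AbsoluteAddress(addr), regT3)
    //
    // x86-32 and other 32-bit targets: a single compare, immediate inline
    //   cmp    dword ptr [addr], structure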
@@ -591,8 +591,8 @@ void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure
// It is assumed that regT0 contains the basePayload and regT1 contains the baseTag. The value can be found on the stack.
JumpList failureCases;
- failureCases.append(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));
- failureCases.append(branchPtr(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), ImmPtr(oldStructure)));
+ failureCases.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
+ failureCases.append(branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(oldStructure)));
testPrototype(oldStructure->storedPrototype(), failureCases);
if (!direct) {
@@ -613,16 +613,16 @@ void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure
stubCall.skipArgument(); // base
stubCall.skipArgument(); // ident
stubCall.skipArgument(); // value
- stubCall.addArgument(Imm32(oldStructure->propertyStorageCapacity()));
- stubCall.addArgument(Imm32(newStructure->propertyStorageCapacity()));
+ stubCall.addArgument(TrustedImm32(oldStructure->propertyStorageCapacity()));
+ stubCall.addArgument(TrustedImm32(newStructure->propertyStorageCapacity()));
stubCall.call(regT0);
restoreReturnAddressBeforeReturn(regT3);
}
- sub32(Imm32(1), AbsoluteAddress(oldStructure->addressOfCount()));
- add32(Imm32(1), AbsoluteAddress(newStructure->addressOfCount()));
- storePtr(ImmPtr(newStructure), Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)));
+ sub32(TrustedImm32(1), AbsoluteAddress(oldStructure->addressOfCount()));
+ add32(TrustedImm32(1), AbsoluteAddress(newStructure->addressOfCount()));
+ storePtr(TrustedImmPtr(newStructure), Address(regT0, JSCell::structureOffset()));
#if CPU(MIPS)
// For MIPS, we don't add sizeof(void*) to the stack offset.
@@ -717,15 +717,15 @@ void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress)
// regT0 holds a JSCell*
// Check for array
- Jump failureCases1 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr));
+ Jump failureCases1 = branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsArrayVPtr));
// Checks out okay! - get the length from the storage
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT2);
+ loadPtr(Address(regT0, JSArray::storageOffset()), regT2);
load32(Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length)), regT2);
- Jump failureCases2 = branch32(Above, regT2, Imm32(INT_MAX));
+ Jump failureCases2 = branch32(Above, regT2, TrustedImm32(INT_MAX));
move(regT2, regT0);
- move(Imm32(JSValue::Int32Tag), regT1);
+ move(TrustedImm32(JSValue::Int32Tag), regT1);
Jump success = jump();
LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
@@ -762,12 +762,12 @@ void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* str
Jump failureCases1 = checkStructure(regT0, structure);
// Check that the prototype object's Structure has not changed.
- Structure** prototypeStructureAddress = &(protoObject->m_structure);
+ Structure* const * prototypeStructureAddress = protoObject->addressOfStructure();
#if CPU(X86_64)
- move(ImmPtr(prototypeStructure), regT3);
+ move(TrustedImmPtr(prototypeStructure), regT3);
Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
#else
- Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
+ Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), TrustedImmPtr(prototypeStructure));
#endif
bool needsStubLink = false;
// Checks out okay!
@@ -777,15 +777,15 @@ void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* str
JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
stubCall.addArgument(regT1);
stubCall.addArgument(regT0);
- stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
stubCall.call();
} else if (slot.cachedPropertyType() == PropertySlot::Custom) {
needsStubLink = true;
JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
- stubCall.addArgument(ImmPtr(protoObject));
- stubCall.addArgument(ImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
- stubCall.addArgument(ImmPtr(const_cast<Identifier*>(&ident)));
- stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.addArgument(TrustedImmPtr(protoObject));
+ stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
+ stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
+ stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
stubCall.call();
} else
compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
@@ -834,15 +834,15 @@ void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, Polymorphic
JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
stubCall.addArgument(regT1);
stubCall.addArgument(regT0);
- stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
stubCall.call();
} else if (slot.cachedPropertyType() == PropertySlot::Custom) {
needsStubLink = true;
JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
stubCall.addArgument(regT0);
- stubCall.addArgument(ImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
- stubCall.addArgument(ImmPtr(const_cast<Identifier*>(&ident)));
- stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
+ stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
+ stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
stubCall.call();
} else
compileGetDirectOffset(regT0, regT1, regT0, structure, cachedOffset);
@@ -889,12 +889,12 @@ void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, Polymorphi
Jump failureCases1 = checkStructure(regT0, structure);
// Check that the prototype object's Structure has not changed.
- Structure** prototypeStructureAddress = &(protoObject->m_structure);
+ Structure* const * prototypeStructureAddress = protoObject->addressOfStructure();
#if CPU(X86_64)
- move(ImmPtr(prototypeStructure), regT3);
+ move(TrustedImmPtr(prototypeStructure), regT3);
Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
#else
- Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
+ Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), TrustedImmPtr(prototypeStructure));
#endif
bool needsStubLink = false;
@@ -904,15 +904,15 @@ void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, Polymorphi
JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
stubCall.addArgument(regT1);
stubCall.addArgument(regT0);
- stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
stubCall.call();
} else if (slot.cachedPropertyType() == PropertySlot::Custom) {
needsStubLink = true;
JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
- stubCall.addArgument(ImmPtr(protoObject));
- stubCall.addArgument(ImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
- stubCall.addArgument(ImmPtr(const_cast<Identifier*>(&ident)));
- stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.addArgument(TrustedImmPtr(protoObject));
+ stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
+ stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
+ stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
stubCall.call();
} else
compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
@@ -973,15 +973,15 @@ void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, Polymorphi
JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
stubCall.addArgument(regT1);
stubCall.addArgument(regT0);
- stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
stubCall.call();
} else if (slot.cachedPropertyType() == PropertySlot::Custom) {
needsStubLink = true;
JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
- stubCall.addArgument(ImmPtr(protoObject));
- stubCall.addArgument(ImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
- stubCall.addArgument(ImmPtr(const_cast<Identifier*>(&ident)));
- stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.addArgument(TrustedImmPtr(protoObject));
+ stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
+ stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
+ stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
stubCall.call();
} else
compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
@@ -1007,8 +1007,7 @@ void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, Polymorphi
// Track the stub we have created so that it will be deleted later.
structure->ref();
- chain->ref();
- prototypeStructures->list[currentIndex].set(entryLabel, structure, chain);
+ prototypeStructures->list[currentIndex].set(callFrame->globalData(), m_codeBlock->ownerExecutable(), entryLabel, structure, chain);
// Finally patch the jump to slow case back in the hot path to jump here instead.
CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
@@ -1043,15 +1042,15 @@ void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* str
JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
stubCall.addArgument(regT1);
stubCall.addArgument(regT0);
- stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
stubCall.call();
} else if (slot.cachedPropertyType() == PropertySlot::Custom) {
needsStubLink = true;
JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
- stubCall.addArgument(ImmPtr(protoObject));
- stubCall.addArgument(ImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
- stubCall.addArgument(ImmPtr(const_cast<Identifier*>(&ident)));
- stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.addArgument(TrustedImmPtr(protoObject));
+ stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
+ stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
+ stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
stubCall.call();
} else
compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
@@ -1113,10 +1112,10 @@ void JIT::emit_op_get_by_pname(Instruction* currentInstruction)
emitLoadPayload(iter, regT1);
// Test base's structure
- loadPtr(Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), regT0);
+ loadPtr(Address(regT2, JSCell::structureOffset()), regT0);
addSlowCase(branchPtr(NotEqual, regT0, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructure))));
load32(addressFor(i), regT3);
- sub32(Imm32(1), regT3);
+ sub32(TrustedImm32(1), regT3);
addSlowCase(branch32(AboveOrEqual, regT3, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_numCacheableSlots))));
compileGetDirectOffset(regT2, regT1, regT0, regT3);
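
All of the TimesEight-scaled tag/payload loads in this file lean on the JSVALUE32_64 value representation: every JSValue slot is an 8-byte union whose low word is the payload and whose high word is a type tag, so BaseIndex addressing scales the array index by eight and reads the two words at fixed offsets. A minimal model of that layout; Int32Tag matches the 0xfffffffd constant asserted in JSInterfaceJIT.h later in this diff, while the CellTag value here is only a stand-in:

    #include <cstdint>

    union EncodedValueSketch {
        double asDouble;       // doubles own the full 8 bytes
        struct {
            int32_t payload;   // little-endian layout, as on x86/ARM
            uint32_t tag;      // discriminates int32, cell, empty, ...
        } asBits;
    };

    static const uint32_t Int32TagSketch = 0xfffffffdu; // per the COMPILE_ASSERT below
    static const uint32_t CellTagSketch  = 0xfffffffbu; // illustrative value only

    static bool isInt32(const EncodedValueSketch& v) {
        return v.asBits.tag == Int32TagSketch;
    }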
diff --git a/Source/JavaScriptCore/jit/JITStubCall.h b/Source/JavaScriptCore/jit/JITStubCall.h
index 4478d06..a0341d6 100644
--- a/Source/JavaScriptCore/jit/JITStubCall.h
+++ b/Source/JavaScriptCore/jit/JITStubCall.h
@@ -99,13 +99,13 @@ namespace JSC {
m_stackIndex += stackIndexStep;
}
- void addArgument(JIT::Imm32 argument)
+ void addArgument(JIT::TrustedImm32 argument)
{
m_jit->poke(argument, m_stackIndex);
m_stackIndex += stackIndexStep;
}
- void addArgument(JIT::ImmPtr argument)
+ void addArgument(JIT::TrustedImmPtr argument)
{
m_jit->poke(argument, m_stackIndex);
m_stackIndex += stackIndexStep;
diff --git a/Source/JavaScriptCore/jit/JITStubs.cpp b/Source/JavaScriptCore/jit/JITStubs.cpp
index 95bf52c..e52c7c8 100644
--- a/Source/JavaScriptCore/jit/JITStubs.cpp
+++ b/Source/JavaScriptCore/jit/JITStubs.cpp
@@ -40,6 +40,7 @@
#include "Debugger.h"
#include "ExceptionHelpers.h"
#include "GetterSetter.h"
+#include "Global.h"
#include "JIT.h"
#include "JSActivation.h"
#include "JSArray.h"
@@ -679,6 +680,7 @@ __asm void ctiOpThrowNotCaught()
#endif
JITThunks::JITThunks(JSGlobalData* globalData)
+ : m_hostFunctionStubMap(new HostFunctionStubMap)
{
if (!globalData->executableAllocator.isValid())
return;
@@ -941,17 +943,17 @@ static NEVER_INLINE void throwStackOverflowError(CallFrame* callFrame, JSGlobalD
#define CHECK_FOR_EXCEPTION() \
do { \
- if (UNLIKELY(stackFrame.globalData->exception.get())) \
+ if (UNLIKELY(stackFrame.globalData->exception)) \
VM_THROW_EXCEPTION(); \
} while (0)
#define CHECK_FOR_EXCEPTION_AT_END() \
do { \
- if (UNLIKELY(stackFrame.globalData->exception.get())) \
+ if (UNLIKELY(stackFrame.globalData->exception)) \
VM_THROW_EXCEPTION_AT_END(); \
} while (0)
#define CHECK_FOR_EXCEPTION_VOID() \
do { \
- if (UNLIKELY(stackFrame.globalData->exception.get())) { \
+ if (UNLIKELY(stackFrame.globalData->exception)) { \
VM_THROW_EXCEPTION_AT_END(); \
return; \
} \
@@ -976,7 +978,7 @@ static ExceptionHandler jitThrow(JSGlobalData* globalData, CallFrame* callFrame,
return exceptionHandler;
}
-#if CPU(ARM_THUMB2)
+#if CPU(ARM_THUMB2) && COMPILER(GCC)
#define DEFINE_STUB_FUNCTION(rtype, op) \
extern "C" { \
@@ -1072,7 +1074,7 @@ static ExceptionHandler jitThrow(JSGlobalData* globalData, CallFrame* callFrame,
); \
rtype JITStubThunked_##op(STUB_ARGS_DECLARATION)
-#elif CPU(ARM_TRADITIONAL) && COMPILER(RVCT)
+#elif (CPU(ARM_THUMB2) || CPU(ARM_TRADITIONAL)) && COMPILER(RVCT)
#define DEFINE_STUB_FUNCTION(rtype, op) rtype JITStubThunked_##op(STUB_ARGS_DECLARATION)
@@ -1085,7 +1087,7 @@ static ExceptionHandler jitThrow(JSGlobalData* globalData, CallFrame* callFrame,
RVCT(extern "C" #rtype# JITStubThunked_#op#(STUB_ARGS_DECLARATION);)
RVCT(__asm #rtype# cti_#op#(STUB_ARGS_DECLARATION))
RVCT({)
-RVCT( ARM)
+RVCT( PRESERVE8)
RVCT( IMPORT JITStubThunked_#op#)
RVCT( str lr, [sp, # THUNK_RETURN_ADDRESS_OFFSET])
RVCT( bl JITStubThunked_#op#)
@@ -1172,7 +1174,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_create_this)
Structure* structure;
JSValue proto = stackFrame.args[0].jsValue();
if (proto.isObject())
- structure = asObject(proto)->inheritorID();
+ structure = asObject(proto)->inheritorID(*stackFrame.globalData);
else
structure = constructor->scope()->globalObject->emptyObjectStructure();
JSValue result = constructEmptyObject(callFrame, structure);
@@ -1551,7 +1553,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_self_fail)
return JSValue::encode(result);
}
-static PolymorphicAccessStructureList* getPolymorphicAccessStructureListSlot(StructureStubInfo* stubInfo, int& listIndex)
+static PolymorphicAccessStructureList* getPolymorphicAccessStructureListSlot(JSGlobalData& globalData, ScriptExecutable* owner, StructureStubInfo* stubInfo, int& listIndex)
{
PolymorphicAccessStructureList* prototypeStructureList = 0;
listIndex = 1;
@@ -1563,7 +1565,7 @@ static PolymorphicAccessStructureList* getPolymorphicAccessStructureListSlot(Str
stubInfo->initGetByIdProtoList(prototypeStructureList, 2);
break;
case access_get_by_id_chain:
- prototypeStructureList = new PolymorphicAccessStructureList(stubInfo->stubRoutine, stubInfo->u.getByIdChain.baseObjectStructure, stubInfo->u.getByIdChain.chain);
+ prototypeStructureList = new PolymorphicAccessStructureList(globalData, owner, stubInfo->stubRoutine, stubInfo->u.getByIdChain.baseObjectStructure, stubInfo->u.getByIdChain.chain);
stubInfo->stubRoutine = CodeLocationLabel();
stubInfo->initGetByIdProtoList(prototypeStructureList, 2);
break;
@@ -1651,7 +1653,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_proto_list)
}
int listIndex;
- PolymorphicAccessStructureList* prototypeStructureList = getPolymorphicAccessStructureListSlot(stubInfo, listIndex);
+ PolymorphicAccessStructureList* prototypeStructureList = getPolymorphicAccessStructureListSlot(callFrame->globalData(), codeBlock->ownerExecutable(), stubInfo, listIndex);
if (listIndex < POLYMORPHIC_LIST_CACHE_SIZE) {
JIT::compileGetByIdProtoList(callFrame->scopeChain()->globalData, callFrame, codeBlock, stubInfo, prototypeStructureList, listIndex, structure, slotBaseObject->structure(), propertyName, slot, offset);
@@ -1661,7 +1663,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_proto_list)
} else if (size_t count = normalizePrototypeChain(callFrame, baseValue, slot.slotBase(), propertyName, offset)) {
ASSERT(!baseValue.asCell()->structure()->isDictionary());
int listIndex;
- PolymorphicAccessStructureList* prototypeStructureList = getPolymorphicAccessStructureListSlot(stubInfo, listIndex);
+ PolymorphicAccessStructureList* prototypeStructureList = getPolymorphicAccessStructureListSlot(callFrame->globalData(), codeBlock->ownerExecutable(), stubInfo, listIndex);
if (listIndex < POLYMORPHIC_LIST_CACHE_SIZE) {
StructureChain* protoChain = structure->prototypeChain(callFrame);
@@ -3067,7 +3069,15 @@ DEFINE_STUB_FUNCTION(JSObject*, op_new_regexp)
{
STUB_INIT_STACK_FRAME(stackFrame);
- return new (stackFrame.globalData) RegExpObject(stackFrame.callFrame->lexicalGlobalObject(), stackFrame.callFrame->lexicalGlobalObject()->regExpStructure(), stackFrame.args[0].regExp());
+ CallFrame* callFrame = stackFrame.callFrame;
+
+ RegExp* regExp = stackFrame.args[0].regExp();
+ if (!regExp->isValid()) {
+ stackFrame.globalData->exception = createSyntaxError(callFrame, "Invalid flags supplied to RegExp constructor.");
+ VM_THROW_EXCEPTION();
+ }
+
+ return new (stackFrame.globalData) RegExpObject(stackFrame.callFrame->lexicalGlobalObject(), stackFrame.callFrame->lexicalGlobalObject()->regExpStructure(), regExp);
}
DEFINE_STUB_FUNCTION(EncodedJSValue, op_bitor)
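
The new early-out in op_new_regexp follows the convention used by the other stubs in this file: validate the input, store the error into globalData->exception, and unwind through VM_THROW_EXCEPTION rather than constructing an object around invalid state. The shape of the guard as a standalone sketch, with stand-in types:

    struct RegExpSketch { bool valid; };
    struct GlobalDataSketch { const char* exception = nullptr; };

    static void* newRegExpStub(GlobalDataSketch& globalData, RegExpSketch& regExp) {
        if (!regExp.valid) {
            globalData.exception = "Invalid flags supplied to RegExp constructor.";
            return nullptr;                  // stands in for VM_THROW_EXCEPTION()
        }
        return &regExp;                      // the real stub allocates a RegExpObject
    }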
@@ -3462,7 +3472,7 @@ DEFINE_STUB_FUNCTION(void*, vm_throw)
{
STUB_INIT_STACK_FRAME(stackFrame);
JSGlobalData* globalData = stackFrame.globalData;
- ExceptionHandler handler = jitThrow(globalData, stackFrame.callFrame, globalData->exception.get(), globalData->exceptionLocation);
+ ExceptionHandler handler = jitThrow(globalData, stackFrame.callFrame, globalData->exception, globalData->exceptionLocation);
STUB_SET_RETURN_ADDRESS(handler.catchRoutine);
return handler.callFrame;
}
@@ -3483,22 +3493,27 @@ MacroAssemblerCodePtr JITThunks::ctiStub(JSGlobalData* globalData, ThunkGenerato
return entry.first->second;
}
-PassRefPtr<NativeExecutable> JITThunks::hostFunctionStub(JSGlobalData* globalData, NativeFunction function)
+NativeExecutable* JITThunks::hostFunctionStub(JSGlobalData* globalData, NativeFunction function)
{
- std::pair<HostFunctionStubMap::iterator, bool> entry = m_hostFunctionStubMap.add(function, 0);
+ std::pair<HostFunctionStubMap::iterator, bool> entry = m_hostFunctionStubMap->add(function, Global<NativeExecutable>(Global<NativeExecutable>::EmptyValue));
if (entry.second)
- entry.first->second = NativeExecutable::create(JIT::compileCTINativeCall(globalData, m_executablePool, function), function, ctiNativeConstruct(), callHostFunctionAsConstructor);
- return entry.first->second;
+ entry.first->second.set(*globalData, NativeExecutable::create(*globalData, JIT::compileCTINativeCall(globalData, m_executablePool, function), function, ctiNativeConstruct(), callHostFunctionAsConstructor));
+ return entry.first->second.get();
}
-PassRefPtr<NativeExecutable> JITThunks::hostFunctionStub(JSGlobalData* globalData, NativeFunction function, ThunkGenerator generator)
+NativeExecutable* JITThunks::hostFunctionStub(JSGlobalData* globalData, NativeFunction function, ThunkGenerator generator)
{
- std::pair<HostFunctionStubMap::iterator, bool> entry = m_hostFunctionStubMap.add(function, 0);
+ std::pair<HostFunctionStubMap::iterator, bool> entry = m_hostFunctionStubMap->add(function, Global<NativeExecutable>(Global<NativeExecutable>::EmptyValue));
if (entry.second) {
MacroAssemblerCodePtr code = globalData->canUseJIT() ? generator(globalData, m_executablePool.get()) : MacroAssemblerCodePtr();
- entry.first->second = NativeExecutable::create(code, function, ctiNativeConstruct(), callHostFunctionAsConstructor);
+ entry.first->second.set(*globalData, NativeExecutable::create(*globalData, code, function, ctiNativeConstruct(), callHostFunctionAsConstructor));
}
- return entry.first->second;
+ return entry.first->second.get();
+}
+
+void JITThunks::clearHostFunctionStubs()
+{
+ m_hostFunctionStubMap.clear();
}
} // namespace JSC
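
The host-function stub changes above track the ongoing GC migration: NativeExecutable is becoming a collector-managed cell, so the cache switches from RefPtr ownership to Global<NativeExecutable> handles that keep the cells alive, and the accessors hand back raw pointers that remain valid while the handle pins them. A rough model of such a handle-backed cache; Global is a real WebKit handle class, but this is not its implementation:

    #include <cstdint>
    #include <functional>
    #include <unordered_map>

    // Stand-in for GC machinery: a handle pins a cell so the collector
    // keeps it alive for as long as the map owns the handle.
    template <typename T>
    class GlobalHandleSketch {
    public:
        void set(T* cell) { m_cell = cell; } // real code registers a GC root
        T* get() const { return m_cell; }
    private:
        T* m_cell = nullptr;
    };

    struct NativeExecutableSketch { void (*function)(); };

    class HostFunctionStubMapSketch {
    public:
        NativeExecutableSketch* stubFor(void (*function)(),
                const std::function<NativeExecutableSketch*()>& create) {
            auto& handle = m_map[reinterpret_cast<std::uintptr_t>(function)];
            if (!handle.get())
                handle.set(create());        // compile once, cache the cell
            return handle.get();             // raw pointer, kept alive by the handle
        }
    private:
        std::unordered_map<std::uintptr_t,
                           GlobalHandleSketch<NativeExecutableSketch>> m_map;
    };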
diff --git a/Source/JavaScriptCore/jit/JITStubs.h b/Source/JavaScriptCore/jit/JITStubs.h
index 65f6a55..af6e13f 100644
--- a/Source/JavaScriptCore/jit/JITStubs.h
+++ b/Source/JavaScriptCore/jit/JITStubs.h
@@ -254,6 +254,8 @@ namespace JSC {
extern "C" void ctiOpThrowNotCaught();
extern "C" EncodedJSValue ctiTrampoline(void* code, RegisterFile*, CallFrame*, void* /*unused1*/, Profiler**, JSGlobalData*);
+ template <typename T> class Global;
+
class JITThunks {
public:
JITThunks(JSGlobalData*);
@@ -273,13 +275,16 @@ namespace JSC {
MacroAssemblerCodePtr ctiStub(JSGlobalData* globalData, ThunkGenerator generator);
- PassRefPtr<NativeExecutable> hostFunctionStub(JSGlobalData* globalData, NativeFunction func);
- PassRefPtr<NativeExecutable> hostFunctionStub(JSGlobalData* globalData, NativeFunction func, ThunkGenerator generator);
+ NativeExecutable* hostFunctionStub(JSGlobalData*, NativeFunction);
+ NativeExecutable* hostFunctionStub(JSGlobalData*, NativeFunction, ThunkGenerator);
+
+ void clearHostFunctionStubs();
+
private:
typedef HashMap<ThunkGenerator, MacroAssemblerCodePtr> CTIStubMap;
CTIStubMap m_ctiStubMap;
- typedef HashMap<NativeFunction, RefPtr<NativeExecutable> > HostFunctionStubMap;
- HostFunctionStubMap m_hostFunctionStubMap;
+ typedef HashMap<NativeFunction, Global<NativeExecutable> > HostFunctionStubMap;
+ OwnPtr<HostFunctionStubMap> m_hostFunctionStubMap;
RefPtr<ExecutablePool> m_executablePool;
TrampolineStructure m_trampolineStructure;
diff --git a/Source/JavaScriptCore/jit/JSInterfaceJIT.h b/Source/JavaScriptCore/jit/JSInterfaceJIT.h
index 6453bab..5d3f239 100644
--- a/Source/JavaScriptCore/jit/JSInterfaceJIT.h
+++ b/Source/JavaScriptCore/jit/JSInterfaceJIT.h
@@ -29,6 +29,7 @@
#include "JITCode.h"
#include "JITStubs.h"
#include "JSImmediate.h"
+#include "JSValue.h"
#include "MacroAssembler.h"
#include "RegisterFile.h"
#include <wtf/AlwaysInline.h>
@@ -157,13 +158,20 @@ namespace JSC {
#error "JIT not supported on this platform."
#endif
+#if USE(JSVALUE32_64)
+ // Can't just propagate JSValue::Int32Tag as Visual Studio doesn't like it
+ static const unsigned Int32Tag = 0xfffffffd;
+ COMPILE_ASSERT(Int32Tag == JSValue::Int32Tag, Int32Tag_out_of_sync);
+#else
+ static const unsigned Int32Tag = JSImmediate::TagTypeNumber >> 32;
+#endif
inline Jump emitLoadJSCell(unsigned virtualRegisterIndex, RegisterID payload);
inline Jump emitLoadInt32(unsigned virtualRegisterIndex, RegisterID dst);
inline Jump emitLoadDouble(unsigned virtualRegisterIndex, FPRegisterID dst, RegisterID scratch);
#if USE(JSVALUE32_64)
inline Jump emitJumpIfNotJSCell(unsigned virtualRegisterIndex);
- inline Address tagFor(unsigned index, RegisterID base = callFrameRegister);
+ inline Address tagFor(int index, RegisterID base = callFrameRegister);
#endif
#if USE(JSVALUE64)
@@ -172,8 +180,10 @@ namespace JSC {
void emitFastArithImmToInt(RegisterID reg);
#endif
- inline Address payloadFor(unsigned index, RegisterID base = callFrameRegister);
- inline Address addressFor(unsigned index, RegisterID base = callFrameRegister);
+ inline Address payloadFor(int index, RegisterID base = callFrameRegister);
+ inline Address intPayloadFor(int index, RegisterID base = callFrameRegister);
+ inline Address intTagFor(int index, RegisterID base = callFrameRegister);
+ inline Address addressFor(int index, RegisterID base = callFrameRegister);
};
struct ThunkHelpers {
@@ -192,34 +202,44 @@ namespace JSC {
inline JSInterfaceJIT::Jump JSInterfaceJIT::emitJumpIfNotJSCell(unsigned virtualRegisterIndex)
{
ASSERT(static_cast<int>(virtualRegisterIndex) < FirstConstantRegisterIndex);
- return branch32(NotEqual, tagFor(virtualRegisterIndex), Imm32(JSValue::CellTag));
+ return branch32(NotEqual, tagFor(virtualRegisterIndex), TrustedImm32(JSValue::CellTag));
}
inline JSInterfaceJIT::Jump JSInterfaceJIT::emitLoadInt32(unsigned virtualRegisterIndex, RegisterID dst)
{
ASSERT(static_cast<int>(virtualRegisterIndex) < FirstConstantRegisterIndex);
loadPtr(payloadFor(virtualRegisterIndex), dst);
- return branch32(NotEqual, tagFor(virtualRegisterIndex), Imm32(JSValue::Int32Tag));
+ return branch32(NotEqual, tagFor(static_cast<int>(virtualRegisterIndex)), TrustedImm32(JSValue::Int32Tag));
}
- inline JSInterfaceJIT::Address JSInterfaceJIT::tagFor(unsigned virtualRegisterIndex, RegisterID base)
+ inline JSInterfaceJIT::Address JSInterfaceJIT::tagFor(int virtualRegisterIndex, RegisterID base)
{
- ASSERT(static_cast<int>(virtualRegisterIndex) < FirstConstantRegisterIndex);
- return Address(base, (virtualRegisterIndex * sizeof(Register)) + OBJECT_OFFSETOF(JSValue, u.asBits.tag));
+ ASSERT(virtualRegisterIndex < FirstConstantRegisterIndex);
+ return Address(base, (static_cast<unsigned>(virtualRegisterIndex) * sizeof(Register)) + OBJECT_OFFSETOF(JSValue, u.asBits.tag));
}
- inline JSInterfaceJIT::Address JSInterfaceJIT::payloadFor(unsigned virtualRegisterIndex, RegisterID base)
+ inline JSInterfaceJIT::Address JSInterfaceJIT::payloadFor(int virtualRegisterIndex, RegisterID base)
{
- ASSERT(static_cast<int>(virtualRegisterIndex) < FirstConstantRegisterIndex);
- return Address(base, (virtualRegisterIndex * sizeof(Register)) + OBJECT_OFFSETOF(JSValue, u.asBits.payload));
+ ASSERT(virtualRegisterIndex < FirstConstantRegisterIndex);
+ return Address(base, (static_cast<unsigned>(virtualRegisterIndex) * sizeof(Register)) + OBJECT_OFFSETOF(JSValue, u.asBits.payload));
+ }
+
+ inline JSInterfaceJIT::Address JSInterfaceJIT::intPayloadFor(int virtualRegisterIndex, RegisterID base)
+ {
+ return payloadFor(virtualRegisterIndex, base);
+ }
+
+ inline JSInterfaceJIT::Address JSInterfaceJIT::intTagFor(int virtualRegisterIndex, RegisterID base)
+ {
+ return tagFor(virtualRegisterIndex, base);
}
inline JSInterfaceJIT::Jump JSInterfaceJIT::emitLoadDouble(unsigned virtualRegisterIndex, FPRegisterID dst, RegisterID scratch)
{
ASSERT(static_cast<int>(virtualRegisterIndex) < FirstConstantRegisterIndex);
loadPtr(tagFor(virtualRegisterIndex), scratch);
- Jump isDouble = branch32(Below, scratch, Imm32(JSValue::LowestTag));
- Jump notInt = branch32(NotEqual, scratch, Imm32(JSValue::Int32Tag));
+ Jump isDouble = branch32(Below, scratch, TrustedImm32(JSValue::LowestTag));
+ Jump notInt = branch32(NotEqual, scratch, TrustedImm32(JSValue::Int32Tag));
loadPtr(payloadFor(virtualRegisterIndex), scratch);
convertInt32ToDouble(scratch, dst);
Jump done = jump();
@@ -274,17 +294,28 @@ namespace JSC {
#endif
#if USE(JSVALUE64)
- inline JSInterfaceJIT::Address JSInterfaceJIT::payloadFor(unsigned virtualRegisterIndex, RegisterID base)
+ inline JSInterfaceJIT::Address JSInterfaceJIT::payloadFor(int virtualRegisterIndex, RegisterID base)
{
- ASSERT(static_cast<int>(virtualRegisterIndex) < FirstConstantRegisterIndex);
+ ASSERT(virtualRegisterIndex < FirstConstantRegisterIndex);
return addressFor(virtualRegisterIndex, base);
}
+
+ inline JSInterfaceJIT::Address JSInterfaceJIT::intPayloadFor(int virtualRegisterIndex, RegisterID base)
+ {
+ ASSERT(virtualRegisterIndex < FirstConstantRegisterIndex);
+ return Address(base, (static_cast<unsigned>(virtualRegisterIndex) * sizeof(Register)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
+ }
+ inline JSInterfaceJIT::Address JSInterfaceJIT::intTagFor(int virtualRegisterIndex, RegisterID base)
+ {
+ ASSERT(virtualRegisterIndex < FirstConstantRegisterIndex);
+ return Address(base, (static_cast<unsigned>(virtualRegisterIndex) * sizeof(Register)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
+ }
#endif
- inline JSInterfaceJIT::Address JSInterfaceJIT::addressFor(unsigned virtualRegisterIndex, RegisterID base)
+ inline JSInterfaceJIT::Address JSInterfaceJIT::addressFor(int virtualRegisterIndex, RegisterID base)
{
- ASSERT(static_cast<int>(virtualRegisterIndex) < FirstConstantRegisterIndex);
- return Address(base, (virtualRegisterIndex * sizeof(Register)));
+ ASSERT(virtualRegisterIndex < FirstConstantRegisterIndex);
+ return Address(base, (static_cast<unsigned>(virtualRegisterIndex) * sizeof(Register)));
}
}
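
Widening tagFor/payloadFor/addressFor from unsigned to int lets callers name call-frame header slots, which sit at negative virtual register indices (SpecializedThunkJIT.h below starts passing RegisterFile::CallerFrame), while routing the multiply through unsigned keeps the offset arithmetic well defined. The arithmetic restated standalone, with an illustrative negative index:

    #include <cstdint>

    struct RegisterSketch { uint8_t bytes[8]; };   // sizeof(Register) == 8 here

    // The unsigned multiply wraps mod 2^32; converting back to int32_t
    // recovers the negative byte offset for header registers, exactly as
    // the patched accessors do on 32-bit targets.
    static int32_t byteOffsetFor(int virtualRegisterIndex) {
        uint32_t raw = static_cast<uint32_t>(virtualRegisterIndex)
                     * static_cast<uint32_t>(sizeof(RegisterSketch));
        return static_cast<int32_t>(raw);
    }

    // e.g. a header slot at illustrative index -4: byteOffsetFor(-4) == -32,
    // i.e. four Register slots below the frame pointer.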
diff --git a/Source/JavaScriptCore/jit/SpecializedThunkJIT.h b/Source/JavaScriptCore/jit/SpecializedThunkJIT.h
index 5c593d9..8c79692 100644
--- a/Source/JavaScriptCore/jit/SpecializedThunkJIT.h
+++ b/Source/JavaScriptCore/jit/SpecializedThunkJIT.h
@@ -43,7 +43,7 @@ namespace JSC {
, m_pool(pool)
{
// Check that we have the expected number of arguments
- m_failures.append(branch32(NotEqual, Address(callFrameRegister, RegisterFile::ArgumentCount * (int)sizeof(Register)), Imm32(expectedArgCount + 1)));
+ m_failures.append(branch32(NotEqual, Address(callFrameRegister, RegisterFile::ArgumentCount * (int)sizeof(Register)), TrustedImm32(expectedArgCount + 1)));
}
void loadDoubleArgument(int argument, FPRegisterID dst, RegisterID scratch)
@@ -61,7 +61,7 @@ namespace JSC {
void loadJSStringArgument(int argument, RegisterID dst)
{
loadCellArgument(argument, dst);
- m_failures.append(branchPtr(NotEqual, Address(dst, 0), ImmPtr(m_globalData->jsStringVPtr)));
+ m_failures.append(branchPtr(NotEqual, Address(dst, 0), TrustedImmPtr(m_globalData->jsStringVPtr)));
m_failures.append(branchTest32(NonZero, Address(dst, OBJECT_OFFSETOF(JSString, m_fiberCount))));
}
@@ -87,7 +87,7 @@ namespace JSC {
{
if (src != regT0)
move(src, regT0);
- loadPtr(Address(callFrameRegister, RegisterFile::CallerFrame * (int)sizeof(Register)), callFrameRegister);
+ loadPtr(payloadFor(RegisterFile::CallerFrame, callFrameRegister), callFrameRegister);
ret();
}
@@ -101,7 +101,7 @@ namespace JSC {
loadPtr(Address(stackPointerRegister, OBJECT_OFFSETOF(JSValue, u.asBits.tag) - sizeof(double)), regT1);
loadPtr(Address(stackPointerRegister, OBJECT_OFFSETOF(JSValue, u.asBits.payload) - sizeof(double)), regT0);
#endif
- loadPtr(Address(callFrameRegister, RegisterFile::CallerFrame * (int)sizeof(Register)), callFrameRegister);
+ loadPtr(payloadFor(RegisterFile::CallerFrame, callFrameRegister), callFrameRegister);
ret();
}
@@ -110,7 +110,7 @@ namespace JSC {
if (src != regT0)
move(src, regT0);
tagReturnAsInt32();
- loadPtr(Address(callFrameRegister, RegisterFile::CallerFrame * (int)sizeof(Register)), callFrameRegister);
+ loadPtr(payloadFor(RegisterFile::CallerFrame, callFrameRegister), callFrameRegister);
ret();
}
@@ -119,7 +119,7 @@ namespace JSC {
if (src != regT0)
move(src, regT0);
tagReturnAsJSCell();
- loadPtr(Address(callFrameRegister, RegisterFile::CallerFrame * (int)sizeof(Register)), callFrameRegister);
+ loadPtr(payloadFor(RegisterFile::CallerFrame, callFrameRegister), callFrameRegister);
ret();
}
@@ -141,14 +141,14 @@ namespace JSC {
#if USE(JSVALUE64)
orPtr(tagTypeNumberRegister, regT0);
#else
- move(Imm32(JSValue::Int32Tag), regT1);
+ move(TrustedImm32(JSValue::Int32Tag), regT1);
#endif
}
void tagReturnAsJSCell()
{
#if USE(JSVALUE32_64)
- move(Imm32(JSValue::CellTag), regT1);
+ move(TrustedImm32(JSValue::CellTag), regT1);
#endif
}
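
The repeated epilogue change above routes the caller-frame reload through payloadFor(RegisterFile::CallerFrame, callFrameRegister) instead of hand-computing RegisterFile::CallerFrame * sizeof(Register); the CallFrame* lives in the payload word of its header slot, so the accessor now covers both value representations. A compressed model of the thunk return sequence:

    #include <cstdint>

    struct CallFrameSketch {
        CallFrameSketch* callerFrame;   // payload word of the CallerFrame slot
    };

    struct ThunkStateSketch {
        CallFrameSketch* callFrameRegister;
        int32_t regT0;                  // result payload
        uint32_t regT1;                 // result tag (JSVALUE32_64 only)
    };

    static const uint32_t Int32TagSketch = 0xfffffffdu; // matches JSInterfaceJIT.h

    static void returnInt32(ThunkStateSketch& s, int32_t result) {
        s.regT0 = result;
        s.regT1 = Int32TagSketch;                              // tagReturnAsInt32()
        s.callFrameRegister = s.callFrameRegister->callerFrame; // payloadFor(CallerFrame)
        // ret();
    }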
diff --git a/Source/JavaScriptCore/jit/ThunkGenerators.cpp b/Source/JavaScriptCore/jit/ThunkGenerators.cpp
index 9b40f12..1201696 100644
--- a/Source/JavaScriptCore/jit/ThunkGenerators.cpp
+++ b/Source/JavaScriptCore/jit/ThunkGenerators.cpp
@@ -57,8 +57,8 @@ static void stringCharLoad(SpecializedThunkJIT& jit)
static void charToString(SpecializedThunkJIT& jit, JSGlobalData* globalData, MacroAssembler::RegisterID src, MacroAssembler::RegisterID dst, MacroAssembler::RegisterID scratch)
{
- jit.appendFailure(jit.branch32(MacroAssembler::AboveOrEqual, src, MacroAssembler::Imm32(0x100)));
- jit.move(MacroAssembler::ImmPtr(globalData->smallStrings.singleCharacterStrings()), scratch);
+ jit.appendFailure(jit.branch32(MacroAssembler::AboveOrEqual, src, MacroAssembler::TrustedImm32(0x100)));
+ jit.move(MacroAssembler::TrustedImmPtr(globalData->smallStrings.singleCharacterStrings()), scratch);
jit.loadPtr(MacroAssembler::BaseIndex(scratch, src, MacroAssembler::ScalePtr, 0), dst);
jit.appendFailure(jit.branchTestPtr(MacroAssembler::Zero, dst));
}
@@ -115,16 +115,16 @@ MacroAssemblerCodePtr powThunkGenerator(JSGlobalData* globalData, ExecutablePool
jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
MacroAssembler::Jump nonIntExponent;
jit.loadInt32Argument(1, SpecializedThunkJIT::regT0, nonIntExponent);
- jit.appendFailure(jit.branch32(MacroAssembler::LessThan, SpecializedThunkJIT::regT0, MacroAssembler::Imm32(0)));
+ jit.appendFailure(jit.branch32(MacroAssembler::LessThan, SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(0)));
MacroAssembler::Jump exponentIsZero = jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT0);
MacroAssembler::Label startLoop(jit.label());
- MacroAssembler::Jump exponentIsEven = jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT0, MacroAssembler::Imm32(1));
+ MacroAssembler::Jump exponentIsEven = jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(1));
jit.mulDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
exponentIsEven.link(&jit);
jit.mulDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
- jit.rshift32(MacroAssembler::Imm32(1), SpecializedThunkJIT::regT0);
+ jit.rshift32(MacroAssembler::TrustedImm32(1), SpecializedThunkJIT::regT0);
jit.branchTest32(MacroAssembler::NonZero, SpecializedThunkJIT::regT0).linkTo(startLoop, &jit);
exponentIsZero.link(&jit);
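
The loop in powThunkGenerator is exponentiation by squaring over a non-negative int32 exponent: each iteration multiplies the accumulator when the low exponent bit is set, squares the base, and halves the exponent. What the generated code computes, restated in C++ (fpRegT1 plays result, fpRegT0 plays base, regT0 plays exponent):

    #include <cstdint>

    static double powIntSketch(double base, int32_t exponent) {
        // The thunk appends a failure for negative exponents
        // (branch32(LessThan, regT0, TrustedImm32(0))), so the fast
        // path only ever sees exponent >= 0.
        double result = 1.0;                // exponentIsZero jumps straight here
        while (exponent) {
            if (exponent & 1)               // exponentIsEven skips this multiply
                result *= base;             // mulDouble(fpRegT0, fpRegT1)
            base *= base;                   // mulDouble(fpRegT0, fpRegT0)
            exponent >>= 1;                 // rshift32(TrustedImm32(1), regT0)
        }
        return result;
    }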