path: root/JavaScriptCore/jit/JITOpcodes.cpp
author: Steve Block <steveblock@google.com> 2010-02-02 14:57:50 +0000
committer: Steve Block <steveblock@google.com> 2010-02-04 15:06:55 +0000
commit: d0825bca7fe65beaee391d30da42e937db621564 (patch)
tree: 7461c49eb5844ffd1f35d1ba2c8b7584c1620823 /JavaScriptCore/jit/JITOpcodes.cpp
parent: 3db770bd97c5a59b6c7574ca80a39e5a51c1defd (diff)
Merge webkit.org at r54127 : Initial merge by git
Change-Id: Ib661abb595522f50ea406f72d3a0ce17f7193c82
Diffstat (limited to 'JavaScriptCore/jit/JITOpcodes.cpp')
-rw-r--r--  JavaScriptCore/jit/JITOpcodes.cpp | 273
1 file changed, 10 insertions(+), 263 deletions(-)
diff --git a/JavaScriptCore/jit/JITOpcodes.cpp b/JavaScriptCore/jit/JITOpcodes.cpp
index 77fec28..d9a32d9 100644
--- a/JavaScriptCore/jit/JITOpcodes.cpp
+++ b/JavaScriptCore/jit/JITOpcodes.cpp
@@ -137,7 +137,7 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
loadPtr(Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_jitCode)), regT0);
jump(regT0);
-#if PLATFORM(X86) || PLATFORM(ARM_TRADITIONAL)
+#if CPU(X86) || CPU(ARM_TRADITIONAL)
Label nativeCallThunk = align();
preserveReturnAddressAfterCall(regT0);
emitPutToCallFrameHeader(regT0, RegisterFile::ReturnPC); // Push return address
@@ -148,7 +148,7 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT1);
emitPutToCallFrameHeader(regT1, RegisterFile::ScopeChain);
-#if PLATFORM(X86)
+#if CPU(X86)
emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT0);
/* We have two structs that we use to describe the stackframe we set up for our
@@ -160,7 +160,7 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
* stack pointer by the right amount after the call.
*/
-#if COMPILER(MSVC) || PLATFORM(LINUX)
+#if COMPILER(MSVC) || OS(LINUX)
#if COMPILER(MSVC)
#pragma pack(push)
#pragma pack(4)
@@ -223,7 +223,7 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
storePtr(regT2, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, thisValue) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
storePtr(regT3, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, thisValue) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
-#if COMPILER(MSVC) || PLATFORM(LINUX)
+#if COMPILER(MSVC) || OS(LINUX)
// ArgList is passed by reference so is stackPointerRegister + 4 * sizeof(Register)
addPtr(Imm32(OBJECT_OFFSETOF(NativeCallFrameStructure, result)), stackPointerRegister, X86Registers::ecx);
@@ -248,7 +248,7 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
// so pull them off now
addPtr(Imm32(NativeCallFrameSize - sizeof(NativeFunctionCalleeSignature)), stackPointerRegister);
-#elif PLATFORM(ARM_TRADITIONAL)
+#elif CPU(ARM_TRADITIONAL)
emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT0);
// Allocate stack space for our arglist
@@ -409,58 +409,6 @@ void JIT::emit_op_jmp(Instruction* currentInstruction)
addJump(jump(), target);
}
-void JIT::emit_op_loop(Instruction* currentInstruction)
-{
- unsigned target = currentInstruction[1].u.operand;
- emitTimeoutCheck();
- addJump(jump(), target);
-}
-
-void JIT::emit_op_loop_if_less(Instruction* currentInstruction)
-{
- unsigned op1 = currentInstruction[1].u.operand;
- unsigned op2 = currentInstruction[2].u.operand;
- unsigned target = currentInstruction[3].u.operand;
-
- emitTimeoutCheck();
-
- if (isOperandConstantImmediateInt(op1)) {
- emitLoad(op2, regT1, regT0);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- addJump(branch32(GreaterThan, regT0, Imm32(getConstantOperand(op1).asInt32())), target);
- return;
- }
-
- if (isOperandConstantImmediateInt(op2)) {
- emitLoad(op1, regT1, regT0);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- addJump(branch32(LessThan, regT0, Imm32(getConstantOperand(op2).asInt32())), target);
- return;
- }
-
- emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
- addJump(branch32(LessThan, regT0, regT2), target);
-}
-
-void JIT::emitSlow_op_loop_if_less(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned op1 = currentInstruction[1].u.operand;
- unsigned op2 = currentInstruction[2].u.operand;
- unsigned target = currentInstruction[3].u.operand;
-
- if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
- linkSlowCase(iter); // int32 check
- linkSlowCase(iter); // int32 check
-
- JITStubCall stubCall(this, cti_op_jless);
- stubCall.addArgument(op1);
- stubCall.addArgument(op2);
- stubCall.call();
- emitJumpSlowToHot(branchTest32(NonZero, regT0), target);
-}
-
void JIT::emit_op_loop_if_lesseq(Instruction* currentInstruction)
{
unsigned op1 = currentInstruction[1].u.operand;
@@ -709,84 +657,6 @@ void JIT::emit_op_strcat(Instruction* currentInstruction)
stubCall.call(currentInstruction[1].u.operand);
}
-void JIT::emit_op_loop_if_true(Instruction* currentInstruction)
-{
- unsigned cond = currentInstruction[1].u.operand;
- unsigned target = currentInstruction[2].u.operand;
-
- emitTimeoutCheck();
-
- emitLoad(cond, regT1, regT0);
-
- Jump isNotInteger = branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag));
- addJump(branch32(NotEqual, regT0, Imm32(0)), target);
- Jump isNotZero = jump();
-
- isNotInteger.link(this);
-
- addJump(branch32(Equal, regT1, Imm32(JSValue::TrueTag)), target);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::FalseTag)));
-
- isNotZero.link(this);
-}
-
-void JIT::emitSlow_op_loop_if_true(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned cond = currentInstruction[1].u.operand;
- unsigned target = currentInstruction[2].u.operand;
-
- linkSlowCase(iter);
-
- JITStubCall stubCall(this, cti_op_jtrue);
- stubCall.addArgument(cond);
- stubCall.call();
- emitJumpSlowToHot(branchTest32(NonZero, regT0), target);
-}
-
-void JIT::emit_op_loop_if_false(Instruction* currentInstruction)
-{
- unsigned cond = currentInstruction[1].u.operand;
- unsigned target = currentInstruction[2].u.operand;
-
- emitTimeoutCheck();
-
- emitLoad(cond, regT1, regT0);
-
- Jump isTrue = branch32(Equal, regT1, Imm32(JSValue::TrueTag));
- addJump(branch32(Equal, regT1, Imm32(JSValue::FalseTag)), target);
-
- Jump isNotInteger = branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag));
- Jump isTrue2 = branch32(NotEqual, regT0, Imm32(0));
- addJump(jump(), target);
-
- if (supportsFloatingPoint()) {
- isNotInteger.link(this);
-
- addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));
-
- zeroDouble(fpRegT0);
- emitLoadDouble(cond, fpRegT1);
- addJump(branchDouble(DoubleEqualOrUnordered, fpRegT0, fpRegT1), target);
- } else
- addSlowCase(isNotInteger);
-
- isTrue.link(this);
- isTrue2.link(this);
-}
-
-void JIT::emitSlow_op_loop_if_false(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned cond = currentInstruction[1].u.operand;
- unsigned target = currentInstruction[2].u.operand;
-
- linkSlowCase(iter);
-
- JITStubCall stubCall(this, cti_op_jtrue);
- stubCall.addArgument(cond);
- stubCall.call();
- emitJumpSlowToHot(branchTest32(Zero, regT0), target);
-}
-
void JIT::emit_op_resolve_base(Instruction* currentInstruction)
{
JITStubCall stubCall(this, cti_op_resolve_base);
@@ -1731,7 +1601,7 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
emitPutToCallFrameHeader(regT1, RegisterFile::ScopeChain);
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, X86Registers::ecx);
// Allocate stack space for our arglist
@@ -1767,7 +1637,7 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
call(Address(X86Registers::esi, OBJECT_OFFSETOF(JSFunction, m_data)));
addPtr(Imm32(sizeof(ArgList)), stackPointerRegister);
-#elif PLATFORM(X86)
+#elif CPU(X86)
emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT0);
/* We have two structs that we use to describe the stackframe we set up for our
@@ -1778,7 +1648,7 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
* not the rest of the callframe so we need a nice way to ensure we increment the
* stack pointer by the right amount after the call.
*/
-#if COMPILER(MSVC) || PLATFORM(LINUX)
+#if COMPILER(MSVC) || OS(LINUX)
struct NativeCallFrameStructure {
// CallFrame* callFrame; // passed in EDX
JSObject* callee;
@@ -1831,7 +1701,7 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
loadPtr(Address(regT1, -(int)sizeof(Register)), regT1);
storePtr(regT1, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, thisValue)));
-#if COMPILER(MSVC) || PLATFORM(LINUX)
+#if COMPILER(MSVC) || OS(LINUX)
// ArgList is passed by reference so is stackPointerRegister + 4 * sizeof(Register)
addPtr(Imm32(OBJECT_OFFSETOF(NativeCallFrameStructure, result)), stackPointerRegister, X86Registers::ecx);
@@ -1859,7 +1729,7 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
// so pull them off now
addPtr(Imm32(NativeCallFrameSize - sizeof(NativeFunctionCalleeSignature)), stackPointerRegister);
-#elif PLATFORM(ARM)
+#elif CPU(ARM)
emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT0);
// Allocate stack space for our arglist
@@ -2005,47 +1875,6 @@ void JIT::emit_op_jmp(Instruction* currentInstruction)
RECORD_JUMP_TARGET(target);
}
-void JIT::emit_op_loop(Instruction* currentInstruction)
-{
- emitTimeoutCheck();
-
- unsigned target = currentInstruction[1].u.operand;
- addJump(jump(), target);
-}
-
-void JIT::emit_op_loop_if_less(Instruction* currentInstruction)
-{
- emitTimeoutCheck();
-
- unsigned op1 = currentInstruction[1].u.operand;
- unsigned op2 = currentInstruction[2].u.operand;
- unsigned target = currentInstruction[3].u.operand;
- if (isOperandConstantImmediateInt(op2)) {
- emitGetVirtualRegister(op1, regT0);
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
-#if USE(JSVALUE64)
- int32_t op2imm = getConstantOperandImmediateInt(op2);
-#else
- int32_t op2imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)));
-#endif
- addJump(branch32(LessThan, regT0, Imm32(op2imm)), target);
- } else if (isOperandConstantImmediateInt(op1)) {
- emitGetVirtualRegister(op2, regT0);
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
-#if USE(JSVALUE64)
- int32_t op1imm = getConstantOperandImmediateInt(op1);
-#else
- int32_t op1imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op1)));
-#endif
- addJump(branch32(GreaterThan, regT0, Imm32(op1imm)), target);
- } else {
- emitGetVirtualRegisters(op1, regT0, op2, regT1);
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
- emitJumpSlowCaseIfNotImmediateInteger(regT1);
- addJump(branch32(LessThan, regT0, regT1), target);
- }
-}
-
void JIT::emit_op_loop_if_lesseq(Instruction* currentInstruction)
{
emitTimeoutCheck();
@@ -2284,40 +2113,6 @@ void JIT::emit_op_strcat(Instruction* currentInstruction)
stubCall.call(currentInstruction[1].u.operand);
}
-void JIT::emit_op_loop_if_true(Instruction* currentInstruction)
-{
- emitTimeoutCheck();
-
- unsigned target = currentInstruction[2].u.operand;
- emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
-
- Jump isZero = branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsNumber(m_globalData, 0))));
- addJump(emitJumpIfImmediateInteger(regT0), target);
-
- addJump(branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsBoolean(true)))), target);
- addSlowCase(branchPtr(NotEqual, regT0, ImmPtr(JSValue::encode(jsBoolean(false)))));
-
- isZero.link(this);
-};
-
-void JIT::emit_op_loop_if_false(Instruction* currentInstruction)
-{
- emitTimeoutCheck();
-
-
- unsigned target = currentInstruction[2].u.operand;
- emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
-
- addJump(branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsNumber(m_globalData, 0)))), target);
- Jump isNonZero = emitJumpIfImmediateInteger(regT0);
-
- addJump(branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsBoolean(false)))), target);
- addSlowCase(branchPtr(NotEqual, regT0, ImmPtr(JSValue::encode(jsBoolean(true)))));
-
- isNonZero.link(this);
- RECORD_JUMP_TARGET(target);
-};
-
void JIT::emit_op_resolve_base(Instruction* currentInstruction)
{
JITStubCall stubCall(this, cti_op_resolve_base);
@@ -2997,36 +2792,6 @@ void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCas
stubCall.call(dst);
}
-void JIT::emitSlow_op_loop_if_less(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned op1 = currentInstruction[1].u.operand;
- unsigned op2 = currentInstruction[2].u.operand;
- unsigned target = currentInstruction[3].u.operand;
- if (isOperandConstantImmediateInt(op2)) {
- linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_jless);
- stubCall.addArgument(regT0);
- stubCall.addArgument(op2, regT2);
- stubCall.call();
- emitJumpSlowToHot(branchTest32(NonZero, regT0), target);
- } else if (isOperandConstantImmediateInt(op1)) {
- linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_jless);
- stubCall.addArgument(op1, regT2);
- stubCall.addArgument(regT0);
- stubCall.call();
- emitJumpSlowToHot(branchTest32(NonZero, regT0), target);
- } else {
- linkSlowCase(iter);
- linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_jless);
- stubCall.addArgument(regT0);
- stubCall.addArgument(regT1);
- stubCall.call();
- emitJumpSlowToHot(branchTest32(NonZero, regT0), target);
- }
-}
-
void JIT::emitSlow_op_loop_if_lesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
unsigned op2 = currentInstruction[2].u.operand;
@@ -3067,24 +2832,6 @@ void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCas
stubPutByValCall.call();
}
-void JIT::emitSlow_op_loop_if_true(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_jtrue);
- stubCall.addArgument(regT0);
- stubCall.call();
- emitJumpSlowToHot(branchTest32(NonZero, regT0), currentInstruction[2].u.operand);
-}
-
-void JIT::emitSlow_op_loop_if_false(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_jtrue);
- stubCall.addArgument(regT0);
- stubCall.call();
- emitJumpSlowToHot(branchTest32(Zero, regT0), currentInstruction[2].u.operand); // inverted!
-}
-
void JIT::emitSlow_op_not(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
linkSlowCase(iter);