path: root/JavaScriptCore/jit
author     Ben Murdoch <benm@google.com>  2010-05-11 18:35:50 +0100
committer  Ben Murdoch <benm@google.com>  2010-05-14 10:23:05 +0100
commit     21939df44de1705786c545cd1bf519d47250322d (patch)
tree       ef56c310f5c0cdc379c2abb2e212308a3281ce20 /JavaScriptCore/jit
parent     4ff1d8891d520763f17675827154340c7c740f90 (diff)
download   external_webkit-21939df44de1705786c545cd1bf519d47250322d.zip
           external_webkit-21939df44de1705786c545cd1bf519d47250322d.tar.gz
           external_webkit-21939df44de1705786c545cd1bf519d47250322d.tar.bz2
Merge Webkit at r58956: Initial merge by Git.
Change-Id: I1d9fb60ea2c3f2ddc04c17a871acdb39353be228
Diffstat (limited to 'JavaScriptCore/jit')
-rw-r--r--  JavaScriptCore/jit/ExecutableAllocator.h        |    4
-rw-r--r--  JavaScriptCore/jit/ExecutableAllocatorPosix.cpp |   15
-rw-r--r--  JavaScriptCore/jit/JIT.cpp                      |    5
-rw-r--r--  JavaScriptCore/jit/JIT.h                        |  143
-rw-r--r--  JavaScriptCore/jit/JITArithmetic.cpp            | 1465
-rw-r--r--  JavaScriptCore/jit/JITArithmetic32_64.cpp       | 1411
-rw-r--r--  JavaScriptCore/jit/JITInlineMethods.h           |   34
-rw-r--r--  JavaScriptCore/jit/JITOpcodes.cpp               |   14
-rw-r--r--  JavaScriptCore/jit/JITStubCall.h                |    4
-rw-r--r--  JavaScriptCore/jit/JITStubs.cpp                 |   16
-rw-r--r--  JavaScriptCore/jit/JITStubs.h                   |   14
-rw-r--r--  JavaScriptCore/jit/JSInterfaceJIT.h             |  302
-rw-r--r--  JavaScriptCore/jit/SpecializedThunkJIT.h        |  174
-rw-r--r--  JavaScriptCore/jit/ThunkGenerators.cpp          |  159
-rw-r--r--  JavaScriptCore/jit/ThunkGenerators.h            |   46
15 files changed, 2431 insertions, 1375 deletions
diff --git a/JavaScriptCore/jit/ExecutableAllocator.h b/JavaScriptCore/jit/ExecutableAllocator.h
index 8f46dee..610b788 100644
--- a/JavaScriptCore/jit/ExecutableAllocator.h
+++ b/JavaScriptCore/jit/ExecutableAllocator.h
@@ -247,7 +247,9 @@ public:
{
User::IMB_Range(code, static_cast<char*>(code) + size);
}
-#elif CPU(ARM_TRADITIONAL) && OS(LINUX)
+#elif CPU(ARM_TRADITIONAL) && OS(LINUX) && COMPILER(RVCT)
+ static __asm void cacheFlush(void* code, size_t size);
+#elif CPU(ARM_TRADITIONAL) && OS(LINUX) && COMPILER(GCC)
static void cacheFlush(void* code, size_t size)
{
asm volatile (
diff --git a/JavaScriptCore/jit/ExecutableAllocatorPosix.cpp b/JavaScriptCore/jit/ExecutableAllocatorPosix.cpp
index 06375ad..eee8a7e 100644
--- a/JavaScriptCore/jit/ExecutableAllocatorPosix.cpp
+++ b/JavaScriptCore/jit/ExecutableAllocatorPosix.cpp
@@ -80,6 +80,21 @@ void ExecutableAllocator::reprotectRegion(void* start, size_t size, ProtectionSe
}
#endif
+#if CPU(ARM_TRADITIONAL) && OS(LINUX) && COMPILER(RVCT)
+__asm void ExecutableAllocator::cacheFlush(void* code, size_t size)
+{
+ ARM
+ push {r7}
+ add r1, r1, r0
+ mov r7, #0xf0000
+ add r7, r7, #0x2
+ mov r2, #0x0
+ svc #0x0
+ pop {r7}
+ bx lr
+}
+#endif
+
}
#endif // HAVE(ASSEMBLER)
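Note on the RVCT stub added above: it hand-codes the ARM-Linux cacheflush system call, loading r7 with 0xf0000 + 2 (__ARM_NR_cacheflush), with r0/r1 delimiting [code, code + size) and r2 = 0 as the flags word. A rough C++ sketch of the equivalent call through the generic syscall(2) interface (the number is spelled out because __ARM_NR_cacheflush is only defined when targeting ARM):

    #include <cstddef>
    #include <unistd.h>
    #include <sys/syscall.h>

    // Flush the instruction cache for [code, code + size), as the RVCT
    // assembly above does via svc #0 with r7 = 0xf0002.
    static void cacheFlushViaSyscall(void* code, size_t size)
    {
        syscall(0xf0002 /* __ARM_NR_cacheflush */, code,
                static_cast<char*>(code) + size, 0 /* flags, must be 0 */);
    }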
diff --git a/JavaScriptCore/jit/JIT.cpp b/JavaScriptCore/jit/JIT.cpp
index ec23d8c..f7b06a0 100644
--- a/JavaScriptCore/jit/JIT.cpp
+++ b/JavaScriptCore/jit/JIT.cpp
@@ -200,7 +200,6 @@ void JIT::privateCompileMainPass()
DEFINE_BINARY_OP(op_in)
DEFINE_BINARY_OP(op_less)
DEFINE_BINARY_OP(op_lesseq)
- DEFINE_BINARY_OP(op_urshift)
DEFINE_UNARY_OP(op_is_boolean)
DEFINE_UNARY_OP(op_is_function)
DEFINE_UNARY_OP(op_is_number)
@@ -251,6 +250,7 @@ void JIT::privateCompileMainPass()
DEFINE_OP(op_jneq_ptr)
DEFINE_OP(op_jnless)
DEFINE_OP(op_jless)
+ DEFINE_OP(op_jlesseq)
DEFINE_OP(op_jnlesseq)
DEFINE_OP(op_jsr)
DEFINE_OP(op_jtrue)
@@ -301,6 +301,7 @@ void JIT::privateCompileMainPass()
DEFINE_OP(op_resolve_with_base)
DEFINE_OP(op_ret)
DEFINE_OP(op_rshift)
+ DEFINE_OP(op_urshift)
DEFINE_OP(op_sret)
DEFINE_OP(op_strcat)
DEFINE_OP(op_stricteq)
@@ -401,6 +402,7 @@ void JIT::privateCompileSlowCases()
DEFINE_SLOWCASE_OP(op_jfalse)
DEFINE_SLOWCASE_OP(op_jnless)
DEFINE_SLOWCASE_OP(op_jless)
+ DEFINE_SLOWCASE_OP(op_jlesseq)
DEFINE_SLOWCASE_OP(op_jnlesseq)
DEFINE_SLOWCASE_OP(op_jtrue)
DEFINE_SLOWCASE_OP(op_loop_if_less)
@@ -427,6 +429,7 @@ void JIT::privateCompileSlowCases()
DEFINE_SLOWCASE_OP(op_resolve_global)
#endif
DEFINE_SLOWCASE_OP(op_rshift)
+ DEFINE_SLOWCASE_OP(op_urshift)
DEFINE_SLOWCASE_OP(op_stricteq)
DEFINE_SLOWCASE_OP(op_sub)
DEFINE_SLOWCASE_OP(op_to_jsnumber)
diff --git a/JavaScriptCore/jit/JIT.h b/JavaScriptCore/jit/JIT.h
index e757112..a7e8890 100644
--- a/JavaScriptCore/jit/JIT.h
+++ b/JavaScriptCore/jit/JIT.h
@@ -40,15 +40,10 @@
#include "CodeBlock.h"
#include "Interpreter.h"
-#include "JITCode.h"
-#include "JITStubs.h"
+#include "JSInterfaceJIT.h"
#include "Opcode.h"
-#include "RegisterFile.h"
-#include "MacroAssembler.h"
#include "Profiler.h"
#include <bytecode/SamplingTool.h>
-#include <wtf/AlwaysInline.h>
-#include <wtf/Vector.h>
namespace JSC {
@@ -170,128 +165,13 @@ namespace JSC {
void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction);
void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, FunctionPtr newCalleeFunction);
- class JIT : private MacroAssembler {
+ class JIT : private JSInterfaceJIT {
friend class JITStubCall;
using MacroAssembler::Jump;
using MacroAssembler::JumpList;
using MacroAssembler::Label;
- // NOTES:
- //
- // regT0 has two special meanings. The return value from a stub
- // call will always be in regT0, and by default (unless
- // a register is specified) emitPutVirtualRegister() will store
- // the value from regT0.
- //
- // regT3 is required to be callee-preserved.
- //
- // tempRegister2 has no such dependencies. It is important that
- // on x86/x86-64 it is ecx for performance reasons, since the
- // MacroAssembler will need to plant register swaps if it is not -
- // however the code will still function correctly.
-#if CPU(X86_64)
- static const RegisterID returnValueRegister = X86Registers::eax;
- static const RegisterID cachedResultRegister = X86Registers::eax;
- static const RegisterID firstArgumentRegister = X86Registers::edi;
-
- static const RegisterID timeoutCheckRegister = X86Registers::r12;
- static const RegisterID callFrameRegister = X86Registers::r13;
- static const RegisterID tagTypeNumberRegister = X86Registers::r14;
- static const RegisterID tagMaskRegister = X86Registers::r15;
-
- static const RegisterID regT0 = X86Registers::eax;
- static const RegisterID regT1 = X86Registers::edx;
- static const RegisterID regT2 = X86Registers::ecx;
- static const RegisterID regT3 = X86Registers::ebx;
-
- static const FPRegisterID fpRegT0 = X86Registers::xmm0;
- static const FPRegisterID fpRegT1 = X86Registers::xmm1;
- static const FPRegisterID fpRegT2 = X86Registers::xmm2;
-#elif CPU(X86)
- static const RegisterID returnValueRegister = X86Registers::eax;
- static const RegisterID cachedResultRegister = X86Registers::eax;
- // On x86 we always use fastcall conventions - but on
- // OS X it might make more sense to just use regparm.
- static const RegisterID firstArgumentRegister = X86Registers::ecx;
-
- static const RegisterID timeoutCheckRegister = X86Registers::esi;
- static const RegisterID callFrameRegister = X86Registers::edi;
-
- static const RegisterID regT0 = X86Registers::eax;
- static const RegisterID regT1 = X86Registers::edx;
- static const RegisterID regT2 = X86Registers::ecx;
- static const RegisterID regT3 = X86Registers::ebx;
-
- static const FPRegisterID fpRegT0 = X86Registers::xmm0;
- static const FPRegisterID fpRegT1 = X86Registers::xmm1;
- static const FPRegisterID fpRegT2 = X86Registers::xmm2;
-#elif CPU(ARM_THUMB2)
- static const RegisterID returnValueRegister = ARMRegisters::r0;
- static const RegisterID cachedResultRegister = ARMRegisters::r0;
- static const RegisterID firstArgumentRegister = ARMRegisters::r0;
-
- static const RegisterID regT0 = ARMRegisters::r0;
- static const RegisterID regT1 = ARMRegisters::r1;
- static const RegisterID regT2 = ARMRegisters::r2;
- static const RegisterID regT3 = ARMRegisters::r4;
-
- static const RegisterID callFrameRegister = ARMRegisters::r5;
- static const RegisterID timeoutCheckRegister = ARMRegisters::r6;
-
- static const FPRegisterID fpRegT0 = ARMRegisters::d0;
- static const FPRegisterID fpRegT1 = ARMRegisters::d1;
- static const FPRegisterID fpRegT2 = ARMRegisters::d2;
-#elif CPU(ARM_TRADITIONAL)
- static const RegisterID returnValueRegister = ARMRegisters::r0;
- static const RegisterID cachedResultRegister = ARMRegisters::r0;
- static const RegisterID firstArgumentRegister = ARMRegisters::r0;
-
- static const RegisterID timeoutCheckRegister = ARMRegisters::r5;
- static const RegisterID callFrameRegister = ARMRegisters::r4;
-
- static const RegisterID regT0 = ARMRegisters::r0;
- static const RegisterID regT1 = ARMRegisters::r1;
- static const RegisterID regT2 = ARMRegisters::r2;
- // Callee preserved
- static const RegisterID regT3 = ARMRegisters::r7;
-
- static const RegisterID regS0 = ARMRegisters::S0;
- // Callee preserved
- static const RegisterID regS1 = ARMRegisters::S1;
-
- static const RegisterID regStackPtr = ARMRegisters::sp;
- static const RegisterID regLink = ARMRegisters::lr;
-
- static const FPRegisterID fpRegT0 = ARMRegisters::d0;
- static const FPRegisterID fpRegT1 = ARMRegisters::d1;
- static const FPRegisterID fpRegT2 = ARMRegisters::d2;
-#elif CPU(MIPS)
- static const RegisterID returnValueRegister = MIPSRegisters::v0;
- static const RegisterID cachedResultRegister = MIPSRegisters::v0;
- static const RegisterID firstArgumentRegister = MIPSRegisters::a0;
-
- // regT0 must be v0 for returning a 32-bit value.
- static const RegisterID regT0 = MIPSRegisters::v0;
-
- // regT1 must be v1 for returning a pair of 32-bit value.
- static const RegisterID regT1 = MIPSRegisters::v1;
-
- static const RegisterID regT2 = MIPSRegisters::t4;
-
- // regT3 must be saved in the callee, so use an S register.
- static const RegisterID regT3 = MIPSRegisters::s2;
-
- static const RegisterID callFrameRegister = MIPSRegisters::s0;
- static const RegisterID timeoutCheckRegister = MIPSRegisters::s1;
-
- static const FPRegisterID fpRegT0 = MIPSRegisters::f4;
- static const FPRegisterID fpRegT1 = MIPSRegisters::f6;
- static const FPRegisterID fpRegT2 = MIPSRegisters::f8;
-#else
- #error "JIT not supported on this platform."
-#endif
-
static const int patchGetByIdDefaultStructure = -1;
// Magic number - initial offset cannot be representable as a signed 8bit value, or the X86Assembler
// will compress the displacement, and we may not be able to fit a patched offset.
@@ -405,14 +285,9 @@ namespace JSC {
void emitLoadDouble(unsigned index, FPRegisterID value);
void emitLoadInt32ToDouble(unsigned index, FPRegisterID value);
- Address addressFor(unsigned index, RegisterID base = callFrameRegister);
-
void testPrototype(Structure*, JumpList& failureCases);
#if USE(JSVALUE32_64)
- Address tagFor(unsigned index, RegisterID base = callFrameRegister);
- Address payloadFor(unsigned index, RegisterID base = callFrameRegister);
-
bool getOperandConstantImmediateInt(unsigned op1, unsigned op2, unsigned& op, int32_t& constant);
void emitLoadTag(unsigned index, RegisterID tag);
@@ -549,8 +424,6 @@ namespace JSC {
void emitJumpSlowCaseIfNotJSCell(RegisterID);
void emitJumpSlowCaseIfNotJSCell(RegisterID, int VReg);
#if USE(JSVALUE64)
- JIT::Jump emitJumpIfImmediateNumber(RegisterID);
- JIT::Jump emitJumpIfNotImmediateNumber(RegisterID);
#else
JIT::Jump emitJumpIfImmediateNumber(RegisterID reg)
{
@@ -799,6 +672,7 @@ namespace JSC {
void emit_op_jneq_ptr(Instruction*);
void emit_op_jnless(Instruction*);
void emit_op_jless(Instruction*);
+ void emit_op_jlesseq(Instruction*, bool invert = false);
void emit_op_jnlesseq(Instruction*);
void emit_op_jsr(Instruction*);
void emit_op_jtrue(Instruction*);
@@ -861,6 +735,7 @@ namespace JSC {
void emit_op_to_jsnumber(Instruction*);
void emit_op_to_primitive(Instruction*);
void emit_op_unexpected_load(Instruction*);
+ void emit_op_urshift(Instruction*);
#if ENABLE(JIT_OPTIMIZE_MOD)
void softModulo();
#endif
@@ -885,6 +760,7 @@ namespace JSC {
void emitSlow_op_jfalse(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_jnless(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_jless(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_jlesseq(Instruction*, Vector<SlowCaseEntry>::iterator&, bool invert = false);
void emitSlow_op_jnlesseq(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_jtrue(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_loop_if_less(Instruction*, Vector<SlowCaseEntry>::iterator&);
@@ -911,6 +787,11 @@ namespace JSC {
void emitSlow_op_sub(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_to_jsnumber(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_to_primitive(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_urshift(Instruction*, Vector<SlowCaseEntry>::iterator&);
+
+
+ void emitRightShift(Instruction*, bool isUnsigned);
+ void emitRightShiftSlowCase(Instruction*, Vector<SlowCaseEntry>::iterator&, bool isUnsigned);
/* These functions are deprecated: Please use JITStubCall instead. */
void emitPutJITStubArg(RegisterID src, unsigned argumentNumber);
@@ -933,6 +814,7 @@ namespace JSC {
JSValue getConstantOperand(unsigned src);
bool isOperandConstantImmediateInt(unsigned src);
+ bool isOperandConstantImmediateChar(unsigned src);
Jump getSlowCase(Vector<SlowCaseEntry>::iterator& iter)
{
@@ -956,6 +838,9 @@ namespace JSC {
void restoreReturnAddressBeforeReturn(RegisterID);
void restoreReturnAddressBeforeReturn(Address);
+ // Loads the character value of a single character string into dst.
+ void emitLoadCharacterString(RegisterID src, RegisterID dst, JumpList& failures);
+
void emitTimeoutCheck();
#ifndef NDEBUG
void printBytecodeOperandTypes(unsigned src1, unsigned src2);
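The new emit_op_jlesseq/emitSlow_op_jlesseq declarations take an invert flag so that jlesseq and jnlesseq can share a single body (the renamed definition appears at the end of the JITArithmetic.cpp diff below). The retained jnlesseq entry points then presumably collapse to one-line forwarders, roughly:

    void JIT::emit_op_jnlesseq(Instruction* currentInstruction)
    {
        // invert: jump when !(op1 <= op2)
        emit_op_jlesseq(currentInstruction, true);
    }

    void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
    {
        emitSlow_op_jlesseq(currentInstruction, iter, true);
    }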
diff --git a/JavaScriptCore/jit/JITArithmetic.cpp b/JavaScriptCore/jit/JITArithmetic.cpp
index 2e1ff40..e5a4620 100644
--- a/JavaScriptCore/jit/JITArithmetic.cpp
+++ b/JavaScriptCore/jit/JITArithmetic.cpp
@@ -20,7 +20,7 @@
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "config.h"
@@ -46,1187 +46,7 @@ using namespace std;
namespace JSC {
-#if USE(JSVALUE32_64)
-
-void JIT::emit_op_negate(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned src = currentInstruction[2].u.operand;
-
- emitLoad(src, regT1, regT0);
-
- Jump srcNotInt = branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag));
- addSlowCase(branchTest32(Zero, regT0, Imm32(0x7fffffff)));
- neg32(regT0);
- emitStoreInt32(dst, regT0, (dst == src));
-
- Jump end = jump();
-
- srcNotInt.link(this);
- addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));
-
- xor32(Imm32(1 << 31), regT1);
- store32(regT1, tagFor(dst));
- if (dst != src)
- store32(regT0, payloadFor(dst));
-
- end.link(this);
-}
-
-void JIT::emitSlow_op_negate(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
-
- linkSlowCase(iter); // 0x7fffffff check
- linkSlowCase(iter); // double check
-
- JITStubCall stubCall(this, cti_op_negate);
- stubCall.addArgument(regT1, regT0);
- stubCall.call(dst);
-}
-
-void JIT::emit_op_jnless(Instruction* currentInstruction)
-{
- unsigned op1 = currentInstruction[1].u.operand;
- unsigned op2 = currentInstruction[2].u.operand;
- unsigned target = currentInstruction[3].u.operand;
-
- JumpList notInt32Op1;
- JumpList notInt32Op2;
-
- // Int32 less.
- if (isOperandConstantImmediateInt(op1)) {
- emitLoad(op2, regT3, regT2);
- notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
- addJump(branch32(LessThanOrEqual, regT2, Imm32(getConstantOperand(op1).asInt32())), target);
- } else if (isOperandConstantImmediateInt(op2)) {
- emitLoad(op1, regT1, regT0);
- notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- addJump(branch32(GreaterThanOrEqual, regT0, Imm32(getConstantOperand(op2).asInt32())), target);
- } else {
- emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
- notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
- addJump(branch32(GreaterThanOrEqual, regT0, regT2), target);
- }
-
- if (!supportsFloatingPoint()) {
- addSlowCase(notInt32Op1);
- addSlowCase(notInt32Op2);
- return;
- }
- Jump end = jump();
-
- // Double less.
- emitBinaryDoubleOp(op_jnless, target, op1, op2, OperandTypes(), notInt32Op1, notInt32Op2, !isOperandConstantImmediateInt(op1), isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2));
- end.link(this);
-}
-
-void JIT::emitSlow_op_jnless(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned op1 = currentInstruction[1].u.operand;
- unsigned op2 = currentInstruction[2].u.operand;
- unsigned target = currentInstruction[3].u.operand;
-
- if (!supportsFloatingPoint()) {
- if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
- linkSlowCase(iter); // int32 check
- linkSlowCase(iter); // int32 check
- } else {
- if (!isOperandConstantImmediateInt(op1)) {
- linkSlowCase(iter); // double check
- linkSlowCase(iter); // int32 check
- }
- if (isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2))
- linkSlowCase(iter); // double check
- }
-
- JITStubCall stubCall(this, cti_op_jless);
- stubCall.addArgument(op1);
- stubCall.addArgument(op2);
- stubCall.call();
- emitJumpSlowToHot(branchTest32(Zero, regT0), target);
-}
-
-void JIT::emit_op_jless(Instruction* currentInstruction)
-{
- unsigned op1 = currentInstruction[1].u.operand;
- unsigned op2 = currentInstruction[2].u.operand;
- unsigned target = currentInstruction[3].u.operand;
-
- JumpList notInt32Op1;
- JumpList notInt32Op2;
-
- // Int32 less.
- if (isOperandConstantImmediateInt(op1)) {
- emitLoad(op2, regT3, regT2);
- notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
- addJump(branch32(GreaterThan, regT2, Imm32(getConstantOperand(op1).asInt32())), target);
- } else if (isOperandConstantImmediateInt(op2)) {
- emitLoad(op1, regT1, regT0);
- notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- addJump(branch32(LessThan, regT0, Imm32(getConstantOperand(op2).asInt32())), target);
- } else {
- emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
- notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
- addJump(branch32(LessThan, regT0, regT2), target);
- }
-
- if (!supportsFloatingPoint()) {
- addSlowCase(notInt32Op1);
- addSlowCase(notInt32Op2);
- return;
- }
- Jump end = jump();
-
- // Double less.
- emitBinaryDoubleOp(op_jless, target, op1, op2, OperandTypes(), notInt32Op1, notInt32Op2, !isOperandConstantImmediateInt(op1), isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2));
- end.link(this);
-}
-
-void JIT::emitSlow_op_jless(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned op1 = currentInstruction[1].u.operand;
- unsigned op2 = currentInstruction[2].u.operand;
- unsigned target = currentInstruction[3].u.operand;
-
- if (!supportsFloatingPoint()) {
- if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
- linkSlowCase(iter); // int32 check
- linkSlowCase(iter); // int32 check
- } else {
- if (!isOperandConstantImmediateInt(op1)) {
- linkSlowCase(iter); // double check
- linkSlowCase(iter); // int32 check
- }
- if (isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2))
- linkSlowCase(iter); // double check
- }
-
- JITStubCall stubCall(this, cti_op_jless);
- stubCall.addArgument(op1);
- stubCall.addArgument(op2);
- stubCall.call();
- emitJumpSlowToHot(branchTest32(NonZero, regT0), target);
-}
-
-void JIT::emit_op_jnlesseq(Instruction* currentInstruction)
-{
- unsigned op1 = currentInstruction[1].u.operand;
- unsigned op2 = currentInstruction[2].u.operand;
- unsigned target = currentInstruction[3].u.operand;
-
- JumpList notInt32Op1;
- JumpList notInt32Op2;
-
- // Int32 less.
- if (isOperandConstantImmediateInt(op1)) {
- emitLoad(op2, regT3, regT2);
- notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
- addJump(branch32(LessThan, regT2, Imm32(getConstantOperand(op1).asInt32())), target);
- } else if (isOperandConstantImmediateInt(op2)) {
- emitLoad(op1, regT1, regT0);
- notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- addJump(branch32(GreaterThan, regT0, Imm32(getConstantOperand(op2).asInt32())), target);
- } else {
- emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
- notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
- addJump(branch32(GreaterThan, regT0, regT2), target);
- }
-
- if (!supportsFloatingPoint()) {
- addSlowCase(notInt32Op1);
- addSlowCase(notInt32Op2);
- return;
- }
- Jump end = jump();
-
- // Double less.
- emitBinaryDoubleOp(op_jnlesseq, target, op1, op2, OperandTypes(), notInt32Op1, notInt32Op2, !isOperandConstantImmediateInt(op1), isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2));
- end.link(this);
-}
-
-void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned op1 = currentInstruction[1].u.operand;
- unsigned op2 = currentInstruction[2].u.operand;
- unsigned target = currentInstruction[3].u.operand;
-
- if (!supportsFloatingPoint()) {
- if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
- linkSlowCase(iter); // int32 check
- linkSlowCase(iter); // int32 check
- } else {
- if (!isOperandConstantImmediateInt(op1)) {
- linkSlowCase(iter); // double check
- linkSlowCase(iter); // int32 check
- }
- if (isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2))
- linkSlowCase(iter); // double check
- }
-
- JITStubCall stubCall(this, cti_op_jlesseq);
- stubCall.addArgument(op1);
- stubCall.addArgument(op2);
- stubCall.call();
- emitJumpSlowToHot(branchTest32(Zero, regT0), target);
-}
-
-// LeftShift (<<)
-
-void JIT::emit_op_lshift(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
-
- if (isOperandConstantImmediateInt(op2)) {
- emitLoad(op1, regT1, regT0);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- lshift32(Imm32(getConstantOperand(op2).asInt32()), regT0);
- emitStoreInt32(dst, regT0, dst == op1);
- return;
- }
-
- emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
- if (!isOperandConstantImmediateInt(op1))
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
- lshift32(regT2, regT0);
- emitStoreInt32(dst, regT0, dst == op1 || dst == op2);
-}
-
-void JIT::emitSlow_op_lshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
-
- if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
- linkSlowCase(iter); // int32 check
- linkSlowCase(iter); // int32 check
-
- JITStubCall stubCall(this, cti_op_lshift);
- stubCall.addArgument(op1);
- stubCall.addArgument(op2);
- stubCall.call(dst);
-}
-
-// RightShift (>>)
-
-void JIT::emit_op_rshift(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
-
- if (isOperandConstantImmediateInt(op2)) {
- emitLoad(op1, regT1, regT0);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- rshift32(Imm32(getConstantOperand(op2).asInt32()), regT0);
- emitStoreInt32(dst, regT0, dst == op1);
- return;
- }
-
- emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
- if (!isOperandConstantImmediateInt(op1))
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
- rshift32(regT2, regT0);
- emitStoreInt32(dst, regT0, dst == op1 || dst == op2);
-}
-
-void JIT::emitSlow_op_rshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
-
- if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
- linkSlowCase(iter); // int32 check
- linkSlowCase(iter); // int32 check
-
- JITStubCall stubCall(this, cti_op_rshift);
- stubCall.addArgument(op1);
- stubCall.addArgument(op2);
- stubCall.call(dst);
-}
-
-// BitAnd (&)
-
-void JIT::emit_op_bitand(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
-
- unsigned op;
- int32_t constant;
- if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
- emitLoad(op, regT1, regT0);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- and32(Imm32(constant), regT0);
- emitStoreInt32(dst, regT0, (op == dst));
- return;
- }
-
- emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
- and32(regT2, regT0);
- emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
-}
-
-void JIT::emitSlow_op_bitand(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
-
- if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
- linkSlowCase(iter); // int32 check
- linkSlowCase(iter); // int32 check
-
- JITStubCall stubCall(this, cti_op_bitand);
- stubCall.addArgument(op1);
- stubCall.addArgument(op2);
- stubCall.call(dst);
-}
-
-// BitOr (|)
-
-void JIT::emit_op_bitor(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
-
- unsigned op;
- int32_t constant;
- if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
- emitLoad(op, regT1, regT0);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- or32(Imm32(constant), regT0);
- emitStoreInt32(dst, regT0, (op == dst));
- return;
- }
-
- emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
- or32(regT2, regT0);
- emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
-}
-
-void JIT::emitSlow_op_bitor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
-
- if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
- linkSlowCase(iter); // int32 check
- linkSlowCase(iter); // int32 check
-
- JITStubCall stubCall(this, cti_op_bitor);
- stubCall.addArgument(op1);
- stubCall.addArgument(op2);
- stubCall.call(dst);
-}
-
-// BitXor (^)
-
-void JIT::emit_op_bitxor(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
-
- unsigned op;
- int32_t constant;
- if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
- emitLoad(op, regT1, regT0);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- xor32(Imm32(constant), regT0);
- emitStoreInt32(dst, regT0, (op == dst));
- return;
- }
-
- emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
- xor32(regT2, regT0);
- emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
-}
-
-void JIT::emitSlow_op_bitxor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
-
- if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
- linkSlowCase(iter); // int32 check
- linkSlowCase(iter); // int32 check
-
- JITStubCall stubCall(this, cti_op_bitxor);
- stubCall.addArgument(op1);
- stubCall.addArgument(op2);
- stubCall.call(dst);
-}
-
-// BitNot (~)
-
-void JIT::emit_op_bitnot(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned src = currentInstruction[2].u.operand;
-
- emitLoad(src, regT1, regT0);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
-
- not32(regT0);
- emitStoreInt32(dst, regT0, (dst == src));
-}
-
-void JIT::emitSlow_op_bitnot(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
-
- linkSlowCase(iter); // int32 check
-
- JITStubCall stubCall(this, cti_op_bitnot);
- stubCall.addArgument(regT1, regT0);
- stubCall.call(dst);
-}
-
-// PostInc (i++)
-
-void JIT::emit_op_post_inc(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned srcDst = currentInstruction[2].u.operand;
-
- emitLoad(srcDst, regT1, regT0);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
-
- if (dst == srcDst) // x = x++ is a noop for ints.
- return;
-
- emitStoreInt32(dst, regT0);
-
- addSlowCase(branchAdd32(Overflow, Imm32(1), regT0));
- emitStoreInt32(srcDst, regT0, true);
-}
-
-void JIT::emitSlow_op_post_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned srcDst = currentInstruction[2].u.operand;
-
- linkSlowCase(iter); // int32 check
- if (dst != srcDst)
- linkSlowCase(iter); // overflow check
-
- JITStubCall stubCall(this, cti_op_post_inc);
- stubCall.addArgument(srcDst);
- stubCall.addArgument(Imm32(srcDst));
- stubCall.call(dst);
-}
-
-// PostDec (i--)
-
-void JIT::emit_op_post_dec(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned srcDst = currentInstruction[2].u.operand;
-
- emitLoad(srcDst, regT1, regT0);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
-
- if (dst == srcDst) // x = x-- is a noop for ints.
- return;
-
- emitStoreInt32(dst, regT0);
-
- addSlowCase(branchSub32(Overflow, Imm32(1), regT0));
- emitStoreInt32(srcDst, regT0, true);
-}
-
-void JIT::emitSlow_op_post_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned srcDst = currentInstruction[2].u.operand;
-
- linkSlowCase(iter); // int32 check
- if (dst != srcDst)
- linkSlowCase(iter); // overflow check
-
- JITStubCall stubCall(this, cti_op_post_dec);
- stubCall.addArgument(srcDst);
- stubCall.addArgument(Imm32(srcDst));
- stubCall.call(dst);
-}
-
-// PreInc (++i)
-
-void JIT::emit_op_pre_inc(Instruction* currentInstruction)
-{
- unsigned srcDst = currentInstruction[1].u.operand;
-
- emitLoad(srcDst, regT1, regT0);
-
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- addSlowCase(branchAdd32(Overflow, Imm32(1), regT0));
- emitStoreInt32(srcDst, regT0, true);
-}
-
-void JIT::emitSlow_op_pre_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned srcDst = currentInstruction[1].u.operand;
-
- linkSlowCase(iter); // int32 check
- linkSlowCase(iter); // overflow check
-
- JITStubCall stubCall(this, cti_op_pre_inc);
- stubCall.addArgument(srcDst);
- stubCall.call(srcDst);
-}
-
-// PreDec (--i)
-
-void JIT::emit_op_pre_dec(Instruction* currentInstruction)
-{
- unsigned srcDst = currentInstruction[1].u.operand;
-
- emitLoad(srcDst, regT1, regT0);
-
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- addSlowCase(branchSub32(Overflow, Imm32(1), regT0));
- emitStoreInt32(srcDst, regT0, true);
-}
-
-void JIT::emitSlow_op_pre_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned srcDst = currentInstruction[1].u.operand;
-
- linkSlowCase(iter); // int32 check
- linkSlowCase(iter); // overflow check
-
- JITStubCall stubCall(this, cti_op_pre_dec);
- stubCall.addArgument(srcDst);
- stubCall.call(srcDst);
-}
-
-// Addition (+)
-
-void JIT::emit_op_add(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
- OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
-
- if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
- JITStubCall stubCall(this, cti_op_add);
- stubCall.addArgument(op1);
- stubCall.addArgument(op2);
- stubCall.call(dst);
- return;
- }
-
- JumpList notInt32Op1;
- JumpList notInt32Op2;
-
- unsigned op;
- int32_t constant;
- if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
- emitAdd32Constant(dst, op, constant, op == op1 ? types.first() : types.second());
- return;
- }
-
- emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
- notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
-
- // Int32 case.
- addSlowCase(branchAdd32(Overflow, regT2, regT0));
- emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
-
- if (!supportsFloatingPoint()) {
- addSlowCase(notInt32Op1);
- addSlowCase(notInt32Op2);
- return;
- }
- Jump end = jump();
-
- // Double case.
- emitBinaryDoubleOp(op_add, dst, op1, op2, types, notInt32Op1, notInt32Op2);
- end.link(this);
-}
-
-void JIT::emitAdd32Constant(unsigned dst, unsigned op, int32_t constant, ResultType opType)
-{
- // Int32 case.
- emitLoad(op, regT1, regT0);
- Jump notInt32 = branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag));
- addSlowCase(branchAdd32(Overflow, Imm32(constant), regT0));
- emitStoreInt32(dst, regT0, (op == dst));
-
- // Double case.
- if (!supportsFloatingPoint()) {
- addSlowCase(notInt32);
- return;
- }
- Jump end = jump();
-
- notInt32.link(this);
- if (!opType.definitelyIsNumber())
- addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));
- move(Imm32(constant), regT2);
- convertInt32ToDouble(regT2, fpRegT0);
- emitLoadDouble(op, fpRegT1);
- addDouble(fpRegT1, fpRegT0);
- emitStoreDouble(dst, fpRegT0);
-
- end.link(this);
-}
-
-void JIT::emitSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
- OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
-
- if (!types.first().mightBeNumber() || !types.second().mightBeNumber())
- return;
-
- unsigned op;
- int32_t constant;
- if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
- linkSlowCase(iter); // overflow check
-
- if (!supportsFloatingPoint())
- linkSlowCase(iter); // non-sse case
- else {
- ResultType opType = op == op1 ? types.first() : types.second();
- if (!opType.definitelyIsNumber())
- linkSlowCase(iter); // double check
- }
- } else {
- linkSlowCase(iter); // overflow check
-
- if (!supportsFloatingPoint()) {
- linkSlowCase(iter); // int32 check
- linkSlowCase(iter); // int32 check
- } else {
- if (!types.first().definitelyIsNumber())
- linkSlowCase(iter); // double check
-
- if (!types.second().definitelyIsNumber()) {
- linkSlowCase(iter); // int32 check
- linkSlowCase(iter); // double check
- }
- }
- }
-
- JITStubCall stubCall(this, cti_op_add);
- stubCall.addArgument(op1);
- stubCall.addArgument(op2);
- stubCall.call(dst);
-}
-
-// Subtraction (-)
-
-void JIT::emit_op_sub(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
- OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
-
- JumpList notInt32Op1;
- JumpList notInt32Op2;
-
- if (isOperandConstantImmediateInt(op2)) {
- emitSub32Constant(dst, op1, getConstantOperand(op2).asInt32(), types.first());
- return;
- }
-
- emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
- notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
-
- // Int32 case.
- addSlowCase(branchSub32(Overflow, regT2, regT0));
- emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
-
- if (!supportsFloatingPoint()) {
- addSlowCase(notInt32Op1);
- addSlowCase(notInt32Op2);
- return;
- }
- Jump end = jump();
-
- // Double case.
- emitBinaryDoubleOp(op_sub, dst, op1, op2, types, notInt32Op1, notInt32Op2);
- end.link(this);
-}
-
-void JIT::emitSub32Constant(unsigned dst, unsigned op, int32_t constant, ResultType opType)
-{
- // Int32 case.
- emitLoad(op, regT1, regT0);
- Jump notInt32 = branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag));
- addSlowCase(branchSub32(Overflow, Imm32(constant), regT0));
- emitStoreInt32(dst, regT0, (op == dst));
-
- // Double case.
- if (!supportsFloatingPoint()) {
- addSlowCase(notInt32);
- return;
- }
- Jump end = jump();
-
- notInt32.link(this);
- if (!opType.definitelyIsNumber())
- addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));
- move(Imm32(constant), regT2);
- convertInt32ToDouble(regT2, fpRegT0);
- emitLoadDouble(op, fpRegT1);
- subDouble(fpRegT0, fpRegT1);
- emitStoreDouble(dst, fpRegT1);
-
- end.link(this);
-}
-
-void JIT::emitSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
- OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
-
- if (isOperandConstantImmediateInt(op2)) {
- linkSlowCase(iter); // overflow check
-
- if (!supportsFloatingPoint() || !types.first().definitelyIsNumber())
- linkSlowCase(iter); // int32 or double check
- } else {
- linkSlowCase(iter); // overflow check
-
- if (!supportsFloatingPoint()) {
- linkSlowCase(iter); // int32 check
- linkSlowCase(iter); // int32 check
- } else {
- if (!types.first().definitelyIsNumber())
- linkSlowCase(iter); // double check
-
- if (!types.second().definitelyIsNumber()) {
- linkSlowCase(iter); // int32 check
- linkSlowCase(iter); // double check
- }
- }
- }
-
- JITStubCall stubCall(this, cti_op_sub);
- stubCall.addArgument(op1);
- stubCall.addArgument(op2);
- stubCall.call(dst);
-}
-
-void JIT::emitBinaryDoubleOp(OpcodeID opcodeID, unsigned dst, unsigned op1, unsigned op2, OperandTypes types, JumpList& notInt32Op1, JumpList& notInt32Op2, bool op1IsInRegisters, bool op2IsInRegisters)
-{
- JumpList end;
-
- if (!notInt32Op1.empty()) {
- // Double case 1: Op1 is not int32; Op2 is unknown.
- notInt32Op1.link(this);
-
- ASSERT(op1IsInRegisters);
-
- // Verify Op1 is double.
- if (!types.first().definitelyIsNumber())
- addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));
-
- if (!op2IsInRegisters)
- emitLoad(op2, regT3, regT2);
-
- Jump doubleOp2 = branch32(Below, regT3, Imm32(JSValue::LowestTag));
-
- if (!types.second().definitelyIsNumber())
- addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
-
- convertInt32ToDouble(regT2, fpRegT0);
- Jump doTheMath = jump();
-
- // Load Op2 as double into double register.
- doubleOp2.link(this);
- emitLoadDouble(op2, fpRegT0);
-
- // Do the math.
- doTheMath.link(this);
- switch (opcodeID) {
- case op_mul:
- emitLoadDouble(op1, fpRegT2);
- mulDouble(fpRegT2, fpRegT0);
- emitStoreDouble(dst, fpRegT0);
- break;
- case op_add:
- emitLoadDouble(op1, fpRegT2);
- addDouble(fpRegT2, fpRegT0);
- emitStoreDouble(dst, fpRegT0);
- break;
- case op_sub:
- emitLoadDouble(op1, fpRegT1);
- subDouble(fpRegT0, fpRegT1);
- emitStoreDouble(dst, fpRegT1);
- break;
- case op_div:
- emitLoadDouble(op1, fpRegT1);
- divDouble(fpRegT0, fpRegT1);
- emitStoreDouble(dst, fpRegT1);
- break;
- case op_jnless:
- emitLoadDouble(op1, fpRegT2);
- addJump(branchDouble(DoubleLessThanOrEqualOrUnordered, fpRegT0, fpRegT2), dst);
- break;
- case op_jless:
- emitLoadDouble(op1, fpRegT2);
- addJump(branchDouble(DoubleLessThan, fpRegT2, fpRegT0), dst);
- break;
- case op_jnlesseq:
- emitLoadDouble(op1, fpRegT2);
- addJump(branchDouble(DoubleLessThanOrUnordered, fpRegT0, fpRegT2), dst);
- break;
- default:
- ASSERT_NOT_REACHED();
- }
-
- if (!notInt32Op2.empty())
- end.append(jump());
- }
-
- if (!notInt32Op2.empty()) {
- // Double case 2: Op1 is int32; Op2 is not int32.
- notInt32Op2.link(this);
-
- ASSERT(op2IsInRegisters);
-
- if (!op1IsInRegisters)
- emitLoadPayload(op1, regT0);
-
- convertInt32ToDouble(regT0, fpRegT0);
-
- // Verify op2 is double.
- if (!types.second().definitelyIsNumber())
- addSlowCase(branch32(Above, regT3, Imm32(JSValue::LowestTag)));
-
- // Do the math.
- switch (opcodeID) {
- case op_mul:
- emitLoadDouble(op2, fpRegT2);
- mulDouble(fpRegT2, fpRegT0);
- emitStoreDouble(dst, fpRegT0);
- break;
- case op_add:
- emitLoadDouble(op2, fpRegT2);
- addDouble(fpRegT2, fpRegT0);
- emitStoreDouble(dst, fpRegT0);
- break;
- case op_sub:
- emitLoadDouble(op2, fpRegT2);
- subDouble(fpRegT2, fpRegT0);
- emitStoreDouble(dst, fpRegT0);
- break;
- case op_div:
- emitLoadDouble(op2, fpRegT2);
- divDouble(fpRegT2, fpRegT0);
- emitStoreDouble(dst, fpRegT0);
- break;
- case op_jnless:
- emitLoadDouble(op2, fpRegT1);
- addJump(branchDouble(DoubleLessThanOrEqualOrUnordered, fpRegT1, fpRegT0), dst);
- break;
- case op_jless:
- emitLoadDouble(op2, fpRegT1);
- addJump(branchDouble(DoubleLessThan, fpRegT0, fpRegT1), dst);
- break;
- case op_jnlesseq:
- emitLoadDouble(op2, fpRegT1);
- addJump(branchDouble(DoubleLessThanOrUnordered, fpRegT1, fpRegT0), dst);
- break;
- default:
- ASSERT_NOT_REACHED();
- }
- }
-
- end.link(this);
-}
-
-// Multiplication (*)
-
-void JIT::emit_op_mul(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
- OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
-
- JumpList notInt32Op1;
- JumpList notInt32Op2;
-
- emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
- notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
-
- // Int32 case.
- move(regT0, regT3);
- addSlowCase(branchMul32(Overflow, regT2, regT0));
- addSlowCase(branchTest32(Zero, regT0));
- emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
-
- if (!supportsFloatingPoint()) {
- addSlowCase(notInt32Op1);
- addSlowCase(notInt32Op2);
- return;
- }
- Jump end = jump();
-
- // Double case.
- emitBinaryDoubleOp(op_mul, dst, op1, op2, types, notInt32Op1, notInt32Op2);
- end.link(this);
-}
-
-void JIT::emitSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
- OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
-
- Jump overflow = getSlowCase(iter); // overflow check
- linkSlowCase(iter); // zero result check
-
- Jump negZero = branchOr32(Signed, regT2, regT3);
- emitStoreInt32(dst, Imm32(0), (op1 == dst || op2 == dst));
-
- emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_mul));
-
- negZero.link(this);
- overflow.link(this);
-
- if (!supportsFloatingPoint()) {
- linkSlowCase(iter); // int32 check
- linkSlowCase(iter); // int32 check
- }
-
- if (supportsFloatingPoint()) {
- if (!types.first().definitelyIsNumber())
- linkSlowCase(iter); // double check
-
- if (!types.second().definitelyIsNumber()) {
- linkSlowCase(iter); // int32 check
- linkSlowCase(iter); // double check
- }
- }
-
- Label jitStubCall(this);
- JITStubCall stubCall(this, cti_op_mul);
- stubCall.addArgument(op1);
- stubCall.addArgument(op2);
- stubCall.call(dst);
-}
-
-// Division (/)
-
-void JIT::emit_op_div(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
- OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
-
- if (!supportsFloatingPoint()) {
- addSlowCase(jump());
- return;
- }
-
- // Int32 divide.
- JumpList notInt32Op1;
- JumpList notInt32Op2;
-
- JumpList end;
-
- emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
-
- notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
-
- convertInt32ToDouble(regT0, fpRegT0);
- convertInt32ToDouble(regT2, fpRegT1);
- divDouble(fpRegT1, fpRegT0);
-
- JumpList doubleResult;
- branchConvertDoubleToInt32(fpRegT0, regT0, doubleResult, fpRegT1);
-
- // Int32 result.
- emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
- end.append(jump());
-
- // Double result.
- doubleResult.link(this);
- emitStoreDouble(dst, fpRegT0);
- end.append(jump());
-
- // Double divide.
- emitBinaryDoubleOp(op_div, dst, op1, op2, types, notInt32Op1, notInt32Op2);
- end.link(this);
-}
-
-void JIT::emitSlow_op_div(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
- OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
-
- if (!supportsFloatingPoint())
- linkSlowCase(iter);
- else {
- if (!types.first().definitelyIsNumber())
- linkSlowCase(iter); // double check
-
- if (!types.second().definitelyIsNumber()) {
- linkSlowCase(iter); // int32 check
- linkSlowCase(iter); // double check
- }
- }
-
- JITStubCall stubCall(this, cti_op_div);
- stubCall.addArgument(op1);
- stubCall.addArgument(op2);
- stubCall.call(dst);
-}
-
-// Mod (%)
-
-/* ------------------------------ BEGIN: OP_MOD ------------------------------ */
-
-#if CPU(X86) || CPU(X86_64)
-
-void JIT::emit_op_mod(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
-
- if (isOperandConstantImmediateInt(op2) && getConstantOperand(op2).asInt32() != 0) {
- emitLoad(op1, X86Registers::edx, X86Registers::eax);
- move(Imm32(getConstantOperand(op2).asInt32()), X86Registers::ecx);
- addSlowCase(branch32(NotEqual, X86Registers::edx, Imm32(JSValue::Int32Tag)));
- if (getConstantOperand(op2).asInt32() == -1)
- addSlowCase(branch32(Equal, X86Registers::eax, Imm32(0x80000000))); // -2147483648 / -1 => EXC_ARITHMETIC
- } else {
- emitLoad2(op1, X86Registers::edx, X86Registers::eax, op2, X86Registers::ebx, X86Registers::ecx);
- addSlowCase(branch32(NotEqual, X86Registers::edx, Imm32(JSValue::Int32Tag)));
- addSlowCase(branch32(NotEqual, X86Registers::ebx, Imm32(JSValue::Int32Tag)));
-
- addSlowCase(branch32(Equal, X86Registers::eax, Imm32(0x80000000))); // -2147483648 / -1 => EXC_ARITHMETIC
- addSlowCase(branch32(Equal, X86Registers::ecx, Imm32(0))); // divide by 0
- }
-
- move(X86Registers::eax, X86Registers::ebx); // Save dividend payload, in case of 0.
- m_assembler.cdq();
- m_assembler.idivl_r(X86Registers::ecx);
-
- // If the remainder is zero and the dividend is negative, the result is -0.
- Jump storeResult1 = branchTest32(NonZero, X86Registers::edx);
- Jump storeResult2 = branchTest32(Zero, X86Registers::ebx, Imm32(0x80000000)); // not negative
- emitStore(dst, jsNumber(m_globalData, -0.0));
- Jump end = jump();
-
- storeResult1.link(this);
- storeResult2.link(this);
- emitStoreInt32(dst, X86Registers::edx, (op1 == dst || op2 == dst));
- end.link(this);
-}
-
-void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
-
- if (isOperandConstantImmediateInt(op2) && getConstantOperand(op2).asInt32() != 0) {
- linkSlowCase(iter); // int32 check
- if (getConstantOperand(op2).asInt32() == -1)
- linkSlowCase(iter); // 0x80000000 check
- } else {
- linkSlowCase(iter); // int32 check
- linkSlowCase(iter); // int32 check
- linkSlowCase(iter); // 0 check
- linkSlowCase(iter); // 0x80000000 check
- }
-
- JITStubCall stubCall(this, cti_op_mod);
- stubCall.addArgument(op1);
- stubCall.addArgument(op2);
- stubCall.call(dst);
-}
-
-#else // CPU(X86) || CPU(X86_64)
-
-void JIT::emit_op_mod(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
-
-#if ENABLE(JIT_OPTIMIZE_MOD)
- emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
-
- addSlowCase(branch32(Equal, regT2, Imm32(0)));
-
- emitNakedCall(m_globalData->jitStubs.ctiSoftModulo());
-
- emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
-#else
- JITStubCall stubCall(this, cti_op_mod);
- stubCall.addArgument(op1);
- stubCall.addArgument(op2);
- stubCall.call(dst);
-#endif
-}
-
-void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
-#if ENABLE(JIT_OPTIMIZE_MOD)
- unsigned result = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
- linkSlowCase(iter);
- linkSlowCase(iter);
- linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_mod);
- stubCall.addArgument(op1);
- stubCall.addArgument(op2);
- stubCall.call(result);
-#else
- ASSERT_NOT_REACHED();
-#endif
-}
-
-#endif // CPU(X86) || CPU(X86_64)
-
-/* ------------------------------ END: OP_MOD ------------------------------ */
-
-#else // USE(JSVALUE32_64)
+#if !USE(JSVALUE32_64)
void JIT::emit_op_lshift(Instruction* currentInstruction)
{
@@ -1367,6 +187,118 @@ void JIT::emitSlow_op_rshift(Instruction* currentInstruction, Vector<SlowCaseEnt
stubCall.call(result);
}
+void JIT::emit_op_urshift(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ // Slow case of urshift makes assumptions about what registers hold the
+ // shift arguments, so any changes must be updated there as well.
+ if (isOperandConstantImmediateInt(op2)) {
+ emitGetVirtualRegister(op1, regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ emitFastArithImmToInt(regT0);
+ int shift = getConstantOperand(op2).asInt32();
+ if (shift)
+ urshift32(Imm32(shift & 0x1f), regT0);
+ // unsigned shift < 0 or shift = k*2^32 may result in (essentially)
+ // a toUint conversion, which can result in a value we cannot
+ // represent as an immediate int.
+ if (shift < 0 || !(shift & 31))
+ addSlowCase(branch32(LessThan, regT0, Imm32(0)));
+#if USE(JSVALUE32)
+ addSlowCase(branchAdd32(Overflow, regT0, regT0));
+ signExtend32ToPtr(regT0, regT0);
+#endif
+ emitFastArithReTagImmediate(regT0, regT0);
+ emitPutVirtualRegister(dst, regT0);
+ return;
+ }
+ emitGetVirtualRegisters(op1, regT0, op2, regT1);
+ if (!isOperandConstantImmediateInt(op1))
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT1);
+ emitFastArithImmToInt(regT0);
+ emitFastArithImmToInt(regT1);
+ urshift32(regT1, regT0);
+ addSlowCase(branch32(LessThan, regT0, Imm32(0)));
+#if USE(JSVALUE32)
+ addSlowCase(branchAdd32(Overflow, regT0, regT0));
+ signExtend32ToPtr(regT0, regT0);
+#endif
+ emitFastArithReTagImmediate(regT0, regT0);
+ emitPutVirtualRegister(dst, regT0);
+}
+
+void JIT::emitSlow_op_urshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+ if (isOperandConstantImmediateInt(op2)) {
+ int shift = getConstantOperand(op2).asInt32();
+ // op1 = regT0
+ linkSlowCase(iter); // int32 check
+#if USE(JSVALUE64)
+ if (supportsFloatingPointTruncate()) {
+ JumpList failures;
+ failures.append(emitJumpIfNotImmediateNumber(regT0)); // op1 is not a double
+ addPtr(tagTypeNumberRegister, regT0);
+ movePtrToDouble(regT0, fpRegT0);
+ failures.append(branchTruncateDoubleToInt32(fpRegT0, regT0));
+ if (shift)
+ urshift32(Imm32(shift & 0x1f), regT0);
+ if (shift < 0 || !(shift & 31))
+ failures.append(branch32(LessThan, regT0, Imm32(0)));
+ emitFastArithReTagImmediate(regT0, regT0);
+ emitPutVirtualRegister(dst, regT0);
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_rshift));
+ failures.link(this);
+ }
+#endif // JSVALUE64
+ if (shift < 0 || !(shift & 31))
+ linkSlowCase(iter); // failed to box in hot path
+#if USE(JSVALUE32)
+ linkSlowCase(iter); // Couldn't box result
+#endif
+ } else {
+ // op1 = regT0
+ // op2 = regT1
+ if (!isOperandConstantImmediateInt(op1)) {
+ linkSlowCase(iter); // int32 check -- op1 is not an int
+#if USE(JSVALUE64)
+ if (supportsFloatingPointTruncate()) {
+ JumpList failures;
+ failures.append(emitJumpIfNotImmediateNumber(regT0)); // op1 is not a double
+ addPtr(tagTypeNumberRegister, regT0);
+ movePtrToDouble(regT0, fpRegT0);
+ failures.append(branchTruncateDoubleToInt32(fpRegT0, regT0));
+ failures.append(emitJumpIfNotImmediateInteger(regT1)); // op2 is not an int
+ emitFastArithImmToInt(regT1);
+ urshift32(regT1, regT0);
+ failures.append(branch32(LessThan, regT0, Imm32(0)));
+ emitFastArithReTagImmediate(regT0, regT0);
+ emitPutVirtualRegister(dst, regT0);
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_rshift));
+ failures.link(this);
+ }
+#endif
+ }
+
+ linkSlowCase(iter); // int32 check - op2 is not an int
+ linkSlowCase(iter); // Can't represent unsigned result as an immediate
+#if USE(JSVALUE32)
+ linkSlowCase(iter); // Couldn't box result
+#endif
+ }
+
+ JITStubCall stubCall(this, cti_op_urshift);
+ stubCall.addArgument(op1, regT0);
+ stubCall.addArgument(op2, regT1);
+ stubCall.call(dst);
+}
+
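The branch32(LessThan, regT0, Imm32(0)) guards above exist because an unsigned shift can leave the sign bit set in the 32-bit result, and such a value cannot be retagged as an immediate int. A standalone C++ illustration of the boundary case:

    #include <cstdint>
    #include <cassert>

    int main()
    {
        // JS: (-1) >>> 0 === 4294967295. The effective shift is zero, so
        // this is just ToUint32(-1), which exceeds INT32_MAX...
        uint32_t result = static_cast<uint32_t>(-1) >> (0 & 0x1f);
        assert(result == 4294967295u);
        // ...and viewed as a signed 32-bit value it is negative, which is
        // exactly the condition the slow-case branch tests for.
        assert(static_cast<int32_t>(result) < 0);
        return 0;
    }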
void JIT::emit_op_jnless(Instruction* currentInstruction)
{
unsigned op1 = currentInstruction[1].u.operand;
@@ -1378,6 +310,24 @@ void JIT::emit_op_jnless(Instruction* currentInstruction)
// - constant int immediate to int immediate
// - int immediate to int immediate
+ if (isOperandConstantImmediateChar(op1)) {
+ emitGetVirtualRegister(op2, regT0);
+ addSlowCase(emitJumpIfNotJSCell(regT0));
+ JumpList failures;
+ emitLoadCharacterString(regT0, regT0, failures);
+ addSlowCase(failures);
+ addJump(branch32(LessThanOrEqual, regT0, Imm32(asString(getConstantOperand(op1))->tryGetValue()[0])), target);
+ return;
+ }
+ if (isOperandConstantImmediateChar(op2)) {
+ emitGetVirtualRegister(op1, regT0);
+ addSlowCase(emitJumpIfNotJSCell(regT0));
+ JumpList failures;
+ emitLoadCharacterString(regT0, regT0, failures);
+ addSlowCase(failures);
+ addJump(branch32(GreaterThanOrEqual, regT0, Imm32(asString(getConstantOperand(op2))->tryGetValue()[0])), target);
+ return;
+ }
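These two blocks fast-path comparisons where one operand is a constant single-character string (e.g. ch < "a" in a scanner loop): once emitLoadCharacterString has verified that the other operand is a length-1 string and fetched its code unit, the whole comparison is one 32-bit compare against the constant's first UChar. A host-side sketch of the reduction, with hypothetical values standing in for the loaded operands:

    #include <cassert>

    int main()
    {
        // jnless with op1 constant jumps when !(op1 < op2), i.e. when
        // op2 <= op1 -- hence the LessThanOrEqual branch above.
        unsigned short constantOp1 = 'a'; // tryGetValue()[0] of the constant
        unsigned short loadedOp2 = 'Z';   // code unit from emitLoadCharacterString
        assert(loadedOp2 <= constantOp1); // 0x5a <= 0x61: the jump is taken
        return 0;
    }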
if (isOperandConstantImmediateInt(op2)) {
emitGetVirtualRegister(op1, regT0);
emitJumpSlowCaseIfNotImmediateInteger(regT0);
@@ -1415,6 +365,18 @@ void JIT::emitSlow_op_jnless(Instruction* currentInstruction, Vector<SlowCaseEnt
// - floating-point number to constant int immediate
// - constant int immediate to floating-point number
// - floating-point number to floating-point number.
+ if (isOperandConstantImmediateChar(op1) || isOperandConstantImmediateChar(op2)) {
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, cti_op_jless);
+ stubCall.addArgument(op1, regT0);
+ stubCall.addArgument(op2, regT1);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(Zero, regT0), target);
+ return;
+ }
if (isOperandConstantImmediateInt(op2)) {
linkSlowCase(iter);
@@ -1432,9 +394,9 @@ void JIT::emitSlow_op_jnless(Instruction* currentInstruction, Vector<SlowCaseEnt
Jump fail2 = checkStructure(regT0, m_globalData->numberStructure.get());
loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
#endif
-
+
int32_t op2imm = getConstantOperand(op2).asInt32();
-
+
move(Imm32(op2imm), regT1);
convertInt32ToDouble(regT1, fpRegT1);
@@ -1469,13 +431,13 @@ void JIT::emitSlow_op_jnless(Instruction* currentInstruction, Vector<SlowCaseEnt
Jump fail1;
if (!m_codeBlock->isKnownNotImmediate(op2))
fail1 = emitJumpIfNotJSCell(regT1);
-
+
Jump fail2 = checkStructure(regT1, m_globalData->numberStructure.get());
loadDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT1);
#endif
-
+
int32_t op1imm = getConstantOperand(op1).asInt32();
-
+
move(Imm32(op1imm), regT0);
convertInt32ToDouble(regT0, fpRegT0);
@@ -1563,6 +525,24 @@ void JIT::emit_op_jless(Instruction* currentInstruction)
// - constant int immediate to int immediate
// - int immediate to int immediate
+ if (isOperandConstantImmediateChar(op1)) {
+ emitGetVirtualRegister(op2, regT0);
+ addSlowCase(emitJumpIfNotJSCell(regT0));
+ JumpList failures;
+ emitLoadCharacterString(regT0, regT0, failures);
+ addSlowCase(failures);
+ addJump(branch32(GreaterThan, regT0, Imm32(asString(getConstantOperand(op1))->tryGetValue()[0])), target);
+ return;
+ }
+ if (isOperandConstantImmediateChar(op2)) {
+ emitGetVirtualRegister(op1, regT0);
+ addSlowCase(emitJumpIfNotJSCell(regT0));
+ JumpList failures;
+ emitLoadCharacterString(regT0, regT0, failures);
+ addSlowCase(failures);
+ addJump(branch32(LessThan, regT0, Imm32(asString(getConstantOperand(op2))->tryGetValue()[0])), target);
+ return;
+ }
if (isOperandConstantImmediateInt(op2)) {
emitGetVirtualRegister(op1, regT0);
emitJumpSlowCaseIfNotImmediateInteger(regT0);
@@ -1600,6 +580,18 @@ void JIT::emitSlow_op_jless(Instruction* currentInstruction, Vector<SlowCaseEntr
// - floating-point number to constant int immediate
// - constant int immediate to floating-point number
// - floating-point number to floating-point number.
+ if (isOperandConstantImmediateChar(op1) || isOperandConstantImmediateChar(op2)) {
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, cti_op_jless);
+ stubCall.addArgument(op1, regT0);
+ stubCall.addArgument(op2, regT1);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(NonZero, regT0), target);
+ return;
+ }
if (isOperandConstantImmediateInt(op2)) {
linkSlowCase(iter);
@@ -1617,9 +609,9 @@ void JIT::emitSlow_op_jless(Instruction* currentInstruction, Vector<SlowCaseEntr
Jump fail2 = checkStructure(regT0, m_globalData->numberStructure.get());
loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
#endif
-
+
int32_t op2imm = getConstantOperand(op2).asInt32();
-
+
move(Imm32(op2imm), regT1);
convertInt32ToDouble(regT1, fpRegT1);
@@ -1654,13 +646,13 @@ void JIT::emitSlow_op_jless(Instruction* currentInstruction, Vector<SlowCaseEntr
Jump fail1;
if (!m_codeBlock->isKnownNotImmediate(op2))
fail1 = emitJumpIfNotJSCell(regT1);
-
+
Jump fail2 = checkStructure(regT1, m_globalData->numberStructure.get());
loadDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT1);
#endif
-
+
int32_t op1imm = getConstantOperand(op1).asInt32();
-
+
move(Imm32(op1imm), regT0);
convertInt32ToDouble(regT0, fpRegT0);
@@ -1737,7 +729,7 @@ void JIT::emitSlow_op_jless(Instruction* currentInstruction, Vector<SlowCaseEntr
}
}
-void JIT::emit_op_jnlesseq(Instruction* currentInstruction)
+void JIT::emit_op_jlesseq(Instruction* currentInstruction, bool invert)
{
unsigned op1 = currentInstruction[1].u.operand;
unsigned op2 = currentInstruction[2].u.operand;
@@ -1748,6 +740,24 @@ void JIT::emit_op_jnlesseq(Instruction* currentInstruction)
// - constant int immediate to int immediate
// - int immediate to int immediate
+ if (isOperandConstantImmediateChar(op1)) {
+ emitGetVirtualRegister(op2, regT0);
+ addSlowCase(emitJumpIfNotJSCell(regT0));
+ JumpList failures;
+ emitLoadCharacterString(regT0, regT0, failures);
+ addSlowCase(failures);
+ addJump(branch32(invert ? LessThan : GreaterThanOrEqual, regT0, Imm32(asString(getConstantOperand(op1))->tryGetValue()[0])), target);
+ return;
+ }
+ if (isOperandConstantImmediateChar(op2)) {
+ emitGetVirtualRegister(op1, regT0);
+ addSlowCase(emitJumpIfNotJSCell(regT0));
+ JumpList failures;
+ emitLoadCharacterString(regT0, regT0, failures);
+ addSlowCase(failures);
+ addJump(branch32(invert ? GreaterThan : LessThanOrEqual, regT0, Imm32(asString(getConstantOperand(op2))->tryGetValue()[0])), target);
+ return;
+ }
if (isOperandConstantImmediateInt(op2)) {
emitGetVirtualRegister(op1, regT0);
emitJumpSlowCaseIfNotImmediateInteger(regT0);
@@ -1756,7 +766,7 @@ void JIT::emit_op_jnlesseq(Instruction* currentInstruction)
#else
int32_t op2imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)));
#endif
- addJump(branch32(GreaterThan, regT0, Imm32(op2imm)), target);
+ addJump(branch32(invert ? GreaterThan : LessThanOrEqual, regT0, Imm32(op2imm)), target);
} else if (isOperandConstantImmediateInt(op1)) {
emitGetVirtualRegister(op2, regT1);
emitJumpSlowCaseIfNotImmediateInteger(regT1);
@@ -1765,17 +775,17 @@ void JIT::emit_op_jnlesseq(Instruction* currentInstruction)
#else
int32_t op1imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op1)));
#endif
- addJump(branch32(LessThan, regT1, Imm32(op1imm)), target);
+ addJump(branch32(invert ? LessThan : GreaterThanOrEqual, regT1, Imm32(op1imm)), target);
} else {
emitGetVirtualRegisters(op1, regT0, op2, regT1);
emitJumpSlowCaseIfNotImmediateInteger(regT0);
emitJumpSlowCaseIfNotImmediateInteger(regT1);
- addJump(branch32(GreaterThan, regT0, regT1), target);
+ addJump(branch32(invert ? GreaterThan : LessThanOrEqual, regT0, regT1), target);
}
}
-void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_jlesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter, bool invert)
{
unsigned op1 = currentInstruction[1].u.operand;
unsigned op2 = currentInstruction[2].u.operand;
@@ -1786,6 +796,19 @@ void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseE
// - constant int immediate to floating-point number
// - floating-point number to floating-point number.
+ if (isOperandConstantImmediateChar(op1) || isOperandConstantImmediateChar(op2)) {
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, cti_op_jlesseq);
+ stubCall.addArgument(op1, regT0);
+ stubCall.addArgument(op2, regT1);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, regT0), target);
+ return;
+ }
+
if (isOperandConstantImmediateInt(op2)) {
linkSlowCase(iter);
@@ -1802,13 +825,13 @@ void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseE
Jump fail2 = checkStructure(regT0, m_globalData->numberStructure.get());
loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
#endif
-
+
int32_t op2imm = getConstantOperand(op2).asInt32();
-
+
move(Imm32(op2imm), regT1);
convertInt32ToDouble(regT1, fpRegT1);
- emitJumpSlowToHot(branchDouble(DoubleLessThanOrUnordered, fpRegT1, fpRegT0), target);
+ emitJumpSlowToHot(branchDouble(invert ? DoubleLessThanOrUnordered : DoubleGreaterThanOrEqual, fpRegT1, fpRegT0), target);
emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnlesseq));
@@ -1825,7 +848,7 @@ void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseE
stubCall.addArgument(regT0);
stubCall.addArgument(op2, regT2);
stubCall.call();
- emitJumpSlowToHot(branchTest32(Zero, regT0), target);
+ emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, regT0), target);
} else if (isOperandConstantImmediateInt(op1)) {
linkSlowCase(iter);
@@ -1839,17 +862,17 @@ void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseE
Jump fail1;
if (!m_codeBlock->isKnownNotImmediate(op2))
fail1 = emitJumpIfNotJSCell(regT1);
-
+
Jump fail2 = checkStructure(regT1, m_globalData->numberStructure.get());
loadDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT1);
#endif
-
+
int32_t op1imm = getConstantOperand(op1).asInt32();
-
+
move(Imm32(op1imm), regT0);
convertInt32ToDouble(regT0, fpRegT0);
- emitJumpSlowToHot(branchDouble(DoubleLessThanOrUnordered, fpRegT1, fpRegT0), target);
+ emitJumpSlowToHot(branchDouble(invert ? DoubleLessThanOrUnordered : DoubleGreaterThanOrEqual, fpRegT1, fpRegT0), target);
emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnlesseq));
@@ -1866,7 +889,7 @@ void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseE
stubCall.addArgument(op1, regT2);
stubCall.addArgument(regT1);
stubCall.call();
- emitJumpSlowToHot(branchTest32(Zero, regT0), target);
+ emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, regT0), target);
} else {
linkSlowCase(iter);
@@ -1895,7 +918,7 @@ void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseE
loadDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT1);
#endif
- emitJumpSlowToHot(branchDouble(DoubleLessThanOrUnordered, fpRegT1, fpRegT0), target);
+ emitJumpSlowToHot(branchDouble(invert ? DoubleLessThanOrUnordered : DoubleGreaterThanOrEqual, fpRegT1, fpRegT0), target);
emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnlesseq));
@@ -1918,10 +941,20 @@ void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseE
stubCall.addArgument(regT0);
stubCall.addArgument(regT1);
stubCall.call();
- emitJumpSlowToHot(branchTest32(Zero, regT0), target);
+ emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, regT0), target);
}
}
+void JIT::emit_op_jnlesseq(Instruction* currentInstruction)
+{
+ emit_op_jlesseq(currentInstruction, true);
+}
+
+void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ emitSlow_op_jlesseq(currentInstruction, iter, true);
+}
+
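The two wrappers above are the point of this refactoring: op_jlesseq and op_jnlesseq now share a single emitter whose branch conditions flip on the `invert` flag instead of maintaining two near-identical bodies. A minimal C++ sketch of the pattern (illustrative, not JSC API):

    #include <cassert>

    // One comparison routine parameterized on 'invert'; only the branch
    // sense changes between the two opcodes.
    bool jumpTaken(int op1, int op2, bool invert)
    {
        bool lessEq = op1 <= op2;         // the op_jlesseq condition
        return invert ? !lessEq : lessEq; // op_jnlesseq is its complement
    }

    int main()
    {
        assert(jumpTaken(1, 2, false));  // jlesseq fires
        assert(!jumpTaken(1, 2, true));  // jnlesseq does not
        assert(jumpTaken(3, 2, true));   // jnlesseq fires
        return 0;
    }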
void JIT::emit_op_bitand(Instruction* currentInstruction)
{
unsigned result = currentInstruction[1].u.operand;
@@ -2164,11 +1197,11 @@ void JIT::emit_op_mod(Instruction* currentInstruction)
unsigned op1 = currentInstruction[2].u.operand;
unsigned op2 = currentInstruction[3].u.operand;
-#if ENABLE(JIT_OPTIMIZE_MOD)
+#if ENABLE(JIT_OPTIMIZE_MOD)
emitGetVirtualRegisters(op1, regT0, op2, regT2);
emitJumpSlowCaseIfNotImmediateInteger(regT0);
emitJumpSlowCaseIfNotImmediateInteger(regT2);
-
+
addSlowCase(branch32(Equal, regT2, Imm32(1)));
emitNakedCall(m_globalData->jitStubs.ctiSoftModulo());
@@ -2229,7 +1262,7 @@ void JIT::compileBinaryArithOpSlowCase(OpcodeID opcodeID, Vector<SlowCaseEntry>:
{
// We assume that subtracting TagTypeNumber is equivalent to adding DoubleEncodeOffset.
COMPILE_ASSERT(((JSImmediate::TagTypeNumber + JSImmediate::DoubleEncodeOffset) == 0), TagTypeNumber_PLUS_DoubleEncodeOffset_EQUALS_0);
-
+
Jump notImm1;
Jump notImm2;
if (op1HasImmediateIntFastCase) {
@@ -2422,7 +1455,7 @@ void JIT::emit_op_div(Instruction* currentInstruction)
movePtrToDouble(regT0, fpRegT0);
skipDoubleLoad.link(this);
}
-
+
if (isOperandConstantImmediateDouble(op2)) {
emitGetVirtualRegister(op2, regT1);
addPtr(tagTypeNumberRegister, regT1);
@@ -2535,7 +1568,7 @@ void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned dst, unsigned src1, u
op1imm.link(this);
emitFastArithImmToInt(regT0);
convertInt32ToDouble(regT0, fpRegT0);
- // (1c)
+ // (1c)
loadedDouble.link(this);
if (opcodeID == op_add)
addDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
@@ -2581,7 +1614,7 @@ void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned dst, unsigned src1, u
op2imm.link(this);
emitFastArithImmToInt(regT1);
convertInt32ToDouble(regT1, fpRegT1);
- // (1c)
+ // (1c)
loadedDouble.link(this);
loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
if (opcodeID == op_add)
@@ -2622,7 +1655,7 @@ void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned dst, unsigned src1, u
Jump op2NonZero = branchTest32(NonZero, regT1);
op1Zero.link(this);
// if either input is zero, add the two together, and check if the result is < 0.
- // If it is, we have a problem (N < 0), (N * 0) == -0, not representatble as a JSImmediate.
+ // If it is, we have a problem (N < 0), (N * 0) == -0, not representable as a JSImmediate.
move(regT0, regT2);
addSlowCase(branchAdd32(Signed, regT1, regT2));
// Skip the above check if neither input is zero
@@ -2801,7 +1834,7 @@ void JIT::emitSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>
/* ------------------------------ END: OP_ADD, OP_SUB, OP_MUL ------------------------------ */
-#endif // USE(JSVALUE32_64)
+#endif // !USE(JSVALUE32_64)
} // namespace JSC
diff --git a/JavaScriptCore/jit/JITArithmetic32_64.cpp b/JavaScriptCore/jit/JITArithmetic32_64.cpp
new file mode 100644
index 0000000..962c066
--- /dev/null
+++ b/JavaScriptCore/jit/JITArithmetic32_64.cpp
@@ -0,0 +1,1411 @@
+/*
+* Copyright (C) 2008 Apple Inc. All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions
+* are met:
+* 1. Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+* 2. Redistributions in binary form must reproduce the above copyright
+* notice, this list of conditions and the following disclaimer in the
+* documentation and/or other materials provided with the distribution.
+*
+* THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#include "config.h"
+#include "JIT.h"
+
+#if ENABLE(JIT)
+
+#include "CodeBlock.h"
+#include "JITInlineMethods.h"
+#include "JITStubCall.h"
+#include "JITStubs.h"
+#include "JSArray.h"
+#include "JSFunction.h"
+#include "Interpreter.h"
+#include "ResultType.h"
+#include "SamplingTool.h"
+
+#ifndef NDEBUG
+#include <stdio.h>
+#endif
+
+using namespace std;
+
+namespace JSC {
+
+#if USE(JSVALUE32_64)
+
+void JIT::emit_op_negate(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src = currentInstruction[2].u.operand;
+
+ emitLoad(src, regT1, regT0);
+
+ Jump srcNotInt = branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag));
+ addSlowCase(branchTest32(Zero, regT0, Imm32(0x7fffffff)));
+ neg32(regT0);
+ emitStoreInt32(dst, regT0, (dst == src));
+
+ Jump end = jump();
+
+ srcNotInt.link(this);
+ addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));
+
+ xor32(Imm32(1 << 31), regT1);
+ store32(regT1, tagFor(dst));
+ if (dst != src)
+ store32(regT0, payloadFor(dst));
+
+ end.link(this);
+}
+
+void JIT::emitSlow_op_negate(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+
+ linkSlowCase(iter); // 0x7fffffff check
+ linkSlowCase(iter); // double check
+
+ JITStubCall stubCall(this, cti_op_negate);
+ stubCall.addArgument(regT1, regT0);
+ stubCall.call(dst);
+}
+
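Both arms of emit_op_negate lean on number representation details: the int fast path bails on 0 and INT32_MIN via the 0x7fffffff mask test (negating either cannot stay an int32, since -0 is a double and 2^31 overflows), and the double path flips the sign bit directly in the tag word, which in JSVALUE32_64 holds the high 32 bits of the IEEE-754 pattern. A standalone sketch of the sign-bit trick (assumes IEEE-754 doubles; not JSC code):

    #include <cassert>
    #include <cmath>
    #include <cstdint>
    #include <cstring>

    // Negate a double by toggling bit 31 of its high word -- the same effect
    // as xor32(Imm32(1 << 31), regT1) on the tag register above.
    double negateViaSignBit(double d)
    {
        uint64_t bits;
        std::memcpy(&bits, &d, sizeof bits);
        bits ^= uint64_t(1) << 63; // bit 31 of the high 32-bit word
        std::memcpy(&d, &bits, sizeof d);
        return d;
    }

    int main()
    {
        assert(negateViaSignBit(1.5) == -1.5);
        assert(std::signbit(negateViaSignBit(0.0))); // 0.0 becomes -0.0
        return 0;
    }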
+void JIT::emit_op_jnless(Instruction* currentInstruction)
+{
+ unsigned op1 = currentInstruction[1].u.operand;
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+
+ JumpList notInt32Op1;
+ JumpList notInt32Op2;
+
+ // Character less.
+ if (isOperandConstantImmediateChar(op1)) {
+ emitLoad(op2, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));
+ JumpList failures;
+ emitLoadCharacterString(regT0, regT0, failures);
+ addSlowCase(failures);
+ addJump(branch32(LessThanOrEqual, regT0, Imm32(asString(getConstantOperand(op1))->tryGetValue()[0])), target);
+ return;
+ }
+ if (isOperandConstantImmediateChar(op2)) {
+ emitLoad(op1, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));
+ JumpList failures;
+ emitLoadCharacterString(regT0, regT0, failures);
+ addSlowCase(failures);
+ addJump(branch32(GreaterThanOrEqual, regT0, Imm32(asString(getConstantOperand(op2))->tryGetValue()[0])), target);
+ return;
+ }
+ if (isOperandConstantImmediateInt(op1)) {
+ // Int32 less.
+ emitLoad(op2, regT3, regT2);
+ notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+ addJump(branch32(LessThanOrEqual, regT2, Imm32(getConstantOperand(op1).asInt32())), target);
+ } else if (isOperandConstantImmediateInt(op2)) {
+ emitLoad(op1, regT1, regT0);
+ notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ addJump(branch32(GreaterThanOrEqual, regT0, Imm32(getConstantOperand(op2).asInt32())), target);
+ } else {
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+ notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+ addJump(branch32(GreaterThanOrEqual, regT0, regT2), target);
+ }
+
+ if (!supportsFloatingPoint()) {
+ addSlowCase(notInt32Op1);
+ addSlowCase(notInt32Op2);
+ return;
+ }
+ Jump end = jump();
+
+ // Double less.
+ emitBinaryDoubleOp(op_jnless, target, op1, op2, OperandTypes(), notInt32Op1, notInt32Op2, !isOperandConstantImmediateInt(op1), isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2));
+ end.link(this);
+}
+
+void JIT::emitSlow_op_jnless(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned op1 = currentInstruction[1].u.operand;
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+
+ if (isOperandConstantImmediateChar(op1) || isOperandConstantImmediateChar(op2)) {
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ } else {
+ if (!supportsFloatingPoint()) {
+ if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // int32 check
+ } else {
+ if (!isOperandConstantImmediateInt(op1)) {
+ linkSlowCase(iter); // double check
+ linkSlowCase(iter); // int32 check
+ }
+ if (isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2))
+ linkSlowCase(iter); // double check
+ }
+ }
+
+ JITStubCall stubCall(this, cti_op_jless);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(Zero, regT0), target);
+}
+
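The bookkeeping in this slow path (four unconditional linkSlowCase calls for the character case, conditional ones otherwise) reflects a strict invariant: linkSlowCase consumes the hot path's addSlowCase entries one-for-one, in emission order, so every conditional branch in emit_op_jnless must be re-counted here. A toy model of that discipline (illustrative only, not JSC code):

    #include <cassert>
    #include <cstddef>
    #include <vector>

    // The hot path records slow-case jumps in order; the slow path must
    // relink them in exactly the same order, leaving none behind.
    struct SlowCases {
        std::vector<int> jumps;
        std::size_t next = 0;
        void add(int jumpId) { jumps.push_back(jumpId); }
        int link() { assert(next < jumps.size()); return jumps[next++]; }
        bool fullyLinked() const { return next == jumps.size(); }
    };

    int main()
    {
        SlowCases cases;
        cases.add(1); // e.g. the op1 Int32Tag check
        cases.add(2); // e.g. the op2 Int32Tag check
        assert(cases.link() == 1);
        assert(cases.link() == 2);
        assert(cases.fullyLinked()); // a miscount would misroute the next op
        return 0;
    }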
+void JIT::emit_op_jless(Instruction* currentInstruction)
+{
+ unsigned op1 = currentInstruction[1].u.operand;
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+
+ JumpList notInt32Op1;
+ JumpList notInt32Op2;
+
+ // Character less.
+ if (isOperandConstantImmediateChar(op1)) {
+ emitLoad(op2, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));
+ JumpList failures;
+ emitLoadCharacterString(regT0, regT0, failures);
+ addSlowCase(failures);
+ addJump(branch32(GreaterThan, regT0, Imm32(asString(getConstantOperand(op1))->tryGetValue()[0])), target);
+ return;
+ }
+ if (isOperandConstantImmediateChar(op2)) {
+ emitLoad(op1, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));
+ JumpList failures;
+ emitLoadCharacterString(regT0, regT0, failures);
+ addSlowCase(failures);
+ addJump(branch32(LessThan, regT0, Imm32(asString(getConstantOperand(op2))->tryGetValue()[0])), target);
+ return;
+ }
+ if (isOperandConstantImmediateInt(op1)) {
+ emitLoad(op2, regT3, regT2);
+ notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+ addJump(branch32(GreaterThan, regT2, Imm32(getConstantOperand(op1).asInt32())), target);
+ } else if (isOperandConstantImmediateInt(op2)) {
+ emitLoad(op1, regT1, regT0);
+ notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ addJump(branch32(LessThan, regT0, Imm32(getConstantOperand(op2).asInt32())), target);
+ } else {
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+ notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+ addJump(branch32(LessThan, regT0, regT2), target);
+ }
+
+ if (!supportsFloatingPoint()) {
+ addSlowCase(notInt32Op1);
+ addSlowCase(notInt32Op2);
+ return;
+ }
+ Jump end = jump();
+
+ // Double less.
+ emitBinaryDoubleOp(op_jless, target, op1, op2, OperandTypes(), notInt32Op1, notInt32Op2, !isOperandConstantImmediateInt(op1), isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2));
+ end.link(this);
+}
+
+void JIT::emitSlow_op_jless(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned op1 = currentInstruction[1].u.operand;
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+
+ if (isOperandConstantImmediateChar(op1) || isOperandConstantImmediateChar(op2)) {
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ } else {
+ if (!supportsFloatingPoint()) {
+ if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // int32 check
+ } else {
+ if (!isOperandConstantImmediateInt(op1)) {
+ linkSlowCase(iter); // double check
+ linkSlowCase(iter); // int32 check
+ }
+ if (isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2))
+ linkSlowCase(iter); // double check
+ }
+ }
+ JITStubCall stubCall(this, cti_op_jless);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(NonZero, regT0), target);
+}
+
+void JIT::emit_op_jlesseq(Instruction* currentInstruction, bool invert)
+{
+ unsigned op1 = currentInstruction[1].u.operand;
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+
+ JumpList notInt32Op1;
+ JumpList notInt32Op2;
+
+ // Character less.
+ if (isOperandConstantImmediateChar(op1)) {
+ emitLoad(op2, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));
+ JumpList failures;
+ emitLoadCharacterString(regT0, regT0, failures);
+ addSlowCase(failures);
+ addJump(branch32(invert ? LessThan : GreaterThanOrEqual, regT0, Imm32(asString(getConstantOperand(op1))->tryGetValue()[0])), target);
+ return;
+ }
+ if (isOperandConstantImmediateChar(op2)) {
+ emitLoad(op1, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));
+ JumpList failures;
+ emitLoadCharacterString(regT0, regT0, failures);
+ addSlowCase(failures);
+ addJump(branch32(invert ? GreaterThan : LessThanOrEqual, regT0, Imm32(asString(getConstantOperand(op2))->tryGetValue()[0])), target);
+ return;
+ }
+ if (isOperandConstantImmediateInt(op1)) {
+ emitLoad(op2, regT3, regT2);
+ notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+ addJump(branch32(invert ? LessThan : GreaterThanOrEqual, regT2, Imm32(getConstantOperand(op1).asInt32())), target);
+ } else if (isOperandConstantImmediateInt(op2)) {
+ emitLoad(op1, regT1, regT0);
+ notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ addJump(branch32(invert ? GreaterThan : LessThanOrEqual, regT0, Imm32(getConstantOperand(op2).asInt32())), target);
+ } else {
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+ notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+ addJump(branch32(invert ? GreaterThan : LessThanOrEqual, regT0, regT2), target);
+ }
+
+ if (!supportsFloatingPoint()) {
+ addSlowCase(notInt32Op1);
+ addSlowCase(notInt32Op2);
+ return;
+ }
+ Jump end = jump();
+
+ // Double less.
+ emitBinaryDoubleOp(invert ? op_jnlesseq : op_jlesseq, target, op1, op2, OperandTypes(), notInt32Op1, notInt32Op2, !isOperandConstantImmediateInt(op1), isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2));
+ end.link(this);
+}
+
+void JIT::emitSlow_op_jlesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter, bool invert)
+{
+ unsigned op1 = currentInstruction[1].u.operand;
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+
+ if (isOperandConstantImmediateChar(op1) || isOperandConstantImmediateChar(op2)) {
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ } else {
+ if (!supportsFloatingPoint()) {
+ if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // int32 check
+ } else {
+ if (!isOperandConstantImmediateInt(op1)) {
+ linkSlowCase(iter); // double check
+ linkSlowCase(iter); // int32 check
+ }
+ if (isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2))
+ linkSlowCase(iter); // double check
+ }
+ }
+
+ JITStubCall stubCall(this, cti_op_jlesseq);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, regT0), target);
+}
+
+void JIT::emit_op_jnlesseq(Instruction* currentInstruction)
+{
+ emit_op_jlesseq(currentInstruction, true);
+}
+
+void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ emitSlow_op_jlesseq(currentInstruction, iter, true);
+}
+
+// LeftShift (<<)
+
+void JIT::emit_op_lshift(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ if (isOperandConstantImmediateInt(op2)) {
+ emitLoad(op1, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ lshift32(Imm32(getConstantOperand(op2).asInt32()), regT0);
+ emitStoreInt32(dst, regT0, dst == op1);
+ return;
+ }
+
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+ if (!isOperandConstantImmediateInt(op1))
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+ lshift32(regT2, regT0);
+ emitStoreInt32(dst, regT0, dst == op1 || dst == op2);
+}
+
+void JIT::emitSlow_op_lshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // int32 check
+
+ JITStubCall stubCall(this, cti_op_lshift);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call(dst);
+}
+
+// RightShift (>>) and UnsignedRightShift (>>>) helper
+
+void JIT::emitRightShift(Instruction* currentInstruction, bool isUnsigned)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ // The slow case of rshift makes assumptions about which registers hold the
+ // shift arguments, so any changes here must be mirrored there as well.
+ if (isOperandConstantImmediateInt(op2)) {
+ emitLoad(op1, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ int shift = getConstantOperand(op2).asInt32();
+ if (isUnsigned) {
+ if (shift)
+ urshift32(Imm32(shift & 0x1f), regT0);
+ // An unsigned shift by < 0 or by a multiple of 32 amounts to (essentially)
+ // a toUint32 conversion, which can produce a value we can no longer
+ // represent as an immediate int.
+ if (shift < 0 || !(shift & 31))
+ addSlowCase(branch32(LessThan, regT0, Imm32(0)));
+ } else if (shift) { // signed right shift by zero is simply toInt conversion
+ rshift32(Imm32(shift & 0x1f), regT0);
+ }
+ emitStoreInt32(dst, regT0, dst == op1);
+ return;
+ }
+
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+ if (!isOperandConstantImmediateInt(op1))
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+ if (isUnsigned) {
+ urshift32(regT2, regT0);
+ addSlowCase(branch32(LessThan, regT0, Imm32(0)));
+ } else
+ rshift32(regT2, regT0);
+ emitStoreInt32(dst, regT0, dst == op1 || dst == op2);
+}
+
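The `shift & 0x1f` masking above encodes the ECMAScript rule that only the low five bits of the shift count are used, so a shift by a multiple of 32 degenerates to a plain toInt32/toUint32 conversion. A small sketch of the semantics (arithmetic right shift of negatives is assumed, which mainstream compilers provide and C++20 guarantees):

    #include <cassert>
    #include <cstdint>

    // JS '>>': only the low five bits of the count participate.
    int32_t jsSignedRightShift(int32_t lhs, int32_t rhs)
    {
        return lhs >> (rhs & 0x1f);
    }

    int main()
    {
        assert(jsSignedRightShift(-8, 1) == -4);
        assert(jsSignedRightShift(-8, 33) == -4); // 33 & 31 == 1
        assert(jsSignedRightShift(-8, 32) == -8); // effective shift of zero
        return 0;
    }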
+void JIT::emitRightShiftSlowCase(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter, bool isUnsigned)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+ if (isOperandConstantImmediateInt(op2)) {
+ int shift = getConstantOperand(op2).asInt32();
+ // op1 = regT1:regT0
+ linkSlowCase(iter); // int32 check
+ if (supportsFloatingPointTruncate()) {
+ JumpList failures;
+ failures.append(branch32(AboveOrEqual, regT1, Imm32(JSValue::LowestTag)));
+ emitLoadDouble(op1, fpRegT0);
+ failures.append(branchTruncateDoubleToInt32(fpRegT0, regT0));
+ if (isUnsigned) {
+ if (shift)
+ urshift32(Imm32(shift & 0x1f), regT0);
+ if (shift < 0 || !(shift & 31))
+ failures.append(branch32(LessThan, regT0, Imm32(0)));
+ } else if (shift)
+ rshift32(Imm32(shift & 0x1f), regT0);
+ emitStoreInt32(dst, regT0, dst == op1 || dst == op2);
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_rshift));
+ failures.link(this);
+ }
+ if (isUnsigned && (shift < 0 || !(shift & 31)))
+ linkSlowCase(iter); // failed to box in hot path
+ } else {
+ // op1 = regT1:regT0
+ // op2 = regT3:regT2
+ if (!isOperandConstantImmediateInt(op1)) {
+ linkSlowCase(iter); // int32 check -- op1 is not an int
+ if (supportsFloatingPointTruncate()) {
+ Jump notDouble = branch32(Above, regT1, Imm32(JSValue::LowestTag)); // op1 is not a double
+ emitLoadDouble(op1, fpRegT0);
+ Jump notInt = branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)); // op2 is not an int
+ Jump cantTruncate = branchTruncateDoubleToInt32(fpRegT0, regT0);
+ if (isUnsigned)
+ urshift32(regT2, regT0);
+ else
+ rshift32(regT2, regT0);
+ emitStoreInt32(dst, regT0, dst == op1 || dst == op2);
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_rshift));
+ notDouble.link(this);
+ notInt.link(this);
+ cantTruncate.link(this);
+ }
+ }
+
+ linkSlowCase(iter); // int32 check - op2 is not an int
+ if (isUnsigned)
+ linkSlowCase(iter); // Can't represent unsigned result as an immediate
+ }
+
+ JITStubCall stubCall(this, isUnsigned ? cti_op_urshift : cti_op_rshift);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call(dst);
+}
+
+// RightShift (>>)
+
+void JIT::emit_op_rshift(Instruction* currentInstruction)
+{
+ emitRightShift(currentInstruction, false);
+}
+
+void JIT::emitSlow_op_rshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ emitRightShiftSlowCase(currentInstruction, iter, false);
+}
+
+// UnsignedRightShift (>>>)
+
+void JIT::emit_op_urshift(Instruction* currentInstruction)
+{
+ emitRightShift(currentInstruction, true);
+}
+
+void JIT::emitSlow_op_urshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ emitRightShiftSlowCase(currentInstruction, iter, true);
+}
+
+// BitAnd (&)
+
+void JIT::emit_op_bitand(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ unsigned op;
+ int32_t constant;
+ if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
+ emitLoad(op, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ and32(Imm32(constant), regT0);
+ emitStoreInt32(dst, regT0, (op == dst));
+ return;
+ }
+
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+ and32(regT2, regT0);
+ emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
+}
+
+void JIT::emitSlow_op_bitand(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // int32 check
+
+ JITStubCall stubCall(this, cti_op_bitand);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call(dst);
+}
+
+// BitOr (|)
+
+void JIT::emit_op_bitor(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ unsigned op;
+ int32_t constant;
+ if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
+ emitLoad(op, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ or32(Imm32(constant), regT0);
+ emitStoreInt32(dst, regT0, (op == dst));
+ return;
+ }
+
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+ or32(regT2, regT0);
+ emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
+}
+
+void JIT::emitSlow_op_bitor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // int32 check
+
+ JITStubCall stubCall(this, cti_op_bitor);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call(dst);
+}
+
+// BitXor (^)
+
+void JIT::emit_op_bitxor(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ unsigned op;
+ int32_t constant;
+ if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
+ emitLoad(op, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ xor32(Imm32(constant), regT0);
+ emitStoreInt32(dst, regT0, (op == dst));
+ return;
+ }
+
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+ xor32(regT2, regT0);
+ emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
+}
+
+void JIT::emitSlow_op_bitxor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // int32 check
+
+ JITStubCall stubCall(this, cti_op_bitxor);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call(dst);
+}
+
+// BitNot (~)
+
+void JIT::emit_op_bitnot(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src = currentInstruction[2].u.operand;
+
+ emitLoad(src, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+
+ not32(regT0);
+ emitStoreInt32(dst, regT0, (dst == src));
+}
+
+void JIT::emitSlow_op_bitnot(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+
+ linkSlowCase(iter); // int32 check
+
+ JITStubCall stubCall(this, cti_op_bitnot);
+ stubCall.addArgument(regT1, regT0);
+ stubCall.call(dst);
+}
+
+// PostInc (i++)
+
+void JIT::emit_op_post_inc(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned srcDst = currentInstruction[2].u.operand;
+
+ emitLoad(srcDst, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+
+ if (dst == srcDst) // x = x++ is a noop for ints.
+ return;
+
+ emitStoreInt32(dst, regT0);
+
+ addSlowCase(branchAdd32(Overflow, Imm32(1), regT0));
+ emitStoreInt32(srcDst, regT0, true);
+}
+
+void JIT::emitSlow_op_post_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned srcDst = currentInstruction[2].u.operand;
+
+ linkSlowCase(iter); // int32 check
+ if (dst != srcDst)
+ linkSlowCase(iter); // overflow check
+
+ JITStubCall stubCall(this, cti_op_post_inc);
+ stubCall.addArgument(srcDst);
+ stubCall.addArgument(Imm32(srcDst));
+ stubCall.call(dst);
+}
+
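The Overflow branch above is what keeps this fast path honest: incrementing INT32_MAX cannot stay an int32 immediate, so the stub must redo it and box 2147483648.0 as a double. A sketch of the check (uses the GCC/Clang `__builtin_add_overflow` builtin):

    #include <cassert>
    #include <cstdint>

    // Mirrors branchAdd32(Overflow, Imm32(1), regT0): detect when ++ escapes
    // the int32 range.
    bool incrementOverflows(int32_t v)
    {
        int32_t result;
        return __builtin_add_overflow(v, 1, &result);
    }

    int main()
    {
        assert(!incrementOverflows(41));
        assert(incrementOverflows(INT32_MAX)); // slow path boxes a double
        return 0;
    }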
+// PostDec (i--)
+
+void JIT::emit_op_post_dec(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned srcDst = currentInstruction[2].u.operand;
+
+ emitLoad(srcDst, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+
+ if (dst == srcDst) // x = x-- is a noop for ints.
+ return;
+
+ emitStoreInt32(dst, regT0);
+
+ addSlowCase(branchSub32(Overflow, Imm32(1), regT0));
+ emitStoreInt32(srcDst, regT0, true);
+}
+
+void JIT::emitSlow_op_post_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned srcDst = currentInstruction[2].u.operand;
+
+ linkSlowCase(iter); // int32 check
+ if (dst != srcDst)
+ linkSlowCase(iter); // overflow check
+
+ JITStubCall stubCall(this, cti_op_post_dec);
+ stubCall.addArgument(srcDst);
+ stubCall.addArgument(Imm32(srcDst));
+ stubCall.call(dst);
+}
+
+// PreInc (++i)
+
+void JIT::emit_op_pre_inc(Instruction* currentInstruction)
+{
+ unsigned srcDst = currentInstruction[1].u.operand;
+
+ emitLoad(srcDst, regT1, regT0);
+
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ addSlowCase(branchAdd32(Overflow, Imm32(1), regT0));
+ emitStoreInt32(srcDst, regT0, true);
+}
+
+void JIT::emitSlow_op_pre_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned srcDst = currentInstruction[1].u.operand;
+
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // overflow check
+
+ JITStubCall stubCall(this, cti_op_pre_inc);
+ stubCall.addArgument(srcDst);
+ stubCall.call(srcDst);
+}
+
+// PreDec (--i)
+
+void JIT::emit_op_pre_dec(Instruction* currentInstruction)
+{
+ unsigned srcDst = currentInstruction[1].u.operand;
+
+ emitLoad(srcDst, regT1, regT0);
+
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ addSlowCase(branchSub32(Overflow, Imm32(1), regT0));
+ emitStoreInt32(srcDst, regT0, true);
+}
+
+void JIT::emitSlow_op_pre_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned srcDst = currentInstruction[1].u.operand;
+
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // overflow check
+
+ JITStubCall stubCall(this, cti_op_pre_dec);
+ stubCall.addArgument(srcDst);
+ stubCall.call(srcDst);
+}
+
+// Addition (+)
+
+void JIT::emit_op_add(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+
+ if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
+ JITStubCall stubCall(this, cti_op_add);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call(dst);
+ return;
+ }
+
+ JumpList notInt32Op1;
+ JumpList notInt32Op2;
+
+ unsigned op;
+ int32_t constant;
+ if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
+ emitAdd32Constant(dst, op, constant, op == op1 ? types.first() : types.second());
+ return;
+ }
+
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+ notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+
+ // Int32 case.
+ addSlowCase(branchAdd32(Overflow, regT2, regT0));
+ emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
+
+ if (!supportsFloatingPoint()) {
+ addSlowCase(notInt32Op1);
+ addSlowCase(notInt32Op2);
+ return;
+ }
+ Jump end = jump();
+
+ // Double case.
+ emitBinaryDoubleOp(op_add, dst, op1, op2, types, notInt32Op1, notInt32Op2);
+ end.link(this);
+}
+
+void JIT::emitAdd32Constant(unsigned dst, unsigned op, int32_t constant, ResultType opType)
+{
+ // Int32 case.
+ emitLoad(op, regT1, regT0);
+ Jump notInt32 = branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag));
+ addSlowCase(branchAdd32(Overflow, Imm32(constant), regT0));
+ emitStoreInt32(dst, regT0, (op == dst));
+
+ // Double case.
+ if (!supportsFloatingPoint()) {
+ addSlowCase(notInt32);
+ return;
+ }
+ Jump end = jump();
+
+ notInt32.link(this);
+ if (!opType.definitelyIsNumber())
+ addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));
+ move(Imm32(constant), regT2);
+ convertInt32ToDouble(regT2, fpRegT0);
+ emitLoadDouble(op, fpRegT1);
+ addDouble(fpRegT1, fpRegT0);
+ emitStoreDouble(dst, fpRegT0);
+
+ end.link(this);
+}
+
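emitAdd32Constant is the int-add-with-guard shape in miniature: attempt the int32 add, and if the Overflow branch fires, the slow path redoes the sum in double precision. A standalone sketch of the same shape (GCC/Clang builtin; names illustrative):

    #include <cassert>
    #include <cstdint>

    // Int32 fast path with a double fallback, as the emitter plus its slow
    // case achieve together.
    double addWithIntFastPath(int32_t op, int32_t constant)
    {
        int32_t sum;
        if (!__builtin_add_overflow(op, constant, &sum))
            return sum;                             // stays an immediate int
        return static_cast<double>(op) + constant;  // overflow: use a double
    }

    int main()
    {
        assert(addWithIntFastPath(1, 2) == 3.0);
        assert(addWithIntFastPath(INT32_MAX, 1) == 2147483648.0);
        return 0;
    }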
+void JIT::emitSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+
+ if (!types.first().mightBeNumber() || !types.second().mightBeNumber())
+ return;
+
+ unsigned op;
+ int32_t constant;
+ if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
+ linkSlowCase(iter); // overflow check
+
+ if (!supportsFloatingPoint())
+ linkSlowCase(iter); // non-sse case
+ else {
+ ResultType opType = op == op1 ? types.first() : types.second();
+ if (!opType.definitelyIsNumber())
+ linkSlowCase(iter); // double check
+ }
+ } else {
+ linkSlowCase(iter); // overflow check
+
+ if (!supportsFloatingPoint()) {
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // int32 check
+ } else {
+ if (!types.first().definitelyIsNumber())
+ linkSlowCase(iter); // double check
+
+ if (!types.second().definitelyIsNumber()) {
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // double check
+ }
+ }
+ }
+
+ JITStubCall stubCall(this, cti_op_add);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call(dst);
+}
+
+// Subtraction (-)
+
+void JIT::emit_op_sub(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+
+ JumpList notInt32Op1;
+ JumpList notInt32Op2;
+
+ if (isOperandConstantImmediateInt(op2)) {
+ emitSub32Constant(dst, op1, getConstantOperand(op2).asInt32(), types.first());
+ return;
+ }
+
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+ notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+
+ // Int32 case.
+ addSlowCase(branchSub32(Overflow, regT2, regT0));
+ emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
+
+ if (!supportsFloatingPoint()) {
+ addSlowCase(notInt32Op1);
+ addSlowCase(notInt32Op2);
+ return;
+ }
+ Jump end = jump();
+
+ // Double case.
+ emitBinaryDoubleOp(op_sub, dst, op1, op2, types, notInt32Op1, notInt32Op2);
+ end.link(this);
+}
+
+void JIT::emitSub32Constant(unsigned dst, unsigned op, int32_t constant, ResultType opType)
+{
+ // Int32 case.
+ emitLoad(op, regT1, regT0);
+ Jump notInt32 = branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag));
+ addSlowCase(branchSub32(Overflow, Imm32(constant), regT0));
+ emitStoreInt32(dst, regT0, (op == dst));
+
+ // Double case.
+ if (!supportsFloatingPoint()) {
+ addSlowCase(notInt32);
+ return;
+ }
+ Jump end = jump();
+
+ notInt32.link(this);
+ if (!opType.definitelyIsNumber())
+ addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));
+ move(Imm32(constant), regT2);
+ convertInt32ToDouble(regT2, fpRegT0);
+ emitLoadDouble(op, fpRegT1);
+ subDouble(fpRegT0, fpRegT1);
+ emitStoreDouble(dst, fpRegT1);
+
+ end.link(this);
+}
+
+void JIT::emitSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+
+ if (isOperandConstantImmediateInt(op2)) {
+ linkSlowCase(iter); // overflow check
+
+ if (!supportsFloatingPoint() || !types.first().definitelyIsNumber())
+ linkSlowCase(iter); // int32 or double check
+ } else {
+ linkSlowCase(iter); // overflow check
+
+ if (!supportsFloatingPoint()) {
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // int32 check
+ } else {
+ if (!types.first().definitelyIsNumber())
+ linkSlowCase(iter); // double check
+
+ if (!types.second().definitelyIsNumber()) {
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // double check
+ }
+ }
+ }
+
+ JITStubCall stubCall(this, cti_op_sub);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call(dst);
+}
+
+void JIT::emitBinaryDoubleOp(OpcodeID opcodeID, unsigned dst, unsigned op1, unsigned op2, OperandTypes types, JumpList& notInt32Op1, JumpList& notInt32Op2, bool op1IsInRegisters, bool op2IsInRegisters)
+{
+ JumpList end;
+
+ if (!notInt32Op1.empty()) {
+ // Double case 1: Op1 is not int32; Op2 is unknown.
+ notInt32Op1.link(this);
+
+ ASSERT(op1IsInRegisters);
+
+ // Verify Op1 is double.
+ if (!types.first().definitelyIsNumber())
+ addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));
+
+ if (!op2IsInRegisters)
+ emitLoad(op2, regT3, regT2);
+
+ Jump doubleOp2 = branch32(Below, regT3, Imm32(JSValue::LowestTag));
+
+ if (!types.second().definitelyIsNumber())
+ addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+
+ convertInt32ToDouble(regT2, fpRegT0);
+ Jump doTheMath = jump();
+
+ // Load Op2 as double into double register.
+ doubleOp2.link(this);
+ emitLoadDouble(op2, fpRegT0);
+
+ // Do the math.
+ doTheMath.link(this);
+ switch (opcodeID) {
+ case op_mul:
+ emitLoadDouble(op1, fpRegT2);
+ mulDouble(fpRegT2, fpRegT0);
+ emitStoreDouble(dst, fpRegT0);
+ break;
+ case op_add:
+ emitLoadDouble(op1, fpRegT2);
+ addDouble(fpRegT2, fpRegT0);
+ emitStoreDouble(dst, fpRegT0);
+ break;
+ case op_sub:
+ emitLoadDouble(op1, fpRegT1);
+ subDouble(fpRegT0, fpRegT1);
+ emitStoreDouble(dst, fpRegT1);
+ break;
+ case op_div:
+ emitLoadDouble(op1, fpRegT1);
+ divDouble(fpRegT0, fpRegT1);
+ emitStoreDouble(dst, fpRegT1);
+ break;
+ case op_jnless:
+ emitLoadDouble(op1, fpRegT2);
+ addJump(branchDouble(DoubleLessThanOrEqualOrUnordered, fpRegT0, fpRegT2), dst);
+ break;
+ case op_jless:
+ emitLoadDouble(op1, fpRegT2);
+ addJump(branchDouble(DoubleLessThan, fpRegT2, fpRegT0), dst);
+ break;
+ case op_jlesseq:
+ emitLoadDouble(op1, fpRegT2);
+ addJump(branchDouble(DoubleLessThanOrEqual, fpRegT2, fpRegT0), dst);
+ break;
+ case op_jnlesseq:
+ emitLoadDouble(op1, fpRegT2);
+ addJump(branchDouble(DoubleLessThanOrUnordered, fpRegT0, fpRegT2), dst);
+ break;
+ default:
+ ASSERT_NOT_REACHED();
+ }
+
+ if (!notInt32Op2.empty())
+ end.append(jump());
+ }
+
+ if (!notInt32Op2.empty()) {
+ // Double case 2: Op1 is int32; Op2 is not int32.
+ notInt32Op2.link(this);
+
+ ASSERT(op2IsInRegisters);
+
+ if (!op1IsInRegisters)
+ emitLoadPayload(op1, regT0);
+
+ convertInt32ToDouble(regT0, fpRegT0);
+
+ // Verify op2 is double.
+ if (!types.second().definitelyIsNumber())
+ addSlowCase(branch32(Above, regT3, Imm32(JSValue::LowestTag)));
+
+ // Do the math.
+ switch (opcodeID) {
+ case op_mul:
+ emitLoadDouble(op2, fpRegT2);
+ mulDouble(fpRegT2, fpRegT0);
+ emitStoreDouble(dst, fpRegT0);
+ break;
+ case op_add:
+ emitLoadDouble(op2, fpRegT2);
+ addDouble(fpRegT2, fpRegT0);
+ emitStoreDouble(dst, fpRegT0);
+ break;
+ case op_sub:
+ emitLoadDouble(op2, fpRegT2);
+ subDouble(fpRegT2, fpRegT0);
+ emitStoreDouble(dst, fpRegT0);
+ break;
+ case op_div:
+ emitLoadDouble(op2, fpRegT2);
+ divDouble(fpRegT2, fpRegT0);
+ emitStoreDouble(dst, fpRegT0);
+ break;
+ case op_jnless:
+ emitLoadDouble(op2, fpRegT1);
+ addJump(branchDouble(DoubleLessThanOrEqualOrUnordered, fpRegT1, fpRegT0), dst);
+ break;
+ case op_jless:
+ emitLoadDouble(op2, fpRegT1);
+ addJump(branchDouble(DoubleLessThan, fpRegT0, fpRegT1), dst);
+ break;
+ case op_jnlesseq:
+ emitLoadDouble(op2, fpRegT1);
+ addJump(branchDouble(DoubleLessThanOrUnordered, fpRegT1, fpRegT0), dst);
+ break;
+ case op_jlesseq:
+ emitLoadDouble(op2, fpRegT1);
+ addJump(branchDouble(DoubleLessThanOrEqual, fpRegT1, fpRegT0), dst);
+ break;
+ default:
+ ASSERT_NOT_REACHED();
+ }
+ }
+
+ end.link(this);
+}
+
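The OrUnordered variants chosen for op_jnless and op_jnlesseq above are deliberate NaN handling: the inverted jumps fire when the ordered comparison is false, and any comparison involving NaN is false, so the unordered outcome must take the branch too. A sketch of the rule:

    #include <cassert>
    #include <cmath>

    // jnless branches when !(op1 < op2); NaN makes '<' false, so unordered
    // inputs must branch as well -- hence DoubleLessThanOrEqualOrUnordered.
    bool jnlessTakesBranch(double op1, double op2)
    {
        return !(op1 < op2);
    }

    int main()
    {
        assert(jnlessTakesBranch(2.0, 1.0));
        assert(jnlessTakesBranch(std::nan(""), 1.0)); // unordered: branch
        assert(!jnlessTakesBranch(1.0, 2.0));
        return 0;
    }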
+// Multiplication (*)
+
+void JIT::emit_op_mul(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+
+ JumpList notInt32Op1;
+ JumpList notInt32Op2;
+
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+ notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+
+ // Int32 case.
+ move(regT0, regT3);
+ addSlowCase(branchMul32(Overflow, regT2, regT0));
+ addSlowCase(branchTest32(Zero, regT0));
+ emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
+
+ if (!supportsFloatingPoint()) {
+ addSlowCase(notInt32Op1);
+ addSlowCase(notInt32Op2);
+ return;
+ }
+ Jump end = jump();
+
+ // Double case.
+ emitBinaryDoubleOp(op_mul, dst, op1, op2, types, notInt32Op1, notInt32Op2);
+ end.link(this);
+}
+
+void JIT::emitSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+
+ Jump overflow = getSlowCase(iter); // overflow check
+ linkSlowCase(iter); // zero result check
+
+ Jump negZero = branchOr32(Signed, regT2, regT3);
+ emitStoreInt32(dst, Imm32(0), (op1 == dst || op2 == dst));
+
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_mul));
+
+ negZero.link(this);
+ overflow.link(this);
+
+ if (!supportsFloatingPoint()) {
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // int32 check
+ }
+
+ if (supportsFloatingPoint()) {
+ if (!types.first().definitelyIsNumber())
+ linkSlowCase(iter); // double check
+
+ if (!types.second().definitelyIsNumber()) {
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // double check
+ }
+ }
+
+ Label jitStubCall(this);
+ JITStubCall stubCall(this, cti_op_mul);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call(dst);
+}
+
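The zero-result slow case in emit_op_mul exists because int32 multiplication cannot represent -0: when the product is 0 and either operand was negative, JS requires -0.0, which is why the slow path re-tests the original payloads with branchOr32(Signed, ...). A sketch of the semantics:

    #include <cassert>
    #include <cmath>
    #include <cstdint>

    // An int32 product of 0 is only safe if neither operand was negative;
    // (-n) * 0 must produce -0.0, which has no int32 encoding.
    double jsMultiply(int32_t n, int32_t m)
    {
        int64_t product = static_cast<int64_t>(n) * m;
        if (product == 0 && (n < 0 || m < 0))
            return -0.0;                        // fall back to a double
        return static_cast<double>(product);
    }

    int main()
    {
        assert(std::signbit(jsMultiply(-5, 0)));
        assert(!std::signbit(jsMultiply(5, 0)));
        return 0;
    }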
+// Division (/)
+
+void JIT::emit_op_div(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+
+ if (!supportsFloatingPoint()) {
+ addSlowCase(jump());
+ return;
+ }
+
+ // Int32 divide.
+ JumpList notInt32Op1;
+ JumpList notInt32Op2;
+
+ JumpList end;
+
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+
+ notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+
+ convertInt32ToDouble(regT0, fpRegT0);
+ convertInt32ToDouble(regT2, fpRegT1);
+ divDouble(fpRegT1, fpRegT0);
+
+ JumpList doubleResult;
+ branchConvertDoubleToInt32(fpRegT0, regT0, doubleResult, fpRegT1);
+
+ // Int32 result.
+ emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
+ end.append(jump());
+
+ // Double result.
+ doubleResult.link(this);
+ emitStoreDouble(dst, fpRegT0);
+ end.append(jump());
+
+ // Double divide.
+ emitBinaryDoubleOp(op_div, dst, op1, op2, types, notInt32Op1, notInt32Op2);
+ end.link(this);
+}
+
+void JIT::emitSlow_op_div(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+
+ if (!supportsFloatingPoint())
+ linkSlowCase(iter);
+ else {
+ if (!types.first().definitelyIsNumber())
+ linkSlowCase(iter); // double check
+
+ if (!types.second().definitelyIsNumber()) {
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // double check
+ }
+ }
+
+ JITStubCall stubCall(this, cti_op_div);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call(dst);
+}
+
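The div fast path above computes the quotient in doubles and stores an int32 immediate only when the value converts back exactly and is not -0.0; that is the contract of the branchConvertDoubleToInt32 call. A sketch of that round-trip test (illustrative helper, not JSC API):

    #include <cassert>
    #include <cmath>
    #include <cstdint>

    // True when a double quotient can be stored as an int32 immediate.
    bool fitsInt32Exactly(double d, int32_t& out)
    {
        if (!(d >= -2147483648.0 && d <= 2147483647.0)) // also rejects NaN
            return false;
        int32_t i = static_cast<int32_t>(d);
        if (static_cast<double>(i) != d)
            return false;
        if (i == 0 && std::signbit(d)) // -0.0 must stay a double
            return false;
        out = i;
        return true;
    }

    int main()
    {
        int32_t i;
        assert(fitsInt32Exactly(6.0 / 2.0, i) && i == 3);
        assert(!fitsInt32Exactly(1.0 / 2.0, i));  // 0.5 stays a double
        assert(!fitsInt32Exactly(0.0 / -5.0, i)); // negative zero
        return 0;
    }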
+// Mod (%)
+
+/* ------------------------------ BEGIN: OP_MOD ------------------------------ */
+
+#if CPU(X86) || CPU(X86_64)
+
+void JIT::emit_op_mod(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ if (isOperandConstantImmediateInt(op2) && getConstantOperand(op2).asInt32() != 0) {
+ emitLoad(op1, X86Registers::edx, X86Registers::eax);
+ move(Imm32(getConstantOperand(op2).asInt32()), X86Registers::ecx);
+ addSlowCase(branch32(NotEqual, X86Registers::edx, Imm32(JSValue::Int32Tag)));
+ if (getConstantOperand(op2).asInt32() == -1)
+ addSlowCase(branch32(Equal, X86Registers::eax, Imm32(0x80000000))); // -2147483648 / -1 => EXC_ARITHMETIC
+ } else {
+ emitLoad2(op1, X86Registers::edx, X86Registers::eax, op2, X86Registers::ebx, X86Registers::ecx);
+ addSlowCase(branch32(NotEqual, X86Registers::edx, Imm32(JSValue::Int32Tag)));
+ addSlowCase(branch32(NotEqual, X86Registers::ebx, Imm32(JSValue::Int32Tag)));
+
+ addSlowCase(branch32(Equal, X86Registers::eax, Imm32(0x80000000))); // -2147483648 / -1 => EXC_ARITHMETIC
+ addSlowCase(branch32(Equal, X86Registers::ecx, Imm32(0))); // divide by 0
+ }
+
+ move(X86Registers::eax, X86Registers::ebx); // Save dividend payload, in case of 0.
+ m_assembler.cdq();
+ m_assembler.idivl_r(X86Registers::ecx);
+
+ // If the remainder is zero and the dividend is negative, the result is -0.
+ Jump storeResult1 = branchTest32(NonZero, X86Registers::edx);
+ Jump storeResult2 = branchTest32(Zero, X86Registers::ebx, Imm32(0x80000000)); // not negative
+ emitStore(dst, jsNumber(m_globalData, -0.0));
+ Jump end = jump();
+
+ storeResult1.link(this);
+ storeResult2.link(this);
+ emitStoreInt32(dst, X86Registers::edx, (op1 == dst || op2 == dst));
+ end.link(this);
+}
+
+void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ if (isOperandConstantImmediateInt(op2) && getConstantOperand(op2).asInt32() != 0) {
+ linkSlowCase(iter); // int32 check
+ if (getConstantOperand(op2).asInt32() == -1)
+ linkSlowCase(iter); // 0x80000000 check
+ } else {
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // 0 check
+ linkSlowCase(iter); // 0x80000000 check
+ }
+
+ JITStubCall stubCall(this, cti_op_mod);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call(dst);
+}
+
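The x86 mod emitter above branches around two idiv hazards: INT_MIN % -1 raises a hardware exception (the 0x80000000 checks), and a zero remainder from a negative dividend must become -0.0 per JS semantics. A sketch of both rules (plain C++, not the JIT's register protocol):

    #include <cassert>
    #include <climits>
    #include <cmath>

    double jsModulo(int dividend, int divisor)
    {
        // Cases idiv cannot handle: divide by zero and INT_MIN % -1.
        if (divisor == 0 || (dividend == INT_MIN && divisor == -1))
            return std::fmod(dividend, divisor); // stub path: double math
        int remainder = dividend % divisor;
        if (remainder == 0 && dividend < 0)
            return -0.0;                         // negative dividend: -0
        return remainder;
    }

    int main()
    {
        assert(std::signbit(jsModulo(-4, 2)));
        assert(jsModulo(INT_MIN, -1) == 0.0); // no trap on the double path
        assert(std::isnan(jsModulo(3, 0)));
        return 0;
    }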
+#else // CPU(X86) || CPU(X86_64)
+
+void JIT::emit_op_mod(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+#if ENABLE(JIT_OPTIMIZE_MOD)
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+
+ addSlowCase(branch32(Equal, regT2, Imm32(0)));
+
+ emitNakedCall(m_globalData->jitStubs.ctiSoftModulo());
+
+ emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
+#else
+ JITStubCall stubCall(this, cti_op_mod);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call(dst);
+#endif
+}
+
+void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+#if ENABLE(JIT_OPTIMIZE_MOD)
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, cti_op_mod);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call(result);
+#else
+ ASSERT_NOT_REACHED();
+#endif
+}
+
+#endif // CPU(X86) || CPU(X86_64)
+
+/* ------------------------------ END: OP_MOD ------------------------------ */
+
+#endif // USE(JSVALUE32_64)
+
+}
+
+#endif // ENABLE(JIT)
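
A note on the op_mod fast path above: ECMAScript's % operator takes the sign of the dividend, so a zero remainder from a negative dividend must be stored as -0, not integer 0. That is why the emitted code saves the dividend payload and tests its sign bit before choosing between emitStoreInt32 and the boxed -0.0 store. A minimal standalone illustration of the semantics (plain C++; fmod produces the same signed zero):

    #include <cmath>
    #include <cstdio>

    int main()
    {
        // In JS, (-8 % 2) evaluates to -0; fmod models the same rule.
        double r = std::fmod(-8.0, 2.0);
        std::printf("remainder = %g, signbit = %d\n", r, (int)std::signbit(r));
        // Prints: remainder = -0, signbit = 1
        return 0;
    }
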
diff --git a/JavaScriptCore/jit/JITInlineMethods.h b/JavaScriptCore/jit/JITInlineMethods.h
index 96a2e5b..892ab36 100644
--- a/JavaScriptCore/jit/JITInlineMethods.h
+++ b/JavaScriptCore/jit/JITInlineMethods.h
@@ -93,6 +93,16 @@ ALWAYS_INLINE void JIT::emitGetFromCallFrameHeaderPtr(RegisterFile::CallFrameHea
#endif
}
+ALWAYS_INLINE void JIT::emitLoadCharacterString(RegisterID src, RegisterID dst, JumpList& failures)
+{
+ failures.append(branchPtr(NotEqual, Address(src), ImmPtr(m_globalData->jsStringVPtr)));
+ failures.append(branchTest32(NonZero, Address(src, OBJECT_OFFSETOF(JSString, m_fiberCount))));
+ failures.append(branch32(NotEqual, MacroAssembler::Address(src, ThunkHelpers::jsStringLengthOffset()), Imm32(1)));
+ loadPtr(MacroAssembler::Address(src, ThunkHelpers::jsStringValueOffset()), dst);
+ loadPtr(MacroAssembler::Address(dst, ThunkHelpers::stringImplDataOffset()), dst);
+ load16(MacroAssembler::Address(dst, 0), dst);
+}
+
ALWAYS_INLINE void JIT::emitGetFromCallFrameHeader32(RegisterFile::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
{
load32(Address(from, entry * sizeof(Register)), to);
@@ -323,23 +333,13 @@ ALWAYS_INLINE void JIT::sampleCodeBlock(CodeBlock* codeBlock)
#endif
#endif
-inline JIT::Address JIT::addressFor(unsigned index, RegisterID base)
+ALWAYS_INLINE bool JIT::isOperandConstantImmediateChar(unsigned src)
{
- return Address(base, (index * sizeof(Register)));
+ return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isString() && asString(getConstantOperand(src).asCell())->length() == 1;
}
#if USE(JSVALUE32_64)
-inline JIT::Address JIT::tagFor(unsigned index, RegisterID base)
-{
- return Address(base, (index * sizeof(Register)) + OBJECT_OFFSETOF(JSValue, u.asBits.tag));
-}
-
-inline JIT::Address JIT::payloadFor(unsigned index, RegisterID base)
-{
- return Address(base, (index * sizeof(Register)) + OBJECT_OFFSETOF(JSValue, u.asBits.payload));
-}
-
inline void JIT::emitLoadTag(unsigned index, RegisterID tag)
{
RegisterID mappedTag;
@@ -560,7 +560,7 @@ inline bool JIT::getMappedTag(unsigned virtualRegisterIndex, RegisterID& tag)
inline void JIT::emitJumpSlowCaseIfNotJSCell(unsigned virtualRegisterIndex)
{
if (!m_codeBlock->isKnownNotImmediate(virtualRegisterIndex))
- addSlowCase(branch32(NotEqual, tagFor(virtualRegisterIndex), Imm32(JSValue::CellTag)));
+ addSlowCase(emitJumpIfNotJSCell(virtualRegisterIndex));
}
inline void JIT::emitJumpSlowCaseIfNotJSCell(unsigned virtualRegisterIndex, RegisterID tag)
@@ -737,14 +737,6 @@ ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotJSCell(RegisterID reg, int vReg)
}
#if USE(JSVALUE64)
-ALWAYS_INLINE JIT::Jump JIT::emitJumpIfImmediateNumber(RegisterID reg)
-{
- return branchTestPtr(NonZero, reg, tagTypeNumberRegister);
-}
-ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotImmediateNumber(RegisterID reg)
-{
- return branchTestPtr(Zero, reg, tagTypeNumberRegister);
-}
inline void JIT::emitLoadDouble(unsigned index, FPRegisterID value)
{
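
For reference, the checks performed by the new emitLoadCharacterString helper in this file can be read as ordinary C++. The sketch below mirrors them with toy stand-in types, keeping only the fields the JIT touches (the real JSString/StringImpl layouts are not shown in this patch, so treat these structs as illustrative):

    #include <cstdint>
    #include <cstdio>

    struct StringImpl { const uint16_t* data; };     // stand-in for the real impl
    struct JSString {
        const void* vptr;        // must match the JSString vtable pointer
        unsigned m_fiberCount;   // nonzero means a rope: chars not contiguous
        unsigned m_length;
        StringImpl* m_value;
    };

    // Returns true and writes the character only if every fast-path check passes.
    bool loadCharacter(const JSString* s, const void* jsStringVPtr, uint16_t& out)
    {
        if (s->vptr != jsStringVPtr) return false;   // not actually a JSString
        if (s->m_fiberCount) return false;           // rope
        if (s->m_length != 1) return false;          // not a single character
        out = s->m_value->data[0];                   // the final 16-bit load
        return true;
    }

    int main()
    {
        static const uint16_t chars[] = { 'A' };
        StringImpl impl = { chars };
        static const int fakeVTable = 0;             // placeholder vtable pointer
        JSString str = { &fakeVTable, 0, 1, &impl };
        uint16_t c;
        if (loadCharacter(&str, &fakeVTable, c))
            std::printf("char = %c\n", (char)c);     // char = A
        return 0;
    }
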
diff --git a/JavaScriptCore/jit/JITOpcodes.cpp b/JavaScriptCore/jit/JITOpcodes.cpp
index 0dd6a40..2ad79c6 100644
--- a/JavaScriptCore/jit/JITOpcodes.cpp
+++ b/JavaScriptCore/jit/JITOpcodes.cpp
@@ -400,7 +400,7 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
*executablePool = finalCode.m_executablePool;
trampolines->ctiVirtualCall = trampolineAt(finalCode, virtualCallBegin);
- trampolines->ctiNativeCallThunk = trampolineAt(finalCode, nativeCallThunk);
+ trampolines->ctiNativeCallThunk = adoptRef(new NativeExecutable(JITCode(JITCode::HostFunction(trampolineAt(finalCode, nativeCallThunk)))));
#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
trampolines->ctiStringLengthTrampoline = trampolineAt(finalCode, stringLengthBegin);
#else
@@ -1430,11 +1430,16 @@ void JIT::emit_op_new_error(Instruction* currentInstruction)
void JIT::emit_op_debug(Instruction* currentInstruction)
{
+#if ENABLE(DEBUG_WITH_BREAKPOINT)
+ UNUSED_PARAM(currentInstruction);
+ breakpoint();
+#else
JITStubCall stubCall(this, cti_op_debug);
stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
stubCall.call();
+#endif
}
@@ -1937,7 +1942,7 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
trampolines->ctiVirtualCallLink = trampolineAt(finalCode, virtualCallLinkBegin);
trampolines->ctiVirtualCall = trampolineAt(finalCode, virtualCallBegin);
- trampolines->ctiNativeCallThunk = trampolineAt(finalCode, nativeCallThunk);
+ trampolines->ctiNativeCallThunk = adoptRef(new NativeExecutable(JITCode(JITCode::HostFunction(trampolineAt(finalCode, nativeCallThunk)))));
#if ENABLE(JIT_OPTIMIZE_MOD)
trampolines->ctiSoftModulo = trampolineAt(finalCode, softModBegin);
#endif
@@ -2721,11 +2726,16 @@ void JIT::emit_op_new_error(Instruction* currentInstruction)
void JIT::emit_op_debug(Instruction* currentInstruction)
{
+#if ENABLE(DEBUG_WITH_BREAKPOINT)
+ UNUSED_PARAM(currentInstruction);
+ breakpoint();
+#else
JITStubCall stubCall(this, cti_op_debug);
stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
stubCall.call();
+#endif
}
void JIT::emit_op_eq_null(Instruction* currentInstruction)
diff --git a/JavaScriptCore/jit/JITStubCall.h b/JavaScriptCore/jit/JITStubCall.h
index cfbd7dc..70d2893 100644
--- a/JavaScriptCore/jit/JITStubCall.h
+++ b/JavaScriptCore/jit/JITStubCall.h
@@ -116,13 +116,15 @@ namespace JSC {
m_jit->poke(argument, m_stackIndex);
m_stackIndex += stackIndexStep;
}
-
+
+#if USE(JSVALUE32_64)
void addArgument(const JSValue& value)
{
m_jit->poke(JIT::Imm32(value.payload()), m_stackIndex);
m_jit->poke(JIT::Imm32(value.tag()), m_stackIndex + 1);
m_stackIndex += stackIndexStep;
}
+#endif
void addArgument(JIT::RegisterID tag, JIT::RegisterID payload)
{
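
On JSVALUE32_64 the addArgument(const JSValue&) overload above pokes two 32-bit words per boxed value: the payload at the lower stack index and the tag one slot above it, matching the u.asBits.payload / u.asBits.tag offsets used elsewhere in this patch. A hedged sketch of that layout on a little-endian target (the tag value shown is illustrative, not a real JSValue tag constant):

    #include <cstdint>
    #include <cstdio>

    // Shape of the 32_64 value representation: 8 bytes, payload word first.
    union EncodedValue {
        double asDouble;
        struct { int32_t payload; int32_t tag; } asBits;
    };

    int main()
    {
        EncodedValue v;
        v.asBits.tag = -1;       // illustrative tag, not a real JSValue tag
        v.asBits.payload = 42;
        std::printf("tag=%d payload=%d\n", v.asBits.tag, v.asBits.payload);
        return 0;
    }
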
diff --git a/JavaScriptCore/jit/JITStubs.cpp b/JavaScriptCore/jit/JITStubs.cpp
index c32f2ce..ebd26bb 100644
--- a/JavaScriptCore/jit/JITStubs.cpp
+++ b/JavaScriptCore/jit/JITStubs.cpp
@@ -578,6 +578,9 @@ SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
#elif COMPILER(RVCT) && CPU(ARM_TRADITIONAL)
+#define THUNK_RETURN_ADDRESS_OFFSET 32
+#define PRESERVEDR4_OFFSET 36
+
__asm EncodedJSValue ctiTrampoline(void*, RegisterFile*, CallFrame*, JSValue*, Profiler**, JSGlobalData*)
{
ARM
@@ -835,6 +838,10 @@ JITThunks::JITThunks(JSGlobalData* globalData)
#endif
}
+JITThunks::~JITThunks()
+{
+}
+
#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
NEVER_INLINE void JITThunks::tryCachePutByID(CallFrame* callFrame, CodeBlock* codeBlock, ReturnAddressPtr returnAddress, JSValue baseValue, const PutPropertySlot& slot, StructureStubInfo* stubInfo)
@@ -3300,6 +3307,15 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, to_object)
return JSValue::encode(stackFrame.args[0].jsValue().toObject(callFrame));
}
+NativeExecutable* JITThunks::specializedThunk(JSGlobalData* globalData, ThunkGenerator generator)
+{
+ std::pair<ThunkMap::iterator, bool> entry = m_thunkMap.add(generator, 0);
+ if (!entry.second)
+ return entry.first->second.get();
+ entry.first->second = generator(globalData, m_executablePool.get());
+ return entry.first->second.get();
+}
+
} // namespace JSC
#endif // ENABLE(JIT)
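
JITThunks::specializedThunk above is a straightforward memoization keyed on the generator function pointer: HashMap::add returns the existing entry (entry.second == false) when the key is already present, and only a fresh insertion triggers the generator. A minimal standalone sketch of the same pattern using std::map, with hypothetical names:

    #include <cstdio>
    #include <map>

    typedef int (*Generator)();

    static std::map<Generator, int> thunkCache;

    int specialized(Generator generate)
    {
        std::pair<std::map<Generator, int>::iterator, bool> entry =
            thunkCache.insert(std::make_pair(generate, 0));
        if (!entry.second)
            return entry.first->second;    // already generated: reuse it
        entry.first->second = generate();  // first request: generate and cache
        return entry.first->second;
    }

    static int makeThunk() { std::puts("generating"); return 7; }

    int main()
    {
        specialized(makeThunk);  // prints "generating"
        specialized(makeThunk);  // cache hit: no output
        return 0;
    }
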
diff --git a/JavaScriptCore/jit/JITStubs.h b/JavaScriptCore/jit/JITStubs.h
index f419c8c..fe4bcfb 100644
--- a/JavaScriptCore/jit/JITStubs.h
+++ b/JavaScriptCore/jit/JITStubs.h
@@ -31,6 +31,8 @@
#include "MacroAssemblerCodeRef.h"
#include "Register.h"
+#include "ThunkGenerators.h"
+#include <wtf/HashMap.h>
#if ENABLE(JIT)
@@ -43,16 +45,16 @@ namespace JSC {
class FunctionExecutable;
class Identifier;
class JSGlobalData;
- class JSGlobalData;
+ class JSGlobalObject;
class JSObject;
class JSPropertyNameIterator;
class JSValue;
class JSValueEncodedAsPointer;
+ class NativeExecutable;
class Profiler;
class PropertySlot;
class PutPropertySlot;
class RegisterFile;
- class JSGlobalObject;
class RegExp;
union JITStubArg {
@@ -77,7 +79,7 @@ namespace JSC {
MacroAssemblerCodePtr ctiStringLengthTrampoline;
MacroAssemblerCodePtr ctiVirtualCallLink;
MacroAssemblerCodePtr ctiVirtualCall;
- MacroAssemblerCodePtr ctiNativeCallThunk;
+ RefPtr<NativeExecutable> ctiNativeCallThunk;
MacroAssemblerCodePtr ctiSoftModulo;
};
@@ -265,6 +267,7 @@ namespace JSC {
class JITThunks {
public:
JITThunks(JSGlobalData*);
+ ~JITThunks();
static void tryCacheGetByID(CallFrame*, CodeBlock*, ReturnAddressPtr returnAddress, JSValue baseValue, const Identifier& propertyName, const PropertySlot&, StructureStubInfo* stubInfo);
static void tryCachePutByID(CallFrame*, CodeBlock*, ReturnAddressPtr returnAddress, JSValue baseValue, const PutPropertySlot&, StructureStubInfo* stubInfo);
@@ -272,10 +275,13 @@ namespace JSC {
MacroAssemblerCodePtr ctiStringLengthTrampoline() { return m_trampolineStructure.ctiStringLengthTrampoline; }
MacroAssemblerCodePtr ctiVirtualCallLink() { return m_trampolineStructure.ctiVirtualCallLink; }
MacroAssemblerCodePtr ctiVirtualCall() { return m_trampolineStructure.ctiVirtualCall; }
- MacroAssemblerCodePtr ctiNativeCallThunk() { return m_trampolineStructure.ctiNativeCallThunk; }
+ NativeExecutable* ctiNativeCallThunk() { return m_trampolineStructure.ctiNativeCallThunk.get(); }
MacroAssemblerCodePtr ctiSoftModulo() { return m_trampolineStructure.ctiSoftModulo; }
+ NativeExecutable* specializedThunk(JSGlobalData* globalData, ThunkGenerator generator);
private:
+ typedef HashMap<ThunkGenerator, RefPtr<NativeExecutable> > ThunkMap;
+ ThunkMap m_thunkMap;
RefPtr<ExecutablePool> m_executablePool;
TrampolineStructure m_trampolineStructure;
diff --git a/JavaScriptCore/jit/JSInterfaceJIT.h b/JavaScriptCore/jit/JSInterfaceJIT.h
new file mode 100644
index 0000000..2cd0e33
--- /dev/null
+++ b/JavaScriptCore/jit/JSInterfaceJIT.h
@@ -0,0 +1,302 @@
+/*
+ * Copyright (C) 2010 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef JSInterfaceJIT_h
+#define JSInterfaceJIT_h
+
+#include "JITCode.h"
+#include "JITStubs.h"
+#include "JSImmediate.h"
+#include "MacroAssembler.h"
+#include "RegisterFile.h"
+#include <wtf/AlwaysInline.h>
+#include <wtf/Vector.h>
+
+namespace JSC {
+ class JSInterfaceJIT : public MacroAssembler {
+ public:
+ // NOTES:
+ //
+ // regT0 has two special meanings. The return value from a stub
+ // call will always be in regT0, and by default (unless
+ // a register is specified) emitPutVirtualRegister() will store
+ // the value from regT0.
+ //
+ // regT3 is required to be callee-preserved.
+ //
+ // tempRegister2 has no such dependencies. It is important that
+ // on x86/x86-64 it is ecx, for performance reasons: the
+ // MacroAssembler will need to plant register swaps if it is not,
+ // though the code will still function correctly.
+#if CPU(X86_64)
+ static const RegisterID returnValueRegister = X86Registers::eax;
+ static const RegisterID cachedResultRegister = X86Registers::eax;
+ static const RegisterID firstArgumentRegister = X86Registers::edi;
+
+ static const RegisterID timeoutCheckRegister = X86Registers::r12;
+ static const RegisterID callFrameRegister = X86Registers::r13;
+ static const RegisterID tagTypeNumberRegister = X86Registers::r14;
+ static const RegisterID tagMaskRegister = X86Registers::r15;
+
+ static const RegisterID regT0 = X86Registers::eax;
+ static const RegisterID regT1 = X86Registers::edx;
+ static const RegisterID regT2 = X86Registers::ecx;
+ static const RegisterID regT3 = X86Registers::ebx;
+
+ static const FPRegisterID fpRegT0 = X86Registers::xmm0;
+ static const FPRegisterID fpRegT1 = X86Registers::xmm1;
+ static const FPRegisterID fpRegT2 = X86Registers::xmm2;
+ static const FPRegisterID fpRegT3 = X86Registers::xmm3;
+#elif CPU(X86)
+ static const RegisterID returnValueRegister = X86Registers::eax;
+ static const RegisterID cachedResultRegister = X86Registers::eax;
+ // On x86 we always use fastcall conventions, but on
+ // OS X it might make more sense to just use regparm.
+ static const RegisterID firstArgumentRegister = X86Registers::ecx;
+
+ static const RegisterID timeoutCheckRegister = X86Registers::esi;
+ static const RegisterID callFrameRegister = X86Registers::edi;
+
+ static const RegisterID regT0 = X86Registers::eax;
+ static const RegisterID regT1 = X86Registers::edx;
+ static const RegisterID regT2 = X86Registers::ecx;
+ static const RegisterID regT3 = X86Registers::ebx;
+
+ static const FPRegisterID fpRegT0 = X86Registers::xmm0;
+ static const FPRegisterID fpRegT1 = X86Registers::xmm1;
+ static const FPRegisterID fpRegT2 = X86Registers::xmm2;
+ static const FPRegisterID fpRegT3 = X86Registers::xmm3;
+#elif CPU(ARM_THUMB2)
+ static const RegisterID returnValueRegister = ARMRegisters::r0;
+ static const RegisterID cachedResultRegister = ARMRegisters::r0;
+ static const RegisterID firstArgumentRegister = ARMRegisters::r0;
+
+ static const RegisterID regT0 = ARMRegisters::r0;
+ static const RegisterID regT1 = ARMRegisters::r1;
+ static const RegisterID regT2 = ARMRegisters::r2;
+ static const RegisterID regT3 = ARMRegisters::r4;
+
+ static const RegisterID callFrameRegister = ARMRegisters::r5;
+ static const RegisterID timeoutCheckRegister = ARMRegisters::r6;
+
+ static const FPRegisterID fpRegT0 = ARMRegisters::d0;
+ static const FPRegisterID fpRegT1 = ARMRegisters::d1;
+ static const FPRegisterID fpRegT2 = ARMRegisters::d2;
+ static const FPRegisterID fpRegT3 = ARMRegisters::d3;
+#elif CPU(ARM_TRADITIONAL)
+ static const RegisterID returnValueRegister = ARMRegisters::r0;
+ static const RegisterID cachedResultRegister = ARMRegisters::r0;
+ static const RegisterID firstArgumentRegister = ARMRegisters::r0;
+
+ static const RegisterID timeoutCheckRegister = ARMRegisters::r5;
+ static const RegisterID callFrameRegister = ARMRegisters::r4;
+
+ static const RegisterID regT0 = ARMRegisters::r0;
+ static const RegisterID regT1 = ARMRegisters::r1;
+ static const RegisterID regT2 = ARMRegisters::r2;
+ // Callee preserved
+ static const RegisterID regT3 = ARMRegisters::r7;
+
+ static const RegisterID regS0 = ARMRegisters::S0;
+ // Callee preserved
+ static const RegisterID regS1 = ARMRegisters::S1;
+
+ static const RegisterID regStackPtr = ARMRegisters::sp;
+ static const RegisterID regLink = ARMRegisters::lr;
+
+ static const FPRegisterID fpRegT0 = ARMRegisters::d0;
+ static const FPRegisterID fpRegT1 = ARMRegisters::d1;
+ static const FPRegisterID fpRegT2 = ARMRegisters::d2;
+ static const FPRegisterID fpRegT3 = ARMRegisters::d3;
+#elif CPU(MIPS)
+ static const RegisterID returnValueRegister = MIPSRegisters::v0;
+ static const RegisterID cachedResultRegister = MIPSRegisters::v0;
+ static const RegisterID firstArgumentRegister = MIPSRegisters::a0;
+
+ // regT0 must be v0 for returning a 32-bit value.
+ static const RegisterID regT0 = MIPSRegisters::v0;
+
+ // regT1 must be v1 for returning a pair of 32-bit values.
+ static const RegisterID regT1 = MIPSRegisters::v1;
+
+ static const RegisterID regT2 = MIPSRegisters::t4;
+
+ // regT3 must be preserved by the callee, so use an S register.
+ static const RegisterID regT3 = MIPSRegisters::s2;
+
+ static const RegisterID callFrameRegister = MIPSRegisters::s0;
+ static const RegisterID timeoutCheckRegister = MIPSRegisters::s1;
+
+ static const FPRegisterID fpRegT0 = MIPSRegisters::f4;
+ static const FPRegisterID fpRegT1 = MIPSRegisters::f6;
+ static const FPRegisterID fpRegT2 = MIPSRegisters::f8;
+ static const FPRegisterID fpRegT3 = MIPSRegisters::f10;
+#else
+#error "JIT not supported on this platform."
+#endif
+
+ inline Jump emitLoadJSCell(unsigned virtualRegisterIndex, RegisterID payload);
+ inline Jump emitLoadInt32(unsigned virtualRegisterIndex, RegisterID dst);
+ inline Jump emitLoadDouble(unsigned virtualRegisterIndex, FPRegisterID dst, RegisterID scratch);
+
+#if USE(JSVALUE32_64)
+ inline Jump emitJumpIfNotJSCell(unsigned virtualRegisterIndex);
+ inline Address tagFor(unsigned index, RegisterID base = callFrameRegister);
+#endif
+
+#if USE(JSVALUE32) || USE(JSVALUE64)
+ Jump emitJumpIfImmediateNumber(RegisterID reg);
+ Jump emitJumpIfNotImmediateNumber(RegisterID reg);
+#endif
+
+ inline Address payloadFor(unsigned index, RegisterID base = callFrameRegister);
+ inline Address addressFor(unsigned index, RegisterID base = callFrameRegister);
+ };
+
+ struct ThunkHelpers {
+ static unsigned stringImplDataOffset() { return WebCore::StringImpl::dataOffset(); }
+ static unsigned jsStringLengthOffset() { return OBJECT_OFFSETOF(JSString, m_length); }
+ static unsigned jsStringValueOffset() { return OBJECT_OFFSETOF(JSString, m_value); }
+ };
+
+#if USE(JSVALUE32_64)
+ inline JSInterfaceJIT::Jump JSInterfaceJIT::emitLoadJSCell(unsigned virtualRegisterIndex, RegisterID payload)
+ {
+ loadPtr(payloadFor(virtualRegisterIndex), payload);
+ return emitJumpIfNotJSCell(virtualRegisterIndex);
+ }
+
+ inline JSInterfaceJIT::Jump JSInterfaceJIT::emitJumpIfNotJSCell(unsigned virtualRegisterIndex)
+ {
+ return branch32(NotEqual, tagFor(virtualRegisterIndex), Imm32(JSValue::CellTag));
+ }
+
+ inline JSInterfaceJIT::Jump JSInterfaceJIT::emitLoadInt32(unsigned virtualRegisterIndex, RegisterID dst)
+ {
+ loadPtr(payloadFor(virtualRegisterIndex), dst);
+ return branch32(NotEqual, tagFor(virtualRegisterIndex), Imm32(JSValue::Int32Tag));
+ }
+
+ inline JSInterfaceJIT::Address JSInterfaceJIT::tagFor(unsigned index, RegisterID base)
+ {
+ return Address(base, (index * sizeof(Register)) + OBJECT_OFFSETOF(JSValue, u.asBits.tag));
+ }
+
+ inline JSInterfaceJIT::Address JSInterfaceJIT::payloadFor(unsigned index, RegisterID base)
+ {
+ return Address(base, (index * sizeof(Register)) + OBJECT_OFFSETOF(JSValue, u.asBits.payload));
+ }
+
+ inline JSInterfaceJIT::Jump JSInterfaceJIT::emitLoadDouble(unsigned virtualRegisterIndex, FPRegisterID dst, RegisterID scratch)
+ {
+ loadPtr(tagFor(virtualRegisterIndex), scratch);
+ Jump isDouble = branch32(Below, scratch, Imm32(JSValue::LowestTag));
+ Jump notInt = branch32(NotEqual, scratch, Imm32(JSValue::Int32Tag));
+ loadPtr(payloadFor(virtualRegisterIndex), scratch);
+ convertInt32ToDouble(scratch, dst);
+ Jump done = jump();
+ isDouble.link(this);
+ loadDouble(addressFor(virtualRegisterIndex), dst);
+ done.link(this);
+ return notInt;
+ }
+#endif
+
+#if USE(JSVALUE64)
+ ALWAYS_INLINE JSInterfaceJIT::Jump JSInterfaceJIT::emitJumpIfImmediateNumber(RegisterID reg)
+ {
+ return branchTestPtr(NonZero, reg, tagTypeNumberRegister);
+ }
+ ALWAYS_INLINE JSInterfaceJIT::Jump JSInterfaceJIT::emitJumpIfNotImmediateNumber(RegisterID reg)
+ {
+ return branchTestPtr(Zero, reg, tagTypeNumberRegister);
+ }
+ inline JSInterfaceJIT::Jump JSInterfaceJIT::emitLoadJSCell(unsigned virtualRegisterIndex, RegisterID dst)
+ {
+ loadPtr(addressFor(virtualRegisterIndex), dst);
+ return branchTestPtr(NonZero, dst, tagMaskRegister);
+ }
+
+ inline JSInterfaceJIT::Jump JSInterfaceJIT::emitLoadInt32(unsigned virtualRegisterIndex, RegisterID dst)
+ {
+ loadPtr(addressFor(virtualRegisterIndex), dst);
+ Jump result = branchPtr(Below, dst, tagTypeNumberRegister);
+ zeroExtend32ToPtr(dst, dst);
+ return result;
+ }
+
+ inline JSInterfaceJIT::Jump JSInterfaceJIT::emitLoadDouble(unsigned virtualRegisterIndex, FPRegisterID dst, RegisterID scratch)
+ {
+ loadPtr(addressFor(virtualRegisterIndex), scratch);
+ Jump notNumber = emitJumpIfNotImmediateNumber(scratch);
+ Jump notInt = branchPtr(Below, scratch, tagTypeNumberRegister);
+ convertInt32ToDouble(scratch, dst);
+ Jump done = jump();
+ notInt.link(this);
+ addPtr(tagTypeNumberRegister, scratch);
+ movePtrToDouble(scratch, dst);
+ done.link(this);
+ return notNumber;
+ }
+
+#endif
+
+#if USE(JSVALUE32)
+ inline JSInterfaceJIT::Jump JSInterfaceJIT::emitLoadJSCell(unsigned virtualRegisterIndex, RegisterID dst)
+ {
+ loadPtr(addressFor(virtualRegisterIndex), dst);
+ return branchTest32(NonZero, dst, Imm32(JSImmediate::TagMask));
+ }
+
+ inline JSInterfaceJIT::Jump JSInterfaceJIT::emitLoadInt32(unsigned virtualRegisterIndex, RegisterID dst)
+ {
+ loadPtr(addressFor(virtualRegisterIndex), dst);
+ Jump result = branchTest32(Zero, dst, Imm32(JSImmediate::TagTypeNumber));
+ rshift32(Imm32(JSImmediate::IntegerPayloadShift), dst);
+ return result;
+ }
+
+ inline JSInterfaceJIT::Jump JSInterfaceJIT::emitLoadDouble(unsigned, FPRegisterID, RegisterID)
+ {
+ ASSERT_NOT_REACHED();
+ return jump();
+ }
+#endif
+
+#if !USE(JSVALUE32_64)
+ inline JSInterfaceJIT::Address JSInterfaceJIT::payloadFor(unsigned index, RegisterID base)
+ {
+ return addressFor(index, base);
+ }
+#endif
+
+ inline JSInterfaceJIT::Address JSInterfaceJIT::addressFor(unsigned index, RegisterID base)
+ {
+ return Address(base, (index * sizeof(Register)));
+ }
+
+}
+
+#endif // JSInterfaceJIT_h
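
The JSVALUE64 emitLoadDouble above decodes a boxed double by adding tagTypeNumberRegister back to the encoded word; the matching encode step (a subtract) appears in SpecializedThunkJIT::returnDouble later in this patch. A hedged arithmetic sketch, assuming the 0xFFFF000000000000 tag constant this representation is built around (unsigned wrap-around makes the subtract and add exact inverses):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    static const uint64_t TagTypeNumber = 0xffff000000000000ull;

    int main()
    {
        double d = 3.25;
        uint64_t bits;
        std::memcpy(&bits, &d, sizeof bits);

        uint64_t boxed = bits - TagTypeNumber;  // encode (cf. returnDouble's subPtr)
        uint64_t raw   = boxed + TagTypeNumber; // decode (cf. emitLoadDouble's addPtr)

        double out;
        std::memcpy(&out, &raw, sizeof out);
        std::printf("%g\n", out);               // 3.25
        return 0;
    }
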
diff --git a/JavaScriptCore/jit/SpecializedThunkJIT.h b/JavaScriptCore/jit/SpecializedThunkJIT.h
new file mode 100644
index 0000000..e41411d
--- /dev/null
+++ b/JavaScriptCore/jit/SpecializedThunkJIT.h
@@ -0,0 +1,174 @@
+/*
+ * Copyright (C) 2010 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef SpecializedThunkJIT_h
+#define SpecializedThunkJIT_h
+
+#if ENABLE(JIT)
+
+#include "Executable.h"
+#include "JSInterfaceJIT.h"
+#include "LinkBuffer.h"
+
+namespace JSC {
+
+ class SpecializedThunkJIT : public JSInterfaceJIT {
+ public:
+ static const int ThisArgument = -1;
+ SpecializedThunkJIT(int expectedArgCount, JSGlobalData* globalData, ExecutablePool* pool)
+ : m_expectedArgCount(expectedArgCount)
+ , m_globalData(globalData)
+ , m_pool(pool)
+ {
+ // Check that we have the expected number of arguments
+ m_failures.append(branch32(NotEqual, Address(callFrameRegister, RegisterFile::ArgumentCount * (int)sizeof(Register)), Imm32(expectedArgCount + 1)));
+ }
+
+ void loadDoubleArgument(int argument, FPRegisterID dst, RegisterID scratch)
+ {
+ unsigned src = argumentToVirtualRegister(argument);
+ m_failures.append(emitLoadDouble(src, dst, scratch));
+ }
+
+ void loadCellArgument(int argument, RegisterID dst)
+ {
+ unsigned src = argumentToVirtualRegister(argument);
+ m_failures.append(emitLoadJSCell(src, dst));
+ }
+
+ void loadJSStringArgument(int argument, RegisterID dst)
+ {
+ loadCellArgument(argument, dst);
+ m_failures.append(branchPtr(NotEqual, Address(dst, 0), ImmPtr(m_globalData->jsStringVPtr)));
+ m_failures.append(branchTest32(NonZero, Address(dst, OBJECT_OFFSETOF(JSString, m_fiberCount))));
+ }
+
+ void loadInt32Argument(int argument, RegisterID dst, Jump& failTarget)
+ {
+ unsigned src = argumentToVirtualRegister(argument);
+ failTarget = emitLoadInt32(src, dst);
+ }
+
+ void loadInt32Argument(int argument, RegisterID dst)
+ {
+ Jump conversionFailed;
+ loadInt32Argument(argument, dst, conversionFailed);
+ m_failures.append(conversionFailed);
+ }
+
+ void appendFailure(const Jump& failure)
+ {
+ m_failures.append(failure);
+ }
+
+ void returnJSValue(RegisterID src)
+ {
+ if (src != regT0)
+ move(src, regT0);
+ loadPtr(Address(callFrameRegister, RegisterFile::CallerFrame * (int)sizeof(Register)), callFrameRegister);
+ ret();
+ }
+
+ void returnDouble(FPRegisterID src)
+ {
+#if USE(JSVALUE64)
+ moveDoubleToPtr(src, regT0);
+ subPtr(tagTypeNumberRegister, regT0);
+#elif USE(JSVALUE32_64)
+ storeDouble(src, Address(stackPointerRegister, -(int)sizeof(double)));
+ loadPtr(Address(stackPointerRegister, OBJECT_OFFSETOF(JSValue, u.asBits.tag) - sizeof(double)), regT1);
+ loadPtr(Address(stackPointerRegister, OBJECT_OFFSETOF(JSValue, u.asBits.payload) - sizeof(double)), regT0);
+#else
+ UNUSED_PARAM(src);
+ ASSERT_NOT_REACHED();
+ m_failures.append(jump());
+#endif
+ loadPtr(Address(callFrameRegister, RegisterFile::CallerFrame * (int)sizeof(Register)), callFrameRegister);
+ ret();
+ }
+
+ void returnInt32(RegisterID src)
+ {
+ if (src != regT0)
+ move(src, regT0);
+ tagReturnAsInt32();
+ loadPtr(Address(callFrameRegister, RegisterFile::CallerFrame * (int)sizeof(Register)), callFrameRegister);
+ ret();
+ }
+
+ void returnJSCell(RegisterID src)
+ {
+ if (src != regT0)
+ move(src, regT0);
+ tagReturnAsJSCell();
+ loadPtr(Address(callFrameRegister, RegisterFile::CallerFrame * (int)sizeof(Register)), callFrameRegister);
+ ret();
+ }
+
+ PassRefPtr<NativeExecutable> finalize()
+ {
+ LinkBuffer patchBuffer(this, m_pool.get());
+ patchBuffer.link(m_failures, CodeLocationLabel(m_globalData->jitStubs.ctiNativeCallThunk()->generatedJITCode().addressForCall()));
+ return adoptRef(new NativeExecutable(patchBuffer.finalizeCode()));
+ }
+
+ private:
+ int argumentToVirtualRegister(unsigned argument)
+ {
+ return -static_cast<int>(RegisterFile::CallFrameHeaderSize + (m_expectedArgCount - argument));
+ }
+
+ void tagReturnAsInt32()
+ {
+#if USE(JSVALUE64)
+ orPtr(tagTypeNumberRegister, regT0);
+#elif USE(JSVALUE32_64)
+ move(Imm32(JSValue::Int32Tag), regT1);
+#else
+ signExtend32ToPtr(regT0, regT0);
+ // If we can't tag the result, give up and jump to the slow case
+ m_failures.append(branchAddPtr(Overflow, regT0, regT0));
+ addPtr(Imm32(JSImmediate::TagTypeNumber), regT0);
+#endif
+ }
+
+ void tagReturnAsJSCell()
+ {
+#if USE(JSVALUE32_64)
+ move(Imm32(JSValue::CellTag), regT1);
+#endif
+ }
+
+ int m_expectedArgCount;
+ JSGlobalData* m_globalData;
+ RefPtr<ExecutablePool> m_pool;
+ MacroAssembler::JumpList m_failures;
+ };
+
+}
+
+#endif // ENABLE(JIT)
+
+#endif // SpecializedThunkJIT_h
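
argumentToVirtualRegister above maps thunk arguments to negative register indices just below the call-frame header, with 'this' (ThisArgument == -1) deepest. A quick standalone check of the arithmetic; the header-size constant here is illustrative only, the real one comes from RegisterFile:

    #include <cstdio>

    static const int CallFrameHeaderSize = 6;  // illustrative value

    static int argumentToVirtualRegister(int expectedArgCount, int argument)
    {
        return -(CallFrameHeaderSize + (expectedArgCount - argument));
    }

    int main()
    {
        const int ThisArgument = -1;
        // With one expected argument:
        std::printf("arg0 -> %d\n", argumentToVirtualRegister(1, 0));            // -7
        std::printf("this -> %d\n", argumentToVirtualRegister(1, ThisArgument)); // -8
        return 0;
    }
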
diff --git a/JavaScriptCore/jit/ThunkGenerators.cpp b/JavaScriptCore/jit/ThunkGenerators.cpp
new file mode 100644
index 0000000..c625c3d
--- /dev/null
+++ b/JavaScriptCore/jit/ThunkGenerators.cpp
@@ -0,0 +1,159 @@
+/*
+ * Copyright (C) 2010 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "ThunkGenerators.h"
+#include <wtf/text/StringImpl.h>
+
+#include "SpecializedThunkJIT.h"
+
+#if ENABLE(JIT)
+
+namespace JSC {
+
+static void stringCharLoad(SpecializedThunkJIT& jit)
+{
+ // load string
+ jit.loadJSStringArgument(SpecializedThunkJIT::ThisArgument, SpecializedThunkJIT::regT0);
+ // regT0 now contains this, and is a non-rope JSString*
+
+ // Load string length to regT2, and start the process of loading the data pointer into regT0
+ jit.load32(MacroAssembler::Address(SpecializedThunkJIT::regT0, ThunkHelpers::jsStringLengthOffset()), SpecializedThunkJIT::regT2);
+ jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, ThunkHelpers::jsStringValueOffset()), SpecializedThunkJIT::regT0);
+ jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, ThunkHelpers::stringImplDataOffset()), SpecializedThunkJIT::regT0);
+
+ // load index
+ jit.loadInt32Argument(0, SpecializedThunkJIT::regT1); // regT1 contains the index
+
+ // Do an unsigned compare to simultaneously filter negative indices as well as indices that are too large
+ jit.appendFailure(jit.branch32(MacroAssembler::AboveOrEqual, SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT2));
+
+ // Load the character
+ jit.load16(MacroAssembler::BaseIndex(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1, MacroAssembler::TimesTwo, 0), SpecializedThunkJIT::regT0);
+}
+
+static void charToString(SpecializedThunkJIT& jit, JSGlobalData* globalData, MacroAssembler::RegisterID src, MacroAssembler::RegisterID dst, MacroAssembler::RegisterID scratch)
+{
+ jit.appendFailure(jit.branch32(MacroAssembler::AboveOrEqual, src, MacroAssembler::Imm32(0x100)));
+ jit.move(MacroAssembler::ImmPtr(globalData->smallStrings.singleCharacterStrings()), scratch);
+ jit.loadPtr(MacroAssembler::BaseIndex(scratch, src, MacroAssembler::ScalePtr, 0), dst);
+ jit.appendFailure(jit.branchTestPtr(MacroAssembler::Zero, dst));
+}
+
+PassRefPtr<NativeExecutable> charCodeAtThunkGenerator(JSGlobalData* globalData, ExecutablePool* pool)
+{
+ SpecializedThunkJIT jit(1, globalData, pool);
+ stringCharLoad(jit);
+ jit.returnInt32(SpecializedThunkJIT::regT0);
+ return jit.finalize();
+}
+
+PassRefPtr<NativeExecutable> charAtThunkGenerator(JSGlobalData* globalData, ExecutablePool* pool)
+{
+ SpecializedThunkJIT jit(1, globalData, pool);
+ stringCharLoad(jit);
+ charToString(jit, globalData, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
+ jit.returnJSCell(SpecializedThunkJIT::regT0);
+ return jit.finalize();
+}
+
+PassRefPtr<NativeExecutable> fromCharCodeThunkGenerator(JSGlobalData* globalData, ExecutablePool* pool)
+{
+ SpecializedThunkJIT jit(1, globalData, pool);
+ // load char code
+ jit.loadInt32Argument(0, SpecializedThunkJIT::regT0);
+ charToString(jit, globalData, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
+ jit.returnJSCell(SpecializedThunkJIT::regT0);
+ return jit.finalize();
+}
+
+PassRefPtr<NativeExecutable> sqrtThunkGenerator(JSGlobalData* globalData, ExecutablePool* pool)
+{
+#if USE(JSVALUE64) || USE(JSVALUE32_64)
+ SpecializedThunkJIT jit(1, globalData, pool);
+ if (!jit.supportsFloatingPointSqrt())
+ return globalData->jitStubs.ctiNativeCallThunk();
+
+ jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
+ jit.sqrtDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
+ jit.returnDouble(SpecializedThunkJIT::fpRegT0);
+ return jit.finalize();
+#else
+ UNUSED_PARAM(pool);
+ return globalData->jitStubs.ctiNativeCallThunk();
+#endif
+}
+
+static const double oneConstant = 1.0;
+static const double negativeHalfConstant = -0.5;
+
+PassRefPtr<NativeExecutable> powThunkGenerator(JSGlobalData* globalData, ExecutablePool* pool)
+{
+#if USE(JSVALUE64) || USE(JSVALUE32_64)
+ SpecializedThunkJIT jit(2, globalData, pool);
+ if (!jit.supportsFloatingPoint())
+ return globalData->jitStubs.ctiNativeCallThunk();
+
+ jit.loadDouble(&oneConstant, SpecializedThunkJIT::fpRegT1);
+ jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
+ MacroAssembler::Jump nonIntExponent;
+ jit.loadInt32Argument(1, SpecializedThunkJIT::regT0, nonIntExponent);
+ jit.appendFailure(jit.branch32(MacroAssembler::LessThan, SpecializedThunkJIT::regT0, MacroAssembler::Imm32(0)));
+
+ MacroAssembler::Jump exponentIsZero = jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT0);
+ MacroAssembler::Label startLoop(jit.label());
+
+ MacroAssembler::Jump exponentIsEven = jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT0, MacroAssembler::Imm32(1));
+ jit.mulDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
+ exponentIsEven.link(&jit);
+ jit.mulDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
+ jit.rshift32(MacroAssembler::Imm32(1), SpecializedThunkJIT::regT0);
+ jit.branchTest32(MacroAssembler::NonZero, SpecializedThunkJIT::regT0).linkTo(startLoop, &jit);
+
+ exponentIsZero.link(&jit);
+ jit.returnDouble(SpecializedThunkJIT::fpRegT1);
+
+ if (jit.supportsFloatingPointSqrt()) {
+ nonIntExponent.link(&jit);
+ jit.loadDouble(&negativeHalfConstant, SpecializedThunkJIT::fpRegT3);
+ jit.loadDoubleArgument(1, SpecializedThunkJIT::fpRegT2, SpecializedThunkJIT::regT0);
+ jit.appendFailure(jit.branchDouble(MacroAssembler::DoubleLessThanOrEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
+ jit.appendFailure(jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, SpecializedThunkJIT::fpRegT2, SpecializedThunkJIT::fpRegT3));
+ jit.sqrtDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
+ jit.divDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
+ jit.returnDouble(SpecializedThunkJIT::fpRegT1);
+ } else
+ jit.appendFailure(nonIntExponent);
+
+ return jit.finalize();
+#else
+ UNUSED_PARAM(pool);
+ return globalData->jitStubs.ctiNativeCallThunk();
+#endif
+}
+
+}
+
+#endif // ENABLE(JIT)
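
The integer-exponent loop emitted by powThunkGenerator above is binary exponentiation (square-and-multiply): fpRegT1 starts at oneConstant, picks up a factor of the base whenever the low bit of the exponent is set, and the base is squared as the exponent shifts right. The same algorithm in plain C++ for a non-negative integer exponent:

    #include <cstdio>

    double powInt(double base, unsigned exponent)
    {
        double result = 1.0;      // fpRegT1 starts at oneConstant
        while (exponent) {
            if (exponent & 1)
                result *= base;   // odd exponent: fold the base in
            base *= base;         // square the base (mulDouble fpRegT0, fpRegT0)
            exponent >>= 1;       // rshift32 by one
        }
        return result;
    }

    int main()
    {
        std::printf("%g\n", powInt(3.0, 5));  // 243
        return 0;
    }

The non-integer fallback is deliberately narrow: it only accepts an exponent bit-equal to negativeHalfConstant (-0.5) with a base greater than 1.0, returning 1/sqrt(x) via sqrtDouble and divDouble; everything else fails over to the native call thunk.
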
diff --git a/JavaScriptCore/jit/ThunkGenerators.h b/JavaScriptCore/jit/ThunkGenerators.h
new file mode 100644
index 0000000..c3374f2
--- /dev/null
+++ b/JavaScriptCore/jit/ThunkGenerators.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2010 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef ThunkGenerators_h
+#define ThunkGenerators_h
+
+#if ENABLE(JIT)
+#include <wtf/PassRefPtr.h>
+
+namespace JSC {
+ class ExecutablePool;
+ class JSGlobalData;
+ class NativeExecutable;
+
+ typedef PassRefPtr<NativeExecutable> (*ThunkGenerator)(JSGlobalData*, ExecutablePool*);
+ PassRefPtr<NativeExecutable> charCodeAtThunkGenerator(JSGlobalData*, ExecutablePool*);
+ PassRefPtr<NativeExecutable> charAtThunkGenerator(JSGlobalData*, ExecutablePool*);
+ PassRefPtr<NativeExecutable> fromCharCodeThunkGenerator(JSGlobalData*, ExecutablePool*);
+ PassRefPtr<NativeExecutable> sqrtThunkGenerator(JSGlobalData*, ExecutablePool*);
+ PassRefPtr<NativeExecutable> powThunkGenerator(JSGlobalData*, ExecutablePool*);
+}
+#endif
+
+#endif // ThunkGenerators_h