| author | Steve Block <steveblock@google.com> | 2010-02-02 14:57:50 +0000 |
|---|---|---|
| committer | Steve Block <steveblock@google.com> | 2010-02-04 15:06:55 +0000 |
| commit | d0825bca7fe65beaee391d30da42e937db621564 (patch) | |
| tree | 7461c49eb5844ffd1f35d1ba2c8b7584c1620823 | /JavaScriptCore/jit |
| parent | 3db770bd97c5a59b6c7574ca80a39e5a51c1defd (diff) | |
| download | external_webkit-d0825bca7fe65beaee391d30da42e937db621564.zip external_webkit-d0825bca7fe65beaee391d30da42e937db621564.tar.gz external_webkit-d0825bca7fe65beaee391d30da42e937db621564.tar.bz2 | |
Merge webkit.org at r54127 : Initial merge by git
Change-Id: Ib661abb595522f50ea406f72d3a0ce17f7193c82
Diffstat (limited to 'JavaScriptCore/jit')
| -rw-r--r-- | JavaScriptCore/jit/ExecutableAllocator.h | 28 |
| -rw-r--r-- | JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp | 2 |
| -rw-r--r-- | JavaScriptCore/jit/ExecutableAllocatorPosix.cpp | 6 |
| -rw-r--r-- | JavaScriptCore/jit/ExecutableAllocatorSymbian.cpp | 4 |
| -rw-r--r-- | JavaScriptCore/jit/ExecutableAllocatorWin.cpp | 2 |
| -rw-r--r-- | JavaScriptCore/jit/JIT.cpp | 2 |
| -rw-r--r-- | JavaScriptCore/jit/JIT.h | 60 |
| -rw-r--r-- | JavaScriptCore/jit/JITArithmetic.cpp | 12 |
| -rw-r--r-- | JavaScriptCore/jit/JITInlineMethods.h | 18 |
| -rw-r--r-- | JavaScriptCore/jit/JITOpcodes.cpp | 273 |
| -rw-r--r-- | JavaScriptCore/jit/JITPropertyAccess.cpp | 44 |
| -rw-r--r-- | JavaScriptCore/jit/JITStubs.cpp | 188 |
| -rw-r--r-- | JavaScriptCore/jit/JITStubs.h | 14 |
13 files changed, 268 insertions, 385 deletions
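The dominant change in this merge is mechanical: WebKit's old catch-all PLATFORM() test was split into separate CPU(), OS() and COMPILER() predicates, so guards like PLATFORM(X86_64) become CPU(X86_64) and PLATFORM(LINUX) becomes OS(LINUX). As a hedged sketch of the convention (the real definitions live in wtf/Platform.h; treat the exact spellings below as reconstructed from memory, not quoted from this commit):

```cpp
// Sketch of the wtf/Platform.h convention after the split. Each predicate
// expands to "is the matching WTF_*_FEATURE macro defined and non-zero?",
// so testing an unknown feature simply evaluates to 0 instead of failing.
#define CPU(WTF_FEATURE)      (defined WTF_CPU_##WTF_FEATURE && WTF_CPU_##WTF_FEATURE)
#define OS(WTF_FEATURE)       (defined WTF_OS_##WTF_FEATURE && WTF_OS_##WTF_FEATURE)
#define COMPILER(WTF_FEATURE) (defined WTF_COMPILER_##WTF_FEATURE && WTF_COMPILER_##WTF_FEATURE)

// Detection code then sets the feature macros, e.g.:
#if defined(__x86_64__) || defined(_M_X64)
#define WTF_CPU_X86_64 1
#endif

// Usage mirrors the guards throughout this diff:
#if CPU(X86_64) && OS(LINUX)
// x86-64 Linux specific code
#endif
```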
diff --git a/JavaScriptCore/jit/ExecutableAllocator.h b/JavaScriptCore/jit/ExecutableAllocator.h
index 9ca62c8..1fb8ff7 100644
--- a/JavaScriptCore/jit/ExecutableAllocator.h
+++ b/JavaScriptCore/jit/ExecutableAllocator.h
@@ -26,6 +26,7 @@
 #ifndef ExecutableAllocator_h
 #define ExecutableAllocator_h
 
+#include <stddef.h> // for ptrdiff_t
 #include <limits>
 #include <wtf/Assertions.h>
 #include <wtf/PassRefPtr.h>
@@ -33,15 +34,21 @@
 #include <wtf/UnusedParam.h>
 #include <wtf/Vector.h>
 
-#if PLATFORM(IPHONE)
+#if OS(IPHONE_OS)
 #include <libkern/OSCacheControl.h>
 #include <sys/mman.h>
 #endif
 
-#if PLATFORM(SYMBIAN)
+#if OS(SYMBIAN)
 #include <e32std.h>
 #endif
 
+#if OS(WINCE)
+// From pkfuncs.h (private header file from the Platform Builder)
+#define CACHE_SYNC_ALL 0x07F
+extern "C" __declspec(dllimport) void CacheRangeFlush(LPVOID pAddr, DWORD dwLength, DWORD dwFlags);
+#endif
+
 #define JIT_ALLOCATOR_PAGE_SIZE (ExecutableAllocator::pageSize)
 #define JIT_ALLOCATOR_LARGE_ALLOC_SIZE (ExecutableAllocator::pageSize * 4)
@@ -78,7 +85,7 @@ private:
     struct Allocation {
         char* pages;
         size_t size;
-#if PLATFORM(SYMBIAN)
+#if OS(SYMBIAN)
         RChunk* chunk;
 #endif
     };
@@ -179,17 +186,17 @@ public:
 #endif
 
-#if PLATFORM(X86) || PLATFORM(X86_64)
+#if CPU(X86) || CPU(X86_64)
     static void cacheFlush(void*, size_t)
     {
     }
-#elif PLATFORM(ARM_THUMB2) && PLATFORM(IPHONE)
+#elif CPU(ARM_THUMB2) && OS(IPHONE_OS)
     static void cacheFlush(void* code, size_t size)
     {
         sys_dcache_flush(code, size);
         sys_icache_invalidate(code, size);
     }
-#elif PLATFORM(ARM_THUMB2) && PLATFORM(LINUX)
+#elif CPU(ARM_THUMB2) && OS(LINUX)
     static void cacheFlush(void* code, size_t size)
    {
         asm volatile (
@@ -205,12 +212,12 @@ public:
             : "r" (code), "r" (reinterpret_cast<char*>(code) + size)
             : "r0", "r1", "r2");
     }
-#elif PLATFORM(SYMBIAN)
+#elif OS(SYMBIAN)
     static void cacheFlush(void* code, size_t size)
     {
         User::IMB_Range(code, static_cast<char*>(code) + size);
     }
-#elif PLATFORM(ARM_TRADITIONAL) && PLATFORM(LINUX)
+#elif CPU(ARM_TRADITIONAL) && OS(LINUX)
     static void cacheFlush(void* code, size_t size)
     {
         asm volatile (
@@ -226,6 +233,11 @@ public:
             : "r" (code), "r" (reinterpret_cast<char*>(code) + size)
             : "r0", "r1", "r2");
     }
+#elif OS(WINCE)
+    static void cacheFlush(void* code, size_t size)
+    {
+        CacheRangeFlush(code, size, CACHE_SYNC_ALL);
+    }
 #else
 #error "The cacheFlush support is missing on this platform."
 #endif
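For context on the hunk above: on architectures with split instruction and data caches (ARM here), freshly written machine code must be flushed from the data cache and invalidated in the instruction cache before it can be executed, while x86 keeps the two coherent in hardware, which is why its cacheFlush() is a no-op. A minimal usage sketch, not taken from WebKit, with the install() helper purely hypothetical:

```cpp
// Hypothetical usage sketch of the hook patched above (assumes the JSC
// headers from this diff). After copying generated code into an executable
// region, the caches must be synchronized before jumping into it.
#include <cstring>

void install(void* executableRegion, const void* emittedCode, size_t size)
{
    std::memcpy(executableRegion, emittedCode, size);         // write the code
    JSC::ExecutableAllocator::cacheFlush(executableRegion, size); // sync I-cache
    // only now is it safe to branch to executableRegion
}
```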
diff --git a/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp b/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp
index 7682b9c..dd1db4e 100644
--- a/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp
+++ b/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp
@@ -29,7 +29,7 @@
 
 #include <errno.h>
 
-#if ENABLE(ASSEMBLER) && PLATFORM(MAC) && PLATFORM(X86_64)
+#if ENABLE(ASSEMBLER) && OS(DARWIN) && CPU(X86_64)
 
 #include "TCSpinLock.h"
 #include <mach/mach_init.h>
diff --git a/JavaScriptCore/jit/ExecutableAllocatorPosix.cpp b/JavaScriptCore/jit/ExecutableAllocatorPosix.cpp
index 13a8626..06375ad 100644
--- a/JavaScriptCore/jit/ExecutableAllocatorPosix.cpp
+++ b/JavaScriptCore/jit/ExecutableAllocatorPosix.cpp
@@ -27,7 +27,7 @@
 
 #include "ExecutableAllocator.h"
 
-#if ENABLE(ASSEMBLER)
+#if ENABLE(ASSEMBLER) && OS(UNIX) && !OS(SYMBIAN)
 
 #include <sys/mman.h>
 #include <unistd.h>
@@ -35,7 +35,7 @@
 
 namespace JSC {
 
-#if !(PLATFORM(MAC) && PLATFORM(X86_64))
+#if !(OS(DARWIN) && CPU(X86_64))
 
 void ExecutableAllocator::intializePageSize()
 {
@@ -57,7 +57,7 @@ void ExecutablePool::systemRelease(const ExecutablePool::Allocation& alloc)
     ASSERT_UNUSED(result, !result);
 }
 
-#endif // !(PLATFORM(MAC) && PLATFORM(X86_64))
+#endif // !(OS(DARWIN) && CPU(X86_64))
 
 #if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
 void ExecutableAllocator::reprotectRegion(void* start, size_t size, ProtectionSeting setting)
diff --git a/JavaScriptCore/jit/ExecutableAllocatorSymbian.cpp b/JavaScriptCore/jit/ExecutableAllocatorSymbian.cpp
index c96ecae..e82975c 100644
--- a/JavaScriptCore/jit/ExecutableAllocatorSymbian.cpp
+++ b/JavaScriptCore/jit/ExecutableAllocatorSymbian.cpp
@@ -22,7 +22,7 @@
 
 #include "ExecutableAllocator.h"
 
-#if ENABLE(ASSEMBLER) && PLATFORM(SYMBIAN)
+#if ENABLE(ASSEMBLER) && OS(SYMBIAN)
 
 #include <e32hal.h>
 #include <e32std.h>
@@ -34,7 +34,7 @@ namespace JSC {
 
 void ExecutableAllocator::intializePageSize()
 {
-#if PLATFORM_ARM_ARCH(5)
+#if CPU(ARMV5_OR_LOWER)
     // The moving memory model (as used in ARMv5 and earlier platforms)
     // on Symbian OS limits the number of chunks for each process to 16.
     // To mitigate this limitation increase the pagesize to
diff --git a/JavaScriptCore/jit/ExecutableAllocatorWin.cpp b/JavaScriptCore/jit/ExecutableAllocatorWin.cpp
index e6ac855..e38323c 100644
--- a/JavaScriptCore/jit/ExecutableAllocatorWin.cpp
+++ b/JavaScriptCore/jit/ExecutableAllocatorWin.cpp
@@ -27,7 +27,7 @@
 
 #include "ExecutableAllocator.h"
 
-#if ENABLE(ASSEMBLER)
+#if ENABLE(ASSEMBLER) && OS(WINDOWS)
 
 #include "windows.h"
 
diff --git a/JavaScriptCore/jit/JIT.cpp b/JavaScriptCore/jit/JIT.cpp
index 585486f..c0da66d 100644
--- a/JavaScriptCore/jit/JIT.cpp
+++ b/JavaScriptCore/jit/JIT.cpp
@@ -27,7 +27,7 @@
 #include "JIT.h"
 
 // This probably does not belong here; adding here for now as a quick Windows build fix.
-#if ENABLE(ASSEMBLER) && PLATFORM(X86) && !PLATFORM(MAC)
+#if ENABLE(ASSEMBLER) && CPU(X86) && !OS(MAC_OS_X)
 #include "MacroAssembler.h"
 JSC::MacroAssemblerX86Common::SSE2CheckState JSC::MacroAssemblerX86Common::s_sse2CheckState = NotCheckedSSE2;
 #endif
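The s_sse2CheckState global defined above backs a lazy runtime SSE2 probe in MacroAssemblerX86Common. As a hedged illustration of the same idea, here is a sketch using a GCC/Clang builtin rather than WebKit's actual CPUID-reading code; the helper name and the tri-state enum are illustrative, not quoted:

```cpp
// Illustrative lazy SSE2 detection -- not the WebKit implementation.
enum SSE2CheckState { NotCheckedSSE2, HasSSE2, NoSSE2 };
static SSE2CheckState s_sse2CheckState = NotCheckedSSE2;

static bool supportsSSE2()
{
    if (s_sse2CheckState == NotCheckedSSE2) {
        // GCC/Clang expand this to a cached CPUID query (leaf 1, EDX bit 26).
        s_sse2CheckState = __builtin_cpu_supports("sse2") ? HasSSE2 : NoSSE2;
    }
    return s_sse2CheckState == HasSSE2;
}
```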
diff --git a/JavaScriptCore/jit/JIT.h b/JavaScriptCore/jit/JIT.h
index 0b902b9..8e0c9ac 100644
--- a/JavaScriptCore/jit/JIT.h
+++ b/JavaScriptCore/jit/JIT.h
@@ -192,7 +192,7 @@ namespace JSC {
         // on x86/x86-64 it is ecx for performance reasons, since the
         // MacroAssembler will need to plant register swaps if it is not -
         // however the code will still function correctly.
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
         static const RegisterID returnValueRegister = X86Registers::eax;
         static const RegisterID cachedResultRegister = X86Registers::eax;
         static const RegisterID firstArgumentRegister = X86Registers::edi;
@@ -210,7 +210,7 @@ namespace JSC {
         static const FPRegisterID fpRegT0 = X86Registers::xmm0;
         static const FPRegisterID fpRegT1 = X86Registers::xmm1;
         static const FPRegisterID fpRegT2 = X86Registers::xmm2;
-#elif PLATFORM(X86)
+#elif CPU(X86)
         static const RegisterID returnValueRegister = X86Registers::eax;
         static const RegisterID cachedResultRegister = X86Registers::eax;
         // On x86 we always use fastcall conventions = but on
@@ -228,7 +228,7 @@ namespace JSC {
         static const FPRegisterID fpRegT0 = X86Registers::xmm0;
         static const FPRegisterID fpRegT1 = X86Registers::xmm1;
         static const FPRegisterID fpRegT2 = X86Registers::xmm2;
-#elif PLATFORM(ARM_THUMB2)
+#elif CPU(ARM_THUMB2)
         static const RegisterID returnValueRegister = ARMRegisters::r0;
         static const RegisterID cachedResultRegister = ARMRegisters::r0;
         static const RegisterID firstArgumentRegister = ARMRegisters::r0;
@@ -244,7 +244,7 @@ namespace JSC {
         static const FPRegisterID fpRegT0 = ARMRegisters::d0;
         static const FPRegisterID fpRegT1 = ARMRegisters::d1;
         static const FPRegisterID fpRegT2 = ARMRegisters::d2;
-#elif PLATFORM(ARM_TRADITIONAL)
+#elif CPU(ARM_TRADITIONAL)
         static const RegisterID returnValueRegister = ARMRegisters::r0;
         static const RegisterID cachedResultRegister = ARMRegisters::r0;
         static const RegisterID firstArgumentRegister = ARMRegisters::r0;
@@ -436,7 +436,7 @@ namespace JSC {
         void emitSub32Constant(unsigned dst, unsigned op, int32_t constant, ResultType opType);
         void emitBinaryDoubleOp(OpcodeID, unsigned dst, unsigned op1, unsigned op2, OperandTypes, JumpList& notInt32Op1, JumpList& notInt32Op2, bool op1IsInRegisters = true, bool op2IsInRegisters = true);
 
-#if PLATFORM(X86)
+#if CPU(X86)
         // These architecture specific value are used to enable patching - see comment on op_put_by_id.
         static const int patchOffsetPutByIdStructure = 7;
         static const int patchOffsetPutByIdExternalLoad = 13;
@@ -465,7 +465,7 @@ namespace JSC {
         static const int patchOffsetMethodCheckProtoObj = 11;
         static const int patchOffsetMethodCheckProtoStruct = 18;
         static const int patchOffsetMethodCheckPutFunction = 29;
-#elif PLATFORM(ARM_TRADITIONAL)
+#elif CPU(ARM_TRADITIONAL)
         // These architecture specific value are used to enable patching - see comment on op_put_by_id.
         static const int patchOffsetPutByIdStructure = 4;
         static const int patchOffsetPutByIdExternalLoad = 16;
@@ -574,7 +574,7 @@ namespace JSC {
         void compileGetDirectOffset(RegisterID base, RegisterID result, RegisterID structure, RegisterID offset, RegisterID scratch);
         void compilePutDirectOffset(RegisterID base, RegisterID value, Structure* structure, size_t cachedOffset);
 
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
         // These architecture specific value are used to enable patching - see comment on op_put_by_id.
         static const int patchOffsetPutByIdStructure = 10;
         static const int patchOffsetPutByIdExternalLoad = 20;
@@ -597,7 +597,7 @@ namespace JSC {
         static const int patchOffsetMethodCheckProtoObj = 20;
         static const int patchOffsetMethodCheckProtoStruct = 30;
         static const int patchOffsetMethodCheckPutFunction = 50;
-#elif PLATFORM(X86)
+#elif CPU(X86)
         // These architecture specific value are used to enable patching - see comment on op_put_by_id.
         static const int patchOffsetPutByIdStructure = 7;
         static const int patchOffsetPutByIdExternalLoad = 13;
@@ -624,7 +624,7 @@ namespace JSC {
         static const int patchOffsetMethodCheckProtoObj = 11;
         static const int patchOffsetMethodCheckProtoStruct = 18;
         static const int patchOffsetMethodCheckPutFunction = 29;
-#elif PLATFORM(ARM_THUMB2)
+#elif CPU(ARM_THUMB2)
         // These architecture specific value are used to enable patching - see comment on op_put_by_id.
         static const int patchOffsetPutByIdStructure = 10;
         static const int patchOffsetPutByIdExternalLoad = 26;
@@ -647,7 +647,7 @@ namespace JSC {
         static const int patchOffsetMethodCheckProtoObj = 24;
         static const int patchOffsetMethodCheckProtoStruct = 34;
         static const int patchOffsetMethodCheckPutFunction = 58;
-#elif PLATFORM(ARM_TRADITIONAL)
+#elif CPU(ARM_TRADITIONAL)
         // These architecture specific value are used to enable patching - see comment on op_put_by_id.
         static const int patchOffsetPutByIdStructure = 4;
         static const int patchOffsetPutByIdExternalLoad = 16;
@@ -954,6 +954,46 @@ namespace JSC {
 #endif
 #endif
     } JIT_CLASS_ALIGNMENT;
+
+    inline void JIT::emit_op_loop(Instruction* currentInstruction)
+    {
+        emitTimeoutCheck();
+        emit_op_jmp(currentInstruction);
+    }
+
+    inline void JIT::emit_op_loop_if_true(Instruction* currentInstruction)
+    {
+        emitTimeoutCheck();
+        emit_op_jtrue(currentInstruction);
+    }
+
+    inline void JIT::emitSlow_op_loop_if_true(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+    {
+        emitSlow_op_jtrue(currentInstruction, iter);
+    }
+
+    inline void JIT::emit_op_loop_if_false(Instruction* currentInstruction)
+    {
+        emitTimeoutCheck();
+        emit_op_jfalse(currentInstruction);
+    }
+
+    inline void JIT::emitSlow_op_loop_if_false(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+    {
+        emitSlow_op_jfalse(currentInstruction, iter);
+    }
+
+    inline void JIT::emit_op_loop_if_less(Instruction* currentInstruction)
+    {
+        emitTimeoutCheck();
+        emit_op_jless(currentInstruction);
+    }
+
+    inline void JIT::emitSlow_op_loop_if_less(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+    {
+        emitSlow_op_jless(currentInstruction, iter);
+    }
+
 } // namespace JSC
 
 #endif // ENABLE(JIT)
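The inline definitions added above collapse each op_loop* opcode into "timeout check, then the ordinary jump": loop back-edges are where an engine gets a chance to interrupt runaway scripts. A hedged sketch of that general back-edge watchdog pattern, with all names illustrative rather than WebKit's:

```cpp
// Illustrative back-edge watchdog -- not WebKit code. A cheap counter is
// decremented at every loop back-edge; when it hits zero, a slower check
// consults a wall-clock deadline and can abort the script.
#include <chrono>
#include <stdexcept>

struct Watchdog {
    int ticksLeft = 1024;  // cheap per-back-edge budget
    std::chrono::steady_clock::time_point deadline =
        std::chrono::steady_clock::now() + std::chrono::seconds(10);

    void onLoopBackEdge()
    {
        if (--ticksLeft > 0)
            return;                       // fast path: one decrement + branch
        ticksLeft = 1024;                 // refill and do the expensive check
        if (std::chrono::steady_clock::now() >= deadline)
            throw std::runtime_error("script timeout");
    }
};
```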
diff --git a/JavaScriptCore/jit/JITArithmetic.cpp b/JavaScriptCore/jit/JITArithmetic.cpp
index 70f7564..feee8d2 100644
--- a/JavaScriptCore/jit/JITArithmetic.cpp
+++ b/JavaScriptCore/jit/JITArithmetic.cpp
@@ -1116,7 +1116,7 @@ void JIT::emitSlow_op_div(Instruction* currentInstruction, Vector<SlowCaseEntry>
 
 /* ------------------------------ BEGIN: OP_MOD ------------------------------ */
 
-#if PLATFORM(X86) || PLATFORM(X86_64)
+#if CPU(X86) || CPU(X86_64)
 
 void JIT::emit_op_mod(Instruction* currentInstruction)
 {
@@ -1178,7 +1178,7 @@ void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>
     stubCall.call(dst);
 }
 
-#else // PLATFORM(X86) || PLATFORM(X86_64)
+#else // CPU(X86) || CPU(X86_64)
 
 void JIT::emit_op_mod(Instruction* currentInstruction)
 {
@@ -1196,7 +1196,7 @@ void JIT::emitSlow_op_mod(Instruction*, Vector<SlowCaseEntry>::iterator&)
 {
 }
 
-#endif // PLATFORM(X86) || PLATFORM(X86_64)
+#endif // CPU(X86) || CPU(X86_64)
 
 /* ------------------------------ END: OP_MOD ------------------------------ */
 
@@ -2081,7 +2081,7 @@ void JIT::emitSlow_op_pre_dec(Instruction* currentInstruction, Vector<SlowCaseEn
 
 /* ------------------------------ BEGIN: OP_MOD ------------------------------ */
 
-#if PLATFORM(X86) || PLATFORM(X86_64)
+#if CPU(X86) || CPU(X86_64)
 
 void JIT::emit_op_mod(Instruction* currentInstruction)
 {
@@ -2130,7 +2130,7 @@ void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>
     stubCall.call(result);
 }
 
-#else // PLATFORM(X86) || PLATFORM(X86_64)
+#else // CPU(X86) || CPU(X86_64)
 
 void JIT::emit_op_mod(Instruction* currentInstruction)
 {
@@ -2149,7 +2149,7 @@ void JIT::emitSlow_op_mod(Instruction*, Vector<SlowCaseEntry>::iterator&)
     ASSERT_NOT_REACHED();
 }
 
-#endif // PLATFORM(X86) || PLATFORM(X86_64)
+#endif // CPU(X86) || CPU(X86_64)
 
 /* ------------------------------ END: OP_MOD ------------------------------ */
 
diff --git a/JavaScriptCore/jit/JITInlineMethods.h b/JavaScriptCore/jit/JITInlineMethods.h
index 1ce6889..5af7565 100644
--- a/JavaScriptCore/jit/JITInlineMethods.h
+++ b/JavaScriptCore/jit/JITInlineMethods.h
@@ -115,7 +115,7 @@ ALWAYS_INLINE JIT::Call JIT::emitNakedCall(CodePtr function)
 
 ALWAYS_INLINE void JIT::beginUninterruptedSequence(int insnSpace, int constSpace)
 {
-#if PLATFORM(ARM_TRADITIONAL)
+#if CPU(ARM_TRADITIONAL)
 #ifndef NDEBUG
     // Ensure the label after the sequence can also fit
     insnSpace += sizeof(ARMWord);
@@ -144,7 +144,7 @@ ALWAYS_INLINE void JIT::endUninterruptedSequence(int insnSpace, int constSpace)
 
 #endif
 
-#if PLATFORM(ARM)
+#if CPU(ARM)
 
 ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg)
 {
@@ -161,7 +161,7 @@ ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
     loadPtr(address, linkRegister);
 }
 
-#else // PLATFORM(X86) || PLATFORM(X86_64)
+#else // CPU(X86) || CPU(X86_64)
 
 ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg)
 {
@@ -194,10 +194,10 @@ ALWAYS_INLINE void JIT::restoreArgumentReference()
 }
 
 ALWAYS_INLINE void JIT::restoreArgumentReferenceForTrampoline()
 {
-#if PLATFORM(X86)
+#if CPU(X86)
     // Within a trampoline the return address will be on the stack at this point.
     addPtr(Imm32(sizeof(void*)), stackPointerRegister, firstArgumentRegister);
-#elif PLATFORM(ARM)
+#elif CPU(ARM)
     move(stackPointerRegister, firstArgumentRegister);
 #endif
     // In the trampoline on x86-64, the first argument register is not overwritten.
@@ -265,9 +265,9 @@ ALWAYS_INLINE void JIT::clearSamplingFlag(int32_t flag)
 #if ENABLE(SAMPLING_COUNTERS)
 ALWAYS_INLINE void JIT::emitCount(AbstractSamplingCounter& counter, uint32_t count)
 {
-#if PLATFORM(X86_64) // Or any other 64-bit plattform.
+#if CPU(X86_64) // Or any other 64-bit plattform.
     addPtr(Imm32(count), AbsoluteAddress(&counter.m_counter));
-#elif PLATFORM(X86) // Or any other little-endian 32-bit plattform.
+#elif CPU(X86) // Or any other little-endian 32-bit plattform.
     intptr_t hiWord = reinterpret_cast<intptr_t>(&counter.m_counter) + sizeof(int32_t);
     add32(Imm32(count), AbsoluteAddress(&counter.m_counter));
     addWithCarry32(Imm32(0), AbsoluteAddress(reinterpret_cast<void*>(hiWord)));
@@ -278,7 +278,7 @@ ALWAYS_INLINE void JIT::emitCount(AbstractSamplingCounter& counter, uint32_t cou
 #endif
 
 #if ENABLE(OPCODE_SAMPLING)
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
 ALWAYS_INLINE void JIT::sampleInstruction(Instruction* instruction, bool inHostFunction)
 {
     move(ImmPtr(m_interpreter->sampler()->sampleSlot()), X86Registers::ecx);
@@ -293,7 +293,7 @@ ALWAYS_INLINE void JIT::sampleInstruction(Instruction* instruction, bool inHostF
 #endif
 
 #if ENABLE(CODEBLOCK_SAMPLING)
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
 ALWAYS_INLINE void JIT::sampleCodeBlock(CodeBlock* codeBlock)
 {
     move(ImmPtr(m_interpreter->sampler()->codeBlockSlot()), X86Registers::ecx);
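The emitCount() path above keeps a 64-bit sampling counter even on 32-bit x86 by adding to the low word and then propagating the carry into the high word (add32 followed by addWithCarry32, i.e. an ADD/ADC pair). The same idea in portable C++, purely as an illustration:

```cpp
// Illustrative only: a 64-bit counter updated as two 32-bit words,
// mirroring the ADD/ADC pair the JIT emits on little-endian 32-bit targets.
#include <cstdint>

struct Counter64 {
    uint32_t lo; // stored first on a little-endian target
    uint32_t hi;
};

inline void addToCounter(Counter64& c, uint32_t count)
{
    uint32_t oldLo = c.lo;
    c.lo = oldLo + count;
    if (c.lo < oldLo) // unsigned wraparound == carry out of the low word
        c.hi += 1;    // the addWithCarry32(Imm32(0), hiWord) step
}
```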
diff --git a/JavaScriptCore/jit/JITOpcodes.cpp b/JavaScriptCore/jit/JITOpcodes.cpp
index 77fec28..d9a32d9 100644
--- a/JavaScriptCore/jit/JITOpcodes.cpp
+++ b/JavaScriptCore/jit/JITOpcodes.cpp
@@ -137,7 +137,7 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
     loadPtr(Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_jitCode)), regT0);
     jump(regT0);
 
-#if PLATFORM(X86) || PLATFORM(ARM_TRADITIONAL)
+#if CPU(X86) || CPU(ARM_TRADITIONAL)
     Label nativeCallThunk = align();
     preserveReturnAddressAfterCall(regT0);
     emitPutToCallFrameHeader(regT0, RegisterFile::ReturnPC); // Push return address
@@ -148,7 +148,7 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
     emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT1);
     emitPutToCallFrameHeader(regT1, RegisterFile::ScopeChain);
 
-#if PLATFORM(X86)
+#if CPU(X86)
     emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT0);
 
     /* We have two structs that we use to describe the stackframe we set up for our
@@ -160,7 +160,7 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
      * stack pointer by the right amount after the call.
      */
 
-#if COMPILER(MSVC) || PLATFORM(LINUX)
+#if COMPILER(MSVC) || OS(LINUX)
 #if COMPILER(MSVC)
 #pragma pack(push)
 #pragma pack(4)
@@ -223,7 +223,7 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
     storePtr(regT2, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, thisValue) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
     storePtr(regT3, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, thisValue) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
 
-#if COMPILER(MSVC) || PLATFORM(LINUX)
+#if COMPILER(MSVC) || OS(LINUX)
     // ArgList is passed by reference so is stackPointerRegister + 4 * sizeof(Register)
     addPtr(Imm32(OBJECT_OFFSETOF(NativeCallFrameStructure, result)), stackPointerRegister, X86Registers::ecx);
@@ -248,7 +248,7 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
     // so pull them off now
     addPtr(Imm32(NativeCallFrameSize - sizeof(NativeFunctionCalleeSignature)), stackPointerRegister);
 
-#elif PLATFORM(ARM_TRADITIONAL)
+#elif CPU(ARM_TRADITIONAL)
     emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT0);
 
     // Allocate stack space for our arglist
@@ -409,58 +409,6 @@ void JIT::emit_op_jmp(Instruction* currentInstruction)
     addJump(jump(), target);
 }
 
-void JIT::emit_op_loop(Instruction* currentInstruction)
-{
-    unsigned target = currentInstruction[1].u.operand;
-    emitTimeoutCheck();
-    addJump(jump(), target);
-}
-
-void JIT::emit_op_loop_if_less(Instruction* currentInstruction)
-{
-    unsigned op1 = currentInstruction[1].u.operand;
-    unsigned op2 = currentInstruction[2].u.operand;
-    unsigned target = currentInstruction[3].u.operand;
-
-    emitTimeoutCheck();
-
-    if (isOperandConstantImmediateInt(op1)) {
-        emitLoad(op2, regT1, regT0);
-        addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
-        addJump(branch32(GreaterThan, regT0, Imm32(getConstantOperand(op1).asInt32())), target);
-        return;
-    }
-
-    if (isOperandConstantImmediateInt(op2)) {
-        emitLoad(op1, regT1, regT0);
-        addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
-        addJump(branch32(LessThan, regT0, Imm32(getConstantOperand(op2).asInt32())), target);
-        return;
-    }
-
-    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
-    addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
-    addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
-    addJump(branch32(LessThan, regT0, regT2), target);
-}
-
-void JIT::emitSlow_op_loop_if_less(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
-    unsigned op1 = currentInstruction[1].u.operand;
-    unsigned op2 = currentInstruction[2].u.operand;
-    unsigned target = currentInstruction[3].u.operand;
-
-    if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
-        linkSlowCase(iter); // int32 check
-    linkSlowCase(iter); // int32 check
-
-    JITStubCall stubCall(this, cti_op_jless);
-    stubCall.addArgument(op1);
-    stubCall.addArgument(op2);
-    stubCall.call();
-    emitJumpSlowToHot(branchTest32(NonZero, regT0), target);
-}
-
 void JIT::emit_op_loop_if_lesseq(Instruction* currentInstruction)
 {
     unsigned op1 = currentInstruction[1].u.operand;
@@ -709,84 +657,6 @@ void JIT::emit_op_strcat(Instruction* currentInstruction)
     stubCall.call(currentInstruction[1].u.operand);
 }
 
-void JIT::emit_op_loop_if_true(Instruction* currentInstruction)
-{
-    unsigned cond = currentInstruction[1].u.operand;
-    unsigned target = currentInstruction[2].u.operand;
-
-    emitTimeoutCheck();
-
-    emitLoad(cond, regT1, regT0);
-
-    Jump isNotInteger = branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag));
-    addJump(branch32(NotEqual, regT0, Imm32(0)), target);
-    Jump isNotZero = jump();
-
-    isNotInteger.link(this);
-
-    addJump(branch32(Equal, regT1, Imm32(JSValue::TrueTag)), target);
-    addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::FalseTag)));
-
-    isNotZero.link(this);
-}
-
-void JIT::emitSlow_op_loop_if_true(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
-    unsigned cond = currentInstruction[1].u.operand;
-    unsigned target = currentInstruction[2].u.operand;
-
-    linkSlowCase(iter);
-
-    JITStubCall stubCall(this, cti_op_jtrue);
-    stubCall.addArgument(cond);
-    stubCall.call();
-    emitJumpSlowToHot(branchTest32(NonZero, regT0), target);
-}
-
-void JIT::emit_op_loop_if_false(Instruction* currentInstruction)
-{
-    unsigned cond = currentInstruction[1].u.operand;
-    unsigned target = currentInstruction[2].u.operand;
-
-    emitTimeoutCheck();
-
-    emitLoad(cond, regT1, regT0);
-
-    Jump isTrue = branch32(Equal, regT1, Imm32(JSValue::TrueTag));
-    addJump(branch32(Equal, regT1, Imm32(JSValue::FalseTag)), target);
-
-    Jump isNotInteger = branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag));
-    Jump isTrue2 = branch32(NotEqual, regT0, Imm32(0));
-    addJump(jump(), target);
-
-    if (supportsFloatingPoint()) {
-        isNotInteger.link(this);
-
-        addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));
-
-        zeroDouble(fpRegT0);
-        emitLoadDouble(cond, fpRegT1);
-        addJump(branchDouble(DoubleEqualOrUnordered, fpRegT0, fpRegT1), target);
-    } else
-        addSlowCase(isNotInteger);
-
-    isTrue.link(this);
-    isTrue2.link(this);
-}
-
-void JIT::emitSlow_op_loop_if_false(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
-    unsigned cond = currentInstruction[1].u.operand;
-    unsigned target = currentInstruction[2].u.operand;
-
-    linkSlowCase(iter);
-
-    JITStubCall stubCall(this, cti_op_jtrue);
-    stubCall.addArgument(cond);
-    stubCall.call();
-    emitJumpSlowToHot(branchTest32(Zero, regT0), target);
-}
-
 void JIT::emit_op_resolve_base(Instruction* currentInstruction)
 {
     JITStubCall stubCall(this, cti_op_resolve_base);
@@ -1731,7 +1601,7 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
 
     emitPutToCallFrameHeader(regT1, RegisterFile::ScopeChain);
 
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
     emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, X86Registers::ecx);
 
     // Allocate stack space for our arglist
@@ -1767,7 +1637,7 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
     call(Address(X86Registers::esi, OBJECT_OFFSETOF(JSFunction, m_data)));
     addPtr(Imm32(sizeof(ArgList)), stackPointerRegister);
 
-#elif PLATFORM(X86)
+#elif CPU(X86)
     emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT0);
 
     /* We have two structs that we use to describe the stackframe we set up for our
@@ -1778,7 +1648,7 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
      * not the rest of the callframe so we need a nice way to ensure we increment the
      * stack pointer by the right amount after the call.
      */
 
-#if COMPILER(MSVC) || PLATFORM(LINUX)
+#if COMPILER(MSVC) || OS(LINUX)
     struct NativeCallFrameStructure {
        //  CallFrame* callFrame; // passed in EDX
        JSObject* callee;
@@ -1831,7 +1701,7 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
     loadPtr(Address(regT1, -(int)sizeof(Register)), regT1);
     storePtr(regT1, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, thisValue)));
 
-#if COMPILER(MSVC) || PLATFORM(LINUX)
+#if COMPILER(MSVC) || OS(LINUX)
     // ArgList is passed by reference so is stackPointerRegister + 4 * sizeof(Register)
     addPtr(Imm32(OBJECT_OFFSETOF(NativeCallFrameStructure, result)), stackPointerRegister, X86Registers::ecx);
@@ -1859,7 +1729,7 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
     // so pull them off now
     addPtr(Imm32(NativeCallFrameSize - sizeof(NativeFunctionCalleeSignature)), stackPointerRegister);
 
-#elif PLATFORM(ARM)
+#elif CPU(ARM)
     emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT0);
 
     // Allocate stack space for our arglist
@@ -2005,47 +1875,6 @@ void JIT::emit_op_jmp(Instruction* currentInstruction)
     RECORD_JUMP_TARGET(target);
 }
 
-void JIT::emit_op_loop(Instruction* currentInstruction)
-{
-    emitTimeoutCheck();
-
-    unsigned target = currentInstruction[1].u.operand;
-    addJump(jump(), target);
-}
-
-void JIT::emit_op_loop_if_less(Instruction* currentInstruction)
-{
-    emitTimeoutCheck();
-
-    unsigned op1 = currentInstruction[1].u.operand;
-    unsigned op2 = currentInstruction[2].u.operand;
-    unsigned target = currentInstruction[3].u.operand;
-    if (isOperandConstantImmediateInt(op2)) {
-        emitGetVirtualRegister(op1, regT0);
-        emitJumpSlowCaseIfNotImmediateInteger(regT0);
-#if USE(JSVALUE64)
-        int32_t op2imm = getConstantOperandImmediateInt(op2);
-#else
-        int32_t op2imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)));
-#endif
-        addJump(branch32(LessThan, regT0, Imm32(op2imm)), target);
-    } else if (isOperandConstantImmediateInt(op1)) {
-        emitGetVirtualRegister(op2, regT0);
-        emitJumpSlowCaseIfNotImmediateInteger(regT0);
-#if USE(JSVALUE64)
-        int32_t op1imm = getConstantOperandImmediateInt(op1);
-#else
-        int32_t op1imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op1)));
-#endif
-        addJump(branch32(GreaterThan, regT0, Imm32(op1imm)), target);
-    } else {
-        emitGetVirtualRegisters(op1, regT0, op2, regT1);
-        emitJumpSlowCaseIfNotImmediateInteger(regT0);
-        emitJumpSlowCaseIfNotImmediateInteger(regT1);
-        addJump(branch32(LessThan, regT0, regT1), target);
-    }
-}
-
 void JIT::emit_op_loop_if_lesseq(Instruction* currentInstruction)
 {
     emitTimeoutCheck();
@@ -2284,40 +2113,6 @@ void JIT::emit_op_strcat(Instruction* currentInstruction)
     stubCall.call(currentInstruction[1].u.operand);
 }
 
-void JIT::emit_op_loop_if_true(Instruction* currentInstruction)
-{
-    emitTimeoutCheck();
-
-    unsigned target = currentInstruction[2].u.operand;
-    emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
-
-    Jump isZero = branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsNumber(m_globalData, 0))));
-    addJump(emitJumpIfImmediateInteger(regT0), target);
-
-    addJump(branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsBoolean(true)))), target);
-    addSlowCase(branchPtr(NotEqual, regT0, ImmPtr(JSValue::encode(jsBoolean(false)))));
-
-    isZero.link(this);
-};
-
-void JIT::emit_op_loop_if_false(Instruction* currentInstruction)
-{
-    emitTimeoutCheck();
-
-
-    unsigned target = currentInstruction[2].u.operand;
-    emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
-
-    addJump(branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsNumber(m_globalData, 0)))), target);
-    Jump isNonZero = emitJumpIfImmediateInteger(regT0);
-
-    addJump(branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsBoolean(false)))), target);
-    addSlowCase(branchPtr(NotEqual, regT0, ImmPtr(JSValue::encode(jsBoolean(true)))));
-
-    isNonZero.link(this);
-    RECORD_JUMP_TARGET(target);
-};
-
 void JIT::emit_op_resolve_base(Instruction* currentInstruction)
 {
     JITStubCall stubCall(this, cti_op_resolve_base);
@@ -2997,36 +2792,6 @@ void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCas
     stubCall.call(dst);
 }
 
-void JIT::emitSlow_op_loop_if_less(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
-    unsigned op1 = currentInstruction[1].u.operand;
-    unsigned op2 = currentInstruction[2].u.operand;
-    unsigned target = currentInstruction[3].u.operand;
-    if (isOperandConstantImmediateInt(op2)) {
-        linkSlowCase(iter);
-        JITStubCall stubCall(this, cti_op_jless);
-        stubCall.addArgument(regT0);
-        stubCall.addArgument(op2, regT2);
-        stubCall.call();
-        emitJumpSlowToHot(branchTest32(NonZero, regT0), target);
-    } else if (isOperandConstantImmediateInt(op1)) {
-        linkSlowCase(iter);
-        JITStubCall stubCall(this, cti_op_jless);
-        stubCall.addArgument(op1, regT2);
-        stubCall.addArgument(regT0);
-        stubCall.call();
-        emitJumpSlowToHot(branchTest32(NonZero, regT0), target);
-    } else {
-        linkSlowCase(iter);
-        linkSlowCase(iter);
-        JITStubCall stubCall(this, cti_op_jless);
-        stubCall.addArgument(regT0);
-        stubCall.addArgument(regT1);
-        stubCall.call();
-        emitJumpSlowToHot(branchTest32(NonZero, regT0), target);
-    }
-}
-
 void JIT::emitSlow_op_loop_if_lesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
     unsigned op2 = currentInstruction[2].u.operand;
@@ -3067,24 +2832,6 @@ void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCas
     stubPutByValCall.call();
 }
 
-void JIT::emitSlow_op_loop_if_true(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
-    linkSlowCase(iter);
-    JITStubCall stubCall(this, cti_op_jtrue);
-    stubCall.addArgument(regT0);
-    stubCall.call();
-    emitJumpSlowToHot(branchTest32(NonZero, regT0), currentInstruction[2].u.operand);
-}
-
-void JIT::emitSlow_op_loop_if_false(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
-    linkSlowCase(iter);
-    JITStubCall stubCall(this, cti_op_jtrue);
-    stubCall.addArgument(regT0);
-    stubCall.call();
-    emitJumpSlowToHot(branchTest32(Zero, regT0), currentInstruction[2].u.operand); // inverted!
-}
-
 void JIT::emitSlow_op_not(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
     linkSlowCase(iter);
diff --git a/JavaScriptCore/jit/JITPropertyAccess.cpp b/JavaScriptCore/jit/JITPropertyAccess.cpp
index e2995a1..ef95f99 100644
--- a/JavaScriptCore/jit/JITPropertyAccess.cpp
+++ b/JavaScriptCore/jit/JITPropertyAccess.cpp
@@ -730,7 +730,7 @@ void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* str
 
     // Check the prototype object's Structure had not changed.
     Structure** prototypeStructureAddress = &(protoObject->m_structure);
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
     move(ImmPtr(prototypeStructure), regT3);
     Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
 #else
@@ -810,7 +810,7 @@ void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, Polymorphi
 
     // Check the prototype object's Structure had not changed.
     Structure** prototypeStructureAddress = &(protoObject->m_structure);
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
     move(ImmPtr(prototypeStructure), regT3);
     Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
 #else
@@ -863,7 +863,7 @@ void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, Polymorphi
 
     // Check the prototype object's Structure had not changed.
     Structure** prototypeStructureAddress = &(protoObject->m_structure);
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
     move(ImmPtr(currStructure), regT3);
     bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3));
 #else
@@ -918,7 +918,7 @@ void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* str
 
     // Check the prototype object's Structure had not changed.
     Structure** prototypeStructureAddress = &(protoObject->m_structure);
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
     move(ImmPtr(currStructure), regT3);
     bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3));
 #else
@@ -1051,6 +1051,20 @@ void JIT::emit_op_get_by_val(Instruction* currentInstruction)
     emitPutVirtualRegister(dst);
 }
 
+void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, RegisterID structure, RegisterID offset, RegisterID scratch)
+{
+    ASSERT(sizeof(((Structure*)0)->m_propertyStorageCapacity) == sizeof(int32_t));
+    ASSERT(sizeof(JSObject::inlineStorageCapacity) == sizeof(int32_t));
+
+    Jump notUsingInlineStorage = branch32(NotEqual, Address(structure, OBJECT_OFFSETOF(Structure, m_propertyStorageCapacity)), Imm32(JSObject::inlineStorageCapacity));
+    loadPtr(BaseIndex(base, offset, ScalePtr, OBJECT_OFFSETOF(JSObject, m_inlineStorage)), result);
+    Jump finishedLoad = jump();
+    notUsingInlineStorage.link(this);
+    loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), scratch);
+    loadPtr(BaseIndex(scratch, offset, ScalePtr, 0), result);
+    finishedLoad.link(this);
+}
+
 void JIT::emit_op_get_by_pname(Instruction* currentInstruction)
 {
     unsigned dst = currentInstruction[1].u.operand;
@@ -1477,20 +1491,6 @@ void JIT::compileGetDirectOffset(JSObject* base, RegisterID temp, RegisterID res
     }
 }
 
-void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, RegisterID structure, RegisterID offset, RegisterID scratch)
-{
-    ASSERT(sizeof(((Structure*)0)->m_propertyStorageCapacity) == sizeof(int32_t));
-    ASSERT(sizeof(JSObject::inlineStorageCapacity) == sizeof(int32_t));
-
-    Jump notUsingInlineStorage = branch32(NotEqual, Address(structure, OBJECT_OFFSETOF(Structure, m_propertyStorageCapacity)), Imm32(JSObject::inlineStorageCapacity));
-    loadPtr(BaseIndex(base, offset, ScalePtr, OBJECT_OFFSETOF(JSObject, m_inlineStorage)), result);
-    Jump finishedLoad = jump();
-    notUsingInlineStorage.link(this);
-    loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), scratch);
-    loadPtr(BaseIndex(scratch, offset, ScalePtr, 0), result);
-    finishedLoad.link(this);
-}
-
 void JIT::testPrototype(Structure* structure, JumpList& failureCases)
 {
     if (structure->m_prototype.isNull())
@@ -1676,7 +1676,7 @@ void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* str
 
     // Check the prototype object's Structure had not changed.
     Structure** prototypeStructureAddress = &(protoObject->m_structure);
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
     move(ImmPtr(prototypeStructure), regT3);
     Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
 #else
@@ -1751,7 +1751,7 @@ void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, Polymorphi
 
     // Check the prototype object's Structure had not changed.
     Structure** prototypeStructureAddress = &(protoObject->m_structure);
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
     move(ImmPtr(prototypeStructure), regT3);
     Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
 #else
@@ -1804,7 +1804,7 @@ void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, Polymorphi
 
     // Check the prototype object's Structure had not changed.
     Structure** prototypeStructureAddress = &(protoObject->m_structure);
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
     move(ImmPtr(currStructure), regT3);
     bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3));
 #else
@@ -1857,7 +1857,7 @@ void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* str
 
     // Check the prototype object's Structure had not changed.
     Structure** prototypeStructureAddress = &(protoObject->m_structure);
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
     move(ImmPtr(currStructure), regT3);
     bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3));
 #else
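A pattern worth noting in the hunks above: only the x86-64 path materializes the Structure* into regT3 before comparing, while the 32-bit path compares an immediate against memory directly. The likely reason (stated here as an inference, not from the commit itself) is an ISA constraint: x86-64 CMP accepts at most a sign-extended 32-bit immediate, so a full 64-bit pointer constant has to travel through a register first.

```cpp
// Illustrative only: why the CPU(X86_64) branch needs a scratch register.
//
//   32-bit x86:  cmp dword ptr [prototypeStructureAddress], imm32
//                (a Structure* fits in the 32-bit immediate field)
//
//   x86-64:      mov r11, imm64                       ; only MOV takes imm64
//                cmp qword ptr [prototypeStructureAddress], r11
//                (CMP has no imm64 form, only a sign-extended imm32)
```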
diff --git a/JavaScriptCore/jit/JITStubs.cpp b/JavaScriptCore/jit/JITStubs.cpp
index cace8b2..9191907 100644
--- a/JavaScriptCore/jit/JITStubs.cpp
+++ b/JavaScriptCore/jit/JITStubs.cpp
@@ -64,31 +64,37 @@ using namespace std;
 
 namespace JSC {
 
-#if PLATFORM(DARWIN) || PLATFORM(WIN_OS)
+#if OS(DARWIN) || OS(WINDOWS)
 #define SYMBOL_STRING(name) "_" #name
 #else
 #define SYMBOL_STRING(name) #name
 #endif
 
-#if PLATFORM(IPHONE)
+#if OS(IPHONE_OS)
 #define THUMB_FUNC_PARAM(name) SYMBOL_STRING(name)
 #else
 #define THUMB_FUNC_PARAM(name)
 #endif
 
-#if PLATFORM(LINUX) && PLATFORM(X86_64)
+#if OS(LINUX) && CPU(X86_64)
 #define SYMBOL_STRING_RELOCATION(name) #name "@plt"
 #else
 #define SYMBOL_STRING_RELOCATION(name) SYMBOL_STRING(name)
 #endif
 
-#if PLATFORM(DARWIN)
+#if OS(DARWIN)
     // Mach-O platform
 #define HIDE_SYMBOL(name) ".private_extern _" #name
-#elif PLATFORM(AIX)
+#elif OS(AIX)
     // IBM's own file format
 #define HIDE_SYMBOL(name) ".lglobl " #name
-#elif PLATFORM(LINUX) || PLATFORM(FREEBSD) || PLATFORM(OPENBSD) || PLATFORM(SOLARIS) || (PLATFORM(HPUX) && PLATFORM(IA64)) || PLATFORM(SYMBIAN) || PLATFORM(NETBSD)
+#elif OS(LINUX) \
+    || OS(FREEBSD) \
+    || OS(OPENBSD) \
+    || OS(SOLARIS) \
+    || (OS(HPUX) && CPU(IA64)) \
+    || OS(SYMBIAN) \
+    || OS(NETBSD)
     // ELF platform
 #define HIDE_SYMBOL(name) ".hidden " #name
 #else
@@ -97,7 +103,7 @@ namespace JSC {
 
 #if USE(JSVALUE32_64)
 
-#if COMPILER(GCC) && PLATFORM(X86)
+#if COMPILER(GCC) && CPU(X86)
 
 // These ASSERTs remind you that, if you change the layout of JITStackFrame, you
 // need to change the assembly trampolines below to match.
@@ -156,7 +162,7 @@ SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
     "ret" "\n"
 );
 
-#elif COMPILER(GCC) && PLATFORM(X86_64)
+#elif COMPILER(GCC) && CPU(X86_64)
 
 #if USE(JIT_STUB_ARGUMENT_VA_LIST)
 #error "JIT_STUB_ARGUMENT_VA_LIST not supported on x86-64."
@@ -226,7 +232,7 @@ SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
     "ret" "\n"
 );
 
-#elif COMPILER(GCC) && PLATFORM(ARM_THUMB2)
+#elif COMPILER(GCC) && CPU(ARM_THUMB2)
 
 #if USE(JIT_STUB_ARGUMENT_VA_LIST)
 #error "JIT_STUB_ARGUMENT_VA_LIST not supported on ARMv7."
@@ -292,7 +298,7 @@ SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
     "bx lr" "\n"
 );
 
-#elif COMPILER(GCC) && PLATFORM(ARM_TRADITIONAL)
+#elif COMPILER(GCC) && CPU(ARM_TRADITIONAL)
 
 asm volatile (
 ".globl " SYMBOL_STRING(ctiTrampoline) "\n"
@@ -326,7 +332,7 @@ SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
     "mov pc, lr" "\n"
 );
 
-#elif COMPILER(MSVC)
+#elif COMPILER(MSVC) && CPU(X86)
 
 #if USE(JIT_STUB_ARGUMENT_VA_LIST)
 #error "JIT_STUB_ARGUMENT_VA_LIST configuration not supported on MSVC."
@@ -390,11 +396,13 @@ extern "C" {
     }
 }
 
-#endif // COMPILER(GCC) && PLATFORM(X86)
+#else
+    #error "JIT not supported on this platform."
+#endif
 
 #else // USE(JSVALUE32_64)
 
-#if COMPILER(GCC) && PLATFORM(X86)
+#if COMPILER(GCC) && CPU(X86)
 
 // These ASSERTs remind you that, if you change the layout of JITStackFrame, you
 // need to change the assembly trampolines below to match.
@@ -452,7 +460,7 @@ SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
     "ret" "\n"
 );
 
-#elif COMPILER(GCC) && PLATFORM(X86_64)
+#elif COMPILER(GCC) && CPU(X86_64)
 
 #if USE(JIT_STUB_ARGUMENT_VA_LIST)
 #error "JIT_STUB_ARGUMENT_VA_LIST not supported on x86-64."
@@ -529,7 +537,7 @@ SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
     "ret" "\n"
 );
 
-#elif COMPILER(GCC) && PLATFORM(ARM_THUMB2)
+#elif COMPILER(GCC) && CPU(ARM_THUMB2)
 
 #if USE(JIT_STUB_ARGUMENT_VA_LIST)
 #error "JIT_STUB_ARGUMENT_VA_LIST not supported on ARMv7."
@@ -596,7 +604,7 @@ SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
     "bx lr" "\n"
 );
 
-#elif COMPILER(GCC) && PLATFORM(ARM_TRADITIONAL)
+#elif COMPILER(GCC) && CPU(ARM_TRADITIONAL)
 
 asm volatile (
 ".text\n"
@@ -633,7 +641,46 @@ SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
     "mov pc, lr" "\n"
 );
 
-#elif COMPILER(MSVC)
+#elif COMPILER(RVCT) && CPU(ARM_TRADITIONAL)
+
+__asm EncodedJSValue ctiTrampoline(void*, RegisterFile*, CallFrame*, JSValue*, Profiler**, JSGlobalData*)
+{
+    ARM
+    stmdb sp!, {r1-r3}
+    stmdb sp!, {r4-r8, lr}
+    sub sp, sp, #36
+    mov r4, r2
+    mov r5, #512
+    mov lr, pc
+    bx r0
+    add sp, sp, #36
+    ldmia sp!, {r4-r8, lr}
+    add sp, sp, #12
+    bx lr
+}
+
+__asm void ctiVMThrowTrampoline()
+{
+    ARM
+    PRESERVE8
+    mov r0, sp
+    bl cti_vm_throw
+    add sp, sp, #36
+    ldmia sp!, {r4-r8, lr}
+    add sp, sp, #12
+    bx lr
+}
+
+__asm void ctiOpThrowNotCaught()
+{
+    ARM
+    add sp, sp, #36
+    ldmia sp!, {r4-r8, lr}
+    add sp, sp, #12
+    bx lr
+}
+
+#elif COMPILER(MSVC) && CPU(X86)
 
 #if USE(JIT_STUB_ARGUMENT_VA_LIST)
 #error "JIT_STUB_ARGUMENT_VA_LIST configuration not supported on MSVC."
@@ -696,7 +743,9 @@ extern "C" {
     }
 }
 
-#endif // COMPILER(GCC) && PLATFORM(X86)
+#else
+    #error "JIT not supported on this platform."
+#endif
 
 #endif // USE(JSVALUE32_64)
 
@@ -710,7 +759,7 @@ JITThunks::JITThunks(JSGlobalData* globalData)
 {
     JIT::compileCTIMachineTrampolines(globalData, &m_executablePool, &m_ctiStringLengthTrampoline, &m_ctiVirtualCallLink, &m_ctiVirtualCall, &m_ctiNativeCallThunk);
 
-#if PLATFORM(ARM_THUMB2)
+#if CPU(ARM_THUMB2)
     // Unfortunate the arm compiler does not like the use of offsetof on JITStackFrame (since it contains non POD types),
     // and the OBJECT_OFFSETOF macro does not appear constantish enough for it to be happy with its use in COMPILE_ASSERT
     // macros.
@@ -839,21 +888,25 @@ NEVER_INLINE void JITThunks::tryCacheGetByID(CallFrame* callFrame, CodeBlock* co
         ASSERT(slot.slotBase().isObject());
 
         JSObject* slotBaseObject = asObject(slot.slotBase());
-        
+
+        size_t offset = slot.cachedOffset();
+
         // Since we're accessing a prototype in a loop, it's a good bet that it
         // should not be treated as a dictionary.
-        if (slotBaseObject->structure()->isDictionary())
+        if (slotBaseObject->structure()->isDictionary()) {
             slotBaseObject->flattenDictionaryObject();
+            offset = slotBaseObject->structure()->get(propertyName);
+        }
 
         stubInfo->initGetByIdProto(structure, slotBaseObject->structure());
 
         ASSERT(!structure->isDictionary());
         ASSERT(!slotBaseObject->structure()->isDictionary());
-        JIT::compileGetByIdProto(callFrame->scopeChain()->globalData, callFrame, codeBlock, stubInfo, structure, slotBaseObject->structure(), slot.cachedOffset(), returnAddress);
+        JIT::compileGetByIdProto(callFrame->scopeChain()->globalData, callFrame, codeBlock, stubInfo, structure, slotBaseObject->structure(), offset, returnAddress);
         return;
     }
 
-    size_t count = normalizePrototypeChain(callFrame, baseValue, slot.slotBase());
+    size_t offset = slot.cachedOffset();
+    size_t count = normalizePrototypeChain(callFrame, baseValue, slot.slotBase(), propertyName, offset);
     if (!count) {
         stubInfo->accessType = access_get_by_id_generic;
         return;
@@ -861,7 +914,7 @@ NEVER_INLINE void JITThunks::tryCacheGetByID(CallFrame* callFrame, CodeBlock* co
 
     StructureChain* prototypeChain = structure->prototypeChain(callFrame);
     stubInfo->initGetByIdChain(structure, prototypeChain);
-    JIT::compileGetByIdChain(callFrame->scopeChain()->globalData, callFrame, codeBlock, stubInfo, structure, prototypeChain, count, slot.cachedOffset(), returnAddress);
+    JIT::compileGetByIdChain(callFrame->scopeChain()->globalData, callFrame, codeBlock, stubInfo, structure, prototypeChain, count, offset, returnAddress);
 }
 
 #endif // ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
@@ -957,7 +1010,7 @@ static NEVER_INLINE void throwStackOverflowError(CallFrame* callFrame, JSGlobalD
         } \
     } while (0)
 
-#if PLATFORM(ARM_THUMB2)
+#if CPU(ARM_THUMB2)
 
 #define DEFINE_STUB_FUNCTION(rtype, op) \
     extern "C" { \
@@ -978,7 +1031,7 @@ static NEVER_INLINE void throwStackOverflowError(CallFrame* callFrame, JSGlobalD
     ); \
     rtype JITStubThunked_##op(STUB_ARGS_DECLARATION) \
 
-#elif PLATFORM(ARM_TRADITIONAL) && COMPILER(GCC)
+#elif CPU(ARM_TRADITIONAL) && COMPILER(GCC)
 
 #if USE(JSVALUE32_64)
 #define THUNK_RETURN_ADDRESS_OFFSET 64
@@ -1002,6 +1055,32 @@ COMPILE_ASSERT(offsetof(struct JITStackFrame, thunkReturnAddress) == THUNK_RETUR
     ); \
     rtype JITStubThunked_##op(STUB_ARGS_DECLARATION)
 
+#elif CPU(ARM_TRADITIONAL) && COMPILER(RVCT)
+
+#define DEFINE_STUB_FUNCTION(rtype, op) rtype JITStubThunked_##op(STUB_ARGS_DECLARATION)
+
+/* The following is a workaround for RVCT toolchain; precompiler macros are not expanded before the code is passed to the assembler */
+
+/* The following section is a template to generate code for GeneratedJITStubs_RVCT.h */
+/* The pattern "#xxx#" will be replaced with "xxx" */
+
+/*
+RVCT(extern "C" #rtype# JITStubThunked_#op#(STUB_ARGS_DECLARATION);)
+RVCT(__asm #rtype# cti_#op#(STUB_ARGS_DECLARATION))
+RVCT({)
+RVCT(    ARM)
+RVCT(    IMPORT JITStubThunked_#op#)
+RVCT(    str lr, [sp, #32])
+RVCT(    bl JITStubThunked_#op#)
+RVCT(    ldr lr, [sp, #32])
+RVCT(    bx lr)
+RVCT(})
+RVCT()
+*/
+
+/* Include the generated file */
+#include "GeneratedJITStubs_RVCT.h"
+
 #else
 #define DEFINE_STUB_FUNCTION(rtype, op) rtype JIT_STUB cti_##op(STUB_ARGS_DECLARATION)
 #endif
@@ -1043,9 +1122,16 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_add)
 
     CallFrame* callFrame = stackFrame.callFrame;
 
+<<<<<<< HEAD
     bool leftIsString = v1.isString();
     if (leftIsString && v2.isString()) {
         JSValue result = jsString(callFrame, asString(v1), asString(v2));
+=======
+    if (v1.isString()) {
+        JSValue result = v2.isString()
+            ? jsString(callFrame, asString(v1), asString(v2))
+            : jsString(callFrame, asString(v1), v2.toPrimitiveString(callFrame));
+>>>>>>> webkit.org at r54127
         CHECK_FOR_EXCEPTION_AT_END();
         return JSValue::encode(result);
     }
@@ -1375,10 +1461,11 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_proto_list)
     STUB_INIT_STACK_FRAME(stackFrame);
 
     CallFrame* callFrame = stackFrame.callFrame;
+    const Identifier& propertyName = stackFrame.args[1].identifier();
 
     JSValue baseValue = stackFrame.args[0].jsValue();
     PropertySlot slot(baseValue);
-    JSValue result = baseValue.get(callFrame, stackFrame.args[1].identifier(), slot);
+    JSValue result = baseValue.get(callFrame, propertyName, slot);
 
     CHECK_FOR_EXCEPTION();
 
@@ -1393,6 +1480,8 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_proto_list)
 
     ASSERT(slot.slotBase().isObject());
     JSObject* slotBaseObject = asObject(slot.slotBase());
+
+    size_t offset = slot.cachedOffset();
 
     if (slot.slotBase() == baseValue)
         ctiPatchCallByReturnAddress(codeBlock, STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_proto_fail));
@@ -1400,23 +1489,25 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_proto_list)
         ASSERT(!asCell(baseValue)->structure()->isDictionary());
         // Since we're accessing a prototype in a loop, it's a good bet that it
         // should not be treated as a dictionary.
-        if (slotBaseObject->structure()->isDictionary())
+        if (slotBaseObject->structure()->isDictionary()) {
             slotBaseObject->flattenDictionaryObject();
+            offset = slotBaseObject->structure()->get(propertyName);
+        }
 
         int listIndex;
         PolymorphicAccessStructureList* prototypeStructureList = getPolymorphicAccessStructureListSlot(stubInfo, listIndex);
 
-        JIT::compileGetByIdProtoList(callFrame->scopeChain()->globalData, callFrame, codeBlock, stubInfo, prototypeStructureList, listIndex, structure, slotBaseObject->structure(), slot.cachedOffset());
+        JIT::compileGetByIdProtoList(callFrame->scopeChain()->globalData, callFrame, codeBlock, stubInfo, prototypeStructureList, listIndex, structure, slotBaseObject->structure(), offset);
 
         if (listIndex == (POLYMORPHIC_LIST_CACHE_SIZE - 1))
             ctiPatchCallByReturnAddress(codeBlock, STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_proto_list_full));
-    } else if (size_t count = normalizePrototypeChain(callFrame, baseValue, slot.slotBase())) {
+    } else if (size_t count = normalizePrototypeChain(callFrame, baseValue, slot.slotBase(), propertyName, offset)) {
         ASSERT(!asCell(baseValue)->structure()->isDictionary());
         int listIndex;
         PolymorphicAccessStructureList* prototypeStructureList = getPolymorphicAccessStructureListSlot(stubInfo, listIndex);
 
         StructureChain* protoChain = structure->prototypeChain(callFrame);
-        JIT::compileGetByIdChainList(callFrame->scopeChain()->globalData, callFrame, codeBlock, stubInfo, prototypeStructureList, listIndex, structure, protoChain, count, slot.cachedOffset());
+        JIT::compileGetByIdChainList(callFrame->scopeChain()->globalData, callFrame, codeBlock, stubInfo, prototypeStructureList, listIndex, structure, protoChain, count, offset);
 
         if (listIndex == (POLYMORPHIC_LIST_CACHE_SIZE - 1))
             ctiPatchCallByReturnAddress(codeBlock, STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_proto_list_full));
@@ -1561,7 +1652,7 @@ DEFINE_STUB_FUNCTION(void*, op_call_JSFunction)
 {
     STUB_INIT_STACK_FRAME(stackFrame);
 
-#ifndef NDEBUG
+#if !ASSERT_DISABLED
     CallData callData;
     ASSERT(stackFrame.args[0].jsValue().getCallData(callData) == CallTypeJS);
 #endif
@@ -1810,7 +1901,7 @@ DEFINE_STUB_FUNCTION(JSObject*, op_construct_JSConstruct)
         VM_THROW_EXCEPTION();
     }
 
-#ifndef NDEBUG
+#if !ASSERT_DISABLED
     ConstructData constructData;
     ASSERT(constructor->getConstructData(constructData) == ConstructTypeJS);
 #endif
@@ -2350,8 +2441,6 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_post_inc)
     return JSValue::encode(number);
 }
 
-#if USE(JSVALUE32_64)
-
 DEFINE_STUB_FUNCTION(int, op_eq)
 {
     STUB_INIT_STACK_FRAME(stackFrame);
@@ -2359,6 +2448,7 @@ DEFINE_STUB_FUNCTION(int, op_eq)
     JSValue src1 = stackFrame.args[0].jsValue();
     JSValue src2 = stackFrame.args[1].jsValue();
 
+#if USE(JSVALUE32_64)
 start:
     if (src2.isUndefined()) {
         return src1.isNull() ||
@@ -2439,8 +2529,18 @@ DEFINE_STUB_FUNCTION(int, op_eq)
     src1 = asObject(cell1)->toPrimitive(stackFrame.callFrame);
     CHECK_FOR_EXCEPTION();
     goto start;
+
+#else // USE(JSVALUE32_64)
+    CallFrame* callFrame = stackFrame.callFrame;
+
+    bool result = JSValue::equalSlowCaseInline(callFrame, src1, src2);
+    CHECK_FOR_EXCEPTION_AT_END();
+    return result;
+#endif // USE(JSVALUE32_64)
 }
 
+#if USE(JSVALUE32_64)
+
 DEFINE_STUB_FUNCTION(int, op_eq_strings)
 {
     STUB_INIT_STACK_FRAME(stackFrame);
@@ -2453,23 +2553,7 @@ DEFINE_STUB_FUNCTION(int, op_eq_strings)
     return string1->value(stackFrame.callFrame) == string2->value(stackFrame.callFrame);
 }
 
-#else // USE(JSVALUE32_64)
-
-DEFINE_STUB_FUNCTION(int, op_eq)
-{
-    STUB_INIT_STACK_FRAME(stackFrame);
-
-    JSValue src1 = stackFrame.args[0].jsValue();
-    JSValue src2 = stackFrame.args[1].jsValue();
-
-    CallFrame* callFrame = stackFrame.callFrame;
-
-    bool result = JSValue::equalSlowCaseInline(callFrame, src1, src2);
-    CHECK_FOR_EXCEPTION_AT_END();
-    return result;
-}
-
-#endif // USE(JSVALUE32_64)
+#endif
 
 DEFINE_STUB_FUNCTION(EncodedJSValue, op_lshift)
 {
diff --git a/JavaScriptCore/jit/JITStubs.h b/JavaScriptCore/jit/JITStubs.h
index f71dc9a..99c2dd2 100644
--- a/JavaScriptCore/jit/JITStubs.h
+++ b/JavaScriptCore/jit/JITStubs.h
@@ -75,7 +75,7 @@ namespace JSC {
         ReturnAddressPtr returnAddress() { return ReturnAddressPtr(asPointer); }
     };
 
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
     struct JITStackFrame {
         void* reserved; // Unused
         JITStubArg args[6];
@@ -99,7 +99,7 @@ namespace JSC {
 
         // When JIT code makes a call, it pushes its return address just below the rest of the stack.
         ReturnAddressPtr* returnAddressSlot() { return reinterpret_cast<ReturnAddressPtr*>(this) - 1; }
     };
-#elif PLATFORM(X86)
+#elif CPU(X86)
 #if COMPILER(MSVC)
 #pragma pack(push)
 #pragma pack(4)
@@ -130,7 +130,7 @@ namespace JSC {
 #if COMPILER(MSVC)
 #pragma pack(pop)
 #endif // COMPILER(MSVC)
-#elif PLATFORM(ARM_THUMB2)
+#elif CPU(ARM_THUMB2)
     struct JITStackFrame {
         void* reserved; // Unused
         JITStubArg args[6];
@@ -158,7 +158,7 @@ namespace JSC {
 
         ReturnAddressPtr* returnAddressSlot() { return &thunkReturnAddress; }
     };
-#elif PLATFORM(ARM_TRADITIONAL)
+#elif CPU(ARM_TRADITIONAL)
     struct JITStackFrame {
         JITStubArg padding; // Unused
         JITStubArg args[7];
@@ -202,16 +202,16 @@ namespace JSC {
     #define STUB_ARGS_DECLARATION void** args
     #define STUB_ARGS (args)
 
-    #if PLATFORM(X86) && COMPILER(MSVC)
+    #if CPU(X86) && COMPILER(MSVC)
     #define JIT_STUB __fastcall
-    #elif PLATFORM(X86) && COMPILER(GCC)
+    #elif CPU(X86) && COMPILER(GCC)
     #define JIT_STUB __attribute__ ((fastcall))
     #else
     #define JIT_STUB
    #endif
 #endif
 
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
     struct VoidPtrPair {
         void* first;
         void* second;
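One structural detail visible in the JITStackFrame hunks above: on x86/x86-64 the caller's CALL instruction implicitly pushes the return address immediately below the stub's argument block, so returnAddressSlot() is computed as "this minus one pointer", whereas ARM has no implicit push and the frame reserves an explicit thunkReturnAddress field instead. A hedged sketch of the two layouts, with simplified field lists that are not the real definitions:

```cpp
// Illustrative layouts only -- not the actual JITStackFrame definitions.
// x86: CALL leaves the return address one slot below the frame's base,
// so the slot is found by stepping back one pointer from `this`.
struct X86StyleFrame {
    void* args[6];
    void** returnAddressSlot() { return reinterpret_cast<void**>(this) - 1; }
};

// ARM: the link register must be spilled explicitly, so the trampoline
// reserves a named slot inside the frame itself.
struct ArmStyleFrame {
    void* thunkReturnAddress;
    void* args[6];
    void** returnAddressSlot() { return &thunkReturnAddress; }
};
```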
