From cad810f21b803229eb11403f9209855525a25d57 Mon Sep 17 00:00:00 2001 From: Steve Block Date: Fri, 6 May 2011 11:45:16 +0100 Subject: Merge WebKit at r75315: Initial merge by git. Change-Id: I570314b346ce101c935ed22a626b48c2af266b84 --- Source/JavaScriptCore/jit/ExecutableAllocator.cpp | 128 + Source/JavaScriptCore/jit/ExecutableAllocator.h | 362 ++ .../jit/ExecutableAllocatorFixedVMPool.cpp | 470 +++ Source/JavaScriptCore/jit/JIT.cpp | 670 ++++ Source/JavaScriptCore/jit/JIT.h | 1046 ++++++ Source/JavaScriptCore/jit/JITArithmetic.cpp | 1244 +++++++ Source/JavaScriptCore/jit/JITArithmetic32_64.cpp | 1424 ++++++++ Source/JavaScriptCore/jit/JITCall.cpp | 261 ++ Source/JavaScriptCore/jit/JITCall32_64.cpp | 356 ++ Source/JavaScriptCore/jit/JITCode.h | 117 + Source/JavaScriptCore/jit/JITInlineMethods.h | 809 +++++ Source/JavaScriptCore/jit/JITOpcodes.cpp | 1775 ++++++++++ Source/JavaScriptCore/jit/JITOpcodes32_64.cpp | 1834 ++++++++++ Source/JavaScriptCore/jit/JITPropertyAccess.cpp | 1101 ++++++ .../JavaScriptCore/jit/JITPropertyAccess32_64.cpp | 1186 +++++++ Source/JavaScriptCore/jit/JITStubCall.h | 237 ++ Source/JavaScriptCore/jit/JITStubs.cpp | 3638 ++++++++++++++++++++ Source/JavaScriptCore/jit/JITStubs.h | 416 +++ Source/JavaScriptCore/jit/JSInterfaceJIT.h | 292 ++ Source/JavaScriptCore/jit/SpecializedThunkJIT.h | 165 + Source/JavaScriptCore/jit/ThunkGenerators.cpp | 162 + Source/JavaScriptCore/jit/ThunkGenerators.h | 45 + 22 files changed, 17738 insertions(+) create mode 100644 Source/JavaScriptCore/jit/ExecutableAllocator.cpp create mode 100644 Source/JavaScriptCore/jit/ExecutableAllocator.h create mode 100644 Source/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp create mode 100644 Source/JavaScriptCore/jit/JIT.cpp create mode 100644 Source/JavaScriptCore/jit/JIT.h create mode 100644 Source/JavaScriptCore/jit/JITArithmetic.cpp create mode 100644 Source/JavaScriptCore/jit/JITArithmetic32_64.cpp create mode 100644 Source/JavaScriptCore/jit/JITCall.cpp create mode 100644 Source/JavaScriptCore/jit/JITCall32_64.cpp create mode 100644 Source/JavaScriptCore/jit/JITCode.h create mode 100644 Source/JavaScriptCore/jit/JITInlineMethods.h create mode 100644 Source/JavaScriptCore/jit/JITOpcodes.cpp create mode 100644 Source/JavaScriptCore/jit/JITOpcodes32_64.cpp create mode 100644 Source/JavaScriptCore/jit/JITPropertyAccess.cpp create mode 100644 Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp create mode 100644 Source/JavaScriptCore/jit/JITStubCall.h create mode 100644 Source/JavaScriptCore/jit/JITStubs.cpp create mode 100644 Source/JavaScriptCore/jit/JITStubs.h create mode 100644 Source/JavaScriptCore/jit/JSInterfaceJIT.h create mode 100644 Source/JavaScriptCore/jit/SpecializedThunkJIT.h create mode 100644 Source/JavaScriptCore/jit/ThunkGenerators.cpp create mode 100644 Source/JavaScriptCore/jit/ThunkGenerators.h (limited to 'Source/JavaScriptCore/jit') diff --git a/Source/JavaScriptCore/jit/ExecutableAllocator.cpp b/Source/JavaScriptCore/jit/ExecutableAllocator.cpp new file mode 100644 index 0000000..4530b38 --- /dev/null +++ b/Source/JavaScriptCore/jit/ExecutableAllocator.cpp @@ -0,0 +1,128 @@ +/* + * Copyright (C) 2008 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" + +#include "ExecutableAllocator.h" + +#if ENABLE(ASSEMBLER) + +namespace JSC { + +size_t ExecutableAllocator::pageSize = 0; + +#if ENABLE(EXECUTABLE_ALLOCATOR_DEMAND) + +void ExecutableAllocator::intializePageSize() +{ +#if CPU(ARMV5_OR_LOWER) + // The moving memory model (as used in ARMv5 and earlier platforms) + // on Symbian OS limits the number of chunks for each process to 16. + // To mitigate this limitation increase the pagesize to allocate + // fewer, larger chunks. Set the page size to 256 Kb to compensate + // for moving memory model limitation + ExecutableAllocator::pageSize = 256 * 1024; +#else + ExecutableAllocator::pageSize = WTF::pageSize(); +#endif +} + +ExecutablePool::Allocation ExecutablePool::systemAlloc(size_t size) +{ + PageAllocation allocation = PageAllocation::allocate(size, OSAllocator::JSJITCodePages, EXECUTABLE_POOL_WRITABLE, true); + if (!allocation) + CRASH(); + return allocation; +} + +void ExecutablePool::systemRelease(ExecutablePool::Allocation& allocation) +{ + allocation.deallocate(); +} + +bool ExecutableAllocator::isValid() const +{ + return true; +} + +bool ExecutableAllocator::underMemoryPressure() +{ + return false; +} + +size_t ExecutableAllocator::committedByteCount() +{ + return 0; +} + +#endif + +#if ENABLE(ASSEMBLER_WX_EXCLUSIVE) + +#if OS(WINDOWS) || OS(SYMBIAN) +#error "ASSEMBLER_WX_EXCLUSIVE not yet suported on this platform." +#endif + +void ExecutableAllocator::reprotectRegion(void* start, size_t size, ProtectionSeting setting) +{ + if (!pageSize) + intializePageSize(); + + // Calculate the start of the page containing this region, + // and account for this extra memory within size. + intptr_t startPtr = reinterpret_cast(start); + intptr_t pageStartPtr = startPtr & ~(pageSize - 1); + void* pageStart = reinterpret_cast(pageStartPtr); + size += (startPtr - pageStartPtr); + + // Round size up + size += (pageSize - 1); + size &= ~(pageSize - 1); + + mprotect(pageStart, size, (setting == Writable) ? 
PROTECTION_FLAGS_RW : PROTECTION_FLAGS_RX); +} + +#endif + +#if CPU(ARM_TRADITIONAL) && OS(LINUX) && COMPILER(RVCT) + +__asm void ExecutableAllocator::cacheFlush(void* code, size_t size) +{ + ARM + push {r7} + add r1, r1, r0 + mov r7, #0xf0000 + add r7, r7, #0x2 + mov r2, #0x0 + svc #0x0 + pop {r7} + bx lr +} + +#endif + +} + +#endif // HAVE(ASSEMBLER) diff --git a/Source/JavaScriptCore/jit/ExecutableAllocator.h b/Source/JavaScriptCore/jit/ExecutableAllocator.h new file mode 100644 index 0000000..d45f294 --- /dev/null +++ b/Source/JavaScriptCore/jit/ExecutableAllocator.h @@ -0,0 +1,362 @@ +/* + * Copyright (C) 2008 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef ExecutableAllocator_h +#define ExecutableAllocator_h +#include // for ptrdiff_t +#include +#include +#include +#include +#include +#include +#include + +#if OS(IOS) +#include +#include +#endif + +#if OS(SYMBIAN) +#include +#endif + +#if CPU(MIPS) && OS(LINUX) +#include +#endif + +#if OS(WINCE) +// From pkfuncs.h (private header file from the Platform Builder) +#define CACHE_SYNC_ALL 0x07F +extern "C" __declspec(dllimport) void CacheRangeFlush(LPVOID pAddr, DWORD dwLength, DWORD dwFlags); +#endif + +#if PLATFORM(BREWMP) +#include +#include +#include +#endif + +#define JIT_ALLOCATOR_PAGE_SIZE (ExecutableAllocator::pageSize) +#define JIT_ALLOCATOR_LARGE_ALLOC_SIZE (ExecutableAllocator::pageSize * 4) + +#if ENABLE(ASSEMBLER_WX_EXCLUSIVE) +#define PROTECTION_FLAGS_RW (PROT_READ | PROT_WRITE) +#define PROTECTION_FLAGS_RX (PROT_READ | PROT_EXEC) +#define EXECUTABLE_POOL_WRITABLE false +#else +#define EXECUTABLE_POOL_WRITABLE true +#endif + +namespace JSC { + +inline size_t roundUpAllocationSize(size_t request, size_t granularity) +{ + if ((std::numeric_limits::max() - granularity) <= request) + CRASH(); // Allocation is too large + + // Round up to next page boundary + size_t size = request + (granularity - 1); + size = size & ~(granularity - 1); + ASSERT(size >= request); + return size; +} + +} + +#if ENABLE(JIT) && ENABLE(ASSEMBLER) + +namespace JSC { + +class ExecutablePool : public RefCounted { +public: +#if ENABLE(EXECUTABLE_ALLOCATOR_DEMAND) + typedef PageAllocation Allocation; +#else + class Allocation { + public: + Allocation(void* base, size_t size) + : m_base(base) + , m_size(size) + { + } + void* base() { return m_base; } + size_t size() { return m_size; } + bool operator!() const { return !m_base; } + + private: + void* m_base; + size_t m_size; + }; +#endif + typedef Vector AllocationList; + + static PassRefPtr create(size_t n) + { + return adoptRef(new ExecutablePool(n)); + } + + void* alloc(size_t n) + { + ASSERT(m_freePtr <= m_end); + + // Round 'n' up to a multiple of word size; if all allocations are of + // word sized quantities, then all subsequent allocations will be aligned. + n = roundUpAllocationSize(n, sizeof(void*)); + + if (static_cast(n) < (m_end - m_freePtr)) { + void* result = m_freePtr; + m_freePtr += n; + return result; + } + + // Insufficient space to allocate in the existing pool + // so we need allocate into a new pool + return poolAllocate(n); + } + + void tryShrink(void* allocation, size_t oldSize, size_t newSize) + { + if (static_cast(allocation) + oldSize != m_freePtr) + return; + m_freePtr = static_cast(allocation) + roundUpAllocationSize(newSize, sizeof(void*)); + } + + ~ExecutablePool() + { + AllocationList::iterator end = m_pools.end(); + for (AllocationList::iterator ptr = m_pools.begin(); ptr != end; ++ptr) + ExecutablePool::systemRelease(*ptr); + } + + size_t available() const { return (m_pools.size() > 1) ? 
0 : m_end - m_freePtr; } + +private: + static Allocation systemAlloc(size_t n); + static void systemRelease(Allocation& alloc); + + ExecutablePool(size_t n); + + void* poolAllocate(size_t n); + + char* m_freePtr; + char* m_end; + AllocationList m_pools; +}; + +class ExecutableAllocator { + enum ProtectionSeting { Writable, Executable }; + +public: + static size_t pageSize; + ExecutableAllocator() + { + if (!pageSize) + intializePageSize(); + if (isValid()) + m_smallAllocationPool = ExecutablePool::create(JIT_ALLOCATOR_LARGE_ALLOC_SIZE); +#if !ENABLE(INTERPRETER) + else + CRASH(); +#endif + } + + bool isValid() const; + + static bool underMemoryPressure(); + + PassRefPtr poolForSize(size_t n) + { + // Try to fit in the existing small allocator + ASSERT(m_smallAllocationPool); + if (n < m_smallAllocationPool->available()) + return m_smallAllocationPool; + + // If the request is large, we just provide a unshared allocator + if (n > JIT_ALLOCATOR_LARGE_ALLOC_SIZE) + return ExecutablePool::create(n); + + // Create a new allocator + RefPtr pool = ExecutablePool::create(JIT_ALLOCATOR_LARGE_ALLOC_SIZE); + + // If the new allocator will result in more free space than in + // the current small allocator, then we will use it instead + if ((pool->available() - n) > m_smallAllocationPool->available()) + m_smallAllocationPool = pool; + return pool.release(); + } + +#if ENABLE(ASSEMBLER_WX_EXCLUSIVE) + static void makeWritable(void* start, size_t size) + { + reprotectRegion(start, size, Writable); + } + + static void makeExecutable(void* start, size_t size) + { + reprotectRegion(start, size, Executable); + } +#else + static void makeWritable(void*, size_t) {} + static void makeExecutable(void*, size_t) {} +#endif + + +#if CPU(X86) || CPU(X86_64) + static void cacheFlush(void*, size_t) + { + } +#elif CPU(MIPS) + static void cacheFlush(void* code, size_t size) + { +#if COMPILER(GCC) && GCC_VERSION_AT_LEAST(4,3,0) +#if WTF_MIPS_ISA_REV(2) && !GCC_VERSION_AT_LEAST(4,4,3) + int lineSize; + asm("rdhwr %0, $1" : "=r" (lineSize)); + // + // Modify "start" and "end" to avoid GCC 4.3.0-4.4.2 bug in + // mips_expand_synci_loop that may execute synci one more time. + // "start" points to the fisrt byte of the cache line. + // "end" points to the last byte of the line before the last cache line. + // Because size is always a multiple of 4, this is safe to set + // "end" to the last byte. 
+ // + intptr_t start = reinterpret_cast(code) & (-lineSize); + intptr_t end = ((reinterpret_cast(code) + size - 1) & (-lineSize)) - 1; + __builtin___clear_cache(reinterpret_cast(start), reinterpret_cast(end)); +#else + intptr_t end = reinterpret_cast(code) + size; + __builtin___clear_cache(reinterpret_cast(code), reinterpret_cast(end)); +#endif +#else + _flush_cache(reinterpret_cast(code), size, BCACHE); +#endif + } +#elif CPU(ARM_THUMB2) && OS(IOS) + static void cacheFlush(void* code, size_t size) + { + sys_cache_control(kCacheFunctionPrepareForExecution, code, size); + } +#elif CPU(ARM_THUMB2) && OS(LINUX) + static void cacheFlush(void* code, size_t size) + { + asm volatile ( + "push {r7}\n" + "mov r0, %0\n" + "mov r1, %1\n" + "movw r7, #0x2\n" + "movt r7, #0xf\n" + "movs r2, #0x0\n" + "svc 0x0\n" + "pop {r7}\n" + : + : "r" (code), "r" (reinterpret_cast(code) + size) + : "r0", "r1", "r2"); + } +#elif OS(SYMBIAN) + static void cacheFlush(void* code, size_t size) + { + User::IMB_Range(code, static_cast(code) + size); + } +#elif CPU(ARM_TRADITIONAL) && OS(LINUX) && COMPILER(RVCT) + static __asm void cacheFlush(void* code, size_t size); +#elif CPU(ARM_TRADITIONAL) && OS(LINUX) && COMPILER(GCC) + static void cacheFlush(void* code, size_t size) + { + asm volatile ( + "push {r7}\n" + "mov r0, %0\n" + "mov r1, %1\n" + "mov r7, #0xf0000\n" + "add r7, r7, #0x2\n" + "mov r2, #0x0\n" + "svc 0x0\n" + "pop {r7}\n" + : + : "r" (code), "r" (reinterpret_cast(code) + size) + : "r0", "r1", "r2"); + } +#elif OS(WINCE) + static void cacheFlush(void* code, size_t size) + { + CacheRangeFlush(code, size, CACHE_SYNC_ALL); + } +#elif PLATFORM(BREWMP) + static void cacheFlush(void* code, size_t size) + { + RefPtr memCache = createRefPtrInstance(AEECLSID_MemCache1); + IMemCache1_ClearCache(memCache.get(), reinterpret_cast(code), size, MEMSPACE_CACHE_FLUSH, MEMSPACE_DATACACHE); + IMemCache1_ClearCache(memCache.get(), reinterpret_cast(code), size, MEMSPACE_CACHE_INVALIDATE, MEMSPACE_INSTCACHE); + } +#else + #error "The cacheFlush support is missing on this platform." +#endif + static size_t committedByteCount(); + +private: + +#if ENABLE(ASSEMBLER_WX_EXCLUSIVE) + static void reprotectRegion(void*, size_t, ProtectionSeting); +#endif + + RefPtr m_smallAllocationPool; + static void intializePageSize(); +}; + +inline ExecutablePool::ExecutablePool(size_t n) +{ + size_t allocSize = roundUpAllocationSize(n, JIT_ALLOCATOR_PAGE_SIZE); + Allocation mem = systemAlloc(allocSize); + m_pools.append(mem); + m_freePtr = static_cast(mem.base()); + if (!m_freePtr) + CRASH(); // Failed to allocate + m_end = m_freePtr + allocSize; +} + +inline void* ExecutablePool::poolAllocate(size_t n) +{ + size_t allocSize = roundUpAllocationSize(n, JIT_ALLOCATOR_PAGE_SIZE); + + Allocation result = systemAlloc(allocSize); + if (!result.base()) + CRASH(); // Failed to allocate + + ASSERT(m_end >= m_freePtr); + if ((allocSize - n) > static_cast(m_end - m_freePtr)) { + // Replace allocation pool + m_freePtr = static_cast(result.base()) + n; + m_end = static_cast(result.base()) + allocSize; + } + + m_pools.append(result); + return result.base(); +} + +} + +#endif // ENABLE(JIT) && ENABLE(ASSEMBLER) + +#endif // !defined(ExecutableAllocator) diff --git a/Source/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp b/Source/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp new file mode 100644 index 0000000..e280b2d --- /dev/null +++ b/Source/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp @@ -0,0 +1,470 @@ +/* + * Copyright (C) 2009 Apple Inc. 
All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" + +#include "ExecutableAllocator.h" + +#if ENABLE(EXECUTABLE_ALLOCATOR_FIXED) + +#include + +#include "TCSpinLock.h" +#include +#include +#include +#include +#include + +#if CPU(X86_64) + // These limits suitable on 64-bit platforms (particularly x86-64, where we require all jumps to have a 2Gb max range). + #define VM_POOL_SIZE (2u * 1024u * 1024u * 1024u) // 2Gb + #define COALESCE_LIMIT (16u * 1024u * 1024u) // 16Mb +#else + // These limits are hopefully sensible on embedded platforms. + #define VM_POOL_SIZE (32u * 1024u * 1024u) // 32Mb + #define COALESCE_LIMIT (4u * 1024u * 1024u) // 4Mb +#endif + +using namespace WTF; + +namespace JSC { + +static size_t committedBytesCount = 0; +static SpinLock spinlock = SPINLOCK_INITIALIZER; + +// FreeListEntry describes a free chunk of memory, stored in the freeList. +struct FreeListEntry { + FreeListEntry(void* pointer, size_t size) + : pointer(pointer) + , size(size) + , nextEntry(0) + , less(0) + , greater(0) + , balanceFactor(0) + { + } + + // All entries of the same size share a single entry + // in the AVLTree, and are linked together in a linked + // list, using nextEntry. + void* pointer; + size_t size; + FreeListEntry* nextEntry; + + // These fields are used by AVLTree. + FreeListEntry* less; + FreeListEntry* greater; + int balanceFactor; +}; + +// Abstractor class for use in AVLTree. +// Nodes in the AVLTree are of type FreeListEntry, keyed on +// (and thus sorted by) their size. 
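+// It supplies the node accessors and key comparison functions that the AVLTree
+// template requires of its Abstractor parameter.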
+struct AVLTreeAbstractorForFreeList { + typedef FreeListEntry* handle; + typedef int32_t size; + typedef size_t key; + + handle get_less(handle h) { return h->less; } + void set_less(handle h, handle lh) { h->less = lh; } + handle get_greater(handle h) { return h->greater; } + void set_greater(handle h, handle gh) { h->greater = gh; } + int get_balance_factor(handle h) { return h->balanceFactor; } + void set_balance_factor(handle h, int bf) { h->balanceFactor = bf; } + + static handle null() { return 0; } + + int compare_key_key(key va, key vb) { return va - vb; } + int compare_key_node(key k, handle h) { return compare_key_key(k, h->size); } + int compare_node_node(handle h1, handle h2) { return compare_key_key(h1->size, h2->size); } +}; + +// Used to reverse sort an array of FreeListEntry pointers. +static int reverseSortFreeListEntriesByPointer(const void* leftPtr, const void* rightPtr) +{ + FreeListEntry* left = *(FreeListEntry**)leftPtr; + FreeListEntry* right = *(FreeListEntry**)rightPtr; + + return (intptr_t)(right->pointer) - (intptr_t)(left->pointer); +} + +// Used to reverse sort an array of pointers. +static int reverseSortCommonSizedAllocations(const void* leftPtr, const void* rightPtr) +{ + void* left = *(void**)leftPtr; + void* right = *(void**)rightPtr; + + return (intptr_t)right - (intptr_t)left; +} + +class FixedVMPoolAllocator +{ + // The free list is stored in a sorted tree. + typedef AVLTree SizeSortedFreeTree; + + void release(void* position, size_t size) + { + m_allocation.decommit(position, size); + addToCommittedByteCount(-static_cast(size)); + } + + void reuse(void* position, size_t size) + { + m_allocation.commit(position, size); + addToCommittedByteCount(static_cast(size)); + } + + // All addition to the free list should go through this method, rather than + // calling insert directly, to avoid multiple entries being added with the + // same key. All nodes being added should be singletons, they should not + // already be a part of a chain. + void addToFreeList(FreeListEntry* entry) + { + ASSERT(!entry->nextEntry); + + if (entry->size == m_commonSize) { + m_commonSizedAllocations.append(entry->pointer); + delete entry; + } else if (FreeListEntry* entryInFreeList = m_freeList.search(entry->size, m_freeList.EQUAL)) { + // m_freeList already contain an entry for this size - insert this node into the chain. + entry->nextEntry = entryInFreeList->nextEntry; + entryInFreeList->nextEntry = entry; + } else + m_freeList.insert(entry); + } + + // We do not attempt to coalesce addition, which may lead to fragmentation; + // instead we periodically perform a sweep to try to coalesce neighboring + // entries in m_freeList. Presently this is triggered at the point 16MB + // of memory has been released. + void coalesceFreeSpace() + { + Vector freeListEntries; + SizeSortedFreeTree::Iterator iter; + iter.start_iter_least(m_freeList); + + // Empty m_freeList into a Vector. + for (FreeListEntry* entry; (entry = *iter); ++iter) { + // Each entry in m_freeList might correspond to multiple + // free chunks of memory (of the same size). Walk the chain + // (this is likely of course only be one entry long!) adding + // each entry to the Vector (at reseting the next in chain + // pointer to separate each node out). + FreeListEntry* next; + do { + next = entry->nextEntry; + entry->nextEntry = 0; + freeListEntries.append(entry); + } while ((entry = next)); + } + // All entries are now in the Vector; purge the tree. 
+ m_freeList.purge(); + + // Reverse-sort the freeListEntries and m_commonSizedAllocations Vectors. + // We reverse-sort so that we can logically work forwards through memory, + // whilst popping items off the end of the Vectors using last() and removeLast(). + qsort(freeListEntries.begin(), freeListEntries.size(), sizeof(FreeListEntry*), reverseSortFreeListEntriesByPointer); + qsort(m_commonSizedAllocations.begin(), m_commonSizedAllocations.size(), sizeof(void*), reverseSortCommonSizedAllocations); + + // The entries from m_commonSizedAllocations that cannot be + // coalesced into larger chunks will be temporarily stored here. + Vector newCommonSizedAllocations; + + // Keep processing so long as entries remain in either of the vectors. + while (freeListEntries.size() || m_commonSizedAllocations.size()) { + // We're going to try to find a FreeListEntry node that we can coalesce onto. + FreeListEntry* coalescionEntry = 0; + + // Is the lowest addressed chunk of free memory of common-size, or is it in the free list? + if (m_commonSizedAllocations.size() && (!freeListEntries.size() || (m_commonSizedAllocations.last() < freeListEntries.last()->pointer))) { + // Pop an item from the m_commonSizedAllocations vector - this is the lowest + // addressed free chunk. Find out the begin and end addresses of the memory chunk. + void* begin = m_commonSizedAllocations.last(); + void* end = (void*)((intptr_t)begin + m_commonSize); + m_commonSizedAllocations.removeLast(); + + // Try to find another free chunk abutting onto the end of the one we have already found. + if (freeListEntries.size() && (freeListEntries.last()->pointer == end)) { + // There is an existing FreeListEntry for the next chunk of memory! + // we can reuse this. Pop it off the end of m_freeList. + coalescionEntry = freeListEntries.last(); + freeListEntries.removeLast(); + // Update the existing node to include the common-sized chunk that we also found. + coalescionEntry->pointer = (void*)((intptr_t)coalescionEntry->pointer - m_commonSize); + coalescionEntry->size += m_commonSize; + } else if (m_commonSizedAllocations.size() && (m_commonSizedAllocations.last() == end)) { + // There is a second common-sized chunk that can be coalesced. + // Allocate a new node. + m_commonSizedAllocations.removeLast(); + coalescionEntry = new FreeListEntry(begin, 2 * m_commonSize); + } else { + // Nope - this poor little guy is all on his own. :-( + // Add him into the newCommonSizedAllocations vector for now, we're + // going to end up adding him back into the m_commonSizedAllocations + // list when we're done. + newCommonSizedAllocations.append(begin); + continue; + } + } else { + ASSERT(freeListEntries.size()); + ASSERT(!m_commonSizedAllocations.size() || (freeListEntries.last()->pointer < m_commonSizedAllocations.last())); + // The lowest addressed item is from m_freeList; pop it from the Vector. + coalescionEntry = freeListEntries.last(); + freeListEntries.removeLast(); + } + + // Right, we have a FreeListEntry, we just need check if there is anything else + // to coalesce onto the end. + ASSERT(coalescionEntry); + while (true) { + // Calculate the end address of the chunk we have found so far. + void* end = (void*)((intptr_t)coalescionEntry->pointer - coalescionEntry->size); + + // Is there another chunk adjacent to the one we already have? + if (freeListEntries.size() && (freeListEntries.last()->pointer == end)) { + // Yes - another FreeListEntry -pop it from the list. 
+ FreeListEntry* coalescee = freeListEntries.last(); + freeListEntries.removeLast(); + // Add it's size onto our existing node. + coalescionEntry->size += coalescee->size; + delete coalescee; + } else if (m_commonSizedAllocations.size() && (m_commonSizedAllocations.last() == end)) { + // We can coalesce the next common-sized chunk. + m_commonSizedAllocations.removeLast(); + coalescionEntry->size += m_commonSize; + } else + break; // Nope, nothing to be added - stop here. + } + + // We've coalesced everything we can onto the current chunk. + // Add it back into m_freeList. + addToFreeList(coalescionEntry); + } + + // All chunks of free memory larger than m_commonSize should be + // back in m_freeList by now. All that remains to be done is to + // copy the contents on the newCommonSizedAllocations back into + // the m_commonSizedAllocations Vector. + ASSERT(m_commonSizedAllocations.size() == 0); + m_commonSizedAllocations.append(newCommonSizedAllocations); + } + +public: + + FixedVMPoolAllocator(size_t commonSize, size_t totalHeapSize) + : m_commonSize(commonSize) + , m_countFreedSinceLastCoalesce(0) + { + m_allocation = PageReservation::reserve(totalHeapSize, OSAllocator::JSJITCodePages, EXECUTABLE_POOL_WRITABLE, true); + + if (!!m_allocation) + m_freeList.insert(new FreeListEntry(m_allocation.base(), m_allocation.size())); +#if !ENABLE(INTERPRETER) + else + CRASH(); +#endif + } + + ExecutablePool::Allocation alloc(size_t size) + { + return ExecutablePool::Allocation(allocInternal(size), size); + } + + void free(ExecutablePool::Allocation allocation) + { + void* pointer = allocation.base(); + size_t size = allocation.size(); + + ASSERT(!!m_allocation); + // Call release to report to the operating system that this + // memory is no longer in use, and need not be paged out. + ASSERT(isWithinVMPool(pointer, size)); + release(pointer, size); + + // Common-sized allocations are stored in the m_commonSizedAllocations + // vector; all other freed chunks are added to m_freeList. + if (size == m_commonSize) + m_commonSizedAllocations.append(pointer); + else + addToFreeList(new FreeListEntry(pointer, size)); + + // Do some housekeeping. Every time we reach a point that + // 16MB of allocations have been freed, sweep m_freeList + // coalescing any neighboring fragments. + m_countFreedSinceLastCoalesce += size; + if (m_countFreedSinceLastCoalesce >= COALESCE_LIMIT) { + m_countFreedSinceLastCoalesce = 0; + coalesceFreeSpace(); + } + } + + bool isValid() const { return !!m_allocation; } + +private: + void* allocInternal(size_t size) + { +#if ENABLE(INTERPRETER) + if (!m_allocation) + return 0; +#else + ASSERT(!!m_allocation); +#endif + void* result; + + // Freed allocations of the common size are not stored back into the main + // m_freeList, but are instead stored in a separate vector. If the request + // is for a common sized allocation, check this list. + if ((size == m_commonSize) && m_commonSizedAllocations.size()) { + result = m_commonSizedAllocations.last(); + m_commonSizedAllocations.removeLast(); + } else { + // Search m_freeList for a suitable sized chunk to allocate memory from. + FreeListEntry* entry = m_freeList.search(size, m_freeList.GREATER_EQUAL); + + // This would be bad news. + if (!entry) { + // Errk! Lets take a last-ditch desperation attempt at defragmentation... + coalesceFreeSpace(); + // Did that free up a large enough chunk? + entry = m_freeList.search(size, m_freeList.GREATER_EQUAL); + // No?... 
*BOOM!* + if (!entry) + CRASH(); + } + ASSERT(entry->size != m_commonSize); + + // Remove the entry from m_freeList. But! - + // Each entry in the tree may represent a chain of multiple chunks of the + // same size, and we only want to remove one on them. So, if this entry + // does have a chain, just remove the first-but-one item from the chain. + if (FreeListEntry* next = entry->nextEntry) { + // We're going to leave 'entry' in the tree; remove 'next' from its chain. + entry->nextEntry = next->nextEntry; + next->nextEntry = 0; + entry = next; + } else + m_freeList.remove(entry->size); + + // Whoo!, we have a result! + ASSERT(entry->size >= size); + result = entry->pointer; + + // If the allocation exactly fits the chunk we found in the, + // m_freeList then the FreeListEntry node is no longer needed. + if (entry->size == size) + delete entry; + else { + // There is memory left over, and it is not of the common size. + // We can reuse the existing FreeListEntry node to add this back + // into m_freeList. + entry->pointer = (void*)((intptr_t)entry->pointer + size); + entry->size -= size; + addToFreeList(entry); + } + } + + // Call reuse to report to the operating system that this memory is in use. + ASSERT(isWithinVMPool(result, size)); + reuse(result, size); + return result; + } + +#ifndef NDEBUG + bool isWithinVMPool(void* pointer, size_t size) + { + return pointer >= m_allocation.base() && (reinterpret_cast(pointer) + size <= reinterpret_cast(m_allocation.base()) + m_allocation.size()); + } +#endif + + void addToCommittedByteCount(long byteCount) + { + ASSERT(spinlock.IsHeld()); + ASSERT(static_cast(committedBytesCount) + byteCount > -1); + committedBytesCount += byteCount; + } + + // Freed space from the most common sized allocations will be held in this list, ... + const size_t m_commonSize; + Vector m_commonSizedAllocations; + + // ... and all other freed allocations are held in m_freeList. + SizeSortedFreeTree m_freeList; + + // This is used for housekeeping, to trigger defragmentation of the freed lists. + size_t m_countFreedSinceLastCoalesce; + + PageReservation m_allocation; +}; + +size_t ExecutableAllocator::committedByteCount() +{ + SpinLockHolder lockHolder(&spinlock); + return committedBytesCount; +} + +void ExecutableAllocator::intializePageSize() +{ + ExecutableAllocator::pageSize = getpagesize(); +} + +static FixedVMPoolAllocator* allocator = 0; +static size_t allocatedCount = 0; + +bool ExecutableAllocator::isValid() const +{ + SpinLockHolder lock_holder(&spinlock); + if (!allocator) + allocator = new FixedVMPoolAllocator(JIT_ALLOCATOR_LARGE_ALLOC_SIZE, VM_POOL_SIZE); + return allocator->isValid(); +} + +bool ExecutableAllocator::underMemoryPressure() +{ + // Technically we should take the spin lock here, but we don't care if we get stale data. + // This is only really a heuristic anyway. 
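+    // Report memory pressure once more than half of the fixed VM pool has been handed out.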
+ return allocatedCount > (VM_POOL_SIZE / 2); +} + +ExecutablePool::Allocation ExecutablePool::systemAlloc(size_t size) +{ + SpinLockHolder lock_holder(&spinlock); + ASSERT(allocator); + allocatedCount += size; + return allocator->alloc(size); +} + +void ExecutablePool::systemRelease(ExecutablePool::Allocation& allocation) +{ + SpinLockHolder lock_holder(&spinlock); + ASSERT(allocator); + allocatedCount -= allocation.size(); + allocator->free(allocation); +} + +} + + +#endif // HAVE(ASSEMBLER) diff --git a/Source/JavaScriptCore/jit/JIT.cpp b/Source/JavaScriptCore/jit/JIT.cpp new file mode 100644 index 0000000..01401a7 --- /dev/null +++ b/Source/JavaScriptCore/jit/JIT.cpp @@ -0,0 +1,670 @@ +/* + * Copyright (C) 2008, 2009 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" + +#if ENABLE(JIT) +#include "JIT.h" + +// This probably does not belong here; adding here for now as a quick Windows build fix. 
+#if ENABLE(ASSEMBLER) && CPU(X86) && !OS(MAC_OS_X) +#include "MacroAssembler.h" +JSC::MacroAssemblerX86Common::SSE2CheckState JSC::MacroAssemblerX86Common::s_sse2CheckState = NotCheckedSSE2; +#endif + +#include "CodeBlock.h" +#include "Interpreter.h" +#include "JITInlineMethods.h" +#include "JITStubCall.h" +#include "JSArray.h" +#include "JSFunction.h" +#include "LinkBuffer.h" +#include "RepatchBuffer.h" +#include "ResultType.h" +#include "SamplingTool.h" + +#ifndef NDEBUG +#include +#endif + +using namespace std; + +namespace JSC { + +void ctiPatchNearCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction) +{ + RepatchBuffer repatchBuffer(codeblock); + repatchBuffer.relinkNearCallerToTrampoline(returnAddress, newCalleeFunction); +} + +void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction) +{ + RepatchBuffer repatchBuffer(codeblock); + repatchBuffer.relinkCallerToTrampoline(returnAddress, newCalleeFunction); +} + +void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, FunctionPtr newCalleeFunction) +{ + RepatchBuffer repatchBuffer(codeblock); + repatchBuffer.relinkCallerToFunction(returnAddress, newCalleeFunction); +} + +JIT::JIT(JSGlobalData* globalData, CodeBlock* codeBlock, void* linkerOffset) + : m_interpreter(globalData->interpreter) + , m_globalData(globalData) + , m_codeBlock(codeBlock) + , m_labels(codeBlock ? codeBlock->instructions().size() : 0) + , m_propertyAccessCompilationInfo(codeBlock ? codeBlock->numberOfStructureStubInfos() : 0) + , m_callStructureStubCompilationInfo(codeBlock ? codeBlock->numberOfCallLinkInfos() : 0) + , m_bytecodeOffset((unsigned)-1) +#if USE(JSVALUE32_64) + , m_jumpTargetIndex(0) + , m_mappedBytecodeOffset((unsigned)-1) + , m_mappedVirtualRegisterIndex((unsigned)-1) + , m_mappedTag((RegisterID)-1) + , m_mappedPayload((RegisterID)-1) +#else + , m_lastResultBytecodeRegister(std::numeric_limits::max()) + , m_jumpTargetsPosition(0) +#endif + , m_linkerOffset(linkerOffset) +{ +} + +#if USE(JSVALUE32_64) +void JIT::emitTimeoutCheck() +{ + Jump skipTimeout = branchSub32(NonZero, Imm32(1), timeoutCheckRegister); + JITStubCall stubCall(this, cti_timeout_check); + stubCall.addArgument(regT1, regT0); // save last result registers. + stubCall.call(timeoutCheckRegister); + stubCall.getArgument(0, regT1, regT0); // reload last result registers. 
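+    // The fast path (timeout counter still non-zero) resumes here.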
+ skipTimeout.link(this); +} +#else +void JIT::emitTimeoutCheck() +{ + Jump skipTimeout = branchSub32(NonZero, Imm32(1), timeoutCheckRegister); + JITStubCall(this, cti_timeout_check).call(timeoutCheckRegister); + skipTimeout.link(this); + + killLastResultRegister(); +} +#endif + +#define NEXT_OPCODE(name) \ + m_bytecodeOffset += OPCODE_LENGTH(name); \ + break; + +#if USE(JSVALUE32_64) +#define DEFINE_BINARY_OP(name) \ + case name: { \ + JITStubCall stubCall(this, cti_##name); \ + stubCall.addArgument(currentInstruction[2].u.operand); \ + stubCall.addArgument(currentInstruction[3].u.operand); \ + stubCall.call(currentInstruction[1].u.operand); \ + NEXT_OPCODE(name); \ + } + +#define DEFINE_UNARY_OP(name) \ + case name: { \ + JITStubCall stubCall(this, cti_##name); \ + stubCall.addArgument(currentInstruction[2].u.operand); \ + stubCall.call(currentInstruction[1].u.operand); \ + NEXT_OPCODE(name); \ + } + +#else // USE(JSVALUE32_64) + +#define DEFINE_BINARY_OP(name) \ + case name: { \ + JITStubCall stubCall(this, cti_##name); \ + stubCall.addArgument(currentInstruction[2].u.operand, regT2); \ + stubCall.addArgument(currentInstruction[3].u.operand, regT2); \ + stubCall.call(currentInstruction[1].u.operand); \ + NEXT_OPCODE(name); \ + } + +#define DEFINE_UNARY_OP(name) \ + case name: { \ + JITStubCall stubCall(this, cti_##name); \ + stubCall.addArgument(currentInstruction[2].u.operand, regT2); \ + stubCall.call(currentInstruction[1].u.operand); \ + NEXT_OPCODE(name); \ + } +#endif // USE(JSVALUE32_64) + +#define DEFINE_OP(name) \ + case name: { \ + emit_##name(currentInstruction); \ + NEXT_OPCODE(name); \ + } + +#define DEFINE_SLOWCASE_OP(name) \ + case name: { \ + emitSlow_##name(currentInstruction, iter); \ + NEXT_OPCODE(name); \ + } + +void JIT::privateCompileMainPass() +{ + Instruction* instructionsBegin = m_codeBlock->instructions().begin(); + unsigned instructionCount = m_codeBlock->instructions().size(); + + m_propertyAccessInstructionIndex = 0; + m_globalResolveInfoIndex = 0; + m_callLinkInfoIndex = 0; + + for (m_bytecodeOffset = 0; m_bytecodeOffset < instructionCount; ) { + Instruction* currentInstruction = instructionsBegin + m_bytecodeOffset; + ASSERT_WITH_MESSAGE(m_interpreter->isOpcode(currentInstruction->u.opcode), "privateCompileMainPass gone bad @ %d", m_bytecodeOffset); + +#if ENABLE(OPCODE_SAMPLING) + if (m_bytecodeOffset > 0) // Avoid the overhead of sampling op_enter twice. 
+ sampleInstruction(currentInstruction); +#endif + +#if USE(JSVALUE64) + if (m_labels[m_bytecodeOffset].isUsed()) + killLastResultRegister(); +#endif + + m_labels[m_bytecodeOffset] = label(); + + switch (m_interpreter->getOpcodeID(currentInstruction->u.opcode)) { + DEFINE_BINARY_OP(op_del_by_val) + DEFINE_BINARY_OP(op_in) + DEFINE_BINARY_OP(op_less) + DEFINE_BINARY_OP(op_lesseq) + DEFINE_UNARY_OP(op_is_boolean) + DEFINE_UNARY_OP(op_is_function) + DEFINE_UNARY_OP(op_is_number) + DEFINE_UNARY_OP(op_is_object) + DEFINE_UNARY_OP(op_is_string) + DEFINE_UNARY_OP(op_is_undefined) +#if USE(JSVALUE64) + DEFINE_UNARY_OP(op_negate) +#endif + DEFINE_UNARY_OP(op_typeof) + + DEFINE_OP(op_add) + DEFINE_OP(op_bitand) + DEFINE_OP(op_bitnot) + DEFINE_OP(op_bitor) + DEFINE_OP(op_bitxor) + DEFINE_OP(op_call) + DEFINE_OP(op_call_eval) + DEFINE_OP(op_call_varargs) + DEFINE_OP(op_catch) + DEFINE_OP(op_construct) + DEFINE_OP(op_get_callee) + DEFINE_OP(op_create_this) + DEFINE_OP(op_convert_this) + DEFINE_OP(op_convert_this_strict) + DEFINE_OP(op_init_lazy_reg) + DEFINE_OP(op_create_arguments) + DEFINE_OP(op_debug) + DEFINE_OP(op_del_by_id) + DEFINE_OP(op_div) + DEFINE_OP(op_end) + DEFINE_OP(op_enter) + DEFINE_OP(op_create_activation) + DEFINE_OP(op_eq) + DEFINE_OP(op_eq_null) + DEFINE_OP(op_get_by_id) + DEFINE_OP(op_get_arguments_length) + DEFINE_OP(op_get_by_val) + DEFINE_OP(op_get_argument_by_val) + DEFINE_OP(op_get_by_pname) + DEFINE_OP(op_get_global_var) + DEFINE_OP(op_get_pnames) + DEFINE_OP(op_get_scoped_var) + DEFINE_OP(op_check_has_instance) + DEFINE_OP(op_instanceof) + DEFINE_OP(op_jeq_null) + DEFINE_OP(op_jfalse) + DEFINE_OP(op_jmp) + DEFINE_OP(op_jmp_scopes) + DEFINE_OP(op_jneq_null) + DEFINE_OP(op_jneq_ptr) + DEFINE_OP(op_jnless) + DEFINE_OP(op_jless) + DEFINE_OP(op_jlesseq) + DEFINE_OP(op_jnlesseq) + DEFINE_OP(op_jsr) + DEFINE_OP(op_jtrue) + DEFINE_OP(op_load_varargs) + DEFINE_OP(op_loop) + DEFINE_OP(op_loop_if_less) + DEFINE_OP(op_loop_if_lesseq) + DEFINE_OP(op_loop_if_true) + DEFINE_OP(op_loop_if_false) + DEFINE_OP(op_lshift) + DEFINE_OP(op_method_check) + DEFINE_OP(op_mod) + DEFINE_OP(op_mov) + DEFINE_OP(op_mul) +#if USE(JSVALUE32_64) + DEFINE_OP(op_negate) +#endif + DEFINE_OP(op_neq) + DEFINE_OP(op_neq_null) + DEFINE_OP(op_new_array) + DEFINE_OP(op_new_func) + DEFINE_OP(op_new_func_exp) + DEFINE_OP(op_new_object) + DEFINE_OP(op_new_regexp) + DEFINE_OP(op_next_pname) + DEFINE_OP(op_not) + DEFINE_OP(op_nstricteq) + DEFINE_OP(op_pop_scope) + DEFINE_OP(op_post_dec) + DEFINE_OP(op_post_inc) + DEFINE_OP(op_pre_dec) + DEFINE_OP(op_pre_inc) + DEFINE_OP(op_profile_did_call) + DEFINE_OP(op_profile_will_call) + DEFINE_OP(op_push_new_scope) + DEFINE_OP(op_push_scope) + DEFINE_OP(op_put_by_id) + DEFINE_OP(op_put_by_index) + DEFINE_OP(op_put_by_val) + DEFINE_OP(op_put_getter) + DEFINE_OP(op_put_global_var) + DEFINE_OP(op_put_scoped_var) + DEFINE_OP(op_put_setter) + DEFINE_OP(op_resolve) + DEFINE_OP(op_resolve_base) + DEFINE_OP(op_ensure_property_exists) + DEFINE_OP(op_resolve_global) + DEFINE_OP(op_resolve_global_dynamic) + DEFINE_OP(op_resolve_skip) + DEFINE_OP(op_resolve_with_base) + DEFINE_OP(op_ret) + DEFINE_OP(op_call_put_result) + DEFINE_OP(op_ret_object_or_this) + DEFINE_OP(op_rshift) + DEFINE_OP(op_urshift) + DEFINE_OP(op_sret) + DEFINE_OP(op_strcat) + DEFINE_OP(op_stricteq) + DEFINE_OP(op_sub) + DEFINE_OP(op_switch_char) + DEFINE_OP(op_switch_imm) + DEFINE_OP(op_switch_string) + DEFINE_OP(op_tear_off_activation) + DEFINE_OP(op_tear_off_arguments) + DEFINE_OP(op_throw) + 
DEFINE_OP(op_throw_reference_error) + DEFINE_OP(op_throw_syntax_error) + DEFINE_OP(op_to_jsnumber) + DEFINE_OP(op_to_primitive) + + case op_get_array_length: + case op_get_by_id_chain: + case op_get_by_id_generic: + case op_get_by_id_proto: + case op_get_by_id_proto_list: + case op_get_by_id_self: + case op_get_by_id_self_list: + case op_get_by_id_getter_chain: + case op_get_by_id_getter_proto: + case op_get_by_id_getter_proto_list: + case op_get_by_id_getter_self: + case op_get_by_id_getter_self_list: + case op_get_by_id_custom_chain: + case op_get_by_id_custom_proto: + case op_get_by_id_custom_proto_list: + case op_get_by_id_custom_self: + case op_get_by_id_custom_self_list: + case op_get_string_length: + case op_put_by_id_generic: + case op_put_by_id_replace: + case op_put_by_id_transition: + ASSERT_NOT_REACHED(); + } + } + + ASSERT(m_propertyAccessInstructionIndex == m_codeBlock->numberOfStructureStubInfos()); + ASSERT(m_callLinkInfoIndex == m_codeBlock->numberOfCallLinkInfos()); + +#ifndef NDEBUG + // Reset this, in order to guard its use with ASSERTs. + m_bytecodeOffset = (unsigned)-1; +#endif +} + + +void JIT::privateCompileLinkPass() +{ + unsigned jmpTableCount = m_jmpTable.size(); + for (unsigned i = 0; i < jmpTableCount; ++i) + m_jmpTable[i].from.linkTo(m_labels[m_jmpTable[i].toBytecodeOffset], this); + m_jmpTable.clear(); +} + +void JIT::privateCompileSlowCases() +{ + Instruction* instructionsBegin = m_codeBlock->instructions().begin(); + + m_propertyAccessInstructionIndex = 0; + m_globalResolveInfoIndex = 0; + m_callLinkInfoIndex = 0; + + for (Vector::iterator iter = m_slowCases.begin(); iter != m_slowCases.end();) { +#if USE(JSVALUE64) + killLastResultRegister(); +#endif + + m_bytecodeOffset = iter->to; +#ifndef NDEBUG + unsigned firstTo = m_bytecodeOffset; +#endif + Instruction* currentInstruction = instructionsBegin + m_bytecodeOffset; + + switch (m_interpreter->getOpcodeID(currentInstruction->u.opcode)) { + DEFINE_SLOWCASE_OP(op_add) + DEFINE_SLOWCASE_OP(op_bitand) + DEFINE_SLOWCASE_OP(op_bitnot) + DEFINE_SLOWCASE_OP(op_bitor) + DEFINE_SLOWCASE_OP(op_bitxor) + DEFINE_SLOWCASE_OP(op_call) + DEFINE_SLOWCASE_OP(op_call_eval) + DEFINE_SLOWCASE_OP(op_call_varargs) + DEFINE_SLOWCASE_OP(op_construct) + DEFINE_SLOWCASE_OP(op_convert_this) + DEFINE_SLOWCASE_OP(op_convert_this_strict) + DEFINE_SLOWCASE_OP(op_div) + DEFINE_SLOWCASE_OP(op_eq) + DEFINE_SLOWCASE_OP(op_get_by_id) + DEFINE_SLOWCASE_OP(op_get_arguments_length) + DEFINE_SLOWCASE_OP(op_get_by_val) + DEFINE_SLOWCASE_OP(op_get_argument_by_val) + DEFINE_SLOWCASE_OP(op_get_by_pname) + DEFINE_SLOWCASE_OP(op_check_has_instance) + DEFINE_SLOWCASE_OP(op_instanceof) + DEFINE_SLOWCASE_OP(op_jfalse) + DEFINE_SLOWCASE_OP(op_jnless) + DEFINE_SLOWCASE_OP(op_jless) + DEFINE_SLOWCASE_OP(op_jlesseq) + DEFINE_SLOWCASE_OP(op_jnlesseq) + DEFINE_SLOWCASE_OP(op_jtrue) + DEFINE_SLOWCASE_OP(op_load_varargs) + DEFINE_SLOWCASE_OP(op_loop_if_less) + DEFINE_SLOWCASE_OP(op_loop_if_lesseq) + DEFINE_SLOWCASE_OP(op_loop_if_true) + DEFINE_SLOWCASE_OP(op_loop_if_false) + DEFINE_SLOWCASE_OP(op_lshift) + DEFINE_SLOWCASE_OP(op_method_check) + DEFINE_SLOWCASE_OP(op_mod) + DEFINE_SLOWCASE_OP(op_mul) +#if USE(JSVALUE32_64) + DEFINE_SLOWCASE_OP(op_negate) +#endif + DEFINE_SLOWCASE_OP(op_neq) + DEFINE_SLOWCASE_OP(op_not) + DEFINE_SLOWCASE_OP(op_nstricteq) + DEFINE_SLOWCASE_OP(op_post_dec) + DEFINE_SLOWCASE_OP(op_post_inc) + DEFINE_SLOWCASE_OP(op_pre_dec) + DEFINE_SLOWCASE_OP(op_pre_inc) + DEFINE_SLOWCASE_OP(op_put_by_id) + DEFINE_SLOWCASE_OP(op_put_by_val) + 
DEFINE_SLOWCASE_OP(op_resolve_global) + DEFINE_SLOWCASE_OP(op_resolve_global_dynamic) + DEFINE_SLOWCASE_OP(op_rshift) + DEFINE_SLOWCASE_OP(op_urshift) + DEFINE_SLOWCASE_OP(op_stricteq) + DEFINE_SLOWCASE_OP(op_sub) + DEFINE_SLOWCASE_OP(op_to_jsnumber) + DEFINE_SLOWCASE_OP(op_to_primitive) + default: + ASSERT_NOT_REACHED(); + } + + ASSERT_WITH_MESSAGE(iter == m_slowCases.end() || firstTo != iter->to,"Not enough jumps linked in slow case codegen."); + ASSERT_WITH_MESSAGE(firstTo == (iter - 1)->to, "Too many jumps linked in slow case codegen."); + + emitJumpSlowToHot(jump(), 0); + } + +#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) + ASSERT(m_propertyAccessInstructionIndex == m_codeBlock->numberOfStructureStubInfos()); +#endif + ASSERT(m_callLinkInfoIndex == m_codeBlock->numberOfCallLinkInfos()); + +#ifndef NDEBUG + // Reset this, in order to guard its use with ASSERTs. + m_bytecodeOffset = (unsigned)-1; +#endif +} + +JITCode JIT::privateCompile(CodePtr* functionEntryArityCheck) +{ + // Could use a pop_m, but would need to offset the following instruction if so. + preserveReturnAddressAfterCall(regT2); + emitPutToCallFrameHeader(regT2, RegisterFile::ReturnPC); + + Label beginLabel(this); + + sampleCodeBlock(m_codeBlock); +#if ENABLE(OPCODE_SAMPLING) + sampleInstruction(m_codeBlock->instructions().begin()); +#endif + + Jump registerFileCheck; + if (m_codeBlock->codeType() == FunctionCode) { + // In the case of a fast linked call, we do not set this up in the caller. + emitPutImmediateToCallFrameHeader(m_codeBlock, RegisterFile::CodeBlock); + + addPtr(Imm32(m_codeBlock->m_numCalleeRegisters * sizeof(Register)), callFrameRegister, regT1); + registerFileCheck = branchPtr(Below, AbsoluteAddress(&m_globalData->interpreter->registerFile().m_end), regT1); + } + + Label functionBody = label(); + + privateCompileMainPass(); + privateCompileLinkPass(); + privateCompileSlowCases(); + + Label arityCheck; + Call callArityCheck; + if (m_codeBlock->codeType() == FunctionCode) { + registerFileCheck.link(this); + m_bytecodeOffset = 0; + JITStubCall(this, cti_register_file_check).call(); +#ifndef NDEBUG + m_bytecodeOffset = (unsigned)-1; // Reset this, in order to guard its use with ASSERTs. +#endif + jump(functionBody); + + arityCheck = label(); + preserveReturnAddressAfterCall(regT2); + emitPutToCallFrameHeader(regT2, RegisterFile::ReturnPC); + branch32(Equal, regT1, Imm32(m_codeBlock->m_numParameters)).linkTo(beginLabel, this); + restoreArgumentReference(); + callArityCheck = call(); + move(regT0, callFrameRegister); + jump(beginLabel); + } + + ASSERT(m_jmpTable.isEmpty()); + + LinkBuffer patchBuffer(this, m_globalData->executableAllocator.poolForSize(m_assembler.size()), m_linkerOffset); + + // Translate vPC offsets into addresses in JIT generated code, for switch tables. + for (unsigned i = 0; i < m_switches.size(); ++i) { + SwitchRecord record = m_switches[i]; + unsigned bytecodeOffset = record.bytecodeOffset; + + if (record.type != SwitchRecord::String) { + ASSERT(record.type == SwitchRecord::Immediate || record.type == SwitchRecord::Character); + ASSERT(record.jumpTable.simpleJumpTable->branchOffsets.size() == record.jumpTable.simpleJumpTable->ctiOffsets.size()); + + record.jumpTable.simpleJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeOffset + record.defaultOffset]); + + for (unsigned j = 0; j < record.jumpTable.simpleJumpTable->branchOffsets.size(); ++j) { + unsigned offset = record.jumpTable.simpleJumpTable->branchOffsets[j]; + record.jumpTable.simpleJumpTable->ctiOffsets[j] = offset ? 
patchBuffer.locationOf(m_labels[bytecodeOffset + offset]) : record.jumpTable.simpleJumpTable->ctiDefault; + } + } else { + ASSERT(record.type == SwitchRecord::String); + + record.jumpTable.stringJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeOffset + record.defaultOffset]); + + StringJumpTable::StringOffsetTable::iterator end = record.jumpTable.stringJumpTable->offsetTable.end(); + for (StringJumpTable::StringOffsetTable::iterator it = record.jumpTable.stringJumpTable->offsetTable.begin(); it != end; ++it) { + unsigned offset = it->second.branchOffset; + it->second.ctiOffset = offset ? patchBuffer.locationOf(m_labels[bytecodeOffset + offset]) : record.jumpTable.stringJumpTable->ctiDefault; + } + } + } + + for (size_t i = 0; i < m_codeBlock->numberOfExceptionHandlers(); ++i) { + HandlerInfo& handler = m_codeBlock->exceptionHandler(i); + handler.nativeCode = patchBuffer.locationOf(m_labels[handler.target]); + } + + for (Vector::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) { + if (iter->to) + patchBuffer.link(iter->from, FunctionPtr(iter->to)); + } + + if (m_codeBlock->needsCallReturnIndices()) { + m_codeBlock->callReturnIndexVector().reserveCapacity(m_calls.size()); + for (Vector::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) + m_codeBlock->callReturnIndexVector().append(CallReturnOffsetToBytecodeOffset(patchBuffer.returnAddressOffset(iter->from), iter->bytecodeOffset)); + } + + // Link absolute addresses for jsr + for (Vector::iterator iter = m_jsrSites.begin(); iter != m_jsrSites.end(); ++iter) + patchBuffer.patch(iter->storeLocation, patchBuffer.locationOf(iter->target).executableAddress()); + +#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) + for (unsigned i = 0; i < m_codeBlock->numberOfStructureStubInfos(); ++i) { + StructureStubInfo& info = m_codeBlock->structureStubInfo(i); + info.callReturnLocation = patchBuffer.locationOf(m_propertyAccessCompilationInfo[i].callReturnLocation); + info.hotPathBegin = patchBuffer.locationOf(m_propertyAccessCompilationInfo[i].hotPathBegin); + } +#endif +#if ENABLE(JIT_OPTIMIZE_CALL) + for (unsigned i = 0; i < m_codeBlock->numberOfCallLinkInfos(); ++i) { + CallLinkInfo& info = m_codeBlock->callLinkInfo(i); + info.ownerCodeBlock = m_codeBlock; + info.callReturnLocation = patchBuffer.locationOfNearCall(m_callStructureStubCompilationInfo[i].callReturnLocation); + info.hotPathBegin = patchBuffer.locationOf(m_callStructureStubCompilationInfo[i].hotPathBegin); + info.hotPathOther = patchBuffer.locationOfNearCall(m_callStructureStubCompilationInfo[i].hotPathOther); + } +#endif + unsigned methodCallCount = m_methodCallCompilationInfo.size(); + m_codeBlock->addMethodCallLinkInfos(methodCallCount); + for (unsigned i = 0; i < methodCallCount; ++i) { + MethodCallLinkInfo& info = m_codeBlock->methodCallLinkInfo(i); + info.structureLabel = patchBuffer.locationOf(m_methodCallCompilationInfo[i].structureToCompare); + info.callReturnLocation = m_codeBlock->structureStubInfo(m_methodCallCompilationInfo[i].propertyAccessIndex).callReturnLocation; + } + + if (m_codeBlock->codeType() == FunctionCode && functionEntryArityCheck) { + patchBuffer.link(callArityCheck, FunctionPtr(m_codeBlock->m_isConstructor ? 
cti_op_construct_arityCheck : cti_op_call_arityCheck)); + *functionEntryArityCheck = patchBuffer.locationOf(arityCheck); + } + + return patchBuffer.finalizeCode(); +} + +#if USE(JSVALUE64) +void JIT::emitGetVariableObjectRegister(RegisterID variableObject, int index, RegisterID dst) +{ + loadPtr(Address(variableObject, OBJECT_OFFSETOF(JSVariableObject, d)), dst); + loadPtr(Address(dst, OBJECT_OFFSETOF(JSVariableObject::JSVariableObjectData, registers)), dst); + loadPtr(Address(dst, index * sizeof(Register)), dst); +} + +void JIT::emitPutVariableObjectRegister(RegisterID src, RegisterID variableObject, int index) +{ + loadPtr(Address(variableObject, OBJECT_OFFSETOF(JSVariableObject, d)), variableObject); + loadPtr(Address(variableObject, OBJECT_OFFSETOF(JSVariableObject::JSVariableObjectData, registers)), variableObject); + storePtr(src, Address(variableObject, index * sizeof(Register))); +} +#endif + +#if ENABLE(JIT_OPTIMIZE_CALL) +void JIT::unlinkCallOrConstruct(CallLinkInfo* callLinkInfo) +{ + // When the JSFunction is deleted the pointer embedded in the instruction stream will no longer be valid + // (and, if a new JSFunction happened to be constructed at the same location, we could get a false positive + // match). Reset the check so it no longer matches. + RepatchBuffer repatchBuffer(callLinkInfo->ownerCodeBlock); +#if USE(JSVALUE32_64) + repatchBuffer.repatch(callLinkInfo->hotPathBegin, 0); +#else + repatchBuffer.repatch(callLinkInfo->hotPathBegin, JSValue::encode(JSValue())); +#endif +} + +void JIT::linkCall(JSFunction* callee, CodeBlock* callerCodeBlock, CodeBlock* calleeCodeBlock, JIT::CodePtr code, CallLinkInfo* callLinkInfo, int callerArgCount, JSGlobalData* globalData) +{ + RepatchBuffer repatchBuffer(callerCodeBlock); + + // Currently we only link calls with the exact number of arguments. + // If this is a native call calleeCodeBlock is null so the number of parameters is unimportant + if (!calleeCodeBlock || (callerArgCount == calleeCodeBlock->m_numParameters)) { + ASSERT(!callLinkInfo->isLinked()); + + if (calleeCodeBlock) + calleeCodeBlock->addCaller(callLinkInfo); + + repatchBuffer.repatch(callLinkInfo->hotPathBegin, callee); + repatchBuffer.relink(callLinkInfo->hotPathOther, code); + } + + // patch the call so we do not continue to try to link. + repatchBuffer.relink(callLinkInfo->callReturnLocation, globalData->jitStubs->ctiVirtualCall()); +} + +void JIT::linkConstruct(JSFunction* callee, CodeBlock* callerCodeBlock, CodeBlock* calleeCodeBlock, JIT::CodePtr code, CallLinkInfo* callLinkInfo, int callerArgCount, JSGlobalData* globalData) +{ + RepatchBuffer repatchBuffer(callerCodeBlock); + + // Currently we only link calls with the exact number of arguments. + // If this is a native call calleeCodeBlock is null so the number of parameters is unimportant + if (!calleeCodeBlock || (callerArgCount == calleeCodeBlock->m_numParameters)) { + ASSERT(!callLinkInfo->isLinked()); + + if (calleeCodeBlock) + calleeCodeBlock->addCaller(callLinkInfo); + + repatchBuffer.repatch(callLinkInfo->hotPathBegin, callee); + repatchBuffer.relink(callLinkInfo->hotPathOther, code); + } + + // patch the call so we do not continue to try to link. 
+ repatchBuffer.relink(callLinkInfo->callReturnLocation, globalData->jitStubs->ctiVirtualConstruct()); +} +#endif // ENABLE(JIT_OPTIMIZE_CALL) + +} // namespace JSC + +#endif // ENABLE(JIT) diff --git a/Source/JavaScriptCore/jit/JIT.h b/Source/JavaScriptCore/jit/JIT.h new file mode 100644 index 0000000..907a774 --- /dev/null +++ b/Source/JavaScriptCore/jit/JIT.h @@ -0,0 +1,1046 @@ +/* + * Copyright (C) 2008 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef JIT_h +#define JIT_h + +#if ENABLE(JIT) + +// We've run into some problems where changing the size of the class JIT leads to +// performance fluctuations. Try forcing alignment in an attempt to stabalize this. 
+#if COMPILER(GCC)
+#define JIT_CLASS_ALIGNMENT __attribute__ ((aligned (32)))
+#else
+#define JIT_CLASS_ALIGNMENT
+#endif
+
+#define ASSERT_JIT_OFFSET(actual, expected) ASSERT_WITH_MESSAGE(actual == expected, "JIT Offset \"%s\" should be %d, not %d.\n", #expected, static_cast<int>(actual), static_cast<int>(expected));
+
+#include "CodeBlock.h"
+#include "Interpreter.h"
+#include "JSInterfaceJIT.h"
+#include "Opcode.h"
+#include "Profiler.h"
+#include
+
+namespace JSC {
+
+    class CodeBlock;
+    class JIT;
+    class JSPropertyNameIterator;
+    class Interpreter;
+    class Register;
+    class RegisterFile;
+    class ScopeChainNode;
+    class StructureChain;
+
+    struct CallLinkInfo;
+    struct Instruction;
+    struct OperandTypes;
+    struct PolymorphicAccessStructureList;
+    struct SimpleJumpTable;
+    struct StringJumpTable;
+    struct StructureStubInfo;
+
+    struct CallRecord {
+        MacroAssembler::Call from;
+        unsigned bytecodeOffset;
+        void* to;
+
+        CallRecord()
+        {
+        }
+
+        CallRecord(MacroAssembler::Call from, unsigned bytecodeOffset, void* to = 0)
+            : from(from)
+            , bytecodeOffset(bytecodeOffset)
+            , to(to)
+        {
+        }
+    };
+
+    struct JumpTable {
+        MacroAssembler::Jump from;
+        unsigned toBytecodeOffset;
+
+        JumpTable(MacroAssembler::Jump f, unsigned t)
+            : from(f)
+            , toBytecodeOffset(t)
+        {
+        }
+    };
+
+    struct SlowCaseEntry {
+        MacroAssembler::Jump from;
+        unsigned to;
+        unsigned hint;
+
+        SlowCaseEntry(MacroAssembler::Jump f, unsigned t, unsigned h = 0)
+            : from(f)
+            , to(t)
+            , hint(h)
+        {
+        }
+    };
+
+    struct SwitchRecord {
+        enum Type {
+            Immediate,
+            Character,
+            String
+        };
+
+        Type type;
+
+        union {
+            SimpleJumpTable* simpleJumpTable;
+            StringJumpTable* stringJumpTable;
+        } jumpTable;
+
+        unsigned bytecodeOffset;
+        unsigned defaultOffset;
+
+        SwitchRecord(SimpleJumpTable* jumpTable, unsigned bytecodeOffset, unsigned defaultOffset, Type type)
+            : type(type)
+            , bytecodeOffset(bytecodeOffset)
+            , defaultOffset(defaultOffset)
+        {
+            this->jumpTable.simpleJumpTable = jumpTable;
+        }
+
+        SwitchRecord(StringJumpTable* jumpTable, unsigned bytecodeOffset, unsigned defaultOffset)
+            : type(String)
+            , bytecodeOffset(bytecodeOffset)
+            , defaultOffset(defaultOffset)
+        {
+            this->jumpTable.stringJumpTable = jumpTable;
+        }
+    };
+
+    struct PropertyStubCompilationInfo {
+        MacroAssembler::Call callReturnLocation;
+        MacroAssembler::Label hotPathBegin;
+    };
+
+    struct StructureStubCompilationInfo {
+        MacroAssembler::DataLabelPtr hotPathBegin;
+        MacroAssembler::Call hotPathOther;
+        MacroAssembler::Call callReturnLocation;
+    };
+
+    struct MethodCallCompilationInfo {
+        MethodCallCompilationInfo(unsigned propertyAccessIndex)
+            : propertyAccessIndex(propertyAccessIndex)
+        {
+        }
+
+        MacroAssembler::DataLabelPtr structureToCompare;
+        unsigned propertyAccessIndex;
+    };
+
+    // Near calls can only be patched to other JIT code; regular calls can be patched to JIT code or relinked to stub functions.
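
The comment above draws a distinction between near calls and regular calls. As a toy model of why that matters (illustrative only, with addresses as plain integers rather than instruction bytes): a near call stores a limited-range relative displacement, so it can only be retargeted to code that sits close by, i.e. other JIT code in the executable pool, while a regular call site can be relinked to an arbitrary absolute address such as a cti_* stub function.

    #include <cassert>
    #include <cstdint>

    // Toy patchable near call: the target is encoded as a 32-bit displacement from the call site.
    struct ToyNearCallSite {
        uintptr_t location;   // address of the call instruction
        int32_t displacement; // rel32: the new target must be within +/-2GB of 'location'

        bool repatch(uintptr_t newTarget)
        {
            intptr_t delta = static_cast<intptr_t>(newTarget) - static_cast<intptr_t>(location);
            if (delta > INT32_MAX || delta < INT32_MIN)
                return false; // out of range: only nearby (JIT pool) targets are representable
            displacement = static_cast<int32_t>(delta);
            return true;
        }
    };

    // Toy patchable regular call: holds a full absolute address, so it can point at JIT code or a C stub.
    struct ToyRegularCallSite {
        uintptr_t target;

        void relink(uintptr_t newTarget) { target = newTarget; }
    };

    int main()
    {
        ToyNearCallSite nearCall = { 0x10000, 0 };
        bool fits = nearCall.repatch(0x20000); // nearby target: fits in rel32
        assert(fits);
        (void)fits;

        ToyRegularCallSite regularCall = { 0 };
        regularCall.relink(0x40000000u); // arbitrary address, e.g. a stub function
        (void)regularCall;
        return 0;
    }
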
+ void ctiPatchNearCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction); + void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction); + void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, FunctionPtr newCalleeFunction); + + class JIT : private JSInterfaceJIT { + friend class JITStubCall; + + using MacroAssembler::Jump; + using MacroAssembler::JumpList; + using MacroAssembler::Label; + + static const int patchGetByIdDefaultStructure = -1; + // Magic number - initial offset cannot be representable as a signed 8bit value, or the X86Assembler + // will compress the displacement, and we may not be able to fit a patched offset. + static const int patchGetByIdDefaultOffset = 256; + + public: + static JITCode compile(JSGlobalData* globalData, CodeBlock* codeBlock, CodePtr* functionEntryArityCheck = 0, void* offsetBase = 0) + { + return JIT(globalData, codeBlock, offsetBase).privateCompile(functionEntryArityCheck); + } + + static void compileGetByIdProto(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress) + { + JIT jit(globalData, codeBlock); + jit.privateCompileGetByIdProto(stubInfo, structure, prototypeStructure, ident, slot, cachedOffset, returnAddress, callFrame); + } + + static void compileGetByIdSelfList(JSGlobalData* globalData, CodeBlock* codeBlock, StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset) + { + JIT jit(globalData, codeBlock); + jit.privateCompileGetByIdSelfList(stubInfo, polymorphicStructures, currentIndex, structure, ident, slot, cachedOffset); + } + static void compileGetByIdProtoList(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructureList, int currentIndex, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset) + { + JIT jit(globalData, codeBlock); + jit.privateCompileGetByIdProtoList(stubInfo, prototypeStructureList, currentIndex, structure, prototypeStructure, ident, slot, cachedOffset, callFrame); + } + static void compileGetByIdChainList(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructureList, int currentIndex, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset) + { + JIT jit(globalData, codeBlock); + jit.privateCompileGetByIdChainList(stubInfo, prototypeStructureList, currentIndex, structure, chain, count, ident, slot, cachedOffset, callFrame); + } + + static void compileGetByIdChain(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress) + { + JIT jit(globalData, codeBlock); + jit.privateCompileGetByIdChain(stubInfo, structure, chain, count, ident, slot, cachedOffset, returnAddress, callFrame); + } + + static void 
compilePutByIdTransition(JSGlobalData* globalData, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress, bool direct)
+        {
+            JIT jit(globalData, codeBlock);
+            jit.privateCompilePutByIdTransition(stubInfo, oldStructure, newStructure, cachedOffset, chain, returnAddress, direct);
+        }
+
+        static void compileCTIMachineTrampolines(JSGlobalData* globalData, RefPtr<ExecutablePool>* executablePool, TrampolineStructure *trampolines)
+        {
+            if (!globalData->canUseJIT())
+                return;
+            JIT jit(globalData, 0, 0);
+            jit.privateCompileCTIMachineTrampolines(executablePool, globalData, trampolines);
+        }
+
+        static CodePtr compileCTINativeCall(JSGlobalData* globalData, PassRefPtr<ExecutablePool> executablePool, NativeFunction func)
+        {
+            if (!globalData->canUseJIT())
+                return CodePtr();
+            JIT jit(globalData, 0, 0);
+            return jit.privateCompileCTINativeCall(executablePool, globalData, func);
+        }
+
+        static void patchGetByIdSelf(CodeBlock* codeblock, StructureStubInfo*, Structure*, size_t cachedOffset, ReturnAddressPtr returnAddress);
+        static void patchPutByIdReplace(CodeBlock* codeblock, StructureStubInfo*, Structure*, size_t cachedOffset, ReturnAddressPtr returnAddress, bool direct);
+        static void patchMethodCallProto(CodeBlock* codeblock, MethodCallLinkInfo&, JSFunction*, Structure*, JSObject*, ReturnAddressPtr);
+
+        static void compilePatchGetArrayLength(JSGlobalData* globalData, CodeBlock* codeBlock, ReturnAddressPtr returnAddress)
+        {
+            JIT jit(globalData, codeBlock);
+            return jit.privateCompilePatchGetArrayLength(returnAddress);
+        }
+
+        static void linkCall(JSFunction* callee, CodeBlock* callerCodeBlock, CodeBlock* calleeCodeBlock, CodePtr, CallLinkInfo*, int callerArgCount, JSGlobalData*);
+        static void linkConstruct(JSFunction* callee, CodeBlock* callerCodeBlock, CodeBlock* calleeCodeBlock, CodePtr, CallLinkInfo*, int callerArgCount, JSGlobalData*);
+        static void unlinkCallOrConstruct(CallLinkInfo*);
+
+    private:
+        struct JSRInfo {
+            DataLabelPtr storeLocation;
+            Label target;
+
+            JSRInfo(DataLabelPtr storeLocation, Label targetLocation)
+                : storeLocation(storeLocation)
+                , target(targetLocation)
+            {
+            }
+        };
+
+        JIT(JSGlobalData*, CodeBlock* = 0, void* = 0);
+
+        void privateCompileMainPass();
+        void privateCompileLinkPass();
+        void privateCompileSlowCases();
+        JITCode privateCompile(CodePtr* functionEntryArityCheck);
+        void privateCompileGetByIdProto(StructureStubInfo*, Structure*, Structure* prototypeStructure, const Identifier&, const PropertySlot&, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame);
+        void privateCompileGetByIdSelfList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, const Identifier&, const PropertySlot&, size_t cachedOffset);
+        void privateCompileGetByIdProtoList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, Structure* prototypeStructure, const Identifier&, const PropertySlot&, size_t cachedOffset, CallFrame* callFrame);
+        void privateCompileGetByIdChainList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, StructureChain* chain, size_t count, const Identifier&, const PropertySlot&, size_t cachedOffset, CallFrame* callFrame);
+        void privateCompileGetByIdChain(StructureStubInfo*, Structure*, StructureChain*, size_t count, const Identifier&, const PropertySlot&, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame);
+        void privateCompilePutByIdTransition(StructureStubInfo*, Structure*, Structure*, size_t cachedOffset, StructureChain*, ReturnAddressPtr returnAddress, bool direct);
+
+        void privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executablePool, JSGlobalData* data, TrampolineStructure *trampolines);
+        Label privateCompileCTINativeCall(JSGlobalData*, bool isConstruct = false);
+        CodePtr privateCompileCTINativeCall(PassRefPtr<ExecutablePool> executablePool, JSGlobalData* data, NativeFunction func);
+        void privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress);
+
+        void addSlowCase(Jump);
+        void addSlowCase(JumpList);
+        void addJump(Jump, int);
+        void emitJumpSlowToHot(Jump, int);
+
+        void compileOpCall(OpcodeID, Instruction* instruction, unsigned callLinkInfoIndex);
+        void compileOpCallVarargs(Instruction* instruction);
+        void compileOpCallInitializeCallFrame();
+        void compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter, unsigned callLinkInfoIndex, OpcodeID opcodeID);
+        void compileOpCallVarargsSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter);
+
+        enum CompileOpStrictEqType { OpStrictEq, OpNStrictEq };
+        void compileOpStrictEq(Instruction* instruction, CompileOpStrictEqType type);
+        bool isOperandConstantImmediateDouble(unsigned src);
+
+        void emitLoadDouble(unsigned index, FPRegisterID value);
+        void emitLoadInt32ToDouble(unsigned index, FPRegisterID value);
+
+        void testPrototype(JSValue, JumpList& failureCases);
+
+#if USE(JSVALUE32_64)
+        bool getOperandConstantImmediateInt(unsigned op1, unsigned op2, unsigned& op, int32_t& constant);
+
+        void emitLoadTag(unsigned index, RegisterID tag);
+        void emitLoadPayload(unsigned index, RegisterID payload);
+
+        void emitLoad(const JSValue& v, RegisterID tag, RegisterID payload);
+        void emitLoad(unsigned index, RegisterID tag, RegisterID payload, RegisterID base = callFrameRegister);
+        void emitLoad2(unsigned index1, RegisterID tag1, RegisterID payload1, unsigned index2, RegisterID tag2, RegisterID payload2);
+
+        void emitStore(unsigned index, RegisterID tag, RegisterID payload, RegisterID base = callFrameRegister);
+        void emitStore(unsigned index, const JSValue constant, RegisterID base = callFrameRegister);
+        void emitStoreInt32(unsigned index, RegisterID payload, bool indexIsInt32 = false);
+        void emitStoreInt32(unsigned index, Imm32 payload, bool indexIsInt32 = false);
+        void emitStoreCell(unsigned index, RegisterID payload, bool indexIsCell = false);
+        void emitStoreBool(unsigned index, RegisterID tag, bool indexIsBool = false);
+        void emitStoreDouble(unsigned index, FPRegisterID value);
+
+        bool isLabeled(unsigned bytecodeOffset);
+        void map(unsigned bytecodeOffset, unsigned virtualRegisterIndex, RegisterID tag, RegisterID payload);
+        void unmap(RegisterID);
+        void unmap();
+        bool isMapped(unsigned virtualRegisterIndex);
+        bool getMappedPayload(unsigned virtualRegisterIndex, RegisterID& payload);
+        bool getMappedTag(unsigned virtualRegisterIndex, RegisterID& tag);
+
+        void emitJumpSlowCaseIfNotJSCell(unsigned virtualRegisterIndex);
+        void emitJumpSlowCaseIfNotJSCell(unsigned virtualRegisterIndex, RegisterID tag);
+        void linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator&, unsigned virtualRegisterIndex);
+
+#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+        void compileGetByIdHotPath();
+        void compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, bool isMethodCheck = false);
+#endif
+        void compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, Structure* structure, size_t cachedOffset);
+        void compileGetDirectOffset(JSObject* base, RegisterID
temp, RegisterID resultTag, RegisterID resultPayload, size_t cachedOffset); + void compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, RegisterID structure, RegisterID offset); + void compilePutDirectOffset(RegisterID base, RegisterID valueTag, RegisterID valuePayload, Structure* structure, size_t cachedOffset); + + // Arithmetic opcode helpers + void emitAdd32Constant(unsigned dst, unsigned op, int32_t constant, ResultType opType); + void emitSub32Constant(unsigned dst, unsigned op, int32_t constant, ResultType opType); + void emitBinaryDoubleOp(OpcodeID, unsigned dst, unsigned op1, unsigned op2, OperandTypes, JumpList& notInt32Op1, JumpList& notInt32Op2, bool op1IsInRegisters = true, bool op2IsInRegisters = true); + +#if CPU(X86) + // These architecture specific value are used to enable patching - see comment on op_put_by_id. + static const int patchOffsetPutByIdStructure = 7; + static const int patchOffsetPutByIdExternalLoad = 13; + static const int patchLengthPutByIdExternalLoad = 3; + static const int patchOffsetPutByIdPropertyMapOffset1 = 22; + static const int patchOffsetPutByIdPropertyMapOffset2 = 28; + // These architecture specific value are used to enable patching - see comment on op_get_by_id. + static const int patchOffsetGetByIdStructure = 7; + static const int patchOffsetGetByIdBranchToSlowCase = 13; + static const int patchOffsetGetByIdExternalLoad = 13; + static const int patchLengthGetByIdExternalLoad = 3; + static const int patchOffsetGetByIdPropertyMapOffset1 = 22; + static const int patchOffsetGetByIdPropertyMapOffset2 = 28; + static const int patchOffsetGetByIdPutResult = 28; +#if ENABLE(OPCODE_SAMPLING) + static const int patchOffsetGetByIdSlowCaseCall = 37; +#else + static const int patchOffsetGetByIdSlowCaseCall = 27; +#endif + static const int patchOffsetOpCallCompareToJump = 6; + + static const int patchOffsetMethodCheckProtoObj = 11; + static const int patchOffsetMethodCheckProtoStruct = 18; + static const int patchOffsetMethodCheckPutFunction = 29; +#elif CPU(ARM_TRADITIONAL) + // These architecture specific value are used to enable patching - see comment on op_put_by_id. + static const int patchOffsetPutByIdStructure = 4; + static const int patchOffsetPutByIdExternalLoad = 16; + static const int patchLengthPutByIdExternalLoad = 4; + static const int patchOffsetPutByIdPropertyMapOffset1 = 20; + static const int patchOffsetPutByIdPropertyMapOffset2 = 28; + // These architecture specific value are used to enable patching - see comment on op_get_by_id. 
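
The patchOffset*/patchLength* constants in these blocks record fixed byte distances from a label in the emitted code to the fields that later get rewritten (an inline structure pointer, a property-map offset, and so on), which is why the values differ per architecture and per instruction sequence. A minimal model of the underlying operation, overwriting a pointer-sized immediate at a known offset inside generated code, using plain non-executable memory and invented names rather than JSC's RepatchBuffer:

    #include <cassert>
    #include <cstring>
    #include <vector>

    // Toy "code buffer": the compiler guarantees that a known number of bytes after a label
    // there is an inline pointer-sized immediate that can be overwritten later.
    struct ToyCodePatcher {
        std::vector<unsigned char> code;

        void patchPointer(size_t offsetFromLabel, const void* newValue)
        {
            assert(offsetFromLabel + sizeof(newValue) <= code.size());
            std::memcpy(&code[offsetFromLabel], &newValue, sizeof(newValue)); // overwrite the embedded immediate
        }
    };

    int main()
    {
        const size_t toyPatchOffsetStructure = 8; // stands in for an architecture-specific constant
        ToyCodePatcher patcher;
        patcher.code.resize(64, 0x90); // pretend this is emitted code
        int dummyStructure = 0;
        patcher.patchPointer(toyPatchOffsetStructure, &dummyStructure);
        return 0;
    }
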
+ static const int patchOffsetGetByIdStructure = 4; + static const int patchOffsetGetByIdBranchToSlowCase = 16; + static const int patchOffsetGetByIdExternalLoad = 16; + static const int patchLengthGetByIdExternalLoad = 4; + static const int patchOffsetGetByIdPropertyMapOffset1 = 20; + static const int patchOffsetGetByIdPropertyMapOffset2 = 28; + static const int patchOffsetGetByIdPutResult = 36; +#if ENABLE(OPCODE_SAMPLING) + #error "OPCODE_SAMPLING is not yet supported" +#else + static const int patchOffsetGetByIdSlowCaseCall = 32; +#endif + static const int patchOffsetOpCallCompareToJump = 12; + + static const int patchOffsetMethodCheckProtoObj = 12; + static const int patchOffsetMethodCheckProtoStruct = 20; + static const int patchOffsetMethodCheckPutFunction = 32; + + // sequenceOpCall + static const int sequenceOpCallInstructionSpace = 12; + static const int sequenceOpCallConstantSpace = 2; + // sequenceMethodCheck + static const int sequenceMethodCheckInstructionSpace = 40; + static const int sequenceMethodCheckConstantSpace = 6; + // sequenceGetByIdHotPath + static const int sequenceGetByIdHotPathInstructionSpace = 36; + static const int sequenceGetByIdHotPathConstantSpace = 4; + // sequenceGetByIdSlowCase + static const int sequenceGetByIdSlowCaseInstructionSpace = 56; + static const int sequenceGetByIdSlowCaseConstantSpace = 2; + // sequencePutById + static const int sequencePutByIdInstructionSpace = 36; + static const int sequencePutByIdConstantSpace = 4; +#elif CPU(ARM_THUMB2) + // These architecture specific value are used to enable patching - see comment on op_put_by_id. + static const int patchOffsetPutByIdStructure = 10; + static const int patchOffsetPutByIdExternalLoad = 26; + static const int patchLengthPutByIdExternalLoad = 12; + static const int patchOffsetPutByIdPropertyMapOffset1 = 46; + static const int patchOffsetPutByIdPropertyMapOffset2 = 58; + // These architecture specific value are used to enable patching - see comment on op_get_by_id. 
+ static const int patchOffsetGetByIdStructure = 10; + static const int patchOffsetGetByIdBranchToSlowCase = 26; + static const int patchOffsetGetByIdExternalLoad = 26; + static const int patchLengthGetByIdExternalLoad = 12; + static const int patchOffsetGetByIdPropertyMapOffset1 = 46; + static const int patchOffsetGetByIdPropertyMapOffset2 = 58; + static const int patchOffsetGetByIdPutResult = 62; +#if ENABLE(OPCODE_SAMPLING) + #error "OPCODE_SAMPLING is not yet supported" +#else + static const int patchOffsetGetByIdSlowCaseCall = 30; +#endif + static const int patchOffsetOpCallCompareToJump = 16; + + static const int patchOffsetMethodCheckProtoObj = 24; + static const int patchOffsetMethodCheckProtoStruct = 34; + static const int patchOffsetMethodCheckPutFunction = 58; + + // sequenceOpCall + static const int sequenceOpCallInstructionSpace = 12; + static const int sequenceOpCallConstantSpace = 2; + // sequenceMethodCheck + static const int sequenceMethodCheckInstructionSpace = 40; + static const int sequenceMethodCheckConstantSpace = 6; + // sequenceGetByIdHotPath + static const int sequenceGetByIdHotPathInstructionSpace = 36; + static const int sequenceGetByIdHotPathConstantSpace = 4; + // sequenceGetByIdSlowCase + static const int sequenceGetByIdSlowCaseInstructionSpace = 40; + static const int sequenceGetByIdSlowCaseConstantSpace = 2; + // sequencePutById + static const int sequencePutByIdInstructionSpace = 36; + static const int sequencePutByIdConstantSpace = 4; +#elif CPU(MIPS) +#if WTF_MIPS_ISA(1) + static const int patchOffsetPutByIdStructure = 16; + static const int patchOffsetPutByIdExternalLoad = 48; + static const int patchLengthPutByIdExternalLoad = 20; + static const int patchOffsetPutByIdPropertyMapOffset1 = 68; + static const int patchOffsetPutByIdPropertyMapOffset2 = 84; + static const int patchOffsetGetByIdStructure = 16; + static const int patchOffsetGetByIdBranchToSlowCase = 48; + static const int patchOffsetGetByIdExternalLoad = 48; + static const int patchLengthGetByIdExternalLoad = 20; + static const int patchOffsetGetByIdPropertyMapOffset1 = 68; + static const int patchOffsetGetByIdPropertyMapOffset2 = 88; + static const int patchOffsetGetByIdPutResult = 108; +#if ENABLE(OPCODE_SAMPLING) + #error "OPCODE_SAMPLING is not yet supported" +#else + static const int patchOffsetGetByIdSlowCaseCall = 44; +#endif + static const int patchOffsetOpCallCompareToJump = 32; + static const int patchOffsetMethodCheckProtoObj = 32; + static const int patchOffsetMethodCheckProtoStruct = 56; + static const int patchOffsetMethodCheckPutFunction = 88; +#else // WTF_MIPS_ISA(1) + static const int patchOffsetPutByIdStructure = 12; + static const int patchOffsetPutByIdExternalLoad = 44; + static const int patchLengthPutByIdExternalLoad = 16; + static const int patchOffsetPutByIdPropertyMapOffset1 = 60; + static const int patchOffsetPutByIdPropertyMapOffset2 = 76; + static const int patchOffsetGetByIdStructure = 12; + static const int patchOffsetGetByIdBranchToSlowCase = 44; + static const int patchOffsetGetByIdExternalLoad = 44; + static const int patchLengthGetByIdExternalLoad = 16; + static const int patchOffsetGetByIdPropertyMapOffset1 = 60; + static const int patchOffsetGetByIdPropertyMapOffset2 = 76; + static const int patchOffsetGetByIdPutResult = 92; +#if ENABLE(OPCODE_SAMPLING) + #error "OPCODE_SAMPLING is not yet supported" +#else + static const int patchOffsetGetByIdSlowCaseCall = 44; +#endif + static const int patchOffsetOpCallCompareToJump = 32; + static const int 
patchOffsetMethodCheckProtoObj = 32; + static const int patchOffsetMethodCheckProtoStruct = 52; + static const int patchOffsetMethodCheckPutFunction = 84; +#endif +#else +#error "JSVALUE32_64 not supported on this platform." +#endif + +#else // USE(JSVALUE32_64) + void emitGetVirtualRegister(int src, RegisterID dst); + void emitGetVirtualRegisters(int src1, RegisterID dst1, int src2, RegisterID dst2); + void emitPutVirtualRegister(unsigned dst, RegisterID from = regT0); + + int32_t getConstantOperandImmediateInt(unsigned src); + + void emitGetVariableObjectRegister(RegisterID variableObject, int index, RegisterID dst); + void emitPutVariableObjectRegister(RegisterID src, RegisterID variableObject, int index); + + void killLastResultRegister(); + + Jump emitJumpIfJSCell(RegisterID); + Jump emitJumpIfBothJSCells(RegisterID, RegisterID, RegisterID); + void emitJumpSlowCaseIfJSCell(RegisterID); + Jump emitJumpIfNotJSCell(RegisterID); + void emitJumpSlowCaseIfNotJSCell(RegisterID); + void emitJumpSlowCaseIfNotJSCell(RegisterID, int VReg); +#if USE(JSVALUE32_64) + JIT::Jump emitJumpIfImmediateNumber(RegisterID reg) + { + return emitJumpIfImmediateInteger(reg); + } + + JIT::Jump emitJumpIfNotImmediateNumber(RegisterID reg) + { + return emitJumpIfNotImmediateInteger(reg); + } +#endif + JIT::Jump emitJumpIfImmediateInteger(RegisterID); + JIT::Jump emitJumpIfNotImmediateInteger(RegisterID); + JIT::Jump emitJumpIfNotImmediateIntegers(RegisterID, RegisterID, RegisterID); + void emitJumpSlowCaseIfNotImmediateInteger(RegisterID); + void emitJumpSlowCaseIfNotImmediateNumber(RegisterID); + void emitJumpSlowCaseIfNotImmediateIntegers(RegisterID, RegisterID, RegisterID); + +#if USE(JSVALUE32_64) + void emitFastArithDeTagImmediate(RegisterID); + Jump emitFastArithDeTagImmediateJumpIfZero(RegisterID); +#endif + void emitFastArithReTagImmediate(RegisterID src, RegisterID dest); + void emitFastArithIntToImmNoCheck(RegisterID src, RegisterID dest); + + void emitTagAsBoolImmediate(RegisterID reg); + void compileBinaryArithOp(OpcodeID, unsigned dst, unsigned src1, unsigned src2, OperandTypes opi); +#if USE(JSVALUE64) + void compileBinaryArithOpSlowCase(OpcodeID, Vector::iterator&, unsigned dst, unsigned src1, unsigned src2, OperandTypes, bool op1HasImmediateIntFastCase, bool op2HasImmediateIntFastCase); +#else + void compileBinaryArithOpSlowCase(OpcodeID, Vector::iterator&, unsigned dst, unsigned src1, unsigned src2, OperandTypes); +#endif + +#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) + void compileGetByIdHotPath(int resultVReg, int baseVReg, Identifier* ident, unsigned propertyAccessInstructionIndex); + void compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident, Vector::iterator& iter, bool isMethodCheck = false); +#endif + void compileGetDirectOffset(RegisterID base, RegisterID result, Structure* structure, size_t cachedOffset); + void compileGetDirectOffset(JSObject* base, RegisterID temp, RegisterID result, size_t cachedOffset); + void compileGetDirectOffset(RegisterID base, RegisterID result, RegisterID structure, RegisterID offset, RegisterID scratch); + void compilePutDirectOffset(RegisterID base, RegisterID value, Structure* structure, size_t cachedOffset); + +#if CPU(X86_64) + // These architecture specific value are used to enable patching - see comment on op_put_by_id. 
+ static const int patchOffsetPutByIdStructure = 10; + static const int patchOffsetPutByIdExternalLoad = 20; + static const int patchLengthPutByIdExternalLoad = 4; + static const int patchOffsetPutByIdPropertyMapOffset = 31; + // These architecture specific value are used to enable patching - see comment on op_get_by_id. + static const int patchOffsetGetByIdStructure = 10; + static const int patchOffsetGetByIdBranchToSlowCase = 20; + static const int patchOffsetGetByIdExternalLoad = 20; + static const int patchLengthGetByIdExternalLoad = 4; + static const int patchOffsetGetByIdPropertyMapOffset = 31; + static const int patchOffsetGetByIdPutResult = 31; +#if ENABLE(OPCODE_SAMPLING) + static const int patchOffsetGetByIdSlowCaseCall = 64; +#else + static const int patchOffsetGetByIdSlowCaseCall = 41; +#endif + static const int patchOffsetOpCallCompareToJump = 9; + + static const int patchOffsetMethodCheckProtoObj = 20; + static const int patchOffsetMethodCheckProtoStruct = 30; + static const int patchOffsetMethodCheckPutFunction = 50; +#elif CPU(X86) + // These architecture specific value are used to enable patching - see comment on op_put_by_id. + static const int patchOffsetPutByIdStructure = 7; + static const int patchOffsetPutByIdExternalLoad = 13; + static const int patchLengthPutByIdExternalLoad = 3; + static const int patchOffsetPutByIdPropertyMapOffset = 22; + // These architecture specific value are used to enable patching - see comment on op_get_by_id. + static const int patchOffsetGetByIdStructure = 7; + static const int patchOffsetGetByIdBranchToSlowCase = 13; + static const int patchOffsetGetByIdExternalLoad = 13; + static const int patchLengthGetByIdExternalLoad = 3; + static const int patchOffsetGetByIdPropertyMapOffset = 22; + static const int patchOffsetGetByIdPutResult = 22; +#if ENABLE(OPCODE_SAMPLING) + static const int patchOffsetGetByIdSlowCaseCall = 33; +#else + static const int patchOffsetGetByIdSlowCaseCall = 23; +#endif + static const int patchOffsetOpCallCompareToJump = 6; + + static const int patchOffsetMethodCheckProtoObj = 11; + static const int patchOffsetMethodCheckProtoStruct = 18; + static const int patchOffsetMethodCheckPutFunction = 29; +#elif CPU(ARM_THUMB2) + // These architecture specific value are used to enable patching - see comment on op_put_by_id. + static const int patchOffsetPutByIdStructure = 10; + static const int patchOffsetPutByIdExternalLoad = 26; + static const int patchLengthPutByIdExternalLoad = 12; + static const int patchOffsetPutByIdPropertyMapOffset = 46; + // These architecture specific value are used to enable patching - see comment on op_get_by_id. + static const int patchOffsetGetByIdStructure = 10; + static const int patchOffsetGetByIdBranchToSlowCase = 26; + static const int patchOffsetGetByIdExternalLoad = 26; + static const int patchLengthGetByIdExternalLoad = 12; + static const int patchOffsetGetByIdPropertyMapOffset = 46; + static const int patchOffsetGetByIdPutResult = 50; +#if ENABLE(OPCODE_SAMPLING) + static const int patchOffsetGetByIdSlowCaseCall = 0; // FIMXE +#else + static const int patchOffsetGetByIdSlowCaseCall = 28; +#endif + static const int patchOffsetOpCallCompareToJump = 16; + + static const int patchOffsetMethodCheckProtoObj = 24; + static const int patchOffsetMethodCheckProtoStruct = 34; + static const int patchOffsetMethodCheckPutFunction = 58; +#elif CPU(ARM_TRADITIONAL) + // These architecture specific value are used to enable patching - see comment on op_put_by_id. 
+ static const int patchOffsetPutByIdStructure = 4; + static const int patchOffsetPutByIdExternalLoad = 16; + static const int patchLengthPutByIdExternalLoad = 4; + static const int patchOffsetPutByIdPropertyMapOffset = 20; + // These architecture specific value are used to enable patching - see comment on op_get_by_id. + static const int patchOffsetGetByIdStructure = 4; + static const int patchOffsetGetByIdBranchToSlowCase = 16; + static const int patchOffsetGetByIdExternalLoad = 16; + static const int patchLengthGetByIdExternalLoad = 4; + static const int patchOffsetGetByIdPropertyMapOffset = 20; + static const int patchOffsetGetByIdPutResult = 28; +#if ENABLE(OPCODE_SAMPLING) + #error "OPCODE_SAMPLING is not yet supported" +#else + static const int patchOffsetGetByIdSlowCaseCall = 28; +#endif + static const int patchOffsetOpCallCompareToJump = 12; + + static const int patchOffsetMethodCheckProtoObj = 12; + static const int patchOffsetMethodCheckProtoStruct = 20; + static const int patchOffsetMethodCheckPutFunction = 32; + + // sequenceOpCall + static const int sequenceOpCallInstructionSpace = 12; + static const int sequenceOpCallConstantSpace = 2; + // sequenceMethodCheck + static const int sequenceMethodCheckInstructionSpace = 40; + static const int sequenceMethodCheckConstantSpace = 6; + // sequenceGetByIdHotPath + static const int sequenceGetByIdHotPathInstructionSpace = 28; + static const int sequenceGetByIdHotPathConstantSpace = 3; + // sequenceGetByIdSlowCase + static const int sequenceGetByIdSlowCaseInstructionSpace = 32; + static const int sequenceGetByIdSlowCaseConstantSpace = 2; + // sequencePutById + static const int sequencePutByIdInstructionSpace = 28; + static const int sequencePutByIdConstantSpace = 3; +#elif CPU(MIPS) +#if WTF_MIPS_ISA(1) + static const int patchOffsetPutByIdStructure = 16; + static const int patchOffsetPutByIdExternalLoad = 48; + static const int patchLengthPutByIdExternalLoad = 20; + static const int patchOffsetPutByIdPropertyMapOffset = 68; + static const int patchOffsetGetByIdStructure = 16; + static const int patchOffsetGetByIdBranchToSlowCase = 48; + static const int patchOffsetGetByIdExternalLoad = 48; + static const int patchLengthGetByIdExternalLoad = 20; + static const int patchOffsetGetByIdPropertyMapOffset = 68; + static const int patchOffsetGetByIdPutResult = 88; +#if ENABLE(OPCODE_SAMPLING) + #error "OPCODE_SAMPLING is not yet supported" +#else + static const int patchOffsetGetByIdSlowCaseCall = 40; +#endif + static const int patchOffsetOpCallCompareToJump = 32; + static const int patchOffsetMethodCheckProtoObj = 32; + static const int patchOffsetMethodCheckProtoStruct = 56; + static const int patchOffsetMethodCheckPutFunction = 88; +#else // WTF_MIPS_ISA(1) + static const int patchOffsetPutByIdStructure = 12; + static const int patchOffsetPutByIdExternalLoad = 44; + static const int patchLengthPutByIdExternalLoad = 16; + static const int patchOffsetPutByIdPropertyMapOffset = 60; + static const int patchOffsetGetByIdStructure = 12; + static const int patchOffsetGetByIdBranchToSlowCase = 44; + static const int patchOffsetGetByIdExternalLoad = 44; + static const int patchLengthGetByIdExternalLoad = 16; + static const int patchOffsetGetByIdPropertyMapOffset = 60; + static const int patchOffsetGetByIdPutResult = 76; +#if ENABLE(OPCODE_SAMPLING) + #error "OPCODE_SAMPLING is not yet supported" +#else + static const int patchOffsetGetByIdSlowCaseCall = 40; +#endif + static const int patchOffsetOpCallCompareToJump = 32; + static const int 
patchOffsetMethodCheckProtoObj = 32; + static const int patchOffsetMethodCheckProtoStruct = 52; + static const int patchOffsetMethodCheckPutFunction = 84; +#endif +#endif +#endif // USE(JSVALUE32_64) + +#if (defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL) +#define BEGIN_UNINTERRUPTED_SEQUENCE(name) do { beginUninterruptedSequence(name ## InstructionSpace, name ## ConstantSpace); } while (false) +#define END_UNINTERRUPTED_SEQUENCE(name) do { endUninterruptedSequence(name ## InstructionSpace, name ## ConstantSpace); } while (false) + + void beginUninterruptedSequence(int, int); + void endUninterruptedSequence(int, int); + +#else +#define BEGIN_UNINTERRUPTED_SEQUENCE(name) do { beginUninterruptedSequence(); } while (false) +#define END_UNINTERRUPTED_SEQUENCE(name) do { endUninterruptedSequence(); } while (false) +#endif + + void emit_op_add(Instruction*); + void emit_op_bitand(Instruction*); + void emit_op_bitnot(Instruction*); + void emit_op_bitor(Instruction*); + void emit_op_bitxor(Instruction*); + void emit_op_call(Instruction*); + void emit_op_call_eval(Instruction*); + void emit_op_call_varargs(Instruction*); + void emit_op_call_put_result(Instruction*); + void emit_op_catch(Instruction*); + void emit_op_construct(Instruction*); + void emit_op_get_callee(Instruction*); + void emit_op_create_this(Instruction*); + void emit_op_convert_this(Instruction*); + void emit_op_convert_this_strict(Instruction*); + void emit_op_create_arguments(Instruction*); + void emit_op_debug(Instruction*); + void emit_op_del_by_id(Instruction*); + void emit_op_div(Instruction*); + void emit_op_end(Instruction*); + void emit_op_enter(Instruction*); + void emit_op_create_activation(Instruction*); + void emit_op_eq(Instruction*); + void emit_op_eq_null(Instruction*); + void emit_op_get_by_id(Instruction*); + void emit_op_get_arguments_length(Instruction*); + void emit_op_get_by_val(Instruction*); + void emit_op_get_argument_by_val(Instruction*); + void emit_op_get_by_pname(Instruction*); + void emit_op_get_global_var(Instruction*); + void emit_op_get_scoped_var(Instruction*); + void emit_op_init_lazy_reg(Instruction*); + void emit_op_check_has_instance(Instruction*); + void emit_op_instanceof(Instruction*); + void emit_op_jeq_null(Instruction*); + void emit_op_jfalse(Instruction*); + void emit_op_jmp(Instruction*); + void emit_op_jmp_scopes(Instruction*); + void emit_op_jneq_null(Instruction*); + void emit_op_jneq_ptr(Instruction*); + void emit_op_jnless(Instruction*); + void emit_op_jless(Instruction*); + void emit_op_jlesseq(Instruction*, bool invert = false); + void emit_op_jnlesseq(Instruction*); + void emit_op_jsr(Instruction*); + void emit_op_jtrue(Instruction*); + void emit_op_load_varargs(Instruction*); + void emit_op_loop(Instruction*); + void emit_op_loop_if_less(Instruction*); + void emit_op_loop_if_lesseq(Instruction*); + void emit_op_loop_if_true(Instruction*); + void emit_op_loop_if_false(Instruction*); + void emit_op_lshift(Instruction*); + void emit_op_method_check(Instruction*); + void emit_op_mod(Instruction*); + void emit_op_mov(Instruction*); + void emit_op_mul(Instruction*); + void emit_op_negate(Instruction*); + void emit_op_neq(Instruction*); + void emit_op_neq_null(Instruction*); + void emit_op_new_array(Instruction*); + void emit_op_new_func(Instruction*); + void emit_op_new_func_exp(Instruction*); + void emit_op_new_object(Instruction*); + void emit_op_new_regexp(Instruction*); + void emit_op_get_pnames(Instruction*); + void emit_op_next_pname(Instruction*); + void 
emit_op_not(Instruction*);
+        void emit_op_nstricteq(Instruction*);
+        void emit_op_pop_scope(Instruction*);
+        void emit_op_post_dec(Instruction*);
+        void emit_op_post_inc(Instruction*);
+        void emit_op_pre_dec(Instruction*);
+        void emit_op_pre_inc(Instruction*);
+        void emit_op_profile_did_call(Instruction*);
+        void emit_op_profile_will_call(Instruction*);
+        void emit_op_push_new_scope(Instruction*);
+        void emit_op_push_scope(Instruction*);
+        void emit_op_put_by_id(Instruction*);
+        void emit_op_put_by_index(Instruction*);
+        void emit_op_put_by_val(Instruction*);
+        void emit_op_put_getter(Instruction*);
+        void emit_op_put_global_var(Instruction*);
+        void emit_op_put_scoped_var(Instruction*);
+        void emit_op_put_setter(Instruction*);
+        void emit_op_resolve(Instruction*);
+        void emit_op_resolve_base(Instruction*);
+        void emit_op_ensure_property_exists(Instruction*);
+        void emit_op_resolve_global(Instruction*, bool dynamic = false);
+        void emit_op_resolve_global_dynamic(Instruction*);
+        void emit_op_resolve_skip(Instruction*);
+        void emit_op_resolve_with_base(Instruction*);
+        void emit_op_ret(Instruction*);
+        void emit_op_ret_object_or_this(Instruction*);
+        void emit_op_rshift(Instruction*);
+        void emit_op_sret(Instruction*);
+        void emit_op_strcat(Instruction*);
+        void emit_op_stricteq(Instruction*);
+        void emit_op_sub(Instruction*);
+        void emit_op_switch_char(Instruction*);
+        void emit_op_switch_imm(Instruction*);
+        void emit_op_switch_string(Instruction*);
+        void emit_op_tear_off_activation(Instruction*);
+        void emit_op_tear_off_arguments(Instruction*);
+        void emit_op_throw(Instruction*);
+        void emit_op_throw_reference_error(Instruction*);
+        void emit_op_throw_syntax_error(Instruction*);
+        void emit_op_to_jsnumber(Instruction*);
+        void emit_op_to_primitive(Instruction*);
+        void emit_op_unexpected_load(Instruction*);
+        void emit_op_urshift(Instruction*);
+#if ENABLE(JIT_USE_SOFT_MODULO)
+        void softModulo();
+#endif
+
+        void emitSlow_op_add(Instruction*, Vector<SlowCaseEntry>::iterator&);
+        void emitSlow_op_bitand(Instruction*, Vector<SlowCaseEntry>::iterator&);
+        void emitSlow_op_bitnot(Instruction*, Vector<SlowCaseEntry>::iterator&);
+        void emitSlow_op_bitor(Instruction*, Vector<SlowCaseEntry>::iterator&);
+        void emitSlow_op_bitxor(Instruction*, Vector<SlowCaseEntry>::iterator&);
+        void emitSlow_op_call(Instruction*, Vector<SlowCaseEntry>::iterator&);
+        void emitSlow_op_call_eval(Instruction*, Vector<SlowCaseEntry>::iterator&);
+        void emitSlow_op_call_varargs(Instruction*, Vector<SlowCaseEntry>::iterator&);
+        void emitSlow_op_construct(Instruction*, Vector<SlowCaseEntry>::iterator&);
+        void emitSlow_op_convert_this(Instruction*, Vector<SlowCaseEntry>::iterator&);
+        void emitSlow_op_convert_this_strict(Instruction*, Vector<SlowCaseEntry>::iterator&);
+        void emitSlow_op_div(Instruction*, Vector<SlowCaseEntry>::iterator&);
+        void emitSlow_op_eq(Instruction*, Vector<SlowCaseEntry>::iterator&);
+        void emitSlow_op_get_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&);
+        void emitSlow_op_get_arguments_length(Instruction*, Vector<SlowCaseEntry>::iterator&);
+        void emitSlow_op_get_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&);
+        void emitSlow_op_get_argument_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&);
+        void emitSlow_op_get_by_pname(Instruction*, Vector<SlowCaseEntry>::iterator&);
+        void emitSlow_op_check_has_instance(Instruction*, Vector<SlowCaseEntry>::iterator&);
+        void emitSlow_op_instanceof(Instruction*, Vector<SlowCaseEntry>::iterator&);
+        void emitSlow_op_jfalse(Instruction*, Vector<SlowCaseEntry>::iterator&);
+        void emitSlow_op_jnless(Instruction*, Vector<SlowCaseEntry>::iterator&);
+        void emitSlow_op_jless(Instruction*, Vector<SlowCaseEntry>::iterator&);
+        void emitSlow_op_jlesseq(Instruction*, Vector<SlowCaseEntry>::iterator&, bool invert = false);
+        void emitSlow_op_jnlesseq(Instruction*, Vector<SlowCaseEntry>::iterator&);
+        void emitSlow_op_jtrue(Instruction*, Vector<SlowCaseEntry>::iterator&);
+        void emitSlow_op_load_varargs(Instruction*, Vector<SlowCaseEntry>::iterator&);
+        void emitSlow_op_loop_if_less(Instruction*, Vector<SlowCaseEntry>::iterator&);
+        void emitSlow_op_loop_if_lesseq(Instruction*, Vector<SlowCaseEntry>::iterator&);
+        void emitSlow_op_loop_if_true(Instruction*, Vector<SlowCaseEntry>::iterator&);
+        void emitSlow_op_loop_if_false(Instruction*, Vector<SlowCaseEntry>::iterator&);
+        void emitSlow_op_lshift(Instruction*, Vector<SlowCaseEntry>::iterator&);
+        void emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&);
+        void emitSlow_op_mod(Instruction*, Vector<SlowCaseEntry>::iterator&);
+        void emitSlow_op_mul(Instruction*, Vector<SlowCaseEntry>::iterator&);
+        void emitSlow_op_negate(Instruction*, Vector<SlowCaseEntry>::iterator&);
+        void emitSlow_op_neq(Instruction*, Vector<SlowCaseEntry>::iterator&);
+        void emitSlow_op_not(Instruction*, Vector<SlowCaseEntry>::iterator&);
+        void emitSlow_op_nstricteq(Instruction*, Vector<SlowCaseEntry>::iterator&);
+        void emitSlow_op_post_dec(Instruction*, Vector<SlowCaseEntry>::iterator&);
+        void emitSlow_op_post_inc(Instruction*, Vector<SlowCaseEntry>::iterator&);
+        void emitSlow_op_pre_dec(Instruction*, Vector<SlowCaseEntry>::iterator&);
+        void emitSlow_op_pre_inc(Instruction*, Vector<SlowCaseEntry>::iterator&);
+        void emitSlow_op_put_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&);
+        void emitSlow_op_put_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&);
+        void emitSlow_op_resolve_global(Instruction*, Vector<SlowCaseEntry>::iterator&);
+        void emitSlow_op_resolve_global_dynamic(Instruction*, Vector<SlowCaseEntry>::iterator&);
+        void emitSlow_op_rshift(Instruction*, Vector<SlowCaseEntry>::iterator&);
+        void emitSlow_op_stricteq(Instruction*, Vector<SlowCaseEntry>::iterator&);
+        void emitSlow_op_sub(Instruction*, Vector<SlowCaseEntry>::iterator&);
+        void emitSlow_op_to_jsnumber(Instruction*, Vector<SlowCaseEntry>::iterator&);
+        void emitSlow_op_to_primitive(Instruction*, Vector<SlowCaseEntry>::iterator&);
+        void emitSlow_op_urshift(Instruction*, Vector<SlowCaseEntry>::iterator&);
+
+
+        void emitRightShift(Instruction*, bool isUnsigned);
+        void emitRightShiftSlowCase(Instruction*, Vector<SlowCaseEntry>::iterator&, bool isUnsigned);
+
+        /* This function is deprecated. */
+        void emitGetJITStubArg(unsigned argumentNumber, RegisterID dst);
+
+        void emitInitRegister(unsigned dst);
+
+        void emitPutToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry entry);
+        void emitPutImmediateToCallFrameHeader(void* value, RegisterFile::CallFrameHeaderEntry entry);
+        void emitGetFromCallFrameHeaderPtr(RegisterFile::CallFrameHeaderEntry entry, RegisterID to, RegisterID from = callFrameRegister);
+        void emitGetFromCallFrameHeader32(RegisterFile::CallFrameHeaderEntry entry, RegisterID to, RegisterID from = callFrameRegister);
+
+        JSValue getConstantOperand(unsigned src);
+        bool isOperandConstantImmediateInt(unsigned src);
+        bool isOperandConstantImmediateChar(unsigned src);
+
+        Jump getSlowCase(Vector<SlowCaseEntry>::iterator& iter)
+        {
+            return iter++->from;
+        }
+        void linkSlowCase(Vector<SlowCaseEntry>::iterator& iter)
+        {
+            iter->from.link(this);
+            ++iter;
+        }
+        void linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator&, int vReg);
+
+        Jump checkStructure(RegisterID reg, Structure* structure);
+
+        void restoreArgumentReference();
+        void restoreArgumentReferenceForTrampoline();
+
+        Call emitNakedCall(CodePtr function = CodePtr());
+
+        void preserveReturnAddressAfterCall(RegisterID);
+        void restoreReturnAddressBeforeReturn(RegisterID);
+        void restoreReturnAddressBeforeReturn(Address);
+
+        // Loads the character value of a single character string into dst.
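
The helper declared just below implements the fast path for treating a known single-character string as its character code, jumping to the failures list otherwise. A rough model of that check in plain C++ (a toy std::string stand-in, not JSC's string layout):

    #include <cassert>
    #include <string>

    // Returns true and writes the character code if the value is a single-character string;
    // otherwise it "fails" (in the JIT this would append a jump to the failure JumpList).
    static bool toyLoadCharacterString(const std::string& value, unsigned short& dst)
    {
        if (value.size() != 1)
            return false; // length check fails: take the slow path
        dst = static_cast<unsigned char>(value[0]); // load the character value
        return true;
    }

    int main()
    {
        unsigned short character = 0;
        assert(toyLoadCharacterString("a", character) && character == 'a');
        assert(!toyLoadCharacterString("ab", character));
        return 0;
    }
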
+        void emitLoadCharacterString(RegisterID src, RegisterID dst, JumpList& failures);
+
+        void emitTimeoutCheck();
+#ifndef NDEBUG
+        void printBytecodeOperandTypes(unsigned src1, unsigned src2);
+#endif
+
+#if ENABLE(SAMPLING_FLAGS)
+        void setSamplingFlag(int32_t);
+        void clearSamplingFlag(int32_t);
+#endif
+
+#if ENABLE(SAMPLING_COUNTERS)
+        void emitCount(AbstractSamplingCounter&, uint32_t = 1);
+#endif
+
+#if ENABLE(OPCODE_SAMPLING)
+        void sampleInstruction(Instruction*, bool = false);
+#endif
+
+#if ENABLE(CODEBLOCK_SAMPLING)
+        void sampleCodeBlock(CodeBlock*);
+#else
+        void sampleCodeBlock(CodeBlock*) {}
+#endif
+
+        Interpreter* m_interpreter;
+        JSGlobalData* m_globalData;
+        CodeBlock* m_codeBlock;
+
+        Vector<CallRecord> m_calls;
+        Vector