path: root/JavaScriptCore/jit
author     Steve Block <steveblock@google.com>   2011-05-06 11:45:16 +0100
committer  Steve Block <steveblock@google.com>   2011-05-12 13:44:10 +0100
commit     cad810f21b803229eb11403f9209855525a25d57 (patch)
tree       29a6fd0279be608e0fe9ffe9841f722f0f4e4269 /JavaScriptCore/jit
parent     121b0cf4517156d0ac5111caf9830c51b69bae8f (diff)
Merge WebKit at r75315: Initial merge by git.
Change-Id: I570314b346ce101c935ed22a626b48c2af266b84
Diffstat (limited to 'JavaScriptCore/jit')
-rw-r--r--  JavaScriptCore/jit/ExecutableAllocator.cpp              128
-rw-r--r--  JavaScriptCore/jit/ExecutableAllocator.h                362
-rw-r--r--  JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp   470
-rw-r--r--  JavaScriptCore/jit/JIT.cpp                              670
-rw-r--r--  JavaScriptCore/jit/JIT.h                               1046
-rw-r--r--  JavaScriptCore/jit/JITArithmetic.cpp                   1244
-rw-r--r--  JavaScriptCore/jit/JITArithmetic32_64.cpp              1424
-rw-r--r--  JavaScriptCore/jit/JITCall.cpp                          261
-rw-r--r--  JavaScriptCore/jit/JITCall32_64.cpp                     356
-rw-r--r--  JavaScriptCore/jit/JITCode.h                            117
-rw-r--r--  JavaScriptCore/jit/JITInlineMethods.h                   809
-rw-r--r--  JavaScriptCore/jit/JITOpcodes.cpp                      1775
-rw-r--r--  JavaScriptCore/jit/JITOpcodes32_64.cpp                 1836
-rw-r--r--  JavaScriptCore/jit/JITPropertyAccess.cpp               1101
-rw-r--r--  JavaScriptCore/jit/JITPropertyAccess32_64.cpp          1186
-rw-r--r--  JavaScriptCore/jit/JITStubCall.h                        237
-rw-r--r--  JavaScriptCore/jit/JITStubs.cpp                        3638
-rw-r--r--  JavaScriptCore/jit/JITStubs.h                           416
-rw-r--r--  JavaScriptCore/jit/JSInterfaceJIT.h                     292
-rw-r--r--  JavaScriptCore/jit/SpecializedThunkJIT.h                165
-rw-r--r--  JavaScriptCore/jit/ThunkGenerators.cpp                  162
-rw-r--r--  JavaScriptCore/jit/ThunkGenerators.h                     45
22 files changed, 0 insertions, 17740 deletions
diff --git a/JavaScriptCore/jit/ExecutableAllocator.cpp b/JavaScriptCore/jit/ExecutableAllocator.cpp
deleted file mode 100644
index 4530b38..0000000
--- a/JavaScriptCore/jit/ExecutableAllocator.cpp
+++ /dev/null
@@ -1,128 +0,0 @@
-/*
- * Copyright (C) 2008 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-
-#include "ExecutableAllocator.h"
-
-#if ENABLE(ASSEMBLER)
-
-namespace JSC {
-
-size_t ExecutableAllocator::pageSize = 0;
-
-#if ENABLE(EXECUTABLE_ALLOCATOR_DEMAND)
-
-void ExecutableAllocator::initializePageSize()
-{
-#if CPU(ARMV5_OR_LOWER)
- // The moving memory model (as used in ARMv5 and earlier platforms)
- // on Symbian OS limits the number of chunks for each process to 16.
- // To mitigate this limitation, increase the page size to 256KB so
- // that we allocate fewer, larger chunks.
- ExecutableAllocator::pageSize = 256 * 1024;
-#else
- ExecutableAllocator::pageSize = WTF::pageSize();
-#endif
-}
-
-ExecutablePool::Allocation ExecutablePool::systemAlloc(size_t size)
-{
- PageAllocation allocation = PageAllocation::allocate(size, OSAllocator::JSJITCodePages, EXECUTABLE_POOL_WRITABLE, true);
- if (!allocation)
- CRASH();
- return allocation;
-}
-
-void ExecutablePool::systemRelease(ExecutablePool::Allocation& allocation)
-{
- allocation.deallocate();
-}
-
-bool ExecutableAllocator::isValid() const
-{
- return true;
-}
-
-bool ExecutableAllocator::underMemoryPressure()
-{
- return false;
-}
-
-size_t ExecutableAllocator::committedByteCount()
-{
- return 0;
-}
-
-#endif
-
-#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
-
-#if OS(WINDOWS) || OS(SYMBIAN)
-#error "ASSEMBLER_WX_EXCLUSIVE not yet suported on this platform."
-#endif
-
-void ExecutableAllocator::reprotectRegion(void* start, size_t size, ProtectionSetting setting)
-{
- if (!pageSize)
- initializePageSize();
-
- // Calculate the start of the page containing this region,
- // and account for this extra memory within size.
- intptr_t startPtr = reinterpret_cast<intptr_t>(start);
- intptr_t pageStartPtr = startPtr & ~(pageSize - 1);
- void* pageStart = reinterpret_cast<void*>(pageStartPtr);
- size += (startPtr - pageStartPtr);
-
- // Round size up to an integral number of pages
- size += (pageSize - 1);
- size &= ~(pageSize - 1);
-
- mprotect(pageStart, size, (setting == Writable) ? PROTECTION_FLAGS_RW : PROTECTION_FLAGS_RX);
-}
-
-#endif
-
-#if CPU(ARM_TRADITIONAL) && OS(LINUX) && COMPILER(RVCT)
-
-__asm void ExecutableAllocator::cacheFlush(void* code, size_t size)
-{
- ARM
- push {r7}
- add r1, r1, r0
- mov r7, #0xf0000
- add r7, r7, #0x2
- mov r2, #0x0
- svc #0x0
- pop {r7}
- bx lr
-}
-
-#endif
-
-}
-
-#endif // ENABLE(ASSEMBLER)
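As an aside, the page-rounding arithmetic in reprotectRegion() is worth seeing in isolation. A minimal standalone sketch, assuming a POSIX system and a power-of-two page size (the helper name is illustrative, not from this tree):

    #include <sys/mman.h>
    #include <unistd.h>
    #include <cstddef>
    #include <cstdint>

    // mprotect() requires a page-aligned start address, so round the start
    // down to its page boundary, then grow the size to cover the leading
    // slop and any partial trailing page.
    static void reprotect(void* start, size_t size, int prot)
    {
        const size_t pageSize = static_cast<size_t>(sysconf(_SC_PAGESIZE));
        const uintptr_t startPtr = reinterpret_cast<uintptr_t>(start);
        const uintptr_t pageStart = startPtr & ~(pageSize - 1);
        size += startPtr - pageStart;                   // account for rounding down
        size = (size + pageSize - 1) & ~(pageSize - 1); // round up to whole pages
        mprotect(reinterpret_cast<void*>(pageStart), size, prot);
    }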
diff --git a/JavaScriptCore/jit/ExecutableAllocator.h b/JavaScriptCore/jit/ExecutableAllocator.h
deleted file mode 100644
index 4580a67..0000000
--- a/JavaScriptCore/jit/ExecutableAllocator.h
+++ /dev/null
@@ -1,362 +0,0 @@
-/*
- * Copyright (C) 2008 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef ExecutableAllocator_h
-#define ExecutableAllocator_h
-#include <stddef.h> // for ptrdiff_t
-#include <limits>
-#include <wtf/Assertions.h>
-#include <wtf/PageAllocation.h>
-#include <wtf/PassRefPtr.h>
-#include <wtf/RefCounted.h>
-#include <wtf/UnusedParam.h>
-#include <wtf/Vector.h>
-
-#if OS(IOS)
-#include <libkern/OSCacheControl.h>
-#include <sys/mman.h>
-#endif
-
-#if OS(SYMBIAN)
-#include <e32std.h>
-#endif
-
-#if CPU(MIPS) && OS(LINUX)
-#include <sys/cachectl.h>
-#endif
-
-#if OS(WINCE)
-// From pkfuncs.h (private header file from the Platform Builder)
-#define CACHE_SYNC_ALL 0x07F
-extern "C" __declspec(dllimport) void CacheRangeFlush(LPVOID pAddr, DWORD dwLength, DWORD dwFlags);
-#endif
-
-#if PLATFORM(BREWMP)
-#include <AEEIMemCache1.h>
-#include <AEEMemCache1.bid>
-#include <wtf/brew/RefPtrBrew.h>
-#endif
-
-#define JIT_ALLOCATOR_PAGE_SIZE (ExecutableAllocator::pageSize)
-#define JIT_ALLOCATOR_LARGE_ALLOC_SIZE (ExecutableAllocator::pageSize * 4)
-
-#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
-#define PROTECTION_FLAGS_RW (PROT_READ | PROT_WRITE)
-#define PROTECTION_FLAGS_RX (PROT_READ | PROT_EXEC)
-#define EXECUTABLE_POOL_WRITABLE false
-#else
-#define EXECUTABLE_POOL_WRITABLE true
-#endif
-
-namespace JSC {
-
-inline size_t roundUpAllocationSize(size_t request, size_t granularity)
-{
- if ((std::numeric_limits<size_t>::max() - granularity) <= request)
- CRASH(); // Allocation is too large
-
- // Round up to next page boundary
- size_t size = request + (granularity - 1);
- size = size & ~(granularity - 1);
- ASSERT(size >= request);
- return size;
-}
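A quick sanity check of the bit trick above; it only works when granularity is a power of two (standalone and illustrative, not code from the tree):

    #include <cassert>
    #include <cstddef>

    static size_t roundUp(size_t request, size_t granularity)
    {
        // Adding (granularity - 1) and masking off the low bits rounds up.
        return (request + granularity - 1) & ~(granularity - 1);
    }

    int main()
    {
        assert(roundUp(1, 4096) == 4096);    // partial page -> one page
        assert(roundUp(4096, 4096) == 4096); // exact multiple is unchanged
        assert(roundUp(4097, 4096) == 8192); // one byte over -> next page
        return 0;
    }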
-
-}
-
-#if ENABLE(JIT) && ENABLE(ASSEMBLER)
-
-namespace JSC {
-
-class ExecutablePool : public RefCounted<ExecutablePool> {
-public:
-#if ENABLE(EXECUTABLE_ALLOCATOR_DEMAND)
- typedef PageAllocation Allocation;
-#else
- class Allocation {
- public:
- Allocation(void* base, size_t size)
- : m_base(base)
- , m_size(size)
- {
- }
- void* base() { return m_base; }
- size_t size() { return m_size; }
- bool operator!() const { return !m_base; }
-
- private:
- void* m_base;
- size_t m_size;
- };
-#endif
- typedef Vector<Allocation, 2> AllocationList;
-
- static PassRefPtr<ExecutablePool> create(size_t n)
- {
- return adoptRef(new ExecutablePool(n));
- }
-
- void* alloc(size_t n)
- {
- ASSERT(m_freePtr <= m_end);
-
- // Round 'n' up to a multiple of the word size; if all allocations are of
- // word-sized quantities, then all subsequent allocations will be aligned.
- n = roundUpAllocationSize(n, sizeof(void*));
-
- if (static_cast<ptrdiff_t>(n) < (m_end - m_freePtr)) {
- void* result = m_freePtr;
- m_freePtr += n;
- return result;
- }
-
- // Insufficient space to allocate in the existing pool,
- // so we need to allocate from a new pool.
- return poolAllocate(n);
- }
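The fast path of alloc() is a classic bump allocator. A self-contained sketch of the same idea (BumpPool is a made-up name, and where the real code falls back to poolAllocate() this sketch just returns null):

    #include <cassert>
    #include <cstddef>

    struct BumpPool {
        char* freePtr;
        char* end;

        void* alloc(size_t n)
        {
            // Word-align the request so subsequent allocations stay aligned.
            n = (n + sizeof(void*) - 1) & ~(sizeof(void*) - 1);
            if (n > static_cast<size_t>(end - freePtr))
                return 0; // the real pool allocates fresh backing memory here
            void* result = freePtr;
            freePtr += n;
            return result;
        }
    };

    int main()
    {
        char buffer[64];
        BumpPool pool = { buffer, buffer + sizeof(buffer) };
        char* a = static_cast<char*>(pool.alloc(3)); // rounded up to one word
        char* b = static_cast<char*>(pool.alloc(8));
        assert(static_cast<size_t>(b - a) == sizeof(void*));
        return 0;
    }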
-
- void tryShrink(void* allocation, size_t oldSize, size_t newSize)
- {
- if (static_cast<char*>(allocation) + oldSize != m_freePtr)
- return;
- m_freePtr = static_cast<char*>(allocation) + roundUpAllocationSize(newSize, sizeof(void*));
- }
-
- ~ExecutablePool()
- {
- AllocationList::iterator end = m_pools.end();
- for (AllocationList::iterator ptr = m_pools.begin(); ptr != end; ++ptr)
- ExecutablePool::systemRelease(*ptr);
- }
-
- size_t available() const { return (m_pools.size() > 1) ? 0 : m_end - m_freePtr; }
-
-private:
- static Allocation systemAlloc(size_t n);
- static void systemRelease(Allocation& alloc);
-
- ExecutablePool(size_t n);
-
- void* poolAllocate(size_t n);
-
- char* m_freePtr;
- char* m_end;
- AllocationList m_pools;
-};
-
-class ExecutableAllocator {
- enum ProtectionSetting { Writable, Executable };
-
-public:
- static size_t pageSize;
- ExecutableAllocator()
- {
- if (!pageSize)
- initializePageSize();
- if (isValid())
- m_smallAllocationPool = ExecutablePool::create(JIT_ALLOCATOR_LARGE_ALLOC_SIZE);
-#if !ENABLE(INTERPRETER)
- else
- CRASH();
-#endif
- }
-
- bool isValid() const;
-
- static bool underMemoryPressure();
-
- PassRefPtr<ExecutablePool> poolForSize(size_t n)
- {
- // Try to fit in the existing small allocator
- ASSERT(m_smallAllocationPool);
- if (n < m_smallAllocationPool->available())
- return m_smallAllocationPool;
-
- // If the request is large, we just provide an unshared allocator
- if (n > JIT_ALLOCATOR_LARGE_ALLOC_SIZE)
- return ExecutablePool::create(n);
-
- // Create a new allocator
- RefPtr<ExecutablePool> pool = ExecutablePool::create(JIT_ALLOCATOR_LARGE_ALLOC_SIZE);
-
- // If the new allocator will result in more free space than in
- // the current small allocator, then we will use it instead
- if ((pool->available() - n) > m_smallAllocationPool->available())
- m_smallAllocationPool = pool;
- return pool.release();
- }
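poolForSize() encodes a three-way policy: reuse the shared small pool, give oversized requests a dedicated pool, or mint a new shared pool. A compact restatement, with std::shared_ptr standing in for RefPtr/PassRefPtr (an assumption, not the original types):

    #include <cstddef>
    #include <memory>

    struct Pool {
        size_t remaining;
        explicit Pool(size_t n) : remaining(n) {}
    };

    static const size_t largeAllocSize = 4 * 4096; // stand-in for JIT_ALLOCATOR_LARGE_ALLOC_SIZE

    std::shared_ptr<Pool> poolForSize(std::shared_ptr<Pool>& smallPool, size_t n)
    {
        if (n < smallPool->remaining)
            return smallPool;                  // fits in the shared small pool
        if (n > largeAllocSize)
            return std::make_shared<Pool>(n);  // oversized: dedicated, unshared pool
        std::shared_ptr<Pool> pool = std::make_shared<Pool>(largeAllocSize);
        if (pool->remaining - n > smallPool->remaining)
            smallPool = pool;                  // adopt it if it leaves more slack
        return pool;
    }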
-
-#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
- static void makeWritable(void* start, size_t size)
- {
- reprotectRegion(start, size, Writable);
- }
-
- static void makeExecutable(void* start, size_t size)
- {
- reprotectRegion(start, size, Executable);
- }
-#else
- static void makeWritable(void*, size_t) {}
- static void makeExecutable(void*, size_t) {}
-#endif
-
-
-#if CPU(X86) || CPU(X86_64)
- static void cacheFlush(void*, size_t)
- {
- }
-#elif CPU(MIPS)
- static void cacheFlush(void* code, size_t size)
- {
-#if COMPILER(GCC) && GCC_VERSION_AT_LEAST(4,3,0)
-#if WTF_MIPS_ISA_REV(2) && !GCC_VERSION_AT_LEAST(4,4,3)
- int lineSize;
- asm("rdhwr %0, $1" : "=r" (lineSize));
- //
- // Modify "start" and "end" to avoid the GCC 4.3.0-4.4.2 bug in
- // mips_expand_synci_loop that may execute synci one more time.
- // "start" points to the first byte of the cache line.
- // "end" points to the last byte of the line before the last cache line.
- // Because size is always a multiple of 4, it is safe to set
- // "end" to the last byte.
- //
- intptr_t start = reinterpret_cast<intptr_t>(code) & (-lineSize);
- intptr_t end = ((reinterpret_cast<intptr_t>(code) + size - 1) & (-lineSize)) - 1;
- __builtin___clear_cache(reinterpret_cast<char*>(start), reinterpret_cast<char*>(end));
-#else
- intptr_t end = reinterpret_cast<intptr_t>(code) + size;
- __builtin___clear_cache(reinterpret_cast<char*>(code), reinterpret_cast<char*>(end));
-#endif
-#else
- _flush_cache(reinterpret_cast<char*>(code), size, BCACHE);
-#endif
- }
-#elif CPU(ARM_THUMB2) && OS(IOS)
- static void cacheFlush(void* code, size_t size)
- {
- sys_cache_control(kCacheFunctionPrepareForExecution, code, size);
- }
-#elif CPU(ARM_THUMB2) && OS(LINUX)
- static void cacheFlush(void* code, size_t size)
- {
- asm volatile (
- "push {r7}\n"
- "mov r0, %0\n"
- "mov r1, %1\n"
- "movw r7, #0x2\n"
- "movt r7, #0xf\n"
- "movs r2, #0x0\n"
- "svc 0x0\n"
- "pop {r7}\n"
- :
- : "r" (code), "r" (reinterpret_cast<char*>(code) + size)
- : "r0", "r1", "r2");
- }
-#elif OS(SYMBIAN)
- static void cacheFlush(void* code, size_t size)
- {
- User::IMB_Range(code, static_cast<char*>(code) + size);
- }
-#elif CPU(ARM_TRADITIONAL) && OS(LINUX) && COMPILER(RVCT)
- static __asm void cacheFlush(void* code, size_t size);
-#elif CPU(ARM_TRADITIONAL) && OS(LINUX) && COMPILER(GCC)
- static void cacheFlush(void* code, size_t size)
- {
- asm volatile (
- "push {r7}\n"
- "mov r0, %0\n"
- "mov r1, %1\n"
- "mov r7, #0xf0000\n"
- "add r7, r7, #0x2\n"
- "mov r2, #0x0\n"
- "svc 0x0\n"
- "pop {r7}\n"
- :
- : "r" (code), "r" (reinterpret_cast<char*>(code) + size)
- : "r0", "r1", "r2");
- }
-#elif OS(WINCE)
- static void cacheFlush(void* code, size_t size)
- {
- CacheRangeFlush(code, size, CACHE_SYNC_ALL);
- }
-#elif PLATFORM(BREWMP)
- static void cacheFlush(void* code, size_t size)
- {
- PlatformRefPtr<IMemCache1> memCache = createRefPtrInstance<IMemCache1>(AEECLSID_MemCache1);
- IMemCache1_ClearCache(memCache.get(), reinterpret_cast<uint32>(code), size, MEMSPACE_CACHE_FLUSH, MEMSPACE_DATACACHE);
- IMemCache1_ClearCache(memCache.get(), reinterpret_cast<uint32>(code), size, MEMSPACE_CACHE_INVALIDATE, MEMSPACE_INSTCACHE);
- }
-#else
- #error "The cacheFlush support is missing on this platform."
-#endif
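On GCC and Clang toolchains, a portable equivalent of most of the per-platform cacheFlush() variants above is the compiler builtin. A sketch (the original keeps the explicit syscalls for toolchains and kernels where the builtin is unavailable or buggy):

    #include <cstddef>

    static void genericCacheFlush(void* code, size_t size)
    {
        // Synchronize the instruction cache with freshly written code.
        char* begin = static_cast<char*>(code);
        __builtin___clear_cache(begin, begin + size);
    }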
- static size_t committedByteCount();
-
-private:
-
-#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
- static void reprotectRegion(void*, size_t, ProtectionSetting);
-#endif
-
- RefPtr<ExecutablePool> m_smallAllocationPool;
- static void initializePageSize();
-};
-
-inline ExecutablePool::ExecutablePool(size_t n)
-{
- size_t allocSize = roundUpAllocationSize(n, JIT_ALLOCATOR_PAGE_SIZE);
- Allocation mem = systemAlloc(allocSize);
- m_pools.append(mem);
- m_freePtr = static_cast<char*>(mem.base());
- if (!m_freePtr)
- CRASH(); // Failed to allocate
- m_end = m_freePtr + allocSize;
-}
-
-inline void* ExecutablePool::poolAllocate(size_t n)
-{
- size_t allocSize = roundUpAllocationSize(n, JIT_ALLOCATOR_PAGE_SIZE);
-
- Allocation result = systemAlloc(allocSize);
- if (!result.base())
- CRASH(); // Failed to allocate
-
- ASSERT(m_end >= m_freePtr);
- if ((allocSize - n) > static_cast<size_t>(m_end - m_freePtr)) {
- // Replace allocation pool
- m_freePtr = static_cast<char*>(result.base()) + n;
- m_end = static_cast<char*>(result.base()) + allocSize;
- }
-
- m_pools.append(result);
- return result.base();
-}
-
-}
-
-#endif // ENABLE(JIT) && ENABLE(ASSEMBLER)
-
-#endif // ExecutableAllocator_h
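Putting the header together, the intended calling sequence under ASSEMBLER_WX_EXCLUSIVE looks roughly like the sketch below. This is hedged usage, not code from the tree: emitCode, generatedBytes, and codeSize are hypothetical, and error handling is elided.

    #include "ExecutableAllocator.h"
    #include <cstring>

    void emitCode(JSC::ExecutableAllocator& allocator, const void* generatedBytes, size_t codeSize)
    {
        RefPtr<JSC::ExecutablePool> pool = allocator.poolForSize(codeSize);
        void* code = pool->alloc(codeSize);

        JSC::ExecutableAllocator::makeWritable(code, codeSize);   // writable, not executable
        memcpy(code, generatedBytes, codeSize);
        JSC::ExecutableAllocator::makeExecutable(code, codeSize); // executable, not writable
        JSC::ExecutableAllocator::cacheFlush(code, codeSize);     // sync the instruction cache
    }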
diff --git a/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp b/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp
deleted file mode 100644
index e280b2d..0000000
--- a/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp
+++ /dev/null
@@ -1,470 +0,0 @@
-/*
- * Copyright (C) 2009 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-
-#include "ExecutableAllocator.h"
-
-#if ENABLE(EXECUTABLE_ALLOCATOR_FIXED)
-
-#include <errno.h>
-
-#include "TCSpinLock.h"
-#include <sys/mman.h>
-#include <unistd.h>
-#include <wtf/AVLTree.h>
-#include <wtf/PageReservation.h>
-#include <wtf/VMTags.h>
-
-#if CPU(X86_64)
- // These limits are suitable on 64-bit platforms (particularly x86-64, where we require all jumps to have a 2GB max range).
- #define VM_POOL_SIZE (2u * 1024u * 1024u * 1024u) // 2GB
- #define COALESCE_LIMIT (16u * 1024u * 1024u) // 16MB
-#else
- // These limits are hopefully sensible on embedded platforms.
- #define VM_POOL_SIZE (32u * 1024u * 1024u) // 32MB
- #define COALESCE_LIMIT (4u * 1024u * 1024u) // 4MB
-#endif
-
-using namespace WTF;
-
-namespace JSC {
-
-static size_t committedBytesCount = 0;
-static SpinLock spinlock = SPINLOCK_INITIALIZER;
-
-// FreeListEntry describes a free chunk of memory, stored in the freeList.
-struct FreeListEntry {
- FreeListEntry(void* pointer, size_t size)
- : pointer(pointer)
- , size(size)
- , nextEntry(0)
- , less(0)
- , greater(0)
- , balanceFactor(0)
- {
- }
-
- // All entries of the same size share a single entry
- // in the AVLTree, and are linked together in a linked
- // list, using nextEntry.
- void* pointer;
- size_t size;
- FreeListEntry* nextEntry;
-
- // These fields are used by AVLTree.
- FreeListEntry* less;
- FreeListEntry* greater;
- int balanceFactor;
-};
-
-// Abstractor class for use in AVLTree.
-// Nodes in the AVLTree are of type FreeListEntry, keyed on
-// (and thus sorted by) their size.
-struct AVLTreeAbstractorForFreeList {
- typedef FreeListEntry* handle;
- typedef int32_t size;
- typedef size_t key;
-
- handle get_less(handle h) { return h->less; }
- void set_less(handle h, handle lh) { h->less = lh; }
- handle get_greater(handle h) { return h->greater; }
- void set_greater(handle h, handle gh) { h->greater = gh; }
- int get_balance_factor(handle h) { return h->balanceFactor; }
- void set_balance_factor(handle h, int bf) { h->balanceFactor = bf; }
-
- static handle null() { return 0; }
-
- int compare_key_key(key va, key vb) { return va - vb; }
- int compare_key_node(key k, handle h) { return compare_key_key(k, h->size); }
- int compare_node_node(handle h1, handle h2) { return compare_key_key(h1->size, h2->size); }
-};
-
-// Used to reverse sort an array of FreeListEntry pointers.
-static int reverseSortFreeListEntriesByPointer(const void* leftPtr, const void* rightPtr)
-{
- FreeListEntry* left = *(FreeListEntry**)leftPtr;
- FreeListEntry* right = *(FreeListEntry**)rightPtr;
-
- return (intptr_t)(right->pointer) - (intptr_t)(left->pointer);
-}
-
-// Used to reverse sort an array of pointers.
-static int reverseSortCommonSizedAllocations(const void* leftPtr, const void* rightPtr)
-{
- void* left = *(void**)leftPtr;
- void* right = *(void**)rightPtr;
-
- return (intptr_t)right - (intptr_t)left;
-}
-
-class FixedVMPoolAllocator
-{
- // The free list is stored in a sorted tree.
- typedef AVLTree<AVLTreeAbstractorForFreeList, 40> SizeSortedFreeTree;
-
- void release(void* position, size_t size)
- {
- m_allocation.decommit(position, size);
- addToCommittedByteCount(-static_cast<long>(size));
- }
-
- void reuse(void* position, size_t size)
- {
- m_allocation.commit(position, size);
- addToCommittedByteCount(static_cast<long>(size));
- }
-
- // All additions to the free list should go through this method, rather than
- // calling insert directly, to avoid multiple entries being added with the
- // same key. All nodes being added should be singletons; they should not
- // already be part of a chain.
- void addToFreeList(FreeListEntry* entry)
- {
- ASSERT(!entry->nextEntry);
-
- if (entry->size == m_commonSize) {
- m_commonSizedAllocations.append(entry->pointer);
- delete entry;
- } else if (FreeListEntry* entryInFreeList = m_freeList.search(entry->size, m_freeList.EQUAL)) {
- // m_freeList already contains an entry for this size - insert this node into the chain.
- entry->nextEntry = entryInFreeList->nextEntry;
- entryInFreeList->nextEntry = entry;
- } else
- m_freeList.insert(entry);
- }
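The tree-of-chains layout that addToFreeList() maintains can be pictured with standard containers. A rough analogue (not the original data structure), keyed on chunk size with same-sized chunks chained under one key:

    #include <cstddef>
    #include <map>
    #include <vector>

    // One map node per distinct size; duplicates chain under it, mirroring
    // the FreeListEntry::nextEntry chain hanging off each AVL tree node.
    typedef std::map<size_t, std::vector<void*> > FreeList;

    void addToFreeList(FreeList& freeList, void* pointer, size_t size)
    {
        freeList[size].push_back(pointer);
    }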
-
- // We do not attempt to coalesce additions, which may lead to fragmentation;
- // instead we periodically perform a sweep to try to coalesce neighboring
- // entries in m_freeList. Presently this is triggered once 16MB
- // of memory has been released.
- void coalesceFreeSpace()
- {
- Vector<FreeListEntry*> freeListEntries;
- SizeSortedFreeTree::Iterator iter;
- iter.start_iter_least(m_freeList);
-
- // Empty m_freeList into a Vector.
- for (FreeListEntry* entry; (entry = *iter); ++iter) {
- // Each entry in m_freeList might correspond to multiple
- // free chunks of memory (of the same size). Walk the chain
- // (which is, of course, likely to be only one entry long!)
- // adding each entry to the Vector (resetting the next-in-chain
- // pointer to separate each node out).
- FreeListEntry* next;
- do {
- next = entry->nextEntry;
- entry->nextEntry = 0;
- freeListEntries.append(entry);
- } while ((entry = next));
- }
- // All entries are now in the Vector; purge the tree.
- m_freeList.purge();
-
- // Reverse-sort the freeListEntries and m_commonSizedAllocations Vectors.
- // We reverse-sort so that we can logically work forwards through memory,
- // whilst popping items off the end of the Vectors using last() and removeLast().
- qsort(freeListEntries.begin(), freeListEntries.size(), sizeof(FreeListEntry*), reverseSortFreeListEntriesByPointer);
- qsort(m_commonSizedAllocations.begin(), m_commonSizedAllocations.size(), sizeof(void*), reverseSortCommonSizedAllocations);
-
- // The entries from m_commonSizedAllocations that cannot be
- // coalesced into larger chunks will be temporarily stored here.
- Vector<void*> newCommonSizedAllocations;
-
- // Keep processing so long as entries remain in either of the vectors.
- while (freeListEntries.size() || m_commonSizedAllocations.size()) {
- // We're going to try to find a FreeListEntry node that we can coalesce onto.
- FreeListEntry* coalescionEntry = 0;
-
- // Is the lowest addressed chunk of free memory of common-size, or is it in the free list?
- if (m_commonSizedAllocations.size() && (!freeListEntries.size() || (m_commonSizedAllocations.last() < freeListEntries.last()->pointer))) {
- // Pop an item from the m_commonSizedAllocations vector - this is the lowest
- // addressed free chunk. Find out the begin and end addresses of the memory chunk.
- void* begin = m_commonSizedAllocations.last();
- void* end = (void*)((intptr_t)begin + m_commonSize);
- m_commonSizedAllocations.removeLast();
-
- // Try to find another free chunk abutting onto the end of the one we have already found.
- if (freeListEntries.size() && (freeListEntries.last()->pointer == end)) {
- // There is an existing FreeListEntry for the next chunk of memory!
- // We can reuse this; pop it off the end of freeListEntries.
- coalescionEntry = freeListEntries.last();
- freeListEntries.removeLast();
- // Update the existing node to include the common-sized chunk that we also found.
- coalescionEntry->pointer = (void*)((intptr_t)coalescionEntry->pointer - m_commonSize);
- coalescionEntry->size += m_commonSize;
- } else if (m_commonSizedAllocations.size() && (m_commonSizedAllocations.last() == end)) {
- // There is a second common-sized chunk that can be coalesced.
- // Allocate a new node.
- m_commonSizedAllocations.removeLast();
- coalescionEntry = new FreeListEntry(begin, 2 * m_commonSize);
- } else {
- // Nope - this poor little guy is all on his own. :-(
- // Add him into the newCommonSizedAllocations vector for now, we're
- // going to end up adding him back into the m_commonSizedAllocations
- // list when we're done.
- newCommonSizedAllocations.append(begin);
- continue;
- }
- } else {
- ASSERT(freeListEntries.size());
- ASSERT(!m_commonSizedAllocations.size() || (freeListEntries.last()->pointer < m_commonSizedAllocations.last()));
- // The lowest addressed item is from m_freeList; pop it from the Vector.
- coalescionEntry = freeListEntries.last();
- freeListEntries.removeLast();
- }
-
- // Right, we have a FreeListEntry; we just need to check whether there
- // is anything else to coalesce onto the end.
- ASSERT(coalescionEntry);
- while (true) {
- // Calculate the end address of the chunk we have found so far.
- void* end = (void*)((intptr_t)coalescionEntry->pointer + coalescionEntry->size);
-
- // Is there another chunk adjacent to the one we already have?
- if (freeListEntries.size() && (freeListEntries.last()->pointer == end)) {
- // Yes - another FreeListEntry - pop it from the list.
- FreeListEntry* coalescee = freeListEntries.last();
- freeListEntries.removeLast();
- // Add its size onto our existing node.
- coalescionEntry->size += coalescee->size;
- delete coalescee;
- } else if (m_commonSizedAllocations.size() && (m_commonSizedAllocations.last() == end)) {
- // We can coalesce the next common-sized chunk.
- m_commonSizedAllocations.removeLast();
- coalescionEntry->size += m_commonSize;
- } else
- break; // Nope, nothing to be added - stop here.
- }
-
- // We've coalesced everything we can onto the current chunk.
- // Add it back into m_freeList.
- addToFreeList(coalescionEntry);
- }
-
- // All chunks of free memory larger than m_commonSize should be
- // back in m_freeList by now. All that remains to be done is to
- // copy the contents of newCommonSizedAllocations back into
- // the m_commonSizedAllocations Vector.
- ASSERT(m_commonSizedAllocations.size() == 0);
- m_commonSizedAllocations.append(newCommonSizedAllocations);
- }
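The sweep is easier to follow on plain data. A minimal sketch of the same pass, merging address-sorted (pointer, size) ranges that abut (the two-vector bookkeeping and the common-size special case are elided):

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct Chunk {
        uintptr_t pointer;
        size_t size;
    };

    static bool byAddress(const Chunk& a, const Chunk& b) { return a.pointer < b.pointer; }

    std::vector<Chunk> coalesce(std::vector<Chunk> chunks)
    {
        std::sort(chunks.begin(), chunks.end(), byAddress);
        std::vector<Chunk> merged;
        for (size_t i = 0; i < chunks.size(); ++i) {
            // A chunk that starts exactly where the current run ends extends it.
            if (!merged.empty() && merged.back().pointer + merged.back().size == chunks[i].pointer)
                merged.back().size += chunks[i].size;
            else
                merged.push_back(chunks[i]);
        }
        return merged;
    }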
-
-public:
-
- FixedVMPoolAllocator(size_t commonSize, size_t totalHeapSize)
- : m_commonSize(commonSize)
- , m_countFreedSinceLastCoalesce(0)
- {
- m_allocation = PageReservation::reserve(totalHeapSize, OSAllocator::JSJITCodePages, EXECUTABLE_POOL_WRITABLE, true);
-
- if (!!m_allocation)
- m_freeList.insert(new FreeListEntry(m_allocation.base(), m_allocation.size()));
-#if !ENABLE(INTERPRETER)
- else
- CRASH();
-#endif
- }
-
- ExecutablePool::Allocation alloc(size_t size)
- {
- return ExecutablePool::Allocation(allocInternal(size), size);
- }
-
- void free(ExecutablePool::Allocation allocation)
- {
- void* pointer = allocation.base();
- size_t size = allocation.size();
-
- ASSERT(!!m_allocation);
- // Call release to report to the operating system that this
- // memory is no longer in use, and need not be paged out.
- ASSERT(isWithinVMPool(pointer, size));
- release(pointer, size);
-
- // Common-sized allocations are stored in the m_commonSizedAllocations
- // vector; all other freed chunks are added to m_freeList.
- if (size == m_commonSize)
- m_commonSizedAllocations.append(pointer);
- else
- addToFreeList(new FreeListEntry(pointer, size));
-
- // Do some housekeeping. Every time another 16MB of
- // allocations has been freed, sweep m_freeList,
- // coalescing any neighboring fragments.
- m_countFreedSinceLastCoalesce += size;
- if (m_countFreedSinceLastCoalesce >= COALESCE_LIMIT) {
- m_countFreedSinceLastCoalesce = 0;
- coalesceFreeSpace();
- }
- }
-
- bool isValid() const { return !!m_allocation; }
-
-private:
- void* allocInternal(size_t size)
- {
-#if ENABLE(INTERPRETER)
- if (!m_allocation)
- return 0;
-#else
- ASSERT(!!m_allocation);
-#endif
- void* result;
-
- // Freed allocations of the common size are not stored back into the main
- // m_freeList, but are instead stored in a separate vector. If the request
- // is for a common sized allocation, check this list.
- if ((size == m_commonSize) && m_commonSizedAllocations.size()) {
- result = m_commonSizedAllocations.last();
- m_commonSizedAllocations.removeLast();
- } else {
- // Search m_freeList for a suitable sized chunk to allocate memory from.
- FreeListEntry* entry = m_freeList.search(size, m_freeList.GREATER_EQUAL);
-
- // This would be bad news.
- if (!entry) {
- // Errk! Let's make a last-ditch, desperate attempt at defragmentation...
- coalesceFreeSpace();
- // Did that free up a large enough chunk?
- entry = m_freeList.search(size, m_freeList.GREATER_EQUAL);
- // No?... *BOOM!*
- if (!entry)
- CRASH();
- }
- ASSERT(entry->size != m_commonSize);
-
- // Remove the entry from m_freeList. But! -
- // Each entry in the tree may represent a chain of multiple chunks of the
- // same size, and we only want to remove one of them. So, if this entry
- // does have a chain, just remove the second item from the chain.
- if (FreeListEntry* next = entry->nextEntry) {
- // We're going to leave 'entry' in the tree; remove 'next' from its chain.
- entry->nextEntry = next->nextEntry;
- next->nextEntry = 0;
- entry = next;
- } else
- m_freeList.remove(entry->size);
-
- // Whoo! We have a result!
- ASSERT(entry->size >= size);
- result = entry->pointer;
-
- // If the allocation exactly fits the chunk we found in
- // m_freeList, then the FreeListEntry node is no longer needed.
- if (entry->size == size)
- delete entry;
- else {
- // There is memory left over, and it is not of the common size.
- // We can reuse the existing FreeListEntry node to add this back
- // into m_freeList.
- entry->pointer = (void*)((intptr_t)entry->pointer + size);
- entry->size -= size;
- addToFreeList(entry);
- }
- }
-
- // Call reuse to report to the operating system that this memory is in use.
- ASSERT(isWithinVMPool(result, size));
- reuse(result, size);
- return result;
- }
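allocInternal()'s search-and-split step, restated over the map analogue sketched earlier (illustrative; the real code also checks the common-sized vector first and attempts coalescing before giving up):

    #include <cstddef>
    #include <map>
    #include <vector>

    typedef std::map<size_t, std::vector<void*> > FreeList;

    void* takeBestFit(FreeList& freeList, size_t size)
    {
        // lower_bound() is the moral equivalent of the GREATER_EQUAL search.
        FreeList::iterator it = freeList.lower_bound(size);
        if (it == freeList.end())
            return 0; // the real code coalesces and retries, then CRASH()es

        size_t chunkSize = it->first;
        void* result = it->second.back();
        it->second.pop_back();
        if (it->second.empty())
            freeList.erase(it);

        if (chunkSize > size) {
            // Return the unused tail to the free list under its new size.
            void* tail = static_cast<char*>(result) + size;
            freeList[chunkSize - size].push_back(tail);
        }
        return result;
    }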
-
-#ifndef NDEBUG
- bool isWithinVMPool(void* pointer, size_t size)
- {
- return pointer >= m_allocation.base() && (reinterpret_cast<char*>(pointer) + size <= reinterpret_cast<char*>(m_allocation.base()) + m_allocation.size());
- }
-#endif
-
- void addToCommittedByteCount(long byteCount)
- {
- ASSERT(spinlock.IsHeld());
- ASSERT(static_cast<long>(committedBytesCount) + byteCount > -1);
- committedBytesCount += byteCount;
- }
-
- // Freed space from the most common sized allocations will be held in this list, ...
- const size_t m_commonSize;
- Vector<void*> m_commonSizedAllocations;
-
- // ... and all other freed allocations are held in m_freeList.
- SizeSortedFreeTree m_freeList;
-
- // This is used for housekeeping, to trigger defragmentation of the freed lists.
- size_t m_countFreedSinceLastCoalesce;
-
- PageReservation m_allocation;
-};
-
-size_t ExecutableAllocator::committedByteCount()
-{
- SpinLockHolder lockHolder(&spinlock);
- return committedBytesCount;
-}
-
-void ExecutableAllocator::initializePageSize()
-{
- ExecutableAllocator::pageSize = getpagesize();
-}
-
-static FixedVMPoolAllocator* allocator = 0;
-static size_t allocatedCount = 0;
-
-bool ExecutableAllocator::isValid() const
-{
- SpinLockHolder lock_holder(&spinlock);
- if (!allocator)
- allocator = new FixedVMPoolAllocator(JIT_ALLOCATOR_LARGE_ALLOC_SIZE, VM_POOL_SIZE);
- return allocator->isValid();
-}
-
-bool ExecutableAllocator::underMemoryPressure()
-{
- // Technically we should take the spin lock here, but we don't care if we get stale data.
- // This is only really a heuristic anyway.
- return allocatedCount > (VM_POOL_SIZE / 2);
-}
-
-ExecutablePool::Allocation ExecutablePool::systemAlloc(size_t size)
-{
- SpinLockHolder lock_holder(&spinlock);
- ASSERT(allocator);
- allocatedCount += size;
- return allocator->alloc(size);
-}
-
-void ExecutablePool::systemRelease(ExecutablePool::Allocation& allocation)
-{
- SpinLockHolder lock_holder(&spinlock);
- ASSERT(allocator);
- allocatedCount -= allocation.size();
- allocator->free(allocation);
-}
-
-}
-
-
-#endif // ENABLE(EXECUTABLE_ALLOCATOR_FIXED)
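The PageReservation pattern underneath the pool (reserve a large address range up front, then commit and decommit pages inside it on demand) can be sketched with raw POSIX calls. Names and flags here are illustrative, not WTF's implementation:

    #include <sys/mman.h>
    #include <cstddef>

    // Reserve address space without backing memory: PROT_NONE pages are cheap.
    void* reserve(size_t size)
    {
        void* base = mmap(0, size, PROT_NONE, MAP_PRIVATE | MAP_ANON, -1, 0);
        return base == MAP_FAILED ? 0 : base;
    }

    // Commit a sub-range for use (the pool's reuse()).
    void commit(void* position, size_t size)
    {
        mprotect(position, size, PROT_READ | PROT_WRITE);
    }

    // Decommit: drop the backing pages but keep the addresses reserved (release()).
    void decommit(void* position, size_t size)
    {
        mprotect(position, size, PROT_NONE);
        madvise(position, size, MADV_DONTNEED);
    }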
diff --git a/JavaScriptCore/jit/JIT.cpp b/JavaScriptCore/jit/JIT.cpp
deleted file mode 100644
index 01401a7..0000000
--- a/JavaScriptCore/jit/JIT.cpp
+++ /dev/null
@@ -1,670 +0,0 @@
-/*
- * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-
-#if ENABLE(JIT)
-#include "JIT.h"
-
-// This probably does not belong here; adding here for now as a quick Windows build fix.
-#if ENABLE(ASSEMBLER) && CPU(X86) && !OS(MAC_OS_X)
-#include "MacroAssembler.h"
-JSC::MacroAssemblerX86Common::SSE2CheckState JSC::MacroAssemblerX86Common::s_sse2CheckState = NotCheckedSSE2;
-#endif
-
-#include "CodeBlock.h"
-#include "Interpreter.h"
-#include "JITInlineMethods.h"
-#include "JITStubCall.h"
-#include "JSArray.h"
-#include "JSFunction.h"
-#include "LinkBuffer.h"
-#include "RepatchBuffer.h"
-#include "ResultType.h"
-#include "SamplingTool.h"
-
-#ifndef NDEBUG
-#include <stdio.h>
-#endif
-
-using namespace std;
-
-namespace JSC {
-
-void ctiPatchNearCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction)
-{
- RepatchBuffer repatchBuffer(codeblock);
- repatchBuffer.relinkNearCallerToTrampoline(returnAddress, newCalleeFunction);
-}
-
-void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction)
-{
- RepatchBuffer repatchBuffer(codeblock);
- repatchBuffer.relinkCallerToTrampoline(returnAddress, newCalleeFunction);
-}
-
-void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, FunctionPtr newCalleeFunction)
-{
- RepatchBuffer repatchBuffer(codeblock);
- repatchBuffer.relinkCallerToFunction(returnAddress, newCalleeFunction);
-}
-
-JIT::JIT(JSGlobalData* globalData, CodeBlock* codeBlock, void* linkerOffset)
- : m_interpreter(globalData->interpreter)
- , m_globalData(globalData)
- , m_codeBlock(codeBlock)
- , m_labels(codeBlock ? codeBlock->instructions().size() : 0)
- , m_propertyAccessCompilationInfo(codeBlock ? codeBlock->numberOfStructureStubInfos() : 0)
- , m_callStructureStubCompilationInfo(codeBlock ? codeBlock->numberOfCallLinkInfos() : 0)
- , m_bytecodeOffset((unsigned)-1)
-#if USE(JSVALUE32_64)
- , m_jumpTargetIndex(0)
- , m_mappedBytecodeOffset((unsigned)-1)
- , m_mappedVirtualRegisterIndex((unsigned)-1)
- , m_mappedTag((RegisterID)-1)
- , m_mappedPayload((RegisterID)-1)
-#else
- , m_lastResultBytecodeRegister(std::numeric_limits<int>::max())
- , m_jumpTargetsPosition(0)
-#endif
- , m_linkerOffset(linkerOffset)
-{
-}
-
-#if USE(JSVALUE32_64)
-void JIT::emitTimeoutCheck()
-{
- Jump skipTimeout = branchSub32(NonZero, Imm32(1), timeoutCheckRegister);
- JITStubCall stubCall(this, cti_timeout_check);
- stubCall.addArgument(regT1, regT0); // save last result registers.
- stubCall.call(timeoutCheckRegister);
- stubCall.getArgument(0, regT1, regT0); // reload last result registers.
- skipTimeout.link(this);
-}
-#else
-void JIT::emitTimeoutCheck()
-{
- Jump skipTimeout = branchSub32(NonZero, Imm32(1), timeoutCheckRegister);
- JITStubCall(this, cti_timeout_check).call(timeoutCheckRegister);
- skipTimeout.link(this);
-
- killLastResultRegister();
-}
-#endif
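Both variants compile down to "decrement a dedicated register; on underflow, call the slow path". A toy model of the countdown in ordinary C++ (an illustration of the shape, not JIT output):

    struct TimeoutChecker {
        int initialTicks;
        int ticksUntilCheck;
        explicit TimeoutChecker(int ticks) : initialTicks(ticks), ticksUntilCheck(ticks) {}

        bool shouldInterrupt()
        {
            if (--ticksUntilCheck > 0)
                return false;               // fast path: one decrement and a branch
            ticksUntilCheck = initialTicks; // slow path: reset, then really check
            return reallyCheckTimeout();
        }

        bool reallyCheckTimeout() { return false; } // placeholder for cti_timeout_check
    };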
-
-#define NEXT_OPCODE(name) \
- m_bytecodeOffset += OPCODE_LENGTH(name); \
- break;
-
-#if USE(JSVALUE32_64)
-#define DEFINE_BINARY_OP(name) \
- case name: { \
- JITStubCall stubCall(this, cti_##name); \
- stubCall.addArgument(currentInstruction[2].u.operand); \
- stubCall.addArgument(currentInstruction[3].u.operand); \
- stubCall.call(currentInstruction[1].u.operand); \
- NEXT_OPCODE(name); \
- }
-
-#define DEFINE_UNARY_OP(name) \
- case name: { \
- JITStubCall stubCall(this, cti_##name); \
- stubCall.addArgument(currentInstruction[2].u.operand); \
- stubCall.call(currentInstruction[1].u.operand); \
- NEXT_OPCODE(name); \
- }
-
-#else // USE(JSVALUE32_64)
-
-#define DEFINE_BINARY_OP(name) \
- case name: { \
- JITStubCall stubCall(this, cti_##name); \
- stubCall.addArgument(currentInstruction[2].u.operand, regT2); \
- stubCall.addArgument(currentInstruction[3].u.operand, regT2); \
- stubCall.call(currentInstruction[1].u.operand); \
- NEXT_OPCODE(name); \
- }
-
-#define DEFINE_UNARY_OP(name) \
- case name: { \
- JITStubCall stubCall(this, cti_##name); \
- stubCall.addArgument(currentInstruction[2].u.operand, regT2); \
- stubCall.call(currentInstruction[1].u.operand); \
- NEXT_OPCODE(name); \
- }
-#endif // USE(JSVALUE32_64)
-
-#define DEFINE_OP(name) \
- case name: { \
- emit_##name(currentInstruction); \
- NEXT_OPCODE(name); \
- }
-
-#define DEFINE_SLOWCASE_OP(name) \
- case name: { \
- emitSlow_##name(currentInstruction, iter); \
- NEXT_OPCODE(name); \
- }
-
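The DEFINE_* macros stamp out one switch case per opcode, keeping the big dispatch in privateCompileMainPass() declarative. A toy model of the pattern (the opcodes and emitters here are hypothetical):

    #include <cstdio>

    enum OpcodeID { op_example_add, op_example_end };

    static void emit_op_example_add() { std::printf("emit add\n"); }
    static void emit_op_example_end() { std::printf("emit end\n"); }

    #define DEFINE_EXAMPLE_OP(name) \
        case name: { \
            emit_##name(); \
            break; \
        }

    static void compileOne(OpcodeID opcode)
    {
        switch (opcode) {
            DEFINE_EXAMPLE_OP(op_example_add)
            DEFINE_EXAMPLE_OP(op_example_end)
        }
    }

    int main()
    {
        compileOne(op_example_add); // prints "emit add"
        return 0;
    }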
-void JIT::privateCompileMainPass()
-{
- Instruction* instructionsBegin = m_codeBlock->instructions().begin();
- unsigned instructionCount = m_codeBlock->instructions().size();
-
- m_propertyAccessInstructionIndex = 0;
- m_globalResolveInfoIndex = 0;
- m_callLinkInfoIndex = 0;
-
- for (m_bytecodeOffset = 0; m_bytecodeOffset < instructionCount; ) {
- Instruction* currentInstruction = instructionsBegin + m_bytecodeOffset;
- ASSERT_WITH_MESSAGE(m_interpreter->isOpcode(currentInstruction->u.opcode), "privateCompileMainPass gone bad @ %d", m_bytecodeOffset);
-
-#if ENABLE(OPCODE_SAMPLING)
- if (m_bytecodeOffset > 0) // Avoid the overhead of sampling op_enter twice.
- sampleInstruction(currentInstruction);
-#endif
-
-#if USE(JSVALUE64)
- if (m_labels[m_bytecodeOffset].isUsed())
- killLastResultRegister();
-#endif
-
- m_labels[m_bytecodeOffset] = label();
-
- switch (m_interpreter->getOpcodeID(currentInstruction->u.opcode)) {
- DEFINE_BINARY_OP(op_del_by_val)
- DEFINE_BINARY_OP(op_in)
- DEFINE_BINARY_OP(op_less)
- DEFINE_BINARY_OP(op_lesseq)
- DEFINE_UNARY_OP(op_is_boolean)
- DEFINE_UNARY_OP(op_is_function)
- DEFINE_UNARY_OP(op_is_number)
- DEFINE_UNARY_OP(op_is_object)
- DEFINE_UNARY_OP(op_is_string)
- DEFINE_UNARY_OP(op_is_undefined)
-#if USE(JSVALUE64)
- DEFINE_UNARY_OP(op_negate)
-#endif
- DEFINE_UNARY_OP(op_typeof)
-
- DEFINE_OP(op_add)
- DEFINE_OP(op_bitand)
- DEFINE_OP(op_bitnot)
- DEFINE_OP(op_bitor)
- DEFINE_OP(op_bitxor)
- DEFINE_OP(op_call)
- DEFINE_OP(op_call_eval)
- DEFINE_OP(op_call_varargs)
- DEFINE_OP(op_catch)
- DEFINE_OP(op_construct)
- DEFINE_OP(op_get_callee)
- DEFINE_OP(op_create_this)
- DEFINE_OP(op_convert_this)
- DEFINE_OP(op_convert_this_strict)
- DEFINE_OP(op_init_lazy_reg)
- DEFINE_OP(op_create_arguments)
- DEFINE_OP(op_debug)
- DEFINE_OP(op_del_by_id)
- DEFINE_OP(op_div)
- DEFINE_OP(op_end)
- DEFINE_OP(op_enter)
- DEFINE_OP(op_create_activation)
- DEFINE_OP(op_eq)
- DEFINE_OP(op_eq_null)
- DEFINE_OP(op_get_by_id)
- DEFINE_OP(op_get_arguments_length)
- DEFINE_OP(op_get_by_val)
- DEFINE_OP(op_get_argument_by_val)
- DEFINE_OP(op_get_by_pname)
- DEFINE_OP(op_get_global_var)
- DEFINE_OP(op_get_pnames)
- DEFINE_OP(op_get_scoped_var)
- DEFINE_OP(op_check_has_instance)
- DEFINE_OP(op_instanceof)
- DEFINE_OP(op_jeq_null)
- DEFINE_OP(op_jfalse)
- DEFINE_OP(op_jmp)
- DEFINE_OP(op_jmp_scopes)
- DEFINE_OP(op_jneq_null)
- DEFINE_OP(op_jneq_ptr)
- DEFINE_OP(op_jnless)
- DEFINE_OP(op_jless)
- DEFINE_OP(op_jlesseq)
- DEFINE_OP(op_jnlesseq)
- DEFINE_OP(op_jsr)
- DEFINE_OP(op_jtrue)
- DEFINE_OP(op_load_varargs)
- DEFINE_OP(op_loop)
- DEFINE_OP(op_loop_if_less)
- DEFINE_OP(op_loop_if_lesseq)
- DEFINE_OP(op_loop_if_true)
- DEFINE_OP(op_loop_if_false)
- DEFINE_OP(op_lshift)
- DEFINE_OP(op_method_check)
- DEFINE_OP(op_mod)
- DEFINE_OP(op_mov)
- DEFINE_OP(op_mul)
-#if USE(JSVALUE32_64)
- DEFINE_OP(op_negate)
-#endif
- DEFINE_OP(op_neq)
- DEFINE_OP(op_neq_null)
- DEFINE_OP(op_new_array)
- DEFINE_OP(op_new_func)
- DEFINE_OP(op_new_func_exp)
- DEFINE_OP(op_new_object)
- DEFINE_OP(op_new_regexp)
- DEFINE_OP(op_next_pname)
- DEFINE_OP(op_not)
- DEFINE_OP(op_nstricteq)
- DEFINE_OP(op_pop_scope)
- DEFINE_OP(op_post_dec)
- DEFINE_OP(op_post_inc)
- DEFINE_OP(op_pre_dec)
- DEFINE_OP(op_pre_inc)
- DEFINE_OP(op_profile_did_call)
- DEFINE_OP(op_profile_will_call)
- DEFINE_OP(op_push_new_scope)
- DEFINE_OP(op_push_scope)
- DEFINE_OP(op_put_by_id)
- DEFINE_OP(op_put_by_index)
- DEFINE_OP(op_put_by_val)
- DEFINE_OP(op_put_getter)
- DEFINE_OP(op_put_global_var)
- DEFINE_OP(op_put_scoped_var)
- DEFINE_OP(op_put_setter)
- DEFINE_OP(op_resolve)
- DEFINE_OP(op_resolve_base)
- DEFINE_OP(op_ensure_property_exists)
- DEFINE_OP(op_resolve_global)
- DEFINE_OP(op_resolve_global_dynamic)
- DEFINE_OP(op_resolve_skip)
- DEFINE_OP(op_resolve_with_base)
- DEFINE_OP(op_ret)
- DEFINE_OP(op_call_put_result)
- DEFINE_OP(op_ret_object_or_this)
- DEFINE_OP(op_rshift)
- DEFINE_OP(op_urshift)
- DEFINE_OP(op_sret)
- DEFINE_OP(op_strcat)
- DEFINE_OP(op_stricteq)
- DEFINE_OP(op_sub)
- DEFINE_OP(op_switch_char)
- DEFINE_OP(op_switch_imm)
- DEFINE_OP(op_switch_string)
- DEFINE_OP(op_tear_off_activation)
- DEFINE_OP(op_tear_off_arguments)
- DEFINE_OP(op_throw)
- DEFINE_OP(op_throw_reference_error)
- DEFINE_OP(op_throw_syntax_error)
- DEFINE_OP(op_to_jsnumber)
- DEFINE_OP(op_to_primitive)
-
- case op_get_array_length:
- case op_get_by_id_chain:
- case op_get_by_id_generic:
- case op_get_by_id_proto:
- case op_get_by_id_proto_list:
- case op_get_by_id_self:
- case op_get_by_id_self_list:
- case op_get_by_id_getter_chain:
- case op_get_by_id_getter_proto:
- case op_get_by_id_getter_proto_list:
- case op_get_by_id_getter_self:
- case op_get_by_id_getter_self_list:
- case op_get_by_id_custom_chain:
- case op_get_by_id_custom_proto:
- case op_get_by_id_custom_proto_list:
- case op_get_by_id_custom_self:
- case op_get_by_id_custom_self_list:
- case op_get_string_length:
- case op_put_by_id_generic:
- case op_put_by_id_replace:
- case op_put_by_id_transition:
- ASSERT_NOT_REACHED();
- }
- }
-
- ASSERT(m_propertyAccessInstructionIndex == m_codeBlock->numberOfStructureStubInfos());
- ASSERT(m_callLinkInfoIndex == m_codeBlock->numberOfCallLinkInfos());
-
-#ifndef NDEBUG
- // Reset this, in order to guard its use with ASSERTs.
- m_bytecodeOffset = (unsigned)-1;
-#endif
-}
-
-
-void JIT::privateCompileLinkPass()
-{
- unsigned jmpTableCount = m_jmpTable.size();
- for (unsigned i = 0; i < jmpTableCount; ++i)
- m_jmpTable[i].from.linkTo(m_labels[m_jmpTable[i].toBytecodeOffset], this);
- m_jmpTable.clear();
-}
-
-void JIT::privateCompileSlowCases()
-{
- Instruction* instructionsBegin = m_codeBlock->instructions().begin();
-
- m_propertyAccessInstructionIndex = 0;
- m_globalResolveInfoIndex = 0;
- m_callLinkInfoIndex = 0;
-
- for (Vector<SlowCaseEntry>::iterator iter = m_slowCases.begin(); iter != m_slowCases.end();) {
-#if USE(JSVALUE64)
- killLastResultRegister();
-#endif
-
- m_bytecodeOffset = iter->to;
-#ifndef NDEBUG
- unsigned firstTo = m_bytecodeOffset;
-#endif
- Instruction* currentInstruction = instructionsBegin + m_bytecodeOffset;
-
- switch (m_interpreter->getOpcodeID(currentInstruction->u.opcode)) {
- DEFINE_SLOWCASE_OP(op_add)
- DEFINE_SLOWCASE_OP(op_bitand)
- DEFINE_SLOWCASE_OP(op_bitnot)
- DEFINE_SLOWCASE_OP(op_bitor)
- DEFINE_SLOWCASE_OP(op_bitxor)
- DEFINE_SLOWCASE_OP(op_call)
- DEFINE_SLOWCASE_OP(op_call_eval)
- DEFINE_SLOWCASE_OP(op_call_varargs)
- DEFINE_SLOWCASE_OP(op_construct)
- DEFINE_SLOWCASE_OP(op_convert_this)
- DEFINE_SLOWCASE_OP(op_convert_this_strict)
- DEFINE_SLOWCASE_OP(op_div)
- DEFINE_SLOWCASE_OP(op_eq)
- DEFINE_SLOWCASE_OP(op_get_by_id)
- DEFINE_SLOWCASE_OP(op_get_arguments_length)
- DEFINE_SLOWCASE_OP(op_get_by_val)
- DEFINE_SLOWCASE_OP(op_get_argument_by_val)
- DEFINE_SLOWCASE_OP(op_get_by_pname)
- DEFINE_SLOWCASE_OP(op_check_has_instance)
- DEFINE_SLOWCASE_OP(op_instanceof)
- DEFINE_SLOWCASE_OP(op_jfalse)
- DEFINE_SLOWCASE_OP(op_jnless)
- DEFINE_SLOWCASE_OP(op_jless)
- DEFINE_SLOWCASE_OP(op_jlesseq)
- DEFINE_SLOWCASE_OP(op_jnlesseq)
- DEFINE_SLOWCASE_OP(op_jtrue)
- DEFINE_SLOWCASE_OP(op_load_varargs)
- DEFINE_SLOWCASE_OP(op_loop_if_less)
- DEFINE_SLOWCASE_OP(op_loop_if_lesseq)
- DEFINE_SLOWCASE_OP(op_loop_if_true)
- DEFINE_SLOWCASE_OP(op_loop_if_false)
- DEFINE_SLOWCASE_OP(op_lshift)
- DEFINE_SLOWCASE_OP(op_method_check)
- DEFINE_SLOWCASE_OP(op_mod)
- DEFINE_SLOWCASE_OP(op_mul)
-#if USE(JSVALUE32_64)
- DEFINE_SLOWCASE_OP(op_negate)
-#endif
- DEFINE_SLOWCASE_OP(op_neq)
- DEFINE_SLOWCASE_OP(op_not)
- DEFINE_SLOWCASE_OP(op_nstricteq)
- DEFINE_SLOWCASE_OP(op_post_dec)
- DEFINE_SLOWCASE_OP(op_post_inc)
- DEFINE_SLOWCASE_OP(op_pre_dec)
- DEFINE_SLOWCASE_OP(op_pre_inc)
- DEFINE_SLOWCASE_OP(op_put_by_id)
- DEFINE_SLOWCASE_OP(op_put_by_val)
- DEFINE_SLOWCASE_OP(op_resolve_global)
- DEFINE_SLOWCASE_OP(op_resolve_global_dynamic)
- DEFINE_SLOWCASE_OP(op_rshift)
- DEFINE_SLOWCASE_OP(op_urshift)
- DEFINE_SLOWCASE_OP(op_stricteq)
- DEFINE_SLOWCASE_OP(op_sub)
- DEFINE_SLOWCASE_OP(op_to_jsnumber)
- DEFINE_SLOWCASE_OP(op_to_primitive)
- default:
- ASSERT_NOT_REACHED();
- }
-
- ASSERT_WITH_MESSAGE(iter == m_slowCases.end() || firstTo != iter->to, "Not enough jumps linked in slow case codegen.");
- ASSERT_WITH_MESSAGE(firstTo == (iter - 1)->to, "Too many jumps linked in slow case codegen.");
-
- emitJumpSlowToHot(jump(), 0);
- }
-
-#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
- ASSERT(m_propertyAccessInstructionIndex == m_codeBlock->numberOfStructureStubInfos());
-#endif
- ASSERT(m_callLinkInfoIndex == m_codeBlock->numberOfCallLinkInfos());
-
-#ifndef NDEBUG
- // Reset this, in order to guard its use with ASSERTs.
- m_bytecodeOffset = (unsigned)-1;
-#endif
-}
-
-JITCode JIT::privateCompile(CodePtr* functionEntryArityCheck)
-{
- // Could use a pop_m, but would need to offset the following instruction if so.
- preserveReturnAddressAfterCall(regT2);
- emitPutToCallFrameHeader(regT2, RegisterFile::ReturnPC);
-
- Label beginLabel(this);
-
- sampleCodeBlock(m_codeBlock);
-#if ENABLE(OPCODE_SAMPLING)
- sampleInstruction(m_codeBlock->instructions().begin());
-#endif
-
- Jump registerFileCheck;
- if (m_codeBlock->codeType() == FunctionCode) {
- // In the case of a fast linked call, we do not set this up in the caller.
- emitPutImmediateToCallFrameHeader(m_codeBlock, RegisterFile::CodeBlock);
-
- addPtr(Imm32(m_codeBlock->m_numCalleeRegisters * sizeof(Register)), callFrameRegister, regT1);
- registerFileCheck = branchPtr(Below, AbsoluteAddress(&m_globalData->interpreter->registerFile().m_end), regT1);
- }
-
- Label functionBody = label();
-
- privateCompileMainPass();
- privateCompileLinkPass();
- privateCompileSlowCases();
-
- Label arityCheck;
- Call callArityCheck;
- if (m_codeBlock->codeType() == FunctionCode) {
- registerFileCheck.link(this);
- m_bytecodeOffset = 0;
- JITStubCall(this, cti_register_file_check).call();
-#ifndef NDEBUG
- m_bytecodeOffset = (unsigned)-1; // Reset this, in order to guard its use with ASSERTs.
-#endif
- jump(functionBody);
-
- arityCheck = label();
- preserveReturnAddressAfterCall(regT2);
- emitPutToCallFrameHeader(regT2, RegisterFile::ReturnPC);
- branch32(Equal, regT1, Imm32(m_codeBlock->m_numParameters)).linkTo(beginLabel, this);
- restoreArgumentReference();
- callArityCheck = call();
- move(regT0, callFrameRegister);
- jump(beginLabel);
- }
-
- ASSERT(m_jmpTable.isEmpty());
-
- LinkBuffer patchBuffer(this, m_globalData->executableAllocator.poolForSize(m_assembler.size()), m_linkerOffset);
-
- // Translate vPC offsets into addresses in JIT generated code, for switch tables.
- for (unsigned i = 0; i < m_switches.size(); ++i) {
- SwitchRecord record = m_switches[i];
- unsigned bytecodeOffset = record.bytecodeOffset;
-
- if (record.type != SwitchRecord::String) {
- ASSERT(record.type == SwitchRecord::Immediate || record.type == SwitchRecord::Character);
- ASSERT(record.jumpTable.simpleJumpTable->branchOffsets.size() == record.jumpTable.simpleJumpTable->ctiOffsets.size());
-
- record.jumpTable.simpleJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeOffset + record.defaultOffset]);
-
- for (unsigned j = 0; j < record.jumpTable.simpleJumpTable->branchOffsets.size(); ++j) {
- unsigned offset = record.jumpTable.simpleJumpTable->branchOffsets[j];
- record.jumpTable.simpleJumpTable->ctiOffsets[j] = offset ? patchBuffer.locationOf(m_labels[bytecodeOffset + offset]) : record.jumpTable.simpleJumpTable->ctiDefault;
- }
- } else {
- ASSERT(record.type == SwitchRecord::String);
-
- record.jumpTable.stringJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeOffset + record.defaultOffset]);
-
- StringJumpTable::StringOffsetTable::iterator end = record.jumpTable.stringJumpTable->offsetTable.end();
- for (StringJumpTable::StringOffsetTable::iterator it = record.jumpTable.stringJumpTable->offsetTable.begin(); it != end; ++it) {
- unsigned offset = it->second.branchOffset;
- it->second.ctiOffset = offset ? patchBuffer.locationOf(m_labels[bytecodeOffset + offset]) : record.jumpTable.stringJumpTable->ctiDefault;
- }
- }
- }
-
- for (size_t i = 0; i < m_codeBlock->numberOfExceptionHandlers(); ++i) {
- HandlerInfo& handler = m_codeBlock->exceptionHandler(i);
- handler.nativeCode = patchBuffer.locationOf(m_labels[handler.target]);
- }
-
- for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
- if (iter->to)
- patchBuffer.link(iter->from, FunctionPtr(iter->to));
- }
-
- if (m_codeBlock->needsCallReturnIndices()) {
- m_codeBlock->callReturnIndexVector().reserveCapacity(m_calls.size());
- for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter)
- m_codeBlock->callReturnIndexVector().append(CallReturnOffsetToBytecodeOffset(patchBuffer.returnAddressOffset(iter->from), iter->bytecodeOffset));
- }
-
- // Link absolute addresses for jsr
- for (Vector<JSRInfo>::iterator iter = m_jsrSites.begin(); iter != m_jsrSites.end(); ++iter)
- patchBuffer.patch(iter->storeLocation, patchBuffer.locationOf(iter->target).executableAddress());
-
-#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
- for (unsigned i = 0; i < m_codeBlock->numberOfStructureStubInfos(); ++i) {
- StructureStubInfo& info = m_codeBlock->structureStubInfo(i);
- info.callReturnLocation = patchBuffer.locationOf(m_propertyAccessCompilationInfo[i].callReturnLocation);
- info.hotPathBegin = patchBuffer.locationOf(m_propertyAccessCompilationInfo[i].hotPathBegin);
- }
-#endif
-#if ENABLE(JIT_OPTIMIZE_CALL)
- for (unsigned i = 0; i < m_codeBlock->numberOfCallLinkInfos(); ++i) {
- CallLinkInfo& info = m_codeBlock->callLinkInfo(i);
- info.ownerCodeBlock = m_codeBlock;
- info.callReturnLocation = patchBuffer.locationOfNearCall(m_callStructureStubCompilationInfo[i].callReturnLocation);
- info.hotPathBegin = patchBuffer.locationOf(m_callStructureStubCompilationInfo[i].hotPathBegin);
- info.hotPathOther = patchBuffer.locationOfNearCall(m_callStructureStubCompilationInfo[i].hotPathOther);
- }
-#endif
- unsigned methodCallCount = m_methodCallCompilationInfo.size();
- m_codeBlock->addMethodCallLinkInfos(methodCallCount);
- for (unsigned i = 0; i < methodCallCount; ++i) {
- MethodCallLinkInfo& info = m_codeBlock->methodCallLinkInfo(i);
- info.structureLabel = patchBuffer.locationOf(m_methodCallCompilationInfo[i].structureToCompare);
- info.callReturnLocation = m_codeBlock->structureStubInfo(m_methodCallCompilationInfo[i].propertyAccessIndex).callReturnLocation;
- }
-
- if (m_codeBlock->codeType() == FunctionCode && functionEntryArityCheck) {
- patchBuffer.link(callArityCheck, FunctionPtr(m_codeBlock->m_isConstructor ? cti_op_construct_arityCheck : cti_op_call_arityCheck));
- *functionEntryArityCheck = patchBuffer.locationOf(arityCheck);
- }
-
- return patchBuffer.finalizeCode();
-}
-
-#if USE(JSVALUE64)
-void JIT::emitGetVariableObjectRegister(RegisterID variableObject, int index, RegisterID dst)
-{
- loadPtr(Address(variableObject, OBJECT_OFFSETOF(JSVariableObject, d)), dst);
- loadPtr(Address(dst, OBJECT_OFFSETOF(JSVariableObject::JSVariableObjectData, registers)), dst);
- loadPtr(Address(dst, index * sizeof(Register)), dst);
-}
-
-void JIT::emitPutVariableObjectRegister(RegisterID src, RegisterID variableObject, int index)
-{
- loadPtr(Address(variableObject, OBJECT_OFFSETOF(JSVariableObject, d)), variableObject);
- loadPtr(Address(variableObject, OBJECT_OFFSETOF(JSVariableObject::JSVariableObjectData, registers)), variableObject);
- storePtr(src, Address(variableObject, index * sizeof(Register)));
-}
-#endif
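The two loadPtr() calls walk variableObject->d->registers before indexing. In plain C++ terms (the struct layout here is a sketch inferred from the OBJECT_OFFSETOF uses above, not the real class definitions):

    struct JSVariableObjectData { void** registers; };
    struct JSVariableObject { JSVariableObjectData* d; };

    // Mirrors emitGetVariableObjectRegister: load d, load registers, load the slot.
    void* getVariableObjectRegister(JSVariableObject* variableObject, int index)
    {
        return variableObject->d->registers[index];
    }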
-
-#if ENABLE(JIT_OPTIMIZE_CALL)
-void JIT::unlinkCallOrConstruct(CallLinkInfo* callLinkInfo)
-{
- // When the JSFunction is deleted the pointer embedded in the instruction stream will no longer be valid
- // (and, if a new JSFunction happened to be constructed at the same location, we could get a false positive
- // match). Reset the check so it no longer matches.
- RepatchBuffer repatchBuffer(callLinkInfo->ownerCodeBlock);
-#if USE(JSVALUE32_64)
- repatchBuffer.repatch(callLinkInfo->hotPathBegin, 0);
-#else
- repatchBuffer.repatch(callLinkInfo->hotPathBegin, JSValue::encode(JSValue()));
-#endif
-}
-
-void JIT::linkCall(JSFunction* callee, CodeBlock* callerCodeBlock, CodeBlock* calleeCodeBlock, JIT::CodePtr code, CallLinkInfo* callLinkInfo, int callerArgCount, JSGlobalData* globalData)
-{
- RepatchBuffer repatchBuffer(callerCodeBlock);
-
- // Currently we only link calls with the exact number of arguments.
- // If this is a native call, calleeCodeBlock is null, so the number of parameters is unimportant.
- if (!calleeCodeBlock || (callerArgCount == calleeCodeBlock->m_numParameters)) {
- ASSERT(!callLinkInfo->isLinked());
-
- if (calleeCodeBlock)
- calleeCodeBlock->addCaller(callLinkInfo);
-
- repatchBuffer.repatch(callLinkInfo->hotPathBegin, callee);
- repatchBuffer.relink(callLinkInfo->hotPathOther, code);
- }
-
- // Patch the call so we do not continue to try to link.
- repatchBuffer.relink(callLinkInfo->callReturnLocation, globalData->jitStubs->ctiVirtualCall());
-}
-
-void JIT::linkConstruct(JSFunction* callee, CodeBlock* callerCodeBlock, CodeBlock* calleeCodeBlock, JIT::CodePtr code, CallLinkInfo* callLinkInfo, int callerArgCount, JSGlobalData* globalData)
-{
- RepatchBuffer repatchBuffer(callerCodeBlock);
-
- // Currently we only link calls with the exact number of arguments.
- // If this is a native call, calleeCodeBlock is null, so the number of parameters is unimportant.
- if (!calleeCodeBlock || (callerArgCount == calleeCodeBlock->m_numParameters)) {
- ASSERT(!callLinkInfo->isLinked());
-
- if (calleeCodeBlock)
- calleeCodeBlock->addCaller(callLinkInfo);
-
- repatchBuffer.repatch(callLinkInfo->hotPathBegin, callee);
- repatchBuffer.relink(callLinkInfo->hotPathOther, code);
- }
-
-    // Patch the call so we do not continue to try to link.
- repatchBuffer.relink(callLinkInfo->callReturnLocation, globalData->jitStubs->ctiVirtualConstruct());
-}
-#endif // ENABLE(JIT_OPTIMIZE_CALL)
-
-} // namespace JSC
-
-#endif // ENABLE(JIT)
diff --git a/JavaScriptCore/jit/JIT.h b/JavaScriptCore/jit/JIT.h
deleted file mode 100644
index 907a774..0000000
--- a/JavaScriptCore/jit/JIT.h
+++ /dev/null
@@ -1,1046 +0,0 @@
-/*
- * Copyright (C) 2008 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef JIT_h
-#define JIT_h
-
-#if ENABLE(JIT)
-
-// We've run into some problems where changing the size of the class JIT leads to
-// performance fluctuations. Try forcing alignment in an attempt to stabilize this.
-#if COMPILER(GCC)
-#define JIT_CLASS_ALIGNMENT __attribute__ ((aligned (32)))
-#else
-#define JIT_CLASS_ALIGNMENT
-#endif
-
-#define ASSERT_JIT_OFFSET(actual, expected) ASSERT_WITH_MESSAGE(actual == expected, "JIT Offset \"%s\" should be %d, not %d.\n", #expected, static_cast<int>(actual), static_cast<int>(expected));
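-// Note: the failure message prints the measured offset first - when it fires, the named
-// patch-offset constant presumably needs updating to the printed "should be" value.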
-
-#include "CodeBlock.h"
-#include "Interpreter.h"
-#include "JSInterfaceJIT.h"
-#include "Opcode.h"
-#include "Profiler.h"
-#include <bytecode/SamplingTool.h>
-
-namespace JSC {
-
- class CodeBlock;
- class JIT;
- class JSPropertyNameIterator;
- class Interpreter;
- class Register;
- class RegisterFile;
- class ScopeChainNode;
- class StructureChain;
-
- struct CallLinkInfo;
- struct Instruction;
- struct OperandTypes;
- struct PolymorphicAccessStructureList;
- struct SimpleJumpTable;
- struct StringJumpTable;
- struct StructureStubInfo;
-
- struct CallRecord {
- MacroAssembler::Call from;
- unsigned bytecodeOffset;
- void* to;
-
- CallRecord()
- {
- }
-
- CallRecord(MacroAssembler::Call from, unsigned bytecodeOffset, void* to = 0)
- : from(from)
- , bytecodeOffset(bytecodeOffset)
- , to(to)
- {
- }
- };
-
- struct JumpTable {
- MacroAssembler::Jump from;
- unsigned toBytecodeOffset;
-
- JumpTable(MacroAssembler::Jump f, unsigned t)
- : from(f)
- , toBytecodeOffset(t)
- {
- }
- };
-
- struct SlowCaseEntry {
- MacroAssembler::Jump from;
- unsigned to;
- unsigned hint;
-
- SlowCaseEntry(MacroAssembler::Jump f, unsigned t, unsigned h = 0)
- : from(f)
- , to(t)
- , hint(h)
- {
- }
- };
-
- struct SwitchRecord {
- enum Type {
- Immediate,
- Character,
- String
- };
-
- Type type;
-
- union {
- SimpleJumpTable* simpleJumpTable;
- StringJumpTable* stringJumpTable;
- } jumpTable;
-
- unsigned bytecodeOffset;
- unsigned defaultOffset;
-
- SwitchRecord(SimpleJumpTable* jumpTable, unsigned bytecodeOffset, unsigned defaultOffset, Type type)
- : type(type)
- , bytecodeOffset(bytecodeOffset)
- , defaultOffset(defaultOffset)
- {
- this->jumpTable.simpleJumpTable = jumpTable;
- }
-
- SwitchRecord(StringJumpTable* jumpTable, unsigned bytecodeOffset, unsigned defaultOffset)
- : type(String)
- , bytecodeOffset(bytecodeOffset)
- , defaultOffset(defaultOffset)
- {
- this->jumpTable.stringJumpTable = jumpTable;
- }
- };
-
- struct PropertyStubCompilationInfo {
- MacroAssembler::Call callReturnLocation;
- MacroAssembler::Label hotPathBegin;
- };
-
- struct StructureStubCompilationInfo {
- MacroAssembler::DataLabelPtr hotPathBegin;
- MacroAssembler::Call hotPathOther;
- MacroAssembler::Call callReturnLocation;
- };
-
- struct MethodCallCompilationInfo {
- MethodCallCompilationInfo(unsigned propertyAccessIndex)
- : propertyAccessIndex(propertyAccessIndex)
- {
- }
-
- MacroAssembler::DataLabelPtr structureToCompare;
- unsigned propertyAccessIndex;
- };
-
-    // Near calls can only be patched to other JIT code; regular calls can be patched to JIT code or relinked to stub functions.
- void ctiPatchNearCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction);
- void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction);
- void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, FunctionPtr newCalleeFunction);
-
- class JIT : private JSInterfaceJIT {
- friend class JITStubCall;
-
- using MacroAssembler::Jump;
- using MacroAssembler::JumpList;
- using MacroAssembler::Label;
-
- static const int patchGetByIdDefaultStructure = -1;
-        // Magic number - the initial offset must not be representable as a signed 8-bit value, or the X86Assembler
-        // will compress the displacement and we may not be able to fit a patched offset.
- static const int patchGetByIdDefaultOffset = 256;
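-        // For example, a load from [base + 256] must be assembled with a 32-bit
-        // displacement, leaving room to patch in any real property offset later.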
-
- public:
- static JITCode compile(JSGlobalData* globalData, CodeBlock* codeBlock, CodePtr* functionEntryArityCheck = 0, void* offsetBase = 0)
- {
- return JIT(globalData, codeBlock, offsetBase).privateCompile(functionEntryArityCheck);
- }
-
- static void compileGetByIdProto(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress)
- {
- JIT jit(globalData, codeBlock);
- jit.privateCompileGetByIdProto(stubInfo, structure, prototypeStructure, ident, slot, cachedOffset, returnAddress, callFrame);
- }
-
- static void compileGetByIdSelfList(JSGlobalData* globalData, CodeBlock* codeBlock, StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset)
- {
- JIT jit(globalData, codeBlock);
- jit.privateCompileGetByIdSelfList(stubInfo, polymorphicStructures, currentIndex, structure, ident, slot, cachedOffset);
- }
- static void compileGetByIdProtoList(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructureList, int currentIndex, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset)
- {
- JIT jit(globalData, codeBlock);
- jit.privateCompileGetByIdProtoList(stubInfo, prototypeStructureList, currentIndex, structure, prototypeStructure, ident, slot, cachedOffset, callFrame);
- }
- static void compileGetByIdChainList(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructureList, int currentIndex, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset)
- {
- JIT jit(globalData, codeBlock);
- jit.privateCompileGetByIdChainList(stubInfo, prototypeStructureList, currentIndex, structure, chain, count, ident, slot, cachedOffset, callFrame);
- }
-
- static void compileGetByIdChain(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress)
- {
- JIT jit(globalData, codeBlock);
- jit.privateCompileGetByIdChain(stubInfo, structure, chain, count, ident, slot, cachedOffset, returnAddress, callFrame);
- }
-
- static void compilePutByIdTransition(JSGlobalData* globalData, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress, bool direct)
- {
- JIT jit(globalData, codeBlock);
- jit.privateCompilePutByIdTransition(stubInfo, oldStructure, newStructure, cachedOffset, chain, returnAddress, direct);
- }
-
- static void compileCTIMachineTrampolines(JSGlobalData* globalData, RefPtr<ExecutablePool>* executablePool, TrampolineStructure *trampolines)
- {
- if (!globalData->canUseJIT())
- return;
- JIT jit(globalData, 0, 0);
- jit.privateCompileCTIMachineTrampolines(executablePool, globalData, trampolines);
- }
-
- static CodePtr compileCTINativeCall(JSGlobalData* globalData, PassRefPtr<ExecutablePool> executablePool, NativeFunction func)
- {
- if (!globalData->canUseJIT())
- return CodePtr();
- JIT jit(globalData, 0, 0);
- return jit.privateCompileCTINativeCall(executablePool, globalData, func);
- }
-
- static void patchGetByIdSelf(CodeBlock* codeblock, StructureStubInfo*, Structure*, size_t cachedOffset, ReturnAddressPtr returnAddress);
- static void patchPutByIdReplace(CodeBlock* codeblock, StructureStubInfo*, Structure*, size_t cachedOffset, ReturnAddressPtr returnAddress, bool direct);
- static void patchMethodCallProto(CodeBlock* codeblock, MethodCallLinkInfo&, JSFunction*, Structure*, JSObject*, ReturnAddressPtr);
-
- static void compilePatchGetArrayLength(JSGlobalData* globalData, CodeBlock* codeBlock, ReturnAddressPtr returnAddress)
- {
- JIT jit(globalData, codeBlock);
- return jit.privateCompilePatchGetArrayLength(returnAddress);
- }
-
- static void linkCall(JSFunction* callee, CodeBlock* callerCodeBlock, CodeBlock* calleeCodeBlock, CodePtr, CallLinkInfo*, int callerArgCount, JSGlobalData*);
- static void linkConstruct(JSFunction* callee, CodeBlock* callerCodeBlock, CodeBlock* calleeCodeBlock, CodePtr, CallLinkInfo*, int callerArgCount, JSGlobalData*);
- static void unlinkCallOrConstruct(CallLinkInfo*);
-
- private:
- struct JSRInfo {
- DataLabelPtr storeLocation;
- Label target;
-
- JSRInfo(DataLabelPtr storeLocation, Label targetLocation)
- : storeLocation(storeLocation)
- , target(targetLocation)
- {
- }
- };
-
- JIT(JSGlobalData*, CodeBlock* = 0, void* = 0);
-
- void privateCompileMainPass();
- void privateCompileLinkPass();
- void privateCompileSlowCases();
- JITCode privateCompile(CodePtr* functionEntryArityCheck);
- void privateCompileGetByIdProto(StructureStubInfo*, Structure*, Structure* prototypeStructure, const Identifier&, const PropertySlot&, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame);
- void privateCompileGetByIdSelfList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, const Identifier&, const PropertySlot&, size_t cachedOffset);
- void privateCompileGetByIdProtoList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, Structure* prototypeStructure, const Identifier&, const PropertySlot&, size_t cachedOffset, CallFrame* callFrame);
- void privateCompileGetByIdChainList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, StructureChain* chain, size_t count, const Identifier&, const PropertySlot&, size_t cachedOffset, CallFrame* callFrame);
- void privateCompileGetByIdChain(StructureStubInfo*, Structure*, StructureChain*, size_t count, const Identifier&, const PropertySlot&, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame);
- void privateCompilePutByIdTransition(StructureStubInfo*, Structure*, Structure*, size_t cachedOffset, StructureChain*, ReturnAddressPtr returnAddress, bool direct);
-
- void privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executablePool, JSGlobalData* data, TrampolineStructure *trampolines);
- Label privateCompileCTINativeCall(JSGlobalData*, bool isConstruct = false);
- CodePtr privateCompileCTINativeCall(PassRefPtr<ExecutablePool> executablePool, JSGlobalData* data, NativeFunction func);
- void privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress);
-
- void addSlowCase(Jump);
- void addSlowCase(JumpList);
- void addJump(Jump, int);
- void emitJumpSlowToHot(Jump, int);
-
- void compileOpCall(OpcodeID, Instruction* instruction, unsigned callLinkInfoIndex);
- void compileOpCallVarargs(Instruction* instruction);
- void compileOpCallInitializeCallFrame();
- void compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter, unsigned callLinkInfoIndex, OpcodeID opcodeID);
- void compileOpCallVarargsSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter);
-
- enum CompileOpStrictEqType { OpStrictEq, OpNStrictEq };
- void compileOpStrictEq(Instruction* instruction, CompileOpStrictEqType type);
- bool isOperandConstantImmediateDouble(unsigned src);
-
- void emitLoadDouble(unsigned index, FPRegisterID value);
- void emitLoadInt32ToDouble(unsigned index, FPRegisterID value);
-
- void testPrototype(JSValue, JumpList& failureCases);
-
-#if USE(JSVALUE32_64)
- bool getOperandConstantImmediateInt(unsigned op1, unsigned op2, unsigned& op, int32_t& constant);
-
- void emitLoadTag(unsigned index, RegisterID tag);
- void emitLoadPayload(unsigned index, RegisterID payload);
-
- void emitLoad(const JSValue& v, RegisterID tag, RegisterID payload);
- void emitLoad(unsigned index, RegisterID tag, RegisterID payload, RegisterID base = callFrameRegister);
- void emitLoad2(unsigned index1, RegisterID tag1, RegisterID payload1, unsigned index2, RegisterID tag2, RegisterID payload2);
-
- void emitStore(unsigned index, RegisterID tag, RegisterID payload, RegisterID base = callFrameRegister);
- void emitStore(unsigned index, const JSValue constant, RegisterID base = callFrameRegister);
- void emitStoreInt32(unsigned index, RegisterID payload, bool indexIsInt32 = false);
- void emitStoreInt32(unsigned index, Imm32 payload, bool indexIsInt32 = false);
- void emitStoreCell(unsigned index, RegisterID payload, bool indexIsCell = false);
- void emitStoreBool(unsigned index, RegisterID tag, bool indexIsBool = false);
- void emitStoreDouble(unsigned index, FPRegisterID value);
-
- bool isLabeled(unsigned bytecodeOffset);
- void map(unsigned bytecodeOffset, unsigned virtualRegisterIndex, RegisterID tag, RegisterID payload);
- void unmap(RegisterID);
- void unmap();
- bool isMapped(unsigned virtualRegisterIndex);
- bool getMappedPayload(unsigned virtualRegisterIndex, RegisterID& payload);
- bool getMappedTag(unsigned virtualRegisterIndex, RegisterID& tag);
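-        // map()/unmap() track which virtual register's tag and payload currently live in
-        // machine registers, letting consecutive opcodes skip redundant loads; the mapping
-        // is dropped at jump targets and around calls that clobber registers.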
-
- void emitJumpSlowCaseIfNotJSCell(unsigned virtualRegisterIndex);
- void emitJumpSlowCaseIfNotJSCell(unsigned virtualRegisterIndex, RegisterID tag);
- void linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator&, unsigned virtualRegisterIndex);
-
-#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
- void compileGetByIdHotPath();
- void compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, bool isMethodCheck = false);
-#endif
- void compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, Structure* structure, size_t cachedOffset);
- void compileGetDirectOffset(JSObject* base, RegisterID temp, RegisterID resultTag, RegisterID resultPayload, size_t cachedOffset);
- void compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, RegisterID structure, RegisterID offset);
- void compilePutDirectOffset(RegisterID base, RegisterID valueTag, RegisterID valuePayload, Structure* structure, size_t cachedOffset);
-
- // Arithmetic opcode helpers
- void emitAdd32Constant(unsigned dst, unsigned op, int32_t constant, ResultType opType);
- void emitSub32Constant(unsigned dst, unsigned op, int32_t constant, ResultType opType);
- void emitBinaryDoubleOp(OpcodeID, unsigned dst, unsigned op1, unsigned op2, OperandTypes, JumpList& notInt32Op1, JumpList& notInt32Op2, bool op1IsInRegisters = true, bool op2IsInRegisters = true);
-
-#if CPU(X86)
-        // These architecture-specific values are used to enable patching - see comment on op_put_by_id.
- static const int patchOffsetPutByIdStructure = 7;
- static const int patchOffsetPutByIdExternalLoad = 13;
- static const int patchLengthPutByIdExternalLoad = 3;
- static const int patchOffsetPutByIdPropertyMapOffset1 = 22;
- static const int patchOffsetPutByIdPropertyMapOffset2 = 28;
-        // These architecture-specific values are used to enable patching - see comment on op_get_by_id.
- static const int patchOffsetGetByIdStructure = 7;
- static const int patchOffsetGetByIdBranchToSlowCase = 13;
- static const int patchOffsetGetByIdExternalLoad = 13;
- static const int patchLengthGetByIdExternalLoad = 3;
- static const int patchOffsetGetByIdPropertyMapOffset1 = 22;
- static const int patchOffsetGetByIdPropertyMapOffset2 = 28;
- static const int patchOffsetGetByIdPutResult = 28;
-#if ENABLE(OPCODE_SAMPLING)
- static const int patchOffsetGetByIdSlowCaseCall = 37;
-#else
- static const int patchOffsetGetByIdSlowCaseCall = 27;
-#endif
- static const int patchOffsetOpCallCompareToJump = 6;
-
- static const int patchOffsetMethodCheckProtoObj = 11;
- static const int patchOffsetMethodCheckProtoStruct = 18;
- static const int patchOffsetMethodCheckPutFunction = 29;
-#elif CPU(ARM_TRADITIONAL)
-        // These architecture-specific values are used to enable patching - see comment on op_put_by_id.
- static const int patchOffsetPutByIdStructure = 4;
- static const int patchOffsetPutByIdExternalLoad = 16;
- static const int patchLengthPutByIdExternalLoad = 4;
- static const int patchOffsetPutByIdPropertyMapOffset1 = 20;
- static const int patchOffsetPutByIdPropertyMapOffset2 = 28;
-        // These architecture-specific values are used to enable patching - see comment on op_get_by_id.
- static const int patchOffsetGetByIdStructure = 4;
- static const int patchOffsetGetByIdBranchToSlowCase = 16;
- static const int patchOffsetGetByIdExternalLoad = 16;
- static const int patchLengthGetByIdExternalLoad = 4;
- static const int patchOffsetGetByIdPropertyMapOffset1 = 20;
- static const int patchOffsetGetByIdPropertyMapOffset2 = 28;
- static const int patchOffsetGetByIdPutResult = 36;
-#if ENABLE(OPCODE_SAMPLING)
- #error "OPCODE_SAMPLING is not yet supported"
-#else
- static const int patchOffsetGetByIdSlowCaseCall = 32;
-#endif
- static const int patchOffsetOpCallCompareToJump = 12;
-
- static const int patchOffsetMethodCheckProtoObj = 12;
- static const int patchOffsetMethodCheckProtoStruct = 20;
- static const int patchOffsetMethodCheckPutFunction = 32;
-
- // sequenceOpCall
- static const int sequenceOpCallInstructionSpace = 12;
- static const int sequenceOpCallConstantSpace = 2;
- // sequenceMethodCheck
- static const int sequenceMethodCheckInstructionSpace = 40;
- static const int sequenceMethodCheckConstantSpace = 6;
- // sequenceGetByIdHotPath
- static const int sequenceGetByIdHotPathInstructionSpace = 36;
- static const int sequenceGetByIdHotPathConstantSpace = 4;
- // sequenceGetByIdSlowCase
- static const int sequenceGetByIdSlowCaseInstructionSpace = 56;
- static const int sequenceGetByIdSlowCaseConstantSpace = 2;
- // sequencePutById
- static const int sequencePutByIdInstructionSpace = 36;
- static const int sequencePutByIdConstantSpace = 4;
-#elif CPU(ARM_THUMB2)
-        // These architecture-specific values are used to enable patching - see comment on op_put_by_id.
- static const int patchOffsetPutByIdStructure = 10;
- static const int patchOffsetPutByIdExternalLoad = 26;
- static const int patchLengthPutByIdExternalLoad = 12;
- static const int patchOffsetPutByIdPropertyMapOffset1 = 46;
- static const int patchOffsetPutByIdPropertyMapOffset2 = 58;
-        // These architecture-specific values are used to enable patching - see comment on op_get_by_id.
- static const int patchOffsetGetByIdStructure = 10;
- static const int patchOffsetGetByIdBranchToSlowCase = 26;
- static const int patchOffsetGetByIdExternalLoad = 26;
- static const int patchLengthGetByIdExternalLoad = 12;
- static const int patchOffsetGetByIdPropertyMapOffset1 = 46;
- static const int patchOffsetGetByIdPropertyMapOffset2 = 58;
- static const int patchOffsetGetByIdPutResult = 62;
-#if ENABLE(OPCODE_SAMPLING)
- #error "OPCODE_SAMPLING is not yet supported"
-#else
- static const int patchOffsetGetByIdSlowCaseCall = 30;
-#endif
- static const int patchOffsetOpCallCompareToJump = 16;
-
- static const int patchOffsetMethodCheckProtoObj = 24;
- static const int patchOffsetMethodCheckProtoStruct = 34;
- static const int patchOffsetMethodCheckPutFunction = 58;
-
- // sequenceOpCall
- static const int sequenceOpCallInstructionSpace = 12;
- static const int sequenceOpCallConstantSpace = 2;
- // sequenceMethodCheck
- static const int sequenceMethodCheckInstructionSpace = 40;
- static const int sequenceMethodCheckConstantSpace = 6;
- // sequenceGetByIdHotPath
- static const int sequenceGetByIdHotPathInstructionSpace = 36;
- static const int sequenceGetByIdHotPathConstantSpace = 4;
- // sequenceGetByIdSlowCase
- static const int sequenceGetByIdSlowCaseInstructionSpace = 40;
- static const int sequenceGetByIdSlowCaseConstantSpace = 2;
- // sequencePutById
- static const int sequencePutByIdInstructionSpace = 36;
- static const int sequencePutByIdConstantSpace = 4;
-#elif CPU(MIPS)
-#if WTF_MIPS_ISA(1)
- static const int patchOffsetPutByIdStructure = 16;
- static const int patchOffsetPutByIdExternalLoad = 48;
- static const int patchLengthPutByIdExternalLoad = 20;
- static const int patchOffsetPutByIdPropertyMapOffset1 = 68;
- static const int patchOffsetPutByIdPropertyMapOffset2 = 84;
- static const int patchOffsetGetByIdStructure = 16;
- static const int patchOffsetGetByIdBranchToSlowCase = 48;
- static const int patchOffsetGetByIdExternalLoad = 48;
- static const int patchLengthGetByIdExternalLoad = 20;
- static const int patchOffsetGetByIdPropertyMapOffset1 = 68;
- static const int patchOffsetGetByIdPropertyMapOffset2 = 88;
- static const int patchOffsetGetByIdPutResult = 108;
-#if ENABLE(OPCODE_SAMPLING)
- #error "OPCODE_SAMPLING is not yet supported"
-#else
- static const int patchOffsetGetByIdSlowCaseCall = 44;
-#endif
- static const int patchOffsetOpCallCompareToJump = 32;
- static const int patchOffsetMethodCheckProtoObj = 32;
- static const int patchOffsetMethodCheckProtoStruct = 56;
- static const int patchOffsetMethodCheckPutFunction = 88;
-#else // WTF_MIPS_ISA(1)
- static const int patchOffsetPutByIdStructure = 12;
- static const int patchOffsetPutByIdExternalLoad = 44;
- static const int patchLengthPutByIdExternalLoad = 16;
- static const int patchOffsetPutByIdPropertyMapOffset1 = 60;
- static const int patchOffsetPutByIdPropertyMapOffset2 = 76;
- static const int patchOffsetGetByIdStructure = 12;
- static const int patchOffsetGetByIdBranchToSlowCase = 44;
- static const int patchOffsetGetByIdExternalLoad = 44;
- static const int patchLengthGetByIdExternalLoad = 16;
- static const int patchOffsetGetByIdPropertyMapOffset1 = 60;
- static const int patchOffsetGetByIdPropertyMapOffset2 = 76;
- static const int patchOffsetGetByIdPutResult = 92;
-#if ENABLE(OPCODE_SAMPLING)
- #error "OPCODE_SAMPLING is not yet supported"
-#else
- static const int patchOffsetGetByIdSlowCaseCall = 44;
-#endif
- static const int patchOffsetOpCallCompareToJump = 32;
- static const int patchOffsetMethodCheckProtoObj = 32;
- static const int patchOffsetMethodCheckProtoStruct = 52;
- static const int patchOffsetMethodCheckPutFunction = 84;
-#endif
-#else
-#error "JSVALUE32_64 not supported on this platform."
-#endif
-
-#else // USE(JSVALUE32_64)
- void emitGetVirtualRegister(int src, RegisterID dst);
- void emitGetVirtualRegisters(int src1, RegisterID dst1, int src2, RegisterID dst2);
- void emitPutVirtualRegister(unsigned dst, RegisterID from = regT0);
-
- int32_t getConstantOperandImmediateInt(unsigned src);
-
- void emitGetVariableObjectRegister(RegisterID variableObject, int index, RegisterID dst);
- void emitPutVariableObjectRegister(RegisterID src, RegisterID variableObject, int index);
-
- void killLastResultRegister();
-
- Jump emitJumpIfJSCell(RegisterID);
- Jump emitJumpIfBothJSCells(RegisterID, RegisterID, RegisterID);
- void emitJumpSlowCaseIfJSCell(RegisterID);
- Jump emitJumpIfNotJSCell(RegisterID);
- void emitJumpSlowCaseIfNotJSCell(RegisterID);
- void emitJumpSlowCaseIfNotJSCell(RegisterID, int VReg);
-#if USE(JSVALUE32_64)
- JIT::Jump emitJumpIfImmediateNumber(RegisterID reg)
- {
- return emitJumpIfImmediateInteger(reg);
- }
-
- JIT::Jump emitJumpIfNotImmediateNumber(RegisterID reg)
- {
- return emitJumpIfNotImmediateInteger(reg);
- }
-#endif
- JIT::Jump emitJumpIfImmediateInteger(RegisterID);
- JIT::Jump emitJumpIfNotImmediateInteger(RegisterID);
- JIT::Jump emitJumpIfNotImmediateIntegers(RegisterID, RegisterID, RegisterID);
- void emitJumpSlowCaseIfNotImmediateInteger(RegisterID);
- void emitJumpSlowCaseIfNotImmediateNumber(RegisterID);
- void emitJumpSlowCaseIfNotImmediateIntegers(RegisterID, RegisterID, RegisterID);
-
-#if USE(JSVALUE32_64)
- void emitFastArithDeTagImmediate(RegisterID);
- Jump emitFastArithDeTagImmediateJumpIfZero(RegisterID);
-#endif
- void emitFastArithReTagImmediate(RegisterID src, RegisterID dest);
- void emitFastArithIntToImmNoCheck(RegisterID src, RegisterID dest);
-
- void emitTagAsBoolImmediate(RegisterID reg);
- void compileBinaryArithOp(OpcodeID, unsigned dst, unsigned src1, unsigned src2, OperandTypes opi);
-#if USE(JSVALUE64)
- void compileBinaryArithOpSlowCase(OpcodeID, Vector<SlowCaseEntry>::iterator&, unsigned dst, unsigned src1, unsigned src2, OperandTypes, bool op1HasImmediateIntFastCase, bool op2HasImmediateIntFastCase);
-#else
- void compileBinaryArithOpSlowCase(OpcodeID, Vector<SlowCaseEntry>::iterator&, unsigned dst, unsigned src1, unsigned src2, OperandTypes);
-#endif
-
-#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
- void compileGetByIdHotPath(int resultVReg, int baseVReg, Identifier* ident, unsigned propertyAccessInstructionIndex);
- void compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, bool isMethodCheck = false);
-#endif
- void compileGetDirectOffset(RegisterID base, RegisterID result, Structure* structure, size_t cachedOffset);
- void compileGetDirectOffset(JSObject* base, RegisterID temp, RegisterID result, size_t cachedOffset);
- void compileGetDirectOffset(RegisterID base, RegisterID result, RegisterID structure, RegisterID offset, RegisterID scratch);
- void compilePutDirectOffset(RegisterID base, RegisterID value, Structure* structure, size_t cachedOffset);
-
-#if CPU(X86_64)
-        // These architecture-specific values are used to enable patching - see comment on op_put_by_id.
- static const int patchOffsetPutByIdStructure = 10;
- static const int patchOffsetPutByIdExternalLoad = 20;
- static const int patchLengthPutByIdExternalLoad = 4;
- static const int patchOffsetPutByIdPropertyMapOffset = 31;
-        // These architecture-specific values are used to enable patching - see comment on op_get_by_id.
- static const int patchOffsetGetByIdStructure = 10;
- static const int patchOffsetGetByIdBranchToSlowCase = 20;
- static const int patchOffsetGetByIdExternalLoad = 20;
- static const int patchLengthGetByIdExternalLoad = 4;
- static const int patchOffsetGetByIdPropertyMapOffset = 31;
- static const int patchOffsetGetByIdPutResult = 31;
-#if ENABLE(OPCODE_SAMPLING)
- static const int patchOffsetGetByIdSlowCaseCall = 64;
-#else
- static const int patchOffsetGetByIdSlowCaseCall = 41;
-#endif
- static const int patchOffsetOpCallCompareToJump = 9;
-
- static const int patchOffsetMethodCheckProtoObj = 20;
- static const int patchOffsetMethodCheckProtoStruct = 30;
- static const int patchOffsetMethodCheckPutFunction = 50;
-#elif CPU(X86)
-        // These architecture-specific values are used to enable patching - see comment on op_put_by_id.
- static const int patchOffsetPutByIdStructure = 7;
- static const int patchOffsetPutByIdExternalLoad = 13;
- static const int patchLengthPutByIdExternalLoad = 3;
- static const int patchOffsetPutByIdPropertyMapOffset = 22;
-        // These architecture-specific values are used to enable patching - see comment on op_get_by_id.
- static const int patchOffsetGetByIdStructure = 7;
- static const int patchOffsetGetByIdBranchToSlowCase = 13;
- static const int patchOffsetGetByIdExternalLoad = 13;
- static const int patchLengthGetByIdExternalLoad = 3;
- static const int patchOffsetGetByIdPropertyMapOffset = 22;
- static const int patchOffsetGetByIdPutResult = 22;
-#if ENABLE(OPCODE_SAMPLING)
- static const int patchOffsetGetByIdSlowCaseCall = 33;
-#else
- static const int patchOffsetGetByIdSlowCaseCall = 23;
-#endif
- static const int patchOffsetOpCallCompareToJump = 6;
-
- static const int patchOffsetMethodCheckProtoObj = 11;
- static const int patchOffsetMethodCheckProtoStruct = 18;
- static const int patchOffsetMethodCheckPutFunction = 29;
-#elif CPU(ARM_THUMB2)
-        // These architecture-specific values are used to enable patching - see comment on op_put_by_id.
- static const int patchOffsetPutByIdStructure = 10;
- static const int patchOffsetPutByIdExternalLoad = 26;
- static const int patchLengthPutByIdExternalLoad = 12;
- static const int patchOffsetPutByIdPropertyMapOffset = 46;
-        // These architecture-specific values are used to enable patching - see comment on op_get_by_id.
- static const int patchOffsetGetByIdStructure = 10;
- static const int patchOffsetGetByIdBranchToSlowCase = 26;
- static const int patchOffsetGetByIdExternalLoad = 26;
- static const int patchLengthGetByIdExternalLoad = 12;
- static const int patchOffsetGetByIdPropertyMapOffset = 46;
- static const int patchOffsetGetByIdPutResult = 50;
-#if ENABLE(OPCODE_SAMPLING)
-        static const int patchOffsetGetByIdSlowCaseCall = 0; // FIXME
-#else
- static const int patchOffsetGetByIdSlowCaseCall = 28;
-#endif
- static const int patchOffsetOpCallCompareToJump = 16;
-
- static const int patchOffsetMethodCheckProtoObj = 24;
- static const int patchOffsetMethodCheckProtoStruct = 34;
- static const int patchOffsetMethodCheckPutFunction = 58;
-#elif CPU(ARM_TRADITIONAL)
-        // These architecture-specific values are used to enable patching - see comment on op_put_by_id.
- static const int patchOffsetPutByIdStructure = 4;
- static const int patchOffsetPutByIdExternalLoad = 16;
- static const int patchLengthPutByIdExternalLoad = 4;
- static const int patchOffsetPutByIdPropertyMapOffset = 20;
-        // These architecture-specific values are used to enable patching - see comment on op_get_by_id.
- static const int patchOffsetGetByIdStructure = 4;
- static const int patchOffsetGetByIdBranchToSlowCase = 16;
- static const int patchOffsetGetByIdExternalLoad = 16;
- static const int patchLengthGetByIdExternalLoad = 4;
- static const int patchOffsetGetByIdPropertyMapOffset = 20;
- static const int patchOffsetGetByIdPutResult = 28;
-#if ENABLE(OPCODE_SAMPLING)
- #error "OPCODE_SAMPLING is not yet supported"
-#else
- static const int patchOffsetGetByIdSlowCaseCall = 28;
-#endif
- static const int patchOffsetOpCallCompareToJump = 12;
-
- static const int patchOffsetMethodCheckProtoObj = 12;
- static const int patchOffsetMethodCheckProtoStruct = 20;
- static const int patchOffsetMethodCheckPutFunction = 32;
-
- // sequenceOpCall
- static const int sequenceOpCallInstructionSpace = 12;
- static const int sequenceOpCallConstantSpace = 2;
- // sequenceMethodCheck
- static const int sequenceMethodCheckInstructionSpace = 40;
- static const int sequenceMethodCheckConstantSpace = 6;
- // sequenceGetByIdHotPath
- static const int sequenceGetByIdHotPathInstructionSpace = 28;
- static const int sequenceGetByIdHotPathConstantSpace = 3;
- // sequenceGetByIdSlowCase
- static const int sequenceGetByIdSlowCaseInstructionSpace = 32;
- static const int sequenceGetByIdSlowCaseConstantSpace = 2;
- // sequencePutById
- static const int sequencePutByIdInstructionSpace = 28;
- static const int sequencePutByIdConstantSpace = 3;
-#elif CPU(MIPS)
-#if WTF_MIPS_ISA(1)
- static const int patchOffsetPutByIdStructure = 16;
- static const int patchOffsetPutByIdExternalLoad = 48;
- static const int patchLengthPutByIdExternalLoad = 20;
- static const int patchOffsetPutByIdPropertyMapOffset = 68;
- static const int patchOffsetGetByIdStructure = 16;
- static const int patchOffsetGetByIdBranchToSlowCase = 48;
- static const int patchOffsetGetByIdExternalLoad = 48;
- static const int patchLengthGetByIdExternalLoad = 20;
- static const int patchOffsetGetByIdPropertyMapOffset = 68;
- static const int patchOffsetGetByIdPutResult = 88;
-#if ENABLE(OPCODE_SAMPLING)
- #error "OPCODE_SAMPLING is not yet supported"
-#else
- static const int patchOffsetGetByIdSlowCaseCall = 40;
-#endif
- static const int patchOffsetOpCallCompareToJump = 32;
- static const int patchOffsetMethodCheckProtoObj = 32;
- static const int patchOffsetMethodCheckProtoStruct = 56;
- static const int patchOffsetMethodCheckPutFunction = 88;
-#else // WTF_MIPS_ISA(1)
- static const int patchOffsetPutByIdStructure = 12;
- static const int patchOffsetPutByIdExternalLoad = 44;
- static const int patchLengthPutByIdExternalLoad = 16;
- static const int patchOffsetPutByIdPropertyMapOffset = 60;
- static const int patchOffsetGetByIdStructure = 12;
- static const int patchOffsetGetByIdBranchToSlowCase = 44;
- static const int patchOffsetGetByIdExternalLoad = 44;
- static const int patchLengthGetByIdExternalLoad = 16;
- static const int patchOffsetGetByIdPropertyMapOffset = 60;
- static const int patchOffsetGetByIdPutResult = 76;
-#if ENABLE(OPCODE_SAMPLING)
- #error "OPCODE_SAMPLING is not yet supported"
-#else
- static const int patchOffsetGetByIdSlowCaseCall = 40;
-#endif
- static const int patchOffsetOpCallCompareToJump = 32;
- static const int patchOffsetMethodCheckProtoObj = 32;
- static const int patchOffsetMethodCheckProtoStruct = 52;
- static const int patchOffsetMethodCheckPutFunction = 84;
-#endif
-#endif
-#endif // USE(JSVALUE32_64)
-
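-// On targets whose assembler interleaves a constant pool with the instruction stream
-// (e.g. traditional ARM), a pool flush inside a patchable sequence would change the
-// instruction spacing and invalidate the hard-coded patch offsets above; the macros
-// below reserve instruction and constant-pool space up front so no flush can occur
-// mid-sequence.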
-#if (defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL)
-#define BEGIN_UNINTERRUPTED_SEQUENCE(name) do { beginUninterruptedSequence(name ## InstructionSpace, name ## ConstantSpace); } while (false)
-#define END_UNINTERRUPTED_SEQUENCE(name) do { endUninterruptedSequence(name ## InstructionSpace, name ## ConstantSpace); } while (false)
-
- void beginUninterruptedSequence(int, int);
- void endUninterruptedSequence(int, int);
-
-#else
-#define BEGIN_UNINTERRUPTED_SEQUENCE(name) do { beginUninterruptedSequence(); } while (false)
-#define END_UNINTERRUPTED_SEQUENCE(name) do { endUninterruptedSequence(); } while (false)
-#endif
-
- void emit_op_add(Instruction*);
- void emit_op_bitand(Instruction*);
- void emit_op_bitnot(Instruction*);
- void emit_op_bitor(Instruction*);
- void emit_op_bitxor(Instruction*);
- void emit_op_call(Instruction*);
- void emit_op_call_eval(Instruction*);
- void emit_op_call_varargs(Instruction*);
- void emit_op_call_put_result(Instruction*);
- void emit_op_catch(Instruction*);
- void emit_op_construct(Instruction*);
- void emit_op_get_callee(Instruction*);
- void emit_op_create_this(Instruction*);
- void emit_op_convert_this(Instruction*);
- void emit_op_convert_this_strict(Instruction*);
- void emit_op_create_arguments(Instruction*);
- void emit_op_debug(Instruction*);
- void emit_op_del_by_id(Instruction*);
- void emit_op_div(Instruction*);
- void emit_op_end(Instruction*);
- void emit_op_enter(Instruction*);
- void emit_op_create_activation(Instruction*);
- void emit_op_eq(Instruction*);
- void emit_op_eq_null(Instruction*);
- void emit_op_get_by_id(Instruction*);
- void emit_op_get_arguments_length(Instruction*);
- void emit_op_get_by_val(Instruction*);
- void emit_op_get_argument_by_val(Instruction*);
- void emit_op_get_by_pname(Instruction*);
- void emit_op_get_global_var(Instruction*);
- void emit_op_get_scoped_var(Instruction*);
- void emit_op_init_lazy_reg(Instruction*);
- void emit_op_check_has_instance(Instruction*);
- void emit_op_instanceof(Instruction*);
- void emit_op_jeq_null(Instruction*);
- void emit_op_jfalse(Instruction*);
- void emit_op_jmp(Instruction*);
- void emit_op_jmp_scopes(Instruction*);
- void emit_op_jneq_null(Instruction*);
- void emit_op_jneq_ptr(Instruction*);
- void emit_op_jnless(Instruction*);
- void emit_op_jless(Instruction*);
- void emit_op_jlesseq(Instruction*, bool invert = false);
- void emit_op_jnlesseq(Instruction*);
- void emit_op_jsr(Instruction*);
- void emit_op_jtrue(Instruction*);
- void emit_op_load_varargs(Instruction*);
- void emit_op_loop(Instruction*);
- void emit_op_loop_if_less(Instruction*);
- void emit_op_loop_if_lesseq(Instruction*);
- void emit_op_loop_if_true(Instruction*);
- void emit_op_loop_if_false(Instruction*);
- void emit_op_lshift(Instruction*);
- void emit_op_method_check(Instruction*);
- void emit_op_mod(Instruction*);
- void emit_op_mov(Instruction*);
- void emit_op_mul(Instruction*);
- void emit_op_negate(Instruction*);
- void emit_op_neq(Instruction*);
- void emit_op_neq_null(Instruction*);
- void emit_op_new_array(Instruction*);
- void emit_op_new_func(Instruction*);
- void emit_op_new_func_exp(Instruction*);
- void emit_op_new_object(Instruction*);
- void emit_op_new_regexp(Instruction*);
- void emit_op_get_pnames(Instruction*);
- void emit_op_next_pname(Instruction*);
- void emit_op_not(Instruction*);
- void emit_op_nstricteq(Instruction*);
- void emit_op_pop_scope(Instruction*);
- void emit_op_post_dec(Instruction*);
- void emit_op_post_inc(Instruction*);
- void emit_op_pre_dec(Instruction*);
- void emit_op_pre_inc(Instruction*);
- void emit_op_profile_did_call(Instruction*);
- void emit_op_profile_will_call(Instruction*);
- void emit_op_push_new_scope(Instruction*);
- void emit_op_push_scope(Instruction*);
- void emit_op_put_by_id(Instruction*);
- void emit_op_put_by_index(Instruction*);
- void emit_op_put_by_val(Instruction*);
- void emit_op_put_getter(Instruction*);
- void emit_op_put_global_var(Instruction*);
- void emit_op_put_scoped_var(Instruction*);
- void emit_op_put_setter(Instruction*);
- void emit_op_resolve(Instruction*);
- void emit_op_resolve_base(Instruction*);
- void emit_op_ensure_property_exists(Instruction*);
- void emit_op_resolve_global(Instruction*, bool dynamic = false);
- void emit_op_resolve_global_dynamic(Instruction*);
- void emit_op_resolve_skip(Instruction*);
- void emit_op_resolve_with_base(Instruction*);
- void emit_op_ret(Instruction*);
- void emit_op_ret_object_or_this(Instruction*);
- void emit_op_rshift(Instruction*);
- void emit_op_sret(Instruction*);
- void emit_op_strcat(Instruction*);
- void emit_op_stricteq(Instruction*);
- void emit_op_sub(Instruction*);
- void emit_op_switch_char(Instruction*);
- void emit_op_switch_imm(Instruction*);
- void emit_op_switch_string(Instruction*);
- void emit_op_tear_off_activation(Instruction*);
- void emit_op_tear_off_arguments(Instruction*);
- void emit_op_throw(Instruction*);
- void emit_op_throw_reference_error(Instruction*);
- void emit_op_throw_syntax_error(Instruction*);
- void emit_op_to_jsnumber(Instruction*);
- void emit_op_to_primitive(Instruction*);
- void emit_op_unexpected_load(Instruction*);
- void emit_op_urshift(Instruction*);
-#if ENABLE(JIT_USE_SOFT_MODULO)
- void softModulo();
-#endif
-
- void emitSlow_op_add(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_bitand(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_bitnot(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_bitor(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_bitxor(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_call(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_call_eval(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_call_varargs(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_construct(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_convert_this(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_convert_this_strict(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_div(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_eq(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_get_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_get_arguments_length(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_get_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_get_argument_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_get_by_pname(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_check_has_instance(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_instanceof(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_jfalse(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_jnless(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_jless(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_jlesseq(Instruction*, Vector<SlowCaseEntry>::iterator&, bool invert = false);
- void emitSlow_op_jnlesseq(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_jtrue(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_load_varargs(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_loop_if_less(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_loop_if_lesseq(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_loop_if_true(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_loop_if_false(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_lshift(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_mod(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_mul(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_negate(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_neq(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_not(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_nstricteq(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_post_dec(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_post_inc(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_pre_dec(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_pre_inc(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_put_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_put_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_resolve_global(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_resolve_global_dynamic(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_rshift(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_stricteq(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_sub(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_to_jsnumber(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_to_primitive(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_urshift(Instruction*, Vector<SlowCaseEntry>::iterator&);
-
- void emitRightShift(Instruction*, bool isUnsigned);
- void emitRightShiftSlowCase(Instruction*, Vector<SlowCaseEntry>::iterator&, bool isUnsigned);
-
- /* This function is deprecated. */
- void emitGetJITStubArg(unsigned argumentNumber, RegisterID dst);
-
- void emitInitRegister(unsigned dst);
-
- void emitPutToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry entry);
- void emitPutImmediateToCallFrameHeader(void* value, RegisterFile::CallFrameHeaderEntry entry);
- void emitGetFromCallFrameHeaderPtr(RegisterFile::CallFrameHeaderEntry entry, RegisterID to, RegisterID from = callFrameRegister);
- void emitGetFromCallFrameHeader32(RegisterFile::CallFrameHeaderEntry entry, RegisterID to, RegisterID from = callFrameRegister);
-
- JSValue getConstantOperand(unsigned src);
- bool isOperandConstantImmediateInt(unsigned src);
- bool isOperandConstantImmediateChar(unsigned src);
-
- Jump getSlowCase(Vector<SlowCaseEntry>::iterator& iter)
- {
- return iter++->from;
- }
- void linkSlowCase(Vector<SlowCaseEntry>::iterator& iter)
- {
- iter->from.link(this);
- ++iter;
- }
- void linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator&, int vReg);
-
- Jump checkStructure(RegisterID reg, Structure* structure);
-
- void restoreArgumentReference();
- void restoreArgumentReferenceForTrampoline();
-
- Call emitNakedCall(CodePtr function = CodePtr());
-
- void preserveReturnAddressAfterCall(RegisterID);
- void restoreReturnAddressBeforeReturn(RegisterID);
- void restoreReturnAddressBeforeReturn(Address);
-
-        // Loads the character value of a single-character string into dst.
- void emitLoadCharacterString(RegisterID src, RegisterID dst, JumpList& failures);
-
- void emitTimeoutCheck();
-#ifndef NDEBUG
- void printBytecodeOperandTypes(unsigned src1, unsigned src2);
-#endif
-
-#if ENABLE(SAMPLING_FLAGS)
- void setSamplingFlag(int32_t);
- void clearSamplingFlag(int32_t);
-#endif
-
-#if ENABLE(SAMPLING_COUNTERS)
- void emitCount(AbstractSamplingCounter&, uint32_t = 1);
-#endif
-
-#if ENABLE(OPCODE_SAMPLING)
- void sampleInstruction(Instruction*, bool = false);
-#endif
-
-#if ENABLE(CODEBLOCK_SAMPLING)
- void sampleCodeBlock(CodeBlock*);
-#else
- void sampleCodeBlock(CodeBlock*) {}
-#endif
-
- Interpreter* m_interpreter;
- JSGlobalData* m_globalData;
- CodeBlock* m_codeBlock;
-
- Vector<CallRecord> m_calls;
- Vector<Label> m_labels;
- Vector<PropertyStubCompilationInfo> m_propertyAccessCompilationInfo;
- Vector<StructureStubCompilationInfo> m_callStructureStubCompilationInfo;
- Vector<MethodCallCompilationInfo> m_methodCallCompilationInfo;
- Vector<JumpTable> m_jmpTable;
-
- unsigned m_bytecodeOffset;
- Vector<JSRInfo> m_jsrSites;
- Vector<SlowCaseEntry> m_slowCases;
- Vector<SwitchRecord> m_switches;
-
- unsigned m_propertyAccessInstructionIndex;
- unsigned m_globalResolveInfoIndex;
- unsigned m_callLinkInfoIndex;
-
-#if USE(JSVALUE32_64)
- unsigned m_jumpTargetIndex;
- unsigned m_mappedBytecodeOffset;
- unsigned m_mappedVirtualRegisterIndex;
- RegisterID m_mappedTag;
- RegisterID m_mappedPayload;
-#else
- int m_lastResultBytecodeRegister;
- unsigned m_jumpTargetsPosition;
-#endif
-
-#ifndef NDEBUG
-#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL
- Label m_uninterruptedInstructionSequenceBegin;
- int m_uninterruptedConstantSequenceBegin;
-#endif
-#endif
- void* m_linkerOffset;
- static CodePtr stringGetByValStubGenerator(JSGlobalData* globalData, ExecutablePool* pool);
- } JIT_CLASS_ALIGNMENT;
-
- inline void JIT::emit_op_loop(Instruction* currentInstruction)
- {
- emitTimeoutCheck();
- emit_op_jmp(currentInstruction);
- }
-
- inline void JIT::emit_op_loop_if_true(Instruction* currentInstruction)
- {
- emitTimeoutCheck();
- emit_op_jtrue(currentInstruction);
- }
-
- inline void JIT::emitSlow_op_loop_if_true(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
- {
- emitSlow_op_jtrue(currentInstruction, iter);
- }
-
- inline void JIT::emit_op_loop_if_false(Instruction* currentInstruction)
- {
- emitTimeoutCheck();
- emit_op_jfalse(currentInstruction);
- }
-
- inline void JIT::emitSlow_op_loop_if_false(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
- {
- emitSlow_op_jfalse(currentInstruction, iter);
- }
-
- inline void JIT::emit_op_loop_if_less(Instruction* currentInstruction)
- {
- emitTimeoutCheck();
- emit_op_jless(currentInstruction);
- }
-
- inline void JIT::emitSlow_op_loop_if_less(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
- {
- emitSlow_op_jless(currentInstruction, iter);
- }
-
-} // namespace JSC
-
-#endif // ENABLE(JIT)
-
-#endif // JIT_h
diff --git a/JavaScriptCore/jit/JITArithmetic.cpp b/JavaScriptCore/jit/JITArithmetic.cpp
deleted file mode 100644
index cd05f51..0000000
--- a/JavaScriptCore/jit/JITArithmetic.cpp
+++ /dev/null
@@ -1,1244 +0,0 @@
-/*
- * Copyright (C) 2008 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-
-#if ENABLE(JIT)
-#if USE(JSVALUE64)
-#include "JIT.h"
-
-#include "CodeBlock.h"
-#include "JITInlineMethods.h"
-#include "JITStubCall.h"
-#include "JITStubs.h"
-#include "JSArray.h"
-#include "JSFunction.h"
-#include "Interpreter.h"
-#include "ResultType.h"
-#include "SamplingTool.h"
-
-#ifndef NDEBUG
-#include <stdio.h>
-#endif
-
-using namespace std;
-
-namespace JSC {
-
-void JIT::emit_op_lshift(Instruction* currentInstruction)
-{
- unsigned result = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
-
- emitGetVirtualRegisters(op1, regT0, op2, regT2);
-    // FIXME: would we be better off using 'emitJumpSlowCaseIfNotImmediateIntegers'? We *probably* ought to be consistent.
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
- emitJumpSlowCaseIfNotImmediateInteger(regT2);
- emitFastArithImmToInt(regT0);
- emitFastArithImmToInt(regT2);
- lshift32(regT2, regT0);
- emitFastArithReTagImmediate(regT0, regT0);
- emitPutVirtualRegister(result);
-}
-
-void JIT::emitSlow_op_lshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned result = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
-
- UNUSED_PARAM(op1);
- UNUSED_PARAM(op2);
- linkSlowCase(iter);
- linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_lshift);
- stubCall.addArgument(regT0);
- stubCall.addArgument(regT2);
- stubCall.call(result);
-}
-
-void JIT::emit_op_rshift(Instruction* currentInstruction)
-{
- unsigned result = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
-
- if (isOperandConstantImmediateInt(op2)) {
- // isOperandConstantImmediateInt(op2) => 1 SlowCase
- emitGetVirtualRegister(op1, regT0);
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
- // Mask with 0x1f as per ecma-262 11.7.2 step 7.
- rshift32(Imm32(getConstantOperandImmediateInt(op2) & 0x1f), regT0);
- } else {
- emitGetVirtualRegisters(op1, regT0, op2, regT2);
- if (supportsFloatingPointTruncate()) {
- Jump lhsIsInt = emitJumpIfImmediateInteger(regT0);
-            // supportsFloatingPointTruncate() && USE(JSVALUE64) => 3 SlowCases
- addSlowCase(emitJumpIfNotImmediateNumber(regT0));
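-            // Under JSVALUE64, double bits are stored offset by 2^48; adding
-            // tagTypeNumberRegister undoes that offset, recovering the raw IEEE-754
-            // bits before the move into an FP register.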
- addPtr(tagTypeNumberRegister, regT0);
- movePtrToDouble(regT0, fpRegT0);
- addSlowCase(branchTruncateDoubleToInt32(fpRegT0, regT0));
- lhsIsInt.link(this);
- emitJumpSlowCaseIfNotImmediateInteger(regT2);
- } else {
-            // !supportsFloatingPointTruncate() => 2 SlowCases
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
- emitJumpSlowCaseIfNotImmediateInteger(regT2);
- }
- emitFastArithImmToInt(regT2);
- rshift32(regT2, regT0);
- }
- emitFastArithIntToImmNoCheck(regT0, regT0);
- emitPutVirtualRegister(result);
-}
-
-void JIT::emitSlow_op_rshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned result = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
-
- JITStubCall stubCall(this, cti_op_rshift);
-
- if (isOperandConstantImmediateInt(op2)) {
- linkSlowCase(iter);
- stubCall.addArgument(regT0);
- stubCall.addArgument(op2, regT2);
- } else {
- if (supportsFloatingPointTruncate()) {
- linkSlowCase(iter);
- linkSlowCase(iter);
- linkSlowCase(iter);
-            // We're reloading op1 into regT0 as we can no longer guarantee that
- // we have not munged the operand. It may have already been shifted
- // correctly, but it still will not have been tagged.
- stubCall.addArgument(op1, regT0);
- stubCall.addArgument(regT2);
- } else {
- linkSlowCase(iter);
- linkSlowCase(iter);
- stubCall.addArgument(regT0);
- stubCall.addArgument(regT2);
- }
- }
-
- stubCall.call(result);
-}
-
-void JIT::emit_op_urshift(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
-
-    // The slow case of op_urshift makes assumptions about which registers hold the
-    // shift arguments, so any changes here must be mirrored there as well.
- if (isOperandConstantImmediateInt(op2)) {
- emitGetVirtualRegister(op1, regT0);
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
- emitFastArithImmToInt(regT0);
- int shift = getConstantOperand(op2).asInt32();
- if (shift)
- urshift32(Imm32(shift & 0x1f), regT0);
-        // A shift that is negative or a multiple of 32 can leave the effective shift at
-        // zero, making this (essentially) a toUint32 conversion, which can produce a
-        // value we cannot represent as an immediate int.
- if (shift < 0 || !(shift & 31))
- addSlowCase(branch32(LessThan, regT0, Imm32(0)));
- emitFastArithReTagImmediate(regT0, regT0);
- emitPutVirtualRegister(dst, regT0);
- return;
- }
- emitGetVirtualRegisters(op1, regT0, op2, regT1);
- if (!isOperandConstantImmediateInt(op1))
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
- emitJumpSlowCaseIfNotImmediateInteger(regT1);
- emitFastArithImmToInt(regT0);
- emitFastArithImmToInt(regT1);
- urshift32(regT1, regT0);
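-    // A result with the sign bit set exceeds INT32_MAX and cannot be represented as
-    // an immediate int, so it is sent to the slow case.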
- addSlowCase(branch32(LessThan, regT0, Imm32(0)));
- emitFastArithReTagImmediate(regT0, regT0);
- emitPutVirtualRegister(dst, regT0);
-}
-
-void JIT::emitSlow_op_urshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
- if (isOperandConstantImmediateInt(op2)) {
- int shift = getConstantOperand(op2).asInt32();
- // op1 = regT0
- linkSlowCase(iter); // int32 check
- if (supportsFloatingPointTruncate()) {
- JumpList failures;
- failures.append(emitJumpIfNotImmediateNumber(regT0)); // op1 is not a double
- addPtr(tagTypeNumberRegister, regT0);
- movePtrToDouble(regT0, fpRegT0);
- failures.append(branchTruncateDoubleToInt32(fpRegT0, regT0));
- if (shift)
- urshift32(Imm32(shift & 0x1f), regT0);
- if (shift < 0 || !(shift & 31))
- failures.append(branch32(LessThan, regT0, Imm32(0)));
- emitFastArithReTagImmediate(regT0, regT0);
- emitPutVirtualRegister(dst, regT0);
- emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_rshift));
- failures.link(this);
- }
- if (shift < 0 || !(shift & 31))
- linkSlowCase(iter); // failed to box in hot path
- } else {
- // op1 = regT0
- // op2 = regT1
- if (!isOperandConstantImmediateInt(op1)) {
- linkSlowCase(iter); // int32 check -- op1 is not an int
- if (supportsFloatingPointTruncate()) {
- JumpList failures;
- failures.append(emitJumpIfNotImmediateNumber(regT0)); // op1 is not a double
- addPtr(tagTypeNumberRegister, regT0);
- movePtrToDouble(regT0, fpRegT0);
- failures.append(branchTruncateDoubleToInt32(fpRegT0, regT0));
- failures.append(emitJumpIfNotImmediateInteger(regT1)); // op2 is not an int
- emitFastArithImmToInt(regT1);
- urshift32(regT1, regT0);
- failures.append(branch32(LessThan, regT0, Imm32(0)));
- emitFastArithReTagImmediate(regT0, regT0);
- emitPutVirtualRegister(dst, regT0);
- emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_rshift));
- failures.link(this);
- }
- }
-
- linkSlowCase(iter); // int32 check - op2 is not an int
- linkSlowCase(iter); // Can't represent unsigned result as an immediate
- }
-
- JITStubCall stubCall(this, cti_op_urshift);
- stubCall.addArgument(op1, regT0);
- stubCall.addArgument(op2, regT1);
- stubCall.call(dst);
-}
-
-void JIT::emit_op_jnless(Instruction* currentInstruction)
-{
- unsigned op1 = currentInstruction[1].u.operand;
- unsigned op2 = currentInstruction[2].u.operand;
- unsigned target = currentInstruction[3].u.operand;
-
- // We generate inline code for the following cases in the fast path:
- // - int immediate to constant int immediate
- // - constant int immediate to int immediate
- // - int immediate to int immediate
-
- if (isOperandConstantImmediateChar(op1)) {
- emitGetVirtualRegister(op2, regT0);
- addSlowCase(emitJumpIfNotJSCell(regT0));
- JumpList failures;
- emitLoadCharacterString(regT0, regT0, failures);
- addSlowCase(failures);
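-        // emitLoadCharacterString has left the string's single UChar in regT0,
-        // so the comparison below is a plain 32-bit compare of character codes
-        // (e.g. against 0x62 for the constant "b").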
- addJump(branch32(LessThanOrEqual, regT0, Imm32(asString(getConstantOperand(op1))->tryGetValue()[0])), target);
- return;
- }
- if (isOperandConstantImmediateChar(op2)) {
- emitGetVirtualRegister(op1, regT0);
- addSlowCase(emitJumpIfNotJSCell(regT0));
- JumpList failures;
- emitLoadCharacterString(regT0, regT0, failures);
- addSlowCase(failures);
- addJump(branch32(GreaterThanOrEqual, regT0, Imm32(asString(getConstantOperand(op2))->tryGetValue()[0])), target);
- return;
- }
- if (isOperandConstantImmediateInt(op2)) {
- emitGetVirtualRegister(op1, regT0);
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
- int32_t op2imm = getConstantOperandImmediateInt(op2);
- addJump(branch32(GreaterThanOrEqual, regT0, Imm32(op2imm)), target);
- } else if (isOperandConstantImmediateInt(op1)) {
- emitGetVirtualRegister(op2, regT1);
- emitJumpSlowCaseIfNotImmediateInteger(regT1);
- int32_t op1imm = getConstantOperandImmediateInt(op1);
- addJump(branch32(LessThanOrEqual, regT1, Imm32(op1imm)), target);
- } else {
- emitGetVirtualRegisters(op1, regT0, op2, regT1);
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
- emitJumpSlowCaseIfNotImmediateInteger(regT1);
-
- addJump(branch32(GreaterThanOrEqual, regT0, regT1), target);
- }
-}
-
-void JIT::emitSlow_op_jnless(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned op1 = currentInstruction[1].u.operand;
- unsigned op2 = currentInstruction[2].u.operand;
- unsigned target = currentInstruction[3].u.operand;
-
- // We generate inline code for the following cases in the slow path:
- // - floating-point number to constant int immediate
- // - constant int immediate to floating-point number
- // - floating-point number to floating-point number.
- if (isOperandConstantImmediateChar(op1) || isOperandConstantImmediateChar(op2)) {
- linkSlowCase(iter);
- linkSlowCase(iter);
- linkSlowCase(iter);
- linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_jless);
- stubCall.addArgument(op1, regT0);
- stubCall.addArgument(op2, regT1);
- stubCall.call();
- emitJumpSlowToHot(branchTest32(Zero, regT0), target);
- return;
- }
-
- if (isOperandConstantImmediateInt(op2)) {
- linkSlowCase(iter);
-
- if (supportsFloatingPoint()) {
- Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
- addPtr(tagTypeNumberRegister, regT0);
- movePtrToDouble(regT0, fpRegT0);
-
-            int32_t op2imm = getConstantOperand(op2).asInt32();
-
- move(Imm32(op2imm), regT1);
- convertInt32ToDouble(regT1, fpRegT1);
-
- emitJumpSlowToHot(branchDouble(DoubleLessThanOrEqualOrUnordered, fpRegT1, fpRegT0), target);
-
- emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));
-
- fail1.link(this);
- }
-
- JITStubCall stubCall(this, cti_op_jless);
- stubCall.addArgument(regT0);
- stubCall.addArgument(op2, regT2);
- stubCall.call();
- emitJumpSlowToHot(branchTest32(Zero, regT0), target);
-
- } else if (isOperandConstantImmediateInt(op1)) {
- linkSlowCase(iter);
-
- if (supportsFloatingPoint()) {
- Jump fail1 = emitJumpIfNotImmediateNumber(regT1);
- addPtr(tagTypeNumberRegister, regT1);
- movePtrToDouble(regT1, fpRegT1);
-
-            int32_t op1imm = getConstantOperand(op1).asInt32();
-
- move(Imm32(op1imm), regT0);
- convertInt32ToDouble(regT0, fpRegT0);
-
- emitJumpSlowToHot(branchDouble(DoubleLessThanOrEqualOrUnordered, fpRegT1, fpRegT0), target);
-
- emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));
-
- fail1.link(this);
- }
-
- JITStubCall stubCall(this, cti_op_jless);
- stubCall.addArgument(op1, regT2);
- stubCall.addArgument(regT1);
- stubCall.call();
- emitJumpSlowToHot(branchTest32(Zero, regT0), target);
-
- } else {
- linkSlowCase(iter);
-
- if (supportsFloatingPoint()) {
- Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
- Jump fail2 = emitJumpIfNotImmediateNumber(regT1);
- Jump fail3 = emitJumpIfImmediateInteger(regT1);
- addPtr(tagTypeNumberRegister, regT0);
- addPtr(tagTypeNumberRegister, regT1);
- movePtrToDouble(regT0, fpRegT0);
- movePtrToDouble(regT1, fpRegT1);
-
- emitJumpSlowToHot(branchDouble(DoubleLessThanOrEqualOrUnordered, fpRegT1, fpRegT0), target);
-
- emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));
-
- fail1.link(this);
- fail2.link(this);
- fail3.link(this);
- }
-
- linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_jless);
- stubCall.addArgument(regT0);
- stubCall.addArgument(regT1);
- stubCall.call();
- emitJumpSlowToHot(branchTest32(Zero, regT0), target);
- }
-}
-
-void JIT::emit_op_jless(Instruction* currentInstruction)
-{
- unsigned op1 = currentInstruction[1].u.operand;
- unsigned op2 = currentInstruction[2].u.operand;
- unsigned target = currentInstruction[3].u.operand;
-
- // We generate inline code for the following cases in the fast path:
- // - int immediate to constant int immediate
- // - constant int immediate to int immediate
- // - int immediate to int immediate
-
- if (isOperandConstantImmediateChar(op1)) {
- emitGetVirtualRegister(op2, regT0);
- addSlowCase(emitJumpIfNotJSCell(regT0));
- JumpList failures;
- emitLoadCharacterString(regT0, regT0, failures);
- addSlowCase(failures);
- addJump(branch32(GreaterThan, regT0, Imm32(asString(getConstantOperand(op1))->tryGetValue()[0])), target);
- return;
- }
- if (isOperandConstantImmediateChar(op2)) {
- emitGetVirtualRegister(op1, regT0);
- addSlowCase(emitJumpIfNotJSCell(regT0));
- JumpList failures;
- emitLoadCharacterString(regT0, regT0, failures);
- addSlowCase(failures);
- addJump(branch32(LessThan, regT0, Imm32(asString(getConstantOperand(op2))->tryGetValue()[0])), target);
- return;
- }
- if (isOperandConstantImmediateInt(op2)) {
- emitGetVirtualRegister(op1, regT0);
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
- int32_t op2imm = getConstantOperandImmediateInt(op2);
- addJump(branch32(LessThan, regT0, Imm32(op2imm)), target);
- } else if (isOperandConstantImmediateInt(op1)) {
- emitGetVirtualRegister(op2, regT1);
- emitJumpSlowCaseIfNotImmediateInteger(regT1);
- int32_t op1imm = getConstantOperandImmediateInt(op1);
- addJump(branch32(GreaterThan, regT1, Imm32(op1imm)), target);
- } else {
- emitGetVirtualRegisters(op1, regT0, op2, regT1);
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
- emitJumpSlowCaseIfNotImmediateInteger(regT1);
-
- addJump(branch32(LessThan, regT0, regT1), target);
- }
-}
-
-void JIT::emitSlow_op_jless(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned op1 = currentInstruction[1].u.operand;
- unsigned op2 = currentInstruction[2].u.operand;
- unsigned target = currentInstruction[3].u.operand;
-
- // We generate inline code for the following cases in the slow path:
- // - floating-point number to constant int immediate
- // - constant int immediate to floating-point number
- // - floating-point number to floating-point number.
- if (isOperandConstantImmediateChar(op1) || isOperandConstantImmediateChar(op2)) {
- linkSlowCase(iter);
- linkSlowCase(iter);
- linkSlowCase(iter);
- linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_jless);
- stubCall.addArgument(op1, regT0);
- stubCall.addArgument(op2, regT1);
- stubCall.call();
- emitJumpSlowToHot(branchTest32(NonZero, regT0), target);
- return;
- }
-
- if (isOperandConstantImmediateInt(op2)) {
- linkSlowCase(iter);
-
- if (supportsFloatingPoint()) {
- Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
- addPtr(tagTypeNumberRegister, regT0);
- movePtrToDouble(regT0, fpRegT0);
-
- int32_t op2imm = getConstantOperand(op2).asInt32();
-
- move(Imm32(op2imm), regT1);
- convertInt32ToDouble(regT1, fpRegT1);
-
- emitJumpSlowToHot(branchDouble(DoubleLessThan, fpRegT0, fpRegT1), target);
-
- emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));
-
- fail1.link(this);
- }
-
- JITStubCall stubCall(this, cti_op_jless);
- stubCall.addArgument(regT0);
- stubCall.addArgument(op2, regT2);
- stubCall.call();
- emitJumpSlowToHot(branchTest32(NonZero, regT0), target);
-
- } else if (isOperandConstantImmediateInt(op1)) {
- linkSlowCase(iter);
-
- if (supportsFloatingPoint()) {
- Jump fail1 = emitJumpIfNotImmediateNumber(regT1);
- addPtr(tagTypeNumberRegister, regT1);
- movePtrToDouble(regT1, fpRegT1);
-
- int32_t op1imm = getConstantOperand(op1).asInt32();
-
- move(Imm32(op1imm), regT0);
- convertInt32ToDouble(regT0, fpRegT0);
-
- emitJumpSlowToHot(branchDouble(DoubleLessThan, fpRegT0, fpRegT1), target);
-
- emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));
-
- fail1.link(this);
- }
-
- JITStubCall stubCall(this, cti_op_jless);
- stubCall.addArgument(op1, regT2);
- stubCall.addArgument(regT1);
- stubCall.call();
- emitJumpSlowToHot(branchTest32(NonZero, regT0), target);
-
- } else {
- linkSlowCase(iter);
-
- if (supportsFloatingPoint()) {
- Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
- Jump fail2 = emitJumpIfNotImmediateNumber(regT1);
- Jump fail3 = emitJumpIfImmediateInteger(regT1);
- addPtr(tagTypeNumberRegister, regT0);
- addPtr(tagTypeNumberRegister, regT1);
- movePtrToDouble(regT0, fpRegT0);
- movePtrToDouble(regT1, fpRegT1);
-
- emitJumpSlowToHot(branchDouble(DoubleLessThan, fpRegT0, fpRegT1), target);
-
- emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));
-
- fail1.link(this);
- fail2.link(this);
- fail3.link(this);
- }
-
- linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_jless);
- stubCall.addArgument(regT0);
- stubCall.addArgument(regT1);
- stubCall.call();
- emitJumpSlowToHot(branchTest32(NonZero, regT0), target);
- }
-}
-
-void JIT::emit_op_jlesseq(Instruction* currentInstruction, bool invert)
-{
- unsigned op1 = currentInstruction[1].u.operand;
- unsigned op2 = currentInstruction[2].u.operand;
- unsigned target = currentInstruction[3].u.operand;
-
- // We generate inline code for the following cases in the fast path:
- // - int immediate to constant int immediate
- // - constant int immediate to int immediate
- // - int immediate to int immediate
-
- if (isOperandConstantImmediateChar(op1)) {
- emitGetVirtualRegister(op2, regT0);
- addSlowCase(emitJumpIfNotJSCell(regT0));
- JumpList failures;
- emitLoadCharacterString(regT0, regT0, failures);
- addSlowCase(failures);
- addJump(branch32(invert ? LessThan : GreaterThanOrEqual, regT0, Imm32(asString(getConstantOperand(op1))->tryGetValue()[0])), target);
- return;
- }
- if (isOperandConstantImmediateChar(op2)) {
- emitGetVirtualRegister(op1, regT0);
- addSlowCase(emitJumpIfNotJSCell(regT0));
- JumpList failures;
- emitLoadCharacterString(regT0, regT0, failures);
- addSlowCase(failures);
- addJump(branch32(invert ? GreaterThan : LessThanOrEqual, regT0, Imm32(asString(getConstantOperand(op2))->tryGetValue()[0])), target);
- return;
- }
- if (isOperandConstantImmediateInt(op2)) {
- emitGetVirtualRegister(op1, regT0);
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
- int32_t op2imm = getConstantOperandImmediateInt(op2);
- addJump(branch32(invert ? GreaterThan : LessThanOrEqual, regT0, Imm32(op2imm)), target);
- } else if (isOperandConstantImmediateInt(op1)) {
- emitGetVirtualRegister(op2, regT1);
- emitJumpSlowCaseIfNotImmediateInteger(regT1);
- int32_t op1imm = getConstantOperandImmediateInt(op1);
- addJump(branch32(invert ? LessThan : GreaterThanOrEqual, regT1, Imm32(op1imm)), target);
- } else {
- emitGetVirtualRegisters(op1, regT0, op2, regT1);
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
- emitJumpSlowCaseIfNotImmediateInteger(regT1);
-
- addJump(branch32(invert ? GreaterThan : LessThanOrEqual, regT0, regT1), target);
- }
-}
-
-void JIT::emitSlow_op_jlesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter, bool invert)
-{
- unsigned op1 = currentInstruction[1].u.operand;
- unsigned op2 = currentInstruction[2].u.operand;
- unsigned target = currentInstruction[3].u.operand;
-
- // We generate inline code for the following cases in the slow path:
- // - floating-point number to constant int immediate
- // - constant int immediate to floating-point number
- // - floating-point number to floating-point number.
-
- if (isOperandConstantImmediateChar(op1) || isOperandConstantImmediateChar(op2)) {
- linkSlowCase(iter);
- linkSlowCase(iter);
- linkSlowCase(iter);
- linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_jlesseq);
- stubCall.addArgument(op1, regT0);
- stubCall.addArgument(op2, regT1);
- stubCall.call();
- emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, regT0), target);
- return;
- }
-
- if (isOperandConstantImmediateInt(op2)) {
- linkSlowCase(iter);
-
- if (supportsFloatingPoint()) {
- Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
- addPtr(tagTypeNumberRegister, regT0);
- movePtrToDouble(regT0, fpRegT0);
-
-            int32_t op2imm = getConstantOperand(op2).asInt32();
-
- move(Imm32(op2imm), regT1);
- convertInt32ToDouble(regT1, fpRegT1);
-
- emitJumpSlowToHot(branchDouble(invert ? DoubleLessThanOrUnordered : DoubleGreaterThanOrEqual, fpRegT1, fpRegT0), target);
-
- emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnlesseq));
-
- fail1.link(this);
- }
-
- JITStubCall stubCall(this, cti_op_jlesseq);
- stubCall.addArgument(regT0);
- stubCall.addArgument(op2, regT2);
- stubCall.call();
- emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, regT0), target);
-
- } else if (isOperandConstantImmediateInt(op1)) {
- linkSlowCase(iter);
-
- if (supportsFloatingPoint()) {
- Jump fail1 = emitJumpIfNotImmediateNumber(regT1);
- addPtr(tagTypeNumberRegister, regT1);
- movePtrToDouble(regT1, fpRegT1);
-
-            int32_t op1imm = getConstantOperand(op1).asInt32();
-
- move(Imm32(op1imm), regT0);
- convertInt32ToDouble(regT0, fpRegT0);
-
- emitJumpSlowToHot(branchDouble(invert ? DoubleLessThanOrUnordered : DoubleGreaterThanOrEqual, fpRegT1, fpRegT0), target);
-
- emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnlesseq));
-
- fail1.link(this);
- }
-
- JITStubCall stubCall(this, cti_op_jlesseq);
- stubCall.addArgument(op1, regT2);
- stubCall.addArgument(regT1);
- stubCall.call();
- emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, regT0), target);
-
- } else {
- linkSlowCase(iter);
-
- if (supportsFloatingPoint()) {
- Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
- Jump fail2 = emitJumpIfNotImmediateNumber(regT1);
- Jump fail3 = emitJumpIfImmediateInteger(regT1);
- addPtr(tagTypeNumberRegister, regT0);
- addPtr(tagTypeNumberRegister, regT1);
- movePtrToDouble(regT0, fpRegT0);
- movePtrToDouble(regT1, fpRegT1);
-
- emitJumpSlowToHot(branchDouble(invert ? DoubleLessThanOrUnordered : DoubleGreaterThanOrEqual, fpRegT1, fpRegT0), target);
-
- emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnlesseq));
-
- fail1.link(this);
- fail2.link(this);
- fail3.link(this);
- }
-
- linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_jlesseq);
- stubCall.addArgument(regT0);
- stubCall.addArgument(regT1);
- stubCall.call();
- emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, regT0), target);
- }
-}
-
-void JIT::emit_op_jnlesseq(Instruction* currentInstruction)
-{
- emit_op_jlesseq(currentInstruction, true);
-}
-
-void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- emitSlow_op_jlesseq(currentInstruction, iter, true);
-}
-
-void JIT::emit_op_bitand(Instruction* currentInstruction)
-{
- unsigned result = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
-
- if (isOperandConstantImmediateInt(op1)) {
- emitGetVirtualRegister(op2, regT0);
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
- int32_t imm = getConstantOperandImmediateInt(op1);
- andPtr(Imm32(imm), regT0);
- if (imm >= 0)
- emitFastArithIntToImmNoCheck(regT0, regT0);
- } else if (isOperandConstantImmediateInt(op2)) {
- emitGetVirtualRegister(op1, regT0);
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
- int32_t imm = getConstantOperandImmediateInt(op2);
- andPtr(Imm32(imm), regT0);
- if (imm >= 0)
- emitFastArithIntToImmNoCheck(regT0, regT0);
- } else {
- emitGetVirtualRegisters(op1, regT0, op2, regT1);
- andPtr(regT1, regT0);
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
- }
- emitPutVirtualRegister(result);
-}
-
-void JIT::emitSlow_op_bitand(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned result = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
-
- linkSlowCase(iter);
- if (isOperandConstantImmediateInt(op1)) {
- JITStubCall stubCall(this, cti_op_bitand);
- stubCall.addArgument(op1, regT2);
- stubCall.addArgument(regT0);
- stubCall.call(result);
- } else if (isOperandConstantImmediateInt(op2)) {
- JITStubCall stubCall(this, cti_op_bitand);
- stubCall.addArgument(regT0);
- stubCall.addArgument(op2, regT2);
- stubCall.call(result);
- } else {
- JITStubCall stubCall(this, cti_op_bitand);
- stubCall.addArgument(op1, regT2);
- stubCall.addArgument(regT1);
- stubCall.call(result);
- }
-}
-
-void JIT::emit_op_post_inc(Instruction* currentInstruction)
-{
- unsigned result = currentInstruction[1].u.operand;
- unsigned srcDst = currentInstruction[2].u.operand;
-
- emitGetVirtualRegister(srcDst, regT0);
- move(regT0, regT1);
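-    // regT0 keeps the original value, which is the result of the expression;
-    // the copy in regT1 is incremented and written back to srcDst.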
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
- addSlowCase(branchAdd32(Overflow, Imm32(1), regT1));
- emitFastArithIntToImmNoCheck(regT1, regT1);
- emitPutVirtualRegister(srcDst, regT1);
- emitPutVirtualRegister(result);
-}
-
-void JIT::emitSlow_op_post_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned result = currentInstruction[1].u.operand;
- unsigned srcDst = currentInstruction[2].u.operand;
-
- linkSlowCase(iter);
- linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_post_inc);
- stubCall.addArgument(regT0);
- stubCall.addArgument(Imm32(srcDst));
- stubCall.call(result);
-}
-
-void JIT::emit_op_post_dec(Instruction* currentInstruction)
-{
- unsigned result = currentInstruction[1].u.operand;
- unsigned srcDst = currentInstruction[2].u.operand;
-
- emitGetVirtualRegister(srcDst, regT0);
- move(regT0, regT1);
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
- addSlowCase(branchSub32(Zero, Imm32(1), regT1));
- emitFastArithIntToImmNoCheck(regT1, regT1);
- emitPutVirtualRegister(srcDst, regT1);
- emitPutVirtualRegister(result);
-}
-
-void JIT::emitSlow_op_post_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned result = currentInstruction[1].u.operand;
- unsigned srcDst = currentInstruction[2].u.operand;
-
- linkSlowCase(iter);
- linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_post_dec);
- stubCall.addArgument(regT0);
- stubCall.addArgument(Imm32(srcDst));
- stubCall.call(result);
-}
-
-void JIT::emit_op_pre_inc(Instruction* currentInstruction)
-{
- unsigned srcDst = currentInstruction[1].u.operand;
-
- emitGetVirtualRegister(srcDst, regT0);
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
- addSlowCase(branchAdd32(Overflow, Imm32(1), regT0));
- emitFastArithIntToImmNoCheck(regT0, regT0);
- emitPutVirtualRegister(srcDst);
-}
-
-void JIT::emitSlow_op_pre_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned srcDst = currentInstruction[1].u.operand;
-
- Jump notImm = getSlowCase(iter);
- linkSlowCase(iter);
- emitGetVirtualRegister(srcDst, regT0);
- notImm.link(this);
- JITStubCall stubCall(this, cti_op_pre_inc);
- stubCall.addArgument(regT0);
- stubCall.call(srcDst);
-}
-
-void JIT::emit_op_pre_dec(Instruction* currentInstruction)
-{
- unsigned srcDst = currentInstruction[1].u.operand;
-
- emitGetVirtualRegister(srcDst, regT0);
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
- addSlowCase(branchSub32(Zero, Imm32(1), regT0));
- emitFastArithIntToImmNoCheck(regT0, regT0);
- emitPutVirtualRegister(srcDst);
-}
-
-void JIT::emitSlow_op_pre_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned srcDst = currentInstruction[1].u.operand;
-
- Jump notImm = getSlowCase(iter);
- linkSlowCase(iter);
- emitGetVirtualRegister(srcDst, regT0);
- notImm.link(this);
- JITStubCall stubCall(this, cti_op_pre_dec);
- stubCall.addArgument(regT0);
- stubCall.call(srcDst);
-}
-
-/* ------------------------------ BEGIN: OP_MOD ------------------------------ */
-
-#if CPU(X86) || CPU(X86_64) || CPU(MIPS)
-
-void JIT::emit_op_mod(Instruction* currentInstruction)
-{
- unsigned result = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
-
-#if CPU(X86) || CPU(X86_64)
- // Make sure registers are correct for x86 IDIV instructions.
- ASSERT(regT0 == X86Registers::eax);
- ASSERT(regT1 == X86Registers::edx);
- ASSERT(regT2 == X86Registers::ecx);
-#endif
-
- emitGetVirtualRegisters(op1, regT0, op2, regT2);
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
- emitJumpSlowCaseIfNotImmediateInteger(regT2);
-
- addSlowCase(branchPtr(Equal, regT2, ImmPtr(JSValue::encode(jsNumber(0)))));
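-    // x86 idiv raises a hardware fault on a zero divisor, while JS defines
-    // x % 0 as NaN, so a zero divisor is routed to the slow case instead.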
- m_assembler.cdq();
- m_assembler.idivl_r(regT2);
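-    // cdq sign-extends eax (regT0) into edx:eax for idivl, which then leaves
-    // the quotient in eax and the remainder in edx (regT1); op_mod wants the
-    // remainder, so regT1 is retagged into regT0 below.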
- emitFastArithReTagImmediate(regT1, regT0);
- emitPutVirtualRegister(result);
-}
-
-void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned result = currentInstruction[1].u.operand;
-
- linkSlowCase(iter);
- linkSlowCase(iter);
- linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_mod);
- stubCall.addArgument(regT0);
- stubCall.addArgument(regT2);
- stubCall.call(result);
-}
-
-#else // CPU(X86) || CPU(X86_64) || CPU(MIPS)
-
-void JIT::emit_op_mod(Instruction* currentInstruction)
-{
- unsigned result = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
-
- JITStubCall stubCall(this, cti_op_mod);
- stubCall.addArgument(op1, regT2);
- stubCall.addArgument(op2, regT2);
- stubCall.call(result);
-}
-
-void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
-#if ENABLE(JIT_USE_SOFT_MODULO)
- unsigned result = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
- linkSlowCase(iter);
- linkSlowCase(iter);
- linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_mod);
- stubCall.addArgument(op1, regT2);
- stubCall.addArgument(op2, regT2);
- stubCall.call(result);
-#else
- ASSERT_NOT_REACHED();
-#endif
-}
-
-#endif // CPU(X86) || CPU(X86_64) || CPU(MIPS)
-
-/* ------------------------------ END: OP_MOD ------------------------------ */
-
-/* ------------------------------ BEGIN: USE(JSVALUE64) (OP_ADD, OP_SUB, OP_MUL) ------------------------------ */
-
-void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned, unsigned op1, unsigned op2, OperandTypes)
-{
- emitGetVirtualRegisters(op1, regT0, op2, regT1);
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
- emitJumpSlowCaseIfNotImmediateInteger(regT1);
- if (opcodeID == op_add)
- addSlowCase(branchAdd32(Overflow, regT1, regT0));
- else if (opcodeID == op_sub)
- addSlowCase(branchSub32(Overflow, regT1, regT0));
- else {
- ASSERT(opcodeID == op_mul);
- addSlowCase(branchMul32(Overflow, regT1, regT0));
- addSlowCase(branchTest32(Zero, regT0));
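-        // A zero result may really be -0 (e.g. 0 * -1), which cannot be
-        // represented as an immediate int, hence the extra slow case.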
- }
- emitFastArithIntToImmNoCheck(regT0, regT0);
-}
-
-void JIT::compileBinaryArithOpSlowCase(OpcodeID opcodeID, Vector<SlowCaseEntry>::iterator& iter, unsigned result, unsigned op1, unsigned op2, OperandTypes types, bool op1HasImmediateIntFastCase, bool op2HasImmediateIntFastCase)
-{
- // We assume that subtracting TagTypeNumber is equivalent to adding DoubleEncodeOffset.
- COMPILE_ASSERT(((JSImmediate::TagTypeNumber + JSImmediate::DoubleEncodeOffset) == 0), TagTypeNumber_PLUS_DoubleEncodeOffset_EQUALS_0);
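-    // A sketch of the arithmetic this buys us, assuming the standard JSVALUE64
-    // encoding where a boxed double is bits(d) + DoubleEncodeOffset:
-    //   unbox: boxed + TagTypeNumber == boxed - DoubleEncodeOffset == bits(d)
-    //   box:   bits(d) - TagTypeNumber == bits(d) + DoubleEncodeOffset
-    // which is why addPtr/subPtr of tagTypeNumberRegister appear below.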
-
- Jump notImm1;
- Jump notImm2;
- if (op1HasImmediateIntFastCase) {
- notImm2 = getSlowCase(iter);
- } else if (op2HasImmediateIntFastCase) {
- notImm1 = getSlowCase(iter);
- } else {
- notImm1 = getSlowCase(iter);
- notImm2 = getSlowCase(iter);
- }
-
- linkSlowCase(iter); // Integer overflow case - we could handle this in JIT code, but this is likely rare.
- if (opcodeID == op_mul && !op1HasImmediateIntFastCase && !op2HasImmediateIntFastCase) // op_mul has an extra slow case to handle 0 * negative number.
- linkSlowCase(iter);
- emitGetVirtualRegister(op1, regT0);
-
- Label stubFunctionCall(this);
- JITStubCall stubCall(this, opcodeID == op_add ? cti_op_add : opcodeID == op_sub ? cti_op_sub : cti_op_mul);
- if (op1HasImmediateIntFastCase || op2HasImmediateIntFastCase) {
- emitGetVirtualRegister(op1, regT0);
- emitGetVirtualRegister(op2, regT1);
- }
- stubCall.addArgument(regT0);
- stubCall.addArgument(regT1);
- stubCall.call(result);
- Jump end = jump();
-
- if (op1HasImmediateIntFastCase) {
- notImm2.link(this);
- if (!types.second().definitelyIsNumber())
- emitJumpIfNotImmediateNumber(regT0).linkTo(stubFunctionCall, this);
- emitGetVirtualRegister(op1, regT1);
- convertInt32ToDouble(regT1, fpRegT1);
- addPtr(tagTypeNumberRegister, regT0);
- movePtrToDouble(regT0, fpRegT2);
- } else if (op2HasImmediateIntFastCase) {
- notImm1.link(this);
- if (!types.first().definitelyIsNumber())
- emitJumpIfNotImmediateNumber(regT0).linkTo(stubFunctionCall, this);
- emitGetVirtualRegister(op2, regT1);
- convertInt32ToDouble(regT1, fpRegT1);
- addPtr(tagTypeNumberRegister, regT0);
- movePtrToDouble(regT0, fpRegT2);
- } else {
-        // If we get here, regT0 (eax) is not an int32; regT1 (edx) has not been checked yet.
- notImm1.link(this);
- if (!types.first().definitelyIsNumber())
- emitJumpIfNotImmediateNumber(regT0).linkTo(stubFunctionCall, this);
- if (!types.second().definitelyIsNumber())
- emitJumpIfNotImmediateNumber(regT1).linkTo(stubFunctionCall, this);
- addPtr(tagTypeNumberRegister, regT0);
- movePtrToDouble(regT0, fpRegT1);
- Jump op2isDouble = emitJumpIfNotImmediateInteger(regT1);
- convertInt32ToDouble(regT1, fpRegT2);
- Jump op2wasInteger = jump();
-
-        // If we get here, regT0 (eax) IS an int32, but regT1 (edx) is not.
- notImm2.link(this);
- if (!types.second().definitelyIsNumber())
- emitJumpIfNotImmediateNumber(regT1).linkTo(stubFunctionCall, this);
- convertInt32ToDouble(regT0, fpRegT1);
- op2isDouble.link(this);
- addPtr(tagTypeNumberRegister, regT1);
- movePtrToDouble(regT1, fpRegT2);
- op2wasInteger.link(this);
- }
-
- if (opcodeID == op_add)
- addDouble(fpRegT2, fpRegT1);
- else if (opcodeID == op_sub)
- subDouble(fpRegT2, fpRegT1);
- else if (opcodeID == op_mul)
- mulDouble(fpRegT2, fpRegT1);
- else {
- ASSERT(opcodeID == op_div);
- divDouble(fpRegT2, fpRegT1);
- }
- moveDoubleToPtr(fpRegT1, regT0);
- subPtr(tagTypeNumberRegister, regT0);
- emitPutVirtualRegister(result, regT0);
-
- end.link(this);
-}
-
-void JIT::emit_op_add(Instruction* currentInstruction)
-{
- unsigned result = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
- OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
-
- if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
- JITStubCall stubCall(this, cti_op_add);
- stubCall.addArgument(op1, regT2);
- stubCall.addArgument(op2, regT2);
- stubCall.call(result);
- return;
- }
-
- if (isOperandConstantImmediateInt(op1)) {
- emitGetVirtualRegister(op2, regT0);
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
- addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op1)), regT0));
- emitFastArithIntToImmNoCheck(regT0, regT0);
- } else if (isOperandConstantImmediateInt(op2)) {
- emitGetVirtualRegister(op1, regT0);
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
- addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op2)), regT0));
- emitFastArithIntToImmNoCheck(regT0, regT0);
- } else
- compileBinaryArithOp(op_add, result, op1, op2, types);
-
- emitPutVirtualRegister(result);
-}
-
-void JIT::emitSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned result = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
- OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
-
- if (!types.first().mightBeNumber() || !types.second().mightBeNumber())
- return;
-
- bool op1HasImmediateIntFastCase = isOperandConstantImmediateInt(op1);
- bool op2HasImmediateIntFastCase = !op1HasImmediateIntFastCase && isOperandConstantImmediateInt(op2);
-    compileBinaryArithOpSlowCase(op_add, iter, result, op1, op2, types, op1HasImmediateIntFastCase, op2HasImmediateIntFastCase);
-}
-
-void JIT::emit_op_mul(Instruction* currentInstruction)
-{
- unsigned result = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
- OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
-
- // For now, only plant a fast int case if the constant operand is greater than zero.
- int32_t value;
- if (isOperandConstantImmediateInt(op1) && ((value = getConstantOperandImmediateInt(op1)) > 0)) {
- emitGetVirtualRegister(op2, regT0);
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
- addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT0));
- emitFastArithReTagImmediate(regT0, regT0);
- } else if (isOperandConstantImmediateInt(op2) && ((value = getConstantOperandImmediateInt(op2)) > 0)) {
- emitGetVirtualRegister(op1, regT0);
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
- addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT0));
- emitFastArithReTagImmediate(regT0, regT0);
- } else
- compileBinaryArithOp(op_mul, result, op1, op2, types);
-
- emitPutVirtualRegister(result);
-}
-
-void JIT::emitSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned result = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
- OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
-
- bool op1HasImmediateIntFastCase = isOperandConstantImmediateInt(op1) && getConstantOperandImmediateInt(op1) > 0;
- bool op2HasImmediateIntFastCase = !op1HasImmediateIntFastCase && isOperandConstantImmediateInt(op2) && getConstantOperandImmediateInt(op2) > 0;
-    compileBinaryArithOpSlowCase(op_mul, iter, result, op1, op2, types, op1HasImmediateIntFastCase, op2HasImmediateIntFastCase);
-}
-
-void JIT::emit_op_div(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
- OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
-
- if (isOperandConstantImmediateDouble(op1)) {
- emitGetVirtualRegister(op1, regT0);
- addPtr(tagTypeNumberRegister, regT0);
- movePtrToDouble(regT0, fpRegT0);
- } else if (isOperandConstantImmediateInt(op1)) {
- emitLoadInt32ToDouble(op1, fpRegT0);
- } else {
- emitGetVirtualRegister(op1, regT0);
- if (!types.first().definitelyIsNumber())
- emitJumpSlowCaseIfNotImmediateNumber(regT0);
- Jump notInt = emitJumpIfNotImmediateInteger(regT0);
- convertInt32ToDouble(regT0, fpRegT0);
- Jump skipDoubleLoad = jump();
- notInt.link(this);
- addPtr(tagTypeNumberRegister, regT0);
- movePtrToDouble(regT0, fpRegT0);
- skipDoubleLoad.link(this);
- }
-
- if (isOperandConstantImmediateDouble(op2)) {
- emitGetVirtualRegister(op2, regT1);
- addPtr(tagTypeNumberRegister, regT1);
- movePtrToDouble(regT1, fpRegT1);
- } else if (isOperandConstantImmediateInt(op2)) {
- emitLoadInt32ToDouble(op2, fpRegT1);
- } else {
- emitGetVirtualRegister(op2, regT1);
- if (!types.second().definitelyIsNumber())
- emitJumpSlowCaseIfNotImmediateNumber(regT1);
- Jump notInt = emitJumpIfNotImmediateInteger(regT1);
- convertInt32ToDouble(regT1, fpRegT1);
- Jump skipDoubleLoad = jump();
- notInt.link(this);
- addPtr(tagTypeNumberRegister, regT1);
- movePtrToDouble(regT1, fpRegT1);
- skipDoubleLoad.link(this);
- }
- divDouble(fpRegT1, fpRegT0);
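-    // Division always produces a boxed double on this path (e.g. 3 / 2 yields
-    // 1.5); no attempt is made to detect an integral quotient.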
-
- // Double result.
- moveDoubleToPtr(fpRegT0, regT0);
- subPtr(tagTypeNumberRegister, regT0);
-
- emitPutVirtualRegister(dst, regT0);
-}
-
-void JIT::emitSlow_op_div(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned result = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
- OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
- if (types.first().definitelyIsNumber() && types.second().definitelyIsNumber()) {
-#ifndef NDEBUG
- breakpoint();
-#endif
- return;
- }
- if (!isOperandConstantImmediateDouble(op1) && !isOperandConstantImmediateInt(op1)) {
- if (!types.first().definitelyIsNumber())
- linkSlowCase(iter);
- }
- if (!isOperandConstantImmediateDouble(op2) && !isOperandConstantImmediateInt(op2)) {
- if (!types.second().definitelyIsNumber())
- linkSlowCase(iter);
- }
-    // Fall back to the stub call for the remaining cases; unlike op_mul, div has no extra slow case for -0 here.
- JITStubCall stubCall(this, cti_op_div);
- stubCall.addArgument(op1, regT2);
- stubCall.addArgument(op2, regT2);
- stubCall.call(result);
-}
-
-void JIT::emit_op_sub(Instruction* currentInstruction)
-{
- unsigned result = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
- OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
-
- compileBinaryArithOp(op_sub, result, op1, op2, types);
- emitPutVirtualRegister(result);
-}
-
-void JIT::emitSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned result = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
- OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
-
- compileBinaryArithOpSlowCase(op_sub, iter, result, op1, op2, types, false, false);
-}
-
-/* ------------------------------ END: OP_ADD, OP_SUB, OP_MUL ------------------------------ */
-
-} // namespace JSC
-
-#endif // USE(JSVALUE64)
-#endif // ENABLE(JIT)
diff --git a/JavaScriptCore/jit/JITArithmetic32_64.cpp b/JavaScriptCore/jit/JITArithmetic32_64.cpp
deleted file mode 100644
index e0b31f0..0000000
--- a/JavaScriptCore/jit/JITArithmetic32_64.cpp
+++ /dev/null
@@ -1,1424 +0,0 @@
-/*
-* Copyright (C) 2008 Apple Inc. All rights reserved.
-*
-* Redistribution and use in source and binary forms, with or without
-* modification, are permitted provided that the following conditions
-* are met:
-* 1. Redistributions of source code must retain the above copyright
-* notice, this list of conditions and the following disclaimer.
-* 2. Redistributions in binary form must reproduce the above copyright
-* notice, this list of conditions and the following disclaimer in the
-* documentation and/or other materials provided with the distribution.
-*
-* THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
-* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
-* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
-* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-*/
-
-#include "config.h"
-
-#if ENABLE(JIT)
-#if USE(JSVALUE32_64)
-#include "JIT.h"
-
-#include "CodeBlock.h"
-#include "JITInlineMethods.h"
-#include "JITStubCall.h"
-#include "JITStubs.h"
-#include "JSArray.h"
-#include "JSFunction.h"
-#include "Interpreter.h"
-#include "ResultType.h"
-#include "SamplingTool.h"
-
-#ifndef NDEBUG
-#include <stdio.h>
-#endif
-
-using namespace std;
-
-namespace JSC {
-
-void JIT::emit_op_negate(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned src = currentInstruction[2].u.operand;
-
- emitLoad(src, regT1, regT0);
-
- Jump srcNotInt = branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag));
- addSlowCase(branchTest32(Zero, regT0, Imm32(0x7fffffff)));
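-    // The 0x7fffffff mask catches payloads 0 and 0x80000000: -(0) is -0,
-    // which has no int32 representation, and -(INT32_MIN) overflows int32,
-    // so both go to the slow case.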
- neg32(regT0);
- emitStoreInt32(dst, regT0, (dst == src));
-
- Jump end = jump();
-
- srcNotInt.link(this);
- addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));
-
- xor32(Imm32(1 << 31), regT1);
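-    // For a double, regT1 holds the high word of the IEEE 754 bits, so
-    // flipping bit 31 flips the sign bit and negates the value directly.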
- store32(regT1, tagFor(dst));
- if (dst != src)
- store32(regT0, payloadFor(dst));
-
- end.link(this);
-}
-
-void JIT::emitSlow_op_negate(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
-
- linkSlowCase(iter); // 0x7fffffff check
- linkSlowCase(iter); // double check
-
- JITStubCall stubCall(this, cti_op_negate);
- stubCall.addArgument(regT1, regT0);
- stubCall.call(dst);
-}
-
-void JIT::emit_op_jnless(Instruction* currentInstruction)
-{
- unsigned op1 = currentInstruction[1].u.operand;
- unsigned op2 = currentInstruction[2].u.operand;
- unsigned target = currentInstruction[3].u.operand;
-
- JumpList notInt32Op1;
- JumpList notInt32Op2;
-
- // Character less.
- if (isOperandConstantImmediateChar(op1)) {
- emitLoad(op2, regT1, regT0);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));
- JumpList failures;
- emitLoadCharacterString(regT0, regT0, failures);
- addSlowCase(failures);
- addJump(branch32(LessThanOrEqual, regT0, Imm32(asString(getConstantOperand(op1))->tryGetValue()[0])), target);
- return;
- }
- if (isOperandConstantImmediateChar(op2)) {
- emitLoad(op1, regT1, regT0);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));
- JumpList failures;
- emitLoadCharacterString(regT0, regT0, failures);
- addSlowCase(failures);
- addJump(branch32(GreaterThanOrEqual, regT0, Imm32(asString(getConstantOperand(op2))->tryGetValue()[0])), target);
- return;
- }
- if (isOperandConstantImmediateInt(op1)) {
- // Int32 less.
- emitLoad(op2, regT3, regT2);
- notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
- addJump(branch32(LessThanOrEqual, regT2, Imm32(getConstantOperand(op1).asInt32())), target);
- } else if (isOperandConstantImmediateInt(op2)) {
- emitLoad(op1, regT1, regT0);
- notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- addJump(branch32(GreaterThanOrEqual, regT0, Imm32(getConstantOperand(op2).asInt32())), target);
- } else {
- emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
- notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
- addJump(branch32(GreaterThanOrEqual, regT0, regT2), target);
- }
-
- if (!supportsFloatingPoint()) {
- addSlowCase(notInt32Op1);
- addSlowCase(notInt32Op2);
- return;
- }
- Jump end = jump();
-
- // Double less.
- emitBinaryDoubleOp(op_jnless, target, op1, op2, OperandTypes(), notInt32Op1, notInt32Op2, !isOperandConstantImmediateInt(op1), isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2));
- end.link(this);
-}
-
-void JIT::emitSlow_op_jnless(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned op1 = currentInstruction[1].u.operand;
- unsigned op2 = currentInstruction[2].u.operand;
- unsigned target = currentInstruction[3].u.operand;
-
- if (isOperandConstantImmediateChar(op1) || isOperandConstantImmediateChar(op2)) {
- linkSlowCase(iter);
- linkSlowCase(iter);
- linkSlowCase(iter);
- linkSlowCase(iter);
- } else {
- if (!supportsFloatingPoint()) {
- if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
- linkSlowCase(iter); // int32 check
- linkSlowCase(iter); // int32 check
- } else {
- if (!isOperandConstantImmediateInt(op1)) {
- linkSlowCase(iter); // double check
- linkSlowCase(iter); // int32 check
- }
- if (isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2))
- linkSlowCase(iter); // double check
- }
- }
-
- JITStubCall stubCall(this, cti_op_jless);
- stubCall.addArgument(op1);
- stubCall.addArgument(op2);
- stubCall.call();
- emitJumpSlowToHot(branchTest32(Zero, regT0), target);
-}
-
-void JIT::emit_op_jless(Instruction* currentInstruction)
-{
- unsigned op1 = currentInstruction[1].u.operand;
- unsigned op2 = currentInstruction[2].u.operand;
- unsigned target = currentInstruction[3].u.operand;
-
- JumpList notInt32Op1;
- JumpList notInt32Op2;
-
- // Character less.
- if (isOperandConstantImmediateChar(op1)) {
- emitLoad(op2, regT1, regT0);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));
- JumpList failures;
- emitLoadCharacterString(regT0, regT0, failures);
- addSlowCase(failures);
- addJump(branch32(GreaterThan, regT0, Imm32(asString(getConstantOperand(op1))->tryGetValue()[0])), target);
- return;
- }
- if (isOperandConstantImmediateChar(op2)) {
- emitLoad(op1, regT1, regT0);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));
- JumpList failures;
- emitLoadCharacterString(regT0, regT0, failures);
- addSlowCase(failures);
- addJump(branch32(LessThan, regT0, Imm32(asString(getConstantOperand(op2))->tryGetValue()[0])), target);
- return;
- }
- if (isOperandConstantImmediateInt(op1)) {
- emitLoad(op2, regT3, regT2);
- notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
- addJump(branch32(GreaterThan, regT2, Imm32(getConstantOperand(op1).asInt32())), target);
- } else if (isOperandConstantImmediateInt(op2)) {
- emitLoad(op1, regT1, regT0);
- notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- addJump(branch32(LessThan, regT0, Imm32(getConstantOperand(op2).asInt32())), target);
- } else {
- emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
- notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
- addJump(branch32(LessThan, regT0, regT2), target);
- }
-
- if (!supportsFloatingPoint()) {
- addSlowCase(notInt32Op1);
- addSlowCase(notInt32Op2);
- return;
- }
- Jump end = jump();
-
- // Double less.
- emitBinaryDoubleOp(op_jless, target, op1, op2, OperandTypes(), notInt32Op1, notInt32Op2, !isOperandConstantImmediateInt(op1), isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2));
- end.link(this);
-}
-
-void JIT::emitSlow_op_jless(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned op1 = currentInstruction[1].u.operand;
- unsigned op2 = currentInstruction[2].u.operand;
- unsigned target = currentInstruction[3].u.operand;
-
- if (isOperandConstantImmediateChar(op1) || isOperandConstantImmediateChar(op2)) {
- linkSlowCase(iter);
- linkSlowCase(iter);
- linkSlowCase(iter);
- linkSlowCase(iter);
- } else {
- if (!supportsFloatingPoint()) {
- if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
- linkSlowCase(iter); // int32 check
- linkSlowCase(iter); // int32 check
- } else {
- if (!isOperandConstantImmediateInt(op1)) {
- linkSlowCase(iter); // double check
- linkSlowCase(iter); // int32 check
- }
- if (isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2))
- linkSlowCase(iter); // double check
- }
- }
- JITStubCall stubCall(this, cti_op_jless);
- stubCall.addArgument(op1);
- stubCall.addArgument(op2);
- stubCall.call();
- emitJumpSlowToHot(branchTest32(NonZero, regT0), target);
-}
-
-void JIT::emit_op_jlesseq(Instruction* currentInstruction, bool invert)
-{
- unsigned op1 = currentInstruction[1].u.operand;
- unsigned op2 = currentInstruction[2].u.operand;
- unsigned target = currentInstruction[3].u.operand;
-
- JumpList notInt32Op1;
- JumpList notInt32Op2;
-
- // Character less.
- if (isOperandConstantImmediateChar(op1)) {
- emitLoad(op2, regT1, regT0);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));
- JumpList failures;
- emitLoadCharacterString(regT0, regT0, failures);
- addSlowCase(failures);
- addJump(branch32(invert ? LessThan : GreaterThanOrEqual, regT0, Imm32(asString(getConstantOperand(op1))->tryGetValue()[0])), target);
- return;
- }
- if (isOperandConstantImmediateChar(op2)) {
- emitLoad(op1, regT1, regT0);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));
- JumpList failures;
- emitLoadCharacterString(regT0, regT0, failures);
- addSlowCase(failures);
- addJump(branch32(invert ? GreaterThan : LessThanOrEqual, regT0, Imm32(asString(getConstantOperand(op2))->tryGetValue()[0])), target);
- return;
- }
- if (isOperandConstantImmediateInt(op1)) {
- emitLoad(op2, regT3, regT2);
- notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
- addJump(branch32(invert ? LessThan : GreaterThanOrEqual, regT2, Imm32(getConstantOperand(op1).asInt32())), target);
- } else if (isOperandConstantImmediateInt(op2)) {
- emitLoad(op1, regT1, regT0);
- notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- addJump(branch32(invert ? GreaterThan : LessThanOrEqual, regT0, Imm32(getConstantOperand(op2).asInt32())), target);
- } else {
- emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
- notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
- addJump(branch32(invert ? GreaterThan : LessThanOrEqual, regT0, regT2), target);
- }
-
- if (!supportsFloatingPoint()) {
- addSlowCase(notInt32Op1);
- addSlowCase(notInt32Op2);
- return;
- }
- Jump end = jump();
-
- // Double less.
- emitBinaryDoubleOp(invert ? op_jnlesseq : op_jlesseq, target, op1, op2, OperandTypes(), notInt32Op1, notInt32Op2, !isOperandConstantImmediateInt(op1), isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2));
- end.link(this);
-}
-
-void JIT::emitSlow_op_jlesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter, bool invert)
-{
- unsigned op1 = currentInstruction[1].u.operand;
- unsigned op2 = currentInstruction[2].u.operand;
- unsigned target = currentInstruction[3].u.operand;
-
- if (isOperandConstantImmediateChar(op1) || isOperandConstantImmediateChar(op2)) {
- linkSlowCase(iter);
- linkSlowCase(iter);
- linkSlowCase(iter);
- linkSlowCase(iter);
- } else {
- if (!supportsFloatingPoint()) {
- if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
- linkSlowCase(iter); // int32 check
- linkSlowCase(iter); // int32 check
- } else {
- if (!isOperandConstantImmediateInt(op1)) {
- linkSlowCase(iter); // double check
- linkSlowCase(iter); // int32 check
- }
- if (isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2))
- linkSlowCase(iter); // double check
- }
- }
-
- JITStubCall stubCall(this, cti_op_jlesseq);
- stubCall.addArgument(op1);
- stubCall.addArgument(op2);
- stubCall.call();
- emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, regT0), target);
-}
-
-void JIT::emit_op_jnlesseq(Instruction* currentInstruction)
-{
- emit_op_jlesseq(currentInstruction, true);
-}
-
-void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- emitSlow_op_jlesseq(currentInstruction, iter, true);
-}
-
-// LeftShift (<<)
-
-void JIT::emit_op_lshift(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
-
- if (isOperandConstantImmediateInt(op2)) {
- emitLoad(op1, regT1, regT0);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- lshift32(Imm32(getConstantOperand(op2).asInt32()), regT0);
- emitStoreInt32(dst, regT0, dst == op1);
- return;
- }
-
- emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
- if (!isOperandConstantImmediateInt(op1))
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
- lshift32(regT2, regT0);
- emitStoreInt32(dst, regT0, dst == op1 || dst == op2);
-}
-
-void JIT::emitSlow_op_lshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
-
- if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
- linkSlowCase(iter); // int32 check
- linkSlowCase(iter); // int32 check
-
- JITStubCall stubCall(this, cti_op_lshift);
- stubCall.addArgument(op1);
- stubCall.addArgument(op2);
- stubCall.call(dst);
-}
-
-// RightShift (>>) and UnsignedRightShift (>>>) helper
-
-void JIT::emitRightShift(Instruction* currentInstruction, bool isUnsigned)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
-
-    // The slow case of rshift makes assumptions about which registers hold
-    // the shift arguments, so any changes here must be mirrored there as well.
- if (isOperandConstantImmediateInt(op2)) {
- emitLoad(op1, regT1, regT0);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- int shift = getConstantOperand(op2).asInt32();
- if (isUnsigned) {
- if (shift)
- urshift32(Imm32(shift & 0x1f), regT0);
-            // An unsigned shift where shift < 0 or shift = k*2^32 amounts to
-            // (essentially) a toUint conversion, which can produce a value we
-            // cannot represent as an immediate int.
- if (shift < 0 || !(shift & 31))
- addSlowCase(branch32(LessThan, regT0, Imm32(0)));
-        } else if (shift) { // A signed right shift by zero is simply a toInt conversion.
- rshift32(Imm32(shift & 0x1f), regT0);
- }
- emitStoreInt32(dst, regT0, dst == op1);
- return;
- }
-
- emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
- if (!isOperandConstantImmediateInt(op1))
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
- if (isUnsigned) {
- urshift32(regT2, regT0);
- addSlowCase(branch32(LessThan, regT0, Imm32(0)));
- } else
- rshift32(regT2, regT0);
- emitStoreInt32(dst, regT0, dst == op1 || dst == op2);
-}
-
-void JIT::emitRightShiftSlowCase(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter, bool isUnsigned)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
- if (isOperandConstantImmediateInt(op2)) {
- int shift = getConstantOperand(op2).asInt32();
- // op1 = regT1:regT0
- linkSlowCase(iter); // int32 check
- if (supportsFloatingPointTruncate()) {
- JumpList failures;
- failures.append(branch32(AboveOrEqual, regT1, Imm32(JSValue::LowestTag)));
- emitLoadDouble(op1, fpRegT0);
- failures.append(branchTruncateDoubleToInt32(fpRegT0, regT0));
- if (isUnsigned) {
- if (shift)
- urshift32(Imm32(shift & 0x1f), regT0);
- if (shift < 0 || !(shift & 31))
- failures.append(branch32(LessThan, regT0, Imm32(0)));
- } else if (shift)
- rshift32(Imm32(shift & 0x1f), regT0);
- emitStoreInt32(dst, regT0, false);
- emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_rshift));
- failures.link(this);
- }
- if (isUnsigned && (shift < 0 || !(shift & 31)))
- linkSlowCase(iter); // failed to box in hot path
- } else {
- // op1 = regT1:regT0
- // op2 = regT3:regT2
- if (!isOperandConstantImmediateInt(op1)) {
- linkSlowCase(iter); // int32 check -- op1 is not an int
- if (supportsFloatingPointTruncate()) {
- Jump notDouble = branch32(Above, regT1, Imm32(JSValue::LowestTag)); // op1 is not a double
- emitLoadDouble(op1, fpRegT0);
- Jump notInt = branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)); // op2 is not an int
- Jump cantTruncate = branchTruncateDoubleToInt32(fpRegT0, regT0);
- if (isUnsigned)
- urshift32(regT2, regT0);
- else
- rshift32(regT2, regT0);
- emitStoreInt32(dst, regT0, false);
- emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_rshift));
- notDouble.link(this);
- notInt.link(this);
- cantTruncate.link(this);
- }
- }
-
- linkSlowCase(iter); // int32 check - op2 is not an int
- if (isUnsigned)
- linkSlowCase(iter); // Can't represent unsigned result as an immediate
- }
-
- JITStubCall stubCall(this, isUnsigned ? cti_op_urshift : cti_op_rshift);
- stubCall.addArgument(op1);
- stubCall.addArgument(op2);
- stubCall.call(dst);
-}
-
-// RightShift (>>)
-
-void JIT::emit_op_rshift(Instruction* currentInstruction)
-{
- emitRightShift(currentInstruction, false);
-}
-
-void JIT::emitSlow_op_rshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- emitRightShiftSlowCase(currentInstruction, iter, false);
-}
-
-// UnsignedRightShift (>>>)
-
-void JIT::emit_op_urshift(Instruction* currentInstruction)
-{
- emitRightShift(currentInstruction, true);
-}
-
-void JIT::emitSlow_op_urshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- emitRightShiftSlowCase(currentInstruction, iter, true);
-}
-
-// BitAnd (&)
-
-void JIT::emit_op_bitand(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
-
- unsigned op;
- int32_t constant;
- if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
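-        // getOperandConstantImmediateInt succeeded: 'op' names the
-        // non-constant operand and 'constant' holds the immediate, so only
-        // one load is needed.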
- emitLoad(op, regT1, regT0);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- and32(Imm32(constant), regT0);
- emitStoreInt32(dst, regT0, (op == dst));
- return;
- }
-
- emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
- and32(regT2, regT0);
- emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
-}
-
-void JIT::emitSlow_op_bitand(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
-
- if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
- linkSlowCase(iter); // int32 check
- linkSlowCase(iter); // int32 check
-
- JITStubCall stubCall(this, cti_op_bitand);
- stubCall.addArgument(op1);
- stubCall.addArgument(op2);
- stubCall.call(dst);
-}
-
-// BitOr (|)
-
-void JIT::emit_op_bitor(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
-
- unsigned op;
- int32_t constant;
- if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
- emitLoad(op, regT1, regT0);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- or32(Imm32(constant), regT0);
- emitStoreInt32(dst, regT0, (op == dst));
- return;
- }
-
- emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
- or32(regT2, regT0);
- emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
-}
-
-void JIT::emitSlow_op_bitor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
-
- if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
- linkSlowCase(iter); // int32 check
- linkSlowCase(iter); // int32 check
-
- JITStubCall stubCall(this, cti_op_bitor);
- stubCall.addArgument(op1);
- stubCall.addArgument(op2);
- stubCall.call(dst);
-}
-
-// BitXor (^)
-
-void JIT::emit_op_bitxor(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
-
- unsigned op;
- int32_t constant;
- if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
- emitLoad(op, regT1, regT0);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- xor32(Imm32(constant), regT0);
- emitStoreInt32(dst, regT0, (op == dst));
- return;
- }
-
- emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
- xor32(regT2, regT0);
- emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
-}
-
-void JIT::emitSlow_op_bitxor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
-
- if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
- linkSlowCase(iter); // int32 check
- linkSlowCase(iter); // int32 check
-
- JITStubCall stubCall(this, cti_op_bitxor);
- stubCall.addArgument(op1);
- stubCall.addArgument(op2);
- stubCall.call(dst);
-}
-
-// BitNot (~)
-
-void JIT::emit_op_bitnot(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned src = currentInstruction[2].u.operand;
-
- emitLoad(src, regT1, regT0);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
-
- not32(regT0);
- emitStoreInt32(dst, regT0, (dst == src));
-}
-
-void JIT::emitSlow_op_bitnot(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
-
- linkSlowCase(iter); // int32 check
-
- JITStubCall stubCall(this, cti_op_bitnot);
- stubCall.addArgument(regT1, regT0);
- stubCall.call(dst);
-}
-
-// PostInc (i++)
-
-void JIT::emit_op_post_inc(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned srcDst = currentInstruction[2].u.operand;
-
- emitLoad(srcDst, regT1, regT0);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
-
- if (dst == srcDst) // x = x++ is a noop for ints.
- return;
-
- emitStoreInt32(dst, regT0);
-
- addSlowCase(branchAdd32(Overflow, Imm32(1), regT0));
- emitStoreInt32(srcDst, regT0, true);
-}
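
The dst == srcDst early return above is sound because `x = x++` assigns the pre-increment value straight back, leaving an int32 unchanged. A small standalone illustration of why the add can be skipped:

    // JS: x = x++  desugars to  t = x; x = x + 1; x = t;  so x ends up unchanged,
    // and when dst aliases srcDst the JIT can skip the increment entirely.
    int postIncAssignedToSelf(int x)
    {
        int old = x; // value of the expression x++
        x = x + 1;   // side effect of ++
        x = old;     // the enclosing assignment back into x
        return x;    // equals the original x
    }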
-
-void JIT::emitSlow_op_post_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned srcDst = currentInstruction[2].u.operand;
-
- linkSlowCase(iter); // int32 check
- if (dst != srcDst)
- linkSlowCase(iter); // overflow check
-
- JITStubCall stubCall(this, cti_op_post_inc);
- stubCall.addArgument(srcDst);
- stubCall.addArgument(Imm32(srcDst));
- stubCall.call(dst);
-}
-
-// PostDec (i--)
-
-void JIT::emit_op_post_dec(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned srcDst = currentInstruction[2].u.operand;
-
- emitLoad(srcDst, regT1, regT0);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
-
- if (dst == srcDst) // x = x-- is a noop for ints.
- return;
-
- emitStoreInt32(dst, regT0);
-
- addSlowCase(branchSub32(Overflow, Imm32(1), regT0));
- emitStoreInt32(srcDst, regT0, true);
-}
-
-void JIT::emitSlow_op_post_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned srcDst = currentInstruction[2].u.operand;
-
- linkSlowCase(iter); // int32 check
- if (dst != srcDst)
- linkSlowCase(iter); // overflow check
-
- JITStubCall stubCall(this, cti_op_post_dec);
- stubCall.addArgument(srcDst);
- stubCall.addArgument(Imm32(srcDst));
- stubCall.call(dst);
-}
-
-// PreInc (++i)
-
-void JIT::emit_op_pre_inc(Instruction* currentInstruction)
-{
- unsigned srcDst = currentInstruction[1].u.operand;
-
- emitLoad(srcDst, regT1, regT0);
-
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- addSlowCase(branchAdd32(Overflow, Imm32(1), regT0));
- emitStoreInt32(srcDst, regT0, true);
-}
-
-void JIT::emitSlow_op_pre_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned srcDst = currentInstruction[1].u.operand;
-
- linkSlowCase(iter); // int32 check
- linkSlowCase(iter); // overflow check
-
- JITStubCall stubCall(this, cti_op_pre_inc);
- stubCall.addArgument(srcDst);
- stubCall.call(srcDst);
-}
-
-// PreDec (--i)
-
-void JIT::emit_op_pre_dec(Instruction* currentInstruction)
-{
- unsigned srcDst = currentInstruction[1].u.operand;
-
- emitLoad(srcDst, regT1, regT0);
-
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- addSlowCase(branchSub32(Overflow, Imm32(1), regT0));
- emitStoreInt32(srcDst, regT0, true);
-}
-
-void JIT::emitSlow_op_pre_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned srcDst = currentInstruction[1].u.operand;
-
- linkSlowCase(iter); // int32 check
- linkSlowCase(iter); // overflow check
-
- JITStubCall stubCall(this, cti_op_pre_dec);
- stubCall.addArgument(srcDst);
- stubCall.call(srcDst);
-}
-
-// Addition (+)
-
-void JIT::emit_op_add(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
- OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
-
- if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
- JITStubCall stubCall(this, cti_op_add);
- stubCall.addArgument(op1);
- stubCall.addArgument(op2);
- stubCall.call(dst);
- return;
- }
-
- JumpList notInt32Op1;
- JumpList notInt32Op2;
-
- unsigned op;
- int32_t constant;
- if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
- emitAdd32Constant(dst, op, constant, op == op1 ? types.first() : types.second());
- return;
- }
-
- emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
- notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
-
- // Int32 case.
- addSlowCase(branchAdd32(Overflow, regT2, regT0));
- emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
-
- if (!supportsFloatingPoint()) {
- addSlowCase(notInt32Op1);
- addSlowCase(notInt32Op2);
- return;
- }
- Jump end = jump();
-
- // Double case.
- emitBinaryDoubleOp(op_add, dst, op1, op2, types, notInt32Op1, notInt32Op2);
- end.link(this);
-}
-
-void JIT::emitAdd32Constant(unsigned dst, unsigned op, int32_t constant, ResultType opType)
-{
- // Int32 case.
- emitLoad(op, regT1, regT0);
- Jump notInt32 = branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag));
- addSlowCase(branchAdd32(Overflow, Imm32(constant), regT0));
- emitStoreInt32(dst, regT0, (op == dst));
-
- // Double case.
- if (!supportsFloatingPoint()) {
- addSlowCase(notInt32);
- return;
- }
- Jump end = jump();
-
- notInt32.link(this);
- if (!opType.definitelyIsNumber())
- addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));
- move(Imm32(constant), regT2);
- convertInt32ToDouble(regT2, fpRegT0);
- emitLoadDouble(op, fpRegT1);
- addDouble(fpRegT1, fpRegT0);
- emitStoreDouble(dst, fpRegT0);
-
- end.link(this);
-}
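
branchAdd32(Overflow, ...) performs the addition and branches when the signed 32-bit result overflowed, at which point the slow path redoes the work in doubles. Roughly the following, sketched with the GCC/Clang overflow builtin (a toolchain assumption, not JSC code):

    #include <cstdint>

    // Fast path for `dst = op + constant`: keep the int32 result only if the
    // signed add did not overflow; otherwise fall through to the double path.
    bool addInt32Checked(int32_t op, int32_t constant, int32_t& dst)
    {
        int32_t result;
        if (__builtin_add_overflow(op, constant, &result))
            return false; // overflow: redo the add in doubles, or call the stub
        dst = result;
        return true;
    }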
-
-void JIT::emitSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
- OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
-
- if (!types.first().mightBeNumber() || !types.second().mightBeNumber())
- return;
-
- unsigned op;
- int32_t constant;
- if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
- linkSlowCase(iter); // overflow check
-
- if (!supportsFloatingPoint())
- linkSlowCase(iter); // non-sse case
- else {
- ResultType opType = op == op1 ? types.first() : types.second();
- if (!opType.definitelyIsNumber())
- linkSlowCase(iter); // double check
- }
- } else {
- linkSlowCase(iter); // overflow check
-
- if (!supportsFloatingPoint()) {
- linkSlowCase(iter); // int32 check
- linkSlowCase(iter); // int32 check
- } else {
- if (!types.first().definitelyIsNumber())
- linkSlowCase(iter); // double check
-
- if (!types.second().definitelyIsNumber()) {
- linkSlowCase(iter); // int32 check
- linkSlowCase(iter); // double check
- }
- }
- }
-
- JITStubCall stubCall(this, cti_op_add);
- stubCall.addArgument(op1);
- stubCall.addArgument(op2);
- stubCall.call(dst);
-}
-
-// Subtraction (-)
-
-void JIT::emit_op_sub(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
- OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
-
- JumpList notInt32Op1;
- JumpList notInt32Op2;
-
- if (isOperandConstantImmediateInt(op2)) {
- emitSub32Constant(dst, op1, getConstantOperand(op2).asInt32(), types.first());
- return;
- }
-
- emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
- notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
-
- // Int32 case.
- addSlowCase(branchSub32(Overflow, regT2, regT0));
- emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
-
- if (!supportsFloatingPoint()) {
- addSlowCase(notInt32Op1);
- addSlowCase(notInt32Op2);
- return;
- }
- Jump end = jump();
-
- // Double case.
- emitBinaryDoubleOp(op_sub, dst, op1, op2, types, notInt32Op1, notInt32Op2);
- end.link(this);
-}
-
-void JIT::emitSub32Constant(unsigned dst, unsigned op, int32_t constant, ResultType opType)
-{
- // Int32 case.
- emitLoad(op, regT1, regT0);
- Jump notInt32 = branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag));
- addSlowCase(branchSub32(Overflow, Imm32(constant), regT0));
- emitStoreInt32(dst, regT0, (op == dst));
-
- // Double case.
- if (!supportsFloatingPoint()) {
- addSlowCase(notInt32);
- return;
- }
- Jump end = jump();
-
- notInt32.link(this);
- if (!opType.definitelyIsNumber())
- addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));
- move(Imm32(constant), regT2);
- convertInt32ToDouble(regT2, fpRegT0);
- emitLoadDouble(op, fpRegT1);
- subDouble(fpRegT0, fpRegT1);
- emitStoreDouble(dst, fpRegT1);
-
- end.link(this);
-}
-
-void JIT::emitSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
- OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
-
- if (isOperandConstantImmediateInt(op2)) {
- linkSlowCase(iter); // overflow check
-
- if (!supportsFloatingPoint() || !types.first().definitelyIsNumber())
- linkSlowCase(iter); // int32 or double check
- } else {
- linkSlowCase(iter); // overflow check
-
- if (!supportsFloatingPoint()) {
- linkSlowCase(iter); // int32 check
- linkSlowCase(iter); // int32 check
- } else {
- if (!types.first().definitelyIsNumber())
- linkSlowCase(iter); // double check
-
- if (!types.second().definitelyIsNumber()) {
- linkSlowCase(iter); // int32 check
- linkSlowCase(iter); // double check
- }
- }
- }
-
- JITStubCall stubCall(this, cti_op_sub);
- stubCall.addArgument(op1);
- stubCall.addArgument(op2);
- stubCall.call(dst);
-}
-
-void JIT::emitBinaryDoubleOp(OpcodeID opcodeID, unsigned dst, unsigned op1, unsigned op2, OperandTypes types, JumpList& notInt32Op1, JumpList& notInt32Op2, bool op1IsInRegisters, bool op2IsInRegisters)
-{
- JumpList end;
-
- if (!notInt32Op1.empty()) {
- // Double case 1: Op1 is not int32; Op2 is unknown.
- notInt32Op1.link(this);
-
- ASSERT(op1IsInRegisters);
-
- // Verify Op1 is double.
- if (!types.first().definitelyIsNumber())
- addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));
-
- if (!op2IsInRegisters)
- emitLoad(op2, regT3, regT2);
-
- Jump doubleOp2 = branch32(Below, regT3, Imm32(JSValue::LowestTag));
-
- if (!types.second().definitelyIsNumber())
- addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
-
- convertInt32ToDouble(regT2, fpRegT0);
- Jump doTheMath = jump();
-
- // Load Op2 as double into double register.
- doubleOp2.link(this);
- emitLoadDouble(op2, fpRegT0);
-
- // Do the math.
- doTheMath.link(this);
- switch (opcodeID) {
- case op_mul:
- emitLoadDouble(op1, fpRegT2);
- mulDouble(fpRegT2, fpRegT0);
- emitStoreDouble(dst, fpRegT0);
- break;
- case op_add:
- emitLoadDouble(op1, fpRegT2);
- addDouble(fpRegT2, fpRegT0);
- emitStoreDouble(dst, fpRegT0);
- break;
- case op_sub:
- emitLoadDouble(op1, fpRegT1);
- subDouble(fpRegT0, fpRegT1);
- emitStoreDouble(dst, fpRegT1);
- break;
- case op_div:
- emitLoadDouble(op1, fpRegT1);
- divDouble(fpRegT0, fpRegT1);
- emitStoreDouble(dst, fpRegT1);
- break;
- case op_jnless:
- emitLoadDouble(op1, fpRegT2);
- addJump(branchDouble(DoubleLessThanOrEqualOrUnordered, fpRegT0, fpRegT2), dst);
- break;
- case op_jless:
- emitLoadDouble(op1, fpRegT2);
- addJump(branchDouble(DoubleLessThan, fpRegT2, fpRegT0), dst);
- break;
- case op_jlesseq:
- emitLoadDouble(op1, fpRegT2);
- addJump(branchDouble(DoubleLessThanOrEqual, fpRegT2, fpRegT0), dst);
- break;
- case op_jnlesseq:
- emitLoadDouble(op1, fpRegT2);
- addJump(branchDouble(DoubleLessThanOrUnordered, fpRegT0, fpRegT2), dst);
- break;
- default:
- ASSERT_NOT_REACHED();
- }
-
- if (!notInt32Op2.empty())
- end.append(jump());
- }
-
- if (!notInt32Op2.empty()) {
- // Double case 2: Op1 is int32; Op2 is not int32.
- notInt32Op2.link(this);
-
- ASSERT(op2IsInRegisters);
-
- if (!op1IsInRegisters)
- emitLoadPayload(op1, regT0);
-
- convertInt32ToDouble(regT0, fpRegT0);
-
- // Verify op2 is double.
- if (!types.second().definitelyIsNumber())
- addSlowCase(branch32(Above, regT3, Imm32(JSValue::LowestTag)));
-
- // Do the math.
- switch (opcodeID) {
- case op_mul:
- emitLoadDouble(op2, fpRegT2);
- mulDouble(fpRegT2, fpRegT0);
- emitStoreDouble(dst, fpRegT0);
- break;
- case op_add:
- emitLoadDouble(op2, fpRegT2);
- addDouble(fpRegT2, fpRegT0);
- emitStoreDouble(dst, fpRegT0);
- break;
- case op_sub:
- emitLoadDouble(op2, fpRegT2);
- subDouble(fpRegT2, fpRegT0);
- emitStoreDouble(dst, fpRegT0);
- break;
- case op_div:
- emitLoadDouble(op2, fpRegT2);
- divDouble(fpRegT2, fpRegT0);
- emitStoreDouble(dst, fpRegT0);
- break;
- case op_jnless:
- emitLoadDouble(op2, fpRegT1);
- addJump(branchDouble(DoubleLessThanOrEqualOrUnordered, fpRegT1, fpRegT0), dst);
- break;
- case op_jless:
- emitLoadDouble(op2, fpRegT1);
- addJump(branchDouble(DoubleLessThan, fpRegT0, fpRegT1), dst);
- break;
- case op_jnlesseq:
- emitLoadDouble(op2, fpRegT1);
- addJump(branchDouble(DoubleLessThanOrUnordered, fpRegT1, fpRegT0), dst);
- break;
- case op_jlesseq:
- emitLoadDouble(op2, fpRegT1);
- addJump(branchDouble(DoubleLessThanOrEqual, fpRegT0, fpRegT1), dst);
- break;
- default:
- ASSERT_NOT_REACHED();
- }
- }
-
- end.link(this);
-}
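
The inverted jumps (op_jnless, op_jnlesseq) use the ...OrUnordered conditions, with the operands swapped, because they must branch when the comparison is false or when either operand is NaN; the non-inverted forms use ordered conditions, which fall through on NaN. In plain C++ terms:

    // jless  branches when (a < b) holds; a NaN operand makes it fall through.
    // jnless branches when (a < b) does not hold, which must include NaN, so
    // the emitted test is "b <= a, or unordered", i.e. exactly !(a < b).
    bool jlessTaken(double a, double b)  { return a < b; }
    bool jnlessTaken(double a, double b) { return !(a < b); }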
-
-// Multiplication (*)
-
-void JIT::emit_op_mul(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
- OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
-
- JumpList notInt32Op1;
- JumpList notInt32Op2;
-
- emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
- notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
-
- // Int32 case.
- move(regT0, regT3);
- addSlowCase(branchMul32(Overflow, regT2, regT0));
- addSlowCase(branchTest32(Zero, regT0));
- emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
-
- if (!supportsFloatingPoint()) {
- addSlowCase(notInt32Op1);
- addSlowCase(notInt32Op2);
- return;
- }
- Jump end = jump();
-
- // Double case.
- emitBinaryDoubleOp(op_mul, dst, op1, op2, types, notInt32Op1, notInt32Op2);
- end.link(this);
-}
-
-void JIT::emitSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
- OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
-
- Jump overflow = getSlowCase(iter); // overflow check
- linkSlowCase(iter); // zero result check
-
- Jump negZero = branchOr32(Signed, regT2, regT3);
- emitStoreInt32(dst, Imm32(0), (op1 == dst || op2 == dst));
-
- emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_mul));
-
- negZero.link(this);
- overflow.link(this);
-
- if (!supportsFloatingPoint()) {
- linkSlowCase(iter); // int32 check
- linkSlowCase(iter); // int32 check
- }
-
- if (supportsFloatingPoint()) {
- if (!types.first().definitelyIsNumber())
- linkSlowCase(iter); // double check
-
- if (!types.second().definitelyIsNumber()) {
- linkSlowCase(iter); // int32 check
- linkSlowCase(iter); // double check
- }
- }
-
- Label jitStubCall(this);
- JITStubCall stubCall(this, cti_op_mul);
- stubCall.addArgument(op1);
- stubCall.addArgument(op2);
- stubCall.call(dst);
-}
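
The branchOr32(Signed, regT2, regT3) test above catches the one int32 product that is wrong as an integer: a zero result with a negative operand must become the double -0.0, since JS distinguishes 0 from -0 under multiplication (e.g. (-5) * 0 is -0). A sketch of the same test:

    #include <cstdint>

    // A zero int32 product needs the double -0.0 iff some operand was negative;
    // OR-ing the operands and testing the sign bit is the branchOr32 analogue.
    bool zeroProductNeedsNegativeZero(int32_t a, int32_t b)
    {
        int64_t product = static_cast<int64_t>(a) * b; // avoid int32 overflow UB
        return product == 0 && (a | b) < 0;
    }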
-
-// Division (/)
-
-void JIT::emit_op_div(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
- OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
-
- if (!supportsFloatingPoint()) {
- addSlowCase(jump());
- return;
- }
-
- // Int32 divide.
- JumpList notInt32Op1;
- JumpList notInt32Op2;
-
- JumpList end;
-
- emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
-
- notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
-
- convertInt32ToDouble(regT0, fpRegT0);
- convertInt32ToDouble(regT2, fpRegT1);
- divDouble(fpRegT1, fpRegT0);
-
- JumpList doubleResult;
- branchConvertDoubleToInt32(fpRegT0, regT0, doubleResult, fpRegT1);
-
- // Int32 result.
- emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
- end.append(jump());
-
- // Double result.
- doubleResult.link(this);
- emitStoreDouble(dst, fpRegT0);
- end.append(jump());
-
- // Double divide.
- emitBinaryDoubleOp(op_div, dst, op1, op2, types, notInt32Op1, notInt32Op2);
- end.link(this);
-}
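
emit_op_div divides in double registers even on the int32 path, then keeps an int32 result only when the quotient round-trips exactly; branchConvertDoubleToInt32 also rejects -0, which int32 cannot represent. Approximately (a sketch, not the emitted code):

    #include <cmath>
    #include <cstdint>

    // Divide as doubles; store an int32 only for an exact, non-negative-zero
    // quotient, otherwise keep the double.
    bool divStoresInt32(int32_t a, int32_t b, int32_t& intOut, double& dblOut)
    {
        double q = static_cast<double>(a) / b;
        dblOut = q;
        if (!(q >= INT32_MIN && q <= INT32_MAX))
            return false; // NaN, infinity, or out of int32 range
        int32_t truncated = static_cast<int32_t>(q);
        if (static_cast<double>(truncated) != q)
            return false; // fractional quotient
        if (truncated == 0 && std::signbit(q))
            return false; // -0 is not an int32
        intOut = truncated;
        return true;
    }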
-
-void JIT::emitSlow_op_div(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
- OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
-
- if (!supportsFloatingPoint())
- linkSlowCase(iter);
- else {
- if (!types.first().definitelyIsNumber())
- linkSlowCase(iter); // double check
-
- if (!types.second().definitelyIsNumber()) {
- linkSlowCase(iter); // int32 check
- linkSlowCase(iter); // double check
- }
- }
-
- JITStubCall stubCall(this, cti_op_div);
- stubCall.addArgument(op1);
- stubCall.addArgument(op2);
- stubCall.call(dst);
-}
-
-// Mod (%)
-
-/* ------------------------------ BEGIN: OP_MOD ------------------------------ */
-
-#if CPU(X86) || CPU(X86_64) || CPU(MIPS)
-
-void JIT::emit_op_mod(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
-
-#if CPU(X86) || CPU(X86_64)
- // Make sure registers are correct for x86 IDIV instructions.
- ASSERT(regT0 == X86Registers::eax);
- ASSERT(regT1 == X86Registers::edx);
- ASSERT(regT2 == X86Registers::ecx);
- ASSERT(regT3 == X86Registers::ebx);
-#endif
-
- if (isOperandConstantImmediateInt(op2) && getConstantOperand(op2).asInt32() != 0) {
- emitLoad(op1, regT1, regT0);
- move(Imm32(getConstantOperand(op2).asInt32()), regT2);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- if (getConstantOperand(op2).asInt32() == -1)
- addSlowCase(branch32(Equal, regT0, Imm32(0x80000000))); // -2147483648 / -1 => EXC_ARITHMETIC
- } else {
- emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
-
- addSlowCase(branch32(Equal, regT0, Imm32(0x80000000))); // -2147483648 / -1 => EXC_ARITHMETIC
- addSlowCase(branch32(Equal, regT2, Imm32(0))); // divide by 0
- }
-
- move(regT0, regT3); // Save dividend payload, in case of 0.
-#if CPU(X86) || CPU(X86_64)
- m_assembler.cdq();
- m_assembler.idivl_r(regT2);
-#elif CPU(MIPS)
- m_assembler.div(regT0, regT2);
- m_assembler.mfhi(regT1);
-#endif
-
- // If the remainder is zero and the dividend is negative, the result is -0.
- Jump storeResult1 = branchTest32(NonZero, regT1);
- Jump storeResult2 = branchTest32(Zero, regT3, Imm32(0x80000000)); // not negative
- emitStore(dst, jsNumber(-0.0));
- Jump end = jump();
-
- storeResult1.link(this);
- storeResult2.link(this);
- emitStoreInt32(dst, regT1, (op1 == dst || op2 == dst));
- end.link(this);
-}
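
Two hardware hazards and one JS quirk shape the guards above: idivl raises EXC_ARITHMETIC both for a zero divisor and for INT32_MIN / -1 (the quotient overflows), and a zero remainder with a negative dividend must be stored as the double -0 (e.g. (-4) % 2 is -0). The same conditions in C++:

    #include <cstdint>

    // The addSlowCase guards: operand pairs the hardware divide cannot handle.
    bool int32ModHasFastPath(int32_t dividend, int32_t divisor)
    {
        if (divisor == 0)
            return false;                            // divide by zero
        if (dividend == INT32_MIN && divisor == -1)
            return false;                            // quotient overflow trap
        return true;
    }

    // The storeResult1/storeResult2 test: a zero remainder from a negative
    // dividend must become -0, which int32 cannot represent.
    bool remainderMustBeNegativeZero(int32_t dividend, int32_t remainder)
    {
        return remainder == 0 && dividend < 0;
    }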
-
-void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
-
- if (isOperandConstantImmediateInt(op2) && getConstantOperand(op2).asInt32() != 0) {
- linkSlowCase(iter); // int32 check
- if (getConstantOperand(op2).asInt32() == -1)
- linkSlowCase(iter); // 0x80000000 check
- } else {
- linkSlowCase(iter); // int32 check
- linkSlowCase(iter); // int32 check
- linkSlowCase(iter); // 0 check
- linkSlowCase(iter); // 0x80000000 check
- }
-
- JITStubCall stubCall(this, cti_op_mod);
- stubCall.addArgument(op1);
- stubCall.addArgument(op2);
- stubCall.call(dst);
-}
-
-#else // CPU(X86) || CPU(X86_64) || CPU(MIPS)
-
-void JIT::emit_op_mod(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
-
-#if ENABLE(JIT_USE_SOFT_MODULO)
- emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
-
- addSlowCase(branch32(Equal, regT2, Imm32(0)));
-
- emitNakedCall(m_globalData->jitStubs->ctiSoftModulo());
-
- emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
-#else
- JITStubCall stubCall(this, cti_op_mod);
- stubCall.addArgument(op1);
- stubCall.addArgument(op2);
- stubCall.call(dst);
-#endif
-}
-
-void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- UNUSED_PARAM(currentInstruction);
- UNUSED_PARAM(iter);
-#if ENABLE(JIT_USE_SOFT_MODULO)
- unsigned result = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
- linkSlowCase(iter);
- linkSlowCase(iter);
- linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_mod);
- stubCall.addArgument(op1);
- stubCall.addArgument(op2);
- stubCall.call(result);
-#else
- ASSERT_NOT_REACHED();
-#endif
-}
-
-#endif // CPU(X86) || CPU(X86_64) || CPU(MIPS)
-
-/* ------------------------------ END: OP_MOD ------------------------------ */
-
-} // namespace JSC
-
-#endif // USE(JSVALUE32_64)
-#endif // ENABLE(JIT)
diff --git a/JavaScriptCore/jit/JITCall.cpp b/JavaScriptCore/jit/JITCall.cpp
deleted file mode 100644
index fdd0d47..0000000
--- a/JavaScriptCore/jit/JITCall.cpp
+++ /dev/null
@@ -1,261 +0,0 @@
-/*
- * Copyright (C) 2008 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-
-#if ENABLE(JIT)
-#if USE(JSVALUE64)
-#include "JIT.h"
-
-#include "CodeBlock.h"
-#include "JITInlineMethods.h"
-#include "JITStubCall.h"
-#include "JSArray.h"
-#include "JSFunction.h"
-#include "Interpreter.h"
-#include "ResultType.h"
-#include "SamplingTool.h"
-
-#ifndef NDEBUG
-#include <stdio.h>
-#endif
-
-using namespace std;
-
-namespace JSC {
-
-void JIT::compileOpCallInitializeCallFrame()
-{
- store32(regT1, Address(callFrameRegister, RegisterFile::ArgumentCount * static_cast<int>(sizeof(Register))));
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_scopeChain) + OBJECT_OFFSETOF(ScopeChain, m_node)), regT3); // newScopeChain
- storePtr(regT0, Address(callFrameRegister, RegisterFile::Callee * static_cast<int>(sizeof(Register))));
- storePtr(regT3, Address(callFrameRegister, RegisterFile::ScopeChain * static_cast<int>(sizeof(Register))));
-}
-
-void JIT::emit_op_call_put_result(Instruction* instruction)
-{
- int dst = instruction[1].u.operand;
- emitPutVirtualRegister(dst);
-}
-
-void JIT::compileOpCallVarargs(Instruction* instruction)
-{
- int callee = instruction[1].u.operand;
- int argCountRegister = instruction[2].u.operand;
- int registerOffset = instruction[3].u.operand;
-
- emitGetVirtualRegister(argCountRegister, regT1);
- emitGetVirtualRegister(callee, regT0);
- addPtr(Imm32(registerOffset), regT1, regT2);
-
- // Check for JSFunctions.
- emitJumpSlowCaseIfNotJSCell(regT0);
- addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsFunctionVPtr)));
-
- // Speculatively roll the callframe, assuming argCount will match the arity.
- mul32(Imm32(sizeof(Register)), regT2, regT2);
- intptr_t offset = (intptr_t)sizeof(Register) * (intptr_t)RegisterFile::CallerFrame;
- addPtr(Imm32((int32_t)offset), regT2, regT3);
- addPtr(callFrameRegister, regT3);
- storePtr(callFrameRegister, regT3);
- addPtr(regT2, callFrameRegister);
- emitNakedCall(m_globalData->jitStubs->ctiVirtualCall());
-
- sampleCodeBlock(m_codeBlock);
-}
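
"Speculatively roll the callframe" means moving callFrameRegister up to where the callee's frame will live (registerOffset slots above the current frame) before the arity is verified, after stashing the old frame pointer in the new frame's CallerFrame header slot so the callee can walk back. A sketch with an illustrative slot index (RegisterFile's real header layout differs):

    #include <cstdint>

    typedef uint64_t Register;       // one stack slot, as in JSC
    const int kCallerFrameSlot = -1; // illustrative header index only

    // New frame = old frame + registerOffset slots; remember the caller.
    Register* rollCallFrame(Register* callFrame, int registerOffset)
    {
        Register* newFrame = callFrame + registerOffset;
        newFrame[kCallerFrameSlot] = reinterpret_cast<uintptr_t>(callFrame);
        return newFrame;
    }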
-
-void JIT::compileOpCallVarargsSlowCase(Instruction*, Vector<SlowCaseEntry>::iterator& iter)
-{
- linkSlowCase(iter);
- linkSlowCase(iter);
-
- JITStubCall stubCall(this, cti_op_call_NotJSFunction);
- stubCall.addArgument(regT0);
- stubCall.addArgument(regT2);
- stubCall.addArgument(regT1);
- stubCall.call();
-
- sampleCodeBlock(m_codeBlock);
-}
-
-#if !ENABLE(JIT_OPTIMIZE_CALL)
-
-/* ------------------------------ BEGIN: !ENABLE(JIT_OPTIMIZE_CALL) ------------------------------ */
-
-void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned)
-{
- int callee = instruction[1].u.operand;
- int argCount = instruction[2].u.operand;
- int registerOffset = instruction[3].u.operand;
-
- // Handle eval
- Jump wasEval;
- if (opcodeID == op_call_eval) {
- JITStubCall stubCall(this, cti_op_call_eval);
- stubCall.addArgument(callee, regT0);
- stubCall.addArgument(JIT::Imm32(registerOffset));
- stubCall.addArgument(JIT::Imm32(argCount));
- stubCall.call();
- wasEval = branchPtr(NotEqual, regT0, ImmPtr(JSValue::encode(JSValue())));
- }
-
- emitGetVirtualRegister(callee, regT0);
-
- // Check for JSFunctions.
- emitJumpSlowCaseIfNotJSCell(regT0);
- addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsFunctionVPtr)));
-
- // Speculatively roll the callframe, assuming argCount will match the arity.
- storePtr(callFrameRegister, Address(callFrameRegister, (RegisterFile::CallerFrame + registerOffset) * static_cast<int>(sizeof(Register))));
- addPtr(Imm32(registerOffset * static_cast<int>(sizeof(Register))), callFrameRegister);
- move(Imm32(argCount), regT1);
-
- emitNakedCall(opcodeID == op_construct ? m_globalData->jitStubs->ctiVirtualConstruct() : m_globalData->jitStubs->ctiVirtualCall());
-
- if (opcodeID == op_call_eval)
- wasEval.link(this);
-
- sampleCodeBlock(m_codeBlock);
-}
-
-void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter, unsigned, OpcodeID opcodeID)
-{
- int argCount = instruction[2].u.operand;
- int registerOffset = instruction[3].u.operand;
-
- linkSlowCase(iter);
- linkSlowCase(iter);
-
- JITStubCall stubCall(this, opcodeID == op_construct ? cti_op_construct_NotJSConstruct : cti_op_call_NotJSFunction);
- stubCall.addArgument(regT0);
- stubCall.addArgument(JIT::Imm32(registerOffset));
- stubCall.addArgument(JIT::Imm32(argCount));
- stubCall.call();
-
- sampleCodeBlock(m_codeBlock);
-}
-
-#else // !ENABLE(JIT_OPTIMIZE_CALL)
-
-/* ------------------------------ BEGIN: ENABLE(JIT_OPTIMIZE_CALL) ------------------------------ */
-
-void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned callLinkInfoIndex)
-{
- int callee = instruction[1].u.operand;
- int argCount = instruction[2].u.operand;
- int registerOffset = instruction[3].u.operand;
-
- // Handle eval
- Jump wasEval;
- if (opcodeID == op_call_eval) {
- JITStubCall stubCall(this, cti_op_call_eval);
- stubCall.addArgument(callee, regT0);
- stubCall.addArgument(JIT::Imm32(registerOffset));
- stubCall.addArgument(JIT::Imm32(argCount));
- stubCall.call();
- wasEval = branchPtr(NotEqual, regT0, ImmPtr(JSValue::encode(JSValue())));
- }
-
- // This plants a check for a cached JSFunction value, so we can plant a fast link to the callee.
- // This deliberately leaves the callee in ecx, used when setting up the stack frame below.
- emitGetVirtualRegister(callee, regT0);
- DataLabelPtr addressOfLinkedFunctionCheck;
-
- BEGIN_UNINTERRUPTED_SEQUENCE(sequenceOpCall);
-
- Jump jumpToSlow = branchPtrWithPatch(NotEqual, regT0, addressOfLinkedFunctionCheck, ImmPtr(JSValue::encode(JSValue())));
-
- END_UNINTERRUPTED_SEQUENCE(sequenceOpCall);
-
- addSlowCase(jumpToSlow);
- ASSERT_JIT_OFFSET(differenceBetween(addressOfLinkedFunctionCheck, jumpToSlow), patchOffsetOpCallCompareToJump);
- m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck;
-
- // The following is the fast case, only used when a callee can be linked.
-
- // Fast version of stack frame initialization, directly relative to edi.
- // Note that this does not set up RegisterFile::CodeBlock, which is set in the callee.
-
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_scopeChain) + OBJECT_OFFSETOF(ScopeChain, m_node)), regT1); // newScopeChain
-
- store32(Imm32(argCount), Address(callFrameRegister, (registerOffset + RegisterFile::ArgumentCount) * static_cast<int>(sizeof(Register))));
- storePtr(callFrameRegister, Address(callFrameRegister, (registerOffset + RegisterFile::CallerFrame) * static_cast<int>(sizeof(Register))));
- storePtr(regT0, Address(callFrameRegister, (registerOffset + RegisterFile::Callee) * static_cast<int>(sizeof(Register))));
- storePtr(regT1, Address(callFrameRegister, (registerOffset + RegisterFile::ScopeChain) * static_cast<int>(sizeof(Register))));
- addPtr(Imm32(registerOffset * sizeof(Register)), callFrameRegister);
-
- // Call to the callee
- m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedCall();
-
- if (opcodeID == op_call_eval)
- wasEval.link(this);
-
- sampleCodeBlock(m_codeBlock);
-}
-
-void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter, unsigned callLinkInfoIndex, OpcodeID opcodeID)
-{
- int argCount = instruction[2].u.operand;
- int registerOffset = instruction[3].u.operand;
-
- linkSlowCase(iter);
-
- // Fast check for JS function.
- Jump callLinkFailNotObject = emitJumpIfNotJSCell(regT0);
- Jump callLinkFailNotJSFunction = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsFunctionVPtr));
-
- // Speculatively roll the callframe, assuming argCount will match the arity.
- storePtr(callFrameRegister, Address(callFrameRegister, (RegisterFile::CallerFrame + registerOffset) * static_cast<int>(sizeof(Register))));
- addPtr(Imm32(registerOffset * static_cast<int>(sizeof(Register))), callFrameRegister);
- move(Imm32(argCount), regT1);
-
- m_callStructureStubCompilationInfo[callLinkInfoIndex].callReturnLocation = emitNakedCall(opcodeID == op_construct ? m_globalData->jitStubs->ctiVirtualConstructLink() : m_globalData->jitStubs->ctiVirtualCallLink());
-
- // Done! - return back to the hot path.
- ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_call_eval));
- ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct));
- emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_call));
-
- // This handles host functions
- callLinkFailNotObject.link(this);
- callLinkFailNotJSFunction.link(this);
-
- JITStubCall stubCall(this, opcodeID == op_construct ? cti_op_construct_NotJSConstruct : cti_op_call_NotJSFunction);
- stubCall.addArgument(regT0);
- stubCall.addArgument(JIT::Imm32(registerOffset));
- stubCall.addArgument(JIT::Imm32(argCount));
- stubCall.call();
-
- sampleCodeBlock(m_codeBlock);
-}
-
-/* ------------------------------ END: !ENABLE / ENABLE(JIT_OPTIMIZE_CALL) ------------------------------ */
-
-#endif // !ENABLE(JIT_OPTIMIZE_CALL)
-
-} // namespace JSC
-
-#endif // USE(JSVALUE64)
-#endif // ENABLE(JIT)
diff --git a/JavaScriptCore/jit/JITCall32_64.cpp b/JavaScriptCore/jit/JITCall32_64.cpp
deleted file mode 100644
index daf5d2d..0000000
--- a/JavaScriptCore/jit/JITCall32_64.cpp
+++ /dev/null
@@ -1,356 +0,0 @@
-/*
- * Copyright (C) 2008 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-
-#if ENABLE(JIT)
-#if USE(JSVALUE32_64)
-#include "JIT.h"
-
-#include "CodeBlock.h"
-#include "Interpreter.h"
-#include "JITInlineMethods.h"
-#include "JITStubCall.h"
-#include "JSArray.h"
-#include "JSFunction.h"
-#include "ResultType.h"
-#include "SamplingTool.h"
-
-#ifndef NDEBUG
-#include <stdio.h>
-#endif
-
-using namespace std;
-
-namespace JSC {
-
-void JIT::compileOpCallInitializeCallFrame()
-{
- // regT0 holds callee, regT1 holds argCount
- store32(regT1, Address(callFrameRegister, RegisterFile::ArgumentCount * static_cast<int>(sizeof(Register))));
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_scopeChain) + OBJECT_OFFSETOF(ScopeChain, m_node)), regT3); // scopeChain
- storePtr(regT0, Address(callFrameRegister, RegisterFile::Callee * static_cast<int>(sizeof(Register)))); // callee
- storePtr(regT3, Address(callFrameRegister, RegisterFile::ScopeChain * static_cast<int>(sizeof(Register)))); // scopeChain
-}
-
-void JIT::emit_op_call_put_result(Instruction* instruction)
-{
- int dst = instruction[1].u.operand;
- emitStore(dst, regT1, regT0);
-}
-
-void JIT::compileOpCallVarargs(Instruction* instruction)
-{
- int callee = instruction[1].u.operand;
- int argCountRegister = instruction[2].u.operand;
- int registerOffset = instruction[3].u.operand;
-
- emitLoad(callee, regT1, regT0);
- emitLoadPayload(argCountRegister, regT2); // argCount
- addPtr(Imm32(registerOffset), regT2, regT3); // registerOffset
-
- emitJumpSlowCaseIfNotJSCell(callee, regT1);
- addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsFunctionVPtr)));
-
- // Speculatively roll the callframe, assuming argCount will match the arity.
- mul32(Imm32(sizeof(Register)), regT3, regT3);
- addPtr(callFrameRegister, regT3);
- storePtr(callFrameRegister, Address(regT3, RegisterFile::CallerFrame * static_cast<int>(sizeof(Register))));
- move(regT3, callFrameRegister);
-
- move(regT2, regT1); // argCount
-
- emitNakedCall(m_globalData->jitStubs->ctiVirtualCall());
-
- sampleCodeBlock(m_codeBlock);
-}
-
-void JIT::compileOpCallVarargsSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- int callee = instruction[1].u.operand;
-
- linkSlowCaseIfNotJSCell(iter, callee);
- linkSlowCase(iter);
-
- JITStubCall stubCall(this, cti_op_call_NotJSFunction);
- stubCall.addArgument(regT1, regT0);
- stubCall.addArgument(regT3);
- stubCall.addArgument(regT2);
- stubCall.call();
-
- sampleCodeBlock(m_codeBlock);
-}
-
-void JIT::emit_op_ret(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
-
- // We could JIT generate the deref, only calling out to C when the refcount hits zero.
- if (m_codeBlock->needsFullScopeChain()) {
- Jump activationNotCreated = branch32(Equal, tagFor(m_codeBlock->activationRegister()), Imm32(JSValue::EmptyValueTag));
- JITStubCall(this, cti_op_ret_scopeChain).call();
- activationNotCreated.link(this);
- }
- emitLoad(dst, regT1, regT0);
- emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT2);
- emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
-
- restoreReturnAddressBeforeReturn(regT2);
- ret();
-}
-
-void JIT::emit_op_ret_object_or_this(Instruction* currentInstruction)
-{
- unsigned result = currentInstruction[1].u.operand;
- unsigned thisReg = currentInstruction[2].u.operand;
-
- // We could JIT generate the deref, only calling out to C when the refcount hits zero.
- if (m_codeBlock->needsFullScopeChain()) {
- Jump activationNotCreated = branch32(Equal, tagFor(m_codeBlock->activationRegister()), Imm32(JSValue::EmptyValueTag));
- JITStubCall(this, cti_op_ret_scopeChain).call();
- activationNotCreated.link(this);
- }
-
- emitLoad(result, regT1, regT0);
- Jump notJSCell = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
- Jump notObject = branch8(NotEqual, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo) + OBJECT_OFFSETOF(TypeInfo, m_type)), Imm32(ObjectType));
-
- emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT2);
- emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
-
- restoreReturnAddressBeforeReturn(regT2);
- ret();
-
- notJSCell.link(this);
- notObject.link(this);
- emitLoad(thisReg, regT1, regT0);
-
- emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT2);
- emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
-
- restoreReturnAddressBeforeReturn(regT2);
- ret();
-}
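
op_ret_object_or_this implements the construct-return rule: a constructor's return value is used only when it is an object, and any primitive (non-cell) or non-object cell result is replaced by `this`. The two branches above reduce to the following (illustrative flags in place of JSC's CellTag and Structure type checks):

    struct SketchValue {
        bool isCell;   // tag == CellTag
        bool isObject; // cell whose Structure's type is ObjectType
    };

    // Keep `result` only if it is an object; otherwise return `this`.
    SketchValue retObjectOrThis(SketchValue result, SketchValue thisValue)
    {
        if (result.isCell && result.isObject)
            return result;
        return thisValue;
    }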
-
-void JIT::emitSlow_op_call(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- compileOpCallSlowCase(currentInstruction, iter, m_callLinkInfoIndex++, op_call);
-}
-
-void JIT::emitSlow_op_call_eval(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- compileOpCallSlowCase(currentInstruction, iter, m_callLinkInfoIndex++, op_call_eval);
-}
-
-void JIT::emitSlow_op_call_varargs(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- compileOpCallVarargsSlowCase(currentInstruction, iter);
-}
-
-void JIT::emitSlow_op_construct(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- compileOpCallSlowCase(currentInstruction, iter, m_callLinkInfoIndex++, op_construct);
-}
-
-void JIT::emit_op_call(Instruction* currentInstruction)
-{
- compileOpCall(op_call, currentInstruction, m_callLinkInfoIndex++);
-}
-
-void JIT::emit_op_call_eval(Instruction* currentInstruction)
-{
- compileOpCall(op_call_eval, currentInstruction, m_callLinkInfoIndex++);
-}
-
-void JIT::emit_op_call_varargs(Instruction* currentInstruction)
-{
- compileOpCallVarargs(currentInstruction);
-}
-
-void JIT::emit_op_construct(Instruction* currentInstruction)
-{
- compileOpCall(op_construct, currentInstruction, m_callLinkInfoIndex++);
-}
-
-#if !ENABLE(JIT_OPTIMIZE_CALL)
-
-/* ------------------------------ BEGIN: !ENABLE(JIT_OPTIMIZE_CALL) ------------------------------ */
-
-void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned)
-{
- int callee = instruction[1].u.operand;
- int argCount = instruction[2].u.operand;
- int registerOffset = instruction[3].u.operand;
-
- Jump wasEval;
- if (opcodeID == op_call_eval) {
- JITStubCall stubCall(this, cti_op_call_eval);
- stubCall.addArgument(callee);
- stubCall.addArgument(JIT::Imm32(registerOffset));
- stubCall.addArgument(JIT::Imm32(argCount));
- stubCall.call();
- wasEval = branch32(NotEqual, regT1, Imm32(JSValue::EmptyValueTag));
- }
-
- emitLoad(callee, regT1, regT0);
-
- emitJumpSlowCaseIfNotJSCell(callee, regT1);
- addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsFunctionVPtr)));
-
- // Speculatively roll the callframe, assuming argCount will match the arity.
- storePtr(callFrameRegister, Address(callFrameRegister, (RegisterFile::CallerFrame + registerOffset) * static_cast<int>(sizeof(Register))));
- addPtr(Imm32(registerOffset * static_cast<int>(sizeof(Register))), callFrameRegister);
- move(Imm32(argCount), regT1);
-
- emitNakedCall(opcodeID == op_construct ? m_globalData->jitStubs->ctiVirtualConstruct() : m_globalData->jitStubs->ctiVirtualCall());
-
- if (opcodeID == op_call_eval)
- wasEval.link(this);
-
- sampleCodeBlock(m_codeBlock);
-}
-
-void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter, unsigned, OpcodeID opcodeID)
-{
- int callee = instruction[1].u.operand;
- int argCount = instruction[2].u.operand;
- int registerOffset = instruction[3].u.operand;
-
- linkSlowCaseIfNotJSCell(iter, callee);
- linkSlowCase(iter);
-
- JITStubCall stubCall(this, opcodeID == op_construct ? cti_op_construct_NotJSConstruct : cti_op_call_NotJSFunction);
- stubCall.addArgument(callee);
- stubCall.addArgument(JIT::Imm32(registerOffset));
- stubCall.addArgument(JIT::Imm32(argCount));
- stubCall.call();
-
- sampleCodeBlock(m_codeBlock);
-}
-
-#else // !ENABLE(JIT_OPTIMIZE_CALL)
-
-/* ------------------------------ BEGIN: ENABLE(JIT_OPTIMIZE_CALL) ------------------------------ */
-
-void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned callLinkInfoIndex)
-{
- int callee = instruction[1].u.operand;
- int argCount = instruction[2].u.operand;
- int registerOffset = instruction[3].u.operand;
-
- Jump wasEval;
- if (opcodeID == op_call_eval) {
- JITStubCall stubCall(this, cti_op_call_eval);
- stubCall.addArgument(callee);
- stubCall.addArgument(JIT::Imm32(registerOffset));
- stubCall.addArgument(JIT::Imm32(argCount));
- stubCall.call();
- wasEval = branch32(NotEqual, regT1, Imm32(JSValue::EmptyValueTag));
- }
-
- emitLoad(callee, regT1, regT0);
-
- DataLabelPtr addressOfLinkedFunctionCheck;
-
- BEGIN_UNINTERRUPTED_SEQUENCE(sequenceOpCall);
-
- Jump jumpToSlow = branchPtrWithPatch(NotEqual, regT0, addressOfLinkedFunctionCheck, ImmPtr(0));
-
- END_UNINTERRUPTED_SEQUENCE(sequenceOpCall);
-
- addSlowCase(jumpToSlow);
- ASSERT(differenceBetween(addressOfLinkedFunctionCheck, jumpToSlow) == patchOffsetOpCallCompareToJump);
- m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck;
-
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));
-
- // The following is the fast case, only used when a callee can be linked.
-
- // Fast version of stack frame initialization, directly relative to edi.
- // Note that this does not set up RegisterFile::CodeBlock, which is set in the callee.
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_scopeChain) + OBJECT_OFFSETOF(ScopeChain, m_node)), regT2);
-
- store32(Imm32(argCount), Address(callFrameRegister, (registerOffset + RegisterFile::ArgumentCount) * static_cast<int>(sizeof(Register))));
- storePtr(callFrameRegister, Address(callFrameRegister, (registerOffset + RegisterFile::CallerFrame) * static_cast<int>(sizeof(Register))));
- emitStore(registerOffset + RegisterFile::Callee, regT1, regT0);
- storePtr(regT2, Address(callFrameRegister, (registerOffset + RegisterFile::ScopeChain) * static_cast<int>(sizeof(Register))));
- addPtr(Imm32(registerOffset * sizeof(Register)), callFrameRegister);
-
- // Call to the callee
- m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedCall();
-
- if (opcodeID == op_call_eval)
- wasEval.link(this);
-
- sampleCodeBlock(m_codeBlock);
-}
-
-void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter, unsigned callLinkInfoIndex, OpcodeID opcodeID)
-{
- int callee = instruction[1].u.operand;
- int argCount = instruction[2].u.operand;
- int registerOffset = instruction[3].u.operand;
-
- linkSlowCase(iter);
- linkSlowCase(iter);
-
- // Fast check for JS function.
- Jump callLinkFailNotObject = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));
- Jump callLinkFailNotJSFunction = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsFunctionVPtr));
-
- // Speculatively roll the callframe, assuming argCount will match the arity.
- storePtr(callFrameRegister, Address(callFrameRegister, (RegisterFile::CallerFrame + registerOffset) * static_cast<int>(sizeof(Register))));
- addPtr(Imm32(registerOffset * static_cast<int>(sizeof(Register))), callFrameRegister);
- move(Imm32(argCount), regT1);
-
- m_callStructureStubCompilationInfo[callLinkInfoIndex].callReturnLocation = emitNakedCall(opcodeID == op_construct ? m_globalData->jitStubs->ctiVirtualConstructLink() : m_globalData->jitStubs->ctiVirtualCallLink());
-
- // Done! - return back to the hot path.
- ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_call_eval));
- ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct));
- emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_call));
-
- // This handles host functions
- callLinkFailNotObject.link(this);
- callLinkFailNotJSFunction.link(this);
-
- JITStubCall stubCall(this, opcodeID == op_construct ? cti_op_construct_NotJSConstruct : cti_op_call_NotJSFunction);
- stubCall.addArgument(callee);
- stubCall.addArgument(JIT::Imm32(registerOffset));
- stubCall.addArgument(JIT::Imm32(argCount));
- stubCall.call();
-
- sampleCodeBlock(m_codeBlock);
-}
-
-/* ------------------------------ END: !ENABLE / ENABLE(JIT_OPTIMIZE_CALL) ------------------------------ */
-
-#endif // !ENABLE(JIT_OPTIMIZE_CALL)
-
-} // namespace JSC
-
-#endif // USE(JSVALUE32_64)
-#endif // ENABLE(JIT)
diff --git a/JavaScriptCore/jit/JITCode.h b/JavaScriptCore/jit/JITCode.h
deleted file mode 100644
index 7346fd5..0000000
--- a/JavaScriptCore/jit/JITCode.h
+++ /dev/null
@@ -1,117 +0,0 @@
-/*
- * Copyright (C) 2008 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef JITCode_h
-#define JITCode_h
-
-#if ENABLE(JIT)
-
-#include "CallFrame.h"
-#include "JSValue.h"
-#include "MacroAssemblerCodeRef.h"
-#include "Profiler.h"
-
-namespace JSC {
-
- class JSGlobalData;
- class RegisterFile;
-
- class JITCode {
- typedef MacroAssemblerCodeRef CodeRef;
- typedef MacroAssemblerCodePtr CodePtr;
- public:
- JITCode()
- {
- }
-
- JITCode(const CodeRef ref)
- : m_ref(ref)
- {
- }
-
- bool operator !() const
- {
- return !m_ref.m_code.executableAddress();
- }
-
- CodePtr addressForCall()
- {
- return m_ref.m_code;
- }
-
- // This function returns the offset in bytes of 'pointerIntoCode' into
- // this block of code. The pointer provided must be a pointer into this
- // block of code. It is ASSERTed that no code block is >4GB in size.
- unsigned offsetOf(void* pointerIntoCode)
- {
- intptr_t result = reinterpret_cast<intptr_t>(pointerIntoCode) - reinterpret_cast<intptr_t>(m_ref.m_code.executableAddress());
- ASSERT(static_cast<intptr_t>(static_cast<unsigned>(result)) == result);
- return static_cast<unsigned>(result);
- }
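
The ASSERT above is the usual narrowing round-trip check: cast the pointer difference down to unsigned, widen it back, and require equality; on 64-bit platforms this rejects any offset that does not fit in 32 bits. Isolated as a sketch:

    #include <cassert>
    #include <cstdint>

    // Asserts precisely when `offset` does not round-trip through 32 bits.
    unsigned checkedNarrowTo32(intptr_t offset)
    {
        assert(static_cast<intptr_t>(static_cast<unsigned>(offset)) == offset);
        return static_cast<unsigned>(offset);
    }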
-
- // Execute the code!
- inline JSValue execute(RegisterFile* registerFile, CallFrame* callFrame, JSGlobalData* globalData)
- {
- JSValue result = JSValue::decode(ctiTrampoline(m_ref.m_code.executableAddress(), registerFile, callFrame, 0, Profiler::enabledProfilerReference(), globalData));
- return globalData->exception ? jsNull() : result;
- }
-
- void* start()
- {
- return m_ref.m_code.dataLocation();
- }
-
- size_t size()
- {
- ASSERT(m_ref.m_code.executableAddress());
- return m_ref.m_size;
- }
-
- ExecutablePool* getExecutablePool()
- {
- return m_ref.m_executablePool.get();
- }
-
- // Host functions are a bit special; they have a m_code pointer but they
- // do not individually ref the executable pool containing the trampoline.
- static JITCode HostFunction(CodePtr code)
- {
- return JITCode(code.dataLocation(), 0, 0);
- }
-
- private:
- JITCode(void* code, PassRefPtr<ExecutablePool> executablePool, size_t size)
- : m_ref(code, executablePool, size)
- {
- }
-
- CodeRef m_ref;
- };
-
-} // namespace JSC
-
-#endif // ENABLE(JIT)
-
-#endif // JITCode_h
diff --git a/JavaScriptCore/jit/JITInlineMethods.h b/JavaScriptCore/jit/JITInlineMethods.h
deleted file mode 100644
index 39ca4a5..0000000
--- a/JavaScriptCore/jit/JITInlineMethods.h
+++ /dev/null
@@ -1,809 +0,0 @@
-/*
- * Copyright (C) 2008 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef JITInlineMethods_h
-#define JITInlineMethods_h
-
-#if ENABLE(JIT)
-
-namespace JSC {
-
-/* Deprecated: Please use JITStubCall instead. */
-
-ALWAYS_INLINE void JIT::emitGetJITStubArg(unsigned argumentNumber, RegisterID dst)
-{
- unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + JITSTACKFRAME_ARGS_INDEX;
- peek(dst, argumentStackOffset);
-}
-
-ALWAYS_INLINE bool JIT::isOperandConstantImmediateDouble(unsigned src)
-{
- return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isDouble();
-}
-
-ALWAYS_INLINE JSValue JIT::getConstantOperand(unsigned src)
-{
- ASSERT(m_codeBlock->isConstantRegisterIndex(src));
- return m_codeBlock->getConstant(src);
-}
-
-ALWAYS_INLINE void JIT::emitPutToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry entry)
-{
- storePtr(from, Address(callFrameRegister, entry * sizeof(Register)));
-}
-
-ALWAYS_INLINE void JIT::emitPutImmediateToCallFrameHeader(void* value, RegisterFile::CallFrameHeaderEntry entry)
-{
- storePtr(ImmPtr(value), Address(callFrameRegister, entry * sizeof(Register)));
-}
-
-ALWAYS_INLINE void JIT::emitGetFromCallFrameHeaderPtr(RegisterFile::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
-{
- loadPtr(Address(from, entry * sizeof(Register)), to);
-#if USE(JSVALUE64)
- killLastResultRegister();
-#endif
-}
-
-ALWAYS_INLINE void JIT::emitLoadCharacterString(RegisterID src, RegisterID dst, JumpList& failures)
-{
- failures.append(branchPtr(NotEqual, Address(src), ImmPtr(m_globalData->jsStringVPtr)));
- failures.append(branchTest32(NonZero, Address(src, OBJECT_OFFSETOF(JSString, m_fiberCount))));
- failures.append(branch32(NotEqual, MacroAssembler::Address(src, ThunkHelpers::jsStringLengthOffset()), Imm32(1)));
- loadPtr(MacroAssembler::Address(src, ThunkHelpers::jsStringValueOffset()), dst);
- loadPtr(MacroAssembler::Address(dst, ThunkHelpers::stringImplDataOffset()), dst);
- load16(MacroAssembler::Address(dst, 0), dst);
-}
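
emitLoadCharacterString takes only the narrowest case: the cell must really be a JSString (the vptr check, omitted below), must not be a rope (fiberCount == 0), and must have length exactly 1; the code then chases the StringImpl and loads the lone 16-bit character. A sketch with stand-in field names (not the real JSString/StringImpl layout):

    #include <cstdint>

    struct StringImplSketch { const uint16_t* data; unsigned length; };
    struct JSStringSketch { unsigned fiberCount; StringImplSketch* impl; };

    // Mirrors the failure branches, then the two pointer loads and the load16.
    bool loadSingleCharacter(const JSStringSketch& s, uint16_t& ch)
    {
        if (s.fiberCount != 0)   // a rope: contents not yet contiguous
            return false;
        if (s.impl->length != 1) // fast path handles length-1 strings only
            return false;
        ch = s.impl->data[0];
        return true;
    }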
-
-ALWAYS_INLINE void JIT::emitGetFromCallFrameHeader32(RegisterFile::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
-{
- load32(Address(from, entry * sizeof(Register)), to);
-#if USE(JSVALUE64)
- killLastResultRegister();
-#endif
-}
-
-ALWAYS_INLINE JIT::Call JIT::emitNakedCall(CodePtr function)
-{
- ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
-
- Call nakedCall = nearCall();
- m_calls.append(CallRecord(nakedCall, m_bytecodeOffset, function.executableAddress()));
- return nakedCall;
-}
-
-#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL
-
-ALWAYS_INLINE void JIT::beginUninterruptedSequence(int insnSpace, int constSpace)
-{
- JSInterfaceJIT::beginUninterruptedSequence();
-#if CPU(ARM_TRADITIONAL)
-#ifndef NDEBUG
- // Ensure the label after the sequence can also fit
- insnSpace += sizeof(ARMWord);
- constSpace += sizeof(uint64_t);
-#endif
-
- ensureSpace(insnSpace, constSpace);
-
-#endif
-
-#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL
-#ifndef NDEBUG
- m_uninterruptedInstructionSequenceBegin = label();
- m_uninterruptedConstantSequenceBegin = sizeOfConstantPool();
-#endif
-#endif
-}
-
-ALWAYS_INLINE void JIT::endUninterruptedSequence(int insnSpace, int constSpace)
-{
-#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL
- /* There are several cases where the uninterrupted sequence is larger than
- * the maximum offset required for patching the same sequence. E.g. if the
- * last macroassembler instruction in an uninterrupted sequence is a stub
- * call, it emits store instruction(s) which should not be included in the
- * calculation of the sequence's length. So insnSpace and constSpace are
- * upper limits rather than hard limits.
- */
- ASSERT(differenceBetween(m_uninterruptedInstructionSequenceBegin, label()) <= insnSpace);
- ASSERT(sizeOfConstantPool() - m_uninterruptedConstantSequenceBegin <= constSpace);
-#endif
- JSInterfaceJIT::endUninterruptedSequence();
-}
-
-#endif
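-
-// Usage sketch (illustrative; the constant name below is hypothetical): the
-// begin/end calls bracket patchable code with matching space estimates, and
-// those estimates must be upper bounds on what is actually emitted:
-//
-//     beginUninterruptedSequence(patchableSequenceInstructionSpace, 0);
-//     ... emit the patchable loads/branches ...
-//     endUninterruptedSequence(patchableSequenceInstructionSpace, 0);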
-
-#if CPU(ARM)
-
-ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg)
-{
- move(linkRegister, reg);
-}
-
-ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(RegisterID reg)
-{
- move(reg, linkRegister);
-}
-
-ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
-{
- loadPtr(address, linkRegister);
-}
-
-#elif CPU(MIPS)
-
-ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg)
-{
- move(returnAddressRegister, reg);
-}
-
-ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(RegisterID reg)
-{
- move(reg, returnAddressRegister);
-}
-
-ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
-{
- loadPtr(address, returnAddressRegister);
-}
-
-#else // CPU(X86) || CPU(X86_64)
-
-ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg)
-{
- pop(reg);
-}
-
-ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(RegisterID reg)
-{
- push(reg);
-}
-
-ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
-{
- push(address);
-}
-
-#endif
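-
-// Summary of the convention selected above: where the return address lives
-// immediately after a call on each target, and hence how it is saved/restored.
-//   ARM  -> lr (linkRegister):           move to/from a GPR
-//   MIPS -> $ra (returnAddressRegister): move to/from a GPR
-//   x86  -> top of stack:                pop/push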
-
-ALWAYS_INLINE void JIT::restoreArgumentReference()
-{
- move(stackPointerRegister, firstArgumentRegister);
- poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
-}
-
-ALWAYS_INLINE void JIT::restoreArgumentReferenceForTrampoline()
-{
-#if CPU(X86)
- // Within a trampoline the return address will be on the stack at this point.
- addPtr(Imm32(sizeof(void*)), stackPointerRegister, firstArgumentRegister);
-#elif CPU(ARM)
- move(stackPointerRegister, firstArgumentRegister);
-#endif
- // In the trampoline on x86-64, the first argument register is not overwritten.
-}
-
-ALWAYS_INLINE JIT::Jump JIT::checkStructure(RegisterID reg, Structure* structure)
-{
- return branchPtr(NotEqual, Address(reg, OBJECT_OFFSETOF(JSCell, m_structure)), ImmPtr(structure));
-}
-
-ALWAYS_INLINE void JIT::linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator& iter, int vReg)
-{
- if (!m_codeBlock->isKnownNotImmediate(vReg))
- linkSlowCase(iter);
-}
-
-ALWAYS_INLINE void JIT::addSlowCase(Jump jump)
-{
- ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
-
- m_slowCases.append(SlowCaseEntry(jump, m_bytecodeOffset));
-}
-
-ALWAYS_INLINE void JIT::addSlowCase(JumpList jumpList)
-{
- ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
-
- const JumpList::JumpVector& jumpVector = jumpList.jumps();
- size_t size = jumpVector.size();
- for (size_t i = 0; i < size; ++i)
- m_slowCases.append(SlowCaseEntry(jumpVector[i], m_bytecodeOffset));
-}
-
-ALWAYS_INLINE void JIT::addJump(Jump jump, int relativeOffset)
-{
- ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
-
- m_jmpTable.append(JumpTable(jump, m_bytecodeOffset + relativeOffset));
-}
-
-ALWAYS_INLINE void JIT::emitJumpSlowToHot(Jump jump, int relativeOffset)
-{
- ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
-
- jump.linkTo(m_labels[m_bytecodeOffset + relativeOffset], this);
-}
-
-#if ENABLE(SAMPLING_FLAGS)
-ALWAYS_INLINE void JIT::setSamplingFlag(int32_t flag)
-{
- ASSERT(flag >= 1);
- ASSERT(flag <= 32);
- or32(Imm32(1u << (flag - 1)), AbsoluteAddress(&SamplingFlags::s_flags));
-}
-
-ALWAYS_INLINE void JIT::clearSamplingFlag(int32_t flag)
-{
- ASSERT(flag >= 1);
- ASSERT(flag <= 32);
- and32(Imm32(~(1u << (flag - 1))), AbsoluteAddress(&SamplingFlags::s_flags));
-}
-#endif
-
-#if ENABLE(SAMPLING_COUNTERS)
-ALWAYS_INLINE void JIT::emitCount(AbstractSamplingCounter& counter, uint32_t count)
-{
-#if CPU(X86_64) // Or any other 64-bit platform.
- addPtr(Imm32(count), AbsoluteAddress(&counter.m_counter));
-#elif CPU(X86) // Or any other little-endian 32-bit platform.
- intptr_t hiWord = reinterpret_cast<intptr_t>(&counter.m_counter) + sizeof(int32_t);
- add32(Imm32(count), AbsoluteAddress(&counter.m_counter));
- addWithCarry32(Imm32(0), AbsoluteAddress(reinterpret_cast<void*>(hiWord)));
-#else
-#error "SAMPLING_FLAGS not implemented on this platform."
-#endif
-}
-#endif
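-
-// The 32-bit path above emulates a 64-bit counter increment: add into the low
-// word, then propagate any carry into the high word (little-endian layout is
-// assumed, as in the CPU(X86) case). A hedged C++ sketch:
-static inline void add64ViaCarrySketch(uint32_t* counterWords, uint32_t count)
-{
- uint32_t oldLow = counterWords[0];
- counterWords[0] += count; // add32(Imm32(count), lowWord)
- if (counterWords[0] < oldLow) // unsigned wrap means a carry was produced
- counterWords[1]++; // addWithCarry32(Imm32(0), hiWord)
-}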
-
-#if ENABLE(OPCODE_SAMPLING)
-#if CPU(X86_64)
-ALWAYS_INLINE void JIT::sampleInstruction(Instruction* instruction, bool inHostFunction)
-{
- move(ImmPtr(m_interpreter->sampler()->sampleSlot()), X86Registers::ecx);
- storePtr(ImmPtr(m_interpreter->sampler()->encodeSample(instruction, inHostFunction)), X86Registers::ecx);
-}
-#else
-ALWAYS_INLINE void JIT::sampleInstruction(Instruction* instruction, bool inHostFunction)
-{
- storePtr(ImmPtr(m_interpreter->sampler()->encodeSample(instruction, inHostFunction)), m_interpreter->sampler()->sampleSlot());
-}
-#endif
-#endif
-
-#if ENABLE(CODEBLOCK_SAMPLING)
-#if CPU(X86_64)
-ALWAYS_INLINE void JIT::sampleCodeBlock(CodeBlock* codeBlock)
-{
- move(ImmPtr(m_interpreter->sampler()->codeBlockSlot()), X86Registers::ecx);
- storePtr(ImmPtr(codeBlock), X86Registers::ecx);
-}
-#else
-ALWAYS_INLINE void JIT::sampleCodeBlock(CodeBlock* codeBlock)
-{
- storePtr(ImmPtr(codeBlock), m_interpreter->sampler()->codeBlockSlot());
-}
-#endif
-#endif
-
-ALWAYS_INLINE bool JIT::isOperandConstantImmediateChar(unsigned src)
-{
- return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isString() && asString(getConstantOperand(src).asCell())->length() == 1;
-}
-
-#if USE(JSVALUE32_64)
-
-inline void JIT::emitLoadTag(unsigned index, RegisterID tag)
-{
- RegisterID mappedTag;
- if (getMappedTag(index, mappedTag)) {
- move(mappedTag, tag);
- unmap(tag);
- return;
- }
-
- if (m_codeBlock->isConstantRegisterIndex(index)) {
- move(Imm32(getConstantOperand(index).tag()), tag);
- unmap(tag);
- return;
- }
-
- load32(tagFor(index), tag);
- unmap(tag);
-}
-
-inline void JIT::emitLoadPayload(unsigned index, RegisterID payload)
-{
- RegisterID mappedPayload;
- if (getMappedPayload(index, mappedPayload)) {
- move(mappedPayload, payload);
- unmap(payload);
- return;
- }
-
- if (m_codeBlock->isConstantRegisterIndex(index)) {
- move(Imm32(getConstantOperand(index).payload()), payload);
- unmap(payload);
- return;
- }
-
- load32(payloadFor(index), payload);
- unmap(payload);
-}
-
-inline void JIT::emitLoad(const JSValue& v, RegisterID tag, RegisterID payload)
-{
- move(Imm32(v.payload()), payload);
- move(Imm32(v.tag()), tag);
-}
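-
-// On JSVALUE32_64 every value is modelled as a 32-bit tag word plus a 32-bit
-// payload word, which is exactly the register pair the loads above fill in.
-// Illustrative sketch of that split:
-struct ValuePairSketch {
- int32_t tag; // e.g. JSValue::Int32Tag, JSValue::CellTag, ...
- int32_t payload; // the integer, the cell pointer, or the boolean
-};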
-
-inline void JIT::emitLoad(unsigned index, RegisterID tag, RegisterID payload, RegisterID base)
-{
- ASSERT(tag != payload);
-
- if (base == callFrameRegister) {
- ASSERT(payload != base);
- emitLoadPayload(index, payload);
- emitLoadTag(index, tag);
- return;
- }
-
- if (payload == base) { // avoid stomping base
- load32(tagFor(index, base), tag);
- load32(payloadFor(index, base), payload);
- return;
- }
-
- load32(payloadFor(index, base), payload);
- load32(tagFor(index, base), tag);
-}
-
-inline void JIT::emitLoad2(unsigned index1, RegisterID tag1, RegisterID payload1, unsigned index2, RegisterID tag2, RegisterID payload2)
-{
- if (isMapped(index1)) {
- emitLoad(index1, tag1, payload1);
- emitLoad(index2, tag2, payload2);
- return;
- }
- emitLoad(index2, tag2, payload2);
- emitLoad(index1, tag1, payload1);
-}
-
-inline void JIT::emitLoadDouble(unsigned index, FPRegisterID value)
-{
- if (m_codeBlock->isConstantRegisterIndex(index)) {
- Register& inConstantPool = m_codeBlock->constantRegister(index);
- loadDouble(&inConstantPool, value);
- } else
- loadDouble(addressFor(index), value);
-}
-
-inline void JIT::emitLoadInt32ToDouble(unsigned index, FPRegisterID value)
-{
- if (m_codeBlock->isConstantRegisterIndex(index)) {
- Register& inConstantPool = m_codeBlock->constantRegister(index);
- char* bytePointer = reinterpret_cast<char*>(&inConstantPool);
- convertInt32ToDouble(AbsoluteAddress(bytePointer + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), value);
- } else
- convertInt32ToDouble(payloadFor(index), value);
-}
-
-inline void JIT::emitStore(unsigned index, RegisterID tag, RegisterID payload, RegisterID base)
-{
- store32(payload, payloadFor(index, base));
- store32(tag, tagFor(index, base));
-}
-
-inline void JIT::emitStoreInt32(unsigned index, RegisterID payload, bool indexIsInt32)
-{
- store32(payload, payloadFor(index, callFrameRegister));
- if (!indexIsInt32)
- store32(Imm32(JSValue::Int32Tag), tagFor(index, callFrameRegister));
-}
-
-inline void JIT::emitStoreInt32(unsigned index, Imm32 payload, bool indexIsInt32)
-{
- store32(payload, payloadFor(index, callFrameRegister));
- if (!indexIsInt32)
- store32(Imm32(JSValue::Int32Tag), tagFor(index, callFrameRegister));
-}
-
-inline void JIT::emitStoreCell(unsigned index, RegisterID payload, bool indexIsCell)
-{
- store32(payload, payloadFor(index, callFrameRegister));
- if (!indexIsCell)
- store32(Imm32(JSValue::CellTag), tagFor(index, callFrameRegister));
-}
-
-inline void JIT::emitStoreBool(unsigned index, RegisterID tag, bool indexIsBool)
-{
- if (!indexIsBool)
- store32(Imm32(0), payloadFor(index, callFrameRegister));
- store32(tag, tagFor(index, callFrameRegister));
-}
-
-inline void JIT::emitStoreDouble(unsigned index, FPRegisterID value)
-{
- storeDouble(value, addressFor(index));
-}
-
-inline void JIT::emitStore(unsigned index, const JSValue constant, RegisterID base)
-{
- store32(Imm32(constant.payload()), payloadFor(index, base));
- store32(Imm32(constant.tag()), tagFor(index, base));
-}
-
-ALWAYS_INLINE void JIT::emitInitRegister(unsigned dst)
-{
- emitStore(dst, jsUndefined());
-}
-
-inline bool JIT::isLabeled(unsigned bytecodeOffset)
-{
- for (size_t numberOfJumpTargets = m_codeBlock->numberOfJumpTargets(); m_jumpTargetIndex != numberOfJumpTargets; ++m_jumpTargetIndex) {
- unsigned jumpTarget = m_codeBlock->jumpTarget(m_jumpTargetIndex);
- if (jumpTarget == bytecodeOffset)
- return true;
- if (jumpTarget > bytecodeOffset)
- return false;
- }
- return false;
-}
-
-inline void JIT::map(unsigned bytecodeOffset, unsigned virtualRegisterIndex, RegisterID tag, RegisterID payload)
-{
- if (isLabeled(bytecodeOffset))
- return;
-
- m_mappedBytecodeOffset = bytecodeOffset;
- m_mappedVirtualRegisterIndex = virtualRegisterIndex;
- m_mappedTag = tag;
- m_mappedPayload = payload;
-}
-
-inline void JIT::unmap(RegisterID registerID)
-{
- if (m_mappedTag == registerID)
- m_mappedTag = (RegisterID)-1;
- else if (m_mappedPayload == registerID)
- m_mappedPayload = (RegisterID)-1;
-}
-
-inline void JIT::unmap()
-{
- m_mappedBytecodeOffset = (unsigned)-1;
- m_mappedVirtualRegisterIndex = (unsigned)-1;
- m_mappedTag = (RegisterID)-1;
- m_mappedPayload = (RegisterID)-1;
-}
-
-inline bool JIT::isMapped(unsigned virtualRegisterIndex)
-{
- if (m_mappedBytecodeOffset != m_bytecodeOffset)
- return false;
- if (m_mappedVirtualRegisterIndex != virtualRegisterIndex)
- return false;
- return true;
-}
-
-inline bool JIT::getMappedPayload(unsigned virtualRegisterIndex, RegisterID& payload)
-{
- if (m_mappedBytecodeOffset != m_bytecodeOffset)
- return false;
- if (m_mappedVirtualRegisterIndex != virtualRegisterIndex)
- return false;
- if (m_mappedPayload == (RegisterID)-1)
- return false;
- payload = m_mappedPayload;
- return true;
-}
-
-inline bool JIT::getMappedTag(unsigned virtualRegisterIndex, RegisterID& tag)
-{
- if (m_mappedBytecodeOffset != m_bytecodeOffset)
- return false;
- if (m_mappedVirtualRegisterIndex != virtualRegisterIndex)
- return false;
- if (m_mappedTag == (RegisterID)-1)
- return false;
- tag = m_mappedTag;
- return true;
-}
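-
-// The mapping above is a one-entry cache: it is only trusted while the
-// bytecode offset is unchanged, and it must be invalidated (unmap) whenever a
-// mapped machine register is clobbered. Decision sketch, mirroring isMapped():
-static inline bool mappingIsValidSketch(unsigned mappedOffset, unsigned currentOffset, unsigned mappedIndex, unsigned wantedIndex)
-{
- return mappedOffset == currentOffset && mappedIndex == wantedIndex;
-}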
-
-inline void JIT::emitJumpSlowCaseIfNotJSCell(unsigned virtualRegisterIndex)
-{
- if (!m_codeBlock->isKnownNotImmediate(virtualRegisterIndex)) {
- if (m_codeBlock->isConstantRegisterIndex(virtualRegisterIndex))
- addSlowCase(jump());
- else
- addSlowCase(emitJumpIfNotJSCell(virtualRegisterIndex));
- }
-}
-
-inline void JIT::emitJumpSlowCaseIfNotJSCell(unsigned virtualRegisterIndex, RegisterID tag)
-{
- if (!m_codeBlock->isKnownNotImmediate(virtualRegisterIndex)) {
- if (m_codeBlock->isConstantRegisterIndex(virtualRegisterIndex))
- addSlowCase(jump());
- else
- addSlowCase(branch32(NotEqual, tag, Imm32(JSValue::CellTag)));
- }
-}
-
-inline void JIT::linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator& iter, unsigned virtualRegisterIndex)
-{
- if (!m_codeBlock->isKnownNotImmediate(virtualRegisterIndex))
- linkSlowCase(iter);
-}
-
-ALWAYS_INLINE bool JIT::isOperandConstantImmediateInt(unsigned src)
-{
- return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isInt32();
-}
-
-ALWAYS_INLINE bool JIT::getOperandConstantImmediateInt(unsigned op1, unsigned op2, unsigned& op, int32_t& constant)
-{
- if (isOperandConstantImmediateInt(op1)) {
- constant = getConstantOperand(op1).asInt32();
- op = op2;
- return true;
- }
-
- if (isOperandConstantImmediateInt(op2)) {
- constant = getConstantOperand(op2).asInt32();
- op = op1;
- return true;
- }
-
- return false;
-}
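-
-// Usage sketch (hypothetical operands): for a commutative op such as "x + 3",
-// one operand may be a constant-register int; the caller then emits the
-// reg-plus-immediate form against the surviving operand:
-//
-//     unsigned op; int32_t constant;
-//     if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
-//         // emit e.g. add32(Imm32(constant), <register holding op>)
-//     }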
-
-#else // USE(JSVALUE32_64)
-
-ALWAYS_INLINE void JIT::killLastResultRegister()
-{
- m_lastResultBytecodeRegister = std::numeric_limits<int>::max();
-}
-
-// Loads an argument from the stack-frame register array into a hardware register.
-ALWAYS_INLINE void JIT::emitGetVirtualRegister(int src, RegisterID dst)
-{
- ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
-
- // TODO: we want to reuse values that are already in registers if we can - add a register allocator!
- if (m_codeBlock->isConstantRegisterIndex(src)) {
- JSValue value = m_codeBlock->getConstant(src);
- move(ImmPtr(JSValue::encode(value)), dst);
- killLastResultRegister();
- return;
- }
-
- if (src == m_lastResultBytecodeRegister && m_codeBlock->isTemporaryRegisterIndex(src)) {
- bool atJumpTarget = false;
- while (m_jumpTargetsPosition < m_codeBlock->numberOfJumpTargets() && m_codeBlock->jumpTarget(m_jumpTargetsPosition) <= m_bytecodeOffset) {
- if (m_codeBlock->jumpTarget(m_jumpTargetsPosition) == m_bytecodeOffset)
- atJumpTarget = true;
- ++m_jumpTargetsPosition;
- }
-
- if (!atJumpTarget) {
- // The argument we want is already stored in the cached result register.
- if (dst != cachedResultRegister)
- move(cachedResultRegister, dst);
- killLastResultRegister();
- return;
- }
- }
-
- loadPtr(Address(callFrameRegister, src * sizeof(Register)), dst);
- killLastResultRegister();
-}
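-
-// Decision sketch for the fast path above: the cached result register can be
-// reused only for a temporary that was the previous instruction's result, and
-// never at a jump target, since a jump may arrive along a path that did not
-// populate the register.
-static inline bool canReuseCachedResultSketch(bool isTemporary, bool isLastResult, bool atJumpTarget)
-{
- return isTemporary && isLastResult && !atJumpTarget;
-}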
-
-ALWAYS_INLINE void JIT::emitGetVirtualRegisters(int src1, RegisterID dst1, int src2, RegisterID dst2)
-{
- if (src2 == m_lastResultBytecodeRegister) {
- emitGetVirtualRegister(src2, dst2);
- emitGetVirtualRegister(src1, dst1);
- } else {
- emitGetVirtualRegister(src1, dst1);
- emitGetVirtualRegister(src2, dst2);
- }
-}
-
-ALWAYS_INLINE int32_t JIT::getConstantOperandImmediateInt(unsigned src)
-{
- return getConstantOperand(src).asInt32();
-}
-
-ALWAYS_INLINE bool JIT::isOperandConstantImmediateInt(unsigned src)
-{
- return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isInt32();
-}
-
-ALWAYS_INLINE void JIT::emitPutVirtualRegister(unsigned dst, RegisterID from)
-{
- storePtr(from, Address(callFrameRegister, dst * sizeof(Register)));
- m_lastResultBytecodeRegister = (from == cachedResultRegister) ? static_cast<int>(dst) : std::numeric_limits<int>::max();
-}
-
-ALWAYS_INLINE void JIT::emitInitRegister(unsigned dst)
-{
- storePtr(ImmPtr(JSValue::encode(jsUndefined())), Address(callFrameRegister, dst * sizeof(Register)));
-}
-
-ALWAYS_INLINE JIT::Jump JIT::emitJumpIfJSCell(RegisterID reg)
-{
-#if USE(JSVALUE64)
- return branchTestPtr(Zero, reg, tagMaskRegister);
-#else
- return branchTest32(Zero, reg, Imm32(JSImmediate::TagMask));
-#endif
-}
-
-ALWAYS_INLINE JIT::Jump JIT::emitJumpIfBothJSCells(RegisterID reg1, RegisterID reg2, RegisterID scratch)
-{
- move(reg1, scratch);
- orPtr(reg2, scratch);
- return emitJumpIfJSCell(scratch);
-}
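-
-// Sketch of the OR trick above (JSVALUE64 view, tag layout assumed): cells
-// have all tag-mask bits clear, so the OR of two values has them clear only
-// if both operands are cells; a single branch then covers both registers.
-static inline bool bothCellsSketch(uint64_t a, uint64_t b, uint64_t tagMask)
-{
- return !((a | b) & tagMask);
-}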
-
-ALWAYS_INLINE void JIT::emitJumpSlowCaseIfJSCell(RegisterID reg)
-{
- addSlowCase(emitJumpIfJSCell(reg));
-}
-
-ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotJSCell(RegisterID reg)
-{
-#if USE(JSVALUE64)
- return branchTestPtr(NonZero, reg, tagMaskRegister);
-#else
- return branchTest32(NonZero, reg, Imm32(JSImmediate::TagMask));
-#endif
-}
-
-ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotJSCell(RegisterID reg)
-{
- addSlowCase(emitJumpIfNotJSCell(reg));
-}
-
-ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotJSCell(RegisterID reg, int vReg)
-{
- if (!m_codeBlock->isKnownNotImmediate(vReg))
- emitJumpSlowCaseIfNotJSCell(reg);
-}
-
-#if USE(JSVALUE64)
-
-inline void JIT::emitLoadDouble(unsigned index, FPRegisterID value)
-{
- if (m_codeBlock->isConstantRegisterIndex(index)) {
- Register& inConstantPool = m_codeBlock->constantRegister(index);
- loadDouble(&inConstantPool, value);
- } else
- loadDouble(addressFor(index), value);
-}
-
-inline void JIT::emitLoadInt32ToDouble(unsigned index, FPRegisterID value)
-{
- if (m_codeBlock->isConstantRegisterIndex(index)) {
- Register& inConstantPool = m_codeBlock->constantRegister(index);
- convertInt32ToDouble(AbsoluteAddress(&inConstantPool), value);
- } else
- convertInt32ToDouble(addressFor(index), value);
-}
-#endif
-
-ALWAYS_INLINE JIT::Jump JIT::emitJumpIfImmediateInteger(RegisterID reg)
-{
-#if USE(JSVALUE64)
- return branchPtr(AboveOrEqual, reg, tagTypeNumberRegister);
-#else
- return branchTest32(NonZero, reg, Imm32(JSImmediate::TagTypeNumber));
-#endif
-}
-
-ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotImmediateInteger(RegisterID reg)
-{
-#if USE(JSVALUE64)
- return branchPtr(Below, reg, tagTypeNumberRegister);
-#else
- return branchTest32(Zero, reg, Imm32(JSImmediate::TagTypeNumber));
-#endif
-}
-
-ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotImmediateIntegers(RegisterID reg1, RegisterID reg2, RegisterID scratch)
-{
- move(reg1, scratch);
- andPtr(reg2, scratch);
- return emitJumpIfNotImmediateInteger(scratch);
-}
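-
-// Sketch of the AND trick above (JSVALUE64, assumed tag encoding): a boxed
-// int32 has all TagTypeNumber bits set, so a & b compares AboveOrEqual to the
-// tag exactly when both operands are integers.
-static inline bool bothBoxedInt32Sketch(uint64_t a, uint64_t b)
-{
- const uint64_t tagTypeNumber = 0xffff000000000000ull; // assumed encoding
- return (a & b) >= tagTypeNumber;
-}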
-
-ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateInteger(RegisterID reg)
-{
- addSlowCase(emitJumpIfNotImmediateInteger(reg));
-}
-
-ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateIntegers(RegisterID reg1, RegisterID reg2, RegisterID scratch)
-{
- addSlowCase(emitJumpIfNotImmediateIntegers(reg1, reg2, scratch));
-}
-
-ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateNumber(RegisterID reg)
-{
- addSlowCase(emitJumpIfNotImmediateNumber(reg));
-}
-
-#if USE(JSVALUE32_64)
-ALWAYS_INLINE void JIT::emitFastArithDeTagImmediate(RegisterID reg)
-{
- subPtr(Imm32(JSImmediate::TagTypeNumber), reg);
-}
-
-ALWAYS_INLINE JIT::Jump JIT::emitFastArithDeTagImmediateJumpIfZero(RegisterID reg)
-{
- return branchSubPtr(Zero, Imm32(JSImmediate::TagTypeNumber), reg);
-}
-#endif
-
-ALWAYS_INLINE void JIT::emitFastArithReTagImmediate(RegisterID src, RegisterID dest)
-{
-#if USE(JSVALUE64)
- emitFastArithIntToImmNoCheck(src, dest);
-#else
- if (src != dest)
- move(src, dest);
- addPtr(Imm32(JSImmediate::TagTypeNumber), dest);
-#endif
-}
-
-// operand is int32_t, must have been zero-extended if register is 64-bit.
-ALWAYS_INLINE void JIT::emitFastArithIntToImmNoCheck(RegisterID src, RegisterID dest)
-{
-#if USE(JSVALUE64)
- if (src != dest)
- move(src, dest);
- orPtr(tagTypeNumberRegister, dest);
-#else
- signExtend32ToPtr(src, dest);
- addPtr(dest, dest);
- emitFastArithReTagImmediate(dest, dest);
-#endif
-}
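-
-// JSVALUE64 boxing sketch (tag constant assumed): an already zero-extended
-// int32 becomes a JSValue by OR-ing in the TagTypeNumber bits, which is the
-// orPtr(tagTypeNumberRegister, dest) above.
-static inline uint64_t boxInt32Sketch(uint32_t zeroExtended)
-{
- const uint64_t tagTypeNumber = 0xffff000000000000ull; // assumed encoding
- return zeroExtended | tagTypeNumber;
-}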
-
-ALWAYS_INLINE void JIT::emitTagAsBoolImmediate(RegisterID reg)
-{
- lshift32(Imm32(JSImmediate::ExtendedPayloadShift), reg);
- or32(Imm32(static_cast<int32_t>(JSImmediate::FullTagTypeBool)), reg);
-}
-
-#endif // USE(JSVALUE32_64)
-
-} // namespace JSC
-
-#endif // ENABLE(JIT)
-
-#endif
diff --git a/JavaScriptCore/jit/JITOpcodes.cpp b/JavaScriptCore/jit/JITOpcodes.cpp
deleted file mode 100644
index 972b879..0000000
--- a/JavaScriptCore/jit/JITOpcodes.cpp
+++ /dev/null
@@ -1,1775 +0,0 @@
-/*
- * Copyright (C) 2009 Apple Inc. All rights reserved.
- * Copyright (C) 2010 Patrick Gansterer <paroga@paroga.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#if ENABLE(JIT)
-#include "JIT.h"
-
-#include "Arguments.h"
-#include "JITInlineMethods.h"
-#include "JITStubCall.h"
-#include "JSArray.h"
-#include "JSCell.h"
-#include "JSFunction.h"
-#include "JSPropertyNameIterator.h"
-#include "LinkBuffer.h"
-
-namespace JSC {
-
-#if USE(JSVALUE64)
-
-#define RECORD_JUMP_TARGET(targetOffset) \
- do { m_labels[m_bytecodeOffset + (targetOffset)].used(); } while (false)
-
-void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executablePool, JSGlobalData* globalData, TrampolineStructure *trampolines)
-{
-#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
- // (2) The second function provides fast property access for string length
- Label stringLengthBegin = align();
-
- // Check eax is a string
- Jump string_failureCases1 = emitJumpIfNotJSCell(regT0);
- Jump string_failureCases2 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr));
-
- // Checks out okay! Get the length from the UString.
- load32(Address(regT0, OBJECT_OFFSETOF(JSString, m_length)), regT0);
-
- Jump string_failureCases3 = branch32(Above, regT0, Imm32(JSImmediate::maxImmediateInt));
-
- // regT0 contains a 64-bit value (positive, zero-extended), so we don't need to sign-extend here.
- emitFastArithIntToImmNoCheck(regT0, regT0);
-
- ret();
-#endif
-
- // (3) Trampolines for the slow cases of op_call / op_call_eval / op_construct.
- COMPILE_ASSERT(sizeof(CodeType) == 4, CodeTypeEnumMustBe32Bit);
-
- // VirtualCallLink Trampoline
- // regT0 holds callee, regT1 holds argCount. regT2 will hold the FunctionExecutable.
- JumpList callLinkFailures;
- Label virtualCallLinkBegin = align();
- compileOpCallInitializeCallFrame();
- preserveReturnAddressAfterCall(regT3);
- emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
- restoreArgumentReference();
- Call callLazyLinkCall = call();
- callLinkFailures.append(branchTestPtr(Zero, regT0));
- restoreReturnAddressBeforeReturn(regT3);
- emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT1);
- jump(regT0);
-
- // VirtualConstructLink Trampoline
- // regT0 holds callee, regT1 holds argCount. regT2 will hold the FunctionExecutable.
- Label virtualConstructLinkBegin = align();
- compileOpCallInitializeCallFrame();
- preserveReturnAddressAfterCall(regT3);
- emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
- restoreArgumentReference();
- Call callLazyLinkConstruct = call();
- callLinkFailures.append(branchTestPtr(Zero, regT0));
- restoreReturnAddressBeforeReturn(regT3);
- emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT1);
- jump(regT0);
-
- // VirtualCall Trampoline
- // regT0 holds callee, regT1 holds argCount. regT2 will hold the FunctionExecutable.
- Label virtualCallBegin = align();
- compileOpCallInitializeCallFrame();
-
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
-
- Jump hasCodeBlock3 = branch32(GreaterThanOrEqual, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParametersForCall)), Imm32(0));
- preserveReturnAddressAfterCall(regT3);
- restoreArgumentReference();
- Call callCompileCall = call();
- callLinkFailures.append(branchTestPtr(Zero, regT0));
- emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT1);
- restoreReturnAddressBeforeReturn(regT3);
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
- hasCodeBlock3.link(this);
-
- loadPtr(Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_jitCodeForCallWithArityCheck)), regT0);
- jump(regT0);
-
- // VirtualConstruct Trampoline
- // regT0 holds callee, regT1 holds argCount. regT2 will hold the FunctionExecutable.
- Label virtualConstructBegin = align();
- compileOpCallInitializeCallFrame();
-
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
-
- Jump hasCodeBlock4 = branch32(GreaterThanOrEqual, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParametersForConstruct)), Imm32(0));
- preserveReturnAddressAfterCall(regT3);
- restoreArgumentReference();
- Call callCompileConstruct = call();
- callLinkFailures.append(branchTestPtr(Zero, regT0));
- emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT1);
- restoreReturnAddressBeforeReturn(regT3);
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
- hasCodeBlock4.link(this);
-
- loadPtr(Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_jitCodeForConstructWithArityCheck)), regT0);
- jump(regT0);
-
- // If the parser fails we want to be able to keep going,
- // so we handle this as a parse failure.
- callLinkFailures.link(this);
- emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT1);
- emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
- restoreReturnAddressBeforeReturn(regT1);
- move(ImmPtr(&globalData->exceptionLocation), regT2);
- storePtr(regT1, regT2);
- poke(callFrameRegister, 1 + OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
- poke(ImmPtr(FunctionPtr(ctiVMThrowTrampoline).value()));
- ret();
-
- // NativeCall Trampoline
- Label nativeCallThunk = privateCompileCTINativeCall(globalData);
- Label nativeConstructThunk = privateCompileCTINativeCall(globalData, true);
-
-#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
- Call string_failureCases1Call = makeTailRecursiveCall(string_failureCases1);
- Call string_failureCases2Call = makeTailRecursiveCall(string_failureCases2);
- Call string_failureCases3Call = makeTailRecursiveCall(string_failureCases3);
-#endif
-
- // All trampolines constructed! copy the code, link up calls, and set the pointers on the Machine object.
- LinkBuffer patchBuffer(this, m_globalData->executableAllocator.poolForSize(m_assembler.size()), 0);
-
-#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
- patchBuffer.link(string_failureCases1Call, FunctionPtr(cti_op_get_by_id_string_fail));
- patchBuffer.link(string_failureCases2Call, FunctionPtr(cti_op_get_by_id_string_fail));
- patchBuffer.link(string_failureCases3Call, FunctionPtr(cti_op_get_by_id_string_fail));
-#endif
-#if ENABLE(JIT_OPTIMIZE_CALL)
- patchBuffer.link(callLazyLinkCall, FunctionPtr(cti_vm_lazyLinkCall));
- patchBuffer.link(callLazyLinkConstruct, FunctionPtr(cti_vm_lazyLinkConstruct));
-#endif
- patchBuffer.link(callCompileCall, FunctionPtr(cti_op_call_jitCompile));
- patchBuffer.link(callCompileConstruct, FunctionPtr(cti_op_construct_jitCompile));
-
- CodeRef finalCode = patchBuffer.finalizeCode();
- *executablePool = finalCode.m_executablePool;
-
- trampolines->ctiVirtualCallLink = patchBuffer.trampolineAt(virtualCallLinkBegin);
- trampolines->ctiVirtualConstructLink = patchBuffer.trampolineAt(virtualConstructLinkBegin);
- trampolines->ctiVirtualCall = patchBuffer.trampolineAt(virtualCallBegin);
- trampolines->ctiVirtualConstruct = patchBuffer.trampolineAt(virtualConstructBegin);
- trampolines->ctiNativeCall = patchBuffer.trampolineAt(nativeCallThunk);
- trampolines->ctiNativeConstruct = patchBuffer.trampolineAt(nativeConstructThunk);
-#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
- trampolines->ctiStringLengthTrampoline = patchBuffer.trampolineAt(stringLengthBegin);
-#endif
-}
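-
-// Note on the linking step above: finalizeCode() copies the assembled
-// trampolines into an executable pool, and the trampolineAt() lookups resolve
-// their entry points; the pool must outlive every caller of these thunks.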
-
-JIT::Label JIT::privateCompileCTINativeCall(JSGlobalData* globalData, bool isConstruct)
-{
- int executableOffsetToFunction = isConstruct ? OBJECT_OFFSETOF(NativeExecutable, m_constructor) : OBJECT_OFFSETOF(NativeExecutable, m_function);
-
- Label nativeCallThunk = align();
-
- emitPutImmediateToCallFrameHeader(0, RegisterFile::CodeBlock);
-
-#if CPU(X86_64)
- // Load caller frame's scope chain into this callframe so that whatever we call can
- // get to its global data.
- emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT0);
- emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT0);
- emitPutToCallFrameHeader(regT1, RegisterFile::ScopeChain);
-
- peek(regT1);
- emitPutToCallFrameHeader(regT1, RegisterFile::ReturnPC);
-
- // Calling convention: f(edi, esi, edx, ecx, ...);
- // Host function signature: f(ExecState*);
- move(callFrameRegister, X86Registers::edi);
-
- subPtr(Imm32(16 - sizeof(void*)), stackPointerRegister); // Align stack after call.
-
- emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, X86Registers::esi);
- loadPtr(Address(X86Registers::esi, OBJECT_OFFSETOF(JSFunction, m_executable)), X86Registers::r9);
- move(regT0, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
- call(Address(X86Registers::r9, executableOffsetToFunction));
-
- addPtr(Imm32(16 - sizeof(void*)), stackPointerRegister);
-
-#elif CPU(ARM)
- // Load caller frame's scope chain into this callframe so that whatever we call can
- // get to its global data.
- emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT2);
- emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT2);
- emitPutToCallFrameHeader(regT1, RegisterFile::ScopeChain);
-
- preserveReturnAddressAfterCall(regT3); // Callee preserved
- emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
-
- // Calling convention: f(r0 == regT0, r1 == regT1, ...);
- // Host function signature: f(ExecState*);
- move(callFrameRegister, ARMRegisters::r0);
-
- emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, ARMRegisters::r1);
- move(regT2, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
- loadPtr(Address(ARMRegisters::r1, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
- call(Address(regT2, executableOffsetToFunction));
-
- restoreReturnAddressBeforeReturn(regT3);
-
-#elif CPU(MIPS)
- // Load caller frame's scope chain into this callframe so that whatever we call can
- // get to its global data.
- emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT0);
- emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT0);
- emitPutToCallFrameHeader(regT1, RegisterFile::ScopeChain);
-
- preserveReturnAddressAfterCall(regT3); // Callee preserved
- emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
-
- // Calling convention: f(a0, a1, a2, a3);
- // Host function signature: f(ExecState*);
-
- // Allocate stack space for 16 bytes (8-byte aligned)
- // 16 bytes (unused) for 4 arguments
- subPtr(Imm32(16), stackPointerRegister);
-
- // Setup arg0
- move(callFrameRegister, MIPSRegisters::a0);
-
- // Call
- emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, MIPSRegisters::a2);
- loadPtr(Address(MIPSRegisters::a2, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
- move(regT0, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
- call(Address(regT2, executableOffsetToFunction));
-
- // Restore stack space
- addPtr(Imm32(16), stackPointerRegister);
-
- restoreReturnAddressBeforeReturn(regT3);
-
-#elif ENABLE(JIT_OPTIMIZE_NATIVE_CALL)
-#error "JIT_OPTIMIZE_NATIVE_CALL not yet supported on this platform."
-#else
- UNUSED_PARAM(executableOffsetToFunction);
- breakpoint();
-#endif
-
- // Check for an exception
- loadPtr(&(globalData->exception), regT2);
- Jump exceptionHandler = branchTestPtr(NonZero, regT2);
-
- // Return.
- ret();
-
- // Handle an exception
- exceptionHandler.link(this);
-
- // Grab the return address.
- preserveReturnAddressAfterCall(regT1);
-
- move(ImmPtr(&globalData->exceptionLocation), regT2);
- storePtr(regT1, regT2);
- poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
-
- // Set the return address.
- move(ImmPtr(FunctionPtr(ctiVMThrowTrampoline).value()), regT1);
- restoreReturnAddressBeforeReturn(regT1);
-
- ret();
-
- return nativeCallThunk;
-}
-
-JIT::CodePtr JIT::privateCompileCTINativeCall(PassRefPtr<ExecutablePool>, JSGlobalData* globalData, NativeFunction)
-{
- return globalData->jitStubs->ctiNativeCall();
-}
-
-void JIT::emit_op_mov(Instruction* currentInstruction)
-{
- int dst = currentInstruction[1].u.operand;
- int src = currentInstruction[2].u.operand;
-
- if (m_codeBlock->isConstantRegisterIndex(src)) {
- storePtr(ImmPtr(JSValue::encode(getConstantOperand(src))), Address(callFrameRegister, dst * sizeof(Register)));
- if (dst == m_lastResultBytecodeRegister)
- killLastResultRegister();
- } else if ((src == m_lastResultBytecodeRegister) || (dst == m_lastResultBytecodeRegister)) {
- // If either the src or dst is the cached register, go through the
- // get/put helpers to make sure we track this correctly.
- emitGetVirtualRegister(src, regT0);
- emitPutVirtualRegister(dst);
- } else {
- // Perform the copy via regT1; do not disturb any mapping in regT0.
- loadPtr(Address(callFrameRegister, src * sizeof(Register)), regT1);
- storePtr(regT1, Address(callFrameRegister, dst * sizeof(Register)));
- }
-}
-
-void JIT::emit_op_end(Instruction* currentInstruction)
-{
- if (m_codeBlock->needsFullScopeChain())
- JITStubCall(this, cti_op_end).call();
-
- ASSERT(returnValueRegister != callFrameRegister);
- emitGetVirtualRegister(currentInstruction[1].u.operand, returnValueRegister);
- restoreReturnAddressBeforeReturn(Address(callFrameRegister, RegisterFile::ReturnPC * static_cast<int>(sizeof(Register))));
- ret();
-}
-
-void JIT::emit_op_jmp(Instruction* currentInstruction)
-{
- unsigned target = currentInstruction[1].u.operand;
- addJump(jump(), target);
- RECORD_JUMP_TARGET(target);
-}
-
-void JIT::emit_op_loop_if_lesseq(Instruction* currentInstruction)
-{
- emitTimeoutCheck();
-
- unsigned op1 = currentInstruction[1].u.operand;
- unsigned op2 = currentInstruction[2].u.operand;
- unsigned target = currentInstruction[3].u.operand;
- if (isOperandConstantImmediateInt(op2)) {
- emitGetVirtualRegister(op1, regT0);
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
- int32_t op2imm = getConstantOperandImmediateInt(op2);
- addJump(branch32(LessThanOrEqual, regT0, Imm32(op2imm)), target);
- } else {
- emitGetVirtualRegisters(op1, regT0, op2, regT1);
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
- emitJumpSlowCaseIfNotImmediateInteger(regT1);
- addJump(branch32(LessThanOrEqual, regT0, regT1), target);
- }
-}
-
-void JIT::emit_op_new_object(Instruction* currentInstruction)
-{
- JITStubCall(this, cti_op_new_object).call(currentInstruction[1].u.operand);
-}
-
-void JIT::emit_op_check_has_instance(Instruction* currentInstruction)
-{
- unsigned baseVal = currentInstruction[1].u.operand;
-
- emitGetVirtualRegister(baseVal, regT0);
-
- // Check that baseVal is a cell.
- emitJumpSlowCaseIfNotJSCell(regT0, baseVal);
-
- // Check that baseVal 'ImplementsHasInstance'.
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT0);
- addSlowCase(branchTest8(Zero, Address(regT0, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(ImplementsHasInstance)));
-}
-
-void JIT::emit_op_instanceof(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned value = currentInstruction[2].u.operand;
- unsigned baseVal = currentInstruction[3].u.operand;
- unsigned proto = currentInstruction[4].u.operand;
-
- // Load the operands (value, baseVal, and proto) into registers.
- // We use regT0 for baseVal since we will be done with this first, and we can then use it for the result.
- emitGetVirtualRegister(value, regT2);
- emitGetVirtualRegister(baseVal, regT0);
- emitGetVirtualRegister(proto, regT1);
-
- // Check that proto is a cell. baseVal must be a cell - this is checked by op_check_has_instance.
- emitJumpSlowCaseIfNotJSCell(regT2, value);
- emitJumpSlowCaseIfNotJSCell(regT1, proto);
-
- // Check that prototype is an object
- loadPtr(Address(regT1, OBJECT_OFFSETOF(JSCell, m_structure)), regT3);
- addSlowCase(branch8(NotEqual, Address(regT3, OBJECT_OFFSETOF(Structure, m_typeInfo.m_type)), Imm32(ObjectType)));
-
- // FIXME: this check is only needed because the JSC API allows HasInstance to be overridden; we should deprecate this.
- // Check that baseVal 'ImplementsDefaultHasInstance'.
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT0);
- addSlowCase(branchTest8(Zero, Address(regT0, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(ImplementsDefaultHasInstance)));
-
- // Optimistically load the result true, and start looping.
- // Initially, regT1 still contains proto and regT2 still contains value.
- // As we loop regT2 will be updated with its prototype, recursively walking the prototype chain.
- move(ImmPtr(JSValue::encode(jsBoolean(true))), regT0);
- Label loop(this);
-
- // Load the prototype of the object in regT2. If this is equal to regT1 - WIN!
- // Otherwise, check if we've hit null - if we have then drop out of the loop, if not go again.
- loadPtr(Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
- loadPtr(Address(regT2, OBJECT_OFFSETOF(Structure, m_prototype)), regT2);
- Jump isInstance = branchPtr(Equal, regT2, regT1);
- emitJumpIfJSCell(regT2).linkTo(loop, this);
-
- // We get here either by dropping out of the loop, or if value was not an Object. Result is false.
- move(ImmPtr(JSValue::encode(jsBoolean(false))), regT0);
-
- // isInstance jumps right down to here, to skip setting the result to false (it has already set true).
- isInstance.link(this);
- emitPutVirtualRegister(dst);
-}
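-
-// The loop above is the JIT'd form of this prototype walk (hedged sketch;
-// accessor names are assumptions about the runtime API of this era):
-static inline bool instanceOfSketch(JSValue value, JSValue prototype)
-{
- // Hop from each cell to its structure's prototype; reaching 'prototype'
- // means true, reaching a non-cell (null) terminator means false.
- while (value.isCell()) {
- value = value.asCell()->structure()->storedPrototype();
- if (value == prototype)
- return true;
- }
- return false;
-}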
-
-void JIT::emit_op_call(Instruction* currentInstruction)
-{
- compileOpCall(op_call, currentInstruction, m_callLinkInfoIndex++);
-}
-
-void JIT::emit_op_call_eval(Instruction* currentInstruction)
-{
- compileOpCall(op_call_eval, currentInstruction, m_callLinkInfoIndex++);
-}
-
-void JIT::emit_op_call_varargs(Instruction* currentInstruction)
-{
- compileOpCallVarargs(currentInstruction);
-}
-
-void JIT::emit_op_construct(Instruction* currentInstruction)
-{
- compileOpCall(op_construct, currentInstruction, m_callLinkInfoIndex++);
-}
-
-void JIT::emit_op_get_global_var(Instruction* currentInstruction)
-{
- JSVariableObject* globalObject = m_codeBlock->globalObject();
- move(ImmPtr(globalObject), regT0);
- emitGetVariableObjectRegister(regT0, currentInstruction[2].u.operand, regT0);
- emitPutVirtualRegister(currentInstruction[1].u.operand);
-}
-
-void JIT::emit_op_put_global_var(Instruction* currentInstruction)
-{
- emitGetVirtualRegister(currentInstruction[2].u.operand, regT1);
- JSVariableObject* globalObject = m_codeBlock->globalObject();
- move(ImmPtr(globalObject), regT0);
- emitPutVariableObjectRegister(regT1, regT0, currentInstruction[1].u.operand);
-}
-
-void JIT::emit_op_get_scoped_var(Instruction* currentInstruction)
-{
- int skip = currentInstruction[3].u.operand;
-
- emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT0);
- bool checkTopLevel = m_codeBlock->codeType() == FunctionCode && m_codeBlock->needsFullScopeChain();
- ASSERT(skip || !checkTopLevel);
- if (checkTopLevel && skip--) {
- Jump activationNotCreated;
- if (checkTopLevel)
- activationNotCreated = branchTestPtr(Zero, addressFor(m_codeBlock->activationRegister()));
- loadPtr(Address(regT0, OBJECT_OFFSETOF(ScopeChainNode, next)), regT0);
- activationNotCreated.link(this);
- }
- while (skip--)
- loadPtr(Address(regT0, OBJECT_OFFSETOF(ScopeChainNode, next)), regT0);
-
- loadPtr(Address(regT0, OBJECT_OFFSETOF(ScopeChainNode, object)), regT0);
- emitGetVariableObjectRegister(regT0, currentInstruction[2].u.operand, regT0);
- emitPutVirtualRegister(currentInstruction[1].u.operand);
-}
-
-void JIT::emit_op_put_scoped_var(Instruction* currentInstruction)
-{
- int skip = currentInstruction[2].u.operand;
-
- emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1);
- emitGetVirtualRegister(currentInstruction[3].u.operand, regT0);
- bool checkTopLevel = m_codeBlock->codeType() == FunctionCode && m_codeBlock->needsFullScopeChain();
- ASSERT(skip || !checkTopLevel);
- if (checkTopLevel && skip--) {
- Jump activationNotCreated;
- if (checkTopLevel)
- activationNotCreated = branchTestPtr(Zero, addressFor(m_codeBlock->activationRegister()));
- loadPtr(Address(regT1, OBJECT_OFFSETOF(ScopeChainNode, next)), regT1);
- activationNotCreated.link(this);
- }
- while (skip--)
- loadPtr(Address(regT1, OBJECT_OFFSETOF(ScopeChainNode, next)), regT1);
-
- loadPtr(Address(regT1, OBJECT_OFFSETOF(ScopeChainNode, object)), regT1);
- emitPutVariableObjectRegister(regT0, regT1, currentInstruction[1].u.operand);
-}
-
-void JIT::emit_op_tear_off_activation(Instruction* currentInstruction)
-{
- unsigned activation = currentInstruction[1].u.operand;
- unsigned arguments = currentInstruction[2].u.operand;
- Jump activationCreated = branchTestPtr(NonZero, addressFor(activation));
- Jump argumentsNotCreated = branchTestPtr(Zero, addressFor(arguments));
- activationCreated.link(this);
- JITStubCall stubCall(this, cti_op_tear_off_activation);
- stubCall.addArgument(activation, regT2);
- stubCall.addArgument(unmodifiedArgumentsRegister(arguments), regT2);
- stubCall.call();
- argumentsNotCreated.link(this);
-}
-
-void JIT::emit_op_tear_off_arguments(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
-
- Jump argsNotCreated = branchTestPtr(Zero, Address(callFrameRegister, sizeof(Register) * (unmodifiedArgumentsRegister(dst))));
- JITStubCall stubCall(this, cti_op_tear_off_arguments);
- stubCall.addArgument(unmodifiedArgumentsRegister(dst), regT2);
- stubCall.call();
- argsNotCreated.link(this);
-}
-
-void JIT::emit_op_ret(Instruction* currentInstruction)
-{
- // We could JIT generate the deref, only calling out to C when the refcount hits zero.
- if (m_codeBlock->needsFullScopeChain()) {
- Jump activationNotCreated = branchTestPtr(Zero, addressFor(m_codeBlock->activationRegister()));
- JITStubCall(this, cti_op_ret_scopeChain).call();
- activationNotCreated.link(this);
- }
- ASSERT(callFrameRegister != regT1);
- ASSERT(regT1 != returnValueRegister);
- ASSERT(returnValueRegister != callFrameRegister);
-
- // Return the result in %eax.
- emitGetVirtualRegister(currentInstruction[1].u.operand, returnValueRegister);
-
- // Grab the return address.
- emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT1);
-
- // Restore our caller's "r".
- emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
-
- // Return.
- restoreReturnAddressBeforeReturn(regT1);
- ret();
-}
-
-void JIT::emit_op_ret_object_or_this(Instruction* currentInstruction)
-{
- // We could JIT generate the deref, only calling out to C when the refcount hits zero.
- if (m_codeBlock->needsFullScopeChain()) {
- Jump activationNotCreated = branchTestPtr(Zero, addressFor(m_codeBlock->activationRegister()));
- JITStubCall(this, cti_op_ret_scopeChain).call();
- activationNotCreated.link(this);
- }
-
- ASSERT(callFrameRegister != regT1);
- ASSERT(regT1 != returnValueRegister);
- ASSERT(returnValueRegister != callFrameRegister);
-
- // Return the result in %eax.
- emitGetVirtualRegister(currentInstruction[1].u.operand, returnValueRegister);
- Jump notJSCell = emitJumpIfNotJSCell(returnValueRegister);
- loadPtr(Address(returnValueRegister, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
- Jump notObject = branch8(NotEqual, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo) + OBJECT_OFFSETOF(TypeInfo, m_type)), Imm32(ObjectType));
-
- // Grab the return address.
- emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT1);
-
- // Restore our caller's "r".
- emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
-
- // Return.
- restoreReturnAddressBeforeReturn(regT1);
- ret();
-
- // Return 'this' in %eax.
- notJSCell.link(this);
- notObject.link(this);
- emitGetVirtualRegister(currentInstruction[2].u.operand, returnValueRegister);
-
- // Grab the return address.
- emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT1);
-
- // Restore our caller's "r".
- emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
-
- // Return.
- restoreReturnAddressBeforeReturn(regT1);
- ret();
-}
-
-void JIT::emit_op_new_array(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, cti_op_new_array);
- stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
- stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
- stubCall.call(currentInstruction[1].u.operand);
-}
-
-void JIT::emit_op_resolve(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, cti_op_resolve);
- stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
- stubCall.call(currentInstruction[1].u.operand);
-}
-
-void JIT::emit_op_to_primitive(Instruction* currentInstruction)
-{
- int dst = currentInstruction[1].u.operand;
- int src = currentInstruction[2].u.operand;
-
- emitGetVirtualRegister(src, regT0);
-
- Jump isImm = emitJumpIfNotJSCell(regT0);
- addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr)));
- isImm.link(this);
-
- if (dst != src)
- emitPutVirtualRegister(dst);
-}
-
-void JIT::emit_op_strcat(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, cti_op_strcat);
- stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
- stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
- stubCall.call(currentInstruction[1].u.operand);
-}
-
-void JIT::emit_op_resolve_base(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, currentInstruction[3].u.operand ? cti_op_resolve_base_strict_put : cti_op_resolve_base);
- stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
- stubCall.call(currentInstruction[1].u.operand);
-}
-
-void JIT::emit_op_ensure_property_exists(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, cti_op_ensure_property_exists);
- stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
- stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
- stubCall.call(currentInstruction[1].u.operand);
-}
-
-void JIT::emit_op_resolve_skip(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, cti_op_resolve_skip);
- stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
- stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
- stubCall.call(currentInstruction[1].u.operand);
-}
-
-void JIT::emit_op_resolve_global(Instruction* currentInstruction, bool)
-{
- // Fast case
- void* globalObject = m_codeBlock->globalObject();
- unsigned currentIndex = m_globalResolveInfoIndex++;
- void* structureAddress = &(m_codeBlock->globalResolveInfo(currentIndex).structure);
- void* offsetAddr = &(m_codeBlock->globalResolveInfo(currentIndex).offset);
-
- // Check Structure of global object
- move(ImmPtr(globalObject), regT0);
- loadPtr(structureAddress, regT1);
- addSlowCase(branchPtr(NotEqual, regT1, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)))); // Structures don't match
-
- // Load cached property
- // Assume that the global object always uses external storage.
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSGlobalObject, m_externalStorage)), regT0);
- load32(offsetAddr, regT1);
- loadPtr(BaseIndex(regT0, regT1, ScalePtr), regT0);
- emitPutVirtualRegister(currentInstruction[1].u.operand);
-}
-
-void JIT::emitSlow_op_resolve_global(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- Identifier* ident = &m_codeBlock->identifier(currentInstruction[2].u.operand);
-
- unsigned currentIndex = m_globalResolveInfoIndex++;
-
- linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_resolve_global);
- stubCall.addArgument(ImmPtr(ident));
- stubCall.addArgument(Imm32(currentIndex));
- stubCall.addArgument(regT0);
- stubCall.call(dst);
-}
-
-void JIT::emit_op_not(Instruction* currentInstruction)
-{
- emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);
- xorPtr(Imm32(static_cast<int32_t>(JSImmediate::FullTagTypeBool)), regT0);
- addSlowCase(branchTestPtr(NonZero, regT0, Imm32(static_cast<int32_t>(~JSImmediate::ExtendedPayloadBitBoolValue))));
- xorPtr(Imm32(static_cast<int32_t>(JSImmediate::FullTagTypeBool | JSImmediate::ExtendedPayloadBitBoolValue)), regT0);
- emitPutVirtualRegister(currentInstruction[1].u.operand);
-}
-
-void JIT::emit_op_jfalse(Instruction* currentInstruction)
-{
- unsigned target = currentInstruction[2].u.operand;
- emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
-
- addJump(branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsNumber(0)))), target);
- Jump isNonZero = emitJumpIfImmediateInteger(regT0);
-
- addJump(branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsBoolean(false)))), target);
- addSlowCase(branchPtr(NotEqual, regT0, ImmPtr(JSValue::encode(jsBoolean(true)))));
-
- isNonZero.link(this);
- RECORD_JUMP_TARGET(target);
-}
-
-void JIT::emit_op_jeq_null(Instruction* currentInstruction)
-{
- unsigned src = currentInstruction[1].u.operand;
- unsigned target = currentInstruction[2].u.operand;
-
- emitGetVirtualRegister(src, regT0);
- Jump isImmediate = emitJumpIfNotJSCell(regT0);
-
- // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
- addJump(branchTest8(NonZero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined)), target);
- Jump wasNotImmediate = jump();
-
- // Now handle the immediate cases - undefined & null
- isImmediate.link(this);
- andPtr(Imm32(~JSImmediate::ExtendedTagBitUndefined), regT0);
- addJump(branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsNull()))), target);
-
- wasNotImmediate.link(this);
- RECORD_JUMP_TARGET(target);
-}
-
-void JIT::emit_op_jneq_null(Instruction* currentInstruction)
-{
- unsigned src = currentInstruction[1].u.operand;
- unsigned target = currentInstruction[2].u.operand;
-
- emitGetVirtualRegister(src, regT0);
- Jump isImmediate = emitJumpIfNotJSCell(regT0);
-
- // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
- addJump(branchTest8(Zero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined)), target);
- Jump wasNotImmediate = jump();
-
- // Now handle the immediate cases - undefined & null
- isImmediate.link(this);
- andPtr(Imm32(~JSImmediate::ExtendedTagBitUndefined), regT0);
- addJump(branchPtr(NotEqual, regT0, ImmPtr(JSValue::encode(jsNull()))), target);
-
- wasNotImmediate.link(this);
- RECORD_JUMP_TARGET(target);
-}
-
-void JIT::emit_op_jneq_ptr(Instruction* currentInstruction)
-{
- unsigned src = currentInstruction[1].u.operand;
- JSCell* ptr = currentInstruction[2].u.jsCell;
- unsigned target = currentInstruction[3].u.operand;
-
- emitGetVirtualRegister(src, regT0);
- addJump(branchPtr(NotEqual, regT0, ImmPtr(JSValue::encode(JSValue(ptr)))), target);
-
- RECORD_JUMP_TARGET(target);
-}
-
-void JIT::emit_op_jsr(Instruction* currentInstruction)
-{
- int retAddrDst = currentInstruction[1].u.operand;
- int target = currentInstruction[2].u.operand;
- DataLabelPtr storeLocation = storePtrWithPatch(ImmPtr(0), Address(callFrameRegister, sizeof(Register) * retAddrDst));
- addJump(jump(), target);
- m_jsrSites.append(JSRInfo(storeLocation, label()));
- killLastResultRegister();
- RECORD_JUMP_TARGET(target);
-}
-
-void JIT::emit_op_sret(Instruction* currentInstruction)
-{
- jump(Address(callFrameRegister, sizeof(Register) * currentInstruction[1].u.operand));
- killLastResultRegister();
-}
-
-void JIT::emit_op_eq(Instruction* currentInstruction)
-{
- emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
- emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);
- set32Compare32(Equal, regT1, regT0, regT0);
- emitTagAsBoolImmediate(regT0);
- emitPutVirtualRegister(currentInstruction[1].u.operand);
-}
-
-void JIT::emit_op_bitnot(Instruction* currentInstruction)
-{
- emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
- not32(regT0);
- emitFastArithIntToImmNoCheck(regT0, regT0);
- emitPutVirtualRegister(currentInstruction[1].u.operand);
-}
-
-void JIT::emit_op_resolve_with_base(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, cti_op_resolve_with_base);
- stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
- stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
- stubCall.call(currentInstruction[2].u.operand);
-}
-
-void JIT::emit_op_new_func_exp(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, cti_op_new_func_exp);
- stubCall.addArgument(ImmPtr(m_codeBlock->functionExpr(currentInstruction[2].u.operand)));
- stubCall.call(currentInstruction[1].u.operand);
-}
-
-void JIT::emit_op_jtrue(Instruction* currentInstruction)
-{
- unsigned target = currentInstruction[2].u.operand;
- emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
-
- Jump isZero = branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsNumber(0))));
- addJump(emitJumpIfImmediateInteger(regT0), target);
-
- addJump(branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsBoolean(true)))), target);
- addSlowCase(branchPtr(NotEqual, regT0, ImmPtr(JSValue::encode(jsBoolean(false)))));
-
- isZero.link(this);
- RECORD_JUMP_TARGET(target);
-}
-
-void JIT::emit_op_neq(Instruction* currentInstruction)
-{
- emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
- emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);
- set32Compare32(NotEqual, regT1, regT0, regT0);
- emitTagAsBoolImmediate(regT0);
- emitPutVirtualRegister(currentInstruction[1].u.operand);
-}
-
-void JIT::emit_op_bitxor(Instruction* currentInstruction)
-{
- emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
- emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);
- xorPtr(regT1, regT0);
- emitFastArithReTagImmediate(regT0, regT0);
- emitPutVirtualRegister(currentInstruction[1].u.operand);
-}
-
-void JIT::emit_op_bitor(Instruction* currentInstruction)
-{
- emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
- emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);
- orPtr(regT1, regT0);
- emitPutVirtualRegister(currentInstruction[1].u.operand);
-}
-
-void JIT::emit_op_throw(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, cti_op_throw);
- stubCall.addArgument(currentInstruction[1].u.operand, regT2);
- stubCall.call();
- ASSERT(regT0 == returnValueRegister);
-#ifndef NDEBUG
- // cti_op_throw always changes its return address;
- // this point in the code should never be reached.
- breakpoint();
-#endif
-}
-
-void JIT::emit_op_get_pnames(Instruction* currentInstruction)
-{
- int dst = currentInstruction[1].u.operand;
- int base = currentInstruction[2].u.operand;
- int i = currentInstruction[3].u.operand;
- int size = currentInstruction[4].u.operand;
- int breakTarget = currentInstruction[5].u.operand;
-
- JumpList isNotObject;
-
- emitGetVirtualRegister(base, regT0);
- if (!m_codeBlock->isKnownNotImmediate(base))
- isNotObject.append(emitJumpIfNotJSCell(regT0));
- if (base != m_codeBlock->thisRegister() || m_codeBlock->isStrictMode()) {
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
- isNotObject.append(branch8(NotEqual, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_type)), Imm32(ObjectType)));
- }
-
- // We could inline the case where you have a valid cache, but
- // this call doesn't seem to be hot.
- Label isObject(this);
- JITStubCall getPnamesStubCall(this, cti_op_get_pnames);
- getPnamesStubCall.addArgument(regT0);
- getPnamesStubCall.call(dst);
- load32(Address(regT0, OBJECT_OFFSETOF(JSPropertyNameIterator, m_jsStringsSize)), regT3);
- store32(Imm32(0), addressFor(i));
- store32(regT3, addressFor(size));
- Jump end = jump();
-
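- // Not an object: null and undefined terminate the for-in loop (masking the
- // undefined tag bit lets one compare match both), while any other primitive
- // is converted via cti_to_object and the fast path retried.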
- isNotObject.link(this);
- move(regT0, regT1);
- and32(Imm32(~JSImmediate::ExtendedTagBitUndefined), regT1);
- addJump(branch32(Equal, regT1, Imm32(JSImmediate::FullTagTypeNull)), breakTarget);
-
- JITStubCall toObjectStubCall(this, cti_to_object);
- toObjectStubCall.addArgument(regT0);
- toObjectStubCall.call(base);
- jump().linkTo(isObject, this);
-
- end.link(this);
-}
-
-void JIT::emit_op_next_pname(Instruction* currentInstruction)
-{
- int dst = currentInstruction[1].u.operand;
- int base = currentInstruction[2].u.operand;
- int i = currentInstruction[3].u.operand;
- int size = currentInstruction[4].u.operand;
- int it = currentInstruction[5].u.operand;
- int target = currentInstruction[6].u.operand;
-
- JumpList callHasProperty;
-
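- // Fast path: while the base object still has the iterator's cached
- // structure and every structure in the cached prototype chain is intact,
- // the pre-computed property name at index i is known to still be valid.
- // Any mismatch defers to cti_has_property below.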
- Label begin(this);
- load32(addressFor(i), regT0);
- Jump end = branch32(Equal, regT0, addressFor(size));
-
- // Grab key @ i
- loadPtr(addressFor(it), regT1);
- loadPtr(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_jsStrings)), regT2);
-
- loadPtr(BaseIndex(regT2, regT0, TimesEight), regT2);
-
- emitPutVirtualRegister(dst, regT2);
-
- // Increment i
- add32(Imm32(1), regT0);
- store32(regT0, addressFor(i));
-
- // Verify that i is valid:
- emitGetVirtualRegister(base, regT0);
-
- // Test base's structure
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
- callHasProperty.append(branchPtr(NotEqual, regT2, Address(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructure)))));
-
- // Test base's prototype chain
- loadPtr(Address(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedPrototypeChain))), regT3);
- loadPtr(Address(regT3, OBJECT_OFFSETOF(StructureChain, m_vector)), regT3);
- addJump(branchTestPtr(Zero, Address(regT3)), target);
-
- Label checkPrototype(this);
- loadPtr(Address(regT2, OBJECT_OFFSETOF(Structure, m_prototype)), regT2);
- callHasProperty.append(emitJumpIfNotJSCell(regT2));
- loadPtr(Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
- callHasProperty.append(branchPtr(NotEqual, regT2, Address(regT3)));
- addPtr(Imm32(sizeof(Structure*)), regT3);
- branchTestPtr(NonZero, Address(regT3)).linkTo(checkPrototype, this);
-
- // Continue loop.
- addJump(jump(), target);
-
- // Slow case: Ask the object if i is valid.
- callHasProperty.link(this);
- emitGetVirtualRegister(dst, regT1);
- JITStubCall stubCall(this, cti_has_property);
- stubCall.addArgument(regT0);
- stubCall.addArgument(regT1);
- stubCall.call();
-
- // Test for valid key.
- addJump(branchTest32(NonZero, regT0), target);
- jump().linkTo(begin, this);
-
- // End of loop.
- end.link(this);
-}
-
-void JIT::emit_op_push_scope(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, cti_op_push_scope);
- stubCall.addArgument(currentInstruction[1].u.operand, regT2);
- stubCall.call(currentInstruction[1].u.operand);
-}
-
-void JIT::emit_op_pop_scope(Instruction*)
-{
- JITStubCall(this, cti_op_pop_scope).call();
-}
-
-void JIT::compileOpStrictEq(Instruction* currentInstruction, CompileOpStrictEqType type)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned src1 = currentInstruction[2].u.operand;
- unsigned src2 = currentInstruction[3].u.operand;
-
- emitGetVirtualRegisters(src1, regT0, src2, regT1);
-
- // Jump to a slow case if either operand is a number, or if both are JSCell*s.
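- // ORing the operands makes one pair of tests suffice: the OR only looks
- // like a cell if both values were cells, and it carries a number tag if
- // either value was a number.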
- move(regT0, regT2);
- orPtr(regT1, regT2);
- addSlowCase(emitJumpIfJSCell(regT2));
- addSlowCase(emitJumpIfImmediateNumber(regT2));
-
- if (type == OpStrictEq)
- set32Compare32(Equal, regT1, regT0, regT0);
- else
- set32Compare32(NotEqual, regT1, regT0, regT0);
- emitTagAsBoolImmediate(regT0);
-
- emitPutVirtualRegister(dst);
-}
-
-void JIT::emit_op_stricteq(Instruction* currentInstruction)
-{
- compileOpStrictEq(currentInstruction, OpStrictEq);
-}
-
-void JIT::emit_op_nstricteq(Instruction* currentInstruction)
-{
- compileOpStrictEq(currentInstruction, OpNStrictEq);
-}
-
-void JIT::emit_op_to_jsnumber(Instruction* currentInstruction)
-{
- int srcVReg = currentInstruction[2].u.operand;
- emitGetVirtualRegister(srcVReg, regT0);
-
- Jump wasImmediate = emitJumpIfImmediateInteger(regT0);
-
- emitJumpSlowCaseIfNotJSCell(regT0, srcVReg);
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
- addSlowCase(branch8(NotEqual, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_type)), Imm32(NumberType)));
-
- wasImmediate.link(this);
-
- emitPutVirtualRegister(currentInstruction[1].u.operand);
-}
-
-void JIT::emit_op_push_new_scope(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, cti_op_push_new_scope);
- stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
- stubCall.addArgument(currentInstruction[3].u.operand, regT2);
- stubCall.call(currentInstruction[1].u.operand);
-}
-
-void JIT::emit_op_catch(Instruction* currentInstruction)
-{
- killLastResultRegister(); // FIXME: Implicitly treat op_catch as a labeled statement, and remove this line of code.
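- // The unwind machinery re-enters here with the handler frame's CallFrame*
- // in regT0; adopt it, then fetch the pending exception from JSGlobalData
- // and clear the slot so the exception cannot be observed twice.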
- move(regT0, callFrameRegister);
- peek(regT3, OBJECT_OFFSETOF(struct JITStackFrame, globalData) / sizeof(void*));
- loadPtr(Address(regT3, OBJECT_OFFSETOF(JSGlobalData, exception)), regT0);
- storePtr(ImmPtr(JSValue::encode(JSValue())), Address(regT3, OBJECT_OFFSETOF(JSGlobalData, exception)));
- emitPutVirtualRegister(currentInstruction[1].u.operand);
-}
-
-void JIT::emit_op_jmp_scopes(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, cti_op_jmp_scopes);
- stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
- stubCall.call();
- addJump(jump(), currentInstruction[2].u.operand);
- RECORD_JUMP_TARGET(currentInstruction[2].u.operand);
-}
-
-void JIT::emit_op_switch_imm(Instruction* currentInstruction)
-{
- unsigned tableIndex = currentInstruction[1].u.operand;
- unsigned defaultOffset = currentInstruction[2].u.operand;
- unsigned scrutinee = currentInstruction[3].u.operand;
-
- // Create a jump table for the switch destinations and record this switch statement.
- SimpleJumpTable* jumpTable = &m_codeBlock->immediateSwitchJumpTable(tableIndex);
- m_switches.append(SwitchRecord(jumpTable, m_bytecodeOffset, defaultOffset, SwitchRecord::Immediate));
- jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
-
- JITStubCall stubCall(this, cti_op_switch_imm);
- stubCall.addArgument(scrutinee, regT2);
- stubCall.addArgument(Imm32(tableIndex));
- stubCall.call();
- jump(regT0);
-}
-
-void JIT::emit_op_switch_char(Instruction* currentInstruction)
-{
- unsigned tableIndex = currentInstruction[1].u.operand;
- unsigned defaultOffset = currentInstruction[2].u.operand;
- unsigned scrutinee = currentInstruction[3].u.operand;
-
- // Create a jump table for the switch destinations and record this switch statement.
- SimpleJumpTable* jumpTable = &m_codeBlock->characterSwitchJumpTable(tableIndex);
- m_switches.append(SwitchRecord(jumpTable, m_bytecodeOffset, defaultOffset, SwitchRecord::Character));
- jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
-
- JITStubCall stubCall(this, cti_op_switch_char);
- stubCall.addArgument(scrutinee, regT2);
- stubCall.addArgument(Imm32(tableIndex));
- stubCall.call();
- jump(regT0);
-}
-
-void JIT::emit_op_switch_string(Instruction* currentInstruction)
-{
- unsigned tableIndex = currentInstruction[1].u.operand;
- unsigned defaultOffset = currentInstruction[2].u.operand;
- unsigned scrutinee = currentInstruction[3].u.operand;
-
- // Create a jump table for the switch destinations and record this switch statement.
- StringJumpTable* jumpTable = &m_codeBlock->stringSwitchJumpTable(tableIndex);
- m_switches.append(SwitchRecord(jumpTable, m_bytecodeOffset, defaultOffset));
-
- JITStubCall stubCall(this, cti_op_switch_string);
- stubCall.addArgument(scrutinee, regT2);
- stubCall.addArgument(Imm32(tableIndex));
- stubCall.call();
- jump(regT0);
-}
-
-void JIT::emit_op_throw_reference_error(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, cti_op_throw_reference_error);
- stubCall.addArgument(ImmPtr(JSValue::encode(m_codeBlock->getConstant(currentInstruction[1].u.operand))));
- stubCall.call();
-}
-
-void JIT::emit_op_throw_syntax_error(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, cti_op_throw_syntax_error);
- stubCall.addArgument(ImmPtr(JSValue::encode(m_codeBlock->getConstant(currentInstruction[1].u.operand))));
- stubCall.call();
-}
-
-void JIT::emit_op_debug(Instruction* currentInstruction)
-{
-#if ENABLE(DEBUG_WITH_BREAKPOINT)
- UNUSED_PARAM(currentInstruction);
- breakpoint();
-#else
- JITStubCall stubCall(this, cti_op_debug);
- stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
- stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
- stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
- stubCall.call();
-#endif
-}
-
-void JIT::emit_op_eq_null(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned src1 = currentInstruction[2].u.operand;
-
- emitGetVirtualRegister(src1, regT0);
- Jump isImmediate = emitJumpIfNotJSCell(regT0);
-
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
- set32Test8(NonZero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined), regT0);
-
- Jump wasNotImmediate = jump();
-
- isImmediate.link(this);
-
- andPtr(Imm32(~JSImmediate::ExtendedTagBitUndefined), regT0);
- setPtr(Equal, regT0, Imm32(JSImmediate::FullTagTypeNull), regT0);
-
- wasNotImmediate.link(this);
-
- emitTagAsBoolImmediate(regT0);
- emitPutVirtualRegister(dst);
-}
-
-void JIT::emit_op_neq_null(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned src1 = currentInstruction[2].u.operand;
-
- emitGetVirtualRegister(src1, regT0);
- Jump isImmediate = emitJumpIfNotJSCell(regT0);
-
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
- set32Test8(Zero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined), regT0);
-
- Jump wasNotImmediate = jump();
-
- isImmediate.link(this);
-
- andPtr(Imm32(~JSImmediate::ExtendedTagBitUndefined), regT0);
- setPtr(NotEqual, regT0, Imm32(JSImmediate::FullTagTypeNull), regT0);
-
- wasNotImmediate.link(this);
-
- emitTagAsBoolImmediate(regT0);
- emitPutVirtualRegister(dst);
-}
-
-void JIT::emit_op_enter(Instruction*)
-{
- // Even though CTI doesn't use them, we initialize the local variable
- // registers to zap stale pointers, to avoid unnecessarily prolonging
- // object lifetime and increasing GC pressure.
- size_t count = m_codeBlock->m_numVars;
- for (size_t j = 0; j < count; ++j)
- emitInitRegister(j);
-}
-
-void JIT::emit_op_create_activation(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
-
- Jump activationCreated = branchTestPtr(NonZero, Address(callFrameRegister, sizeof(Register) * dst));
- JITStubCall(this, cti_op_push_activation).call(currentInstruction[1].u.operand);
- emitPutVirtualRegister(dst);
- activationCreated.link(this);
-}
-
-void JIT::emit_op_create_arguments(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
-
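- // Store the lazily-created arguments object both in dst and in the
- // unmodified-arguments slot, so that tear-off can still find it even if
- // the script later overwrites the 'arguments' local.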
- Jump argsCreated = branchTestPtr(NonZero, Address(callFrameRegister, sizeof(Register) * dst));
- if (m_codeBlock->m_numParameters == 1)
- JITStubCall(this, cti_op_create_arguments_no_params).call();
- else
- JITStubCall(this, cti_op_create_arguments).call();
- emitPutVirtualRegister(dst);
- emitPutVirtualRegister(unmodifiedArgumentsRegister(dst));
- argsCreated.link(this);
-}
-
-void JIT::emit_op_init_lazy_reg(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
-
- storePtr(ImmPtr(0), Address(callFrameRegister, sizeof(Register) * dst));
-}
-
-void JIT::emit_op_convert_this(Instruction* currentInstruction)
-{
- emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
-
- emitJumpSlowCaseIfNotJSCell(regT0);
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT1);
- addSlowCase(branchTest8(NonZero, Address(regT1, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(NeedsThisConversion)));
-}
-
-void JIT::emit_op_convert_this_strict(Instruction* currentInstruction)
-{
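- // In strict mode 'this' is mostly passed through unchanged: an empty
- // (zero) value becomes null, immediates and non-object cells are left
- // alone, and only objects flagged NeedsThisConversion take the slow case.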
- emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
- Jump notNull = branchTestPtr(NonZero, regT0);
- move(ImmPtr(JSValue::encode(jsNull())), regT0);
- emitPutVirtualRegister(currentInstruction[1].u.operand, regT0);
- Jump setThis = jump();
- notNull.link(this);
- Jump isImmediate = emitJumpIfNotJSCell(regT0);
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT1);
- Jump notAnObject = branch8(NotEqual, Address(regT1, OBJECT_OFFSETOF(Structure, m_typeInfo.m_type)), Imm32(ObjectType));
- addSlowCase(branchTest8(NonZero, Address(regT1, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(NeedsThisConversion)));
- isImmediate.link(this);
- notAnObject.link(this);
- setThis.link(this);
-}
-
-void JIT::emit_op_get_callee(Instruction* currentInstruction)
-{
- unsigned result = currentInstruction[1].u.operand;
- emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, regT0);
- emitPutVirtualRegister(result);
-}
-
-void JIT::emit_op_create_this(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, cti_op_create_this);
- stubCall.addArgument(currentInstruction[2].u.operand, regT1);
- stubCall.call(currentInstruction[1].u.operand);
-}
-
-void JIT::emit_op_profile_will_call(Instruction* currentInstruction)
-{
- peek(regT1, OBJECT_OFFSETOF(JITStackFrame, enabledProfilerReference) / sizeof(void*));
- Jump noProfiler = branchTestPtr(Zero, Address(regT1));
-
- JITStubCall stubCall(this, cti_op_profile_will_call);
- stubCall.addArgument(currentInstruction[1].u.operand, regT1);
- stubCall.call();
- noProfiler.link(this);
-}
-
-void JIT::emit_op_profile_did_call(Instruction* currentInstruction)
-{
- peek(regT1, OBJECT_OFFSETOF(JITStackFrame, enabledProfilerReference) / sizeof(void*));
- Jump noProfiler = branchTestPtr(Zero, Address(regT1));
-
- JITStubCall stubCall(this, cti_op_profile_did_call);
- stubCall.addArgument(currentInstruction[1].u.operand, regT1);
- stubCall.call();
- noProfiler.link(this);
-}
-
-
-// Slow cases
-
-void JIT::emitSlow_op_convert_this(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- linkSlowCase(iter);
- linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_convert_this);
- stubCall.addArgument(regT0);
- stubCall.call(currentInstruction[1].u.operand);
-}
-
-void JIT::emitSlow_op_convert_this_strict(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_convert_this_strict);
- stubCall.addArgument(regT0);
- stubCall.call(currentInstruction[1].u.operand);
-}
-
-void JIT::emitSlow_op_to_primitive(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- linkSlowCase(iter);
-
- JITStubCall stubCall(this, cti_op_to_primitive);
- stubCall.addArgument(regT0);
- stubCall.call(currentInstruction[1].u.operand);
-}
-
-void JIT::emitSlow_op_loop_if_lesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned op2 = currentInstruction[2].u.operand;
- unsigned target = currentInstruction[3].u.operand;
- if (isOperandConstantImmediateInt(op2)) {
- linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_loop_if_lesseq);
- stubCall.addArgument(regT0);
- stubCall.addArgument(currentInstruction[2].u.operand, regT2);
- stubCall.call();
- emitJumpSlowToHot(branchTest32(NonZero, regT0), target);
- } else {
- linkSlowCase(iter);
- linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_loop_if_lesseq);
- stubCall.addArgument(regT0);
- stubCall.addArgument(regT1);
- stubCall.call();
- emitJumpSlowToHot(branchTest32(NonZero, regT0), target);
- }
-}
-
-void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned base = currentInstruction[1].u.operand;
- unsigned property = currentInstruction[2].u.operand;
- unsigned value = currentInstruction[3].u.operand;
-
- linkSlowCase(iter); // property int32 check
- linkSlowCaseIfNotJSCell(iter, base); // base cell check
- linkSlowCase(iter); // base not array check
- linkSlowCase(iter); // in vector check
-
- JITStubCall stubPutByValCall(this, cti_op_put_by_val);
- stubPutByValCall.addArgument(regT0);
- stubPutByValCall.addArgument(property, regT2);
- stubPutByValCall.addArgument(value, regT2);
- stubPutByValCall.call();
-}
-
-void JIT::emitSlow_op_not(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- linkSlowCase(iter);
- xorPtr(Imm32(static_cast<int32_t>(JSImmediate::FullTagTypeBool)), regT0);
- JITStubCall stubCall(this, cti_op_not);
- stubCall.addArgument(regT0);
- stubCall.call(currentInstruction[1].u.operand);
-}
-
-void JIT::emitSlow_op_jfalse(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_jtrue);
- stubCall.addArgument(regT0);
- stubCall.call();
- emitJumpSlowToHot(branchTest32(Zero, regT0), currentInstruction[2].u.operand); // inverted!
-}
-
-void JIT::emitSlow_op_bitnot(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_bitnot);
- stubCall.addArgument(regT0);
- stubCall.call(currentInstruction[1].u.operand);
-}
-
-void JIT::emitSlow_op_jtrue(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_jtrue);
- stubCall.addArgument(regT0);
- stubCall.call();
- emitJumpSlowToHot(branchTest32(NonZero, regT0), currentInstruction[2].u.operand);
-}
-
-void JIT::emitSlow_op_bitxor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_bitxor);
- stubCall.addArgument(regT0);
- stubCall.addArgument(regT1);
- stubCall.call(currentInstruction[1].u.operand);
-}
-
-void JIT::emitSlow_op_bitor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_bitor);
- stubCall.addArgument(regT0);
- stubCall.addArgument(regT1);
- stubCall.call(currentInstruction[1].u.operand);
-}
-
-void JIT::emitSlow_op_eq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_eq);
- stubCall.addArgument(regT0);
- stubCall.addArgument(regT1);
- stubCall.call();
- emitTagAsBoolImmediate(regT0);
- emitPutVirtualRegister(currentInstruction[1].u.operand);
-}
-
-void JIT::emitSlow_op_neq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_eq);
- stubCall.addArgument(regT0);
- stubCall.addArgument(regT1);
- stubCall.call();
- xor32(Imm32(0x1), regT0);
- emitTagAsBoolImmediate(regT0);
- emitPutVirtualRegister(currentInstruction[1].u.operand);
-}
-
-void JIT::emitSlow_op_stricteq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- linkSlowCase(iter);
- linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_stricteq);
- stubCall.addArgument(regT0);
- stubCall.addArgument(regT1);
- stubCall.call(currentInstruction[1].u.operand);
-}
-
-void JIT::emitSlow_op_nstricteq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- linkSlowCase(iter);
- linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_nstricteq);
- stubCall.addArgument(regT0);
- stubCall.addArgument(regT1);
- stubCall.call(currentInstruction[1].u.operand);
-}
-
-void JIT::emitSlow_op_check_has_instance(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned baseVal = currentInstruction[1].u.operand;
-
- linkSlowCaseIfNotJSCell(iter, baseVal);
- linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_check_has_instance);
- stubCall.addArgument(baseVal, regT2);
- stubCall.call();
-}
-
-void JIT::emitSlow_op_instanceof(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned value = currentInstruction[2].u.operand;
- unsigned baseVal = currentInstruction[3].u.operand;
- unsigned proto = currentInstruction[4].u.operand;
-
- linkSlowCaseIfNotJSCell(iter, value);
- linkSlowCaseIfNotJSCell(iter, proto);
- linkSlowCase(iter);
- linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_instanceof);
- stubCall.addArgument(value, regT2);
- stubCall.addArgument(baseVal, regT2);
- stubCall.addArgument(proto, regT2);
- stubCall.call(dst);
-}
-
-void JIT::emitSlow_op_call(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- compileOpCallSlowCase(currentInstruction, iter, m_callLinkInfoIndex++, op_call);
-}
-
-void JIT::emitSlow_op_call_eval(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- compileOpCallSlowCase(currentInstruction, iter, m_callLinkInfoIndex++, op_call_eval);
-}
-
-void JIT::emitSlow_op_call_varargs(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- compileOpCallVarargsSlowCase(currentInstruction, iter);
-}
-
-void JIT::emitSlow_op_construct(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- compileOpCallSlowCase(currentInstruction, iter, m_callLinkInfoIndex++, op_construct);
-}
-
-void JIT::emitSlow_op_to_jsnumber(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- linkSlowCaseIfNotJSCell(iter, currentInstruction[2].u.operand);
- linkSlowCase(iter);
-
- JITStubCall stubCall(this, cti_op_to_jsnumber);
- stubCall.addArgument(regT0);
- stubCall.call(currentInstruction[1].u.operand);
-}
-
-void JIT::emit_op_get_arguments_length(Instruction* currentInstruction)
-{
- int dst = currentInstruction[1].u.operand;
- int argumentsRegister = currentInstruction[2].u.operand;
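- // The fast path only applies while the arguments object has not been
- // created: the length is then simply ArgumentCount - 1 (dropping 'this'),
- // retagged as an immediate integer.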
- addSlowCase(branchTestPtr(NonZero, addressFor(argumentsRegister)));
- emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT0);
- sub32(Imm32(1), regT0);
- emitFastArithReTagImmediate(regT0, regT0);
- emitPutVirtualRegister(dst, regT0);
-}
-
-void JIT::emitSlow_op_get_arguments_length(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- linkSlowCase(iter);
- unsigned dst = currentInstruction[1].u.operand;
- unsigned base = currentInstruction[2].u.operand;
- Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
-
- emitGetVirtualRegister(base, regT0);
- JITStubCall stubCall(this, cti_op_get_by_id_generic);
- stubCall.addArgument(regT0);
- stubCall.addArgument(ImmPtr(ident));
- stubCall.call(dst);
-}
-
-void JIT::emit_op_get_argument_by_val(Instruction* currentInstruction)
-{
- int dst = currentInstruction[1].u.operand;
- int argumentsRegister = currentInstruction[2].u.operand;
- int property = currentInstruction[3].u.operand;
- addSlowCase(branchTestPtr(NonZero, addressFor(argumentsRegister)));
- emitGetVirtualRegister(property, regT1);
- addSlowCase(emitJumpIfNotImmediateInteger(regT1));
- add32(Imm32(1), regT1);
- // regT1 now contains the integer index of the argument we want, biased by one because slot 0 holds 'this'.
- emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT2);
- addSlowCase(branch32(AboveOrEqual, regT1, regT2));
-
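- // The first numArgs arguments sit in place just below the call frame
- // header; arguments beyond the declared parameters live in the
- // out-of-line copy a further ArgumentCount registers down.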
- Jump skipOutOfLineParams;
- int numArgs = m_codeBlock->m_numParameters;
- if (numArgs) {
- Jump notInInPlaceArgs = branch32(AboveOrEqual, regT1, Imm32(numArgs));
- addPtr(Imm32(static_cast<unsigned>(-(RegisterFile::CallFrameHeaderSize + numArgs) * sizeof(Register))), callFrameRegister, regT0);
- loadPtr(BaseIndex(regT0, regT1, TimesEight, 0), regT0);
- skipOutOfLineParams = jump();
- notInInPlaceArgs.link(this);
- }
-
- addPtr(Imm32(static_cast<unsigned>(-(RegisterFile::CallFrameHeaderSize + numArgs) * sizeof(Register))), callFrameRegister, regT0);
- mul32(Imm32(sizeof(Register)), regT2, regT2);
- subPtr(regT2, regT0);
- loadPtr(BaseIndex(regT0, regT1, TimesEight, 0), regT0);
- if (numArgs)
- skipOutOfLineParams.link(this);
- emitPutVirtualRegister(dst, regT0);
-}
-
-void JIT::emitSlow_op_get_argument_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned arguments = currentInstruction[2].u.operand;
- unsigned property = currentInstruction[3].u.operand;
-
- linkSlowCase(iter);
- Jump skipArgumentsCreation = jump();
-
- linkSlowCase(iter);
- linkSlowCase(iter);
- if (m_codeBlock->m_numParameters == 1)
- JITStubCall(this, cti_op_create_arguments_no_params).call();
- else
- JITStubCall(this, cti_op_create_arguments).call();
- emitPutVirtualRegister(arguments);
- emitPutVirtualRegister(unmodifiedArgumentsRegister(arguments));
-
- skipArgumentsCreation.link(this);
- JITStubCall stubCall(this, cti_op_get_by_val);
- stubCall.addArgument(arguments, regT2);
- stubCall.addArgument(property, regT2);
- stubCall.call(dst);
-}
-
-#endif // USE(JSVALUE64)
-
-void JIT::emit_op_resolve_global_dynamic(Instruction* currentInstruction)
-{
- int skip = currentInstruction[5].u.operand;
-
- emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT0);
-
- bool checkTopLevel = m_codeBlock->codeType() == FunctionCode && m_codeBlock->needsFullScopeChain();
- ASSERT(skip || !checkTopLevel);
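- // Hop over 'skip' scope-chain nodes; each one must still be a plain
- // activation (checked by structure), otherwise a dynamic scope has been
- // injected and we take the slow case. A top-level activation that has not
- // been created yet is passed over unchecked.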
- if (checkTopLevel && skip--) {
- Jump activationNotCreated;
- if (checkTopLevel)
- activationNotCreated = branchTestPtr(Zero, addressFor(m_codeBlock->activationRegister()));
- loadPtr(Address(regT0, OBJECT_OFFSETOF(ScopeChainNode, object)), regT1);
- addSlowCase(checkStructure(regT1, m_globalData->activationStructure.get()));
- loadPtr(Address(regT0, OBJECT_OFFSETOF(ScopeChainNode, next)), regT0);
- activationNotCreated.link(this);
- }
- while (skip--) {
- loadPtr(Address(regT0, OBJECT_OFFSETOF(ScopeChainNode, object)), regT1);
- addSlowCase(checkStructure(regT1, m_globalData->activationStructure.get()));
- loadPtr(Address(regT0, OBJECT_OFFSETOF(ScopeChainNode, next)), regT0);
- }
- emit_op_resolve_global(currentInstruction, true);
-}
-
-void JIT::emitSlow_op_resolve_global_dynamic(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- Identifier* ident = &m_codeBlock->identifier(currentInstruction[2].u.operand);
- int skip = currentInstruction[5].u.operand;
- while (skip--)
- linkSlowCase(iter);
- JITStubCall resolveStubCall(this, cti_op_resolve);
- resolveStubCall.addArgument(ImmPtr(ident));
- resolveStubCall.call(dst);
- emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_resolve_global_dynamic));
-
- unsigned currentIndex = m_globalResolveInfoIndex++;
-
- linkSlowCase(iter); // We managed to skip all the nodes in the scope chain, but the cache missed.
- JITStubCall stubCall(this, cti_op_resolve_global);
- stubCall.addArgument(ImmPtr(ident));
- stubCall.addArgument(Imm32(currentIndex));
- stubCall.addArgument(regT0);
- stubCall.call(dst);
-}
-
-void JIT::emit_op_new_regexp(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, cti_op_new_regexp);
- stubCall.addArgument(ImmPtr(m_codeBlock->regexp(currentInstruction[2].u.operand)));
- stubCall.call(currentInstruction[1].u.operand);
-}
-
-void JIT::emit_op_load_varargs(Instruction* currentInstruction)
-{
- int argCountDst = currentInstruction[1].u.operand;
- int argsOffset = currentInstruction[2].u.operand;
- int registerOffset = currentInstruction[3].u.operand;
- ASSERT(argsOffset <= registerOffset);
-
- int expectedParams = m_codeBlock->m_numParameters - 1;
- // Don't do inline copying if we aren't guaranteed to have a single
- // stream of arguments.
- if (expectedParams) {
- JITStubCall stubCall(this, cti_op_load_varargs);
- stubCall.addArgument(Imm32(argsOffset));
- stubCall.call();
- // Stores a naked int32 in the register file.
- store32(returnValueRegister, Address(callFrameRegister, argCountDst * sizeof(Register)));
- return;
- }
-
-#if USE(JSVALUE32_64)
- addSlowCase(branch32(NotEqual, tagFor(argsOffset), Imm32(JSValue::EmptyValueTag)));
-#else
- addSlowCase(branchTestPtr(NonZero, addressFor(argsOffset)));
-#endif
- // Load arg count into regT0
- emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT0);
- storePtr(regT0, addressFor(argCountDst));
- Jump endBranch = branch32(Equal, regT0, Imm32(1));
-
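- // Copy argCount - 1 Registers from the caller's argument area into the
- // target buffer, walking the index backwards; on JSVALUE32_64 each
- // Register is copied as two machine words.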
- mul32(Imm32(sizeof(Register)), regT0, regT3);
- addPtr(Imm32(static_cast<unsigned>(sizeof(Register) - RegisterFile::CallFrameHeaderSize * sizeof(Register))), callFrameRegister, regT1);
- subPtr(regT3, regT1); // regT1 is now the start of the out of line arguments
- addPtr(Imm32(argsOffset * sizeof(Register)), callFrameRegister, regT2); // regT2 is the target buffer
-
- // Bounds check the registerfile
- addPtr(regT2, regT3);
- addPtr(Imm32((registerOffset - argsOffset) * sizeof(Register)), regT3);
- addSlowCase(branchPtr(Below, AbsoluteAddress(&m_globalData->interpreter->registerFile().m_end), regT3));
-
- sub32(Imm32(1), regT0);
- Label loopStart = label();
- loadPtr(BaseIndex(regT1, regT0, TimesEight, static_cast<unsigned>(0 - 2 * sizeof(Register))), regT3);
- storePtr(regT3, BaseIndex(regT2, regT0, TimesEight, static_cast<unsigned>(0 - sizeof(Register))));
-#if USE(JSVALUE32_64)
- loadPtr(BaseIndex(regT1, regT0, TimesEight, static_cast<unsigned>(sizeof(void*) - 2 * sizeof(Register))), regT3);
- storePtr(regT3, BaseIndex(regT2, regT0, TimesEight, static_cast<unsigned>(sizeof(void*) - sizeof(Register))));
-#endif
- branchSubPtr(NonZero, Imm32(1), regT0).linkTo(loopStart, this);
- endBranch.link(this);
-}
-
-void JIT::emitSlow_op_load_varargs(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- int argCountDst = currentInstruction[1].u.operand;
- int argsOffset = currentInstruction[2].u.operand;
- int expectedParams = m_codeBlock->m_numParameters - 1;
- if (expectedParams)
- return;
-
- linkSlowCase(iter);
- linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_load_varargs);
- stubCall.addArgument(Imm32(argsOffset));
- stubCall.call();
- // Stores a naked int32 in the register file.
- store32(returnValueRegister, Address(callFrameRegister, argCountDst * sizeof(Register)));
-}
-
-void JIT::emit_op_new_func(Instruction* currentInstruction)
-{
- Jump lazyJump;
- int dst = currentInstruction[1].u.operand;
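- // Operand 3 is set for a declaration that may already have been created
- // lazily; in that case only create the function if dst is still empty.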
- if (currentInstruction[3].u.operand) {
-#if USE(JSVALUE32_64)
- lazyJump = branch32(NotEqual, tagFor(dst), Imm32(JSValue::EmptyValueTag));
-#else
- lazyJump = branchTestPtr(NonZero, addressFor(dst));
-#endif
- }
- JITStubCall stubCall(this, cti_op_new_func);
- stubCall.addArgument(ImmPtr(m_codeBlock->functionDecl(currentInstruction[2].u.operand)));
- stubCall.call(currentInstruction[1].u.operand);
- if (currentInstruction[3].u.operand)
- lazyJump.link(this);
-}
-
-} // namespace JSC
-
-#endif // ENABLE(JIT)
diff --git a/JavaScriptCore/jit/JITOpcodes32_64.cpp b/JavaScriptCore/jit/JITOpcodes32_64.cpp
deleted file mode 100644
index 4ad974c..0000000
--- a/JavaScriptCore/jit/JITOpcodes32_64.cpp
+++ /dev/null
@@ -1,1836 +0,0 @@
-/*
- * Copyright (C) 2009 Apple Inc. All rights reserved.
- * Copyright (C) 2010 Patrick Gansterer <paroga@paroga.com>
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-
-#if ENABLE(JIT)
-#if USE(JSVALUE32_64)
-#include "JIT.h"
-
-#include "JITInlineMethods.h"
-#include "JITStubCall.h"
-#include "JSArray.h"
-#include "JSCell.h"
-#include "JSFunction.h"
-#include "JSPropertyNameIterator.h"
-#include "LinkBuffer.h"
-
-namespace JSC {
-
-void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executablePool, JSGlobalData* globalData, TrampolineStructure *trampolines)
-{
-#if ENABLE(JIT_USE_SOFT_MODULO)
- Label softModBegin = align();
- softModulo();
-#endif
-#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
- // (1) This function provides fast property access for string length
- Label stringLengthBegin = align();
-
- // regT0 holds payload, regT1 holds tag
-
- Jump string_failureCases1 = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));
- Jump string_failureCases2 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr));
-
- // Checks out okay! Get the length from the UString.
- load32(Address(regT0, OBJECT_OFFSETOF(JSString, m_length)), regT2);
-
- Jump string_failureCases3 = branch32(Above, regT2, Imm32(INT_MAX));
- move(regT2, regT0);
- move(Imm32(JSValue::Int32Tag), regT1);
-
- ret();
-#endif
-
- JumpList callLinkFailures;
- // (2) Trampolines for the slow cases of op_call / op_call_eval / op_construct.
-#if ENABLE(JIT_OPTIMIZE_CALL)
- // VirtualCallLink Trampoline
- // regT0 holds callee, regT1 holds argCount. regT2 will hold the FunctionExecutable.
- Label virtualCallLinkBegin = align();
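- // The lazy-link stub compiles and links the callee on first call, then
- // returns its entry point in regT0; a zero result means linking failed
- // and we unwind via the parse-failure path below.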
- compileOpCallInitializeCallFrame();
- preserveReturnAddressAfterCall(regT3);
- emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
- restoreArgumentReference();
- Call callLazyLinkCall = call();
- callLinkFailures.append(branchTestPtr(Zero, regT0));
- restoreReturnAddressBeforeReturn(regT3);
- emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT1);
- jump(regT0);
-
- // VirtualConstructLink Trampoline
- // regT0 holds callee, regT1 holds argCount. regT2 will hold the FunctionExecutable.
- Label virtualConstructLinkBegin = align();
- compileOpCallInitializeCallFrame();
- preserveReturnAddressAfterCall(regT3);
- emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
- restoreArgumentReference();
- Call callLazyLinkConstruct = call();
- restoreReturnAddressBeforeReturn(regT3);
- callLinkFailures.append(branchTestPtr(Zero, regT0));
- emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT1);
- jump(regT0);
-
-#endif // ENABLE(JIT_OPTIMIZE_CALL)
-
- // VirtualCall Trampoline
- // regT0 holds callee, regT1 holds argCount. regT2 will hold the FunctionExecutable.
- Label virtualCallBegin = align();
- compileOpCallInitializeCallFrame();
-
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
-
- Jump hasCodeBlock3 = branch32(GreaterThanOrEqual, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParametersForCall)), Imm32(0));
- preserveReturnAddressAfterCall(regT3);
- restoreArgumentReference();
- Call callCompileCall = call();
- callLinkFailures.append(branchTestPtr(Zero, regT0));
- emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT1);
- restoreReturnAddressBeforeReturn(regT3);
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
- hasCodeBlock3.link(this);
-
- loadPtr(Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_jitCodeForCallWithArityCheck)), regT0);
- jump(regT0);
-
- // VirtualConstruct Trampoline
- // regT0 holds callee, regT1 holds argCount. regT2 will hold the FunctionExecutable.
- Label virtualConstructBegin = align();
- compileOpCallInitializeCallFrame();
-
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
-
- Jump hasCodeBlock4 = branch32(GreaterThanOrEqual, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParametersForConstruct)), Imm32(0));
- preserveReturnAddressAfterCall(regT3);
- restoreArgumentReference();
- Call callCompileConstruct = call();
- callLinkFailures.append(branchTestPtr(Zero, regT0));
- emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT1);
- restoreReturnAddressBeforeReturn(regT3);
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
- hasCodeBlock4.link(this);
-
- loadPtr(Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_jitCodeForConstructWithArityCheck)), regT0);
- jump(regT0);
-
- // If the parser fails we want to be able to keep going, so we handle
- // this as a parse failure.
- callLinkFailures.link(this);
- emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT1);
- emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
- restoreReturnAddressBeforeReturn(regT1);
- move(ImmPtr(&globalData->exceptionLocation), regT2);
- storePtr(regT1, regT2);
- poke(callFrameRegister, 1 + OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
- poke(ImmPtr(FunctionPtr(ctiVMThrowTrampoline).value()));
- ret();
-
- // NativeCall Trampoline
- Label nativeCallThunk = privateCompileCTINativeCall(globalData);
- Label nativeConstructThunk = privateCompileCTINativeCall(globalData, true);
-
-#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
- Call string_failureCases1Call = makeTailRecursiveCall(string_failureCases1);
- Call string_failureCases2Call = makeTailRecursiveCall(string_failureCases2);
- Call string_failureCases3Call = makeTailRecursiveCall(string_failureCases3);
-#endif
-
- // All trampolines constructed! Copy the code, link up calls, and set the pointers on the Machine object.
- LinkBuffer patchBuffer(this, m_globalData->executableAllocator.poolForSize(m_assembler.size()), 0);
-
-#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
- patchBuffer.link(string_failureCases1Call, FunctionPtr(cti_op_get_by_id_string_fail));
- patchBuffer.link(string_failureCases2Call, FunctionPtr(cti_op_get_by_id_string_fail));
- patchBuffer.link(string_failureCases3Call, FunctionPtr(cti_op_get_by_id_string_fail));
-#endif
-#if ENABLE(JIT_OPTIMIZE_CALL)
- patchBuffer.link(callLazyLinkCall, FunctionPtr(cti_vm_lazyLinkCall));
- patchBuffer.link(callLazyLinkConstruct, FunctionPtr(cti_vm_lazyLinkConstruct));
-#endif
- patchBuffer.link(callCompileCall, FunctionPtr(cti_op_call_jitCompile));
- patchBuffer.link(callCompileConstruct, FunctionPtr(cti_op_construct_jitCompile));
-
- CodeRef finalCode = patchBuffer.finalizeCode();
- *executablePool = finalCode.m_executablePool;
-
- trampolines->ctiVirtualCall = patchBuffer.trampolineAt(virtualCallBegin);
- trampolines->ctiVirtualConstruct = patchBuffer.trampolineAt(virtualConstructBegin);
- trampolines->ctiNativeCall = patchBuffer.trampolineAt(nativeCallThunk);
- trampolines->ctiNativeConstruct = patchBuffer.trampolineAt(nativeConstructThunk);
-#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
- trampolines->ctiStringLengthTrampoline = patchBuffer.trampolineAt(stringLengthBegin);
-#endif
-#if ENABLE(JIT_OPTIMIZE_CALL)
- trampolines->ctiVirtualCallLink = patchBuffer.trampolineAt(virtualCallLinkBegin);
- trampolines->ctiVirtualConstructLink = patchBuffer.trampolineAt(virtualConstructLinkBegin);
-#endif
-#if ENABLE(JIT_USE_SOFT_MODULO)
- trampolines->ctiSoftModulo = patchBuffer.trampolineAt(softModBegin);
-#endif
-}
-
-JIT::Label JIT::privateCompileCTINativeCall(JSGlobalData* globalData, bool isConstruct)
-{
- int executableOffsetToFunction = isConstruct ? OBJECT_OFFSETOF(NativeExecutable, m_constructor) : OBJECT_OFFSETOF(NativeExecutable, m_function);
-
- Label nativeCallThunk = align();
-
- emitPutImmediateToCallFrameHeader(0, RegisterFile::CodeBlock);
-
-#if CPU(X86)
- // Load caller frame's scope chain into this callframe so that whatever we call can
- // get to its global data.
- emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT0);
- emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT0);
- emitPutToCallFrameHeader(regT1, RegisterFile::ScopeChain);
-
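- // On x86 the caller's return address is on top of the stack; peek it and
- // store it into the ReturnPC slot so the frame is fully formed.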
- peek(regT1);
- emitPutToCallFrameHeader(regT1, RegisterFile::ReturnPC);
-
- // Calling convention: f(ecx, edx, ...);
- // Host function signature: f(ExecState*);
- move(callFrameRegister, X86Registers::ecx);
-
- subPtr(Imm32(16 - sizeof(void*)), stackPointerRegister); // Align stack after call.
-
- // call the function
- emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, regT1);
- loadPtr(Address(regT1, OBJECT_OFFSETOF(JSFunction, m_executable)), regT1);
- move(regT0, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
- call(Address(regT1, executableOffsetToFunction));
-
- addPtr(Imm32(16 - sizeof(void*)), stackPointerRegister);
-
-#elif CPU(ARM)
- // Load caller frame's scope chain into this callframe so that whatever we call can
- // get to its global data.
- emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT2);
- emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT2);
- emitPutToCallFrameHeader(regT1, RegisterFile::ScopeChain);
-
- preserveReturnAddressAfterCall(regT3); // Callee preserved
- emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
-
- // Calling convention: f(r0 == regT0, r1 == regT1, ...);
- // Host function signature: f(ExecState*);
- move(callFrameRegister, ARMRegisters::r0);
-
- // call the function
- emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, ARMRegisters::r1);
- move(regT2, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
- loadPtr(Address(ARMRegisters::r1, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
- call(Address(regT2, executableOffsetToFunction));
-
- restoreReturnAddressBeforeReturn(regT3);
-
-#elif CPU(MIPS)
- // Load caller frame's scope chain into this callframe so that whatever we call can
- // get to its global data.
- emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT0);
- emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT0);
- emitPutToCallFrameHeader(regT1, RegisterFile::ScopeChain);
-
- preserveReturnAddressAfterCall(regT3); // Callee preserved
- emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
-
- // Calling convention: f(a0, a1, a2, a3);
- // Host function signature: f(ExecState*);
-
- // Allocate 16 bytes of stack space (8-byte aligned); the MIPS calling
- // convention reserves four (unused) argument slots for the callee.
- subPtr(Imm32(16), stackPointerRegister);
-
- // Setup arg0
- move(callFrameRegister, MIPSRegisters::a0);
-
- // Call
- emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, MIPSRegisters::a2);
- loadPtr(Address(MIPSRegisters::a2, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
- move(regT0, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
- call(Address(regT2, executableOffsetToFunction));
-
- // Restore stack space
- addPtr(Imm32(16), stackPointerRegister);
-
- restoreReturnAddressBeforeReturn(regT3);
-
-#elif ENABLE(JIT_OPTIMIZE_NATIVE_CALL)
-#error "JIT_OPTIMIZE_NATIVE_CALL not yet supported on this platform."
-#else
- UNUSED_PARAM(executableOffsetToFunction);
- breakpoint();
-#endif // CPU(X86)
-
- // Check for an exception
- Jump sawException = branch32(NotEqual, AbsoluteAddress(reinterpret_cast<char*>(&globalData->exception) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), Imm32(JSValue::EmptyValueTag));
-
- // Return.
- ret();
-
- // Handle an exception
- sawException.link(this);
-
- // Grab the return address.
- preserveReturnAddressAfterCall(regT1);
-
- move(ImmPtr(&globalData->exceptionLocation), regT2);
- storePtr(regT1, regT2);
- poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
-
- // Set the return address.
- move(ImmPtr(FunctionPtr(ctiVMThrowTrampoline).value()), regT1);
- restoreReturnAddressBeforeReturn(regT1);
-
- ret();
-
- return nativeCallThunk;
-}
-
-JIT::CodePtr JIT::privateCompileCTINativeCall(PassRefPtr<ExecutablePool> executablePool, JSGlobalData* globalData, NativeFunction func)
-{
- Call nativeCall;
- Label nativeCallThunk = align();
-
- emitPutImmediateToCallFrameHeader(0, RegisterFile::CodeBlock);
-
-#if CPU(X86)
- // Load caller frame's scope chain into this callframe so that whatever we call can
- // get to its global data.
- emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT0);
- emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT0);
- emitPutToCallFrameHeader(regT1, RegisterFile::ScopeChain);
-
- peek(regT1);
- emitPutToCallFrameHeader(regT1, RegisterFile::ReturnPC);
-
- // Calling convention: f(ecx, edx, ...);
- // Host function signature: f(ExecState*);
- move(callFrameRegister, X86Registers::ecx);
-
- subPtr(Imm32(16 - sizeof(void*)), stackPointerRegister); // Align stack after call.
-
- move(regT0, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
-
- // call the function
- nativeCall = call();
-
- addPtr(Imm32(16 - sizeof(void*)), stackPointerRegister);
-
-#elif CPU(ARM)
- // Load caller frame's scope chain into this callframe so that whatever we call can
- // get to its global data.
- emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT2);
- emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT2);
- emitPutToCallFrameHeader(regT1, RegisterFile::ScopeChain);
-
- preserveReturnAddressAfterCall(regT3); // Callee preserved
- emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
-
- // Calling convention: f(r0 == regT0, r1 == regT1, ...);
- // Host function signature: f(ExecState*);
- move(callFrameRegister, ARMRegisters::r0);
-
- emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, ARMRegisters::r1);
- move(regT2, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
- loadPtr(Address(ARMRegisters::r1, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
-
- // call the function
- nativeCall = call();
-
- restoreReturnAddressBeforeReturn(regT3);
-
-#elif CPU(MIPS)
- // Load caller frame's scope chain into this callframe so that whatever we call can
- // get to its global data.
- emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT0);
- emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT0);
- emitPutToCallFrameHeader(regT1, RegisterFile::ScopeChain);
-
- preserveReturnAddressAfterCall(regT3); // Callee preserved
- emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
-
- // Calling convention: f(a0, a1, a2, a3);
- // Host function signature: f(ExecState*);
-
- // Allocate 16 bytes of stack space (8-byte aligned); the MIPS calling
- // convention reserves four (unused) argument slots for the callee.
- subPtr(Imm32(16), stackPointerRegister);
-
- // Setup arg0
- move(callFrameRegister, MIPSRegisters::a0);
-
- // Call
- emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, MIPSRegisters::a2);
- loadPtr(Address(MIPSRegisters::a2, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
- move(regT0, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
-
- // call the function
- nativeCall = call();
-
- // Restore stack space
- addPtr(Imm32(16), stackPointerRegister);
-
- restoreReturnAddressBeforeReturn(regT3);
-
-#elif ENABLE(JIT_OPTIMIZE_NATIVE_CALL)
-#error "JIT_OPTIMIZE_NATIVE_CALL not yet supported on this platform."
-#else
- breakpoint();
-#endif // CPU(X86)
-
- // Check for an exception
- Jump sawException = branch32(NotEqual, AbsoluteAddress(reinterpret_cast<char*>(&globalData->exception) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), Imm32(JSValue::EmptyValueTag));
-
- // Return.
- ret();
-
- // Handle an exception
- sawException.link(this);
-
- // Grab the return address.
- preserveReturnAddressAfterCall(regT1);
-
- move(ImmPtr(&globalData->exceptionLocation), regT2);
- storePtr(regT1, regT2);
- poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
-
- // Set the return address.
- move(ImmPtr(FunctionPtr(ctiVMThrowTrampoline).value()), regT1);
- restoreReturnAddressBeforeReturn(regT1);
-
- ret();
-
- // All trampolines constructed! Copy the code, link up calls, and set the pointers on the Machine object.
- LinkBuffer patchBuffer(this, executablePool, 0);
-
- patchBuffer.link(nativeCall, FunctionPtr(func));
- patchBuffer.finalizeCode();
-
- return patchBuffer.trampolineAt(nativeCallThunk);
-}
-
-void JIT::emit_op_mov(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned src = currentInstruction[2].u.operand;
-
- if (m_codeBlock->isConstantRegisterIndex(src))
- emitStore(dst, getConstantOperand(src));
- else {
- emitLoad(src, regT1, regT0);
- emitStore(dst, regT1, regT0);
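- // Record that regT1:regT0 (tag:payload) currently hold dst so a
- // following opcode can reuse them without reloading.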
- map(m_bytecodeOffset + OPCODE_LENGTH(op_mov), dst, regT1, regT0);
- }
-}
-
-void JIT::emit_op_end(Instruction* currentInstruction)
-{
- if (m_codeBlock->needsFullScopeChain())
- JITStubCall(this, cti_op_end).call();
- ASSERT(returnValueRegister != callFrameRegister);
- emitLoad(currentInstruction[1].u.operand, regT1, regT0);
- restoreReturnAddressBeforeReturn(Address(callFrameRegister, RegisterFile::ReturnPC * static_cast<int>(sizeof(Register))));
- ret();
-}
-
-void JIT::emit_op_jmp(Instruction* currentInstruction)
-{
- unsigned target = currentInstruction[1].u.operand;
- addJump(jump(), target);
-}
-
-void JIT::emit_op_loop_if_lesseq(Instruction* currentInstruction)
-{
- unsigned op1 = currentInstruction[1].u.operand;
- unsigned op2 = currentInstruction[2].u.operand;
- unsigned target = currentInstruction[3].u.operand;
-
- emitTimeoutCheck();
-
- if (isOperandConstantImmediateInt(op1)) {
- emitLoad(op2, regT1, regT0);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- addJump(branch32(GreaterThanOrEqual, regT0, Imm32(getConstantOperand(op1).asInt32())), target);
- return;
- }
-
- if (isOperandConstantImmediateInt(op2)) {
- emitLoad(op1, regT1, regT0);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- addJump(branch32(LessThanOrEqual, regT0, Imm32(getConstantOperand(op2).asInt32())), target);
- return;
- }
-
- emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
- addJump(branch32(LessThanOrEqual, regT0, regT2), target);
-}
-
-void JIT::emitSlow_op_loop_if_lesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned op1 = currentInstruction[1].u.operand;
- unsigned op2 = currentInstruction[2].u.operand;
- unsigned target = currentInstruction[3].u.operand;
-
- if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
- linkSlowCase(iter); // int32 check
- linkSlowCase(iter); // int32 check
-
- JITStubCall stubCall(this, cti_op_loop_if_lesseq);
- stubCall.addArgument(op1);
- stubCall.addArgument(op2);
- stubCall.call();
- emitJumpSlowToHot(branchTest32(NonZero, regT0), target);
-}
-
-void JIT::emit_op_new_object(Instruction* currentInstruction)
-{
- JITStubCall(this, cti_op_new_object).call(currentInstruction[1].u.operand);
-}
-
-void JIT::emit_op_check_has_instance(Instruction* currentInstruction)
-{
- unsigned baseVal = currentInstruction[1].u.operand;
-
- emitLoadPayload(baseVal, regT0);
-
- // Check that baseVal is a cell.
- emitJumpSlowCaseIfNotJSCell(baseVal);
-
- // Check that baseVal 'ImplementsHasInstance'.
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT0);
- addSlowCase(branchTest8(Zero, Address(regT0, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(ImplementsHasInstance)));
-}
-
-void JIT::emit_op_instanceof(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned value = currentInstruction[2].u.operand;
- unsigned baseVal = currentInstruction[3].u.operand;
- unsigned proto = currentInstruction[4].u.operand;
-
- // Load the operands into registers.
- // We use regT0 for baseVal since we will be done with this first, and we can then use it for the result.
- emitLoadPayload(value, regT2);
- emitLoadPayload(baseVal, regT0);
- emitLoadPayload(proto, regT1);
-
- // Check that value and proto are cells. baseVal must be a cell - this is checked by op_check_has_instance.
- emitJumpSlowCaseIfNotJSCell(value);
- emitJumpSlowCaseIfNotJSCell(proto);
-
- // Check that prototype is an object
- loadPtr(Address(regT1, OBJECT_OFFSETOF(JSCell, m_structure)), regT3);
- addSlowCase(branch8(NotEqual, Address(regT3, OBJECT_OFFSETOF(Structure, m_typeInfo.m_type)), Imm32(ObjectType)));
-
- // FIXME: This check is only needed because the JSC API allows HasInstance to be overridden; we should deprecate this.
- // Check that baseVal 'ImplementsDefaultHasInstance'.
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT0);
- addSlowCase(branchTest8(Zero, Address(regT0, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(ImplementsDefaultHasInstance)));
-
- // Optimistically load the result true, and start looping.
- // Initially, regT1 still contains proto and regT2 still contains value.
- // As we loop regT2 will be updated with its prototype, recursively walking the prototype chain.
- move(Imm32(JSValue::TrueTag), regT0);
- Label loop(this);
-
- // Load the prototype of the cell in regT2. If this is equal to regT1 - WIN!
- // Otherwise, check if we've hit null - if we have then drop out of the loop, if not go again.
- loadPtr(Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
- load32(Address(regT2, OBJECT_OFFSETOF(Structure, m_prototype) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT2);
- Jump isInstance = branchPtr(Equal, regT2, regT1);
- branchTest32(NonZero, regT2).linkTo(loop, this);
-
- // We get here either by dropping out of the loop, or if value was not an Object. Result is false.
- move(Imm32(JSValue::FalseTag), regT0);
-
- // isInstance jumps right down to here, to skip setting the result to false (it has already set true).
- isInstance.link(this);
- emitStoreBool(dst, regT0);
-}
-
-void JIT::emitSlow_op_check_has_instance(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned baseVal = currentInstruction[1].u.operand;
-
- linkSlowCaseIfNotJSCell(iter, baseVal);
- linkSlowCase(iter);
-
- JITStubCall stubCall(this, cti_op_check_has_instance);
- stubCall.addArgument(baseVal);
- stubCall.call();
-}
-
-void JIT::emitSlow_op_instanceof(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned value = currentInstruction[2].u.operand;
- unsigned baseVal = currentInstruction[3].u.operand;
- unsigned proto = currentInstruction[4].u.operand;
-
- linkSlowCaseIfNotJSCell(iter, value);
- linkSlowCaseIfNotJSCell(iter, proto);
- linkSlowCase(iter);
- linkSlowCase(iter);
-
- JITStubCall stubCall(this, cti_op_instanceof);
- stubCall.addArgument(value);
- stubCall.addArgument(baseVal);
- stubCall.addArgument(proto);
- stubCall.call(dst);
-}
-
-void JIT::emit_op_get_global_var(Instruction* currentInstruction)
-{
- int dst = currentInstruction[1].u.operand;
- JSGlobalObject* globalObject = m_codeBlock->globalObject();
- ASSERT(globalObject->isGlobalObject());
- int index = currentInstruction[2].u.operand;
-
- loadPtr(&globalObject->d()->registers, regT2);
-
- emitLoad(index, regT1, regT0, regT2);
- emitStore(dst, regT1, regT0);
- map(m_bytecodeOffset + OPCODE_LENGTH(op_get_global_var), dst, regT1, regT0);
-}
-
-void JIT::emit_op_put_global_var(Instruction* currentInstruction)
-{
- JSGlobalObject* globalObject = m_codeBlock->globalObject();
- ASSERT(globalObject->isGlobalObject());
- int index = currentInstruction[1].u.operand;
- int value = currentInstruction[2].u.operand;
-
- emitLoad(value, regT1, regT0);
-
- loadPtr(&globalObject->d()->registers, regT2);
- emitStore(index, regT1, regT0, regT2);
- map(m_bytecodeOffset + OPCODE_LENGTH(op_put_global_var), value, regT1, regT0);
-}
-
-void JIT::emit_op_get_scoped_var(Instruction* currentInstruction)
-{
- int dst = currentInstruction[1].u.operand;
- int index = currentInstruction[2].u.operand;
- int skip = currentInstruction[3].u.operand;
-
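- // Hop 'skip' links up the scope chain (a top-level activation that has
- // not yet been created is passed over), then read the variable from the
- // resulting JSVariableObject's register array.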
- emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT2);
- bool checkTopLevel = m_codeBlock->codeType() == FunctionCode && m_codeBlock->needsFullScopeChain();
- ASSERT(skip || !checkTopLevel);
- if (checkTopLevel && skip--) {
- Jump activationNotCreated;
- if (checkTopLevel)
- activationNotCreated = branch32(Equal, tagFor(m_codeBlock->activationRegister()), Imm32(JSValue::EmptyValueTag));
- loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, next)), regT2);
- activationNotCreated.link(this);
- }
- while (skip--)
- loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, next)), regT2);
-
- loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, object)), regT2);
- loadPtr(Address(regT2, OBJECT_OFFSETOF(JSVariableObject, d)), regT2);
- loadPtr(Address(regT2, OBJECT_OFFSETOF(JSVariableObject::JSVariableObjectData, registers)), regT2);
-
- emitLoad(index, regT1, regT0, regT2);
- emitStore(dst, regT1, regT0);
- map(m_bytecodeOffset + OPCODE_LENGTH(op_get_scoped_var), dst, regT1, regT0);
-}
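-
-// The skip-walk above matches this C++ sketch (field names as in the
-// OBJECT_OFFSETOF loads; the pointer types are illustrative):
-//
-//     static Register* scopedRegisters(ScopeChainNode* scope, int skip)
-//     {
-//         while (skip--)
-//             scope = scope->next;            // hop over inner scopes
-//         return scope->object->d->registers; // registers of the target scope
-//     }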
-
-void JIT::emit_op_put_scoped_var(Instruction* currentInstruction)
-{
- int index = currentInstruction[1].u.operand;
- int skip = currentInstruction[2].u.operand;
- int value = currentInstruction[3].u.operand;
-
- emitLoad(value, regT1, regT0);
-
- emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT2);
- bool checkTopLevel = m_codeBlock->codeType() == FunctionCode && m_codeBlock->needsFullScopeChain();
- ASSERT(skip || !checkTopLevel);
- if (checkTopLevel && skip--) {
- Jump activationNotCreated;
- if (checkTopLevel)
- activationNotCreated = branch32(Equal, tagFor(m_codeBlock->activationRegister()), Imm32(JSValue::EmptyValueTag));
- loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, next)), regT2);
- activationNotCreated.link(this);
- }
- while (skip--)
- loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, next)), regT2);
-
- loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, object)), regT2);
- loadPtr(Address(regT2, OBJECT_OFFSETOF(JSVariableObject, d)), regT2);
- loadPtr(Address(regT2, OBJECT_OFFSETOF(JSVariableObject::JSVariableObjectData, registers)), regT2);
-
- emitStore(index, regT1, regT0, regT2);
- map(m_bytecodeOffset + OPCODE_LENGTH(op_put_scoped_var), value, regT1, regT0);
-}
-
-void JIT::emit_op_tear_off_activation(Instruction* currentInstruction)
-{
- unsigned activation = currentInstruction[1].u.operand;
- unsigned arguments = currentInstruction[2].u.operand;
- Jump activationCreated = branch32(NotEqual, tagFor(activation), Imm32(JSValue::EmptyValueTag));
- Jump argumentsNotCreated = branch32(Equal, tagFor(arguments), Imm32(JSValue::EmptyValueTag));
- activationCreated.link(this);
- JITStubCall stubCall(this, cti_op_tear_off_activation);
- stubCall.addArgument(currentInstruction[1].u.operand);
- stubCall.addArgument(unmodifiedArgumentsRegister(currentInstruction[2].u.operand));
- stubCall.call();
- argumentsNotCreated.link(this);
-}
-
-void JIT::emit_op_tear_off_arguments(Instruction* currentInstruction)
-{
- int dst = currentInstruction[1].u.operand;
-
- Jump argsNotCreated = branch32(Equal, tagFor(unmodifiedArgumentsRegister(dst)), Imm32(JSValue::EmptyValueTag));
- JITStubCall stubCall(this, cti_op_tear_off_arguments);
- stubCall.addArgument(unmodifiedArgumentsRegister(dst));
- stubCall.call();
- argsNotCreated.link(this);
-}
-
-void JIT::emit_op_new_array(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, cti_op_new_array);
- stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
- stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
- stubCall.call(currentInstruction[1].u.operand);
-}
-
-void JIT::emit_op_resolve(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, cti_op_resolve);
- stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
- stubCall.call(currentInstruction[1].u.operand);
-}
-
-void JIT::emit_op_to_primitive(Instruction* currentInstruction)
-{
- int dst = currentInstruction[1].u.operand;
- int src = currentInstruction[2].u.operand;
-
- emitLoad(src, regT1, regT0);
-
- Jump isImm = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));
- addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr)));
- isImm.link(this);
-
- if (dst != src)
- emitStore(dst, regT1, regT0);
- map(m_bytecodeOffset + OPCODE_LENGTH(op_to_primitive), dst, regT1, regT0);
-}
-
-void JIT::emitSlow_op_to_primitive(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- int dst = currentInstruction[1].u.operand;
-
- linkSlowCase(iter);
-
- JITStubCall stubCall(this, cti_op_to_primitive);
- stubCall.addArgument(regT1, regT0);
- stubCall.call(dst);
-}
-
-void JIT::emit_op_strcat(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, cti_op_strcat);
- stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
- stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
- stubCall.call(currentInstruction[1].u.operand);
-}
-
-void JIT::emit_op_resolve_base(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, currentInstruction[3].u.operand ? cti_op_resolve_base_strict_put : cti_op_resolve_base);
- stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
- stubCall.call(currentInstruction[1].u.operand);
-}
-
-void JIT::emit_op_ensure_property_exists(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, cti_op_ensure_property_exists);
- stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
- stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
- stubCall.call(currentInstruction[1].u.operand);
-}
-
-void JIT::emit_op_resolve_skip(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, cti_op_resolve_skip);
- stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
- stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
- stubCall.call(currentInstruction[1].u.operand);
-}
-
-void JIT::emit_op_resolve_global(Instruction* currentInstruction, bool dynamic)
-{
- // FIXME: Optimize to use patching instead of so many memory accesses.
-
- unsigned dst = currentInstruction[1].u.operand;
- void* globalObject = m_codeBlock->globalObject();
-
- unsigned currentIndex = m_globalResolveInfoIndex++;
- void* structureAddress = &(m_codeBlock->globalResolveInfo(currentIndex).structure);
- void* offsetAddr = &(m_codeBlock->globalResolveInfo(currentIndex).offset);
-
- // Verify structure.
- move(ImmPtr(globalObject), regT0);
- loadPtr(structureAddress, regT1);
- addSlowCase(branchPtr(NotEqual, regT1, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure))));
-
- // Load property.
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSGlobalObject, m_externalStorage)), regT2);
- load32(offsetAddr, regT3);
- load32(BaseIndex(regT2, regT3, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0); // payload
- load32(BaseIndex(regT2, regT3, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1); // tag
- emitStore(dst, regT1, regT0);
- map(m_bytecodeOffset + (dynamic ? OPCODE_LENGTH(op_resolve_global_dynamic) : OPCODE_LENGTH(op_resolve_global)), dst, regT1, regT0);
-}
-
-void JIT::emitSlow_op_resolve_global(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- Identifier* ident = &m_codeBlock->identifier(currentInstruction[2].u.operand);
-
- unsigned currentIndex = m_globalResolveInfoIndex++;
-
- linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_resolve_global);
- stubCall.addArgument(ImmPtr(ident));
- stubCall.addArgument(Imm32(currentIndex));
- stubCall.call(dst);
-}
-
-void JIT::emit_op_not(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned src = currentInstruction[2].u.operand;
-
- emitLoadTag(src, regT0);
-
- xor32(Imm32(JSValue::FalseTag), regT0);
- addSlowCase(branchTest32(NonZero, regT0, Imm32(~1)));
- xor32(Imm32(JSValue::TrueTag), regT0);
-
- emitStoreBool(dst, regT0, (dst == src));
-}
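-
-// The xor trick above relies on the boolean tags differing only in bit 0
-// (the branchTest32 against ~1 enforces exactly that). Worked sketch:
-//
-//     uint32_t t = tag ^ JSValue::FalseTag; // FalseTag -> 0, TrueTag -> 1
-//     if (t & ~1u)
-//         goto slowCase;                    // any non-boolean tag bails out
-//     t ^= JSValue::TrueTag;                // 0 -> TrueTag, 1 -> FalseTag
-//     // t now holds the tag of the negated boolean, ready to store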
-
-void JIT::emitSlow_op_not(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned src = currentInstruction[2].u.operand;
-
- linkSlowCase(iter);
-
- JITStubCall stubCall(this, cti_op_not);
- stubCall.addArgument(src);
- stubCall.call(dst);
-}
-
-void JIT::emit_op_jfalse(Instruction* currentInstruction)
-{
- unsigned cond = currentInstruction[1].u.operand;
- unsigned target = currentInstruction[2].u.operand;
-
- emitLoad(cond, regT1, regT0);
-
- Jump isTrue = branch32(Equal, regT1, Imm32(JSValue::TrueTag));
- addJump(branch32(Equal, regT1, Imm32(JSValue::FalseTag)), target);
-
- Jump isNotInteger = branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag));
- Jump isTrue2 = branch32(NotEqual, regT0, Imm32(0));
- addJump(jump(), target);
-
- if (supportsFloatingPoint()) {
- isNotInteger.link(this);
-
- addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));
-
- zeroDouble(fpRegT0);
- emitLoadDouble(cond, fpRegT1);
- addJump(branchDouble(DoubleEqualOrUnordered, fpRegT0, fpRegT1), target);
- } else
- addSlowCase(isNotInteger);
-
- isTrue.link(this);
- isTrue2.link(this);
-}
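-
-// On the floating-point path above, one DoubleEqualOrUnordered branch covers
-// every falsy double. Equivalent C++ sketch:
-//
-//     static bool doubleIsFalsy(double d)
-//     {
-//         return d == 0.0 || d != d; // +0.0/-0.0 compare equal; NaN is unordered
-//     }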
-
-void JIT::emitSlow_op_jfalse(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned cond = currentInstruction[1].u.operand;
- unsigned target = currentInstruction[2].u.operand;
-
- linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_jtrue);
- stubCall.addArgument(cond);
- stubCall.call();
- emitJumpSlowToHot(branchTest32(Zero, regT0), target); // Inverted.
-}
-
-void JIT::emit_op_jtrue(Instruction* currentInstruction)
-{
- unsigned cond = currentInstruction[1].u.operand;
- unsigned target = currentInstruction[2].u.operand;
-
- emitLoad(cond, regT1, regT0);
-
- Jump isFalse = branch32(Equal, regT1, Imm32(JSValue::FalseTag));
- addJump(branch32(Equal, regT1, Imm32(JSValue::TrueTag)), target);
-
- Jump isNotInteger = branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag));
- Jump isFalse2 = branch32(Equal, regT0, Imm32(0));
- addJump(jump(), target);
-
- if (supportsFloatingPoint()) {
- isNotInteger.link(this);
-
- addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));
-
- zeroDouble(fpRegT0);
- emitLoadDouble(cond, fpRegT1);
- addJump(branchDouble(DoubleNotEqual, fpRegT0, fpRegT1), target);
- } else
- addSlowCase(isNotInteger);
-
- isFalse.link(this);
- isFalse2.link(this);
-}
-
-void JIT::emitSlow_op_jtrue(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned cond = currentInstruction[1].u.operand;
- unsigned target = currentInstruction[2].u.operand;
-
- linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_jtrue);
- stubCall.addArgument(cond);
- stubCall.call();
- emitJumpSlowToHot(branchTest32(NonZero, regT0), target);
-}
-
-void JIT::emit_op_jeq_null(Instruction* currentInstruction)
-{
- unsigned src = currentInstruction[1].u.operand;
- unsigned target = currentInstruction[2].u.operand;
-
- emitLoad(src, regT1, regT0);
-
- Jump isImmediate = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));
-
- // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
- addJump(branchTest8(NonZero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined)), target);
-
- Jump wasNotImmediate = jump();
-
- // Now handle the immediate cases - undefined & null
- isImmediate.link(this);
-
- ASSERT((JSValue::UndefinedTag + 1 == JSValue::NullTag) && !(JSValue::NullTag + 1));
- addJump(branch32(AboveOrEqual, regT1, Imm32(JSValue::UndefinedTag)), target);
-
- wasNotImmediate.link(this);
-}
-
-void JIT::emit_op_jneq_null(Instruction* currentInstruction)
-{
- unsigned src = currentInstruction[1].u.operand;
- unsigned target = currentInstruction[2].u.operand;
-
- emitLoad(src, regT1, regT0);
-
- Jump isImmediate = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));
-
- // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
- addJump(branchTest8(Zero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined)), target);
-
- Jump wasNotImmediate = jump();
-
- // Now handle the immediate cases - undefined & null
- isImmediate.link(this);
-
- ASSERT((JSValue::UndefinedTag + 1 == JSValue::NullTag) && !(JSValue::NullTag + 1));
- addJump(branch32(Below, regT1, Imm32(JSValue::UndefinedTag)), target);
-
- wasNotImmediate.link(this);
-}
-
-void JIT::emit_op_jneq_ptr(Instruction* currentInstruction)
-{
- unsigned src = currentInstruction[1].u.operand;
- JSCell* ptr = currentInstruction[2].u.jsCell;
- unsigned target = currentInstruction[3].u.operand;
-
- emitLoad(src, regT1, regT0);
- addJump(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)), target);
- addJump(branchPtr(NotEqual, regT0, ImmPtr(ptr)), target);
-}
-
-void JIT::emit_op_jsr(Instruction* currentInstruction)
-{
- int retAddrDst = currentInstruction[1].u.operand;
- int target = currentInstruction[2].u.operand;
- DataLabelPtr storeLocation = storePtrWithPatch(ImmPtr(0), Address(callFrameRegister, sizeof(Register) * retAddrDst));
- addJump(jump(), target);
- m_jsrSites.append(JSRInfo(storeLocation, label()));
-}
-
-void JIT::emit_op_sret(Instruction* currentInstruction)
-{
- jump(Address(callFrameRegister, sizeof(Register) * currentInstruction[1].u.operand));
-}
-
-void JIT::emit_op_eq(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned src1 = currentInstruction[2].u.operand;
- unsigned src2 = currentInstruction[3].u.operand;
-
- emitLoad2(src1, regT1, regT0, src2, regT3, regT2);
- addSlowCase(branch32(NotEqual, regT1, regT3));
- addSlowCase(branch32(Equal, regT1, Imm32(JSValue::CellTag)));
- addSlowCase(branch32(Below, regT1, Imm32(JSValue::LowestTag)));
-
- set8Compare32(Equal, regT0, regT2, regT0);
- or32(Imm32(JSValue::FalseTag), regT0);
-
- emitStoreBool(dst, regT0);
-}
-
-void JIT::emitSlow_op_eq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
-
- JumpList storeResult;
- JumpList genericCase;
-
- genericCase.append(getSlowCase(iter)); // tags not equal
-
- linkSlowCase(iter); // tags equal and JSCell
- genericCase.append(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr)));
- genericCase.append(branchPtr(NotEqual, Address(regT2), ImmPtr(m_globalData->jsStringVPtr)));
-
- // String case.
- JITStubCall stubCallEqStrings(this, cti_op_eq_strings);
- stubCallEqStrings.addArgument(regT0);
- stubCallEqStrings.addArgument(regT2);
- stubCallEqStrings.call();
- storeResult.append(jump());
-
- // Generic case.
- genericCase.append(getSlowCase(iter)); // doubles
- genericCase.link(this);
- JITStubCall stubCallEq(this, cti_op_eq);
- stubCallEq.addArgument(op1);
- stubCallEq.addArgument(op2);
- stubCallEq.call(regT0);
-
- storeResult.link(this);
- or32(Imm32(JSValue::FalseTag), regT0);
- emitStoreBool(dst, regT0);
-}
-
-void JIT::emit_op_neq(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned src1 = currentInstruction[2].u.operand;
- unsigned src2 = currentInstruction[3].u.operand;
-
- emitLoad2(src1, regT1, regT0, src2, regT3, regT2);
- addSlowCase(branch32(NotEqual, regT1, regT3));
- addSlowCase(branch32(Equal, regT1, Imm32(JSValue::CellTag)));
- addSlowCase(branch32(Below, regT1, Imm32(JSValue::LowestTag)));
-
- set8Compare32(NotEqual, regT0, regT2, regT0);
- or32(Imm32(JSValue::FalseTag), regT0);
-
- emitStoreBool(dst, regT0);
-}
-
-void JIT::emitSlow_op_neq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
-
- JumpList storeResult;
- JumpList genericCase;
-
- genericCase.append(getSlowCase(iter)); // tags not equal
-
- linkSlowCase(iter); // tags equal and JSCell
- genericCase.append(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr)));
- genericCase.append(branchPtr(NotEqual, Address(regT2), ImmPtr(m_globalData->jsStringVPtr)));
-
- // String case.
- JITStubCall stubCallEqStrings(this, cti_op_eq_strings);
- stubCallEqStrings.addArgument(regT0);
- stubCallEqStrings.addArgument(regT2);
- stubCallEqStrings.call(regT0);
- storeResult.append(jump());
-
- // Generic case.
- genericCase.append(getSlowCase(iter)); // doubles
- genericCase.link(this);
- JITStubCall stubCallEq(this, cti_op_eq);
- stubCallEq.addArgument(regT1, regT0);
- stubCallEq.addArgument(regT3, regT2);
- stubCallEq.call(regT0);
-
- storeResult.link(this);
- xor32(Imm32(0x1), regT0);
- or32(Imm32(JSValue::FalseTag), regT0);
- emitStoreBool(dst, regT0);
-}
-
-void JIT::compileOpStrictEq(Instruction* currentInstruction, CompileOpStrictEqType type)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned src1 = currentInstruction[2].u.operand;
- unsigned src2 = currentInstruction[3].u.operand;
-
- emitLoadTag(src1, regT0);
- emitLoadTag(src2, regT1);
-
- // Jump to a slow case if either operand is double, or if both operands are
- // cells and/or Int32s.
- move(regT0, regT2);
- and32(regT1, regT2);
- addSlowCase(branch32(Below, regT2, Imm32(JSValue::LowestTag)));
- addSlowCase(branch32(AboveOrEqual, regT2, Imm32(JSValue::CellTag)));
-
- if (type == OpStrictEq)
- set8Compare32(Equal, regT0, regT1, regT0);
- else
- set8Compare32(NotEqual, regT0, regT1, regT0);
-
- or32(Imm32(JSValue::FalseTag), regT0);
-
- emitStoreBool(dst, regT0);
-}
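-
-// The and32 of the two tags works because x & y <= min(x, y): if either
-// operand is a double (tag below LowestTag), the AND is dragged below
-// LowestTag as well, while an AND at or above CellTag implies both tags are
-// cell/int32 tags, whose payloads must be compared. Sketch:
-//
-//     static bool strictEqNeedsSlowPath(uint32_t tag1, uint32_t tag2)
-//     {
-//         uint32_t both = tag1 & tag2;     // <= min(tag1, tag2)
-//         return both < JSValue::LowestTag // at least one operand is a double
-//             || both >= JSValue::CellTag; // cells/int32s: compare payloads
-//     }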
-
-void JIT::emit_op_stricteq(Instruction* currentInstruction)
-{
- compileOpStrictEq(currentInstruction, OpStrictEq);
-}
-
-void JIT::emitSlow_op_stricteq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned src1 = currentInstruction[2].u.operand;
- unsigned src2 = currentInstruction[3].u.operand;
-
- linkSlowCase(iter);
- linkSlowCase(iter);
-
- JITStubCall stubCall(this, cti_op_stricteq);
- stubCall.addArgument(src1);
- stubCall.addArgument(src2);
- stubCall.call(dst);
-}
-
-void JIT::emit_op_nstricteq(Instruction* currentInstruction)
-{
- compileOpStrictEq(currentInstruction, OpNStrictEq);
-}
-
-void JIT::emitSlow_op_nstricteq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned src1 = currentInstruction[2].u.operand;
- unsigned src2 = currentInstruction[3].u.operand;
-
- linkSlowCase(iter);
- linkSlowCase(iter);
-
- JITStubCall stubCall(this, cti_op_nstricteq);
- stubCall.addArgument(src1);
- stubCall.addArgument(src2);
- stubCall.call(dst);
-}
-
-void JIT::emit_op_eq_null(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned src = currentInstruction[2].u.operand;
-
- emitLoad(src, regT1, regT0);
- Jump isImmediate = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));
-
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT1);
- set32Test8(NonZero, Address(regT1, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined), regT1);
-
- Jump wasNotImmediate = jump();
-
- isImmediate.link(this);
-
- set8Compare32(Equal, regT1, Imm32(JSValue::NullTag), regT2);
- set8Compare32(Equal, regT1, Imm32(JSValue::UndefinedTag), regT1);
- or32(regT2, regT1);
-
- wasNotImmediate.link(this);
-
- or32(Imm32(JSValue::FalseTag), regT1);
-
- emitStoreBool(dst, regT1);
-}
-
-void JIT::emit_op_neq_null(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned src = currentInstruction[2].u.operand;
-
- emitLoad(src, regT1, regT0);
- Jump isImmediate = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));
-
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT1);
- set32Test8(Zero, Address(regT1, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined), regT1);
-
- Jump wasNotImmediate = jump();
-
- isImmediate.link(this);
-
- set8Compare32(NotEqual, regT1, Imm32(JSValue::NullTag), regT2);
- set8Compare32(NotEqual, regT1, Imm32(JSValue::UndefinedTag), regT1);
- and32(regT2, regT1);
-
- wasNotImmediate.link(this);
-
- or32(Imm32(JSValue::FalseTag), regT1);
-
- emitStoreBool(dst, regT1);
-}
-
-void JIT::emit_op_resolve_with_base(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, cti_op_resolve_with_base);
- stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
- stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
- stubCall.call(currentInstruction[2].u.operand);
-}
-
-void JIT::emit_op_new_func_exp(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, cti_op_new_func_exp);
- stubCall.addArgument(ImmPtr(m_codeBlock->functionExpr(currentInstruction[2].u.operand)));
- stubCall.call(currentInstruction[1].u.operand);
-}
-
-void JIT::emit_op_throw(Instruction* currentInstruction)
-{
- unsigned exception = currentInstruction[1].u.operand;
- JITStubCall stubCall(this, cti_op_throw);
- stubCall.addArgument(exception);
- stubCall.call();
-
-#ifndef NDEBUG
- // cti_op_throw always changes its return address,
- // so this point in the code should never be reached.
- breakpoint();
-#endif
-}
-
-void JIT::emit_op_get_pnames(Instruction* currentInstruction)
-{
- int dst = currentInstruction[1].u.operand;
- int base = currentInstruction[2].u.operand;
- int i = currentInstruction[3].u.operand;
- int size = currentInstruction[4].u.operand;
- int breakTarget = currentInstruction[5].u.operand;
-
- JumpList isNotObject;
-
- emitLoad(base, regT1, regT0);
- if (!m_codeBlock->isKnownNotImmediate(base))
- isNotObject.append(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));
- if (base != m_codeBlock->thisRegister() || m_codeBlock->isStrictMode()) {
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
- isNotObject.append(branch8(NotEqual, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_type)), Imm32(ObjectType)));
- }
-
- // We could inline the case where you have a valid cache, but
- // this call doesn't seem to be hot.
- Label isObject(this);
- JITStubCall getPnamesStubCall(this, cti_op_get_pnames);
- getPnamesStubCall.addArgument(regT0);
- getPnamesStubCall.call(dst);
- load32(Address(regT0, OBJECT_OFFSETOF(JSPropertyNameIterator, m_jsStringsSize)), regT3);
- store32(Imm32(0), addressFor(i));
- store32(regT3, addressFor(size));
- Jump end = jump();
-
- isNotObject.link(this);
- addJump(branch32(Equal, regT1, Imm32(JSValue::NullTag)), breakTarget);
- addJump(branch32(Equal, regT1, Imm32(JSValue::UndefinedTag)), breakTarget);
- JITStubCall toObjectStubCall(this, cti_to_object);
- toObjectStubCall.addArgument(regT1, regT0);
- toObjectStubCall.call(base);
- jump().linkTo(isObject, this);
-
- end.link(this);
-}
-
-void JIT::emit_op_next_pname(Instruction* currentInstruction)
-{
- int dst = currentInstruction[1].u.operand;
- int base = currentInstruction[2].u.operand;
- int i = currentInstruction[3].u.operand;
- int size = currentInstruction[4].u.operand;
- int it = currentInstruction[5].u.operand;
- int target = currentInstruction[6].u.operand;
-
- JumpList callHasProperty;
-
- Label begin(this);
- load32(addressFor(i), regT0);
- Jump end = branch32(Equal, regT0, addressFor(size));
-
- // Grab key @ i
- loadPtr(addressFor(it), regT1);
- loadPtr(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_jsStrings)), regT2);
- load32(BaseIndex(regT2, regT0, TimesEight), regT2);
- store32(Imm32(JSValue::CellTag), tagFor(dst));
- store32(regT2, payloadFor(dst));
-
- // Increment i
- add32(Imm32(1), regT0);
- store32(regT0, addressFor(i));
-
- // Verify that i is valid:
- loadPtr(addressFor(base), regT0);
-
- // Test base's structure
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
- callHasProperty.append(branchPtr(NotEqual, regT2, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructure))));
-
- // Test base's prototype chain
- loadPtr(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedPrototypeChain)), regT3);
- loadPtr(Address(regT3, OBJECT_OFFSETOF(StructureChain, m_vector)), regT3);
- addJump(branchTestPtr(Zero, Address(regT3)), target);
-
- Label checkPrototype(this);
- callHasProperty.append(branch32(Equal, Address(regT2, OBJECT_OFFSETOF(Structure, m_prototype) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), Imm32(JSValue::NullTag)));
- loadPtr(Address(regT2, OBJECT_OFFSETOF(Structure, m_prototype) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT2);
- loadPtr(Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
- callHasProperty.append(branchPtr(NotEqual, regT2, Address(regT3)));
- addPtr(Imm32(sizeof(Structure*)), regT3);
- branchTestPtr(NonZero, Address(regT3)).linkTo(checkPrototype, this);
-
- // Continue loop.
- addJump(jump(), target);
-
- // Slow case: Ask the object if i is valid.
- callHasProperty.link(this);
- loadPtr(addressFor(dst), regT1);
- JITStubCall stubCall(this, cti_has_property);
- stubCall.addArgument(regT0);
- stubCall.addArgument(regT1);
- stubCall.call();
-
- // Test for valid key.
- addJump(branchTest32(NonZero, regT0), target);
- jump().linkTo(begin, this);
-
- // End of loop.
- end.link(this);
-}
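-
-// The structure-chain test above re-validates the cached enumeration data.
-// Sketch of the same walk (accessor names are illustrative; the real code
-// reads the prototype slot and structures via the OBJECT_OFFSETOF loads):
-//
-//     static bool cachedChainStillValid(Structure* s, Structure** cachedVector)
-//     {
-//         while (*cachedVector) {
-//             JSValue proto = prototypeOf(s);    // hypothetical accessor
-//             if (proto.isNull())
-//                 return false;                  // fall back to cti_has_property
-//             s = structureOf(proto.asCell());   // hypothetical accessor
-//             if (s != *cachedVector++)
-//                 return false;                  // a prototype was replaced
-//         }
-//         return true;                           // cached key is still valid
-//     }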
-
-void JIT::emit_op_push_scope(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, cti_op_push_scope);
- stubCall.addArgument(currentInstruction[1].u.operand);
- stubCall.call(currentInstruction[1].u.operand);
-}
-
-void JIT::emit_op_pop_scope(Instruction*)
-{
- JITStubCall(this, cti_op_pop_scope).call();
-}
-
-void JIT::emit_op_to_jsnumber(Instruction* currentInstruction)
-{
- int dst = currentInstruction[1].u.operand;
- int src = currentInstruction[2].u.operand;
-
- emitLoad(src, regT1, regT0);
-
- Jump isInt32 = branch32(Equal, regT1, Imm32(JSValue::Int32Tag));
- addSlowCase(branch32(AboveOrEqual, regT1, Imm32(JSValue::EmptyValueTag)));
- isInt32.link(this);
-
- if (src != dst)
- emitStore(dst, regT1, regT0);
- map(m_bytecodeOffset + OPCODE_LENGTH(op_to_jsnumber), dst, regT1, regT0);
-}
-
-void JIT::emitSlow_op_to_jsnumber(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- int dst = currentInstruction[1].u.operand;
-
- linkSlowCase(iter);
-
- JITStubCall stubCall(this, cti_op_to_jsnumber);
- stubCall.addArgument(regT1, regT0);
- stubCall.call(dst);
-}
-
-void JIT::emit_op_push_new_scope(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, cti_op_push_new_scope);
- stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
- stubCall.addArgument(currentInstruction[3].u.operand);
- stubCall.call(currentInstruction[1].u.operand);
-}
-
-void JIT::emit_op_catch(Instruction* currentInstruction)
-{
- // cti_op_throw returns the callFrame for the handler.
- move(regT0, callFrameRegister);
-
- // Now store the exception returned by cti_op_throw.
- loadPtr(Address(stackPointerRegister, OBJECT_OFFSETOF(struct JITStackFrame, globalData)), regT3);
- load32(Address(regT3, OBJECT_OFFSETOF(JSGlobalData, exception) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
- load32(Address(regT3, OBJECT_OFFSETOF(JSGlobalData, exception) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
- store32(Imm32(JSValue().payload()), Address(regT3, OBJECT_OFFSETOF(JSGlobalData, exception) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
- store32(Imm32(JSValue().tag()), Address(regT3, OBJECT_OFFSETOF(JSGlobalData, exception) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
-
- unsigned exception = currentInstruction[1].u.operand;
- emitStore(exception, regT1, regT0);
- map(m_bytecodeOffset + OPCODE_LENGTH(op_catch), exception, regT1, regT0);
-}
-
-void JIT::emit_op_jmp_scopes(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, cti_op_jmp_scopes);
- stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
- stubCall.call();
- addJump(jump(), currentInstruction[2].u.operand);
-}
-
-void JIT::emit_op_switch_imm(Instruction* currentInstruction)
-{
- unsigned tableIndex = currentInstruction[1].u.operand;
- unsigned defaultOffset = currentInstruction[2].u.operand;
- unsigned scrutinee = currentInstruction[3].u.operand;
-
- // create jump table for switch destinations, track this switch statement.
- SimpleJumpTable* jumpTable = &m_codeBlock->immediateSwitchJumpTable(tableIndex);
- m_switches.append(SwitchRecord(jumpTable, m_bytecodeOffset, defaultOffset, SwitchRecord::Immediate));
- jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
-
- JITStubCall stubCall(this, cti_op_switch_imm);
- stubCall.addArgument(scrutinee);
- stubCall.addArgument(Imm32(tableIndex));
- stubCall.call();
- jump(regT0);
-}
-
-void JIT::emit_op_switch_char(Instruction* currentInstruction)
-{
- unsigned tableIndex = currentInstruction[1].u.operand;
- unsigned defaultOffset = currentInstruction[2].u.operand;
- unsigned scrutinee = currentInstruction[3].u.operand;
-
- // create jump table for switch destinations, track this switch statement.
- SimpleJumpTable* jumpTable = &m_codeBlock->characterSwitchJumpTable(tableIndex);
- m_switches.append(SwitchRecord(jumpTable, m_bytecodeOffset, defaultOffset, SwitchRecord::Character));
- jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
-
- JITStubCall stubCall(this, cti_op_switch_char);
- stubCall.addArgument(scrutinee);
- stubCall.addArgument(Imm32(tableIndex));
- stubCall.call();
- jump(regT0);
-}
-
-void JIT::emit_op_switch_string(Instruction* currentInstruction)
-{
- unsigned tableIndex = currentInstruction[1].u.operand;
- unsigned defaultOffset = currentInstruction[2].u.operand;
- unsigned scrutinee = currentInstruction[3].u.operand;
-
- // create jump table for switch destinations, track this switch statement.
- StringJumpTable* jumpTable = &m_codeBlock->stringSwitchJumpTable(tableIndex);
- m_switches.append(SwitchRecord(jumpTable, m_bytecodeOffset, defaultOffset));
-
- JITStubCall stubCall(this, cti_op_switch_string);
- stubCall.addArgument(scrutinee);
- stubCall.addArgument(Imm32(tableIndex));
- stubCall.call();
- jump(regT0);
-}
-
-void JIT::emit_op_throw_reference_error(Instruction* currentInstruction)
-{
- unsigned message = currentInstruction[1].u.operand;
-
- JITStubCall stubCall(this, cti_op_throw_reference_error);
- stubCall.addArgument(m_codeBlock->getConstant(message));
- stubCall.call();
-}
-
-void JIT::emit_op_throw_syntax_error(Instruction* currentInstruction)
-{
- unsigned message = currentInstruction[1].u.operand;
-
- JITStubCall stubCall(this, cti_op_throw_syntax_error);
- stubCall.addArgument(m_codeBlock->getConstant(message));
- stubCall.call();
-}
-
-void JIT::emit_op_debug(Instruction* currentInstruction)
-{
-#if ENABLE(DEBUG_WITH_BREAKPOINT)
- UNUSED_PARAM(currentInstruction);
- breakpoint();
-#else
- JITStubCall stubCall(this, cti_op_debug);
- stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
- stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
- stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
- stubCall.call();
-#endif
-}
-
-
-void JIT::emit_op_enter(Instruction*)
-{
- // Even though JIT code doesn't use them, we initialize our constant
- // registers to zap stale pointers, to avoid unnecessarily prolonging
- // object lifetime and increasing GC pressure.
- for (int i = 0; i < m_codeBlock->m_numVars; ++i)
- emitStore(i, jsUndefined());
-}
-
-void JIT::emit_op_create_activation(Instruction* currentInstruction)
-{
- unsigned activation = currentInstruction[1].u.operand;
-
- Jump activationCreated = branch32(NotEqual, tagFor(activation), Imm32(JSValue::EmptyValueTag));
- JITStubCall(this, cti_op_push_activation).call(activation);
- activationCreated.link(this);
-}
-
-void JIT::emit_op_create_arguments(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
-
- Jump argsCreated = branch32(NotEqual, tagFor(dst), Imm32(JSValue::EmptyValueTag));
-
- if (m_codeBlock->m_numParameters == 1)
- JITStubCall(this, cti_op_create_arguments_no_params).call();
- else
- JITStubCall(this, cti_op_create_arguments).call();
-
- emitStore(dst, regT1, regT0);
- emitStore(unmodifiedArgumentsRegister(dst), regT1, regT0);
-
- argsCreated.link(this);
-}
-
-void JIT::emit_op_init_lazy_reg(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
-
- emitStore(dst, JSValue());
-}
-
-void JIT::emit_op_get_callee(Instruction* currentInstruction)
-{
- int dst = currentInstruction[1].u.operand;
- emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, regT0);
- emitStoreCell(dst, regT0);
-}
-
-void JIT::emit_op_create_this(Instruction* currentInstruction)
-{
- unsigned protoRegister = currentInstruction[2].u.operand;
- emitLoad(protoRegister, regT1, regT0);
- JITStubCall stubCall(this, cti_op_create_this);
- stubCall.addArgument(regT1, regT0);
- stubCall.call(currentInstruction[1].u.operand);
-}
-
-void JIT::emit_op_convert_this(Instruction* currentInstruction)
-{
- unsigned thisRegister = currentInstruction[1].u.operand;
-
- emitLoad(thisRegister, regT1, regT0);
-
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));
-
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
- addSlowCase(branchTest8(NonZero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(NeedsThisConversion)));
-
- map(m_bytecodeOffset + OPCODE_LENGTH(op_convert_this), thisRegister, regT1, regT0);
-}
-
-void JIT::emit_op_convert_this_strict(Instruction* currentInstruction)
-{
- unsigned thisRegister = currentInstruction[1].u.operand;
-
- emitLoad(thisRegister, regT1, regT0);
-
- Jump notNull = branch32(NotEqual, regT1, Imm32(JSValue::EmptyValueTag));
- emitStore(thisRegister, jsNull());
- Jump setThis = jump();
- notNull.link(this);
- Jump isImmediate = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
- Jump notAnObject = branch8(NotEqual, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_type)), Imm32(ObjectType));
- addSlowCase(branchTest8(NonZero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(NeedsThisConversion)));
- isImmediate.link(this);
- notAnObject.link(this);
- setThis.link(this);
- map(m_bytecodeOffset + OPCODE_LENGTH(op_convert_this_strict), thisRegister, regT1, regT0);
-}
-
-void JIT::emitSlow_op_convert_this(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned thisRegister = currentInstruction[1].u.operand;
-
- linkSlowCase(iter);
- linkSlowCase(iter);
-
- JITStubCall stubCall(this, cti_op_convert_this);
- stubCall.addArgument(regT1, regT0);
- stubCall.call(thisRegister);
-}
-
-void JIT::emitSlow_op_convert_this_strict(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned thisRegister = currentInstruction[1].u.operand;
-
- linkSlowCase(iter);
-
- JITStubCall stubCall(this, cti_op_convert_this_strict);
- stubCall.addArgument(regT1, regT0);
- stubCall.call(thisRegister);
-}
-
-void JIT::emit_op_profile_will_call(Instruction* currentInstruction)
-{
- peek(regT2, OBJECT_OFFSETOF(JITStackFrame, enabledProfilerReference) / sizeof(void*));
- Jump noProfiler = branchTestPtr(Zero, Address(regT2));
-
- JITStubCall stubCall(this, cti_op_profile_will_call);
- stubCall.addArgument(currentInstruction[1].u.operand);
- stubCall.call();
- noProfiler.link(this);
-}
-
-void JIT::emit_op_profile_did_call(Instruction* currentInstruction)
-{
- peek(regT2, OBJECT_OFFSETOF(JITStackFrame, enabledProfilerReference) / sizeof(void*));
- Jump noProfiler = branchTestPtr(Zero, Address(regT2));
-
- JITStubCall stubCall(this, cti_op_profile_did_call);
- stubCall.addArgument(currentInstruction[1].u.operand);
- stubCall.call();
- noProfiler.link(this);
-}
-
-void JIT::emit_op_get_arguments_length(Instruction* currentInstruction)
-{
- int dst = currentInstruction[1].u.operand;
- int argumentsRegister = currentInstruction[2].u.operand;
- addSlowCase(branch32(NotEqual, tagFor(argumentsRegister), Imm32(JSValue::EmptyValueTag)));
- emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT0);
- sub32(Imm32(1), regT0);
- emitStoreInt32(dst, regT0);
-}
-
-void JIT::emitSlow_op_get_arguments_length(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- linkSlowCase(iter);
- int dst = currentInstruction[1].u.operand;
- int base = currentInstruction[2].u.operand;
- int ident = currentInstruction[3].u.operand;
-
- JITStubCall stubCall(this, cti_op_get_by_id_generic);
- stubCall.addArgument(base);
- stubCall.addArgument(ImmPtr(&(m_codeBlock->identifier(ident))));
- stubCall.call(dst);
-}
-
-void JIT::emit_op_get_argument_by_val(Instruction* currentInstruction)
-{
- int dst = currentInstruction[1].u.operand;
- int argumentsRegister = currentInstruction[2].u.operand;
- int property = currentInstruction[3].u.operand;
- addSlowCase(branch32(NotEqual, tagFor(argumentsRegister), Imm32(JSValue::EmptyValueTag)));
- emitLoad(property, regT1, regT2);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- add32(Imm32(1), regT2);
- // regT2 now contains the integer index of the argument we want, including the 'this' slot
- emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT3);
- addSlowCase(branch32(AboveOrEqual, regT2, regT3));
-
- Jump skipOutofLineParams;
- int numArgs = m_codeBlock->m_numParameters;
- if (numArgs) {
- Jump notInInPlaceArgs = branch32(AboveOrEqual, regT2, Imm32(numArgs));
- addPtr(Imm32(static_cast<unsigned>(-(RegisterFile::CallFrameHeaderSize + numArgs) * sizeof(Register))), callFrameRegister, regT1);
- loadPtr(BaseIndex(regT1, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
- loadPtr(BaseIndex(regT1, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
- skipOutofLineParams = jump();
- notInInPlaceArgs.link(this);
- }
-
- addPtr(Imm32(static_cast<unsigned>(-(RegisterFile::CallFrameHeaderSize + numArgs) * sizeof(Register))), callFrameRegister, regT1);
- mul32(Imm32(sizeof(Register)), regT3, regT3);
- subPtr(regT3, regT1);
- loadPtr(BaseIndex(regT1, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
- loadPtr(BaseIndex(regT1, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
- if (numArgs)
- skipOutofLineParams.link(this);
- emitStore(dst, regT1, regT0);
-}
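-
-// The two address computations above find argument i (regT2, which counts
-// the 'this' slot) either in the frame's in-place parameter slots or in the
-// out-of-line copies made when more arguments were passed than declared.
-// Sketch of the arithmetic (pointer types are illustrative):
-//
-//     Register* inPlace = callFrame - RegisterFile::CallFrameHeaderSize - numParams;
-//     if (i < numParams)
-//         return inPlace[i];                      // declared parameter slot
-//     Register* copies = inPlace - argumentCount; // arguments copied below the frame
-//     return copies[i];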
-
-void JIT::emitSlow_op_get_argument_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned arguments = currentInstruction[2].u.operand;
- unsigned property = currentInstruction[3].u.operand;
-
- linkSlowCase(iter);
- Jump skipArgumentsCreation = jump();
-
- linkSlowCase(iter);
- linkSlowCase(iter);
- if (m_codeBlock->m_numParameters == 1)
- JITStubCall(this, cti_op_create_arguments_no_params).call();
- else
- JITStubCall(this, cti_op_create_arguments).call();
-
- emitStore(arguments, regT1, regT0);
- emitStore(unmodifiedArgumentsRegister(arguments), regT1, regT0);
-
- skipArgumentsCreation.link(this);
- JITStubCall stubCall(this, cti_op_get_by_val);
- stubCall.addArgument(arguments);
- stubCall.addArgument(property);
- stubCall.call(dst);
-}
-
-#if ENABLE(JIT_USE_SOFT_MODULO)
-void JIT::softModulo()
-{
- push(regT1);
- push(regT3);
- move(regT2, regT3);
- move(regT0, regT2);
- move(Imm32(0), regT1);
-
- // Check for negative result remainder
- Jump positiveRegT3 = branch32(GreaterThanOrEqual, regT3, Imm32(0));
- neg32(regT3);
- xor32(Imm32(1), regT1);
- positiveRegT3.link(this);
-
- Jump positiveRegT2 = branch32(GreaterThanOrEqual, regT2, Imm32(0));
- neg32(regT2);
- xor32(Imm32(2), regT1);
- positiveRegT2.link(this);
-
- // Save the condition for negative remainder
- push(regT1);
-
- Jump exitBranch = branch32(LessThan, regT2, regT3);
-
- // Power of two fast case
- move(regT3, regT0);
- sub32(Imm32(1), regT0);
- Jump powerOfTwo = branchTest32(NotEqual, regT0, regT3);
- and32(regT0, regT2);
- powerOfTwo.link(this);
-
- and32(regT3, regT0);
-
- Jump exitBranch2 = branchTest32(Zero, regT0);
-
- countLeadingZeros32(regT2, regT0);
- countLeadingZeros32(regT3, regT1);
- sub32(regT0, regT1);
-
- Jump useFullTable = branch32(Equal, regT1, Imm32(31));
-
- neg32(regT1);
- add32(Imm32(31), regT1);
-
- int elementSizeByShift = -1;
-#if CPU(ARM)
- elementSizeByShift = 3;
-#else
-#error "JIT_OPTIMIZE_MOD not yet supported on this platform."
-#endif
- relativeTableJump(regT1, elementSizeByShift);
-
- useFullTable.link(this);
- // Modulo table
- for (int i = 31; i > 0; --i) {
-#if CPU(ARM_TRADITIONAL)
- m_assembler.cmp_r(regT2, m_assembler.lsl(regT3, i));
- m_assembler.sub_r(regT2, regT2, m_assembler.lsl(regT3, i), ARMAssembler::CS);
-#elif CPU(ARM_THUMB2)
- ShiftTypeAndAmount shift(SRType_LSL, i);
- m_assembler.sub_S(regT1, regT2, regT3, shift);
- m_assembler.it(ARMv7Assembler::ConditionCS);
- m_assembler.mov(regT2, regT1);
-#else
-#error "JIT_OPTIMIZE_MOD not yet supported on this platform."
-#endif
- }
-
- Jump lower = branch32(Below, regT2, regT3);
- sub32(regT3, regT2);
- lower.link(this);
-
- exitBranch.link(this);
- exitBranch2.link(this);
-
- // Check for negative remainder
- pop(regT1);
- Jump positiveResult = branch32(Equal, regT1, Imm32(0));
- neg32(regT2);
- positiveResult.link(this);
-
- move(regT2, regT0);
-
- pop(regT3);
- pop(regT1);
- ret();
-}
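-
-// The table above performs a classic shift-and-subtract (restoring) modulo,
-// conditionally subtracting shifted copies of the divisor from the high bit
-// down. Equivalent C++ sketch for non-negative operands and d != 0 (the sign
-// fix-ups happen before and after, as in the code above):
-//
-//     static uint32_t shiftSubtractMod(uint32_t n, uint32_t d)
-//     {
-//         for (int i = 31; i >= 0; --i) {
-//             if ((static_cast<uint64_t>(d) << i) <= n)
-//                 n -= d << i;
-//         }
-//         return n; // 0 <= n < d
-//     }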
-#endif // ENABLE(JIT_USE_SOFT_MODULO)
-
-} // namespace JSC
-
-#endif // USE(JSVALUE32_64)
-#endif // ENABLE(JIT)
diff --git a/JavaScriptCore/jit/JITPropertyAccess.cpp b/JavaScriptCore/jit/JITPropertyAccess.cpp
deleted file mode 100644
index 1b95eec..0000000
--- a/JavaScriptCore/jit/JITPropertyAccess.cpp
+++ /dev/null
@@ -1,1101 +0,0 @@
-/*
- * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-
-#if ENABLE(JIT)
-#if USE(JSVALUE64)
-#include "JIT.h"
-
-#include "CodeBlock.h"
-#include "GetterSetter.h"
-#include "JITInlineMethods.h"
-#include "JITStubCall.h"
-#include "JSArray.h"
-#include "JSFunction.h"
-#include "JSPropertyNameIterator.h"
-#include "Interpreter.h"
-#include "LinkBuffer.h"
-#include "RepatchBuffer.h"
-#include "ResultType.h"
-#include "SamplingTool.h"
-
-#ifndef NDEBUG
-#include <stdio.h>
-#endif
-
-using namespace std;
-
-namespace JSC {
-
-JIT::CodePtr JIT::stringGetByValStubGenerator(JSGlobalData* globalData, ExecutablePool* pool)
-{
- JSInterfaceJIT jit;
- JumpList failures;
- failures.append(jit.branchPtr(NotEqual, Address(regT0), ImmPtr(globalData->jsStringVPtr)));
- failures.append(jit.branchTest32(NonZero, Address(regT0, OBJECT_OFFSETOF(JSString, m_fiberCount))));
-
- // Load string length to regT2, and start the process of loading the data pointer into regT0
- jit.load32(Address(regT0, ThunkHelpers::jsStringLengthOffset()), regT2);
- jit.loadPtr(Address(regT0, ThunkHelpers::jsStringValueOffset()), regT0);
- jit.loadPtr(Address(regT0, ThunkHelpers::stringImplDataOffset()), regT0);
-
- // Do an unsigned compare to simultaneously filter negative indices as well as indices that are too large
- failures.append(jit.branch32(AboveOrEqual, regT1, regT2));
-
- // Load the character
- jit.load16(BaseIndex(regT0, regT1, TimesTwo, 0), regT0);
-
- failures.append(jit.branch32(AboveOrEqual, regT0, Imm32(0x100)));
- jit.move(ImmPtr(globalData->smallStrings.singleCharacterStrings()), regT1);
- jit.loadPtr(BaseIndex(regT1, regT0, ScalePtr, 0), regT0);
- jit.ret();
-
- failures.link(&jit);
- jit.move(Imm32(0), regT0);
- jit.ret();
-
- LinkBuffer patchBuffer(&jit, pool, 0);
- return patchBuffer.finalizeCode().m_code;
-}
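-
-// The stub above only succeeds for flat (zero m_fiberCount) strings and
-// UTF-16 code units below 0x100, which have preallocated single-character
-// JSStrings. Its contract, sketched (a 0 return tells the caller to take the
-// slow path; isRope is a hypothetical helper for the m_fiberCount check):
-//
-//     if (isRope(string) || index >= length || c >= 0x100)
-//         return 0;
-//     return globalData->smallStrings.singleCharacterStrings()[c];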
-
-void JIT::emit_op_get_by_val(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned base = currentInstruction[2].u.operand;
- unsigned property = currentInstruction[3].u.operand;
-
- emitGetVirtualRegisters(base, regT0, property, regT1);
- emitJumpSlowCaseIfNotImmediateInteger(regT1);
-
- // This is technically incorrect - we're zero-extending an int32. On the hot path this doesn't matter.
- // We check the value as if it were a uint32 against m_vectorLength - which will always fail if the
- // number was negative, since m_vectorLength is always less than intmax (the total allocation size is
- // always less than 4Gb). As such, zero-extending will have been correct (and extending the value to
- // 64 bits is necessary, since it's used in the address calculation). We zero-extend rather than
- // sign-extend since it makes it easier to re-tag the value in the slow case.
- zeroExtend32ToPtr(regT1, regT1);
-
- emitJumpSlowCaseIfNotJSCell(regT0, base);
- addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));
-
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT2);
- addSlowCase(branch32(AboveOrEqual, regT1, Address(regT0, OBJECT_OFFSETOF(JSArray, m_vectorLength))));
-
- loadPtr(BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), regT0);
- addSlowCase(branchTestPtr(Zero, regT0));
-
- emitPutVirtualRegister(dst);
-}
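-
-// The unsigned bounds branch above folds two checks into one: reinterpreted
-// as a uint32, any negative int32 index becomes a value >= 2^31, which always
-// exceeds m_vectorLength. Worked example:
-//
-//     int32_t index = -1;
-//     uint32_t asUnsigned = static_cast<uint32_t>(index); // 0xffffffff
-//     // asUnsigned >= m_vectorLength, so the bounds check also rejects it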
-
-void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned base = currentInstruction[2].u.operand;
- unsigned property = currentInstruction[3].u.operand;
-
- linkSlowCase(iter); // property int32 check
- linkSlowCaseIfNotJSCell(iter, base); // base cell check
- Jump nonCell = jump();
- linkSlowCase(iter); // base array check
- Jump notString = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr));
- emitNakedCall(m_globalData->getCTIStub(stringGetByValStubGenerator));
- Jump failed = branchTestPtr(Zero, regT0);
- emitPutVirtualRegister(dst, regT0);
- emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_get_by_val));
- failed.link(this);
- notString.link(this);
- nonCell.link(this);
-
- linkSlowCase(iter); // vector length check
- linkSlowCase(iter); // empty value
-
- JITStubCall stubCall(this, cti_op_get_by_val);
- stubCall.addArgument(base, regT2);
- stubCall.addArgument(property, regT2);
- stubCall.call(dst);
-}
-
-void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, RegisterID structure, RegisterID offset, RegisterID scratch)
-{
- ASSERT(sizeof(((Structure*)0)->m_propertyStorageCapacity) == sizeof(int32_t));
- ASSERT(sizeof(JSObject::inlineStorageCapacity) == sizeof(int32_t));
-
- Jump notUsingInlineStorage = branch32(NotEqual, Address(structure, OBJECT_OFFSETOF(Structure, m_propertyStorageCapacity)), Imm32(JSObject::inlineStorageCapacity));
- loadPtr(BaseIndex(base, offset, ScalePtr, OBJECT_OFFSETOF(JSObject, m_inlineStorage)), result);
- Jump finishedLoad = jump();
- notUsingInlineStorage.link(this);
- loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), scratch);
- loadPtr(BaseIndex(scratch, offset, ScalePtr, 0), result);
- finishedLoad.link(this);
-}
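-
-// Property storage starts inline in the object and moves to a separately
-// allocated array once the structure's capacity outgrows
-// JSObject::inlineStorageCapacity. The branch above selects between the two,
-// roughly:
-//
-//     JSValue* storage = (capacity == JSObject::inlineStorageCapacity)
-//         ? object->m_inlineStorage    // fields named as in the loads above
-//         : object->m_externalStorage;
-//     result = storage[offset];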
-
-void JIT::emit_op_get_by_pname(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned base = currentInstruction[2].u.operand;
- unsigned property = currentInstruction[3].u.operand;
- unsigned expected = currentInstruction[4].u.operand;
- unsigned iter = currentInstruction[5].u.operand;
- unsigned i = currentInstruction[6].u.operand;
-
- emitGetVirtualRegister(property, regT0);
- addSlowCase(branchPtr(NotEqual, regT0, addressFor(expected)));
- emitGetVirtualRegisters(base, regT0, iter, regT1);
- emitJumpSlowCaseIfNotJSCell(regT0, base);
-
- // Test base's structure
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
- addSlowCase(branchPtr(NotEqual, regT2, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructure))));
- load32(addressFor(i), regT3);
- sub32(Imm32(1), regT3);
- addSlowCase(branch32(AboveOrEqual, regT3, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_numCacheableSlots))));
- compileGetDirectOffset(regT0, regT0, regT2, regT3, regT1);
-
- emitPutVirtualRegister(dst, regT0);
-}
-
-void JIT::emitSlow_op_get_by_pname(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned base = currentInstruction[2].u.operand;
- unsigned property = currentInstruction[3].u.operand;
-
- linkSlowCase(iter);
- linkSlowCaseIfNotJSCell(iter, base);
- linkSlowCase(iter);
- linkSlowCase(iter);
-
- JITStubCall stubCall(this, cti_op_get_by_val);
- stubCall.addArgument(base, regT2);
- stubCall.addArgument(property, regT2);
- stubCall.call(dst);
-}
-
-void JIT::emit_op_put_by_val(Instruction* currentInstruction)
-{
- unsigned base = currentInstruction[1].u.operand;
- unsigned property = currentInstruction[2].u.operand;
- unsigned value = currentInstruction[3].u.operand;
-
- emitGetVirtualRegisters(base, regT0, property, regT1);
- emitJumpSlowCaseIfNotImmediateInteger(regT1);
- // See comment in op_get_by_val.
- zeroExtend32ToPtr(regT1, regT1);
- emitJumpSlowCaseIfNotJSCell(regT0, base);
- addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));
- addSlowCase(branch32(AboveOrEqual, regT1, Address(regT0, OBJECT_OFFSETOF(JSArray, m_vectorLength))));
-
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT2);
- Jump empty = branchTestPtr(Zero, BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
-
- Label storeResult(this);
- emitGetVirtualRegister(value, regT0);
- storePtr(regT0, BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
- Jump end = jump();
-
- empty.link(this);
- add32(Imm32(1), Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
- branch32(Below, regT1, Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length))).linkTo(storeResult, this);
-
- move(regT1, regT0);
- add32(Imm32(1), regT0);
- store32(regT0, Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length)));
- jump().linkTo(storeResult, this);
-
- end.link(this);
-}
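-
-// Stores into a hole bump m_numValuesInVector, and a store at or past the
-// current length also grows m_length before the value is written. Sketch of
-// the control flow above:
-//
-//     if (!vector[i]) {                    // writing into a hole
-//         ++storage->m_numValuesInVector;
-//         if (i >= storage->m_length)
-//             storage->m_length = i + 1;   // i is already vector-bounds checked
-//     }
-//     vector[i] = value;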
-
-void JIT::emit_op_put_by_index(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, cti_op_put_by_index);
- stubCall.addArgument(currentInstruction[1].u.operand, regT2);
- stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
- stubCall.addArgument(currentInstruction[3].u.operand, regT2);
- stubCall.call();
-}
-
-void JIT::emit_op_put_getter(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, cti_op_put_getter);
- stubCall.addArgument(currentInstruction[1].u.operand, regT2);
- stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
- stubCall.addArgument(currentInstruction[3].u.operand, regT2);
- stubCall.call();
-}
-
-void JIT::emit_op_put_setter(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, cti_op_put_setter);
- stubCall.addArgument(currentInstruction[1].u.operand, regT2);
- stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
- stubCall.addArgument(currentInstruction[3].u.operand, regT2);
- stubCall.call();
-}
-
-void JIT::emit_op_del_by_id(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, cti_op_del_by_id);
- stubCall.addArgument(currentInstruction[2].u.operand, regT2);
- stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
- stubCall.call(currentInstruction[1].u.operand);
-}
-
-
-#if !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
-
-/* ------------------------------ BEGIN: !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
-
-// Treat these as nops - the call will be handled as a regular get_by_id/op_call pair.
-void JIT::emit_op_method_check(Instruction*) {}
-void JIT::emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&) { ASSERT_NOT_REACHED(); }
-#if ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
-#error "JIT_OPTIMIZE_METHOD_CALLS requires JIT_OPTIMIZE_PROPERTY_ACCESS"
-#endif
-
-void JIT::emit_op_get_by_id(Instruction* currentInstruction)
-{
- unsigned resultVReg = currentInstruction[1].u.operand;
- unsigned baseVReg = currentInstruction[2].u.operand;
- Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
-
- emitGetVirtualRegister(baseVReg, regT0);
- JITStubCall stubCall(this, cti_op_get_by_id_generic);
- stubCall.addArgument(regT0);
- stubCall.addArgument(ImmPtr(ident));
- stubCall.call(resultVReg);
-
- m_propertyAccessInstructionIndex++;
-}
-
-void JIT::emitSlow_op_get_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&)
-{
- ASSERT_NOT_REACHED();
-}
-
-void JIT::emit_op_put_by_id(Instruction* currentInstruction)
-{
- unsigned baseVReg = currentInstruction[1].u.operand;
- Identifier* ident = &(m_codeBlock->identifier(currentInstruction[2].u.operand));
- unsigned valueVReg = currentInstruction[3].u.operand;
- unsigned direct = currentInstruction[8].u.operand;
-
- emitGetVirtualRegisters(baseVReg, regT0, valueVReg, regT1);
-
- JITStubCall stubCall(this, direct ? cti_op_put_by_id_direct_generic : cti_op_put_by_id_generic);
- stubCall.addArgument(regT0);
- stubCall.addArgument(ImmPtr(ident));
- stubCall.addArgument(regT1);
- stubCall.call();
-
- m_propertyAccessInstructionIndex++;
-}
-
-void JIT::emitSlow_op_put_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&)
-{
- ASSERT_NOT_REACHED();
-}
-
-#else // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
-
-/* ------------------------------ BEGIN: ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
-
-#if ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
-
-void JIT::emit_op_method_check(Instruction* currentInstruction)
-{
- // Assert that the following instruction is a get_by_id.
- ASSERT(m_interpreter->getOpcodeID((currentInstruction + OPCODE_LENGTH(op_method_check))->u.opcode) == op_get_by_id);
-
- currentInstruction += OPCODE_LENGTH(op_method_check);
- unsigned resultVReg = currentInstruction[1].u.operand;
- unsigned baseVReg = currentInstruction[2].u.operand;
- Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
-
- emitGetVirtualRegister(baseVReg, regT0);
-
- // Do the method check - check the object & its prototype's structure inline (this is the common case).
- m_methodCallCompilationInfo.append(MethodCallCompilationInfo(m_propertyAccessInstructionIndex));
- MethodCallCompilationInfo& info = m_methodCallCompilationInfo.last();
-
- Jump notCell = emitJumpIfNotJSCell(regT0);
-
- BEGIN_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck);
-
- Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), info.structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
- DataLabelPtr protoStructureToCompare, protoObj = moveWithPatch(ImmPtr(0), regT1);
- Jump protoStructureCheck = branchPtrWithPatch(NotEqual, Address(regT1, OBJECT_OFFSETOF(JSCell, m_structure)), protoStructureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
-
- // This will be relinked to load the function without doing a load.
- DataLabelPtr putFunction = moveWithPatch(ImmPtr(0), regT0);
-
- END_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck);
-
- Jump match = jump();
-
- ASSERT_JIT_OFFSET(differenceBetween(info.structureToCompare, protoObj), patchOffsetMethodCheckProtoObj);
- ASSERT_JIT_OFFSET(differenceBetween(info.structureToCompare, protoStructureToCompare), patchOffsetMethodCheckProtoStruct);
- ASSERT_JIT_OFFSET(differenceBetween(info.structureToCompare, putFunction), patchOffsetMethodCheckPutFunction);
-
- // Link the failure cases here.
- notCell.link(this);
- structureCheck.link(this);
- protoStructureCheck.link(this);
-
- // Do a regular(ish) get_by_id (the slow case will be linked to
- // cti_op_get_by_id_method_check instead of cti_op_get_by_id).
- compileGetByIdHotPath(resultVReg, baseVReg, ident, m_propertyAccessInstructionIndex++);
-
- match.link(this);
- emitPutVirtualRegister(resultVReg);
-
- // We've already generated the following get_by_id, so make sure it's skipped over.
- m_bytecodeOffset += OPCODE_LENGTH(op_get_by_id);
-}
-
-void JIT::emitSlow_op_method_check(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- currentInstruction += OPCODE_LENGTH(op_method_check);
- unsigned resultVReg = currentInstruction[1].u.operand;
- unsigned baseVReg = currentInstruction[2].u.operand;
- Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
-
- compileGetByIdSlowCase(resultVReg, baseVReg, ident, iter, true);
-
- // We've already generated the following get_by_id, so make sure it's skipped over.
- m_bytecodeOffset += OPCODE_LENGTH(op_get_by_id);
-}
-
-#else //!ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
-
-// Treat these as nops - the call will be handled as a regular get_by_id/op_call pair.
-void JIT::emit_op_method_check(Instruction*) {}
-void JIT::emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&) { ASSERT_NOT_REACHED(); }
-
-#endif
-
-void JIT::emit_op_get_by_id(Instruction* currentInstruction)
-{
- unsigned resultVReg = currentInstruction[1].u.operand;
- unsigned baseVReg = currentInstruction[2].u.operand;
- Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
-
- emitGetVirtualRegister(baseVReg, regT0);
- compileGetByIdHotPath(resultVReg, baseVReg, ident, m_propertyAccessInstructionIndex++);
- emitPutVirtualRegister(resultVReg);
-}
-
-void JIT::compileGetByIdHotPath(int, int baseVReg, Identifier*, unsigned propertyAccessInstructionIndex)
-{
- // As for put_by_id, get_by_id requires the offset of the Structure and the offset of the access to be patched.
- // Additionally, for get_by_id we need to patch the offset of the branch to the slow case (we patch this to jump
- // to the array-length / prototype access trampolines), and finally we also record the property-map access offset
- // as a label to jump back to if one of these trampolines finds a match.
-
- emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);
-
- BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);
-
- Label hotPathBegin(this);
- m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;
-
- DataLabelPtr structureToCompare;
- Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
- addSlowCase(structureCheck);
- ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, structureToCompare), patchOffsetGetByIdStructure);
- ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, structureCheck), patchOffsetGetByIdBranchToSlowCase);
-
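- // Plant a load of the external property storage. If the cached property turns out
- // to live in inline storage, the patching code converts this load to a LEA (see
- // patchGetByIdSelf's repatchLoadPtrToLEA call).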
- Label externalLoad = loadPtrWithPatchToLEA(Address(regT0, OBJECT_OFFSETOF(JSObject, m_externalStorage)), regT0);
- Label externalLoadComplete(this);
- ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, externalLoad), patchOffsetGetByIdExternalLoad);
- ASSERT_JIT_OFFSET(differenceBetween(externalLoad, externalLoadComplete), patchLengthGetByIdExternalLoad);
-
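- // The displacement here is a placeholder (patchGetByIdDefaultOffset); patchGetByIdSelf
- // overwrites it with the real property offset once the access has been cached.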
- DataLabel32 displacementLabel = loadPtrWithAddressOffsetPatch(Address(regT0, patchGetByIdDefaultOffset), regT0);
- ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, displacementLabel), patchOffsetGetByIdPropertyMapOffset);
-
- Label putResult(this);
-
- END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);
-
- ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, putResult), patchOffsetGetByIdPutResult);
-}
-
-void JIT::emitSlow_op_get_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned resultVReg = currentInstruction[1].u.operand;
- unsigned baseVReg = currentInstruction[2].u.operand;
- Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
-
- compileGetByIdSlowCase(resultVReg, baseVReg, ident, iter, false);
-}
-
-void JIT::compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, bool isMethodCheck)
-{
- // As for the hot path of get_by_id, above, we ensure that we can use an architecture-specific offset
- // so that we only need to track one pointer into the slow case code - we track a pointer to the location
- // of the call (which we can use to look up the patch information), but should an array-length or
- // prototype access trampoline fail we want to bail out back to here. To do so we can subtract back
- // the distance from the call to the head of the slow case.
-
- linkSlowCaseIfNotJSCell(iter, baseVReg);
- linkSlowCase(iter);
-
- BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase);
-
-#ifndef NDEBUG
- Label coldPathBegin(this);
-#endif
- JITStubCall stubCall(this, isMethodCheck ? cti_op_get_by_id_method_check : cti_op_get_by_id);
- stubCall.addArgument(regT0);
- stubCall.addArgument(ImmPtr(ident));
- Call call = stubCall.call(resultVReg);
-
- END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase);
-
- ASSERT_JIT_OFFSET(differenceBetween(coldPathBegin, call), patchOffsetGetByIdSlowCaseCall);
-
- // Track the location of the call; this will be used to recover patch information.
- m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].callReturnLocation = call;
- m_propertyAccessInstructionIndex++;
-}
-
-void JIT::emit_op_put_by_id(Instruction* currentInstruction)
-{
- unsigned baseVReg = currentInstruction[1].u.operand;
- unsigned valueVReg = currentInstruction[3].u.operand;
-
- unsigned propertyAccessInstructionIndex = m_propertyAccessInstructionIndex++;
-
- // In order to be able to patch both the Structure and the object offset, we store a single pointer,
- // 'hotPathBegin', to just after the point at which the arguments have been loaded into registers, and we
- // generate code such that the Structure & offset are always at the same distance from it.
-
- emitGetVirtualRegisters(baseVReg, regT0, valueVReg, regT1);
-
- // Jump to a slow case if either the base object is an immediate, or if the Structure does not match.
- emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);
-
- BEGIN_UNINTERRUPTED_SEQUENCE(sequencePutById);
-
- Label hotPathBegin(this);
- m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;
-
- // It is important that the following instruction plants a 32-bit immediate, in order that it can be patched over.
- DataLabelPtr structureToCompare;
- addSlowCase(branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))));
- ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, structureToCompare), patchOffsetPutByIdStructure);
-
- // Plant a load from a bogus offset in the object's property map; we will patch this later, if it is to be used.
- Label externalLoad = loadPtrWithPatchToLEA(Address(regT0, OBJECT_OFFSETOF(JSObject, m_externalStorage)), regT0);
- Label externalLoadComplete(this);
- ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, externalLoad), patchOffsetPutByIdExternalLoad);
- ASSERT_JIT_OFFSET(differenceBetween(externalLoad, externalLoadComplete), patchLengthPutByIdExternalLoad);
-
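- // As with get_by_id, the displacement here is a placeholder that patchPutByIdReplace
- // fills in with the real property offset.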
- DataLabel32 displacementLabel = storePtrWithAddressOffsetPatch(regT1, Address(regT0, patchGetByIdDefaultOffset));
-
- END_UNINTERRUPTED_SEQUENCE(sequencePutById);
-
- ASSERT_JIT_OFFSET(differenceBetween(hotPathBegin, displacementLabel), patchOffsetPutByIdPropertyMapOffset);
-}
-
-void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned baseVReg = currentInstruction[1].u.operand;
- Identifier* ident = &(m_codeBlock->identifier(currentInstruction[2].u.operand));
- unsigned direct = currentInstruction[8].u.operand;
-
- unsigned propertyAccessInstructionIndex = m_propertyAccessInstructionIndex++;
-
- linkSlowCaseIfNotJSCell(iter, baseVReg);
- linkSlowCase(iter);
-
- JITStubCall stubCall(this, direct ? cti_op_put_by_id_direct : cti_op_put_by_id);
- stubCall.addArgument(regT0);
- stubCall.addArgument(ImmPtr(ident));
- stubCall.addArgument(regT1);
- Call call = stubCall.call();
-
- // Track the location of the call; this will be used to recover patch information.
- m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].callReturnLocation = call;
-}
-
-// Compile a store into an object's property storage. May overwrite base.
-void JIT::compilePutDirectOffset(RegisterID base, RegisterID value, Structure* structure, size_t cachedOffset)
-{
- int offset = cachedOffset * sizeof(JSValue);
- if (structure->isUsingInlineStorage())
- offset += OBJECT_OFFSETOF(JSObject, m_inlineStorage);
- else
- loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), base);
- storePtr(value, Address(base, offset));
-}
-
-// Compile a load from an object's property storage. May overwrite base.
-void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, Structure* structure, size_t cachedOffset)
-{
- int offset = cachedOffset * sizeof(JSValue);
- if (structure->isUsingInlineStorage())
- offset += OBJECT_OFFSETOF(JSObject, m_inlineStorage);
- else
- loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), base);
- loadPtr(Address(base, offset), result);
-}
-
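-// Compile a load from a known object's property storage. Because the object pointer is a
-// compile-time constant, the storage can be addressed directly rather than via a base register.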
-void JIT::compileGetDirectOffset(JSObject* base, RegisterID temp, RegisterID result, size_t cachedOffset)
-{
- if (base->isUsingInlineStorage())
- loadPtr(static_cast<void*>(&base->m_inlineStorage[cachedOffset]), result);
- else {
- PropertyStorage* protoPropertyStorage = &base->m_externalStorage;
- loadPtr(static_cast<void*>(protoPropertyStorage), temp);
- loadPtr(Address(temp, cachedOffset * sizeof(JSValue)), result);
- }
-}
-
-void JIT::testPrototype(JSValue prototype, JumpList& failureCases)
-{
- if (prototype.isNull())
- return;
-
- // We have a special case for X86_64 here because X86 instructions that take immediate values
- // only take 32-bit immediate values, whereas the pointer constants we are using here are 64-bit
- // values. In the non-X86_64 case, the generated code is slightly more efficient because it uses
- // two fewer instructions and doesn't require any scratch registers.
-#if CPU(X86_64)
- move(ImmPtr(prototype.asCell()->structure()), regT3);
- failureCases.append(branchPtr(NotEqual, AbsoluteAddress(&prototype.asCell()->m_structure), regT3));
-#else
- failureCases.append(branchPtr(NotEqual, AbsoluteAddress(&prototype.asCell()->m_structure), ImmPtr(prototype.asCell()->structure())));
-#endif
-}
-
-void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress, bool direct)
-{
- JumpList failureCases;
- // Check that regT0 is an object of the right Structure.
- failureCases.append(emitJumpIfNotJSCell(regT0));
- failureCases.append(branchPtr(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), ImmPtr(oldStructure)));
- testPrototype(oldStructure->storedPrototype(), failureCases);
-
- // If this is not a direct put, verify that nothing in the prototype chain has a setter for this property.
- if (!direct) {
- for (RefPtr<Structure>* it = chain->head(); *it; ++it)
- testPrototype((*it)->storedPrototype(), failureCases);
- }
-
- Call callTarget;
-
- // emit a call only if storage realloc is needed
- bool willNeedStorageRealloc = oldStructure->propertyStorageCapacity() != newStructure->propertyStorageCapacity();
- if (willNeedStorageRealloc) {
- // This trampoline was called like a JIT stub; before we can call again we need to
- // remove the return address from the stack, to prevent the stack from becoming misaligned.
- preserveReturnAddressAfterCall(regT3);
-
- JITStubCall stubCall(this, cti_op_put_by_id_transition_realloc);
- stubCall.skipArgument(); // base
- stubCall.skipArgument(); // ident
- stubCall.skipArgument(); // value
- stubCall.addArgument(Imm32(oldStructure->propertyStorageCapacity()));
- stubCall.addArgument(Imm32(newStructure->propertyStorageCapacity()));
- stubCall.call(regT0);
- emitGetJITStubArg(2, regT1);
-
- restoreReturnAddressBeforeReturn(regT3);
- }
-
- // Planting the decrement directly assumes m_refCount can be adjusted with a simple memory op;
- // this is safe because the CodeBlock ensures oldStructure->m_refCount > 0.
- sub32(Imm32(1), AbsoluteAddress(oldStructure->addressOfCount()));
- add32(Imm32(1), AbsoluteAddress(newStructure->addressOfCount()));
- storePtr(ImmPtr(newStructure), Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)));
-
- // write the value
- compilePutDirectOffset(regT0, regT1, newStructure, cachedOffset);
-
- ret();
-
- ASSERT(!failureCases.empty());
- failureCases.link(this);
- restoreArgumentReferenceForTrampoline();
- Call failureCall = tailRecursiveCall();
-
- LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
-
- patchBuffer.link(failureCall, FunctionPtr(direct ? cti_op_put_by_id_direct_fail : cti_op_put_by_id_fail));
-
- if (willNeedStorageRealloc) {
- ASSERT(m_calls.size() == 1);
- patchBuffer.link(m_calls[0].from, FunctionPtr(cti_op_put_by_id_transition_realloc));
- }
-
- CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
- stubInfo->stubRoutine = entryLabel;
- RepatchBuffer repatchBuffer(m_codeBlock);
- repatchBuffer.relinkCallerToTrampoline(returnAddress, entryLabel);
-}
-
-void JIT::patchGetByIdSelf(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress)
-{
- RepatchBuffer repatchBuffer(codeBlock);
-
- // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
- // Should probably go to cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
- repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_self_fail));
-
- int offset = sizeof(JSValue) * cachedOffset;
-
- // If we're patching to use inline storage, convert the initial load to a LEA; this avoids the extra load
- // and makes the subsequent load's offset automatically correct.
- if (structure->isUsingInlineStorage())
- repatchBuffer.repatchLoadPtrToLEA(stubInfo->hotPathBegin.instructionAtOffset(patchOffsetGetByIdExternalLoad));
-
- // Patch the offset into the property map to load from, then patch the Structure to look for.
- repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetGetByIdStructure), structure);
- repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetGetByIdPropertyMapOffset), offset);
-}
-
-void JIT::patchMethodCallProto(CodeBlock* codeBlock, MethodCallLinkInfo& methodCallLinkInfo, JSFunction* callee, Structure* structure, JSObject* proto, ReturnAddressPtr returnAddress)
-{
- RepatchBuffer repatchBuffer(codeBlock);
-
- ASSERT(!methodCallLinkInfo.cachedStructure);
- methodCallLinkInfo.cachedStructure = structure;
- structure->ref();
-
- Structure* prototypeStructure = proto->structure();
- methodCallLinkInfo.cachedPrototypeStructure = prototypeStructure;
- prototypeStructure->ref();
-
- repatchBuffer.repatch(methodCallLinkInfo.structureLabel, structure);
- repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoObj), proto);
- repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoStruct), prototypeStructure);
- repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckPutFunction), callee);
-
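- // Relink the slow-path call to plain cti_op_get_by_id; the method check has now been
- // patched, so a future miss should behave like an ordinary get_by_id rather than
- // attempting to patch again.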
- repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id));
-}
-
-void JIT::patchPutByIdReplace(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress, bool direct)
-{
- RepatchBuffer repatchBuffer(codeBlock);
-
- // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
- // Should probably go to cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
- repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(direct ? cti_op_put_by_id_direct_generic : cti_op_put_by_id_generic));
-
- int offset = sizeof(JSValue) * cachedOffset;
-
- // If we're patching to use inline storage, convert the initial load to a LEA; this avoids the extra load
- // and makes the subsequent load's offset automatically correct.
- if (structure->isUsingInlineStorage())
- repatchBuffer.repatchLoadPtrToLEA(stubInfo->hotPathBegin.instructionAtOffset(patchOffsetPutByIdExternalLoad));
-
- // Patch the offset into the property map to load from, then patch the Structure to look for.
- repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetPutByIdStructure), structure);
- repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset), offset);
-}
-
-void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress)
-{
- StructureStubInfo* stubInfo = &m_codeBlock->getStubInfo(returnAddress);
-
- // Check that regT0 is an array.
- Jump failureCases1 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr));
-
- // Checks out okay! - get the length from the storage
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT3);
- load32(Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_length)), regT2);
- Jump failureCases2 = branch32(Above, regT2, Imm32(JSImmediate::maxImmediateInt));
-
- emitFastArithIntToImmNoCheck(regT2, regT0);
- Jump success = jump();
-
- LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
-
- // Use the patch information to link the failure cases back to the original slow case routine.
- CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
- patchBuffer.link(failureCases1, slowCaseBegin);
- patchBuffer.link(failureCases2, slowCaseBegin);
-
- // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
- patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
-
- // Track the stub we have created so that it will be deleted later.
- CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
- stubInfo->stubRoutine = entryLabel;
-
- // Finally patch the jump to slow case back in the hot path to jump here instead.
- CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
- RepatchBuffer repatchBuffer(m_codeBlock);
- repatchBuffer.relink(jumpLocation, entryLabel);
-
- // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
- repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_array_fail));
-}
-
-void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
-{
- // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
- // referencing the prototype object - let's speculatively load its table nice and early!)
- JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
-
- // Check that regT0 is an object of the right Structure.
- Jump failureCases1 = checkStructure(regT0, structure);
-
- // Check that the prototype object's Structure has not changed.
- Structure** prototypeStructureAddress = &(protoObject->m_structure);
-#if CPU(X86_64)
- move(ImmPtr(prototypeStructure), regT3);
- Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
-#else
- Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
-#endif
-
- bool needsStubLink = false;
-
- // Checks out okay!
- if (slot.cachedPropertyType() == PropertySlot::Getter) {
- needsStubLink = true;
- compileGetDirectOffset(protoObject, regT1, regT1, cachedOffset);
- JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
- stubCall.addArgument(regT1);
- stubCall.addArgument(regT0);
- stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
- stubCall.call();
- } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
- needsStubLink = true;
- JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
- stubCall.addArgument(ImmPtr(protoObject));
- stubCall.addArgument(ImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
- stubCall.addArgument(ImmPtr(const_cast<Identifier*>(&ident)));
- stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
- stubCall.call();
- } else
- compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
- Jump success = jump();
- LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
-
- // Use the patch information to link the failure cases back to the original slow case routine.
- CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
- patchBuffer.link(failureCases1, slowCaseBegin);
- patchBuffer.link(failureCases2, slowCaseBegin);
-
- // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
- patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
-
- if (needsStubLink) {
- for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
- if (iter->to)
- patchBuffer.link(iter->from, FunctionPtr(iter->to));
- }
- }
- // Track the stub we have created so that it will be deleted later.
- CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
- stubInfo->stubRoutine = entryLabel;
-
- // Finally patch the jump to slow case back in the hot path to jump here instead.
- CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
- RepatchBuffer repatchBuffer(m_codeBlock);
- repatchBuffer.relink(jumpLocation, entryLabel);
-
- // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
- repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
-}
-
-void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset)
-{
- Jump failureCase = checkStructure(regT0, structure);
- bool needsStubLink = false;
- if (slot.cachedPropertyType() == PropertySlot::Getter) {
- needsStubLink = true;
- if (!structure->isUsingInlineStorage()) {
- move(regT0, regT1);
- compileGetDirectOffset(regT1, regT1, structure, cachedOffset);
- } else
- compileGetDirectOffset(regT0, regT1, structure, cachedOffset);
- JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
- stubCall.addArgument(regT1);
- stubCall.addArgument(regT0);
- stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
- stubCall.call();
- } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
- needsStubLink = true;
- JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
- stubCall.addArgument(regT0);
- stubCall.addArgument(ImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
- stubCall.addArgument(ImmPtr(const_cast<Identifier*>(&ident)));
- stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
- stubCall.call();
- } else
- compileGetDirectOffset(regT0, regT0, structure, cachedOffset);
- Jump success = jump();
-
- LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
-
- if (needsStubLink) {
- for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
- if (iter->to)
- patchBuffer.link(iter->from, FunctionPtr(iter->to));
- }
- }
-
- // Use the patch information to link the failure cases back to the original slow case routine.
- CodeLocationLabel lastProtoBegin = polymorphicStructures->list[currentIndex - 1].stubRoutine;
- if (!lastProtoBegin)
- lastProtoBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
-
- patchBuffer.link(failureCase, lastProtoBegin);
-
- // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
- patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
-
- CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
-
- structure->ref();
- polymorphicStructures->list[currentIndex].set(entryLabel, structure);
-
- // Finally patch the jump to slow case back in the hot path to jump here instead.
- CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
- RepatchBuffer repatchBuffer(m_codeBlock);
- repatchBuffer.relink(jumpLocation, entryLabel);
-}
-
-void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, CallFrame* callFrame)
-{
- // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
- // referencing the prototype object - let's speculatively load its table nice and early!)
- JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
-
- // Check that regT0 is an object of the right Structure.
- Jump failureCases1 = checkStructure(regT0, structure);
-
- // Check that the prototype object's Structure has not changed.
- Structure** prototypeStructureAddress = &(protoObject->m_structure);
-#if CPU(X86_64)
- move(ImmPtr(prototypeStructure), regT3);
- Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
-#else
- Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
-#endif
-
- // Checks out okay!
- bool needsStubLink = false;
- if (slot.cachedPropertyType() == PropertySlot::Getter) {
- needsStubLink = true;
- compileGetDirectOffset(protoObject, regT1, regT1, cachedOffset);
- JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
- stubCall.addArgument(regT1);
- stubCall.addArgument(regT0);
- stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
- stubCall.call();
- } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
- needsStubLink = true;
- JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
- stubCall.addArgument(ImmPtr(protoObject));
- stubCall.addArgument(ImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
- stubCall.addArgument(ImmPtr(const_cast<Identifier*>(&ident)));
- stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
- stubCall.call();
- } else
- compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
-
- Jump success = jump();
-
- LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
-
- if (needsStubLink) {
- for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
- if (iter->to)
- patchBuffer.link(iter->from, FunctionPtr(iter->to));
- }
- }
-
- // Use the patch information to link the failure cases back to the original slow case routine.
- CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
- patchBuffer.link(failureCases1, lastProtoBegin);
- patchBuffer.link(failureCases2, lastProtoBegin);
-
- // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
- patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
-
- CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
-
- structure->ref();
- prototypeStructure->ref();
- prototypeStructures->list[currentIndex].set(entryLabel, structure, prototypeStructure);
-
- // Finally patch the jump to slow case back in the hot path to jump here instead.
- CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
- RepatchBuffer repatchBuffer(m_codeBlock);
- repatchBuffer.relink(jumpLocation, entryLabel);
-}
-
-void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, CallFrame* callFrame)
-{
- ASSERT(count);
- JumpList bucketsOfFail;
-
- // Check that regT0 is an object of the right Structure.
- Jump baseObjectCheck = checkStructure(regT0, structure);
- bucketsOfFail.append(baseObjectCheck);
-
- Structure* currStructure = structure;
- RefPtr<Structure>* it = chain->head();
- JSObject* protoObject = 0;
- for (unsigned i = 0; i < count; ++i, ++it) {
- protoObject = asObject(currStructure->prototypeForLookup(callFrame));
- currStructure = it->get();
- testPrototype(protoObject, bucketsOfFail);
- }
- ASSERT(protoObject);
-
- bool needsStubLink = false;
- if (slot.cachedPropertyType() == PropertySlot::Getter) {
- needsStubLink = true;
- compileGetDirectOffset(protoObject, regT1, regT1, cachedOffset);
- JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
- stubCall.addArgument(regT1);
- stubCall.addArgument(regT0);
- stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
- stubCall.call();
- } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
- needsStubLink = true;
- JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
- stubCall.addArgument(ImmPtr(protoObject));
- stubCall.addArgument(ImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
- stubCall.addArgument(ImmPtr(const_cast<Identifier*>(&ident)));
- stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
- stubCall.call();
- } else
- compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
- Jump success = jump();
-
- LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
-
- if (needsStubLink) {
- for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
- if (iter->to)
- patchBuffer.link(iter->from, FunctionPtr(iter->to));
- }
- }
-
- // Use the patch information to link the failure cases back to the original slow case routine.
- CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
-
- patchBuffer.link(bucketsOfFail, lastProtoBegin);
-
- // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
- patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
-
- CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
-
- // Track the stub we have created so that it will be deleted later.
- structure->ref();
- chain->ref();
- prototypeStructures->list[currentIndex].set(entryLabel, structure, chain);
-
- // Finally patch the jump to slow case back in the hot path to jump here instead.
- CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
- RepatchBuffer repatchBuffer(m_codeBlock);
- repatchBuffer.relink(jumpLocation, entryLabel);
-}
-
-void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
-{
- ASSERT(count);
-
- JumpList bucketsOfFail;
-
- // Check that regT0 is an object of the right Structure.
- bucketsOfFail.append(checkStructure(regT0, structure));
-
- Structure* currStructure = structure;
- RefPtr<Structure>* it = chain->head();
- JSObject* protoObject = 0;
- for (unsigned i = 0; i < count; ++i, ++it) {
- protoObject = asObject(currStructure->prototypeForLookup(callFrame));
- currStructure = it->get();
- testPrototype(protoObject, bucketsOfFail);
- }
- ASSERT(protoObject);
-
- bool needsStubLink = false;
- if (slot.cachedPropertyType() == PropertySlot::Getter) {
- needsStubLink = true;
- compileGetDirectOffset(protoObject, regT1, regT1, cachedOffset);
- JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
- stubCall.addArgument(regT1);
- stubCall.addArgument(regT0);
- stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
- stubCall.call();
- } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
- needsStubLink = true;
- JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
- stubCall.addArgument(ImmPtr(protoObject));
- stubCall.addArgument(ImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
- stubCall.addArgument(ImmPtr(const_cast<Identifier*>(&ident)));
- stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
- stubCall.call();
- } else
- compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
- Jump success = jump();
-
- LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
-
- if (needsStubLink) {
- for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
- if (iter->to)
- patchBuffer.link(iter->from, FunctionPtr(iter->to));
- }
- }
-
- // Use the patch information to link the failure cases back to the original slow case routine.
- patchBuffer.link(bucketsOfFail, stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall));
-
- // On success return back to the hot patch code, at a point where it will perform the store to dest for us.
- patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
-
- // Track the stub we have created so that it will be deleted later.
- CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
- stubInfo->stubRoutine = entryLabel;
-
- // Finally patch the jump to slow case back in the hot path to jump here instead.
- CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
- RepatchBuffer repatchBuffer(m_codeBlock);
- repatchBuffer.relink(jumpLocation, entryLabel);
-
- // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
- repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
-}
-
-/* ------------------------------ END: !ENABLE / ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
-
-#endif // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
-
-} // namespace JSC
-
-#endif // USE(JSVALUE64)
-#endif // ENABLE(JIT)
diff --git a/JavaScriptCore/jit/JITPropertyAccess32_64.cpp b/JavaScriptCore/jit/JITPropertyAccess32_64.cpp
deleted file mode 100644
index 710a155..0000000
--- a/JavaScriptCore/jit/JITPropertyAccess32_64.cpp
+++ /dev/null
@@ -1,1186 +0,0 @@
-/*
- * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-
-#if ENABLE(JIT)
-#if USE(JSVALUE32_64)
-#include "JIT.h"
-
-#include "CodeBlock.h"
-#include "JITInlineMethods.h"
-#include "JITStubCall.h"
-#include "JSArray.h"
-#include "JSFunction.h"
-#include "JSPropertyNameIterator.h"
-#include "Interpreter.h"
-#include "LinkBuffer.h"
-#include "RepatchBuffer.h"
-#include "ResultType.h"
-#include "SamplingTool.h"
-
-#ifndef NDEBUG
-#include <stdio.h>
-#endif
-
-using namespace std;
-
-namespace JSC {
-
-void JIT::emit_op_put_by_index(Instruction* currentInstruction)
-{
- unsigned base = currentInstruction[1].u.operand;
- unsigned property = currentInstruction[2].u.operand;
- unsigned value = currentInstruction[3].u.operand;
-
- JITStubCall stubCall(this, cti_op_put_by_index);
- stubCall.addArgument(base);
- stubCall.addArgument(Imm32(property));
- stubCall.addArgument(value);
- stubCall.call();
-}
-
-void JIT::emit_op_put_getter(Instruction* currentInstruction)
-{
- unsigned base = currentInstruction[1].u.operand;
- unsigned property = currentInstruction[2].u.operand;
- unsigned function = currentInstruction[3].u.operand;
-
- JITStubCall stubCall(this, cti_op_put_getter);
- stubCall.addArgument(base);
- stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(property)));
- stubCall.addArgument(function);
- stubCall.call();
-}
-
-void JIT::emit_op_put_setter(Instruction* currentInstruction)
-{
- unsigned base = currentInstruction[1].u.operand;
- unsigned property = currentInstruction[2].u.operand;
- unsigned function = currentInstruction[3].u.operand;
-
- JITStubCall stubCall(this, cti_op_put_setter);
- stubCall.addArgument(base);
- stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(property)));
- stubCall.addArgument(function);
- stubCall.call();
-}
-
-void JIT::emit_op_del_by_id(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned base = currentInstruction[2].u.operand;
- unsigned property = currentInstruction[3].u.operand;
-
- JITStubCall stubCall(this, cti_op_del_by_id);
- stubCall.addArgument(base);
- stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(property)));
- stubCall.call(dst);
-}
-
-
-#if !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
-
-/* ------------------------------ BEGIN: !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
-
-// Treat these as nops - the call will be handled as a regular get_by_id/op_call pair.
-void JIT::emit_op_method_check(Instruction*) {}
-void JIT::emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&) { ASSERT_NOT_REACHED(); }
-#if ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
-#error "JIT_OPTIMIZE_METHOD_CALLS requires JIT_OPTIMIZE_PROPERTY_ACCESS"
-#endif
-
-void JIT::emit_op_get_by_val(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned base = currentInstruction[2].u.operand;
- unsigned property = currentInstruction[3].u.operand;
-
- JITStubCall stubCall(this, cti_op_get_by_val);
- stubCall.addArgument(base);
- stubCall.addArgument(property);
- stubCall.call(dst);
-}
-
-void JIT::emitSlow_op_get_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&)
-{
- ASSERT_NOT_REACHED();
-}
-
-void JIT::emit_op_put_by_val(Instruction* currentInstruction)
-{
- unsigned base = currentInstruction[1].u.operand;
- unsigned property = currentInstruction[2].u.operand;
- unsigned value = currentInstruction[3].u.operand;
-
- JITStubCall stubCall(this, cti_op_put_by_val);
- stubCall.addArgument(base);
- stubCall.addArgument(property);
- stubCall.addArgument(value);
- stubCall.call();
-}
-
-void JIT::emitSlow_op_put_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&)
-{
- ASSERT_NOT_REACHED();
-}
-
-void JIT::emit_op_get_by_id(Instruction* currentInstruction)
-{
- int dst = currentInstruction[1].u.operand;
- int base = currentInstruction[2].u.operand;
- int ident = currentInstruction[3].u.operand;
-
- JITStubCall stubCall(this, cti_op_get_by_id_generic);
- stubCall.addArgument(base);
- stubCall.addArgument(ImmPtr(&(m_codeBlock->identifier(ident))));
- stubCall.call(dst);
-
- m_propertyAccessInstructionIndex++;
-}
-
-void JIT::emitSlow_op_get_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&)
-{
- m_propertyAccessInstructionIndex++;
- ASSERT_NOT_REACHED();
-}
-
-void JIT::emit_op_put_by_id(Instruction* currentInstruction)
-{
- int base = currentInstruction[1].u.operand;
- int ident = currentInstruction[2].u.operand;
- int value = currentInstruction[3].u.operand;
-
- JITStubCall stubCall(this, cti_op_put_by_id_generic);
- stubCall.addArgument(base);
- stubCall.addArgument(ImmPtr(&(m_codeBlock->identifier(ident))));
- stubCall.addArgument(value);
- stubCall.call();
-
- m_propertyAccessInstructionIndex++;
-}
-
-void JIT::emitSlow_op_put_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&)
-{
- m_propertyAccessInstructionIndex++;
- ASSERT_NOT_REACHED();
-}
-
-#else // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
-
-/* ------------------------------ BEGIN: ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
-
-#if ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
-
-void JIT::emit_op_method_check(Instruction* currentInstruction)
-{
- // Assert that the following instruction is a get_by_id.
- ASSERT(m_interpreter->getOpcodeID((currentInstruction + OPCODE_LENGTH(op_method_check))->u.opcode) == op_get_by_id);
-
- currentInstruction += OPCODE_LENGTH(op_method_check);
-
- // Do the method check - check the object & its prototype's structure inline (this is the common case).
- m_methodCallCompilationInfo.append(MethodCallCompilationInfo(m_propertyAccessInstructionIndex));
- MethodCallCompilationInfo& info = m_methodCallCompilationInfo.last();
-
- int dst = currentInstruction[1].u.operand;
- int base = currentInstruction[2].u.operand;
-
- emitLoad(base, regT1, regT0);
- emitJumpSlowCaseIfNotJSCell(base, regT1);
-
- BEGIN_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck);
-
- Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), info.structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
- DataLabelPtr protoStructureToCompare, protoObj = moveWithPatch(ImmPtr(0), regT2);
- Jump protoStructureCheck = branchPtrWithPatch(NotEqual, Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), protoStructureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
-
- // This will be relinked to load the function without doing a load.
- DataLabelPtr putFunction = moveWithPatch(ImmPtr(0), regT0);
-
- END_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck);
-
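- // The patched fast path materializes only the function payload (in regT0), so the
- // cell tag must be set explicitly.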
- move(Imm32(JSValue::CellTag), regT1);
- Jump match = jump();
-
- ASSERT(differenceBetween(info.structureToCompare, protoObj) == patchOffsetMethodCheckProtoObj);
- ASSERT(differenceBetween(info.structureToCompare, protoStructureToCompare) == patchOffsetMethodCheckProtoStruct);
- ASSERT(differenceBetween(info.structureToCompare, putFunction) == patchOffsetMethodCheckPutFunction);
-
- // Link the failure cases here.
- structureCheck.link(this);
- protoStructureCheck.link(this);
-
- // Do a regular(ish) get_by_id (the slow case will be linked to
- // cti_op_get_by_id_method_check instead of cti_op_get_by_id).
- compileGetByIdHotPath();
-
- match.link(this);
- emitStore(dst, regT1, regT0);
- map(m_bytecodeOffset + OPCODE_LENGTH(op_method_check), dst, regT1, regT0);
-
- // We've already generated the following get_by_id, so make sure it's skipped over.
- m_bytecodeOffset += OPCODE_LENGTH(op_get_by_id);
-}
-
-void JIT::emitSlow_op_method_check(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- currentInstruction += OPCODE_LENGTH(op_method_check);
-
- int dst = currentInstruction[1].u.operand;
- int base = currentInstruction[2].u.operand;
- int ident = currentInstruction[3].u.operand;
-
- compileGetByIdSlowCase(dst, base, &(m_codeBlock->identifier(ident)), iter, true);
-
- // We've already generated the following get_by_id, so make sure it's skipped over.
- m_bytecodeOffset += OPCODE_LENGTH(op_get_by_id);
-}
-
-#else //!ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
-
-// Treat these as nops - the call will be handled as a regular get_by_id/op_call pair.
-void JIT::emit_op_method_check(Instruction*) {}
-void JIT::emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&) { ASSERT_NOT_REACHED(); }
-
-#endif
-
-JIT::CodePtr JIT::stringGetByValStubGenerator(JSGlobalData* globalData, ExecutablePool* pool)
-{
- JSInterfaceJIT jit;
- JumpList failures;
- failures.append(jit.branchPtr(NotEqual, Address(regT0), ImmPtr(globalData->jsStringVPtr)));
- failures.append(jit.branchTest32(NonZero, Address(regT0, OBJECT_OFFSETOF(JSString, m_fiberCount))));
-
- // Load string length to regT1, and start the process of loading the data pointer into regT0
- jit.load32(Address(regT0, ThunkHelpers::jsStringLengthOffset()), regT1);
- jit.loadPtr(Address(regT0, ThunkHelpers::jsStringValueOffset()), regT0);
- jit.loadPtr(Address(regT0, ThunkHelpers::stringImplDataOffset()), regT0);
-
- // Do an unsigned compare to simultaneously filter negative indices as well as indices that are too large
- failures.append(jit.branch32(AboveOrEqual, regT2, regT1));
-
- // Load the character
- jit.load16(BaseIndex(regT0, regT2, TimesTwo, 0), regT0);
-
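- // Only characters below 0x100 are cached in smallStrings; anything else falls back
- // to the slow case.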
- failures.append(jit.branch32(AboveOrEqual, regT0, Imm32(0x100)));
- jit.move(ImmPtr(globalData->smallStrings.singleCharacterStrings()), regT1);
- jit.loadPtr(BaseIndex(regT1, regT0, ScalePtr, 0), regT0);
- jit.move(Imm32(JSValue::CellTag), regT1); // We null check regT0 on return so this is safe
- jit.ret();
-
- failures.link(&jit);
- jit.move(Imm32(0), regT0);
- jit.ret();
-
- LinkBuffer patchBuffer(&jit, pool, 0);
- return patchBuffer.finalizeCode().m_code;
-}
-
-void JIT::emit_op_get_by_val(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned base = currentInstruction[2].u.operand;
- unsigned property = currentInstruction[3].u.operand;
-
- emitLoad2(base, regT1, regT0, property, regT3, regT2);
-
- addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
- emitJumpSlowCaseIfNotJSCell(base, regT1);
- addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));
-
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT3);
- addSlowCase(branch32(AboveOrEqual, regT2, Address(regT0, OBJECT_OFFSETOF(JSArray, m_vectorLength))));
-
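- // Each JSValue in the vector occupies eight bytes (a 32-bit tag and a 32-bit payload),
- // hence the TimesEight scale.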
- load32(BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1); // tag
- load32(BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0); // payload
- addSlowCase(branch32(Equal, regT1, Imm32(JSValue::EmptyValueTag)));
-
- emitStore(dst, regT1, regT0);
- map(m_bytecodeOffset + OPCODE_LENGTH(op_get_by_val), dst, regT1, regT0);
-}
-
-void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned base = currentInstruction[2].u.operand;
- unsigned property = currentInstruction[3].u.operand;
-
- linkSlowCase(iter); // property int32 check
- linkSlowCaseIfNotJSCell(iter, base); // base cell check
-
- Jump nonCell = jump();
- linkSlowCase(iter); // base array check
- Jump notString = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr));
- emitNakedCall(m_globalData->getCTIStub(stringGetByValStubGenerator));
- Jump failed = branchTestPtr(Zero, regT0);
- emitStore(dst, regT1, regT0);
- emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_get_by_val));
- failed.link(this);
- notString.link(this);
- nonCell.link(this);
-
- linkSlowCase(iter); // vector length check
- linkSlowCase(iter); // empty value
-
- JITStubCall stubCall(this, cti_op_get_by_val);
- stubCall.addArgument(base);
- stubCall.addArgument(property);
- stubCall.call(dst);
-}
-
-void JIT::emit_op_put_by_val(Instruction* currentInstruction)
-{
- unsigned base = currentInstruction[1].u.operand;
- unsigned property = currentInstruction[2].u.operand;
- unsigned value = currentInstruction[3].u.operand;
-
- emitLoad2(base, regT1, regT0, property, regT3, regT2);
-
- addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
- emitJumpSlowCaseIfNotJSCell(base, regT1);
- addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));
- addSlowCase(branch32(AboveOrEqual, regT2, Address(regT0, OBJECT_OFFSETOF(JSArray, m_vectorLength))));
-
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT3);
-
- Jump empty = branch32(Equal, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), Imm32(JSValue::EmptyValueTag));
-
- Label storeResult(this);
- emitLoad(value, regT1, regT0);
- store32(regT0, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload))); // payload
- store32(regT1, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag))); // tag
- Jump end = jump();
-
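- // Writing into a hole: bump the occupied-slot count and, if the index is at or beyond
- // the current length, grow m_length to index + 1.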
- empty.link(this);
- add32(Imm32(1), Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
- branch32(Below, regT2, Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_length))).linkTo(storeResult, this);
-
- add32(Imm32(1), regT2, regT0);
- store32(regT0, Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_length)));
- jump().linkTo(storeResult, this);
-
- end.link(this);
-}
-
-void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned base = currentInstruction[1].u.operand;
- unsigned property = currentInstruction[2].u.operand;
- unsigned value = currentInstruction[3].u.operand;
-
- linkSlowCase(iter); // property int32 check
- linkSlowCaseIfNotJSCell(iter, base); // base cell check
- linkSlowCase(iter); // base not array check
- linkSlowCase(iter); // in vector check
-
- JITStubCall stubPutByValCall(this, cti_op_put_by_val);
- stubPutByValCall.addArgument(base);
- stubPutByValCall.addArgument(property);
- stubPutByValCall.addArgument(value);
- stubPutByValCall.call();
-}
-
-void JIT::emit_op_get_by_id(Instruction* currentInstruction)
-{
- int dst = currentInstruction[1].u.operand;
- int base = currentInstruction[2].u.operand;
-
- emitLoad(base, regT1, regT0);
- emitJumpSlowCaseIfNotJSCell(base, regT1);
- compileGetByIdHotPath();
- emitStore(dst, regT1, regT0);
- map(m_bytecodeOffset + OPCODE_LENGTH(op_get_by_id), dst, regT1, regT0);
-}
-
-void JIT::compileGetByIdHotPath()
-{
- // As for put_by_id, get_by_id requires the offset of the Structure and the offset of the access to be patched.
- // Additionally, for get_by_id we need to patch the offset of the branch to the slow case (we patch this to jump
- // to the array-length / prototype access trampolines), and finally we also record the property-map access offset
- // as a label to jump back to if one of these trampolines finds a match.
-
- BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);
-
- Label hotPathBegin(this);
- m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;
- m_propertyAccessInstructionIndex++;
-
- DataLabelPtr structureToCompare;
- Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
- addSlowCase(structureCheck);
- ASSERT(differenceBetween(hotPathBegin, structureToCompare) == patchOffsetGetByIdStructure);
- ASSERT(differenceBetween(hotPathBegin, structureCheck) == patchOffsetGetByIdBranchToSlowCase);
-
- Label externalLoad = loadPtrWithPatchToLEA(Address(regT0, OBJECT_OFFSETOF(JSObject, m_externalStorage)), regT2);
- Label externalLoadComplete(this);
- ASSERT(differenceBetween(hotPathBegin, externalLoad) == patchOffsetGetByIdExternalLoad);
- ASSERT(differenceBetween(externalLoad, externalLoadComplete) == patchLengthGetByIdExternalLoad);
-
- DataLabel32 displacementLabel1 = loadPtrWithAddressOffsetPatch(Address(regT2, patchGetByIdDefaultOffset), regT0); // payload
- ASSERT(differenceBetween(hotPathBegin, displacementLabel1) == patchOffsetGetByIdPropertyMapOffset1);
- DataLabel32 displacementLabel2 = loadPtrWithAddressOffsetPatch(Address(regT2, patchGetByIdDefaultOffset), regT1); // tag
- ASSERT(differenceBetween(hotPathBegin, displacementLabel2) == patchOffsetGetByIdPropertyMapOffset2);
-
- Label putResult(this);
- ASSERT(differenceBetween(hotPathBegin, putResult) == patchOffsetGetByIdPutResult);
-
- END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);
-}
-
-void JIT::emitSlow_op_get_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- int dst = currentInstruction[1].u.operand;
- int base = currentInstruction[2].u.operand;
- int ident = currentInstruction[3].u.operand;
-
- compileGetByIdSlowCase(dst, base, &(m_codeBlock->identifier(ident)), iter);
-}
-
-void JIT::compileGetByIdSlowCase(int dst, int base, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, bool isMethodCheck)
-{
- // As for the hot path of get_by_id, above, we ensure that we can use an architecture-specific offset
- // so that we only need to track one pointer into the slow case code - we track a pointer to the location
- // of the call (which we can use to look up the patch information), but should an array-length or
- // prototype access trampoline fail we want to bail out back to here. To do so we can subtract back
- // the distance from the call to the head of the slow case.
- linkSlowCaseIfNotJSCell(iter, base);
- linkSlowCase(iter);
-
- BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase);
-
-#ifndef NDEBUG
- Label coldPathBegin(this);
-#endif
- JITStubCall stubCall(this, isMethodCheck ? cti_op_get_by_id_method_check : cti_op_get_by_id);
- stubCall.addArgument(regT1, regT0);
- stubCall.addArgument(ImmPtr(ident));
- Call call = stubCall.call(dst);
-
- END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase);
-
- ASSERT(differenceBetween(coldPathBegin, call) == patchOffsetGetByIdSlowCaseCall);
-
- // Track the location of the call; this will be used to recover patch information.
- m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].callReturnLocation = call;
- m_propertyAccessInstructionIndex++;
-}
-
-void JIT::emit_op_put_by_id(Instruction* currentInstruction)
-{
- // In order to be able to patch both the Structure and the object offset, we store a single pointer,
- // 'hotPathBegin', to just after the point at which the arguments have been loaded into registers, and we
- // generate code such that the Structure & offset are always at the same distance from it.
-
- int base = currentInstruction[1].u.operand;
- int value = currentInstruction[3].u.operand;
-
- emitLoad2(base, regT1, regT0, value, regT3, regT2);
-
- emitJumpSlowCaseIfNotJSCell(base, regT1);
-
- BEGIN_UNINTERRUPTED_SEQUENCE(sequencePutById);
-
- Label hotPathBegin(this);
- m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;
- m_propertyAccessInstructionIndex++;
-
- // It is important that the following instruction plants a 32-bit immediate, in order that it can be patched over.
- DataLabelPtr structureToCompare;
- addSlowCase(branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))));
- ASSERT(differenceBetween(hotPathBegin, structureToCompare) == patchOffsetPutByIdStructure);
-
- // Plant a load from a bogus offset in the object's property map; we will patch this later, if it is to be used.
- Label externalLoad = loadPtrWithPatchToLEA(Address(regT0, OBJECT_OFFSETOF(JSObject, m_externalStorage)), regT0);
- Label externalLoadComplete(this);
- ASSERT(differenceBetween(hotPathBegin, externalLoad) == patchOffsetPutByIdExternalLoad);
- ASSERT(differenceBetween(externalLoad, externalLoadComplete) == patchLengthPutByIdExternalLoad);
-
- DataLabel32 displacementLabel1 = storePtrWithAddressOffsetPatch(regT2, Address(regT0, patchGetByIdDefaultOffset)); // payload
- DataLabel32 displacementLabel2 = storePtrWithAddressOffsetPatch(regT3, Address(regT0, patchGetByIdDefaultOffset)); // tag
-
- END_UNINTERRUPTED_SEQUENCE(sequencePutById);
-
- ASSERT(differenceBetween(hotPathBegin, displacementLabel1) == patchOffsetPutByIdPropertyMapOffset1);
- ASSERT(differenceBetween(hotPathBegin, displacementLabel2) == patchOffsetPutByIdPropertyMapOffset2);
-}
-
-void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- int base = currentInstruction[1].u.operand;
- int ident = currentInstruction[2].u.operand;
- int direct = currentInstruction[8].u.operand;
-
- linkSlowCaseIfNotJSCell(iter, base);
- linkSlowCase(iter);
-
- JITStubCall stubCall(this, direct ? cti_op_put_by_id_direct : cti_op_put_by_id);
- stubCall.addArgument(regT1, regT0);
- stubCall.addArgument(ImmPtr(&(m_codeBlock->identifier(ident))));
- stubCall.addArgument(regT3, regT2);
- Call call = stubCall.call();
-
- // Track the location of the call; this will be used to recover patch information.
- m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].callReturnLocation = call;
- m_propertyAccessInstructionIndex++;
-}
-
-// Compile a store into an object's property storage. May overwrite base.
-void JIT::compilePutDirectOffset(RegisterID base, RegisterID valueTag, RegisterID valuePayload, Structure* structure, size_t cachedOffset)
-{
- int offset = cachedOffset;
- if (structure->isUsingInlineStorage())
- offset += OBJECT_OFFSETOF(JSObject, m_inlineStorage) / sizeof(Register);
- else
- loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), base);
- emitStore(offset, valueTag, valuePayload, base);
-}
-
-// Compile a load from an object's property storage. May overwrite base.
-void JIT::compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, Structure* structure, size_t cachedOffset)
-{
- int offset = cachedOffset;
- if (structure->isUsingInlineStorage())
- offset += OBJECT_OFFSETOF(JSObject, m_inlineStorage) / sizeof(Register);
- else
- loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), base);
- emitLoad(offset, resultTag, resultPayload, base);
-}
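-
-// A plain-C++ model of the addressing choice made by the two helpers above,
-// under a stand-in object layout (hypothetical types, not the real JSObject):
-// inline storage lives inside the object itself, external storage sits behind
-// a pointer, and 'cachedOffset' indexes whichever one the Structure selected.
-struct SketchValue { int payload; int tag; };
-
-struct SketchObject {
-    SketchValue inlineStorage[4];
-    SketchValue* externalStorage;
-};
-
-static SketchValue* sketchSlotAddress(SketchObject* object, bool usingInlineStorage, unsigned cachedOffset)
-{
-    SketchValue* storage = usingInlineStorage ? object->inlineStorage : object->externalStorage;
-    return storage + cachedOffset; // the emitStore()/emitLoad() above then hits this slot
-}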
-
-void JIT::compileGetDirectOffset(JSObject* base, RegisterID temp, RegisterID resultTag, RegisterID resultPayload, size_t cachedOffset)
-{
- if (base->isUsingInlineStorage()) {
- load32(reinterpret_cast<char*>(&base->m_inlineStorage[cachedOffset]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload), resultPayload);
- load32(reinterpret_cast<char*>(&base->m_inlineStorage[cachedOffset]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag), resultTag);
- return;
- }
-
- size_t offset = cachedOffset * sizeof(JSValue);
-
- PropertyStorage* protoPropertyStorage = &base->m_externalStorage;
- loadPtr(static_cast<void*>(protoPropertyStorage), temp);
- load32(Address(temp, offset + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayload);
- load32(Address(temp, offset + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTag);
-}
-
-void JIT::testPrototype(JSValue prototype, JumpList& failureCases)
-{
- if (prototype.isNull())
- return;
-
-    // We have a special case for X86_64 here because x86 instructions that take immediate values
-    // accept at most 32-bit immediates, whereas the pointer constants we are using here are 64-bit
-    // values. In the non-X86_64 case, the generated code is slightly more efficient because it uses
-    // two fewer instructions and doesn't require any scratch registers.
-#if CPU(X86_64)
- move(ImmPtr(prototype.asCell()->structure()), regT3);
- failureCases.append(branchPtr(NotEqual, AbsoluteAddress(&prototype.asCell()->m_structure), regT3));
-#else
- failureCases.append(branchPtr(NotEqual, AbsoluteAddress(&prototype.asCell()->m_structure), ImmPtr(prototype.asCell()->structure())));
-#endif
-}
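-
-// Illustratively, the two sequences have this shape (schematic only, not the
-// exact encodings the assembler emits):
-//   X86_64:      movabs $structure, %r11       ; 64-bit immediate needs its own move
-//                cmpq   %r11, (prototypeAddress)
-//   32-bit X86:  cmpl   $structure, (prototypeAddress) ; pointer fits the immediate field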
-
-void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress, bool direct)
-{
- // It is assumed that regT0 contains the basePayload and regT1 contains the baseTag. The value can be found on the stack.
-
- JumpList failureCases;
- failureCases.append(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));
- failureCases.append(branchPtr(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), ImmPtr(oldStructure)));
- testPrototype(oldStructure->storedPrototype(), failureCases);
-
- if (!direct) {
- // Verify that nothing in the prototype chain has a setter for this property.
- for (RefPtr<Structure>* it = chain->head(); *it; ++it)
- testPrototype((*it)->storedPrototype(), failureCases);
- }
-
- // Reallocate property storage if needed.
- Call callTarget;
- bool willNeedStorageRealloc = oldStructure->propertyStorageCapacity() != newStructure->propertyStorageCapacity();
- if (willNeedStorageRealloc) {
-        // This trampoline was called like a JIT stub; before we can call again we need to
- // remove the return address from the stack, to prevent the stack from becoming misaligned.
- preserveReturnAddressAfterCall(regT3);
-
- JITStubCall stubCall(this, cti_op_put_by_id_transition_realloc);
- stubCall.skipArgument(); // base
- stubCall.skipArgument(); // ident
- stubCall.skipArgument(); // value
- stubCall.addArgument(Imm32(oldStructure->propertyStorageCapacity()));
- stubCall.addArgument(Imm32(newStructure->propertyStorageCapacity()));
- stubCall.call(regT0);
-
- restoreReturnAddressBeforeReturn(regT3);
- }
-
- sub32(Imm32(1), AbsoluteAddress(oldStructure->addressOfCount()));
- add32(Imm32(1), AbsoluteAddress(newStructure->addressOfCount()));
- storePtr(ImmPtr(newStructure), Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)));
-
-#if CPU(MIPS)
- // For MIPS, we don't add sizeof(void*) to the stack offset.
- load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[2]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT3);
- load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[2]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT2);
-#else
- load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[2]) + sizeof(void*) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT3);
- load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[2]) + sizeof(void*) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT2);
-#endif
-
- // Write the value
- compilePutDirectOffset(regT0, regT2, regT3, newStructure, cachedOffset);
-
- ret();
-
- ASSERT(!failureCases.empty());
- failureCases.link(this);
- restoreArgumentReferenceForTrampoline();
- Call failureCall = tailRecursiveCall();
-
- LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
-
- patchBuffer.link(failureCall, FunctionPtr(direct ? cti_op_put_by_id_direct_fail : cti_op_put_by_id_fail));
-
- if (willNeedStorageRealloc) {
- ASSERT(m_calls.size() == 1);
- patchBuffer.link(m_calls[0].from, FunctionPtr(cti_op_put_by_id_transition_realloc));
- }
-
- CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
- stubInfo->stubRoutine = entryLabel;
- RepatchBuffer repatchBuffer(m_codeBlock);
- repatchBuffer.relinkCallerToTrampoline(returnAddress, entryLabel);
-}
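-
-// A plain-C++ model of the state change the stub performs once every guard has
-// passed (stand-in type; the real counts live in Structure, and the value store
-// goes through compilePutDirectOffset):
-struct SketchStructure { int refCount; };
-
-static void sketchTransition(SketchStructure*& objectStructure, SketchStructure* oldStructure, SketchStructure* newStructure)
-{
-    --oldStructure->refCount;       // sub32(Imm32(1), oldStructure->addressOfCount())
-    ++newStructure->refCount;       // add32(Imm32(1), newStructure->addressOfCount())
-    objectStructure = newStructure; // storePtr(ImmPtr(newStructure), ... m_structure)
-}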
-
-void JIT::patchGetByIdSelf(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress)
-{
- RepatchBuffer repatchBuffer(codeBlock);
-
- // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
- // Should probably go to JITStubs::cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
- repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_self_fail));
-
- int offset = sizeof(JSValue) * cachedOffset;
-
-    // If we're patching to use inline storage, convert the initial load to a LEA; this avoids the extra
-    // load and makes the subsequent load's offset automatically correct.
- if (structure->isUsingInlineStorage())
- repatchBuffer.repatchLoadPtrToLEA(stubInfo->hotPathBegin.instructionAtOffset(patchOffsetGetByIdExternalLoad));
-
-    // Patch the offset into the property map to load from, then patch the Structure to look for.
- repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetGetByIdStructure), structure);
- repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetGetByIdPropertyMapOffset1), offset + OBJECT_OFFSETOF(JSValue, u.asBits.payload)); // payload
- repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetGetByIdPropertyMapOffset2), offset + OBJECT_OFFSETOF(JSValue, u.asBits.tag)); // tag
-}
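-
-// Why converting the load to a LEA is sufficient (stand-in layout, not the real
-// JSObject): both forms leave a property-storage base in the register, so the
-// patched displacement load that follows works unchanged.
-static char* sketchStorageBase(char* object, unsigned externalStoragePointerOffset, unsigned inlineStorageOffset, bool usingInlineStorage)
-{
-    if (usingInlineStorage)
-        return object + inlineStorageOffset; // the LEA: just form the address
-    return *reinterpret_cast<char**>(object + externalStoragePointerOffset); // the load: chase the pointer
-}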
-
-void JIT::patchMethodCallProto(CodeBlock* codeBlock, MethodCallLinkInfo& methodCallLinkInfo, JSFunction* callee, Structure* structure, JSObject* proto, ReturnAddressPtr returnAddress)
-{
- RepatchBuffer repatchBuffer(codeBlock);
-
- ASSERT(!methodCallLinkInfo.cachedStructure);
- methodCallLinkInfo.cachedStructure = structure;
- structure->ref();
-
- Structure* prototypeStructure = proto->structure();
- methodCallLinkInfo.cachedPrototypeStructure = prototypeStructure;
- prototypeStructure->ref();
-
- repatchBuffer.repatch(methodCallLinkInfo.structureLabel, structure);
- repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoObj), proto);
- repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoStruct), prototypeStructure);
- repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckPutFunction), callee);
-
- repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id));
-}
-
-void JIT::patchPutByIdReplace(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress, bool direct)
-{
- RepatchBuffer repatchBuffer(codeBlock);
-
- // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
- // Should probably go to cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
- repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(direct ? cti_op_put_by_id_direct_generic : cti_op_put_by_id_generic));
-
- int offset = sizeof(JSValue) * cachedOffset;
-
-    // If we're patching to use inline storage, convert the initial load to a LEA; this avoids the extra
-    // load and makes the subsequent load's offset automatically correct.
- if (structure->isUsingInlineStorage())
- repatchBuffer.repatchLoadPtrToLEA(stubInfo->hotPathBegin.instructionAtOffset(patchOffsetPutByIdExternalLoad));
-
-    // Patch the offset into the property map to load from, then patch the Structure to look for.
- repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetPutByIdStructure), structure);
- repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset1), offset + OBJECT_OFFSETOF(JSValue, u.asBits.payload)); // payload
- repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset2), offset + OBJECT_OFFSETOF(JSValue, u.asBits.tag)); // tag
-}
-
-void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress)
-{
- StructureStubInfo* stubInfo = &m_codeBlock->getStubInfo(returnAddress);
-
- // regT0 holds a JSCell*
-
- // Check for array
- Jump failureCases1 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr));
-
- // Checks out okay! - get the length from the storage
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT2);
- load32(Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length)), regT2);
-
- Jump failureCases2 = branch32(Above, regT2, Imm32(INT_MAX));
- move(regT2, regT0);
- move(Imm32(JSValue::Int32Tag), regT1);
- Jump success = jump();
-
- LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
-
- // Use the patch information to link the failure cases back to the original slow case routine.
- CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
- patchBuffer.link(failureCases1, slowCaseBegin);
- patchBuffer.link(failureCases2, slowCaseBegin);
-
-    // On success, return to the hot patch code, at a point where it will perform the store to dest for us.
- patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
-
- // Track the stub we have created so that it will be deleted later.
- CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
- stubInfo->stubRoutine = entryLabel;
-
- // Finally patch the jump to slow case back in the hot path to jump here instead.
- CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
- RepatchBuffer repatchBuffer(m_codeBlock);
- repatchBuffer.relink(jumpLocation, entryLabel);
-
-    // We don't want to patch more than once - all future misses will go straight to cti_op_get_by_id_array_fail.
- repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_array_fail));
-}
-
-void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
-{
- // regT0 holds a JSCell*
-
- // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
-    // referencing the prototype object - let's speculatively load its table nice and early!)
- JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
-
- Jump failureCases1 = checkStructure(regT0, structure);
-
- // Check the prototype object's Structure had not changed.
- Structure** prototypeStructureAddress = &(protoObject->m_structure);
-#if CPU(X86_64)
- move(ImmPtr(prototypeStructure), regT3);
- Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
-#else
- Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
-#endif
- bool needsStubLink = false;
- // Checks out okay!
- if (slot.cachedPropertyType() == PropertySlot::Getter) {
- needsStubLink = true;
- compileGetDirectOffset(protoObject, regT2, regT2, regT1, cachedOffset);
- JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
- stubCall.addArgument(regT1);
- stubCall.addArgument(regT0);
- stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
- stubCall.call();
- } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
- needsStubLink = true;
- JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
- stubCall.addArgument(ImmPtr(protoObject));
- stubCall.addArgument(ImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
- stubCall.addArgument(ImmPtr(const_cast<Identifier*>(&ident)));
- stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
- stubCall.call();
- } else
- compileGetDirectOffset(protoObject, regT2, regT1, regT0, cachedOffset);
-
- Jump success = jump();
-
- LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
-
- // Use the patch information to link the failure cases back to the original slow case routine.
- CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
- patchBuffer.link(failureCases1, slowCaseBegin);
- patchBuffer.link(failureCases2, slowCaseBegin);
-
-    // On success, return to the hot patch code, at a point where it will perform the store to dest for us.
- patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
-
- if (needsStubLink) {
- for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
- if (iter->to)
- patchBuffer.link(iter->from, FunctionPtr(iter->to));
- }
- }
-
- // Track the stub we have created so that it will be deleted later.
- CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
- stubInfo->stubRoutine = entryLabel;
-
- // Finally patch the jump to slow case back in the hot path to jump here instead.
- CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
- RepatchBuffer repatchBuffer(m_codeBlock);
- repatchBuffer.relink(jumpLocation, entryLabel);
-
-    // We don't want to patch more than once - all future misses will go straight to cti_op_get_by_id_proto_list.
- repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
-}
-
-
-void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset)
-{
- // regT0 holds a JSCell*
- Jump failureCase = checkStructure(regT0, structure);
- bool needsStubLink = false;
- if (slot.cachedPropertyType() == PropertySlot::Getter) {
- needsStubLink = true;
- if (!structure->isUsingInlineStorage()) {
- move(regT0, regT1);
- compileGetDirectOffset(regT1, regT2, regT1, structure, cachedOffset);
- } else
- compileGetDirectOffset(regT0, regT2, regT1, structure, cachedOffset);
- JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
- stubCall.addArgument(regT1);
- stubCall.addArgument(regT0);
- stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
- stubCall.call();
- } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
- needsStubLink = true;
- JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
- stubCall.addArgument(regT0);
- stubCall.addArgument(ImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
- stubCall.addArgument(ImmPtr(const_cast<Identifier*>(&ident)));
- stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
- stubCall.call();
- } else
- compileGetDirectOffset(regT0, regT1, regT0, structure, cachedOffset);
-
- Jump success = jump();
-
- LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
- if (needsStubLink) {
- for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
- if (iter->to)
- patchBuffer.link(iter->from, FunctionPtr(iter->to));
- }
- }
- // Use the patch information to link the failure cases back to the original slow case routine.
- CodeLocationLabel lastProtoBegin = polymorphicStructures->list[currentIndex - 1].stubRoutine;
- if (!lastProtoBegin)
- lastProtoBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
-
- patchBuffer.link(failureCase, lastProtoBegin);
-
-    // On success, return to the hot patch code, at a point where it will perform the store to dest for us.
- patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
-
- CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
-
- structure->ref();
- polymorphicStructures->list[currentIndex].set(entryLabel, structure);
-
- // Finally patch the jump to slow case back in the hot path to jump here instead.
- CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
- RepatchBuffer repatchBuffer(m_codeBlock);
- repatchBuffer.relink(jumpLocation, entryLabel);
-}
-
-void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, CallFrame* callFrame)
-{
- // regT0 holds a JSCell*
-
- // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
-    // referencing the prototype object - let's speculatively load its table nice and early!)
- JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
-
-    // Check that regT0 is an object of the right Structure.
- Jump failureCases1 = checkStructure(regT0, structure);
-
- // Check the prototype object's Structure had not changed.
- Structure** prototypeStructureAddress = &(protoObject->m_structure);
-#if CPU(X86_64)
- move(ImmPtr(prototypeStructure), regT3);
- Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
-#else
- Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
-#endif
-
- bool needsStubLink = false;
- if (slot.cachedPropertyType() == PropertySlot::Getter) {
- needsStubLink = true;
- compileGetDirectOffset(protoObject, regT2, regT2, regT1, cachedOffset);
- JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
- stubCall.addArgument(regT1);
- stubCall.addArgument(regT0);
- stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
- stubCall.call();
- } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
- needsStubLink = true;
- JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
- stubCall.addArgument(ImmPtr(protoObject));
- stubCall.addArgument(ImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
- stubCall.addArgument(ImmPtr(const_cast<Identifier*>(&ident)));
- stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
- stubCall.call();
- } else
- compileGetDirectOffset(protoObject, regT2, regT1, regT0, cachedOffset);
-
- Jump success = jump();
-
- LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
- if (needsStubLink) {
- for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
- if (iter->to)
- patchBuffer.link(iter->from, FunctionPtr(iter->to));
- }
- }
- // Use the patch information to link the failure cases back to the original slow case routine.
- CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
- patchBuffer.link(failureCases1, lastProtoBegin);
- patchBuffer.link(failureCases2, lastProtoBegin);
-
-    // On success, return to the hot patch code, at a point where it will perform the store to dest for us.
- patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
-
- CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
-
- structure->ref();
- prototypeStructure->ref();
- prototypeStructures->list[currentIndex].set(entryLabel, structure, prototypeStructure);
-
- // Finally patch the jump to slow case back in the hot path to jump here instead.
- CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
- RepatchBuffer repatchBuffer(m_codeBlock);
- repatchBuffer.relink(jumpLocation, entryLabel);
-}
-
-void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, CallFrame* callFrame)
-{
- // regT0 holds a JSCell*
- ASSERT(count);
-
- JumpList bucketsOfFail;
-
-    // Check that regT0 is an object of the right Structure.
- bucketsOfFail.append(checkStructure(regT0, structure));
-
- Structure* currStructure = structure;
- RefPtr<Structure>* it = chain->head();
- JSObject* protoObject = 0;
- for (unsigned i = 0; i < count; ++i, ++it) {
- protoObject = asObject(currStructure->prototypeForLookup(callFrame));
- currStructure = it->get();
- testPrototype(protoObject, bucketsOfFail);
- }
- ASSERT(protoObject);
-
- bool needsStubLink = false;
- if (slot.cachedPropertyType() == PropertySlot::Getter) {
- needsStubLink = true;
- compileGetDirectOffset(protoObject, regT2, regT2, regT1, cachedOffset);
- JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
- stubCall.addArgument(regT1);
- stubCall.addArgument(regT0);
- stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
- stubCall.call();
- } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
- needsStubLink = true;
- JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
- stubCall.addArgument(ImmPtr(protoObject));
- stubCall.addArgument(ImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
- stubCall.addArgument(ImmPtr(const_cast<Identifier*>(&ident)));
- stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
- stubCall.call();
- } else
- compileGetDirectOffset(protoObject, regT2, regT1, regT0, cachedOffset);
-
- Jump success = jump();
-
- LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
- if (needsStubLink) {
- for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
- if (iter->to)
- patchBuffer.link(iter->from, FunctionPtr(iter->to));
- }
- }
- // Use the patch information to link the failure cases back to the original slow case routine.
- CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
-
- patchBuffer.link(bucketsOfFail, lastProtoBegin);
-
-    // On success, return to the hot patch code, at a point where it will perform the store to dest for us.
- patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
-
- CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
-
- // Track the stub we have created so that it will be deleted later.
- structure->ref();
- chain->ref();
- prototypeStructures->list[currentIndex].set(entryLabel, structure, chain);
-
- // Finally patch the jump to slow case back in the hot path to jump here instead.
- CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
- RepatchBuffer repatchBuffer(m_codeBlock);
- repatchBuffer.relink(jumpLocation, entryLabel);
-}
-
-void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
-{
- // regT0 holds a JSCell*
- ASSERT(count);
-
- JumpList bucketsOfFail;
-
-    // Check that regT0 is an object of the right Structure.
- bucketsOfFail.append(checkStructure(regT0, structure));
-
- Structure* currStructure = structure;
- RefPtr<Structure>* it = chain->head();
- JSObject* protoObject = 0;
- for (unsigned i = 0; i < count; ++i, ++it) {
- protoObject = asObject(currStructure->prototypeForLookup(callFrame));
- currStructure = it->get();
- testPrototype(protoObject, bucketsOfFail);
- }
- ASSERT(protoObject);
-
- bool needsStubLink = false;
- if (slot.cachedPropertyType() == PropertySlot::Getter) {
- needsStubLink = true;
- compileGetDirectOffset(protoObject, regT2, regT2, regT1, cachedOffset);
- JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
- stubCall.addArgument(regT1);
- stubCall.addArgument(regT0);
- stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
- stubCall.call();
- } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
- needsStubLink = true;
- JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
- stubCall.addArgument(ImmPtr(protoObject));
- stubCall.addArgument(ImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
- stubCall.addArgument(ImmPtr(const_cast<Identifier*>(&ident)));
- stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
- stubCall.call();
- } else
- compileGetDirectOffset(protoObject, regT2, regT1, regT0, cachedOffset);
- Jump success = jump();
-
- LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
- if (needsStubLink) {
- for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
- if (iter->to)
- patchBuffer.link(iter->from, FunctionPtr(iter->to));
- }
- }
- // Use the patch information to link the failure cases back to the original slow case routine.
- patchBuffer.link(bucketsOfFail, stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall));
-
-    // On success, return to the hot patch code, at a point where it will perform the store to dest for us.
- patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
-
- // Track the stub we have created so that it will be deleted later.
- CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
- stubInfo->stubRoutine = entryLabel;
-
- // Finally patch the jump to slow case back in the hot path to jump here instead.
- CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
- RepatchBuffer repatchBuffer(m_codeBlock);
- repatchBuffer.relink(jumpLocation, entryLabel);
-
-    // We don't want to patch more than once - all future misses will go straight to cti_op_get_by_id_proto_list.
- repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
-}
-
-/* ------------------------------ END: !ENABLE / ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
-
-#endif // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
-
-void JIT::compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, RegisterID structure, RegisterID offset)
-{
- ASSERT(sizeof(((Structure*)0)->m_propertyStorageCapacity) == sizeof(int32_t));
- ASSERT(sizeof(JSObject::inlineStorageCapacity) == sizeof(int32_t));
- ASSERT(sizeof(JSValue) == 8);
-
- Jump notUsingInlineStorage = branch32(NotEqual, Address(structure, OBJECT_OFFSETOF(Structure, m_propertyStorageCapacity)), Imm32(JSObject::inlineStorageCapacity));
- loadPtr(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSObject, m_inlineStorage) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayload);
- loadPtr(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSObject, m_inlineStorage) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTag);
- Jump finishedLoad = jump();
- notUsingInlineStorage.link(this);
- loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), base);
- loadPtr(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayload);
- loadPtr(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTag);
- finishedLoad.link(this);
-}
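-
-// The same access with a runtime slot index, reusing the stand-in types from the
-// earlier sketch: 'offset' is a slot number, and TimesEight matches the 8-byte
-// JSValue size checked by the ASSERTs. The capacity compare above is modelled by
-// the 'usingInlineStorage' flag here.
-static SketchValue sketchLoadSlot(SketchObject* object, bool usingInlineStorage, unsigned slotIndex)
-{
-    SketchValue* storage = usingInlineStorage ? object->inlineStorage : object->externalStorage;
-    return storage[slotIndex]; // BaseIndex(base, offset, TimesEight, ...)
-}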
-
-void JIT::emit_op_get_by_pname(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned base = currentInstruction[2].u.operand;
- unsigned property = currentInstruction[3].u.operand;
- unsigned expected = currentInstruction[4].u.operand;
- unsigned iter = currentInstruction[5].u.operand;
- unsigned i = currentInstruction[6].u.operand;
-
- emitLoad2(property, regT1, regT0, base, regT3, regT2);
- emitJumpSlowCaseIfNotJSCell(property, regT1);
- addSlowCase(branchPtr(NotEqual, regT0, payloadFor(expected)));
- // Property registers are now available as the property is known
- emitJumpSlowCaseIfNotJSCell(base, regT3);
- emitLoadPayload(iter, regT1);
-
- // Test base's structure
- loadPtr(Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), regT0);
- addSlowCase(branchPtr(NotEqual, regT0, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructure))));
- load32(addressFor(i), regT3);
- sub32(Imm32(1), regT3);
- addSlowCase(branch32(AboveOrEqual, regT3, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_numCacheableSlots))));
- compileGetDirectOffset(regT2, regT1, regT0, regT0, regT3);
-
- emitStore(dst, regT1, regT0);
- map(m_bytecodeOffset + OPCODE_LENGTH(op_get_by_pname), dst, regT1, regT0);
-}
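-
-// Shape of the fast path above in plain C++, reusing the earlier stand-in types
-// (the cell and cached-Structure guards are elided): the bytecode counter is
-// offset by one from the slot index, hence the sub32 before the bounds check.
-static bool sketchGetByPname(SketchObject* object, bool usingInlineStorage, unsigned i, unsigned numCacheableSlots, SketchValue& result)
-{
-    unsigned slotIndex = i - 1;         // sub32(Imm32(1), regT3)
-    if (slotIndex >= numCacheableSlots) // branch32(AboveOrEqual, ...)
-        return false;                   // slow case: falls back to cti_op_get_by_val
-    SketchValue* storage = usingInlineStorage ? object->inlineStorage : object->externalStorage;
-    result = storage[slotIndex];
-    return true;
-}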
-
-void JIT::emitSlow_op_get_by_pname(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned base = currentInstruction[2].u.operand;
- unsigned property = currentInstruction[3].u.operand;
-
- linkSlowCaseIfNotJSCell(iter, property);
- linkSlowCase(iter);
- linkSlowCaseIfNotJSCell(iter, base);
- linkSlowCase(iter);
- linkSlowCase(iter);
-
- JITStubCall stubCall(this, cti_op_get_by_val);
- stubCall.addArgument(base);
- stubCall.addArgument(property);
- stubCall.call(dst);
-}
-
-} // namespace JSC
-
-#endif // USE(JSVALUE32_64)
-#endif // ENABLE(JIT)
diff --git a/JavaScriptCore/jit/JITStubCall.h b/JavaScriptCore/jit/JITStubCall.h
deleted file mode 100644
index 4478d06..0000000
--- a/JavaScriptCore/jit/JITStubCall.h
+++ /dev/null
@@ -1,237 +0,0 @@
-/*
- * Copyright (C) 2008 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef JITStubCall_h
-#define JITStubCall_h
-
-#include "MacroAssemblerCodeRef.h"
-
-#if ENABLE(JIT)
-
-namespace JSC {
-
- class JITStubCall {
- public:
- JITStubCall(JIT* jit, JSObject* (JIT_STUB *stub)(STUB_ARGS_DECLARATION))
- : m_jit(jit)
- , m_stub(stub)
- , m_returnType(Cell)
- , m_stackIndex(JITSTACKFRAME_ARGS_INDEX)
- {
- }
-
- JITStubCall(JIT* jit, JSPropertyNameIterator* (JIT_STUB *stub)(STUB_ARGS_DECLARATION))
- : m_jit(jit)
- , m_stub(stub)
- , m_returnType(Cell)
- , m_stackIndex(JITSTACKFRAME_ARGS_INDEX)
- {
- }
-
- JITStubCall(JIT* jit, void* (JIT_STUB *stub)(STUB_ARGS_DECLARATION))
- : m_jit(jit)
- , m_stub(stub)
- , m_returnType(VoidPtr)
- , m_stackIndex(JITSTACKFRAME_ARGS_INDEX)
- {
- }
-
- JITStubCall(JIT* jit, int (JIT_STUB *stub)(STUB_ARGS_DECLARATION))
- : m_jit(jit)
- , m_stub(stub)
- , m_returnType(Int)
- , m_stackIndex(JITSTACKFRAME_ARGS_INDEX)
- {
- }
-
- JITStubCall(JIT* jit, bool (JIT_STUB *stub)(STUB_ARGS_DECLARATION))
- : m_jit(jit)
- , m_stub(stub)
- , m_returnType(Int)
- , m_stackIndex(JITSTACKFRAME_ARGS_INDEX)
- {
- }
-
- JITStubCall(JIT* jit, void (JIT_STUB *stub)(STUB_ARGS_DECLARATION))
- : m_jit(jit)
- , m_stub(stub)
- , m_returnType(Void)
- , m_stackIndex(JITSTACKFRAME_ARGS_INDEX)
- {
- }
-
-#if USE(JSVALUE32_64)
- JITStubCall(JIT* jit, EncodedJSValue (JIT_STUB *stub)(STUB_ARGS_DECLARATION))
- : m_jit(jit)
- , m_stub(stub)
- , m_returnType(Value)
- , m_stackIndex(JITSTACKFRAME_ARGS_INDEX)
- {
- }
-#endif
-
- // Arguments are added first to last.
-
- void skipArgument()
- {
- m_stackIndex += stackIndexStep;
- }
-
- void addArgument(JIT::Imm32 argument)
- {
- m_jit->poke(argument, m_stackIndex);
- m_stackIndex += stackIndexStep;
- }
-
- void addArgument(JIT::ImmPtr argument)
- {
- m_jit->poke(argument, m_stackIndex);
- m_stackIndex += stackIndexStep;
- }
-
- void addArgument(JIT::RegisterID argument)
- {
- m_jit->poke(argument, m_stackIndex);
- m_stackIndex += stackIndexStep;
- }
-
-#if USE(JSVALUE32_64)
- void addArgument(const JSValue& value)
- {
- m_jit->poke(JIT::Imm32(value.payload()), m_stackIndex);
- m_jit->poke(JIT::Imm32(value.tag()), m_stackIndex + 1);
- m_stackIndex += stackIndexStep;
- }
-#endif
-
- void addArgument(JIT::RegisterID tag, JIT::RegisterID payload)
- {
- m_jit->poke(payload, m_stackIndex);
- m_jit->poke(tag, m_stackIndex + 1);
- m_stackIndex += stackIndexStep;
- }
-
-#if USE(JSVALUE32_64)
- void addArgument(unsigned srcVirtualRegister)
- {
- if (m_jit->m_codeBlock->isConstantRegisterIndex(srcVirtualRegister)) {
- addArgument(m_jit->getConstantOperand(srcVirtualRegister));
- return;
- }
-
- m_jit->emitLoad(srcVirtualRegister, JIT::regT1, JIT::regT0);
- addArgument(JIT::regT1, JIT::regT0);
- }
-
- void getArgument(size_t argumentNumber, JIT::RegisterID tag, JIT::RegisterID payload)
- {
- size_t stackIndex = JITSTACKFRAME_ARGS_INDEX + (argumentNumber * stackIndexStep);
- m_jit->peek(payload, stackIndex);
- m_jit->peek(tag, stackIndex + 1);
- }
-#else
- void addArgument(unsigned src, JIT::RegisterID scratchRegister) // src is a virtual register.
- {
- if (m_jit->m_codeBlock->isConstantRegisterIndex(src))
- addArgument(JIT::ImmPtr(JSValue::encode(m_jit->m_codeBlock->getConstant(src))));
- else {
- m_jit->loadPtr(JIT::Address(JIT::callFrameRegister, src * sizeof(Register)), scratchRegister);
- addArgument(scratchRegister);
- }
- m_jit->killLastResultRegister();
- }
-#endif
-
- JIT::Call call()
- {
-#if ENABLE(OPCODE_SAMPLING)
- if (m_jit->m_bytecodeOffset != (unsigned)-1)
- m_jit->sampleInstruction(m_jit->m_codeBlock->instructions().begin() + m_jit->m_bytecodeOffset, true);
-#endif
-
- m_jit->restoreArgumentReference();
- JIT::Call call = m_jit->call();
- m_jit->m_calls.append(CallRecord(call, m_jit->m_bytecodeOffset, m_stub.value()));
-
-#if ENABLE(OPCODE_SAMPLING)
- if (m_jit->m_bytecodeOffset != (unsigned)-1)
- m_jit->sampleInstruction(m_jit->m_codeBlock->instructions().begin() + m_jit->m_bytecodeOffset, false);
-#endif
-
-#if USE(JSVALUE32_64)
- m_jit->unmap();
-#else
- m_jit->killLastResultRegister();
-#endif
- return call;
- }
-
-#if USE(JSVALUE32_64)
- JIT::Call call(unsigned dst) // dst is a virtual register.
- {
- ASSERT(m_returnType == Value || m_returnType == Cell);
- JIT::Call call = this->call();
- if (m_returnType == Value)
- m_jit->emitStore(dst, JIT::regT1, JIT::regT0);
- else
- m_jit->emitStoreCell(dst, JIT::returnValueRegister);
- return call;
- }
-#else
- JIT::Call call(unsigned dst) // dst is a virtual register.
- {
- ASSERT(m_returnType == VoidPtr || m_returnType == Cell);
- JIT::Call call = this->call();
- m_jit->emitPutVirtualRegister(dst);
- return call;
- }
-#endif
-
- JIT::Call call(JIT::RegisterID dst) // dst is a machine register.
- {
-#if USE(JSVALUE32_64)
- ASSERT(m_returnType == Value || m_returnType == VoidPtr || m_returnType == Int || m_returnType == Cell);
-#else
- ASSERT(m_returnType == VoidPtr || m_returnType == Int || m_returnType == Cell);
-#endif
- JIT::Call call = this->call();
- if (dst != JIT::returnValueRegister)
- m_jit->move(JIT::returnValueRegister, dst);
- return call;
- }
-
- private:
- static const size_t stackIndexStep = sizeof(EncodedJSValue) == 2 * sizeof(void*) ? 2 : 1;
-
- JIT* m_jit;
- FunctionPtr m_stub;
- enum { Void, VoidPtr, Int, Value, Cell } m_returnType;
- size_t m_stackIndex;
- };
-}
-
-#endif // ENABLE(JIT)
-
-#endif // JITStubCall_h
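-
-// Typical call sequence against the class above (shape only; cti_op_example is a
-// placeholder stub name, and the register pairing follows the JSVALUE32_64 path):
-//
-//     JITStubCall stubCall(this, cti_op_example);
-//     stubCall.addArgument(regT1, regT0); // tag, payload -> the next stub frame slots
-//     stubCall.addArgument(Imm32(42));
-//     stubCall.call(dst);                 // plants the call, then stores the result to dst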
diff --git a/JavaScriptCore/jit/JITStubs.cpp b/JavaScriptCore/jit/JITStubs.cpp
deleted file mode 100644
index 0959a6e..0000000
--- a/JavaScriptCore/jit/JITStubs.cpp
+++ /dev/null
@@ -1,3638 +0,0 @@
-/*
- * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
- * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
- * Copyright (C) Research In Motion Limited 2010. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
- * its contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-
-#if ENABLE(JIT)
-#include "JITStubs.h"
-
-#include "Arguments.h"
-#include "CallFrame.h"
-#include "CodeBlock.h"
-#include "Collector.h"
-#include "Debugger.h"
-#include "ExceptionHelpers.h"
-#include "GetterSetter.h"
-#include "GlobalEvalFunction.h"
-#include "JIT.h"
-#include "JSActivation.h"
-#include "JSArray.h"
-#include "JSByteArray.h"
-#include "JSFunction.h"
-#include "JSGlobalObjectFunctions.h"
-#include "JSNotAnObject.h"
-#include "JSPropertyNameIterator.h"
-#include "JSStaticScopeObject.h"
-#include "JSString.h"
-#include "ObjectPrototype.h"
-#include "Operations.h"
-#include "Parser.h"
-#include "Profiler.h"
-#include "RegExpObject.h"
-#include "RegExpPrototype.h"
-#include "Register.h"
-#include "SamplingTool.h"
-#include <wtf/StdLibExtras.h>
-#include <stdarg.h>
-#include <stdio.h>
-
-using namespace std;
-
-namespace JSC {
-
-#if OS(DARWIN) || OS(WINDOWS)
-#define SYMBOL_STRING(name) "_" #name
-#else
-#define SYMBOL_STRING(name) #name
-#endif
-
-#if OS(IOS)
-#define THUMB_FUNC_PARAM(name) SYMBOL_STRING(name)
-#else
-#define THUMB_FUNC_PARAM(name)
-#endif
-
-#if OS(LINUX) && CPU(X86_64)
-#define SYMBOL_STRING_RELOCATION(name) #name "@plt"
-#elif OS(DARWIN)
-#define SYMBOL_STRING_RELOCATION(name) "_" #name
-#elif CPU(X86) && COMPILER(MINGW)
-#define SYMBOL_STRING_RELOCATION(name) "@" #name "@4"
-#else
-#define SYMBOL_STRING_RELOCATION(name) #name
-#endif
-
-#if OS(DARWIN)
- // Mach-O platform
-#define HIDE_SYMBOL(name) ".private_extern _" #name
-#elif OS(AIX)
- // IBM's own file format
-#define HIDE_SYMBOL(name) ".lglobl " #name
-#elif OS(LINUX) \
- || OS(FREEBSD) \
- || OS(OPENBSD) \
- || OS(SOLARIS) \
- || (OS(HPUX) && CPU(IA64)) \
- || OS(SYMBIAN) \
- || OS(NETBSD)
- // ELF platform
-#define HIDE_SYMBOL(name) ".hidden " #name
-#else
-#define HIDE_SYMBOL(name)
-#endif
-
-#if USE(JSVALUE32_64)
-
-#if COMPILER(GCC) && CPU(X86)
-
-// These ASSERTs remind you that, if you change the layout of JITStackFrame, you
-// need to change the assembly trampolines below to match.
-COMPILE_ASSERT(offsetof(struct JITStackFrame, code) % 16 == 0x0, JITStackFrame_maintains_16byte_stack_alignment);
-COMPILE_ASSERT(offsetof(struct JITStackFrame, savedEBX) == 0x3c, JITStackFrame_stub_argument_space_matches_ctiTrampoline);
-COMPILE_ASSERT(offsetof(struct JITStackFrame, callFrame) == 0x58, JITStackFrame_callFrame_offset_matches_ctiTrampoline);
-COMPILE_ASSERT(offsetof(struct JITStackFrame, code) == 0x50, JITStackFrame_code_offset_matches_ctiTrampoline);
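-
-// The same guard written against a stand-in frame, to show the idiom (sketch
-// only; the checks above are the authoritative ones): if the struct layout
-// drifts, compilation fails before the hard-coded 0x3c/0x50/0x58 constants in
-// the trampolines below can go stale.
-struct SketchStackFrame { char stubArgs[0x50]; void* code; };
-COMPILE_ASSERT(offsetof(struct SketchStackFrame, code) == 0x50, SketchStackFrame_code_offset_matches_trampoline);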
-
-asm (
-".text\n"
-".globl " SYMBOL_STRING(ctiTrampoline) "\n"
-HIDE_SYMBOL(ctiTrampoline) "\n"
-SYMBOL_STRING(ctiTrampoline) ":" "\n"
- "pushl %ebp" "\n"
- "movl %esp, %ebp" "\n"
- "pushl %esi" "\n"
- "pushl %edi" "\n"
- "pushl %ebx" "\n"
- "subl $0x3c, %esp" "\n"
- "movl $512, %esi" "\n"
- "movl 0x58(%esp), %edi" "\n"
- "call *0x50(%esp)" "\n"
- "addl $0x3c, %esp" "\n"
- "popl %ebx" "\n"
- "popl %edi" "\n"
- "popl %esi" "\n"
- "popl %ebp" "\n"
- "ret" "\n"
-);
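-
-// Frame arithmetic behind the constants above: four 4-byte pushes plus the 0x3c
-// bytes of stub-argument space put the caller's return address at esp+0x4c, so
-// the incoming arguments land at esp+0x50 (code) and esp+0x58 (callFrame),
-// exactly the offsets asserted against JITStackFrame.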
-
-asm (
-".globl " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
-HIDE_SYMBOL(ctiVMThrowTrampoline) "\n"
-SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
- "movl %esp, %ecx" "\n"
- "call " SYMBOL_STRING_RELOCATION(cti_vm_throw) "\n"
- "int3" "\n"
-);
-
-asm (
-".globl " SYMBOL_STRING(ctiOpThrowNotCaught) "\n"
-HIDE_SYMBOL(ctiOpThrowNotCaught) "\n"
-SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
- "addl $0x3c, %esp" "\n"
- "popl %ebx" "\n"
- "popl %edi" "\n"
- "popl %esi" "\n"
- "popl %ebp" "\n"
- "ret" "\n"
-);
-
-#elif COMPILER(GCC) && CPU(X86_64)
-
-// These ASSERTs remind you that, if you change the layout of JITStackFrame, you
-// need to change the assembly trampolines below to match.
-COMPILE_ASSERT(offsetof(struct JITStackFrame, code) % 32 == 0x0, JITStackFrame_maintains_32byte_stack_alignment);
-COMPILE_ASSERT(offsetof(struct JITStackFrame, savedRBX) == 0x48, JITStackFrame_stub_argument_space_matches_ctiTrampoline);
-COMPILE_ASSERT(offsetof(struct JITStackFrame, callFrame) == 0x90, JITStackFrame_callFrame_offset_matches_ctiTrampoline);
-COMPILE_ASSERT(offsetof(struct JITStackFrame, code) == 0x80, JITStackFrame_code_offset_matches_ctiTrampoline);
-
-asm (
-".globl " SYMBOL_STRING(ctiTrampoline) "\n"
-HIDE_SYMBOL(ctiTrampoline) "\n"
-SYMBOL_STRING(ctiTrampoline) ":" "\n"
- "pushq %rbp" "\n"
- "movq %rsp, %rbp" "\n"
- "pushq %r12" "\n"
- "pushq %r13" "\n"
- "pushq %r14" "\n"
- "pushq %r15" "\n"
- "pushq %rbx" "\n"
- "subq $0x48, %rsp" "\n"
- "movq $512, %r12" "\n"
- "movq $0xFFFF000000000000, %r14" "\n"
- "movq $0xFFFF000000000002, %r15" "\n"
- "movq 0x90(%rsp), %r13" "\n"
- "call *0x80(%rsp)" "\n"
- "addq $0x48, %rsp" "\n"
- "popq %rbx" "\n"
- "popq %r15" "\n"
- "popq %r14" "\n"
- "popq %r13" "\n"
- "popq %r12" "\n"
- "popq %rbp" "\n"
- "ret" "\n"
-);
-
-asm (
-".globl " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
-HIDE_SYMBOL(ctiVMThrowTrampoline) "\n"
-SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
- "movq %rsp, %rdi" "\n"
- "call " SYMBOL_STRING_RELOCATION(cti_vm_throw) "\n"
- "int3" "\n"
-);
-
-asm (
-".globl " SYMBOL_STRING(ctiOpThrowNotCaught) "\n"
-HIDE_SYMBOL(ctiOpThrowNotCaught) "\n"
-SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
- "addq $0x48, %rsp" "\n"
- "popq %rbx" "\n"
- "popq %r15" "\n"
- "popq %r14" "\n"
- "popq %r13" "\n"
- "popq %r12" "\n"
- "popq %rbp" "\n"
- "ret" "\n"
-);
-
-#elif COMPILER(GCC) && CPU(ARM_THUMB2)
-
-#define THUNK_RETURN_ADDRESS_OFFSET 0x38
-#define PRESERVED_RETURN_ADDRESS_OFFSET 0x3C
-#define PRESERVED_R4_OFFSET 0x40
-#define PRESERVED_R5_OFFSET 0x44
-#define PRESERVED_R6_OFFSET 0x48
-#define REGISTER_FILE_OFFSET 0x4C
-#define CALLFRAME_OFFSET 0x50
-#define EXCEPTION_OFFSET 0x54
-#define ENABLE_PROFILER_REFERENCE_OFFSET 0x58
-
-#elif (COMPILER(GCC) || COMPILER(RVCT)) && CPU(ARM_TRADITIONAL)
-
-#define THUNK_RETURN_ADDRESS_OFFSET 64
-#define PRESERVEDR4_OFFSET 68
-
-#elif COMPILER(MSVC) && CPU(X86)
-
-// These ASSERTs remind you that, if you change the layout of JITStackFrame, you
-// need to change the assembly trampolines below to match.
-COMPILE_ASSERT(offsetof(struct JITStackFrame, code) % 16 == 0x0, JITStackFrame_maintains_16byte_stack_alignment);
-COMPILE_ASSERT(offsetof(struct JITStackFrame, savedEBX) == 0x3c, JITStackFrame_stub_argument_space_matches_ctiTrampoline);
-COMPILE_ASSERT(offsetof(struct JITStackFrame, callFrame) == 0x58, JITStackFrame_callFrame_offset_matches_ctiTrampoline);
-COMPILE_ASSERT(offsetof(struct JITStackFrame, code) == 0x50, JITStackFrame_code_offset_matches_ctiTrampoline);
-
-extern "C" {
-
- __declspec(naked) EncodedJSValue ctiTrampoline(void* code, RegisterFile*, CallFrame*, void* /*unused1*/, Profiler**, JSGlobalData*)
- {
- __asm {
- push ebp;
- mov ebp, esp;
- push esi;
- push edi;
- push ebx;
- sub esp, 0x3c;
- mov esi, 512;
- mov ecx, esp;
- mov edi, [esp + 0x58];
- call [esp + 0x50];
- add esp, 0x3c;
- pop ebx;
- pop edi;
- pop esi;
- pop ebp;
- ret;
- }
- }
-
- __declspec(naked) void ctiVMThrowTrampoline()
- {
- __asm {
- mov ecx, esp;
- call cti_vm_throw;
- add esp, 0x3c;
- pop ebx;
- pop edi;
- pop esi;
- pop ebp;
- ret;
- }
- }
-
- __declspec(naked) void ctiOpThrowNotCaught()
- {
- __asm {
- add esp, 0x3c;
- pop ebx;
- pop edi;
- pop esi;
- pop ebp;
- ret;
- }
- }
-}
-
-#elif COMPILER(MSVC) && CPU(ARM_TRADITIONAL)
-
-#define THUNK_RETURN_ADDRESS_OFFSET 64
-#define PRESERVEDR4_OFFSET 68
-// See DEFINE_STUB_FUNCTION for more information.
-
-#elif CPU(MIPS)
-
-#define PRESERVED_GP_OFFSET 60
-#define PRESERVED_S0_OFFSET 64
-#define PRESERVED_S1_OFFSET 68
-#define PRESERVED_S2_OFFSET 72
-#define PRESERVED_RETURN_ADDRESS_OFFSET 76
-#define THUNK_RETURN_ADDRESS_OFFSET 80
-#define REGISTER_FILE_OFFSET 84
-#define CALLFRAME_OFFSET 88
-#define EXCEPTION_OFFSET 92
-#define ENABLE_PROFILER_REFERENCE_OFFSET 96
-#define GLOBAL_DATA_OFFSET 100
-#define STACK_LENGTH 104
-
-#else
- #error "JIT not supported on this platform."
-#endif
-
-#else // USE(JSVALUE32_64)
-
-#if COMPILER(GCC) && CPU(X86)
-
-// These ASSERTs remind you that, if you change the layout of JITStackFrame, you
-// need to change the assembly trampolines below to match.
-COMPILE_ASSERT(offsetof(struct JITStackFrame, callFrame) == 0x38, JITStackFrame_callFrame_offset_matches_ctiTrampoline);
-COMPILE_ASSERT(offsetof(struct JITStackFrame, code) == 0x30, JITStackFrame_code_offset_matches_ctiTrampoline);
-COMPILE_ASSERT(offsetof(struct JITStackFrame, savedEBX) == 0x1c, JITStackFrame_stub_argument_space_matches_ctiTrampoline);
-
-asm (
-".text\n"
-".globl " SYMBOL_STRING(ctiTrampoline) "\n"
-HIDE_SYMBOL(ctiTrampoline) "\n"
-SYMBOL_STRING(ctiTrampoline) ":" "\n"
- "pushl %ebp" "\n"
- "movl %esp, %ebp" "\n"
- "pushl %esi" "\n"
- "pushl %edi" "\n"
- "pushl %ebx" "\n"
- "subl $0x1c, %esp" "\n"
- "movl $512, %esi" "\n"
- "movl 0x38(%esp), %edi" "\n"
- "call *0x30(%esp)" "\n"
- "addl $0x1c, %esp" "\n"
- "popl %ebx" "\n"
- "popl %edi" "\n"
- "popl %esi" "\n"
- "popl %ebp" "\n"
- "ret" "\n"
-);
-
-asm (
-".globl " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
-HIDE_SYMBOL(ctiVMThrowTrampoline) "\n"
-SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
- "movl %esp, %ecx" "\n"
- "call " SYMBOL_STRING_RELOCATION(cti_vm_throw) "\n"
- "int3" "\n"
-);
-
-asm (
-".globl " SYMBOL_STRING(ctiOpThrowNotCaught) "\n"
-HIDE_SYMBOL(ctiOpThrowNotCaught) "\n"
-SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
- "addl $0x1c, %esp" "\n"
- "popl %ebx" "\n"
- "popl %edi" "\n"
- "popl %esi" "\n"
- "popl %ebp" "\n"
- "ret" "\n"
-);
-
-#elif COMPILER(GCC) && CPU(X86_64)
-
-// These ASSERTs remind you that, if you change the layout of JITStackFrame, you
-// need to change the assembly trampolines below to match.
-COMPILE_ASSERT(offsetof(struct JITStackFrame, callFrame) == 0x58, JITStackFrame_callFrame_offset_matches_ctiTrampoline);
-COMPILE_ASSERT(offsetof(struct JITStackFrame, code) == 0x48, JITStackFrame_code_offset_matches_ctiTrampoline);
-COMPILE_ASSERT(offsetof(struct JITStackFrame, savedRBX) == 0x78, JITStackFrame_stub_argument_space_matches_ctiTrampoline);
-
-asm (
-".text\n"
-".globl " SYMBOL_STRING(ctiTrampoline) "\n"
-HIDE_SYMBOL(ctiTrampoline) "\n"
-SYMBOL_STRING(ctiTrampoline) ":" "\n"
- "pushq %rbp" "\n"
- "movq %rsp, %rbp" "\n"
- "pushq %r12" "\n"
- "pushq %r13" "\n"
- "pushq %r14" "\n"
- "pushq %r15" "\n"
- "pushq %rbx" "\n"
- // Form the JIT stubs area
- "pushq %r9" "\n"
- "pushq %r8" "\n"
- "pushq %rcx" "\n"
- "pushq %rdx" "\n"
- "pushq %rsi" "\n"
- "pushq %rdi" "\n"
- "subq $0x48, %rsp" "\n"
- "movq $512, %r12" "\n"
- "movq $0xFFFF000000000000, %r14" "\n"
- "movq $0xFFFF000000000002, %r15" "\n"
- "movq %rdx, %r13" "\n"
- "call *%rdi" "\n"
- "addq $0x78, %rsp" "\n"
- "popq %rbx" "\n"
- "popq %r15" "\n"
- "popq %r14" "\n"
- "popq %r13" "\n"
- "popq %r12" "\n"
- "popq %rbp" "\n"
- "ret" "\n"
-);
-
-asm (
-".globl " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
-HIDE_SYMBOL(ctiVMThrowTrampoline) "\n"
-SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
- "movq %rsp, %rdi" "\n"
- "call " SYMBOL_STRING_RELOCATION(cti_vm_throw) "\n"
- "int3" "\n"
-);
-
-asm (
-".globl " SYMBOL_STRING(ctiOpThrowNotCaught) "\n"
-HIDE_SYMBOL(ctiOpThrowNotCaught) "\n"
-SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
- "addq $0x78, %rsp" "\n"
- "popq %rbx" "\n"
- "popq %r15" "\n"
- "popq %r14" "\n"
- "popq %r13" "\n"
- "popq %r12" "\n"
- "popq %rbp" "\n"
- "ret" "\n"
-);
-
-#elif COMPILER(GCC) && CPU(ARM_THUMB2)
-
-#define THUNK_RETURN_ADDRESS_OFFSET 0x20
-#define PRESERVED_RETURN_ADDRESS_OFFSET 0x24
-#define PRESERVED_R4_OFFSET 0x28
-#define PRESERVED_R5_OFFSET 0x2C
-#define PRESERVED_R6_OFFSET 0x30
-#define REGISTER_FILE_OFFSET 0x34
-#define CALLFRAME_OFFSET 0x38
-#define EXCEPTION_OFFSET 0x3C
-#define ENABLE_PROFILER_REFERENCE_OFFSET 0x40
-
-#elif (COMPILER(GCC) || COMPILER(RVCT)) && CPU(ARM_TRADITIONAL)
-
-#define THUNK_RETURN_ADDRESS_OFFSET 32
-#define PRESERVEDR4_OFFSET 36
-
-#elif CPU(MIPS)
-
-#define PRESERVED_GP_OFFSET 28
-#define PRESERVED_S0_OFFSET 32
-#define PRESERVED_S1_OFFSET 36
-#define PRESERVED_S2_OFFSET 40
-#define PRESERVED_RETURN_ADDRESS_OFFSET 44
-#define THUNK_RETURN_ADDRESS_OFFSET 48
-#define REGISTER_FILE_OFFSET 52
-#define CALLFRAME_OFFSET 56
-#define EXCEPTION_OFFSET 60
-#define ENABLE_PROFILER_REFERENCE_OFFSET 64
-#define GLOBAL_DATA_OFFSET 68
-#define STACK_LENGTH 72
-
-#elif COMPILER(MSVC) && CPU(X86)
-
-// These ASSERTs remind you that, if you change the layout of JITStackFrame, you
-// need to change the assembly trampolines below to match.
-COMPILE_ASSERT(offsetof(struct JITStackFrame, callFrame) == 0x38, JITStackFrame_callFrame_offset_matches_ctiTrampoline);
-COMPILE_ASSERT(offsetof(struct JITStackFrame, code) == 0x30, JITStackFrame_code_offset_matches_ctiTrampoline);
-COMPILE_ASSERT(offsetof(struct JITStackFrame, savedEBX) == 0x1c, JITStackFrame_stub_argument_space_matches_ctiTrampoline);
-
-extern "C" {
-
- __declspec(naked) EncodedJSValue ctiTrampoline(void* code, RegisterFile*, CallFrame*, void* /*unused1*/, Profiler**, JSGlobalData*)
- {
- __asm {
- push ebp;
- mov ebp, esp;
- push esi;
- push edi;
- push ebx;
- sub esp, 0x1c;
- mov esi, 512;
- mov ecx, esp;
- mov edi, [esp + 0x38];
- call [esp + 0x30];
- add esp, 0x1c;
- pop ebx;
- pop edi;
- pop esi;
- pop ebp;
- ret;
- }
- }
-
- __declspec(naked) void ctiVMThrowTrampoline()
- {
- __asm {
- mov ecx, esp;
- call cti_vm_throw;
- add esp, 0x1c;
- pop ebx;
- pop edi;
- pop esi;
- pop ebp;
- ret;
- }
- }
-
- __declspec(naked) void ctiOpThrowNotCaught()
- {
- __asm {
- add esp, 0x1c;
- pop ebx;
- pop edi;
- pop esi;
- pop ebp;
- ret;
- }
- }
-}
-
-#elif COMPILER(MSVC) && CPU(ARM_TRADITIONAL)
-
-#define THUNK_RETURN_ADDRESS_OFFSET 32
-#define PRESERVEDR4_OFFSET 36
-// See DEFINE_STUB_FUNCTION for more information.
-
-#else
- #error "JIT not supported on this platform."
-#endif
-
-#endif // USE(JSVALUE32_64)
-
-#if CPU(MIPS)
-asm (
-".text" "\n"
-".align 2" "\n"
-".set noreorder" "\n"
-".set nomacro" "\n"
-".set nomips16" "\n"
-".globl " SYMBOL_STRING(ctiTrampoline) "\n"
-".ent " SYMBOL_STRING(ctiTrampoline) "\n"
-SYMBOL_STRING(ctiTrampoline) ":" "\n"
- "addiu $29,$29,-" STRINGIZE_VALUE_OF(STACK_LENGTH) "\n"
- "sw $31," STRINGIZE_VALUE_OF(PRESERVED_RETURN_ADDRESS_OFFSET) "($29)" "\n"
- "sw $18," STRINGIZE_VALUE_OF(PRESERVED_S2_OFFSET) "($29)" "\n"
- "sw $17," STRINGIZE_VALUE_OF(PRESERVED_S1_OFFSET) "($29)" "\n"
- "sw $16," STRINGIZE_VALUE_OF(PRESERVED_S0_OFFSET) "($29)" "\n"
-#if WTF_MIPS_PIC
- "sw $28," STRINGIZE_VALUE_OF(PRESERVED_GP_OFFSET) "($29)" "\n"
-#endif
- "move $16,$6 # set callFrameRegister" "\n"
- "li $17,512 # set timeoutCheckRegister" "\n"
- "move $25,$4 # move executableAddress to t9" "\n"
- "sw $5," STRINGIZE_VALUE_OF(REGISTER_FILE_OFFSET) "($29) # store registerFile to current stack" "\n"
-    "sw $6," STRINGIZE_VALUE_OF(CALLFRAME_OFFSET) "($29)   # store callFrame to current stack" "\n"
- "sw $7," STRINGIZE_VALUE_OF(EXCEPTION_OFFSET) "($29) # store exception to current stack" "\n"
- "lw $8," STRINGIZE_VALUE_OF(STACK_LENGTH + 16) "($29) # load enableProfilerReference from previous stack" "\n"
- "lw $9," STRINGIZE_VALUE_OF(STACK_LENGTH + 20) "($29) # load globalData from previous stack" "\n"
- "sw $8," STRINGIZE_VALUE_OF(ENABLE_PROFILER_REFERENCE_OFFSET) "($29) # store enableProfilerReference to current stack" "\n"
- "jalr $25" "\n"
- "sw $9," STRINGIZE_VALUE_OF(GLOBAL_DATA_OFFSET) "($29) # store globalData to current stack" "\n"
- "lw $16," STRINGIZE_VALUE_OF(PRESERVED_S0_OFFSET) "($29)" "\n"
- "lw $17," STRINGIZE_VALUE_OF(PRESERVED_S1_OFFSET) "($29)" "\n"
- "lw $18," STRINGIZE_VALUE_OF(PRESERVED_S2_OFFSET) "($29)" "\n"
- "lw $31," STRINGIZE_VALUE_OF(PRESERVED_RETURN_ADDRESS_OFFSET) "($29)" "\n"
- "jr $31" "\n"
- "addiu $29,$29," STRINGIZE_VALUE_OF(STACK_LENGTH) "\n"
-".set reorder" "\n"
-".set macro" "\n"
-".end " SYMBOL_STRING(ctiTrampoline) "\n"
-);
-
-asm (
-".text" "\n"
-".align 2" "\n"
-".set noreorder" "\n"
-".set nomacro" "\n"
-".set nomips16" "\n"
-".globl " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
-".ent " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
-SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
-#if WTF_MIPS_PIC
- "lw $28," STRINGIZE_VALUE_OF(PRESERVED_GP_OFFSET) "($29)" "\n"
-".set macro" "\n"
- "la $25," SYMBOL_STRING(cti_vm_throw) "\n"
-".set nomacro" "\n"
- "bal " SYMBOL_STRING(cti_vm_throw) "\n"
- "move $4,$29" "\n"
-#else
- "jal " SYMBOL_STRING(cti_vm_throw) "\n"
- "move $4,$29" "\n"
-#endif
- "lw $16," STRINGIZE_VALUE_OF(PRESERVED_S0_OFFSET) "($29)" "\n"
- "lw $17," STRINGIZE_VALUE_OF(PRESERVED_S1_OFFSET) "($29)" "\n"
- "lw $18," STRINGIZE_VALUE_OF(PRESERVED_S2_OFFSET) "($29)" "\n"
- "lw $31," STRINGIZE_VALUE_OF(PRESERVED_RETURN_ADDRESS_OFFSET) "($29)" "\n"
- "jr $31" "\n"
- "addiu $29,$29," STRINGIZE_VALUE_OF(STACK_LENGTH) "\n"
-".set reorder" "\n"
-".set macro" "\n"
-".end " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
-);
-
-asm (
-".text" "\n"
-".align 2" "\n"
-".set noreorder" "\n"
-".set nomacro" "\n"
-".set nomips16" "\n"
-".globl " SYMBOL_STRING(ctiOpThrowNotCaught) "\n"
-".ent " SYMBOL_STRING(ctiOpThrowNotCaught) "\n"
-SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
- "lw $16," STRINGIZE_VALUE_OF(PRESERVED_S0_OFFSET) "($29)" "\n"
- "lw $17," STRINGIZE_VALUE_OF(PRESERVED_S1_OFFSET) "($29)" "\n"
- "lw $18," STRINGIZE_VALUE_OF(PRESERVED_S2_OFFSET) "($29)" "\n"
- "lw $31," STRINGIZE_VALUE_OF(PRESERVED_RETURN_ADDRESS_OFFSET) "($29)" "\n"
- "jr $31" "\n"
- "addiu $29,$29," STRINGIZE_VALUE_OF(STACK_LENGTH) "\n"
-".set reorder" "\n"
-".set macro" "\n"
-".end " SYMBOL_STRING(ctiOpThrowNotCaught) "\n"
-);
-#endif
-
-#if COMPILER(GCC) && CPU(ARM_THUMB2)
-
-asm (
-".text" "\n"
-".align 2" "\n"
-".globl " SYMBOL_STRING(ctiTrampoline) "\n"
-HIDE_SYMBOL(ctiTrampoline) "\n"
-".thumb" "\n"
-".thumb_func " THUMB_FUNC_PARAM(ctiTrampoline) "\n"
-SYMBOL_STRING(ctiTrampoline) ":" "\n"
- "sub sp, sp, #" STRINGIZE_VALUE_OF(ENABLE_PROFILER_REFERENCE_OFFSET) "\n"
- "str lr, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_RETURN_ADDRESS_OFFSET) "]" "\n"
- "str r4, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R4_OFFSET) "]" "\n"
- "str r5, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R5_OFFSET) "]" "\n"
- "str r6, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R6_OFFSET) "]" "\n"
- "str r1, [sp, #" STRINGIZE_VALUE_OF(REGISTER_FILE_OFFSET) "]" "\n"
- "str r2, [sp, #" STRINGIZE_VALUE_OF(CALLFRAME_OFFSET) "]" "\n"
- "str r3, [sp, #" STRINGIZE_VALUE_OF(EXCEPTION_OFFSET) "]" "\n"
- "cpy r5, r2" "\n"
- "mov r6, #512" "\n"
- "blx r0" "\n"
- "ldr r6, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R6_OFFSET) "]" "\n"
- "ldr r5, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R5_OFFSET) "]" "\n"
- "ldr r4, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R4_OFFSET) "]" "\n"
- "ldr lr, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_RETURN_ADDRESS_OFFSET) "]" "\n"
- "add sp, sp, #" STRINGIZE_VALUE_OF(ENABLE_PROFILER_REFERENCE_OFFSET) "\n"
- "bx lr" "\n"
-);
-
-asm (
-".text" "\n"
-".align 2" "\n"
-".globl " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
-HIDE_SYMBOL(ctiVMThrowTrampoline) "\n"
-".thumb" "\n"
-".thumb_func " THUMB_FUNC_PARAM(ctiVMThrowTrampoline) "\n"
-SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
- "cpy r0, sp" "\n"
- "bl " SYMBOL_STRING_RELOCATION(cti_vm_throw) "\n"
- "ldr r6, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R6_OFFSET) "]" "\n"
- "ldr r5, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R5_OFFSET) "]" "\n"
- "ldr r4, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R4_OFFSET) "]" "\n"
- "ldr lr, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_RETURN_ADDRESS_OFFSET) "]" "\n"
- "add sp, sp, #" STRINGIZE_VALUE_OF(ENABLE_PROFILER_REFERENCE_OFFSET) "\n"
- "bx lr" "\n"
-);
-
-asm (
-".text" "\n"
-".align 2" "\n"
-".globl " SYMBOL_STRING(ctiOpThrowNotCaught) "\n"
-HIDE_SYMBOL(ctiOpThrowNotCaught) "\n"
-".thumb" "\n"
-".thumb_func " THUMB_FUNC_PARAM(ctiOpThrowNotCaught) "\n"
-SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
- "ldr r6, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R6_OFFSET) "]" "\n"
- "ldr r5, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R5_OFFSET) "]" "\n"
- "ldr r4, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R4_OFFSET) "]" "\n"
- "ldr lr, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_RETURN_ADDRESS_OFFSET) "]" "\n"
- "add sp, sp, #" STRINGIZE_VALUE_OF(ENABLE_PROFILER_REFERENCE_OFFSET) "\n"
- "bx lr" "\n"
-);
-
-#elif COMPILER(GCC) && CPU(ARM_TRADITIONAL)
-
-asm (
-".globl " SYMBOL_STRING(ctiTrampoline) "\n"
-HIDE_SYMBOL(ctiTrampoline) "\n"
-SYMBOL_STRING(ctiTrampoline) ":" "\n"
- "stmdb sp!, {r1-r3}" "\n"
- "stmdb sp!, {r4-r8, lr}" "\n"
- "sub sp, sp, #" STRINGIZE_VALUE_OF(PRESERVEDR4_OFFSET) "\n"
- "mov r4, r2" "\n"
- "mov r5, #512" "\n"
- // r0 contains the code
- "mov lr, pc" "\n"
- "mov pc, r0" "\n"
- "add sp, sp, #" STRINGIZE_VALUE_OF(PRESERVEDR4_OFFSET) "\n"
- "ldmia sp!, {r4-r8, lr}" "\n"
- "add sp, sp, #12" "\n"
- "mov pc, lr" "\n"
-);
-
-asm (
-".globl " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
-HIDE_SYMBOL(ctiVMThrowTrampoline) "\n"
-SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
- "mov r0, sp" "\n"
- "bl " SYMBOL_STRING(cti_vm_throw) "\n"
-
-// Both trampolines share the same return sequence; ctiVMThrowTrampoline falls through into ctiOpThrowNotCaught.
-".globl " SYMBOL_STRING(ctiOpThrowNotCaught) "\n"
-HIDE_SYMBOL(ctiOpThrowNotCaught) "\n"
-SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
- "add sp, sp, #" STRINGIZE_VALUE_OF(PRESERVEDR4_OFFSET) "\n"
- "ldmia sp!, {r4-r8, lr}" "\n"
- "add sp, sp, #12" "\n"
- "mov pc, lr" "\n"
-);
-
-#elif COMPILER(RVCT) && CPU(ARM_TRADITIONAL)
-
-__asm EncodedJSValue ctiTrampoline(void*, RegisterFile*, CallFrame*, void* /*unused1*/, Profiler**, JSGlobalData*)
-{
- ARM
- stmdb sp!, {r1-r3}
- stmdb sp!, {r4-r8, lr}
- sub sp, sp, # PRESERVEDR4_OFFSET
- mov r4, r2
- mov r5, #512
- mov lr, pc
- bx r0
- add sp, sp, # PRESERVEDR4_OFFSET
- ldmia sp!, {r4-r8, lr}
- add sp, sp, #12
- bx lr
-}
-
-__asm void ctiVMThrowTrampoline()
-{
- ARM
- PRESERVE8
- mov r0, sp
- bl cti_vm_throw
- add sp, sp, # PRESERVEDR4_OFFSET
- ldmia sp!, {r4-r8, lr}
- add sp, sp, #12
- bx lr
-}
-
-__asm void ctiOpThrowNotCaught()
-{
- ARM
- add sp, sp, # PRESERVEDR4_OFFSET
- ldmia sp!, {r4-r8, lr}
- add sp, sp, #12
- bx lr
-}
-#endif
-
-#if ENABLE(OPCODE_SAMPLING)
- #define CTI_SAMPLER stackFrame.globalData->interpreter->sampler()
-#else
- #define CTI_SAMPLER 0
-#endif
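-// When opcode sampling is disabled CTI_SAMPLER is 0, so the SamplingTool records
-// taken around host calls below are effectively no-ops.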
-
-JITThunks::JITThunks(JSGlobalData* globalData)
-{
- if (!globalData->executableAllocator.isValid())
- return;
-
- JIT::compileCTIMachineTrampolines(globalData, &m_executablePool, &m_trampolineStructure);
- ASSERT(m_executablePool);
-#if CPU(ARM_THUMB2)
- // Unfortunately, the ARM compiler does not like the use of offsetof on JITStackFrame (since it contains non-POD types),
- // and the OBJECT_OFFSETOF macro does not appear constant enough for it to be happy with its use in COMPILE_ASSERT
- // macros, so these are runtime ASSERTs instead.
- ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedReturnAddress) == PRESERVED_RETURN_ADDRESS_OFFSET);
- ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedR4) == PRESERVED_R4_OFFSET);
- ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedR5) == PRESERVED_R5_OFFSET);
- ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedR6) == PRESERVED_R6_OFFSET);
-
- ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, registerFile) == REGISTER_FILE_OFFSET);
- ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, callFrame) == CALLFRAME_OFFSET);
- // The fifth argument is the first item already on the stack.
- ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, enabledProfilerReference) == ENABLE_PROFILER_REFERENCE_OFFSET);
-
- ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, thunkReturnAddress) == THUNK_RETURN_ADDRESS_OFFSET);
-
-#elif CPU(ARM_TRADITIONAL)
-
- ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, thunkReturnAddress) == THUNK_RETURN_ADDRESS_OFFSET);
- ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedR4) == PRESERVEDR4_OFFSET);
-
-#elif CPU(MIPS)
- ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedGP) == PRESERVED_GP_OFFSET);
- ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedS0) == PRESERVED_S0_OFFSET);
- ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedS1) == PRESERVED_S1_OFFSET);
- ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedS2) == PRESERVED_S2_OFFSET);
- ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedReturnAddress) == PRESERVED_RETURN_ADDRESS_OFFSET);
- ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, thunkReturnAddress) == THUNK_RETURN_ADDRESS_OFFSET);
- ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, registerFile) == REGISTER_FILE_OFFSET);
- ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, callFrame) == CALLFRAME_OFFSET);
- ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, exception) == EXCEPTION_OFFSET);
- ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, enabledProfilerReference) == ENABLE_PROFILER_REFERENCE_OFFSET);
- ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, globalData) == GLOBAL_DATA_OFFSET);
-
-#endif
-}
-
-JITThunks::~JITThunks()
-{
-}
-
-#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
-
-NEVER_INLINE void JITThunks::tryCachePutByID(CallFrame* callFrame, CodeBlock* codeBlock, ReturnAddressPtr returnAddress, JSValue baseValue, const PutPropertySlot& slot, StructureStubInfo* stubInfo, bool direct)
-{
- // The interpreter checks for recursion here; I do not believe this can occur in CTI.
-
- if (!baseValue.isCell())
- return;
-
- // Uncacheable: give up.
- if (!slot.isCacheable()) {
- ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(direct ? cti_op_put_by_id_direct_generic : cti_op_put_by_id_generic));
- return;
- }
-
- JSCell* baseCell = baseValue.asCell();
- Structure* structure = baseCell->structure();
-
- if (structure->isUncacheableDictionary()) {
- ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(direct ? cti_op_put_by_id_direct_generic : cti_op_put_by_id_generic));
- return;
- }
-
- // If baseCell != base, then baseCell must be a proxy for another object.
- if (baseCell != slot.base()) {
- ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(direct ? cti_op_put_by_id_direct_generic : cti_op_put_by_id_generic));
- return;
- }
-
- // Cache hit: Specialize instruction and ref Structures.
-
- // Structure transition, cache transition info
- if (slot.type() == PutPropertySlot::NewProperty) {
- if (structure->isDictionary()) {
- ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(direct ? cti_op_put_by_id_direct_generic : cti_op_put_by_id_generic));
- return;
- }
-
- // put_by_id_transition checks the prototype chain for setters.
- normalizePrototypeChain(callFrame, baseCell);
-
- StructureChain* prototypeChain = structure->prototypeChain(callFrame);
- stubInfo->initPutByIdTransition(structure->previousID(), structure, prototypeChain);
- JIT::compilePutByIdTransition(callFrame->scopeChain()->globalData, codeBlock, stubInfo, structure->previousID(), structure, slot.cachedOffset(), prototypeChain, returnAddress, direct);
- return;
- }
-
- stubInfo->initPutByIdReplace(structure);
-
- JIT::patchPutByIdReplace(codeBlock, stubInfo, structure, slot.cachedOffset(), returnAddress, direct);
-}
-
-NEVER_INLINE void JITThunks::tryCacheGetByID(CallFrame* callFrame, CodeBlock* codeBlock, ReturnAddressPtr returnAddress, JSValue baseValue, const Identifier& propertyName, const PropertySlot& slot, StructureStubInfo* stubInfo)
-{
- // FIXME: Write a test that proves we need to check for recursion here just
- // like the interpreter does, then add a check for recursion.
-
- // FIXME: Cache property access for immediates.
- if (!baseValue.isCell()) {
- ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(cti_op_get_by_id_generic));
- return;
- }
-
- JSGlobalData* globalData = &callFrame->globalData();
-
- if (isJSArray(globalData, baseValue) && propertyName == callFrame->propertyNames().length) {
- JIT::compilePatchGetArrayLength(callFrame->scopeChain()->globalData, codeBlock, returnAddress);
- return;
- }
-
- if (isJSString(globalData, baseValue) && propertyName == callFrame->propertyNames().length) {
- // The tradeoff of compiling a patched inline string length access routine does not seem
- // to pay off, so we currently only do this for arrays.
- ctiPatchCallByReturnAddress(codeBlock, returnAddress, globalData->jitStubs->ctiStringLengthTrampoline());
- return;
- }
-
- // Uncacheable: give up.
- if (!slot.isCacheable()) {
- ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(cti_op_get_by_id_generic));
- return;
- }
-
- JSCell* baseCell = baseValue.asCell();
- Structure* structure = baseCell->structure();
-
- if (structure->isUncacheableDictionary()) {
- ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(cti_op_get_by_id_generic));
- return;
- }
-
- // Cache hit: Specialize instruction and ref Structures.
-
- if (slot.slotBase() == baseValue) {
- // Set this up so derefStructures can do its job.
- stubInfo->initGetByIdSelf(structure);
- if (slot.cachedPropertyType() != PropertySlot::Value)
- ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(cti_op_get_by_id_self_fail));
- else
- JIT::patchGetByIdSelf(codeBlock, stubInfo, structure, slot.cachedOffset(), returnAddress);
- return;
- }
-
- if (structure->isDictionary()) {
- ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(cti_op_get_by_id_generic));
- return;
- }
-
- if (slot.slotBase() == structure->prototypeForLookup(callFrame)) {
- ASSERT(slot.slotBase().isObject());
-
- JSObject* slotBaseObject = asObject(slot.slotBase());
- size_t offset = slot.cachedOffset();
-
- // Since we're accessing a prototype in a loop, it's a good bet that it
- // should not be treated as a dictionary.
- if (slotBaseObject->structure()->isDictionary()) {
- slotBaseObject->flattenDictionaryObject();
- offset = slotBaseObject->structure()->get(propertyName);
- }
-
- stubInfo->initGetByIdProto(structure, slotBaseObject->structure());
-
- ASSERT(!structure->isDictionary());
- ASSERT(!slotBaseObject->structure()->isDictionary());
- JIT::compileGetByIdProto(callFrame->scopeChain()->globalData, callFrame, codeBlock, stubInfo, structure, slotBaseObject->structure(), propertyName, slot, offset, returnAddress);
- return;
- }
-
- size_t offset = slot.cachedOffset();
- size_t count = normalizePrototypeChain(callFrame, baseValue, slot.slotBase(), propertyName, offset);
- if (!count) {
- stubInfo->accessType = access_get_by_id_generic;
- return;
- }
-
- StructureChain* prototypeChain = structure->prototypeChain(callFrame);
- stubInfo->initGetByIdChain(structure, prototypeChain);
- JIT::compileGetByIdChain(callFrame->scopeChain()->globalData, callFrame, codeBlock, stubInfo, structure, prototypeChain, count, propertyName, slot, offset, returnAddress);
-}
-
-#endif // ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
-
-#ifndef NDEBUG
-
-extern "C" {
-
-static void jscGeneratedNativeCode()
-{
- // When executing a JIT stub function (which might do an allocation), we hack the return address
- // to pretend to be executing this function, to keep stack logging tools from blowing out
- // memory.
-}
-
-}
-
-struct StackHack {
- ALWAYS_INLINE StackHack(JITStackFrame& stackFrame)
- : stackFrame(stackFrame)
- , savedReturnAddress(*stackFrame.returnAddressSlot())
- {
- *stackFrame.returnAddressSlot() = ReturnAddressPtr(FunctionPtr(jscGeneratedNativeCode));
- }
-
- ALWAYS_INLINE ~StackHack()
- {
- *stackFrame.returnAddressSlot() = savedReturnAddress;
- }
-
- JITStackFrame& stackFrame;
- ReturnAddressPtr savedReturnAddress;
-};
-
-#define STUB_INIT_STACK_FRAME(stackFrame) JITStackFrame& stackFrame = *reinterpret_cast_ptr<JITStackFrame*>(STUB_ARGS); StackHack stackHack(stackFrame)
-#define STUB_SET_RETURN_ADDRESS(returnAddress) stackHack.savedReturnAddress = ReturnAddressPtr(returnAddress)
-#define STUB_RETURN_ADDRESS stackHack.savedReturnAddress
-
-#else
-
-#define STUB_INIT_STACK_FRAME(stackFrame) JITStackFrame& stackFrame = *reinterpret_cast_ptr<JITStackFrame*>(STUB_ARGS)
-#define STUB_SET_RETURN_ADDRESS(returnAddress) *stackFrame.returnAddressSlot() = ReturnAddressPtr(returnAddress)
-#define STUB_RETURN_ADDRESS *stackFrame.returnAddressSlot()
-
-#endif
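-// In debug builds the real return address lives in the StackHack (the slot itself is
-// overwritten to point at jscGeneratedNativeCode), so STUB_RETURN_ADDRESS reads the
-// saved copy; in release builds it reads the return address slot directly.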
-
-// The reason this is not inlined is to avoid having to do a PIC branch
-// to get the address of the ctiVMThrowTrampoline function. It's also
-// good to keep the code size down by leaving as much of the exception
-// handling code out of line as possible.
-static NEVER_INLINE void returnToThrowTrampoline(JSGlobalData* globalData, ReturnAddressPtr exceptionLocation, ReturnAddressPtr& returnAddressSlot)
-{
- ASSERT(globalData->exception);
- globalData->exceptionLocation = exceptionLocation;
- returnAddressSlot = ReturnAddressPtr(FunctionPtr(ctiVMThrowTrampoline));
-}
-
-static NEVER_INLINE void throwStackOverflowError(CallFrame* callFrame, JSGlobalData* globalData, ReturnAddressPtr exceptionLocation, ReturnAddressPtr& returnAddressSlot)
-{
- globalData->exception = createStackOverflowError(callFrame);
- returnToThrowTrampoline(globalData, exceptionLocation, returnAddressSlot);
-}
-
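-// VM_THROW_EXCEPTION arranges the throw and returns 0 immediately, for stubs that
-// return a value; the _AT_END variants only redirect the return address, so the
-// stub finishes its remaining work and the throw happens when it returns.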
-#define VM_THROW_EXCEPTION() \
- do { \
- VM_THROW_EXCEPTION_AT_END(); \
- return 0; \
- } while (0)
-#define VM_THROW_EXCEPTION_AT_END() \
- do {\
- returnToThrowTrampoline(stackFrame.globalData, STUB_RETURN_ADDRESS, STUB_RETURN_ADDRESS);\
- } while (0)
-
-#define CHECK_FOR_EXCEPTION() \
- do { \
- if (UNLIKELY(stackFrame.globalData->exception)) \
- VM_THROW_EXCEPTION(); \
- } while (0)
-#define CHECK_FOR_EXCEPTION_AT_END() \
- do { \
- if (UNLIKELY(stackFrame.globalData->exception)) \
- VM_THROW_EXCEPTION_AT_END(); \
- } while (0)
-#define CHECK_FOR_EXCEPTION_VOID() \
- do { \
- if (UNLIKELY(stackFrame.globalData->exception)) { \
- VM_THROW_EXCEPTION_AT_END(); \
- return; \
- } \
- } while (0)
-
-struct ExceptionHandler {
- void* catchRoutine;
- CallFrame* callFrame;
-};
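-// jitThrow unwinds to the nearest exception handler (which may pop call frames) and
-// hands back both the catch routine to jump to and the call frame to resume in; if
-// no handler is found, the catch routine is ctiOpThrowNotCaught.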
-static ExceptionHandler jitThrow(JSGlobalData* globalData, CallFrame* callFrame, JSValue exceptionValue, ReturnAddressPtr faultLocation)
-{
- ASSERT(exceptionValue);
-
- unsigned vPCIndex = callFrame->codeBlock()->bytecodeOffset(faultLocation);
- globalData->exception = JSValue();
- HandlerInfo* handler = globalData->interpreter->throwException(callFrame, exceptionValue, vPCIndex); // This may update callFrame & exceptionValue!
- globalData->exception = exceptionValue;
-
- void* catchRoutine = handler ? handler->nativeCode.executableAddress() : FunctionPtr(ctiOpThrowNotCaught).value();
- ASSERT(catchRoutine);
- ExceptionHandler exceptionHandler = { catchRoutine, callFrame };
- return exceptionHandler;
-}
-
-#if CPU(ARM_THUMB2)
-
-#define DEFINE_STUB_FUNCTION(rtype, op) \
- extern "C" { \
- rtype JITStubThunked_##op(STUB_ARGS_DECLARATION); \
- }; \
- asm ( \
- ".text" "\n" \
- ".align 2" "\n" \
- ".globl " SYMBOL_STRING(cti_##op) "\n" \
- HIDE_SYMBOL(cti_##op) "\n" \
- ".thumb" "\n" \
- ".thumb_func " THUMB_FUNC_PARAM(cti_##op) "\n" \
- SYMBOL_STRING(cti_##op) ":" "\n" \
- "str lr, [sp, #" STRINGIZE_VALUE_OF(THUNK_RETURN_ADDRESS_OFFSET) "]" "\n" \
- "bl " SYMBOL_STRING(JITStubThunked_##op) "\n" \
- "ldr lr, [sp, #" STRINGIZE_VALUE_OF(THUNK_RETURN_ADDRESS_OFFSET) "]" "\n" \
- "bx lr" "\n" \
- ); \
- rtype JITStubThunked_##op(STUB_ARGS_DECLARATION) \
-
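-// For illustration: DEFINE_STUB_FUNCTION(int, timeout_check) declares
-// JITStubThunked_timeout_check, emits a Thumb entry point cti_timeout_check that
-// spills lr into the JITStackFrame's thunk slot, calls the C++ thunk, reloads lr,
-// and returns to JIT code; the macro invocation is then followed by the thunk's body.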
-#elif CPU(MIPS)
-#if WTF_MIPS_PIC
-#define DEFINE_STUB_FUNCTION(rtype, op) \
- extern "C" { \
- rtype JITStubThunked_##op(STUB_ARGS_DECLARATION); \
- }; \
- asm ( \
- ".text" "\n" \
- ".align 2" "\n" \
- ".set noreorder" "\n" \
- ".set nomacro" "\n" \
- ".set nomips16" "\n" \
- ".globl " SYMBOL_STRING(cti_##op) "\n" \
- ".ent " SYMBOL_STRING(cti_##op) "\n" \
- SYMBOL_STRING(cti_##op) ":" "\n" \
- "lw $28," STRINGIZE_VALUE_OF(PRESERVED_GP_OFFSET) "($29)" "\n" \
- "sw $31," STRINGIZE_VALUE_OF(THUNK_RETURN_ADDRESS_OFFSET) "($29)" "\n" \
- ".set macro" "\n" \
- "la $25," SYMBOL_STRING(JITStubThunked_##op) "\n" \
- ".set nomacro" "\n" \
- "bal " SYMBOL_STRING(JITStubThunked_##op) "\n" \
- "nop" "\n" \
- "lw $31," STRINGIZE_VALUE_OF(THUNK_RETURN_ADDRESS_OFFSET) "($29)" "\n" \
- "jr $31" "\n" \
- "nop" "\n" \
- ".set reorder" "\n" \
- ".set macro" "\n" \
- ".end " SYMBOL_STRING(cti_##op) "\n" \
- ); \
- rtype JITStubThunked_##op(STUB_ARGS_DECLARATION)
-
-#else // WTF_MIPS_PIC
-#define DEFINE_STUB_FUNCTION(rtype, op) \
- extern "C" { \
- rtype JITStubThunked_##op(STUB_ARGS_DECLARATION); \
- }; \
- asm ( \
- ".text" "\n" \
- ".align 2" "\n" \
- ".set noreorder" "\n" \
- ".set nomacro" "\n" \
- ".set nomips16" "\n" \
- ".globl " SYMBOL_STRING(cti_##op) "\n" \
- ".ent " SYMBOL_STRING(cti_##op) "\n" \
- SYMBOL_STRING(cti_##op) ":" "\n" \
- "sw $31," STRINGIZE_VALUE_OF(THUNK_RETURN_ADDRESS_OFFSET) "($29)" "\n" \
- "jal " SYMBOL_STRING(JITStubThunked_##op) "\n" \
- "nop" "\n" \
- "lw $31," STRINGIZE_VALUE_OF(THUNK_RETURN_ADDRESS_OFFSET) "($29)" "\n" \
- "jr $31" "\n" \
- "nop" "\n" \
- ".set reorder" "\n" \
- ".set macro" "\n" \
- ".end " SYMBOL_STRING(cti_##op) "\n" \
- ); \
- rtype JITStubThunked_##op(STUB_ARGS_DECLARATION)
-
-#endif
-
-#elif CPU(ARM_TRADITIONAL) && COMPILER(GCC)
-
-#define DEFINE_STUB_FUNCTION(rtype, op) \
- extern "C" { \
- rtype JITStubThunked_##op(STUB_ARGS_DECLARATION); \
- }; \
- asm ( \
- ".globl " SYMBOL_STRING(cti_##op) "\n" \
- SYMBOL_STRING(cti_##op) ":" "\n" \
- "str lr, [sp, #" STRINGIZE_VALUE_OF(THUNK_RETURN_ADDRESS_OFFSET) "]" "\n" \
- "bl " SYMBOL_STRING(JITStubThunked_##op) "\n" \
- "ldr lr, [sp, #" STRINGIZE_VALUE_OF(THUNK_RETURN_ADDRESS_OFFSET) "]" "\n" \
- "mov pc, lr" "\n" \
- ); \
- rtype JITStubThunked_##op(STUB_ARGS_DECLARATION)
-
-#elif CPU(ARM_TRADITIONAL) && COMPILER(RVCT)
-
-#define DEFINE_STUB_FUNCTION(rtype, op) rtype JITStubThunked_##op(STUB_ARGS_DECLARATION)
-
-/* The following is a workaround for the RVCT toolchain; preprocessor macros are not expanded before the code is passed to the assembler */
-
-/* The following section is a template to generate code for GeneratedJITStubs_RVCT.h */
-/* The pattern "#xxx#" will be replaced with "xxx" */
-
-/*
-RVCT(extern "C" #rtype# JITStubThunked_#op#(STUB_ARGS_DECLARATION);)
-RVCT(__asm #rtype# cti_#op#(STUB_ARGS_DECLARATION))
-RVCT({)
-RVCT( ARM)
-RVCT( IMPORT JITStubThunked_#op#)
-RVCT( str lr, [sp, # THUNK_RETURN_ADDRESS_OFFSET])
-RVCT( bl JITStubThunked_#op#)
-RVCT( ldr lr, [sp, # THUNK_RETURN_ADDRESS_OFFSET])
-RVCT( bx lr)
-RVCT(})
-RVCT()
-*/
-
-/* Include the generated file */
-#include "GeneratedJITStubs_RVCT.h"
-
-#elif CPU(ARM_TRADITIONAL) && COMPILER(MSVC)
-
-#define DEFINE_STUB_FUNCTION(rtype, op) extern "C" rtype JITStubThunked_##op(STUB_ARGS_DECLARATION)
-
-/* The following is a workaround for MSVC toolchain; inline assembler is not supported */
-
-/* The following section is a template to generate code for GeneratedJITStubs_MSVC.asm */
-/* The pattern "#xxx#" will be replaced with "xxx" */
-
-/*
-MSVC_BEGIN( AREA Trampoline, CODE)
-MSVC_BEGIN()
-MSVC_BEGIN( EXPORT ctiTrampoline)
-MSVC_BEGIN( EXPORT ctiVMThrowTrampoline)
-MSVC_BEGIN( EXPORT ctiOpThrowNotCaught)
-MSVC_BEGIN()
-MSVC_BEGIN(ctiTrampoline PROC)
-MSVC_BEGIN( stmdb sp!, {r1-r3})
-MSVC_BEGIN( stmdb sp!, {r4-r8, lr})
-MSVC_BEGIN( sub sp, sp, # THUNK_RETURN_ADDRESS_OFFSET + 4)
-MSVC_BEGIN( mov r4, r2)
-MSVC_BEGIN( mov r5, #512)
-MSVC_BEGIN( ; r0 contains the code)
-MSVC_BEGIN( mov lr, pc)
-MSVC_BEGIN( bx r0)
-MSVC_BEGIN( add sp, sp, # THUNK_RETURN_ADDRESS_OFFSET + 4)
-MSVC_BEGIN( ldmia sp!, {r4-r8, lr})
-MSVC_BEGIN( add sp, sp, #12)
-MSVC_BEGIN( bx lr)
-MSVC_BEGIN(ctiTrampoline ENDP)
-MSVC_BEGIN()
-MSVC_BEGIN(ctiVMThrowTrampoline PROC)
-MSVC_BEGIN( mov r0, sp)
-MSVC_BEGIN( mov lr, pc)
-MSVC_BEGIN( bl cti_vm_throw)
-MSVC_BEGIN(ctiOpThrowNotCaught)
-MSVC_BEGIN( add sp, sp, # THUNK_RETURN_ADDRESS_OFFSET + 4)
-MSVC_BEGIN( ldmia sp!, {r4-r8, lr})
-MSVC_BEGIN( add sp, sp, #12)
-MSVC_BEGIN( bx lr)
-MSVC_BEGIN(ctiVMThrowTrampoline ENDP)
-MSVC_BEGIN()
-
-MSVC( EXPORT cti_#op#)
-MSVC( IMPORT JITStubThunked_#op#)
-MSVC(cti_#op# PROC)
-MSVC( str lr, [sp, # THUNK_RETURN_ADDRESS_OFFSET])
-MSVC( bl JITStubThunked_#op#)
-MSVC( ldr lr, [sp, # THUNK_RETURN_ADDRESS_OFFSET])
-MSVC( bx lr)
-MSVC(cti_#op# ENDP)
-MSVC()
-
-MSVC_END( END)
-*/
-
-#else
-#define DEFINE_STUB_FUNCTION(rtype, op) rtype JIT_STUB cti_##op(STUB_ARGS_DECLARATION)
-#endif
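-// On platforms with no special thunk requirements, cti_##op is the stub function
-// itself, called directly with the JIT_STUB calling convention.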
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_create_this)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
- CallFrame* callFrame = stackFrame.callFrame;
-
- JSFunction* constructor = asFunction(callFrame->callee());
-#if !ASSERT_DISABLED
- ConstructData constructData;
- ASSERT(constructor->getConstructData(constructData) == ConstructTypeJS);
-#endif
-
- Structure* structure;
- JSValue proto = stackFrame.args[0].jsValue();
- if (proto.isObject())
- structure = asObject(proto)->inheritorID();
- else
- structure = constructor->scope().node()->globalObject->emptyObjectStructure();
- JSValue result = new (&callFrame->globalData()) JSObject(structure);
-
- return JSValue::encode(result);
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_convert_this)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSValue v1 = stackFrame.args[0].jsValue();
- CallFrame* callFrame = stackFrame.callFrame;
-
- JSObject* result = v1.toThisObject(callFrame);
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(result);
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_convert_this_strict)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSValue v1 = stackFrame.args[0].jsValue();
- CallFrame* callFrame = stackFrame.callFrame;
- ASSERT(v1.asCell()->structure()->typeInfo().needsThisConversion());
- JSValue result = v1.toStrictThisObject(callFrame);
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(result);
-}
-
-DEFINE_STUB_FUNCTION(void, op_end)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- ScopeChainNode* scopeChain = stackFrame.callFrame->scopeChain();
- ASSERT(scopeChain->refCount > 1);
- scopeChain->deref();
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_add)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSValue v1 = stackFrame.args[0].jsValue();
- JSValue v2 = stackFrame.args[1].jsValue();
- CallFrame* callFrame = stackFrame.callFrame;
-
- if (v1.isString()) {
- JSValue result = v2.isString()
- ? jsString(callFrame, asString(v1), asString(v2))
- : jsString(callFrame, asString(v1), v2.toPrimitiveString(callFrame));
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(result);
- }
-
- double left = 0.0, right;
- if (v1.getNumber(left) && v2.getNumber(right))
- return JSValue::encode(jsNumber(left + right));
-
- // All other cases are pretty uncommon
- JSValue result = jsAddSlowCase(callFrame, v1, v2);
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(result);
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_pre_inc)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSValue v = stackFrame.args[0].jsValue();
-
- CallFrame* callFrame = stackFrame.callFrame;
- JSValue result = jsNumber(v.toNumber(callFrame) + 1);
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(result);
-}
-
-DEFINE_STUB_FUNCTION(int, timeout_check)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSGlobalData* globalData = stackFrame.globalData;
- TimeoutChecker& timeoutChecker = globalData->timeoutChecker;
-
- if (globalData->terminator.shouldTerminate()) {
- globalData->exception = createTerminatedExecutionException(globalData);
- VM_THROW_EXCEPTION_AT_END();
- } else if (timeoutChecker.didTimeOut(stackFrame.callFrame)) {
- globalData->exception = createInterruptedExecutionException(globalData);
- VM_THROW_EXCEPTION_AT_END();
- }
-
- return timeoutChecker.ticksUntilNextCheck();
-}
-
-DEFINE_STUB_FUNCTION(void*, register_file_check)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
- CallFrame* callFrame = stackFrame.callFrame;
-
- if (UNLIKELY(!stackFrame.registerFile->grow(&callFrame->registers()[callFrame->codeBlock()->m_numCalleeRegisters]))) {
- // Rewind to the previous call frame because op_call already optimistically
- // moved the call frame forward.
- CallFrame* oldCallFrame = callFrame->callerFrame();
- ExceptionHandler handler = jitThrow(stackFrame.globalData, oldCallFrame, createStackOverflowError(oldCallFrame), ReturnAddressPtr(oldCallFrame->returnPC()));
- STUB_SET_RETURN_ADDRESS(handler.catchRoutine);
- callFrame = handler.callFrame;
- }
-
- return callFrame;
-}
-
-DEFINE_STUB_FUNCTION(int, op_loop_if_lesseq)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSValue src1 = stackFrame.args[0].jsValue();
- JSValue src2 = stackFrame.args[1].jsValue();
- CallFrame* callFrame = stackFrame.callFrame;
-
- bool result = jsLessEq(callFrame, src1, src2);
- CHECK_FOR_EXCEPTION_AT_END();
- return result;
-}
-
-DEFINE_STUB_FUNCTION(JSObject*, op_new_object)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- return constructEmptyObject(stackFrame.callFrame);
-}
-
-DEFINE_STUB_FUNCTION(void, op_put_by_id_generic)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- PutPropertySlot slot(stackFrame.callFrame->codeBlock()->isStrictMode());
- stackFrame.args[0].jsValue().put(stackFrame.callFrame, stackFrame.args[1].identifier(), stackFrame.args[2].jsValue(), slot);
- CHECK_FOR_EXCEPTION_AT_END();
-}
-
-DEFINE_STUB_FUNCTION(void, op_put_by_id_direct_generic)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- PutPropertySlot slot(stackFrame.callFrame->codeBlock()->isStrictMode());
- stackFrame.args[0].jsValue().putDirect(stackFrame.callFrame, stackFrame.args[1].identifier(), stackFrame.args[2].jsValue(), slot);
- CHECK_FOR_EXCEPTION_AT_END();
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_generic)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- CallFrame* callFrame = stackFrame.callFrame;
- Identifier& ident = stackFrame.args[1].identifier();
-
- JSValue baseValue = stackFrame.args[0].jsValue();
- PropertySlot slot(baseValue);
- JSValue result = baseValue.get(callFrame, ident, slot);
-
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(result);
-}
-
-#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
-
-DEFINE_STUB_FUNCTION(void, op_put_by_id)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
- CallFrame* callFrame = stackFrame.callFrame;
- Identifier& ident = stackFrame.args[1].identifier();
-
- PutPropertySlot slot(callFrame->codeBlock()->isStrictMode());
- stackFrame.args[0].jsValue().put(callFrame, ident, stackFrame.args[2].jsValue(), slot);
-
- CodeBlock* codeBlock = stackFrame.callFrame->codeBlock();
- StructureStubInfo* stubInfo = &codeBlock->getStubInfo(STUB_RETURN_ADDRESS);
- if (!stubInfo->seenOnce())
- stubInfo->setSeen();
- else
- JITThunks::tryCachePutByID(callFrame, codeBlock, STUB_RETURN_ADDRESS, stackFrame.args[0].jsValue(), slot, stubInfo, false);
-
- CHECK_FOR_EXCEPTION_AT_END();
-}
-
-DEFINE_STUB_FUNCTION(void, op_put_by_id_direct)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
- CallFrame* callFrame = stackFrame.callFrame;
- Identifier& ident = stackFrame.args[1].identifier();
-
- PutPropertySlot slot(callFrame->codeBlock()->isStrictMode());
- stackFrame.args[0].jsValue().putDirect(callFrame, ident, stackFrame.args[2].jsValue(), slot);
-
- CodeBlock* codeBlock = stackFrame.callFrame->codeBlock();
- StructureStubInfo* stubInfo = &codeBlock->getStubInfo(STUB_RETURN_ADDRESS);
- if (!stubInfo->seenOnce())
- stubInfo->setSeen();
- else
- JITThunks::tryCachePutByID(callFrame, codeBlock, STUB_RETURN_ADDRESS, stackFrame.args[0].jsValue(), slot, stubInfo, true);
-
- CHECK_FOR_EXCEPTION_AT_END();
-}
-
-DEFINE_STUB_FUNCTION(void, op_put_by_id_fail)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- CallFrame* callFrame = stackFrame.callFrame;
- Identifier& ident = stackFrame.args[1].identifier();
-
- PutPropertySlot slot(callFrame->codeBlock()->isStrictMode());
- stackFrame.args[0].jsValue().put(callFrame, ident, stackFrame.args[2].jsValue(), slot);
-
- CHECK_FOR_EXCEPTION_AT_END();
-}
-
-DEFINE_STUB_FUNCTION(void, op_put_by_id_direct_fail)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- CallFrame* callFrame = stackFrame.callFrame;
- Identifier& ident = stackFrame.args[1].identifier();
-
- PutPropertySlot slot(callFrame->codeBlock()->isStrictMode());
- stackFrame.args[0].jsValue().putDirect(callFrame, ident, stackFrame.args[2].jsValue(), slot);
-
- CHECK_FOR_EXCEPTION_AT_END();
-}
-
-DEFINE_STUB_FUNCTION(JSObject*, op_put_by_id_transition_realloc)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSValue baseValue = stackFrame.args[0].jsValue();
- int32_t oldSize = stackFrame.args[3].int32();
- int32_t newSize = stackFrame.args[4].int32();
-
- ASSERT(baseValue.isObject());
- JSObject* base = asObject(baseValue);
- base->allocatePropertyStorage(oldSize, newSize);
-
- return base;
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_method_check)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- CallFrame* callFrame = stackFrame.callFrame;
- Identifier& ident = stackFrame.args[1].identifier();
-
- JSValue baseValue = stackFrame.args[0].jsValue();
- PropertySlot slot(baseValue);
- JSValue result = baseValue.get(callFrame, ident, slot);
- CHECK_FOR_EXCEPTION();
-
- CodeBlock* codeBlock = stackFrame.callFrame->codeBlock();
- MethodCallLinkInfo& methodCallLinkInfo = codeBlock->getMethodCallLinkInfo(STUB_RETURN_ADDRESS);
-
- if (!methodCallLinkInfo.seenOnce()) {
- methodCallLinkInfo.setSeen();
- return JSValue::encode(result);
- }
-
- // If we successfully got something, then the base from which it is being accessed must
- // be an object. (This assertion ensures the asObject() call below is safe, and comes after
- // an isCacheable() check.)
- ASSERT(!slot.isCacheableValue() || slot.slotBase().isObject());
-
- // Check that:
- // * We're dealing with a JSCell,
- // * the property is cacheable,
- // * it's not a dictionary,
- // * there is a function cached.
- Structure* structure;
- JSCell* specific;
- JSObject* slotBaseObject;
- if (baseValue.isCell()
- && slot.isCacheableValue()
- && !(structure = baseValue.asCell()->structure())->isUncacheableDictionary()
- && (slotBaseObject = asObject(slot.slotBase()))->getPropertySpecificValue(callFrame, ident, specific)
- && specific
- ) {
-
- JSFunction* callee = (JSFunction*)specific;
-
- // Since we're accessing a prototype in a loop, it's a good bet that it
- // should not be treated as a dictionary.
- if (slotBaseObject->structure()->isDictionary())
- slotBaseObject->flattenDictionaryObject();
-
- // The result fetched should always be the callee!
- ASSERT(result == JSValue(callee));
-
- // Check to see if the function is on the object's prototype. Patch up the code to optimize.
- if (slot.slotBase() == structure->prototypeForLookup(callFrame)) {
- JIT::patchMethodCallProto(codeBlock, methodCallLinkInfo, callee, structure, slotBaseObject, STUB_RETURN_ADDRESS);
- return JSValue::encode(result);
- }
-
- // Check to see if the function is on the object itself.
- // Since we generate the method-check to check both the structure and a prototype-structure (since this
- // is the common case) we have a problem - we need to patch the prototype structure check to do something
- // useful. We could try to nop it out altogether, but that's a little messy, so let's do something simpler
- // for now: perform a check on a special object on the global object, used only for this
- // purpose. The object is in no way exposed, and as such the check will always pass.
- if (slot.slotBase() == baseValue) {
- JIT::patchMethodCallProto(codeBlock, methodCallLinkInfo, callee, structure, callFrame->scopeChain()->globalObject->methodCallDummy(), STUB_RETURN_ADDRESS);
- return JSValue::encode(result);
- }
- }
-
- // Revert the get_by_id op back to being a regular get_by_id - allow it to cache like normal, if it needs to.
- ctiPatchCallByReturnAddress(codeBlock, STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id));
- return JSValue::encode(result);
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
- CallFrame* callFrame = stackFrame.callFrame;
- Identifier& ident = stackFrame.args[1].identifier();
-
- JSValue baseValue = stackFrame.args[0].jsValue();
- PropertySlot slot(baseValue);
- JSValue result = baseValue.get(callFrame, ident, slot);
-
- CodeBlock* codeBlock = stackFrame.callFrame->codeBlock();
- StructureStubInfo* stubInfo = &codeBlock->getStubInfo(STUB_RETURN_ADDRESS);
- if (!stubInfo->seenOnce())
- stubInfo->setSeen();
- else
- JITThunks::tryCacheGetByID(callFrame, codeBlock, STUB_RETURN_ADDRESS, baseValue, ident, slot, stubInfo);
-
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(result);
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_self_fail)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- CallFrame* callFrame = stackFrame.callFrame;
- Identifier& ident = stackFrame.args[1].identifier();
-
- JSValue baseValue = stackFrame.args[0].jsValue();
- PropertySlot slot(baseValue);
- JSValue result = baseValue.get(callFrame, ident, slot);
-
- CHECK_FOR_EXCEPTION();
-
- if (baseValue.isCell()
- && slot.isCacheable()
- && !baseValue.asCell()->structure()->isUncacheableDictionary()
- && slot.slotBase() == baseValue) {
-
- CodeBlock* codeBlock = callFrame->codeBlock();
- StructureStubInfo* stubInfo = &codeBlock->getStubInfo(STUB_RETURN_ADDRESS);
-
- ASSERT(slot.slotBase().isObject());
-
- PolymorphicAccessStructureList* polymorphicStructureList;
- int listIndex = 1;
-
- if (stubInfo->accessType == access_get_by_id_self) {
- ASSERT(!stubInfo->stubRoutine);
- polymorphicStructureList = new PolymorphicAccessStructureList(CodeLocationLabel(), stubInfo->u.getByIdSelf.baseObjectStructure);
- stubInfo->initGetByIdSelfList(polymorphicStructureList, 1);
- } else {
- polymorphicStructureList = stubInfo->u.getByIdSelfList.structureList;
- listIndex = stubInfo->u.getByIdSelfList.listSize;
- }
- if (listIndex < POLYMORPHIC_LIST_CACHE_SIZE) {
- stubInfo->u.getByIdSelfList.listSize++;
- JIT::compileGetByIdSelfList(callFrame->scopeChain()->globalData, codeBlock, stubInfo, polymorphicStructureList, listIndex, baseValue.asCell()->structure(), ident, slot, slot.cachedOffset());
-
- if (listIndex == (POLYMORPHIC_LIST_CACHE_SIZE - 1))
- ctiPatchCallByReturnAddress(codeBlock, STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_generic));
- }
- } else
- ctiPatchCallByReturnAddress(callFrame->codeBlock(), STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_generic));
- return JSValue::encode(result);
-}
-
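-// Promote a monomorphic proto/chain stub into a polymorphic list (or fetch the
-// existing list), returning via listIndex the slot the next generated stub should fill.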
-static PolymorphicAccessStructureList* getPolymorphicAccessStructureListSlot(StructureStubInfo* stubInfo, int& listIndex)
-{
- PolymorphicAccessStructureList* prototypeStructureList = 0;
- listIndex = 1;
-
- switch (stubInfo->accessType) {
- case access_get_by_id_proto:
- prototypeStructureList = new PolymorphicAccessStructureList(stubInfo->stubRoutine, stubInfo->u.getByIdProto.baseObjectStructure, stubInfo->u.getByIdProto.prototypeStructure);
- stubInfo->stubRoutine = CodeLocationLabel();
- stubInfo->initGetByIdProtoList(prototypeStructureList, 2);
- break;
- case access_get_by_id_chain:
- prototypeStructureList = new PolymorphicAccessStructureList(stubInfo->stubRoutine, stubInfo->u.getByIdChain.baseObjectStructure, stubInfo->u.getByIdChain.chain);
- stubInfo->stubRoutine = CodeLocationLabel();
- stubInfo->initGetByIdProtoList(prototypeStructureList, 2);
- break;
- case access_get_by_id_proto_list:
- prototypeStructureList = stubInfo->u.getByIdProtoList.structureList;
- listIndex = stubInfo->u.getByIdProtoList.listSize;
- if (listIndex < POLYMORPHIC_LIST_CACHE_SIZE)
- stubInfo->u.getByIdProtoList.listSize++;
- break;
- default:
- ASSERT_NOT_REACHED();
- }
-
- ASSERT(listIndex <= POLYMORPHIC_LIST_CACHE_SIZE);
- return prototypeStructureList;
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_getter_stub)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
- CallFrame* callFrame = stackFrame.callFrame;
- GetterSetter* getterSetter = asGetterSetter(stackFrame.args[0].jsObject());
- if (!getterSetter->getter())
- return JSValue::encode(jsUndefined());
- JSObject* getter = asObject(getterSetter->getter());
- CallData callData;
- CallType callType = getter->getCallData(callData);
- JSValue result = call(callFrame, getter, callType, callData, stackFrame.args[1].jsObject(), ArgList());
- if (callFrame->hadException())
- returnToThrowTrampoline(&callFrame->globalData(), stackFrame.args[2].returnAddress(), STUB_RETURN_ADDRESS);
-
- return JSValue::encode(result);
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_custom_stub)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
- CallFrame* callFrame = stackFrame.callFrame;
- JSObject* slotBase = stackFrame.args[0].jsObject();
- PropertySlot::GetValueFunc getter = reinterpret_cast<PropertySlot::GetValueFunc>(stackFrame.args[1].asPointer);
- const Identifier& ident = stackFrame.args[2].identifier();
- JSValue result = getter(callFrame, slotBase, ident);
- if (callFrame->hadException())
- returnToThrowTrampoline(&callFrame->globalData(), stackFrame.args[3].returnAddress(), STUB_RETURN_ADDRESS);
-
- return JSValue::encode(result);
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_proto_list)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- CallFrame* callFrame = stackFrame.callFrame;
- const Identifier& propertyName = stackFrame.args[1].identifier();
-
- JSValue baseValue = stackFrame.args[0].jsValue();
- PropertySlot slot(baseValue);
- JSValue result = baseValue.get(callFrame, propertyName, slot);
-
- CHECK_FOR_EXCEPTION();
-
- if (!baseValue.isCell() || !slot.isCacheable() || baseValue.asCell()->structure()->isDictionary()) {
- ctiPatchCallByReturnAddress(callFrame->codeBlock(), STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_proto_fail));
- return JSValue::encode(result);
- }
-
- Structure* structure = baseValue.asCell()->structure();
- CodeBlock* codeBlock = callFrame->codeBlock();
- StructureStubInfo* stubInfo = &codeBlock->getStubInfo(STUB_RETURN_ADDRESS);
-
- ASSERT(slot.slotBase().isObject());
- JSObject* slotBaseObject = asObject(slot.slotBase());
-
- size_t offset = slot.cachedOffset();
-
- if (slot.slotBase() == baseValue)
- ctiPatchCallByReturnAddress(codeBlock, STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_proto_fail));
- else if (slot.slotBase() == baseValue.asCell()->structure()->prototypeForLookup(callFrame)) {
- ASSERT(!baseValue.asCell()->structure()->isDictionary());
- // Since we're accessing a prototype in a loop, it's a good bet that it
- // should not be treated as a dictionary.
- if (slotBaseObject->structure()->isDictionary()) {
- slotBaseObject->flattenDictionaryObject();
- offset = slotBaseObject->structure()->get(propertyName);
- }
-
- int listIndex;
- PolymorphicAccessStructureList* prototypeStructureList = getPolymorphicAccessStructureListSlot(stubInfo, listIndex);
- if (listIndex < POLYMORPHIC_LIST_CACHE_SIZE) {
- JIT::compileGetByIdProtoList(callFrame->scopeChain()->globalData, callFrame, codeBlock, stubInfo, prototypeStructureList, listIndex, structure, slotBaseObject->structure(), propertyName, slot, offset);
-
- if (listIndex == (POLYMORPHIC_LIST_CACHE_SIZE - 1))
- ctiPatchCallByReturnAddress(codeBlock, STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_proto_list_full));
- }
- } else if (size_t count = normalizePrototypeChain(callFrame, baseValue, slot.slotBase(), propertyName, offset)) {
- ASSERT(!baseValue.asCell()->structure()->isDictionary());
- int listIndex;
- PolymorphicAccessStructureList* prototypeStructureList = getPolymorphicAccessStructureListSlot(stubInfo, listIndex);
-
- if (listIndex < POLYMORPHIC_LIST_CACHE_SIZE) {
- StructureChain* protoChain = structure->prototypeChain(callFrame);
- JIT::compileGetByIdChainList(callFrame->scopeChain()->globalData, callFrame, codeBlock, stubInfo, prototypeStructureList, listIndex, structure, protoChain, count, propertyName, slot, offset);
-
- if (listIndex == (POLYMORPHIC_LIST_CACHE_SIZE - 1))
- ctiPatchCallByReturnAddress(codeBlock, STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_proto_list_full));
- }
- } else
- ctiPatchCallByReturnAddress(codeBlock, STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_proto_fail));
-
- return JSValue::encode(result);
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_proto_list_full)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSValue baseValue = stackFrame.args[0].jsValue();
- PropertySlot slot(baseValue);
- JSValue result = baseValue.get(stackFrame.callFrame, stackFrame.args[1].identifier(), slot);
-
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(result);
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_proto_fail)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSValue baseValue = stackFrame.args[0].jsValue();
- PropertySlot slot(baseValue);
- JSValue result = baseValue.get(stackFrame.callFrame, stackFrame.args[1].identifier(), slot);
-
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(result);
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_array_fail)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSValue baseValue = stackFrame.args[0].jsValue();
- PropertySlot slot(baseValue);
- JSValue result = baseValue.get(stackFrame.callFrame, stackFrame.args[1].identifier(), slot);
-
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(result);
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_string_fail)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSValue baseValue = stackFrame.args[0].jsValue();
- PropertySlot slot(baseValue);
- JSValue result = baseValue.get(stackFrame.callFrame, stackFrame.args[1].identifier(), slot);
-
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(result);
-}
-
-#endif // ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
-
-DEFINE_STUB_FUNCTION(void, op_check_has_instance)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- CallFrame* callFrame = stackFrame.callFrame;
- JSValue baseVal = stackFrame.args[0].jsValue();
-
- // ECMA-262 15.3.5.3:
- // Throw an exception either if baseVal is not an object, or if it does not implement 'HasInstance' (i.e., is not a function).
-#ifndef NDEBUG
- TypeInfo typeInfo(UnspecifiedType);
- ASSERT(!baseVal.isObject() || !(typeInfo = asObject(baseVal)->structure()->typeInfo()).implementsHasInstance());
-#endif
- stackFrame.globalData->exception = createInvalidParamError(callFrame, "instanceof", baseVal);
- VM_THROW_EXCEPTION_AT_END();
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_instanceof)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- CallFrame* callFrame = stackFrame.callFrame;
- JSValue value = stackFrame.args[0].jsValue();
- JSValue baseVal = stackFrame.args[1].jsValue();
- JSValue proto = stackFrame.args[2].jsValue();
-
- // At least one of these checks must have failed to get to the slow case.
- ASSERT(!value.isCell() || !baseVal.isCell() || !proto.isCell()
- || !value.isObject() || !baseVal.isObject() || !proto.isObject()
- || (asObject(baseVal)->structure()->typeInfo().flags() & (ImplementsHasInstance | OverridesHasInstance)) != ImplementsHasInstance);
-
-
- // ECMA-262 15.3.5.3:
- // Throw an exception either if baseVal is not an object, or if it does not implement 'HasInstance' (i.e., is not a function).
- TypeInfo typeInfo(UnspecifiedType);
- if (!baseVal.isObject() || !(typeInfo = asObject(baseVal)->structure()->typeInfo()).implementsHasInstance()) {
- stackFrame.globalData->exception = createInvalidParamError(stackFrame.callFrame, "instanceof", baseVal);
- VM_THROW_EXCEPTION();
- }
- ASSERT(typeInfo.type() != UnspecifiedType);
-
- if (!typeInfo.overridesHasInstance()) {
- if (!value.isObject())
- return JSValue::encode(jsBoolean(false));
-
- if (!proto.isObject()) {
- throwError(callFrame, createTypeError(callFrame, "instanceof called on an object with an invalid prototype property."));
- VM_THROW_EXCEPTION();
- }
- }
-
- JSValue result = jsBoolean(asObject(baseVal)->hasInstance(callFrame, value, proto));
- CHECK_FOR_EXCEPTION_AT_END();
-
- return JSValue::encode(result);
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_del_by_id)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- CallFrame* callFrame = stackFrame.callFrame;
-
- JSObject* baseObj = stackFrame.args[0].jsValue().toObject(callFrame);
-
- bool couldDelete = baseObj->deleteProperty(callFrame, stackFrame.args[1].identifier());
- JSValue result = jsBoolean(couldDelete);
- if (!couldDelete && callFrame->codeBlock()->isStrictMode())
- stackFrame.globalData->exception = createTypeError(stackFrame.callFrame, "Unable to delete property.");
-
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(result);
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_mul)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSValue src1 = stackFrame.args[0].jsValue();
- JSValue src2 = stackFrame.args[1].jsValue();
-
- double left;
- double right;
- if (src1.getNumber(left) && src2.getNumber(right))
- return JSValue::encode(jsNumber(left * right));
-
- CallFrame* callFrame = stackFrame.callFrame;
- JSValue result = jsNumber(src1.toNumber(callFrame) * src2.toNumber(callFrame));
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(result);
-}
-
-DEFINE_STUB_FUNCTION(JSObject*, op_new_func)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- ASSERT(stackFrame.callFrame->codeBlock()->codeType() != FunctionCode || !stackFrame.callFrame->codeBlock()->needsFullScopeChain() || stackFrame.callFrame->uncheckedR(stackFrame.callFrame->codeBlock()->activationRegister()).jsValue());
- return stackFrame.args[0].function()->make(stackFrame.callFrame, stackFrame.callFrame->scopeChain());
-}
-
-DEFINE_STUB_FUNCTION(void*, op_call_jitCompile)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
-#if !ASSERT_DISABLED
- CallData callData;
- ASSERT(stackFrame.callFrame->callee()->getCallData(callData) == CallTypeJS);
-#endif
-
- JSFunction* function = asFunction(stackFrame.callFrame->callee());
- ASSERT(!function->isHostFunction());
- FunctionExecutable* executable = function->jsExecutable();
- ScopeChainNode* callDataScopeChain = function->scope().node();
- JSObject* error = executable->compileForCall(stackFrame.callFrame, callDataScopeChain);
- if (error) {
- stackFrame.callFrame->globalData().exception = error;
- return 0;
- }
- return function;
-}
-
-DEFINE_STUB_FUNCTION(void*, op_construct_jitCompile)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
-#if !ASSERT_DISABLED
- ConstructData constructData;
- ASSERT(asFunction(stackFrame.callFrame->callee())->getConstructData(constructData) == ConstructTypeJS);
-#endif
-
- JSFunction* function = asFunction(stackFrame.callFrame->callee());
- ASSERT(!function->isHostFunction());
- FunctionExecutable* executable = function->jsExecutable();
- ScopeChainNode* callDataScopeChain = function->scope().node();
- JSObject* error = executable->compileForConstruct(stackFrame.callFrame, callDataScopeChain);
- if (error) {
- stackFrame.callFrame->globalData().exception = error;
- return 0;
- }
- return function;
-}
-
-DEFINE_STUB_FUNCTION(void*, op_call_arityCheck)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- CallFrame* callFrame = stackFrame.callFrame;
- JSFunction* callee = asFunction(callFrame->callee());
- ASSERT(!callee->isHostFunction());
- CodeBlock* newCodeBlock = &callee->jsExecutable()->generatedBytecodeForCall();
- int argCount = callFrame->argumentCountIncludingThis();
- ReturnAddressPtr pc = callFrame->returnPC();
-
- ASSERT(argCount != newCodeBlock->m_numParameters);
-
- CallFrame* oldCallFrame = callFrame->callerFrame();
-
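- // The caller pushed argCount arguments but the callee expects m_numParameters.
- // Slide the frame forward and duplicate the declared parameters above the full
- // argument list (too many args), or pad the missing slots with undefined (too few),
- // then rebuild the call frame at its new position.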
- Register* r;
- if (argCount > newCodeBlock->m_numParameters) {
- size_t numParameters = newCodeBlock->m_numParameters;
- r = callFrame->registers() + numParameters;
- Register* newEnd = r + newCodeBlock->m_numCalleeRegisters;
- if (!stackFrame.registerFile->grow(newEnd)) {
- // Rewind to the previous call frame because op_call already optimistically
- // moved the call frame forward.
- ExceptionHandler handler = jitThrow(stackFrame.globalData, oldCallFrame, createStackOverflowError(oldCallFrame), pc);
- STUB_SET_RETURN_ADDRESS(handler.catchRoutine);
- return handler.callFrame;
- }
-
- Register* argv = r - RegisterFile::CallFrameHeaderSize - numParameters - argCount;
- for (size_t i = 0; i < numParameters; ++i)
- argv[i + argCount] = argv[i];
- } else {
- size_t omittedArgCount = newCodeBlock->m_numParameters - argCount;
- r = callFrame->registers() + omittedArgCount;
- Register* newEnd = r + newCodeBlock->m_numCalleeRegisters;
- if (!stackFrame.registerFile->grow(newEnd)) {
- // Rewind to the previous call frame because op_call already optimistically
- // moved the call frame forward.
- ExceptionHandler handler = jitThrow(stackFrame.globalData, oldCallFrame, createStackOverflowError(oldCallFrame), pc);
- STUB_SET_RETURN_ADDRESS(handler.catchRoutine);
- return handler.callFrame;
- }
-
- Register* argv = r - RegisterFile::CallFrameHeaderSize - omittedArgCount;
- for (size_t i = 0; i < omittedArgCount; ++i)
- argv[i] = jsUndefined();
- }
-
- callFrame = CallFrame::create(r);
- callFrame->setCallerFrame(oldCallFrame);
- callFrame->setArgumentCountIncludingThis(argCount);
- callFrame->setCallee(callee);
- callFrame->setScopeChain(callee->scope().node());
- callFrame->setReturnPC(pc.value());
-
- ASSERT((void*)callFrame <= stackFrame.registerFile->end());
- return callFrame;
-}
-
-DEFINE_STUB_FUNCTION(void*, op_construct_arityCheck)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- CallFrame* callFrame = stackFrame.callFrame;
- JSFunction* callee = asFunction(callFrame->callee());
- ASSERT(!callee->isHostFunction());
- CodeBlock* newCodeBlock = &callee->jsExecutable()->generatedBytecodeForConstruct();
- int argCount = callFrame->argumentCountIncludingThis();
- ReturnAddressPtr pc = callFrame->returnPC();
-
- ASSERT(argCount != newCodeBlock->m_numParameters);
-
- CallFrame* oldCallFrame = callFrame->callerFrame();
-
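- // Same frame realignment as op_call_arityCheck above, but for construct.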
- Register* r;
- if (argCount > newCodeBlock->m_numParameters) {
- size_t numParameters = newCodeBlock->m_numParameters;
- r = callFrame->registers() + numParameters;
- Register* newEnd = r + newCodeBlock->m_numCalleeRegisters;
- if (!stackFrame.registerFile->grow(newEnd)) {
- // Rewind to the previous call frame because op_call already optimistically
- // moved the call frame forward.
- ExceptionHandler handler = jitThrow(stackFrame.globalData, oldCallFrame, createStackOverflowError(oldCallFrame), pc);
- STUB_SET_RETURN_ADDRESS(handler.catchRoutine);
- return handler.callFrame;
- }
-
- Register* argv = r - RegisterFile::CallFrameHeaderSize - numParameters - argCount;
- for (size_t i = 0; i < numParameters; ++i)
- argv[i + argCount] = argv[i];
- } else {
- size_t omittedArgCount = newCodeBlock->m_numParameters - argCount;
- r = callFrame->registers() + omittedArgCount;
- Register* newEnd = r + newCodeBlock->m_numCalleeRegisters;
- if (!stackFrame.registerFile->grow(newEnd)) {
- // Rewind to the previous call frame because op_call already optimistically
- // moved the call frame forward.
- ExceptionHandler handler = jitThrow(stackFrame.globalData, oldCallFrame, createStackOverflowError(oldCallFrame), pc);
- STUB_SET_RETURN_ADDRESS(handler.catchRoutine);
- return handler.callFrame;
- }
-
- Register* argv = r - RegisterFile::CallFrameHeaderSize - omittedArgCount;
- for (size_t i = 0; i < omittedArgCount; ++i)
- argv[i] = jsUndefined();
- }
-
- callFrame = CallFrame::create(r);
- callFrame->setCallerFrame(oldCallFrame);
- callFrame->setArgumentCountIncludingThis(argCount);
- callFrame->setCallee(callee);
- callFrame->setScopeChain(callee->scope().node());
- callFrame->setReturnPC(pc.value());
-
- ASSERT((void*)callFrame <= stackFrame.registerFile->end());
- return callFrame;
-}
-
-#if ENABLE(JIT_OPTIMIZE_CALL)
-DEFINE_STUB_FUNCTION(void*, vm_lazyLinkCall)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
- CallFrame* callFrame = stackFrame.callFrame;
- JSFunction* callee = asFunction(callFrame->callee());
- ExecutableBase* executable = callee->executable();
-
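- // First call through an unlinked call site lands here: pick the callee's entry
- // point (compiling JS functions on demand; arity mismatches go through the
- // arity-check entry), and once the site has been seen twice, patch it to call
- // that entry point directly.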
- MacroAssemblerCodePtr codePtr;
- CodeBlock* codeBlock = 0;
- if (executable->isHostFunction())
- codePtr = executable->generatedJITCodeForCall().addressForCall();
- else {
- FunctionExecutable* functionExecutable = static_cast<FunctionExecutable*>(executable);
- JSObject* error = functionExecutable->compileForCall(callFrame, callee->scope().node());
- if (error) {
- callFrame->globalData().exception = createStackOverflowError(callFrame);
- return 0;
- }
- codeBlock = &functionExecutable->generatedBytecodeForCall();
- if (callFrame->argumentCountIncludingThis() == static_cast<size_t>(codeBlock->m_numParameters))
- codePtr = functionExecutable->generatedJITCodeForCall().addressForCall();
- else
- codePtr = functionExecutable->generatedJITCodeForCallWithArityCheck();
- }
- CallLinkInfo* callLinkInfo = &stackFrame.callFrame->callerFrame()->codeBlock()->getCallLinkInfo(callFrame->returnPC());
-
- if (!callLinkInfo->seenOnce())
- callLinkInfo->setSeen();
- else
- JIT::linkCall(callee, stackFrame.callFrame->callerFrame()->codeBlock(), codeBlock, codePtr, callLinkInfo, callFrame->argumentCountIncludingThis(), stackFrame.globalData);
-
- return codePtr.executableAddress();
-}
-
-DEFINE_STUB_FUNCTION(void*, vm_lazyLinkConstruct)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
- CallFrame* callFrame = stackFrame.callFrame;
- JSFunction* callee = asFunction(callFrame->callee());
- ExecutableBase* executable = callee->executable();
-
- MacroAssemblerCodePtr codePtr;
- CodeBlock* codeBlock = 0;
- if (executable->isHostFunction())
- codePtr = executable->generatedJITCodeForConstruct().addressForCall();
- else {
- FunctionExecutable* functionExecutable = static_cast<FunctionExecutable*>(executable);
- JSObject* error = functionExecutable->compileForConstruct(callFrame, callee->scope().node());
- if (error) {
- throwStackOverflowError(callFrame, stackFrame.globalData, ReturnAddressPtr(callFrame->returnPC()), STUB_RETURN_ADDRESS);
- return 0;
- }
- codeBlock = &functionExecutable->generatedBytecodeForConstruct();
- if (callFrame->argumentCountIncludingThis() == static_cast<size_t>(codeBlock->m_numParameters))
- codePtr = functionExecutable->generatedJITCodeForConstruct().addressForCall();
- else
- codePtr = functionExecutable->generatedJITCodeForConstructWithArityCheck();
- }
- CallLinkInfo* callLinkInfo = &stackFrame.callFrame->callerFrame()->codeBlock()->getCallLinkInfo(callFrame->returnPC());
-
- if (!callLinkInfo->seenOnce())
- callLinkInfo->setSeen();
- else
- JIT::linkConstruct(callee, stackFrame.callFrame->callerFrame()->codeBlock(), codeBlock, codePtr, callLinkInfo, callFrame->argumentCountIncludingThis(), stackFrame.globalData);
-
- return codePtr.executableAddress();
-}
-#endif // ENABLE(JIT_OPTIMIZE_CALL)
-
-DEFINE_STUB_FUNCTION(JSObject*, op_push_activation)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSActivation* activation = new (stackFrame.globalData) JSActivation(stackFrame.callFrame, static_cast<FunctionExecutable*>(stackFrame.callFrame->codeBlock()->ownerExecutable()));
- stackFrame.callFrame->setScopeChain(stackFrame.callFrame->scopeChain()->copy()->push(activation));
- return activation;
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_call_NotJSFunction)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSValue funcVal = stackFrame.args[0].jsValue();
-
- CallData callData;
- CallType callType = getCallData(funcVal, callData);
-
- ASSERT(callType != CallTypeJS);
-
- if (callType == CallTypeHost) {
- int registerOffset = stackFrame.args[1].int32();
- int argCount = stackFrame.args[2].int32();
- CallFrame* previousCallFrame = stackFrame.callFrame;
- CallFrame* callFrame = CallFrame::create(previousCallFrame->registers() + registerOffset);
- if (!stackFrame.registerFile->grow(callFrame->registers())) {
- throwStackOverflowError(previousCallFrame, stackFrame.globalData, callFrame->returnPC(), STUB_RETURN_ADDRESS);
- VM_THROW_EXCEPTION();
- }
-
- callFrame->init(0, static_cast<Instruction*>((STUB_RETURN_ADDRESS).value()), previousCallFrame->scopeChain(), previousCallFrame, argCount, asObject(funcVal));
-
- EncodedJSValue returnValue;
- {
- SamplingTool::HostCallRecord callRecord(CTI_SAMPLER);
- returnValue = callData.native.function(callFrame);
- }
-
- CHECK_FOR_EXCEPTION_AT_END();
- return returnValue;
- }
-
- ASSERT(callType == CallTypeNone);
-
- stackFrame.globalData->exception = createNotAFunctionError(stackFrame.callFrame, funcVal);
- VM_THROW_EXCEPTION();
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_create_arguments)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- Arguments* arguments = new (stackFrame.globalData) Arguments(stackFrame.callFrame);
- return JSValue::encode(JSValue(arguments));
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_create_arguments_no_params)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- Arguments* arguments = new (stackFrame.globalData) Arguments(stackFrame.callFrame, Arguments::NoParameters);
- return JSValue::encode(JSValue(arguments));
-}
-
-DEFINE_STUB_FUNCTION(void, op_tear_off_activation)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- ASSERT(stackFrame.callFrame->codeBlock()->needsFullScopeChain());
- JSValue activationValue = stackFrame.args[0].jsValue();
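- // If no activation was ever created, the arguments object (if any) still needs the
- // parameters copied off the stack. In strict mode 'arguments' does not alias the
- // registers, so it needs no tear-off.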
- if (!activationValue) {
- if (JSValue v = stackFrame.args[1].jsValue()) {
- if (!stackFrame.callFrame->codeBlock()->isStrictMode())
- asArguments(v)->copyRegisters();
- }
- return;
- }
- JSActivation* activation = asActivation(stackFrame.args[0].jsValue());
- activation->copyRegisters();
- if (JSValue v = stackFrame.args[1].jsValue()) {
- if (!stackFrame.callFrame->codeBlock()->isStrictMode())
- asArguments(v)->setActivation(activation);
- }
-}
-
-DEFINE_STUB_FUNCTION(void, op_tear_off_arguments)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- ASSERT(stackFrame.callFrame->codeBlock()->usesArguments() && !stackFrame.callFrame->codeBlock()->needsFullScopeChain());
- asArguments(stackFrame.args[0].jsValue())->copyRegisters();
-}
-
-DEFINE_STUB_FUNCTION(void, op_profile_will_call)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- ASSERT(*stackFrame.enabledProfilerReference);
- (*stackFrame.enabledProfilerReference)->willExecute(stackFrame.callFrame, stackFrame.args[0].jsValue());
-}
-
-DEFINE_STUB_FUNCTION(void, op_profile_did_call)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- ASSERT(*stackFrame.enabledProfilerReference);
- (*stackFrame.enabledProfilerReference)->didExecute(stackFrame.callFrame, stackFrame.args[0].jsValue());
-}
-
-DEFINE_STUB_FUNCTION(void, op_ret_scopeChain)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- ASSERT(stackFrame.callFrame->codeBlock()->needsFullScopeChain());
- stackFrame.callFrame->scopeChain()->deref();
-}
-
-DEFINE_STUB_FUNCTION(JSObject*, op_new_array)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- ArgList argList(&stackFrame.callFrame->registers()[stackFrame.args[0].int32()], stackFrame.args[1].int32());
- return constructArray(stackFrame.callFrame, argList);
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_resolve)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- CallFrame* callFrame = stackFrame.callFrame;
- ScopeChainNode* scopeChain = callFrame->scopeChain();
-
- ScopeChainIterator iter = scopeChain->begin();
- ScopeChainIterator end = scopeChain->end();
- ASSERT(iter != end);
-
- Identifier& ident = stackFrame.args[0].identifier();
- do {
- JSObject* o = *iter;
- PropertySlot slot(o);
- if (o->getPropertySlot(callFrame, ident, slot)) {
- JSValue result = slot.getValue(callFrame, ident);
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(result);
- }
- } while (++iter != end);
-
- stackFrame.globalData->exception = createUndefinedVariableError(callFrame, ident);
- VM_THROW_EXCEPTION();
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_construct_NotJSConstruct)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSValue constrVal = stackFrame.args[0].jsValue();
-
- ConstructData constructData;
- ConstructType constructType = getConstructData(constrVal, constructData);
-
- ASSERT(constructType != ConstructTypeJS);
-
- if (constructType == ConstructTypeHost) {
- int registerOffset = stackFrame.args[1].int32();
- int argCount = stackFrame.args[2].int32();
- CallFrame* previousCallFrame = stackFrame.callFrame;
- CallFrame* callFrame = CallFrame::create(previousCallFrame->registers() + registerOffset);
- if (!stackFrame.registerFile->grow(callFrame->registers())) {
- throwStackOverflowError(previousCallFrame, stackFrame.globalData, callFrame->returnPC(), STUB_RETURN_ADDRESS);
- VM_THROW_EXCEPTION();
- }
-
- callFrame->init(0, static_cast<Instruction*>((STUB_RETURN_ADDRESS).value()), previousCallFrame->scopeChain(), previousCallFrame, argCount, asObject(constrVal));
-
- EncodedJSValue returnValue;
- {
- SamplingTool::HostCallRecord callRecord(CTI_SAMPLER);
- returnValue = constructData.native.function(callFrame);
- }
-
- CHECK_FOR_EXCEPTION_AT_END();
- return returnValue;
- }
-
- ASSERT(constructType == ConstructTypeNone);
-
- stackFrame.globalData->exception = createNotAConstructorError(stackFrame.callFrame, constrVal);
- VM_THROW_EXCEPTION();
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_val)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- CallFrame* callFrame = stackFrame.callFrame;
- JSGlobalData* globalData = stackFrame.globalData;
-
- JSValue baseValue = stackFrame.args[0].jsValue();
- JSValue subscript = stackFrame.args[1].jsValue();
-
- if (LIKELY(baseValue.isCell() && subscript.isString())) {
- Identifier propertyName(callFrame, asString(subscript)->value(callFrame));
- PropertySlot slot(baseValue.asCell());
- // JSString::value may have thrown. A thrown exception leaves the identifier null, and no
- // property can match a null identifier, so the lookup misses and we wind up in the
- // CHECK_FOR_EXCEPTION_AT_END below.
- if (baseValue.asCell()->fastGetOwnPropertySlot(callFrame, propertyName, slot)) {
- JSValue result = slot.getValue(callFrame, propertyName);
- CHECK_FOR_EXCEPTION();
- return JSValue::encode(result);
- }
- }
-
- if (subscript.isUInt32()) {
- uint32_t i = subscript.asUInt32();
- if (isJSString(globalData, baseValue) && asString(baseValue)->canGetIndex(i)) {
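- // Repatch the call site to the string-specialized stub so later hits take the fast path.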
- ctiPatchCallByReturnAddress(callFrame->codeBlock(), STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_val_string));
- JSValue result = asString(baseValue)->getIndex(callFrame, i);
- CHECK_FOR_EXCEPTION();
- return JSValue::encode(result);
- }
- if (isJSByteArray(globalData, baseValue) && asByteArray(baseValue)->canAccessIndex(i)) {
- // All fast byte array accesses are safe from exceptions so return immediately to avoid exception checks.
- ctiPatchCallByReturnAddress(callFrame->codeBlock(), STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_val_byte_array));
- return JSValue::encode(asByteArray(baseValue)->getIndex(callFrame, i));
- }
- JSValue result = baseValue.get(callFrame, i);
- CHECK_FOR_EXCEPTION();
- return JSValue::encode(result);
- }
-
- Identifier property(callFrame, subscript.toString(callFrame));
- JSValue result = baseValue.get(callFrame, property);
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(result);
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_val_string)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- CallFrame* callFrame = stackFrame.callFrame;
- JSGlobalData* globalData = stackFrame.globalData;
-
- JSValue baseValue = stackFrame.args[0].jsValue();
- JSValue subscript = stackFrame.args[1].jsValue();
-
- JSValue result;
-
- if (LIKELY(subscript.isUInt32())) {
- uint32_t i = subscript.asUInt32();
- if (isJSString(globalData, baseValue) && asString(baseValue)->canGetIndex(i))
- result = asString(baseValue)->getIndex(callFrame, i);
- else {
- result = baseValue.get(callFrame, i);
- if (!isJSString(globalData, baseValue))
- ctiPatchCallByReturnAddress(callFrame->codeBlock(), STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_val));
- }
- } else {
- Identifier property(callFrame, subscript.toString(callFrame));
- result = baseValue.get(callFrame, property);
- }
-
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(result);
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_val_byte_array)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- CallFrame* callFrame = stackFrame.callFrame;
- JSGlobalData* globalData = stackFrame.globalData;
-
- JSValue baseValue = stackFrame.args[0].jsValue();
- JSValue subscript = stackFrame.args[1].jsValue();
-
- JSValue result;
-
- if (LIKELY(subscript.isUInt32())) {
- uint32_t i = subscript.asUInt32();
- if (isJSByteArray(globalData, baseValue) && asByteArray(baseValue)->canAccessIndex(i)) {
- // All fast byte array accesses are safe from exceptions so return immediately to avoid exception checks.
- return JSValue::encode(asByteArray(baseValue)->getIndex(callFrame, i));
- }
-
- result = baseValue.get(callFrame, i);
- if (!isJSByteArray(globalData, baseValue))
- ctiPatchCallByReturnAddress(callFrame->codeBlock(), STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_val));
- } else {
- Identifier property(callFrame, subscript.toString(callFrame));
- result = baseValue.get(callFrame, property);
- }
-
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(result);
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_sub)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSValue src1 = stackFrame.args[0].jsValue();
- JSValue src2 = stackFrame.args[1].jsValue();
-
- double left;
- double right;
- if (src1.getNumber(left) && src2.getNumber(right))
- return JSValue::encode(jsNumber(left - right));
-
- CallFrame* callFrame = stackFrame.callFrame;
- JSValue result = jsNumber(src1.toNumber(callFrame) - src2.toNumber(callFrame));
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(result);
-}
-
-DEFINE_STUB_FUNCTION(void, op_put_by_val)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- CallFrame* callFrame = stackFrame.callFrame;
- JSGlobalData* globalData = stackFrame.globalData;
-
- JSValue baseValue = stackFrame.args[0].jsValue();
- JSValue subscript = stackFrame.args[1].jsValue();
- JSValue value = stackFrame.args[2].jsValue();
-
- if (LIKELY(subscript.isUInt32())) {
- uint32_t i = subscript.asUInt32();
- if (isJSArray(globalData, baseValue)) {
- JSArray* jsArray = asArray(baseValue);
- if (jsArray->canSetIndex(i))
- jsArray->setIndex(i, value);
- else
- jsArray->JSArray::put(callFrame, i, value);
- } else if (isJSByteArray(globalData, baseValue) && asByteArray(baseValue)->canAccessIndex(i)) {
- JSByteArray* jsByteArray = asByteArray(baseValue);
- ctiPatchCallByReturnAddress(callFrame->codeBlock(), STUB_RETURN_ADDRESS, FunctionPtr(cti_op_put_by_val_byte_array));
- // All fast byte array accesses are safe from exceptions so return immediately to avoid exception checks.
- if (value.isInt32()) {
- jsByteArray->setIndex(i, value.asInt32());
- return;
- } else {
- double dValue = 0;
- if (value.getNumber(dValue)) {
- jsByteArray->setIndex(i, dValue);
- return;
- }
- }
-
- baseValue.put(callFrame, i, value);
- } else
- baseValue.put(callFrame, i, value);
- } else {
- Identifier property(callFrame, subscript.toString(callFrame));
- if (!stackFrame.globalData->exception) { // Don't put to an object if toString threw an exception.
- PutPropertySlot slot(callFrame->codeBlock()->isStrictMode());
- baseValue.put(callFrame, property, value, slot);
- }
- }
-
- CHECK_FOR_EXCEPTION_AT_END();
-}
-
-DEFINE_STUB_FUNCTION(void, op_put_by_val_byte_array)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- CallFrame* callFrame = stackFrame.callFrame;
- JSGlobalData* globalData = stackFrame.globalData;
-
- JSValue baseValue = stackFrame.args[0].jsValue();
- JSValue subscript = stackFrame.args[1].jsValue();
- JSValue value = stackFrame.args[2].jsValue();
-
- if (LIKELY(subscript.isUInt32())) {
- uint32_t i = subscript.asUInt32();
- if (isJSByteArray(globalData, baseValue) && asByteArray(baseValue)->canAccessIndex(i)) {
- JSByteArray* jsByteArray = asByteArray(baseValue);
-
- // All fast byte array accesses are safe from exceptions so return immediately to avoid exception checks.
- if (value.isInt32()) {
- jsByteArray->setIndex(i, value.asInt32());
- return;
- } else {
- double dValue = 0;
- if (value.getNumber(dValue)) {
- jsByteArray->setIndex(i, dValue);
- return;
- }
- }
- }
-
- if (!isJSByteArray(globalData, baseValue))
- ctiPatchCallByReturnAddress(callFrame->codeBlock(), STUB_RETURN_ADDRESS, FunctionPtr(cti_op_put_by_val));
- baseValue.put(callFrame, i, value);
- } else {
- Identifier property(callFrame, subscript.toString(callFrame));
- if (!stackFrame.globalData->exception) { // Don't put to an object if toString threw an exception.
- PutPropertySlot slot(callFrame->codeBlock()->isStrictMode());
- baseValue.put(callFrame, property, value, slot);
- }
- }
-
- CHECK_FOR_EXCEPTION_AT_END();
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_lesseq)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- CallFrame* callFrame = stackFrame.callFrame;
- JSValue result = jsBoolean(jsLessEq(callFrame, stackFrame.args[0].jsValue(), stackFrame.args[1].jsValue()));
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(result);
-}
-
-DEFINE_STUB_FUNCTION(int, op_load_varargs)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- CallFrame* callFrame = stackFrame.callFrame;
- RegisterFile* registerFile = stackFrame.registerFile;
- int argsOffset = stackFrame.args[0].int32();
- JSValue arguments = callFrame->registers()[argsOffset].jsValue();
- uint32_t argCount = 0;
- if (!arguments) {
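- // No arguments object was materialized, so copy the caller's actual parameters
- // directly from their register-file slots.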
- int providedParams = callFrame->registers()[RegisterFile::ArgumentCount].i() - 1;
- argCount = providedParams;
- argCount = min(argCount, static_cast<uint32_t>(Arguments::MaxArguments));
- int32_t sizeDelta = argsOffset + argCount + RegisterFile::CallFrameHeaderSize;
- Register* newEnd = callFrame->registers() + sizeDelta;
- if (!registerFile->grow(newEnd) || ((newEnd - callFrame->registers()) != sizeDelta)) {
- stackFrame.globalData->exception = createStackOverflowError(callFrame);
- VM_THROW_EXCEPTION();
- }
- int32_t expectedParams = asFunction(callFrame->callee())->jsExecutable()->parameterCount();
- int32_t inplaceArgs = min(providedParams, expectedParams);
-
- Register* inplaceArgsDst = callFrame->registers() + argsOffset;
-
- Register* inplaceArgsEnd = inplaceArgsDst + inplaceArgs;
- Register* inplaceArgsEnd2 = inplaceArgsDst + providedParams;
-
- Register* inplaceArgsSrc = callFrame->registers() - RegisterFile::CallFrameHeaderSize - expectedParams;
- Register* inplaceArgsSrc2 = inplaceArgsSrc - providedParams - 1 + inplaceArgs;
-
- // The first step is to copy the "expected" parameters from their normal location relative to the call frame.
- while (inplaceArgsDst < inplaceArgsEnd)
- *inplaceArgsDst++ = *inplaceArgsSrc++;
-
- // Then we copy any additional arguments that may be further up the stack ('-1' to account for 'this')
- while (inplaceArgsDst < inplaceArgsEnd2)
- *inplaceArgsDst++ = *inplaceArgsSrc2++;
-
- } else if (!arguments.isUndefinedOrNull()) {
- if (!arguments.isObject()) {
- stackFrame.globalData->exception = createInvalidParamError(callFrame, "Function.prototype.apply", arguments);
- VM_THROW_EXCEPTION();
- }
- if (asObject(arguments)->classInfo() == &Arguments::info) {
- Arguments* argsObject = asArguments(arguments);
- argCount = argsObject->numProvidedArguments(callFrame);
- argCount = min(argCount, static_cast<uint32_t>(Arguments::MaxArguments));
- int32_t sizeDelta = argsOffset + argCount + RegisterFile::CallFrameHeaderSize;
- Register* newEnd = callFrame->registers() + sizeDelta;
- if (!registerFile->grow(newEnd) || ((newEnd - callFrame->registers()) != sizeDelta)) {
- stackFrame.globalData->exception = createStackOverflowError(callFrame);
- VM_THROW_EXCEPTION();
- }
- argsObject->copyToRegisters(callFrame, callFrame->registers() + argsOffset, argCount);
- } else if (isJSArray(&callFrame->globalData(), arguments)) {
- JSArray* array = asArray(arguments);
- argCount = array->length();
- argCount = min(argCount, static_cast<uint32_t>(Arguments::MaxArguments));
- int32_t sizeDelta = argsOffset + argCount + RegisterFile::CallFrameHeaderSize;
- Register* newEnd = callFrame->registers() + sizeDelta;
- if (!registerFile->grow(newEnd) || ((newEnd - callFrame->registers()) != sizeDelta)) {
- stackFrame.globalData->exception = createStackOverflowError(callFrame);
- VM_THROW_EXCEPTION();
- }
- array->copyToRegisters(callFrame, callFrame->registers() + argsOffset, argCount);
- } else if (asObject(arguments)->inherits(&JSArray::info)) {
- JSObject* argObject = asObject(arguments);
- argCount = argObject->get(callFrame, callFrame->propertyNames().length).toUInt32(callFrame);
- argCount = min(argCount, static_cast<uint32_t>(Arguments::MaxArguments));
- int32_t sizeDelta = argsOffset + argCount + RegisterFile::CallFrameHeaderSize;
- Register* newEnd = callFrame->registers() + sizeDelta;
- if (!registerFile->grow(newEnd) || ((newEnd - callFrame->registers()) != sizeDelta)) {
- stackFrame.globalData->exception = createStackOverflowError(callFrame);
- VM_THROW_EXCEPTION();
- }
- Register* argsBuffer = callFrame->registers() + argsOffset;
- for (unsigned i = 0; i < argCount; ++i) {
- argsBuffer[i] = asObject(arguments)->get(callFrame, i);
- CHECK_FOR_EXCEPTION();
- }
- } else {
- stackFrame.globalData->exception = createInvalidParamError(callFrame, "Function.prototype.apply", arguments);
- VM_THROW_EXCEPTION();
- }
- }
-
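- // The returned count includes the 'this' value.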
- return argCount + 1;
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_negate)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSValue src = stackFrame.args[0].jsValue();
-
- double v;
- if (src.getNumber(v))
- return JSValue::encode(jsNumber(-v));
-
- CallFrame* callFrame = stackFrame.callFrame;
- JSValue result = jsNumber(-src.toNumber(callFrame));
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(result);
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_resolve_base)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- return JSValue::encode(JSC::resolveBase(stackFrame.callFrame, stackFrame.args[0].identifier(), stackFrame.callFrame->scopeChain(), false));
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_resolve_base_strict_put)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
- JSValue base = JSC::resolveBase(stackFrame.callFrame, stackFrame.args[0].identifier(), stackFrame.callFrame->scopeChain(), true);
- if (!base) {
- stackFrame.globalData->exception = createErrorForInvalidGlobalAssignment(stackFrame.callFrame, stackFrame.args[0].identifier().ustring());
- VM_THROW_EXCEPTION();
- }
- return JSValue::encode(base);
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_ensure_property_exists)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
- JSValue base = stackFrame.callFrame->r(stackFrame.args[0].int32()).jsValue();
- JSObject* object = asObject(base);
- PropertySlot slot(object);
- ASSERT(stackFrame.callFrame->codeBlock()->isStrictMode());
- if (!object->getPropertySlot(stackFrame.callFrame, stackFrame.args[1].identifier(), slot)) {
- stackFrame.globalData->exception = createErrorForInvalidGlobalAssignment(stackFrame.callFrame, stackFrame.args[1].identifier().ustring());
- VM_THROW_EXCEPTION();
- }
-
- return JSValue::encode(base);
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_resolve_skip)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- CallFrame* callFrame = stackFrame.callFrame;
- ScopeChainNode* scopeChain = callFrame->scopeChain();
-
- int skip = stackFrame.args[1].int32();
-
- ScopeChainIterator iter = scopeChain->begin();
- ScopeChainIterator end = scopeChain->end();
- ASSERT(iter != end);
- CodeBlock* codeBlock = callFrame->codeBlock();
- bool checkTopLevel = codeBlock->codeType() == FunctionCode && codeBlock->needsFullScopeChain();
- ASSERT(skip || !checkTopLevel);
- if (checkTopLevel && skip--) {
- if (callFrame->uncheckedR(codeBlock->activationRegister()).jsValue())
- ++iter;
- }
- while (skip--) {
- ++iter;
- ASSERT(iter != end);
- }
- Identifier& ident = stackFrame.args[0].identifier();
- do {
- JSObject* o = *iter;
- PropertySlot slot(o);
- if (o->getPropertySlot(callFrame, ident, slot)) {
- JSValue result = slot.getValue(callFrame, ident);
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(result);
- }
- } while (++iter != end);
-
- stackFrame.globalData->exception = createUndefinedVariableError(callFrame, ident);
- VM_THROW_EXCEPTION();
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_resolve_global)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- CallFrame* callFrame = stackFrame.callFrame;
- CodeBlock* codeBlock = callFrame->codeBlock();
- JSGlobalObject* globalObject = codeBlock->globalObject();
- Identifier& ident = stackFrame.args[0].identifier();
- unsigned globalResolveInfoIndex = stackFrame.args[1].int32();
- ASSERT(globalObject->isGlobalObject());
-
- PropertySlot slot(globalObject);
- if (globalObject->getPropertySlot(callFrame, ident, slot)) {
- JSValue result = slot.getValue(callFrame, ident);
- if (slot.isCacheableValue() && !globalObject->structure()->isUncacheableDictionary() && slot.slotBase() == globalObject) {
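- // Cache the global object's structure and the property's offset so the JIT fast path can skip this lookup next time.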
- GlobalResolveInfo& globalResolveInfo = codeBlock->globalResolveInfo(globalResolveInfoIndex);
- if (globalResolveInfo.structure)
- globalResolveInfo.structure->deref();
- globalObject->structure()->ref();
- globalResolveInfo.structure = globalObject->structure();
- globalResolveInfo.offset = slot.cachedOffset();
- return JSValue::encode(result);
- }
-
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(result);
- }
-
- stackFrame.globalData->exception = createUndefinedVariableError(callFrame, ident);
- VM_THROW_EXCEPTION();
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_div)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSValue src1 = stackFrame.args[0].jsValue();
- JSValue src2 = stackFrame.args[1].jsValue();
-
- double left;
- double right;
- if (src1.getNumber(left) && src2.getNumber(right))
- return JSValue::encode(jsNumber(left / right));
-
- CallFrame* callFrame = stackFrame.callFrame;
- JSValue result = jsNumber(src1.toNumber(callFrame) / src2.toNumber(callFrame));
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(result);
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_pre_dec)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSValue v = stackFrame.args[0].jsValue();
-
- CallFrame* callFrame = stackFrame.callFrame;
- JSValue result = jsNumber(v.toNumber(callFrame) - 1);
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(result);
-}
-
-DEFINE_STUB_FUNCTION(int, op_jless)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSValue src1 = stackFrame.args[0].jsValue();
- JSValue src2 = stackFrame.args[1].jsValue();
- CallFrame* callFrame = stackFrame.callFrame;
-
- bool result = jsLess(callFrame, src1, src2);
- CHECK_FOR_EXCEPTION_AT_END();
- return result;
-}
-
-DEFINE_STUB_FUNCTION(int, op_jlesseq)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSValue src1 = stackFrame.args[0].jsValue();
- JSValue src2 = stackFrame.args[1].jsValue();
- CallFrame* callFrame = stackFrame.callFrame;
-
- bool result = jsLessEq(callFrame, src1, src2);
- CHECK_FOR_EXCEPTION_AT_END();
- return result;
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_not)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSValue src = stackFrame.args[0].jsValue();
-
- CallFrame* callFrame = stackFrame.callFrame;
-
- JSValue result = jsBoolean(!src.toBoolean(callFrame));
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(result);
-}
-
-DEFINE_STUB_FUNCTION(int, op_jtrue)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSValue src1 = stackFrame.args[0].jsValue();
-
- CallFrame* callFrame = stackFrame.callFrame;
-
- bool result = src1.toBoolean(callFrame);
- CHECK_FOR_EXCEPTION_AT_END();
- return result;
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_post_inc)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSValue v = stackFrame.args[0].jsValue();
-
- CallFrame* callFrame = stackFrame.callFrame;
-
- JSValue number = v.toJSNumber(callFrame);
- CHECK_FOR_EXCEPTION_AT_END();
-
- callFrame->registers()[stackFrame.args[1].int32()] = jsNumber(number.uncheckedGetNumber() + 1);
- return JSValue::encode(number);
-}
-
-DEFINE_STUB_FUNCTION(int, op_eq)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSValue src1 = stackFrame.args[0].jsValue();
- JSValue src2 = stackFrame.args[1].jsValue();
-
-#if USE(JSVALUE32_64)
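- // toPrimitive below can change an operand's type, so after each conversion we jump back
- // to 'start' and re-dispatch on the new types.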
- start:
- if (src2.isUndefined()) {
- return src1.isNull()
- || (src1.isCell() && src1.asCell()->structure()->typeInfo().masqueradesAsUndefined())
- || src1.isUndefined();
- }
-
- if (src2.isNull()) {
- return src1.isUndefined()
- || (src1.isCell() && src1.asCell()->structure()->typeInfo().masqueradesAsUndefined())
- || src1.isNull();
- }
-
- if (src1.isInt32()) {
- if (src2.isDouble())
- return src1.asInt32() == src2.asDouble();
- double d = src2.toNumber(stackFrame.callFrame);
- CHECK_FOR_EXCEPTION();
- return src1.asInt32() == d;
- }
-
- if (src1.isDouble()) {
- if (src2.isInt32())
- return src1.asDouble() == src2.asInt32();
- double d = src2.toNumber(stackFrame.callFrame);
- CHECK_FOR_EXCEPTION();
- return src1.asDouble() == d;
- }
-
- if (src1.isTrue()) {
- if (src2.isFalse())
- return false;
- double d = src2.toNumber(stackFrame.callFrame);
- CHECK_FOR_EXCEPTION();
- return d == 1.0;
- }
-
- if (src1.isFalse()) {
- if (src2.isTrue())
- return false;
- double d = src2.toNumber(stackFrame.callFrame);
- CHECK_FOR_EXCEPTION();
- return d == 0.0;
- }
-
- if (src1.isUndefined())
- return src2.isCell() && src2.asCell()->structure()->typeInfo().masqueradesAsUndefined();
-
- if (src1.isNull())
- return src2.isCell() && src2.asCell()->structure()->typeInfo().masqueradesAsUndefined();
-
- JSCell* cell1 = src1.asCell();
-
- if (cell1->isString()) {
- if (src2.isInt32())
- return jsToNumber(static_cast<JSString*>(cell1)->value(stackFrame.callFrame)) == src2.asInt32();
-
- if (src2.isDouble())
- return jsToNumber(static_cast<JSString*>(cell1)->value(stackFrame.callFrame)) == src2.asDouble();
-
- if (src2.isTrue())
- return jsToNumber(static_cast<JSString*>(cell1)->value(stackFrame.callFrame)) == 1.0;
-
- if (src2.isFalse())
- return jsToNumber(static_cast<JSString*>(cell1)->value(stackFrame.callFrame)) == 0.0;
-
- JSCell* cell2 = src2.asCell();
- if (cell2->isString())
- return static_cast<JSString*>(cell1)->value(stackFrame.callFrame) == static_cast<JSString*>(cell2)->value(stackFrame.callFrame);
-
- src2 = asObject(cell2)->toPrimitive(stackFrame.callFrame);
- CHECK_FOR_EXCEPTION();
- goto start;
- }
-
- if (src2.isObject())
- return asObject(cell1) == asObject(src2);
- src1 = asObject(cell1)->toPrimitive(stackFrame.callFrame);
- CHECK_FOR_EXCEPTION();
- goto start;
-
-#else // USE(JSVALUE32_64)
- CallFrame* callFrame = stackFrame.callFrame;
-
- bool result = JSValue::equalSlowCaseInline(callFrame, src1, src2);
- CHECK_FOR_EXCEPTION_AT_END();
- return result;
-#endif // USE(JSVALUE32_64)
-}
-
-DEFINE_STUB_FUNCTION(int, op_eq_strings)
-{
-#if USE(JSVALUE32_64)
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSString* string1 = stackFrame.args[0].jsString();
- JSString* string2 = stackFrame.args[1].jsString();
-
- ASSERT(string1->isString());
- ASSERT(string2->isString());
- return string1->value(stackFrame.callFrame) == string2->value(stackFrame.callFrame);
-#else
- UNUSED_PARAM(args);
- ASSERT_NOT_REACHED();
- return 0;
-#endif
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_lshift)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSValue val = stackFrame.args[0].jsValue();
- JSValue shift = stackFrame.args[1].jsValue();
-
- CallFrame* callFrame = stackFrame.callFrame;
- JSValue result = jsNumber((val.toInt32(callFrame)) << (shift.toUInt32(callFrame) & 0x1f));
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(result);
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_bitand)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSValue src1 = stackFrame.args[0].jsValue();
- JSValue src2 = stackFrame.args[1].jsValue();
-
- ASSERT(!src1.isInt32() || !src2.isInt32());
- CallFrame* callFrame = stackFrame.callFrame;
- JSValue result = jsNumber(src1.toInt32(callFrame) & src2.toInt32(callFrame));
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(result);
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_rshift)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSValue val = stackFrame.args[0].jsValue();
- JSValue shift = stackFrame.args[1].jsValue();
-
- CallFrame* callFrame = stackFrame.callFrame;
- JSValue result = jsNumber((val.toInt32(callFrame)) >> (shift.toUInt32(callFrame) & 0x1f));
-
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(result);
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_bitnot)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSValue src = stackFrame.args[0].jsValue();
-
- ASSERT(!src.isInt32());
- CallFrame* callFrame = stackFrame.callFrame;
- JSValue result = jsNumber(~src.toInt32(callFrame));
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(result);
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_resolve_with_base)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- CallFrame* callFrame = stackFrame.callFrame;
- ScopeChainNode* scopeChain = callFrame->scopeChain();
-
- ScopeChainIterator iter = scopeChain->begin();
- ScopeChainIterator end = scopeChain->end();
-
- // FIXME: add scopeDepthIsZero optimization
-
- ASSERT(iter != end);
-
- Identifier& ident = stackFrame.args[0].identifier();
- JSObject* base;
- do {
- base = *iter;
- PropertySlot slot(base);
- if (base->getPropertySlot(callFrame, ident, slot)) {
- JSValue result = slot.getValue(callFrame, ident);
- CHECK_FOR_EXCEPTION_AT_END();
-
- callFrame->registers()[stackFrame.args[1].int32()] = JSValue(base);
- return JSValue::encode(result);
- }
- ++iter;
- } while (iter != end);
-
- stackFrame.globalData->exception = createUndefinedVariableError(callFrame, ident);
- VM_THROW_EXCEPTION_AT_END();
- return JSValue::encode(JSValue());
-}
-
-DEFINE_STUB_FUNCTION(JSObject*, op_new_func_exp)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
- CallFrame* callFrame = stackFrame.callFrame;
-
- FunctionExecutable* function = stackFrame.args[0].function();
- JSFunction* func = function->make(callFrame, callFrame->scopeChain());
- ASSERT(callFrame->codeBlock()->codeType() != FunctionCode || !callFrame->codeBlock()->needsFullScopeChain() || callFrame->uncheckedR(callFrame->codeBlock()->activationRegister()).jsValue());
-
- /*
- The Identifier in a FunctionExpression can be referenced from inside
- the FunctionExpression's FunctionBody to allow the function to call
- itself recursively. However, unlike in a FunctionDeclaration, the
- Identifier in a FunctionExpression cannot be referenced from, and
- does not affect, the scope enclosing the FunctionExpression.
- */
- if (!function->name().isNull()) {
- JSStaticScopeObject* functionScopeObject = new (callFrame) JSStaticScopeObject(callFrame, function->name(), func, ReadOnly | DontDelete);
- func->scope().push(functionScopeObject);
- }
-
- return func;
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_mod)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSValue dividendValue = stackFrame.args[0].jsValue();
- JSValue divisorValue = stackFrame.args[1].jsValue();
-
- CallFrame* callFrame = stackFrame.callFrame;
- double d = dividendValue.toNumber(callFrame);
- JSValue result = jsNumber(fmod(d, divisorValue.toNumber(callFrame)));
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(result);
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_less)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- CallFrame* callFrame = stackFrame.callFrame;
- JSValue result = jsBoolean(jsLess(callFrame, stackFrame.args[0].jsValue(), stackFrame.args[1].jsValue()));
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(result);
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_post_dec)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSValue v = stackFrame.args[0].jsValue();
-
- CallFrame* callFrame = stackFrame.callFrame;
-
- JSValue number = v.toJSNumber(callFrame);
- CHECK_FOR_EXCEPTION_AT_END();
-
- callFrame->registers()[stackFrame.args[1].int32()] = jsNumber(number.uncheckedGetNumber() - 1);
- return JSValue::encode(number);
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_urshift)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSValue val = stackFrame.args[0].jsValue();
- JSValue shift = stackFrame.args[1].jsValue();
-
- CallFrame* callFrame = stackFrame.callFrame;
- JSValue result = jsNumber((val.toUInt32(callFrame)) >> (shift.toUInt32(callFrame) & 0x1f));
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(result);
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_bitxor)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSValue src1 = stackFrame.args[0].jsValue();
- JSValue src2 = stackFrame.args[1].jsValue();
-
- CallFrame* callFrame = stackFrame.callFrame;
-
- JSValue result = jsNumber(src1.toInt32(callFrame) ^ src2.toInt32(callFrame));
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(result);
-}
-
-DEFINE_STUB_FUNCTION(JSObject*, op_new_regexp)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- return new (stackFrame.globalData) RegExpObject(stackFrame.callFrame->lexicalGlobalObject(), stackFrame.callFrame->lexicalGlobalObject()->regExpStructure(), stackFrame.args[0].regExp());
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_bitor)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSValue src1 = stackFrame.args[0].jsValue();
- JSValue src2 = stackFrame.args[1].jsValue();
-
- CallFrame* callFrame = stackFrame.callFrame;
-
- JSValue result = jsNumber(src1.toInt32(callFrame) | src2.toInt32(callFrame));
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(result);
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_call_eval)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
- ASSERT(stackFrame.callFrame->codeBlock()->codeType() != FunctionCode || !stackFrame.callFrame->codeBlock()->needsFullScopeChain() || stackFrame.callFrame->uncheckedR(stackFrame.callFrame->codeBlock()->activationRegister()).jsValue());
-
- CallFrame* callFrame = stackFrame.callFrame;
- RegisterFile* registerFile = stackFrame.registerFile;
-
- Interpreter* interpreter = stackFrame.globalData->interpreter;
-
- JSValue funcVal = stackFrame.args[0].jsValue();
- int registerOffset = stackFrame.args[1].int32();
- int argCount = stackFrame.args[2].int32();
-
- Register* newCallFrame = callFrame->registers() + registerOffset;
- Register* argv = newCallFrame - RegisterFile::CallFrameHeaderSize - argCount;
- JSValue baseValue = argv[0].jsValue();
- JSGlobalObject* globalObject = callFrame->scopeChain()->globalObject;
-
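- // Only a direct call to the unmodified global eval goes through the interpreter's
- // callEval; anything else falls through and returns an empty value, signalling the
- // caller to perform an ordinary call instead.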
- if (baseValue == globalObject && funcVal == globalObject->evalFunction()) {
- JSValue result = interpreter->callEval(callFrame, registerFile, argv, argCount, registerOffset);
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(result);
- }
-
- return JSValue::encode(JSValue());
-}
-
-DEFINE_STUB_FUNCTION(void*, op_throw)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
- ExceptionHandler handler = jitThrow(stackFrame.globalData, stackFrame.callFrame, stackFrame.args[0].jsValue(), STUB_RETURN_ADDRESS);
- STUB_SET_RETURN_ADDRESS(handler.catchRoutine);
- return handler.callFrame;
-}
-
-DEFINE_STUB_FUNCTION(JSPropertyNameIterator*, op_get_pnames)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- CallFrame* callFrame = stackFrame.callFrame;
- JSObject* o = stackFrame.args[0].jsObject();
- Structure* structure = o->structure();
- JSPropertyNameIterator* jsPropertyNameIterator = structure->enumerationCache();
- if (!jsPropertyNameIterator || jsPropertyNameIterator->cachedPrototypeChain() != structure->prototypeChain(callFrame))
- jsPropertyNameIterator = JSPropertyNameIterator::create(callFrame, o);
- return jsPropertyNameIterator;
-}
-
-DEFINE_STUB_FUNCTION(int, has_property)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSObject* base = stackFrame.args[0].jsObject();
- JSString* property = stackFrame.args[1].jsString();
- int result = base->hasProperty(stackFrame.callFrame, Identifier(stackFrame.callFrame, property->value(stackFrame.callFrame)));
- CHECK_FOR_EXCEPTION_AT_END();
- return result;
-}
-
-DEFINE_STUB_FUNCTION(JSObject*, op_push_scope)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSObject* o = stackFrame.args[0].jsValue().toObject(stackFrame.callFrame);
- CHECK_FOR_EXCEPTION();
- stackFrame.callFrame->setScopeChain(stackFrame.callFrame->scopeChain()->push(o));
- return o;
-}
-
-DEFINE_STUB_FUNCTION(void, op_pop_scope)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- stackFrame.callFrame->setScopeChain(stackFrame.callFrame->scopeChain()->pop());
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_typeof)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- return JSValue::encode(jsTypeStringForValue(stackFrame.callFrame, stackFrame.args[0].jsValue()));
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_is_undefined)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSValue v = stackFrame.args[0].jsValue();
- return JSValue::encode(jsBoolean(v.isCell() ? v.asCell()->structure()->typeInfo().masqueradesAsUndefined() : v.isUndefined()));
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_is_boolean)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- return JSValue::encode(jsBoolean(stackFrame.args[0].jsValue().isBoolean()));
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_is_number)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- return JSValue::encode(jsBoolean(stackFrame.args[0].jsValue().isNumber()));
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_is_string)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- return JSValue::encode(jsBoolean(isJSString(stackFrame.globalData, stackFrame.args[0].jsValue())));
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_is_object)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- return JSValue::encode(jsBoolean(jsIsObjectType(stackFrame.args[0].jsValue())));
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_is_function)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- return JSValue::encode(jsBoolean(jsIsFunctionType(stackFrame.args[0].jsValue())));
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_stricteq)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSValue src1 = stackFrame.args[0].jsValue();
- JSValue src2 = stackFrame.args[1].jsValue();
-
- bool result = JSValue::strictEqual(stackFrame.callFrame, src1, src2);
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(jsBoolean(result));
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_to_primitive)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- return JSValue::encode(stackFrame.args[0].jsValue().toPrimitive(stackFrame.callFrame));
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_strcat)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSValue result = jsString(stackFrame.callFrame, &stackFrame.callFrame->registers()[stackFrame.args[0].int32()], stackFrame.args[1].int32());
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(result);
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_nstricteq)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSValue src1 = stackFrame.args[0].jsValue();
- JSValue src2 = stackFrame.args[1].jsValue();
-
- bool result = !JSValue::strictEqual(stackFrame.callFrame, src1, src2);
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(jsBoolean(result));
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_to_jsnumber)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSValue src = stackFrame.args[0].jsValue();
- CallFrame* callFrame = stackFrame.callFrame;
-
- JSValue result = src.toJSNumber(callFrame);
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(result);
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_in)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- CallFrame* callFrame = stackFrame.callFrame;
- JSValue baseVal = stackFrame.args[1].jsValue();
-
- if (!baseVal.isObject()) {
- stackFrame.globalData->exception = createInvalidParamError(stackFrame.callFrame, "in", baseVal);
- VM_THROW_EXCEPTION();
- }
-
- JSValue propName = stackFrame.args[0].jsValue();
- JSObject* baseObj = asObject(baseVal);
-
- uint32_t i;
- if (propName.getUInt32(i))
- return JSValue::encode(jsBoolean(baseObj->hasProperty(callFrame, i)));
-
- Identifier property(callFrame, propName.toString(callFrame));
- CHECK_FOR_EXCEPTION();
- return JSValue::encode(jsBoolean(baseObj->hasProperty(callFrame, property)));
-}
-
-DEFINE_STUB_FUNCTION(JSObject*, op_push_new_scope)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSObject* scope = new (stackFrame.globalData) JSStaticScopeObject(stackFrame.callFrame, stackFrame.args[0].identifier(), stackFrame.args[1].jsValue(), DontDelete);
-
- CallFrame* callFrame = stackFrame.callFrame;
- callFrame->setScopeChain(callFrame->scopeChain()->push(scope));
- return scope;
-}
-
-DEFINE_STUB_FUNCTION(void, op_jmp_scopes)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- unsigned count = stackFrame.args[0].int32();
- CallFrame* callFrame = stackFrame.callFrame;
-
- ScopeChainNode* tmp = callFrame->scopeChain();
- while (count--)
- tmp = tmp->pop();
- callFrame->setScopeChain(tmp);
-}
-
-DEFINE_STUB_FUNCTION(void, op_put_by_index)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- CallFrame* callFrame = stackFrame.callFrame;
- unsigned property = stackFrame.args[1].int32();
-
- stackFrame.args[0].jsValue().put(callFrame, property, stackFrame.args[2].jsValue());
-}
-
-DEFINE_STUB_FUNCTION(void*, op_switch_imm)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSValue scrutinee = stackFrame.args[0].jsValue();
- unsigned tableIndex = stackFrame.args[1].int32();
- CallFrame* callFrame = stackFrame.callFrame;
- CodeBlock* codeBlock = callFrame->codeBlock();
-
- if (scrutinee.isInt32())
- return codeBlock->immediateSwitchJumpTable(tableIndex).ctiForValue(scrutinee.asInt32()).executableAddress();
- else {
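- // A double scrutinee can still hit the jump table when it holds an exact int32 value.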
- double value;
- int32_t intValue;
- if (scrutinee.getNumber(value) && ((intValue = static_cast<int32_t>(value)) == value))
- return codeBlock->immediateSwitchJumpTable(tableIndex).ctiForValue(intValue).executableAddress();
- else
- return codeBlock->immediateSwitchJumpTable(tableIndex).ctiDefault.executableAddress();
- }
-}
-
-DEFINE_STUB_FUNCTION(void*, op_switch_char)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSValue scrutinee = stackFrame.args[0].jsValue();
- unsigned tableIndex = stackFrame.args[1].int32();
- CallFrame* callFrame = stackFrame.callFrame;
- CodeBlock* codeBlock = callFrame->codeBlock();
-
- void* result = codeBlock->characterSwitchJumpTable(tableIndex).ctiDefault.executableAddress();
-
- if (scrutinee.isString()) {
- StringImpl* value = asString(scrutinee)->value(callFrame).impl();
- if (value->length() == 1)
- result = codeBlock->characterSwitchJumpTable(tableIndex).ctiForValue(value->characters()[0]).executableAddress();
- }
-
- CHECK_FOR_EXCEPTION_AT_END();
- return result;
-}
-
-DEFINE_STUB_FUNCTION(void*, op_switch_string)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSValue scrutinee = stackFrame.args[0].jsValue();
- unsigned tableIndex = stackFrame.args[1].int32();
- CallFrame* callFrame = stackFrame.callFrame;
- CodeBlock* codeBlock = callFrame->codeBlock();
-
- void* result = codeBlock->stringSwitchJumpTable(tableIndex).ctiDefault.executableAddress();
-
- if (scrutinee.isString()) {
- StringImpl* value = asString(scrutinee)->value(callFrame).impl();
- result = codeBlock->stringSwitchJumpTable(tableIndex).ctiForValue(value).executableAddress();
- }
-
- CHECK_FOR_EXCEPTION_AT_END();
- return result;
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_del_by_val)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- CallFrame* callFrame = stackFrame.callFrame;
-
- JSValue baseValue = stackFrame.args[0].jsValue();
- JSObject* baseObj = baseValue.toObject(callFrame); // may throw
-
- JSValue subscript = stackFrame.args[1].jsValue();
- bool result;
- uint32_t i;
- if (subscript.getUInt32(i))
- result = baseObj->deleteProperty(callFrame, i);
- else {
- CHECK_FOR_EXCEPTION();
- Identifier property(callFrame, subscript.toString(callFrame));
- CHECK_FOR_EXCEPTION();
- result = baseObj->deleteProperty(callFrame, property);
- }
-
- if (!result && callFrame->codeBlock()->isStrictMode())
- stackFrame.globalData->exception = createTypeError(stackFrame.callFrame, "Unable to delete property.");
-
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(jsBoolean(result));
-}
-
-DEFINE_STUB_FUNCTION(void, op_put_getter)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- CallFrame* callFrame = stackFrame.callFrame;
-
- ASSERT(stackFrame.args[0].jsValue().isObject());
- JSObject* baseObj = asObject(stackFrame.args[0].jsValue());
- ASSERT(stackFrame.args[2].jsValue().isObject());
- baseObj->defineGetter(callFrame, stackFrame.args[1].identifier(), asObject(stackFrame.args[2].jsValue()));
-}
-
-DEFINE_STUB_FUNCTION(void, op_put_setter)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- CallFrame* callFrame = stackFrame.callFrame;
-
- ASSERT(stackFrame.args[0].jsValue().isObject());
- JSObject* baseObj = asObject(stackFrame.args[0].jsValue());
- ASSERT(stackFrame.args[2].jsValue().isObject());
- baseObj->defineSetter(callFrame, stackFrame.args[1].identifier(), asObject(stackFrame.args[2].jsValue()));
-}
-
-DEFINE_STUB_FUNCTION(void, op_throw_reference_error)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- CallFrame* callFrame = stackFrame.callFrame;
- UString message = stackFrame.args[0].jsValue().toString(callFrame);
- stackFrame.globalData->exception = createReferenceError(callFrame, message);
- VM_THROW_EXCEPTION_AT_END();
-}
-
-DEFINE_STUB_FUNCTION(void, op_throw_syntax_error)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- CallFrame* callFrame = stackFrame.callFrame;
- UString message = stackFrame.args[0].jsValue().toString(callFrame);
- stackFrame.globalData->exception = createSyntaxError(callFrame, message);
- VM_THROW_EXCEPTION_AT_END();
-}
-
-DEFINE_STUB_FUNCTION(void, op_debug)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- CallFrame* callFrame = stackFrame.callFrame;
-
- int debugHookID = stackFrame.args[0].int32();
- int firstLine = stackFrame.args[1].int32();
- int lastLine = stackFrame.args[2].int32();
-
- stackFrame.globalData->interpreter->debug(callFrame, static_cast<DebugHookID>(debugHookID), firstLine, lastLine);
-}
-
-DEFINE_STUB_FUNCTION(void*, vm_throw)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
- JSGlobalData* globalData = stackFrame.globalData;
- ExceptionHandler handler = jitThrow(globalData, stackFrame.callFrame, globalData->exception, globalData->exceptionLocation);
- STUB_SET_RETURN_ADDRESS(handler.catchRoutine);
- return handler.callFrame;
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, to_object)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- CallFrame* callFrame = stackFrame.callFrame;
- return JSValue::encode(stackFrame.args[0].jsValue().toObject(callFrame));
-}
-
-MacroAssemblerCodePtr JITThunks::ctiStub(JSGlobalData* globalData, ThunkGenerator generator)
-{
- std::pair<CTIStubMap::iterator, bool> entry = m_ctiStubMap.add(generator, MacroAssemblerCodePtr());
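- // add() reports whether the generator was newly inserted, so each stub is compiled at most once and reused thereafter.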
- if (entry.second)
- entry.first->second = generator(globalData, m_executablePool.get());
- return entry.first->second;
-}
-
-PassRefPtr<NativeExecutable> JITThunks::hostFunctionStub(JSGlobalData* globalData, NativeFunction function)
-{
- std::pair<HostFunctionStubMap::iterator, bool> entry = m_hostFunctionStubMap.add(function, 0);
- if (entry.second)
- entry.first->second = NativeExecutable::create(JIT::compileCTINativeCall(globalData, m_executablePool, function), function, ctiNativeConstruct(), callHostFunctionAsConstructor);
- return entry.first->second;
-}
-
-PassRefPtr<NativeExecutable> JITThunks::hostFunctionStub(JSGlobalData* globalData, NativeFunction function, ThunkGenerator generator)
-{
- std::pair<HostFunctionStubMap::iterator, bool> entry = m_hostFunctionStubMap.add(function, 0);
- if (entry.second) {
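- // Generate the specialized thunk only when the JIT is usable; otherwise the code pointer is left null.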
- MacroAssemblerCodePtr code = globalData->canUseJIT() ? generator(globalData, m_executablePool.get()) : MacroAssemblerCodePtr();
- entry.first->second = NativeExecutable::create(code, function, ctiNativeConstruct(), callHostFunctionAsConstructor);
- }
- return entry.first->second;
-}
-
-} // namespace JSC
-
-#endif // ENABLE(JIT)
diff --git a/JavaScriptCore/jit/JITStubs.h b/JavaScriptCore/jit/JITStubs.h
deleted file mode 100644
index 937134b..0000000
--- a/JavaScriptCore/jit/JITStubs.h
+++ /dev/null
@@ -1,416 +0,0 @@
-/*
- * Copyright (C) 2008 Apple Inc. All rights reserved.
- * Copyright (C) Research In Motion Limited 2010. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
- * its contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef JITStubs_h
-#define JITStubs_h
-
-#include "CallData.h"
-#include "MacroAssemblerCodeRef.h"
-#include "Register.h"
-#include "ThunkGenerators.h"
-#include <wtf/HashMap.h>
-
-#if ENABLE(JIT)
-
-namespace JSC {
-
- struct StructureStubInfo;
-
- class CodeBlock;
- class ExecutablePool;
- class FunctionExecutable;
- class Identifier;
- class JSGlobalData;
- class JSGlobalObject;
- class JSObject;
- class JSPropertyNameIterator;
- class JSValue;
- class JSValueEncodedAsPointer;
- class NativeExecutable;
- class Profiler;
- class PropertySlot;
- class PutPropertySlot;
- class RegisterFile;
- class RegExp;
-
- union JITStubArg {
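- // One slot in the stub argument area; the typed accessors below reinterpret whatever raw bits the JIT stored there.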
- void* asPointer;
- EncodedJSValue asEncodedJSValue;
- int32_t asInt32;
-
- JSValue jsValue() { return JSValue::decode(asEncodedJSValue); }
- JSObject* jsObject() { return static_cast<JSObject*>(asPointer); }
- Identifier& identifier() { return *static_cast<Identifier*>(asPointer); }
- int32_t int32() { return asInt32; }
- CodeBlock* codeBlock() { return static_cast<CodeBlock*>(asPointer); }
- FunctionExecutable* function() { return static_cast<FunctionExecutable*>(asPointer); }
- RegExp* regExp() { return static_cast<RegExp*>(asPointer); }
- JSPropertyNameIterator* propertyNameIterator() { return static_cast<JSPropertyNameIterator*>(asPointer); }
- JSGlobalObject* globalObject() { return static_cast<JSGlobalObject*>(asPointer); }
- JSString* jsString() { return static_cast<JSString*>(asPointer); }
- ReturnAddressPtr returnAddress() { return ReturnAddressPtr(asPointer); }
- };
-
- struct TrampolineStructure {
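- // Code pointers for the shared trampolines owned by JITThunks (see m_trampolineStructure below).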
- MacroAssemblerCodePtr ctiStringLengthTrampoline;
- MacroAssemblerCodePtr ctiVirtualCallLink;
- MacroAssemblerCodePtr ctiVirtualConstructLink;
- MacroAssemblerCodePtr ctiVirtualCall;
- MacroAssemblerCodePtr ctiVirtualConstruct;
- MacroAssemblerCodePtr ctiNativeCall;
- MacroAssemblerCodePtr ctiNativeConstruct;
- MacroAssemblerCodePtr ctiSoftModulo;
- };
-
-#if CPU(X86_64)
- struct JITStackFrame {
- void* reserved; // Unused
- JITStubArg args[6];
- void* padding[2]; // Maintain 32-byte stack alignment (possibly overkill).
-
- void* code;
- RegisterFile* registerFile;
- CallFrame* callFrame;
- void* unused1;
- Profiler** enabledProfilerReference;
- JSGlobalData* globalData;
-
- void* savedRBX;
- void* savedR15;
- void* savedR14;
- void* savedR13;
- void* savedR12;
- void* savedRBP;
- void* savedRIP;
-
- // When JIT code makes a call, it pushes its return address just below the rest of the stack.
- ReturnAddressPtr* returnAddressSlot() { return reinterpret_cast<ReturnAddressPtr*>(this) - 1; }
- };
-#elif CPU(X86)
-#if COMPILER(MSVC) || (OS(WINDOWS) && COMPILER(GCC))
-#pragma pack(push)
-#pragma pack(4)
-#endif // COMPILER(MSVC) || (OS(WINDOWS) && COMPILER(GCC))
- struct JITStackFrame {
- void* reserved; // Unused
- JITStubArg args[6];
-#if USE(JSVALUE32_64)
- void* padding[2]; // Maintain 16-byte stack alignment.
-#endif
-
- void* savedEBX;
- void* savedEDI;
- void* savedESI;
- void* savedEBP;
- void* savedEIP;
-
- void* code;
- RegisterFile* registerFile;
- CallFrame* callFrame;
- void* unused1;
- Profiler** enabledProfilerReference;
- JSGlobalData* globalData;
-
- // When JIT code makes a call, it pushes its return address just below the rest of the stack.
- ReturnAddressPtr* returnAddressSlot() { return reinterpret_cast<ReturnAddressPtr*>(this) - 1; }
- };
-#if COMPILER(MSVC) || (OS(WINDOWS) && COMPILER(GCC))
-#pragma pack(pop)
-#endif // COMPILER(MSVC) || (OS(WINDOWS) && COMPILER(GCC))
-#elif CPU(ARM_THUMB2)
- struct JITStackFrame {
- JITStubArg reserved; // Unused
- JITStubArg args[6];
-#if USE(JSVALUE64)
- void* padding; // Maintain 16-byte stack alignment.
-#endif
-
- ReturnAddressPtr thunkReturnAddress;
-
- void* preservedReturnAddress;
- void* preservedR4;
- void* preservedR5;
- void* preservedR6;
-
- // These arguments are passed in r1..r3 (r0 contained the entry code pointer, which is not preserved)
- RegisterFile* registerFile;
- CallFrame* callFrame;
- void* unused1;
-
- // These arguments are passed on the stack.
- Profiler** enabledProfilerReference;
- JSGlobalData* globalData;
-
- ReturnAddressPtr* returnAddressSlot() { return &thunkReturnAddress; }
- };
-#elif CPU(ARM_TRADITIONAL)
-#if COMPILER(MSVC)
-#pragma pack(push)
-#pragma pack(4)
-#endif // COMPILER(MSVC)
- struct JITStackFrame {
- JITStubArg padding; // Unused
- JITStubArg args[7];
-
- ReturnAddressPtr thunkReturnAddress;
-
- void* preservedR4;
- void* preservedR5;
- void* preservedR6;
- void* preservedR7;
- void* preservedR8;
- void* preservedLink;
-
- RegisterFile* registerFile;
- CallFrame* callFrame;
- void* unused1;
-
- // These arguments are passed on the stack.
- Profiler** enabledProfilerReference;
- JSGlobalData* globalData;
-
- // Unlike on x86, the return address is preserved in the thunkReturnAddress slot of this frame.
- ReturnAddressPtr* returnAddressSlot() { return &thunkReturnAddress; }
- };
-#if COMPILER(MSVC)
-#pragma pack(pop)
-#endif // COMPILER(MSVC)
-#elif CPU(MIPS)
- struct JITStackFrame {
- JITStubArg reserved; // Unused
- JITStubArg args[6];
-
-#if USE(JSVALUE32_64)
- void* padding; // Make the overall stack length 8-byte aligned.
-#endif
-
- void* preservedGP; // Stores GP when using PIC code.
- void* preservedS0;
- void* preservedS1;
- void* preservedS2;
- void* preservedReturnAddress;
-
- ReturnAddressPtr thunkReturnAddress;
-
- // These arguments are passed in a1..a3 (a0 contained the entry code pointer, which is not preserved)
- RegisterFile* registerFile;
- CallFrame* callFrame;
- void* unused1;
-
- // These arguments are passed on the stack.
- Profiler** enabledProfilerReference;
- JSGlobalData* globalData;
-
- ReturnAddressPtr* returnAddressSlot() { return &thunkReturnAddress; }
- };
-#else
-#error "JITStackFrame not defined for this platform."
-#endif
-
-#define JITSTACKFRAME_ARGS_INDEX (OBJECT_OFFSETOF(JITStackFrame, args) / sizeof(void*))
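- // Word index of the args array within JITStackFrame, used by the JIT to locate stub arguments.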
-
-#define STUB_ARGS_DECLARATION void** args
-#define STUB_ARGS (args)
-
-#if CPU(X86)
- #if COMPILER(MSVC)
- #define JIT_STUB __fastcall
- #elif COMPILER(GCC)
- #define JIT_STUB __attribute__ ((fastcall))
- #else
- #error "JIT_STUB function calls require fastcall conventions on x86, add appropriate directive/attribute here for your compiler!"
- #endif
-#else
- #define JIT_STUB
-#endif
-
- extern "C" void ctiVMThrowTrampoline();
- extern "C" void ctiOpThrowNotCaught();
- extern "C" EncodedJSValue ctiTrampoline(void* code, RegisterFile*, CallFrame*, void* /*unused1*/, Profiler**, JSGlobalData*);
-
- class JITThunks {
- public:
- JITThunks(JSGlobalData*);
- ~JITThunks();
-
- static void tryCacheGetByID(CallFrame*, CodeBlock*, ReturnAddressPtr returnAddress, JSValue baseValue, const Identifier& propertyName, const PropertySlot&, StructureStubInfo* stubInfo);
- static void tryCachePutByID(CallFrame*, CodeBlock*, ReturnAddressPtr returnAddress, JSValue baseValue, const PutPropertySlot&, StructureStubInfo* stubInfo, bool direct);
-
- MacroAssemblerCodePtr ctiStringLengthTrampoline() { return m_trampolineStructure.ctiStringLengthTrampoline; }
- MacroAssemblerCodePtr ctiVirtualCallLink() { return m_trampolineStructure.ctiVirtualCallLink; }
- MacroAssemblerCodePtr ctiVirtualConstructLink() { return m_trampolineStructure.ctiVirtualConstructLink; }
- MacroAssemblerCodePtr ctiVirtualCall() { return m_trampolineStructure.ctiVirtualCall; }
- MacroAssemblerCodePtr ctiVirtualConstruct() { return m_trampolineStructure.ctiVirtualConstruct; }
- MacroAssemblerCodePtr ctiNativeCall() { return m_trampolineStructure.ctiNativeCall; }
- MacroAssemblerCodePtr ctiNativeConstruct() { return m_trampolineStructure.ctiNativeConstruct; }
- MacroAssemblerCodePtr ctiSoftModulo() { return m_trampolineStructure.ctiSoftModulo; }
-
- MacroAssemblerCodePtr ctiStub(JSGlobalData* globalData, ThunkGenerator generator);
-
- PassRefPtr<NativeExecutable> hostFunctionStub(JSGlobalData* globalData, NativeFunction func);
- PassRefPtr<NativeExecutable> hostFunctionStub(JSGlobalData* globalData, NativeFunction func, ThunkGenerator generator);
- private:
- typedef HashMap<ThunkGenerator, MacroAssemblerCodePtr> CTIStubMap;
- CTIStubMap m_ctiStubMap;
- typedef HashMap<NativeFunction, RefPtr<NativeExecutable> > HostFunctionStubMap;
- HostFunctionStubMap m_hostFunctionStubMap;
- RefPtr<ExecutablePool> m_executablePool;
-
- TrampolineStructure m_trampolineStructure;
- };
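
ctiStub() above is a memoizing lookup: at most one piece of generated code per ThunkGenerator, allocated from m_executablePool. A plausible implementation sketched from the interface alone (the HashMap::add idiom is an assumption, not quoted from the deleted source):

    MacroAssemblerCodePtr JITThunks::ctiStub(JSGlobalData* globalData, ThunkGenerator generator)
    {
        // add() reports whether the entry is new; generate code only once.
        std::pair<CTIStubMap::iterator, bool> entry =
            m_ctiStubMap.add(generator, MacroAssemblerCodePtr());
        if (entry.second)
            entry.first->second = generator(globalData, m_executablePool.get());
        return entry.first->second;
    }
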
-
-extern "C" {
- EncodedJSValue JIT_STUB cti_op_add(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_bitand(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_bitnot(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_bitor(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_bitxor(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_call_NotJSFunction(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_call_eval(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_construct_NotJSConstruct(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_create_this(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_convert_this(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_convert_this_strict(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_create_arguments(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_create_arguments_no_params(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_del_by_id(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_del_by_val(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_div(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_get_by_id(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_get_by_id_array_fail(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_get_by_id_custom_stub(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_get_by_id_generic(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_get_by_id_getter_stub(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_get_by_id_method_check(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_get_by_id_proto_fail(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_get_by_id_proto_list(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_get_by_id_proto_list_full(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_get_by_id_self_fail(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_get_by_id_string_fail(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_get_by_val(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_get_by_val_byte_array(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_get_by_val_string(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_in(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_instanceof(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_is_boolean(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_is_function(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_is_number(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_is_object(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_is_string(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_is_undefined(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_less(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_lesseq(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_lshift(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_mod(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_mul(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_negate(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_not(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_nstricteq(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_post_dec(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_post_inc(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_pre_dec(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_pre_inc(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_resolve(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_resolve_base(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_resolve_base_strict_put(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_ensure_property_exists(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_resolve_global(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_resolve_global_dynamic(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_resolve_skip(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_resolve_with_base(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_rshift(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_strcat(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_stricteq(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_sub(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_to_jsnumber(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_to_primitive(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_typeof(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_urshift(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_to_object(STUB_ARGS_DECLARATION);
- JSObject* JIT_STUB cti_op_new_array(STUB_ARGS_DECLARATION);
- JSObject* JIT_STUB cti_op_new_func(STUB_ARGS_DECLARATION);
- JSObject* JIT_STUB cti_op_new_func_exp(STUB_ARGS_DECLARATION);
- JSObject* JIT_STUB cti_op_new_object(STUB_ARGS_DECLARATION);
- JSObject* JIT_STUB cti_op_new_regexp(STUB_ARGS_DECLARATION);
- JSObject* JIT_STUB cti_op_push_activation(STUB_ARGS_DECLARATION);
- JSObject* JIT_STUB cti_op_push_new_scope(STUB_ARGS_DECLARATION);
- JSObject* JIT_STUB cti_op_push_scope(STUB_ARGS_DECLARATION);
- JSObject* JIT_STUB cti_op_put_by_id_transition_realloc(STUB_ARGS_DECLARATION);
- JSPropertyNameIterator* JIT_STUB cti_op_get_pnames(STUB_ARGS_DECLARATION);
- int JIT_STUB cti_op_eq(STUB_ARGS_DECLARATION);
- int JIT_STUB cti_op_eq_strings(STUB_ARGS_DECLARATION);
- int JIT_STUB cti_op_jless(STUB_ARGS_DECLARATION);
- int JIT_STUB cti_op_jlesseq(STUB_ARGS_DECLARATION);
- int JIT_STUB cti_op_jtrue(STUB_ARGS_DECLARATION);
- int JIT_STUB cti_op_load_varargs(STUB_ARGS_DECLARATION);
- int JIT_STUB cti_op_loop_if_lesseq(STUB_ARGS_DECLARATION);
- int JIT_STUB cti_timeout_check(STUB_ARGS_DECLARATION);
- int JIT_STUB cti_has_property(STUB_ARGS_DECLARATION);
- void JIT_STUB cti_op_check_has_instance(STUB_ARGS_DECLARATION);
- void JIT_STUB cti_op_debug(STUB_ARGS_DECLARATION);
- void JIT_STUB cti_op_end(STUB_ARGS_DECLARATION);
- void JIT_STUB cti_op_jmp_scopes(STUB_ARGS_DECLARATION);
- void JIT_STUB cti_op_pop_scope(STUB_ARGS_DECLARATION);
- void JIT_STUB cti_op_profile_did_call(STUB_ARGS_DECLARATION);
- void JIT_STUB cti_op_profile_will_call(STUB_ARGS_DECLARATION);
- void JIT_STUB cti_op_put_by_id(STUB_ARGS_DECLARATION);
- void JIT_STUB cti_op_put_by_id_fail(STUB_ARGS_DECLARATION);
- void JIT_STUB cti_op_put_by_id_generic(STUB_ARGS_DECLARATION);
- void JIT_STUB cti_op_put_by_id_direct(STUB_ARGS_DECLARATION);
- void JIT_STUB cti_op_put_by_id_direct_fail(STUB_ARGS_DECLARATION);
- void JIT_STUB cti_op_put_by_id_direct_generic(STUB_ARGS_DECLARATION);
- void JIT_STUB cti_op_put_by_index(STUB_ARGS_DECLARATION);
- void JIT_STUB cti_op_put_by_val(STUB_ARGS_DECLARATION);
- void JIT_STUB cti_op_put_by_val_byte_array(STUB_ARGS_DECLARATION);
- void JIT_STUB cti_op_put_getter(STUB_ARGS_DECLARATION);
- void JIT_STUB cti_op_put_setter(STUB_ARGS_DECLARATION);
- void JIT_STUB cti_op_ret_scopeChain(STUB_ARGS_DECLARATION);
- void JIT_STUB cti_op_tear_off_activation(STUB_ARGS_DECLARATION);
- void JIT_STUB cti_op_tear_off_arguments(STUB_ARGS_DECLARATION);
- void JIT_STUB cti_op_throw_reference_error(STUB_ARGS_DECLARATION);
- void JIT_STUB cti_op_throw_syntax_error(STUB_ARGS_DECLARATION);
- void* JIT_STUB cti_op_call_arityCheck(STUB_ARGS_DECLARATION);
- void* JIT_STUB cti_op_construct_arityCheck(STUB_ARGS_DECLARATION);
- void* JIT_STUB cti_op_call_jitCompile(STUB_ARGS_DECLARATION);
- void* JIT_STUB cti_op_construct_jitCompile(STUB_ARGS_DECLARATION);
- void* JIT_STUB cti_op_switch_char(STUB_ARGS_DECLARATION);
- void* JIT_STUB cti_op_switch_imm(STUB_ARGS_DECLARATION);
- void* JIT_STUB cti_op_switch_string(STUB_ARGS_DECLARATION);
- void* JIT_STUB cti_op_throw(STUB_ARGS_DECLARATION);
- void* JIT_STUB cti_register_file_check(STUB_ARGS_DECLARATION);
- void* JIT_STUB cti_vm_lazyLinkCall(STUB_ARGS_DECLARATION);
- void* JIT_STUB cti_vm_lazyLinkConstruct(STUB_ARGS_DECLARATION);
- void* JIT_STUB cti_vm_throw(STUB_ARGS_DECLARATION);
-} // extern "C"
-
-} // namespace JSC
-
-#endif // ENABLE(JIT)
-
-#endif // JITStubs_h
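
The declarations in this header cover both directions of the C++/JIT boundary: ctiTrampoline() enters generated code, and the cti_* stubs are the slow paths that generated code calls back into. An illustrative entry call matching the ctiTrampoline signature declared above (every variable name here is an assumption; the setup that produces them is elided):

    // entryPoint: machine-code address of the compiled function
    // registerFile/callFrame: the JS stack and the callee's frame
    EncodedJSValue encodedResult = ctiTrampoline(
        entryPoint, registerFile, callFrame,
        0,                        // the unused1 slot, as in the declaration
        enabledProfilerReference, globalData);
    JSValue result = JSValue::decode(encodedResult);
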
diff --git a/JavaScriptCore/jit/JSInterfaceJIT.h b/JavaScriptCore/jit/JSInterfaceJIT.h
deleted file mode 100644
index 6453bab..0000000
--- a/JavaScriptCore/jit/JSInterfaceJIT.h
+++ /dev/null
@@ -1,292 +0,0 @@
-/*
- * Copyright (C) 2010 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef JSInterfaceJIT_h
-#define JSInterfaceJIT_h
-
-#include "JITCode.h"
-#include "JITStubs.h"
-#include "JSImmediate.h"
-#include "MacroAssembler.h"
-#include "RegisterFile.h"
-#include <wtf/AlwaysInline.h>
-#include <wtf/Vector.h>
-
-namespace JSC {
- class JSInterfaceJIT : public MacroAssembler {
- public:
- // NOTES:
- //
- // regT0 has two special meanings. The return value from a stub
- // call will always be in regT0, and by default (unless
- // a register is specified) emitPutVirtualRegister() will store
- // the value from regT0.
- //
- // regT3 is required to be callee-preserved.
- //
-        // tempRegister2 has no such dependencies. It is important that
-        // on x86/x86-64 it is ecx for performance reasons: the
-        // MacroAssembler will need to plant register swaps if it is not,
-        // though the code will still function correctly.
-#if CPU(X86_64)
- static const RegisterID returnValueRegister = X86Registers::eax;
- static const RegisterID cachedResultRegister = X86Registers::eax;
- static const RegisterID firstArgumentRegister = X86Registers::edi;
-
- static const RegisterID timeoutCheckRegister = X86Registers::r12;
- static const RegisterID callFrameRegister = X86Registers::r13;
- static const RegisterID tagTypeNumberRegister = X86Registers::r14;
- static const RegisterID tagMaskRegister = X86Registers::r15;
-
- static const RegisterID regT0 = X86Registers::eax;
- static const RegisterID regT1 = X86Registers::edx;
- static const RegisterID regT2 = X86Registers::ecx;
- static const RegisterID regT3 = X86Registers::ebx;
-
- static const FPRegisterID fpRegT0 = X86Registers::xmm0;
- static const FPRegisterID fpRegT1 = X86Registers::xmm1;
- static const FPRegisterID fpRegT2 = X86Registers::xmm2;
- static const FPRegisterID fpRegT3 = X86Registers::xmm3;
-#elif CPU(X86)
- static const RegisterID returnValueRegister = X86Registers::eax;
- static const RegisterID cachedResultRegister = X86Registers::eax;
-        // On x86 we always use fastcall conventions, but on
-        // OS X it might make more sense to just use regparm.
- static const RegisterID firstArgumentRegister = X86Registers::ecx;
-
- static const RegisterID timeoutCheckRegister = X86Registers::esi;
- static const RegisterID callFrameRegister = X86Registers::edi;
-
- static const RegisterID regT0 = X86Registers::eax;
- static const RegisterID regT1 = X86Registers::edx;
- static const RegisterID regT2 = X86Registers::ecx;
- static const RegisterID regT3 = X86Registers::ebx;
-
- static const FPRegisterID fpRegT0 = X86Registers::xmm0;
- static const FPRegisterID fpRegT1 = X86Registers::xmm1;
- static const FPRegisterID fpRegT2 = X86Registers::xmm2;
- static const FPRegisterID fpRegT3 = X86Registers::xmm3;
-#elif CPU(ARM_THUMB2)
- static const RegisterID returnValueRegister = ARMRegisters::r0;
- static const RegisterID cachedResultRegister = ARMRegisters::r0;
- static const RegisterID firstArgumentRegister = ARMRegisters::r0;
-
- static const RegisterID regT0 = ARMRegisters::r0;
- static const RegisterID regT1 = ARMRegisters::r1;
- static const RegisterID regT2 = ARMRegisters::r2;
- static const RegisterID regT3 = ARMRegisters::r4;
-
- static const RegisterID callFrameRegister = ARMRegisters::r5;
- static const RegisterID timeoutCheckRegister = ARMRegisters::r6;
-
- static const FPRegisterID fpRegT0 = ARMRegisters::d0;
- static const FPRegisterID fpRegT1 = ARMRegisters::d1;
- static const FPRegisterID fpRegT2 = ARMRegisters::d2;
- static const FPRegisterID fpRegT3 = ARMRegisters::d3;
-#elif CPU(ARM_TRADITIONAL)
- static const RegisterID returnValueRegister = ARMRegisters::r0;
- static const RegisterID cachedResultRegister = ARMRegisters::r0;
- static const RegisterID firstArgumentRegister = ARMRegisters::r0;
-
- static const RegisterID timeoutCheckRegister = ARMRegisters::r5;
- static const RegisterID callFrameRegister = ARMRegisters::r4;
-
- static const RegisterID regT0 = ARMRegisters::r0;
- static const RegisterID regT1 = ARMRegisters::r1;
- static const RegisterID regT2 = ARMRegisters::r2;
- // Callee preserved
- static const RegisterID regT3 = ARMRegisters::r7;
-
- static const RegisterID regS0 = ARMRegisters::S0;
- // Callee preserved
- static const RegisterID regS1 = ARMRegisters::S1;
-
- static const RegisterID regStackPtr = ARMRegisters::sp;
- static const RegisterID regLink = ARMRegisters::lr;
-
- static const FPRegisterID fpRegT0 = ARMRegisters::d0;
- static const FPRegisterID fpRegT1 = ARMRegisters::d1;
- static const FPRegisterID fpRegT2 = ARMRegisters::d2;
- static const FPRegisterID fpRegT3 = ARMRegisters::d3;
-#elif CPU(MIPS)
- static const RegisterID returnValueRegister = MIPSRegisters::v0;
- static const RegisterID cachedResultRegister = MIPSRegisters::v0;
- static const RegisterID firstArgumentRegister = MIPSRegisters::a0;
-
- // regT0 must be v0 for returning a 32-bit value.
- static const RegisterID regT0 = MIPSRegisters::v0;
-
-        // regT1 must be v1 for returning a pair of 32-bit values.
- static const RegisterID regT1 = MIPSRegisters::v1;
-
- static const RegisterID regT2 = MIPSRegisters::t4;
-
-        // regT3 must be callee-saved, so use an S register.
- static const RegisterID regT3 = MIPSRegisters::s2;
-
- static const RegisterID callFrameRegister = MIPSRegisters::s0;
- static const RegisterID timeoutCheckRegister = MIPSRegisters::s1;
-
- static const FPRegisterID fpRegT0 = MIPSRegisters::f4;
- static const FPRegisterID fpRegT1 = MIPSRegisters::f6;
- static const FPRegisterID fpRegT2 = MIPSRegisters::f8;
- static const FPRegisterID fpRegT3 = MIPSRegisters::f10;
-#else
-#error "JIT not supported on this platform."
-#endif
-
- inline Jump emitLoadJSCell(unsigned virtualRegisterIndex, RegisterID payload);
- inline Jump emitLoadInt32(unsigned virtualRegisterIndex, RegisterID dst);
- inline Jump emitLoadDouble(unsigned virtualRegisterIndex, FPRegisterID dst, RegisterID scratch);
-
-#if USE(JSVALUE32_64)
- inline Jump emitJumpIfNotJSCell(unsigned virtualRegisterIndex);
- inline Address tagFor(unsigned index, RegisterID base = callFrameRegister);
-#endif
-
-#if USE(JSVALUE64)
- Jump emitJumpIfImmediateNumber(RegisterID reg);
- Jump emitJumpIfNotImmediateNumber(RegisterID reg);
- void emitFastArithImmToInt(RegisterID reg);
-#endif
-
- inline Address payloadFor(unsigned index, RegisterID base = callFrameRegister);
- inline Address addressFor(unsigned index, RegisterID base = callFrameRegister);
- };
-
- struct ThunkHelpers {
- static unsigned stringImplDataOffset() { return StringImpl::dataOffset(); }
- static unsigned jsStringLengthOffset() { return OBJECT_OFFSETOF(JSString, m_length); }
- static unsigned jsStringValueOffset() { return OBJECT_OFFSETOF(JSString, m_value); }
- };
-
-#if USE(JSVALUE32_64)
- inline JSInterfaceJIT::Jump JSInterfaceJIT::emitLoadJSCell(unsigned virtualRegisterIndex, RegisterID payload)
- {
- loadPtr(payloadFor(virtualRegisterIndex), payload);
- return emitJumpIfNotJSCell(virtualRegisterIndex);
- }
-
- inline JSInterfaceJIT::Jump JSInterfaceJIT::emitJumpIfNotJSCell(unsigned virtualRegisterIndex)
- {
- ASSERT(static_cast<int>(virtualRegisterIndex) < FirstConstantRegisterIndex);
- return branch32(NotEqual, tagFor(virtualRegisterIndex), Imm32(JSValue::CellTag));
- }
-
- inline JSInterfaceJIT::Jump JSInterfaceJIT::emitLoadInt32(unsigned virtualRegisterIndex, RegisterID dst)
- {
- ASSERT(static_cast<int>(virtualRegisterIndex) < FirstConstantRegisterIndex);
- loadPtr(payloadFor(virtualRegisterIndex), dst);
- return branch32(NotEqual, tagFor(virtualRegisterIndex), Imm32(JSValue::Int32Tag));
- }
-
- inline JSInterfaceJIT::Address JSInterfaceJIT::tagFor(unsigned virtualRegisterIndex, RegisterID base)
- {
- ASSERT(static_cast<int>(virtualRegisterIndex) < FirstConstantRegisterIndex);
- return Address(base, (virtualRegisterIndex * sizeof(Register)) + OBJECT_OFFSETOF(JSValue, u.asBits.tag));
- }
-
- inline JSInterfaceJIT::Address JSInterfaceJIT::payloadFor(unsigned virtualRegisterIndex, RegisterID base)
- {
- ASSERT(static_cast<int>(virtualRegisterIndex) < FirstConstantRegisterIndex);
- return Address(base, (virtualRegisterIndex * sizeof(Register)) + OBJECT_OFFSETOF(JSValue, u.asBits.payload));
- }
-
- inline JSInterfaceJIT::Jump JSInterfaceJIT::emitLoadDouble(unsigned virtualRegisterIndex, FPRegisterID dst, RegisterID scratch)
- {
- ASSERT(static_cast<int>(virtualRegisterIndex) < FirstConstantRegisterIndex);
- loadPtr(tagFor(virtualRegisterIndex), scratch);
- Jump isDouble = branch32(Below, scratch, Imm32(JSValue::LowestTag));
- Jump notInt = branch32(NotEqual, scratch, Imm32(JSValue::Int32Tag));
- loadPtr(payloadFor(virtualRegisterIndex), scratch);
- convertInt32ToDouble(scratch, dst);
- Jump done = jump();
- isDouble.link(this);
- loadDouble(addressFor(virtualRegisterIndex), dst);
- done.link(this);
- return notInt;
- }
-#endif
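
The tagFor()/payloadFor() arithmetic above encodes the JSVALUE32_64 representation: each Register holds a 64-bit JSValue whose high word is a tag and whose low word is the payload. A minimal C++ sketch of the layout and of emitLoadDouble()'s branch structure (the little-endian field order and the unsigned tag compare are stated assumptions; the real constants live in JSValue):

    union EncodedValueDescriptor {
        double asDouble;
        struct {
            int32_t payload;   // low word on a little-endian target (assumed)
            uint32_t tag;      // JSValue::CellTag, JSValue::Int32Tag, ...
        } asBits;
    };

    // Mirrors emitLoadDouble(): a tag numerically below LowestTag (an
    // unsigned compare, hence branch32(Below, ...)) is the high word of a
    // genuine double; an Int32Tag payload is converted; anything else is
    // the "notInt" failure path.
    bool loadDouble(const EncodedValueDescriptor& v, uint32_t lowestTag,
                    uint32_t int32Tag, double& out)
    {
        if (v.asBits.tag < lowestTag) {
            out = v.asDouble;
            return true;
        }
        if (v.asBits.tag == int32Tag) {
            out = static_cast<double>(v.asBits.payload);
            return true;
        }
        return false; // notInt
    }
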
-
-#if USE(JSVALUE64)
- ALWAYS_INLINE JSInterfaceJIT::Jump JSInterfaceJIT::emitJumpIfImmediateNumber(RegisterID reg)
- {
- return branchTestPtr(NonZero, reg, tagTypeNumberRegister);
- }
- ALWAYS_INLINE JSInterfaceJIT::Jump JSInterfaceJIT::emitJumpIfNotImmediateNumber(RegisterID reg)
- {
- return branchTestPtr(Zero, reg, tagTypeNumberRegister);
- }
- inline JSInterfaceJIT::Jump JSInterfaceJIT::emitLoadJSCell(unsigned virtualRegisterIndex, RegisterID dst)
- {
- loadPtr(addressFor(virtualRegisterIndex), dst);
- return branchTestPtr(NonZero, dst, tagMaskRegister);
- }
-
- inline JSInterfaceJIT::Jump JSInterfaceJIT::emitLoadInt32(unsigned virtualRegisterIndex, RegisterID dst)
- {
- loadPtr(addressFor(virtualRegisterIndex), dst);
- Jump result = branchPtr(Below, dst, tagTypeNumberRegister);
- zeroExtend32ToPtr(dst, dst);
- return result;
- }
-
- inline JSInterfaceJIT::Jump JSInterfaceJIT::emitLoadDouble(unsigned virtualRegisterIndex, FPRegisterID dst, RegisterID scratch)
- {
- loadPtr(addressFor(virtualRegisterIndex), scratch);
- Jump notNumber = emitJumpIfNotImmediateNumber(scratch);
- Jump notInt = branchPtr(Below, scratch, tagTypeNumberRegister);
- convertInt32ToDouble(scratch, dst);
- Jump done = jump();
- notInt.link(this);
- addPtr(tagTypeNumberRegister, scratch);
- movePtrToDouble(scratch, dst);
- done.link(this);
- return notNumber;
- }
-
-    ALWAYS_INLINE void JSInterfaceJIT::emitFastArithImmToInt(RegisterID)
-    {
-        // No-op under JSVALUE64: the low 32 bits of an immediate number
-        // already hold the int32 value, so no conversion is required.
-    }
-
-#endif
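
emitLoadDouble() above depends on the JSVALUE64 number encoding: boxed doubles are stored as their bit pattern plus 2^48, so every immediate number has at least one of the top sixteen bits set (the mask kept in tagTypeNumberRegister). A self-contained sketch of that encoding; the constants match this representation, the helper names are ours:

    #include <cstdint>
    #include <cstring>

    static const uint64_t TagTypeNumber = 0xffff000000000000ull;
    static const uint64_t DoubleEncodeOffset = 1ull << 48;

    uint64_t encodeDouble(double d)
    {
        uint64_t bits;
        std::memcpy(&bits, &d, sizeof(bits));
        return bits + DoubleEncodeOffset;
    }

    double decodeDouble(uint64_t encoded)
    {
        // Adding TagTypeNumber equals subtracting DoubleEncodeOffset mod
        // 2^64; this is the addPtr(tagTypeNumberRegister, scratch) step above.
        uint64_t bits = encoded + TagTypeNumber;
        double d;
        std::memcpy(&d, &bits, sizeof(d));
        return d;
    }

    // Matches emitJumpIfImmediateNumber(): any bit set under the mask.
    bool isNumber(uint64_t encoded) { return encoded & TagTypeNumber; }
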
-
-#if USE(JSVALUE64)
- inline JSInterfaceJIT::Address JSInterfaceJIT::payloadFor(unsigned virtualRegisterIndex, RegisterID base)
- {
- ASSERT(static_cast<int>(virtualRegisterIndex) < FirstConstantRegisterIndex);
- return addressFor(virtualRegisterIndex, base);
- }
-#endif
-
- inline JSInterfaceJIT::Address JSInterfaceJIT::addressFor(unsigned virtualRegisterIndex, RegisterID base)
- {
- ASSERT(static_cast<int>(virtualRegisterIndex) < FirstConstantRegisterIndex);
- return Address(base, (virtualRegisterIndex * sizeof(Register)));
- }
-
-}
-
-#endif // JSInterfaceJIT_h
diff --git a/JavaScriptCore/jit/SpecializedThunkJIT.h b/JavaScriptCore/jit/SpecializedThunkJIT.h
deleted file mode 100644
index 5c593d9..0000000
--- a/JavaScriptCore/jit/SpecializedThunkJIT.h
+++ /dev/null
@@ -1,165 +0,0 @@
-/*
- * Copyright (C) 2010 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef SpecializedThunkJIT_h
-#define SpecializedThunkJIT_h
-
-#if ENABLE(JIT)
-
-#include "Executable.h"
-#include "JSInterfaceJIT.h"
-#include "LinkBuffer.h"
-
-namespace JSC {
-
- class SpecializedThunkJIT : public JSInterfaceJIT {
- public:
- static const int ThisArgument = -1;
- SpecializedThunkJIT(int expectedArgCount, JSGlobalData* globalData, ExecutablePool* pool)
- : m_expectedArgCount(expectedArgCount)
- , m_globalData(globalData)
- , m_pool(pool)
- {
- // Check that we have the expected number of arguments
- m_failures.append(branch32(NotEqual, Address(callFrameRegister, RegisterFile::ArgumentCount * (int)sizeof(Register)), Imm32(expectedArgCount + 1)));
- }
-
- void loadDoubleArgument(int argument, FPRegisterID dst, RegisterID scratch)
- {
- unsigned src = argumentToVirtualRegister(argument);
- m_failures.append(emitLoadDouble(src, dst, scratch));
- }
-
- void loadCellArgument(int argument, RegisterID dst)
- {
- unsigned src = argumentToVirtualRegister(argument);
- m_failures.append(emitLoadJSCell(src, dst));
- }
-
- void loadJSStringArgument(int argument, RegisterID dst)
- {
- loadCellArgument(argument, dst);
- m_failures.append(branchPtr(NotEqual, Address(dst, 0), ImmPtr(m_globalData->jsStringVPtr)));
- m_failures.append(branchTest32(NonZero, Address(dst, OBJECT_OFFSETOF(JSString, m_fiberCount))));
- }
-
- void loadInt32Argument(int argument, RegisterID dst, Jump& failTarget)
- {
- unsigned src = argumentToVirtualRegister(argument);
- failTarget = emitLoadInt32(src, dst);
- }
-
- void loadInt32Argument(int argument, RegisterID dst)
- {
- Jump conversionFailed;
- loadInt32Argument(argument, dst, conversionFailed);
- m_failures.append(conversionFailed);
- }
-
- void appendFailure(const Jump& failure)
- {
- m_failures.append(failure);
- }
-
- void returnJSValue(RegisterID src)
- {
- if (src != regT0)
- move(src, regT0);
- loadPtr(Address(callFrameRegister, RegisterFile::CallerFrame * (int)sizeof(Register)), callFrameRegister);
- ret();
- }
-
- void returnDouble(FPRegisterID src)
- {
-#if USE(JSVALUE64)
- moveDoubleToPtr(src, regT0);
- subPtr(tagTypeNumberRegister, regT0);
-#else
- storeDouble(src, Address(stackPointerRegister, -(int)sizeof(double)));
- loadPtr(Address(stackPointerRegister, OBJECT_OFFSETOF(JSValue, u.asBits.tag) - sizeof(double)), regT1);
- loadPtr(Address(stackPointerRegister, OBJECT_OFFSETOF(JSValue, u.asBits.payload) - sizeof(double)), regT0);
-#endif
- loadPtr(Address(callFrameRegister, RegisterFile::CallerFrame * (int)sizeof(Register)), callFrameRegister);
- ret();
- }
-
- void returnInt32(RegisterID src)
- {
- if (src != regT0)
- move(src, regT0);
- tagReturnAsInt32();
- loadPtr(Address(callFrameRegister, RegisterFile::CallerFrame * (int)sizeof(Register)), callFrameRegister);
- ret();
- }
-
- void returnJSCell(RegisterID src)
- {
- if (src != regT0)
- move(src, regT0);
- tagReturnAsJSCell();
- loadPtr(Address(callFrameRegister, RegisterFile::CallerFrame * (int)sizeof(Register)), callFrameRegister);
- ret();
- }
-
- MacroAssemblerCodePtr finalize(MacroAssemblerCodePtr fallback)
- {
- LinkBuffer patchBuffer(this, m_pool.get(), 0);
- patchBuffer.link(m_failures, CodeLocationLabel(fallback));
- return patchBuffer.finalizeCode().m_code;
- }
-
- private:
- int argumentToVirtualRegister(unsigned argument)
- {
- return -static_cast<int>(RegisterFile::CallFrameHeaderSize + (m_expectedArgCount - argument));
- }
-
- void tagReturnAsInt32()
- {
-#if USE(JSVALUE64)
- orPtr(tagTypeNumberRegister, regT0);
-#else
- move(Imm32(JSValue::Int32Tag), regT1);
-#endif
- }
-
- void tagReturnAsJSCell()
- {
-#if USE(JSVALUE32_64)
- move(Imm32(JSValue::CellTag), regT1);
-#endif
- }
-
- int m_expectedArgCount;
- JSGlobalData* m_globalData;
- RefPtr<ExecutablePool> m_pool;
- MacroAssembler::JumpList m_failures;
- };
-
-}
-
-#endif // ENABLE(JIT)
-
-#endif // SpecializedThunkJIT_h
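
argumentToVirtualRegister() above maps an argument index to a negative virtual register: arguments live below the callee's call-frame header, with this (ThisArgument == -1) furthest away. A worked instance of the same arithmetic (headerSize stands in for RegisterFile::CallFrameHeaderSize):

    int argumentToVirtualRegister(int headerSize, int expectedArgCount, int argument)
    {
        return -(headerSize + (expectedArgCount - argument));
    }
    // With expectedArgCount == 1 (e.g. the charCodeAt thunk):
    //   argument 0           -> -(headerSize + 1)  slot just below the header
    //   ThisArgument (== -1) -> -(headerSize + 2)  one slot further down
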
diff --git a/JavaScriptCore/jit/ThunkGenerators.cpp b/JavaScriptCore/jit/ThunkGenerators.cpp
deleted file mode 100644
index 9b40f12..0000000
--- a/JavaScriptCore/jit/ThunkGenerators.cpp
+++ /dev/null
@@ -1,162 +0,0 @@
-/*
- * Copyright (C) 2010 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "ThunkGenerators.h"
-
-#include "CodeBlock.h"
-#include <wtf/text/StringImpl.h>
-#include "SpecializedThunkJIT.h"
-
-#if ENABLE(JIT)
-
-namespace JSC {
-
-static void stringCharLoad(SpecializedThunkJIT& jit)
-{
- // load string
- jit.loadJSStringArgument(SpecializedThunkJIT::ThisArgument, SpecializedThunkJIT::regT0);
- // regT0 now contains this, and is a non-rope JSString*
-
- // Load string length to regT2, and start the process of loading the data pointer into regT0
- jit.load32(MacroAssembler::Address(SpecializedThunkJIT::regT0, ThunkHelpers::jsStringLengthOffset()), SpecializedThunkJIT::regT2);
- jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, ThunkHelpers::jsStringValueOffset()), SpecializedThunkJIT::regT0);
- jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, ThunkHelpers::stringImplDataOffset()), SpecializedThunkJIT::regT0);
-
- // load index
- jit.loadInt32Argument(0, SpecializedThunkJIT::regT1); // regT1 contains the index
-
- // Do an unsigned compare to simultaneously filter negative indices as well as indices that are too large
- jit.appendFailure(jit.branch32(MacroAssembler::AboveOrEqual, SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT2));
-
- // Load the character
- jit.load16(MacroAssembler::BaseIndex(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1, MacroAssembler::TimesTwo, 0), SpecializedThunkJIT::regT0);
-}
-
-static void charToString(SpecializedThunkJIT& jit, JSGlobalData* globalData, MacroAssembler::RegisterID src, MacroAssembler::RegisterID dst, MacroAssembler::RegisterID scratch)
-{
- jit.appendFailure(jit.branch32(MacroAssembler::AboveOrEqual, src, MacroAssembler::Imm32(0x100)));
- jit.move(MacroAssembler::ImmPtr(globalData->smallStrings.singleCharacterStrings()), scratch);
- jit.loadPtr(MacroAssembler::BaseIndex(scratch, src, MacroAssembler::ScalePtr, 0), dst);
- jit.appendFailure(jit.branchTestPtr(MacroAssembler::Zero, dst));
-}
-
-MacroAssemblerCodePtr charCodeAtThunkGenerator(JSGlobalData* globalData, ExecutablePool* pool)
-{
- SpecializedThunkJIT jit(1, globalData, pool);
- stringCharLoad(jit);
- jit.returnInt32(SpecializedThunkJIT::regT0);
- return jit.finalize(globalData->jitStubs->ctiNativeCall());
-}
-
-MacroAssemblerCodePtr charAtThunkGenerator(JSGlobalData* globalData, ExecutablePool* pool)
-{
- SpecializedThunkJIT jit(1, globalData, pool);
- stringCharLoad(jit);
- charToString(jit, globalData, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
- jit.returnJSCell(SpecializedThunkJIT::regT0);
- return jit.finalize(globalData->jitStubs->ctiNativeCall());
-}
-
-MacroAssemblerCodePtr fromCharCodeThunkGenerator(JSGlobalData* globalData, ExecutablePool* pool)
-{
- SpecializedThunkJIT jit(1, globalData, pool);
- // load char code
- jit.loadInt32Argument(0, SpecializedThunkJIT::regT0);
- charToString(jit, globalData, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
- jit.returnJSCell(SpecializedThunkJIT::regT0);
- return jit.finalize(globalData->jitStubs->ctiNativeCall());
-}
-
-MacroAssemblerCodePtr sqrtThunkGenerator(JSGlobalData* globalData, ExecutablePool* pool)
-{
- SpecializedThunkJIT jit(1, globalData, pool);
- if (!jit.supportsFloatingPointSqrt())
- return globalData->jitStubs->ctiNativeCall();
-
- jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
- jit.sqrtDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
- jit.returnDouble(SpecializedThunkJIT::fpRegT0);
- return jit.finalize(globalData->jitStubs->ctiNativeCall());
-}
-
-static const double oneConstant = 1.0;
-static const double negativeHalfConstant = -0.5;
-
-MacroAssemblerCodePtr powThunkGenerator(JSGlobalData* globalData, ExecutablePool* pool)
-{
- SpecializedThunkJIT jit(2, globalData, pool);
- if (!jit.supportsFloatingPoint())
- return globalData->jitStubs->ctiNativeCall();
-
- jit.loadDouble(&oneConstant, SpecializedThunkJIT::fpRegT1);
- jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
- MacroAssembler::Jump nonIntExponent;
- jit.loadInt32Argument(1, SpecializedThunkJIT::regT0, nonIntExponent);
- jit.appendFailure(jit.branch32(MacroAssembler::LessThan, SpecializedThunkJIT::regT0, MacroAssembler::Imm32(0)));
-
- MacroAssembler::Jump exponentIsZero = jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT0);
- MacroAssembler::Label startLoop(jit.label());
-
- MacroAssembler::Jump exponentIsEven = jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT0, MacroAssembler::Imm32(1));
- jit.mulDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
- exponentIsEven.link(&jit);
- jit.mulDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
- jit.rshift32(MacroAssembler::Imm32(1), SpecializedThunkJIT::regT0);
- jit.branchTest32(MacroAssembler::NonZero, SpecializedThunkJIT::regT0).linkTo(startLoop, &jit);
-
- exponentIsZero.link(&jit);
-
- {
- SpecializedThunkJIT::JumpList doubleResult;
- jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT1, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT0);
- jit.returnInt32(SpecializedThunkJIT::regT0);
- doubleResult.link(&jit);
- jit.returnDouble(SpecializedThunkJIT::fpRegT1);
- }
-
- if (jit.supportsFloatingPointSqrt()) {
- nonIntExponent.link(&jit);
- jit.loadDouble(&negativeHalfConstant, SpecializedThunkJIT::fpRegT3);
- jit.loadDoubleArgument(1, SpecializedThunkJIT::fpRegT2, SpecializedThunkJIT::regT0);
- jit.appendFailure(jit.branchDouble(MacroAssembler::DoubleLessThanOrEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
- jit.appendFailure(jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, SpecializedThunkJIT::fpRegT2, SpecializedThunkJIT::fpRegT3));
- jit.sqrtDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
- jit.divDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
-
- SpecializedThunkJIT::JumpList doubleResult;
- jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT1, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT0);
- jit.returnInt32(SpecializedThunkJIT::regT0);
- doubleResult.link(&jit);
- jit.returnDouble(SpecializedThunkJIT::fpRegT1);
- } else
- jit.appendFailure(nonIntExponent);
-
- return jit.finalize(globalData->jitStubs->ctiNativeCall());
-}
-
-}
-
-#endif // ENABLE(JIT)
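
powThunkGenerator above emits a classic square-and-multiply loop for non-negative int32 exponents, with a separate path for an exponent of exactly -0.5 (computed as 1/sqrt(x), guarded by base > 0). The emitted loop transcribed into plain C++ for clarity; the register names in the comments refer to the generator above:

    double intPow(double base, uint32_t exponent) // fpRegT0, regT0
    {
        double result = 1.0;      // fpRegT1, loaded from oneConstant
        while (exponent) {
            if (exponent & 1)     // the multiply skipped at exponentIsEven
                result *= base;   // mulDouble(fpRegT0, fpRegT1)
            base *= base;         // mulDouble(fpRegT0, fpRegT0)
            exponent >>= 1;       // rshift32(Imm32(1), regT0)
        }
        return result;            // then an int32 result is attempted
    }
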
diff --git a/JavaScriptCore/jit/ThunkGenerators.h b/JavaScriptCore/jit/ThunkGenerators.h
deleted file mode 100644
index 15261f7..0000000
--- a/JavaScriptCore/jit/ThunkGenerators.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Copyright (C) 2010 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef ThunkGenerators_h
-#define ThunkGenerators_h
-
-#if ENABLE(JIT)
-namespace JSC {
- class ExecutablePool;
- class JSGlobalData;
- class NativeExecutable;
- class MacroAssemblerCodePtr;
-
- typedef MacroAssemblerCodePtr (*ThunkGenerator)(JSGlobalData*, ExecutablePool*);
- MacroAssemblerCodePtr charCodeAtThunkGenerator(JSGlobalData*, ExecutablePool*);
- MacroAssemblerCodePtr charAtThunkGenerator(JSGlobalData*, ExecutablePool*);
- MacroAssemblerCodePtr fromCharCodeThunkGenerator(JSGlobalData*, ExecutablePool*);
- MacroAssemblerCodePtr sqrtThunkGenerator(JSGlobalData*, ExecutablePool*);
- MacroAssemblerCodePtr powThunkGenerator(JSGlobalData*, ExecutablePool*);
-}
-#endif
-
-#endif // ThunkGenerators_h
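
These generators plug into JITThunks::ctiStub() (declared in JITStubs.h above), which compiles each one at most once and caches the result. Illustrative usage, with the names taken from declarations elsewhere in this diff:

    // Lazily compile (or fetch the cached) Math.sqrt fast path. The
    // generator itself falls back to ctiNativeCall when the target
    // lacks floating-point sqrt support.
    MacroAssemblerCodePtr sqrtStub =
        globalData->jitStubs->ctiStub(globalData, sqrtThunkGenerator);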