Diffstat (limited to 'JavaScriptCore/jit')
-rw-r--r--  JavaScriptCore/jit/ExecutableAllocator.cpp               85
-rw-r--r--  JavaScriptCore/jit/ExecutableAllocator.h                120
-rw-r--r--  JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp   196
-rw-r--r--  JavaScriptCore/jit/ExecutableAllocatorPosix.cpp          85
-rw-r--r--  JavaScriptCore/jit/ExecutableAllocatorSymbian.cpp        75
-rw-r--r--  JavaScriptCore/jit/ExecutableAllocatorWin.cpp            63
-rw-r--r--  JavaScriptCore/jit/JIT.cpp                              188
-rw-r--r--  JavaScriptCore/jit/JIT.h                                420
-rw-r--r--  JavaScriptCore/jit/JITArithmetic.cpp                   2045
-rw-r--r--  JavaScriptCore/jit/JITArithmetic32_64.cpp              1424
-rw-r--r--  JavaScriptCore/jit/JITCall.cpp                          563
-rw-r--r--  JavaScriptCore/jit/JITCall32_64.cpp                     356
-rw-r--r--  JavaScriptCore/jit/JITCode.h                              7
-rw-r--r--  JavaScriptCore/jit/JITInlineMethods.h                   220
-rw-r--r--  JavaScriptCore/jit/JITOpcodes.cpp                      2465
-rw-r--r--  JavaScriptCore/jit/JITOpcodes32_64.cpp                 1836
-rw-r--r--  JavaScriptCore/jit/JITPropertyAccess.cpp               1286
-rw-r--r--  JavaScriptCore/jit/JITPropertyAccess32_64.cpp          1186
-rw-r--r--  JavaScriptCore/jit/JITStubCall.h                         14
-rw-r--r--  JavaScriptCore/jit/JITStubs.cpp                        1700
-rw-r--r--  JavaScriptCore/jit/JITStubs.h                           162
-rw-r--r--  JavaScriptCore/jit/JSInterfaceJIT.h                     292
-rw-r--r--  JavaScriptCore/jit/SpecializedThunkJIT.h                165
-rw-r--r--  JavaScriptCore/jit/ThunkGenerators.cpp                  162
-rw-r--r--  JavaScriptCore/jit/ThunkGenerators.h                     45
25 files changed, 8507 insertions, 6653 deletions
diff --git a/JavaScriptCore/jit/ExecutableAllocator.cpp b/JavaScriptCore/jit/ExecutableAllocator.cpp
index f6b27ec..8742eda 100644
--- a/JavaScriptCore/jit/ExecutableAllocator.cpp
+++ b/JavaScriptCore/jit/ExecutableAllocator.cpp
@@ -33,6 +33,91 @@ namespace JSC {
size_t ExecutableAllocator::pageSize = 0;
+#if ENABLE(EXECUTABLE_ALLOCATOR_DEMAND)
+
+void ExecutableAllocator::intializePageSize()
+{
+#if CPU(ARMV5_OR_LOWER)
+ // The moving memory model (as used in ARMv5 and earlier platforms)
+ // on Symbian OS limits the number of chunks for each process to 16.
+ // To mitigate this limitation, increase the page size to allocate
+ // fewer, larger chunks. Set the page size to 256 KB to compensate
+ // for the moving memory model limitation.
+ ExecutableAllocator::pageSize = 256 * 1024;
+#else
+ ExecutableAllocator::pageSize = PageAllocation::pageSize();
+#endif
+}
+
+ExecutablePool::Allocation ExecutablePool::systemAlloc(size_t size)
+{
+ PageAllocation allocation = PageAllocation::allocate(size, PageAllocation::JSJITCodePages, EXECUTABLE_POOL_WRITABLE, true);
+ if (!allocation)
+ CRASH();
+ return allocation;
+}
+
+void ExecutablePool::systemRelease(ExecutablePool::Allocation& allocation)
+{
+ allocation.deallocate();
+}
+
+bool ExecutableAllocator::isValid() const
+{
+ return true;
+}
+
+size_t ExecutableAllocator::committedByteCount()
+{
+ return 0;
+}
+
+#endif
+
+#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
+
+#if OS(WINDOWS) || OS(SYMBIAN)
+#error "ASSEMBLER_WX_EXCLUSIVE not yet supported on this platform."
+#endif
+
+void ExecutableAllocator::reprotectRegion(void* start, size_t size, ProtectionSeting setting)
+{
+ if (!pageSize)
+ intializePageSize();
+
+ // Calculate the start of the page containing this region,
+ // and account for this extra memory within size.
+ intptr_t startPtr = reinterpret_cast<intptr_t>(start);
+ intptr_t pageStartPtr = startPtr & ~(pageSize - 1);
+ void* pageStart = reinterpret_cast<void*>(pageStartPtr);
+ size += (startPtr - pageStartPtr);
+
+ // Round size up
+ size += (pageSize - 1);
+ size &= ~(pageSize - 1);
+
+ mprotect(pageStart, size, (setting == Writable) ? PROTECTION_FLAGS_RW : PROTECTION_FLAGS_RX);
+}
+
+#endif
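Note: the rounding arithmetic in reprotectRegion above relies on pageSize being a power of two; the region start is rounded down to a page boundary, the size is grown by the same amount, and then rounded up to whole pages before mprotect is applied. A minimal standalone sketch of the same arithmetic (the free-standing helper and its name are illustrative, not part of this patch):

#include <stdint.h>
#include <sys/mman.h>

static int reprotectWholePages(void* start, size_t size, int prot, size_t pageSize)
{
    uintptr_t startPtr = reinterpret_cast<uintptr_t>(start);
    uintptr_t pageStart = startPtr & ~(pageSize - 1); // round start down to a page boundary
    size += startPtr - pageStart;                     // grow size by the distance start moved back
    size = (size + pageSize - 1) & ~(pageSize - 1);   // round size up to whole pages
    return mprotect(reinterpret_cast<void*>(pageStart), size, prot);
}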
+
+#if CPU(ARM_TRADITIONAL) && OS(LINUX) && COMPILER(RVCT)
+
+__asm void ExecutableAllocator::cacheFlush(void* code, size_t size)
+{
+ ARM
+ push {r7}
+ add r1, r1, r0
+ mov r7, #0xf0000
+ add r7, r7, #0x2
+ mov r2, #0x0
+ svc #0x0
+ pop {r7}
+ bx lr
+}
+
+#endif
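Note: the RVCT assembly above builds 0xf0002 in r7, which selects the ARM-private cacheflush system call (the 0x0f0000 private syscall base plus 2, i.e. __ARM_NR_cacheflush on ARM Linux); r0 and r1 delimit [start, end) and r2 carries the flags argument, which must be zero. A hedged C equivalent, assuming glibc's syscall() wrapper and the ARM Linux kernel headers:

#include <unistd.h>      // syscall()
#include <asm/unistd.h>  // __ARM_NR_cacheflush (ARM Linux only)

static void cacheFlushViaSyscall(void* code, size_t size)
{
    char* start = static_cast<char*>(code);
    syscall(__ARM_NR_cacheflush, start, start + size, 0); // third argument: flags, must be 0
}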
+
}
#endif // HAVE(ASSEMBLER)
diff --git a/JavaScriptCore/jit/ExecutableAllocator.h b/JavaScriptCore/jit/ExecutableAllocator.h
index 1fb8ff7..f362605 100644
--- a/JavaScriptCore/jit/ExecutableAllocator.h
+++ b/JavaScriptCore/jit/ExecutableAllocator.h
@@ -25,16 +25,16 @@
#ifndef ExecutableAllocator_h
#define ExecutableAllocator_h
-
#include <stddef.h> // for ptrdiff_t
#include <limits>
#include <wtf/Assertions.h>
+#include <wtf/PageAllocation.h>
#include <wtf/PassRefPtr.h>
#include <wtf/RefCounted.h>
#include <wtf/UnusedParam.h>
#include <wtf/Vector.h>
-#if OS(IPHONE_OS)
+#if OS(IOS)
#include <libkern/OSCacheControl.h>
#include <sys/mman.h>
#endif
@@ -43,21 +43,31 @@
#include <e32std.h>
#endif
+#if CPU(MIPS) && OS(LINUX)
+#include <sys/cachectl.h>
+#endif
+
#if OS(WINCE)
// From pkfuncs.h (private header file from the Platform Builder)
#define CACHE_SYNC_ALL 0x07F
extern "C" __declspec(dllimport) void CacheRangeFlush(LPVOID pAddr, DWORD dwLength, DWORD dwFlags);
#endif
+#if PLATFORM(BREWMP)
+#include <AEEIMemCache1.h>
+#include <AEEMemCache1.bid>
+#include <wtf/brew/RefPtrBrew.h>
+#endif
+
#define JIT_ALLOCATOR_PAGE_SIZE (ExecutableAllocator::pageSize)
#define JIT_ALLOCATOR_LARGE_ALLOC_SIZE (ExecutableAllocator::pageSize * 4)
#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
#define PROTECTION_FLAGS_RW (PROT_READ | PROT_WRITE)
#define PROTECTION_FLAGS_RX (PROT_READ | PROT_EXEC)
-#define INITIAL_PROTECTION_FLAGS PROTECTION_FLAGS_RX
+#define EXECUTABLE_POOL_WRITABLE false
#else
-#define INITIAL_PROTECTION_FLAGS (PROT_READ | PROT_WRITE | PROT_EXEC)
+#define EXECUTABLE_POOL_WRITABLE true
#endif
namespace JSC {
@@ -76,22 +86,33 @@ inline size_t roundUpAllocationSize(size_t request, size_t granularity)
}
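Note: roundUpAllocationSize (its body is elided by the hunk above) rounds a request up to a multiple of the allocation granularity; for a power-of-two granularity this is the standard mask trick. A minimal sketch under that assumption (illustrative name, omitting any overflow guard a production version would also want):

#include <cassert>
#include <cstddef>

inline size_t roundUpToGranularity(size_t request, size_t granularity)
{
    assert(!(granularity & (granularity - 1))); // granularity must be a power of two
    return (request + granularity - 1) & ~(granularity - 1);
}
// e.g. roundUpToGranularity(5000, 4096) == 8192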
-#if ENABLE(ASSEMBLER)
+#if ENABLE(JIT) && ENABLE(ASSEMBLER)
namespace JSC {
class ExecutablePool : public RefCounted<ExecutablePool> {
-private:
- struct Allocation {
- char* pages;
- size_t size;
-#if OS(SYMBIAN)
- RChunk* chunk;
-#endif
+public:
+#if ENABLE(EXECUTABLE_ALLOCATOR_DEMAND)
+ typedef PageAllocation Allocation;
+#else
+ class Allocation {
+ public:
+ Allocation(void* base, size_t size)
+ : m_base(base)
+ , m_size(size)
+ {
+ }
+ void* base() { return m_base; }
+ size_t size() { return m_size; }
+ bool operator!() const { return !m_base; }
+
+ private:
+ void* m_base;
+ size_t m_size;
};
+#endif
typedef Vector<Allocation, 2> AllocationList;
-public:
static PassRefPtr<ExecutablePool> create(size_t n)
{
return adoptRef(new ExecutablePool(n));
@@ -116,10 +137,15 @@ public:
return poolAllocate(n);
}
+ void returnLastBytes(size_t count)
+ {
+ m_freePtr -= count;
+ }
+
~ExecutablePool()
{
- AllocationList::const_iterator end = m_pools.end();
- for (AllocationList::const_iterator ptr = m_pools.begin(); ptr != end; ++ptr)
+ AllocationList::iterator end = m_pools.end();
+ for (AllocationList::iterator ptr = m_pools.begin(); ptr != end; ++ptr)
ExecutablePool::systemRelease(*ptr);
}
@@ -127,7 +153,7 @@ public:
private:
static Allocation systemAlloc(size_t n);
- static void systemRelease(const Allocation& alloc);
+ static void systemRelease(Allocation& alloc);
ExecutablePool(size_t n);
@@ -147,12 +173,20 @@ public:
{
if (!pageSize)
intializePageSize();
- m_smallAllocationPool = ExecutablePool::create(JIT_ALLOCATOR_LARGE_ALLOC_SIZE);
+ if (isValid())
+ m_smallAllocationPool = ExecutablePool::create(JIT_ALLOCATOR_LARGE_ALLOC_SIZE);
+#if !ENABLE(INTERPRETER)
+ else
+ CRASH();
+#endif
}
+ bool isValid() const;
+
PassRefPtr<ExecutablePool> poolForSize(size_t n)
{
// Try to fit in the existing small allocator
+ ASSERT(m_smallAllocationPool);
if (n < m_smallAllocationPool->available())
return m_smallAllocationPool;
@@ -190,7 +224,33 @@ public:
static void cacheFlush(void*, size_t)
{
}
-#elif CPU(ARM_THUMB2) && OS(IPHONE_OS)
+#elif CPU(MIPS)
+ static void cacheFlush(void* code, size_t size)
+ {
+#if COMPILER(GCC) && GCC_VERSION_AT_LEAST(4,3,0)
+#if WTF_MIPS_ISA_REV(2) && !GCC_VERSION_AT_LEAST(4,4,3)
+ int lineSize;
+ asm("rdhwr %0, $1" : "=r" (lineSize));
+ //
+ // Modify "start" and "end" to avoid a GCC 4.3.0-4.4.2 bug in
+ // mips_expand_synci_loop that may execute synci one more time.
+ // "start" points to the first byte of the cache line.
+ // "end" points to the last byte of the line before the last cache line.
+ // Because size is always a multiple of 4, it is safe to set
+ // "end" to the last byte.
+ //
+ intptr_t start = reinterpret_cast<intptr_t>(code) & (-lineSize);
+ intptr_t end = ((reinterpret_cast<intptr_t>(code) + size - 1) & (-lineSize)) - 1;
+ __builtin___clear_cache(reinterpret_cast<char*>(start), reinterpret_cast<char*>(end));
+#else
+ intptr_t end = reinterpret_cast<intptr_t>(code) + size;
+ __builtin___clear_cache(reinterpret_cast<char*>(code), reinterpret_cast<char*>(end));
+#endif
+#else
+ _flush_cache(reinterpret_cast<char*>(code), size, BCACHE);
+#endif
+ }
+#elif CPU(ARM_THUMB2) && OS(IOS)
static void cacheFlush(void* code, size_t size)
{
sys_dcache_flush(code, size);
@@ -217,7 +277,9 @@ public:
{
User::IMB_Range(code, static_cast<char*>(code) + size);
}
-#elif CPU(ARM_TRADITIONAL) && OS(LINUX)
+#elif CPU(ARM_TRADITIONAL) && OS(LINUX) && COMPILER(RVCT)
+ static __asm void cacheFlush(void* code, size_t size);
+#elif CPU(ARM_TRADITIONAL) && OS(LINUX) && COMPILER(GCC)
static void cacheFlush(void* code, size_t size)
{
asm volatile (
@@ -238,9 +300,17 @@ public:
{
CacheRangeFlush(code, size, CACHE_SYNC_ALL);
}
+#elif PLATFORM(BREWMP)
+ static void cacheFlush(void* code, size_t size)
+ {
+ PlatformRefPtr<IMemCache1> memCache = createRefPtrInstance<IMemCache1>(AEECLSID_MemCache1);
+ IMemCache1_ClearCache(memCache.get(), reinterpret_cast<uint32>(code), size, MEMSPACE_CACHE_FLUSH, MEMSPACE_DATACACHE);
+ IMemCache1_ClearCache(memCache.get(), reinterpret_cast<uint32>(code), size, MEMSPACE_CACHE_INVALIDATE, MEMSPACE_INSTCACHE);
+ }
#else
#error "The cacheFlush support is missing on this platform."
#endif
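Note: a worked check of the MIPS start/end arithmetic above, with illustrative values:

// With lineSize = 32, code = 0x1004 and size = 0x40:
//   start = 0x1004 & -32                    = 0x1000  // first byte of the first cache line
//   end   = ((0x1004 + 0x40 - 1) & -32) - 1 = 0x103f  // last byte of the line before the last
// so the __builtin___clear_cache(start, end) range deliberately stops short of
// the final cache line, compensating for the extra synci that GCC 4.3.0-4.4.2
// would otherwise emit.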
+ static size_t committedByteCount();
private:
@@ -257,7 +327,7 @@ inline ExecutablePool::ExecutablePool(size_t n)
size_t allocSize = roundUpAllocationSize(n, JIT_ALLOCATOR_PAGE_SIZE);
Allocation mem = systemAlloc(allocSize);
m_pools.append(mem);
- m_freePtr = mem.pages;
+ m_freePtr = static_cast<char*>(mem.base());
if (!m_freePtr)
CRASH(); // Failed to allocate
m_end = m_freePtr + allocSize;
@@ -268,22 +338,22 @@ inline void* ExecutablePool::poolAllocate(size_t n)
size_t allocSize = roundUpAllocationSize(n, JIT_ALLOCATOR_PAGE_SIZE);
Allocation result = systemAlloc(allocSize);
- if (!result.pages)
+ if (!result.base())
CRASH(); // Failed to allocate
ASSERT(m_end >= m_freePtr);
if ((allocSize - n) > static_cast<size_t>(m_end - m_freePtr)) {
// Replace allocation pool
- m_freePtr = result.pages + n;
- m_end = result.pages + allocSize;
+ m_freePtr = static_cast<char*>(result.base()) + n;
+ m_end = static_cast<char*>(result.base()) + allocSize;
}
m_pools.append(result);
- return result.pages;
+ return result.base();
}
}
-#endif // ENABLE(ASSEMBLER)
+#endif // ENABLE(JIT) && ENABLE(ASSEMBLER)
#endif // !defined(ExecutableAllocator)
diff --git a/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp b/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp
index dd1db4e..15247c2 100644
--- a/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp
+++ b/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp
@@ -27,24 +27,36 @@
#include "ExecutableAllocator.h"
-#include <errno.h>
+#if ENABLE(EXECUTABLE_ALLOCATOR_FIXED)
-#if ENABLE(ASSEMBLER) && OS(DARWIN) && CPU(X86_64)
+#include <errno.h>
#include "TCSpinLock.h"
-#include <mach/mach_init.h>
-#include <mach/vm_map.h>
#include <sys/mman.h>
#include <unistd.h>
#include <wtf/AVLTree.h>
+#include <wtf/PageReservation.h>
#include <wtf/VMTags.h>
+#if CPU(X86_64)
+ // These limits are suitable on 64-bit platforms (particularly x86-64, where we require all jumps to have a 2Gb max range).
+ #define VM_POOL_SIZE (2u * 1024u * 1024u * 1024u) // 2Gb
+ #define COALESCE_LIMIT (16u * 1024u * 1024u) // 16Mb
+#else
+ // These limits are hopefully sensible on embedded platforms.
+ #define VM_POOL_SIZE (32u * 1024u * 1024u) // 32Mb
+ #define COALESCE_LIMIT (4u * 1024u * 1024u) // 4Mb
+#endif
+
+// ASLR currently only works on darwin (due to arc4random) & 64-bit (due to address space size).
+#define VM_POOL_ASLR (OS(DARWIN) && CPU(X86_64))
+
using namespace WTF;
namespace JSC {
-
-#define TWO_GB (2u * 1024u * 1024u * 1024u)
-#define SIXTEEN_MB (16u * 1024u * 1024u)
+
+static size_t committedBytesCount = 0;
+static SpinLock spinlock = SPINLOCK_INITIALIZER;
// FreeListEntry describes a free chunk of memory, stored in the freeList.
struct FreeListEntry {
@@ -116,32 +128,21 @@ class FixedVMPoolAllocator
// The free list is stored in a sorted tree.
typedef AVLTree<AVLTreeAbstractorForFreeList, 40> SizeSortedFreeTree;
- // Use madvise as apropriate to prevent freed pages from being spilled,
- // and to attempt to ensure that used memory is reported correctly.
-#if HAVE(MADV_FREE_REUSE)
void release(void* position, size_t size)
{
- while (madvise(position, size, MADV_FREE_REUSABLE) == -1 && errno == EAGAIN) { }
+ m_allocation.decommit(position, size);
+ addToCommittedByteCount(-static_cast<long>(size));
}
void reuse(void* position, size_t size)
{
- while (madvise(position, size, MADV_FREE_REUSE) == -1 && errno == EAGAIN) { }
- }
-#elif HAVE(MADV_DONTNEED)
- void release(void* position, size_t size)
- {
- while (madvise(position, size, MADV_DONTNEED) == -1 && errno == EAGAIN) { }
+ bool okay = m_allocation.commit(position, size);
+ ASSERT_UNUSED(okay, okay);
+ addToCommittedByteCount(static_cast<long>(size));
}
- void reuse(void*, size_t) {}
-#else
- void release(void*, size_t) {}
- void reuse(void*, size_t) {}
-#endif
-
// All addition to the free list should go through this method, rather than
- // calling insert directly, to avoid multiple entries beging added with the
+ // calling insert directly, to avoid multiple entries being added with the
// same key. All nodes being added should be singletons, they should not
// already be a part of a chain.
void addToFreeList(FreeListEntry* entry)
@@ -160,7 +161,7 @@ class FixedVMPoolAllocator
}
// We do not attempt to coalesce addition, which may lead to fragmentation;
- // instead we periodically perform a sweep to try to coalesce neigboring
+ // instead we periodically perform a sweep to try to coalesce neighboring
// entries in m_freeList. Presently this is triggered at the point 16MB
// of memory has been released.
void coalesceFreeSpace()
@@ -173,7 +174,7 @@ class FixedVMPoolAllocator
for (FreeListEntry* entry; (entry = *iter); ++iter) {
// Each entry in m_freeList might correspond to multiple
// free chunks of memory (of the same size). Walk the chain
- // (this is likely of couse only be one entry long!) adding
+ // (this is of course likely only to be one entry long!) adding
// each entry to the Vector (and resetting the next-in-chain
// pointer to separate each node out).
FreeListEntry* next;
@@ -280,7 +281,6 @@ public:
FixedVMPoolAllocator(size_t commonSize, size_t totalHeapSize)
: m_commonSize(commonSize)
, m_countFreedSinceLastCoalesce(0)
- , m_totalHeapSize(totalHeapSize)
{
// Cook up an address to allocate at, using the following recipe:
// 17 bits of zero, stay in userspace kids.
@@ -289,26 +289,70 @@ public:
//
// But! - as a temporary workaround for some plugin problems (rdar://problem/6812854),
// for now instead of 2^26 bits of ASLR let's stick with 25 bits of randomization plus
- // 2^24, which should put up somewhere in the middle of usespace (in the address range
+ // 2^24, which should put us somewhere in the middle of userspace (in the address range
// 0x200000000000 .. 0x5fffffffffff).
- intptr_t randomLocation = arc4random() & ((1 << 25) - 1);
+#if VM_POOL_ASLR
+ intptr_t randomLocation = 0;
+ randomLocation = arc4random() & ((1 << 25) - 1);
randomLocation += (1 << 24);
randomLocation <<= 21;
- m_base = mmap(reinterpret_cast<void*>(randomLocation), m_totalHeapSize, INITIAL_PROTECTION_FLAGS, MAP_PRIVATE | MAP_ANON, VM_TAG_FOR_EXECUTABLEALLOCATOR_MEMORY, 0);
- if (!m_base)
+ m_allocation = PageReservation::reserveAt(reinterpret_cast<void*>(randomLocation), false, totalHeapSize, PageAllocation::JSJITCodePages, EXECUTABLE_POOL_WRITABLE, true);
+#else
+ m_allocation = PageReservation::reserve(totalHeapSize, PageAllocation::JSJITCodePages, EXECUTABLE_POOL_WRITABLE, true);
+#endif
+
+ if (!!m_allocation)
+ m_freeList.insert(new FreeListEntry(m_allocation.base(), m_allocation.size()));
+#if !ENABLE(INTERPRETER)
+ else
CRASH();
+#endif
+ }
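Note: a worked check of the address recipe above:

//   lowest:  (0x0000000 + (1 << 24)) << 21 = 0x200000000000
//   highest: (0x1ffffff + (1 << 24)) << 21 = 0x5fffffe00000
// i.e. 25 bits of randomness plus a 2^24 offset, shifted up by 21 bits, keeps
// the reservation base 2MB-aligned and within the documented range
// 0x200000000000 .. 0x5fffffffffff.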
- // For simplicity, we keep all memory in m_freeList in a 'released' state.
- // This means that we can simply reuse all memory when allocating, without
- // worrying about it's previous state, and also makes coalescing m_freeList
- // simpler since we need not worry about the possibility of coalescing released
- // chunks with non-released ones.
- release(m_base, m_totalHeapSize);
- m_freeList.insert(new FreeListEntry(m_base, m_totalHeapSize));
+ ExecutablePool::Allocation alloc(size_t size)
+ {
+ return ExecutablePool::Allocation(allocInternal(size), size);
+ }
+
+ void free(ExecutablePool::Allocation allocation)
+ {
+ void* pointer = allocation.base();
+ size_t size = allocation.size();
+
+ ASSERT(!!m_allocation);
+ // Call release to report to the operating system that this
+ // memory is no longer in use, and need not be paged out.
+ ASSERT(isWithinVMPool(pointer, size));
+ release(pointer, size);
+
+ // Common-sized allocations are stored in the m_commonSizedAllocations
+ // vector; all other freed chunks are added to m_freeList.
+ if (size == m_commonSize)
+ m_commonSizedAllocations.append(pointer);
+ else
+ addToFreeList(new FreeListEntry(pointer, size));
+
+ // Do some housekeeping. Every time we reach a point that
+ // 16MB of allocations have been freed, sweep m_freeList
+ // coalescing any neighboring fragments.
+ m_countFreedSinceLastCoalesce += size;
+ if (m_countFreedSinceLastCoalesce >= COALESCE_LIMIT) {
+ m_countFreedSinceLastCoalesce = 0;
+ coalesceFreeSpace();
+ }
}
- void* alloc(size_t size)
+ bool isValid() const { return !!m_allocation; }
+
+private:
+ void* allocInternal(size_t size)
{
+#if ENABLE(INTERPRETER)
+ if (!m_allocation)
+ return 0;
+#else
+ ASSERT(!!m_allocation);
+#endif
void* result;
// Freed allocations of the common size are not stored back into the main
@@ -318,12 +362,12 @@ public:
result = m_commonSizedAllocations.last();
m_commonSizedAllocations.removeLast();
} else {
- // Serach m_freeList for a suitable sized chunk to allocate memory from.
+ // Search m_freeList for a suitable sized chunk to allocate memory from.
FreeListEntry* entry = m_freeList.search(size, m_freeList.GREATER_EQUAL);
// This would be bad news.
if (!entry) {
- // Errk! Lets take a last-ditch desparation attempt at defragmentation...
+ // Errk! Let's take a last-ditch desperation attempt at defragmentation...
coalesceFreeSpace();
// Did that free up a large enough chunk?
entry = m_freeList.search(size, m_freeList.GREATER_EQUAL);
@@ -369,39 +413,20 @@ public:
return result;
}
- void free(void* pointer, size_t size)
- {
- // Call release to report to the operating system that this
- // memory is no longer in use, and need not be paged out.
- ASSERT(isWithinVMPool(pointer, size));
- release(pointer, size);
-
- // Common-sized allocations are stored in the m_commonSizedAllocations
- // vector; all other freed chunks are added to m_freeList.
- if (size == m_commonSize)
- m_commonSizedAllocations.append(pointer);
- else
- addToFreeList(new FreeListEntry(pointer, size));
-
- // Do some housekeeping. Every time we reach a point that
- // 16MB of allocations have been freed, sweep m_freeList
- // coalescing any neighboring fragments.
- m_countFreedSinceLastCoalesce += size;
- if (m_countFreedSinceLastCoalesce >= SIXTEEN_MB) {
- m_countFreedSinceLastCoalesce = 0;
- coalesceFreeSpace();
- }
- }
-
-private:
-
#ifndef NDEBUG
bool isWithinVMPool(void* pointer, size_t size)
{
- return pointer >= m_base && (reinterpret_cast<char*>(pointer) + size <= reinterpret_cast<char*>(m_base) + m_totalHeapSize);
+ return pointer >= m_allocation.base() && (reinterpret_cast<char*>(pointer) + size <= reinterpret_cast<char*>(m_allocation.base()) + m_allocation.size());
}
#endif
+ void addToCommittedByteCount(long byteCount)
+ {
+ ASSERT(spinlock.IsHeld());
+ ASSERT(static_cast<long>(committedBytesCount) + byteCount > -1);
+ committedBytesCount += byteCount;
+ }
+
// Freed space from the most common sized allocations will be held in this list, ...
const size_t m_commonSize;
Vector<void*> m_commonSizedAllocations;
@@ -412,36 +437,45 @@ private:
// This is used for housekeeping, to trigger defragmentation of the freed lists.
size_t m_countFreedSinceLastCoalesce;
- void* m_base;
- size_t m_totalHeapSize;
+ PageReservation m_allocation;
};
+size_t ExecutableAllocator::committedByteCount()
+{
+ SpinLockHolder lockHolder(&spinlock);
+ return committedBytesCount;
+}
+
void ExecutableAllocator::intializePageSize()
{
ExecutableAllocator::pageSize = getpagesize();
}
static FixedVMPoolAllocator* allocator = 0;
-static SpinLock spinlock = SPINLOCK_INITIALIZER;
-
-ExecutablePool::Allocation ExecutablePool::systemAlloc(size_t size)
+
+bool ExecutableAllocator::isValid() const
{
- SpinLockHolder lock_holder(&spinlock);
-
+ SpinLockHolder lock_holder(&spinlock);
if (!allocator)
- allocator = new FixedVMPoolAllocator(JIT_ALLOCATOR_LARGE_ALLOC_SIZE, TWO_GB);
- ExecutablePool::Allocation alloc = {reinterpret_cast<char*>(allocator->alloc(size)), size};
- return alloc;
+ allocator = new FixedVMPoolAllocator(JIT_ALLOCATOR_LARGE_ALLOC_SIZE, VM_POOL_SIZE);
+ return allocator->isValid();
}
-void ExecutablePool::systemRelease(const ExecutablePool::Allocation& allocation)
+ExecutablePool::Allocation ExecutablePool::systemAlloc(size_t size)
{
- SpinLockHolder lock_holder(&spinlock);
+ SpinLockHolder lock_holder(&spinlock);
+ ASSERT(allocator);
+ return allocator->alloc(size);
+}
+void ExecutablePool::systemRelease(ExecutablePool::Allocation& allocation)
+{
+ SpinLockHolder lock_holder(&spinlock);
ASSERT(allocator);
- allocator->free(allocation.pages, allocation.size);
+ allocator->free(allocation);
}
}
+
#endif // HAVE(ASSEMBLER)
diff --git a/JavaScriptCore/jit/ExecutableAllocatorPosix.cpp b/JavaScriptCore/jit/ExecutableAllocatorPosix.cpp
deleted file mode 100644
index 06375ad..0000000
--- a/JavaScriptCore/jit/ExecutableAllocatorPosix.cpp
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * Copyright (C) 2008 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-
-#include "ExecutableAllocator.h"
-
-#if ENABLE(ASSEMBLER) && OS(UNIX) && !OS(SYMBIAN)
-
-#include <sys/mman.h>
-#include <unistd.h>
-#include <wtf/VMTags.h>
-
-namespace JSC {
-
-#if !(OS(DARWIN) && CPU(X86_64))
-
-void ExecutableAllocator::intializePageSize()
-{
- ExecutableAllocator::pageSize = getpagesize();
-}
-
-ExecutablePool::Allocation ExecutablePool::systemAlloc(size_t n)
-{
- void* allocation = mmap(NULL, n, INITIAL_PROTECTION_FLAGS, MAP_PRIVATE | MAP_ANON, VM_TAG_FOR_EXECUTABLEALLOCATOR_MEMORY, 0);
- if (allocation == MAP_FAILED)
- CRASH();
- ExecutablePool::Allocation alloc = { reinterpret_cast<char*>(allocation), n };
- return alloc;
-}
-
-void ExecutablePool::systemRelease(const ExecutablePool::Allocation& alloc)
-{
- int result = munmap(alloc.pages, alloc.size);
- ASSERT_UNUSED(result, !result);
-}
-
-#endif // !(OS(DARWIN) && CPU(X86_64))
-
-#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
-void ExecutableAllocator::reprotectRegion(void* start, size_t size, ProtectionSeting setting)
-{
- if (!pageSize)
- intializePageSize();
-
- // Calculate the start of the page containing this region,
- // and account for this extra memory within size.
- intptr_t startPtr = reinterpret_cast<intptr_t>(start);
- intptr_t pageStartPtr = startPtr & ~(pageSize - 1);
- void* pageStart = reinterpret_cast<void*>(pageStartPtr);
- size += (startPtr - pageStartPtr);
-
- // Round size up
- size += (pageSize - 1);
- size &= ~(pageSize - 1);
-
- mprotect(pageStart, size, (setting == Writable) ? PROTECTION_FLAGS_RW : PROTECTION_FLAGS_RX);
-}
-#endif
-
-}
-
-#endif // HAVE(ASSEMBLER)
diff --git a/JavaScriptCore/jit/ExecutableAllocatorSymbian.cpp b/JavaScriptCore/jit/ExecutableAllocatorSymbian.cpp
deleted file mode 100644
index e82975c..0000000
--- a/JavaScriptCore/jit/ExecutableAllocatorSymbian.cpp
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Copyright (C) 2009 Nokia Corporation and/or its subsidiary(-ies)
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2 of the License, or (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with this library; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor,
- * Boston, MA 02110-1301 USA
- *
- */
-
-#include "config.h"
-
-#include "ExecutableAllocator.h"
-
-#if ENABLE(ASSEMBLER) && OS(SYMBIAN)
-
-#include <e32hal.h>
-#include <e32std.h>
-
-// Set the page size to 256 Kb to compensate for moving memory model limitation
-const size_t MOVING_MEM_PAGE_SIZE = 256 * 1024;
-
-namespace JSC {
-
-void ExecutableAllocator::intializePageSize()
-{
-#if CPU(ARMV5_OR_LOWER)
- // The moving memory model (as used in ARMv5 and earlier platforms)
- // on Symbian OS limits the number of chunks for each process to 16.
- // To mitigate this limitation increase the pagesize to
- // allocate less of larger chunks.
- ExecutableAllocator::pageSize = MOVING_MEM_PAGE_SIZE;
-#else
- TInt page_size;
- UserHal::PageSizeInBytes(page_size);
- ExecutableAllocator::pageSize = page_size;
-#endif
-}
-
-ExecutablePool::Allocation ExecutablePool::systemAlloc(size_t n)
-{
- RChunk* codeChunk = new RChunk();
-
- TInt errorCode = codeChunk->CreateLocalCode(n, n);
-
- char* allocation = reinterpret_cast<char*>(codeChunk->Base());
- if (!allocation)
- CRASH();
- ExecutablePool::Allocation alloc = { allocation, n, codeChunk };
- return alloc;
-}
-
-void ExecutablePool::systemRelease(const ExecutablePool::Allocation& alloc)
-{
- alloc.chunk->Close();
- delete alloc.chunk;
-}
-
-#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
-#error "ASSEMBLER_WX_EXCLUSIVE not yet suported on this platform."
-#endif
-
-}
-
-#endif // HAVE(ASSEMBLER)
diff --git a/JavaScriptCore/jit/ExecutableAllocatorWin.cpp b/JavaScriptCore/jit/ExecutableAllocatorWin.cpp
deleted file mode 100644
index e38323c..0000000
--- a/JavaScriptCore/jit/ExecutableAllocatorWin.cpp
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Copyright (C) 2008 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-
-#include "ExecutableAllocator.h"
-
-#if ENABLE(ASSEMBLER) && OS(WINDOWS)
-
-#include "windows.h"
-
-namespace JSC {
-
-void ExecutableAllocator::intializePageSize()
-{
- SYSTEM_INFO system_info;
- GetSystemInfo(&system_info);
- ExecutableAllocator::pageSize = system_info.dwPageSize;
-}
-
-ExecutablePool::Allocation ExecutablePool::systemAlloc(size_t n)
-{
- void* allocation = VirtualAlloc(0, n, MEM_COMMIT | MEM_RESERVE, PAGE_EXECUTE_READWRITE);
- if (!allocation)
- CRASH();
- ExecutablePool::Allocation alloc = {reinterpret_cast<char*>(allocation), n};
- return alloc;
-}
-
-void ExecutablePool::systemRelease(const ExecutablePool::Allocation& alloc)
-{
- VirtualFree(alloc.pages, 0, MEM_RELEASE);
-}
-
-#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
-#error "ASSEMBLER_WX_EXCLUSIVE not yet suported on this platform."
-#endif
-
-}
-
-#endif // HAVE(ASSEMBLER)
diff --git a/JavaScriptCore/jit/JIT.cpp b/JavaScriptCore/jit/JIT.cpp
index c0da66d..01401a7 100644
--- a/JavaScriptCore/jit/JIT.cpp
+++ b/JavaScriptCore/jit/JIT.cpp
@@ -24,6 +24,8 @@
*/
#include "config.h"
+
+#if ENABLE(JIT)
#include "JIT.h"
// This probably does not belong here; adding here for now as a quick Windows build fix.
@@ -32,8 +34,6 @@
JSC::MacroAssemblerX86Common::SSE2CheckState JSC::MacroAssemblerX86Common::s_sse2CheckState = NotCheckedSSE2;
#endif
-#if ENABLE(JIT)
-
#include "CodeBlock.h"
#include "Interpreter.h"
#include "JITInlineMethods.h"
@@ -71,17 +71,17 @@ void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAd
repatchBuffer.relinkCallerToFunction(returnAddress, newCalleeFunction);
}
-JIT::JIT(JSGlobalData* globalData, CodeBlock* codeBlock)
+JIT::JIT(JSGlobalData* globalData, CodeBlock* codeBlock, void* linkerOffset)
: m_interpreter(globalData->interpreter)
, m_globalData(globalData)
, m_codeBlock(codeBlock)
, m_labels(codeBlock ? codeBlock->instructions().size() : 0)
, m_propertyAccessCompilationInfo(codeBlock ? codeBlock->numberOfStructureStubInfos() : 0)
, m_callStructureStubCompilationInfo(codeBlock ? codeBlock->numberOfCallLinkInfos() : 0)
- , m_bytecodeIndex((unsigned)-1)
+ , m_bytecodeOffset((unsigned)-1)
#if USE(JSVALUE32_64)
, m_jumpTargetIndex(0)
- , m_mappedBytecodeIndex((unsigned)-1)
+ , m_mappedBytecodeOffset((unsigned)-1)
, m_mappedVirtualRegisterIndex((unsigned)-1)
, m_mappedTag((RegisterID)-1)
, m_mappedPayload((RegisterID)-1)
@@ -89,6 +89,7 @@ JIT::JIT(JSGlobalData* globalData, CodeBlock* codeBlock)
, m_lastResultBytecodeRegister(std::numeric_limits<int>::max())
, m_jumpTargetsPosition(0)
#endif
+ , m_linkerOffset(linkerOffset)
{
}
@@ -114,7 +115,7 @@ void JIT::emitTimeoutCheck()
#endif
#define NEXT_OPCODE(name) \
- m_bytecodeIndex += OPCODE_LENGTH(name); \
+ m_bytecodeOffset += OPCODE_LENGTH(name); \
break;
#if USE(JSVALUE32_64)
@@ -176,38 +177,34 @@ void JIT::privateCompileMainPass()
m_globalResolveInfoIndex = 0;
m_callLinkInfoIndex = 0;
- for (m_bytecodeIndex = 0; m_bytecodeIndex < instructionCount; ) {
- Instruction* currentInstruction = instructionsBegin + m_bytecodeIndex;
- ASSERT_WITH_MESSAGE(m_interpreter->isOpcode(currentInstruction->u.opcode), "privateCompileMainPass gone bad @ %d", m_bytecodeIndex);
+ for (m_bytecodeOffset = 0; m_bytecodeOffset < instructionCount; ) {
+ Instruction* currentInstruction = instructionsBegin + m_bytecodeOffset;
+ ASSERT_WITH_MESSAGE(m_interpreter->isOpcode(currentInstruction->u.opcode), "privateCompileMainPass gone bad @ %d", m_bytecodeOffset);
#if ENABLE(OPCODE_SAMPLING)
- if (m_bytecodeIndex > 0) // Avoid the overhead of sampling op_enter twice.
+ if (m_bytecodeOffset > 0) // Avoid the overhead of sampling op_enter twice.
sampleInstruction(currentInstruction);
#endif
-#if !USE(JSVALUE32_64)
- if (m_labels[m_bytecodeIndex].isUsed())
+#if USE(JSVALUE64)
+ if (m_labels[m_bytecodeOffset].isUsed())
killLastResultRegister();
#endif
- m_labels[m_bytecodeIndex] = label();
+ m_labels[m_bytecodeOffset] = label();
switch (m_interpreter->getOpcodeID(currentInstruction->u.opcode)) {
DEFINE_BINARY_OP(op_del_by_val)
-#if USE(JSVALUE32)
- DEFINE_BINARY_OP(op_div)
-#endif
DEFINE_BINARY_OP(op_in)
DEFINE_BINARY_OP(op_less)
DEFINE_BINARY_OP(op_lesseq)
- DEFINE_BINARY_OP(op_urshift)
DEFINE_UNARY_OP(op_is_boolean)
DEFINE_UNARY_OP(op_is_function)
DEFINE_UNARY_OP(op_is_number)
DEFINE_UNARY_OP(op_is_object)
DEFINE_UNARY_OP(op_is_string)
DEFINE_UNARY_OP(op_is_undefined)
-#if !USE(JSVALUE32_64)
+#if USE(JSVALUE64)
DEFINE_UNARY_OP(op_negate)
#endif
DEFINE_UNARY_OP(op_typeof)
@@ -222,26 +219,29 @@ void JIT::privateCompileMainPass()
DEFINE_OP(op_call_varargs)
DEFINE_OP(op_catch)
DEFINE_OP(op_construct)
- DEFINE_OP(op_construct_verify)
+ DEFINE_OP(op_get_callee)
+ DEFINE_OP(op_create_this)
DEFINE_OP(op_convert_this)
- DEFINE_OP(op_init_arguments)
+ DEFINE_OP(op_convert_this_strict)
+ DEFINE_OP(op_init_lazy_reg)
DEFINE_OP(op_create_arguments)
DEFINE_OP(op_debug)
DEFINE_OP(op_del_by_id)
-#if !USE(JSVALUE32)
DEFINE_OP(op_div)
-#endif
DEFINE_OP(op_end)
DEFINE_OP(op_enter)
- DEFINE_OP(op_enter_with_activation)
+ DEFINE_OP(op_create_activation)
DEFINE_OP(op_eq)
DEFINE_OP(op_eq_null)
DEFINE_OP(op_get_by_id)
+ DEFINE_OP(op_get_arguments_length)
DEFINE_OP(op_get_by_val)
+ DEFINE_OP(op_get_argument_by_val)
DEFINE_OP(op_get_by_pname)
DEFINE_OP(op_get_global_var)
DEFINE_OP(op_get_pnames)
DEFINE_OP(op_get_scoped_var)
+ DEFINE_OP(op_check_has_instance)
DEFINE_OP(op_instanceof)
DEFINE_OP(op_jeq_null)
DEFINE_OP(op_jfalse)
@@ -251,6 +251,7 @@ void JIT::privateCompileMainPass()
DEFINE_OP(op_jneq_ptr)
DEFINE_OP(op_jnless)
DEFINE_OP(op_jless)
+ DEFINE_OP(op_jlesseq)
DEFINE_OP(op_jnlesseq)
DEFINE_OP(op_jsr)
DEFINE_OP(op_jtrue)
@@ -271,7 +272,6 @@ void JIT::privateCompileMainPass()
DEFINE_OP(op_neq)
DEFINE_OP(op_neq_null)
DEFINE_OP(op_new_array)
- DEFINE_OP(op_new_error)
DEFINE_OP(op_new_func)
DEFINE_OP(op_new_func_exp)
DEFINE_OP(op_new_object)
@@ -297,11 +297,16 @@ void JIT::privateCompileMainPass()
DEFINE_OP(op_put_setter)
DEFINE_OP(op_resolve)
DEFINE_OP(op_resolve_base)
+ DEFINE_OP(op_ensure_property_exists)
DEFINE_OP(op_resolve_global)
+ DEFINE_OP(op_resolve_global_dynamic)
DEFINE_OP(op_resolve_skip)
DEFINE_OP(op_resolve_with_base)
DEFINE_OP(op_ret)
+ DEFINE_OP(op_call_put_result)
+ DEFINE_OP(op_ret_object_or_this)
DEFINE_OP(op_rshift)
+ DEFINE_OP(op_urshift)
DEFINE_OP(op_sret)
DEFINE_OP(op_strcat)
DEFINE_OP(op_stricteq)
@@ -312,6 +317,8 @@ void JIT::privateCompileMainPass()
DEFINE_OP(op_tear_off_activation)
DEFINE_OP(op_tear_off_arguments)
DEFINE_OP(op_throw)
+ DEFINE_OP(op_throw_reference_error)
+ DEFINE_OP(op_throw_syntax_error)
DEFINE_OP(op_to_jsnumber)
DEFINE_OP(op_to_primitive)
@@ -322,6 +329,16 @@ void JIT::privateCompileMainPass()
case op_get_by_id_proto_list:
case op_get_by_id_self:
case op_get_by_id_self_list:
+ case op_get_by_id_getter_chain:
+ case op_get_by_id_getter_proto:
+ case op_get_by_id_getter_proto_list:
+ case op_get_by_id_getter_self:
+ case op_get_by_id_getter_self_list:
+ case op_get_by_id_custom_chain:
+ case op_get_by_id_custom_proto:
+ case op_get_by_id_custom_proto_list:
+ case op_get_by_id_custom_self:
+ case op_get_by_id_custom_self_list:
case op_get_string_length:
case op_put_by_id_generic:
case op_put_by_id_replace:
@@ -335,7 +352,7 @@ void JIT::privateCompileMainPass()
#ifndef NDEBUG
// Reset this, in order to guard its use with ASSERTs.
- m_bytecodeIndex = (unsigned)-1;
+ m_bytecodeOffset = (unsigned)-1;
#endif
}
@@ -344,7 +361,7 @@ void JIT::privateCompileLinkPass()
{
unsigned jmpTableCount = m_jmpTable.size();
for (unsigned i = 0; i < jmpTableCount; ++i)
- m_jmpTable[i].from.linkTo(m_labels[m_jmpTable[i].toBytecodeIndex], this);
+ m_jmpTable[i].from.linkTo(m_labels[m_jmpTable[i].toBytecodeOffset], this);
m_jmpTable.clear();
}
@@ -353,21 +370,19 @@ void JIT::privateCompileSlowCases()
Instruction* instructionsBegin = m_codeBlock->instructions().begin();
m_propertyAccessInstructionIndex = 0;
-#if USE(JSVALUE32_64)
m_globalResolveInfoIndex = 0;
-#endif
m_callLinkInfoIndex = 0;
for (Vector<SlowCaseEntry>::iterator iter = m_slowCases.begin(); iter != m_slowCases.end();) {
-#if !USE(JSVALUE32_64)
+#if USE(JSVALUE64)
killLastResultRegister();
#endif
- m_bytecodeIndex = iter->to;
+ m_bytecodeOffset = iter->to;
#ifndef NDEBUG
- unsigned firstTo = m_bytecodeIndex;
+ unsigned firstTo = m_bytecodeOffset;
#endif
- Instruction* currentInstruction = instructionsBegin + m_bytecodeIndex;
+ Instruction* currentInstruction = instructionsBegin + m_bytecodeOffset;
switch (m_interpreter->getOpcodeID(currentInstruction->u.opcode)) {
DEFINE_SLOWCASE_OP(op_add)
@@ -379,21 +394,24 @@ void JIT::privateCompileSlowCases()
DEFINE_SLOWCASE_OP(op_call_eval)
DEFINE_SLOWCASE_OP(op_call_varargs)
DEFINE_SLOWCASE_OP(op_construct)
- DEFINE_SLOWCASE_OP(op_construct_verify)
DEFINE_SLOWCASE_OP(op_convert_this)
-#if !USE(JSVALUE32)
+ DEFINE_SLOWCASE_OP(op_convert_this_strict)
DEFINE_SLOWCASE_OP(op_div)
-#endif
DEFINE_SLOWCASE_OP(op_eq)
DEFINE_SLOWCASE_OP(op_get_by_id)
+ DEFINE_SLOWCASE_OP(op_get_arguments_length)
DEFINE_SLOWCASE_OP(op_get_by_val)
+ DEFINE_SLOWCASE_OP(op_get_argument_by_val)
DEFINE_SLOWCASE_OP(op_get_by_pname)
+ DEFINE_SLOWCASE_OP(op_check_has_instance)
DEFINE_SLOWCASE_OP(op_instanceof)
DEFINE_SLOWCASE_OP(op_jfalse)
DEFINE_SLOWCASE_OP(op_jnless)
DEFINE_SLOWCASE_OP(op_jless)
+ DEFINE_SLOWCASE_OP(op_jlesseq)
DEFINE_SLOWCASE_OP(op_jnlesseq)
DEFINE_SLOWCASE_OP(op_jtrue)
+ DEFINE_SLOWCASE_OP(op_load_varargs)
DEFINE_SLOWCASE_OP(op_loop_if_less)
DEFINE_SLOWCASE_OP(op_loop_if_lesseq)
DEFINE_SLOWCASE_OP(op_loop_if_true)
@@ -414,10 +432,10 @@ void JIT::privateCompileSlowCases()
DEFINE_SLOWCASE_OP(op_pre_inc)
DEFINE_SLOWCASE_OP(op_put_by_id)
DEFINE_SLOWCASE_OP(op_put_by_val)
-#if USE(JSVALUE32_64)
DEFINE_SLOWCASE_OP(op_resolve_global)
-#endif
+ DEFINE_SLOWCASE_OP(op_resolve_global_dynamic)
DEFINE_SLOWCASE_OP(op_rshift)
+ DEFINE_SLOWCASE_OP(op_urshift)
DEFINE_SLOWCASE_OP(op_stricteq)
DEFINE_SLOWCASE_OP(op_sub)
DEFINE_SLOWCASE_OP(op_to_jsnumber)
@@ -439,76 +457,87 @@ void JIT::privateCompileSlowCases()
#ifndef NDEBUG
// Reset this, in order to guard its use with ASSERTs.
- m_bytecodeIndex = (unsigned)-1;
+ m_bytecodeOffset = (unsigned)-1;
#endif
}
-JITCode JIT::privateCompile()
+JITCode JIT::privateCompile(CodePtr* functionEntryArityCheck)
{
+ // Could use a pop_m, but would need to offset the following instruction if so.
+ preserveReturnAddressAfterCall(regT2);
+ emitPutToCallFrameHeader(regT2, RegisterFile::ReturnPC);
+
+ Label beginLabel(this);
+
sampleCodeBlock(m_codeBlock);
#if ENABLE(OPCODE_SAMPLING)
sampleInstruction(m_codeBlock->instructions().begin());
#endif
- // Could use a pop_m, but would need to offset the following instruction if so.
- preserveReturnAddressAfterCall(regT2);
- emitPutToCallFrameHeader(regT2, RegisterFile::ReturnPC);
-
- Jump slowRegisterFileCheck;
- Label afterRegisterFileCheck;
+ Jump registerFileCheck;
if (m_codeBlock->codeType() == FunctionCode) {
// In the case of a fast linked call, we do not set this up in the caller.
emitPutImmediateToCallFrameHeader(m_codeBlock, RegisterFile::CodeBlock);
- peek(regT0, OBJECT_OFFSETOF(JITStackFrame, registerFile) / sizeof (void*));
addPtr(Imm32(m_codeBlock->m_numCalleeRegisters * sizeof(Register)), callFrameRegister, regT1);
-
- slowRegisterFileCheck = branchPtr(Above, regT1, Address(regT0, OBJECT_OFFSETOF(RegisterFile, m_end)));
- afterRegisterFileCheck = label();
+ registerFileCheck = branchPtr(Below, AbsoluteAddress(&m_globalData->interpreter->registerFile().m_end), regT1);
}
+ Label functionBody = label();
+
privateCompileMainPass();
privateCompileLinkPass();
privateCompileSlowCases();
+ Label arityCheck;
+ Call callArityCheck;
if (m_codeBlock->codeType() == FunctionCode) {
- slowRegisterFileCheck.link(this);
- m_bytecodeIndex = 0;
+ registerFileCheck.link(this);
+ m_bytecodeOffset = 0;
JITStubCall(this, cti_register_file_check).call();
#ifndef NDEBUG
- m_bytecodeIndex = (unsigned)-1; // Reset this, in order to guard its use with ASSERTs.
+ m_bytecodeOffset = (unsigned)-1; // Reset this, in order to guard its use with ASSERTs.
#endif
- jump(afterRegisterFileCheck);
+ jump(functionBody);
+
+ arityCheck = label();
+ preserveReturnAddressAfterCall(regT2);
+ emitPutToCallFrameHeader(regT2, RegisterFile::ReturnPC);
+ branch32(Equal, regT1, Imm32(m_codeBlock->m_numParameters)).linkTo(beginLabel, this);
+ restoreArgumentReference();
+ callArityCheck = call();
+ move(regT0, callFrameRegister);
+ jump(beginLabel);
}
ASSERT(m_jmpTable.isEmpty());
- LinkBuffer patchBuffer(this, m_globalData->executableAllocator.poolForSize(m_assembler.size()));
+ LinkBuffer patchBuffer(this, m_globalData->executableAllocator.poolForSize(m_assembler.size()), m_linkerOffset);
// Translate vPC offsets into addresses in JIT generated code, for switch tables.
for (unsigned i = 0; i < m_switches.size(); ++i) {
SwitchRecord record = m_switches[i];
- unsigned bytecodeIndex = record.bytecodeIndex;
+ unsigned bytecodeOffset = record.bytecodeOffset;
if (record.type != SwitchRecord::String) {
ASSERT(record.type == SwitchRecord::Immediate || record.type == SwitchRecord::Character);
ASSERT(record.jumpTable.simpleJumpTable->branchOffsets.size() == record.jumpTable.simpleJumpTable->ctiOffsets.size());
- record.jumpTable.simpleJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeIndex + record.defaultOffset]);
+ record.jumpTable.simpleJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeOffset + record.defaultOffset]);
for (unsigned j = 0; j < record.jumpTable.simpleJumpTable->branchOffsets.size(); ++j) {
unsigned offset = record.jumpTable.simpleJumpTable->branchOffsets[j];
- record.jumpTable.simpleJumpTable->ctiOffsets[j] = offset ? patchBuffer.locationOf(m_labels[bytecodeIndex + offset]) : record.jumpTable.simpleJumpTable->ctiDefault;
+ record.jumpTable.simpleJumpTable->ctiOffsets[j] = offset ? patchBuffer.locationOf(m_labels[bytecodeOffset + offset]) : record.jumpTable.simpleJumpTable->ctiDefault;
}
} else {
ASSERT(record.type == SwitchRecord::String);
- record.jumpTable.stringJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeIndex + record.defaultOffset]);
+ record.jumpTable.stringJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeOffset + record.defaultOffset]);
StringJumpTable::StringOffsetTable::iterator end = record.jumpTable.stringJumpTable->offsetTable.end();
for (StringJumpTable::StringOffsetTable::iterator it = record.jumpTable.stringJumpTable->offsetTable.begin(); it != end; ++it) {
unsigned offset = it->second.branchOffset;
- it->second.ctiOffset = offset ? patchBuffer.locationOf(m_labels[bytecodeIndex + offset]) : record.jumpTable.stringJumpTable->ctiDefault;
+ it->second.ctiOffset = offset ? patchBuffer.locationOf(m_labels[bytecodeOffset + offset]) : record.jumpTable.stringJumpTable->ctiDefault;
}
}
}
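Note: the arity-check entry point added in the hunk above acts as a small trampoline ahead of the normal function entry; sketched as control flow (illustrative, assuming the argument count arrives in regT1 as the surrounding code implies):

// arityCheck:                          // entry used when argc may not match
//     store the return address into the call frame (ReturnPC)
//     if (regT1 == m_numParameters) goto beginLabel
//     call cti_op_call_arityCheck      // or cti_op_construct_arityCheck; returns the fixed-up frame in regT0
//     callFrameRegister = regT0
//     goto beginLabel
// beginLabel:                          // entry used by exactly-matched, linked calls
//     ... compiled function body ...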
@@ -523,10 +552,10 @@ JITCode JIT::privateCompile()
patchBuffer.link(iter->from, FunctionPtr(iter->to));
}
- if (m_codeBlock->hasExceptionInfo()) {
+ if (m_codeBlock->needsCallReturnIndices()) {
m_codeBlock->callReturnIndexVector().reserveCapacity(m_calls.size());
for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter)
- m_codeBlock->callReturnIndexVector().append(CallReturnOffsetToBytecodeIndex(patchBuffer.returnAddressOffset(iter->from), iter->bytecodeIndex));
+ m_codeBlock->callReturnIndexVector().append(CallReturnOffsetToBytecodeOffset(patchBuffer.returnAddressOffset(iter->from), iter->bytecodeOffset));
}
// Link absolute addresses for jsr
@@ -557,10 +586,15 @@ JITCode JIT::privateCompile()
info.callReturnLocation = m_codeBlock->structureStubInfo(m_methodCallCompilationInfo[i].propertyAccessIndex).callReturnLocation;
}
+ if (m_codeBlock->codeType() == FunctionCode && functionEntryArityCheck) {
+ patchBuffer.link(callArityCheck, FunctionPtr(m_codeBlock->m_isConstructor ? cti_op_construct_arityCheck : cti_op_call_arityCheck));
+ *functionEntryArityCheck = patchBuffer.locationOf(arityCheck);
+ }
+
return patchBuffer.finalizeCode();
}
-#if !USE(JSVALUE32_64)
+#if USE(JSVALUE64)
void JIT::emitGetVariableObjectRegister(RegisterID variableObject, int index, RegisterID dst)
{
loadPtr(Address(variableObject, OBJECT_OFFSETOF(JSVariableObject, d)), dst);
@@ -577,12 +611,12 @@ void JIT::emitPutVariableObjectRegister(RegisterID src, RegisterID variableObjec
#endif
#if ENABLE(JIT_OPTIMIZE_CALL)
-void JIT::unlinkCall(CallLinkInfo* callLinkInfo)
+void JIT::unlinkCallOrConstruct(CallLinkInfo* callLinkInfo)
{
// When the JSFunction is deleted the pointer embedded in the instruction stream will no longer be valid
// (and, if a new JSFunction happened to be constructed at the same location, we could get a false positive
// match). Reset the check so it no longer matches.
- RepatchBuffer repatchBuffer(callLinkInfo->ownerCodeBlock.get());
+ RepatchBuffer repatchBuffer(callLinkInfo->ownerCodeBlock);
#if USE(JSVALUE32_64)
repatchBuffer.repatch(callLinkInfo->hotPathBegin, 0);
#else
@@ -590,7 +624,27 @@ void JIT::unlinkCall(CallLinkInfo* callLinkInfo)
#endif
}
-void JIT::linkCall(JSFunction* callee, CodeBlock* callerCodeBlock, CodeBlock* calleeCodeBlock, JITCode& code, CallLinkInfo* callLinkInfo, int callerArgCount, JSGlobalData* globalData)
+void JIT::linkCall(JSFunction* callee, CodeBlock* callerCodeBlock, CodeBlock* calleeCodeBlock, JIT::CodePtr code, CallLinkInfo* callLinkInfo, int callerArgCount, JSGlobalData* globalData)
+{
+ RepatchBuffer repatchBuffer(callerCodeBlock);
+
+ // Currently we only link calls with the exact number of arguments.
+ // If this is a native call, calleeCodeBlock is null, so the number of parameters is unimportant.
+ if (!calleeCodeBlock || (callerArgCount == calleeCodeBlock->m_numParameters)) {
+ ASSERT(!callLinkInfo->isLinked());
+
+ if (calleeCodeBlock)
+ calleeCodeBlock->addCaller(callLinkInfo);
+
+ repatchBuffer.repatch(callLinkInfo->hotPathBegin, callee);
+ repatchBuffer.relink(callLinkInfo->hotPathOther, code);
+ }
+
+ // patch the call so we do not continue to try to link.
+ repatchBuffer.relink(callLinkInfo->callReturnLocation, globalData->jitStubs->ctiVirtualCall());
+}
+
+void JIT::linkConstruct(JSFunction* callee, CodeBlock* callerCodeBlock, CodeBlock* calleeCodeBlock, JIT::CodePtr code, CallLinkInfo* callLinkInfo, int callerArgCount, JSGlobalData* globalData)
{
RepatchBuffer repatchBuffer(callerCodeBlock);
@@ -603,11 +657,11 @@ void JIT::linkCall(JSFunction* callee, CodeBlock* callerCodeBlock, CodeBlock* ca
calleeCodeBlock->addCaller(callLinkInfo);
repatchBuffer.repatch(callLinkInfo->hotPathBegin, callee);
- repatchBuffer.relink(callLinkInfo->hotPathOther, code.addressForCall());
+ repatchBuffer.relink(callLinkInfo->hotPathOther, code);
}
// patch the call so we do not continue to try to link.
- repatchBuffer.relink(callLinkInfo->callReturnLocation, globalData->jitStubs.ctiVirtualCall());
+ repatchBuffer.relink(callLinkInfo->callReturnLocation, globalData->jitStubs->ctiVirtualConstruct());
}
#endif // ENABLE(JIT_OPTIMIZE_CALL)
diff --git a/JavaScriptCore/jit/JIT.h b/JavaScriptCore/jit/JIT.h
index bfbb1ee..907a774 100644
--- a/JavaScriptCore/jit/JIT.h
+++ b/JavaScriptCore/jit/JIT.h
@@ -26,8 +26,6 @@
#ifndef JIT_h
#define JIT_h
-#include <wtf/Platform.h>
-
#if ENABLE(JIT)
// We've run into some problems where changing the size of the class JIT leads to
@@ -42,15 +40,10 @@
#include "CodeBlock.h"
#include "Interpreter.h"
-#include "JITCode.h"
-#include "JITStubs.h"
+#include "JSInterfaceJIT.h"
#include "Opcode.h"
-#include "RegisterFile.h"
-#include "MacroAssembler.h"
#include "Profiler.h"
#include <bytecode/SamplingTool.h>
-#include <wtf/AlwaysInline.h>
-#include <wtf/Vector.h>
namespace JSC {
@@ -73,16 +66,16 @@ namespace JSC {
struct CallRecord {
MacroAssembler::Call from;
- unsigned bytecodeIndex;
+ unsigned bytecodeOffset;
void* to;
CallRecord()
{
}
- CallRecord(MacroAssembler::Call from, unsigned bytecodeIndex, void* to = 0)
+ CallRecord(MacroAssembler::Call from, unsigned bytecodeOffset, void* to = 0)
: from(from)
- , bytecodeIndex(bytecodeIndex)
+ , bytecodeOffset(bytecodeOffset)
, to(to)
{
}
@@ -90,11 +83,11 @@ namespace JSC {
struct JumpTable {
MacroAssembler::Jump from;
- unsigned toBytecodeIndex;
+ unsigned toBytecodeOffset;
JumpTable(MacroAssembler::Jump f, unsigned t)
: from(f)
- , toBytecodeIndex(t)
+ , toBytecodeOffset(t)
{
}
};
@@ -126,20 +119,20 @@ namespace JSC {
StringJumpTable* stringJumpTable;
} jumpTable;
- unsigned bytecodeIndex;
+ unsigned bytecodeOffset;
unsigned defaultOffset;
- SwitchRecord(SimpleJumpTable* jumpTable, unsigned bytecodeIndex, unsigned defaultOffset, Type type)
+ SwitchRecord(SimpleJumpTable* jumpTable, unsigned bytecodeOffset, unsigned defaultOffset, Type type)
: type(type)
- , bytecodeIndex(bytecodeIndex)
+ , bytecodeOffset(bytecodeOffset)
, defaultOffset(defaultOffset)
{
this->jumpTable.simpleJumpTable = jumpTable;
}
- SwitchRecord(StringJumpTable* jumpTable, unsigned bytecodeIndex, unsigned defaultOffset)
+ SwitchRecord(StringJumpTable* jumpTable, unsigned bytecodeOffset, unsigned defaultOffset)
: type(String)
- , bytecodeIndex(bytecodeIndex)
+ , bytecodeOffset(bytecodeOffset)
, defaultOffset(defaultOffset)
{
this->jumpTable.stringJumpTable = jumpTable;
@@ -172,159 +165,76 @@ namespace JSC {
void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction);
void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, FunctionPtr newCalleeFunction);
- class JIT : private MacroAssembler {
+ class JIT : private JSInterfaceJIT {
friend class JITStubCall;
using MacroAssembler::Jump;
using MacroAssembler::JumpList;
using MacroAssembler::Label;
- // NOTES:
- //
- // regT0 has two special meanings. The return value from a stub
- // call will always be in regT0, and by default (unless
- // a register is specified) emitPutVirtualRegister() will store
- // the value from regT0.
- //
- // regT3 is required to be callee-preserved.
- //
- // tempRegister2 is has no such dependencies. It is important that
- // on x86/x86-64 it is ecx for performance reasons, since the
- // MacroAssembler will need to plant register swaps if it is not -
- // however the code will still function correctly.
-#if CPU(X86_64)
- static const RegisterID returnValueRegister = X86Registers::eax;
- static const RegisterID cachedResultRegister = X86Registers::eax;
- static const RegisterID firstArgumentRegister = X86Registers::edi;
-
- static const RegisterID timeoutCheckRegister = X86Registers::r12;
- static const RegisterID callFrameRegister = X86Registers::r13;
- static const RegisterID tagTypeNumberRegister = X86Registers::r14;
- static const RegisterID tagMaskRegister = X86Registers::r15;
-
- static const RegisterID regT0 = X86Registers::eax;
- static const RegisterID regT1 = X86Registers::edx;
- static const RegisterID regT2 = X86Registers::ecx;
- static const RegisterID regT3 = X86Registers::ebx;
-
- static const FPRegisterID fpRegT0 = X86Registers::xmm0;
- static const FPRegisterID fpRegT1 = X86Registers::xmm1;
- static const FPRegisterID fpRegT2 = X86Registers::xmm2;
-#elif CPU(X86)
- static const RegisterID returnValueRegister = X86Registers::eax;
- static const RegisterID cachedResultRegister = X86Registers::eax;
- // On x86 we always use fastcall conventions = but on
- // OS X if might make more sense to just use regparm.
- static const RegisterID firstArgumentRegister = X86Registers::ecx;
-
- static const RegisterID timeoutCheckRegister = X86Registers::esi;
- static const RegisterID callFrameRegister = X86Registers::edi;
-
- static const RegisterID regT0 = X86Registers::eax;
- static const RegisterID regT1 = X86Registers::edx;
- static const RegisterID regT2 = X86Registers::ecx;
- static const RegisterID regT3 = X86Registers::ebx;
-
- static const FPRegisterID fpRegT0 = X86Registers::xmm0;
- static const FPRegisterID fpRegT1 = X86Registers::xmm1;
- static const FPRegisterID fpRegT2 = X86Registers::xmm2;
-#elif CPU(ARM_THUMB2)
- static const RegisterID returnValueRegister = ARMRegisters::r0;
- static const RegisterID cachedResultRegister = ARMRegisters::r0;
- static const RegisterID firstArgumentRegister = ARMRegisters::r0;
-
- static const RegisterID regT0 = ARMRegisters::r0;
- static const RegisterID regT1 = ARMRegisters::r1;
- static const RegisterID regT2 = ARMRegisters::r2;
- static const RegisterID regT3 = ARMRegisters::r4;
-
- static const RegisterID callFrameRegister = ARMRegisters::r5;
- static const RegisterID timeoutCheckRegister = ARMRegisters::r6;
-
- static const FPRegisterID fpRegT0 = ARMRegisters::d0;
- static const FPRegisterID fpRegT1 = ARMRegisters::d1;
- static const FPRegisterID fpRegT2 = ARMRegisters::d2;
-#elif CPU(ARM_TRADITIONAL)
- static const RegisterID returnValueRegister = ARMRegisters::r0;
- static const RegisterID cachedResultRegister = ARMRegisters::r0;
- static const RegisterID firstArgumentRegister = ARMRegisters::r0;
-
- static const RegisterID timeoutCheckRegister = ARMRegisters::r5;
- static const RegisterID callFrameRegister = ARMRegisters::r4;
-
- static const RegisterID regT0 = ARMRegisters::r0;
- static const RegisterID regT1 = ARMRegisters::r1;
- static const RegisterID regT2 = ARMRegisters::r2;
- // Callee preserved
- static const RegisterID regT3 = ARMRegisters::r7;
-
- static const RegisterID regS0 = ARMRegisters::S0;
- // Callee preserved
- static const RegisterID regS1 = ARMRegisters::S1;
-
- static const RegisterID regStackPtr = ARMRegisters::sp;
- static const RegisterID regLink = ARMRegisters::lr;
-
- static const FPRegisterID fpRegT0 = ARMRegisters::d0;
- static const FPRegisterID fpRegT1 = ARMRegisters::d1;
- static const FPRegisterID fpRegT2 = ARMRegisters::d2;
-#else
- #error "JIT not supported on this platform."
-#endif
-
static const int patchGetByIdDefaultStructure = -1;
// Magic number - initial offset cannot be representable as a signed 8bit value, or the X86Assembler
// will compress the displacement, and we may not be able to fit a patched offset.
static const int patchGetByIdDefaultOffset = 256;
public:
- static JITCode compile(JSGlobalData* globalData, CodeBlock* codeBlock)
+ static JITCode compile(JSGlobalData* globalData, CodeBlock* codeBlock, CodePtr* functionEntryArityCheck = 0, void* offsetBase = 0)
{
- return JIT(globalData, codeBlock).privateCompile();
+ return JIT(globalData, codeBlock, offsetBase).privateCompile(functionEntryArityCheck);
}
- static void compileGetByIdProto(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, ReturnAddressPtr returnAddress)
+ static void compileGetByIdProto(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress)
{
JIT jit(globalData, codeBlock);
- jit.privateCompileGetByIdProto(stubInfo, structure, prototypeStructure, cachedOffset, returnAddress, callFrame);
+ jit.privateCompileGetByIdProto(stubInfo, structure, prototypeStructure, ident, slot, cachedOffset, returnAddress, callFrame);
}
- static void compileGetByIdSelfList(JSGlobalData* globalData, CodeBlock* codeBlock, StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, size_t cachedOffset)
+ static void compileGetByIdSelfList(JSGlobalData* globalData, CodeBlock* codeBlock, StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset)
{
JIT jit(globalData, codeBlock);
- jit.privateCompileGetByIdSelfList(stubInfo, polymorphicStructures, currentIndex, structure, cachedOffset);
+ jit.privateCompileGetByIdSelfList(stubInfo, polymorphicStructures, currentIndex, structure, ident, slot, cachedOffset);
}
- static void compileGetByIdProtoList(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructureList, int currentIndex, Structure* structure, Structure* prototypeStructure, size_t cachedOffset)
+ static void compileGetByIdProtoList(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructureList, int currentIndex, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset)
{
JIT jit(globalData, codeBlock);
- jit.privateCompileGetByIdProtoList(stubInfo, prototypeStructureList, currentIndex, structure, prototypeStructure, cachedOffset, callFrame);
+ jit.privateCompileGetByIdProtoList(stubInfo, prototypeStructureList, currentIndex, structure, prototypeStructure, ident, slot, cachedOffset, callFrame);
}
- static void compileGetByIdChainList(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructureList, int currentIndex, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset)
+ static void compileGetByIdChainList(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructureList, int currentIndex, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset)
{
JIT jit(globalData, codeBlock);
- jit.privateCompileGetByIdChainList(stubInfo, prototypeStructureList, currentIndex, structure, chain, count, cachedOffset, callFrame);
+ jit.privateCompileGetByIdChainList(stubInfo, prototypeStructureList, currentIndex, structure, chain, count, ident, slot, cachedOffset, callFrame);
}
- static void compileGetByIdChain(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, ReturnAddressPtr returnAddress)
+ static void compileGetByIdChain(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress)
{
JIT jit(globalData, codeBlock);
- jit.privateCompileGetByIdChain(stubInfo, structure, chain, count, cachedOffset, returnAddress, callFrame);
+ jit.privateCompileGetByIdChain(stubInfo, structure, chain, count, ident, slot, cachedOffset, returnAddress, callFrame);
}
- static void compilePutByIdTransition(JSGlobalData* globalData, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress)
+ static void compilePutByIdTransition(JSGlobalData* globalData, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress, bool direct)
{
JIT jit(globalData, codeBlock);
- jit.privateCompilePutByIdTransition(stubInfo, oldStructure, newStructure, cachedOffset, chain, returnAddress);
+ jit.privateCompilePutByIdTransition(stubInfo, oldStructure, newStructure, cachedOffset, chain, returnAddress, direct);
}
static void compileCTIMachineTrampolines(JSGlobalData* globalData, RefPtr<ExecutablePool>* executablePool, TrampolineStructure *trampolines)
{
- JIT jit(globalData);
+ if (!globalData->canUseJIT())
+ return;
+ JIT jit(globalData, 0, 0);
jit.privateCompileCTIMachineTrampolines(executablePool, globalData, trampolines);
}
+ static CodePtr compileCTINativeCall(JSGlobalData* globalData, PassRefPtr<ExecutablePool> executablePool, NativeFunction func)
+ {
+ if (!globalData->canUseJIT())
+ return CodePtr();
+ JIT jit(globalData, 0, 0);
+ return jit.privateCompileCTINativeCall(executablePool, globalData, func);
+ }
+
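Both trampoline entry points now bail out when the JIT is unavailable at run time; compileCTINativeCall signals this with a null CodePtr. A hypothetical caller (names invented, and assuming CodePtr is null-testable as elsewhere in the tree) might react like this:

    CodePtr thunk = JIT::compileCTINativeCall(globalData, pool, func);
    if (!thunk) {
        // JIT disabled: fall back to the interpreter's native-call path.
    }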
static void patchGetByIdSelf(CodeBlock* codeblock, StructureStubInfo*, Structure*, size_t cachedOffset, ReturnAddressPtr returnAddress);
- static void patchPutByIdReplace(CodeBlock* codeblock, StructureStubInfo*, Structure*, size_t cachedOffset, ReturnAddressPtr returnAddress);
+ static void patchPutByIdReplace(CodeBlock* codeblock, StructureStubInfo*, Structure*, size_t cachedOffset, ReturnAddressPtr returnAddress, bool direct);
static void patchMethodCallProto(CodeBlock* codeblock, MethodCallLinkInfo&, JSFunction*, Structure*, JSObject*, ReturnAddressPtr);
static void compilePatchGetArrayLength(JSGlobalData* globalData, CodeBlock* codeBlock, ReturnAddressPtr returnAddress)
@@ -333,8 +243,9 @@ namespace JSC {
return jit.privateCompilePatchGetArrayLength(returnAddress);
}
- static void linkCall(JSFunction* callee, CodeBlock* callerCodeBlock, CodeBlock* calleeCodeBlock, JITCode&, CallLinkInfo*, int callerArgCount, JSGlobalData*);
- static void unlinkCall(CallLinkInfo*);
+ static void linkCall(JSFunction* callee, CodeBlock* callerCodeBlock, CodeBlock* calleeCodeBlock, CodePtr, CallLinkInfo*, int callerArgCount, JSGlobalData*);
+ static void linkConstruct(JSFunction* callee, CodeBlock* callerCodeBlock, CodeBlock* calleeCodeBlock, CodePtr, CallLinkInfo*, int callerArgCount, JSGlobalData*);
+ static void unlinkCallOrConstruct(CallLinkInfo*);
private:
struct JSRInfo {
@@ -348,20 +259,22 @@ namespace JSC {
}
};
- JIT(JSGlobalData*, CodeBlock* = 0);
+ JIT(JSGlobalData*, CodeBlock* = 0, void* = 0);
void privateCompileMainPass();
void privateCompileLinkPass();
void privateCompileSlowCases();
- JITCode privateCompile();
- void privateCompileGetByIdProto(StructureStubInfo*, Structure*, Structure* prototypeStructure, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame);
- void privateCompileGetByIdSelfList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, size_t cachedOffset);
- void privateCompileGetByIdProtoList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, Structure* prototypeStructure, size_t cachedOffset, CallFrame* callFrame);
- void privateCompileGetByIdChainList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, StructureChain* chain, size_t count, size_t cachedOffset, CallFrame* callFrame);
- void privateCompileGetByIdChain(StructureStubInfo*, Structure*, StructureChain*, size_t count, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame);
- void privateCompilePutByIdTransition(StructureStubInfo*, Structure*, Structure*, size_t cachedOffset, StructureChain*, ReturnAddressPtr returnAddress);
+ JITCode privateCompile(CodePtr* functionEntryArityCheck);
+ void privateCompileGetByIdProto(StructureStubInfo*, Structure*, Structure* prototypeStructure, const Identifier&, const PropertySlot&, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame);
+ void privateCompileGetByIdSelfList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, const Identifier&, const PropertySlot&, size_t cachedOffset);
+ void privateCompileGetByIdProtoList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, Structure* prototypeStructure, const Identifier&, const PropertySlot&, size_t cachedOffset, CallFrame* callFrame);
+ void privateCompileGetByIdChainList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, StructureChain* chain, size_t count, const Identifier&, const PropertySlot&, size_t cachedOffset, CallFrame* callFrame);
+ void privateCompileGetByIdChain(StructureStubInfo*, Structure*, StructureChain*, size_t count, const Identifier&, const PropertySlot&, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame);
+ void privateCompilePutByIdTransition(StructureStubInfo*, Structure*, Structure*, size_t cachedOffset, StructureChain*, ReturnAddressPtr returnAddress, bool direct);
void privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executablePool, JSGlobalData* data, TrampolineStructure *trampolines);
+ Label privateCompileCTINativeCall(JSGlobalData*, bool isConstruct = false);
+ CodePtr privateCompileCTINativeCall(PassRefPtr<ExecutablePool> executablePool, JSGlobalData* data, NativeFunction func);
void privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress);
void addSlowCase(Jump);
@@ -372,11 +285,8 @@ namespace JSC {
void compileOpCall(OpcodeID, Instruction* instruction, unsigned callLinkInfoIndex);
void compileOpCallVarargs(Instruction* instruction);
void compileOpCallInitializeCallFrame();
- void compileOpCallSetupArgs(Instruction*);
- void compileOpCallVarargsSetupArgs(Instruction*);
void compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter, unsigned callLinkInfoIndex, OpcodeID opcodeID);
void compileOpCallVarargsSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter);
- void compileOpConstructSetupArgs(Instruction*);
enum CompileOpStrictEqType { OpStrictEq, OpNStrictEq };
void compileOpStrictEq(Instruction* instruction, CompileOpStrictEqType type);
@@ -385,14 +295,9 @@ namespace JSC {
void emitLoadDouble(unsigned index, FPRegisterID value);
void emitLoadInt32ToDouble(unsigned index, FPRegisterID value);
- Address addressFor(unsigned index, RegisterID base = callFrameRegister);
-
- void testPrototype(Structure*, JumpList& failureCases);
+ void testPrototype(JSValue, JumpList& failureCases);
#if USE(JSVALUE32_64)
- Address tagFor(unsigned index, RegisterID base = callFrameRegister);
- Address payloadFor(unsigned index, RegisterID base = callFrameRegister);
-
bool getOperandConstantImmediateInt(unsigned op1, unsigned op2, unsigned& op, int32_t& constant);
void emitLoadTag(unsigned index, RegisterID tag);
@@ -410,8 +315,8 @@ namespace JSC {
void emitStoreBool(unsigned index, RegisterID tag, bool indexIsBool = false);
void emitStoreDouble(unsigned index, FPRegisterID value);
- bool isLabeled(unsigned bytecodeIndex);
- void map(unsigned bytecodeIndex, unsigned virtualRegisterIndex, RegisterID tag, RegisterID payload);
+ bool isLabeled(unsigned bytecodeOffset);
+ void map(unsigned bytecodeOffset, unsigned virtualRegisterIndex, RegisterID tag, RegisterID payload);
void unmap(RegisterID);
void unmap();
bool isMapped(unsigned virtualRegisterIndex);
@@ -451,12 +356,8 @@ namespace JSC {
static const int patchOffsetGetByIdPropertyMapOffset1 = 22;
static const int patchOffsetGetByIdPropertyMapOffset2 = 28;
static const int patchOffsetGetByIdPutResult = 28;
-#if ENABLE(OPCODE_SAMPLING) && USE(JIT_STUB_ARGUMENT_VA_LIST)
- static const int patchOffsetGetByIdSlowCaseCall = 35;
-#elif ENABLE(OPCODE_SAMPLING)
+#if ENABLE(OPCODE_SAMPLING)
static const int patchOffsetGetByIdSlowCaseCall = 37;
-#elif USE(JIT_STUB_ARGUMENT_VA_LIST)
- static const int patchOffsetGetByIdSlowCaseCall = 25;
#else
static const int patchOffsetGetByIdSlowCaseCall = 27;
#endif
@@ -501,11 +402,98 @@ namespace JSC {
static const int sequenceGetByIdHotPathInstructionSpace = 36;
static const int sequenceGetByIdHotPathConstantSpace = 4;
// sequenceGetByIdSlowCase
+ static const int sequenceGetByIdSlowCaseInstructionSpace = 56;
+ static const int sequenceGetByIdSlowCaseConstantSpace = 2;
+ // sequencePutById
+ static const int sequencePutByIdInstructionSpace = 36;
+ static const int sequencePutByIdConstantSpace = 4;
+#elif CPU(ARM_THUMB2)
+ // These architecture-specific values are used to enable patching - see comment on op_put_by_id.
+ static const int patchOffsetPutByIdStructure = 10;
+ static const int patchOffsetPutByIdExternalLoad = 26;
+ static const int patchLengthPutByIdExternalLoad = 12;
+ static const int patchOffsetPutByIdPropertyMapOffset1 = 46;
+ static const int patchOffsetPutByIdPropertyMapOffset2 = 58;
+ // These architecture-specific values are used to enable patching - see comment on op_get_by_id.
+ static const int patchOffsetGetByIdStructure = 10;
+ static const int patchOffsetGetByIdBranchToSlowCase = 26;
+ static const int patchOffsetGetByIdExternalLoad = 26;
+ static const int patchLengthGetByIdExternalLoad = 12;
+ static const int patchOffsetGetByIdPropertyMapOffset1 = 46;
+ static const int patchOffsetGetByIdPropertyMapOffset2 = 58;
+ static const int patchOffsetGetByIdPutResult = 62;
+#if ENABLE(OPCODE_SAMPLING)
+ #error "OPCODE_SAMPLING is not yet supported"
+#else
+ static const int patchOffsetGetByIdSlowCaseCall = 30;
+#endif
+ static const int patchOffsetOpCallCompareToJump = 16;
+
+ static const int patchOffsetMethodCheckProtoObj = 24;
+ static const int patchOffsetMethodCheckProtoStruct = 34;
+ static const int patchOffsetMethodCheckPutFunction = 58;
+
+ // sequenceOpCall
+ static const int sequenceOpCallInstructionSpace = 12;
+ static const int sequenceOpCallConstantSpace = 2;
+ // sequenceMethodCheck
+ static const int sequenceMethodCheckInstructionSpace = 40;
+ static const int sequenceMethodCheckConstantSpace = 6;
+ // sequenceGetByIdHotPath
+ static const int sequenceGetByIdHotPathInstructionSpace = 36;
+ static const int sequenceGetByIdHotPathConstantSpace = 4;
+ // sequenceGetByIdSlowCase
static const int sequenceGetByIdSlowCaseInstructionSpace = 40;
static const int sequenceGetByIdSlowCaseConstantSpace = 2;
// sequencePutById
static const int sequencePutByIdInstructionSpace = 36;
static const int sequencePutByIdConstantSpace = 4;
+#elif CPU(MIPS)
+#if WTF_MIPS_ISA(1)
+ static const int patchOffsetPutByIdStructure = 16;
+ static const int patchOffsetPutByIdExternalLoad = 48;
+ static const int patchLengthPutByIdExternalLoad = 20;
+ static const int patchOffsetPutByIdPropertyMapOffset1 = 68;
+ static const int patchOffsetPutByIdPropertyMapOffset2 = 84;
+ static const int patchOffsetGetByIdStructure = 16;
+ static const int patchOffsetGetByIdBranchToSlowCase = 48;
+ static const int patchOffsetGetByIdExternalLoad = 48;
+ static const int patchLengthGetByIdExternalLoad = 20;
+ static const int patchOffsetGetByIdPropertyMapOffset1 = 68;
+ static const int patchOffsetGetByIdPropertyMapOffset2 = 88;
+ static const int patchOffsetGetByIdPutResult = 108;
+#if ENABLE(OPCODE_SAMPLING)
+ #error "OPCODE_SAMPLING is not yet supported"
+#else
+ static const int patchOffsetGetByIdSlowCaseCall = 44;
+#endif
+ static const int patchOffsetOpCallCompareToJump = 32;
+ static const int patchOffsetMethodCheckProtoObj = 32;
+ static const int patchOffsetMethodCheckProtoStruct = 56;
+ static const int patchOffsetMethodCheckPutFunction = 88;
+#else // WTF_MIPS_ISA(1)
+ static const int patchOffsetPutByIdStructure = 12;
+ static const int patchOffsetPutByIdExternalLoad = 44;
+ static const int patchLengthPutByIdExternalLoad = 16;
+ static const int patchOffsetPutByIdPropertyMapOffset1 = 60;
+ static const int patchOffsetPutByIdPropertyMapOffset2 = 76;
+ static const int patchOffsetGetByIdStructure = 12;
+ static const int patchOffsetGetByIdBranchToSlowCase = 44;
+ static const int patchOffsetGetByIdExternalLoad = 44;
+ static const int patchLengthGetByIdExternalLoad = 16;
+ static const int patchOffsetGetByIdPropertyMapOffset1 = 60;
+ static const int patchOffsetGetByIdPropertyMapOffset2 = 76;
+ static const int patchOffsetGetByIdPutResult = 92;
+#if ENABLE(OPCODE_SAMPLING)
+ #error "OPCODE_SAMPLING is not yet supported"
+#else
+ static const int patchOffsetGetByIdSlowCaseCall = 44;
+#endif
+ static const int patchOffsetOpCallCompareToJump = 32;
+ static const int patchOffsetMethodCheckProtoObj = 32;
+ static const int patchOffsetMethodCheckProtoStruct = 52;
+ static const int patchOffsetMethodCheckPutFunction = 84;
+#endif
#else
#error "JSVALUE32_64 not supported on this platform."
#endif
@@ -528,10 +516,7 @@ namespace JSC {
Jump emitJumpIfNotJSCell(RegisterID);
void emitJumpSlowCaseIfNotJSCell(RegisterID);
void emitJumpSlowCaseIfNotJSCell(RegisterID, int VReg);
-#if USE(JSVALUE64)
- JIT::Jump emitJumpIfImmediateNumber(RegisterID);
- JIT::Jump emitJumpIfNotImmediateNumber(RegisterID);
-#else
+#if USE(JSVALUE32_64)
JIT::Jump emitJumpIfImmediateNumber(RegisterID reg)
{
return emitJumpIfImmediateInteger(reg);
@@ -549,12 +534,11 @@ namespace JSC {
void emitJumpSlowCaseIfNotImmediateNumber(RegisterID);
void emitJumpSlowCaseIfNotImmediateIntegers(RegisterID, RegisterID, RegisterID);
-#if !USE(JSVALUE64)
+#if USE(JSVALUE32_64)
void emitFastArithDeTagImmediate(RegisterID);
Jump emitFastArithDeTagImmediateJumpIfZero(RegisterID);
#endif
void emitFastArithReTagImmediate(RegisterID src, RegisterID dest);
- void emitFastArithImmToInt(RegisterID);
void emitFastArithIntToImmNoCheck(RegisterID src, RegisterID dest);
void emitTagAsBoolImmediate(RegisterID reg);
@@ -610,12 +594,8 @@ namespace JSC {
static const int patchLengthGetByIdExternalLoad = 3;
static const int patchOffsetGetByIdPropertyMapOffset = 22;
static const int patchOffsetGetByIdPutResult = 22;
-#if ENABLE(OPCODE_SAMPLING) && USE(JIT_STUB_ARGUMENT_VA_LIST)
- static const int patchOffsetGetByIdSlowCaseCall = 31;
-#elif ENABLE(OPCODE_SAMPLING)
+#if ENABLE(OPCODE_SAMPLING)
static const int patchOffsetGetByIdSlowCaseCall = 33;
-#elif USE(JIT_STUB_ARGUMENT_VA_LIST)
- static const int patchOffsetGetByIdSlowCaseCall = 21;
#else
static const int patchOffsetGetByIdSlowCaseCall = 23;
#endif
@@ -686,19 +666,61 @@ namespace JSC {
// sequencePutById
static const int sequencePutByIdInstructionSpace = 28;
static const int sequencePutByIdConstantSpace = 3;
+#elif CPU(MIPS)
+#if WTF_MIPS_ISA(1)
+ static const int patchOffsetPutByIdStructure = 16;
+ static const int patchOffsetPutByIdExternalLoad = 48;
+ static const int patchLengthPutByIdExternalLoad = 20;
+ static const int patchOffsetPutByIdPropertyMapOffset = 68;
+ static const int patchOffsetGetByIdStructure = 16;
+ static const int patchOffsetGetByIdBranchToSlowCase = 48;
+ static const int patchOffsetGetByIdExternalLoad = 48;
+ static const int patchLengthGetByIdExternalLoad = 20;
+ static const int patchOffsetGetByIdPropertyMapOffset = 68;
+ static const int patchOffsetGetByIdPutResult = 88;
+#if ENABLE(OPCODE_SAMPLING)
+ #error "OPCODE_SAMPLING is not yet supported"
+#else
+ static const int patchOffsetGetByIdSlowCaseCall = 40;
+#endif
+ static const int patchOffsetOpCallCompareToJump = 32;
+ static const int patchOffsetMethodCheckProtoObj = 32;
+ static const int patchOffsetMethodCheckProtoStruct = 56;
+ static const int patchOffsetMethodCheckPutFunction = 88;
+#else // WTF_MIPS_ISA(1)
+ static const int patchOffsetPutByIdStructure = 12;
+ static const int patchOffsetPutByIdExternalLoad = 44;
+ static const int patchLengthPutByIdExternalLoad = 16;
+ static const int patchOffsetPutByIdPropertyMapOffset = 60;
+ static const int patchOffsetGetByIdStructure = 12;
+ static const int patchOffsetGetByIdBranchToSlowCase = 44;
+ static const int patchOffsetGetByIdExternalLoad = 44;
+ static const int patchLengthGetByIdExternalLoad = 16;
+ static const int patchOffsetGetByIdPropertyMapOffset = 60;
+ static const int patchOffsetGetByIdPutResult = 76;
+#if ENABLE(OPCODE_SAMPLING)
+ #error "OPCODE_SAMPLING is not yet supported"
+#else
+ static const int patchOffsetGetByIdSlowCaseCall = 40;
+#endif
+ static const int patchOffsetOpCallCompareToJump = 32;
+ static const int patchOffsetMethodCheckProtoObj = 32;
+ static const int patchOffsetMethodCheckProtoStruct = 52;
+ static const int patchOffsetMethodCheckPutFunction = 84;
+#endif
#endif
#endif // USE(JSVALUE32_64)
-#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL
-#define BEGIN_UNINTERRUPTED_SEQUENCE(name) beginUninterruptedSequence(name ## InstructionSpace, name ## ConstantSpace)
-#define END_UNINTERRUPTED_SEQUENCE(name) endUninterruptedSequence(name ## InstructionSpace, name ## ConstantSpace)
+#if (defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL)
+#define BEGIN_UNINTERRUPTED_SEQUENCE(name) do { beginUninterruptedSequence(name ## InstructionSpace, name ## ConstantSpace); } while (false)
+#define END_UNINTERRUPTED_SEQUENCE(name) do { endUninterruptedSequence(name ## InstructionSpace, name ## ConstantSpace); } while (false)
void beginUninterruptedSequence(int, int);
void endUninterruptedSequence(int, int);
#else
-#define BEGIN_UNINTERRUPTED_SEQUENCE(name)
-#define END_UNINTERRUPTED_SEQUENCE(name)
+#define BEGIN_UNINTERRUPTED_SEQUENCE(name) do { beginUninterruptedSequence(); } while (false)
+#define END_UNINTERRUPTED_SEQUENCE(name) do { endUninterruptedSequence(); } while (false)
#endif
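The do { ... } while (false) wrapper is the usual idiom for making a multi-statement macro parse as exactly one statement, so the BEGIN/END markers compose safely with unbraced if/else; note the non-constant-pool variants now call no-argument hooks instead of expanding to nothing. A self-contained illustration of the hazard the idiom avoids (generic C++, not JSC code):

    #include <cstdio>

    void f(int x) { std::printf("f(%d)\n", x); }
    void g(int x) { std::printf("g(%d)\n", x); }

    // Expanding to two bare statements would leave g(x) outside an
    // unbraced 'if', and a following 'else' would not even parse.
    // The do/while(false) form is a single statement in every position.
    #define SEQUENCE(x) do { f(x); g(x); } while (false)

    int main() {
        bool cond = false;
        if (cond)
            SEQUENCE(1);
        else
            SEQUENCE(2); // prints f(2) then g(2)
        return 0;
    }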
void emit_op_add(Instruction*);
@@ -709,25 +731,31 @@ namespace JSC {
void emit_op_call(Instruction*);
void emit_op_call_eval(Instruction*);
void emit_op_call_varargs(Instruction*);
+ void emit_op_call_put_result(Instruction*);
void emit_op_catch(Instruction*);
void emit_op_construct(Instruction*);
- void emit_op_construct_verify(Instruction*);
+ void emit_op_get_callee(Instruction*);
+ void emit_op_create_this(Instruction*);
void emit_op_convert_this(Instruction*);
+ void emit_op_convert_this_strict(Instruction*);
void emit_op_create_arguments(Instruction*);
void emit_op_debug(Instruction*);
void emit_op_del_by_id(Instruction*);
void emit_op_div(Instruction*);
void emit_op_end(Instruction*);
void emit_op_enter(Instruction*);
- void emit_op_enter_with_activation(Instruction*);
+ void emit_op_create_activation(Instruction*);
void emit_op_eq(Instruction*);
void emit_op_eq_null(Instruction*);
void emit_op_get_by_id(Instruction*);
+ void emit_op_get_arguments_length(Instruction*);
void emit_op_get_by_val(Instruction*);
+ void emit_op_get_argument_by_val(Instruction*);
void emit_op_get_by_pname(Instruction*);
void emit_op_get_global_var(Instruction*);
void emit_op_get_scoped_var(Instruction*);
- void emit_op_init_arguments(Instruction*);
+ void emit_op_init_lazy_reg(Instruction*);
+ void emit_op_check_has_instance(Instruction*);
void emit_op_instanceof(Instruction*);
void emit_op_jeq_null(Instruction*);
void emit_op_jfalse(Instruction*);
@@ -737,6 +765,7 @@ namespace JSC {
void emit_op_jneq_ptr(Instruction*);
void emit_op_jnless(Instruction*);
void emit_op_jless(Instruction*);
+ void emit_op_jlesseq(Instruction*, bool invert = false);
void emit_op_jnlesseq(Instruction*);
void emit_op_jsr(Instruction*);
void emit_op_jtrue(Instruction*);
@@ -755,7 +784,6 @@ namespace JSC {
void emit_op_neq(Instruction*);
void emit_op_neq_null(Instruction*);
void emit_op_new_array(Instruction*);
- void emit_op_new_error(Instruction*);
void emit_op_new_func(Instruction*);
void emit_op_new_func_exp(Instruction*);
void emit_op_new_object(Instruction*);
@@ -782,10 +810,13 @@ namespace JSC {
void emit_op_put_setter(Instruction*);
void emit_op_resolve(Instruction*);
void emit_op_resolve_base(Instruction*);
- void emit_op_resolve_global(Instruction*);
+ void emit_op_ensure_property_exists(Instruction*);
+ void emit_op_resolve_global(Instruction*, bool dynamic = false);
+ void emit_op_resolve_global_dynamic(Instruction*);
void emit_op_resolve_skip(Instruction*);
void emit_op_resolve_with_base(Instruction*);
void emit_op_ret(Instruction*);
+ void emit_op_ret_object_or_this(Instruction*);
void emit_op_rshift(Instruction*);
void emit_op_sret(Instruction*);
void emit_op_strcat(Instruction*);
@@ -797,10 +828,13 @@ namespace JSC {
void emit_op_tear_off_activation(Instruction*);
void emit_op_tear_off_arguments(Instruction*);
void emit_op_throw(Instruction*);
+ void emit_op_throw_reference_error(Instruction*);
+ void emit_op_throw_syntax_error(Instruction*);
void emit_op_to_jsnumber(Instruction*);
void emit_op_to_primitive(Instruction*);
void emit_op_unexpected_load(Instruction*);
-#if ENABLE(JIT_OPTIMIZE_MOD)
+ void emit_op_urshift(Instruction*);
+#if ENABLE(JIT_USE_SOFT_MODULO)
void softModulo();
#endif
@@ -813,19 +847,24 @@ namespace JSC {
void emitSlow_op_call_eval(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_call_varargs(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_construct(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_construct_verify(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_convert_this(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_convert_this_strict(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_div(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_eq(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_get_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_get_arguments_length(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_get_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_get_argument_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_get_by_pname(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_check_has_instance(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_instanceof(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_jfalse(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_jnless(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_jless(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_jlesseq(Instruction*, Vector<SlowCaseEntry>::iterator&, bool invert = false);
void emitSlow_op_jnlesseq(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_jtrue(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_load_varargs(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_loop_if_less(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_loop_if_lesseq(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_loop_if_true(Instruction*, Vector<SlowCaseEntry>::iterator&);
@@ -845,22 +884,19 @@ namespace JSC {
void emitSlow_op_put_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_put_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_resolve_global(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_resolve_global_dynamic(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_rshift(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_stricteq(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_sub(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_to_jsnumber(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_to_primitive(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_urshift(Instruction*, Vector<SlowCaseEntry>::iterator&);
- /* These functions are deprecated: Please use JITStubCall instead. */
- void emitPutJITStubArg(RegisterID src, unsigned argumentNumber);
-#if USE(JSVALUE32_64)
- void emitPutJITStubArg(RegisterID tag, RegisterID payload, unsigned argumentNumber);
- void emitPutJITStubArgFromVirtualRegister(unsigned src, unsigned argumentNumber, RegisterID scratch1, RegisterID scratch2);
-#else
- void emitPutJITStubArgFromVirtualRegister(unsigned src, unsigned argumentNumber, RegisterID scratch);
-#endif
- void emitPutJITStubArgConstant(unsigned value, unsigned argumentNumber);
- void emitPutJITStubArgConstant(void* value, unsigned argumentNumber);
+
+ void emitRightShift(Instruction*, bool isUnsigned);
+ void emitRightShiftSlowCase(Instruction*, Vector<SlowCaseEntry>::iterator&, bool isUnsigned);
+
+ /* This function is deprecated: please use JITStubCall instead. */
void emitGetJITStubArg(unsigned argumentNumber, RegisterID dst);
void emitInitRegister(unsigned dst);
@@ -872,6 +908,7 @@ namespace JSC {
JSValue getConstantOperand(unsigned src);
bool isOperandConstantImmediateInt(unsigned src);
+ bool isOperandConstantImmediateChar(unsigned src);
Jump getSlowCase(Vector<SlowCaseEntry>::iterator& iter)
{
@@ -895,6 +932,9 @@ namespace JSC {
void restoreReturnAddressBeforeReturn(RegisterID);
void restoreReturnAddressBeforeReturn(Address);
+ // Loads the character value of a single-character string into dst.
+ void emitLoadCharacterString(RegisterID src, RegisterID dst, JumpList& failures);
+
void emitTimeoutCheck();
#ifndef NDEBUG
void printBytecodeOperandTypes(unsigned src1, unsigned src2);
@@ -930,7 +970,7 @@ namespace JSC {
Vector<MethodCallCompilationInfo> m_methodCallCompilationInfo;
Vector<JumpTable> m_jmpTable;
- unsigned m_bytecodeIndex;
+ unsigned m_bytecodeOffset;
Vector<JSRInfo> m_jsrSites;
Vector<SlowCaseEntry> m_slowCases;
Vector<SwitchRecord> m_switches;
@@ -941,7 +981,7 @@ namespace JSC {
#if USE(JSVALUE32_64)
unsigned m_jumpTargetIndex;
- unsigned m_mappedBytecodeIndex;
+ unsigned m_mappedBytecodeOffset;
unsigned m_mappedVirtualRegisterIndex;
RegisterID m_mappedTag;
RegisterID m_mappedPayload;
@@ -956,6 +996,8 @@ namespace JSC {
int m_uninterruptedConstantSequenceBegin;
#endif
#endif
+ void* m_linkerOffset;
+ static CodePtr stringGetByValStubGenerator(JSGlobalData* globalData, ExecutablePool* pool);
} JIT_CLASS_ALIGNMENT;
inline void JIT::emit_op_loop(Instruction* currentInstruction)
diff --git a/JavaScriptCore/jit/JITArithmetic.cpp b/JavaScriptCore/jit/JITArithmetic.cpp
index 2f2ffe3..cd05f51 100644
--- a/JavaScriptCore/jit/JITArithmetic.cpp
+++ b/JavaScriptCore/jit/JITArithmetic.cpp
@@ -20,13 +20,14 @@
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "config.h"
-#include "JIT.h"
#if ENABLE(JIT)
+#if USE(JSVALUE64)
+#include "JIT.h"
#include "CodeBlock.h"
#include "JITInlineMethods.h"
@@ -46,1189 +47,6 @@ using namespace std;
namespace JSC {
-#if USE(JSVALUE32_64)
-
-void JIT::emit_op_negate(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned src = currentInstruction[2].u.operand;
-
- emitLoad(src, regT1, regT0);
-
- Jump srcNotInt = branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag));
- addSlowCase(branch32(Equal, regT0, Imm32(0)));
-
- neg32(regT0);
- emitStoreInt32(dst, regT0, (dst == src));
-
- Jump end = jump();
-
- srcNotInt.link(this);
- addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));
-
- xor32(Imm32(1 << 31), regT1);
- store32(regT1, tagFor(dst));
- if (dst != src)
- store32(regT0, payloadFor(dst));
-
- end.link(this);
-}
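In the double path above, the tag register holds the high 32 bits of the boxed double, so XORing bit 31 negates the value without touching the low word of the mantissa. The same bit trick in standalone C++, independent of the JIT:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
        double value = 3.5;
        uint64_t bits;
        std::memcpy(&bits, &value, sizeof bits);
        uint32_t high = uint32_t(bits >> 32) ^ (1u << 31); // flip sign bit
        bits = (uint64_t(high) << 32) | uint32_t(bits);
        std::memcpy(&value, &bits, sizeof value);
        std::printf("%g\n", value); // prints -3.5
        return 0;
    }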
-
-void JIT::emitSlow_op_negate(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
-
- linkSlowCase(iter); // 0 check
- linkSlowCase(iter); // double check
-
- JITStubCall stubCall(this, cti_op_negate);
- stubCall.addArgument(regT1, regT0);
- stubCall.call(dst);
-}
-
-void JIT::emit_op_jnless(Instruction* currentInstruction)
-{
- unsigned op1 = currentInstruction[1].u.operand;
- unsigned op2 = currentInstruction[2].u.operand;
- unsigned target = currentInstruction[3].u.operand;
-
- JumpList notInt32Op1;
- JumpList notInt32Op2;
-
- // Int32 less.
- if (isOperandConstantImmediateInt(op1)) {
- emitLoad(op2, regT3, regT2);
- notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
- addJump(branch32(LessThanOrEqual, regT2, Imm32(getConstantOperand(op1).asInt32())), target);
- } else if (isOperandConstantImmediateInt(op2)) {
- emitLoad(op1, regT1, regT0);
- notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- addJump(branch32(GreaterThanOrEqual, regT0, Imm32(getConstantOperand(op2).asInt32())), target);
- } else {
- emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
- notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
- addJump(branch32(GreaterThanOrEqual, regT0, regT2), target);
- }
-
- if (!supportsFloatingPoint()) {
- addSlowCase(notInt32Op1);
- addSlowCase(notInt32Op2);
- return;
- }
- Jump end = jump();
-
- // Double less.
- emitBinaryDoubleOp(op_jnless, target, op1, op2, OperandTypes(), notInt32Op1, notInt32Op2, !isOperandConstantImmediateInt(op1), isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2));
- end.link(this);
-}
-
-void JIT::emitSlow_op_jnless(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned op1 = currentInstruction[1].u.operand;
- unsigned op2 = currentInstruction[2].u.operand;
- unsigned target = currentInstruction[3].u.operand;
-
- if (!supportsFloatingPoint()) {
- if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
- linkSlowCase(iter); // int32 check
- linkSlowCase(iter); // int32 check
- } else {
- if (!isOperandConstantImmediateInt(op1)) {
- linkSlowCase(iter); // double check
- linkSlowCase(iter); // int32 check
- }
- if (isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2))
- linkSlowCase(iter); // double check
- }
-
- JITStubCall stubCall(this, cti_op_jless);
- stubCall.addArgument(op1);
- stubCall.addArgument(op2);
- stubCall.call();
- emitJumpSlowToHot(branchTest32(Zero, regT0), target);
-}
-
-void JIT::emit_op_jless(Instruction* currentInstruction)
-{
- unsigned op1 = currentInstruction[1].u.operand;
- unsigned op2 = currentInstruction[2].u.operand;
- unsigned target = currentInstruction[3].u.operand;
-
- JumpList notInt32Op1;
- JumpList notInt32Op2;
-
- // Int32 less.
- if (isOperandConstantImmediateInt(op1)) {
- emitLoad(op2, regT3, regT2);
- notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
- addJump(branch32(GreaterThan, regT2, Imm32(getConstantOperand(op1).asInt32())), target);
- } else if (isOperandConstantImmediateInt(op2)) {
- emitLoad(op1, regT1, regT0);
- notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- addJump(branch32(LessThan, regT0, Imm32(getConstantOperand(op2).asInt32())), target);
- } else {
- emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
- notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
- addJump(branch32(LessThan, regT0, regT2), target);
- }
-
- if (!supportsFloatingPoint()) {
- addSlowCase(notInt32Op1);
- addSlowCase(notInt32Op2);
- return;
- }
- Jump end = jump();
-
- // Double less.
- emitBinaryDoubleOp(op_jless, target, op1, op2, OperandTypes(), notInt32Op1, notInt32Op2, !isOperandConstantImmediateInt(op1), isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2));
- end.link(this);
-}
-
-void JIT::emitSlow_op_jless(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned op1 = currentInstruction[1].u.operand;
- unsigned op2 = currentInstruction[2].u.operand;
- unsigned target = currentInstruction[3].u.operand;
-
- if (!supportsFloatingPoint()) {
- if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
- linkSlowCase(iter); // int32 check
- linkSlowCase(iter); // int32 check
- } else {
- if (!isOperandConstantImmediateInt(op1)) {
- linkSlowCase(iter); // double check
- linkSlowCase(iter); // int32 check
- }
- if (isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2))
- linkSlowCase(iter); // double check
- }
-
- JITStubCall stubCall(this, cti_op_jless);
- stubCall.addArgument(op1);
- stubCall.addArgument(op2);
- stubCall.call();
- emitJumpSlowToHot(branchTest32(NonZero, regT0), target);
-}
-
-void JIT::emit_op_jnlesseq(Instruction* currentInstruction)
-{
- unsigned op1 = currentInstruction[1].u.operand;
- unsigned op2 = currentInstruction[2].u.operand;
- unsigned target = currentInstruction[3].u.operand;
-
- JumpList notInt32Op1;
- JumpList notInt32Op2;
-
- // Int32 less.
- if (isOperandConstantImmediateInt(op1)) {
- emitLoad(op2, regT3, regT2);
- notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
- addJump(branch32(LessThan, regT2, Imm32(getConstantOperand(op1).asInt32())), target);
- } else if (isOperandConstantImmediateInt(op2)) {
- emitLoad(op1, regT1, regT0);
- notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- addJump(branch32(GreaterThan, regT0, Imm32(getConstantOperand(op2).asInt32())), target);
- } else {
- emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
- notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
- addJump(branch32(GreaterThan, regT0, regT2), target);
- }
-
- if (!supportsFloatingPoint()) {
- addSlowCase(notInt32Op1);
- addSlowCase(notInt32Op2);
- return;
- }
- Jump end = jump();
-
- // Double less.
- emitBinaryDoubleOp(op_jnlesseq, target, op1, op2, OperandTypes(), notInt32Op1, notInt32Op2, !isOperandConstantImmediateInt(op1), isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2));
- end.link(this);
-}
-
-void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned op1 = currentInstruction[1].u.operand;
- unsigned op2 = currentInstruction[2].u.operand;
- unsigned target = currentInstruction[3].u.operand;
-
- if (!supportsFloatingPoint()) {
- if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
- linkSlowCase(iter); // int32 check
- linkSlowCase(iter); // int32 check
- } else {
- if (!isOperandConstantImmediateInt(op1)) {
- linkSlowCase(iter); // double check
- linkSlowCase(iter); // int32 check
- }
- if (isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2))
- linkSlowCase(iter); // double check
- }
-
- JITStubCall stubCall(this, cti_op_jlesseq);
- stubCall.addArgument(op1);
- stubCall.addArgument(op2);
- stubCall.call();
- emitJumpSlowToHot(branchTest32(Zero, regT0), target);
-}
-
-// LeftShift (<<)
-
-void JIT::emit_op_lshift(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
-
- if (isOperandConstantImmediateInt(op2)) {
- emitLoad(op1, regT1, regT0);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- lshift32(Imm32(getConstantOperand(op2).asInt32()), regT0);
- emitStoreInt32(dst, regT0, dst == op1);
- return;
- }
-
- emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
- if (!isOperandConstantImmediateInt(op1))
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
- lshift32(regT2, regT0);
- emitStoreInt32(dst, regT0, dst == op1 || dst == op2);
-}
-
-void JIT::emitSlow_op_lshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
-
- if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
- linkSlowCase(iter); // int32 check
- linkSlowCase(iter); // int32 check
-
- JITStubCall stubCall(this, cti_op_lshift);
- stubCall.addArgument(op1);
- stubCall.addArgument(op2);
- stubCall.call(dst);
-}
-
-// RightShift (>>)
-
-void JIT::emit_op_rshift(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
-
- if (isOperandConstantImmediateInt(op2)) {
- emitLoad(op1, regT1, regT0);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- rshift32(Imm32(getConstantOperand(op2).asInt32()), regT0);
- emitStoreInt32(dst, regT0, dst == op1);
- return;
- }
-
- emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
- if (!isOperandConstantImmediateInt(op1))
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
- rshift32(regT2, regT0);
- emitStoreInt32(dst, regT0, dst == op1 || dst == op2);
-}
-
-void JIT::emitSlow_op_rshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
-
- if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
- linkSlowCase(iter); // int32 check
- linkSlowCase(iter); // int32 check
-
- JITStubCall stubCall(this, cti_op_rshift);
- stubCall.addArgument(op1);
- stubCall.addArgument(op2);
- stubCall.call(dst);
-}
-
-// BitAnd (&)
-
-void JIT::emit_op_bitand(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
-
- unsigned op;
- int32_t constant;
- if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
- emitLoad(op, regT1, regT0);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- and32(Imm32(constant), regT0);
- emitStoreInt32(dst, regT0, (op == dst));
- return;
- }
-
- emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
- and32(regT2, regT0);
- emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
-}
-
-void JIT::emitSlow_op_bitand(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
-
- if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
- linkSlowCase(iter); // int32 check
- linkSlowCase(iter); // int32 check
-
- JITStubCall stubCall(this, cti_op_bitand);
- stubCall.addArgument(op1);
- stubCall.addArgument(op2);
- stubCall.call(dst);
-}
-
-// BitOr (|)
-
-void JIT::emit_op_bitor(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
-
- unsigned op;
- int32_t constant;
- if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
- emitLoad(op, regT1, regT0);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- or32(Imm32(constant), regT0);
- emitStoreInt32(dst, regT0, (op == dst));
- return;
- }
-
- emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
- or32(regT2, regT0);
- emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
-}
-
-void JIT::emitSlow_op_bitor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
-
- if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
- linkSlowCase(iter); // int32 check
- linkSlowCase(iter); // int32 check
-
- JITStubCall stubCall(this, cti_op_bitor);
- stubCall.addArgument(op1);
- stubCall.addArgument(op2);
- stubCall.call(dst);
-}
-
-// BitXor (^)
-
-void JIT::emit_op_bitxor(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
-
- unsigned op;
- int32_t constant;
- if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
- emitLoad(op, regT1, regT0);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- xor32(Imm32(constant), regT0);
- emitStoreInt32(dst, regT0, (op == dst));
- return;
- }
-
- emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
- xor32(regT2, regT0);
- emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
-}
-
-void JIT::emitSlow_op_bitxor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
-
- if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
- linkSlowCase(iter); // int32 check
- linkSlowCase(iter); // int32 check
-
- JITStubCall stubCall(this, cti_op_bitxor);
- stubCall.addArgument(op1);
- stubCall.addArgument(op2);
- stubCall.call(dst);
-}
-
-// BitNot (~)
-
-void JIT::emit_op_bitnot(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned src = currentInstruction[2].u.operand;
-
- emitLoad(src, regT1, regT0);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
-
- not32(regT0);
- emitStoreInt32(dst, regT0, (dst == src));
-}
-
-void JIT::emitSlow_op_bitnot(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
-
- linkSlowCase(iter); // int32 check
-
- JITStubCall stubCall(this, cti_op_bitnot);
- stubCall.addArgument(regT1, regT0);
- stubCall.call(dst);
-}
-
-// PostInc (i++)
-
-void JIT::emit_op_post_inc(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned srcDst = currentInstruction[2].u.operand;
-
- emitLoad(srcDst, regT1, regT0);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
-
- if (dst == srcDst) // x = x++ is a noop for ints.
- return;
-
- emitStoreInt32(dst, regT0);
-
- addSlowCase(branchAdd32(Overflow, Imm32(1), regT0));
- emitStoreInt32(srcDst, regT0, true);
-}
-
-void JIT::emitSlow_op_post_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned srcDst = currentInstruction[2].u.operand;
-
- linkSlowCase(iter); // int32 check
- if (dst != srcDst)
- linkSlowCase(iter); // overflow check
-
- JITStubCall stubCall(this, cti_op_post_inc);
- stubCall.addArgument(srcDst);
- stubCall.addArgument(Imm32(srcDst));
- stubCall.call(dst);
-}
-
-// PostDec (i--)
-
-void JIT::emit_op_post_dec(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned srcDst = currentInstruction[2].u.operand;
-
- emitLoad(srcDst, regT1, regT0);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
-
- if (dst == srcDst) // x = x-- is a noop for ints.
- return;
-
- emitStoreInt32(dst, regT0);
-
- addSlowCase(branchSub32(Overflow, Imm32(1), regT0));
- emitStoreInt32(srcDst, regT0, true);
-}
-
-void JIT::emitSlow_op_post_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned srcDst = currentInstruction[2].u.operand;
-
- linkSlowCase(iter); // int32 check
- if (dst != srcDst)
- linkSlowCase(iter); // overflow check
-
- JITStubCall stubCall(this, cti_op_post_dec);
- stubCall.addArgument(srcDst);
- stubCall.addArgument(Imm32(srcDst));
- stubCall.call(dst);
-}
-
-// PreInc (++i)
-
-void JIT::emit_op_pre_inc(Instruction* currentInstruction)
-{
- unsigned srcDst = currentInstruction[1].u.operand;
-
- emitLoad(srcDst, regT1, regT0);
-
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- addSlowCase(branchAdd32(Overflow, Imm32(1), regT0));
- emitStoreInt32(srcDst, regT0, true);
-}
-
-void JIT::emitSlow_op_pre_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned srcDst = currentInstruction[1].u.operand;
-
- linkSlowCase(iter); // int32 check
- linkSlowCase(iter); // overflow check
-
- JITStubCall stubCall(this, cti_op_pre_inc);
- stubCall.addArgument(srcDst);
- stubCall.call(srcDst);
-}
-
-// PreDec (--i)
-
-void JIT::emit_op_pre_dec(Instruction* currentInstruction)
-{
- unsigned srcDst = currentInstruction[1].u.operand;
-
- emitLoad(srcDst, regT1, regT0);
-
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- addSlowCase(branchSub32(Overflow, Imm32(1), regT0));
- emitStoreInt32(srcDst, regT0, true);
-}
-
-void JIT::emitSlow_op_pre_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned srcDst = currentInstruction[1].u.operand;
-
- linkSlowCase(iter); // int32 check
- linkSlowCase(iter); // overflow check
-
- JITStubCall stubCall(this, cti_op_pre_dec);
- stubCall.addArgument(srcDst);
- stubCall.call(srcDst);
-}
-
-// Addition (+)
-
-void JIT::emit_op_add(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
- OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
-
- if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
- JITStubCall stubCall(this, cti_op_add);
- stubCall.addArgument(op1);
- stubCall.addArgument(op2);
- stubCall.call(dst);
- return;
- }
-
- JumpList notInt32Op1;
- JumpList notInt32Op2;
-
- unsigned op;
- int32_t constant;
- if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
- emitAdd32Constant(dst, op, constant, op == op1 ? types.first() : types.second());
- return;
- }
-
- emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
- notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
-
- // Int32 case.
- addSlowCase(branchAdd32(Overflow, regT2, regT0));
- emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
-
- if (!supportsFloatingPoint()) {
- addSlowCase(notInt32Op1);
- addSlowCase(notInt32Op2);
- return;
- }
- Jump end = jump();
-
- // Double case.
- emitBinaryDoubleOp(op_add, dst, op1, op2, types, notInt32Op1, notInt32Op2);
- end.link(this);
-}
-
-void JIT::emitAdd32Constant(unsigned dst, unsigned op, int32_t constant, ResultType opType)
-{
- // Int32 case.
- emitLoad(op, regT1, regT0);
- Jump notInt32 = branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag));
- addSlowCase(branchAdd32(Overflow, Imm32(constant), regT0));
- emitStoreInt32(dst, regT0, (op == dst));
-
- // Double case.
- if (!supportsFloatingPoint()) {
- addSlowCase(notInt32);
- return;
- }
- Jump end = jump();
-
- notInt32.link(this);
- if (!opType.definitelyIsNumber())
- addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));
- move(Imm32(constant), regT2);
- convertInt32ToDouble(regT2, fpRegT0);
- emitLoadDouble(op, fpRegT1);
- addDouble(fpRegT1, fpRegT0);
- emitStoreDouble(dst, fpRegT0);
-
- end.link(this);
-}
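branchAdd32(Overflow, ...) relies on the CPU's overflow flag to divert to the slow case; the equivalent portable check, assuming GCC/Clang's __builtin_add_overflow, looks like this sketch:

    #include <cstdint>
    #include <cstdio>

    int main() {
        int32_t value = INT32_MAX, constant = 1, sum;
        if (__builtin_add_overflow(value, constant, &sum))
            std::printf("overflow -> slow case redoes the add as a double\n");
        else
            std::printf("int32 fast path: %d\n", sum);
        return 0;
    }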
-
-void JIT::emitSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
- OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
-
- if (!types.first().mightBeNumber() || !types.second().mightBeNumber())
- return;
-
- unsigned op;
- int32_t constant;
- if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
- linkSlowCase(iter); // overflow check
-
- if (!supportsFloatingPoint())
- linkSlowCase(iter); // non-sse case
- else {
- ResultType opType = op == op1 ? types.first() : types.second();
- if (!opType.definitelyIsNumber())
- linkSlowCase(iter); // double check
- }
- } else {
- linkSlowCase(iter); // overflow check
-
- if (!supportsFloatingPoint()) {
- linkSlowCase(iter); // int32 check
- linkSlowCase(iter); // int32 check
- } else {
- if (!types.first().definitelyIsNumber())
- linkSlowCase(iter); // double check
-
- if (!types.second().definitelyIsNumber()) {
- linkSlowCase(iter); // int32 check
- linkSlowCase(iter); // double check
- }
- }
- }
-
- JITStubCall stubCall(this, cti_op_add);
- stubCall.addArgument(op1);
- stubCall.addArgument(op2);
- stubCall.call(dst);
-}
-
-// Subtraction (-)
-
-void JIT::emit_op_sub(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
- OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
-
- JumpList notInt32Op1;
- JumpList notInt32Op2;
-
- if (isOperandConstantImmediateInt(op2)) {
- emitSub32Constant(dst, op1, getConstantOperand(op2).asInt32(), types.first());
- return;
- }
-
- emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
- notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
-
- // Int32 case.
- addSlowCase(branchSub32(Overflow, regT2, regT0));
- emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
-
- if (!supportsFloatingPoint()) {
- addSlowCase(notInt32Op1);
- addSlowCase(notInt32Op2);
- return;
- }
- Jump end = jump();
-
- // Double case.
- emitBinaryDoubleOp(op_sub, dst, op1, op2, types, notInt32Op1, notInt32Op2);
- end.link(this);
-}
-
-void JIT::emitSub32Constant(unsigned dst, unsigned op, int32_t constant, ResultType opType)
-{
- // Int32 case.
- emitLoad(op, regT1, regT0);
- Jump notInt32 = branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag));
- addSlowCase(branchSub32(Overflow, Imm32(constant), regT0));
- emitStoreInt32(dst, regT0, (op == dst));
-
- // Double case.
- if (!supportsFloatingPoint()) {
- addSlowCase(notInt32);
- return;
- }
- Jump end = jump();
-
- notInt32.link(this);
- if (!opType.definitelyIsNumber())
- addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));
- move(Imm32(constant), regT2);
- convertInt32ToDouble(regT2, fpRegT0);
- emitLoadDouble(op, fpRegT1);
- subDouble(fpRegT0, fpRegT1);
- emitStoreDouble(dst, fpRegT1);
-
- end.link(this);
-}
-
-void JIT::emitSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
- OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
-
- if (isOperandConstantImmediateInt(op2)) {
- linkSlowCase(iter); // overflow check
-
- if (!supportsFloatingPoint() || !types.first().definitelyIsNumber())
- linkSlowCase(iter); // int32 or double check
- } else {
- linkSlowCase(iter); // overflow check
-
- if (!supportsFloatingPoint()) {
- linkSlowCase(iter); // int32 check
- linkSlowCase(iter); // int32 check
- } else {
- if (!types.first().definitelyIsNumber())
- linkSlowCase(iter); // double check
-
- if (!types.second().definitelyIsNumber()) {
- linkSlowCase(iter); // int32 check
- linkSlowCase(iter); // double check
- }
- }
- }
-
- JITStubCall stubCall(this, cti_op_sub);
- stubCall.addArgument(op1);
- stubCall.addArgument(op2);
- stubCall.call(dst);
-}
-
-void JIT::emitBinaryDoubleOp(OpcodeID opcodeID, unsigned dst, unsigned op1, unsigned op2, OperandTypes types, JumpList& notInt32Op1, JumpList& notInt32Op2, bool op1IsInRegisters, bool op2IsInRegisters)
-{
- JumpList end;
-
- if (!notInt32Op1.empty()) {
- // Double case 1: Op1 is not int32; Op2 is unknown.
- notInt32Op1.link(this);
-
- ASSERT(op1IsInRegisters);
-
- // Verify Op1 is double.
- if (!types.first().definitelyIsNumber())
- addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));
-
- if (!op2IsInRegisters)
- emitLoad(op2, regT3, regT2);
-
- Jump doubleOp2 = branch32(Below, regT3, Imm32(JSValue::LowestTag));
-
- if (!types.second().definitelyIsNumber())
- addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
-
- convertInt32ToDouble(regT2, fpRegT0);
- Jump doTheMath = jump();
-
- // Load Op2 as double into double register.
- doubleOp2.link(this);
- emitLoadDouble(op2, fpRegT0);
-
- // Do the math.
- doTheMath.link(this);
- switch (opcodeID) {
- case op_mul:
- emitLoadDouble(op1, fpRegT2);
- mulDouble(fpRegT2, fpRegT0);
- emitStoreDouble(dst, fpRegT0);
- break;
- case op_add:
- emitLoadDouble(op1, fpRegT2);
- addDouble(fpRegT2, fpRegT0);
- emitStoreDouble(dst, fpRegT0);
- break;
- case op_sub:
- emitLoadDouble(op1, fpRegT1);
- subDouble(fpRegT0, fpRegT1);
- emitStoreDouble(dst, fpRegT1);
- break;
- case op_div:
- emitLoadDouble(op1, fpRegT1);
- divDouble(fpRegT0, fpRegT1);
- emitStoreDouble(dst, fpRegT1);
- break;
- case op_jnless:
- emitLoadDouble(op1, fpRegT2);
- addJump(branchDouble(DoubleLessThanOrEqualOrUnordered, fpRegT0, fpRegT2), dst);
- break;
- case op_jless:
- emitLoadDouble(op1, fpRegT2);
- addJump(branchDouble(DoubleLessThan, fpRegT2, fpRegT0), dst);
- break;
- case op_jnlesseq:
- emitLoadDouble(op1, fpRegT2);
- addJump(branchDouble(DoubleLessThanOrUnordered, fpRegT0, fpRegT2), dst);
- break;
- default:
- ASSERT_NOT_REACHED();
- }
-
- if (!notInt32Op2.empty())
- end.append(jump());
- }
-
- if (!notInt32Op2.empty()) {
- // Double case 2: Op1 is int32; Op2 is not int32.
- notInt32Op2.link(this);
-
- ASSERT(op2IsInRegisters);
-
- if (!op1IsInRegisters)
- emitLoadPayload(op1, regT0);
-
- convertInt32ToDouble(regT0, fpRegT0);
-
- // Verify op2 is double.
- if (!types.second().definitelyIsNumber())
- addSlowCase(branch32(Above, regT3, Imm32(JSValue::LowestTag)));
-
- // Do the math.
- switch (opcodeID) {
- case op_mul:
- emitLoadDouble(op2, fpRegT2);
- mulDouble(fpRegT2, fpRegT0);
- emitStoreDouble(dst, fpRegT0);
- break;
- case op_add:
- emitLoadDouble(op2, fpRegT2);
- addDouble(fpRegT2, fpRegT0);
- emitStoreDouble(dst, fpRegT0);
- break;
- case op_sub:
- emitLoadDouble(op2, fpRegT2);
- subDouble(fpRegT2, fpRegT0);
- emitStoreDouble(dst, fpRegT0);
- break;
- case op_div:
- emitLoadDouble(op2, fpRegT2);
- divDouble(fpRegT2, fpRegT0);
- emitStoreDouble(dst, fpRegT0);
- break;
- case op_jnless:
- emitLoadDouble(op2, fpRegT1);
- addJump(branchDouble(DoubleLessThanOrEqualOrUnordered, fpRegT1, fpRegT0), dst);
- break;
- case op_jless:
- emitLoadDouble(op2, fpRegT1);
- addJump(branchDouble(DoubleLessThan, fpRegT0, fpRegT1), dst);
- break;
- case op_jnlesseq:
- emitLoadDouble(op2, fpRegT1);
- addJump(branchDouble(DoubleLessThanOrUnordered, fpRegT1, fpRegT0), dst);
- break;
- default:
- ASSERT_NOT_REACHED();
- }
- }
-
- end.link(this);
-}
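// A note on the tag checks in emitBinaryDoubleOp (illustrative sketch of the
// JSVALUE32_64 encoding): a value is a 32-bit tag word plus a 32-bit payload,
// and all the non-double tags (Int32Tag, CellTag, ...) sit at the very top of
// the tag range. A tag word below JSValue::LowestTag can only be the high
// half of an IEEE double, which is why "Below LowestTag" selects the double
// path, "NotEqual Int32Tag" selects the int32 slow case, and "Above
// LowestTag" in the guards means "not a number at all". Roughly:
//
//     bool tagIsDouble(uint32_t tag) { return tag < JSValue::LowestTag; }
//     bool tagIsInt32(uint32_t tag)  { return tag == JSValue::Int32Tag; }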
-
-// Multiplication (*)
-
-void JIT::emit_op_mul(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
- OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
-
- JumpList notInt32Op1;
- JumpList notInt32Op2;
-
- emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
- notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
-
- // Int32 case.
- move(regT0, regT3);
- addSlowCase(branchMul32(Overflow, regT2, regT0));
- addSlowCase(branchTest32(Zero, regT0));
- emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
-
- if (!supportsFloatingPoint()) {
- addSlowCase(notInt32Op1);
- addSlowCase(notInt32Op2);
- return;
- }
- Jump end = jump();
-
- // Double case.
- emitBinaryDoubleOp(op_mul, dst, op1, op2, types, notInt32Op1, notInt32Op2);
- end.link(this);
-}
-
-void JIT::emitSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
- OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
-
- Jump overflow = getSlowCase(iter); // overflow check
- linkSlowCase(iter); // zero result check
-
- Jump negZero = branchOr32(Signed, regT2, regT3);
- emitStoreInt32(dst, Imm32(0), (op1 == dst || op2 == dst));
-
- emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_mul));
-
- negZero.link(this);
- overflow.link(this);
-
- if (!supportsFloatingPoint()) {
- linkSlowCase(iter); // int32 check
- linkSlowCase(iter); // int32 check
- }
-
- if (supportsFloatingPoint()) {
- if (!types.first().definitelyIsNumber())
- linkSlowCase(iter); // double check
-
- if (!types.second().definitelyIsNumber()) {
- linkSlowCase(iter); // int32 check
- linkSlowCase(iter); // double check
- }
- }
-
- Label jitStubCall(this);
- JITStubCall stubCall(this, cti_op_mul);
- stubCall.addArgument(op1);
- stubCall.addArgument(op2);
- stubCall.call(dst);
-}
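// Why the zero-product slow case exists (a minimal sketch, assuming the
// IEEE-754 semantics ECMAScript requires): an int32 multiply cannot represent
// -0, but JS distinguishes it, e.g. (-3 * 0) must be -0. The slow path ORs
// the saved operand payloads and tests the sign bit: a zero product with a
// negative operand is stored as -0, anything else re-enters the hot path.
//
//     double jsMulSketch(int32_t a, int32_t b) // hypothetical helper
//     {
//         int32_t p = a * b;          // overflow handled elsewhere
//         if (!p && ((a | b) < 0))
//             return -0.0;            // the sign must be preserved
//         return p;
//     }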
-
-// Division (/)
-
-void JIT::emit_op_div(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
- OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
-
- if (!supportsFloatingPoint()) {
- addSlowCase(jump());
- return;
- }
-
- // Int32 divide.
- JumpList notInt32Op1;
- JumpList notInt32Op2;
-
- JumpList end;
-
- emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
-
- notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
-
- convertInt32ToDouble(regT0, fpRegT0);
- convertInt32ToDouble(regT2, fpRegT1);
- divDouble(fpRegT1, fpRegT0);
-
- JumpList doubleResult;
- branchConvertDoubleToInt32(fpRegT0, regT0, doubleResult, fpRegT1);
-
- // Int32 result.
- emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
- end.append(jump());
-
- // Double result.
- doubleResult.link(this);
- emitStoreDouble(dst, fpRegT0);
- end.append(jump());
-
- // Double divide.
- emitBinaryDoubleOp(op_div, dst, op1, op2, types, notInt32Op1, notInt32Op2);
- end.link(this);
-}
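// The int32/double split above, sketched under the assumption that
// branchConvertDoubleToInt32 bails out on any inexact or -0 result: the
// divide is always performed in double arithmetic, and the quotient is
// stored back as an int32 only when the round trip is exact:
//
//     double q = (double)a / (double)b;
//     int32_t i = (int32_t)q;
//     bool storeAsInt32 = ((double)i == q) && !(i == 0 && std::signbit(q));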
-
-void JIT::emitSlow_op_div(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
- OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
-
- if (!supportsFloatingPoint())
- linkSlowCase(iter);
- else {
- if (!types.first().definitelyIsNumber())
- linkSlowCase(iter); // double check
-
- if (!types.second().definitelyIsNumber()) {
- linkSlowCase(iter); // int32 check
- linkSlowCase(iter); // double check
- }
- }
-
- JITStubCall stubCall(this, cti_op_div);
- stubCall.addArgument(op1);
- stubCall.addArgument(op2);
- stubCall.call(dst);
-}
-
-// Mod (%)
-
-/* ------------------------------ BEGIN: OP_MOD ------------------------------ */
-
-#if CPU(X86) || CPU(X86_64)
-
-void JIT::emit_op_mod(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
-
- if (isOperandConstantImmediateInt(op2) && getConstantOperand(op2).asInt32() != 0) {
- emitLoad(op1, X86Registers::edx, X86Registers::eax);
- move(Imm32(getConstantOperand(op2).asInt32()), X86Registers::ecx);
- addSlowCase(branch32(NotEqual, X86Registers::edx, Imm32(JSValue::Int32Tag)));
- if (getConstantOperand(op2).asInt32() == -1)
- addSlowCase(branch32(Equal, X86Registers::eax, Imm32(0x80000000))); // -2147483648 / -1 => EXC_ARITHMETIC
- } else {
- emitLoad2(op1, X86Registers::edx, X86Registers::eax, op2, X86Registers::ebx, X86Registers::ecx);
- addSlowCase(branch32(NotEqual, X86Registers::edx, Imm32(JSValue::Int32Tag)));
- addSlowCase(branch32(NotEqual, X86Registers::ebx, Imm32(JSValue::Int32Tag)));
-
- addSlowCase(branch32(Equal, X86Registers::eax, Imm32(0x80000000))); // -2147483648 / -1 => EXC_ARITHMETIC
- addSlowCase(branch32(Equal, X86Registers::ecx, Imm32(0))); // divide by 0
- }
-
- move(X86Registers::eax, X86Registers::ebx); // Save dividend payload, in case of 0.
- m_assembler.cdq();
- m_assembler.idivl_r(X86Registers::ecx);
-
- // If the remainder is zero and the dividend is negative, the result is -0.
- Jump storeResult1 = branchTest32(NonZero, X86Registers::edx);
- Jump storeResult2 = branchTest32(Zero, X86Registers::ebx, Imm32(0x80000000)); // not negative
- emitStore(dst, jsNumber(m_globalData, -0.0));
- Jump end = jump();
-
- storeResult1.link(this);
- storeResult2.link(this);
- emitStoreInt32(dst, X86Registers::edx, (op1 == dst || op2 == dst));
- end.link(this);
-}
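// A cdq/idivl recap (illustrative, from the x86 manuals): cdq sign-extends
// eax into edx:eax, and idivl leaves the quotient in eax and the remainder
// in edx. The extra branches above cover the one case an integer remainder
// gets wrong: JS gives the remainder the sign of the dividend, so a zero
// remainder from a negative dividend must be -0.
//
//     -4 % 2   // -0 in JS; idivl yields edx == 0, the sign is lost
//     -5 % 2   // -1; a nonzero remainder already carries the sign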
-
-void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
-
- if (isOperandConstantImmediateInt(op2) && getConstantOperand(op2).asInt32() != 0) {
- linkSlowCase(iter); // int32 check
- if (getConstantOperand(op2).asInt32() == -1)
- linkSlowCase(iter); // 0x80000000 check
- } else {
- linkSlowCase(iter); // int32 check
- linkSlowCase(iter); // int32 check
- linkSlowCase(iter); // 0 check
- linkSlowCase(iter); // 0x80000000 check
- }
-
- JITStubCall stubCall(this, cti_op_mod);
- stubCall.addArgument(op1);
- stubCall.addArgument(op2);
- stubCall.call(dst);
-}
-
-#else // CPU(X86) || CPU(X86_64)
-
-void JIT::emit_op_mod(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
-
-#if ENABLE(JIT_OPTIMIZE_MOD)
- emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
-
- addSlowCase(branch32(Equal, regT2, Imm32(0)));
-
- emitNakedCall(m_globalData->jitStubs.ctiSoftModulo());
-
- emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
-#else
- JITStubCall stubCall(this, cti_op_mod);
- stubCall.addArgument(op1);
- stubCall.addArgument(op2);
- stubCall.call(dst);
-#endif
-}
-
-void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
-#if ENABLE(JIT_OPTIMIZE_MOD)
- unsigned result = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
- linkSlowCase(iter);
- linkSlowCase(iter);
- linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_mod);
- stubCall.addArgument(op1);
- stubCall.addArgument(op2);
- stubCall.call(result);
-#else
- ASSERT_NOT_REACHED();
-#endif
-}
-
-#endif // CPU(X86) || CPU(X86_64)
-
-/* ------------------------------ END: OP_MOD ------------------------------ */
-
-#else // USE(JSVALUE32_64)
-
void JIT::emit_op_lshift(Instruction* currentInstruction)
{
unsigned result = currentInstruction[1].u.operand;
@@ -1242,10 +60,6 @@ void JIT::emit_op_lshift(Instruction* currentInstruction)
emitFastArithImmToInt(regT0);
emitFastArithImmToInt(regT2);
lshift32(regT2, regT0);
-#if USE(JSVALUE32)
- addSlowCase(branchAdd32(Overflow, regT0, regT0));
- signExtend32ToPtr(regT0, regT0);
-#endif
emitFastArithReTagImmediate(regT0, regT0);
emitPutVirtualRegister(result);
}
@@ -1256,20 +70,10 @@ void JIT::emitSlow_op_lshift(Instruction* currentInstruction, Vector<SlowCaseEnt
unsigned op1 = currentInstruction[2].u.operand;
unsigned op2 = currentInstruction[3].u.operand;
-#if USE(JSVALUE64)
UNUSED_PARAM(op1);
UNUSED_PARAM(op2);
linkSlowCase(iter);
linkSlowCase(iter);
-#else
- // If we are limited to 32-bit immediates there is a third slow case, which requires the operands to have been reloaded.
- Jump notImm1 = getSlowCase(iter);
- Jump notImm2 = getSlowCase(iter);
- linkSlowCase(iter);
- emitGetVirtualRegisters(op1, regT0, op2, regT2);
- notImm1.link(this);
- notImm2.link(this);
-#endif
JITStubCall stubCall(this, cti_op_lshift);
stubCall.addArgument(regT0);
stubCall.addArgument(regT2);
@@ -1292,20 +96,11 @@ void JIT::emit_op_rshift(Instruction* currentInstruction)
emitGetVirtualRegisters(op1, regT0, op2, regT2);
if (supportsFloatingPointTruncate()) {
Jump lhsIsInt = emitJumpIfImmediateInteger(regT0);
-#if USE(JSVALUE64)
// supportsFloatingPoint() && USE(JSVALUE64) => 3 SlowCases
addSlowCase(emitJumpIfNotImmediateNumber(regT0));
addPtr(tagTypeNumberRegister, regT0);
movePtrToDouble(regT0, fpRegT0);
addSlowCase(branchTruncateDoubleToInt32(fpRegT0, regT0));
-#else
- // supportsFloatingPoint() && !USE(JSVALUE64) => 5 SlowCases (of which 1 IfNotJSCell)
- emitJumpSlowCaseIfNotJSCell(regT0, op1);
- addSlowCase(checkStructure(regT0, m_globalData->numberStructure.get()));
- loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
- addSlowCase(branchTruncateDoubleToInt32(fpRegT0, regT0));
- addSlowCase(branchAdd32(Overflow, regT0, regT0));
-#endif
lhsIsInt.link(this);
emitJumpSlowCaseIfNotImmediateInteger(regT2);
} else {
@@ -1315,15 +110,8 @@ void JIT::emit_op_rshift(Instruction* currentInstruction)
}
emitFastArithImmToInt(regT2);
rshift32(regT2, regT0);
-#if USE(JSVALUE32)
- signExtend32ToPtr(regT0, regT0);
-#endif
}
-#if USE(JSVALUE64)
emitFastArithIntToImmNoCheck(regT0, regT0);
-#else
- orPtr(Imm32(JSImmediate::TagTypeNumber), regT0);
-#endif
emitPutVirtualRegister(result);
}
@@ -1341,17 +129,9 @@ void JIT::emitSlow_op_rshift(Instruction* currentInstruction, Vector<SlowCaseEnt
stubCall.addArgument(op2, regT2);
} else {
if (supportsFloatingPointTruncate()) {
-#if USE(JSVALUE64)
- linkSlowCase(iter);
- linkSlowCase(iter);
- linkSlowCase(iter);
-#else
- linkSlowCaseIfNotJSCell(iter, op1);
linkSlowCase(iter);
linkSlowCase(iter);
linkSlowCase(iter);
- linkSlowCase(iter);
-#endif
// We're reloading op1 to regT0 as we can no longer guarantee that
// we have not munged the operand. It may have already been shifted
// correctly, but it still will not have been tagged.
@@ -1368,6 +148,100 @@ void JIT::emitSlow_op_rshift(Instruction* currentInstruction, Vector<SlowCaseEnt
stubCall.call(result);
}
+void JIT::emit_op_urshift(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ // The slow case of urshift makes assumptions about which registers hold
+ // the shift arguments, so any changes here must be mirrored there as well.
+ if (isOperandConstantImmediateInt(op2)) {
+ emitGetVirtualRegister(op1, regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ emitFastArithImmToInt(regT0);
+ int shift = getConstantOperand(op2).asInt32();
+ if (shift)
+ urshift32(Imm32(shift & 0x1f), regT0);
+ // A negative shift amount, or one that is a multiple of 32, leaves the
+ // value unshifted; the operation then amounts to a toUint32 conversion,
+ // which can produce a value too large to represent as an immediate int.
+ if (shift < 0 || !(shift & 31))
+ addSlowCase(branch32(LessThan, regT0, Imm32(0)));
+ emitFastArithReTagImmediate(regT0, regT0);
+ emitPutVirtualRegister(dst, regT0);
+ return;
+ }
+ emitGetVirtualRegisters(op1, regT0, op2, regT1);
+ if (!isOperandConstantImmediateInt(op1))
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT1);
+ emitFastArithImmToInt(regT0);
+ emitFastArithImmToInt(regT1);
+ urshift32(regT1, regT0);
+ addSlowCase(branch32(LessThan, regT0, Imm32(0)));
+ emitFastArithReTagImmediate(regT0, regT0);
+ emitPutVirtualRegister(dst, regT0);
+}
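// Why the LessThan-zero checks fall back to the slow case (a minimal
// sketch): >>> produces a uint32, but the immediate-int encoding only holds
// int32 values, so a result with its top bit set cannot be boxed on the fast
// path. That can only happen when the effective shift is zero, i.e. the
// constant is a multiple of 32 (or, conservatively, negative):
//
//     uint32_t r = (uint32_t)x >> (shift & 31);
//     bool boxable = (int32_t)r >= 0;    // top bit must be clear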
+
+void JIT::emitSlow_op_urshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+ if (isOperandConstantImmediateInt(op2)) {
+ int shift = getConstantOperand(op2).asInt32();
+ // op1 = regT0
+ linkSlowCase(iter); // int32 check
+ if (supportsFloatingPointTruncate()) {
+ JumpList failures;
+ failures.append(emitJumpIfNotImmediateNumber(regT0)); // op1 is not a double
+ addPtr(tagTypeNumberRegister, regT0);
+ movePtrToDouble(regT0, fpRegT0);
+ failures.append(branchTruncateDoubleToInt32(fpRegT0, regT0));
+ if (shift)
+ urshift32(Imm32(shift & 0x1f), regT0);
+ if (shift < 0 || !(shift & 31))
+ failures.append(branch32(LessThan, regT0, Imm32(0)));
+ emitFastArithReTagImmediate(regT0, regT0);
+ emitPutVirtualRegister(dst, regT0);
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_rshift));
+ failures.link(this);
+ }
+ if (shift < 0 || !(shift & 31))
+ linkSlowCase(iter); // failed to box in hot path
+ } else {
+ // op1 = regT0
+ // op2 = regT1
+ if (!isOperandConstantImmediateInt(op1)) {
+ linkSlowCase(iter); // int32 check -- op1 is not an int
+ if (supportsFloatingPointTruncate()) {
+ JumpList failures;
+ failures.append(emitJumpIfNotImmediateNumber(regT0)); // op1 is not a double
+ addPtr(tagTypeNumberRegister, regT0);
+ movePtrToDouble(regT0, fpRegT0);
+ failures.append(branchTruncateDoubleToInt32(fpRegT0, regT0));
+ failures.append(emitJumpIfNotImmediateInteger(regT1)); // op2 is not an int
+ emitFastArithImmToInt(regT1);
+ urshift32(regT1, regT0);
+ failures.append(branch32(LessThan, regT0, Imm32(0)));
+ emitFastArithReTagImmediate(regT0, regT0);
+ emitPutVirtualRegister(dst, regT0);
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_rshift));
+ failures.link(this);
+ }
+ }
+
+ linkSlowCase(iter); // int32 check - op2 is not an int
+ linkSlowCase(iter); // Can't represent unsigned result as an immediate
+ }
+
+ JITStubCall stubCall(this, cti_op_urshift);
+ stubCall.addArgument(op1, regT0);
+ stubCall.addArgument(op2, regT1);
+ stubCall.call(dst);
+}
+
void JIT::emit_op_jnless(Instruction* currentInstruction)
{
unsigned op1 = currentInstruction[1].u.operand;
@@ -1379,23 +253,33 @@ void JIT::emit_op_jnless(Instruction* currentInstruction)
// - constant int immediate to int immediate
// - int immediate to int immediate
+ if (isOperandConstantImmediateChar(op1)) {
+ emitGetVirtualRegister(op2, regT0);
+ addSlowCase(emitJumpIfNotJSCell(regT0));
+ JumpList failures;
+ emitLoadCharacterString(regT0, regT0, failures);
+ addSlowCase(failures);
+ addJump(branch32(LessThanOrEqual, regT0, Imm32(asString(getConstantOperand(op1))->tryGetValue()[0])), target);
+ return;
+ }
+ if (isOperandConstantImmediateChar(op2)) {
+ emitGetVirtualRegister(op1, regT0);
+ addSlowCase(emitJumpIfNotJSCell(regT0));
+ JumpList failures;
+ emitLoadCharacterString(regT0, regT0, failures);
+ addSlowCase(failures);
+ addJump(branch32(GreaterThanOrEqual, regT0, Imm32(asString(getConstantOperand(op2))->tryGetValue()[0])), target);
+ return;
+ }
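// The two blocks above add a fast path for comparisons against a constant
// single-character string: the cell check plus the failures list from
// emitLoadCharacterString route anything that is not a length-1 string to
// the stub call, after which the comparison reduces to a branch32 on the two
// UTF-16 code units. Because the loaded register holds the non-constant
// operand, the branch conditions appear mirrored.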
if (isOperandConstantImmediateInt(op2)) {
emitGetVirtualRegister(op1, regT0);
emitJumpSlowCaseIfNotImmediateInteger(regT0);
-#if USE(JSVALUE64)
int32_t op2imm = getConstantOperandImmediateInt(op2);
-#else
- int32_t op2imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)));
-#endif
addJump(branch32(GreaterThanOrEqual, regT0, Imm32(op2imm)), target);
} else if (isOperandConstantImmediateInt(op1)) {
emitGetVirtualRegister(op2, regT1);
emitJumpSlowCaseIfNotImmediateInteger(regT1);
-#if USE(JSVALUE64)
int32_t op1imm = getConstantOperandImmediateInt(op1);
-#else
- int32_t op1imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op1)));
-#endif
addJump(branch32(LessThanOrEqual, regT1, Imm32(op1imm)), target);
} else {
emitGetVirtualRegisters(op1, regT0, op2, regT1);
@@ -1416,26 +300,29 @@ void JIT::emitSlow_op_jnless(Instruction* currentInstruction, Vector<SlowCaseEnt
// - floating-point number to constant int immediate
// - constant int immediate to floating-point number
// - floating-point number to floating-point number.
+ if (isOperandConstantImmediateChar(op1) || isOperandConstantImmediateChar(op2)) {
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, cti_op_jless);
+ stubCall.addArgument(op1, regT0);
+ stubCall.addArgument(op2, regT1);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(Zero, regT0), target);
+ return;
+ }
if (isOperandConstantImmediateInt(op2)) {
linkSlowCase(iter);
if (supportsFloatingPoint()) {
-#if USE(JSVALUE64)
Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
addPtr(tagTypeNumberRegister, regT0);
movePtrToDouble(regT0, fpRegT0);
-#else
- Jump fail1;
- if (!m_codeBlock->isKnownNotImmediate(op1))
- fail1 = emitJumpIfNotJSCell(regT0);
- Jump fail2 = checkStructure(regT0, m_globalData->numberStructure.get());
- loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
-#endif
-
int32_t op2imm = getConstantOperand(op2).asInt32();
-
+
move(Imm32(op2imm), regT1);
convertInt32ToDouble(regT1, fpRegT1);
@@ -1443,13 +330,7 @@ void JIT::emitSlow_op_jnless(Instruction* currentInstruction, Vector<SlowCaseEnt
emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));
-#if USE(JSVALUE64)
fail1.link(this);
-#else
- if (!m_codeBlock->isKnownNotImmediate(op1))
- fail1.link(this);
- fail2.link(this);
-#endif
}
JITStubCall stubCall(this, cti_op_jless);
@@ -1462,21 +343,12 @@ void JIT::emitSlow_op_jnless(Instruction* currentInstruction, Vector<SlowCaseEnt
linkSlowCase(iter);
if (supportsFloatingPoint()) {
-#if USE(JSVALUE64)
Jump fail1 = emitJumpIfNotImmediateNumber(regT1);
addPtr(tagTypeNumberRegister, regT1);
movePtrToDouble(regT1, fpRegT1);
-#else
- Jump fail1;
- if (!m_codeBlock->isKnownNotImmediate(op2))
- fail1 = emitJumpIfNotJSCell(regT1);
-
- Jump fail2 = checkStructure(regT1, m_globalData->numberStructure.get());
- loadDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT1);
-#endif
-
+
int32_t op1imm = getConstantOperand(op1).asInt32();
-
+
move(Imm32(op1imm), regT0);
convertInt32ToDouble(regT0, fpRegT0);
@@ -1484,13 +356,7 @@ void JIT::emitSlow_op_jnless(Instruction* currentInstruction, Vector<SlowCaseEnt
emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));
-#if USE(JSVALUE64)
fail1.link(this);
-#else
- if (!m_codeBlock->isKnownNotImmediate(op2))
- fail1.link(this);
- fail2.link(this);
-#endif
}
JITStubCall stubCall(this, cti_op_jless);
@@ -1503,7 +369,6 @@ void JIT::emitSlow_op_jnless(Instruction* currentInstruction, Vector<SlowCaseEnt
linkSlowCase(iter);
if (supportsFloatingPoint()) {
-#if USE(JSVALUE64)
Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
Jump fail2 = emitJumpIfNotImmediateNumber(regT1);
Jump fail3 = emitJumpIfImmediateInteger(regT1);
@@ -1511,37 +376,14 @@ void JIT::emitSlow_op_jnless(Instruction* currentInstruction, Vector<SlowCaseEnt
addPtr(tagTypeNumberRegister, regT1);
movePtrToDouble(regT0, fpRegT0);
movePtrToDouble(regT1, fpRegT1);
-#else
- Jump fail1;
- if (!m_codeBlock->isKnownNotImmediate(op1))
- fail1 = emitJumpIfNotJSCell(regT0);
-
- Jump fail2;
- if (!m_codeBlock->isKnownNotImmediate(op2))
- fail2 = emitJumpIfNotJSCell(regT1);
-
- Jump fail3 = checkStructure(regT0, m_globalData->numberStructure.get());
- Jump fail4 = checkStructure(regT1, m_globalData->numberStructure.get());
- loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
- loadDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT1);
-#endif
emitJumpSlowToHot(branchDouble(DoubleLessThanOrEqualOrUnordered, fpRegT1, fpRegT0), target);
emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));
-#if USE(JSVALUE64)
fail1.link(this);
fail2.link(this);
fail3.link(this);
-#else
- if (!m_codeBlock->isKnownNotImmediate(op1))
- fail1.link(this);
- if (!m_codeBlock->isKnownNotImmediate(op2))
- fail2.link(this);
- fail3.link(this);
- fail4.link(this);
-#endif
}
linkSlowCase(iter);
@@ -1564,23 +406,33 @@ void JIT::emit_op_jless(Instruction* currentInstruction)
// - constant int immediate to int immediate
// - int immediate to int immediate
+ if (isOperandConstantImmediateChar(op1)) {
+ emitGetVirtualRegister(op2, regT0);
+ addSlowCase(emitJumpIfNotJSCell(regT0));
+ JumpList failures;
+ emitLoadCharacterString(regT0, regT0, failures);
+ addSlowCase(failures);
+ addJump(branch32(GreaterThan, regT0, Imm32(asString(getConstantOperand(op1))->tryGetValue()[0])), target);
+ return;
+ }
+ if (isOperandConstantImmediateChar(op2)) {
+ emitGetVirtualRegister(op1, regT0);
+ addSlowCase(emitJumpIfNotJSCell(regT0));
+ JumpList failures;
+ emitLoadCharacterString(regT0, regT0, failures);
+ addSlowCase(failures);
+ addJump(branch32(LessThan, regT0, Imm32(asString(getConstantOperand(op2))->tryGetValue()[0])), target);
+ return;
+ }
if (isOperandConstantImmediateInt(op2)) {
emitGetVirtualRegister(op1, regT0);
emitJumpSlowCaseIfNotImmediateInteger(regT0);
-#if USE(JSVALUE64)
int32_t op2imm = getConstantOperandImmediateInt(op2);
-#else
- int32_t op2imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)));
-#endif
addJump(branch32(LessThan, regT0, Imm32(op2imm)), target);
} else if (isOperandConstantImmediateInt(op1)) {
emitGetVirtualRegister(op2, regT1);
emitJumpSlowCaseIfNotImmediateInteger(regT1);
-#if USE(JSVALUE64)
int32_t op1imm = getConstantOperandImmediateInt(op1);
-#else
- int32_t op1imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op1)));
-#endif
addJump(branch32(GreaterThan, regT1, Imm32(op1imm)), target);
} else {
emitGetVirtualRegisters(op1, regT0, op2, regT1);
@@ -1601,26 +453,29 @@ void JIT::emitSlow_op_jless(Instruction* currentInstruction, Vector<SlowCaseEntr
// - floating-point number to constant int immediate
// - constant int immediate to floating-point number
// - floating-point number to floating-point number.
+ if (isOperandConstantImmediateChar(op1) || isOperandConstantImmediateChar(op2)) {
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, cti_op_jless);
+ stubCall.addArgument(op1, regT0);
+ stubCall.addArgument(op2, regT1);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(NonZero, regT0), target);
+ return;
+ }
if (isOperandConstantImmediateInt(op2)) {
linkSlowCase(iter);
if (supportsFloatingPoint()) {
-#if USE(JSVALUE64)
Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
addPtr(tagTypeNumberRegister, regT0);
movePtrToDouble(regT0, fpRegT0);
-#else
- Jump fail1;
- if (!m_codeBlock->isKnownNotImmediate(op1))
- fail1 = emitJumpIfNotJSCell(regT0);
- Jump fail2 = checkStructure(regT0, m_globalData->numberStructure.get());
- loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
-#endif
-
int32_t op2imm = getConstantOperand(op2).asInt32();
-
+
move(Imm32(op2imm), regT1);
convertInt32ToDouble(regT1, fpRegT1);
@@ -1628,13 +483,7 @@ void JIT::emitSlow_op_jless(Instruction* currentInstruction, Vector<SlowCaseEntr
emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));
-#if USE(JSVALUE64)
fail1.link(this);
-#else
- if (!m_codeBlock->isKnownNotImmediate(op1))
- fail1.link(this);
- fail2.link(this);
-#endif
}
JITStubCall stubCall(this, cti_op_jless);
@@ -1647,21 +496,12 @@ void JIT::emitSlow_op_jless(Instruction* currentInstruction, Vector<SlowCaseEntr
linkSlowCase(iter);
if (supportsFloatingPoint()) {
-#if USE(JSVALUE64)
Jump fail1 = emitJumpIfNotImmediateNumber(regT1);
addPtr(tagTypeNumberRegister, regT1);
movePtrToDouble(regT1, fpRegT1);
-#else
- Jump fail1;
- if (!m_codeBlock->isKnownNotImmediate(op2))
- fail1 = emitJumpIfNotJSCell(regT1);
-
- Jump fail2 = checkStructure(regT1, m_globalData->numberStructure.get());
- loadDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT1);
-#endif
-
+
int32_t op1imm = getConstantOperand(op1).asInt32();
-
+
move(Imm32(op1imm), regT0);
convertInt32ToDouble(regT0, fpRegT0);
@@ -1669,13 +509,7 @@ void JIT::emitSlow_op_jless(Instruction* currentInstruction, Vector<SlowCaseEntr
emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));
-#if USE(JSVALUE64)
fail1.link(this);
-#else
- if (!m_codeBlock->isKnownNotImmediate(op2))
- fail1.link(this);
- fail2.link(this);
-#endif
}
JITStubCall stubCall(this, cti_op_jless);
@@ -1688,7 +522,6 @@ void JIT::emitSlow_op_jless(Instruction* currentInstruction, Vector<SlowCaseEntr
linkSlowCase(iter);
if (supportsFloatingPoint()) {
-#if USE(JSVALUE64)
Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
Jump fail2 = emitJumpIfNotImmediateNumber(regT1);
Jump fail3 = emitJumpIfImmediateInteger(regT1);
@@ -1696,37 +529,14 @@ void JIT::emitSlow_op_jless(Instruction* currentInstruction, Vector<SlowCaseEntr
addPtr(tagTypeNumberRegister, regT1);
movePtrToDouble(regT0, fpRegT0);
movePtrToDouble(regT1, fpRegT1);
-#else
- Jump fail1;
- if (!m_codeBlock->isKnownNotImmediate(op1))
- fail1 = emitJumpIfNotJSCell(regT0);
-
- Jump fail2;
- if (!m_codeBlock->isKnownNotImmediate(op2))
- fail2 = emitJumpIfNotJSCell(regT1);
-
- Jump fail3 = checkStructure(regT0, m_globalData->numberStructure.get());
- Jump fail4 = checkStructure(regT1, m_globalData->numberStructure.get());
- loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
- loadDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT1);
-#endif
emitJumpSlowToHot(branchDouble(DoubleLessThan, fpRegT0, fpRegT1), target);
emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));
-#if USE(JSVALUE64)
fail1.link(this);
fail2.link(this);
fail3.link(this);
-#else
- if (!m_codeBlock->isKnownNotImmediate(op1))
- fail1.link(this);
- if (!m_codeBlock->isKnownNotImmediate(op2))
- fail2.link(this);
- fail3.link(this);
- fail4.link(this);
-#endif
}
linkSlowCase(iter);
@@ -1738,7 +548,7 @@ void JIT::emitSlow_op_jless(Instruction* currentInstruction, Vector<SlowCaseEntr
}
}
-void JIT::emit_op_jnlesseq(Instruction* currentInstruction)
+void JIT::emit_op_jlesseq(Instruction* currentInstruction, bool invert)
{
unsigned op1 = currentInstruction[1].u.operand;
unsigned op2 = currentInstruction[2].u.operand;
@@ -1749,34 +559,44 @@ void JIT::emit_op_jnlesseq(Instruction* currentInstruction)
// - constant int immediate to int immediate
// - int immediate to int immediate
+ if (isOperandConstantImmediateChar(op1)) {
+ emitGetVirtualRegister(op2, regT0);
+ addSlowCase(emitJumpIfNotJSCell(regT0));
+ JumpList failures;
+ emitLoadCharacterString(regT0, regT0, failures);
+ addSlowCase(failures);
+ addJump(branch32(invert ? LessThan : GreaterThanOrEqual, regT0, Imm32(asString(getConstantOperand(op1))->tryGetValue()[0])), target);
+ return;
+ }
+ if (isOperandConstantImmediateChar(op2)) {
+ emitGetVirtualRegister(op1, regT0);
+ addSlowCase(emitJumpIfNotJSCell(regT0));
+ JumpList failures;
+ emitLoadCharacterString(regT0, regT0, failures);
+ addSlowCase(failures);
+ addJump(branch32(invert ? GreaterThan : LessThanOrEqual, regT0, Imm32(asString(getConstantOperand(op2))->tryGetValue()[0])), target);
+ return;
+ }
if (isOperandConstantImmediateInt(op2)) {
emitGetVirtualRegister(op1, regT0);
emitJumpSlowCaseIfNotImmediateInteger(regT0);
-#if USE(JSVALUE64)
int32_t op2imm = getConstantOperandImmediateInt(op2);
-#else
- int32_t op2imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)));
-#endif
- addJump(branch32(GreaterThan, regT0, Imm32(op2imm)), target);
+ addJump(branch32(invert ? GreaterThan : LessThanOrEqual, regT0, Imm32(op2imm)), target);
} else if (isOperandConstantImmediateInt(op1)) {
emitGetVirtualRegister(op2, regT1);
emitJumpSlowCaseIfNotImmediateInteger(regT1);
-#if USE(JSVALUE64)
int32_t op1imm = getConstantOperandImmediateInt(op1);
-#else
- int32_t op1imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op1)));
-#endif
- addJump(branch32(LessThan, regT1, Imm32(op1imm)), target);
+ addJump(branch32(invert ? LessThan : GreaterThanOrEqual, regT1, Imm32(op1imm)), target);
} else {
emitGetVirtualRegisters(op1, regT0, op2, regT1);
emitJumpSlowCaseIfNotImmediateInteger(regT0);
emitJumpSlowCaseIfNotImmediateInteger(regT1);
- addJump(branch32(GreaterThan, regT0, regT1), target);
+ addJump(branch32(invert ? GreaterThan : LessThanOrEqual, regT0, regT1), target);
}
}
-void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_jlesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter, bool invert)
{
unsigned op1 = currentInstruction[1].u.operand;
unsigned op2 = currentInstruction[2].u.operand;
@@ -1787,93 +607,75 @@ void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseE
// - constant int immediate to floating-point number
// - floating-point number to floating-point number.
+ if (isOperandConstantImmediateChar(op1) || isOperandConstantImmediateChar(op2)) {
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, cti_op_jlesseq);
+ stubCall.addArgument(op1, regT0);
+ stubCall.addArgument(op2, regT1);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, regT0), target);
+ return;
+ }
+
if (isOperandConstantImmediateInt(op2)) {
linkSlowCase(iter);
if (supportsFloatingPoint()) {
-#if USE(JSVALUE64)
Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
addPtr(tagTypeNumberRegister, regT0);
movePtrToDouble(regT0, fpRegT0);
-#else
- Jump fail1;
- if (!m_codeBlock->isKnownNotImmediate(op1))
- fail1 = emitJumpIfNotJSCell(regT0);
- Jump fail2 = checkStructure(regT0, m_globalData->numberStructure.get());
- loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
-#endif
-
int32_t op2imm = getConstantOperand(op2).asInt32();
-
+
move(Imm32(op2imm), regT1);
convertInt32ToDouble(regT1, fpRegT1);
- emitJumpSlowToHot(branchDouble(DoubleLessThanOrUnordered, fpRegT1, fpRegT0), target);
+ emitJumpSlowToHot(branchDouble(invert ? DoubleLessThanOrUnordered : DoubleGreaterThanOrEqual, fpRegT1, fpRegT0), target);
emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnlesseq));
-#if USE(JSVALUE64)
fail1.link(this);
-#else
- if (!m_codeBlock->isKnownNotImmediate(op1))
- fail1.link(this);
- fail2.link(this);
-#endif
}
JITStubCall stubCall(this, cti_op_jlesseq);
stubCall.addArgument(regT0);
stubCall.addArgument(op2, regT2);
stubCall.call();
- emitJumpSlowToHot(branchTest32(Zero, regT0), target);
+ emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, regT0), target);
} else if (isOperandConstantImmediateInt(op1)) {
linkSlowCase(iter);
if (supportsFloatingPoint()) {
-#if USE(JSVALUE64)
Jump fail1 = emitJumpIfNotImmediateNumber(regT1);
addPtr(tagTypeNumberRegister, regT1);
movePtrToDouble(regT1, fpRegT1);
-#else
- Jump fail1;
- if (!m_codeBlock->isKnownNotImmediate(op2))
- fail1 = emitJumpIfNotJSCell(regT1);
-
- Jump fail2 = checkStructure(regT1, m_globalData->numberStructure.get());
- loadDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT1);
-#endif
-
+
int32_t op1imm = getConstantOperand(op1).asInt32();
-
+
move(Imm32(op1imm), regT0);
convertInt32ToDouble(regT0, fpRegT0);
- emitJumpSlowToHot(branchDouble(DoubleLessThanOrUnordered, fpRegT1, fpRegT0), target);
+ emitJumpSlowToHot(branchDouble(invert ? DoubleLessThanOrUnordered : DoubleGreaterThanOrEqual, fpRegT1, fpRegT0), target);
emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnlesseq));
-#if USE(JSVALUE64)
fail1.link(this);
-#else
- if (!m_codeBlock->isKnownNotImmediate(op2))
- fail1.link(this);
- fail2.link(this);
-#endif
}
JITStubCall stubCall(this, cti_op_jlesseq);
stubCall.addArgument(op1, regT2);
stubCall.addArgument(regT1);
stubCall.call();
- emitJumpSlowToHot(branchTest32(Zero, regT0), target);
+ emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, regT0), target);
} else {
linkSlowCase(iter);
if (supportsFloatingPoint()) {
-#if USE(JSVALUE64)
Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
Jump fail2 = emitJumpIfNotImmediateNumber(regT1);
Jump fail3 = emitJumpIfImmediateInteger(regT1);
@@ -1881,37 +683,14 @@ void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseE
addPtr(tagTypeNumberRegister, regT1);
movePtrToDouble(regT0, fpRegT0);
movePtrToDouble(regT1, fpRegT1);
-#else
- Jump fail1;
- if (!m_codeBlock->isKnownNotImmediate(op1))
- fail1 = emitJumpIfNotJSCell(regT0);
-
- Jump fail2;
- if (!m_codeBlock->isKnownNotImmediate(op2))
- fail2 = emitJumpIfNotJSCell(regT1);
-
- Jump fail3 = checkStructure(regT0, m_globalData->numberStructure.get());
- Jump fail4 = checkStructure(regT1, m_globalData->numberStructure.get());
- loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
- loadDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT1);
-#endif
- emitJumpSlowToHot(branchDouble(DoubleLessThanOrUnordered, fpRegT1, fpRegT0), target);
+ emitJumpSlowToHot(branchDouble(invert ? DoubleLessThanOrUnordered : DoubleGreaterThanOrEqual, fpRegT1, fpRegT0), target);
emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnlesseq));
-#if USE(JSVALUE64)
fail1.link(this);
fail2.link(this);
fail3.link(this);
-#else
- if (!m_codeBlock->isKnownNotImmediate(op1))
- fail1.link(this);
- if (!m_codeBlock->isKnownNotImmediate(op2))
- fail2.link(this);
- fail3.link(this);
- fail4.link(this);
-#endif
}
linkSlowCase(iter);
@@ -1919,10 +698,20 @@ void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseE
stubCall.addArgument(regT0);
stubCall.addArgument(regT1);
stubCall.call();
- emitJumpSlowToHot(branchTest32(Zero, regT0), target);
+ emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, regT0), target);
}
}
+void JIT::emit_op_jnlesseq(Instruction* currentInstruction)
+{
+ emit_op_jlesseq(currentInstruction, true);
+}
+
+void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ emitSlow_op_jlesseq(currentInstruction, iter, true);
+}
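// jnless's sibling ops now share one body parameterized on `invert`: the
// fast path jumps on the stated condition for jlesseq and on its negation
// for jnlesseq. A sketch of the mapping for op1 <= op2:
//
//     int32:  invert ? GreaterThan : LessThanOrEqual
//     double: invert ? DoubleLessThanOrUnordered : DoubleGreaterThanOrEqual
//
// The unordered case sits on the inverted side because a NaN operand must
// not take the jlesseq branch but must take the jnlesseq branch.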
+
void JIT::emit_op_bitand(Instruction* currentInstruction)
{
unsigned result = currentInstruction[1].u.operand;
@@ -1932,25 +721,17 @@ void JIT::emit_op_bitand(Instruction* currentInstruction)
if (isOperandConstantImmediateInt(op1)) {
emitGetVirtualRegister(op2, regT0);
emitJumpSlowCaseIfNotImmediateInteger(regT0);
-#if USE(JSVALUE64)
int32_t imm = getConstantOperandImmediateInt(op1);
andPtr(Imm32(imm), regT0);
if (imm >= 0)
emitFastArithIntToImmNoCheck(regT0, regT0);
-#else
- andPtr(Imm32(static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op1)))), regT0);
-#endif
} else if (isOperandConstantImmediateInt(op2)) {
emitGetVirtualRegister(op1, regT0);
emitJumpSlowCaseIfNotImmediateInteger(regT0);
-#if USE(JSVALUE64)
int32_t imm = getConstantOperandImmediateInt(op2);
andPtr(Imm32(imm), regT0);
if (imm >= 0)
emitFastArithIntToImmNoCheck(regT0, regT0);
-#else
- andPtr(Imm32(static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)))), regT0);
-#endif
} else {
emitGetVirtualRegisters(op1, regT0, op2, regT1);
andPtr(regT1, regT0);
@@ -1992,13 +773,8 @@ void JIT::emit_op_post_inc(Instruction* currentInstruction)
emitGetVirtualRegister(srcDst, regT0);
move(regT0, regT1);
emitJumpSlowCaseIfNotImmediateInteger(regT0);
-#if USE(JSVALUE64)
addSlowCase(branchAdd32(Overflow, Imm32(1), regT1));
emitFastArithIntToImmNoCheck(regT1, regT1);
-#else
- addSlowCase(branchAdd32(Overflow, Imm32(1 << JSImmediate::IntegerPayloadShift), regT1));
- signExtend32ToPtr(regT1, regT1);
-#endif
emitPutVirtualRegister(srcDst, regT1);
emitPutVirtualRegister(result);
}
@@ -2024,13 +800,8 @@ void JIT::emit_op_post_dec(Instruction* currentInstruction)
emitGetVirtualRegister(srcDst, regT0);
move(regT0, regT1);
emitJumpSlowCaseIfNotImmediateInteger(regT0);
-#if USE(JSVALUE64)
addSlowCase(branchSub32(Zero, Imm32(1), regT1));
emitFastArithIntToImmNoCheck(regT1, regT1);
-#else
- addSlowCase(branchSub32(Zero, Imm32(1 << JSImmediate::IntegerPayloadShift), regT1));
- signExtend32ToPtr(regT1, regT1);
-#endif
emitPutVirtualRegister(srcDst, regT1);
emitPutVirtualRegister(result);
}
@@ -2054,13 +825,8 @@ void JIT::emit_op_pre_inc(Instruction* currentInstruction)
emitGetVirtualRegister(srcDst, regT0);
emitJumpSlowCaseIfNotImmediateInteger(regT0);
-#if USE(JSVALUE64)
addSlowCase(branchAdd32(Overflow, Imm32(1), regT0));
emitFastArithIntToImmNoCheck(regT0, regT0);
-#else
- addSlowCase(branchAdd32(Overflow, Imm32(1 << JSImmediate::IntegerPayloadShift), regT0));
- signExtend32ToPtr(regT0, regT0);
-#endif
emitPutVirtualRegister(srcDst);
}
@@ -2083,13 +849,8 @@ void JIT::emit_op_pre_dec(Instruction* currentInstruction)
emitGetVirtualRegister(srcDst, regT0);
emitJumpSlowCaseIfNotImmediateInteger(regT0);
-#if USE(JSVALUE64)
addSlowCase(branchSub32(Zero, Imm32(1), regT0));
emitFastArithIntToImmNoCheck(regT0, regT0);
-#else
- addSlowCase(branchSub32(Zero, Imm32(1 << JSImmediate::IntegerPayloadShift), regT0));
- signExtend32ToPtr(regT0, regT0);
-#endif
emitPutVirtualRegister(srcDst);
}
@@ -2108,7 +869,7 @@ void JIT::emitSlow_op_pre_dec(Instruction* currentInstruction, Vector<SlowCaseEn
/* ------------------------------ BEGIN: OP_MOD ------------------------------ */
-#if CPU(X86) || CPU(X86_64)
+#if CPU(X86) || CPU(X86_64) || CPU(MIPS)
void JIT::emit_op_mod(Instruction* currentInstruction)
{
@@ -2116,21 +877,21 @@ void JIT::emit_op_mod(Instruction* currentInstruction)
unsigned op1 = currentInstruction[2].u.operand;
unsigned op2 = currentInstruction[3].u.operand;
- emitGetVirtualRegisters(op1, X86Registers::eax, op2, X86Registers::ecx);
- emitJumpSlowCaseIfNotImmediateInteger(X86Registers::eax);
- emitJumpSlowCaseIfNotImmediateInteger(X86Registers::ecx);
-#if USE(JSVALUE64)
- addSlowCase(branchPtr(Equal, X86Registers::ecx, ImmPtr(JSValue::encode(jsNumber(m_globalData, 0)))));
- m_assembler.cdq();
- m_assembler.idivl_r(X86Registers::ecx);
-#else
- emitFastArithDeTagImmediate(X86Registers::eax);
- addSlowCase(emitFastArithDeTagImmediateJumpIfZero(X86Registers::ecx));
- m_assembler.cdq();
- m_assembler.idivl_r(X86Registers::ecx);
- signExtend32ToPtr(X86Registers::edx, X86Registers::edx);
+#if CPU(X86) || CPU(X86_64)
+ // Make sure registers are correct for x86 IDIV instructions.
+ ASSERT(regT0 == X86Registers::eax);
+ ASSERT(regT1 == X86Registers::edx);
+ ASSERT(regT2 == X86Registers::ecx);
#endif
- emitFastArithReTagImmediate(X86Registers::edx, X86Registers::eax);
+
+ emitGetVirtualRegisters(op1, regT0, op2, regT2);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT2);
+
+ addSlowCase(branchPtr(Equal, regT2, ImmPtr(JSValue::encode(jsNumber(0)))));
+ m_assembler.cdq();
+ m_assembler.idivl_r(regT2);
+ emitFastArithReTagImmediate(regT1, regT0);
emitPutVirtualRegister(result);
}
@@ -2138,26 +899,16 @@ void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>
{
unsigned result = currentInstruction[1].u.operand;
-#if USE(JSVALUE64)
- linkSlowCase(iter);
linkSlowCase(iter);
linkSlowCase(iter);
-#else
- Jump notImm1 = getSlowCase(iter);
- Jump notImm2 = getSlowCase(iter);
linkSlowCase(iter);
- emitFastArithReTagImmediate(X86Registers::eax, X86Registers::eax);
- emitFastArithReTagImmediate(X86Registers::ecx, X86Registers::ecx);
- notImm1.link(this);
- notImm2.link(this);
-#endif
JITStubCall stubCall(this, cti_op_mod);
- stubCall.addArgument(X86Registers::eax);
- stubCall.addArgument(X86Registers::ecx);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(regT2);
stubCall.call(result);
}
-#else // CPU(X86) || CPU(X86_64)
+#else // CPU(X86) || CPU(X86_64) || CPU(MIPS)
void JIT::emit_op_mod(Instruction* currentInstruction)
{
@@ -2165,27 +916,15 @@ void JIT::emit_op_mod(Instruction* currentInstruction)
unsigned op1 = currentInstruction[2].u.operand;
unsigned op2 = currentInstruction[3].u.operand;
-#if ENABLE(JIT_OPTIMIZE_MOD)
- emitGetVirtualRegisters(op1, regT0, op2, regT2);
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
- emitJumpSlowCaseIfNotImmediateInteger(regT2);
-
- addSlowCase(branch32(Equal, regT2, Imm32(1)));
-
- emitNakedCall(m_globalData->jitStubs.ctiSoftModulo());
-
- emitPutVirtualRegister(result, regT0);
-#else
JITStubCall stubCall(this, cti_op_mod);
stubCall.addArgument(op1, regT2);
stubCall.addArgument(op2, regT2);
stubCall.call(result);
-#endif
}
void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
-#if ENABLE(JIT_OPTIMIZE_MOD)
+#if ENABLE(JIT_USE_SOFT_MODULO)
unsigned result = currentInstruction[1].u.operand;
unsigned op1 = currentInstruction[2].u.operand;
unsigned op2 = currentInstruction[3].u.operand;
@@ -2205,8 +944,6 @@ void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>
/* ------------------------------ END: OP_MOD ------------------------------ */
-#if USE(JSVALUE64)
-
/* ------------------------------ BEGIN: USE(JSVALUE64) (OP_ADD, OP_SUB, OP_MUL) ------------------------------ */
void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned, unsigned op1, unsigned op2, OperandTypes)
@@ -2230,7 +967,7 @@ void JIT::compileBinaryArithOpSlowCase(OpcodeID opcodeID, Vector<SlowCaseEntry>:
{
// We assume that subtracting TagTypeNumber is equivalent to adding DoubleEncodeOffset.
COMPILE_ASSERT(((JSImmediate::TagTypeNumber + JSImmediate::DoubleEncodeOffset) == 0), TagTypeNumber_PLUS_DoubleEncodeOffset_EQUALS_0);
-
+
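// What that COMPILE_ASSERT buys (a sketch of the JSVALUE64 number encoding):
// a boxed double is its raw bits plus DoubleEncodeOffset (1 << 48), and
// TagTypeNumber is 0xFFFF000000000000, so the two constants sum to zero
// modulo 2^64. Unboxing can therefore reuse the register that already holds
// TagTypeNumber, which is exactly the addPtr(tagTypeNumberRegister, ...) /
// movePtrToDouble pairs used throughout this file:
//
//     uint64_t box(double d)   { return bitsOf(d) + DoubleEncodeOffset; }
//     double unbox(uint64_t v) { return doubleOf(v + TagTypeNumber); }
//
// (bitsOf/doubleOf stand in for a bit-preserving cast.)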
Jump notImm1;
Jump notImm2;
if (op1HasImmediateIntFastCase) {
@@ -2423,7 +1160,7 @@ void JIT::emit_op_div(Instruction* currentInstruction)
movePtrToDouble(regT0, fpRegT0);
skipDoubleLoad.link(this);
}
-
+
if (isOperandConstantImmediateDouble(op2)) {
emitGetVirtualRegister(op2, regT1);
addPtr(tagTypeNumberRegister, regT1);
@@ -2499,311 +1236,9 @@ void JIT::emitSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>
compileBinaryArithOpSlowCase(op_sub, iter, result, op1, op2, types, false, false);
}
-#else // USE(JSVALUE64)
-
-/* ------------------------------ BEGIN: !USE(JSVALUE64) (OP_ADD, OP_SUB, OP_MUL) ------------------------------ */
-
-void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned dst, unsigned src1, unsigned src2, OperandTypes types)
-{
- Structure* numberStructure = m_globalData->numberStructure.get();
- Jump wasJSNumberCell1;
- Jump wasJSNumberCell2;
-
- emitGetVirtualRegisters(src1, regT0, src2, regT1);
-
- if (types.second().isReusable() && supportsFloatingPoint()) {
- ASSERT(types.second().mightBeNumber());
-
- // Check op2 is a number
- Jump op2imm = emitJumpIfImmediateInteger(regT1);
- if (!types.second().definitelyIsNumber()) {
- emitJumpSlowCaseIfNotJSCell(regT1, src2);
- addSlowCase(checkStructure(regT1, numberStructure));
- }
-
- // (1) In this case src2 is a reusable number cell.
- // Slow case if src1 is not a number type.
- Jump op1imm = emitJumpIfImmediateInteger(regT0);
- if (!types.first().definitelyIsNumber()) {
- emitJumpSlowCaseIfNotJSCell(regT0, src1);
- addSlowCase(checkStructure(regT0, numberStructure));
- }
-
- // (1a) if we get here, src1 is also a number cell
- loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
- Jump loadedDouble = jump();
- // (1b) if we get here, src1 is an immediate
- op1imm.link(this);
- emitFastArithImmToInt(regT0);
- convertInt32ToDouble(regT0, fpRegT0);
- // (1c)
- loadedDouble.link(this);
- if (opcodeID == op_add)
- addDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
- else if (opcodeID == op_sub)
- subDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
- else {
- ASSERT(opcodeID == op_mul);
- mulDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
- }
-
- // Store the result to the JSNumberCell and jump.
- storeDouble(fpRegT0, Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)));
- move(regT1, regT0);
- emitPutVirtualRegister(dst);
- wasJSNumberCell2 = jump();
-
- // (2) This handles cases where src2 is an immediate number.
- // Two slow cases - either src1 isn't an immediate, or the subtract overflows.
- op2imm.link(this);
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
- } else if (types.first().isReusable() && supportsFloatingPoint()) {
- ASSERT(types.first().mightBeNumber());
-
- // Check op1 is a number
- Jump op1imm = emitJumpIfImmediateInteger(regT0);
- if (!types.first().definitelyIsNumber()) {
- emitJumpSlowCaseIfNotJSCell(regT0, src1);
- addSlowCase(checkStructure(regT0, numberStructure));
- }
-
- // (1) In this case src1 is a reusable number cell.
- // Slow case if src2 is not a number type.
- Jump op2imm = emitJumpIfImmediateInteger(regT1);
- if (!types.second().definitelyIsNumber()) {
- emitJumpSlowCaseIfNotJSCell(regT1, src2);
- addSlowCase(checkStructure(regT1, numberStructure));
- }
-
- // (1a) if we get here, src2 is also a number cell
- loadDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT1);
- Jump loadedDouble = jump();
- // (1b) if we get here, src2 is an immediate
- op2imm.link(this);
- emitFastArithImmToInt(regT1);
- convertInt32ToDouble(regT1, fpRegT1);
- // (1c)
- loadedDouble.link(this);
- loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
- if (opcodeID == op_add)
- addDouble(fpRegT1, fpRegT0);
- else if (opcodeID == op_sub)
- subDouble(fpRegT1, fpRegT0);
- else {
- ASSERT(opcodeID == op_mul);
- mulDouble(fpRegT1, fpRegT0);
- }
- // Store the result to the JSNumberCell and jump.
- storeDouble(fpRegT0, Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)));
- emitPutVirtualRegister(dst);
- wasJSNumberCell1 = jump();
-
- // (2) This handles cases where src1 is an immediate number.
- // Two slow cases - either src2 isn't an immediate, or the subtract overflows.
- op1imm.link(this);
- emitJumpSlowCaseIfNotImmediateInteger(regT1);
- } else
- emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);
-
- if (opcodeID == op_add) {
- emitFastArithDeTagImmediate(regT0);
- addSlowCase(branchAdd32(Overflow, regT1, regT0));
- } else if (opcodeID == op_sub) {
- addSlowCase(branchSub32(Overflow, regT1, regT0));
- signExtend32ToPtr(regT0, regT0);
- emitFastArithReTagImmediate(regT0, regT0);
- } else {
- ASSERT(opcodeID == op_mul);
- // Convert the operands from JSImmediates to ints, and check if either is zero.
- emitFastArithImmToInt(regT1);
- Jump op1Zero = emitFastArithDeTagImmediateJumpIfZero(regT0);
- Jump op2NonZero = branchTest32(NonZero, regT1);
- op1Zero.link(this);
- // if either input is zero, add the two together, and check if the result is < 0.
- // If it is, we have a problem: for N < 0, (N * 0) == -0, which is not representable as a JSImmediate.
- move(regT0, regT2);
- addSlowCase(branchAdd32(Signed, regT1, regT2));
- // Skip the above check if neither input is zero
- op2NonZero.link(this);
- addSlowCase(branchMul32(Overflow, regT1, regT0));
- signExtend32ToPtr(regT0, regT0);
- emitFastArithReTagImmediate(regT0, regT0);
- }
- emitPutVirtualRegister(dst);
-
- if (types.second().isReusable() && supportsFloatingPoint())
- wasJSNumberCell2.link(this);
- else if (types.first().isReusable() && supportsFloatingPoint())
- wasJSNumberCell1.link(this);
-}
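// The detag/retag dance above, sketched assuming the JSImmediate integer
// encoding enc(x) = (x << 1) | 1 that IntegerPayloadShift implies: adding
// two encoded values would double-count the tag bit, so one operand is
// detagged first and plain machine addition then preserves the encoding:
//
//     (enc(a) - 1) + enc(b) == ((a + b) << 1) | 1 == enc(a + b)
//
// Subtraction goes the other way: enc(a) - enc(b) drops the tag bit
// entirely, hence the retag after branchSub32.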
-
-void JIT::compileBinaryArithOpSlowCase(OpcodeID opcodeID, Vector<SlowCaseEntry>::iterator& iter, unsigned dst, unsigned src1, unsigned src2, OperandTypes types)
-{
- linkSlowCase(iter);
- if (types.second().isReusable() && supportsFloatingPoint()) {
- if (!types.first().definitelyIsNumber()) {
- linkSlowCaseIfNotJSCell(iter, src1);
- linkSlowCase(iter);
- }
- if (!types.second().definitelyIsNumber()) {
- linkSlowCaseIfNotJSCell(iter, src2);
- linkSlowCase(iter);
- }
- } else if (types.first().isReusable() && supportsFloatingPoint()) {
- if (!types.first().definitelyIsNumber()) {
- linkSlowCaseIfNotJSCell(iter, src1);
- linkSlowCase(iter);
- }
- if (!types.second().definitelyIsNumber()) {
- linkSlowCaseIfNotJSCell(iter, src2);
- linkSlowCase(iter);
- }
- }
- linkSlowCase(iter);
-
- // additional entry point to handle -0 cases.
- if (opcodeID == op_mul)
- linkSlowCase(iter);
-
- JITStubCall stubCall(this, opcodeID == op_add ? cti_op_add : opcodeID == op_sub ? cti_op_sub : cti_op_mul);
- stubCall.addArgument(src1, regT2);
- stubCall.addArgument(src2, regT2);
- stubCall.call(dst);
-}
-
-void JIT::emit_op_add(Instruction* currentInstruction)
-{
- unsigned result = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
- OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
-
- if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
- JITStubCall stubCall(this, cti_op_add);
- stubCall.addArgument(op1, regT2);
- stubCall.addArgument(op2, regT2);
- stubCall.call(result);
- return;
- }
-
- if (isOperandConstantImmediateInt(op1)) {
- emitGetVirtualRegister(op2, regT0);
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
- addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op1) << JSImmediate::IntegerPayloadShift), regT0));
- signExtend32ToPtr(regT0, regT0);
- emitPutVirtualRegister(result);
- } else if (isOperandConstantImmediateInt(op2)) {
- emitGetVirtualRegister(op1, regT0);
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
- addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op2) << JSImmediate::IntegerPayloadShift), regT0));
- signExtend32ToPtr(regT0, regT0);
- emitPutVirtualRegister(result);
- } else {
- compileBinaryArithOp(op_add, result, op1, op2, OperandTypes::fromInt(currentInstruction[4].u.operand));
- }
-}
-
-void JIT::emitSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned result = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
-
- OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
- if (!types.first().mightBeNumber() || !types.second().mightBeNumber())
- return;
-
- if (isOperandConstantImmediateInt(op1)) {
- Jump notImm = getSlowCase(iter);
- linkSlowCase(iter);
- sub32(Imm32(getConstantOperandImmediateInt(op1) << JSImmediate::IntegerPayloadShift), regT0);
- notImm.link(this);
- JITStubCall stubCall(this, cti_op_add);
- stubCall.addArgument(op1, regT2);
- stubCall.addArgument(regT0);
- stubCall.call(result);
- } else if (isOperandConstantImmediateInt(op2)) {
- Jump notImm = getSlowCase(iter);
- linkSlowCase(iter);
- sub32(Imm32(getConstantOperandImmediateInt(op2) << JSImmediate::IntegerPayloadShift), regT0);
- notImm.link(this);
- JITStubCall stubCall(this, cti_op_add);
- stubCall.addArgument(regT0);
- stubCall.addArgument(op2, regT2);
- stubCall.call(result);
- } else {
- OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
- ASSERT(types.first().mightBeNumber() && types.second().mightBeNumber());
- compileBinaryArithOpSlowCase(op_add, iter, result, op1, op2, types);
- }
-}
-
-void JIT::emit_op_mul(Instruction* currentInstruction)
-{
- unsigned result = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
-
- // For now, only plant a fast int case if the constant operand is greater than zero.
- int32_t value;
- if (isOperandConstantImmediateInt(op1) && ((value = getConstantOperandImmediateInt(op1)) > 0)) {
- emitGetVirtualRegister(op2, regT0);
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
- emitFastArithDeTagImmediate(regT0);
- addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT0));
- signExtend32ToPtr(regT0, regT0);
- emitFastArithReTagImmediate(regT0, regT0);
- emitPutVirtualRegister(result);
- } else if (isOperandConstantImmediateInt(op2) && ((value = getConstantOperandImmediateInt(op2)) > 0)) {
- emitGetVirtualRegister(op1, regT0);
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
- emitFastArithDeTagImmediate(regT0);
- addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT0));
- signExtend32ToPtr(regT0, regT0);
- emitFastArithReTagImmediate(regT0, regT0);
- emitPutVirtualRegister(result);
- } else
- compileBinaryArithOp(op_mul, result, op1, op2, OperandTypes::fromInt(currentInstruction[4].u.operand));
-}
-
-void JIT::emitSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned result = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
-
- if ((isOperandConstantImmediateInt(op1) && (getConstantOperandImmediateInt(op1) > 0))
- || (isOperandConstantImmediateInt(op2) && (getConstantOperandImmediateInt(op2) > 0))) {
- linkSlowCase(iter);
- linkSlowCase(iter);
- // There is an extra slow case for (op1 * -N) or (-N * op2), to check for 0 since this should produce a result of -0.
- JITStubCall stubCall(this, cti_op_mul);
- stubCall.addArgument(op1, regT2);
- stubCall.addArgument(op2, regT2);
- stubCall.call(result);
- } else
- compileBinaryArithOpSlowCase(op_mul, iter, result, op1, op2, OperandTypes::fromInt(currentInstruction[4].u.operand));
-}
-
-void JIT::emit_op_sub(Instruction* currentInstruction)
-{
- compileBinaryArithOp(op_sub, currentInstruction[1].u.operand, currentInstruction[2].u.operand, currentInstruction[3].u.operand, OperandTypes::fromInt(currentInstruction[4].u.operand));
-}
-
-void JIT::emitSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- compileBinaryArithOpSlowCase(op_sub, iter, currentInstruction[1].u.operand, currentInstruction[2].u.operand, currentInstruction[3].u.operand, OperandTypes::fromInt(currentInstruction[4].u.operand));
-}
-
-#endif // USE(JSVALUE64)
-
/* ------------------------------ END: OP_ADD, OP_SUB, OP_MUL ------------------------------ */
-#endif // USE(JSVALUE32_64)
-
} // namespace JSC
+#endif // USE(JSVALUE64)
#endif // ENABLE(JIT)
diff --git a/JavaScriptCore/jit/JITArithmetic32_64.cpp b/JavaScriptCore/jit/JITArithmetic32_64.cpp
new file mode 100644
index 0000000..e0b31f0
--- /dev/null
+++ b/JavaScriptCore/jit/JITArithmetic32_64.cpp
@@ -0,0 +1,1424 @@
+/*
+* Copyright (C) 2008 Apple Inc. All rights reserved.
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions
+* are met:
+* 1. Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+* 2. Redistributions in binary form must reproduce the above copyright
+* notice, this list of conditions and the following disclaimer in the
+* documentation and/or other materials provided with the distribution.
+*
+* THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+
+#include "config.h"
+
+#if ENABLE(JIT)
+#if USE(JSVALUE32_64)
+#include "JIT.h"
+
+#include "CodeBlock.h"
+#include "JITInlineMethods.h"
+#include "JITStubCall.h"
+#include "JITStubs.h"
+#include "JSArray.h"
+#include "JSFunction.h"
+#include "Interpreter.h"
+#include "ResultType.h"
+#include "SamplingTool.h"
+
+#ifndef NDEBUG
+#include <stdio.h>
+#endif
+
+using namespace std;
+
+namespace JSC {
+
+void JIT::emit_op_negate(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src = currentInstruction[2].u.operand;
+
+ emitLoad(src, regT1, regT0);
+
+ Jump srcNotInt = branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag));
+ addSlowCase(branchTest32(Zero, regT0, Imm32(0x7fffffff)));
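+    // The mask catches payloads 0 and 0x80000000 in one test: negating 0
+    // would produce -0 (not representable as an int32), and negating INT_MIN
+    // would overflow.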
+ neg32(regT0);
+ emitStoreInt32(dst, regT0, (dst == src));
+
+ Jump end = jump();
+
+ srcNotInt.link(this);
+ addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));
+
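+    // Negate the double in place by flipping the sign bit of its high word.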
+ xor32(Imm32(1 << 31), regT1);
+ store32(regT1, tagFor(dst));
+ if (dst != src)
+ store32(regT0, payloadFor(dst));
+
+ end.link(this);
+}
+
+void JIT::emitSlow_op_negate(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+
+ linkSlowCase(iter); // 0x7fffffff check
+ linkSlowCase(iter); // double check
+
+ JITStubCall stubCall(this, cti_op_negate);
+ stubCall.addArgument(regT1, regT0);
+ stubCall.call(dst);
+}
+
+void JIT::emit_op_jnless(Instruction* currentInstruction)
+{
+ unsigned op1 = currentInstruction[1].u.operand;
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+
+ JumpList notInt32Op1;
+ JumpList notInt32Op2;
+
+ // Character less.
+ if (isOperandConstantImmediateChar(op1)) {
+ emitLoad(op2, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));
+ JumpList failures;
+ emitLoadCharacterString(regT0, regT0, failures);
+ addSlowCase(failures);
+ addJump(branch32(LessThanOrEqual, regT0, Imm32(asString(getConstantOperand(op1))->tryGetValue()[0])), target);
+ return;
+ }
+ if (isOperandConstantImmediateChar(op2)) {
+ emitLoad(op1, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));
+ JumpList failures;
+ emitLoadCharacterString(regT0, regT0, failures);
+ addSlowCase(failures);
+ addJump(branch32(GreaterThanOrEqual, regT0, Imm32(asString(getConstantOperand(op2))->tryGetValue()[0])), target);
+ return;
+ }
+ if (isOperandConstantImmediateInt(op1)) {
+ // Int32 less.
+ emitLoad(op2, regT3, regT2);
+ notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+ addJump(branch32(LessThanOrEqual, regT2, Imm32(getConstantOperand(op1).asInt32())), target);
+ } else if (isOperandConstantImmediateInt(op2)) {
+ emitLoad(op1, regT1, regT0);
+ notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ addJump(branch32(GreaterThanOrEqual, regT0, Imm32(getConstantOperand(op2).asInt32())), target);
+ } else {
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+ notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+ addJump(branch32(GreaterThanOrEqual, regT0, regT2), target);
+ }
+
+ if (!supportsFloatingPoint()) {
+ addSlowCase(notInt32Op1);
+ addSlowCase(notInt32Op2);
+ return;
+ }
+ Jump end = jump();
+
+ // Double less.
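+    // The last two arguments tell emitBinaryDoubleOp which operands are
+    // already loaded in registers on this path.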
+ emitBinaryDoubleOp(op_jnless, target, op1, op2, OperandTypes(), notInt32Op1, notInt32Op2, !isOperandConstantImmediateInt(op1), isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2));
+ end.link(this);
+}
+
+void JIT::emitSlow_op_jnless(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned op1 = currentInstruction[1].u.operand;
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+
+ if (isOperandConstantImmediateChar(op1) || isOperandConstantImmediateChar(op2)) {
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ } else {
+ if (!supportsFloatingPoint()) {
+ if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // int32 check
+ } else {
+ if (!isOperandConstantImmediateInt(op1)) {
+ linkSlowCase(iter); // double check
+ linkSlowCase(iter); // int32 check
+ }
+ if (isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2))
+ linkSlowCase(iter); // double check
+ }
+ }
+
+ JITStubCall stubCall(this, cti_op_jless);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(Zero, regT0), target);
+}
+
+void JIT::emit_op_jless(Instruction* currentInstruction)
+{
+ unsigned op1 = currentInstruction[1].u.operand;
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+
+ JumpList notInt32Op1;
+ JumpList notInt32Op2;
+
+ // Character less.
+ if (isOperandConstantImmediateChar(op1)) {
+ emitLoad(op2, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));
+ JumpList failures;
+ emitLoadCharacterString(regT0, regT0, failures);
+ addSlowCase(failures);
+ addJump(branch32(GreaterThan, regT0, Imm32(asString(getConstantOperand(op1))->tryGetValue()[0])), target);
+ return;
+ }
+ if (isOperandConstantImmediateChar(op2)) {
+ emitLoad(op1, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));
+ JumpList failures;
+ emitLoadCharacterString(regT0, regT0, failures);
+ addSlowCase(failures);
+ addJump(branch32(LessThan, regT0, Imm32(asString(getConstantOperand(op2))->tryGetValue()[0])), target);
+ return;
+ }
+ if (isOperandConstantImmediateInt(op1)) {
+ emitLoad(op2, regT3, regT2);
+ notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+ addJump(branch32(GreaterThan, regT2, Imm32(getConstantOperand(op1).asInt32())), target);
+ } else if (isOperandConstantImmediateInt(op2)) {
+ emitLoad(op1, regT1, regT0);
+ notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ addJump(branch32(LessThan, regT0, Imm32(getConstantOperand(op2).asInt32())), target);
+ } else {
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+ notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+ addJump(branch32(LessThan, regT0, regT2), target);
+ }
+
+ if (!supportsFloatingPoint()) {
+ addSlowCase(notInt32Op1);
+ addSlowCase(notInt32Op2);
+ return;
+ }
+ Jump end = jump();
+
+ // Double less.
+ emitBinaryDoubleOp(op_jless, target, op1, op2, OperandTypes(), notInt32Op1, notInt32Op2, !isOperandConstantImmediateInt(op1), isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2));
+ end.link(this);
+}
+
+void JIT::emitSlow_op_jless(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned op1 = currentInstruction[1].u.operand;
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+
+ if (isOperandConstantImmediateChar(op1) || isOperandConstantImmediateChar(op2)) {
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ } else {
+ if (!supportsFloatingPoint()) {
+ if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // int32 check
+ } else {
+ if (!isOperandConstantImmediateInt(op1)) {
+ linkSlowCase(iter); // double check
+ linkSlowCase(iter); // int32 check
+ }
+ if (isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2))
+ linkSlowCase(iter); // double check
+ }
+ }
+ JITStubCall stubCall(this, cti_op_jless);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(NonZero, regT0), target);
+}
+
+void JIT::emit_op_jlesseq(Instruction* currentInstruction, bool invert)
+{
+ unsigned op1 = currentInstruction[1].u.operand;
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+
+ JumpList notInt32Op1;
+ JumpList notInt32Op2;
+
+ // Character less.
+ if (isOperandConstantImmediateChar(op1)) {
+ emitLoad(op2, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));
+ JumpList failures;
+ emitLoadCharacterString(regT0, regT0, failures);
+ addSlowCase(failures);
+ addJump(branch32(invert ? LessThan : GreaterThanOrEqual, regT0, Imm32(asString(getConstantOperand(op1))->tryGetValue()[0])), target);
+ return;
+ }
+ if (isOperandConstantImmediateChar(op2)) {
+ emitLoad(op1, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));
+ JumpList failures;
+ emitLoadCharacterString(regT0, regT0, failures);
+ addSlowCase(failures);
+ addJump(branch32(invert ? GreaterThan : LessThanOrEqual, regT0, Imm32(asString(getConstantOperand(op2))->tryGetValue()[0])), target);
+ return;
+ }
+ if (isOperandConstantImmediateInt(op1)) {
+ emitLoad(op2, regT3, regT2);
+ notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+ addJump(branch32(invert ? LessThan : GreaterThanOrEqual, regT2, Imm32(getConstantOperand(op1).asInt32())), target);
+ } else if (isOperandConstantImmediateInt(op2)) {
+ emitLoad(op1, regT1, regT0);
+ notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ addJump(branch32(invert ? GreaterThan : LessThanOrEqual, regT0, Imm32(getConstantOperand(op2).asInt32())), target);
+ } else {
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+ notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+ addJump(branch32(invert ? GreaterThan : LessThanOrEqual, regT0, regT2), target);
+ }
+
+ if (!supportsFloatingPoint()) {
+ addSlowCase(notInt32Op1);
+ addSlowCase(notInt32Op2);
+ return;
+ }
+ Jump end = jump();
+
+ // Double less.
+ emitBinaryDoubleOp(invert ? op_jnlesseq : op_jlesseq, target, op1, op2, OperandTypes(), notInt32Op1, notInt32Op2, !isOperandConstantImmediateInt(op1), isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2));
+ end.link(this);
+}
+
+void JIT::emitSlow_op_jlesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter, bool invert)
+{
+ unsigned op1 = currentInstruction[1].u.operand;
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+
+ if (isOperandConstantImmediateChar(op1) || isOperandConstantImmediateChar(op2)) {
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ } else {
+ if (!supportsFloatingPoint()) {
+ if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // int32 check
+ } else {
+ if (!isOperandConstantImmediateInt(op1)) {
+ linkSlowCase(iter); // double check
+ linkSlowCase(iter); // int32 check
+ }
+ if (isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2))
+ linkSlowCase(iter); // double check
+ }
+ }
+
+ JITStubCall stubCall(this, cti_op_jlesseq);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, regT0), target);
+}
+
+void JIT::emit_op_jnlesseq(Instruction* currentInstruction)
+{
+ emit_op_jlesseq(currentInstruction, true);
+}
+
+void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ emitSlow_op_jlesseq(currentInstruction, iter, true);
+}
+
+// LeftShift (<<)
+
+void JIT::emit_op_lshift(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ if (isOperandConstantImmediateInt(op2)) {
+ emitLoad(op1, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ lshift32(Imm32(getConstantOperand(op2).asInt32()), regT0);
+ emitStoreInt32(dst, regT0, dst == op1);
+ return;
+ }
+
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+ if (!isOperandConstantImmediateInt(op1))
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+ lshift32(regT2, regT0);
+ emitStoreInt32(dst, regT0, dst == op1 || dst == op2);
+}
+
+void JIT::emitSlow_op_lshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // int32 check
+
+ JITStubCall stubCall(this, cti_op_lshift);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call(dst);
+}
+
+// RightShift (>>) and UnsignedRightShift (>>>) helper
+
+void JIT::emitRightShift(Instruction* currentInstruction, bool isUnsigned)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+    // The slow case of rshift makes assumptions about which registers hold
+    // the shift arguments, so any changes here must be mirrored there as well.
+ if (isOperandConstantImmediateInt(op2)) {
+ emitLoad(op1, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ int shift = getConstantOperand(op2).asInt32();
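+        // ECMA-262 only uses the low five bits of the shift count, hence the
+        // & 0x1f masking below.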
+ if (isUnsigned) {
+ if (shift)
+ urshift32(Imm32(shift & 0x1f), regT0);
+            // An unsigned shift by a negative count, or by a multiple of 32,
+            // acts (essentially) as a toUint conversion, which can produce a
+            // value we can't represent as an immediate int.
+ if (shift < 0 || !(shift & 31))
+ addSlowCase(branch32(LessThan, regT0, Imm32(0)));
+ } else if (shift) { // signed right shift by zero is simply toInt conversion
+ rshift32(Imm32(shift & 0x1f), regT0);
+ }
+ emitStoreInt32(dst, regT0, dst == op1);
+ return;
+ }
+
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+ if (!isOperandConstantImmediateInt(op1))
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+ if (isUnsigned) {
+ urshift32(regT2, regT0);
+ addSlowCase(branch32(LessThan, regT0, Imm32(0)));
+ } else
+ rshift32(regT2, regT0);
+ emitStoreInt32(dst, regT0, dst == op1 || dst == op2);
+}
+
+void JIT::emitRightShiftSlowCase(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter, bool isUnsigned)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+ if (isOperandConstantImmediateInt(op2)) {
+ int shift = getConstantOperand(op2).asInt32();
+ // op1 = regT1:regT0
+ linkSlowCase(iter); // int32 check
+ if (supportsFloatingPointTruncate()) {
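+            // If op1 is actually a double, try truncating it to an int32 and
+            // redoing the shift inline before falling back to the stub call.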
+ JumpList failures;
+ failures.append(branch32(AboveOrEqual, regT1, Imm32(JSValue::LowestTag)));
+ emitLoadDouble(op1, fpRegT0);
+ failures.append(branchTruncateDoubleToInt32(fpRegT0, regT0));
+ if (isUnsigned) {
+ if (shift)
+ urshift32(Imm32(shift & 0x1f), regT0);
+ if (shift < 0 || !(shift & 31))
+ failures.append(branch32(LessThan, regT0, Imm32(0)));
+ } else if (shift)
+ rshift32(Imm32(shift & 0x1f), regT0);
+ emitStoreInt32(dst, regT0, false);
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_rshift));
+ failures.link(this);
+ }
+ if (isUnsigned && (shift < 0 || !(shift & 31)))
+ linkSlowCase(iter); // failed to box in hot path
+ } else {
+ // op1 = regT1:regT0
+ // op2 = regT3:regT2
+ if (!isOperandConstantImmediateInt(op1)) {
+ linkSlowCase(iter); // int32 check -- op1 is not an int
+ if (supportsFloatingPointTruncate()) {
+ Jump notDouble = branch32(Above, regT1, Imm32(JSValue::LowestTag)); // op1 is not a double
+ emitLoadDouble(op1, fpRegT0);
+ Jump notInt = branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)); // op2 is not an int
+ Jump cantTruncate = branchTruncateDoubleToInt32(fpRegT0, regT0);
+ if (isUnsigned)
+ urshift32(regT2, regT0);
+ else
+ rshift32(regT2, regT0);
+ emitStoreInt32(dst, regT0, false);
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_rshift));
+ notDouble.link(this);
+ notInt.link(this);
+ cantTruncate.link(this);
+ }
+ }
+
+ linkSlowCase(iter); // int32 check - op2 is not an int
+ if (isUnsigned)
+ linkSlowCase(iter); // Can't represent unsigned result as an immediate
+ }
+
+ JITStubCall stubCall(this, isUnsigned ? cti_op_urshift : cti_op_rshift);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call(dst);
+}
+
+// RightShift (>>)
+
+void JIT::emit_op_rshift(Instruction* currentInstruction)
+{
+ emitRightShift(currentInstruction, false);
+}
+
+void JIT::emitSlow_op_rshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ emitRightShiftSlowCase(currentInstruction, iter, false);
+}
+
+// UnsignedRightShift (>>>)
+
+void JIT::emit_op_urshift(Instruction* currentInstruction)
+{
+ emitRightShift(currentInstruction, true);
+}
+
+void JIT::emitSlow_op_urshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ emitRightShiftSlowCase(currentInstruction, iter, true);
+}
+
+// BitAnd (&)
+
+void JIT::emit_op_bitand(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ unsigned op;
+ int32_t constant;
+ if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
+ emitLoad(op, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ and32(Imm32(constant), regT0);
+ emitStoreInt32(dst, regT0, (op == dst));
+ return;
+ }
+
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+ and32(regT2, regT0);
+ emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
+}
+
+void JIT::emitSlow_op_bitand(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // int32 check
+
+ JITStubCall stubCall(this, cti_op_bitand);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call(dst);
+}
+
+// BitOr (|)
+
+void JIT::emit_op_bitor(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ unsigned op;
+ int32_t constant;
+ if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
+ emitLoad(op, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ or32(Imm32(constant), regT0);
+ emitStoreInt32(dst, regT0, (op == dst));
+ return;
+ }
+
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+ or32(regT2, regT0);
+ emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
+}
+
+void JIT::emitSlow_op_bitor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // int32 check
+
+ JITStubCall stubCall(this, cti_op_bitor);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call(dst);
+}
+
+// BitXor (^)
+
+void JIT::emit_op_bitxor(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ unsigned op;
+ int32_t constant;
+ if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
+ emitLoad(op, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ xor32(Imm32(constant), regT0);
+ emitStoreInt32(dst, regT0, (op == dst));
+ return;
+ }
+
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+ xor32(regT2, regT0);
+ emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
+}
+
+void JIT::emitSlow_op_bitxor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // int32 check
+
+ JITStubCall stubCall(this, cti_op_bitxor);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call(dst);
+}
+
+// BitNot (~)
+
+void JIT::emit_op_bitnot(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src = currentInstruction[2].u.operand;
+
+ emitLoad(src, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+
+ not32(regT0);
+ emitStoreInt32(dst, regT0, (dst == src));
+}
+
+void JIT::emitSlow_op_bitnot(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+
+ linkSlowCase(iter); // int32 check
+
+ JITStubCall stubCall(this, cti_op_bitnot);
+ stubCall.addArgument(regT1, regT0);
+ stubCall.call(dst);
+}
+
+// PostInc (i++)
+
+void JIT::emit_op_post_inc(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned srcDst = currentInstruction[2].u.operand;
+
+ emitLoad(srcDst, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+
+ if (dst == srcDst) // x = x++ is a noop for ints.
+ return;
+
+ emitStoreInt32(dst, regT0);
+
+ addSlowCase(branchAdd32(Overflow, Imm32(1), regT0));
+ emitStoreInt32(srcDst, regT0, true);
+}
+
+void JIT::emitSlow_op_post_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned srcDst = currentInstruction[2].u.operand;
+
+ linkSlowCase(iter); // int32 check
+ if (dst != srcDst)
+ linkSlowCase(iter); // overflow check
+
+ JITStubCall stubCall(this, cti_op_post_inc);
+ stubCall.addArgument(srcDst);
+ stubCall.addArgument(Imm32(srcDst));
+ stubCall.call(dst);
+}
+
+// PostDec (i--)
+
+void JIT::emit_op_post_dec(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned srcDst = currentInstruction[2].u.operand;
+
+ emitLoad(srcDst, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+
+ if (dst == srcDst) // x = x-- is a noop for ints.
+ return;
+
+ emitStoreInt32(dst, regT0);
+
+ addSlowCase(branchSub32(Overflow, Imm32(1), regT0));
+ emitStoreInt32(srcDst, regT0, true);
+}
+
+void JIT::emitSlow_op_post_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned srcDst = currentInstruction[2].u.operand;
+
+ linkSlowCase(iter); // int32 check
+ if (dst != srcDst)
+ linkSlowCase(iter); // overflow check
+
+ JITStubCall stubCall(this, cti_op_post_dec);
+ stubCall.addArgument(srcDst);
+ stubCall.addArgument(Imm32(srcDst));
+ stubCall.call(dst);
+}
+
+// PreInc (++i)
+
+void JIT::emit_op_pre_inc(Instruction* currentInstruction)
+{
+ unsigned srcDst = currentInstruction[1].u.operand;
+
+ emitLoad(srcDst, regT1, regT0);
+
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ addSlowCase(branchAdd32(Overflow, Imm32(1), regT0));
+ emitStoreInt32(srcDst, regT0, true);
+}
+
+void JIT::emitSlow_op_pre_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned srcDst = currentInstruction[1].u.operand;
+
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // overflow check
+
+ JITStubCall stubCall(this, cti_op_pre_inc);
+ stubCall.addArgument(srcDst);
+ stubCall.call(srcDst);
+}
+
+// PreDec (--i)
+
+void JIT::emit_op_pre_dec(Instruction* currentInstruction)
+{
+ unsigned srcDst = currentInstruction[1].u.operand;
+
+ emitLoad(srcDst, regT1, regT0);
+
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ addSlowCase(branchSub32(Overflow, Imm32(1), regT0));
+ emitStoreInt32(srcDst, regT0, true);
+}
+
+void JIT::emitSlow_op_pre_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned srcDst = currentInstruction[1].u.operand;
+
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // overflow check
+
+ JITStubCall stubCall(this, cti_op_pre_dec);
+ stubCall.addArgument(srcDst);
+ stubCall.call(srcDst);
+}
+
+// Addition (+)
+
+void JIT::emit_op_add(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+
+ if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
+ JITStubCall stubCall(this, cti_op_add);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call(dst);
+ return;
+ }
+
+ JumpList notInt32Op1;
+ JumpList notInt32Op2;
+
+ unsigned op;
+ int32_t constant;
+ if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
+ emitAdd32Constant(dst, op, constant, op == op1 ? types.first() : types.second());
+ return;
+ }
+
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+ notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+
+ // Int32 case.
+ addSlowCase(branchAdd32(Overflow, regT2, regT0));
+ emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
+
+ if (!supportsFloatingPoint()) {
+ addSlowCase(notInt32Op1);
+ addSlowCase(notInt32Op2);
+ return;
+ }
+ Jump end = jump();
+
+ // Double case.
+ emitBinaryDoubleOp(op_add, dst, op1, op2, types, notInt32Op1, notInt32Op2);
+ end.link(this);
+}
+
+void JIT::emitAdd32Constant(unsigned dst, unsigned op, int32_t constant, ResultType opType)
+{
+ // Int32 case.
+ emitLoad(op, regT1, regT0);
+ Jump notInt32 = branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag));
+ addSlowCase(branchAdd32(Overflow, Imm32(constant), regT0));
+ emitStoreInt32(dst, regT0, (op == dst));
+
+ // Double case.
+ if (!supportsFloatingPoint()) {
+ addSlowCase(notInt32);
+ return;
+ }
+ Jump end = jump();
+
+ notInt32.link(this);
+ if (!opType.definitelyIsNumber())
+ addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));
+ move(Imm32(constant), regT2);
+ convertInt32ToDouble(regT2, fpRegT0);
+ emitLoadDouble(op, fpRegT1);
+ addDouble(fpRegT1, fpRegT0);
+ emitStoreDouble(dst, fpRegT0);
+
+ end.link(this);
+}
+
+void JIT::emitSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+
+ if (!types.first().mightBeNumber() || !types.second().mightBeNumber())
+ return;
+
+ unsigned op;
+ int32_t constant;
+ if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
+ linkSlowCase(iter); // overflow check
+
+ if (!supportsFloatingPoint())
+ linkSlowCase(iter); // non-sse case
+ else {
+ ResultType opType = op == op1 ? types.first() : types.second();
+ if (!opType.definitelyIsNumber())
+ linkSlowCase(iter); // double check
+ }
+ } else {
+ linkSlowCase(iter); // overflow check
+
+ if (!supportsFloatingPoint()) {
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // int32 check
+ } else {
+ if (!types.first().definitelyIsNumber())
+ linkSlowCase(iter); // double check
+
+ if (!types.second().definitelyIsNumber()) {
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // double check
+ }
+ }
+ }
+
+ JITStubCall stubCall(this, cti_op_add);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call(dst);
+}
+
+// Subtraction (-)
+
+void JIT::emit_op_sub(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+
+ JumpList notInt32Op1;
+ JumpList notInt32Op2;
+
+ if (isOperandConstantImmediateInt(op2)) {
+ emitSub32Constant(dst, op1, getConstantOperand(op2).asInt32(), types.first());
+ return;
+ }
+
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+ notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+
+ // Int32 case.
+ addSlowCase(branchSub32(Overflow, regT2, regT0));
+ emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
+
+ if (!supportsFloatingPoint()) {
+ addSlowCase(notInt32Op1);
+ addSlowCase(notInt32Op2);
+ return;
+ }
+ Jump end = jump();
+
+ // Double case.
+ emitBinaryDoubleOp(op_sub, dst, op1, op2, types, notInt32Op1, notInt32Op2);
+ end.link(this);
+}
+
+void JIT::emitSub32Constant(unsigned dst, unsigned op, int32_t constant, ResultType opType)
+{
+ // Int32 case.
+ emitLoad(op, regT1, regT0);
+ Jump notInt32 = branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag));
+ addSlowCase(branchSub32(Overflow, Imm32(constant), regT0));
+ emitStoreInt32(dst, regT0, (op == dst));
+
+ // Double case.
+ if (!supportsFloatingPoint()) {
+ addSlowCase(notInt32);
+ return;
+ }
+ Jump end = jump();
+
+ notInt32.link(this);
+ if (!opType.definitelyIsNumber())
+ addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));
+ move(Imm32(constant), regT2);
+ convertInt32ToDouble(regT2, fpRegT0);
+ emitLoadDouble(op, fpRegT1);
+ subDouble(fpRegT0, fpRegT1);
+ emitStoreDouble(dst, fpRegT1);
+
+ end.link(this);
+}
+
+void JIT::emitSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+
+ if (isOperandConstantImmediateInt(op2)) {
+ linkSlowCase(iter); // overflow check
+
+ if (!supportsFloatingPoint() || !types.first().definitelyIsNumber())
+ linkSlowCase(iter); // int32 or double check
+ } else {
+ linkSlowCase(iter); // overflow check
+
+ if (!supportsFloatingPoint()) {
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // int32 check
+ } else {
+ if (!types.first().definitelyIsNumber())
+ linkSlowCase(iter); // double check
+
+ if (!types.second().definitelyIsNumber()) {
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // double check
+ }
+ }
+ }
+
+ JITStubCall stubCall(this, cti_op_sub);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call(dst);
+}
+
+void JIT::emitBinaryDoubleOp(OpcodeID opcodeID, unsigned dst, unsigned op1, unsigned op2, OperandTypes types, JumpList& notInt32Op1, JumpList& notInt32Op2, bool op1IsInRegisters, bool op2IsInRegisters)
+{
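+    // Shared double-path code generation for add/sub/mul/div and the jless
+    // family. Case 1 below handles op1 not being an int32 (op2 unknown);
+    // case 2 handles op1 being an int32 while op2 is not.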
+ JumpList end;
+
+ if (!notInt32Op1.empty()) {
+ // Double case 1: Op1 is not int32; Op2 is unknown.
+ notInt32Op1.link(this);
+
+ ASSERT(op1IsInRegisters);
+
+ // Verify Op1 is double.
+ if (!types.first().definitelyIsNumber())
+ addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));
+
+ if (!op2IsInRegisters)
+ emitLoad(op2, regT3, regT2);
+
+ Jump doubleOp2 = branch32(Below, regT3, Imm32(JSValue::LowestTag));
+
+ if (!types.second().definitelyIsNumber())
+ addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+
+ convertInt32ToDouble(regT2, fpRegT0);
+ Jump doTheMath = jump();
+
+ // Load Op2 as double into double register.
+ doubleOp2.link(this);
+ emitLoadDouble(op2, fpRegT0);
+
+ // Do the math.
+ doTheMath.link(this);
+ switch (opcodeID) {
+ case op_mul:
+ emitLoadDouble(op1, fpRegT2);
+ mulDouble(fpRegT2, fpRegT0);
+ emitStoreDouble(dst, fpRegT0);
+ break;
+ case op_add:
+ emitLoadDouble(op1, fpRegT2);
+ addDouble(fpRegT2, fpRegT0);
+ emitStoreDouble(dst, fpRegT0);
+ break;
+ case op_sub:
+ emitLoadDouble(op1, fpRegT1);
+ subDouble(fpRegT0, fpRegT1);
+ emitStoreDouble(dst, fpRegT1);
+ break;
+ case op_div:
+ emitLoadDouble(op1, fpRegT1);
+ divDouble(fpRegT0, fpRegT1);
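+            // divDouble(src, dest) computes dest /= src, giving fpRegT1 = op1 / op2.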
+ emitStoreDouble(dst, fpRegT1);
+ break;
+ case op_jnless:
+ emitLoadDouble(op1, fpRegT2);
+ addJump(branchDouble(DoubleLessThanOrEqualOrUnordered, fpRegT0, fpRegT2), dst);
+ break;
+ case op_jless:
+ emitLoadDouble(op1, fpRegT2);
+ addJump(branchDouble(DoubleLessThan, fpRegT2, fpRegT0), dst);
+ break;
+ case op_jlesseq:
+ emitLoadDouble(op1, fpRegT2);
+ addJump(branchDouble(DoubleLessThanOrEqual, fpRegT2, fpRegT0), dst);
+ break;
+ case op_jnlesseq:
+ emitLoadDouble(op1, fpRegT2);
+ addJump(branchDouble(DoubleLessThanOrUnordered, fpRegT0, fpRegT2), dst);
+ break;
+ default:
+ ASSERT_NOT_REACHED();
+ }
+
+ if (!notInt32Op2.empty())
+ end.append(jump());
+ }
+
+ if (!notInt32Op2.empty()) {
+ // Double case 2: Op1 is int32; Op2 is not int32.
+ notInt32Op2.link(this);
+
+ ASSERT(op2IsInRegisters);
+
+ if (!op1IsInRegisters)
+ emitLoadPayload(op1, regT0);
+
+ convertInt32ToDouble(regT0, fpRegT0);
+
+ // Verify op2 is double.
+ if (!types.second().definitelyIsNumber())
+ addSlowCase(branch32(Above, regT3, Imm32(JSValue::LowestTag)));
+
+ // Do the math.
+ switch (opcodeID) {
+ case op_mul:
+ emitLoadDouble(op2, fpRegT2);
+ mulDouble(fpRegT2, fpRegT0);
+ emitStoreDouble(dst, fpRegT0);
+ break;
+ case op_add:
+ emitLoadDouble(op2, fpRegT2);
+ addDouble(fpRegT2, fpRegT0);
+ emitStoreDouble(dst, fpRegT0);
+ break;
+ case op_sub:
+ emitLoadDouble(op2, fpRegT2);
+ subDouble(fpRegT2, fpRegT0);
+ emitStoreDouble(dst, fpRegT0);
+ break;
+ case op_div:
+ emitLoadDouble(op2, fpRegT2);
+ divDouble(fpRegT2, fpRegT0);
+ emitStoreDouble(dst, fpRegT0);
+ break;
+ case op_jnless:
+ emitLoadDouble(op2, fpRegT1);
+ addJump(branchDouble(DoubleLessThanOrEqualOrUnordered, fpRegT1, fpRegT0), dst);
+ break;
+ case op_jless:
+ emitLoadDouble(op2, fpRegT1);
+ addJump(branchDouble(DoubleLessThan, fpRegT0, fpRegT1), dst);
+ break;
+ case op_jnlesseq:
+ emitLoadDouble(op2, fpRegT1);
+ addJump(branchDouble(DoubleLessThanOrUnordered, fpRegT1, fpRegT0), dst);
+ break;
+ case op_jlesseq:
+ emitLoadDouble(op2, fpRegT1);
+ addJump(branchDouble(DoubleLessThanOrEqual, fpRegT0, fpRegT1), dst);
+ break;
+ default:
+ ASSERT_NOT_REACHED();
+ }
+ }
+
+ end.link(this);
+}
+
+// Multiplication (*)
+
+void JIT::emit_op_mul(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+
+ JumpList notInt32Op1;
+ JumpList notInt32Op2;
+
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+ notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+
+ // Int32 case.
+ move(regT0, regT3);
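+    // Keep a copy of op1's payload in regT3 so the slow path can check the
+    // operand signs when the product is 0 (to produce -0 where required).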
+ addSlowCase(branchMul32(Overflow, regT2, regT0));
+ addSlowCase(branchTest32(Zero, regT0));
+ emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
+
+ if (!supportsFloatingPoint()) {
+ addSlowCase(notInt32Op1);
+ addSlowCase(notInt32Op2);
+ return;
+ }
+ Jump end = jump();
+
+ // Double case.
+ emitBinaryDoubleOp(op_mul, dst, op1, op2, types, notInt32Op1, notInt32Op2);
+ end.link(this);
+}
+
+void JIT::emitSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+
+ Jump overflow = getSlowCase(iter); // overflow check
+ linkSlowCase(iter); // zero result check
+
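+    // A zero product is genuinely +0 only if neither operand was negative;
+    // otherwise the correct result is -0, which the stub call produces.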
+ Jump negZero = branchOr32(Signed, regT2, regT3);
+ emitStoreInt32(dst, Imm32(0), (op1 == dst || op2 == dst));
+
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_mul));
+
+ negZero.link(this);
+ overflow.link(this);
+
+    if (!supportsFloatingPoint()) {
+        linkSlowCase(iter); // int32 check
+        linkSlowCase(iter); // int32 check
+    } else {
+ if (!types.first().definitelyIsNumber())
+ linkSlowCase(iter); // double check
+
+ if (!types.second().definitelyIsNumber()) {
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // double check
+ }
+ }
+
+ Label jitStubCall(this);
+ JITStubCall stubCall(this, cti_op_mul);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call(dst);
+}
+
+// Division (/)
+
+void JIT::emit_op_div(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+
+ if (!supportsFloatingPoint()) {
+ addSlowCase(jump());
+ return;
+ }
+
+ // Int32 divide.
+ JumpList notInt32Op1;
+ JumpList notInt32Op2;
+
+ JumpList end;
+
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+
+ notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+
+ convertInt32ToDouble(regT0, fpRegT0);
+ convertInt32ToDouble(regT2, fpRegT1);
+ divDouble(fpRegT1, fpRegT0);
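+    // Divide as doubles, then store the quotient as an int32 when it converts
+    // back exactly; quotients like 1/2 stay doubles.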
+
+ JumpList doubleResult;
+ branchConvertDoubleToInt32(fpRegT0, regT0, doubleResult, fpRegT1);
+
+ // Int32 result.
+ emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
+ end.append(jump());
+
+ // Double result.
+ doubleResult.link(this);
+ emitStoreDouble(dst, fpRegT0);
+ end.append(jump());
+
+ // Double divide.
+ emitBinaryDoubleOp(op_div, dst, op1, op2, types, notInt32Op1, notInt32Op2);
+ end.link(this);
+}
+
+void JIT::emitSlow_op_div(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+
+ if (!supportsFloatingPoint())
+ linkSlowCase(iter);
+ else {
+ if (!types.first().definitelyIsNumber())
+ linkSlowCase(iter); // double check
+
+ if (!types.second().definitelyIsNumber()) {
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // double check
+ }
+ }
+
+ JITStubCall stubCall(this, cti_op_div);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call(dst);
+}
+
+// Mod (%)
+
+/* ------------------------------ BEGIN: OP_MOD ------------------------------ */
+
+#if CPU(X86) || CPU(X86_64) || CPU(MIPS)
+
+void JIT::emit_op_mod(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+#if CPU(X86) || CPU(X86_64)
+ // Make sure registers are correct for x86 IDIV instructions.
+ ASSERT(regT0 == X86Registers::eax);
+ ASSERT(regT1 == X86Registers::edx);
+ ASSERT(regT2 == X86Registers::ecx);
+ ASSERT(regT3 == X86Registers::ebx);
+#endif
+
+ if (isOperandConstantImmediateInt(op2) && getConstantOperand(op2).asInt32() != 0) {
+ emitLoad(op1, regT1, regT0);
+ move(Imm32(getConstantOperand(op2).asInt32()), regT2);
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ if (getConstantOperand(op2).asInt32() == -1)
+ addSlowCase(branch32(Equal, regT0, Imm32(0x80000000))); // -2147483648 / -1 => EXC_ARITHMETIC
+ } else {
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+
+ addSlowCase(branch32(Equal, regT0, Imm32(0x80000000))); // -2147483648 / -1 => EXC_ARITHMETIC
+ addSlowCase(branch32(Equal, regT2, Imm32(0))); // divide by 0
+ }
+
+    move(regT0, regT3); // Save the dividend payload so we can check its sign if the remainder is 0.
+#if CPU(X86) || CPU(X86_64)
+ m_assembler.cdq();
+ m_assembler.idivl_r(regT2);
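+    // cdq sign-extends eax into edx; idiv then leaves the quotient in eax
+    // (regT0) and the remainder in edx (regT1).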
+#elif CPU(MIPS)
+ m_assembler.div(regT0, regT2);
+ m_assembler.mfhi(regT1);
+#endif
+
+ // If the remainder is zero and the dividend is negative, the result is -0.
+ Jump storeResult1 = branchTest32(NonZero, regT1);
+ Jump storeResult2 = branchTest32(Zero, regT3, Imm32(0x80000000)); // not negative
+ emitStore(dst, jsNumber(-0.0));
+ Jump end = jump();
+
+ storeResult1.link(this);
+ storeResult2.link(this);
+ emitStoreInt32(dst, regT1, (op1 == dst || op2 == dst));
+ end.link(this);
+}
+
+void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ if (isOperandConstantImmediateInt(op2) && getConstantOperand(op2).asInt32() != 0) {
+ linkSlowCase(iter); // int32 check
+ if (getConstantOperand(op2).asInt32() == -1)
+ linkSlowCase(iter); // 0x80000000 check
+ } else {
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // 0 check
+ linkSlowCase(iter); // 0x80000000 check
+ }
+
+ JITStubCall stubCall(this, cti_op_mod);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call(dst);
+}
+
+#else // CPU(X86) || CPU(X86_64) || CPU(MIPS)
+
+void JIT::emit_op_mod(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+#if ENABLE(JIT_USE_SOFT_MODULO)
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+
+ addSlowCase(branch32(Equal, regT2, Imm32(0)));
+
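+    // The soft modulo thunk computes the remainder without a hardware divide
+    // instruction; the result comes back in regT0.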
+ emitNakedCall(m_globalData->jitStubs->ctiSoftModulo());
+
+ emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
+#else
+ JITStubCall stubCall(this, cti_op_mod);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call(dst);
+#endif
+}
+
+void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ UNUSED_PARAM(currentInstruction);
+ UNUSED_PARAM(iter);
+#if ENABLE(JIT_USE_SOFT_MODULO)
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, cti_op_mod);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call(result);
+#else
+ ASSERT_NOT_REACHED();
+#endif
+}
+
+#endif // CPU(X86) || CPU(X86_64) || CPU(MIPS)
+
+/* ------------------------------ END: OP_MOD ------------------------------ */
+
+} // namespace JSC
+
+#endif // USE(JSVALUE32_64)
+#endif // ENABLE(JIT)
diff --git a/JavaScriptCore/jit/JITCall.cpp b/JavaScriptCore/jit/JITCall.cpp
index 179aad7..fdd0d47 100644
--- a/JavaScriptCore/jit/JITCall.cpp
+++ b/JavaScriptCore/jit/JITCall.cpp
@@ -24,9 +24,10 @@
*/
#include "config.h"
-#include "JIT.h"
#if ENABLE(JIT)
+#if USE(JSVALUE64)
+#include "JIT.h"
#include "CodeBlock.h"
#include "JITInlineMethods.h"
@@ -45,448 +46,29 @@ using namespace std;
namespace JSC {
-#if USE(JSVALUE32_64)
-
-void JIT::compileOpCallInitializeCallFrame()
-{
- // regT0 holds callee, regT1 holds argCount
- store32(regT1, Address(callFrameRegister, RegisterFile::ArgumentCount * static_cast<int>(sizeof(Register))));
-
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_data) + OBJECT_OFFSETOF(ScopeChain, m_node)), regT1); // scopeChain
-
- emitStore(static_cast<unsigned>(RegisterFile::OptionalCalleeArguments), JSValue());
- storePtr(regT0, Address(callFrameRegister, RegisterFile::Callee * static_cast<int>(sizeof(Register)))); // callee
- storePtr(regT1, Address(callFrameRegister, RegisterFile::ScopeChain * static_cast<int>(sizeof(Register)))); // scopeChain
-}
-
-void JIT::compileOpCallSetupArgs(Instruction* instruction)
-{
- int argCount = instruction[3].u.operand;
- int registerOffset = instruction[4].u.operand;
-
- emitPutJITStubArg(regT1, regT0, 0);
- emitPutJITStubArgConstant(registerOffset, 1);
- emitPutJITStubArgConstant(argCount, 2);
-}
-
-void JIT::compileOpConstructSetupArgs(Instruction* instruction)
-{
- int argCount = instruction[3].u.operand;
- int registerOffset = instruction[4].u.operand;
- int proto = instruction[5].u.operand;
- int thisRegister = instruction[6].u.operand;
-
- emitPutJITStubArg(regT1, regT0, 0);
- emitPutJITStubArgConstant(registerOffset, 1);
- emitPutJITStubArgConstant(argCount, 2);
- emitPutJITStubArgFromVirtualRegister(proto, 3, regT2, regT3);
- emitPutJITStubArgConstant(thisRegister, 4);
-}
-
-void JIT::compileOpCallVarargsSetupArgs(Instruction*)
-{
- emitPutJITStubArg(regT1, regT0, 0);
- emitPutJITStubArg(regT3, 1); // registerOffset
- emitPutJITStubArg(regT2, 2); // argCount
-}
-
-void JIT::compileOpCallVarargs(Instruction* instruction)
-{
- int dst = instruction[1].u.operand;
- int callee = instruction[2].u.operand;
- int argCountRegister = instruction[3].u.operand;
- int registerOffset = instruction[4].u.operand;
-
- emitLoad(callee, regT1, regT0);
- emitLoadPayload(argCountRegister, regT2); // argCount
- addPtr(Imm32(registerOffset), regT2, regT3); // registerOffset
-
- compileOpCallVarargsSetupArgs(instruction);
-
- emitJumpSlowCaseIfNotJSCell(callee, regT1);
- addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsFunctionVPtr)));
-
- // Speculatively roll the callframe, assuming argCount will match the arity.
- mul32(Imm32(sizeof(Register)), regT3, regT3);
- addPtr(callFrameRegister, regT3);
- storePtr(callFrameRegister, Address(regT3, RegisterFile::CallerFrame * static_cast<int>(sizeof(Register))));
- move(regT3, callFrameRegister);
-
- move(regT2, regT1); // argCount
-
- emitNakedCall(m_globalData->jitStubs.ctiVirtualCall());
-
- emitStore(dst, regT1, regT0);
-
- sampleCodeBlock(m_codeBlock);
-}
-
-void JIT::compileOpCallVarargsSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- int dst = instruction[1].u.operand;
- int callee = instruction[2].u.operand;
-
- linkSlowCaseIfNotJSCell(iter, callee);
- linkSlowCase(iter);
-
- JITStubCall stubCall(this, cti_op_call_NotJSFunction);
- stubCall.call(dst); // In the interpreter, the callee puts the return value in dst.
-
- map(m_bytecodeIndex + OPCODE_LENGTH(op_call_varargs), dst, regT1, regT0);
- sampleCodeBlock(m_codeBlock);
-}
-
-void JIT::emit_op_ret(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
-
- // We could JIT generate the deref, only calling out to C when the refcount hits zero.
- if (m_codeBlock->needsFullScopeChain())
- JITStubCall(this, cti_op_ret_scopeChain).call();
-
- emitLoad(dst, regT1, regT0);
- emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT2);
- emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
-
- restoreReturnAddressBeforeReturn(regT2);
- ret();
-}
-
-void JIT::emit_op_construct_verify(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
-
- emitLoad(dst, regT1, regT0);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
- addSlowCase(branch32(NotEqual, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo) + OBJECT_OFFSETOF(TypeInfo, m_type)), Imm32(ObjectType)));
-}
-
-void JIT::emitSlow_op_construct_verify(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned src = currentInstruction[2].u.operand;
-
- linkSlowCase(iter);
- linkSlowCase(iter);
- emitLoad(src, regT1, regT0);
- emitStore(dst, regT1, regT0);
-}
-
-void JIT::emitSlow_op_call(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- compileOpCallSlowCase(currentInstruction, iter, m_callLinkInfoIndex++, op_call);
-}
-
-void JIT::emitSlow_op_call_eval(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- compileOpCallSlowCase(currentInstruction, iter, m_callLinkInfoIndex++, op_call_eval);
-}
-
-void JIT::emitSlow_op_call_varargs(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- compileOpCallVarargsSlowCase(currentInstruction, iter);
-}
-
-void JIT::emitSlow_op_construct(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- compileOpCallSlowCase(currentInstruction, iter, m_callLinkInfoIndex++, op_construct);
-}
-
-void JIT::emit_op_call(Instruction* currentInstruction)
-{
- compileOpCall(op_call, currentInstruction, m_callLinkInfoIndex++);
-}
-
-void JIT::emit_op_call_eval(Instruction* currentInstruction)
-{
- compileOpCall(op_call_eval, currentInstruction, m_callLinkInfoIndex++);
-}
-
-void JIT::emit_op_load_varargs(Instruction* currentInstruction)
-{
- int argCountDst = currentInstruction[1].u.operand;
- int argsOffset = currentInstruction[2].u.operand;
-
- JITStubCall stubCall(this, cti_op_load_varargs);
- stubCall.addArgument(Imm32(argsOffset));
- stubCall.call();
- // Stores a naked int32 in the register file.
- store32(returnValueRegister, Address(callFrameRegister, argCountDst * sizeof(Register)));
-}
-
-void JIT::emit_op_call_varargs(Instruction* currentInstruction)
-{
- compileOpCallVarargs(currentInstruction);
-}
-
-void JIT::emit_op_construct(Instruction* currentInstruction)
-{
- compileOpCall(op_construct, currentInstruction, m_callLinkInfoIndex++);
-}
-
-#if !ENABLE(JIT_OPTIMIZE_CALL)
-
-/* ------------------------------ BEGIN: !ENABLE(JIT_OPTIMIZE_CALL) ------------------------------ */
-
-void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned)
-{
- int dst = instruction[1].u.operand;
- int callee = instruction[2].u.operand;
- int argCount = instruction[3].u.operand;
- int registerOffset = instruction[4].u.operand;
-
- Jump wasEval;
- if (opcodeID == op_call_eval) {
- JITStubCall stubCall(this, cti_op_call_eval);
- stubCall.addArgument(callee);
- stubCall.addArgument(JIT::Imm32(registerOffset));
- stubCall.addArgument(JIT::Imm32(argCount));
- stubCall.call();
- wasEval = branch32(NotEqual, regT1, Imm32(JSValue::EmptyValueTag));
- }
-
- emitLoad(callee, regT1, regT0);
-
- if (opcodeID == op_call)
- compileOpCallSetupArgs(instruction);
- else if (opcodeID == op_construct)
- compileOpConstructSetupArgs(instruction);
-
- emitJumpSlowCaseIfNotJSCell(callee, regT1);
- addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsFunctionVPtr)));
-
- // First, in the case of a construct, allocate the new object.
- if (opcodeID == op_construct) {
- JITStubCall(this, cti_op_construct_JSConstruct).call(registerOffset - RegisterFile::CallFrameHeaderSize - argCount);
- emitLoad(callee, regT1, regT0);
- }
-
- // Speculatively roll the callframe, assuming argCount will match the arity.
- storePtr(callFrameRegister, Address(callFrameRegister, (RegisterFile::CallerFrame + registerOffset) * static_cast<int>(sizeof(Register))));
- addPtr(Imm32(registerOffset * static_cast<int>(sizeof(Register))), callFrameRegister);
- move(Imm32(argCount), regT1);
-
- emitNakedCall(m_globalData->jitStubs.ctiVirtualCall());
-
- if (opcodeID == op_call_eval)
- wasEval.link(this);
-
- emitStore(dst, regT1, regT0);
-
- sampleCodeBlock(m_codeBlock);
-}
-
-void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter, unsigned, OpcodeID opcodeID)
-{
- int dst = instruction[1].u.operand;
- int callee = instruction[2].u.operand;
-
- linkSlowCaseIfNotJSCell(iter, callee);
- linkSlowCase(iter);
-
- JITStubCall stubCall(this, opcodeID == op_construct ? cti_op_construct_NotJSConstruct : cti_op_call_NotJSFunction);
- stubCall.call(dst); // In the interpreter, the callee puts the return value in dst.
-
- sampleCodeBlock(m_codeBlock);
-}
-
-#else // !ENABLE(JIT_OPTIMIZE_CALL)
-
-/* ------------------------------ BEGIN: ENABLE(JIT_OPTIMIZE_CALL) ------------------------------ */
-
-void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned callLinkInfoIndex)
-{
- int dst = instruction[1].u.operand;
- int callee = instruction[2].u.operand;
- int argCount = instruction[3].u.operand;
- int registerOffset = instruction[4].u.operand;
-
- Jump wasEval;
- if (opcodeID == op_call_eval) {
- JITStubCall stubCall(this, cti_op_call_eval);
- stubCall.addArgument(callee);
- stubCall.addArgument(JIT::Imm32(registerOffset));
- stubCall.addArgument(JIT::Imm32(argCount));
- stubCall.call();
- wasEval = branch32(NotEqual, regT1, Imm32(JSValue::EmptyValueTag));
- }
-
- emitLoad(callee, regT1, regT0);
-
- DataLabelPtr addressOfLinkedFunctionCheck;
-
- BEGIN_UNINTERRUPTED_SEQUENCE(sequenceOpCall);
-
- Jump jumpToSlow = branchPtrWithPatch(NotEqual, regT0, addressOfLinkedFunctionCheck, ImmPtr(0));
-
- END_UNINTERRUPTED_SEQUENCE(sequenceOpCall);
-
- addSlowCase(jumpToSlow);
- ASSERT(differenceBetween(addressOfLinkedFunctionCheck, jumpToSlow) == patchOffsetOpCallCompareToJump);
- m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck;
-
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));
-
-    // The following is the fast case, only used when a callee can be linked.
-
- // In the case of OpConstruct, call out to a cti_ function to create the new object.
- if (opcodeID == op_construct) {
- int proto = instruction[5].u.operand;
- int thisRegister = instruction[6].u.operand;
-
- JITStubCall stubCall(this, cti_op_construct_JSConstruct);
- stubCall.addArgument(regT1, regT0);
- stubCall.addArgument(Imm32(0)); // FIXME: Remove this unused JITStub argument.
- stubCall.addArgument(Imm32(0)); // FIXME: Remove this unused JITStub argument.
- stubCall.addArgument(proto);
- stubCall.call(thisRegister);
-
- emitLoad(callee, regT1, regT0);
- }
-
- // Fast version of stack frame initialization, directly relative to edi.
- // Note that this omits to set up RegisterFile::CodeBlock, which is set in the callee
- emitStore(registerOffset + RegisterFile::OptionalCalleeArguments, JSValue());
- emitStore(registerOffset + RegisterFile::Callee, regT1, regT0);
-
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_data) + OBJECT_OFFSETOF(ScopeChain, m_node)), regT1); // newScopeChain
- store32(Imm32(argCount), Address(callFrameRegister, (registerOffset + RegisterFile::ArgumentCount) * static_cast<int>(sizeof(Register))));
- storePtr(callFrameRegister, Address(callFrameRegister, (registerOffset + RegisterFile::CallerFrame) * static_cast<int>(sizeof(Register))));
- storePtr(regT1, Address(callFrameRegister, (registerOffset + RegisterFile::ScopeChain) * static_cast<int>(sizeof(Register))));
- addPtr(Imm32(registerOffset * sizeof(Register)), callFrameRegister);
-
- // Call to the callee
- m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedCall();
-
- if (opcodeID == op_call_eval)
- wasEval.link(this);
-
- // Put the return value in dst. In the interpreter, op_ret does this.
- emitStore(dst, regT1, regT0);
- map(m_bytecodeIndex + opcodeLengths[opcodeID], dst, regT1, regT0);
-
- sampleCodeBlock(m_codeBlock);
-}
-
-void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter, unsigned callLinkInfoIndex, OpcodeID opcodeID)
-{
- int dst = instruction[1].u.operand;
- int callee = instruction[2].u.operand;
- int argCount = instruction[3].u.operand;
- int registerOffset = instruction[4].u.operand;
-
- linkSlowCase(iter);
- linkSlowCase(iter);
-
- // The arguments have been set up on the hot path for op_call_eval
- if (opcodeID == op_call)
- compileOpCallSetupArgs(instruction);
- else if (opcodeID == op_construct)
- compileOpConstructSetupArgs(instruction);
-
- // Fast check for JS function.
- Jump callLinkFailNotObject = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));
- Jump callLinkFailNotJSFunction = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsFunctionVPtr));
-
- // First, in the case of a construct, allocate the new object.
- if (opcodeID == op_construct) {
- JITStubCall(this, cti_op_construct_JSConstruct).call(registerOffset - RegisterFile::CallFrameHeaderSize - argCount);
- emitLoad(callee, regT1, regT0);
- }
-
- // Speculatively roll the callframe, assuming argCount will match the arity.
- storePtr(callFrameRegister, Address(callFrameRegister, (RegisterFile::CallerFrame + registerOffset) * static_cast<int>(sizeof(Register))));
- addPtr(Imm32(registerOffset * static_cast<int>(sizeof(Register))), callFrameRegister);
- move(Imm32(argCount), regT1);
-
- m_callStructureStubCompilationInfo[callLinkInfoIndex].callReturnLocation = emitNakedCall(m_globalData->jitStubs.ctiVirtualCallLink());
-
- // Put the return value in dst.
- emitStore(dst, regT1, regT0);
- sampleCodeBlock(m_codeBlock);
-
- // If not, we need an extra case in the if below!
- ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_call_eval));
-
- // Done! - return back to the hot path.
- if (opcodeID == op_construct)
- emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_construct));
- else
- emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_call));
-
- // This handles host functions
- callLinkFailNotObject.link(this);
- callLinkFailNotJSFunction.link(this);
- JITStubCall(this, opcodeID == op_construct ? cti_op_construct_NotJSConstruct : cti_op_call_NotJSFunction).call();
-
- emitStore(dst, regT1, regT0);
- sampleCodeBlock(m_codeBlock);
-}
-
-/* ------------------------------ END: !ENABLE / ENABLE(JIT_OPTIMIZE_CALL) ------------------------------ */
-
-#endif // !ENABLE(JIT_OPTIMIZE_CALL)
-
-#else // USE(JSVALUE32_64)
-
void JIT::compileOpCallInitializeCallFrame()
{
store32(regT1, Address(callFrameRegister, RegisterFile::ArgumentCount * static_cast<int>(sizeof(Register))));
-
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_data) + OBJECT_OFFSETOF(ScopeChain, m_node)), regT1); // newScopeChain
-
- storePtr(ImmPtr(JSValue::encode(JSValue())), Address(callFrameRegister, RegisterFile::OptionalCalleeArguments * static_cast<int>(sizeof(Register))));
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_scopeChain) + OBJECT_OFFSETOF(ScopeChain, m_node)), regT3); // newScopeChain
storePtr(regT0, Address(callFrameRegister, RegisterFile::Callee * static_cast<int>(sizeof(Register))));
- storePtr(regT1, Address(callFrameRegister, RegisterFile::ScopeChain * static_cast<int>(sizeof(Register))));
+ storePtr(regT3, Address(callFrameRegister, RegisterFile::ScopeChain * static_cast<int>(sizeof(Register))));
}
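
The three stores above populate the new frame's header before control transfers to the callee. A minimal standalone C++ sketch of the run-time effect, assuming illustrative header slot indices; the real indices come from RegisterFile::CallFrameHeaderEntry, which is not part of this diff:

    #include <cstdint>

    struct Register { uint64_t raw; };

    // Hypothetical slot indices, for illustration only; header slots sit
    // just below the new frame's register 0.
    enum HeaderSlot { kArgumentCount = -3, kCallee = -2, kScopeChain = -7 };

    void initializeCallFrame(Register* newFrame, uint64_t callee,
                             uint64_t scopeChainNode, uint32_t argCount)
    {
        newFrame[kArgumentCount].raw = argCount;        // store32(regT1, ...)
        newFrame[kCallee].raw        = callee;          // storePtr(regT0, ...)
        newFrame[kScopeChain].raw    = scopeChainNode;  // storePtr(regT3, ...)
    }
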
-void JIT::compileOpCallSetupArgs(Instruction* instruction)
+void JIT::emit_op_call_put_result(Instruction* instruction)
{
- int argCount = instruction[3].u.operand;
- int registerOffset = instruction[4].u.operand;
-
- // ecx holds func
- emitPutJITStubArg(regT0, 0);
- emitPutJITStubArgConstant(argCount, 2);
- emitPutJITStubArgConstant(registerOffset, 1);
-}
-
-void JIT::compileOpCallVarargsSetupArgs(Instruction* instruction)
-{
- int registerOffset = instruction[4].u.operand;
-
- // ecx holds func
- emitPutJITStubArg(regT0, 0);
- emitPutJITStubArg(regT1, 2);
- addPtr(Imm32(registerOffset), regT1, regT2);
- emitPutJITStubArg(regT2, 1);
-}
-
-void JIT::compileOpConstructSetupArgs(Instruction* instruction)
-{
- int argCount = instruction[3].u.operand;
- int registerOffset = instruction[4].u.operand;
- int proto = instruction[5].u.operand;
- int thisRegister = instruction[6].u.operand;
-
- // ecx holds func
- emitPutJITStubArg(regT0, 0);
- emitPutJITStubArgConstant(registerOffset, 1);
- emitPutJITStubArgConstant(argCount, 2);
- emitPutJITStubArgFromVirtualRegister(proto, 3, regT2);
- emitPutJITStubArgConstant(thisRegister, 4);
+ int dst = instruction[1].u.operand;
+ emitPutVirtualRegister(dst);
}
void JIT::compileOpCallVarargs(Instruction* instruction)
{
- int dst = instruction[1].u.operand;
- int callee = instruction[2].u.operand;
- int argCountRegister = instruction[3].u.operand;
+ int callee = instruction[1].u.operand;
+ int argCountRegister = instruction[2].u.operand;
+ int registerOffset = instruction[3].u.operand;
emitGetVirtualRegister(argCountRegister, regT1);
emitGetVirtualRegister(callee, regT0);
- compileOpCallVarargsSetupArgs(instruction);
+ addPtr(Imm32(registerOffset), regT1, regT2);
// Check for JSFunctions.
emitJumpSlowCaseIfNotJSCell(regT0);
@@ -499,22 +81,21 @@ void JIT::compileOpCallVarargs(Instruction* instruction)
addPtr(callFrameRegister, regT3);
storePtr(callFrameRegister, regT3);
addPtr(regT2, callFrameRegister);
- emitNakedCall(m_globalData->jitStubs.ctiVirtualCall());
+ emitNakedCall(m_globalData->jitStubs->ctiVirtualCall());
- // Put the return value in dst. In the interpreter, op_ret does this.
- emitPutVirtualRegister(dst);
-
sampleCodeBlock(m_codeBlock);
}
-void JIT::compileOpCallVarargsSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::compileOpCallVarargsSlowCase(Instruction*, Vector<SlowCaseEntry>::iterator& iter)
{
- int dst = instruction[1].u.operand;
-
linkSlowCase(iter);
linkSlowCase(iter);
+
JITStubCall stubCall(this, cti_op_call_NotJSFunction);
- stubCall.call(dst); // In the interpreter, the callee puts the return value in dst.
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(regT2);
+ stubCall.addArgument(regT1);
+ stubCall.call();
sampleCodeBlock(m_codeBlock);
}
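
The slow path now passes the callee (regT0), the shifted frame offset (regT2), and the argument count (regT1) to the stub explicitly instead of having the stub recover them from the instruction stream. A hedged sketch of the shape such a stub takes; the name and signature below are illustrative stand-ins, not the actual cti_op_call_NotJSFunction declaration:

    #include <cstdint>

    // Illustrative only: a stub receiving its three inputs as explicit
    // JITStubCall arguments rather than re-reading the bytecode.
    extern "C" uint64_t notJSFunctionStub(void* callee, int32_t shiftedOffset,
                                          int32_t argCount)
    {
        (void)callee; (void)shiftedOffset; (void)argCount;
        return 0; // a real stub would invoke the host function here
    }
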
@@ -525,10 +106,9 @@ void JIT::compileOpCallVarargsSlowCase(Instruction* instruction, Vector<SlowCase
void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned)
{
- int dst = instruction[1].u.operand;
- int callee = instruction[2].u.operand;
- int argCount = instruction[3].u.operand;
- int registerOffset = instruction[4].u.operand;
+ int callee = instruction[1].u.operand;
+ int argCount = instruction[2].u.operand;
+ int registerOffset = instruction[3].u.operand;
// Handle eval
Jump wasEval;
@@ -542,46 +122,37 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned)
}
emitGetVirtualRegister(callee, regT0);
- // The arguments have been set up on the hot path for op_call_eval
- if (opcodeID == op_call)
- compileOpCallSetupArgs(instruction);
- else if (opcodeID == op_construct)
- compileOpConstructSetupArgs(instruction);
// Check for JSFunctions.
emitJumpSlowCaseIfNotJSCell(regT0);
addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsFunctionVPtr)));
- // First, in the case of a construct, allocate the new object.
- if (opcodeID == op_construct) {
- JITStubCall(this, cti_op_construct_JSConstruct).call(registerOffset - RegisterFile::CallFrameHeaderSize - argCount);
- emitGetVirtualRegister(callee, regT0);
- }
-
// Speculatively roll the callframe, assuming argCount will match the arity.
storePtr(callFrameRegister, Address(callFrameRegister, (RegisterFile::CallerFrame + registerOffset) * static_cast<int>(sizeof(Register))));
addPtr(Imm32(registerOffset * static_cast<int>(sizeof(Register))), callFrameRegister);
move(Imm32(argCount), regT1);
- emitNakedCall(m_globalData->jitStubs.ctiVirtualCall());
+ emitNakedCall(opcodeID == op_construct ? m_globalData->jitStubs->ctiVirtualConstruct() : m_globalData->jitStubs->ctiVirtualCall());
if (opcodeID == op_call_eval)
wasEval.link(this);
- // Put the return value in dst. In the interpreter, op_ret does this.
- emitPutVirtualRegister(dst);
-
sampleCodeBlock(m_codeBlock);
}
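
The lines before the naked call are the whole "speculative roll": the frame pointer is advanced before anyone checks that argCount matches the callee's arity, and the virtual-call thunk repairs the frame if it does not. A self-contained sketch of the pointer arithmetic, with an assumed CallerFrame slot index:

    #include <cstdint>

    struct Register { uint64_t raw; };
    const int kCallerFrameSlot = -6; // stand-in for RegisterFile::CallerFrame

    Register* rollCallFrame(Register* callFrame, int registerOffset)
    {
        // storePtr(callFrameRegister, Address(..., CallerFrame + registerOffset))
        callFrame[kCallerFrameSlot + registerOffset].raw =
            reinterpret_cast<uintptr_t>(callFrame);
        // addPtr(Imm32(registerOffset * sizeof(Register)), callFrameRegister)
        return callFrame + registerOffset;
    }
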
void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter, unsigned, OpcodeID opcodeID)
{
- int dst = instruction[1].u.operand;
+ int argCount = instruction[2].u.operand;
+ int registerOffset = instruction[3].u.operand;
linkSlowCase(iter);
linkSlowCase(iter);
+
JITStubCall stubCall(this, opcodeID == op_construct ? cti_op_construct_NotJSConstruct : cti_op_call_NotJSFunction);
- stubCall.call(dst); // In the interpreter, the callee puts the return value in dst.
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(JIT::Imm32(registerOffset));
+ stubCall.addArgument(JIT::Imm32(argCount));
+ stubCall.call();
sampleCodeBlock(m_codeBlock);
}
@@ -592,10 +163,9 @@ void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>:
void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned callLinkInfoIndex)
{
- int dst = instruction[1].u.operand;
- int callee = instruction[2].u.operand;
- int argCount = instruction[3].u.operand;
- int registerOffset = instruction[4].u.operand;
+ int callee = instruction[1].u.operand;
+ int argCount = instruction[2].u.operand;
+ int registerOffset = instruction[3].u.operand;
// Handle eval
Jump wasEval;
@@ -625,25 +195,14 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned ca
// The following is the fast case, only used when a callee can be linked.
- // In the case of OpConstruct, call out to a cti_ function to create the new object.
- if (opcodeID == op_construct) {
- int proto = instruction[5].u.operand;
- int thisRegister = instruction[6].u.operand;
-
- emitPutJITStubArg(regT0, 0);
- emitPutJITStubArgFromVirtualRegister(proto, 3, regT2);
- JITStubCall stubCall(this, cti_op_construct_JSConstruct);
- stubCall.call(thisRegister);
- emitGetVirtualRegister(callee, regT0);
- }
-
// Fast version of stack frame initialization, directly relative to edi.
// Note that this omits to set up RegisterFile::CodeBlock, which is set in the callee
- storePtr(ImmPtr(JSValue::encode(JSValue())), Address(callFrameRegister, (registerOffset + RegisterFile::OptionalCalleeArguments) * static_cast<int>(sizeof(Register))));
- storePtr(regT0, Address(callFrameRegister, (registerOffset + RegisterFile::Callee) * static_cast<int>(sizeof(Register))));
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_data) + OBJECT_OFFSETOF(ScopeChain, m_node)), regT1); // newScopeChain
+
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_scopeChain) + OBJECT_OFFSETOF(ScopeChain, m_node)), regT1); // newScopeChain
+
store32(Imm32(argCount), Address(callFrameRegister, (registerOffset + RegisterFile::ArgumentCount) * static_cast<int>(sizeof(Register))));
storePtr(callFrameRegister, Address(callFrameRegister, (registerOffset + RegisterFile::CallerFrame) * static_cast<int>(sizeof(Register))));
+ storePtr(regT0, Address(callFrameRegister, (registerOffset + RegisterFile::Callee) * static_cast<int>(sizeof(Register))));
storePtr(regT1, Address(callFrameRegister, (registerOffset + RegisterFile::ScopeChain) * static_cast<int>(sizeof(Register))));
addPtr(Imm32(registerOffset * sizeof(Register)), callFrameRegister);
@@ -653,65 +212,42 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned ca
if (opcodeID == op_call_eval)
wasEval.link(this);
- // Put the return value in dst. In the interpreter, op_ret does this.
- emitPutVirtualRegister(dst);
-
sampleCodeBlock(m_codeBlock);
}
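
What the hot path builds is a one-entry inline cache per call site: branchPtrWithPatch plants a comparison whose immediate is rewritten once the site is linked, and hotPathBegin/hotPathOther record where to patch. A conceptual C++ model of that behavior, not JSC's actual data structures:

    struct CallSite {
        void* expectedCallee = nullptr;   // immediate patched at link time
        void (*linkedTarget)() = nullptr; // destination of the naked call
    };

    void dispatch(CallSite& site, void* callee,
                  void (*slowPath)(CallSite&, void*))
    {
        if (callee == site.expectedCallee && site.linkedTarget)
            site.linkedTarget();     // fast path: one compare + direct call
        else
            slowPath(site, callee);  // unlinked or wrong callee: relink/stub
    }
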
void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter, unsigned callLinkInfoIndex, OpcodeID opcodeID)
{
- int dst = instruction[1].u.operand;
- int callee = instruction[2].u.operand;
- int argCount = instruction[3].u.operand;
- int registerOffset = instruction[4].u.operand;
+ int argCount = instruction[2].u.operand;
+ int registerOffset = instruction[3].u.operand;
linkSlowCase(iter);
- // The arguments have been set up on the hot path for op_call_eval
- if (opcodeID == op_call)
- compileOpCallSetupArgs(instruction);
- else if (opcodeID == op_construct)
- compileOpConstructSetupArgs(instruction);
-
// Fast check for JS function.
Jump callLinkFailNotObject = emitJumpIfNotJSCell(regT0);
Jump callLinkFailNotJSFunction = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsFunctionVPtr));
- // First, in the case of a construct, allocate the new object.
- if (opcodeID == op_construct) {
- JITStubCall(this, cti_op_construct_JSConstruct).call(registerOffset - RegisterFile::CallFrameHeaderSize - argCount);
- emitGetVirtualRegister(callee, regT0);
- }
-
// Speculatively roll the callframe, assuming argCount will match the arity.
storePtr(callFrameRegister, Address(callFrameRegister, (RegisterFile::CallerFrame + registerOffset) * static_cast<int>(sizeof(Register))));
addPtr(Imm32(registerOffset * static_cast<int>(sizeof(Register))), callFrameRegister);
move(Imm32(argCount), regT1);
- move(regT0, regT2);
-
- m_callStructureStubCompilationInfo[callLinkInfoIndex].callReturnLocation = emitNakedCall(m_globalData->jitStubs.ctiVirtualCallLink());
-
- // Put the return value in dst.
- emitPutVirtualRegister(dst);
- sampleCodeBlock(m_codeBlock);
-
- // If not, we need an extra case in the if below!
- ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_call_eval));
+ m_callStructureStubCompilationInfo[callLinkInfoIndex].callReturnLocation = emitNakedCall(opcodeID == op_construct ? m_globalData->jitStubs->ctiVirtualConstructLink() : m_globalData->jitStubs->ctiVirtualCallLink());
// Done! - return to the hot path.
- if (opcodeID == op_construct)
- emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_construct));
- else
- emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_call));
+ ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_call_eval));
+ ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct));
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_call));
// This handles host functions
callLinkFailNotObject.link(this);
callLinkFailNotJSFunction.link(this);
- JITStubCall(this, opcodeID == op_construct ? cti_op_construct_NotJSConstruct : cti_op_call_NotJSFunction).call();
- emitPutVirtualRegister(dst);
+ JITStubCall stubCall(this, opcodeID == op_construct ? cti_op_construct_NotJSConstruct : cti_op_call_NotJSFunction);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(JIT::Imm32(registerOffset));
+ stubCall.addArgument(JIT::Imm32(argCount));
+ stubCall.call();
+
sampleCodeBlock(m_codeBlock);
}
@@ -719,8 +255,7 @@ void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>:
#endif // !ENABLE(JIT_OPTIMIZE_CALL)
-#endif // USE(JSVALUE32_64)
-
} // namespace JSC
+#endif // USE(JSVALUE64)
#endif // ENABLE(JIT)
diff --git a/JavaScriptCore/jit/JITCall32_64.cpp b/JavaScriptCore/jit/JITCall32_64.cpp
new file mode 100644
index 0000000..daf5d2d
--- /dev/null
+++ b/JavaScriptCore/jit/JITCall32_64.cpp
@@ -0,0 +1,356 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#if ENABLE(JIT)
+#if USE(JSVALUE32_64)
+#include "JIT.h"
+
+#include "CodeBlock.h"
+#include "Interpreter.h"
+#include "JITInlineMethods.h"
+#include "JITStubCall.h"
+#include "JSArray.h"
+#include "JSFunction.h"
+#include "ResultType.h"
+#include "SamplingTool.h"
+
+#ifndef NDEBUG
+#include <stdio.h>
+#endif
+
+using namespace std;
+
+namespace JSC {
+
+void JIT::compileOpCallInitializeCallFrame()
+{
+ // regT0 holds callee, regT1 holds argCount
+ store32(regT1, Address(callFrameRegister, RegisterFile::ArgumentCount * static_cast<int>(sizeof(Register))));
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_scopeChain) + OBJECT_OFFSETOF(ScopeChain, m_node)), regT3); // scopeChain
+ storePtr(regT0, Address(callFrameRegister, RegisterFile::Callee * static_cast<int>(sizeof(Register)))); // callee
+ storePtr(regT3, Address(callFrameRegister, RegisterFile::ScopeChain * static_cast<int>(sizeof(Register)))); // scopeChain
+}
+
+void JIT::emit_op_call_put_result(Instruction* instruction)
+{
+ int dst = instruction[1].u.operand;
+ emitStore(dst, regT1, regT0);
+}
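
On JSVALUE32_64 every JSValue is a 32-bit tag plus a 32-bit payload, which is why results travel in the (regT1, regT0) pair and emitStore writes two words. A minimal model of the encoding; the tag constants below are placeholders, not JSValue's real values:

    #include <cstdint>

    struct EncodedJSValueModel {
        int32_t payload; // regT0
        int32_t tag;     // regT1
    };

    // Placeholder tag values, for illustration only.
    enum Tag : int32_t { kInt32Tag = -1, kCellTag = -2, kEmptyValueTag = -6 };

    inline EncodedJSValueModel makeInt32(int32_t value)
    {
        return EncodedJSValueModel{ value, kInt32Tag };
    }
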
+
+void JIT::compileOpCallVarargs(Instruction* instruction)
+{
+ int callee = instruction[1].u.operand;
+ int argCountRegister = instruction[2].u.operand;
+ int registerOffset = instruction[3].u.operand;
+
+ emitLoad(callee, regT1, regT0);
+ emitLoadPayload(argCountRegister, regT2); // argCount
+ addPtr(Imm32(registerOffset), regT2, regT3); // registerOffset
+
+ emitJumpSlowCaseIfNotJSCell(callee, regT1);
+ addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsFunctionVPtr)));
+
+ // Speculatively roll the callframe, assuming argCount will match the arity.
+ mul32(Imm32(sizeof(Register)), regT3, regT3);
+ addPtr(callFrameRegister, regT3);
+ storePtr(callFrameRegister, Address(regT3, RegisterFile::CallerFrame * static_cast<int>(sizeof(Register))));
+ move(regT3, callFrameRegister);
+
+ move(regT2, regT1); // argCount
+
+ emitNakedCall(m_globalData->jitStubs->ctiVirtualCall());
+
+ sampleCodeBlock(m_codeBlock);
+}
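
Unlike the fixed-argument case, the frame shift here is computed at run time: the new frame sits (argCount + registerOffset) registers above the old one. The arithmetic as a standalone sketch, with an assumed CallerFrame slot index:

    #include <cstdint>

    struct Register { uint64_t raw; };
    const int kCallerFrameSlot = -6; // stand-in for RegisterFile::CallerFrame

    Register* rollVarargsFrame(Register* callFrame, int32_t argCount,
                               int32_t registerOffset)
    {
        // addPtr(Imm32(registerOffset), regT2, regT3); mul32; addPtr
        Register* newFrame = callFrame + (argCount + registerOffset);
        newFrame[kCallerFrameSlot].raw = reinterpret_cast<uintptr_t>(callFrame);
        return newFrame; // move(regT3, callFrameRegister)
    }
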
+
+void JIT::compileOpCallVarargsSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ int callee = instruction[1].u.operand;
+
+ linkSlowCaseIfNotJSCell(iter, callee);
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, cti_op_call_NotJSFunction);
+ stubCall.addArgument(regT1, regT0);
+ stubCall.addArgument(regT3);
+ stubCall.addArgument(regT2);
+ stubCall.call();
+
+ sampleCodeBlock(m_codeBlock);
+}
+
+void JIT::emit_op_ret(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+
+ // We could JIT generate the deref, only calling out to C when the refcount hits zero.
+ if (m_codeBlock->needsFullScopeChain()) {
+ Jump activationNotCreated = branch32(Equal, tagFor(m_codeBlock->activationRegister()), Imm32(JSValue::EmptyValueTag));
+ JITStubCall(this, cti_op_ret_scopeChain).call();
+ activationNotCreated.link(this);
+ }
+ emitLoad(dst, regT1, regT0);
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT2);
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
+
+ restoreReturnAddressBeforeReturn(regT2);
+ ret();
+}
+
+void JIT::emit_op_ret_object_or_this(Instruction* currentInstruction)
+{
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned thisReg = currentInstruction[2].u.operand;
+
+ // We could JIT generate the deref, only calling out to C when the refcount hits zero.
+ if (m_codeBlock->needsFullScopeChain()) {
+ Jump activationNotCreated = branch32(Equal, tagFor(m_codeBlock->activationRegister()), Imm32(JSValue::EmptyValueTag));
+ JITStubCall(this, cti_op_ret_scopeChain).call();
+ activationNotCreated.link(this);
+ }
+
+ emitLoad(result, regT1, regT0);
+ Jump notJSCell = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
+ Jump notObject = branch8(NotEqual, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo) + OBJECT_OFFSETOF(TypeInfo, m_type)), Imm32(ObjectType));
+
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT2);
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
+
+ restoreReturnAddressBeforeReturn(regT2);
+ ret();
+
+ notJSCell.link(this);
+ notObject.link(this);
+ emitLoad(thisReg, regT1, regT0);
+
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT2);
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
+
+ restoreReturnAddressBeforeReturn(regT2);
+ ret();
+}
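
op_ret_object_or_this encodes the [[Construct]] rule visible in the branches above: a constructor's explicit return value is used only if it is an object; otherwise the caller receives the freshly allocated `this`. The same rule in a few lines of C++:

    struct ValueModel { bool isObjectCell; /* tag + payload elided */ };

    ValueModel constructResult(const ValueModel& returned,
                               const ValueModel& thisObject)
    {
        return returned.isObjectCell ? returned : thisObject;
    }
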
+
+void JIT::emitSlow_op_call(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ compileOpCallSlowCase(currentInstruction, iter, m_callLinkInfoIndex++, op_call);
+}
+
+void JIT::emitSlow_op_call_eval(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ compileOpCallSlowCase(currentInstruction, iter, m_callLinkInfoIndex++, op_call_eval);
+}
+
+void JIT::emitSlow_op_call_varargs(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ compileOpCallVarargsSlowCase(currentInstruction, iter);
+}
+
+void JIT::emitSlow_op_construct(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ compileOpCallSlowCase(currentInstruction, iter, m_callLinkInfoIndex++, op_construct);
+}
+
+void JIT::emit_op_call(Instruction* currentInstruction)
+{
+ compileOpCall(op_call, currentInstruction, m_callLinkInfoIndex++);
+}
+
+void JIT::emit_op_call_eval(Instruction* currentInstruction)
+{
+ compileOpCall(op_call_eval, currentInstruction, m_callLinkInfoIndex++);
+}
+
+void JIT::emit_op_call_varargs(Instruction* currentInstruction)
+{
+ compileOpCallVarargs(currentInstruction);
+}
+
+void JIT::emit_op_construct(Instruction* currentInstruction)
+{
+ compileOpCall(op_construct, currentInstruction, m_callLinkInfoIndex++);
+}
+
+#if !ENABLE(JIT_OPTIMIZE_CALL)
+
+/* ------------------------------ BEGIN: !ENABLE(JIT_OPTIMIZE_CALL) ------------------------------ */
+
+void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned)
+{
+ int callee = instruction[1].u.operand;
+ int argCount = instruction[2].u.operand;
+ int registerOffset = instruction[3].u.operand;
+
+ Jump wasEval;
+ if (opcodeID == op_call_eval) {
+ JITStubCall stubCall(this, cti_op_call_eval);
+ stubCall.addArgument(callee);
+ stubCall.addArgument(JIT::Imm32(registerOffset));
+ stubCall.addArgument(JIT::Imm32(argCount));
+ stubCall.call();
+ wasEval = branch32(NotEqual, regT1, Imm32(JSValue::EmptyValueTag));
+ }
+
+ emitLoad(callee, regT1, regT0);
+
+ emitJumpSlowCaseIfNotJSCell(callee, regT1);
+ addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsFunctionVPtr)));
+
+ // Speculatively roll the callframe, assuming argCount will match the arity.
+ storePtr(callFrameRegister, Address(callFrameRegister, (RegisterFile::CallerFrame + registerOffset) * static_cast<int>(sizeof(Register))));
+ addPtr(Imm32(registerOffset * static_cast<int>(sizeof(Register))), callFrameRegister);
+ move(Imm32(argCount), regT1);
+
+ emitNakedCall(opcodeID == op_construct ? m_globalData->jitStubs->ctiVirtualConstruct() : m_globalData->jitStubs->ctiVirtualCall());
+
+ if (opcodeID == op_call_eval)
+ wasEval.link(this);
+
+ sampleCodeBlock(m_codeBlock);
+}
+
+void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter, unsigned, OpcodeID opcodeID)
+{
+ int callee = instruction[1].u.operand;
+ int argCount = instruction[2].u.operand;
+ int registerOffset = instruction[3].u.operand;
+
+ linkSlowCaseIfNotJSCell(iter, callee);
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, opcodeID == op_construct ? cti_op_construct_NotJSConstruct : cti_op_call_NotJSFunction);
+ stubCall.addArgument(callee);
+ stubCall.addArgument(JIT::Imm32(registerOffset));
+ stubCall.addArgument(JIT::Imm32(argCount));
+ stubCall.call();
+
+ sampleCodeBlock(m_codeBlock);
+}
+
+#else // !ENABLE(JIT_OPTIMIZE_CALL)
+
+/* ------------------------------ BEGIN: ENABLE(JIT_OPTIMIZE_CALL) ------------------------------ */
+
+void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned callLinkInfoIndex)
+{
+ int callee = instruction[1].u.operand;
+ int argCount = instruction[2].u.operand;
+ int registerOffset = instruction[3].u.operand;
+
+ Jump wasEval;
+ if (opcodeID == op_call_eval) {
+ JITStubCall stubCall(this, cti_op_call_eval);
+ stubCall.addArgument(callee);
+ stubCall.addArgument(JIT::Imm32(registerOffset));
+ stubCall.addArgument(JIT::Imm32(argCount));
+ stubCall.call();
+ wasEval = branch32(NotEqual, regT1, Imm32(JSValue::EmptyValueTag));
+ }
+
+ emitLoad(callee, regT1, regT0);
+
+ DataLabelPtr addressOfLinkedFunctionCheck;
+
+ BEGIN_UNINTERRUPTED_SEQUENCE(sequenceOpCall);
+
+ Jump jumpToSlow = branchPtrWithPatch(NotEqual, regT0, addressOfLinkedFunctionCheck, ImmPtr(0));
+
+ END_UNINTERRUPTED_SEQUENCE(sequenceOpCall);
+
+ addSlowCase(jumpToSlow);
+ ASSERT(differenceBetween(addressOfLinkedFunctionCheck, jumpToSlow) == patchOffsetOpCallCompareToJump);
+ m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck;
+
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));
+
+ // The following is the fast case, only used when a callee can be linked.
+
+ // Fast version of stack frame initialization, directly relative to edi.
+ // Note that this omits to set up RegisterFile::CodeBlock, which is set in the callee
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_scopeChain) + OBJECT_OFFSETOF(ScopeChain, m_node)), regT2);
+
+ store32(Imm32(argCount), Address(callFrameRegister, (registerOffset + RegisterFile::ArgumentCount) * static_cast<int>(sizeof(Register))));
+ storePtr(callFrameRegister, Address(callFrameRegister, (registerOffset + RegisterFile::CallerFrame) * static_cast<int>(sizeof(Register))));
+ emitStore(registerOffset + RegisterFile::Callee, regT1, regT0);
+ storePtr(regT2, Address(callFrameRegister, (registerOffset + RegisterFile::ScopeChain) * static_cast<int>(sizeof(Register))));
+ addPtr(Imm32(registerOffset * sizeof(Register)), callFrameRegister);
+
+ // Call to the callee
+ m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedCall();
+
+ if (opcodeID == op_call_eval)
+ wasEval.link(this);
+
+ sampleCodeBlock(m_codeBlock);
+}
+
+void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter, unsigned callLinkInfoIndex, OpcodeID opcodeID)
+{
+ int callee = instruction[1].u.operand;
+ int argCount = instruction[2].u.operand;
+ int registerOffset = instruction[3].u.operand;
+
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+
+ // Fast check for JS function.
+ Jump callLinkFailNotObject = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));
+ Jump callLinkFailNotJSFunction = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsFunctionVPtr));
+
+ // Speculatively roll the callframe, assuming argCount will match the arity.
+ storePtr(callFrameRegister, Address(callFrameRegister, (RegisterFile::CallerFrame + registerOffset) * static_cast<int>(sizeof(Register))));
+ addPtr(Imm32(registerOffset * static_cast<int>(sizeof(Register))), callFrameRegister);
+ move(Imm32(argCount), regT1);
+
+ m_callStructureStubCompilationInfo[callLinkInfoIndex].callReturnLocation = emitNakedCall(opcodeID == op_construct ? m_globalData->jitStubs->ctiVirtualConstructLink() : m_globalData->jitStubs->ctiVirtualCallLink());
+
+ // Done! - return to the hot path.
+ ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_call_eval));
+ ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct));
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_call));
+
+ // This handles host functions
+ callLinkFailNotObject.link(this);
+ callLinkFailNotJSFunction.link(this);
+
+ JITStubCall stubCall(this, opcodeID == op_construct ? cti_op_construct_NotJSConstruct : cti_op_call_NotJSFunction);
+ stubCall.addArgument(callee);
+ stubCall.addArgument(JIT::Imm32(registerOffset));
+ stubCall.addArgument(JIT::Imm32(argCount));
+ stubCall.call();
+
+ sampleCodeBlock(m_codeBlock);
+}
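
The pair of asserts in the slow case above is what lets one emitJumpSlowToHot serve op_call, op_call_eval, and op_construct: all three must occupy the same number of instruction-stream slots so the slow path can compute a single shared return offset. The invariant as a compile-time check; the lengths below are placeholders, not the real opcode table:

    constexpr int kOpCallLen = 4, kOpCallEvalLen = 4, kOpConstructLen = 4;
    static_assert(kOpCallLen == kOpCallEvalLen,
                  "slow path returns at a single shared offset");
    static_assert(kOpCallLen == kOpConstructLen,
                  "slow path returns at a single shared offset");
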
+
+/* ------------------------------ END: !ENABLE / ENABLE(JIT_OPTIMIZE_CALL) ------------------------------ */
+
+#endif // !ENABLE(JIT_OPTIMIZE_CALL)
+
+} // namespace JSC
+
+#endif // USE(JSVALUE32_64)
+#endif // ENABLE(JIT)
diff --git a/JavaScriptCore/jit/JITCode.h b/JavaScriptCore/jit/JITCode.h
index 69cf167..7346fd5 100644
--- a/JavaScriptCore/jit/JITCode.h
+++ b/JavaScriptCore/jit/JITCode.h
@@ -26,8 +26,6 @@
#ifndef JITCode_h
#define JITCode_h
-#include <wtf/Platform.h>
-
#if ENABLE(JIT)
#include "CallFrame.h"
@@ -74,9 +72,10 @@ namespace JSC {
}
// Execute the code!
- inline JSValue execute(RegisterFile* registerFile, CallFrame* callFrame, JSGlobalData* globalData, JSValue* exception)
+ inline JSValue execute(RegisterFile* registerFile, CallFrame* callFrame, JSGlobalData* globalData)
{
- return JSValue::decode(ctiTrampoline(m_ref.m_code.executableAddress(), registerFile, callFrame, exception, Profiler::enabledProfilerReference(), globalData));
+ JSValue result = JSValue::decode(ctiTrampoline(m_ref.m_code.executableAddress(), registerFile, callFrame, 0, Profiler::enabledProfilerReference(), globalData));
+ return globalData->exception ? jsNull() : result;
}
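
The trampoline no longer takes a JSValue* out-parameter for exceptions (note the literal 0 passed in its old slot); the caller checks globalData->exception afterwards and discards the result if one is pending. The calling pattern, sketched with stand-in types:

    #include <cstdint>

    struct GlobalDataModel { void* exception = nullptr; }; // stand-in

    template<typename Trampoline>
    uint64_t executeChecked(Trampoline trampoline, GlobalDataModel& globalData)
    {
        uint64_t result = trampoline();                // ctiTrampoline(...)
        return globalData.exception ? 0 /* jsNull */ : result;
    }
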
void* start()
diff --git a/JavaScriptCore/jit/JITInlineMethods.h b/JavaScriptCore/jit/JITInlineMethods.h
index 5af7565..39ca4a5 100644
--- a/JavaScriptCore/jit/JITInlineMethods.h
+++ b/JavaScriptCore/jit/JITInlineMethods.h
@@ -26,7 +26,6 @@
#ifndef JITInlineMethods_h
#define JITInlineMethods_h
-#include <wtf/Platform.h>
#if ENABLE(JIT)
@@ -34,31 +33,6 @@ namespace JSC {
/* Deprecated: Please use JITStubCall instead. */
-// puts an arg onto the stack, as an arg to a context threaded function.
-ALWAYS_INLINE void JIT::emitPutJITStubArg(RegisterID src, unsigned argumentNumber)
-{
- unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + JITSTACKFRAME_ARGS_INDEX;
- poke(src, argumentStackOffset);
-}
-
-/* Deprecated: Please use JITStubCall instead. */
-
-ALWAYS_INLINE void JIT::emitPutJITStubArgConstant(unsigned value, unsigned argumentNumber)
-{
- unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + JITSTACKFRAME_ARGS_INDEX;
- poke(Imm32(value), argumentStackOffset);
-}
-
-/* Deprecated: Please use JITStubCall instead. */
-
-ALWAYS_INLINE void JIT::emitPutJITStubArgConstant(void* value, unsigned argumentNumber)
-{
- unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + JITSTACKFRAME_ARGS_INDEX;
- poke(ImmPtr(value), argumentStackOffset);
-}
-
-/* Deprecated: Please use JITStubCall instead. */
-
ALWAYS_INLINE void JIT::emitGetJITStubArg(unsigned argumentNumber, RegisterID dst)
{
unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + JITSTACKFRAME_ARGS_INDEX;
@@ -89,25 +63,35 @@ ALWAYS_INLINE void JIT::emitPutImmediateToCallFrameHeader(void* value, RegisterF
ALWAYS_INLINE void JIT::emitGetFromCallFrameHeaderPtr(RegisterFile::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
{
loadPtr(Address(from, entry * sizeof(Register)), to);
-#if !USE(JSVALUE32_64)
+#if USE(JSVALUE64)
killLastResultRegister();
#endif
}
+ALWAYS_INLINE void JIT::emitLoadCharacterString(RegisterID src, RegisterID dst, JumpList& failures)
+{
+ failures.append(branchPtr(NotEqual, Address(src), ImmPtr(m_globalData->jsStringVPtr)));
+ failures.append(branchTest32(NonZero, Address(src, OBJECT_OFFSETOF(JSString, m_fiberCount))));
+ failures.append(branch32(NotEqual, MacroAssembler::Address(src, ThunkHelpers::jsStringLengthOffset()), Imm32(1)));
+ loadPtr(MacroAssembler::Address(src, ThunkHelpers::jsStringValueOffset()), dst);
+ loadPtr(MacroAssembler::Address(dst, ThunkHelpers::stringImplDataOffset()), dst);
+ load16(MacroAssembler::Address(dst, 0), dst);
+}
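
emitLoadCharacterString is the JIT half of the single-character fast paths: it bails unless the cell is a non-rope JSString of length 1, then loads the lone UChar. The same checks in C++, using stand-in structs for the fields the emitted code loads through (the vptr check is elided):

    #include <cstdint>

    struct StringImplModel { const uint16_t* characters; };
    struct JSStringModel {
        uint32_t fiberCount; // non-zero means a rope
        uint32_t length;
        StringImplModel* value;
    };

    bool loadCharacter(const JSStringModel* s, uint16_t& out)
    {
        if (s->fiberCount != 0 || s->length != 1)
            return false;                 // caller appends to `failures`
        out = s->value->characters[0];    // load16 of the first character
        return true;
    }
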
+
ALWAYS_INLINE void JIT::emitGetFromCallFrameHeader32(RegisterFile::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
{
load32(Address(from, entry * sizeof(Register)), to);
-#if !USE(JSVALUE32_64)
+#if USE(JSVALUE64)
killLastResultRegister();
#endif
}
ALWAYS_INLINE JIT::Call JIT::emitNakedCall(CodePtr function)
{
- ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
+ ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
Call nakedCall = nearCall();
- m_calls.append(CallRecord(nakedCall, m_bytecodeIndex, function.executableAddress()));
+ m_calls.append(CallRecord(nakedCall, m_bytecodeOffset, function.executableAddress()));
return nakedCall;
}
@@ -115,6 +99,7 @@ ALWAYS_INLINE JIT::Call JIT::emitNakedCall(CodePtr function)
ALWAYS_INLINE void JIT::beginUninterruptedSequence(int insnSpace, int constSpace)
{
+ JSInterfaceJIT::beginUninterruptedSequence();
#if CPU(ARM_TRADITIONAL)
#ifndef NDEBUG
// Ensure the label after the sequence can also fit
@@ -137,9 +122,17 @@ ALWAYS_INLINE void JIT::beginUninterruptedSequence(int insnSpace, int constSpace
ALWAYS_INLINE void JIT::endUninterruptedSequence(int insnSpace, int constSpace)
{
#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL
- ASSERT(differenceBetween(m_uninterruptedInstructionSequenceBegin, label()) == insnSpace);
- ASSERT(sizeOfConstantPool() - m_uninterruptedConstantSequenceBegin == constSpace);
+ /* There are several cases where the declared size of an uninterrupted
+ * sequence is larger than the space actually needed to patch it. E.g.,
+ * if the last macroassembler instruction in an uninterrupted sequence
+ * is a stub call, it emits store instruction(s) that should not be
+ * counted toward the length of the sequence. So insnSpace and
+ * constSpace are upper limits rather than hard limits.
+ */
+ ASSERT(differenceBetween(m_uninterruptedInstructionSequenceBegin, label()) <= insnSpace);
+ ASSERT(sizeOfConstantPool() - m_uninterruptedConstantSequenceBegin <= constSpace);
#endif
+ JSInterfaceJIT::endUninterruptedSequence();
}
#endif
@@ -161,6 +154,23 @@ ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
loadPtr(address, linkRegister);
}
+#elif CPU(MIPS)
+
+ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg)
+{
+ move(returnAddressRegister, reg);
+}
+
+ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(RegisterID reg)
+{
+ move(reg, returnAddressRegister);
+}
+
+ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
+{
+ loadPtr(address, returnAddressRegister);
+}
+
#else // CPU(X86) || CPU(X86_64)
ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg)
@@ -180,18 +190,12 @@ ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
#endif
-#if USE(JIT_STUB_ARGUMENT_VA_LIST)
-ALWAYS_INLINE void JIT::restoreArgumentReference()
-{
- poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof (void*));
-}
-ALWAYS_INLINE void JIT::restoreArgumentReferenceForTrampoline() {}
-#else
ALWAYS_INLINE void JIT::restoreArgumentReference()
{
move(stackPointerRegister, firstArgumentRegister);
- poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof (void*));
+ poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
}
+
ALWAYS_INLINE void JIT::restoreArgumentReferenceForTrampoline()
{
#if CPU(X86)
@@ -202,7 +206,6 @@ ALWAYS_INLINE void JIT::restoreArgumentReferenceForTrampoline()
#endif
// In the trampoline on x86-64, the first argument register is not overwritten.
}
-#endif
ALWAYS_INLINE JIT::Jump JIT::checkStructure(RegisterID reg, Structure* structure)
{
@@ -217,33 +220,33 @@ ALWAYS_INLINE void JIT::linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator&
ALWAYS_INLINE void JIT::addSlowCase(Jump jump)
{
- ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
+ ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
- m_slowCases.append(SlowCaseEntry(jump, m_bytecodeIndex));
+ m_slowCases.append(SlowCaseEntry(jump, m_bytecodeOffset));
}
ALWAYS_INLINE void JIT::addSlowCase(JumpList jumpList)
{
- ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
+ ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
const JumpList::JumpVector& jumpVector = jumpList.jumps();
size_t size = jumpVector.size();
for (size_t i = 0; i < size; ++i)
- m_slowCases.append(SlowCaseEntry(jumpVector[i], m_bytecodeIndex));
+ m_slowCases.append(SlowCaseEntry(jumpVector[i], m_bytecodeOffset));
}
ALWAYS_INLINE void JIT::addJump(Jump jump, int relativeOffset)
{
- ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
+ ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
- m_jmpTable.append(JumpTable(jump, m_bytecodeIndex + relativeOffset));
+ m_jmpTable.append(JumpTable(jump, m_bytecodeOffset + relativeOffset));
}
ALWAYS_INLINE void JIT::emitJumpSlowToHot(Jump jump, int relativeOffset)
{
- ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
+ ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
- jump.linkTo(m_labels[m_bytecodeIndex + relativeOffset], this);
+ jump.linkTo(m_labels[m_bytecodeOffset + relativeOffset], this);
}
#if ENABLE(SAMPLING_FLAGS)
@@ -307,23 +310,13 @@ ALWAYS_INLINE void JIT::sampleCodeBlock(CodeBlock* codeBlock)
#endif
#endif
-inline JIT::Address JIT::addressFor(unsigned index, RegisterID base)
+ALWAYS_INLINE bool JIT::isOperandConstantImmediateChar(unsigned src)
{
- return Address(base, (index * sizeof(Register)));
+ return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isString() && asString(getConstantOperand(src).asCell())->length() == 1;
}
#if USE(JSVALUE32_64)
-inline JIT::Address JIT::tagFor(unsigned index, RegisterID base)
-{
- return Address(base, (index * sizeof(Register)) + OBJECT_OFFSETOF(JSValue, u.asBits.tag));
-}
-
-inline JIT::Address JIT::payloadFor(unsigned index, RegisterID base)
-{
- return Address(base, (index * sizeof(Register)) + OBJECT_OFFSETOF(JSValue, u.asBits.payload));
-}
-
inline void JIT::emitLoadTag(unsigned index, RegisterID tag)
{
RegisterID mappedTag;
@@ -469,24 +462,24 @@ ALWAYS_INLINE void JIT::emitInitRegister(unsigned dst)
emitStore(dst, jsUndefined());
}
-inline bool JIT::isLabeled(unsigned bytecodeIndex)
+inline bool JIT::isLabeled(unsigned bytecodeOffset)
{
for (size_t numberOfJumpTargets = m_codeBlock->numberOfJumpTargets(); m_jumpTargetIndex != numberOfJumpTargets; ++m_jumpTargetIndex) {
unsigned jumpTarget = m_codeBlock->jumpTarget(m_jumpTargetIndex);
- if (jumpTarget == bytecodeIndex)
+ if (jumpTarget == bytecodeOffset)
return true;
- if (jumpTarget > bytecodeIndex)
+ if (jumpTarget > bytecodeOffset)
return false;
}
return false;
}
-inline void JIT::map(unsigned bytecodeIndex, unsigned virtualRegisterIndex, RegisterID tag, RegisterID payload)
+inline void JIT::map(unsigned bytecodeOffset, unsigned virtualRegisterIndex, RegisterID tag, RegisterID payload)
{
- if (isLabeled(bytecodeIndex))
+ if (isLabeled(bytecodeOffset))
return;
- m_mappedBytecodeIndex = bytecodeIndex;
+ m_mappedBytecodeOffset = bytecodeOffset;
m_mappedVirtualRegisterIndex = virtualRegisterIndex;
m_mappedTag = tag;
m_mappedPayload = payload;
@@ -502,7 +495,7 @@ inline void JIT::unmap(RegisterID registerID)
inline void JIT::unmap()
{
- m_mappedBytecodeIndex = (unsigned)-1;
+ m_mappedBytecodeOffset = (unsigned)-1;
m_mappedVirtualRegisterIndex = (unsigned)-1;
m_mappedTag = (RegisterID)-1;
m_mappedPayload = (RegisterID)-1;
@@ -510,7 +503,7 @@ inline void JIT::unmap()
inline bool JIT::isMapped(unsigned virtualRegisterIndex)
{
- if (m_mappedBytecodeIndex != m_bytecodeIndex)
+ if (m_mappedBytecodeOffset != m_bytecodeOffset)
return false;
if (m_mappedVirtualRegisterIndex != virtualRegisterIndex)
return false;
@@ -519,7 +512,7 @@ inline bool JIT::isMapped(unsigned virtualRegisterIndex)
inline bool JIT::getMappedPayload(unsigned virtualRegisterIndex, RegisterID& payload)
{
- if (m_mappedBytecodeIndex != m_bytecodeIndex)
+ if (m_mappedBytecodeOffset != m_bytecodeOffset)
return false;
if (m_mappedVirtualRegisterIndex != virtualRegisterIndex)
return false;
@@ -531,7 +524,7 @@ inline bool JIT::getMappedPayload(unsigned virtualRegisterIndex, RegisterID& pay
inline bool JIT::getMappedTag(unsigned virtualRegisterIndex, RegisterID& tag)
{
- if (m_mappedBytecodeIndex != m_bytecodeIndex)
+ if (m_mappedBytecodeOffset != m_bytecodeOffset)
return false;
if (m_mappedVirtualRegisterIndex != virtualRegisterIndex)
return false;
@@ -543,14 +536,22 @@ inline bool JIT::getMappedTag(unsigned virtualRegisterIndex, RegisterID& tag)
inline void JIT::emitJumpSlowCaseIfNotJSCell(unsigned virtualRegisterIndex)
{
- if (!m_codeBlock->isKnownNotImmediate(virtualRegisterIndex))
- addSlowCase(branch32(NotEqual, tagFor(virtualRegisterIndex), Imm32(JSValue::CellTag)));
+ if (!m_codeBlock->isKnownNotImmediate(virtualRegisterIndex)) {
+ if (m_codeBlock->isConstantRegisterIndex(virtualRegisterIndex))
+ addSlowCase(jump());
+ else
+ addSlowCase(emitJumpIfNotJSCell(virtualRegisterIndex));
+ }
}
inline void JIT::emitJumpSlowCaseIfNotJSCell(unsigned virtualRegisterIndex, RegisterID tag)
{
- if (!m_codeBlock->isKnownNotImmediate(virtualRegisterIndex))
- addSlowCase(branch32(NotEqual, tag, Imm32(JSValue::CellTag)));
+ if (!m_codeBlock->isKnownNotImmediate(virtualRegisterIndex)) {
+ if (m_codeBlock->isConstantRegisterIndex(virtualRegisterIndex))
+ addSlowCase(jump());
+ else
+ addSlowCase(branch32(NotEqual, tag, Imm32(JSValue::CellTag)));
+ }
}
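
The change in both helpers is the constant-register case: a constant has no tag sitting in a machine register to test, so if it is not provably a cell the code conservatively takes the slow case unconditionally. The decision table as plain C++:

    enum class CellCheck { None, AlwaysSlow, TestTag };

    CellCheck cellCheckFor(bool knownNotImmediate, bool isConstantRegister)
    {
        if (knownNotImmediate)
            return CellCheck::None;       // no guard emitted at all
        return isConstantRegister ? CellCheck::AlwaysSlow  // addSlowCase(jump())
                                  : CellCheck::TestTag;    // branch32 on the tag
    }
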
inline void JIT::linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator& iter, unsigned virtualRegisterIndex)
@@ -581,31 +582,6 @@ ALWAYS_INLINE bool JIT::getOperandConstantImmediateInt(unsigned op1, unsigned op
return false;
}
-/* Deprecated: Please use JITStubCall instead. */
-
-ALWAYS_INLINE void JIT::emitPutJITStubArg(RegisterID tag, RegisterID payload, unsigned argumentNumber)
-{
- unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + JITSTACKFRAME_ARGS_INDEX;
- poke(payload, argumentStackOffset);
- poke(tag, argumentStackOffset + 1);
-}
-
-/* Deprecated: Please use JITStubCall instead. */
-
-ALWAYS_INLINE void JIT::emitPutJITStubArgFromVirtualRegister(unsigned src, unsigned argumentNumber, RegisterID scratch1, RegisterID scratch2)
-{
- unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + JITSTACKFRAME_ARGS_INDEX;
- if (m_codeBlock->isConstantRegisterIndex(src)) {
- JSValue constant = m_codeBlock->getConstant(src);
- poke(Imm32(constant.payload()), argumentStackOffset);
- poke(Imm32(constant.tag()), argumentStackOffset + 1);
- } else {
- emitLoad(src, scratch1, scratch2);
- poke(scratch2, argumentStackOffset);
- poke(scratch1, argumentStackOffset + 1);
- }
-}
-
#else // USE(JSVALUE32_64)
ALWAYS_INLINE void JIT::killLastResultRegister()
@@ -616,7 +592,7 @@ ALWAYS_INLINE void JIT::killLastResultRegister()
// get arg puts an arg from the SF register array into a h/w register
ALWAYS_INLINE void JIT::emitGetVirtualRegister(int src, RegisterID dst)
{
- ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
+ ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
// TODO: we want to reuse values that are already in registers if we can - add a register allocator!
if (m_codeBlock->isConstantRegisterIndex(src)) {
@@ -628,8 +604,8 @@ ALWAYS_INLINE void JIT::emitGetVirtualRegister(int src, RegisterID dst)
if (src == m_lastResultBytecodeRegister && m_codeBlock->isTemporaryRegisterIndex(src)) {
bool atJumpTarget = false;
- while (m_jumpTargetsPosition < m_codeBlock->numberOfJumpTargets() && m_codeBlock->jumpTarget(m_jumpTargetsPosition) <= m_bytecodeIndex) {
- if (m_codeBlock->jumpTarget(m_jumpTargetsPosition) == m_bytecodeIndex)
+ while (m_jumpTargetsPosition < m_codeBlock->numberOfJumpTargets() && m_codeBlock->jumpTarget(m_jumpTargetsPosition) <= m_bytecodeOffset) {
+ if (m_codeBlock->jumpTarget(m_jumpTargetsPosition) == m_bytecodeOffset)
atJumpTarget = true;
++m_jumpTargetsPosition;
}
@@ -671,7 +647,7 @@ ALWAYS_INLINE bool JIT::isOperandConstantImmediateInt(unsigned src)
ALWAYS_INLINE void JIT::emitPutVirtualRegister(unsigned dst, RegisterID from)
{
storePtr(from, Address(callFrameRegister, dst * sizeof(Register)));
- m_lastResultBytecodeRegister = (from == cachedResultRegister) ? dst : std::numeric_limits<int>::max();
+ m_lastResultBytecodeRegister = (from == cachedResultRegister) ? static_cast<int>(dst) : std::numeric_limits<int>::max();
}
ALWAYS_INLINE void JIT::emitInitRegister(unsigned dst)
@@ -721,14 +697,6 @@ ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotJSCell(RegisterID reg, int vReg)
}
#if USE(JSVALUE64)
-ALWAYS_INLINE JIT::Jump JIT::emitJumpIfImmediateNumber(RegisterID reg)
-{
- return branchTestPtr(NonZero, reg, tagTypeNumberRegister);
-}
-ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotImmediateNumber(RegisterID reg)
-{
- return branchTestPtr(Zero, reg, tagTypeNumberRegister);
-}
inline void JIT::emitLoadDouble(unsigned index, FPRegisterID value)
{
@@ -789,7 +757,7 @@ ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateNumber(RegisterID reg)
addSlowCase(emitJumpIfNotImmediateNumber(reg));
}
-#if !USE(JSVALUE64)
+#if USE(JSVALUE32_64)
ALWAYS_INLINE void JIT::emitFastArithDeTagImmediate(RegisterID reg)
{
subPtr(Imm32(JSImmediate::TagTypeNumber), reg);
@@ -812,15 +780,6 @@ ALWAYS_INLINE void JIT::emitFastArithReTagImmediate(RegisterID src, RegisterID d
#endif
}
-ALWAYS_INLINE void JIT::emitFastArithImmToInt(RegisterID reg)
-{
-#if USE(JSVALUE64)
- UNUSED_PARAM(reg);
-#else
- rshift32(Imm32(JSImmediate::IntegerPayloadShift), reg);
-#endif
-}
-
// operand is int32_t, must have been zero-extended if register is 64-bit.
ALWAYS_INLINE void JIT::emitFastArithIntToImmNoCheck(RegisterID src, RegisterID dest)
{
@@ -841,23 +800,6 @@ ALWAYS_INLINE void JIT::emitTagAsBoolImmediate(RegisterID reg)
or32(Imm32(static_cast<int32_t>(JSImmediate::FullTagTypeBool)), reg);
}
-/* Deprecated: Please use JITStubCall instead. */
-
-// get arg puts an arg from the SF register array onto the stack, as an arg to a context threaded function.
-ALWAYS_INLINE void JIT::emitPutJITStubArgFromVirtualRegister(unsigned src, unsigned argumentNumber, RegisterID scratch)
-{
- unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + JITSTACKFRAME_ARGS_INDEX;
- if (m_codeBlock->isConstantRegisterIndex(src)) {
- JSValue value = m_codeBlock->getConstant(src);
- poke(ImmPtr(JSValue::encode(value)), argumentStackOffset);
- } else {
- loadPtr(Address(callFrameRegister, src * sizeof(Register)), scratch);
- poke(scratch, argumentStackOffset);
- }
-
- killLastResultRegister();
-}
-
#endif // USE(JSVALUE32_64)
} // namespace JSC
diff --git a/JavaScriptCore/jit/JITOpcodes.cpp b/JavaScriptCore/jit/JITOpcodes.cpp
index c3f20f1..66285ae 100644
--- a/JavaScriptCore/jit/JITOpcodes.cpp
+++ b/JavaScriptCore/jit/JITOpcodes.cpp
@@ -1,5 +1,6 @@
/*
* Copyright (C) 2009 Apple Inc. All rights reserved.
+ * Copyright (C) 2010 Patrick Gansterer <paroga@paroga.com>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -24,10 +25,10 @@
*/
#include "config.h"
-#include "JIT.h"
-
#if ENABLE(JIT)
+#include "JIT.h"
+#include "Arguments.h"
#include "JITInlineMethods.h"
#include "JITStubCall.h"
#include "JSArray.h"
@@ -38,312 +39,118 @@
namespace JSC {
-#if USE(JSVALUE32_64)
+#if USE(JSVALUE64)
+
+#define RECORD_JUMP_TARGET(targetOffset) \
+ do { m_labels[m_bytecodeOffset + (targetOffset)].used(); } while (false)
void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executablePool, JSGlobalData* globalData, TrampolineStructure *trampolines)
{
-#if ENABLE(JIT_OPTIMIZE_MOD)
- Label softModBegin = align();
- softModulo();
-#endif
#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
- // (1) This function provides fast property access for string length
+ // (2) The second function provides fast property access for string length
Label stringLengthBegin = align();
-
- // regT0 holds payload, regT1 holds tag
-
- Jump string_failureCases1 = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));
+
+ // Check eax is a string
+ Jump string_failureCases1 = emitJumpIfNotJSCell(regT0);
Jump string_failureCases2 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr));
// Checks out okay! - get the length from the Ustring.
- load32(Address(regT0, OBJECT_OFFSETOF(JSString, m_stringLength)), regT2);
+ load32(Address(regT0, OBJECT_OFFSETOF(JSString, m_length)), regT0);
- Jump string_failureCases3 = branch32(Above, regT2, Imm32(INT_MAX));
- move(regT2, regT0);
- move(Imm32(JSValue::Int32Tag), regT1);
+ Jump string_failureCases3 = branch32(Above, regT0, Imm32(JSImmediate::maxImmediateInt));
+ // regT0 contains a 64-bit value (positive, zero-extended), so we don't need to sign-extend here.
+ emitFastArithIntToImmNoCheck(regT0, regT0);
+
ret();
#endif
- // (2) Trampolines for the slow cases of op_call / op_call_eval / op_construct.
+ // (3) Trampolines for the slow cases of op_call / op_call_eval / op_construct.
+ COMPILE_ASSERT(sizeof(CodeType) == 4, CodeTypeEnumMustBe32Bit);
-#if ENABLE(JIT_OPTIMIZE_CALL)
// VirtualCallLink Trampoline
// regT0 holds callee, regT1 holds argCount. regT2 will hold the FunctionExecutable.
+ JumpList callLinkFailures;
Label virtualCallLinkBegin = align();
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
-
- Jump isNativeFunc2 = branch32(Equal, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParameters)), Imm32(0));
-
- Jump hasCodeBlock2 = branch32(GreaterThan, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParameters)), Imm32(0));
- preserveReturnAddressAfterCall(regT3);
- restoreArgumentReference();
- Call callJSFunction2 = call();
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
- emitGetJITStubArg(2, regT1); // argCount
- restoreReturnAddressBeforeReturn(regT3);
- hasCodeBlock2.link(this);
-
- // Check argCount matches callee arity.
- Jump arityCheckOkay2 = branch32(Equal, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParameters)), regT1);
+ compileOpCallInitializeCallFrame();
preserveReturnAddressAfterCall(regT3);
- emitPutJITStubArg(regT3, 1); // return address
+ emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
restoreArgumentReference();
- Call callArityCheck2 = call();
- move(regT1, callFrameRegister);
- emitGetJITStubArg(2, regT1); // argCount
+ Call callLazyLinkCall = call();
+ callLinkFailures.append(branchTestPtr(Zero, regT0));
restoreReturnAddressBeforeReturn(regT3);
- arityCheckOkay2.link(this);
-
- isNativeFunc2.link(this);
+ emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT1);
+ jump(regT0);
+ // VirtualConstructLink Trampoline
+ // regT0 holds callee, regT1 holds argCount. regT2 will hold the FunctionExecutable.
+ Label virtualConstructLinkBegin = align();
compileOpCallInitializeCallFrame();
-
preserveReturnAddressAfterCall(regT3);
- emitPutJITStubArg(regT3, 1); // return address
+ emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
restoreArgumentReference();
- Call callLazyLinkCall = call();
+ Call callLazyLinkConstruct = call();
+ callLinkFailures.append(branchTestPtr(Zero, regT0));
restoreReturnAddressBeforeReturn(regT3);
+ emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT1);
jump(regT0);
-#endif // ENABLE(JIT_OPTIMIZE_CALL)
// VirtualCall Trampoline
// regT0 holds callee, regT1 holds argCount. regT2 will hold the FunctionExecutable.
Label virtualCallBegin = align();
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
+ compileOpCallInitializeCallFrame();
- Jump isNativeFunc3 = branch32(Equal, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParameters)), Imm32(0));
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
- Jump hasCodeBlock3 = branch32(GreaterThan, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParameters)), Imm32(0));
+ Jump hasCodeBlock3 = branch32(GreaterThanOrEqual, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParametersForCall)), Imm32(0));
preserveReturnAddressAfterCall(regT3);
restoreArgumentReference();
- Call callJSFunction1 = call();
- emitGetJITStubArg(2, regT1); // argCount
+ Call callCompileCall = call();
+ callLinkFailures.append(branchTestPtr(Zero, regT0));
+ emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT1);
restoreReturnAddressBeforeReturn(regT3);
loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
hasCodeBlock3.link(this);
-
- // Check argCount matches callee arity.
- Jump arityCheckOkay3 = branch32(Equal, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParameters)), regT1);
- preserveReturnAddressAfterCall(regT3);
- emitPutJITStubArg(regT3, 1); // return address
- restoreArgumentReference();
- Call callArityCheck1 = call();
- move(regT1, callFrameRegister);
- emitGetJITStubArg(2, regT1); // argCount
- restoreReturnAddressBeforeReturn(regT3);
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
- arityCheckOkay3.link(this);
-
- isNativeFunc3.link(this);
- compileOpCallInitializeCallFrame();
- loadPtr(Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_jitCode)), regT0);
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_jitCodeForCallWithArityCheck)), regT0);
jump(regT0);
-#if CPU(X86) || CPU(ARM_TRADITIONAL)
- Label nativeCallThunk = align();
- preserveReturnAddressAfterCall(regT0);
- emitPutToCallFrameHeader(regT0, RegisterFile::ReturnPC); // Push return address
-
- // Load caller frame's scope chain into this callframe so that whatever we call can
- // get to its global data.
- emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT1);
- emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT1);
- emitPutToCallFrameHeader(regT1, RegisterFile::ScopeChain);
-
-#if CPU(X86)
- emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT0);
-
- /* We have two structs that we use to describe the stackframe we set up for our
- * call to native code. NativeCallFrameStructure describes how we set up the stack
- * in advance of the call. NativeFunctionCalleeSignature describes the callframe
- * as the native code expects it. We do this as we are using the fastcall calling
- * convention which results in the callee popping its arguments off the stack, but
- * not the rest of the callframe so we need a nice way to ensure we increment the
- * stack pointer by the right amount after the call.
- */
-
-#if COMPILER(MSVC) || OS(LINUX)
-#if COMPILER(MSVC)
-#pragma pack(push)
-#pragma pack(4)
-#endif // COMPILER(MSVC)
- struct NativeCallFrameStructure {
- // CallFrame* callFrame; // passed in EDX
- JSObject* callee;
- JSValue thisValue;
- ArgList* argPointer;
- ArgList args;
- JSValue result;
- };
- struct NativeFunctionCalleeSignature {
- JSObject* callee;
- JSValue thisValue;
- ArgList* argPointer;
- };
-#if COMPILER(MSVC)
-#pragma pack(pop)
-#endif // COMPILER(MSVC)
-#else
- struct NativeCallFrameStructure {
- // CallFrame* callFrame; // passed in ECX
- // JSObject* callee; // passed in EDX
- JSValue thisValue;
- ArgList* argPointer;
- ArgList args;
- };
- struct NativeFunctionCalleeSignature {
- JSValue thisValue;
- ArgList* argPointer;
- };
-#endif
-
- const int NativeCallFrameSize = (sizeof(NativeCallFrameStructure) + 15) & ~15;
- // Allocate system stack frame
- subPtr(Imm32(NativeCallFrameSize), stackPointerRegister);
-
- // Set up arguments
- subPtr(Imm32(1), regT0); // Don't include 'this' in argcount
-
- // push argcount
- storePtr(regT0, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, args) + OBJECT_OFFSETOF(ArgList, m_argCount)));
-
- // Calculate the start of the callframe header, and store in regT1
- addPtr(Imm32(-RegisterFile::CallFrameHeaderSize * (int)sizeof(Register)), callFrameRegister, regT1);
-
- // Calculate start of arguments as callframe header - sizeof(Register) * argcount (regT0)
- mul32(Imm32(sizeof(Register)), regT0, regT0);
- subPtr(regT0, regT1);
- storePtr(regT1, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, args) + OBJECT_OFFSETOF(ArgList, m_args)));
-
- // ArgList is passed by reference so is stackPointerRegister + 4 * sizeof(Register)
- addPtr(Imm32(OBJECT_OFFSETOF(NativeCallFrameStructure, args)), stackPointerRegister, regT0);
- storePtr(regT0, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, argPointer)));
-
- // regT1 currently points to the first argument, regT1 - sizeof(Register) points to 'this'
- loadPtr(Address(regT1, -(int)sizeof(Register) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT2);
- loadPtr(Address(regT1, -(int)sizeof(Register) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT3);
- storePtr(regT2, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, thisValue) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
- storePtr(regT3, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, thisValue) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
-
-#if COMPILER(MSVC) || OS(LINUX)
- // JSValue is a non-POD type, so the result is returned indirectly; pass the address of the result slot in ecx.
- addPtr(Imm32(OBJECT_OFFSETOF(NativeCallFrameStructure, result)), stackPointerRegister, X86Registers::ecx);
-
- // Plant callee
- emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, X86Registers::eax);
- storePtr(X86Registers::eax, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, callee)));
-
- // Plant callframe
- move(callFrameRegister, X86Registers::edx);
-
- call(Address(X86Registers::eax, OBJECT_OFFSETOF(JSFunction, m_data)));
-
- // JSValue is a non-POD type, so eax points to it
- emitLoad(0, regT1, regT0, X86Registers::eax);
-#else
- emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, X86Registers::edx); // callee
- move(callFrameRegister, X86Registers::ecx); // callFrame
- call(Address(X86Registers::edx, OBJECT_OFFSETOF(JSFunction, m_data)));
-#endif
-
- // We've put a few temporaries on the stack in addition to the actual arguments,
- // so pull them off now.
- addPtr(Imm32(NativeCallFrameSize - sizeof(NativeFunctionCalleeSignature)), stackPointerRegister);
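The two sizes in play here make the fastcall bookkeeping concrete: the frame is rounded up to 16 bytes with the usual mask trick, and because the callee pops only its own arguments (the NativeFunctionCalleeSignature portion), the caller pops the remainder. A minimal standalone sketch of that arithmetic, using illustrative struct sizes rather than the real ones:

    #include <cassert>
    #include <cstddef>

    // Round up to a 16-byte boundary, as the NativeCallFrameSize computation does.
    static std::size_t roundUpTo16(std::size_t size)
    {
        return (size + 15) & ~static_cast<std::size_t>(15);
    }

    int main()
    {
        // Illustrative sizes, standing in for the real structs.
        const std::size_t frameStructSize = 36; // sizeof(NativeCallFrameStructure)
        const std::size_t calleeSigSize = 12;   // sizeof(NativeFunctionCalleeSignature)

        std::size_t nativeCallFrameSize = roundUpTo16(frameStructSize);
        assert(nativeCallFrameSize == 48);

        // The fastcall callee pops calleeSigSize itself; the caller pops the rest.
        std::size_t callerPops = nativeCallFrameSize - calleeSigSize;
        assert(callerPops == 36);
        return 0;
    }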
-
-#elif CPU(ARM_TRADITIONAL)
- emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT0);
-
- // Allocate stack space for our arglist
- COMPILE_ASSERT((sizeof(ArgList) & 0x7) == 0 && sizeof(JSValue) == 8 && sizeof(Register) == 8, ArgList_should_by_8byte_aligned);
- subPtr(Imm32(sizeof(ArgList)), stackPointerRegister);
-
- // Set up arguments
- subPtr(Imm32(1), regT0); // Don't include 'this' in argcount
-
- // Push argcount
- storePtr(regT0, Address(stackPointerRegister, OBJECT_OFFSETOF(ArgList, m_argCount)));
-
- // Calculate the start of the callframe header, and store in regT1
- move(callFrameRegister, regT1);
- sub32(Imm32(RegisterFile::CallFrameHeaderSize * (int32_t)sizeof(Register)), regT1);
-
- // Calculate start of arguments as callframe header - sizeof(Register) * argcount (regT1)
- mul32(Imm32(sizeof(Register)), regT0, regT0);
- subPtr(regT0, regT1);
-
- // push pointer to arguments
- storePtr(regT1, Address(stackPointerRegister, OBJECT_OFFSETOF(ArgList, m_args)));
-
- // Argument passing method:
- // r0 - points to return value
- // r1 - callFrame
- // r2 - callee
- // stack: this(JSValue) and a pointer to ArgList
-
- move(stackPointerRegister, regT3);
- subPtr(Imm32(8), stackPointerRegister);
- move(stackPointerRegister, regT0);
- subPtr(Imm32(8 + 4 + 4 /* padding */), stackPointerRegister);
-
- // Setup arg4:
- storePtr(regT3, Address(stackPointerRegister, 8));
-
- // Setup arg3
- // regT1 currently points to the first argument, regT1-sizeof(Register) points to 'this'
- load32(Address(regT1, -(int32_t)sizeof(void*) * 2), regT3);
- storePtr(regT3, Address(stackPointerRegister, 0));
- load32(Address(regT1, -(int32_t)sizeof(void*)), regT3);
- storePtr(regT3, Address(stackPointerRegister, 4));
-
- // Setup arg2:
- emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, regT2);
-
- // Setup arg1:
- move(callFrameRegister, regT1);
-
- call(Address(regT2, OBJECT_OFFSETOF(JSFunction, m_data)));
-
- // Load return value
- load32(Address(stackPointerRegister, 16), regT0);
- load32(Address(stackPointerRegister, 20), regT1);
-
- addPtr(Imm32(sizeof(ArgList) + 16 + 8), stackPointerRegister);
-#endif
+ // VirtualConstruct Trampoline
+ // regT0 holds callee, regT1 holds argCount. regT2 will hold the FunctionExecutable.
+ Label virtualConstructBegin = align();
+ compileOpCallInitializeCallFrame();
- // Check for an exception
- move(ImmPtr(&globalData->exception), regT2);
- Jump sawException = branch32(NotEqual, tagFor(0, regT2), Imm32(JSValue::EmptyValueTag));
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
- // Grab the return address.
- emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT3);
-
- // Restore our caller's "r".
- emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
-
- // Return.
+ Jump hasCodeBlock4 = branch32(GreaterThanOrEqual, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParametersForConstruct)), Imm32(0));
+ preserveReturnAddressAfterCall(regT3);
+ restoreArgumentReference();
+ Call callCompileConstruct = call();
+ callLinkFailures.append(branchTestPtr(Zero, regT0));
+ emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT1);
restoreReturnAddressBeforeReturn(regT3);
- ret();
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
+ hasCodeBlock4.link(this);
- // Handle an exception
- sawException.link(this);
- // Grab the return address.
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_jitCodeForConstructWithArityCheck)), regT0);
+ jump(regT0);
+
+ // If the parser fails we want to be able to keep going,
+ // so we handle this as a parse failure.
+ callLinkFailures.link(this);
emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT1);
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
+ restoreReturnAddressBeforeReturn(regT1);
move(ImmPtr(&globalData->exceptionLocation), regT2);
storePtr(regT1, regT2);
- move(ImmPtr(FunctionPtr(ctiVMThrowTrampoline).value()), regT2);
- emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
- poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof (void*));
- restoreReturnAddressBeforeReturn(regT2);
+ poke(callFrameRegister, 1 + OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
+ poke(ImmPtr(FunctionPtr(ctiVMThrowTrampoline).value()));
ret();
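The poke()/ret() pair above is a tail-redirect: the stub overwrites its own return-address slot with ctiVMThrowTrampoline, so the ret transfers there instead of back to the caller. A conceptual model of the trick, with an explicit array standing in for the machine stack (not real stack manipulation):

    #include <cassert>

    typedef void (*Continuation)();

    static bool throwTrampolineRan = false;
    static void vmThrowTrampolineModel() { throwTrampolineRan = true; }

    int main()
    {
        // Slot 0 models the return-address slot that poke(ImmPtr(...)) overwrites.
        Continuation stackSlots[2] = { 0, 0 };
        stackSlots[0] = vmThrowTrampolineModel; // poke the new "return address"
        stackSlots[0]();                        // "ret" now lands in the trampoline
        assert(throwTrampolineRan);
        return 0;
    }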
-#elif ENABLE(JIT_OPTIMIZE_NATIVE_CALL)
-#error "JIT_OPTIMIZE_NATIVE_CALL not yet supported on this platform."
-#else
- breakpoint();
-#endif
-
+ // NativeCall Trampoline
+ Label nativeCallThunk = privateCompileCTINativeCall(globalData);
+ Label nativeConstructThunk = privateCompileCTINativeCall(globalData, true);
+
#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
Call string_failureCases1Call = makeTailRecursiveCall(string_failureCases1);
Call string_failureCases2Call = makeTailRecursiveCall(string_failureCases2);
@@ -351,1438 +158,121 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
#endif
// All trampolines constructed! copy the code, link up calls, and set the pointers on the Machine object.
- LinkBuffer patchBuffer(this, m_globalData->executableAllocator.poolForSize(m_assembler.size()));
+ LinkBuffer patchBuffer(this, m_globalData->executableAllocator.poolForSize(m_assembler.size()), 0);
#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
patchBuffer.link(string_failureCases1Call, FunctionPtr(cti_op_get_by_id_string_fail));
patchBuffer.link(string_failureCases2Call, FunctionPtr(cti_op_get_by_id_string_fail));
patchBuffer.link(string_failureCases3Call, FunctionPtr(cti_op_get_by_id_string_fail));
#endif
- patchBuffer.link(callArityCheck1, FunctionPtr(cti_op_call_arityCheck));
- patchBuffer.link(callJSFunction1, FunctionPtr(cti_op_call_JSFunction));
#if ENABLE(JIT_OPTIMIZE_CALL)
- patchBuffer.link(callArityCheck2, FunctionPtr(cti_op_call_arityCheck));
- patchBuffer.link(callJSFunction2, FunctionPtr(cti_op_call_JSFunction));
patchBuffer.link(callLazyLinkCall, FunctionPtr(cti_vm_lazyLinkCall));
+ patchBuffer.link(callLazyLinkConstruct, FunctionPtr(cti_vm_lazyLinkConstruct));
#endif
+ patchBuffer.link(callCompileCall, FunctionPtr(cti_op_call_jitCompile));
+ patchBuffer.link(callCompileConstruct, FunctionPtr(cti_op_construct_jitCompile));
CodeRef finalCode = patchBuffer.finalizeCode();
*executablePool = finalCode.m_executablePool;
- trampolines->ctiVirtualCall = trampolineAt(finalCode, virtualCallBegin);
- trampolines->ctiNativeCallThunk = trampolineAt(finalCode, nativeCallThunk);
+ trampolines->ctiVirtualCallLink = patchBuffer.trampolineAt(virtualCallLinkBegin);
+ trampolines->ctiVirtualConstructLink = patchBuffer.trampolineAt(virtualConstructLinkBegin);
+ trampolines->ctiVirtualCall = patchBuffer.trampolineAt(virtualCallBegin);
+ trampolines->ctiVirtualConstruct = patchBuffer.trampolineAt(virtualConstructBegin);
+ trampolines->ctiNativeCall = patchBuffer.trampolineAt(nativeCallThunk);
+ trampolines->ctiNativeConstruct = patchBuffer.trampolineAt(nativeConstructThunk);
#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
- trampolines->ctiStringLengthTrampoline = trampolineAt(finalCode, stringLengthBegin);
-#else
- UNUSED_PARAM(ctiStringLengthTrampoline);
-#endif
-#if ENABLE(JIT_OPTIMIZE_CALL)
- trampolines->ctiVirtualCallLink = trampolineAt(finalCode, virtualCallLinkBegin);
-#else
- UNUSED_PARAM(ctiVirtualCallLink);
+ trampolines->ctiStringLengthTrampoline = patchBuffer.trampolineAt(stringLengthBegin);
#endif
-#if ENABLE(JIT_OPTIMIZE_MOD)
- trampolines->ctiSoftModulo = trampolineAt(finalCode, softModBegin);
-#endif
-}
-
-void JIT::emit_op_mov(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned src = currentInstruction[2].u.operand;
-
- if (m_codeBlock->isConstantRegisterIndex(src))
- emitStore(dst, getConstantOperand(src));
- else {
- emitLoad(src, regT1, regT0);
- emitStore(dst, regT1, regT0);
- map(m_bytecodeIndex + OPCODE_LENGTH(op_mov), dst, regT1, regT0);
- }
-}
-
-void JIT::emit_op_end(Instruction* currentInstruction)
-{
- if (m_codeBlock->needsFullScopeChain())
- JITStubCall(this, cti_op_end).call();
- ASSERT(returnValueRegister != callFrameRegister);
- emitLoad(currentInstruction[1].u.operand, regT1, regT0);
- restoreReturnAddressBeforeReturn(Address(callFrameRegister, RegisterFile::ReturnPC * static_cast<int>(sizeof(Register))));
- ret();
-}
-
-void JIT::emit_op_jmp(Instruction* currentInstruction)
-{
- unsigned target = currentInstruction[1].u.operand;
- addJump(jump(), target);
-}
-
-void JIT::emit_op_loop_if_lesseq(Instruction* currentInstruction)
-{
- unsigned op1 = currentInstruction[1].u.operand;
- unsigned op2 = currentInstruction[2].u.operand;
- unsigned target = currentInstruction[3].u.operand;
-
- emitTimeoutCheck();
-
- if (isOperandConstantImmediateInt(op1)) {
- emitLoad(op2, regT1, regT0);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- addJump(branch32(GreaterThanOrEqual, regT0, Imm32(getConstantOperand(op1).asInt32())), target);
- return;
- }
-
- if (isOperandConstantImmediateInt(op2)) {
- emitLoad(op1, regT1, regT0);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- addJump(branch32(LessThanOrEqual, regT0, Imm32(getConstantOperand(op2).asInt32())), target);
- return;
- }
-
- emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
- addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
- addJump(branch32(LessThanOrEqual, regT0, regT2), target);
-}
-
-void JIT::emitSlow_op_loop_if_lesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned op1 = currentInstruction[1].u.operand;
- unsigned op2 = currentInstruction[2].u.operand;
- unsigned target = currentInstruction[3].u.operand;
-
- if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
- linkSlowCase(iter); // int32 check
- linkSlowCase(iter); // int32 check
-
- JITStubCall stubCall(this, cti_op_loop_if_lesseq);
- stubCall.addArgument(op1);
- stubCall.addArgument(op2);
- stubCall.call();
- emitJumpSlowToHot(branchTest32(NonZero, regT0), target);
-}
-
-void JIT::emit_op_new_object(Instruction* currentInstruction)
-{
- JITStubCall(this, cti_op_new_object).call(currentInstruction[1].u.operand);
-}
-
-void JIT::emit_op_instanceof(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned value = currentInstruction[2].u.operand;
- unsigned baseVal = currentInstruction[3].u.operand;
- unsigned proto = currentInstruction[4].u.operand;
-
- // Load the operands into registers.
- // We use regT0 for baseVal since we will be done with this first, and we can then use it for the result.
- emitLoadPayload(value, regT2);
- emitLoadPayload(baseVal, regT0);
- emitLoadPayload(proto, regT1);
-
- // Check that value, baseVal, and proto are cells.
- emitJumpSlowCaseIfNotJSCell(value);
- emitJumpSlowCaseIfNotJSCell(baseVal);
- emitJumpSlowCaseIfNotJSCell(proto);
-
- // Check that baseVal 'ImplementsDefaultHasInstance'.
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT0);
- addSlowCase(branchTest32(Zero, Address(regT0, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(ImplementsDefaultHasInstance)));
-
- // Optimistically load the result true, and start looping.
- // Initially, regT1 still contains proto and regT2 still contains value.
- // As we loop regT2 will be updated with its prototype, recursively walking the prototype chain.
- move(Imm32(JSValue::TrueTag), regT0);
- Label loop(this);
-
- // Load the prototype of the cell in regT2. If this is equal to regT1 - WIN!
- // Otherwise, check if we've hit null - if we have then drop out of the loop, if not go again.
- loadPtr(Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
- load32(Address(regT2, OBJECT_OFFSETOF(Structure, m_prototype) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT2);
- Jump isInstance = branchPtr(Equal, regT2, regT1);
- branchTest32(NonZero, regT2).linkTo(loop, this);
-
- // We get here either by dropping out of the loop, or if value was not an Object. Result is false.
- move(Imm32(JSValue::FalseTag), regT0);
-
- // isInstance jumps right down to here, to skip setting the result to false (it has already set true).
- isInstance.link(this);
- emitStoreBool(dst, regT0);
-}
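The inline loop above is the standard instanceof prototype walk: start from the value, repeatedly load the prototype out of its structure, and stop on a match or a null link. A C++ sketch under illustrative types (these are stand-ins, not JSC's real classes):

    // Only the chain shape matters; prototypeOrNull models the
    // Structure::m_prototype payload loaded in the emitted loop.
    struct Cell { Cell* prototypeOrNull; };

    static bool isInstanceOf(const Cell* value, const Cell* proto)
    {
        // regT2 walks the chain; regT1 holds proto.
        for (const Cell* current = value->prototypeOrNull; current; current = current->prototypeOrNull) {
            if (current == proto)
                return true; // the isInstance jump
        }
        return false;        // fell off the chain at null
    }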
-
-void JIT::emitSlow_op_instanceof(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned value = currentInstruction[2].u.operand;
- unsigned baseVal = currentInstruction[3].u.operand;
- unsigned proto = currentInstruction[4].u.operand;
-
- linkSlowCaseIfNotJSCell(iter, value);
- linkSlowCaseIfNotJSCell(iter, baseVal);
- linkSlowCaseIfNotJSCell(iter, proto);
- linkSlowCase(iter);
-
- JITStubCall stubCall(this, cti_op_instanceof);
- stubCall.addArgument(value);
- stubCall.addArgument(baseVal);
- stubCall.addArgument(proto);
- stubCall.call(dst);
-}
-
-void JIT::emit_op_new_func(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, cti_op_new_func);
- stubCall.addArgument(ImmPtr(m_codeBlock->functionDecl(currentInstruction[2].u.operand)));
- stubCall.call(currentInstruction[1].u.operand);
-}
-
-void JIT::emit_op_get_global_var(Instruction* currentInstruction)
-{
- int dst = currentInstruction[1].u.operand;
- JSGlobalObject* globalObject = static_cast<JSGlobalObject*>(currentInstruction[2].u.jsCell);
- ASSERT(globalObject->isGlobalObject());
- int index = currentInstruction[3].u.operand;
-
- loadPtr(&globalObject->d()->registers, regT2);
-
- emitLoad(index, regT1, regT0, regT2);
- emitStore(dst, regT1, regT0);
- map(m_bytecodeIndex + OPCODE_LENGTH(op_get_global_var), dst, regT1, regT0);
-}
-
-void JIT::emit_op_put_global_var(Instruction* currentInstruction)
-{
- JSGlobalObject* globalObject = static_cast<JSGlobalObject*>(currentInstruction[1].u.jsCell);
- ASSERT(globalObject->isGlobalObject());
- int index = currentInstruction[2].u.operand;
- int value = currentInstruction[3].u.operand;
-
- emitLoad(value, regT1, regT0);
-
- loadPtr(&globalObject->d()->registers, regT2);
- emitStore(index, regT1, regT0, regT2);
- map(m_bytecodeIndex + OPCODE_LENGTH(op_put_global_var), value, regT1, regT0);
-}
-
-void JIT::emit_op_get_scoped_var(Instruction* currentInstruction)
-{
- int dst = currentInstruction[1].u.operand;
- int index = currentInstruction[2].u.operand;
- int skip = currentInstruction[3].u.operand + m_codeBlock->needsFullScopeChain();
-
- emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT2);
- while (skip--)
- loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, next)), regT2);
-
- loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, object)), regT2);
- loadPtr(Address(regT2, OBJECT_OFFSETOF(JSVariableObject, d)), regT2);
- loadPtr(Address(regT2, OBJECT_OFFSETOF(JSVariableObject::JSVariableObjectData, registers)), regT2);
-
- emitLoad(index, regT1, regT0, regT2);
- emitStore(dst, regT1, regT0);
- map(m_bytecodeIndex + OPCODE_LENGTH(op_get_scoped_var), dst, regT1, regT0);
-}
-
-void JIT::emit_op_put_scoped_var(Instruction* currentInstruction)
-{
- int index = currentInstruction[1].u.operand;
- int skip = currentInstruction[2].u.operand + m_codeBlock->needsFullScopeChain();
- int value = currentInstruction[3].u.operand;
-
- emitLoad(value, regT1, regT0);
-
- emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT2);
- while (skip--)
- loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, next)), regT2);
-
- loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, object)), regT2);
- loadPtr(Address(regT2, OBJECT_OFFSETOF(JSVariableObject, d)), regT2);
- loadPtr(Address(regT2, OBJECT_OFFSETOF(JSVariableObject::JSVariableObjectData, registers)), regT2);
-
- emitStore(index, regT1, regT0, regT2);
- map(m_bytecodeIndex + OPCODE_LENGTH(op_put_scoped_var), value, regT1, regT0);
-}
-
-void JIT::emit_op_tear_off_activation(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, cti_op_tear_off_activation);
- stubCall.addArgument(currentInstruction[1].u.operand);
- stubCall.call();
-}
-
-void JIT::emit_op_tear_off_arguments(Instruction*)
-{
- JITStubCall(this, cti_op_tear_off_arguments).call();
-}
-
-void JIT::emit_op_new_array(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, cti_op_new_array);
- stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
- stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
- stubCall.call(currentInstruction[1].u.operand);
-}
-
-void JIT::emit_op_resolve(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, cti_op_resolve);
- stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
- stubCall.call(currentInstruction[1].u.operand);
-}
-
-void JIT::emit_op_to_primitive(Instruction* currentInstruction)
-{
- int dst = currentInstruction[1].u.operand;
- int src = currentInstruction[2].u.operand;
-
- emitLoad(src, regT1, regT0);
-
- Jump isImm = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));
- addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr)));
- isImm.link(this);
-
- if (dst != src)
- emitStore(dst, regT1, regT0);
- map(m_bytecodeIndex + OPCODE_LENGTH(op_to_primitive), dst, regT1, regT0);
-}
-
-void JIT::emitSlow_op_to_primitive(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- int dst = currentInstruction[1].u.operand;
-
- linkSlowCase(iter);
-
- JITStubCall stubCall(this, cti_op_to_primitive);
- stubCall.addArgument(regT1, regT0);
- stubCall.call(dst);
-}
-
-void JIT::emit_op_strcat(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, cti_op_strcat);
- stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
- stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
- stubCall.call(currentInstruction[1].u.operand);
-}
-
-void JIT::emit_op_resolve_base(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, cti_op_resolve_base);
- stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
- stubCall.call(currentInstruction[1].u.operand);
-}
-
-void JIT::emit_op_resolve_skip(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, cti_op_resolve_skip);
- stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
- stubCall.addArgument(Imm32(currentInstruction[3].u.operand + m_codeBlock->needsFullScopeChain()));
- stubCall.call(currentInstruction[1].u.operand);
-}
-
-void JIT::emit_op_resolve_global(Instruction* currentInstruction)
-{
- // FIXME: Optimize to use patching instead of so many memory accesses.
-
- unsigned dst = currentInstruction[1].u.operand;
- void* globalObject = currentInstruction[2].u.jsCell;
-
- unsigned currentIndex = m_globalResolveInfoIndex++;
- void* structureAddress = &(m_codeBlock->globalResolveInfo(currentIndex).structure);
- void* offsetAddr = &(m_codeBlock->globalResolveInfo(currentIndex).offset);
-
- // Verify structure.
- move(ImmPtr(globalObject), regT0);
- loadPtr(structureAddress, regT1);
- addSlowCase(branchPtr(NotEqual, regT1, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure))));
-
- // Load property.
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSGlobalObject, m_externalStorage)), regT2);
- load32(offsetAddr, regT3);
- load32(BaseIndex(regT2, regT3, TimesEight), regT0); // payload
- load32(BaseIndex(regT2, regT3, TimesEight, 4), regT1); // tag
- emitStore(dst, regT1, regT0);
- map(m_bytecodeIndex + OPCODE_LENGTH(op_resolve_global), dst, regT1, regT0);
-}
-
-void JIT::emitSlow_op_resolve_global(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- void* globalObject = currentInstruction[2].u.jsCell;
- Identifier* ident = &m_codeBlock->identifier(currentInstruction[3].u.operand);
-
- unsigned currentIndex = m_globalResolveInfoIndex++;
-
- linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_resolve_global);
- stubCall.addArgument(ImmPtr(globalObject));
- stubCall.addArgument(ImmPtr(ident));
- stubCall.addArgument(Imm32(currentIndex));
- stubCall.call(dst);
-}
-
-void JIT::emit_op_not(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned src = currentInstruction[2].u.operand;
-
- emitLoadTag(src, regT0);
-
- xor32(Imm32(JSValue::FalseTag), regT0);
- addSlowCase(branchTest32(NonZero, regT0, Imm32(~1)));
- xor32(Imm32(JSValue::TrueTag), regT0);
-
- emitStoreBool(dst, regT0, (dst == src));
-}
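The xor dance above works because, under JSVALUE32_64, TrueTag and FalseTag differ only in bit 0. XORing the tag with FalseTag maps a boolean tag to 0 or 1 (anything with other bits set takes the slow case), and XORing with TrueTag then yields the tag of the negated boolean. A sketch with assumed tag encodings, where only that bit-0 relationship matters:

    #include <cassert>
    #include <cstdint>

    int main()
    {
        // Illustrative encodings; the real constants live in JSValue.
        const uint32_t FalseTag = 0xfffffffc;
        const uint32_t TrueTag  = 0xfffffffd; // == FalseTag ^ 1

        const uint32_t tags[2] = { FalseTag, TrueTag };
        for (int i = 0; i < 2; ++i) {
            uint32_t tag = tags[i];
            uint32_t t = tag ^ FalseTag;   // 0 for false, 1 for true
            assert((t & ~1u) == 0);        // any other bits: slow case
            uint32_t notTag = t ^ TrueTag; // tag of the logical NOT
            assert(notTag == (tag == FalseTag ? TrueTag : FalseTag));
        }
        return 0;
    }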
-
-void JIT::emitSlow_op_not(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned src = currentInstruction[2].u.operand;
-
- linkSlowCase(iter);
-
- JITStubCall stubCall(this, cti_op_not);
- stubCall.addArgument(src);
- stubCall.call(dst);
-}
-
-void JIT::emit_op_jfalse(Instruction* currentInstruction)
-{
- unsigned cond = currentInstruction[1].u.operand;
- unsigned target = currentInstruction[2].u.operand;
-
- emitLoad(cond, regT1, regT0);
-
- Jump isTrue = branch32(Equal, regT1, Imm32(JSValue::TrueTag));
- addJump(branch32(Equal, regT1, Imm32(JSValue::FalseTag)), target);
-
- Jump isNotInteger = branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag));
- Jump isTrue2 = branch32(NotEqual, regT0, Imm32(0));
- addJump(jump(), target);
-
- if (supportsFloatingPoint()) {
- isNotInteger.link(this);
-
- addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));
-
- zeroDouble(fpRegT0);
- emitLoadDouble(cond, fpRegT1);
- addJump(branchDouble(DoubleEqualOrUnordered, fpRegT0, fpRegT1), target);
- } else
- addSlowCase(isNotInteger);
-
- isTrue.link(this);
- isTrue2.link(this);
-}
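The double path above uses DoubleEqualOrUnordered against a zeroed register, so jfalse branches for +0, -0 and NaN alike (jtrue's DoubleNotEqual is the complement). The same predicate in plain C++:

    #include <cassert>
    #include <cmath>

    // Mirrors branchDouble(DoubleEqualOrUnordered, fpRegT0 /* 0.0 */, cond).
    static bool jsDoubleIsFalsy(double d)
    {
        return d == 0.0 || std::isnan(d);
    }

    int main()
    {
        assert(jsDoubleIsFalsy(0.0));
        assert(jsDoubleIsFalsy(-0.0));      // -0.0 compares equal to 0.0
        assert(jsDoubleIsFalsy(std::nan(""))); // unordered against anything
        assert(!jsDoubleIsFalsy(1.5));
        return 0;
    }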
-
-void JIT::emitSlow_op_jfalse(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned cond = currentInstruction[1].u.operand;
- unsigned target = currentInstruction[2].u.operand;
-
- linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_jtrue);
- stubCall.addArgument(cond);
- stubCall.call();
- emitJumpSlowToHot(branchTest32(Zero, regT0), target); // Inverted.
-}
-
-void JIT::emit_op_jtrue(Instruction* currentInstruction)
-{
- unsigned cond = currentInstruction[1].u.operand;
- unsigned target = currentInstruction[2].u.operand;
-
- emitLoad(cond, regT1, regT0);
-
- Jump isFalse = branch32(Equal, regT1, Imm32(JSValue::FalseTag));
- addJump(branch32(Equal, regT1, Imm32(JSValue::TrueTag)), target);
-
- Jump isNotInteger = branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag));
- Jump isFalse2 = branch32(Equal, regT0, Imm32(0));
- addJump(jump(), target);
-
- if (supportsFloatingPoint()) {
- isNotInteger.link(this);
-
- addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));
-
- zeroDouble(fpRegT0);
- emitLoadDouble(cond, fpRegT1);
- addJump(branchDouble(DoubleNotEqual, fpRegT0, fpRegT1), target);
- } else
- addSlowCase(isNotInteger);
-
- isFalse.link(this);
- isFalse2.link(this);
-}
-
-void JIT::emitSlow_op_jtrue(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned cond = currentInstruction[1].u.operand;
- unsigned target = currentInstruction[2].u.operand;
-
- linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_jtrue);
- stubCall.addArgument(cond);
- stubCall.call();
- emitJumpSlowToHot(branchTest32(NonZero, regT0), target);
-}
-
-void JIT::emit_op_jeq_null(Instruction* currentInstruction)
-{
- unsigned src = currentInstruction[1].u.operand;
- unsigned target = currentInstruction[2].u.operand;
-
- emitLoad(src, regT1, regT0);
-
- Jump isImmediate = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));
-
- // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
- addJump(branchTest32(NonZero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined)), target);
-
- Jump wasNotImmediate = jump();
-
- // Now handle the immediate cases - undefined & null
- isImmediate.link(this);
-
- set32(Equal, regT1, Imm32(JSValue::NullTag), regT2);
- set32(Equal, regT1, Imm32(JSValue::UndefinedTag), regT1);
- or32(regT2, regT1);
-
- addJump(branchTest32(NonZero, regT1), target);
-
- wasNotImmediate.link(this);
-}
-
-void JIT::emit_op_jneq_null(Instruction* currentInstruction)
-{
- unsigned src = currentInstruction[1].u.operand;
- unsigned target = currentInstruction[2].u.operand;
-
- emitLoad(src, regT1, regT0);
-
- Jump isImmediate = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));
-
- // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
- addJump(branchTest32(Zero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined)), target);
-
- Jump wasNotImmediate = jump();
-
- // Now handle the immediate cases - undefined & null
- isImmediate.link(this);
-
- set32(Equal, regT1, Imm32(JSValue::NullTag), regT2);
- set32(Equal, regT1, Imm32(JSValue::UndefinedTag), regT1);
- or32(regT2, regT1);
-
- addJump(branchTest32(Zero, regT1), target);
-
- wasNotImmediate.link(this);
-}
-
-void JIT::emit_op_jneq_ptr(Instruction* currentInstruction)
-{
- unsigned src = currentInstruction[1].u.operand;
- JSCell* ptr = currentInstruction[2].u.jsCell;
- unsigned target = currentInstruction[3].u.operand;
-
- emitLoad(src, regT1, regT0);
- addJump(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)), target);
- addJump(branchPtr(NotEqual, regT0, ImmPtr(ptr)), target);
-}
-
-void JIT::emit_op_jsr(Instruction* currentInstruction)
-{
- int retAddrDst = currentInstruction[1].u.operand;
- int target = currentInstruction[2].u.operand;
- DataLabelPtr storeLocation = storePtrWithPatch(ImmPtr(0), Address(callFrameRegister, sizeof(Register) * retAddrDst));
- addJump(jump(), target);
- m_jsrSites.append(JSRInfo(storeLocation, label()));
-}
-
-void JIT::emit_op_sret(Instruction* currentInstruction)
-{
- jump(Address(callFrameRegister, sizeof(Register) * currentInstruction[1].u.operand));
-}
-
-void JIT::emit_op_eq(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned src1 = currentInstruction[2].u.operand;
- unsigned src2 = currentInstruction[3].u.operand;
-
- emitLoad2(src1, regT1, regT0, src2, regT3, regT2);
- addSlowCase(branch32(NotEqual, regT1, regT3));
- addSlowCase(branch32(Equal, regT1, Imm32(JSValue::CellTag)));
- addSlowCase(branch32(Below, regT1, Imm32(JSValue::LowestTag)));
-
- set8(Equal, regT0, regT2, regT0);
- or32(Imm32(JSValue::FalseTag), regT0);
-
- emitStoreBool(dst, regT0);
-}
-
-void JIT::emitSlow_op_eq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
-
- JumpList storeResult;
- JumpList genericCase;
-
- genericCase.append(getSlowCase(iter)); // tags not equal
-
- linkSlowCase(iter); // tags equal and JSCell
- genericCase.append(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr)));
- genericCase.append(branchPtr(NotEqual, Address(regT2), ImmPtr(m_globalData->jsStringVPtr)));
-
- // String case.
- JITStubCall stubCallEqStrings(this, cti_op_eq_strings);
- stubCallEqStrings.addArgument(regT0);
- stubCallEqStrings.addArgument(regT2);
- stubCallEqStrings.call();
- storeResult.append(jump());
-
- // Generic case.
- genericCase.append(getSlowCase(iter)); // doubles
- genericCase.link(this);
- JITStubCall stubCallEq(this, cti_op_eq);
- stubCallEq.addArgument(op1);
- stubCallEq.addArgument(op2);
- stubCallEq.call(regT0);
-
- storeResult.link(this);
- or32(Imm32(JSValue::FalseTag), regT0);
- emitStoreBool(dst, regT0);
-}
-
-void JIT::emit_op_neq(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned src1 = currentInstruction[2].u.operand;
- unsigned src2 = currentInstruction[3].u.operand;
-
- emitLoad2(src1, regT1, regT0, src2, regT3, regT2);
- addSlowCase(branch32(NotEqual, regT1, regT3));
- addSlowCase(branch32(Equal, regT1, Imm32(JSValue::CellTag)));
- addSlowCase(branch32(Below, regT1, Imm32(JSValue::LowestTag)));
-
- set8(NotEqual, regT0, regT2, regT0);
- or32(Imm32(JSValue::FalseTag), regT0);
-
- emitStoreBool(dst, regT0);
-}
-
-void JIT::emitSlow_op_neq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
-
- JumpList storeResult;
- JumpList genericCase;
-
- genericCase.append(getSlowCase(iter)); // tags not equal
-
- linkSlowCase(iter); // tags equal and JSCell
- genericCase.append(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr)));
- genericCase.append(branchPtr(NotEqual, Address(regT2), ImmPtr(m_globalData->jsStringVPtr)));
-
- // String case.
- JITStubCall stubCallEqStrings(this, cti_op_eq_strings);
- stubCallEqStrings.addArgument(regT0);
- stubCallEqStrings.addArgument(regT2);
- stubCallEqStrings.call(regT0);
- storeResult.append(jump());
-
- // Generic case.
- genericCase.append(getSlowCase(iter)); // doubles
- genericCase.link(this);
- JITStubCall stubCallEq(this, cti_op_eq);
- stubCallEq.addArgument(regT1, regT0);
- stubCallEq.addArgument(regT3, regT2);
- stubCallEq.call(regT0);
-
- storeResult.link(this);
- xor32(Imm32(0x1), regT0);
- or32(Imm32(JSValue::FalseTag), regT0);
- emitStoreBool(dst, regT0);
-}
-
-void JIT::compileOpStrictEq(Instruction* currentInstruction, CompileOpStrictEqType type)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned src1 = currentInstruction[2].u.operand;
- unsigned src2 = currentInstruction[3].u.operand;
-
- emitLoadTag(src1, regT0);
- emitLoadTag(src2, regT1);
-
- // Jump to a slow case if either operand is double, or if both operands are
- // cells and/or Int32s.
- move(regT0, regT2);
- and32(regT1, regT2);
- addSlowCase(branch32(Below, regT2, Imm32(JSValue::LowestTag)));
- addSlowCase(branch32(AboveOrEqual, regT2, Imm32(JSValue::CellTag)));
-
- if (type == OpStrictEq)
- set8(Equal, regT0, regT1, regT0);
- else
- set8(NotEqual, regT0, regT1, regT0);
-
- or32(Imm32(JSValue::FalseTag), regT0);
-
- emitStoreBool(dst, regT0);
-}
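The tag test above exploits the JSVALUE32_64 layout: every non-double tag sits at the very top of the 32-bit range, so ANDing the two tags stays at or above LowestTag unless one operand is a double, and lands at or above CellTag only when both operands are cells and/or Int32s, which need a payload comparison. A sketch with assumed tag constants mirroring that scheme:

    #include <cassert>
    #include <cstdint>

    int main()
    {
        // Illustrative values; the real constants live in JSValue.
        const uint32_t Int32Tag   = 0xffffffff;
        const uint32_t CellTag    = 0xfffffffe;
        const uint32_t FalseTag   = 0xfffffffc;
        const uint32_t LowestTag  = 0xfffffff8;
        const uint32_t aDoubleTag = 0x40090000; // doubles carry tags below LowestTag

        assert((aDoubleTag & Int32Tag) < LowestTag); // either double: slow case
        assert((Int32Tag & CellTag) >= CellTag);     // int32/cell pair: slow case
        assert((FalseTag & FalseTag) >= LowestTag    // two singleton tags stay on
               && (FalseTag & FalseTag) < CellTag);  // the fast, tag-only compare
        return 0;
    }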
-
-void JIT::emit_op_stricteq(Instruction* currentInstruction)
-{
- compileOpStrictEq(currentInstruction, OpStrictEq);
-}
-
-void JIT::emitSlow_op_stricteq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned src1 = currentInstruction[2].u.operand;
- unsigned src2 = currentInstruction[3].u.operand;
-
- linkSlowCase(iter);
- linkSlowCase(iter);
-
- JITStubCall stubCall(this, cti_op_stricteq);
- stubCall.addArgument(src1);
- stubCall.addArgument(src2);
- stubCall.call(dst);
-}
-
-void JIT::emit_op_nstricteq(Instruction* currentInstruction)
-{
- compileOpStrictEq(currentInstruction, OpNStrictEq);
-}
-
-void JIT::emitSlow_op_nstricteq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned src1 = currentInstruction[2].u.operand;
- unsigned src2 = currentInstruction[3].u.operand;
-
- linkSlowCase(iter);
- linkSlowCase(iter);
-
- JITStubCall stubCall(this, cti_op_nstricteq);
- stubCall.addArgument(src1);
- stubCall.addArgument(src2);
- stubCall.call(dst);
-}
-
-void JIT::emit_op_eq_null(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned src = currentInstruction[2].u.operand;
-
- emitLoad(src, regT1, regT0);
- Jump isImmediate = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));
-
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT1);
- setTest8(NonZero, Address(regT1, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined), regT1);
-
- Jump wasNotImmediate = jump();
-
- isImmediate.link(this);
-
- set8(Equal, regT1, Imm32(JSValue::NullTag), regT2);
- set8(Equal, regT1, Imm32(JSValue::UndefinedTag), regT1);
- or32(regT2, regT1);
-
- wasNotImmediate.link(this);
-
- or32(Imm32(JSValue::FalseTag), regT1);
-
- emitStoreBool(dst, regT1);
-}
-
-void JIT::emit_op_neq_null(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned src = currentInstruction[2].u.operand;
-
- emitLoad(src, regT1, regT0);
- Jump isImmediate = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));
-
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT1);
- setTest8(Zero, Address(regT1, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined), regT1);
-
- Jump wasNotImmediate = jump();
-
- isImmediate.link(this);
-
- set8(NotEqual, regT1, Imm32(JSValue::NullTag), regT2);
- set8(NotEqual, regT1, Imm32(JSValue::UndefinedTag), regT1);
- and32(regT2, regT1);
-
- wasNotImmediate.link(this);
-
- or32(Imm32(JSValue::FalseTag), regT1);
-
- emitStoreBool(dst, regT1);
-}
-
-void JIT::emit_op_resolve_with_base(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, cti_op_resolve_with_base);
- stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
- stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
- stubCall.call(currentInstruction[2].u.operand);
-}
-
-void JIT::emit_op_new_func_exp(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, cti_op_new_func_exp);
- stubCall.addArgument(ImmPtr(m_codeBlock->functionExpr(currentInstruction[2].u.operand)));
- stubCall.call(currentInstruction[1].u.operand);
-}
-
-void JIT::emit_op_new_regexp(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, cti_op_new_regexp);
- stubCall.addArgument(ImmPtr(m_codeBlock->regexp(currentInstruction[2].u.operand)));
- stubCall.call(currentInstruction[1].u.operand);
-}
-
-void JIT::emit_op_throw(Instruction* currentInstruction)
-{
- unsigned exception = currentInstruction[1].u.operand;
- JITStubCall stubCall(this, cti_op_throw);
- stubCall.addArgument(exception);
- stubCall.call();
-
-#ifndef NDEBUG
- // cti_op_throw always changes its return address,
- // so this point in the code should never be reached.
- breakpoint();
-#endif
-}
-
-void JIT::emit_op_get_pnames(Instruction* currentInstruction)
-{
- int dst = currentInstruction[1].u.operand;
- int base = currentInstruction[2].u.operand;
- int i = currentInstruction[3].u.operand;
- int size = currentInstruction[4].u.operand;
- int breakTarget = currentInstruction[5].u.operand;
-
- JumpList isNotObject;
-
- emitLoad(base, regT1, regT0);
- if (!m_codeBlock->isKnownNotImmediate(base))
- isNotObject.append(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));
- if (base != m_codeBlock->thisRegister()) {
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
- isNotObject.append(branch32(NotEqual, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_type)), Imm32(ObjectType)));
- }
-
- // We could inline the case where you have a valid cache, but
- // this call doesn't seem to be hot.
- Label isObject(this);
- JITStubCall getPnamesStubCall(this, cti_op_get_pnames);
- getPnamesStubCall.addArgument(regT0);
- getPnamesStubCall.call(dst);
- load32(Address(regT0, OBJECT_OFFSETOF(JSPropertyNameIterator, m_jsStringsSize)), regT3);
- store32(Imm32(0), addressFor(i));
- store32(regT3, addressFor(size));
- Jump end = jump();
-
- isNotObject.link(this);
- addJump(branch32(Equal, regT1, Imm32(JSValue::NullTag)), breakTarget);
- addJump(branch32(Equal, regT1, Imm32(JSValue::UndefinedTag)), breakTarget);
- JITStubCall toObjectStubCall(this, cti_to_object);
- toObjectStubCall.addArgument(regT1, regT0);
- toObjectStubCall.call(base);
- jump().linkTo(isObject, this);
-
- end.link(this);
-}
-
-void JIT::emit_op_next_pname(Instruction* currentInstruction)
-{
- int dst = currentInstruction[1].u.operand;
- int base = currentInstruction[2].u.operand;
- int i = currentInstruction[3].u.operand;
- int size = currentInstruction[4].u.operand;
- int it = currentInstruction[5].u.operand;
- int target = currentInstruction[6].u.operand;
-
- JumpList callHasProperty;
-
- Label begin(this);
- load32(addressFor(i), regT0);
- Jump end = branch32(Equal, regT0, addressFor(size));
-
- // Grab key @ i
- loadPtr(addressFor(it), regT1);
- loadPtr(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_jsStrings)), regT2);
- load32(BaseIndex(regT2, regT0, TimesEight), regT2);
- store32(Imm32(JSValue::CellTag), tagFor(dst));
- store32(regT2, payloadFor(dst));
-
- // Increment i
- add32(Imm32(1), regT0);
- store32(regT0, addressFor(i));
-
- // Verify that i is valid:
- loadPtr(addressFor(base), regT0);
-
- // Test base's structure
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
- callHasProperty.append(branchPtr(NotEqual, regT2, Address(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructure)))));
-
- // Test base's prototype chain
- loadPtr(Address(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedPrototypeChain))), regT3);
- loadPtr(Address(regT3, OBJECT_OFFSETOF(StructureChain, m_vector)), regT3);
- addJump(branchTestPtr(Zero, Address(regT3)), target);
-
- Label checkPrototype(this);
- callHasProperty.append(branch32(Equal, Address(regT2, OBJECT_OFFSETOF(Structure, m_prototype) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), Imm32(JSValue::NullTag)));
- loadPtr(Address(regT2, OBJECT_OFFSETOF(Structure, m_prototype) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT2);
- loadPtr(Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
- callHasProperty.append(branchPtr(NotEqual, regT2, Address(regT3)));
- addPtr(Imm32(sizeof(Structure*)), regT3);
- branchTestPtr(NonZero, Address(regT3)).linkTo(checkPrototype, this);
-
- // Continue loop.
- addJump(jump(), target);
-
- // Slow case: Ask the object if i is valid.
- callHasProperty.link(this);
- loadPtr(addressFor(dst), regT1);
- JITStubCall stubCall(this, cti_has_property);
- stubCall.addArgument(regT0);
- stubCall.addArgument(regT1);
- stubCall.call();
-
- // Test for valid key.
- addJump(branchTest32(NonZero, regT0), target);
- jump().linkTo(begin, this);
-
- // End of loop.
- end.link(this);
-}
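The fast path above stays valid only while the cached enumeration still describes the object: the base's structure must match the cached structure, and each prototype's structure must match the cached chain, whose null entry terminates it. A simplified C++ model with illustrative types:

    // Stand-ins for JSC's Structure/StructureChain machinery.
    struct Structure;
    struct Object { Structure* structure; Object* prototypeOrNull; };

    static bool enumerationCacheStillValid(const Object* base,
                                           const Structure* cachedStructure,
                                           Structure* const* cachedChain)
    {
        if (base->structure != cachedStructure)
            return false; // structure changed: fall back to cti_has_property
        const Object* proto = base->prototypeOrNull;
        for (Structure* const* expected = cachedChain; *expected; ++expected) {
            if (!proto || proto->structure != *expected)
                return false; // chain diverged: slow case
            proto = proto->prototypeOrNull;
        }
        return true; // null chain entry: every cached link still holds
    }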
-
-void JIT::emit_op_push_scope(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, cti_op_push_scope);
- stubCall.addArgument(currentInstruction[1].u.operand);
- stubCall.call(currentInstruction[1].u.operand);
-}
-
-void JIT::emit_op_pop_scope(Instruction*)
-{
- JITStubCall(this, cti_op_pop_scope).call();
-}
-
-void JIT::emit_op_to_jsnumber(Instruction* currentInstruction)
-{
- int dst = currentInstruction[1].u.operand;
- int src = currentInstruction[2].u.operand;
-
- emitLoad(src, regT1, regT0);
-
- Jump isInt32 = branch32(Equal, regT1, Imm32(JSValue::Int32Tag));
- addSlowCase(branch32(AboveOrEqual, regT1, Imm32(JSValue::EmptyValueTag)));
- isInt32.link(this);
-
- if (src != dst)
- emitStore(dst, regT1, regT0);
- map(m_bytecodeIndex + OPCODE_LENGTH(op_to_jsnumber), dst, regT1, regT0);
-}
-
-void JIT::emitSlow_op_to_jsnumber(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- int dst = currentInstruction[1].u.operand;
-
- linkSlowCase(iter);
-
- JITStubCall stubCall(this, cti_op_to_jsnumber);
- stubCall.addArgument(regT1, regT0);
- stubCall.call(dst);
}
-void JIT::emit_op_push_new_scope(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, cti_op_push_new_scope);
- stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
- stubCall.addArgument(currentInstruction[3].u.operand);
- stubCall.call(currentInstruction[1].u.operand);
-}
-
-void JIT::emit_op_catch(Instruction* currentInstruction)
-{
- unsigned exception = currentInstruction[1].u.operand;
-
- // This opcode only executes after a return from cti_op_throw.
-
- // cti_op_throw may have taken us to a call frame further up the stack; reload
- // the call frame pointer to adjust.
- peek(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof (void*));
-
- // Now store the exception returned by cti_op_throw.
- emitStore(exception, regT1, regT0);
- map(m_bytecodeIndex + OPCODE_LENGTH(op_catch), exception, regT1, regT0);
-}
-
-void JIT::emit_op_jmp_scopes(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, cti_op_jmp_scopes);
- stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
- stubCall.call();
- addJump(jump(), currentInstruction[2].u.operand);
-}
-
-void JIT::emit_op_switch_imm(Instruction* currentInstruction)
-{
- unsigned tableIndex = currentInstruction[1].u.operand;
- unsigned defaultOffset = currentInstruction[2].u.operand;
- unsigned scrutinee = currentInstruction[3].u.operand;
-
- // Create a jump table for the switch destinations and track this switch statement.
- SimpleJumpTable* jumpTable = &m_codeBlock->immediateSwitchJumpTable(tableIndex);
- m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset, SwitchRecord::Immediate));
- jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
-
- JITStubCall stubCall(this, cti_op_switch_imm);
- stubCall.addArgument(scrutinee);
- stubCall.addArgument(Imm32(tableIndex));
- stubCall.call();
- jump(regT0);
-}
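cti_op_switch_imm returns the machine-code address for the scrutinee, so the emitted code simply ends in jump(regT0). The lookup the stub performs amounts to a bounds-checked table index with a default fallthrough; a hypothetical mirror of SimpleJumpTable:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    typedef const void* CodePtr;

    // Hypothetical mirror of SimpleJumpTable: per-case targets plus a default.
    struct JumpTableModel {
        std::int32_t min;                 // lowest case value
        std::vector<CodePtr> ctiOffsets;  // grown to branchOffsets.size() above
        CodePtr ctiDefault;
    };

    static CodePtr targetFor(const JumpTableModel& table, std::int32_t scrutinee)
    {
        std::int64_t index = static_cast<std::int64_t>(scrutinee) - table.min;
        if (index >= 0 && static_cast<std::size_t>(index) < table.ctiOffsets.size()
            && table.ctiOffsets[static_cast<std::size_t>(index)])
            return table.ctiOffsets[static_cast<std::size_t>(index)];
        return table.ctiDefault; // out of range or hole: default offset
    }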
-
-void JIT::emit_op_switch_char(Instruction* currentInstruction)
-{
- unsigned tableIndex = currentInstruction[1].u.operand;
- unsigned defaultOffset = currentInstruction[2].u.operand;
- unsigned scrutinee = currentInstruction[3].u.operand;
-
- // Create a jump table for the switch destinations and track this switch statement.
- SimpleJumpTable* jumpTable = &m_codeBlock->characterSwitchJumpTable(tableIndex);
- m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset, SwitchRecord::Character));
- jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
-
- JITStubCall stubCall(this, cti_op_switch_char);
- stubCall.addArgument(scrutinee);
- stubCall.addArgument(Imm32(tableIndex));
- stubCall.call();
- jump(regT0);
-}
-
-void JIT::emit_op_switch_string(Instruction* currentInstruction)
-{
- unsigned tableIndex = currentInstruction[1].u.operand;
- unsigned defaultOffset = currentInstruction[2].u.operand;
- unsigned scrutinee = currentInstruction[3].u.operand;
-
- // Create a jump table for the switch destinations and track this switch statement.
- StringJumpTable* jumpTable = &m_codeBlock->stringSwitchJumpTable(tableIndex);
- m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset));
-
- JITStubCall stubCall(this, cti_op_switch_string);
- stubCall.addArgument(scrutinee);
- stubCall.addArgument(Imm32(tableIndex));
- stubCall.call();
- jump(regT0);
-}
-
-void JIT::emit_op_new_error(Instruction* currentInstruction)
+JIT::Label JIT::privateCompileCTINativeCall(JSGlobalData* globalData, bool isConstruct)
{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned type = currentInstruction[2].u.operand;
- unsigned message = currentInstruction[3].u.operand;
-
- JITStubCall stubCall(this, cti_op_new_error);
- stubCall.addArgument(Imm32(type));
- stubCall.addArgument(m_codeBlock->getConstant(message));
- stubCall.addArgument(Imm32(m_bytecodeIndex));
- stubCall.call(dst);
-}
-
-void JIT::emit_op_debug(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, cti_op_debug);
- stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
- stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
- stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
- stubCall.call();
-}
-
-
-void JIT::emit_op_enter(Instruction*)
-{
- // Even though JIT code doesn't use them, we initialize our constant
- // registers to zap stale pointers, to avoid unnecessarily prolonging
- // object lifetime and increasing GC pressure.
- for (int i = 0; i < m_codeBlock->m_numVars; ++i)
- emitStore(i, jsUndefined());
-}
-
-void JIT::emit_op_enter_with_activation(Instruction* currentInstruction)
-{
- emit_op_enter(currentInstruction);
-
- JITStubCall(this, cti_op_push_activation).call(currentInstruction[1].u.operand);
-}
-
-void JIT::emit_op_create_arguments(Instruction*)
-{
- Jump argsCreated = branch32(NotEqual, tagFor(RegisterFile::ArgumentsRegister, callFrameRegister), Imm32(JSValue::EmptyValueTag));
-
- // If we get here the arguments pointer is a null cell - i.e. arguments need lazy creation.
- if (m_codeBlock->m_numParameters == 1)
- JITStubCall(this, cti_op_create_arguments_no_params).call();
- else
- JITStubCall(this, cti_op_create_arguments).call();
-
- argsCreated.link(this);
-}
-
-void JIT::emit_op_init_arguments(Instruction*)
-{
- emitStore(RegisterFile::ArgumentsRegister, JSValue(), callFrameRegister);
-}
-
-void JIT::emit_op_convert_this(Instruction* currentInstruction)
-{
- unsigned thisRegister = currentInstruction[1].u.operand;
-
- emitLoad(thisRegister, regT1, regT0);
-
- addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));
-
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
- addSlowCase(branchTest32(NonZero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(NeedsThisConversion)));
-
- map(m_bytecodeIndex + OPCODE_LENGTH(op_convert_this), thisRegister, regT1, regT0);
-}
-
-void JIT::emitSlow_op_convert_this(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned thisRegister = currentInstruction[1].u.operand;
-
- linkSlowCase(iter);
- linkSlowCase(iter);
-
- JITStubCall stubCall(this, cti_op_convert_this);
- stubCall.addArgument(regT1, regT0);
- stubCall.call(thisRegister);
-}
-
-void JIT::emit_op_profile_will_call(Instruction* currentInstruction)
-{
- peek(regT2, OBJECT_OFFSETOF(JITStackFrame, enabledProfilerReference) / sizeof (void*));
- Jump noProfiler = branchTestPtr(Zero, Address(regT2));
-
- JITStubCall stubCall(this, cti_op_profile_will_call);
- stubCall.addArgument(currentInstruction[1].u.operand);
- stubCall.call();
- noProfiler.link(this);
-}
-
-void JIT::emit_op_profile_did_call(Instruction* currentInstruction)
-{
- peek(regT2, OBJECT_OFFSETOF(JITStackFrame, enabledProfilerReference) / sizeof (void*));
- Jump noProfiler = branchTestPtr(Zero, Address(regT2));
-
- JITStubCall stubCall(this, cti_op_profile_did_call);
- stubCall.addArgument(currentInstruction[1].u.operand);
- stubCall.call();
- noProfiler.link(this);
-}
-
-#else // USE(JSVALUE32_64)
-
-#define RECORD_JUMP_TARGET(targetOffset) \
- do { m_labels[m_bytecodeIndex + (targetOffset)].used(); } while (false)
-
-void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executablePool, JSGlobalData* globalData, TrampolineStructure *trampolines)
-{
-#if ENABLE(JIT_OPTIMIZE_MOD)
- Label softModBegin = align();
- softModulo();
-#endif
-#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
- // (2) The second function provides fast property access for string length
- Label stringLengthBegin = align();
-
- // Check eax is a string
- Jump string_failureCases1 = emitJumpIfNotJSCell(regT0);
- Jump string_failureCases2 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr));
-
- // Checks out okay! Get the length from the UString.
- load32(Address(regT0, OBJECT_OFFSETOF(JSString, m_stringLength)), regT0);
-
- Jump string_failureCases3 = branch32(Above, regT0, Imm32(JSImmediate::maxImmediateInt));
-
- // regT0 contains a 64-bit value (positive, zero-extended), so we don't need to sign-extend here.
- emitFastArithIntToImmNoCheck(regT0, regT0);
-
- ret();
-#endif
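The fast path above compiles down to: bail unless the cell's vptr identifies a JSString, then bail unless the length fits in an immediate integer. The same shape in C++, with illustrative stand-in types:

    #include <cstdint>

    // Stand-in for JSString; the vptr compare serves as the type check.
    struct JSStringModel { const void* vptr; uint32_t m_stringLength; };

    static bool tryFastStringLength(const JSStringModel* cell, const void* jsStringVPtr,
                                    uint32_t maxImmediateInt, uint32_t& lengthOut)
    {
        if (!cell || cell->vptr != jsStringVPtr)
            return false; // string_failureCases1/2: not a JSString
        if (cell->m_stringLength > maxImmediateInt)
            return false; // string_failureCases3: won't fit in an immediate
        lengthOut = cell->m_stringLength;
        return true;
    }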
-
- // (3) Trampolines for the slow cases of op_call / op_call_eval / op_construct.
- COMPILE_ASSERT(sizeof(CodeType) == 4, CodeTypeEnumMustBe32Bit);
-
- // VirtualCallLink Trampoline
- // regT0 holds callee, regT1 holds argCount. regT2 will hold the FunctionExecutable.
- Label virtualCallLinkBegin = align();
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
-
- Jump isNativeFunc2 = branch32(Equal, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParameters)), Imm32(0));
-
- Jump hasCodeBlock2 = branch32(GreaterThan, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParameters)), Imm32(0));
- preserveReturnAddressAfterCall(regT3);
- restoreArgumentReference();
- Call callJSFunction2 = call();
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
- emitGetJITStubArg(2, regT1); // argCount
- restoreReturnAddressBeforeReturn(regT3);
- hasCodeBlock2.link(this);
-
- // Check argCount matches callee arity.
- Jump arityCheckOkay2 = branch32(Equal, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParameters)), regT1);
- preserveReturnAddressAfterCall(regT3);
- emitPutJITStubArg(regT3, 1); // return address
- restoreArgumentReference();
- Call callArityCheck2 = call();
- move(regT1, callFrameRegister);
- emitGetJITStubArg(2, regT1); // argCount
- restoreReturnAddressBeforeReturn(regT3);
- arityCheckOkay2.link(this);
-
- isNativeFunc2.link(this);
-
- compileOpCallInitializeCallFrame();
- preserveReturnAddressAfterCall(regT3);
- emitPutJITStubArg(regT3, 1); // return address
- restoreArgumentReference();
- Call callLazyLinkCall = call();
- restoreReturnAddressBeforeReturn(regT3);
- jump(regT0);
-
- // VirtualCall Trampoline
- // regT0 holds callee, regT1 holds argCount. regT2 will hold the FunctionExecutable.
- Label virtualCallBegin = align();
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
-
- Jump isNativeFunc3 = branch32(Equal, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParameters)), Imm32(0));
-
- Jump hasCodeBlock3 = branch32(GreaterThan, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParameters)), Imm32(0));
- preserveReturnAddressAfterCall(regT3);
- restoreArgumentReference();
- Call callJSFunction1 = call();
- emitGetJITStubArg(2, regT1); // argCount
- restoreReturnAddressBeforeReturn(regT3);
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
- hasCodeBlock3.link(this);
-
- // Check argCount matches callee arity.
- Jump arityCheckOkay3 = branch32(Equal, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParameters)), regT1);
- preserveReturnAddressAfterCall(regT3);
- emitPutJITStubArg(regT3, 1); // return address
- restoreArgumentReference();
- Call callArityCheck1 = call();
- move(regT1, callFrameRegister);
- emitGetJITStubArg(2, regT1); // argCount
- restoreReturnAddressBeforeReturn(regT3);
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
- arityCheckOkay3.link(this);
-
- isNativeFunc3.link(this);
-
- compileOpCallInitializeCallFrame();
- loadPtr(Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_jitCode)), regT0);
- jump(regT0);
+ int executableOffsetToFunction = isConstruct ? OBJECT_OFFSETOF(NativeExecutable, m_constructor) : OBJECT_OFFSETOF(NativeExecutable, m_function);
Label nativeCallThunk = align();
- preserveReturnAddressAfterCall(regT0);
- emitPutToCallFrameHeader(regT0, RegisterFile::ReturnPC); // Push return address
+
+ emitPutImmediateToCallFrameHeader(0, RegisterFile::CodeBlock);
+#if CPU(X86_64)
// Load caller frame's scope chain into this callframe so that whatever we call can
// get to its global data.
- emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT1);
- emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT1);
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT0);
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT0);
emitPutToCallFrameHeader(regT1, RegisterFile::ScopeChain);
-
-#if CPU(X86_64)
- emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, X86Registers::ecx);
+ peek(regT1);
+ emitPutToCallFrameHeader(regT1, RegisterFile::ReturnPC);
- // Allocate stack space for our arglist
- subPtr(Imm32(sizeof(ArgList)), stackPointerRegister);
- COMPILE_ASSERT((sizeof(ArgList) & 0xf) == 0, ArgList_should_by_16byte_aligned);
-
- // Set up arguments
- subPtr(Imm32(1), X86Registers::ecx); // Don't include 'this' in argcount
-
- // Push argcount
- storePtr(X86Registers::ecx, Address(stackPointerRegister, OBJECT_OFFSETOF(ArgList, m_argCount)));
+ // Calling convention: f(edi, esi, edx, ecx, ...);
+ // Host function signature: f(ExecState*);
+ move(callFrameRegister, X86Registers::edi);
- // Calculate the start of the callframe header, and store in edx
- addPtr(Imm32(-RegisterFile::CallFrameHeaderSize * (int32_t)sizeof(Register)), callFrameRegister, X86Registers::edx);
-
- // Calculate start of arguments as callframe header - sizeof(Register) * argcount (ecx)
- mul32(Imm32(sizeof(Register)), X86Registers::ecx, X86Registers::ecx);
- subPtr(X86Registers::ecx, X86Registers::edx);
+ subPtr(Imm32(16 - sizeof(void*)), stackPointerRegister); // Align stack after call.
- // push pointer to arguments
- storePtr(X86Registers::edx, Address(stackPointerRegister, OBJECT_OFFSETOF(ArgList, m_args)));
-
- // ArgList is passed by reference so is stackPointerRegister
- move(stackPointerRegister, X86Registers::ecx);
-
- // edx currently points to the first argument, edx-sizeof(Register) points to 'this'
- loadPtr(Address(X86Registers::edx, -(int32_t)sizeof(Register)), X86Registers::edx);
-
emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, X86Registers::esi);
+ loadPtr(Address(X86Registers::esi, OBJECT_OFFSETOF(JSFunction, m_executable)), X86Registers::r9);
+ move(regT0, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
+ call(Address(X86Registers::r9, executableOffsetToFunction));
- move(callFrameRegister, X86Registers::edi);
-
- call(Address(X86Registers::esi, OBJECT_OFFSETOF(JSFunction, m_data)));
-
- addPtr(Imm32(sizeof(ArgList)), stackPointerRegister);
-#elif CPU(X86)
- emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT0);
-
- /* We have two structs that we use to describe the stackframe we set up for our
- * call to native code. NativeCallFrameStructure describes how we set up the stack
- * in advance of the call. NativeFunctionCalleeSignature describes the callframe
- * as the native code expects it. We do this because we are using the fastcall
- * calling convention, which results in the callee popping its arguments off the
- * stack but not the rest of the callframe, so we need a reliable way to ensure
- * we increment the stack pointer by the right amount after the call.
- */
-#if COMPILER(MSVC) || OS(LINUX)
- struct NativeCallFrameStructure {
- // CallFrame* callFrame; // passed in EDX
- JSObject* callee;
- JSValue thisValue;
- ArgList* argPointer;
- ArgList args;
- JSValue result;
- };
- struct NativeFunctionCalleeSignature {
- JSObject* callee;
- JSValue thisValue;
- ArgList* argPointer;
- };
-#else
- struct NativeCallFrameStructure {
- // CallFrame* callFrame; // passed in ECX
- // JSObject* callee; // passed in EDX
- JSValue thisValue;
- ArgList* argPointer;
- ArgList args;
- };
- struct NativeFunctionCalleeSignature {
- JSValue thisValue;
- ArgList* argPointer;
- };
-#endif
- const int NativeCallFrameSize = (sizeof(NativeCallFrameStructure) + 15) & ~15;
- // Allocate system stack frame
- subPtr(Imm32(NativeCallFrameSize), stackPointerRegister);
-
- // Set up arguments
- subPtr(Imm32(1), regT0); // Don't include 'this' in argcount
-
- // push argcount
- storePtr(regT0, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, args) + OBJECT_OFFSETOF(ArgList, m_argCount)));
-
- // Calculate the start of the callframe header, and store in regT1
- addPtr(Imm32(-RegisterFile::CallFrameHeaderSize * (int)sizeof(Register)), callFrameRegister, regT1);
-
- // Calculate start of arguments as callframe header - sizeof(Register) * argcount (regT0)
- mul32(Imm32(sizeof(Register)), regT0, regT0);
- subPtr(regT0, regT1);
- storePtr(regT1, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, args) + OBJECT_OFFSETOF(ArgList, m_args)));
-
- // ArgList is passed by reference so is stackPointerRegister + 4 * sizeof(Register)
- addPtr(Imm32(OBJECT_OFFSETOF(NativeCallFrameStructure, args)), stackPointerRegister, regT0);
- storePtr(regT0, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, argPointer)));
-
- // regT1 currently points to the first argument, regT1 - sizeof(Register) points to 'this'
- loadPtr(Address(regT1, -(int)sizeof(Register)), regT1);
- storePtr(regT1, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, thisValue)));
-
-#if COMPILER(MSVC) || OS(LINUX)
- // ArgList is passed by reference so is stackPointerRegister + 4 * sizeof(Register)
- addPtr(Imm32(OBJECT_OFFSETOF(NativeCallFrameStructure, result)), stackPointerRegister, X86Registers::ecx);
-
- // Plant callee
- emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, X86Registers::eax);
- storePtr(X86Registers::eax, Address(stackPointerRegister, OBJECT_OFFSETOF(NativeCallFrameStructure, callee)));
-
- // Plant callframe
- move(callFrameRegister, X86Registers::edx);
-
- call(Address(X86Registers::eax, OBJECT_OFFSETOF(JSFunction, m_data)));
-
- // JSValue is a non-POD type
- loadPtr(Address(X86Registers::eax), X86Registers::eax);
-#else
- // Plant callee
- emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, X86Registers::edx);
-
- // Plant callframe
- move(callFrameRegister, X86Registers::ecx);
- call(Address(X86Registers::edx, OBJECT_OFFSETOF(JSFunction, m_data)));
-#endif
-
- // We've put a few temporaries on the stack in addition to the actual arguments
- // so pull them off now
- addPtr(Imm32(NativeCallFrameSize - sizeof(NativeFunctionCalleeSignature)), stackPointerRegister);
+ addPtr(Imm32(16 - sizeof(void*)), stackPointerRegister);
#elif CPU(ARM)
- emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT0);
+ // Load caller frame's scope chain into this callframe so that whatever we call can
+ // get to its global data.
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT2);
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT2);
+ emitPutToCallFrameHeader(regT1, RegisterFile::ScopeChain);
- // Allocate stack space for our arglist
- COMPILE_ASSERT((sizeof(ArgList) & 0x7) == 0, ArgList_should_by_8byte_aligned);
- subPtr(Imm32(sizeof(ArgList)), stackPointerRegister);
+ preserveReturnAddressAfterCall(regT3); // Callee preserved
+ emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
- // Set up arguments
- subPtr(Imm32(1), regT0); // Don't include 'this' in argcount
+ // Calling convention: f(r0 == regT0, r1 == regT1, ...);
+ // Host function signature: f(ExecState*);
+ move(callFrameRegister, ARMRegisters::r0);
- // Push argcount
- storePtr(regT0, Address(stackPointerRegister, OBJECT_OFFSETOF(ArgList, m_argCount)));
+ emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, ARMRegisters::r1);
+ move(regT2, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
+ loadPtr(Address(ARMRegisters::r1, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
+ call(Address(regT2, executableOffsetToFunction));
- // Calculate the start of the callframe header, and store in regT1
- move(callFrameRegister, regT1);
- sub32(Imm32(RegisterFile::CallFrameHeaderSize * (int32_t)sizeof(Register)), regT1);
+ restoreReturnAddressBeforeReturn(regT3);
- // Calculate start of arguments as callframe header - sizeof(Register) * argcount (regT1)
- mul32(Imm32(sizeof(Register)), regT0, regT0);
- subPtr(regT0, regT1);
+#elif CPU(MIPS)
+ // Load caller frame's scope chain into this callframe so that whatever we call can
+ // get to its global data.
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT0);
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT0);
+ emitPutToCallFrameHeader(regT1, RegisterFile::ScopeChain);
- // push pointer to arguments
- storePtr(regT1, Address(stackPointerRegister, OBJECT_OFFSETOF(ArgList, m_args)));
+ preserveReturnAddressAfterCall(regT3); // Callee preserved
+ emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
- // Setup arg3: regT1 currently points to the first argument, regT1-sizeof(Register) points to 'this'
- loadPtr(Address(regT1, -(int32_t)sizeof(Register)), regT2);
+ // Calling convention: f(a0, a1, a2, a3);
+ // Host function signature: f(ExecState*);
- // Setup arg2:
- emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, regT1);
+ // Allocate 16 bytes of stack space (8-byte aligned) to hold the four
+ // argument slots the calling convention reserves (unused here)
+ subPtr(Imm32(16), stackPointerRegister);
- // Setup arg1:
- move(callFrameRegister, regT0);
+ // Setup arg0
+ move(callFrameRegister, MIPSRegisters::a0);
- // Setup arg4: This is a plain hack
- move(stackPointerRegister, ARMRegisters::r3);
+ // Call
+ emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, MIPSRegisters::a2);
+ loadPtr(Address(MIPSRegisters::a2, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
+ move(regT0, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
+ call(Address(regT2, executableOffsetToFunction));
- call(Address(regT1, OBJECT_OFFSETOF(JSFunction, m_data)));
+ // Restore stack space
+ addPtr(Imm32(16), stackPointerRegister);
- addPtr(Imm32(sizeof(ArgList)), stackPointerRegister);
+ restoreReturnAddressBeforeReturn(regT3);
#elif ENABLE(JIT_OPTIMIZE_NATIVE_CALL)
#error "JIT_OPTIMIZE_NATIVE_CALL not yet supported on this platform."
#else
+ UNUSED_PARAM(executableOffsetToFunction);
breakpoint();
#endif
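
Every architecture branch above reduces to the same shape: load the callee's executable, then make a plain C call whose only argument is the ExecState. A minimal C++ sketch of that net effect (the structs are simplified stand-ins, not the engine's real definitions; the real load goes through OBJECT_OFFSETOF(JSFunction, m_executable) plus executableOffsetToFunction):

// Hedged sketch of what the native-call thunk effectively performs.
struct ExecState;
typedef long long EncodedJSValue;
typedef EncodedJSValue (*NativeFunction)(ExecState*);

struct FunctionExecutable { NativeFunction function; };
struct JSFunction { FunctionExecutable* m_executable; };

static EncodedJSValue callHostFunction(JSFunction* callee, ExecState* exec)
{
    return callee->m_executable->function(exec); // host signature: f(ExecState*)
}
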
@@ -1790,65 +280,31 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
loadPtr(&(globalData->exception), regT2);
Jump exceptionHandler = branchTestPtr(NonZero, regT2);
- // Grab the return address.
- emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT1);
-
- // Restore our caller's "r".
- emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
-
// Return.
- restoreReturnAddressBeforeReturn(regT1);
ret();
// Handle an exception
exceptionHandler.link(this);
+
// Grab the return address.
- emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT1);
+ preserveReturnAddressAfterCall(regT1);
+
move(ImmPtr(&globalData->exceptionLocation), regT2);
storePtr(regT1, regT2);
- move(ImmPtr(FunctionPtr(ctiVMThrowTrampoline).value()), regT2);
- emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
- poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof (void*));
- restoreReturnAddressBeforeReturn(regT2);
- ret();
-
-
-#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
- Call string_failureCases1Call = makeTailRecursiveCall(string_failureCases1);
- Call string_failureCases2Call = makeTailRecursiveCall(string_failureCases2);
- Call string_failureCases3Call = makeTailRecursiveCall(string_failureCases3);
-#endif
+ poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
- // All trampolines constructed! copy the code, link up calls, and set the pointers on the Machine object.
- LinkBuffer patchBuffer(this, m_globalData->executableAllocator.poolForSize(m_assembler.size()));
+ // Set the return address.
+ move(ImmPtr(FunctionPtr(ctiVMThrowTrampoline).value()), regT1);
+ restoreReturnAddressBeforeReturn(regT1);
-#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
- patchBuffer.link(string_failureCases1Call, FunctionPtr(cti_op_get_by_id_string_fail));
- patchBuffer.link(string_failureCases2Call, FunctionPtr(cti_op_get_by_id_string_fail));
- patchBuffer.link(string_failureCases3Call, FunctionPtr(cti_op_get_by_id_string_fail));
-#endif
- patchBuffer.link(callArityCheck1, FunctionPtr(cti_op_call_arityCheck));
- patchBuffer.link(callJSFunction1, FunctionPtr(cti_op_call_JSFunction));
-#if ENABLE(JIT_OPTIMIZE_CALL)
- patchBuffer.link(callArityCheck2, FunctionPtr(cti_op_call_arityCheck));
- patchBuffer.link(callJSFunction2, FunctionPtr(cti_op_call_JSFunction));
- patchBuffer.link(callLazyLinkCall, FunctionPtr(cti_vm_lazyLinkCall));
-#endif
+ ret();
- CodeRef finalCode = patchBuffer.finalizeCode();
- *executablePool = finalCode.m_executablePool;
+ return nativeCallThunk;
+}
- trampolines->ctiVirtualCallLink = trampolineAt(finalCode, virtualCallLinkBegin);
- trampolines->ctiVirtualCall = trampolineAt(finalCode, virtualCallBegin);
- trampolines->ctiNativeCallThunk = trampolineAt(finalCode, nativeCallThunk);
-#if ENABLE(JIT_OPTIMIZE_MOD)
- trampolines->ctiSoftModulo = trampolineAt(finalCode, softModBegin);
-#endif
-#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
- trampolines->ctiStringLengthTrampoline = trampolineAt(finalCode, stringLengthBegin);
-#else
- UNUSED_PARAM(ctiStringLengthTrampoline);
-#endif
+JIT::CodePtr JIT::privateCompileCTINativeCall(PassRefPtr<ExecutablePool>, JSGlobalData* globalData, NativeFunction)
+{
+ return globalData->jitStubs->ctiNativeCall();
}
void JIT::emit_op_mov(Instruction* currentInstruction)
@@ -1876,6 +332,7 @@ void JIT::emit_op_end(Instruction* currentInstruction)
{
if (m_codeBlock->needsFullScopeChain())
JITStubCall(this, cti_op_end).call();
+
ASSERT(returnValueRegister != callFrameRegister);
emitGetVirtualRegister(currentInstruction[1].u.operand, returnValueRegister);
restoreReturnAddressBeforeReturn(Address(callFrameRegister, RegisterFile::ReturnPC * static_cast<int>(sizeof(Register))));
@@ -1899,11 +356,7 @@ void JIT::emit_op_loop_if_lesseq(Instruction* currentInstruction)
if (isOperandConstantImmediateInt(op2)) {
emitGetVirtualRegister(op1, regT0);
emitJumpSlowCaseIfNotImmediateInteger(regT0);
-#if USE(JSVALUE64)
int32_t op2imm = getConstantOperandImmediateInt(op2);
-#else
- int32_t op2imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)));
-#endif
addJump(branch32(LessThanOrEqual, regT0, Imm32(op2imm)), target);
} else {
emitGetVirtualRegisters(op1, regT0, op2, regT1);
@@ -1918,6 +371,20 @@ void JIT::emit_op_new_object(Instruction* currentInstruction)
JITStubCall(this, cti_op_new_object).call(currentInstruction[1].u.operand);
}
+void JIT::emit_op_check_has_instance(Instruction* currentInstruction)
+{
+ unsigned baseVal = currentInstruction[1].u.operand;
+
+ emitGetVirtualRegister(baseVal, regT0);
+
+ // Check that baseVal is a cell.
+ emitJumpSlowCaseIfNotJSCell(regT0, baseVal);
+
+ // Check that baseVal 'ImplementsHasInstance'.
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT0);
+ addSlowCase(branchTest8(Zero, Address(regT0, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(ImplementsHasInstance)));
+}
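
The flags test above is a single byte load and mask against the structure's TypeInfo. A hedged C++ rendering (field layout simplified, flag value illustrative; returning false corresponds to the addSlowCase branch being taken):

// Sketch of the check emit_op_check_has_instance performs.
struct TypeInfoBits { unsigned char m_type; unsigned char m_flags; };
struct CellStructure { TypeInfoBits m_typeInfo; };
struct Cell { CellStructure* m_structure; };
static const unsigned char ImplementsHasInstanceFlag = 1 << 1; // illustrative bit value

static bool implementsHasInstance(const Cell* baseVal)
{
    // branchTest8(Zero, Address(structure, m_typeInfo.m_flags), Imm32(ImplementsHasInstance))
    return baseVal->m_structure->m_typeInfo.m_flags & ImplementsHasInstanceFlag;
}
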
+
void JIT::emit_op_instanceof(Instruction* currentInstruction)
{
unsigned dst = currentInstruction[1].u.operand;
@@ -1931,14 +398,18 @@ void JIT::emit_op_instanceof(Instruction* currentInstruction)
emitGetVirtualRegister(baseVal, regT0);
emitGetVirtualRegister(proto, regT1);
- // Check that baseVal & proto are cells.
+ // Check that proto is a cell. baseVal must be a cell - this is checked by op_check_has_instance.
emitJumpSlowCaseIfNotJSCell(regT2, value);
- emitJumpSlowCaseIfNotJSCell(regT0, baseVal);
emitJumpSlowCaseIfNotJSCell(regT1, proto);
+ // Check that prototype is an object
+ loadPtr(Address(regT1, OBJECT_OFFSETOF(JSCell, m_structure)), regT3);
+ addSlowCase(branch8(NotEqual, Address(regT3, OBJECT_OFFSETOF(Structure, m_typeInfo.m_type)), Imm32(ObjectType)));
+
+ // FIXME: this check is only needed because the JSC API allows HasInstance to be overridden; we should deprecate this.
// Check that baseVal 'ImplementsDefaultHasInstance'.
loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT0);
- addSlowCase(branchTest32(Zero, Address(regT0, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(ImplementsDefaultHasInstance)));
+ addSlowCase(branchTest8(Zero, Address(regT0, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(ImplementsDefaultHasInstance)));
// Optimistically load the result true, and start looping.
// Initially, regT1 still contains proto and regT2 still contains value.
@@ -1961,13 +432,6 @@ void JIT::emit_op_instanceof(Instruction* currentInstruction)
emitPutVirtualRegister(dst);
}
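
The "optimistically load the result true, and start looping" comment above compiles down to a prototype-chain walk. Roughly, with an illustrative stand-in type where a null prototype marks "not a cell" (end of chain):

// Hedged sketch of the loop emit_op_instanceof generates.
struct ProtoCell { ProtoCell* storedPrototype; };

static bool instanceofFastPath(ProtoCell* value, ProtoCell* proto)
{
    // Result is optimistically true; walking off the chain makes it false.
    for (ProtoCell* v = value->storedPrototype; ; v = v->storedPrototype) {
        if (v == proto)
            return true;   // found proto on the chain
        if (!v)
            return false;  // reached the end without finding it
    }
}
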
-void JIT::emit_op_new_func(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, cti_op_new_func);
- stubCall.addArgument(ImmPtr(m_codeBlock->functionDecl(currentInstruction[2].u.operand)));
- stubCall.call(currentInstruction[1].u.operand);
-}
-
void JIT::emit_op_call(Instruction* currentInstruction)
{
compileOpCall(op_call, currentInstruction, m_callLinkInfoIndex++);
@@ -1978,18 +442,6 @@ void JIT::emit_op_call_eval(Instruction* currentInstruction)
compileOpCall(op_call_eval, currentInstruction, m_callLinkInfoIndex++);
}
-void JIT::emit_op_load_varargs(Instruction* currentInstruction)
-{
- int argCountDst = currentInstruction[1].u.operand;
- int argsOffset = currentInstruction[2].u.operand;
-
- JITStubCall stubCall(this, cti_op_load_varargs);
- stubCall.addArgument(Imm32(argsOffset));
- stubCall.call();
- // Stores a naked int32 in the register file.
- store32(returnValueRegister, Address(callFrameRegister, argCountDst * sizeof(Register)));
-}
-
void JIT::emit_op_call_varargs(Instruction* currentInstruction)
{
compileOpCallVarargs(currentInstruction);
@@ -2002,25 +454,34 @@ void JIT::emit_op_construct(Instruction* currentInstruction)
void JIT::emit_op_get_global_var(Instruction* currentInstruction)
{
- JSVariableObject* globalObject = static_cast<JSVariableObject*>(currentInstruction[2].u.jsCell);
+ JSVariableObject* globalObject = m_codeBlock->globalObject();
move(ImmPtr(globalObject), regT0);
- emitGetVariableObjectRegister(regT0, currentInstruction[3].u.operand, regT0);
+ emitGetVariableObjectRegister(regT0, currentInstruction[2].u.operand, regT0);
emitPutVirtualRegister(currentInstruction[1].u.operand);
}
void JIT::emit_op_put_global_var(Instruction* currentInstruction)
{
- emitGetVirtualRegister(currentInstruction[3].u.operand, regT1);
- JSVariableObject* globalObject = static_cast<JSVariableObject*>(currentInstruction[1].u.jsCell);
+ emitGetVirtualRegister(currentInstruction[2].u.operand, regT1);
+ JSVariableObject* globalObject = m_codeBlock->globalObject();
move(ImmPtr(globalObject), regT0);
- emitPutVariableObjectRegister(regT1, regT0, currentInstruction[2].u.operand);
+ emitPutVariableObjectRegister(regT1, regT0, currentInstruction[1].u.operand);
}
void JIT::emit_op_get_scoped_var(Instruction* currentInstruction)
{
- int skip = currentInstruction[3].u.operand + m_codeBlock->needsFullScopeChain();
+ int skip = currentInstruction[3].u.operand;
emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT0);
+ bool checkTopLevel = m_codeBlock->codeType() == FunctionCode && m_codeBlock->needsFullScopeChain();
+ ASSERT(skip || !checkTopLevel);
+ if (checkTopLevel && skip--) {
+ Jump activationNotCreated;
+ if (checkTopLevel)
+ activationNotCreated = branchTestPtr(Zero, addressFor(m_codeBlock->activationRegister()));
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(ScopeChainNode, next)), regT0);
+ activationNotCreated.link(this);
+ }
while (skip--)
loadPtr(Address(regT0, OBJECT_OFFSETOF(ScopeChainNode, next)), regT0);
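
Both scoped-variable opcodes now share this pattern: hop over the top scope node only if the activation was actually created, then walk the remaining skips unconditionally. In sketch form (ScopeNode is a stand-in for ScopeChainNode):

// Sketch of the guarded scope-chain walk used by get/put_scoped_var.
struct ScopeNode { ScopeNode* next; };

static ScopeNode* walkScopeChain(ScopeNode* node, int skip, bool checkTopLevel, bool activationCreated)
{
    if (checkTopLevel && skip--) {
        // activationNotCreated branch: leave the node alone if the
        // activation register is still empty.
        if (activationCreated)
            node = node->next;
    }
    while (skip-- > 0)
        node = node->next;
    return node;
}
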
@@ -2031,10 +492,19 @@ void JIT::emit_op_get_scoped_var(Instruction* currentInstruction)
void JIT::emit_op_put_scoped_var(Instruction* currentInstruction)
{
- int skip = currentInstruction[2].u.operand + m_codeBlock->needsFullScopeChain();
+ int skip = currentInstruction[2].u.operand;
emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1);
emitGetVirtualRegister(currentInstruction[3].u.operand, regT0);
+ bool checkTopLevel = m_codeBlock->codeType() == FunctionCode && m_codeBlock->needsFullScopeChain();
+ ASSERT(skip || !checkTopLevel);
+ if (checkTopLevel && skip--) {
+ Jump activationNotCreated;
+ if (checkTopLevel)
+ activationNotCreated = branchTestPtr(Zero, addressFor(m_codeBlock->activationRegister()));
+ loadPtr(Address(regT1, OBJECT_OFFSETOF(ScopeChainNode, next)), regT1);
+ activationNotCreated.link(this);
+ }
while (skip--)
loadPtr(Address(regT1, OBJECT_OFFSETOF(ScopeChainNode, next)), regT1);
@@ -2044,21 +514,63 @@ void JIT::emit_op_put_scoped_var(Instruction* currentInstruction)
void JIT::emit_op_tear_off_activation(Instruction* currentInstruction)
{
+ unsigned activation = currentInstruction[1].u.operand;
+ unsigned arguments = currentInstruction[2].u.operand;
+ Jump activationCreated = branchTestPtr(NonZero, addressFor(activation));
+ Jump argumentsNotCreated = branchTestPtr(Zero, addressFor(arguments));
+ activationCreated.link(this);
JITStubCall stubCall(this, cti_op_tear_off_activation);
- stubCall.addArgument(currentInstruction[1].u.operand, regT2);
+ stubCall.addArgument(activation, regT2);
+ stubCall.addArgument(unmodifiedArgumentsRegister(arguments), regT2);
stubCall.call();
+ argumentsNotCreated.link(this);
}
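
The two branches bracketing the stub call implement a simple guard: the tear-off work runs only if either object was ever materialized. Approximately (the function pointer is a placeholder for the cti_op_tear_off_activation call):

// Sketch of the control flow in emit_op_tear_off_activation.
static void tearOffActivation(void* activation, void* arguments,
                              void (*stub)(void*, void*))
{
    if (!activation && !arguments)
        return;               // argumentsNotCreated: nothing to tear off
    stub(activation, arguments);
}
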
-void JIT::emit_op_tear_off_arguments(Instruction*)
+void JIT::emit_op_tear_off_arguments(Instruction* currentInstruction)
{
- JITStubCall(this, cti_op_tear_off_arguments).call();
+ unsigned dst = currentInstruction[1].u.operand;
+
+ Jump argsNotCreated = branchTestPtr(Zero, Address(callFrameRegister, sizeof(Register) * (unmodifiedArgumentsRegister(dst))));
+ JITStubCall stubCall(this, cti_op_tear_off_arguments);
+ stubCall.addArgument(unmodifiedArgumentsRegister(dst), regT2);
+ stubCall.call();
+ argsNotCreated.link(this);
}
void JIT::emit_op_ret(Instruction* currentInstruction)
{
// We could JIT generate the deref, only calling out to C when the refcount hits zero.
- if (m_codeBlock->needsFullScopeChain())
+ if (m_codeBlock->needsFullScopeChain()) {
+ Jump activationNotCreated = branchTestPtr(Zero, addressFor(m_codeBlock->activationRegister()));
JITStubCall(this, cti_op_ret_scopeChain).call();
+ activationNotCreated.link(this);
+ }
+ ASSERT(callFrameRegister != regT1);
+ ASSERT(regT1 != returnValueRegister);
+ ASSERT(returnValueRegister != callFrameRegister);
+
+ // Return the result in %eax.
+ emitGetVirtualRegister(currentInstruction[1].u.operand, returnValueRegister);
+
+ // Grab the return address.
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT1);
+
+ // Restore our caller's "r".
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
+
+ // Return.
+ restoreReturnAddressBeforeReturn(regT1);
+ ret();
+}
+
+void JIT::emit_op_ret_object_or_this(Instruction* currentInstruction)
+{
+ // We could JIT generate the deref, only calling out to C when the refcount hits zero.
+ if (m_codeBlock->needsFullScopeChain()) {
+ Jump activationNotCreated = branchTestPtr(Zero, addressFor(m_codeBlock->activationRegister()));
+ JITStubCall(this, cti_op_ret_scopeChain).call();
+ activationNotCreated.link(this);
+ }
ASSERT(callFrameRegister != regT1);
ASSERT(regT1 != returnValueRegister);
@@ -2066,6 +578,24 @@ void JIT::emit_op_ret(Instruction* currentInstruction)
// Return the result in %eax.
emitGetVirtualRegister(currentInstruction[1].u.operand, returnValueRegister);
+ Jump notJSCell = emitJumpIfNotJSCell(returnValueRegister);
+ loadPtr(Address(returnValueRegister, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
+ Jump notObject = branch8(NotEqual, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo) + OBJECT_OFFSETOF(TypeInfo, m_type)), Imm32(ObjectType));
+
+ // Grab the return address.
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT1);
+
+ // Restore our caller's "r".
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
+
+ // Return.
+ restoreReturnAddressBeforeReturn(regT1);
+ ret();
+
+ // Return 'this' in %eax.
+ notJSCell.link(this);
+ notObject.link(this);
+ emitGetVirtualRegister(currentInstruction[2].u.operand, returnValueRegister);
// Grab the return address.
emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT1);
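
op_ret_object_or_this encodes the [[Construct]] return rule: hand back the result only when it is an object, otherwise hand back 'this'. A hedged sketch of the branch structure (notJSCell and notObject both fall through to returning 'this'; Value is an illustrative stand-in):

struct Value { bool isCell; bool isObjectType; };

static Value returnObjectOrThis(Value result, Value thisValue)
{
    if (result.isCell && result.isObjectType)
        return result;    // constructor produced an object: return it
    return thisValue;     // otherwise the construct result is 'this'
}
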
@@ -2093,16 +623,6 @@ void JIT::emit_op_resolve(Instruction* currentInstruction)
stubCall.call(currentInstruction[1].u.operand);
}
-void JIT::emit_op_construct_verify(Instruction* currentInstruction)
-{
- emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
-
- emitJumpSlowCaseIfNotJSCell(regT0);
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
- addSlowCase(branch32(NotEqual, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo) + OBJECT_OFFSETOF(TypeInfo, m_type)), Imm32(ObjectType)));
-
-}
-
void JIT::emit_op_to_primitive(Instruction* currentInstruction)
{
int dst = currentInstruction[1].u.operand;
@@ -2129,7 +649,15 @@ void JIT::emit_op_strcat(Instruction* currentInstruction)
void JIT::emit_op_resolve_base(Instruction* currentInstruction)
{
- JITStubCall stubCall(this, cti_op_resolve_base);
+ JITStubCall stubCall(this, currentInstruction[3].u.operand ? cti_op_resolve_base_strict_put : cti_op_resolve_base);
+ stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_ensure_property_exists(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_ensure_property_exists);
+ stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
stubCall.call(currentInstruction[1].u.operand);
}
@@ -2138,16 +666,14 @@ void JIT::emit_op_resolve_skip(Instruction* currentInstruction)
{
JITStubCall stubCall(this, cti_op_resolve_skip);
stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
- stubCall.addArgument(Imm32(currentInstruction[3].u.operand + m_codeBlock->needsFullScopeChain()));
+ stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
stubCall.call(currentInstruction[1].u.operand);
}
-void JIT::emit_op_resolve_global(Instruction* currentInstruction)
+void JIT::emit_op_resolve_global(Instruction* currentInstruction, bool)
{
// Fast case
- void* globalObject = currentInstruction[2].u.jsCell;
- Identifier* ident = &m_codeBlock->identifier(currentInstruction[3].u.operand);
-
+ void* globalObject = m_codeBlock->globalObject();
unsigned currentIndex = m_globalResolveInfoIndex++;
void* structureAddress = &(m_codeBlock->globalResolveInfo(currentIndex).structure);
void* offsetAddr = &(m_codeBlock->globalResolveInfo(currentIndex).offset);
@@ -2155,7 +681,7 @@ void JIT::emit_op_resolve_global(Instruction* currentInstruction)
// Check Structure of global object
move(ImmPtr(globalObject), regT0);
loadPtr(structureAddress, regT1);
- Jump noMatch = branchPtr(NotEqual, regT1, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure))); // Structures don't match
+ addSlowCase(branchPtr(NotEqual, regT1, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)))); // Structures don't match
// Load cached property
// Assume that the global object always uses external storage.
@@ -2163,16 +689,21 @@ void JIT::emit_op_resolve_global(Instruction* currentInstruction)
load32(offsetAddr, regT1);
loadPtr(BaseIndex(regT0, regT1, ScalePtr), regT0);
emitPutVirtualRegister(currentInstruction[1].u.operand);
- Jump end = jump();
+}
- // Slow case
- noMatch.link(this);
+void JIT::emitSlow_op_resolve_global(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ Identifier* ident = &m_codeBlock->identifier(currentInstruction[2].u.operand);
+
+ unsigned currentIndex = m_globalResolveInfoIndex++;
+
+ linkSlowCase(iter);
JITStubCall stubCall(this, cti_op_resolve_global);
- stubCall.addArgument(ImmPtr(globalObject));
stubCall.addArgument(ImmPtr(ident));
stubCall.addArgument(Imm32(currentIndex));
- stubCall.call(currentInstruction[1].u.operand);
- end.link(this);
+ stubCall.addArgument(regT0);
+ stubCall.call(dst);
}
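
The fast/slow split above caches a (Structure, offset) pair per resolve site; the fast path is just a pointer compare and an indexed load. Approximately (a sketch; "slow" mirrors addSlowCase, and the global object is assumed to keep its properties in external storage, as the comment above notes):

struct ResolveInfo { void* structure; unsigned offset; };

static void* resolveGlobalFast(void* globalObjectStructure, void** externalStorage,
                               const ResolveInfo& info, bool& slow)
{
    if (info.structure != globalObjectStructure) {
        slow = true;                      // structures don't match
        return 0;
    }
    return externalStorage[info.offset];  // cached property load
}
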
void JIT::emit_op_not(Instruction* currentInstruction)
@@ -2189,7 +720,7 @@ void JIT::emit_op_jfalse(Instruction* currentInstruction)
unsigned target = currentInstruction[2].u.operand;
emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
- addJump(branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsNumber(m_globalData, 0)))), target);
+ addJump(branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsNumber(0)))), target);
Jump isNonZero = emitJumpIfImmediateInteger(regT0);
addJump(branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsBoolean(false)))), target);
@@ -2197,7 +728,8 @@ void JIT::emit_op_jfalse(Instruction* currentInstruction)
isNonZero.link(this);
RECORD_JUMP_TARGET(target);
-};
+}
+
void JIT::emit_op_jeq_null(Instruction* currentInstruction)
{
unsigned src = currentInstruction[1].u.operand;
@@ -2208,7 +740,7 @@ void JIT::emit_op_jeq_null(Instruction* currentInstruction)
// First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
- addJump(branchTest32(NonZero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined)), target);
+ addJump(branchTest8(NonZero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined)), target);
Jump wasNotImmediate = jump();
// Now handle the immediate cases - undefined & null
@@ -2229,7 +761,7 @@ void JIT::emit_op_jneq_null(Instruction* currentInstruction)
// First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
- addJump(branchTest32(Zero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined)), target);
+ addJump(branchTest8(Zero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined)), target);
Jump wasNotImmediate = jump();
// Now handle the immediate cases - undefined & null
@@ -2283,12 +815,8 @@ void JIT::emit_op_bitnot(Instruction* currentInstruction)
{
emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);
emitJumpSlowCaseIfNotImmediateInteger(regT0);
-#if USE(JSVALUE64)
not32(regT0);
emitFastArithIntToImmNoCheck(regT0, regT0);
-#else
- xorPtr(Imm32(~JSImmediate::TagTypeNumber), regT0);
-#endif
emitPutVirtualRegister(currentInstruction[1].u.operand);
}
@@ -2312,7 +840,7 @@ void JIT::emit_op_jtrue(Instruction* currentInstruction)
unsigned target = currentInstruction[2].u.operand;
emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
- Jump isZero = branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsNumber(m_globalData, 0))));
+ Jump isZero = branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsNumber(0))));
addJump(emitJumpIfImmediateInteger(regT0), target);
addJump(branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsBoolean(true)))), target);
@@ -2342,13 +870,6 @@ void JIT::emit_op_bitxor(Instruction* currentInstruction)
emitPutVirtualRegister(currentInstruction[1].u.operand);
}
-void JIT::emit_op_new_regexp(Instruction* currentInstruction)
-{
- JITStubCall stubCall(this, cti_op_new_regexp);
- stubCall.addArgument(ImmPtr(m_codeBlock->regexp(currentInstruction[2].u.operand)));
- stubCall.call(currentInstruction[1].u.operand);
-}
-
void JIT::emit_op_bitor(Instruction* currentInstruction)
{
emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
@@ -2383,9 +904,9 @@ void JIT::emit_op_get_pnames(Instruction* currentInstruction)
emitGetVirtualRegister(base, regT0);
if (!m_codeBlock->isKnownNotImmediate(base))
isNotObject.append(emitJumpIfNotJSCell(regT0));
- if (base != m_codeBlock->thisRegister()) {
+ if (base != m_codeBlock->thisRegister() || m_codeBlock->isStrictMode()) {
loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
- isNotObject.append(branch32(NotEqual, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_type)), Imm32(ObjectType)));
+ isNotObject.append(branch8(NotEqual, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_type)), Imm32(ObjectType)));
}
// We could inline the case where you have a valid cache, but
@@ -2431,11 +952,7 @@ void JIT::emit_op_next_pname(Instruction* currentInstruction)
loadPtr(addressFor(it), regT1);
loadPtr(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_jsStrings)), regT2);
-#if USE(JSVALUE64)
loadPtr(BaseIndex(regT2, regT0, TimesEight), regT2);
-#else
- loadPtr(BaseIndex(regT2, regT0, TimesFour), regT2);
-#endif
emitPutVirtualRegister(dst, regT2);
@@ -2536,7 +1053,7 @@ void JIT::emit_op_to_jsnumber(Instruction* currentInstruction)
emitJumpSlowCaseIfNotJSCell(regT0, srcVReg);
loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
- addSlowCase(branch32(NotEqual, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_type)), Imm32(NumberType)));
+ addSlowCase(branch8(NotEqual, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_type)), Imm32(NumberType)));
wasImmediate.link(this);
@@ -2554,7 +1071,10 @@ void JIT::emit_op_push_new_scope(Instruction* currentInstruction)
void JIT::emit_op_catch(Instruction* currentInstruction)
{
killLastResultRegister(); // FIXME: Implicitly treat op_catch as a labeled statement, and remove this line of code.
- peek(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof (void*));
+ move(regT0, callFrameRegister);
+ peek(regT3, OBJECT_OFFSETOF(struct JITStackFrame, globalData) / sizeof(void*));
+ loadPtr(Address(regT3, OBJECT_OFFSETOF(JSGlobalData, exception)), regT0);
+ storePtr(ImmPtr(JSValue::encode(JSValue())), Address(regT3, OBJECT_OFFSETOF(JSGlobalData, exception)));
emitPutVirtualRegister(currentInstruction[1].u.operand);
}
@@ -2575,7 +1095,7 @@ void JIT::emit_op_switch_imm(Instruction* currentInstruction)
// create jump table for switch destinations, track this switch statement.
SimpleJumpTable* jumpTable = &m_codeBlock->immediateSwitchJumpTable(tableIndex);
- m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset, SwitchRecord::Immediate));
+ m_switches.append(SwitchRecord(jumpTable, m_bytecodeOffset, defaultOffset, SwitchRecord::Immediate));
jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
JITStubCall stubCall(this, cti_op_switch_imm);
@@ -2593,7 +1113,7 @@ void JIT::emit_op_switch_char(Instruction* currentInstruction)
// create jump table for switch destinations, track this switch statement.
SimpleJumpTable* jumpTable = &m_codeBlock->characterSwitchJumpTable(tableIndex);
- m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset, SwitchRecord::Character));
+ m_switches.append(SwitchRecord(jumpTable, m_bytecodeOffset, defaultOffset, SwitchRecord::Character));
jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
JITStubCall stubCall(this, cti_op_switch_char);
@@ -2611,7 +1131,7 @@ void JIT::emit_op_switch_string(Instruction* currentInstruction)
// create jump table for switch destinations, track this switch statement.
StringJumpTable* jumpTable = &m_codeBlock->stringSwitchJumpTable(tableIndex);
- m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset));
+ m_switches.append(SwitchRecord(jumpTable, m_bytecodeOffset, defaultOffset));
JITStubCall stubCall(this, cti_op_switch_string);
stubCall.addArgument(scrutinee, regT2);
@@ -2620,22 +1140,32 @@ void JIT::emit_op_switch_string(Instruction* currentInstruction)
jump(regT0);
}
-void JIT::emit_op_new_error(Instruction* currentInstruction)
+void JIT::emit_op_throw_reference_error(Instruction* currentInstruction)
{
- JITStubCall stubCall(this, cti_op_new_error);
- stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
- stubCall.addArgument(ImmPtr(JSValue::encode(m_codeBlock->getConstant(currentInstruction[3].u.operand))));
- stubCall.addArgument(Imm32(m_bytecodeIndex));
- stubCall.call(currentInstruction[1].u.operand);
+ JITStubCall stubCall(this, cti_op_throw_reference_error);
+ stubCall.addArgument(ImmPtr(JSValue::encode(m_codeBlock->getConstant(currentInstruction[1].u.operand))));
+ stubCall.call();
+}
+
+void JIT::emit_op_throw_syntax_error(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_throw_syntax_error);
+ stubCall.addArgument(ImmPtr(JSValue::encode(m_codeBlock->getConstant(currentInstruction[1].u.operand))));
+ stubCall.call();
}
void JIT::emit_op_debug(Instruction* currentInstruction)
{
+#if ENABLE(DEBUG_WITH_BREAKPOINT)
+ UNUSED_PARAM(currentInstruction);
+ breakpoint();
+#else
JITStubCall stubCall(this, cti_op_debug);
stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
stubCall.call();
+#endif
}
void JIT::emit_op_eq_null(Instruction* currentInstruction)
@@ -2647,7 +1177,7 @@ void JIT::emit_op_eq_null(Instruction* currentInstruction)
Jump isImmediate = emitJumpIfNotJSCell(regT0);
loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
- setTest32(NonZero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined), regT0);
+ setTest8(NonZero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined), regT0);
Jump wasNotImmediate = jump();
@@ -2672,7 +1202,7 @@ void JIT::emit_op_neq_null(Instruction* currentInstruction)
Jump isImmediate = emitJumpIfNotJSCell(regT0);
loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
- setTest32(Zero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined), regT0);
+ setTest8(Zero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined), regT0);
Jump wasNotImmediate = jump();
@@ -2685,7 +1215,6 @@ void JIT::emit_op_neq_null(Instruction* currentInstruction)
emitTagAsBoolImmediate(regT0);
emitPutVirtualRegister(dst);
-
}
void JIT::emit_op_enter(Instruction*)
@@ -2699,31 +1228,35 @@ void JIT::emit_op_enter(Instruction*)
}
-void JIT::emit_op_enter_with_activation(Instruction* currentInstruction)
+void JIT::emit_op_create_activation(Instruction* currentInstruction)
{
- // Even though CTI doesn't use them, we initialize our constant
- // registers to zap stale pointers, to avoid unnecessarily prolonging
- // object lifetime and increasing GC pressure.
- size_t count = m_codeBlock->m_numVars;
- for (size_t j = 0; j < count; ++j)
- emitInitRegister(j);
-
+ unsigned dst = currentInstruction[1].u.operand;
+
+ Jump activationCreated = branchTestPtr(NonZero, Address(callFrameRegister, sizeof(Register) * dst));
JITStubCall(this, cti_op_push_activation).call(currentInstruction[1].u.operand);
+ emitPutVirtualRegister(dst);
+ activationCreated.link(this);
}
-void JIT::emit_op_create_arguments(Instruction*)
+void JIT::emit_op_create_arguments(Instruction* currentInstruction)
{
- Jump argsCreated = branchTestPtr(NonZero, Address(callFrameRegister, sizeof(Register) * RegisterFile::ArgumentsRegister));
+ unsigned dst = currentInstruction[1].u.operand;
+
+ Jump argsCreated = branchTestPtr(NonZero, Address(callFrameRegister, sizeof(Register) * dst));
if (m_codeBlock->m_numParameters == 1)
JITStubCall(this, cti_op_create_arguments_no_params).call();
else
JITStubCall(this, cti_op_create_arguments).call();
+ emitPutVirtualRegister(dst);
+ emitPutVirtualRegister(unmodifiedArgumentsRegister(dst));
argsCreated.link(this);
}
-
-void JIT::emit_op_init_arguments(Instruction*)
+
+void JIT::emit_op_init_lazy_reg(Instruction* currentInstruction)
{
- storePtr(ImmPtr(0), Address(callFrameRegister, sizeof(Register) * RegisterFile::ArgumentsRegister));
+ unsigned dst = currentInstruction[1].u.operand;
+
+ storePtr(ImmPtr(0), Address(callFrameRegister, sizeof(Register) * dst));
}
void JIT::emit_op_convert_this(Instruction* currentInstruction)
@@ -2732,13 +1265,43 @@ void JIT::emit_op_convert_this(Instruction* currentInstruction)
emitJumpSlowCaseIfNotJSCell(regT0);
loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT1);
- addSlowCase(branchTest32(NonZero, Address(regT1, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(NeedsThisConversion)));
+ addSlowCase(branchTest8(NonZero, Address(regT1, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(NeedsThisConversion)));
+}
+void JIT::emit_op_convert_this_strict(Instruction* currentInstruction)
+{
+ emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
+ Jump notNull = branchTestPtr(NonZero, regT0);
+ move(ImmPtr(JSValue::encode(jsNull())), regT0);
+ emitPutVirtualRegister(currentInstruction[1].u.operand, regT0);
+ Jump setThis = jump();
+ notNull.link(this);
+ Jump isImmediate = emitJumpIfNotJSCell(regT0);
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT1);
+ Jump notAnObject = branch8(NotEqual, Address(regT1, OBJECT_OFFSETOF(Structure, m_typeInfo.m_type)), Imm32(ObjectType));
+ addSlowCase(branchTest8(NonZero, Address(regT1, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(NeedsThisConversion)));
+ isImmediate.link(this);
+ notAnObject.link(this);
+ setThis.link(this);
+}
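
Strict-mode 'this' conversion never boxes primitives: only the empty value is normalized (to null), and only object cells that demand host conversion drop to the slow case. A sketch, returning false where the JIT takes the addSlowCase branch:

// Hedged sketch of emit_op_convert_this_strict's fast path.
struct ThisValue { bool empty; bool isCell; bool isObjectType; bool needsThisConversion; };

static bool convertThisStrictFastPath(ThisValue& v)
{
    if (v.empty) {
        v = ThisValue();      // becomes jsNull() in the real code
        return true;
    }
    if (!v.isCell || !v.isObjectType)
        return true;          // primitives and non-objects pass through untouched
    return !v.needsThisConversion;
}
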
+
+void JIT::emit_op_get_callee(Instruction* currentInstruction)
+{
+ unsigned result = currentInstruction[1].u.operand;
+ emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, regT0);
+ emitPutVirtualRegister(result);
+}
+
+void JIT::emit_op_create_this(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_create_this);
+ stubCall.addArgument(currentInstruction[2].u.operand, regT1);
+ stubCall.call(currentInstruction[1].u.operand);
}
void JIT::emit_op_profile_will_call(Instruction* currentInstruction)
{
- peek(regT1, OBJECT_OFFSETOF(JITStackFrame, enabledProfilerReference) / sizeof (void*));
+ peek(regT1, OBJECT_OFFSETOF(JITStackFrame, enabledProfilerReference) / sizeof(void*));
Jump noProfiler = branchTestPtr(Zero, Address(regT1));
JITStubCall stubCall(this, cti_op_profile_will_call);
@@ -2750,7 +1313,7 @@ void JIT::emit_op_profile_will_call(Instruction* currentInstruction)
void JIT::emit_op_profile_did_call(Instruction* currentInstruction)
{
- peek(regT1, OBJECT_OFFSETOF(JITStackFrame, enabledProfilerReference) / sizeof (void*));
+ peek(regT1, OBJECT_OFFSETOF(JITStackFrame, enabledProfilerReference) / sizeof(void*));
Jump noProfiler = branchTestPtr(Zero, Address(regT1));
JITStubCall stubCall(this, cti_op_profile_did_call);
@@ -2771,12 +1334,12 @@ void JIT::emitSlow_op_convert_this(Instruction* currentInstruction, Vector<SlowC
stubCall.call(currentInstruction[1].u.operand);
}
-void JIT::emitSlow_op_construct_verify(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_convert_this_strict(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
linkSlowCase(iter);
- linkSlowCase(iter);
- emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);
- emitPutVirtualRegister(currentInstruction[1].u.operand);
+ JITStubCall stubCall(this, cti_op_convert_this_strict);
+ stubCall.addArgument(regT0);
+ stubCall.call(currentInstruction[1].u.operand);
}
void JIT::emitSlow_op_to_primitive(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
@@ -2788,24 +1351,6 @@ void JIT::emitSlow_op_to_primitive(Instruction* currentInstruction, Vector<SlowC
stubCall.call(currentInstruction[1].u.operand);
}
-void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned base = currentInstruction[2].u.operand;
- unsigned property = currentInstruction[3].u.operand;
-
- linkSlowCase(iter); // property int32 check
- linkSlowCaseIfNotJSCell(iter, base); // base cell check
- linkSlowCase(iter); // base array check
- linkSlowCase(iter); // vector length check
- linkSlowCase(iter); // empty value
-
- JITStubCall stubCall(this, cti_op_get_by_val);
- stubCall.addArgument(base, regT2);
- stubCall.addArgument(property, regT2);
- stubCall.call(dst);
-}
-
void JIT::emitSlow_op_loop_if_lesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
unsigned op2 = currentInstruction[2].u.operand;
@@ -2942,6 +1487,17 @@ void JIT::emitSlow_op_nstricteq(Instruction* currentInstruction, Vector<SlowCase
stubCall.call(currentInstruction[1].u.operand);
}
+void JIT::emitSlow_op_check_has_instance(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned baseVal = currentInstruction[1].u.operand;
+
+ linkSlowCaseIfNotJSCell(iter, baseVal);
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, cti_op_check_has_instance);
+ stubCall.addArgument(baseVal, regT2);
+ stubCall.call();
+}
+
void JIT::emitSlow_op_instanceof(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
unsigned dst = currentInstruction[1].u.operand;
@@ -2950,9 +1506,9 @@ void JIT::emitSlow_op_instanceof(Instruction* currentInstruction, Vector<SlowCas
unsigned proto = currentInstruction[4].u.operand;
linkSlowCaseIfNotJSCell(iter, value);
- linkSlowCaseIfNotJSCell(iter, baseVal);
linkSlowCaseIfNotJSCell(iter, proto);
linkSlowCase(iter);
+ linkSlowCase(iter);
JITStubCall stubCall(this, cti_op_instanceof);
stubCall.addArgument(value, regT2);
stubCall.addArgument(baseVal, regT2);
@@ -2990,83 +1546,230 @@ void JIT::emitSlow_op_to_jsnumber(Instruction* currentInstruction, Vector<SlowCa
stubCall.call(currentInstruction[1].u.operand);
}
-#endif // USE(JSVALUE32_64)
+void JIT::emit_op_get_arguments_length(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int argumentsRegister = currentInstruction[2].u.operand;
+ addSlowCase(branchTestPtr(NonZero, addressFor(argumentsRegister)));
+ emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT0);
+ sub32(Imm32(1), regT0);
+ emitFastArithReTagImmediate(regT0, regT0);
+ emitPutVirtualRegister(dst, regT0);
+}
-// For both JSValue32_64 and JSValue32
-#if ENABLE(JIT_OPTIMIZE_MOD)
-#if CPU(ARM_TRADITIONAL)
-void JIT::softModulo()
+void JIT::emitSlow_op_get_arguments_length(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- push(regS0);
- push(regS1);
- push(regT1);
- push(regT3);
-#if USE(JSVALUE32_64)
- m_assembler.mov_r(regT3, regT2);
- m_assembler.mov_r(regT2, regT0);
-#else
- m_assembler.mov_r(regT3, m_assembler.asr(regT2, 1));
- m_assembler.mov_r(regT2, m_assembler.asr(regT0, 1));
-#endif
- m_assembler.mov_r(regT1, ARMAssembler::getOp2(0));
+ linkSlowCase(iter);
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned base = currentInstruction[2].u.operand;
+ Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
- m_assembler.teq_r(regT3, ARMAssembler::getOp2(0));
- m_assembler.rsb_r(regT3, regT3, ARMAssembler::getOp2(0), ARMAssembler::MI);
- m_assembler.eor_r(regT1, regT1, ARMAssembler::getOp2(1), ARMAssembler::MI);
+ emitGetVirtualRegister(base, regT0);
+ JITStubCall stubCall(this, cti_op_get_by_id_generic);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(ImmPtr(ident));
+ stubCall.call(dst);
+}
+
+void JIT::emit_op_get_argument_by_val(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int argumentsRegister = currentInstruction[2].u.operand;
+ int property = currentInstruction[3].u.operand;
+ addSlowCase(branchTestPtr(NonZero, addressFor(argumentsRegister)));
+ emitGetVirtualRegister(property, regT1);
+ addSlowCase(emitJumpIfNotImmediateInteger(regT1));
+ add32(Imm32(1), regT1);
+ // regT1 now contains the integer index of the argument we want, including 'this'
+ emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT2);
+ addSlowCase(branch32(AboveOrEqual, regT1, regT2));
- m_assembler.teq_r(regT2, ARMAssembler::getOp2(0));
- m_assembler.rsb_r(regT2, regT2, ARMAssembler::getOp2(0), ARMAssembler::MI);
- m_assembler.eor_r(regT1, regT1, ARMAssembler::getOp2(2), ARMAssembler::MI);
+ Jump skipOutofLineParams;
+ int numArgs = m_codeBlock->m_numParameters;
+ if (numArgs) {
+ Jump notInInPlaceArgs = branch32(AboveOrEqual, regT1, Imm32(numArgs));
+ addPtr(Imm32(static_cast<unsigned>(-(RegisterFile::CallFrameHeaderSize + numArgs) * sizeof(Register))), callFrameRegister, regT0);
+ loadPtr(BaseIndex(regT0, regT1, TimesEight, 0), regT0);
+ skipOutofLineParams = jump();
+ notInInPlaceArgs.link(this);
+ }
- Jump exitBranch = branch32(LessThan, regT2, regT3);
+ addPtr(Imm32(static_cast<unsigned>(-(RegisterFile::CallFrameHeaderSize + numArgs) * sizeof(Register))), callFrameRegister, regT0);
+ mul32(Imm32(sizeof(Register)), regT2, regT2);
+ subPtr(regT2, regT0);
+ loadPtr(BaseIndex(regT0, regT1, TimesEight, 0), regT0);
+ if (numArgs)
+ skipOutofLineParams.link(this);
+ emitPutVirtualRegister(dst, regT0);
+}
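
The address arithmetic above distinguishes arguments that still live in the declared parameter slots from those spilled out of line below them. A 64-bit sketch with the baked-in constants made explicit (index counts from 'this', hence the add32(Imm32(1), regT1) above):

// Hedged sketch of the slot computation in emit_op_get_argument_by_val.
static char* argumentSlot(char* callFrame, int index, int argCount, int numParams,
                          int headerSize /* RegisterFile::CallFrameHeaderSize */)
{
    const int registerSize = 8; // sizeof(Register) on a 64-bit build
    char* inPlaceBase = callFrame - (headerSize + numParams) * registerSize;
    if (numParams && index < numParams)
        return inPlaceBase + index * registerSize;                      // in-place parameters
    return inPlaceBase - argCount * registerSize + index * registerSize; // out-of-line copy
}
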
- m_assembler.sub_r(regS1, regT3, ARMAssembler::getOp2(1));
- m_assembler.tst_r(regS1, regT3);
- m_assembler.and_r(regT2, regT2, regS1, ARMAssembler::EQ);
- m_assembler.and_r(regT0, regS1, regT3);
- Jump exitBranch2 = branchTest32(Zero, regT0);
+void JIT::emitSlow_op_get_argument_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned arguments = currentInstruction[2].u.operand;
+ unsigned property = currentInstruction[3].u.operand;
- m_assembler.clz_r(regS1, regT2);
- m_assembler.clz_r(regS0, regT3);
- m_assembler.sub_r(regS0, regS0, regS1);
-
- m_assembler.rsbs_r(regS0, regS0, ARMAssembler::getOp2(31));
+ linkSlowCase(iter);
+ Jump skipArgumentsCreation = jump();
+
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ if (m_codeBlock->m_numParameters == 1)
+ JITStubCall(this, cti_op_create_arguments_no_params).call();
+ else
+ JITStubCall(this, cti_op_create_arguments).call();
+ emitPutVirtualRegister(arguments);
+ emitPutVirtualRegister(unmodifiedArgumentsRegister(arguments));
+
+ skipArgumentsCreation.link(this);
+ JITStubCall stubCall(this, cti_op_get_by_val);
+ stubCall.addArgument(arguments, regT2);
+ stubCall.addArgument(property, regT2);
+ stubCall.call(dst);
+}
- m_assembler.mov_r(regS0, m_assembler.lsl(regS0, 1), ARMAssembler::NE);
+#endif // USE(JSVALUE64)
- m_assembler.add_r(ARMRegisters::pc, ARMRegisters::pc, m_assembler.lsl(regS0, 2), ARMAssembler::NE);
- m_assembler.mov_r(regT0, regT0);
+void JIT::emit_op_resolve_global_dynamic(Instruction* currentInstruction)
+{
+ int skip = currentInstruction[5].u.operand;
+
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT0);
- for (int i = 31; i > 0; --i) {
- m_assembler.cmp_r(regT2, m_assembler.lsl(regT3, i));
- m_assembler.sub_r(regT2, regT2, m_assembler.lsl(regT3, i), ARMAssembler::CS);
+ bool checkTopLevel = m_codeBlock->codeType() == FunctionCode && m_codeBlock->needsFullScopeChain();
+ ASSERT(skip || !checkTopLevel);
+ if (checkTopLevel && skip--) {
+ Jump activationNotCreated;
+ if (checkTopLevel)
+ activationNotCreated = branchTestPtr(Zero, addressFor(m_codeBlock->activationRegister()));
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(ScopeChainNode, object)), regT1);
+ addSlowCase(checkStructure(regT1, m_globalData->activationStructure.get()));
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(ScopeChainNode, next)), regT0);
+ activationNotCreated.link(this);
+ }
+ while (skip--) {
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(ScopeChainNode, object)), regT1);
+ addSlowCase(checkStructure(regT1, m_globalData->activationStructure.get()));
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(ScopeChainNode, next)), regT0);
}
+ emit_op_resolve_global(currentInstruction, true);
+}
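
resolve_global_dynamic re-checks at runtime that every skipped scope node is still a plain activation; anything else (a 'with' scope, say) could have acquired the property dynamically, so the walk bails to the slow case. In sketch form (the object's structure is folded into the node for brevity):

struct DynScopeNode { void* structure; DynScopeNode* next; };

static bool allSkippedNodesAreActivations(DynScopeNode* node, int skip, void* activationStructure)
{
    while (skip--) {
        if (node->structure != activationStructure)
            return false;     // addSlowCase(checkStructure(...)) fires
        node = node->next;
    }
    return true;
}
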
- m_assembler.cmp_r(regT2, regT3);
- m_assembler.sub_r(regT2, regT2, regT3, ARMAssembler::CS);
+void JIT::emitSlow_op_resolve_global_dynamic(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ Identifier* ident = &m_codeBlock->identifier(currentInstruction[2].u.operand);
+ int skip = currentInstruction[5].u.operand;
+ while (skip--)
+ linkSlowCase(iter);
+ JITStubCall resolveStubCall(this, cti_op_resolve);
+ resolveStubCall.addArgument(ImmPtr(ident));
+ resolveStubCall.call(dst);
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_resolve_global_dynamic));
- exitBranch.link(this);
- exitBranch2.link(this);
+ unsigned currentIndex = m_globalResolveInfoIndex++;
- m_assembler.teq_r(regT1, ARMAssembler::getOp2(0));
- m_assembler.rsb_r(regT2, regT2, ARMAssembler::getOp2(0), ARMAssembler::GT);
+ linkSlowCase(iter); // We managed to skip all the nodes in the scope chain, but the cache missed.
+ JITStubCall stubCall(this, cti_op_resolve_global);
+ stubCall.addArgument(ImmPtr(ident));
+ stubCall.addArgument(Imm32(currentIndex));
+ stubCall.addArgument(regT0);
+ stubCall.call(dst);
+}
+
+void JIT::emit_op_new_regexp(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_new_regexp);
+ stubCall.addArgument(ImmPtr(m_codeBlock->regexp(currentInstruction[2].u.operand)));
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_load_varargs(Instruction* currentInstruction)
+{
+ int argCountDst = currentInstruction[1].u.operand;
+ int argsOffset = currentInstruction[2].u.operand;
+ int registerOffset = currentInstruction[3].u.operand;
+ ASSERT(argsOffset <= registerOffset);
+ int expectedParams = m_codeBlock->m_numParameters - 1;
+ // Don't do inline copying if we aren't guaranteed to have a single stream
+ // of arguments.
+ if (expectedParams) {
+ JITStubCall stubCall(this, cti_op_load_varargs);
+ stubCall.addArgument(Imm32(argsOffset));
+ stubCall.call();
+ // Stores a naked int32 in the register file.
+ store32(returnValueRegister, Address(callFrameRegister, argCountDst * sizeof(Register)));
+ return;
+ }
+
#if USE(JSVALUE32_64)
- m_assembler.mov_r(regT0, regT2);
+ addSlowCase(branch32(NotEqual, tagFor(argsOffset), Imm32(JSValue::EmptyValueTag)));
#else
- m_assembler.mov_r(regT0, m_assembler.lsl(regT2, 1));
- m_assembler.eor_r(regT0, regT0, ARMAssembler::getOp2(1));
+ addSlowCase(branchTestPtr(NonZero, addressFor(argsOffset)));
#endif
- pop(regT3);
- pop(regT1);
- pop(regS1);
- pop(regS0);
- ret();
+ // Load arg count into regT0
+ emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT0);
+ storePtr(regT0, addressFor(argCountDst));
+ Jump endBranch = branch32(Equal, regT0, Imm32(1));
+
+ mul32(Imm32(sizeof(Register)), regT0, regT3);
+ addPtr(Imm32(static_cast<unsigned>(sizeof(Register) - RegisterFile::CallFrameHeaderSize * sizeof(Register))), callFrameRegister, regT1);
+ subPtr(regT3, regT1); // regT1 is now the start of the out of line arguments
+ addPtr(Imm32(argsOffset * sizeof(Register)), callFrameRegister, regT2); // regT2 is the target buffer
+
+ // Bounds check the register file
+ addPtr(regT2, regT3);
+ addPtr(Imm32((registerOffset - argsOffset) * sizeof(Register)), regT3);
+ addSlowCase(branchPtr(Below, AbsoluteAddress(&m_globalData->interpreter->registerFile().m_end), regT3));
+
+ sub32(Imm32(1), regT0);
+ Label loopStart = label();
+ loadPtr(BaseIndex(regT1, regT0, TimesEight, static_cast<unsigned>(0 - 2 * sizeof(Register))), regT3);
+ storePtr(regT3, BaseIndex(regT2, regT0, TimesEight, static_cast<unsigned>(0 - sizeof(Register))));
+#if USE(JSVALUE32_64)
+ loadPtr(BaseIndex(regT1, regT0, TimesEight, static_cast<unsigned>(sizeof(void*) - 2 * sizeof(Register))), regT3);
+ storePtr(regT3, BaseIndex(regT2, regT0, TimesEight, static_cast<unsigned>(sizeof(void*) - sizeof(Register))));
+#endif
+ branchSubPtr(NonZero, Imm32(1), regT0).linkTo(loopStart, this);
+ endBranch.link(this);
}
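
The inline copy runs only when no named parameters could interleave with the arguments; the loop then transliterates to a backwards copy of tagged slots. A sketch of just the loop, assuming sizeof(Register) == 8 (the JSVALUE64 case; the 32_64 build copies two machine words per slot):

// Hedged sketch of the copy loop in emit_op_load_varargs.
static void copyVarargsLoop(char* targetBuffer, const char* outOfLineArgs, long argCount)
{
    for (long i = argCount - 1; i > 0; --i) {
        // loadPtr(BaseIndex(regT1, i, TimesEight, -16)) ->
        // storePtr(..., BaseIndex(regT2, i, TimesEight, -8))
        *reinterpret_cast<long long*>(targetBuffer + i * 8 - 8) =
            *reinterpret_cast<const long long*>(outOfLineArgs + i * 8 - 16);
    }
}
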
+
+void JIT::emitSlow_op_load_varargs(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ int argCountDst = currentInstruction[1].u.operand;
+ int argsOffset = currentInstruction[2].u.operand;
+ int expectedParams = m_codeBlock->m_numParameters - 1;
+ if (expectedParams)
+ return;
+
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, cti_op_load_varargs);
+ stubCall.addArgument(Imm32(argsOffset));
+ stubCall.call();
+ // Stores a naked int32 in the register file.
+ store32(returnValueRegister, Address(callFrameRegister, argCountDst * sizeof(Register)));
+}
+
+void JIT::emit_op_new_func(Instruction* currentInstruction)
+{
+ Jump lazyJump;
+ int dst = currentInstruction[1].u.operand;
+ if (currentInstruction[3].u.operand) {
+#if USE(JSVALUE32_64)
+ lazyJump = branch32(NotEqual, tagFor(dst), Imm32(JSValue::EmptyValueTag));
#else
-#error "JIT_OPTIMIZE_MOD not yet supported on this platform."
-#endif // CPU(ARM_TRADITIONAL)
+ lazyJump = branchTestPtr(NonZero, addressFor(dst));
#endif
+ }
+ JITStubCall stubCall(this, cti_op_new_func);
+ stubCall.addArgument(ImmPtr(m_codeBlock->functionDecl(currentInstruction[2].u.operand)));
+ stubCall.call(currentInstruction[1].u.operand);
+ if (currentInstruction[3].u.operand)
+ lazyJump.link(this);
+}
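
With operand 3 set, function declarations become lazy: the stub runs only if the destination register is still empty. Roughly (makeFunction stands in for the cti_op_new_func stub call):

// Sketch of the lazy path in emit_op_new_func.
static void* newFuncLazy(void* existing, bool lazy, void* (*makeFunction)())
{
    if (lazy && existing)
        return existing;    // lazyJump taken: function already created
    return makeFunction();
}
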
+
} // namespace JSC
#endif // ENABLE(JIT)
diff --git a/JavaScriptCore/jit/JITOpcodes32_64.cpp b/JavaScriptCore/jit/JITOpcodes32_64.cpp
new file mode 100644
index 0000000..a2bb159
--- /dev/null
+++ b/JavaScriptCore/jit/JITOpcodes32_64.cpp
@@ -0,0 +1,1836 @@
+/*
+ * Copyright (C) 2009 Apple Inc. All rights reserved.
+ * Copyright (C) 2010 Patrick Gansterer <paroga@paroga.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#if ENABLE(JIT)
+#if USE(JSVALUE32_64)
+#include "JIT.h"
+
+#include "JITInlineMethods.h"
+#include "JITStubCall.h"
+#include "JSArray.h"
+#include "JSCell.h"
+#include "JSFunction.h"
+#include "JSPropertyNameIterator.h"
+#include "LinkBuffer.h"
+
+namespace JSC {
+
+void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executablePool, JSGlobalData* globalData, TrampolineStructure* trampolines)
+{
+#if ENABLE(JIT_USE_SOFT_MODULO)
+ Label softModBegin = align();
+ softModulo();
+#endif
+#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+ // (1) This function provides fast property access for string length
+ Label stringLengthBegin = align();
+
+ // regT0 holds payload, regT1 holds tag
+
+ Jump string_failureCases1 = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));
+ Jump string_failureCases2 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr));
+
+ // Checks out okay! Get the length from the UString.
+ load32(Address(regT0, OBJECT_OFFSETOF(JSString, m_length)), regT2);
+
+ Jump string_failureCases3 = branch32(Above, regT2, Imm32(INT_MAX));
+ move(regT2, regT0);
+ move(Imm32(JSValue::Int32Tag), regT1);
+
+ ret();
+#endif
+
+ JumpList callLinkFailures;
+ // (2) Trampolines for the slow cases of op_call / op_call_eval / op_construct.
+#if ENABLE(JIT_OPTIMIZE_CALL)
+ // VirtualCallLink Trampoline
+ // regT0 holds callee, regT1 holds argCount. regT2 will hold the FunctionExecutable.
+ Label virtualCallLinkBegin = align();
+ compileOpCallInitializeCallFrame();
+ preserveReturnAddressAfterCall(regT3);
+ emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
+ restoreArgumentReference();
+ Call callLazyLinkCall = call();
+ callLinkFailures.append(branchTestPtr(Zero, regT0));
+ restoreReturnAddressBeforeReturn(regT3);
+ emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT1);
+ jump(regT0);
+
+ // VirtualConstructLink Trampoline
+ // regT0 holds callee, regT1 holds argCount. regT2 will hold the FunctionExecutable.
+ Label virtualConstructLinkBegin = align();
+ compileOpCallInitializeCallFrame();
+ preserveReturnAddressAfterCall(regT3);
+ emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
+ restoreArgumentReference();
+ Call callLazyLinkConstruct = call();
+ restoreReturnAddressBeforeReturn(regT3);
+ callLinkFailures.append(branchTestPtr(Zero, regT0));
+ emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT1);
+ jump(regT0);
+
+#endif // ENABLE(JIT_OPTIMIZE_CALL)
+
+ // VirtualCall Trampoline
+ // regT0 holds callee, regT1 holds argCount. regT2 will hold the FunctionExecutable.
+ Label virtualCallBegin = align();
+ compileOpCallInitializeCallFrame();
+
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
+
+ Jump hasCodeBlock3 = branch32(GreaterThanOrEqual, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParametersForCall)), Imm32(0));
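+    // m_numParametersForCall seems to double as a "compiled yet?" flag: a
+    // negative value means there is no JIT code for a call, so fall through
+    // to the compile stub; otherwise skip straight to the arity-check entry.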
+ preserveReturnAddressAfterCall(regT3);
+ restoreArgumentReference();
+ Call callCompileCall = call();
+ callLinkFailures.append(branchTestPtr(Zero, regT0));
+ emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT1);
+ restoreReturnAddressBeforeReturn(regT3);
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
+ hasCodeBlock3.link(this);
+
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_jitCodeForCallWithArityCheck)), regT0);
+ jump(regT0);
+
+ // VirtualConstruct Trampoline
+ // regT0 holds callee, regT1 holds argCount. regT2 will hold the FunctionExecutable.
+ Label virtualConstructBegin = align();
+ compileOpCallInitializeCallFrame();
+
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
+
+ Jump hasCodeBlock4 = branch32(GreaterThanOrEqual, Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_numParametersForConstruct)), Imm32(0));
+ preserveReturnAddressAfterCall(regT3);
+ restoreArgumentReference();
+    Call callCompileConstruct = call();
+ callLinkFailures.append(branchTestPtr(Zero, regT0));
+ emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT1);
+ restoreReturnAddressBeforeReturn(regT3);
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
+ hasCodeBlock4.link(this);
+
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(FunctionExecutable, m_jitCodeForConstructWithArityCheck)), regT0);
+ jump(regT0);
+
+    // If the parser fails we want to be able to keep going, so we handle
+    // this as a parse failure.
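+    // The recovery works by faking a throw from the caller: stash the
+    // caller's return PC in globalData->exceptionLocation, then return into
+    // ctiVMThrowTrampoline, which dispatches the pending exception.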
+ callLinkFailures.link(this);
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT1);
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
+ restoreReturnAddressBeforeReturn(regT1);
+ move(ImmPtr(&globalData->exceptionLocation), regT2);
+ storePtr(regT1, regT2);
+ poke(callFrameRegister, 1 + OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
+ poke(ImmPtr(FunctionPtr(ctiVMThrowTrampoline).value()));
+ ret();
+
+ // NativeCall Trampoline
+ Label nativeCallThunk = privateCompileCTINativeCall(globalData);
+ Label nativeConstructThunk = privateCompileCTINativeCall(globalData, true);
+
+#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+ Call string_failureCases1Call = makeTailRecursiveCall(string_failureCases1);
+ Call string_failureCases2Call = makeTailRecursiveCall(string_failureCases2);
+ Call string_failureCases3Call = makeTailRecursiveCall(string_failureCases3);
+#endif
+
+    // All trampolines constructed! Copy the code, link up the calls, and set the pointers in the trampoline structure.
+ LinkBuffer patchBuffer(this, m_globalData->executableAllocator.poolForSize(m_assembler.size()), 0);
+
+#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+ patchBuffer.link(string_failureCases1Call, FunctionPtr(cti_op_get_by_id_string_fail));
+ patchBuffer.link(string_failureCases2Call, FunctionPtr(cti_op_get_by_id_string_fail));
+ patchBuffer.link(string_failureCases3Call, FunctionPtr(cti_op_get_by_id_string_fail));
+#endif
+#if ENABLE(JIT_OPTIMIZE_CALL)
+ patchBuffer.link(callLazyLinkCall, FunctionPtr(cti_vm_lazyLinkCall));
+ patchBuffer.link(callLazyLinkConstruct, FunctionPtr(cti_vm_lazyLinkConstruct));
+#endif
+ patchBuffer.link(callCompileCall, FunctionPtr(cti_op_call_jitCompile));
+    patchBuffer.link(callCompileConstruct, FunctionPtr(cti_op_construct_jitCompile));
+
+ CodeRef finalCode = patchBuffer.finalizeCode();
+ *executablePool = finalCode.m_executablePool;
+
+ trampolines->ctiVirtualCall = patchBuffer.trampolineAt(virtualCallBegin);
+ trampolines->ctiVirtualConstruct = patchBuffer.trampolineAt(virtualConstructBegin);
+ trampolines->ctiNativeCall = patchBuffer.trampolineAt(nativeCallThunk);
+ trampolines->ctiNativeConstruct = patchBuffer.trampolineAt(nativeConstructThunk);
+#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+ trampolines->ctiStringLengthTrampoline = patchBuffer.trampolineAt(stringLengthBegin);
+#endif
+#if ENABLE(JIT_OPTIMIZE_CALL)
+ trampolines->ctiVirtualCallLink = patchBuffer.trampolineAt(virtualCallLinkBegin);
+ trampolines->ctiVirtualConstructLink = patchBuffer.trampolineAt(virtualConstructLinkBegin);
+#endif
+#if ENABLE(JIT_USE_SOFT_MODULO)
+ trampolines->ctiSoftModulo = patchBuffer.trampolineAt(softModBegin);
+#endif
+}
+
+JIT::Label JIT::privateCompileCTINativeCall(JSGlobalData* globalData, bool isConstruct)
+{
+ int executableOffsetToFunction = isConstruct ? OBJECT_OFFSETOF(NativeExecutable, m_constructor) : OBJECT_OFFSETOF(NativeExecutable, m_function);
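+    // A NativeExecutable carries two host entry points; selecting the
+    // m_constructor or m_function offset up front lets one thunk body serve
+    // both plain calls and construction.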
+
+ Label nativeCallThunk = align();
+
+ emitPutImmediateToCallFrameHeader(0, RegisterFile::CodeBlock);
+
+#if CPU(X86)
+ // Load caller frame's scope chain into this callframe so that whatever we call can
+ // get to its global data.
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT0);
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT0);
+ emitPutToCallFrameHeader(regT1, RegisterFile::ScopeChain);
+
+ peek(regT1);
+ emitPutToCallFrameHeader(regT1, RegisterFile::ReturnPC);
+
+ // Calling convention: f(ecx, edx, ...);
+ // Host function signature: f(ExecState*);
+ move(callFrameRegister, X86Registers::ecx);
+
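+    // Some 32-bit ABIs (notably Mac OS X) require 16-byte stack alignment at
+    // call instructions; the sizeof(void*) term accounts for the return
+    // address already occupying part of the 16-byte slot.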
+ subPtr(Imm32(16 - sizeof(void*)), stackPointerRegister); // Align stack after call.
+
+ // call the function
+ emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, regT1);
+ loadPtr(Address(regT1, OBJECT_OFFSETOF(JSFunction, m_executable)), regT1);
+ move(regT0, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
+ call(Address(regT1, executableOffsetToFunction));
+
+ addPtr(Imm32(16 - sizeof(void*)), stackPointerRegister);
+
+#elif CPU(ARM)
+ // Load caller frame's scope chain into this callframe so that whatever we call can
+ // get to its global data.
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT2);
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT2);
+ emitPutToCallFrameHeader(regT1, RegisterFile::ScopeChain);
+
+ preserveReturnAddressAfterCall(regT3); // Callee preserved
+ emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
+
+ // Calling convention: f(r0 == regT0, r1 == regT1, ...);
+ // Host function signature: f(ExecState*);
+ move(callFrameRegister, ARMRegisters::r0);
+
+ // call the function
+ emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, ARMRegisters::r1);
+ move(regT2, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
+ loadPtr(Address(ARMRegisters::r1, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
+ call(Address(regT2, executableOffsetToFunction));
+
+ restoreReturnAddressBeforeReturn(regT3);
+
+#elif CPU(MIPS)
+ // Load caller frame's scope chain into this callframe so that whatever we call can
+ // get to its global data.
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT0);
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT0);
+ emitPutToCallFrameHeader(regT1, RegisterFile::ScopeChain);
+
+ preserveReturnAddressAfterCall(regT3); // Callee preserved
+ emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
+
+ // Calling convention: f(a0, a1, a2, a3);
+ // Host function signature: f(ExecState*);
+
+ // Allocate stack space for 16 bytes (8-byte aligned)
+ // 16 bytes (unused) for 4 arguments
+ subPtr(Imm32(16), stackPointerRegister);
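+    // The MIPS o32 ABI makes the caller reserve a 16-byte home area for the
+    // four argument registers even when, as here, fewer are used.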
+
+ // Setup arg0
+ move(callFrameRegister, MIPSRegisters::a0);
+
+ // Call
+ emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, MIPSRegisters::a2);
+ loadPtr(Address(MIPSRegisters::a2, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
+ move(regT0, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
+ call(Address(regT2, executableOffsetToFunction));
+
+ // Restore stack space
+ addPtr(Imm32(16), stackPointerRegister);
+
+ restoreReturnAddressBeforeReturn(regT3);
+
+#elif ENABLE(JIT_OPTIMIZE_NATIVE_CALL)
+#error "JIT_OPTIMIZE_NATIVE_CALL not yet supported on this platform."
+#else
+ UNUSED_PARAM(executableOffsetToFunction);
+ breakpoint();
+#endif // CPU(X86)
+
+ // Check for an exception
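+    // globalData->exception holds the empty value (EmptyValueTag) whenever no
+    // exception is pending, so a single tag comparison detects a throw from
+    // the host function.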
+ Jump sawException = branch32(NotEqual, AbsoluteAddress(reinterpret_cast<char*>(&globalData->exception) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), Imm32(JSValue::EmptyValueTag));
+
+ // Return.
+ ret();
+
+ // Handle an exception
+ sawException.link(this);
+
+ // Grab the return address.
+ preserveReturnAddressAfterCall(regT1);
+
+ move(ImmPtr(&globalData->exceptionLocation), regT2);
+ storePtr(regT1, regT2);
+ poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
+
+ // Set the return address.
+ move(ImmPtr(FunctionPtr(ctiVMThrowTrampoline).value()), regT1);
+ restoreReturnAddressBeforeReturn(regT1);
+
+ ret();
+
+ return nativeCallThunk;
+}
+
+JIT::CodePtr JIT::privateCompileCTINativeCall(PassRefPtr<ExecutablePool> executablePool, JSGlobalData* globalData, NativeFunction func)
+{
+ Call nativeCall;
+ Label nativeCallThunk = align();
+
+ emitPutImmediateToCallFrameHeader(0, RegisterFile::CodeBlock);
+
+#if CPU(X86)
+ // Load caller frame's scope chain into this callframe so that whatever we call can
+ // get to its global data.
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT0);
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT0);
+ emitPutToCallFrameHeader(regT1, RegisterFile::ScopeChain);
+
+ peek(regT1);
+ emitPutToCallFrameHeader(regT1, RegisterFile::ReturnPC);
+
+ // Calling convention: f(ecx, edx, ...);
+ // Host function signature: f(ExecState*);
+ move(callFrameRegister, X86Registers::ecx);
+
+ subPtr(Imm32(16 - sizeof(void*)), stackPointerRegister); // Align stack after call.
+
+ move(regT0, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
+
+ // call the function
+ nativeCall = call();
+
+ addPtr(Imm32(16 - sizeof(void*)), stackPointerRegister);
+
+#elif CPU(ARM)
+ // Load caller frame's scope chain into this callframe so that whatever we call can
+ // get to its global data.
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT2);
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT2);
+ emitPutToCallFrameHeader(regT1, RegisterFile::ScopeChain);
+
+ preserveReturnAddressAfterCall(regT3); // Callee preserved
+ emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
+
+ // Calling convention: f(r0 == regT0, r1 == regT1, ...);
+ // Host function signature: f(ExecState*);
+ move(callFrameRegister, ARMRegisters::r0);
+
+ emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, ARMRegisters::r1);
+ move(regT2, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
+ loadPtr(Address(ARMRegisters::r1, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
+
+ // call the function
+ nativeCall = call();
+
+ restoreReturnAddressBeforeReturn(regT3);
+
+#elif CPU(MIPS)
+ // Load caller frame's scope chain into this callframe so that whatever we call can
+ // get to its global data.
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT0);
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT0);
+ emitPutToCallFrameHeader(regT1, RegisterFile::ScopeChain);
+
+ preserveReturnAddressAfterCall(regT3); // Callee preserved
+ emitPutToCallFrameHeader(regT3, RegisterFile::ReturnPC);
+
+ // Calling convention: f(a0, a1, a2, a3);
+ // Host function signature: f(ExecState*);
+
+ // Allocate stack space for 16 bytes (8-byte aligned)
+ // 16 bytes (unused) for 4 arguments
+ subPtr(Imm32(16), stackPointerRegister);
+
+ // Setup arg0
+ move(callFrameRegister, MIPSRegisters::a0);
+
+ // Call
+ emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, MIPSRegisters::a2);
+ loadPtr(Address(MIPSRegisters::a2, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
+ move(regT0, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
+
+ // call the function
+ nativeCall = call();
+
+ // Restore stack space
+ addPtr(Imm32(16), stackPointerRegister);
+
+ restoreReturnAddressBeforeReturn(regT3);
+
+#elif ENABLE(JIT_OPTIMIZE_NATIVE_CALL)
+#error "JIT_OPTIMIZE_NATIVE_CALL not yet supported on this platform."
+#else
+ breakpoint();
+#endif // CPU(X86)
+
+ // Check for an exception
+ Jump sawException = branch32(NotEqual, AbsoluteAddress(reinterpret_cast<char*>(&globalData->exception) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), Imm32(JSValue::EmptyValueTag));
+
+ // Return.
+ ret();
+
+ // Handle an exception
+ sawException.link(this);
+
+ // Grab the return address.
+ preserveReturnAddressAfterCall(regT1);
+
+ move(ImmPtr(&globalData->exceptionLocation), regT2);
+ storePtr(regT1, regT2);
+ poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
+
+ // Set the return address.
+ move(ImmPtr(FunctionPtr(ctiVMThrowTrampoline).value()), regT1);
+ restoreReturnAddressBeforeReturn(regT1);
+
+ ret();
+
+    // The native call thunk is fully constructed! Copy the code, and link up the call to the host function.
+ LinkBuffer patchBuffer(this, executablePool, 0);
+
+ patchBuffer.link(nativeCall, FunctionPtr(func));
+ patchBuffer.finalizeCode();
+
+ return patchBuffer.trampolineAt(nativeCallThunk);
+}
+
+void JIT::emit_op_mov(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src = currentInstruction[2].u.operand;
+
+ if (m_codeBlock->isConstantRegisterIndex(src))
+ emitStore(dst, getConstantOperand(src));
+ else {
+ emitLoad(src, regT1, regT0);
+ emitStore(dst, regT1, regT0);
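+        // map() records that regT1/regT0 still mirror 'dst' at the next
+        // bytecode offset, which can let the following opcode skip a reload.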
+ map(m_bytecodeOffset + OPCODE_LENGTH(op_mov), dst, regT1, regT0);
+ }
+}
+
+void JIT::emit_op_end(Instruction* currentInstruction)
+{
+ if (m_codeBlock->needsFullScopeChain())
+ JITStubCall(this, cti_op_end).call();
+ ASSERT(returnValueRegister != callFrameRegister);
+ emitLoad(currentInstruction[1].u.operand, regT1, regT0);
+ restoreReturnAddressBeforeReturn(Address(callFrameRegister, RegisterFile::ReturnPC * static_cast<int>(sizeof(Register))));
+ ret();
+}
+
+void JIT::emit_op_jmp(Instruction* currentInstruction)
+{
+ unsigned target = currentInstruction[1].u.operand;
+ addJump(jump(), target);
+}
+
+void JIT::emit_op_loop_if_lesseq(Instruction* currentInstruction)
+{
+ unsigned op1 = currentInstruction[1].u.operand;
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+
+ emitTimeoutCheck();
+
+ if (isOperandConstantImmediateInt(op1)) {
+ emitLoad(op2, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ addJump(branch32(GreaterThanOrEqual, regT0, Imm32(getConstantOperand(op1).asInt32())), target);
+ return;
+ }
+
+ if (isOperandConstantImmediateInt(op2)) {
+ emitLoad(op1, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ addJump(branch32(LessThanOrEqual, regT0, Imm32(getConstantOperand(op2).asInt32())), target);
+ return;
+ }
+
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+ addJump(branch32(LessThanOrEqual, regT0, regT2), target);
+}
+
+void JIT::emitSlow_op_loop_if_lesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned op1 = currentInstruction[1].u.operand;
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+
+ if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // int32 check
+
+ JITStubCall stubCall(this, cti_op_loop_if_lesseq);
+ stubCall.addArgument(op1);
+ stubCall.addArgument(op2);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(NonZero, regT0), target);
+}
+
+void JIT::emit_op_new_object(Instruction* currentInstruction)
+{
+ JITStubCall(this, cti_op_new_object).call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_check_has_instance(Instruction* currentInstruction)
+{
+ unsigned baseVal = currentInstruction[1].u.operand;
+
+ emitLoadPayload(baseVal, regT0);
+
+ // Check that baseVal is a cell.
+ emitJumpSlowCaseIfNotJSCell(baseVal);
+
+ // Check that baseVal 'ImplementsHasInstance'.
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT0);
+ addSlowCase(branchTest8(Zero, Address(regT0, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(ImplementsHasInstance)));
+}
+
+void JIT::emit_op_instanceof(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned value = currentInstruction[2].u.operand;
+ unsigned baseVal = currentInstruction[3].u.operand;
+ unsigned proto = currentInstruction[4].u.operand;
+
+ // Load the operands into registers.
+ // We use regT0 for baseVal since we will be done with this first, and we can then use it for the result.
+ emitLoadPayload(value, regT2);
+ emitLoadPayload(baseVal, regT0);
+ emitLoadPayload(proto, regT1);
+
+    // Check that value and proto are cells. baseVal must be a cell - this is checked by op_check_has_instance.
+ emitJumpSlowCaseIfNotJSCell(value);
+ emitJumpSlowCaseIfNotJSCell(proto);
+
+ // Check that prototype is an object
+ loadPtr(Address(regT1, OBJECT_OFFSETOF(JSCell, m_structure)), regT3);
+ addSlowCase(branch8(NotEqual, Address(regT3, OBJECT_OFFSETOF(Structure, m_typeInfo.m_type)), Imm32(ObjectType)));
+
+    // FIXME: this check is only needed because the JSC API allows HasInstance to be overridden; we should deprecate this.
+ // Check that baseVal 'ImplementsDefaultHasInstance'.
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT0);
+ addSlowCase(branchTest8(Zero, Address(regT0, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(ImplementsDefaultHasInstance)));
+
+ // Optimistically load the result true, and start looping.
+ // Initially, regT1 still contains proto and regT2 still contains value.
+ // As we loop regT2 will be updated with its prototype, recursively walking the prototype chain.
+ move(Imm32(JSValue::TrueTag), regT0);
+ Label loop(this);
+
+ // Load the prototype of the cell in regT2. If this is equal to regT1 - WIN!
+ // Otherwise, check if we've hit null - if we have then drop out of the loop, if not go again.
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
+ load32(Address(regT2, OBJECT_OFFSETOF(Structure, m_prototype) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT2);
+ Jump isInstance = branchPtr(Equal, regT2, regT1);
+ branchTest32(NonZero, regT2).linkTo(loop, this);
+
+ // We get here either by dropping out of the loop, or if value was not an Object. Result is false.
+ move(Imm32(JSValue::FalseTag), regT0);
+
+ // isInstance jumps right down to here, to skip setting the result to false (it has already set true).
+ isInstance.link(this);
+ emitStoreBool(dst, regT0);
+}
+
+void JIT::emitSlow_op_check_has_instance(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned baseVal = currentInstruction[1].u.operand;
+
+ linkSlowCaseIfNotJSCell(iter, baseVal);
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, cti_op_check_has_instance);
+ stubCall.addArgument(baseVal);
+ stubCall.call();
+}
+
+void JIT::emitSlow_op_instanceof(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned value = currentInstruction[2].u.operand;
+ unsigned baseVal = currentInstruction[3].u.operand;
+ unsigned proto = currentInstruction[4].u.operand;
+
+ linkSlowCaseIfNotJSCell(iter, value);
+ linkSlowCaseIfNotJSCell(iter, proto);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, cti_op_instanceof);
+ stubCall.addArgument(value);
+ stubCall.addArgument(baseVal);
+ stubCall.addArgument(proto);
+ stubCall.call(dst);
+}
+
+void JIT::emit_op_get_global_var(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ JSGlobalObject* globalObject = m_codeBlock->globalObject();
+ ASSERT(globalObject->isGlobalObject());
+ int index = currentInstruction[2].u.operand;
+
+ loadPtr(&globalObject->d()->registers, regT2);
+
+ emitLoad(index, regT1, regT0, regT2);
+ emitStore(dst, regT1, regT0);
+ map(m_bytecodeOffset + OPCODE_LENGTH(op_get_global_var), dst, regT1, regT0);
+}
+
+void JIT::emit_op_put_global_var(Instruction* currentInstruction)
+{
+ JSGlobalObject* globalObject = m_codeBlock->globalObject();
+ ASSERT(globalObject->isGlobalObject());
+ int index = currentInstruction[1].u.operand;
+ int value = currentInstruction[2].u.operand;
+
+ emitLoad(value, regT1, regT0);
+
+ loadPtr(&globalObject->d()->registers, regT2);
+ emitStore(index, regT1, regT0, regT2);
+ map(m_bytecodeOffset + OPCODE_LENGTH(op_put_global_var), value, regT1, regT0);
+}
+
+void JIT::emit_op_get_scoped_var(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int index = currentInstruction[2].u.operand;
+ int skip = currentInstruction[3].u.operand;
+
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT2);
+ bool checkTopLevel = m_codeBlock->codeType() == FunctionCode && m_codeBlock->needsFullScopeChain();
+ ASSERT(skip || !checkTopLevel);
+ if (checkTopLevel && skip--) {
+ Jump activationNotCreated;
+ if (checkTopLevel)
+ activationNotCreated = branch32(Equal, tagFor(m_codeBlock->activationRegister()), Imm32(JSValue::EmptyValueTag));
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, next)), regT2);
+ activationNotCreated.link(this);
+ }
+ while (skip--)
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, next)), regT2);
+
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, object)), regT2);
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(JSVariableObject, d)), regT2);
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(JSVariableObject::JSVariableObjectData, registers)), regT2);
+
+ emitLoad(index, regT1, regT0, regT2);
+ emitStore(dst, regT1, regT0);
+ map(m_bytecodeOffset + OPCODE_LENGTH(op_get_scoped_var), dst, regT1, regT0);
+}
+
+void JIT::emit_op_put_scoped_var(Instruction* currentInstruction)
+{
+ int index = currentInstruction[1].u.operand;
+ int skip = currentInstruction[2].u.operand;
+ int value = currentInstruction[3].u.operand;
+
+ emitLoad(value, regT1, regT0);
+
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT2);
+ bool checkTopLevel = m_codeBlock->codeType() == FunctionCode && m_codeBlock->needsFullScopeChain();
+ ASSERT(skip || !checkTopLevel);
+ if (checkTopLevel && skip--) {
+ Jump activationNotCreated;
+ if (checkTopLevel)
+ activationNotCreated = branch32(Equal, tagFor(m_codeBlock->activationRegister()), Imm32(JSValue::EmptyValueTag));
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, next)), regT2);
+ activationNotCreated.link(this);
+ }
+ while (skip--)
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, next)), regT2);
+
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(ScopeChainNode, object)), regT2);
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(JSVariableObject, d)), regT2);
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(JSVariableObject::JSVariableObjectData, registers)), regT2);
+
+ emitStore(index, regT1, regT0, regT2);
+ map(m_bytecodeOffset + OPCODE_LENGTH(op_put_scoped_var), value, regT1, regT0);
+}
+
+void JIT::emit_op_tear_off_activation(Instruction* currentInstruction)
+{
+ unsigned activation = currentInstruction[1].u.operand;
+ unsigned arguments = currentInstruction[2].u.operand;
+ Jump activationCreated = branch32(NotEqual, tagFor(activation), Imm32(JSValue::EmptyValueTag));
+ Jump argumentsNotCreated = branch32(Equal, tagFor(arguments), Imm32(JSValue::EmptyValueTag));
+ activationCreated.link(this);
+ JITStubCall stubCall(this, cti_op_tear_off_activation);
+ stubCall.addArgument(currentInstruction[1].u.operand);
+ stubCall.addArgument(unmodifiedArgumentsRegister(currentInstruction[2].u.operand));
+ stubCall.call();
+ argumentsNotCreated.link(this);
+}
+
+void JIT::emit_op_tear_off_arguments(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+
+ Jump argsNotCreated = branch32(Equal, tagFor(unmodifiedArgumentsRegister(dst)), Imm32(JSValue::EmptyValueTag));
+ JITStubCall stubCall(this, cti_op_tear_off_arguments);
+ stubCall.addArgument(unmodifiedArgumentsRegister(dst));
+ stubCall.call();
+ argsNotCreated.link(this);
+}
+
+void JIT::emit_op_new_array(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_new_array);
+ stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
+ stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_resolve(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_resolve);
+ stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_to_primitive(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int src = currentInstruction[2].u.operand;
+
+ emitLoad(src, regT1, regT0);
+
+ Jump isImm = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));
+ addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr)));
+ isImm.link(this);
+
+ if (dst != src)
+ emitStore(dst, regT1, regT0);
+ map(m_bytecodeOffset + OPCODE_LENGTH(op_to_primitive), dst, regT1, regT0);
+}
+
+void JIT::emitSlow_op_to_primitive(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ int dst = currentInstruction[1].u.operand;
+
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, cti_op_to_primitive);
+ stubCall.addArgument(regT1, regT0);
+ stubCall.call(dst);
+}
+
+void JIT::emit_op_strcat(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_strcat);
+ stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
+ stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_resolve_base(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, currentInstruction[3].u.operand ? cti_op_resolve_base_strict_put : cti_op_resolve_base);
+ stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_ensure_property_exists(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_ensure_property_exists);
+ stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
+ stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_resolve_skip(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_resolve_skip);
+ stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
+ stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_resolve_global(Instruction* currentInstruction, bool dynamic)
+{
+ // FIXME: Optimize to use patching instead of so many memory accesses.
+
+ unsigned dst = currentInstruction[1].u.operand;
+ void* globalObject = m_codeBlock->globalObject();
+
+ unsigned currentIndex = m_globalResolveInfoIndex++;
+ void* structureAddress = &(m_codeBlock->globalResolveInfo(currentIndex).structure);
+ void* offsetAddr = &(m_codeBlock->globalResolveInfo(currentIndex).offset);
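+    // This is a structure-based inline cache: the slow path fills in the
+    // GlobalResolveInfo's structure and offset, after which repeat resolves
+    // are serviced here with two loads and a pointer compare.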
+
+ // Verify structure.
+ move(ImmPtr(globalObject), regT0);
+ loadPtr(structureAddress, regT1);
+ addSlowCase(branchPtr(NotEqual, regT1, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure))));
+
+ // Load property.
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSGlobalObject, m_externalStorage)), regT2);
+ load32(offsetAddr, regT3);
+ load32(BaseIndex(regT2, regT3, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0); // payload
+ load32(BaseIndex(regT2, regT3, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1); // tag
+ emitStore(dst, regT1, regT0);
+    // Note: the conditional must be parenthesized - '+' binds tighter than '?:'.
+    map(m_bytecodeOffset + (dynamic ? OPCODE_LENGTH(op_resolve_global_dynamic) : OPCODE_LENGTH(op_resolve_global)), dst, regT1, regT0);
+}
+
+void JIT::emitSlow_op_resolve_global(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ Identifier* ident = &m_codeBlock->identifier(currentInstruction[2].u.operand);
+
+ unsigned currentIndex = m_globalResolveInfoIndex++;
+
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, cti_op_resolve_global);
+ stubCall.addArgument(ImmPtr(ident));
+ stubCall.addArgument(Imm32(currentIndex));
+ stubCall.call(dst);
+}
+
+void JIT::emit_op_not(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src = currentInstruction[2].u.operand;
+
+ emitLoadTag(src, regT0);
+
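+    // Booleans live entirely in the tag here, and TrueTag and FalseTag
+    // differ only in the low bit. XORing with FalseTag maps a boolean tag to
+    // 0 or 1 (anything else trips the ~1 test below and takes the slow
+    // case); XORing with TrueTag then produces the inverted boolean tag.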
+ xor32(Imm32(JSValue::FalseTag), regT0);
+ addSlowCase(branchTest32(NonZero, regT0, Imm32(~1)));
+ xor32(Imm32(JSValue::TrueTag), regT0);
+
+ emitStoreBool(dst, regT0, (dst == src));
+}
+
+void JIT::emitSlow_op_not(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src = currentInstruction[2].u.operand;
+
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, cti_op_not);
+ stubCall.addArgument(src);
+ stubCall.call(dst);
+}
+
+void JIT::emit_op_jfalse(Instruction* currentInstruction)
+{
+ unsigned cond = currentInstruction[1].u.operand;
+ unsigned target = currentInstruction[2].u.operand;
+
+ emitLoad(cond, regT1, regT0);
+
+ Jump isTrue = branch32(Equal, regT1, Imm32(JSValue::TrueTag));
+ addJump(branch32(Equal, regT1, Imm32(JSValue::FalseTag)), target);
+
+ Jump isNotInteger = branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag));
+ Jump isTrue2 = branch32(NotEqual, regT0, Imm32(0));
+ addJump(jump(), target);
+
+ if (supportsFloatingPoint()) {
+ isNotInteger.link(this);
+
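+        // Any tag at or below LowestTag is really the high half of a double,
+        // so once booleans and int32s are excluded, a BelowOrEqual tag means
+        // the operand can be reloaded and compared as a double; everything
+        // else (cells and the remaining immediates) takes the slow case.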
+ addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));
+
+ zeroDouble(fpRegT0);
+ emitLoadDouble(cond, fpRegT1);
+ addJump(branchDouble(DoubleEqualOrUnordered, fpRegT0, fpRegT1), target);
+ } else
+ addSlowCase(isNotInteger);
+
+ isTrue.link(this);
+ isTrue2.link(this);
+}
+
+void JIT::emitSlow_op_jfalse(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned cond = currentInstruction[1].u.operand;
+ unsigned target = currentInstruction[2].u.operand;
+
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, cti_op_jtrue);
+ stubCall.addArgument(cond);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(Zero, regT0), target); // Inverted.
+}
+
+void JIT::emit_op_jtrue(Instruction* currentInstruction)
+{
+ unsigned cond = currentInstruction[1].u.operand;
+ unsigned target = currentInstruction[2].u.operand;
+
+ emitLoad(cond, regT1, regT0);
+
+ Jump isFalse = branch32(Equal, regT1, Imm32(JSValue::FalseTag));
+ addJump(branch32(Equal, regT1, Imm32(JSValue::TrueTag)), target);
+
+ Jump isNotInteger = branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag));
+ Jump isFalse2 = branch32(Equal, regT0, Imm32(0));
+ addJump(jump(), target);
+
+ if (supportsFloatingPoint()) {
+ isNotInteger.link(this);
+
+ addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));
+
+ zeroDouble(fpRegT0);
+ emitLoadDouble(cond, fpRegT1);
+ addJump(branchDouble(DoubleNotEqual, fpRegT0, fpRegT1), target);
+ } else
+ addSlowCase(isNotInteger);
+
+ isFalse.link(this);
+ isFalse2.link(this);
+}
+
+void JIT::emitSlow_op_jtrue(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned cond = currentInstruction[1].u.operand;
+ unsigned target = currentInstruction[2].u.operand;
+
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, cti_op_jtrue);
+ stubCall.addArgument(cond);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(NonZero, regT0), target);
+}
+
+void JIT::emit_op_jeq_null(Instruction* currentInstruction)
+{
+ unsigned src = currentInstruction[1].u.operand;
+ unsigned target = currentInstruction[2].u.operand;
+
+ emitLoad(src, regT1, regT0);
+
+ Jump isImmediate = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));
+
+ // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
+ addJump(branchTest8(NonZero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined)), target);
+
+ Jump wasNotImmediate = jump();
+
+ // Now handle the immediate cases - undefined & null
+ isImmediate.link(this);
+
+ ASSERT((JSValue::UndefinedTag + 1 == JSValue::NullTag) && !(JSValue::NullTag + 1));
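+    // Given the tag layout asserted above (null == -1, undefined == -2), a
+    // single unsigned >= UndefinedTag comparison matches exactly the
+    // undefined and null tags.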
+ addJump(branch32(AboveOrEqual, regT1, Imm32(JSValue::UndefinedTag)), target);
+
+ wasNotImmediate.link(this);
+}
+
+void JIT::emit_op_jneq_null(Instruction* currentInstruction)
+{
+ unsigned src = currentInstruction[1].u.operand;
+ unsigned target = currentInstruction[2].u.operand;
+
+ emitLoad(src, regT1, regT0);
+
+ Jump isImmediate = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));
+
+ // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
+ addJump(branchTest8(Zero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined)), target);
+
+ Jump wasNotImmediate = jump();
+
+ // Now handle the immediate cases - undefined & null
+ isImmediate.link(this);
+
+ ASSERT((JSValue::UndefinedTag + 1 == JSValue::NullTag) && !(JSValue::NullTag + 1));
+ addJump(branch32(Below, regT1, Imm32(JSValue::UndefinedTag)), target);
+
+ wasNotImmediate.link(this);
+}
+
+void JIT::emit_op_jneq_ptr(Instruction* currentInstruction)
+{
+ unsigned src = currentInstruction[1].u.operand;
+ JSCell* ptr = currentInstruction[2].u.jsCell;
+ unsigned target = currentInstruction[3].u.operand;
+
+ emitLoad(src, regT1, regT0);
+ addJump(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)), target);
+ addJump(branchPtr(NotEqual, regT0, ImmPtr(ptr)), target);
+}
+
+void JIT::emit_op_jsr(Instruction* currentInstruction)
+{
+ int retAddrDst = currentInstruction[1].u.operand;
+ int target = currentInstruction[2].u.operand;
+ DataLabelPtr storeLocation = storePtrWithPatch(ImmPtr(0), Address(callFrameRegister, sizeof(Register) * retAddrDst));
+ addJump(jump(), target);
+ m_jsrSites.append(JSRInfo(storeLocation, label()));
+}
+
+void JIT::emit_op_sret(Instruction* currentInstruction)
+{
+ jump(Address(callFrameRegister, sizeof(Register) * currentInstruction[1].u.operand));
+}
+
+void JIT::emit_op_eq(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src1 = currentInstruction[2].u.operand;
+ unsigned src2 = currentInstruction[3].u.operand;
+
+ emitLoad2(src1, regT1, regT0, src2, regT3, regT2);
+ addSlowCase(branch32(NotEqual, regT1, regT3));
+ addSlowCase(branch32(Equal, regT1, Imm32(JSValue::CellTag)));
+ addSlowCase(branch32(Below, regT1, Imm32(JSValue::LowestTag)));
+
+ set8(Equal, regT0, regT2, regT0);
+ or32(Imm32(JSValue::FalseTag), regT0);
+
+ emitStoreBool(dst, regT0);
+}
+
+void JIT::emitSlow_op_eq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ JumpList storeResult;
+ JumpList genericCase;
+
+ genericCase.append(getSlowCase(iter)); // tags not equal
+
+ linkSlowCase(iter); // tags equal and JSCell
+ genericCase.append(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr)));
+ genericCase.append(branchPtr(NotEqual, Address(regT2), ImmPtr(m_globalData->jsStringVPtr)));
+
+ // String case.
+ JITStubCall stubCallEqStrings(this, cti_op_eq_strings);
+ stubCallEqStrings.addArgument(regT0);
+ stubCallEqStrings.addArgument(regT2);
+ stubCallEqStrings.call();
+ storeResult.append(jump());
+
+ // Generic case.
+ genericCase.append(getSlowCase(iter)); // doubles
+ genericCase.link(this);
+ JITStubCall stubCallEq(this, cti_op_eq);
+ stubCallEq.addArgument(op1);
+ stubCallEq.addArgument(op2);
+ stubCallEq.call(regT0);
+
+ storeResult.link(this);
+ or32(Imm32(JSValue::FalseTag), regT0);
+ emitStoreBool(dst, regT0);
+}
+
+void JIT::emit_op_neq(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src1 = currentInstruction[2].u.operand;
+ unsigned src2 = currentInstruction[3].u.operand;
+
+ emitLoad2(src1, regT1, regT0, src2, regT3, regT2);
+ addSlowCase(branch32(NotEqual, regT1, regT3));
+ addSlowCase(branch32(Equal, regT1, Imm32(JSValue::CellTag)));
+ addSlowCase(branch32(Below, regT1, Imm32(JSValue::LowestTag)));
+
+ set8(NotEqual, regT0, regT2, regT0);
+ or32(Imm32(JSValue::FalseTag), regT0);
+
+ emitStoreBool(dst, regT0);
+}
+
+void JIT::emitSlow_op_neq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+
+ JumpList storeResult;
+ JumpList genericCase;
+
+ genericCase.append(getSlowCase(iter)); // tags not equal
+
+ linkSlowCase(iter); // tags equal and JSCell
+ genericCase.append(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr)));
+ genericCase.append(branchPtr(NotEqual, Address(regT2), ImmPtr(m_globalData->jsStringVPtr)));
+
+ // String case.
+ JITStubCall stubCallEqStrings(this, cti_op_eq_strings);
+ stubCallEqStrings.addArgument(regT0);
+ stubCallEqStrings.addArgument(regT2);
+ stubCallEqStrings.call(regT0);
+ storeResult.append(jump());
+
+ // Generic case.
+ genericCase.append(getSlowCase(iter)); // doubles
+ genericCase.link(this);
+ JITStubCall stubCallEq(this, cti_op_eq);
+ stubCallEq.addArgument(regT1, regT0);
+ stubCallEq.addArgument(regT3, regT2);
+ stubCallEq.call(regT0);
+
+ storeResult.link(this);
+ xor32(Imm32(0x1), regT0);
+ or32(Imm32(JSValue::FalseTag), regT0);
+ emitStoreBool(dst, regT0);
+}
+
+void JIT::compileOpStrictEq(Instruction* currentInstruction, CompileOpStrictEqType type)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src1 = currentInstruction[2].u.operand;
+ unsigned src2 = currentInstruction[3].u.operand;
+
+ emitLoadTag(src1, regT0);
+ emitLoadTag(src2, regT1);
+
+ // Jump to a slow case if either operand is double, or if both operands are
+ // cells and/or Int32s.
+ move(regT0, regT2);
+ and32(regT1, regT2);
+ addSlowCase(branch32(Below, regT2, Imm32(JSValue::LowestTag)));
+ addSlowCase(branch32(AboveOrEqual, regT2, Imm32(JSValue::CellTag)));
+
+ if (type == OpStrictEq)
+ set8(Equal, regT0, regT1, regT0);
+ else
+ set8(NotEqual, regT0, regT1, regT0);
+
+ or32(Imm32(JSValue::FalseTag), regT0);
+
+ emitStoreBool(dst, regT0);
+}
+
+void JIT::emit_op_stricteq(Instruction* currentInstruction)
+{
+ compileOpStrictEq(currentInstruction, OpStrictEq);
+}
+
+void JIT::emitSlow_op_stricteq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src1 = currentInstruction[2].u.operand;
+ unsigned src2 = currentInstruction[3].u.operand;
+
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, cti_op_stricteq);
+ stubCall.addArgument(src1);
+ stubCall.addArgument(src2);
+ stubCall.call(dst);
+}
+
+void JIT::emit_op_nstricteq(Instruction* currentInstruction)
+{
+ compileOpStrictEq(currentInstruction, OpNStrictEq);
+}
+
+void JIT::emitSlow_op_nstricteq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src1 = currentInstruction[2].u.operand;
+ unsigned src2 = currentInstruction[3].u.operand;
+
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, cti_op_nstricteq);
+ stubCall.addArgument(src1);
+ stubCall.addArgument(src2);
+ stubCall.call(dst);
+}
+
+void JIT::emit_op_eq_null(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src = currentInstruction[2].u.operand;
+
+ emitLoad(src, regT1, regT0);
+ Jump isImmediate = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));
+
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT1);
+ setTest8(NonZero, Address(regT1, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined), regT1);
+
+ Jump wasNotImmediate = jump();
+
+ isImmediate.link(this);
+
+ set8(Equal, regT1, Imm32(JSValue::NullTag), regT2);
+ set8(Equal, regT1, Imm32(JSValue::UndefinedTag), regT1);
+ or32(regT2, regT1);
+
+ wasNotImmediate.link(this);
+
+ or32(Imm32(JSValue::FalseTag), regT1);
+
+ emitStoreBool(dst, regT1);
+}
+
+void JIT::emit_op_neq_null(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src = currentInstruction[2].u.operand;
+
+ emitLoad(src, regT1, regT0);
+ Jump isImmediate = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));
+
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT1);
+ setTest8(Zero, Address(regT1, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined), regT1);
+
+ Jump wasNotImmediate = jump();
+
+ isImmediate.link(this);
+
+ set8(NotEqual, regT1, Imm32(JSValue::NullTag), regT2);
+ set8(NotEqual, regT1, Imm32(JSValue::UndefinedTag), regT1);
+ and32(regT2, regT1);
+
+ wasNotImmediate.link(this);
+
+ or32(Imm32(JSValue::FalseTag), regT1);
+
+ emitStoreBool(dst, regT1);
+}
+
+void JIT::emit_op_resolve_with_base(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_resolve_with_base);
+ stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
+ stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
+ stubCall.call(currentInstruction[2].u.operand);
+}
+
+void JIT::emit_op_new_func_exp(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_new_func_exp);
+ stubCall.addArgument(ImmPtr(m_codeBlock->functionExpr(currentInstruction[2].u.operand)));
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_throw(Instruction* currentInstruction)
+{
+ unsigned exception = currentInstruction[1].u.operand;
+ JITStubCall stubCall(this, cti_op_throw);
+ stubCall.addArgument(exception);
+ stubCall.call();
+
+#ifndef NDEBUG
+    // cti_op_throw always changes its return address; this point in the
+    // code should never be reached.
+ breakpoint();
+#endif
+}
+
+void JIT::emit_op_get_pnames(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int base = currentInstruction[2].u.operand;
+ int i = currentInstruction[3].u.operand;
+ int size = currentInstruction[4].u.operand;
+ int breakTarget = currentInstruction[5].u.operand;
+
+ JumpList isNotObject;
+
+ emitLoad(base, regT1, regT0);
+ if (!m_codeBlock->isKnownNotImmediate(base))
+ isNotObject.append(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));
+ if (base != m_codeBlock->thisRegister() || m_codeBlock->isStrictMode()) {
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
+ isNotObject.append(branch8(NotEqual, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_type)), Imm32(ObjectType)));
+ }
+
+ // We could inline the case where you have a valid cache, but
+ // this call doesn't seem to be hot.
+ Label isObject(this);
+ JITStubCall getPnamesStubCall(this, cti_op_get_pnames);
+ getPnamesStubCall.addArgument(regT0);
+ getPnamesStubCall.call(dst);
+ load32(Address(regT0, OBJECT_OFFSETOF(JSPropertyNameIterator, m_jsStringsSize)), regT3);
+ store32(Imm32(0), addressFor(i));
+ store32(regT3, addressFor(size));
+ Jump end = jump();
+
+ isNotObject.link(this);
+ addJump(branch32(Equal, regT1, Imm32(JSValue::NullTag)), breakTarget);
+ addJump(branch32(Equal, regT1, Imm32(JSValue::UndefinedTag)), breakTarget);
+ JITStubCall toObjectStubCall(this, cti_to_object);
+ toObjectStubCall.addArgument(regT1, regT0);
+ toObjectStubCall.call(base);
+ jump().linkTo(isObject, this);
+
+ end.link(this);
+}
+
+void JIT::emit_op_next_pname(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int base = currentInstruction[2].u.operand;
+ int i = currentInstruction[3].u.operand;
+ int size = currentInstruction[4].u.operand;
+ int it = currentInstruction[5].u.operand;
+ int target = currentInstruction[6].u.operand;
+
+ JumpList callHasProperty;
+
+ Label begin(this);
+ load32(addressFor(i), regT0);
+ Jump end = branch32(Equal, regT0, addressFor(size));
+
+ // Grab key @ i
+ loadPtr(addressFor(it), regT1);
+ loadPtr(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_jsStrings)), regT2);
+ load32(BaseIndex(regT2, regT0, TimesEight), regT2);
+ store32(Imm32(JSValue::CellTag), tagFor(dst));
+ store32(regT2, payloadFor(dst));
+
+ // Increment i
+ add32(Imm32(1), regT0);
+ store32(regT0, addressFor(i));
+
+ // Verify that i is valid:
+ loadPtr(addressFor(base), regT0);
+
+ // Test base's structure
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
+    callHasProperty.append(branchPtr(NotEqual, regT2, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructure))));
+
+ // Test base's prototype chain
+    loadPtr(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedPrototypeChain)), regT3);
+ loadPtr(Address(regT3, OBJECT_OFFSETOF(StructureChain, m_vector)), regT3);
+ addJump(branchTestPtr(Zero, Address(regT3)), target);
+
+ Label checkPrototype(this);
+ callHasProperty.append(branch32(Equal, Address(regT2, OBJECT_OFFSETOF(Structure, m_prototype) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), Imm32(JSValue::NullTag)));
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(Structure, m_prototype) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT2);
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
+ callHasProperty.append(branchPtr(NotEqual, regT2, Address(regT3)));
+ addPtr(Imm32(sizeof(Structure*)), regT3);
+ branchTestPtr(NonZero, Address(regT3)).linkTo(checkPrototype, this);
+
+ // Continue loop.
+ addJump(jump(), target);
+
+ // Slow case: Ask the object if i is valid.
+ callHasProperty.link(this);
+ loadPtr(addressFor(dst), regT1);
+ JITStubCall stubCall(this, cti_has_property);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(regT1);
+ stubCall.call();
+
+ // Test for valid key.
+ addJump(branchTest32(NonZero, regT0), target);
+ jump().linkTo(begin, this);
+
+ // End of loop.
+ end.link(this);
+}
+
+void JIT::emit_op_push_scope(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_push_scope);
+ stubCall.addArgument(currentInstruction[1].u.operand);
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_pop_scope(Instruction*)
+{
+ JITStubCall(this, cti_op_pop_scope).call();
+}
+
+void JIT::emit_op_to_jsnumber(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int src = currentInstruction[2].u.operand;
+
+ emitLoad(src, regT1, regT0);
+
+ Jump isInt32 = branch32(Equal, regT1, Imm32(JSValue::Int32Tag));
+ addSlowCase(branch32(AboveOrEqual, regT1, Imm32(JSValue::EmptyValueTag)));
+ isInt32.link(this);
+
+ if (src != dst)
+ emitStore(dst, regT1, regT0);
+ map(m_bytecodeOffset + OPCODE_LENGTH(op_to_jsnumber), dst, regT1, regT0);
+}
+
+void JIT::emitSlow_op_to_jsnumber(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ int dst = currentInstruction[1].u.operand;
+
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, cti_op_to_jsnumber);
+ stubCall.addArgument(regT1, regT0);
+ stubCall.call(dst);
+}
+
+void JIT::emit_op_push_new_scope(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_push_new_scope);
+ stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
+ stubCall.addArgument(currentInstruction[3].u.operand);
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_catch(Instruction* currentInstruction)
+{
+ // cti_op_throw returns the callFrame for the handler.
+ move(regT0, callFrameRegister);
+
+ // Now store the exception returned by cti_op_throw.
+ loadPtr(Address(stackPointerRegister, OBJECT_OFFSETOF(struct JITStackFrame, globalData)), regT3);
+ load32(Address(regT3, OBJECT_OFFSETOF(JSGlobalData, exception) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
+ load32(Address(regT3, OBJECT_OFFSETOF(JSGlobalData, exception) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
+ store32(Imm32(JSValue().payload()), Address(regT3, OBJECT_OFFSETOF(JSGlobalData, exception) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
+ store32(Imm32(JSValue().tag()), Address(regT3, OBJECT_OFFSETOF(JSGlobalData, exception) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
+
+ unsigned exception = currentInstruction[1].u.operand;
+ emitStore(exception, regT1, regT0);
+ map(m_bytecodeOffset + OPCODE_LENGTH(op_catch), exception, regT1, regT0);
+}
+
+void JIT::emit_op_jmp_scopes(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_jmp_scopes);
+ stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
+ stubCall.call();
+ addJump(jump(), currentInstruction[2].u.operand);
+}
+
+void JIT::emit_op_switch_imm(Instruction* currentInstruction)
+{
+ unsigned tableIndex = currentInstruction[1].u.operand;
+ unsigned defaultOffset = currentInstruction[2].u.operand;
+ unsigned scrutinee = currentInstruction[3].u.operand;
+
+ // create jump table for switch destinations, track this switch statement.
+ SimpleJumpTable* jumpTable = &m_codeBlock->immediateSwitchJumpTable(tableIndex);
+ m_switches.append(SwitchRecord(jumpTable, m_bytecodeOffset, defaultOffset, SwitchRecord::Immediate));
+ jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
+
+ JITStubCall stubCall(this, cti_op_switch_imm);
+ stubCall.addArgument(scrutinee);
+ stubCall.addArgument(Imm32(tableIndex));
+ stubCall.call();
+ jump(regT0);
+}
+
+void JIT::emit_op_switch_char(Instruction* currentInstruction)
+{
+ unsigned tableIndex = currentInstruction[1].u.operand;
+ unsigned defaultOffset = currentInstruction[2].u.operand;
+ unsigned scrutinee = currentInstruction[3].u.operand;
+
+ // create jump table for switch destinations, track this switch statement.
+ SimpleJumpTable* jumpTable = &m_codeBlock->characterSwitchJumpTable(tableIndex);
+ m_switches.append(SwitchRecord(jumpTable, m_bytecodeOffset, defaultOffset, SwitchRecord::Character));
+ jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
+
+ JITStubCall stubCall(this, cti_op_switch_char);
+ stubCall.addArgument(scrutinee);
+ stubCall.addArgument(Imm32(tableIndex));
+ stubCall.call();
+ jump(regT0);
+}
+
+void JIT::emit_op_switch_string(Instruction* currentInstruction)
+{
+ unsigned tableIndex = currentInstruction[1].u.operand;
+ unsigned defaultOffset = currentInstruction[2].u.operand;
+ unsigned scrutinee = currentInstruction[3].u.operand;
+
+ // create jump table for switch destinations, track this switch statement.
+ StringJumpTable* jumpTable = &m_codeBlock->stringSwitchJumpTable(tableIndex);
+ m_switches.append(SwitchRecord(jumpTable, m_bytecodeOffset, defaultOffset));
+
+ JITStubCall stubCall(this, cti_op_switch_string);
+ stubCall.addArgument(scrutinee);
+ stubCall.addArgument(Imm32(tableIndex));
+ stubCall.call();
+ jump(regT0);
+}
+
+void JIT::emit_op_throw_reference_error(Instruction* currentInstruction)
+{
+ unsigned message = currentInstruction[1].u.operand;
+
+ JITStubCall stubCall(this, cti_op_throw_reference_error);
+ stubCall.addArgument(m_codeBlock->getConstant(message));
+ stubCall.call();
+}
+
+void JIT::emit_op_throw_syntax_error(Instruction* currentInstruction)
+{
+ unsigned message = currentInstruction[1].u.operand;
+
+ JITStubCall stubCall(this, cti_op_throw_syntax_error);
+ stubCall.addArgument(m_codeBlock->getConstant(message));
+ stubCall.call();
+}
+
+void JIT::emit_op_debug(Instruction* currentInstruction)
+{
+#if ENABLE(DEBUG_WITH_BREAKPOINT)
+ UNUSED_PARAM(currentInstruction);
+ breakpoint();
+#else
+ JITStubCall stubCall(this, cti_op_debug);
+ stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
+ stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
+ stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
+ stubCall.call();
+#endif
+}
+
+void JIT::emit_op_enter(Instruction*)
+{
+ // Even though JIT code doesn't use them, we initialize our constant
+ // registers to zap stale pointers, to avoid unnecessarily prolonging
+ // object lifetime and increasing GC pressure.
+ for (int i = 0; i < m_codeBlock->m_numVars; ++i)
+ emitStore(i, jsUndefined());
+}
+
+void JIT::emit_op_create_activation(Instruction* currentInstruction)
+{
+ unsigned activation = currentInstruction[1].u.operand;
+
+ Jump activationCreated = branch32(NotEqual, tagFor(activation), Imm32(JSValue::EmptyValueTag));
+ JITStubCall(this, cti_op_push_activation).call(activation);
+ activationCreated.link(this);
+}
+
+void JIT::emit_op_create_arguments(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+
+ Jump argsCreated = branch32(NotEqual, tagFor(dst), Imm32(JSValue::EmptyValueTag));
+
+ if (m_codeBlock->m_numParameters == 1)
+ JITStubCall(this, cti_op_create_arguments_no_params).call();
+ else
+ JITStubCall(this, cti_op_create_arguments).call();
+
+ emitStore(dst, regT1, regT0);
+ emitStore(unmodifiedArgumentsRegister(dst), regT1, regT0);
+
+ argsCreated.link(this);
+}
+
+void JIT::emit_op_init_lazy_reg(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+
+ emitStore(dst, JSValue());
+}
+
+void JIT::emit_op_get_callee(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, regT0);
+ emitStoreCell(dst, regT0);
+}
+
+void JIT::emit_op_create_this(Instruction* currentInstruction)
+{
+ unsigned protoRegister = currentInstruction[2].u.operand;
+ emitLoad(protoRegister, regT1, regT0);
+ JITStubCall stubCall(this, cti_op_create_this);
+ stubCall.addArgument(regT1, regT0);
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_convert_this(Instruction* currentInstruction)
+{
+ unsigned thisRegister = currentInstruction[1].u.operand;
+
+ emitLoad(thisRegister, regT1, regT0);
+
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));
+
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
+ addSlowCase(branchTest8(NonZero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(NeedsThisConversion)));
+
+ map(m_bytecodeOffset + OPCODE_LENGTH(op_convert_this), thisRegister, regT1, regT0);
+}
+
+void JIT::emit_op_convert_this_strict(Instruction* currentInstruction)
+{
+ unsigned thisRegister = currentInstruction[1].u.operand;
+
+ emitLoad(thisRegister, regT1, regT0);
+
+ Jump notNull = branch32(NotEqual, regT1, Imm32(JSValue::EmptyValueTag));
+ emitStore(thisRegister, jsNull());
+ Jump setThis = jump();
+ notNull.link(this);
+ Jump isImmediate = branch32(NotEqual, regT1, Imm32(JSValue::CellTag));
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), regT2);
+ Jump notAnObject = branch8(NotEqual, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_type)), Imm32(ObjectType));
+ addSlowCase(branchTest8(NonZero, Address(regT2, OBJECT_OFFSETOF(Structure, m_typeInfo.m_flags)), Imm32(NeedsThisConversion)));
+ isImmediate.link(this);
+ notAnObject.link(this);
+ setThis.link(this);
+ map(m_bytecodeOffset + OPCODE_LENGTH(op_convert_this_strict), thisRegister, regT1, regT0);
+}
+
+void JIT::emitSlow_op_convert_this(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned thisRegister = currentInstruction[1].u.operand;
+
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, cti_op_convert_this);
+ stubCall.addArgument(regT1, regT0);
+ stubCall.call(thisRegister);
+}
+
+void JIT::emitSlow_op_convert_this_strict(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned thisRegister = currentInstruction[1].u.operand;
+
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, cti_op_convert_this_strict);
+ stubCall.addArgument(regT1, regT0);
+ stubCall.call(thisRegister);
+}
+
+void JIT::emit_op_profile_will_call(Instruction* currentInstruction)
+{
+ peek(regT2, OBJECT_OFFSETOF(JITStackFrame, enabledProfilerReference) / sizeof(void*));
+ Jump noProfiler = branchTestPtr(Zero, Address(regT2));
+
+ JITStubCall stubCall(this, cti_op_profile_will_call);
+ stubCall.addArgument(currentInstruction[1].u.operand);
+ stubCall.call();
+ noProfiler.link(this);
+}
+
+void JIT::emit_op_profile_did_call(Instruction* currentInstruction)
+{
+ peek(regT2, OBJECT_OFFSETOF(JITStackFrame, enabledProfilerReference) / sizeof(void*));
+ Jump noProfiler = branchTestPtr(Zero, Address(regT2));
+
+ JITStubCall stubCall(this, cti_op_profile_did_call);
+ stubCall.addArgument(currentInstruction[1].u.operand);
+ stubCall.call();
+ noProfiler.link(this);
+}
+
+void JIT::emit_op_get_arguments_length(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int argumentsRegister = currentInstruction[2].u.operand;
+ addSlowCase(branch32(NotEqual, tagFor(argumentsRegister), Imm32(JSValue::EmptyValueTag)));
+ emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT0);
+ sub32(Imm32(1), regT0);
+ emitStoreInt32(dst, regT0);
+}
+
+void JIT::emitSlow_op_get_arguments_length(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCase(iter);
+ int dst = currentInstruction[1].u.operand;
+ int base = currentInstruction[2].u.operand;
+ int ident = currentInstruction[3].u.operand;
+
+ JITStubCall stubCall(this, cti_op_get_by_id_generic);
+ stubCall.addArgument(base);
+ stubCall.addArgument(ImmPtr(&(m_codeBlock->identifier(ident))));
+ stubCall.call(dst);
+}
+
+void JIT::emit_op_get_argument_by_val(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int argumentsRegister = currentInstruction[2].u.operand;
+ int property = currentInstruction[3].u.operand;
+ addSlowCase(branch32(NotEqual, tagFor(argumentsRegister), Imm32(JSValue::EmptyValueTag)));
+ emitLoad(property, regT1, regT2);
+ addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
+ add32(Imm32(1), regT2);
+ // regT2 now contains the integer index of the argument we want, counting the implicit 'this' slot
+ emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT3);
+ addSlowCase(branch32(AboveOrEqual, regT2, regT3));
+
+ Jump skipOutofLineParams;
+ int numArgs = m_codeBlock->m_numParameters;
+ if (numArgs) {
+ Jump notInInPlaceArgs = branch32(AboveOrEqual, regT2, Imm32(numArgs));
+ addPtr(Imm32(static_cast<unsigned>(-(RegisterFile::CallFrameHeaderSize + numArgs) * sizeof(Register))), callFrameRegister, regT1);
+ loadPtr(BaseIndex(regT1, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
+ loadPtr(BaseIndex(regT1, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
+ skipOutofLineParams = jump();
+ notInInPlaceArgs.link(this);
+ }
+
+ addPtr(Imm32(static_cast<unsigned>(-(RegisterFile::CallFrameHeaderSize + numArgs) * sizeof(Register))), callFrameRegister, regT1);
+ mul32(Imm32(sizeof(Register)), regT3, regT3);
+ subPtr(regT3, regT1);
+ loadPtr(BaseIndex(regT1, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
+ loadPtr(BaseIndex(regT1, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
+ if (numArgs)
+ skipOutofLineParams.link(this);
+ emitStore(dst, regT1, regT0);
+}
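
The fast path above indexes the register file directly: declared parameters sit in place just below the call-frame header, while a call that passes more arguments than the callee declares has the full argument run copied out of line a further ArgumentCount registers down. (emit_op_get_arguments_length above likewise subtracts one because RegisterFile::ArgumentCount counts the implicit 'this' slot.) A hedged sketch of the address arithmetic, mirroring the addPtr/mul32/subPtr sequence above rather than any JSC API:

Register* argumentSlot(Register* callFrame, unsigned indexIncludingThis,
                       unsigned numParameters, unsigned argumentCountIncludingThis)
{
    Register* inPlace = callFrame - RegisterFile::CallFrameHeaderSize - numParameters;
    if (indexIncludingThis < numParameters)
        return inPlace + indexIncludingThis;   // parameter stored in place
    // Extra arguments: the run was copied out of line, one full argument
    // frame (ArgumentCount registers) further down.
    return inPlace - argumentCountIncludingThis + indexIncludingThis;
}
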
+
+void JIT::emitSlow_op_get_argument_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned arguments = currentInstruction[2].u.operand;
+ unsigned property = currentInstruction[3].u.operand;
+
+ linkSlowCase(iter);
+ Jump skipArgumentsCreation = jump();
+
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ if (m_codeBlock->m_numParameters == 1)
+ JITStubCall(this, cti_op_create_arguments_no_params).call();
+ else
+ JITStubCall(this, cti_op_create_arguments).call();
+
+ emitStore(arguments, regT1, regT0);
+ emitStore(unmodifiedArgumentsRegister(arguments), regT1, regT0);
+
+ skipArgumentsCreation.link(this);
+ JITStubCall stubCall(this, cti_op_get_by_val);
+ stubCall.addArgument(arguments);
+ stubCall.addArgument(property);
+ stubCall.call(dst);
+}
+
+#if ENABLE(JIT_USE_SOFT_MODULO)
+void JIT::softModulo()
+{
+ push(regT1);
+ push(regT3);
+ move(regT2, regT3);
+ move(regT0, regT2);
+ move(Imm32(0), regT1);
+
+ // Check for a negative remainder: record the operand signs and work on absolute values
+ Jump positiveRegT3 = branch32(GreaterThanOrEqual, regT3, Imm32(0));
+ neg32(regT3);
+ xor32(Imm32(1), regT1);
+ positiveRegT3.link(this);
+
+ Jump positiveRegT2 = branch32(GreaterThanOrEqual, regT2, Imm32(0));
+ neg32(regT2);
+ xor32(Imm32(2), regT1);
+ positiveRegT2.link(this);
+
+ // Save the condition for a negative remainder
+ push(regT1);
+
+ Jump exitBranch = branch32(LessThan, regT2, regT3);
+
+ // Power of two fast case
+ move(regT3, regT0);
+ sub32(Imm32(1), regT0);
+ Jump powerOfTwo = branchTest32(NotEqual, regT0, regT3);
+ and32(regT0, regT2);
+ powerOfTwo.link(this);
+
+ and32(regT3, regT0);
+
+ Jump exitBranch2 = branchTest32(Zero, regT0);
+
+ countLeadingZeros32(regT2, regT0);
+ countLeadingZeros32(regT3, regT1);
+ sub32(regT0, regT1);
+
+ Jump useFullTable = branch32(Equal, regT1, Imm32(31));
+
+ neg32(regT1);
+ add32(Imm32(31), regT1);
+
+ int elementSizeByShift = -1;
+#if CPU(ARM)
+ elementSizeByShift = 3;
+#else
+#error "JIT_OPTIMIZE_MOD not yet supported on this platform."
+#endif
+ relativeTableJump(regT1, elementSizeByShift);
+
+ useFullTable.link(this);
+ // Modulo table
+ for (int i = 31; i > 0; --i) {
+#if CPU(ARM_TRADITIONAL)
+ m_assembler.cmp_r(regT2, m_assembler.lsl(regT3, i));
+ m_assembler.sub_r(regT2, regT2, m_assembler.lsl(regT3, i), ARMAssembler::CS);
+#elif CPU(ARM_THUMB2)
+ ShiftTypeAndAmount shift(SRType_LSL, i);
+ m_assembler.sub_S(regT1, regT2, regT3, shift);
+ m_assembler.it(ARMv7Assembler::ConditionCS);
+ m_assembler.mov(regT2, regT1);
+#else
+#error "JIT_OPTIMIZE_MOD not yet supported on this platform."
+#endif
+ }
+
+ Jump lower = branch32(Below, regT2, regT3);
+ sub32(regT3, regT2);
+ lower.link(this);
+
+ exitBranch.link(this);
+ exitBranch2.link(this);
+
+ // Check for a negative remainder
+ pop(regT1);
+ Jump positiveResult = branch32(Equal, regT1, Imm32(0));
+ neg32(regT2);
+ positiveResult.link(this);
+
+ move(regT2, regT0);
+
+ pop(regT3);
+ pop(regT1);
+ ret();
+}
+#endif // ENABLE(JIT_USE_SOFT_MODULO)
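
For reference, a self-contained C++ sketch of the shift-and-subtract remainder algorithm that softModulo() above emits for targets without a hardware divide. It illustrates the algorithm rather than transcribing the stub: sign handling is condensed to C's convention (the remainder takes the dividend's sign), whereas the stub records both operands' signs in regT1, and divisor == 0 is assumed to be handled elsewhere.

#include <cstdint>

// Restoring-division remainder (sketch; __builtin_clz is a GCC/Clang builtin,
// well defined here because a >= d > 0 on every path that uses it).
int32_t softModuloReference(int32_t dividend, int32_t divisor)
{
    bool negativeRemainder = dividend < 0;
    uint32_t a = dividend < 0 ? 0u - static_cast<uint32_t>(dividend) : static_cast<uint32_t>(dividend);
    uint32_t d = divisor < 0 ? 0u - static_cast<uint32_t>(divisor) : static_cast<uint32_t>(divisor);

    if (a >= d) {
        if (!(d & (d - 1))) {
            // Power-of-two fast case: mask instead of looping.
            a &= d - 1;
        } else {
            // One compare/conditional-subtract per shift position, highest first.
            // The stub unrolls this as its 31-entry table and uses
            // countLeadingZeros32 to jump into the table at the right row.
            for (int shift = __builtin_clz(d) - __builtin_clz(a); shift >= 0; --shift) {
                if (a >= (d << shift))
                    a -= d << shift;
            }
        }
    }
    return negativeRemainder ? -static_cast<int32_t>(a) : static_cast<int32_t>(a);
}
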
+
+} // namespace JSC
+
+#endif // USE(JSVALUE32_64)
+#endif // ENABLE(JIT)
diff --git a/JavaScriptCore/jit/JITPropertyAccess.cpp b/JavaScriptCore/jit/JITPropertyAccess.cpp
index ef95f99..1b95eec 100644
--- a/JavaScriptCore/jit/JITPropertyAccess.cpp
+++ b/JavaScriptCore/jit/JITPropertyAccess.cpp
@@ -24,11 +24,13 @@
*/
#include "config.h"
-#include "JIT.h"
#if ENABLE(JIT)
+#if USE(JSVALUE64)
+#include "JIT.h"
#include "CodeBlock.h"
+#include "GetterSetter.h"
#include "JITInlineMethods.h"
#include "JITStubCall.h"
#include "JSArray.h"
@@ -48,978 +50,37 @@ using namespace std;
namespace JSC {
-#if USE(JSVALUE32_64)
-
-void JIT::emit_op_put_by_index(Instruction* currentInstruction)
-{
- unsigned base = currentInstruction[1].u.operand;
- unsigned property = currentInstruction[2].u.operand;
- unsigned value = currentInstruction[3].u.operand;
-
- JITStubCall stubCall(this, cti_op_put_by_index);
- stubCall.addArgument(base);
- stubCall.addArgument(Imm32(property));
- stubCall.addArgument(value);
- stubCall.call();
-}
-
-void JIT::emit_op_put_getter(Instruction* currentInstruction)
-{
- unsigned base = currentInstruction[1].u.operand;
- unsigned property = currentInstruction[2].u.operand;
- unsigned function = currentInstruction[3].u.operand;
-
- JITStubCall stubCall(this, cti_op_put_getter);
- stubCall.addArgument(base);
- stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(property)));
- stubCall.addArgument(function);
- stubCall.call();
-}
-
-void JIT::emit_op_put_setter(Instruction* currentInstruction)
-{
- unsigned base = currentInstruction[1].u.operand;
- unsigned property = currentInstruction[2].u.operand;
- unsigned function = currentInstruction[3].u.operand;
-
- JITStubCall stubCall(this, cti_op_put_setter);
- stubCall.addArgument(base);
- stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(property)));
- stubCall.addArgument(function);
- stubCall.call();
-}
-
-void JIT::emit_op_del_by_id(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned base = currentInstruction[2].u.operand;
- unsigned property = currentInstruction[3].u.operand;
-
- JITStubCall stubCall(this, cti_op_del_by_id);
- stubCall.addArgument(base);
- stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(property)));
- stubCall.call(dst);
-}
-
-
-#if !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
-
-/* ------------------------------ BEGIN: !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
-
-// Treat these as nops - the call will be handed as a regular get_by_id/op_call pair.
-void JIT::emit_op_method_check(Instruction*) {}
-void JIT::emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&) { ASSERT_NOT_REACHED(); }
-#if ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
-#error "JIT_OPTIMIZE_METHOD_CALLS requires JIT_OPTIMIZE_PROPERTY_ACCESS"
-#endif
-
-void JIT::emit_op_get_by_val(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned base = currentInstruction[2].u.operand;
- unsigned property = currentInstruction[3].u.operand;
-
- JITStubCall stubCall(this, cti_op_get_by_val);
- stubCall.addArgument(base);
- stubCall.addArgument(property);
- stubCall.call(dst);
-}
-
-void JIT::emitSlow_op_get_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&)
-{
- ASSERT_NOT_REACHED();
-}
-
-void JIT::emit_op_put_by_val(Instruction* currentInstruction)
-{
- unsigned base = currentInstruction[1].u.operand;
- unsigned property = currentInstruction[2].u.operand;
- unsigned value = currentInstruction[3].u.operand;
-
- JITStubCall stubCall(this, cti_op_put_by_val);
- stubCall.addArgument(base);
- stubCall.addArgument(property);
- stubCall.addArgument(value);
- stubCall.call();
-}
-
-void JIT::emitSlow_op_put_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&)
-{
- ASSERT_NOT_REACHED();
-}
-
-void JIT::emit_op_get_by_id(Instruction* currentInstruction)
-{
- int dst = currentInstruction[1].u.operand;
- int base = currentInstruction[2].u.operand;
- int ident = currentInstruction[3].u.operand;
-
- JITStubCall stubCall(this, cti_op_get_by_id_generic);
- stubCall.addArgument(base);
- stubCall.addArgument(ImmPtr(&(m_codeBlock->identifier(ident))));
- stubCall.call(dst);
-
- m_propertyAccessInstructionIndex++;
-}
-
-void JIT::emitSlow_op_get_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&)
-{
- m_propertyAccessInstructionIndex++;
- ASSERT_NOT_REACHED();
-}
-
-void JIT::emit_op_put_by_id(Instruction* currentInstruction)
-{
- int base = currentInstruction[1].u.operand;
- int ident = currentInstruction[2].u.operand;
- int value = currentInstruction[3].u.operand;
-
- JITStubCall stubCall(this, cti_op_put_by_id_generic);
- stubCall.addArgument(base);
- stubCall.addArgument(ImmPtr(&(m_codeBlock->identifier(ident))));
- stubCall.addArgument(value);
- stubCall.call();
-
- m_propertyAccessInstructionIndex++;
-}
-
-void JIT::emitSlow_op_put_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&)
-{
- m_propertyAccessInstructionIndex++;
- ASSERT_NOT_REACHED();
-}
-
-#else // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
-
-/* ------------------------------ BEGIN: ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
-
-#if ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
-
-void JIT::emit_op_method_check(Instruction* currentInstruction)
-{
- // Assert that the following instruction is a get_by_id.
- ASSERT(m_interpreter->getOpcodeID((currentInstruction + OPCODE_LENGTH(op_method_check))->u.opcode) == op_get_by_id);
-
- currentInstruction += OPCODE_LENGTH(op_method_check);
-
- // Do the method check - check the object & its prototype's structure inline (this is the common case).
- m_methodCallCompilationInfo.append(MethodCallCompilationInfo(m_propertyAccessInstructionIndex));
- MethodCallCompilationInfo& info = m_methodCallCompilationInfo.last();
-
- int dst = currentInstruction[1].u.operand;
- int base = currentInstruction[2].u.operand;
-
- emitLoad(base, regT1, regT0);
- emitJumpSlowCaseIfNotJSCell(base, regT1);
-
- BEGIN_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck);
-
- Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), info.structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
- DataLabelPtr protoStructureToCompare, protoObj = moveWithPatch(ImmPtr(0), regT2);
- Jump protoStructureCheck = branchPtrWithPatch(NotEqual, Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), protoStructureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
-
- // This will be relinked to load the function without doing a load.
- DataLabelPtr putFunction = moveWithPatch(ImmPtr(0), regT0);
-
- END_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck);
-
- move(Imm32(JSValue::CellTag), regT1);
- Jump match = jump();
-
- ASSERT(differenceBetween(info.structureToCompare, protoObj) == patchOffsetMethodCheckProtoObj);
- ASSERT(differenceBetween(info.structureToCompare, protoStructureToCompare) == patchOffsetMethodCheckProtoStruct);
- ASSERT(differenceBetween(info.structureToCompare, putFunction) == patchOffsetMethodCheckPutFunction);
-
- // Link the failure cases here.
- structureCheck.link(this);
- protoStructureCheck.link(this);
-
- // Do a regular(ish) get_by_id (the slow case will be link to
- // cti_op_get_by_id_method_check instead of cti_op_get_by_id.
- compileGetByIdHotPath();
-
- match.link(this);
- emitStore(dst, regT1, regT0);
- map(m_bytecodeIndex + OPCODE_LENGTH(op_method_check), dst, regT1, regT0);
-
- // We've already generated the following get_by_id, so make sure it's skipped over.
- m_bytecodeIndex += OPCODE_LENGTH(op_get_by_id);
-}
-
-void JIT::emitSlow_op_method_check(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- currentInstruction += OPCODE_LENGTH(op_method_check);
-
- int dst = currentInstruction[1].u.operand;
- int base = currentInstruction[2].u.operand;
- int ident = currentInstruction[3].u.operand;
-
- compileGetByIdSlowCase(dst, base, &(m_codeBlock->identifier(ident)), iter, true);
-
- // We've already generated the following get_by_id, so make sure it's skipped over.
- m_bytecodeIndex += OPCODE_LENGTH(op_get_by_id);
-}
-
-#else //!ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
-
-// Treat these as nops - the call will be handed as a regular get_by_id/op_call pair.
-void JIT::emit_op_method_check(Instruction*) {}
-void JIT::emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&) { ASSERT_NOT_REACHED(); }
-
-#endif
-
-void JIT::emit_op_get_by_val(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned base = currentInstruction[2].u.operand;
- unsigned property = currentInstruction[3].u.operand;
-
- emitLoad2(base, regT1, regT0, property, regT3, regT2);
-
- addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
- emitJumpSlowCaseIfNotJSCell(base, regT1);
- addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));
-
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT3);
- addSlowCase(branch32(AboveOrEqual, regT2, Address(regT0, OBJECT_OFFSETOF(JSArray, m_vectorLength))));
-
- load32(BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + 4), regT1); // tag
- load32(BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), regT0); // payload
- addSlowCase(branch32(Equal, regT1, Imm32(JSValue::EmptyValueTag)));
-
- emitStore(dst, regT1, regT0);
- map(m_bytecodeIndex + OPCODE_LENGTH(op_get_by_val), dst, regT1, regT0);
-}
-
-void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned base = currentInstruction[2].u.operand;
- unsigned property = currentInstruction[3].u.operand;
-
- linkSlowCase(iter); // property int32 check
- linkSlowCaseIfNotJSCell(iter, base); // base cell check
- linkSlowCase(iter); // base array check
- linkSlowCase(iter); // vector length check
- linkSlowCase(iter); // empty value
-
- JITStubCall stubCall(this, cti_op_get_by_val);
- stubCall.addArgument(base);
- stubCall.addArgument(property);
- stubCall.call(dst);
-}
-
-void JIT::emit_op_put_by_val(Instruction* currentInstruction)
-{
- unsigned base = currentInstruction[1].u.operand;
- unsigned property = currentInstruction[2].u.operand;
- unsigned value = currentInstruction[3].u.operand;
-
- emitLoad2(base, regT1, regT0, property, regT3, regT2);
-
- addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
- emitJumpSlowCaseIfNotJSCell(base, regT1);
- addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));
- addSlowCase(branch32(AboveOrEqual, regT2, Address(regT0, OBJECT_OFFSETOF(JSArray, m_vectorLength))));
-
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT3);
-
- Jump empty = branch32(Equal, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + 4), Imm32(JSValue::EmptyValueTag));
-
- Label storeResult(this);
- emitLoad(value, regT1, regT0);
- store32(regT0, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]))); // payload
- store32(regT1, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + 4)); // tag
- Jump end = jump();
-
- empty.link(this);
- add32(Imm32(1), Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
- branch32(Below, regT2, Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_length))).linkTo(storeResult, this);
-
- add32(Imm32(1), regT2, regT0);
- store32(regT0, Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_length)));
- jump().linkTo(storeResult, this);
-
- end.link(this);
-}
-
-void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned base = currentInstruction[1].u.operand;
- unsigned property = currentInstruction[2].u.operand;
- unsigned value = currentInstruction[3].u.operand;
-
- linkSlowCase(iter); // property int32 check
- linkSlowCaseIfNotJSCell(iter, base); // base cell check
- linkSlowCase(iter); // base not array check
- linkSlowCase(iter); // in vector check
-
- JITStubCall stubPutByValCall(this, cti_op_put_by_val);
- stubPutByValCall.addArgument(base);
- stubPutByValCall.addArgument(property);
- stubPutByValCall.addArgument(value);
- stubPutByValCall.call();
-}
-
-void JIT::emit_op_get_by_id(Instruction* currentInstruction)
-{
- int dst = currentInstruction[1].u.operand;
- int base = currentInstruction[2].u.operand;
-
- emitLoad(base, regT1, regT0);
- emitJumpSlowCaseIfNotJSCell(base, regT1);
- compileGetByIdHotPath();
- emitStore(dst, regT1, regT0);
- map(m_bytecodeIndex + OPCODE_LENGTH(op_get_by_id), dst, regT1, regT0);
-}
-
-void JIT::compileGetByIdHotPath()
-{
- // As for put_by_id, get_by_id requires the offset of the Structure and the offset of the access to be patched.
- // Additionally, for get_by_id we need patch the offset of the branch to the slow case (we patch this to jump
- // to array-length / prototype access tranpolines, and finally we also the the property-map access offset as a label
- // to jump back to if one of these trampolies finds a match.
-
- BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);
-
- Label hotPathBegin(this);
- m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;
- m_propertyAccessInstructionIndex++;
-
- DataLabelPtr structureToCompare;
- Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
- addSlowCase(structureCheck);
- ASSERT(differenceBetween(hotPathBegin, structureToCompare) == patchOffsetGetByIdStructure);
- ASSERT(differenceBetween(hotPathBegin, structureCheck) == patchOffsetGetByIdBranchToSlowCase);
-
- Label externalLoad = loadPtrWithPatchToLEA(Address(regT0, OBJECT_OFFSETOF(JSObject, m_externalStorage)), regT2);
- Label externalLoadComplete(this);
- ASSERT(differenceBetween(hotPathBegin, externalLoad) == patchOffsetGetByIdExternalLoad);
- ASSERT(differenceBetween(externalLoad, externalLoadComplete) == patchLengthGetByIdExternalLoad);
-
- DataLabel32 displacementLabel1 = loadPtrWithAddressOffsetPatch(Address(regT2, patchGetByIdDefaultOffset), regT0); // payload
- ASSERT(differenceBetween(hotPathBegin, displacementLabel1) == patchOffsetGetByIdPropertyMapOffset1);
- DataLabel32 displacementLabel2 = loadPtrWithAddressOffsetPatch(Address(regT2, patchGetByIdDefaultOffset), regT1); // tag
- ASSERT(differenceBetween(hotPathBegin, displacementLabel2) == patchOffsetGetByIdPropertyMapOffset2);
-
- Label putResult(this);
- ASSERT(differenceBetween(hotPathBegin, putResult) == patchOffsetGetByIdPutResult);
-
- END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);
-}
-
-void JIT::emitSlow_op_get_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- int dst = currentInstruction[1].u.operand;
- int base = currentInstruction[2].u.operand;
- int ident = currentInstruction[3].u.operand;
-
- compileGetByIdSlowCase(dst, base, &(m_codeBlock->identifier(ident)), iter);
-}
-
-void JIT::compileGetByIdSlowCase(int dst, int base, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, bool isMethodCheck)
-{
- // As for the hot path of get_by_id, above, we ensure that we can use an architecture specific offset
- // so that we only need track one pointer into the slow case code - we track a pointer to the location
- // of the call (which we can use to look up the patch information), but should a array-length or
- // prototype access trampoline fail we want to bail out back to here. To do so we can subtract back
- // the distance from the call to the head of the slow case.
- linkSlowCaseIfNotJSCell(iter, base);
- linkSlowCase(iter);
-
- BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase);
-
-#ifndef NDEBUG
- Label coldPathBegin(this);
-#endif
- JITStubCall stubCall(this, isMethodCheck ? cti_op_get_by_id_method_check : cti_op_get_by_id);
- stubCall.addArgument(regT1, regT0);
- stubCall.addArgument(ImmPtr(ident));
- Call call = stubCall.call(dst);
-
- END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase);
-
- ASSERT(differenceBetween(coldPathBegin, call) == patchOffsetGetByIdSlowCaseCall);
-
- // Track the location of the call; this will be used to recover patch information.
- m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].callReturnLocation = call;
- m_propertyAccessInstructionIndex++;
-}
-
-void JIT::emit_op_put_by_id(Instruction* currentInstruction)
-{
- // In order to be able to patch both the Structure, and the object offset, we store one pointer,
- // to just after the arguments have been loaded into registers 'hotPathBegin', and we generate code
- // such that the Structure & offset are always at the same distance from this.
-
- int base = currentInstruction[1].u.operand;
- int value = currentInstruction[3].u.operand;
-
- emitLoad2(base, regT1, regT0, value, regT3, regT2);
-
- emitJumpSlowCaseIfNotJSCell(base, regT1);
-
- BEGIN_UNINTERRUPTED_SEQUENCE(sequencePutById);
-
- Label hotPathBegin(this);
- m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;
- m_propertyAccessInstructionIndex++;
-
- // It is important that the following instruction plants a 32bit immediate, in order that it can be patched over.
- DataLabelPtr structureToCompare;
- addSlowCase(branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))));
- ASSERT(differenceBetween(hotPathBegin, structureToCompare) == patchOffsetPutByIdStructure);
-
- // Plant a load from a bogus ofset in the object's property map; we will patch this later, if it is to be used.
- Label externalLoad = loadPtrWithPatchToLEA(Address(regT0, OBJECT_OFFSETOF(JSObject, m_externalStorage)), regT0);
- Label externalLoadComplete(this);
- ASSERT(differenceBetween(hotPathBegin, externalLoad) == patchOffsetPutByIdExternalLoad);
- ASSERT(differenceBetween(externalLoad, externalLoadComplete) == patchLengthPutByIdExternalLoad);
-
- DataLabel32 displacementLabel1 = storePtrWithAddressOffsetPatch(regT2, Address(regT0, patchGetByIdDefaultOffset)); // payload
- DataLabel32 displacementLabel2 = storePtrWithAddressOffsetPatch(regT3, Address(regT0, patchGetByIdDefaultOffset)); // tag
-
- END_UNINTERRUPTED_SEQUENCE(sequencePutById);
-
- ASSERT(differenceBetween(hotPathBegin, displacementLabel1) == patchOffsetPutByIdPropertyMapOffset1);
- ASSERT(differenceBetween(hotPathBegin, displacementLabel2) == patchOffsetPutByIdPropertyMapOffset2);
-}
-
-void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- int base = currentInstruction[1].u.operand;
- int ident = currentInstruction[2].u.operand;
-
- linkSlowCaseIfNotJSCell(iter, base);
- linkSlowCase(iter);
-
- JITStubCall stubCall(this, cti_op_put_by_id);
- stubCall.addArgument(regT1, regT0);
- stubCall.addArgument(ImmPtr(&(m_codeBlock->identifier(ident))));
- stubCall.addArgument(regT3, regT2);
- Call call = stubCall.call();
-
- // Track the location of the call; this will be used to recover patch information.
- m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].callReturnLocation = call;
- m_propertyAccessInstructionIndex++;
-}
-
-// Compile a store into an object's property storage. May overwrite base.
-void JIT::compilePutDirectOffset(RegisterID base, RegisterID valueTag, RegisterID valuePayload, Structure* structure, size_t cachedOffset)
-{
- int offset = cachedOffset;
- if (structure->isUsingInlineStorage())
- offset += OBJECT_OFFSETOF(JSObject, m_inlineStorage) / sizeof(Register);
- else
- loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), base);
- emitStore(offset, valueTag, valuePayload, base);
-}
-
-// Compile a load from an object's property storage. May overwrite base.
-void JIT::compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, Structure* structure, size_t cachedOffset)
-{
- int offset = cachedOffset;
- if (structure->isUsingInlineStorage())
- offset += OBJECT_OFFSETOF(JSObject, m_inlineStorage) / sizeof(Register);
- else
- loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), base);
- emitLoad(offset, resultTag, resultPayload, base);
-}
-
-void JIT::compileGetDirectOffset(JSObject* base, RegisterID temp, RegisterID resultTag, RegisterID resultPayload, size_t cachedOffset)
-{
- if (base->isUsingInlineStorage()) {
- load32(reinterpret_cast<char*>(&base->m_inlineStorage[cachedOffset]), resultPayload);
- load32(reinterpret_cast<char*>(&base->m_inlineStorage[cachedOffset]) + 4, resultTag);
- return;
- }
-
- size_t offset = cachedOffset * sizeof(JSValue);
-
- PropertyStorage* protoPropertyStorage = &base->m_externalStorage;
- loadPtr(static_cast<void*>(protoPropertyStorage), temp);
- load32(Address(temp, offset), resultPayload);
- load32(Address(temp, offset + 4), resultTag);
-}
-
-void JIT::testPrototype(Structure* structure, JumpList& failureCases)
-{
- if (structure->m_prototype.isNull())
- return;
-
- failureCases.append(branchPtr(NotEqual, AbsoluteAddress(&asCell(structure->m_prototype)->m_structure), ImmPtr(asCell(structure->m_prototype)->m_structure)));
-}
-
-void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress)
-{
- // It is assumed that regT0 contains the basePayload and regT1 contains the baseTag. The value can be found on the stack.
-
- JumpList failureCases;
- failureCases.append(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));
- failureCases.append(branchPtr(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), ImmPtr(oldStructure)));
- testPrototype(oldStructure, failureCases);
-
- // Verify that nothing in the prototype chain has a setter for this property.
- for (RefPtr<Structure>* it = chain->head(); *it; ++it)
- testPrototype(it->get(), failureCases);
-
- // Reallocate property storage if needed.
- Call callTarget;
- bool willNeedStorageRealloc = oldStructure->propertyStorageCapacity() != newStructure->propertyStorageCapacity();
- if (willNeedStorageRealloc) {
- // This trampoline was called to like a JIT stub; before we can can call again we need to
- // remove the return address from the stack, to prevent the stack from becoming misaligned.
- preserveReturnAddressAfterCall(regT3);
-
- JITStubCall stubCall(this, cti_op_put_by_id_transition_realloc);
- stubCall.skipArgument(); // base
- stubCall.skipArgument(); // ident
- stubCall.skipArgument(); // value
- stubCall.addArgument(Imm32(oldStructure->propertyStorageCapacity()));
- stubCall.addArgument(Imm32(newStructure->propertyStorageCapacity()));
- stubCall.call(regT0);
-
- restoreReturnAddressBeforeReturn(regT3);
- }
-
- sub32(Imm32(1), AbsoluteAddress(oldStructure->addressOfCount()));
- add32(Imm32(1), AbsoluteAddress(newStructure->addressOfCount()));
- storePtr(ImmPtr(newStructure), Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)));
-
- load32(Address(stackPointerRegister, offsetof(struct JITStackFrame, args[2]) + sizeof(void*)), regT3);
- load32(Address(stackPointerRegister, offsetof(struct JITStackFrame, args[2]) + sizeof(void*) + 4), regT2);
-
- // Write the value
- compilePutDirectOffset(regT0, regT2, regT3, newStructure, cachedOffset);
-
- ret();
-
- ASSERT(!failureCases.empty());
- failureCases.link(this);
- restoreArgumentReferenceForTrampoline();
- Call failureCall = tailRecursiveCall();
-
- LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
-
- patchBuffer.link(failureCall, FunctionPtr(cti_op_put_by_id_fail));
-
- if (willNeedStorageRealloc) {
- ASSERT(m_calls.size() == 1);
- patchBuffer.link(m_calls[0].from, FunctionPtr(cti_op_put_by_id_transition_realloc));
- }
-
- CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
- stubInfo->stubRoutine = entryLabel;
- RepatchBuffer repatchBuffer(m_codeBlock);
- repatchBuffer.relinkCallerToTrampoline(returnAddress, entryLabel);
-}
-
-void JIT::patchGetByIdSelf(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress)
-{
- RepatchBuffer repatchBuffer(codeBlock);
-
- // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
- // Should probably go to JITStubs::cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
- repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_self_fail));
-
- int offset = sizeof(JSValue) * cachedOffset;
-
- // If we're patching to use inline storage, convert the initial load to a lea; this avoids the extra load
- // and makes the subsequent load's offset automatically correct
- if (structure->isUsingInlineStorage())
- repatchBuffer.repatchLoadPtrToLEA(stubInfo->hotPathBegin.instructionAtOffset(patchOffsetGetByIdExternalLoad));
-
- // Patch the offset into the propoerty map to load from, then patch the Structure to look for.
- repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetGetByIdStructure), structure);
- repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetGetByIdPropertyMapOffset1), offset); // payload
- repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetGetByIdPropertyMapOffset2), offset + 4); // tag
-}
-
-void JIT::patchMethodCallProto(CodeBlock* codeBlock, MethodCallLinkInfo& methodCallLinkInfo, JSFunction* callee, Structure* structure, JSObject* proto, ReturnAddressPtr returnAddress)
-{
- RepatchBuffer repatchBuffer(codeBlock);
-
- ASSERT(!methodCallLinkInfo.cachedStructure);
- methodCallLinkInfo.cachedStructure = structure;
- structure->ref();
-
- Structure* prototypeStructure = proto->structure();
- ASSERT(!methodCallLinkInfo.cachedPrototypeStructure);
- methodCallLinkInfo.cachedPrototypeStructure = prototypeStructure;
- prototypeStructure->ref();
-
- repatchBuffer.repatch(methodCallLinkInfo.structureLabel, structure);
- repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoObj), proto);
- repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoStruct), prototypeStructure);
- repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckPutFunction), callee);
-
- repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id));
-}
-
-void JIT::patchPutByIdReplace(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress)
-{
- RepatchBuffer repatchBuffer(codeBlock);
-
- // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
- // Should probably go to cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
- repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_put_by_id_generic));
-
- int offset = sizeof(JSValue) * cachedOffset;
-
- // If we're patching to use inline storage, convert the initial load to a lea; this avoids the extra load
- // and makes the subsequent load's offset automatically correct
- if (structure->isUsingInlineStorage())
- repatchBuffer.repatchLoadPtrToLEA(stubInfo->hotPathBegin.instructionAtOffset(patchOffsetPutByIdExternalLoad));
-
- // Patch the offset into the propoerty map to load from, then patch the Structure to look for.
- repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetPutByIdStructure), structure);
- repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset1), offset); // payload
- repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset2), offset + 4); // tag
-}
-
-void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress)
-{
- StructureStubInfo* stubInfo = &m_codeBlock->getStubInfo(returnAddress);
-
- // regT0 holds a JSCell*
-
- // Check for array
- Jump failureCases1 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr));
-
- // Checks out okay! - get the length from the storage
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT2);
- load32(Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length)), regT2);
-
- Jump failureCases2 = branch32(Above, regT2, Imm32(INT_MAX));
- move(regT2, regT0);
- move(Imm32(JSValue::Int32Tag), regT1);
- Jump success = jump();
-
- LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
-
- // Use the patch information to link the failure cases back to the original slow case routine.
- CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
- patchBuffer.link(failureCases1, slowCaseBegin);
- patchBuffer.link(failureCases2, slowCaseBegin);
-
- // On success return back to the hot patch code, at a point it will perform the store to dest for us.
- patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
-
- // Track the stub we have created so that it will be deleted later.
- CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
- stubInfo->stubRoutine = entryLabel;
-
- // Finally patch the jump to slow case back in the hot path to jump here instead.
- CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
- RepatchBuffer repatchBuffer(m_codeBlock);
- repatchBuffer.relink(jumpLocation, entryLabel);
-
- // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
- repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_array_fail));
-}
-
-void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
-{
- // regT0 holds a JSCell*
-
- // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
- // referencing the prototype object - let's speculatively load it's table nice and early!)
- JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
-
- Jump failureCases1 = checkStructure(regT0, structure);
-
- // Check the prototype object's Structure had not changed.
- Structure** prototypeStructureAddress = &(protoObject->m_structure);
-#if CPU(X86_64)
- move(ImmPtr(prototypeStructure), regT3);
- Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
-#else
- Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
-#endif
-
- // Checks out okay! - getDirectOffset
- compileGetDirectOffset(protoObject, regT2, regT1, regT0, cachedOffset);
-
- Jump success = jump();
-
- LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
-
- // Use the patch information to link the failure cases back to the original slow case routine.
- CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
- patchBuffer.link(failureCases1, slowCaseBegin);
- patchBuffer.link(failureCases2, slowCaseBegin);
-
- // On success return back to the hot patch code, at a point it will perform the store to dest for us.
- patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
-
- // Track the stub we have created so that it will be deleted later.
- CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
- stubInfo->stubRoutine = entryLabel;
-
- // Finally patch the jump to slow case back in the hot path to jump here instead.
- CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
- RepatchBuffer repatchBuffer(m_codeBlock);
- repatchBuffer.relink(jumpLocation, entryLabel);
-
- // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
- repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
-}
-
-
-void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, size_t cachedOffset)
-{
- // regT0 holds a JSCell*
-
- Jump failureCase = checkStructure(regT0, structure);
- compileGetDirectOffset(regT0, regT1, regT0, structure, cachedOffset);
- Jump success = jump();
-
- LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
-
- // Use the patch information to link the failure cases back to the original slow case routine.
- CodeLocationLabel lastProtoBegin = polymorphicStructures->list[currentIndex - 1].stubRoutine;
- if (!lastProtoBegin)
- lastProtoBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
-
- patchBuffer.link(failureCase, lastProtoBegin);
-
- // On success return back to the hot patch code, at a point it will perform the store to dest for us.
- patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
-
- CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
-
- structure->ref();
- polymorphicStructures->list[currentIndex].set(entryLabel, structure);
-
- // Finally patch the jump to slow case back in the hot path to jump here instead.
- CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
- RepatchBuffer repatchBuffer(m_codeBlock);
- repatchBuffer.relink(jumpLocation, entryLabel);
-}
-
-void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, CallFrame* callFrame)
-{
- // regT0 holds a JSCell*
-
- // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
- // referencing the prototype object - let's speculatively load it's table nice and early!)
- JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
-
- // Check eax is an object of the right Structure.
- Jump failureCases1 = checkStructure(regT0, structure);
-
- // Check the prototype object's Structure had not changed.
- Structure** prototypeStructureAddress = &(protoObject->m_structure);
-#if CPU(X86_64)
- move(ImmPtr(prototypeStructure), regT3);
- Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
-#else
- Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
-#endif
-
- compileGetDirectOffset(protoObject, regT2, regT1, regT0, cachedOffset);
-
- Jump success = jump();
-
- LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
-
- // Use the patch information to link the failure cases back to the original slow case routine.
- CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
- patchBuffer.link(failureCases1, lastProtoBegin);
- patchBuffer.link(failureCases2, lastProtoBegin);
-
- // On success return back to the hot patch code, at a point it will perform the store to dest for us.
- patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
-
- CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
-
- structure->ref();
- prototypeStructure->ref();
- prototypeStructures->list[currentIndex].set(entryLabel, structure, prototypeStructure);
-
- // Finally patch the jump to slow case back in the hot path to jump here instead.
- CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
- RepatchBuffer repatchBuffer(m_codeBlock);
- repatchBuffer.relink(jumpLocation, entryLabel);
-}
-
-void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, CallFrame* callFrame)
+JIT::CodePtr JIT::stringGetByValStubGenerator(JSGlobalData* globalData, ExecutablePool* pool)
{
- // regT0 holds a JSCell*
-
- ASSERT(count);
-
- JumpList bucketsOfFail;
-
- // Check eax is an object of the right Structure.
- bucketsOfFail.append(checkStructure(regT0, structure));
-
- Structure* currStructure = structure;
- RefPtr<Structure>* chainEntries = chain->head();
- JSObject* protoObject = 0;
- for (unsigned i = 0; i < count; ++i) {
- protoObject = asObject(currStructure->prototypeForLookup(callFrame));
- currStructure = chainEntries[i].get();
-
- // Check the prototype object's Structure had not changed.
- Structure** prototypeStructureAddress = &(protoObject->m_structure);
-#if CPU(X86_64)
- move(ImmPtr(currStructure), regT3);
- bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3));
-#else
- bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
-#endif
- }
- ASSERT(protoObject);
-
- compileGetDirectOffset(protoObject, regT2, regT1, regT0, cachedOffset);
- Jump success = jump();
-
- LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
-
- // Use the patch information to link the failure cases back to the original slow case routine.
- CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
-
- patchBuffer.link(bucketsOfFail, lastProtoBegin);
+ JSInterfaceJIT jit;
+ JumpList failures;
+ failures.append(jit.branchPtr(NotEqual, Address(regT0), ImmPtr(globalData->jsStringVPtr)));
+ failures.append(jit.branchTest32(NonZero, Address(regT0, OBJECT_OFFSETOF(JSString, m_fiberCount))));
- // On success return back to the hot patch code, at a point it will perform the store to dest for us.
- patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
-
- CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
-
- // Track the stub we have created so that it will be deleted later.
- structure->ref();
- chain->ref();
- prototypeStructures->list[currentIndex].set(entryLabel, structure, chain);
-
- // Finally patch the jump to slow case back in the hot path to jump here instead.
- CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
- RepatchBuffer repatchBuffer(m_codeBlock);
- repatchBuffer.relink(jumpLocation, entryLabel);
-}
-
-void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
-{
- // regT0 holds a JSCell*
+ // Load the string length into regT2, and start the process of loading the character data pointer into regT0
+ jit.load32(Address(regT0, ThunkHelpers::jsStringLengthOffset()), regT2);
+ jit.loadPtr(Address(regT0, ThunkHelpers::jsStringValueOffset()), regT0);
+ jit.loadPtr(Address(regT0, ThunkHelpers::stringImplDataOffset()), regT0);
- ASSERT(count);
+ // Do an unsigned compare to simultaneously filter negative indices as well as indices that are too large
+ failures.append(jit.branch32(AboveOrEqual, regT1, regT2));
- JumpList bucketsOfFail;
-
- // Check eax is an object of the right Structure.
- bucketsOfFail.append(checkStructure(regT0, structure));
-
- Structure* currStructure = structure;
- RefPtr<Structure>* chainEntries = chain->head();
- JSObject* protoObject = 0;
- for (unsigned i = 0; i < count; ++i) {
- protoObject = asObject(currStructure->prototypeForLookup(callFrame));
- currStructure = chainEntries[i].get();
-
- // Check the prototype object's Structure had not changed.
- Structure** prototypeStructureAddress = &(protoObject->m_structure);
-#if CPU(X86_64)
- move(ImmPtr(currStructure), regT3);
- bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3));
-#else
- bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
-#endif
- }
- ASSERT(protoObject);
-
- compileGetDirectOffset(protoObject, regT2, regT1, regT0, cachedOffset);
- Jump success = jump();
-
- LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
-
- // Use the patch information to link the failure cases back to the original slow case routine.
- patchBuffer.link(bucketsOfFail, stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall));
-
- // On success return back to the hot patch code, at a point it will perform the store to dest for us.
- patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
-
- // Track the stub we have created so that it will be deleted later.
- CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
- stubInfo->stubRoutine = entryLabel;
-
- // Finally patch the jump to slow case back in the hot path to jump here instead.
- CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
- RepatchBuffer repatchBuffer(m_codeBlock);
- repatchBuffer.relink(jumpLocation, entryLabel);
-
- // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
- repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
-}
-
-/* ------------------------------ END: !ENABLE / ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
-
-#endif // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
-
-void JIT::compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, RegisterID structure, RegisterID offset)
-{
- ASSERT(sizeof(((Structure*)0)->m_propertyStorageCapacity) == sizeof(int32_t));
- ASSERT(sizeof(JSObject::inlineStorageCapacity) == sizeof(int32_t));
- ASSERT(sizeof(JSValue) == 8);
-
- Jump notUsingInlineStorage = branch32(NotEqual, Address(structure, OBJECT_OFFSETOF(Structure, m_propertyStorageCapacity)), Imm32(JSObject::inlineStorageCapacity));
- loadPtr(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSObject, m_inlineStorage)+OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayload);
- loadPtr(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSObject, m_inlineStorage)+OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTag);
- Jump finishedLoad = jump();
- notUsingInlineStorage.link(this);
- loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), base);
- loadPtr(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayload);
- loadPtr(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTag);
- finishedLoad.link(this);
-}
-
-void JIT::emit_op_get_by_pname(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned base = currentInstruction[2].u.operand;
- unsigned property = currentInstruction[3].u.operand;
- unsigned expected = currentInstruction[4].u.operand;
- unsigned iter = currentInstruction[5].u.operand;
- unsigned i = currentInstruction[6].u.operand;
-
- emitLoad2(property, regT1, regT0, base, regT3, regT2);
- emitJumpSlowCaseIfNotJSCell(property, regT1);
- addSlowCase(branchPtr(NotEqual, regT0, payloadFor(expected)));
- // Property registers are now available as the property is known
- emitJumpSlowCaseIfNotJSCell(base, regT3);
- emitLoadPayload(iter, regT1);
+ // Load the character
+ jit.load16(BaseIndex(regT0, regT1, TimesTwo, 0), regT0);
- // Test base's structure
- loadPtr(Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), regT0);
- addSlowCase(branchPtr(NotEqual, regT0, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructure))));
- load32(addressFor(i), regT3);
- sub32(Imm32(1), regT3);
- addSlowCase(branch32(AboveOrEqual, regT3, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_numCacheableSlots))));
- compileGetDirectOffset(regT2, regT1, regT0, regT0, regT3);
-
- emitStore(dst, regT1, regT0);
- map(m_bytecodeIndex + OPCODE_LENGTH(op_get_by_pname), dst, regT1, regT0);
-}
-
-void JIT::emitSlow_op_get_by_pname(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned base = currentInstruction[2].u.operand;
- unsigned property = currentInstruction[3].u.operand;
+ failures.append(jit.branch32(AboveOrEqual, regT0, Imm32(0x100)));
+ jit.move(ImmPtr(globalData->smallStrings.singleCharacterStrings()), regT1);
+ jit.loadPtr(BaseIndex(regT1, regT0, ScalePtr, 0), regT0);
+ jit.ret();
- linkSlowCaseIfNotJSCell(iter, property);
- linkSlowCase(iter);
- linkSlowCaseIfNotJSCell(iter, base);
- linkSlowCase(iter);
- linkSlowCase(iter);
+ failures.link(&jit);
+ jit.move(Imm32(0), regT0);
+ jit.ret();
- JITStubCall stubCall(this, cti_op_get_by_val);
- stubCall.addArgument(base);
- stubCall.addArgument(property);
- stubCall.call(dst);
+ LinkBuffer patchBuffer(&jit, pool, 0);
+ return patchBuffer.finalizeCode().m_code;
}
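
In outline, the stub above answers "abc"[i] entirely in generated code. A hedged C++ equivalent of its logic, with illustrative helper names (isRope/length/characterData paraphrase the loads above and are not the actual JSString API; singleCharacterStrings() is real, as used above):

// Returning 0 makes the caller fall back to the generic get_by_val path.
JSCell* stringGetByValFastPath(JSGlobalData* globalData, JSString* str, uint32_t index)
{
    if (isRope(str))                        // m_fiberCount != 0: unflattened rope, bail out
        return 0;
    if (index >= length(str))               // one unsigned compare also rejects negative indices
        return 0;
    uint16_t c = characterData(str)[index]; // the 16-bit load16 above
    if (c >= 0x100)                         // only Latin-1 characters have cached string cells
        return 0;
    return globalData->smallStrings.singleCharacterStrings()[c];
}
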
-#else // USE(JSVALUE32_64)
-
void JIT::emit_op_get_by_val(Instruction* currentInstruction)
{
unsigned dst = currentInstruction[1].u.operand;
@@ -1028,7 +89,7 @@ void JIT::emit_op_get_by_val(Instruction* currentInstruction)
emitGetVirtualRegisters(base, regT0, property, regT1);
emitJumpSlowCaseIfNotImmediateInteger(regT1);
-#if USE(JSVALUE64)
+
// This is technically incorrect - we're zero-extending an int32. On the hot path this doesn't matter.
// We check the value as if it was a uint32 against the m_vectorLength - which will always fail if
// number was signed since m_vectorLength is always less than intmax (since the total allocation
@@ -1036,9 +97,7 @@ void JIT::emit_op_get_by_val(Instruction* currentInstruction)
// to 64-bits is necessary since it's used in the address calculation. We zero extend rather than sign
// extending since it makes it easier to re-tag the value in the slow case.
zeroExtend32ToPtr(regT1, regT1);
-#else
- emitFastArithImmToInt(regT1);
-#endif
+
emitJumpSlowCaseIfNotJSCell(regT0, base);
addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));
@@ -1051,6 +110,34 @@ void JIT::emit_op_get_by_val(Instruction* currentInstruction)
emitPutVirtualRegister(dst);
}
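
To make the zero-extension argument in the comment above concrete: any negative int32 index, once zero-extended, is at least 2^31, while m_vectorLength always stays below 2^31, so the unsigned AboveOrEqual length check is guaranteed to send such indices to the slow case. A two-line check of that claim:

#include <cassert>
#include <cstdint>

int main()
{
    const uint32_t vectorLength = 0x7fffffffu;            // m_vectorLength is always < 2^31
    int32_t index = -1;                                   // any negative index
    uint64_t zeroExtended = static_cast<uint32_t>(index); // what zeroExtend32ToPtr produces
    assert(zeroExtended >= vectorLength);                 // AboveOrEqual -> slow case
    return 0;
}
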
+void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned base = currentInstruction[2].u.operand;
+ unsigned property = currentInstruction[3].u.operand;
+
+ linkSlowCase(iter); // property int32 check
+ linkSlowCaseIfNotJSCell(iter, base); // base cell check
+ Jump nonCell = jump();
+ linkSlowCase(iter); // base array check
+ Jump notString = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr));
+ emitNakedCall(m_globalData->getCTIStub(stringGetByValStubGenerator));
+ Jump failed = branchTestPtr(Zero, regT0);
+ emitPutVirtualRegister(dst, regT0);
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_get_by_val));
+ failed.link(this);
+ notString.link(this);
+ nonCell.link(this);
+
+ linkSlowCase(iter); // vector length check
+ linkSlowCase(iter); // empty value
+
+ JITStubCall stubCall(this, cti_op_get_by_val);
+ stubCall.addArgument(base, regT2);
+ stubCall.addArgument(property, regT2);
+ stubCall.call(dst);
+}
+
void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, RegisterID structure, RegisterID offset, RegisterID scratch)
{
ASSERT(sizeof(((Structure*)0)->m_propertyStorageCapacity) == sizeof(int32_t));
@@ -1115,18 +202,13 @@ void JIT::emit_op_put_by_val(Instruction* currentInstruction)
emitGetVirtualRegisters(base, regT0, property, regT1);
emitJumpSlowCaseIfNotImmediateInteger(regT1);
-#if USE(JSVALUE64)
// See comment in op_get_by_val.
zeroExtend32ToPtr(regT1, regT1);
-#else
- emitFastArithImmToInt(regT1);
-#endif
emitJumpSlowCaseIfNotJSCell(regT0, base);
addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));
addSlowCase(branch32(AboveOrEqual, regT1, Address(regT0, OBJECT_OFFSETOF(JSArray, m_vectorLength))));
loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT2);
-
Jump empty = branchTestPtr(Zero, BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
Label storeResult(this);
@@ -1218,10 +300,11 @@ void JIT::emit_op_put_by_id(Instruction* currentInstruction)
unsigned baseVReg = currentInstruction[1].u.operand;
Identifier* ident = &(m_codeBlock->identifier(currentInstruction[2].u.operand));
unsigned valueVReg = currentInstruction[3].u.operand;
+ unsigned direct = currentInstruction[8].u.operand;
emitGetVirtualRegisters(baseVReg, regT0, valueVReg, regT1);
- JITStubCall stubCall(this, cti_op_put_by_id_generic);
+ JITStubCall stubCall(this, direct ? cti_op_put_by_id_direct_generic : cti_op_put_by_id_generic);
stubCall.addArgument(regT0);
stubCall.addArgument(ImmPtr(ident));
stubCall.addArgument(regT1);
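
The new 'direct' flag in operand slot 8 selects put stubs that define the property on the base object itself rather than performing a full put. A hedged sketch of the distinction, with both helpers purely illustrative:

void putByIdSketch(JSObject* base, const Identifier& ident, JSValue value, bool direct)
{
    if (direct)
        defineOwnPropertyOn(base, ident, value); // illustrative: writes 'base' itself,
                                                 // never consulting prototype-chain setters
    else
        ordinaryPut(base, ident, value);         // illustrative: ordinary [[Put]], which
                                                 // may invoke an inherited setter
}
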
@@ -1289,7 +372,7 @@ void JIT::emit_op_method_check(Instruction* currentInstruction)
emitPutVirtualRegister(resultVReg);
// We've already generated the following get_by_id, so make sure it's skipped over.
- m_bytecodeIndex += OPCODE_LENGTH(op_get_by_id);
+ m_bytecodeOffset += OPCODE_LENGTH(op_get_by_id);
}
void JIT::emitSlow_op_method_check(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
@@ -1302,7 +385,7 @@ void JIT::emitSlow_op_method_check(Instruction* currentInstruction, Vector<SlowC
compileGetByIdSlowCase(resultVReg, baseVReg, ident, iter, true);
// We've already generated the following get_by_id, so make sure it's skipped over.
- m_bytecodeIndex += OPCODE_LENGTH(op_get_by_id);
+ m_bytecodeOffset += OPCODE_LENGTH(op_get_by_id);
}
#else //!ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
@@ -1441,13 +524,14 @@ void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCase
{
unsigned baseVReg = currentInstruction[1].u.operand;
Identifier* ident = &(m_codeBlock->identifier(currentInstruction[2].u.operand));
+ unsigned direct = currentInstruction[8].u.operand;
unsigned propertyAccessInstructionIndex = m_propertyAccessInstructionIndex++;
linkSlowCaseIfNotJSCell(iter, baseVReg);
linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_put_by_id);
+ JITStubCall stubCall(this, direct ? cti_op_put_by_id_direct : cti_op_put_by_id);
stubCall.addArgument(regT0);
stubCall.addArgument(ImmPtr(ident));
stubCall.addArgument(regT1);
@@ -1491,27 +575,36 @@ void JIT::compileGetDirectOffset(JSObject* base, RegisterID temp, RegisterID res
}
}
-void JIT::testPrototype(Structure* structure, JumpList& failureCases)
+void JIT::testPrototype(JSValue prototype, JumpList& failureCases)
{
- if (structure->m_prototype.isNull())
+ if (prototype.isNull())
return;
- move(ImmPtr(&asCell(structure->m_prototype)->m_structure), regT2);
- move(ImmPtr(asCell(structure->m_prototype)->m_structure), regT3);
- failureCases.append(branchPtr(NotEqual, Address(regT2), regT3));
+ // We have a special case for X86_64 here because X86 instructions that take immediate values
+ // only take 32-bit immediate values, whereas the pointer constants we are using here are 64-bit
+ // values. In the non-X86_64 case, the generated code is slightly more efficient because it uses
+ // two fewer instructions and doesn't require any scratch registers.
+#if CPU(X86_64)
+ move(ImmPtr(prototype.asCell()->structure()), regT3);
+ failureCases.append(branchPtr(NotEqual, AbsoluteAddress(&prototype.asCell()->m_structure), regT3));
+#else
+ failureCases.append(branchPtr(NotEqual, AbsoluteAddress(&prototype.asCell()->m_structure), ImmPtr(prototype.asCell()->structure())));
+#endif
}
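+
+// For reference, a minimal C++ sketch of the guard testPrototype() plants
+// (illustrative only, assuming the usual JSCell/Structure accessors): the
+// stub bails to the failure cases unless the prototype cell still has the
+// Structure the stub was compiled against.
+static inline bool prototypeStructureStillMatches(JSCell* prototypeCell, Structure* expected)
+{
+    // Emitted above as a single inline compare; on X86_64 the 64-bit
+    // 'expected' constant is first moved into the scratch register regT3.
+    return prototypeCell->structure() == expected;
+}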
-void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress)
+void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress, bool direct)
{
JumpList failureCases;
// Check eax is an object of the right Structure.
failureCases.append(emitJumpIfNotJSCell(regT0));
failureCases.append(branchPtr(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), ImmPtr(oldStructure)));
- testPrototype(oldStructure, failureCases);
+ testPrototype(oldStructure->storedPrototype(), failureCases);
// ecx = baseObject->m_structure
- for (RefPtr<Structure>* it = chain->head(); *it; ++it)
- testPrototype(it->get(), failureCases);
+ if (!direct) {
+ for (RefPtr<Structure>* it = chain->head(); *it; ++it)
+ testPrototype((*it)->storedPrototype(), failureCases);
+ }
Call callTarget;
@@ -1550,9 +643,9 @@ void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure
restoreArgumentReferenceForTrampoline();
Call failureCall = tailRecursiveCall();
- LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
- patchBuffer.link(failureCall, FunctionPtr(cti_op_put_by_id_fail));
+ patchBuffer.link(failureCall, FunctionPtr(direct ? cti_op_put_by_id_direct_fail : cti_op_put_by_id_fail));
if (willNeedStorageRealloc) {
ASSERT(m_calls.size() == 1);
@@ -1594,7 +687,6 @@ void JIT::patchMethodCallProto(CodeBlock* codeBlock, MethodCallLinkInfo& methodC
structure->ref();
Structure* prototypeStructure = proto->structure();
- ASSERT(!methodCallLinkInfo.cachedPrototypeStructure);
methodCallLinkInfo.cachedPrototypeStructure = prototypeStructure;
prototypeStructure->ref();
@@ -1606,13 +698,13 @@ void JIT::patchMethodCallProto(CodeBlock* codeBlock, MethodCallLinkInfo& methodC
repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id));
}
-void JIT::patchPutByIdReplace(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress)
+void JIT::patchPutByIdReplace(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress, bool direct)
{
RepatchBuffer repatchBuffer(codeBlock);
// We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
// Should probably go to cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
- repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_put_by_id_generic));
+ repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(direct ? cti_op_put_by_id_direct_generic : cti_op_put_by_id_generic));
int offset = sizeof(JSValue) * cachedOffset;
@@ -1634,15 +726,14 @@ void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress)
Jump failureCases1 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr));
// Checks out okay! - get the length from the storage
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT2);
- load32(Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length)), regT2);
-
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT3);
+ load32(Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_length)), regT2);
Jump failureCases2 = branch32(Above, regT2, Imm32(JSImmediate::maxImmediateInt));
emitFastArithIntToImmNoCheck(regT2, regT0);
Jump success = jump();
- LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
// Use the patch information to link the failure cases back to the original slow case routine.
CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
@@ -1665,7 +756,7 @@ void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress)
repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_array_fail));
}
-void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
+void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
{
// The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
// referencing the prototype object - let's speculatively load its table nice and early!)
@@ -1683,12 +774,29 @@ void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* str
Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
#endif
- // Checks out okay! - getDirectOffset
- compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
-
+ bool needsStubLink = false;
+
+ // Checks out okay!
+ if (slot.cachedPropertyType() == PropertySlot::Getter) {
+ needsStubLink = true;
+ compileGetDirectOffset(protoObject, regT1, regT1, cachedOffset);
+ JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
+ stubCall.addArgument(regT1);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.call();
+ } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
+ needsStubLink = true;
+ JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
+ stubCall.addArgument(ImmPtr(protoObject));
+ stubCall.addArgument(ImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
+ stubCall.addArgument(ImmPtr(const_cast<Identifier*>(&ident)));
+ stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.call();
+ } else
+ compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
Jump success = jump();
-
- LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
// Use the patch information to link the failure cases back to the original slow case routine.
CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
@@ -1698,6 +806,12 @@ void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* str
// On success return back to the hot patch code, at a point it will perform the store to dest for us.
patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
+ if (needsStubLink) {
+ for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
+ if (iter->to)
+ patchBuffer.link(iter->from, FunctionPtr(iter->to));
+ }
+ }
// Track the stub we have created so that it will be deleted later.
CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
stubInfo->stubRoutine = entryLabel;
@@ -1711,13 +825,42 @@ void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* str
repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
}
-void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, size_t cachedOffset)
+void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset)
{
Jump failureCase = checkStructure(regT0, structure);
- compileGetDirectOffset(regT0, regT0, structure, cachedOffset);
+ bool needsStubLink = false;
+ if (slot.cachedPropertyType() == PropertySlot::Getter) {
+ needsStubLink = true;
+ if (!structure->isUsingInlineStorage()) {
+ move(regT0, regT1);
+ compileGetDirectOffset(regT1, regT1, structure, cachedOffset);
+ } else
+ compileGetDirectOffset(regT0, regT1, structure, cachedOffset);
+ JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
+ stubCall.addArgument(regT1);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.call();
+ } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
+ needsStubLink = true;
+ JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(ImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
+ stubCall.addArgument(ImmPtr(const_cast<Identifier*>(&ident)));
+ stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.call();
+ } else
+ compileGetDirectOffset(regT0, regT0, structure, cachedOffset);
Jump success = jump();
- LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
+
+ if (needsStubLink) {
+ for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
+ if (iter->to)
+ patchBuffer.link(iter->from, FunctionPtr(iter->to));
+ }
+ }
// Use the patch information to link the failure cases back to the original slow case routine.
CodeLocationLabel lastProtoBegin = polymorphicStructures->list[currentIndex - 1].stubRoutine;
@@ -1740,7 +883,7 @@ void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, Polymorphic
repatchBuffer.relink(jumpLocation, entryLabel);
}
-void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, CallFrame* callFrame)
+void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, CallFrame* callFrame)
{
// The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
// referencing the prototype object - let's speculatively load its table nice and early!)
@@ -1758,12 +901,37 @@ void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, Polymorphi
Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
#endif
- // Checks out okay! - getDirectOffset
- compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
+ // Checks out okay!
+ bool needsStubLink = false;
+ if (slot.cachedPropertyType() == PropertySlot::Getter) {
+ needsStubLink = true;
+ compileGetDirectOffset(protoObject, regT1, regT1, cachedOffset);
+ JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
+ stubCall.addArgument(regT1);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.call();
+ } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
+ needsStubLink = true;
+ JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
+ stubCall.addArgument(ImmPtr(protoObject));
+ stubCall.addArgument(ImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
+ stubCall.addArgument(ImmPtr(const_cast<Identifier*>(&ident)));
+ stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.call();
+ } else
+ compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
Jump success = jump();
- LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
+
+ if (needsStubLink) {
+ for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
+ if (iter->to)
+ patchBuffer.link(iter->from, FunctionPtr(iter->to));
+ }
+ }
// Use the patch information to link the failure cases back to the original slow case routine.
CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
@@ -1785,10 +953,9 @@ void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, Polymorphi
repatchBuffer.relink(jumpLocation, entryLabel);
}
-void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, CallFrame* callFrame)
+void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, CallFrame* callFrame)
{
ASSERT(count);
-
JumpList bucketsOfFail;
// Check eax is an object of the right Structure.
@@ -1796,27 +963,44 @@ void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, Polymorphi
bucketsOfFail.append(baseObjectCheck);
Structure* currStructure = structure;
- RefPtr<Structure>* chainEntries = chain->head();
+ RefPtr<Structure>* it = chain->head();
JSObject* protoObject = 0;
- for (unsigned i = 0; i < count; ++i) {
+ for (unsigned i = 0; i < count; ++i, ++it) {
protoObject = asObject(currStructure->prototypeForLookup(callFrame));
- currStructure = chainEntries[i].get();
-
- // Check the prototype object's Structure had not changed.
- Structure** prototypeStructureAddress = &(protoObject->m_structure);
-#if CPU(X86_64)
- move(ImmPtr(currStructure), regT3);
- bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3));
-#else
- bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
-#endif
+ currStructure = it->get();
+ testPrototype(protoObject, bucketsOfFail);
}
ASSERT(protoObject);
-
- compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
+
+ bool needsStubLink = false;
+ if (slot.cachedPropertyType() == PropertySlot::Getter) {
+ needsStubLink = true;
+ compileGetDirectOffset(protoObject, regT1, regT1, cachedOffset);
+ JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
+ stubCall.addArgument(regT1);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.call();
+ } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
+ needsStubLink = true;
+ JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
+ stubCall.addArgument(ImmPtr(protoObject));
+ stubCall.addArgument(ImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
+ stubCall.addArgument(ImmPtr(const_cast<Identifier*>(&ident)));
+ stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.call();
+ } else
+ compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
Jump success = jump();
- LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
+
+ if (needsStubLink) {
+ for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
+ if (iter->to)
+ patchBuffer.link(iter->from, FunctionPtr(iter->to));
+ }
+ }
// Use the patch information to link the failure cases back to the original slow case routine.
CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
@@ -1839,37 +1023,54 @@ void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, Polymorphi
repatchBuffer.relink(jumpLocation, entryLabel);
}
-void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
+void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
{
ASSERT(count);
-
+
JumpList bucketsOfFail;
// Check eax is an object of the right Structure.
bucketsOfFail.append(checkStructure(regT0, structure));
Structure* currStructure = structure;
- RefPtr<Structure>* chainEntries = chain->head();
+ RefPtr<Structure>* it = chain->head();
JSObject* protoObject = 0;
- for (unsigned i = 0; i < count; ++i) {
+ for (unsigned i = 0; i < count; ++i, ++it) {
protoObject = asObject(currStructure->prototypeForLookup(callFrame));
- currStructure = chainEntries[i].get();
-
- // Check the prototype object's Structure had not changed.
- Structure** prototypeStructureAddress = &(protoObject->m_structure);
-#if CPU(X86_64)
- move(ImmPtr(currStructure), regT3);
- bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3));
-#else
- bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
-#endif
+ currStructure = it->get();
+ testPrototype(protoObject, bucketsOfFail);
}
ASSERT(protoObject);
- compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
+ bool needsStubLink = false;
+ if (slot.cachedPropertyType() == PropertySlot::Getter) {
+ needsStubLink = true;
+ compileGetDirectOffset(protoObject, regT1, regT1, cachedOffset);
+ JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
+ stubCall.addArgument(regT1);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.call();
+ } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
+ needsStubLink = true;
+ JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
+ stubCall.addArgument(ImmPtr(protoObject));
+ stubCall.addArgument(ImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
+ stubCall.addArgument(ImmPtr(const_cast<Identifier*>(&ident)));
+ stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.call();
+ } else
+ compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
Jump success = jump();
- LinkBuffer patchBuffer(this, m_codeBlock->executablePool());
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
+
+ if (needsStubLink) {
+ for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
+ if (iter->to)
+ patchBuffer.link(iter->from, FunctionPtr(iter->to));
+ }
+ }
// Use the patch information to link the failure cases back to the original slow case routine.
patchBuffer.link(bucketsOfFail, stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall));
@@ -1894,8 +1095,7 @@ void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* str
#endif // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
-#endif // USE(JSVALUE32_64)
-
} // namespace JSC
+#endif // USE(JSVALUE64)
#endif // ENABLE(JIT)
diff --git a/JavaScriptCore/jit/JITPropertyAccess32_64.cpp b/JavaScriptCore/jit/JITPropertyAccess32_64.cpp
new file mode 100644
index 0000000..710a155
--- /dev/null
+++ b/JavaScriptCore/jit/JITPropertyAccess32_64.cpp
@@ -0,0 +1,1186 @@
+/*
+ * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#if ENABLE(JIT)
+#if USE(JSVALUE32_64)
+#include "JIT.h"
+
+#include "CodeBlock.h"
+#include "JITInlineMethods.h"
+#include "JITStubCall.h"
+#include "JSArray.h"
+#include "JSFunction.h"
+#include "JSPropertyNameIterator.h"
+#include "Interpreter.h"
+#include "LinkBuffer.h"
+#include "RepatchBuffer.h"
+#include "ResultType.h"
+#include "SamplingTool.h"
+
+#ifndef NDEBUG
+#include <stdio.h>
+#endif
+
+using namespace std;
+
+namespace JSC {
+
+void JIT::emit_op_put_by_index(Instruction* currentInstruction)
+{
+ unsigned base = currentInstruction[1].u.operand;
+ unsigned property = currentInstruction[2].u.operand;
+ unsigned value = currentInstruction[3].u.operand;
+
+ JITStubCall stubCall(this, cti_op_put_by_index);
+ stubCall.addArgument(base);
+ stubCall.addArgument(Imm32(property));
+ stubCall.addArgument(value);
+ stubCall.call();
+}
+
+void JIT::emit_op_put_getter(Instruction* currentInstruction)
+{
+ unsigned base = currentInstruction[1].u.operand;
+ unsigned property = currentInstruction[2].u.operand;
+ unsigned function = currentInstruction[3].u.operand;
+
+ JITStubCall stubCall(this, cti_op_put_getter);
+ stubCall.addArgument(base);
+ stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(property)));
+ stubCall.addArgument(function);
+ stubCall.call();
+}
+
+void JIT::emit_op_put_setter(Instruction* currentInstruction)
+{
+ unsigned base = currentInstruction[1].u.operand;
+ unsigned property = currentInstruction[2].u.operand;
+ unsigned function = currentInstruction[3].u.operand;
+
+ JITStubCall stubCall(this, cti_op_put_setter);
+ stubCall.addArgument(base);
+ stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(property)));
+ stubCall.addArgument(function);
+ stubCall.call();
+}
+
+void JIT::emit_op_del_by_id(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned base = currentInstruction[2].u.operand;
+ unsigned property = currentInstruction[3].u.operand;
+
+ JITStubCall stubCall(this, cti_op_del_by_id);
+ stubCall.addArgument(base);
+ stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(property)));
+ stubCall.call(dst);
+}
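+
+// The four ops above share one shape: decode the operands that follow the
+// opcode word, then call out to a C++ stub. A sketch of that pattern with a
+// hypothetical opcode and stub function (cti_op_example is not a real stub):
+//
+//     void JIT::emit_op_example(Instruction* currentInstruction)
+//     {
+//         unsigned dst = currentInstruction[1].u.operand; // virtual register indices
+//         unsigned src = currentInstruction[2].u.operand;
+//
+//         JITStubCall stubCall(this, cti_op_example);
+//         stubCall.addArgument(src);
+//         stubCall.call(dst); // store the stub's return value into dst
+//     }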
+
+
+#if !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+
+/* ------------------------------ BEGIN: !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
+
+// Treat these as nops - the call will be handled as a regular get_by_id/op_call pair.
+void JIT::emit_op_method_check(Instruction*) {}
+void JIT::emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&) { ASSERT_NOT_REACHED(); }
+#if ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
+#error "JIT_OPTIMIZE_METHOD_CALLS requires JIT_OPTIMIZE_PROPERTY_ACCESS"
+#endif
+
+void JIT::emit_op_get_by_val(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned base = currentInstruction[2].u.operand;
+ unsigned property = currentInstruction[3].u.operand;
+
+ JITStubCall stubCall(this, cti_op_get_by_val);
+ stubCall.addArgument(base);
+ stubCall.addArgument(property);
+ stubCall.call(dst);
+}
+
+void JIT::emitSlow_op_get_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&)
+{
+ ASSERT_NOT_REACHED();
+}
+
+void JIT::emit_op_put_by_val(Instruction* currentInstruction)
+{
+ unsigned base = currentInstruction[1].u.operand;
+ unsigned property = currentInstruction[2].u.operand;
+ unsigned value = currentInstruction[3].u.operand;
+
+ JITStubCall stubCall(this, cti_op_put_by_val);
+ stubCall.addArgument(base);
+ stubCall.addArgument(property);
+ stubCall.addArgument(value);
+ stubCall.call();
+}
+
+void JIT::emitSlow_op_put_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&)
+{
+ ASSERT_NOT_REACHED();
+}
+
+void JIT::emit_op_get_by_id(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int base = currentInstruction[2].u.operand;
+ int ident = currentInstruction[3].u.operand;
+
+ JITStubCall stubCall(this, cti_op_get_by_id_generic);
+ stubCall.addArgument(base);
+ stubCall.addArgument(ImmPtr(&(m_codeBlock->identifier(ident))));
+ stubCall.call(dst);
+
+ m_propertyAccessInstructionIndex++;
+}
+
+void JIT::emitSlow_op_get_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&)
+{
+ m_propertyAccessInstructionIndex++;
+ ASSERT_NOT_REACHED();
+}
+
+void JIT::emit_op_put_by_id(Instruction* currentInstruction)
+{
+ int base = currentInstruction[1].u.operand;
+ int ident = currentInstruction[2].u.operand;
+ int value = currentInstruction[3].u.operand;
+
+ JITStubCall stubCall(this, cti_op_put_by_id_generic);
+ stubCall.addArgument(base);
+ stubCall.addArgument(ImmPtr(&(m_codeBlock->identifier(ident))));
+ stubCall.addArgument(value);
+ stubCall.call();
+
+ m_propertyAccessInstructionIndex++;
+}
+
+void JIT::emitSlow_op_put_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&)
+{
+ m_propertyAccessInstructionIndex++;
+ ASSERT_NOT_REACHED();
+}
+
+#else // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+
+/* ------------------------------ BEGIN: ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
+
+#if ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
+
+void JIT::emit_op_method_check(Instruction* currentInstruction)
+{
+ // Assert that the following instruction is a get_by_id.
+ ASSERT(m_interpreter->getOpcodeID((currentInstruction + OPCODE_LENGTH(op_method_check))->u.opcode) == op_get_by_id);
+
+ currentInstruction += OPCODE_LENGTH(op_method_check);
+
+ // Do the method check - check the object & its prototype's structure inline (this is the common case).
+ m_methodCallCompilationInfo.append(MethodCallCompilationInfo(m_propertyAccessInstructionIndex));
+ MethodCallCompilationInfo& info = m_methodCallCompilationInfo.last();
+
+ int dst = currentInstruction[1].u.operand;
+ int base = currentInstruction[2].u.operand;
+
+ emitLoad(base, regT1, regT0);
+ emitJumpSlowCaseIfNotJSCell(base, regT1);
+
+ BEGIN_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck);
+
+ Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), info.structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
+ DataLabelPtr protoStructureToCompare, protoObj = moveWithPatch(ImmPtr(0), regT2);
+ Jump protoStructureCheck = branchPtrWithPatch(NotEqual, Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), protoStructureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
+
+ // This will be relinked to load the function without doing a load.
+ DataLabelPtr putFunction = moveWithPatch(ImmPtr(0), regT0);
+
+ END_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck);
+
+ move(Imm32(JSValue::CellTag), regT1);
+ Jump match = jump();
+
+ ASSERT(differenceBetween(info.structureToCompare, protoObj) == patchOffsetMethodCheckProtoObj);
+ ASSERT(differenceBetween(info.structureToCompare, protoStructureToCompare) == patchOffsetMethodCheckProtoStruct);
+ ASSERT(differenceBetween(info.structureToCompare, putFunction) == patchOffsetMethodCheckPutFunction);
+
+ // Link the failure cases here.
+ structureCheck.link(this);
+ protoStructureCheck.link(this);
+
+ // Do a regular(ish) get_by_id (the slow case will be linked to
+ // cti_op_get_by_id_method_check instead of cti_op_get_by_id).
+ compileGetByIdHotPath();
+
+ match.link(this);
+ emitStore(dst, regT1, regT0);
+ map(m_bytecodeOffset + OPCODE_LENGTH(op_method_check), dst, regT1, regT0);
+
+ // We've already generated the following get_by_id, so make sure it's skipped over.
+ m_bytecodeOffset += OPCODE_LENGTH(op_get_by_id);
+}
+
+void JIT::emitSlow_op_method_check(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ currentInstruction += OPCODE_LENGTH(op_method_check);
+
+ int dst = currentInstruction[1].u.operand;
+ int base = currentInstruction[2].u.operand;
+ int ident = currentInstruction[3].u.operand;
+
+ compileGetByIdSlowCase(dst, base, &(m_codeBlock->identifier(ident)), iter, true);
+
+ // We've already generated the following get_by_id, so make sure it's skipped over.
+ m_bytecodeOffset += OPCODE_LENGTH(op_get_by_id);
+}
+
+#else //!ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
+
+// Treat these as nops - the call will be handled as a regular get_by_id/op_call pair.
+void JIT::emit_op_method_check(Instruction*) {}
+void JIT::emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&) { ASSERT_NOT_REACHED(); }
+
+#endif
+
+JIT::CodePtr JIT::stringGetByValStubGenerator(JSGlobalData* globalData, ExecutablePool* pool)
+{
+ JSInterfaceJIT jit;
+ JumpList failures;
+ failures.append(jit.branchPtr(NotEqual, Address(regT0), ImmPtr(globalData->jsStringVPtr)));
+ failures.append(jit.branchTest32(NonZero, Address(regT0, OBJECT_OFFSETOF(JSString, m_fiberCount))));
+
+ // Load string length to regT1, and start the process of loading the data pointer into regT0
+ jit.load32(Address(regT0, ThunkHelpers::jsStringLengthOffset()), regT1);
+ jit.loadPtr(Address(regT0, ThunkHelpers::jsStringValueOffset()), regT0);
+ jit.loadPtr(Address(regT0, ThunkHelpers::stringImplDataOffset()), regT0);
+
+ // Do an unsigned compare to simultaneously filter negative indices as well as indices that are too large
+ failures.append(jit.branch32(AboveOrEqual, regT2, regT1));
+
+ // Load the character
+ jit.load16(BaseIndex(regT0, regT2, TimesTwo, 0), regT0);
+
+ failures.append(jit.branch32(AboveOrEqual, regT0, Imm32(0x100)));
+ jit.move(ImmPtr(globalData->smallStrings.singleCharacterStrings()), regT1);
+ jit.loadPtr(BaseIndex(regT1, regT0, ScalePtr, 0), regT0);
+ jit.move(Imm32(JSValue::CellTag), regT1); // We null check regT0 on return so this is safe
+ jit.ret();
+
+ failures.link(&jit);
+ jit.move(Imm32(0), regT0);
+ jit.ret();
+
+ LinkBuffer patchBuffer(&jit, pool, 0);
+ return patchBuffer.finalizeCode().m_code;
+}
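+
+// In plain C++ the thunk above computes roughly the following (a sketch with
+// assumed accessors; the generated code reads the same fields through the raw
+// ThunkHelpers offsets):
+//
+//     static JSString* stringGetByValFastPath(JSGlobalData* globalData, JSString* string, unsigned index)
+//     {
+//         if (string->m_fiberCount) // a rope - take the generic path
+//             return 0;
+//         if (index >= string->length()) // unsigned compare also filters negative indices
+//             return 0;
+//         UChar character = stringData(string)[index]; // assumed accessor for the StringImpl data
+//         if (character >= 0x100) // only Latin-1 characters are cached
+//             return 0;
+//         return globalData->smallStrings.singleCharacterStrings()[character]; // may be 0; the caller null checks
+//     }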
+
+void JIT::emit_op_get_by_val(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned base = currentInstruction[2].u.operand;
+ unsigned property = currentInstruction[3].u.operand;
+
+ emitLoad2(base, regT1, regT0, property, regT3, regT2);
+
+ addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+ emitJumpSlowCaseIfNotJSCell(base, regT1);
+ addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));
+
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT3);
+ addSlowCase(branch32(AboveOrEqual, regT2, Address(regT0, OBJECT_OFFSETOF(JSArray, m_vectorLength))));
+
+ load32(BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1); // tag
+ load32(BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0); // payload
+ addSlowCase(branch32(Equal, regT1, Imm32(JSValue::EmptyValueTag)));
+
+ emitStore(dst, regT1, regT0);
+ map(m_bytecodeOffset + OPCODE_LENGTH(op_get_by_val), dst, regT1, regT0);
+}
+
+void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned base = currentInstruction[2].u.operand;
+ unsigned property = currentInstruction[3].u.operand;
+
+ linkSlowCase(iter); // property int32 check
+ linkSlowCaseIfNotJSCell(iter, base); // base cell check
+
+ Jump nonCell = jump();
+ linkSlowCase(iter); // base array check
+ Jump notString = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr));
+ emitNakedCall(m_globalData->getCTIStub(stringGetByValStubGenerator));
+ Jump failed = branchTestPtr(Zero, regT0);
+ emitStore(dst, regT1, regT0);
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_get_by_val));
+ failed.link(this);
+ notString.link(this);
+ nonCell.link(this);
+
+ linkSlowCase(iter); // vector length check
+ linkSlowCase(iter); // empty value
+
+ JITStubCall stubCall(this, cti_op_get_by_val);
+ stubCall.addArgument(base);
+ stubCall.addArgument(property);
+ stubCall.call(dst);
+}
+
+void JIT::emit_op_put_by_val(Instruction* currentInstruction)
+{
+ unsigned base = currentInstruction[1].u.operand;
+ unsigned property = currentInstruction[2].u.operand;
+ unsigned value = currentInstruction[3].u.operand;
+
+ emitLoad2(base, regT1, regT0, property, regT3, regT2);
+
+ addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+ emitJumpSlowCaseIfNotJSCell(base, regT1);
+ addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));
+ addSlowCase(branch32(AboveOrEqual, regT2, Address(regT0, OBJECT_OFFSETOF(JSArray, m_vectorLength))));
+
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT3);
+
+ Jump empty = branch32(Equal, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), Imm32(JSValue::EmptyValueTag));
+
+ Label storeResult(this);
+ emitLoad(value, regT1, regT0);
+ store32(regT0, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload))); // payload
+ store32(regT1, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag))); // tag
+ Jump end = jump();
+
+ empty.link(this);
+ add32(Imm32(1), Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
+ branch32(Below, regT2, Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_length))).linkTo(storeResult, this);
+
+ add32(Imm32(1), regT2, regT0);
+ store32(regT0, Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_length)));
+ jump().linkTo(storeResult, this);
+
+ end.link(this);
+}
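+
+// Layout assumed by the BaseIndex(..., TimesEight, ...) addressing above: in
+// the JSVALUE32_64 encoding each vector slot is one 8-byte JSValue split into
+// a 32-bit payload and a 32-bit tag. A self-contained sketch of that shape
+// (the name is assumed; it mirrors OBJECT_OFFSETOF(JSValue, u.asBits.payload/tag)):
+union ValueSlotSketch {
+    double asDouble;
+    struct {
+        int32_t payload; // low word on a little-endian target
+        int32_t tag;     // CellTag, Int32Tag, EmptyValueTag, ...
+    } asBits;
+};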
+
+void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned base = currentInstruction[1].u.operand;
+ unsigned property = currentInstruction[2].u.operand;
+ unsigned value = currentInstruction[3].u.operand;
+
+ linkSlowCase(iter); // property int32 check
+ linkSlowCaseIfNotJSCell(iter, base); // base cell check
+ linkSlowCase(iter); // base not array check
+ linkSlowCase(iter); // in vector check
+
+ JITStubCall stubPutByValCall(this, cti_op_put_by_val);
+ stubPutByValCall.addArgument(base);
+ stubPutByValCall.addArgument(property);
+ stubPutByValCall.addArgument(value);
+ stubPutByValCall.call();
+}
+
+void JIT::emit_op_get_by_id(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int base = currentInstruction[2].u.operand;
+
+ emitLoad(base, regT1, regT0);
+ emitJumpSlowCaseIfNotJSCell(base, regT1);
+ compileGetByIdHotPath();
+ emitStore(dst, regT1, regT0);
+ map(m_bytecodeOffset + OPCODE_LENGTH(op_get_by_id), dst, regT1, regT0);
+}
+
+void JIT::compileGetByIdHotPath()
+{
+ // As for put_by_id, get_by_id requires the offset of the Structure and the offset of the access to be patched.
+ // Additionally, for get_by_id we need to patch the offset of the branch to the slow case (we patch this to jump
+ // to the array-length / prototype access trampolines), and finally we also record the property-map access offset
+ // as a label to jump back to if one of these trampolines finds a match.
+
+ BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);
+
+ Label hotPathBegin(this);
+ m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;
+ m_propertyAccessInstructionIndex++;
+
+ DataLabelPtr structureToCompare;
+ Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
+ addSlowCase(structureCheck);
+ ASSERT(differenceBetween(hotPathBegin, structureToCompare) == patchOffsetGetByIdStructure);
+ ASSERT(differenceBetween(hotPathBegin, structureCheck) == patchOffsetGetByIdBranchToSlowCase);
+
+ Label externalLoad = loadPtrWithPatchToLEA(Address(regT0, OBJECT_OFFSETOF(JSObject, m_externalStorage)), regT2);
+ Label externalLoadComplete(this);
+ ASSERT(differenceBetween(hotPathBegin, externalLoad) == patchOffsetGetByIdExternalLoad);
+ ASSERT(differenceBetween(externalLoad, externalLoadComplete) == patchLengthGetByIdExternalLoad);
+
+ DataLabel32 displacementLabel1 = loadPtrWithAddressOffsetPatch(Address(regT2, patchGetByIdDefaultOffset), regT0); // payload
+ ASSERT(differenceBetween(hotPathBegin, displacementLabel1) == patchOffsetGetByIdPropertyMapOffset1);
+ DataLabel32 displacementLabel2 = loadPtrWithAddressOffsetPatch(Address(regT2, patchGetByIdDefaultOffset), regT1); // tag
+ ASSERT(differenceBetween(hotPathBegin, displacementLabel2) == patchOffsetGetByIdPropertyMapOffset2);
+
+ Label putResult(this);
+ ASSERT(differenceBetween(hotPathBegin, putResult) == patchOffsetGetByIdPutResult);
+
+ END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);
+}
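+
+// For reference, the patchable sequence laid down above has this fixed shape
+// (a sketch; the real offsets are asserted against the patchOffsetGetById*
+// constants):
+//
+//     hotPathBegin:  cmp  obj->m_structure, <patched Structure*>   ; branch to slow case on mismatch
+//                    mov  regT2, obj->m_externalStorage            ; repatched to a lea for inline storage
+//                    mov  regT0, [regT2 + <patched offset>]        ; payload
+//                    mov  regT1, [regT2 + <patched offset>]        ; tag
+//     putResult: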
+
+void JIT::emitSlow_op_get_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ int dst = currentInstruction[1].u.operand;
+ int base = currentInstruction[2].u.operand;
+ int ident = currentInstruction[3].u.operand;
+
+ compileGetByIdSlowCase(dst, base, &(m_codeBlock->identifier(ident)), iter);
+}
+
+void JIT::compileGetByIdSlowCase(int dst, int base, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, bool isMethodCheck)
+{
+ // As for the hot path of get_by_id, above, we ensure that we can use an architecture-specific offset
+ // so that we only need to track one pointer into the slow case code - we track a pointer to the location
+ // of the call (which we can use to look up the patch information), but should an array-length or
+ // prototype access trampoline fail we want to bail out back to here. To do so we can subtract back
+ // the distance from the call to the head of the slow case.
+ linkSlowCaseIfNotJSCell(iter, base);
+ linkSlowCase(iter);
+
+ BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase);
+
+#ifndef NDEBUG
+ Label coldPathBegin(this);
+#endif
+ JITStubCall stubCall(this, isMethodCheck ? cti_op_get_by_id_method_check : cti_op_get_by_id);
+ stubCall.addArgument(regT1, regT0);
+ stubCall.addArgument(ImmPtr(ident));
+ Call call = stubCall.call(dst);
+
+ END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase);
+
+ ASSERT(differenceBetween(coldPathBegin, call) == patchOffsetGetByIdSlowCaseCall);
+
+ // Track the location of the call; this will be used to recover patch information.
+ m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].callReturnLocation = call;
+ m_propertyAccessInstructionIndex++;
+}
+
+void JIT::emit_op_put_by_id(Instruction* currentInstruction)
+{
+ // In order to be able to patch both the Structure and the object offset, we store one pointer,
+ // 'hotPathBegin', to just after the arguments have been loaded into registers, and we generate code
+ // such that the Structure & offset are always at the same distance from this.
+
+ int base = currentInstruction[1].u.operand;
+ int value = currentInstruction[3].u.operand;
+
+ emitLoad2(base, regT1, regT0, value, regT3, regT2);
+
+ emitJumpSlowCaseIfNotJSCell(base, regT1);
+
+ BEGIN_UNINTERRUPTED_SEQUENCE(sequencePutById);
+
+ Label hotPathBegin(this);
+ m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;
+ m_propertyAccessInstructionIndex++;
+
+ // It is important that the following instruction plants a 32-bit immediate, in order that it can be patched over.
+ DataLabelPtr structureToCompare;
+ addSlowCase(branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))));
+ ASSERT(differenceBetween(hotPathBegin, structureToCompare) == patchOffsetPutByIdStructure);
+
+ // Plant a load from a bogus offset in the object's property map; we will patch this later, if it is to be used.
+ Label externalLoad = loadPtrWithPatchToLEA(Address(regT0, OBJECT_OFFSETOF(JSObject, m_externalStorage)), regT0);
+ Label externalLoadComplete(this);
+ ASSERT(differenceBetween(hotPathBegin, externalLoad) == patchOffsetPutByIdExternalLoad);
+ ASSERT(differenceBetween(externalLoad, externalLoadComplete) == patchLengthPutByIdExternalLoad);
+
+ DataLabel32 displacementLabel1 = storePtrWithAddressOffsetPatch(regT2, Address(regT0, patchGetByIdDefaultOffset)); // payload
+ DataLabel32 displacementLabel2 = storePtrWithAddressOffsetPatch(regT3, Address(regT0, patchGetByIdDefaultOffset)); // tag
+
+ END_UNINTERRUPTED_SEQUENCE(sequencePutById);
+
+ ASSERT(differenceBetween(hotPathBegin, displacementLabel1) == patchOffsetPutByIdPropertyMapOffset1);
+ ASSERT(differenceBetween(hotPathBegin, displacementLabel2) == patchOffsetPutByIdPropertyMapOffset2);
+}
+
+void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ int base = currentInstruction[1].u.operand;
+ int ident = currentInstruction[2].u.operand;
+ int direct = currentInstruction[8].u.operand;
+
+ linkSlowCaseIfNotJSCell(iter, base);
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, direct ? cti_op_put_by_id_direct : cti_op_put_by_id);
+ stubCall.addArgument(regT1, regT0);
+ stubCall.addArgument(ImmPtr(&(m_codeBlock->identifier(ident))));
+ stubCall.addArgument(regT3, regT2);
+ Call call = stubCall.call();
+
+ // Track the location of the call; this will be used to recover patch information.
+ m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].callReturnLocation = call;
+ m_propertyAccessInstructionIndex++;
+}
+
+// Compile a store into an object's property storage. May overwrite base.
+void JIT::compilePutDirectOffset(RegisterID base, RegisterID valueTag, RegisterID valuePayload, Structure* structure, size_t cachedOffset)
+{
+ int offset = cachedOffset;
+ if (structure->isUsingInlineStorage())
+ offset += OBJECT_OFFSETOF(JSObject, m_inlineStorage) / sizeof(Register);
+ else
+ loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), base);
+ emitStore(offset, valueTag, valuePayload, base);
+}
+
+// Compile a load from an object's property storage. May overwrite base.
+void JIT::compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, Structure* structure, size_t cachedOffset)
+{
+ int offset = cachedOffset;
+ if (structure->isUsingInlineStorage())
+ offset += OBJECT_OFFSETOF(JSObject, m_inlineStorage) / sizeof(Register);
+ else
+ loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), base);
+ emitLoad(offset, resultTag, resultPayload, base);
+}
+
+void JIT::compileGetDirectOffset(JSObject* base, RegisterID temp, RegisterID resultTag, RegisterID resultPayload, size_t cachedOffset)
+{
+ if (base->isUsingInlineStorage()) {
+ load32(reinterpret_cast<char*>(&base->m_inlineStorage[cachedOffset]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload), resultPayload);
+ load32(reinterpret_cast<char*>(&base->m_inlineStorage[cachedOffset]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag), resultTag);
+ return;
+ }
+
+ size_t offset = cachedOffset * sizeof(JSValue);
+
+ PropertyStorage* protoPropertyStorage = &base->m_externalStorage;
+ loadPtr(static_cast<void*>(protoPropertyStorage), temp);
+ load32(Address(temp, offset + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayload);
+ load32(Address(temp, offset + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTag);
+}
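+
+// The three helpers above all encode the same storage rule. A sketch of that
+// rule in plain C++ (field access and types assumed; the JIT bakes the choice
+// into the generated loads and stores instead of branching at runtime):
+//
+//     EncodedJSValue* storage = base->isUsingInlineStorage() ? base->m_inlineStorage : base->m_externalStorage;
+//     JSValue result = JSValue::decode(storage[cachedOffset]);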
+
+void JIT::testPrototype(JSValue prototype, JumpList& failureCases)
+{
+ if (prototype.isNull())
+ return;
+
+ // We have a special case for X86_64 here because X86 instructions that take immediate values
+ // only take 32-bit immediate values, whereas the pointer constants we are using here are 64-bit
+ // values. In the non-X86_64 case, the generated code is slightly more efficient because it uses
+ // two fewer instructions and doesn't require any scratch registers.
+#if CPU(X86_64)
+ move(ImmPtr(prototype.asCell()->structure()), regT3);
+ failureCases.append(branchPtr(NotEqual, AbsoluteAddress(&prototype.asCell()->m_structure), regT3));
+#else
+ failureCases.append(branchPtr(NotEqual, AbsoluteAddress(&prototype.asCell()->m_structure), ImmPtr(prototype.asCell()->structure())));
+#endif
+}
+
+void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress, bool direct)
+{
+ // It is assumed that regT0 contains the basePayload and regT1 contains the baseTag. The value can be found on the stack.
+
+ JumpList failureCases;
+ failureCases.append(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));
+ failureCases.append(branchPtr(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), ImmPtr(oldStructure)));
+ testPrototype(oldStructure->storedPrototype(), failureCases);
+
+ if (!direct) {
+ // Verify that nothing in the prototype chain has a setter for this property.
+ for (RefPtr<Structure>* it = chain->head(); *it; ++it)
+ testPrototype((*it)->storedPrototype(), failureCases);
+ }
+
+ // Reallocate property storage if needed.
+ Call callTarget;
+ bool willNeedStorageRealloc = oldStructure->propertyStorageCapacity() != newStructure->propertyStorageCapacity();
+ if (willNeedStorageRealloc) {
+ // This trampoline is called like a JIT stub; before we can call again we need to
+ // remove the return address from the stack, to prevent the stack from becoming misaligned.
+ preserveReturnAddressAfterCall(regT3);
+
+ JITStubCall stubCall(this, cti_op_put_by_id_transition_realloc);
+ stubCall.skipArgument(); // base
+ stubCall.skipArgument(); // ident
+ stubCall.skipArgument(); // value
+ stubCall.addArgument(Imm32(oldStructure->propertyStorageCapacity()));
+ stubCall.addArgument(Imm32(newStructure->propertyStorageCapacity()));
+ stubCall.call(regT0);
+
+ restoreReturnAddressBeforeReturn(regT3);
+ }
+
+ sub32(Imm32(1), AbsoluteAddress(oldStructure->addressOfCount()));
+ add32(Imm32(1), AbsoluteAddress(newStructure->addressOfCount()));
+ storePtr(ImmPtr(newStructure), Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)));
+
+#if CPU(MIPS)
+ // For MIPS, we don't add sizeof(void*) to the stack offset.
+ load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[2]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT3);
+ load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[2]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT2);
+#else
+ load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[2]) + sizeof(void*) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT3);
+ load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[2]) + sizeof(void*) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT2);
+#endif
+
+ // Write the value
+ compilePutDirectOffset(regT0, regT2, regT3, newStructure, cachedOffset);
+
+ ret();
+
+ ASSERT(!failureCases.empty());
+ failureCases.link(this);
+ restoreArgumentReferenceForTrampoline();
+ Call failureCall = tailRecursiveCall();
+
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
+
+ patchBuffer.link(failureCall, FunctionPtr(direct ? cti_op_put_by_id_direct_fail : cti_op_put_by_id_fail));
+
+ if (willNeedStorageRealloc) {
+ ASSERT(m_calls.size() == 1);
+ patchBuffer.link(m_calls[0].from, FunctionPtr(cti_op_put_by_id_transition_realloc));
+ }
+
+ CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
+ stubInfo->stubRoutine = entryLabel;
+ RepatchBuffer repatchBuffer(m_codeBlock);
+ repatchBuffer.relinkCallerToTrampoline(returnAddress, entryLabel);
+}
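+
+// In C++ terms the transition stub generated above does roughly this (a
+// sketch with assumed helpers; the generated code has already re-checked the
+// structure and, for non-direct puts, the whole prototype chain):
+//
+//     if (oldStructure->propertyStorageCapacity() != newStructure->propertyStorageCapacity())
+//         object->allocatePropertyStorage(oldStructure->propertyStorageCapacity(), newStructure->propertyStorageCapacity());
+//     // move the refcount from the old Structure to the new one (the sub32/add32 above)
+//     object->setStructure(newStructure);           // assumed setter for m_structure
+//     object->putDirectOffset(cachedOffset, value); // assumed helper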
+
+void JIT::patchGetByIdSelf(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress)
+{
+ RepatchBuffer repatchBuffer(codeBlock);
+
+ // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
+ // Should probably go to JITStubs::cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
+ repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_self_fail));
+
+ int offset = sizeof(JSValue) * cachedOffset;
+
+ // If we're patching to use inline storage, convert the initial load to a lea; this avoids the extra load
+ // and makes the subsequent load's offset automatically correct
+ if (structure->isUsingInlineStorage())
+ repatchBuffer.repatchLoadPtrToLEA(stubInfo->hotPathBegin.instructionAtOffset(patchOffsetGetByIdExternalLoad));
+
+ // Patch the offset into the property map to load from, then patch the Structure to look for.
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetGetByIdStructure), structure);
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetGetByIdPropertyMapOffset1), offset + OBJECT_OFFSETOF(JSValue, u.asBits.payload)); // payload
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetGetByIdPropertyMapOffset2), offset + OBJECT_OFFSETOF(JSValue, u.asBits.tag)); // tag
+}
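+
+// Worked example of the displacement arithmetic used when repatching above
+// (assuming 8-byte JSValues and a little-endian payload/tag order): for
+// cachedOffset == 3 the payload load is patched to 3 * 8 + 0 == 24 and the
+// tag load to 3 * 8 + 4 == 28.
+static inline int payloadDisplacementSketch(size_t cachedOffset)
+{
+    return sizeof(JSValue) * cachedOffset + OBJECT_OFFSETOF(JSValue, u.asBits.payload);
+}
+static inline int tagDisplacementSketch(size_t cachedOffset)
+{
+    return sizeof(JSValue) * cachedOffset + OBJECT_OFFSETOF(JSValue, u.asBits.tag);
+}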
+
+void JIT::patchMethodCallProto(CodeBlock* codeBlock, MethodCallLinkInfo& methodCallLinkInfo, JSFunction* callee, Structure* structure, JSObject* proto, ReturnAddressPtr returnAddress)
+{
+ RepatchBuffer repatchBuffer(codeBlock);
+
+ ASSERT(!methodCallLinkInfo.cachedStructure);
+ methodCallLinkInfo.cachedStructure = structure;
+ structure->ref();
+
+ Structure* prototypeStructure = proto->structure();
+ methodCallLinkInfo.cachedPrototypeStructure = prototypeStructure;
+ prototypeStructure->ref();
+
+ repatchBuffer.repatch(methodCallLinkInfo.structureLabel, structure);
+ repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoObj), proto);
+ repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoStruct), prototypeStructure);
+ repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckPutFunction), callee);
+
+ repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id));
+}
+
+void JIT::patchPutByIdReplace(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress, bool direct)
+{
+ RepatchBuffer repatchBuffer(codeBlock);
+
+ // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
+ // Should probably go to cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
+ repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(direct ? cti_op_put_by_id_direct_generic : cti_op_put_by_id_generic));
+
+ int offset = sizeof(JSValue) * cachedOffset;
+
+ // If we're patching to use inline storage, convert the initial load to a lea; this avoids the extra load
+ // and makes the subsequent load's offset automatically correct
+ if (structure->isUsingInlineStorage())
+ repatchBuffer.repatchLoadPtrToLEA(stubInfo->hotPathBegin.instructionAtOffset(patchOffsetPutByIdExternalLoad));
+
+ // Patch the offset into the property map to load from, then patch the Structure to look for.
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetPutByIdStructure), structure);
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset1), offset + OBJECT_OFFSETOF(JSValue, u.asBits.payload)); // payload
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset2), offset + OBJECT_OFFSETOF(JSValue, u.asBits.tag)); // tag
+}
+
+void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress)
+{
+ StructureStubInfo* stubInfo = &m_codeBlock->getStubInfo(returnAddress);
+
+ // regT0 holds a JSCell*
+
+ // Check for array
+ Jump failureCases1 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr));
+
+ // Checks out okay! - get the length from the storage
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT2);
+ load32(Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length)), regT2);
+
+ Jump failureCases2 = branch32(Above, regT2, Imm32(INT_MAX));
+ move(regT2, regT0);
+ move(Imm32(JSValue::Int32Tag), regT1);
+ Jump success = jump();
+
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
+
+ // Use the patch information to link the failure cases back to the original slow case routine.
+ CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
+ patchBuffer.link(failureCases1, slowCaseBegin);
+ patchBuffer.link(failureCases2, slowCaseBegin);
+
+ // On success return back to the hot patch code, at a point it will perform the store to dest for us.
+ patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
+
+ // Track the stub we have created so that it will be deleted later.
+ CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
+ stubInfo->stubRoutine = entryLabel;
+
+ // Finally patch the jump to slow case back in the hot path to jump here instead.
+ CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+ RepatchBuffer repatchBuffer(m_codeBlock);
+ repatchBuffer.relink(jumpLocation, entryLabel);
+
+ // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
+ repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_array_fail));
+}
+
+void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
+{
+ // regT0 holds a JSCell*
+
+ // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
+ // referencing the prototype object - let's speculatively load its table nice and early!)
+ JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
+
+ Jump failureCases1 = checkStructure(regT0, structure);
+
+ // Check the prototype object's Structure has not changed.
+ Structure** prototypeStructureAddress = &(protoObject->m_structure);
+#if CPU(X86_64)
+ move(ImmPtr(prototypeStructure), regT3);
+ Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
+#else
+ Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
+#endif
+ bool needsStubLink = false;
+ // Checks out okay!
+ if (slot.cachedPropertyType() == PropertySlot::Getter) {
+ needsStubLink = true;
+ compileGetDirectOffset(protoObject, regT2, regT2, regT1, cachedOffset);
+ JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
+ stubCall.addArgument(regT1);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.call();
+ } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
+ needsStubLink = true;
+ JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
+ stubCall.addArgument(ImmPtr(protoObject));
+ stubCall.addArgument(ImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
+ stubCall.addArgument(ImmPtr(const_cast<Identifier*>(&ident)));
+ stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.call();
+ } else
+ compileGetDirectOffset(protoObject, regT2, regT1, regT0, cachedOffset);
+
+ Jump success = jump();
+
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
+
+ // Use the patch information to link the failure cases back to the original slow case routine.
+ CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
+ patchBuffer.link(failureCases1, slowCaseBegin);
+ patchBuffer.link(failureCases2, slowCaseBegin);
+
+ // On success return back to the hot patch code, at a point it will perform the store to dest for us.
+ patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
+
+ if (needsStubLink) {
+ for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
+ if (iter->to)
+ patchBuffer.link(iter->from, FunctionPtr(iter->to));
+ }
+ }
+
+ // Track the stub we have created so that it will be deleted later.
+ CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
+ stubInfo->stubRoutine = entryLabel;
+
+ // Finally patch the jump to slow case back in the hot path to jump here instead.
+ CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+ RepatchBuffer repatchBuffer(m_codeBlock);
+ repatchBuffer.relink(jumpLocation, entryLabel);
+
+ // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
+ repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
+}
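
For orientation, the stub compiled above behaves like the C++ sketch below. CachedProtoStub and Obj are hypothetical simplifications of the real machinery: the generated stub works on raw registers and patched immediates, and the Getter/Custom branches call out to cti stubs instead of loading a slot directly.

    #include <cstddef>
    #include <cstdint>

    struct Structure;

    struct Obj {
        Structure* structure;
        uint64_t* propertyStorage;
    };

    // Hypothetical model of the monomorphic proto stub: two structure
    // checks, then a direct load from the prototype's property storage.
    struct CachedProtoStub {
        Structure* selfStructure;   // patched immediate in the real stub
        Structure* protoStructure;  // patched immediate in the real stub
        Obj* protoObject;           // baked in when the stub is compiled
        size_t cachedOffset;

        bool tryGet(const Obj* base, uint64_t& result) const {
            if (base->structure != selfStructure)
                return false;       // failureCases1 -> slow case
            if (protoObject->structure != protoStructure)
                return false;       // failureCases2 -> slow case
            result = protoObject->propertyStorage[cachedOffset];
            return true;            // success -> back to the hot path
        }
    };
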
+
+
+void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset)
+{
+ // regT0 holds a JSCell*
+ Jump failureCase = checkStructure(regT0, structure);
+ bool needsStubLink = false;
+ if (slot.cachedPropertyType() == PropertySlot::Getter) {
+ needsStubLink = true;
+ if (!structure->isUsingInlineStorage()) {
+ move(regT0, regT1);
+ compileGetDirectOffset(regT1, regT2, regT1, structure, cachedOffset);
+ } else
+ compileGetDirectOffset(regT0, regT2, regT1, structure, cachedOffset);
+ JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
+ stubCall.addArgument(regT1);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.call();
+ } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
+ needsStubLink = true;
+ JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(ImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
+ stubCall.addArgument(ImmPtr(const_cast<Identifier*>(&ident)));
+ stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.call();
+ } else
+ compileGetDirectOffset(regT0, regT1, regT0, structure, cachedOffset);
+
+ Jump success = jump();
+
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
+ if (needsStubLink) {
+ for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
+ if (iter->to)
+ patchBuffer.link(iter->from, FunctionPtr(iter->to));
+ }
+ }
+ // Use the patch information to link the failure cases back to the original slow case routine.
+ CodeLocationLabel lastProtoBegin = polymorphicStructures->list[currentIndex - 1].stubRoutine;
+ if (!lastProtoBegin)
+ lastProtoBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
+
+ patchBuffer.link(failureCase, lastProtoBegin);
+
+    // On success, return to the hot path code, at a point where it will perform the store to dest for us.
+ patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
+
+ CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
+
+ structure->ref();
+ polymorphicStructures->list[currentIndex].set(entryLabel, structure);
+
+ // Finally patch the jump to slow case back in the hot path to jump here instead.
+ CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+ RepatchBuffer repatchBuffer(m_codeBlock);
+ repatchBuffer.relink(jumpLocation, entryLabel);
+}
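
The self-list stub above extends a polymorphic chain: its failure branch links to the previously generated stub (lastProtoBegin), or to the slow case for the first entry, while the hot-path jump is repatched to the newest stub. The effective lookup order is modeled by this hedged sketch (types are hypothetical):

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    struct Structure;

    struct PolymorphicEntry {
        Structure* structure;
        size_t offset;
    };

    // Newest entry is tried first, exactly as the newest stub is the one
    // the hot-path jump points at; each miss falls through to the stub
    // generated before it, and the chain ends at the slow case routine.
    inline bool polymorphicGet(const std::vector<PolymorphicEntry>& list,
                               Structure* actualStructure,
                               const uint64_t* storage,
                               uint64_t& result)
    {
        for (auto it = list.rbegin(); it != list.rend(); ++it) {
            if (it->structure == actualStructure) {
                result = storage[it->offset];
                return true;
            }
        }
        return false; // fell off the chain: original slow case
    }
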
+
+void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, CallFrame* callFrame)
+{
+ // regT0 holds a JSCell*
+
+    // The prototype object definitely exists (if this stub exists, the CodeBlock is referencing a Structure that is
+    // referencing the prototype object - let's speculatively load its table nice and early!)
+ JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
+
+    // Check that regT0 holds an object of the right Structure.
+ Jump failureCases1 = checkStructure(regT0, structure);
+
+    // Check that the prototype object's Structure has not changed.
+ Structure** prototypeStructureAddress = &(protoObject->m_structure);
+#if CPU(X86_64)
+ move(ImmPtr(prototypeStructure), regT3);
+ Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
+#else
+ Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
+#endif
+
+ bool needsStubLink = false;
+ if (slot.cachedPropertyType() == PropertySlot::Getter) {
+ needsStubLink = true;
+ compileGetDirectOffset(protoObject, regT2, regT2, regT1, cachedOffset);
+ JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
+ stubCall.addArgument(regT1);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.call();
+ } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
+ needsStubLink = true;
+ JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
+ stubCall.addArgument(ImmPtr(protoObject));
+ stubCall.addArgument(ImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
+ stubCall.addArgument(ImmPtr(const_cast<Identifier*>(&ident)));
+ stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.call();
+ } else
+ compileGetDirectOffset(protoObject, regT2, regT1, regT0, cachedOffset);
+
+ Jump success = jump();
+
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
+ if (needsStubLink) {
+ for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
+ if (iter->to)
+ patchBuffer.link(iter->from, FunctionPtr(iter->to));
+ }
+ }
+ // Use the patch information to link the failure cases back to the original slow case routine.
+ CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
+ patchBuffer.link(failureCases1, lastProtoBegin);
+ patchBuffer.link(failureCases2, lastProtoBegin);
+
+    // On success, return to the hot path code, at a point where it will perform the store to dest for us.
+ patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
+
+ CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
+
+ structure->ref();
+ prototypeStructure->ref();
+ prototypeStructures->list[currentIndex].set(entryLabel, structure, prototypeStructure);
+
+ // Finally patch the jump to slow case back in the hot path to jump here instead.
+ CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+ RepatchBuffer repatchBuffer(m_codeBlock);
+ repatchBuffer.relink(jumpLocation, entryLabel);
+}
+
+void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, CallFrame* callFrame)
+{
+ // regT0 holds a JSCell*
+ ASSERT(count);
+
+ JumpList bucketsOfFail;
+
+    // Check that regT0 holds an object of the right Structure.
+ bucketsOfFail.append(checkStructure(regT0, structure));
+
+ Structure* currStructure = structure;
+ RefPtr<Structure>* it = chain->head();
+ JSObject* protoObject = 0;
+ for (unsigned i = 0; i < count; ++i, ++it) {
+ protoObject = asObject(currStructure->prototypeForLookup(callFrame));
+ currStructure = it->get();
+ testPrototype(protoObject, bucketsOfFail);
+ }
+ ASSERT(protoObject);
+
+ bool needsStubLink = false;
+ if (slot.cachedPropertyType() == PropertySlot::Getter) {
+ needsStubLink = true;
+ compileGetDirectOffset(protoObject, regT2, regT2, regT1, cachedOffset);
+ JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
+ stubCall.addArgument(regT1);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.call();
+ } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
+ needsStubLink = true;
+ JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
+ stubCall.addArgument(ImmPtr(protoObject));
+ stubCall.addArgument(ImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
+ stubCall.addArgument(ImmPtr(const_cast<Identifier*>(&ident)));
+ stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.call();
+ } else
+ compileGetDirectOffset(protoObject, regT2, regT1, regT0, cachedOffset);
+
+ Jump success = jump();
+
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
+ if (needsStubLink) {
+ for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
+ if (iter->to)
+ patchBuffer.link(iter->from, FunctionPtr(iter->to));
+ }
+ }
+ // Use the patch information to link the failure cases back to the original slow case routine.
+ CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
+
+ patchBuffer.link(bucketsOfFail, lastProtoBegin);
+
+    // On success, return to the hot path code, at a point where it will perform the store to dest for us.
+ patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
+
+ CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
+
+ // Track the stub we have created so that it will be deleted later.
+ structure->ref();
+ chain->ref();
+ prototypeStructures->list[currentIndex].set(entryLabel, structure, chain);
+
+ // Finally patch the jump to slow case back in the hot path to jump here instead.
+ CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+ RepatchBuffer repatchBuffer(m_codeBlock);
+ repatchBuffer.relink(jumpLocation, entryLabel);
+}
+
+void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
+{
+ // regT0 holds a JSCell*
+ ASSERT(count);
+
+ JumpList bucketsOfFail;
+
+    // Check that regT0 holds an object of the right Structure.
+ bucketsOfFail.append(checkStructure(regT0, structure));
+
+ Structure* currStructure = structure;
+ RefPtr<Structure>* it = chain->head();
+ JSObject* protoObject = 0;
+ for (unsigned i = 0; i < count; ++i, ++it) {
+ protoObject = asObject(currStructure->prototypeForLookup(callFrame));
+ currStructure = it->get();
+ testPrototype(protoObject, bucketsOfFail);
+ }
+ ASSERT(protoObject);
+
+ bool needsStubLink = false;
+ if (slot.cachedPropertyType() == PropertySlot::Getter) {
+ needsStubLink = true;
+ compileGetDirectOffset(protoObject, regT2, regT2, regT1, cachedOffset);
+ JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
+ stubCall.addArgument(regT1);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.call();
+ } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
+ needsStubLink = true;
+ JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
+ stubCall.addArgument(ImmPtr(protoObject));
+ stubCall.addArgument(ImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
+ stubCall.addArgument(ImmPtr(const_cast<Identifier*>(&ident)));
+ stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.call();
+ } else
+ compileGetDirectOffset(protoObject, regT2, regT1, regT0, cachedOffset);
+ Jump success = jump();
+
+ LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
+ if (needsStubLink) {
+ for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
+ if (iter->to)
+ patchBuffer.link(iter->from, FunctionPtr(iter->to));
+ }
+ }
+ // Use the patch information to link the failure cases back to the original slow case routine.
+ patchBuffer.link(bucketsOfFail, stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall));
+
+    // On success, return to the hot path code, at a point where it will perform the store to dest for us.
+ patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
+
+ // Track the stub we have created so that it will be deleted later.
+ CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
+ stubInfo->stubRoutine = entryLabel;
+
+ // Finally patch the jump to slow case back in the hot path to jump here instead.
+ CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+ RepatchBuffer repatchBuffer(m_codeBlock);
+ repatchBuffer.relink(jumpLocation, entryLabel);
+
+    // We don't want to patch more than once - in future, go to cti_op_get_by_id_proto_list.
+ repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
+}
+
+/* ------------------------------ END: !ENABLE / ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
+
+#endif // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+
+void JIT::compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, RegisterID structure, RegisterID offset)
+{
+ ASSERT(sizeof(((Structure*)0)->m_propertyStorageCapacity) == sizeof(int32_t));
+ ASSERT(sizeof(JSObject::inlineStorageCapacity) == sizeof(int32_t));
+ ASSERT(sizeof(JSValue) == 8);
+
+ Jump notUsingInlineStorage = branch32(NotEqual, Address(structure, OBJECT_OFFSETOF(Structure, m_propertyStorageCapacity)), Imm32(JSObject::inlineStorageCapacity));
+ loadPtr(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSObject, m_inlineStorage) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayload);
+ loadPtr(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSObject, m_inlineStorage) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTag);
+ Jump finishedLoad = jump();
+ notUsingInlineStorage.link(this);
+ loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), base);
+ loadPtr(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayload);
+ loadPtr(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTag);
+ finishedLoad.link(this);
+}
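
Since this is the JSVALUE32_64 build, each property slot is an 8-byte tag/payload pair (hence the sizeof(JSValue) == 8 assertion), and the code above picks its base address according to whether the object still uses inline storage. A simplified model, where ObjModel and its fields are illustrative stand-ins for the real JSObject/Structure layout:

    #include <cstddef>
    #include <cstdint>

    struct EncodedValue {
        uint32_t payload;
        uint32_t tag;
    };

    struct ObjModel {
        static const uint32_t inlineCapacity = 4; // stand-in for JSObject::inlineStorageCapacity
        uint32_t propertyStorageCapacity;         // read from the Structure in the real code
        EncodedValue inlineStorage[inlineCapacity];
        EncodedValue* externalStorage;

        EncodedValue getDirect(size_t offset) const {
            // branch32(NotEqual, m_propertyStorageCapacity, inlineStorageCapacity)
            if (propertyStorageCapacity == inlineCapacity)
                return inlineStorage[offset];     // inline case: BaseIndex off the cell itself
            return externalStorage[offset];       // external case: loadPtr(m_externalStorage) first
        }
    };
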
+
+void JIT::emit_op_get_by_pname(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned base = currentInstruction[2].u.operand;
+ unsigned property = currentInstruction[3].u.operand;
+ unsigned expected = currentInstruction[4].u.operand;
+ unsigned iter = currentInstruction[5].u.operand;
+ unsigned i = currentInstruction[6].u.operand;
+
+ emitLoad2(property, regT1, regT0, base, regT3, regT2);
+ emitJumpSlowCaseIfNotJSCell(property, regT1);
+ addSlowCase(branchPtr(NotEqual, regT0, payloadFor(expected)));
+ // Property registers are now available as the property is known
+ emitJumpSlowCaseIfNotJSCell(base, regT3);
+ emitLoadPayload(iter, regT1);
+
+ // Test base's structure
+ loadPtr(Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), regT0);
+ addSlowCase(branchPtr(NotEqual, regT0, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructure))));
+ load32(addressFor(i), regT3);
+ sub32(Imm32(1), regT3);
+ addSlowCase(branch32(AboveOrEqual, regT3, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_numCacheableSlots))));
+ compileGetDirectOffset(regT2, regT1, regT0, regT0, regT3);
+
+ emitStore(dst, regT1, regT0);
+ map(m_bytecodeOffset + OPCODE_LENGTH(op_get_by_pname), dst, regT1, regT0);
+}
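
The fast path above fires only while a for-in iterator still holds the base's cached Structure and the property index falls within the directly cacheable slots; anything else bails out to cti_op_get_by_val below. A sketch of the guard logic with hypothetical types:

    #include <cstdint>

    struct Structure;

    struct IteratorModel {
        Structure* cachedStructure;
        uint32_t numCacheableSlots;
    };

    // The counter is decremented before indexing, mirroring the
    // sub32(Imm32(1), regT3) above.
    inline bool fastGetByPname(Structure* baseStructure,
                               const IteratorModel& iter,
                               uint32_t i,
                               const uint64_t* storage,
                               uint64_t& out)
    {
        if (baseStructure != iter.cachedStructure)
            return false;   // slow case: Structure changed mid-iteration
        uint32_t index = i - 1;
        if (index >= iter.numCacheableSlots)
            return false;   // slow case: slot is not directly cacheable
        out = storage[index];
        return true;
    }
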
+
+void JIT::emitSlow_op_get_by_pname(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned base = currentInstruction[2].u.operand;
+ unsigned property = currentInstruction[3].u.operand;
+
+ linkSlowCaseIfNotJSCell(iter, property);
+ linkSlowCase(iter);
+ linkSlowCaseIfNotJSCell(iter, base);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, cti_op_get_by_val);
+ stubCall.addArgument(base);
+ stubCall.addArgument(property);
+ stubCall.call(dst);
+}
+
+} // namespace JSC
+
+#endif // USE(JSVALUE32_64)
+#endif // ENABLE(JIT)
diff --git a/JavaScriptCore/jit/JITStubCall.h b/JavaScriptCore/jit/JITStubCall.h
index cfbd7dc..4478d06 100644
--- a/JavaScriptCore/jit/JITStubCall.h
+++ b/JavaScriptCore/jit/JITStubCall.h
@@ -116,13 +116,15 @@ namespace JSC {
m_jit->poke(argument, m_stackIndex);
m_stackIndex += stackIndexStep;
}
-
+
+#if USE(JSVALUE32_64)
void addArgument(const JSValue& value)
{
m_jit->poke(JIT::Imm32(value.payload()), m_stackIndex);
m_jit->poke(JIT::Imm32(value.tag()), m_stackIndex + 1);
m_stackIndex += stackIndexStep;
}
+#endif
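
Under JSVALUE32_64 a JSValue argument occupies two 32-bit stack slots, payload first and tag second, matching the u.asBits layout used elsewhere in the JIT. A minimal sketch of that split; pokeValue, ValueBits, and the two-word step are illustrative assumptions (the real code pokes via the macro assembler):

    #include <cstdint>

    union ValueBits {
        struct { uint32_t payload; uint32_t tag; } asBits; // little-endian layout assumed
        uint64_t asInt64;
    };

    // Model of addArgument(const JSValue&): payload at stackIndex,
    // tag at stackIndex + 1, then advance by one JSValue-sized step.
    inline void pokeValue(uint32_t* stubArgs, int& stackIndex, uint64_t encodedValue)
    {
        ValueBits bits;
        bits.asInt64 = encodedValue;
        stubArgs[stackIndex] = bits.asBits.payload;
        stubArgs[stackIndex + 1] = bits.asBits.tag;
        stackIndex += 2; // stackIndexStep for a two-word value
    }
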
void addArgument(JIT::RegisterID tag, JIT::RegisterID payload)
{
@@ -165,17 +167,17 @@ namespace JSC {
JIT::Call call()
{
#if ENABLE(OPCODE_SAMPLING)
- if (m_jit->m_bytecodeIndex != (unsigned)-1)
- m_jit->sampleInstruction(m_jit->m_codeBlock->instructions().begin() + m_jit->m_bytecodeIndex, true);
+ if (m_jit->m_bytecodeOffset != (unsigned)-1)
+ m_jit->sampleInstruction(m_jit->m_codeBlock->instructions().begin() + m_jit->m_bytecodeOffset, true);
#endif
m_jit->restoreArgumentReference();
JIT::Call call = m_jit->call();
- m_jit->m_calls.append(CallRecord(call, m_jit->m_bytecodeIndex, m_stub.value()));
+ m_jit->m_calls.append(CallRecord(call, m_jit->m_bytecodeOffset, m_stub.value()));
#if ENABLE(OPCODE_SAMPLING)
- if (m_jit->m_bytecodeIndex != (unsigned)-1)
- m_jit->sampleInstruction(m_jit->m_codeBlock->instructions().begin() + m_jit->m_bytecodeIndex, false);
+ if (m_jit->m_bytecodeOffset != (unsigned)-1)
+ m_jit->sampleInstruction(m_jit->m_codeBlock->instructions().begin() + m_jit->m_bytecodeOffset, false);
#endif
#if USE(JSVALUE32_64)
diff --git a/JavaScriptCore/jit/JITStubs.cpp b/JavaScriptCore/jit/JITStubs.cpp
index 85471de..097d55b 100644
--- a/JavaScriptCore/jit/JITStubs.cpp
+++ b/JavaScriptCore/jit/JITStubs.cpp
@@ -1,6 +1,7 @@
/*
* Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
* Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
+ * Copyright (C) Research In Motion Limited 2010. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,9 +29,9 @@
*/
#include "config.h"
-#include "JITStubs.h"
#if ENABLE(JIT)
+#include "JITStubs.h"
#include "Arguments.h"
#include "CallFrame.h"
@@ -38,12 +39,14 @@
#include "Collector.h"
#include "Debugger.h"
#include "ExceptionHelpers.h"
+#include "GetterSetter.h"
#include "GlobalEvalFunction.h"
#include "JIT.h"
#include "JSActivation.h"
#include "JSArray.h"
#include "JSByteArray.h"
#include "JSFunction.h"
+#include "JSGlobalObjectFunctions.h"
#include "JSNotAnObject.h"
#include "JSPropertyNameIterator.h"
#include "JSStaticScopeObject.h"
@@ -70,7 +73,7 @@ namespace JSC {
#define SYMBOL_STRING(name) #name
#endif
-#if OS(IPHONE_OS)
+#if OS(IOS)
#define THUMB_FUNC_PARAM(name) SYMBOL_STRING(name)
#else
#define THUMB_FUNC_PARAM(name)
@@ -78,8 +81,12 @@ namespace JSC {
#if OS(LINUX) && CPU(X86_64)
#define SYMBOL_STRING_RELOCATION(name) #name "@plt"
+#elif OS(DARWIN)
+#define SYMBOL_STRING_RELOCATION(name) "_" #name
+#elif CPU(X86) && COMPILER(MINGW)
+#define SYMBOL_STRING_RELOCATION(name) "@" #name "@4"
#else
-#define SYMBOL_STRING_RELOCATION(name) SYMBOL_STRING(name)
+#define SYMBOL_STRING_RELOCATION(name) #name
#endif
#if OS(DARWIN)
@@ -112,7 +119,7 @@ COMPILE_ASSERT(offsetof(struct JITStackFrame, savedEBX) == 0x3c, JITStackFrame_s
COMPILE_ASSERT(offsetof(struct JITStackFrame, callFrame) == 0x58, JITStackFrame_callFrame_offset_matches_ctiTrampoline);
COMPILE_ASSERT(offsetof(struct JITStackFrame, code) == 0x50, JITStackFrame_code_offset_matches_ctiTrampoline);
-asm volatile (
+asm (
".text\n"
".globl " SYMBOL_STRING(ctiTrampoline) "\n"
HIDE_SYMBOL(ctiTrampoline) "\n"
@@ -134,23 +141,16 @@ SYMBOL_STRING(ctiTrampoline) ":" "\n"
"ret" "\n"
);
-asm volatile (
+asm (
".globl " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
HIDE_SYMBOL(ctiVMThrowTrampoline) "\n"
SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
-#if !USE(JIT_STUB_ARGUMENT_VA_LIST)
"movl %esp, %ecx" "\n"
-#endif
"call " SYMBOL_STRING_RELOCATION(cti_vm_throw) "\n"
- "addl $0x3c, %esp" "\n"
- "popl %ebx" "\n"
- "popl %edi" "\n"
- "popl %esi" "\n"
- "popl %ebp" "\n"
- "ret" "\n"
+ "int3" "\n"
);
-asm volatile (
+asm (
".globl " SYMBOL_STRING(ctiOpThrowNotCaught) "\n"
HIDE_SYMBOL(ctiOpThrowNotCaught) "\n"
SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
@@ -164,10 +164,6 @@ SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
#elif COMPILER(GCC) && CPU(X86_64)
-#if USE(JIT_STUB_ARGUMENT_VA_LIST)
-#error "JIT_STUB_ARGUMENT_VA_LIST not supported on x86-64."
-#endif
-
// These ASSERTs remind you that, if you change the layout of JITStackFrame, you
// need to change the assembly trampolines below to match.
COMPILE_ASSERT(offsetof(struct JITStackFrame, code) % 32 == 0x0, JITStackFrame_maintains_32byte_stack_alignment);
@@ -175,7 +171,7 @@ COMPILE_ASSERT(offsetof(struct JITStackFrame, savedRBX) == 0x48, JITStackFrame_s
COMPILE_ASSERT(offsetof(struct JITStackFrame, callFrame) == 0x90, JITStackFrame_callFrame_offset_matches_ctiTrampoline);
COMPILE_ASSERT(offsetof(struct JITStackFrame, code) == 0x80, JITStackFrame_code_offset_matches_ctiTrampoline);
-asm volatile (
+asm (
".globl " SYMBOL_STRING(ctiTrampoline) "\n"
HIDE_SYMBOL(ctiTrampoline) "\n"
SYMBOL_STRING(ctiTrampoline) ":" "\n"
@@ -202,23 +198,16 @@ SYMBOL_STRING(ctiTrampoline) ":" "\n"
"ret" "\n"
);
-asm volatile (
+asm (
".globl " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
HIDE_SYMBOL(ctiVMThrowTrampoline) "\n"
SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
"movq %rsp, %rdi" "\n"
"call " SYMBOL_STRING_RELOCATION(cti_vm_throw) "\n"
- "addq $0x48, %rsp" "\n"
- "popq %rbx" "\n"
- "popq %r15" "\n"
- "popq %r14" "\n"
- "popq %r13" "\n"
- "popq %r12" "\n"
- "popq %rbp" "\n"
- "ret" "\n"
+ "int3" "\n"
);
-asm volatile (
+asm (
".globl " SYMBOL_STRING(ctiOpThrowNotCaught) "\n"
HIDE_SYMBOL(ctiOpThrowNotCaught) "\n"
SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
@@ -234,110 +223,23 @@ SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
#elif COMPILER(GCC) && CPU(ARM_THUMB2)
-#if USE(JIT_STUB_ARGUMENT_VA_LIST)
-#error "JIT_STUB_ARGUMENT_VA_LIST not supported on ARMv7."
-#endif
-
-asm volatile (
-".text" "\n"
-".align 2" "\n"
-".globl " SYMBOL_STRING(ctiTrampoline) "\n"
-HIDE_SYMBOL(ctiTrampoline) "\n"
-".thumb" "\n"
-".thumb_func " THUMB_FUNC_PARAM(ctiTrampoline) "\n"
-SYMBOL_STRING(ctiTrampoline) ":" "\n"
- "sub sp, sp, #0x3c" "\n"
- "str lr, [sp, #0x20]" "\n"
- "str r4, [sp, #0x24]" "\n"
- "str r5, [sp, #0x28]" "\n"
- "str r6, [sp, #0x2c]" "\n"
- "str r1, [sp, #0x30]" "\n"
- "str r2, [sp, #0x34]" "\n"
- "str r3, [sp, #0x38]" "\n"
- "cpy r5, r2" "\n"
- "mov r6, #512" "\n"
- "blx r0" "\n"
- "ldr r6, [sp, #0x2c]" "\n"
- "ldr r5, [sp, #0x28]" "\n"
- "ldr r4, [sp, #0x24]" "\n"
- "ldr lr, [sp, #0x20]" "\n"
- "add sp, sp, #0x3c" "\n"
- "bx lr" "\n"
-);
-
-asm volatile (
-".text" "\n"
-".align 2" "\n"
-".globl " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
-HIDE_SYMBOL(ctiVMThrowTrampoline) "\n"
-".thumb" "\n"
-".thumb_func " THUMB_FUNC_PARAM(ctiVMThrowTrampoline) "\n"
-SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
- "cpy r0, sp" "\n"
- "bl " SYMBOL_STRING_RELOCATION(cti_vm_throw) "\n"
- "ldr r6, [sp, #0x2c]" "\n"
- "ldr r5, [sp, #0x28]" "\n"
- "ldr r4, [sp, #0x24]" "\n"
- "ldr lr, [sp, #0x20]" "\n"
- "add sp, sp, #0x3c" "\n"
- "bx lr" "\n"
-);
-
-asm volatile (
-".text" "\n"
-".align 2" "\n"
-".globl " SYMBOL_STRING(ctiOpThrowNotCaught) "\n"
-".thumb" "\n"
-".thumb_func " THUMB_FUNC_PARAM(ctiOpThrowNotCaught) "\n"
-SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
- "ldr r6, [sp, #0x2c]" "\n"
- "ldr r5, [sp, #0x28]" "\n"
- "ldr r4, [sp, #0x24]" "\n"
- "ldr lr, [sp, #0x20]" "\n"
- "add sp, sp, #0x3c" "\n"
- "bx lr" "\n"
-);
-
-#elif COMPILER(GCC) && CPU(ARM_TRADITIONAL)
-
-asm volatile (
-".globl " SYMBOL_STRING(ctiTrampoline) "\n"
-SYMBOL_STRING(ctiTrampoline) ":" "\n"
- "stmdb sp!, {r1-r3}" "\n"
- "stmdb sp!, {r4-r8, lr}" "\n"
- "sub sp, sp, #68" "\n"
- "mov r4, r2" "\n"
- "mov r5, #512" "\n"
- // r0 contains the code
- "mov lr, pc" "\n"
- "mov pc, r0" "\n"
- "add sp, sp, #68" "\n"
- "ldmia sp!, {r4-r8, lr}" "\n"
- "add sp, sp, #12" "\n"
- "mov pc, lr" "\n"
-);
+#define THUNK_RETURN_ADDRESS_OFFSET 0x38
+#define PRESERVED_RETURN_ADDRESS_OFFSET 0x3C
+#define PRESERVED_R4_OFFSET 0x40
+#define PRESERVED_R5_OFFSET 0x44
+#define PRESERVED_R6_OFFSET 0x48
+#define REGISTER_FILE_OFFSET 0x4C
+#define CALLFRAME_OFFSET 0x50
+#define EXCEPTION_OFFSET 0x54
+#define ENABLE_PROFILER_REFERENCE_OFFSET 0x58
-asm volatile (
-".globl " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
-SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
- "mov r0, sp" "\n"
- "bl " SYMBOL_STRING(cti_vm_throw) "\n"
+#elif (COMPILER(GCC) || COMPILER(RVCT)) && CPU(ARM_TRADITIONAL)
-// Both has the same return sequence
-".globl " SYMBOL_STRING(ctiOpThrowNotCaught) "\n"
-SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
- "add sp, sp, #68" "\n"
- "ldmia sp!, {r4-r8, lr}" "\n"
- "add sp, sp, #12" "\n"
- "mov pc, lr" "\n"
-);
+#define THUNK_RETURN_ADDRESS_OFFSET 64
+#define PRESERVEDR4_OFFSET 68
#elif COMPILER(MSVC) && CPU(X86)
-#if USE(JIT_STUB_ARGUMENT_VA_LIST)
-#error "JIT_STUB_ARGUMENT_VA_LIST configuration not supported on MSVC."
-#endif
-
// These ASSERTs remind you that, if you change the layout of JITStackFrame, you
// need to change the assembly trampolines below to match.
COMPILE_ASSERT(offsetof(struct JITStackFrame, code) % 16 == 0x0, JITStackFrame_maintains_16byte_stack_alignment);
@@ -347,7 +249,7 @@ COMPILE_ASSERT(offsetof(struct JITStackFrame, code) == 0x50, JITStackFrame_code_
extern "C" {
- __declspec(naked) EncodedJSValue ctiTrampoline(void* code, RegisterFile*, CallFrame*, JSValue* exception, Profiler**, JSGlobalData*)
+ __declspec(naked) EncodedJSValue ctiTrampoline(void* code, RegisterFile*, CallFrame*, void* /*unused1*/, Profiler**, JSGlobalData*)
{
__asm {
push ebp;
@@ -396,6 +298,27 @@ extern "C" {
}
}
+#elif COMPILER(MSVC) && CPU(ARM_TRADITIONAL)
+
+#define THUNK_RETURN_ADDRESS_OFFSET 64
+#define PRESERVEDR4_OFFSET 68
+// See DEFINE_STUB_FUNCTION for more information.
+
+#elif CPU(MIPS)
+
+#define PRESERVED_GP_OFFSET 60
+#define PRESERVED_S0_OFFSET 64
+#define PRESERVED_S1_OFFSET 68
+#define PRESERVED_S2_OFFSET 72
+#define PRESERVED_RETURN_ADDRESS_OFFSET 76
+#define THUNK_RETURN_ADDRESS_OFFSET 80
+#define REGISTER_FILE_OFFSET 84
+#define CALLFRAME_OFFSET 88
+#define EXCEPTION_OFFSET 92
+#define ENABLE_PROFILER_REFERENCE_OFFSET 96
+#define GLOBAL_DATA_OFFSET 100
+#define STACK_LENGTH 104
+
#else
#error "JIT not supported on this platform."
#endif
@@ -410,7 +333,7 @@ COMPILE_ASSERT(offsetof(struct JITStackFrame, callFrame) == 0x38, JITStackFrame_
COMPILE_ASSERT(offsetof(struct JITStackFrame, code) == 0x30, JITStackFrame_code_offset_matches_ctiTrampoline);
COMPILE_ASSERT(offsetof(struct JITStackFrame, savedEBX) == 0x1c, JITStackFrame_stub_argument_space_matches_ctiTrampoline);
-asm volatile (
+asm (
".text\n"
".globl " SYMBOL_STRING(ctiTrampoline) "\n"
HIDE_SYMBOL(ctiTrampoline) "\n"
@@ -432,23 +355,16 @@ SYMBOL_STRING(ctiTrampoline) ":" "\n"
"ret" "\n"
);
-asm volatile (
+asm (
".globl " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
HIDE_SYMBOL(ctiVMThrowTrampoline) "\n"
SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
-#if !USE(JIT_STUB_ARGUMENT_VA_LIST)
"movl %esp, %ecx" "\n"
-#endif
"call " SYMBOL_STRING_RELOCATION(cti_vm_throw) "\n"
- "addl $0x1c, %esp" "\n"
- "popl %ebx" "\n"
- "popl %edi" "\n"
- "popl %esi" "\n"
- "popl %ebp" "\n"
- "ret" "\n"
+ "int3" "\n"
);
-asm volatile (
+asm (
".globl " SYMBOL_STRING(ctiOpThrowNotCaught) "\n"
HIDE_SYMBOL(ctiOpThrowNotCaught) "\n"
SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
@@ -462,17 +378,13 @@ SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
#elif COMPILER(GCC) && CPU(X86_64)
-#if USE(JIT_STUB_ARGUMENT_VA_LIST)
-#error "JIT_STUB_ARGUMENT_VA_LIST not supported on x86-64."
-#endif
-
// These ASSERTs remind you that, if you change the layout of JITStackFrame, you
// need to change the assembly trampolines below to match.
COMPILE_ASSERT(offsetof(struct JITStackFrame, callFrame) == 0x58, JITStackFrame_callFrame_offset_matches_ctiTrampoline);
COMPILE_ASSERT(offsetof(struct JITStackFrame, code) == 0x48, JITStackFrame_code_offset_matches_ctiTrampoline);
COMPILE_ASSERT(offsetof(struct JITStackFrame, savedRBX) == 0x78, JITStackFrame_stub_argument_space_matches_ctiTrampoline);
-asm volatile (
+asm (
".text\n"
".globl " SYMBOL_STRING(ctiTrampoline) "\n"
HIDE_SYMBOL(ctiTrampoline) "\n"
@@ -507,23 +419,16 @@ SYMBOL_STRING(ctiTrampoline) ":" "\n"
"ret" "\n"
);
-asm volatile (
+asm (
".globl " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
HIDE_SYMBOL(ctiVMThrowTrampoline) "\n"
SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
"movq %rsp, %rdi" "\n"
"call " SYMBOL_STRING_RELOCATION(cti_vm_throw) "\n"
- "addq $0x78, %rsp" "\n"
- "popq %rbx" "\n"
- "popq %r15" "\n"
- "popq %r14" "\n"
- "popq %r13" "\n"
- "popq %r12" "\n"
- "popq %rbp" "\n"
- "ret" "\n"
+ "int3" "\n"
);
-asm volatile (
+asm (
".globl " SYMBOL_STRING(ctiOpThrowNotCaught) "\n"
HIDE_SYMBOL(ctiOpThrowNotCaught) "\n"
SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
@@ -539,11 +444,202 @@ SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
#elif COMPILER(GCC) && CPU(ARM_THUMB2)
-#if USE(JIT_STUB_ARGUMENT_VA_LIST)
-#error "JIT_STUB_ARGUMENT_VA_LIST not supported on ARMv7."
+#define THUNK_RETURN_ADDRESS_OFFSET 0x20
+#define PRESERVED_RETURN_ADDRESS_OFFSET 0x24
+#define PRESERVED_R4_OFFSET 0x28
+#define PRESERVED_R5_OFFSET 0x2C
+#define PRESERVED_R6_OFFSET 0x30
+#define REGISTER_FILE_OFFSET 0x34
+#define CALLFRAME_OFFSET 0x38
+#define EXCEPTION_OFFSET 0x3C
+#define ENABLE_PROFILER_REFERENCE_OFFSET 0x40
+
+#elif (COMPILER(GCC) || COMPILER(RVCT)) && CPU(ARM_TRADITIONAL)
+
+#define THUNK_RETURN_ADDRESS_OFFSET 32
+#define PRESERVEDR4_OFFSET 36
+
+#elif CPU(MIPS)
+
+#define PRESERVED_GP_OFFSET 28
+#define PRESERVED_S0_OFFSET 32
+#define PRESERVED_S1_OFFSET 36
+#define PRESERVED_S2_OFFSET 40
+#define PRESERVED_RETURN_ADDRESS_OFFSET 44
+#define THUNK_RETURN_ADDRESS_OFFSET 48
+#define REGISTER_FILE_OFFSET 52
+#define CALLFRAME_OFFSET 56
+#define EXCEPTION_OFFSET 60
+#define ENABLE_PROFILER_REFERENCE_OFFSET 64
+#define GLOBAL_DATA_OFFSET 68
+#define STACK_LENGTH 72
+
+#elif COMPILER(MSVC) && CPU(X86)
+
+// These ASSERTs remind you that, if you change the layout of JITStackFrame, you
+// need to change the assembly trampolines below to match.
+COMPILE_ASSERT(offsetof(struct JITStackFrame, callFrame) == 0x38, JITStackFrame_callFrame_offset_matches_ctiTrampoline);
+COMPILE_ASSERT(offsetof(struct JITStackFrame, code) == 0x30, JITStackFrame_code_offset_matches_ctiTrampoline);
+COMPILE_ASSERT(offsetof(struct JITStackFrame, savedEBX) == 0x1c, JITStackFrame_stub_argument_space_matches_ctiTrampoline);
+
+extern "C" {
+
+ __declspec(naked) EncodedJSValue ctiTrampoline(void* code, RegisterFile*, CallFrame*, void* /*unused1*/, Profiler**, JSGlobalData*)
+ {
+ __asm {
+ push ebp;
+ mov ebp, esp;
+ push esi;
+ push edi;
+ push ebx;
+ sub esp, 0x1c;
+ mov esi, 512;
+ mov ecx, esp;
+ mov edi, [esp + 0x38];
+ call [esp + 0x30];
+ add esp, 0x1c;
+ pop ebx;
+ pop edi;
+ pop esi;
+ pop ebp;
+ ret;
+ }
+ }
+
+ __declspec(naked) void ctiVMThrowTrampoline()
+ {
+ __asm {
+ mov ecx, esp;
+ call cti_vm_throw;
+ add esp, 0x1c;
+ pop ebx;
+ pop edi;
+ pop esi;
+ pop ebp;
+ ret;
+ }
+ }
+
+ __declspec(naked) void ctiOpThrowNotCaught()
+ {
+ __asm {
+ add esp, 0x1c;
+ pop ebx;
+ pop edi;
+ pop esi;
+ pop ebp;
+ ret;
+ }
+ }
+}
+
+#elif COMPILER(MSVC) && CPU(ARM_TRADITIONAL)
+
+#define THUNK_RETURN_ADDRESS_OFFSET 32
+#define PRESERVEDR4_OFFSET 36
+// See DEFINE_STUB_FUNCTION for more information.
+
+#else
+ #error "JIT not supported on this platform."
+#endif
+
+#endif // USE(JSVALUE32_64)
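
Each block of offset #defines above must stay in lockstep with the JITStackFrame layout for its ABI; the ASSERTs added to the JITThunks constructor further down cross-check them at run time. As a compile-time illustration for the second MIPS block, here is a hypothetical ILP32 frame model (the real field set and argument space differ per port):

    #include <cstddef>
    #include <cstdint>

    // Registers and pointers modeled as 32-bit words so the offsets hold
    // on any host; the leading array stands in for outgoing argument space.
    struct FrameModel {
        uint32_t stubArgumentSpace[7];     // bytes 0..27
        uint32_t preservedGP;              // 28
        uint32_t preservedS0;              // 32
        uint32_t preservedS1;              // 36
        uint32_t preservedS2;              // 40
        uint32_t preservedReturnAddress;   // 44
        uint32_t thunkReturnAddress;       // 48
        uint32_t registerFile;             // 52
        uint32_t callFrame;                // 56
        uint32_t exception;                // 60
        uint32_t enabledProfilerReference; // 64
        uint32_t globalData;               // 68
    };

    static_assert(offsetof(FrameModel, preservedGP) == 28, "PRESERVED_GP_OFFSET drift");
    static_assert(offsetof(FrameModel, thunkReturnAddress) == 48, "THUNK_RETURN_ADDRESS_OFFSET drift");
    static_assert(sizeof(FrameModel) == 72, "STACK_LENGTH drift");
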
+
+#if CPU(MIPS)
+asm volatile(
+".text" "\n"
+".align 2" "\n"
+".set noreorder" "\n"
+".set nomacro" "\n"
+".set nomips16" "\n"
+".globl " SYMBOL_STRING(ctiTrampoline) "\n"
+".ent " SYMBOL_STRING(ctiTrampoline) "\n"
+SYMBOL_STRING(ctiTrampoline) ":" "\n"
+ "addiu $29,$29,-" STRINGIZE_VALUE_OF(STACK_LENGTH) "\n"
+ "sw $31," STRINGIZE_VALUE_OF(PRESERVED_RETURN_ADDRESS_OFFSET) "($29)" "\n"
+ "sw $18," STRINGIZE_VALUE_OF(PRESERVED_S2_OFFSET) "($29)" "\n"
+ "sw $17," STRINGIZE_VALUE_OF(PRESERVED_S1_OFFSET) "($29)" "\n"
+ "sw $16," STRINGIZE_VALUE_OF(PRESERVED_S0_OFFSET) "($29)" "\n"
+#if WTF_MIPS_PIC
+ "sw $28," STRINGIZE_VALUE_OF(PRESERVED_GP_OFFSET) "($29)" "\n"
+#endif
+ "move $16,$6 # set callFrameRegister" "\n"
+ "li $17,512 # set timeoutCheckRegister" "\n"
+ "move $25,$4 # move executableAddress to t9" "\n"
+ "sw $5," STRINGIZE_VALUE_OF(REGISTER_FILE_OFFSET) "($29) # store registerFile to current stack" "\n"
+    "sw $6," STRINGIZE_VALUE_OF(CALLFRAME_OFFSET) "($29) # store callFrame to current stack" "\n"
+ "sw $7," STRINGIZE_VALUE_OF(EXCEPTION_OFFSET) "($29) # store exception to current stack" "\n"
+ "lw $8," STRINGIZE_VALUE_OF(STACK_LENGTH + 16) "($29) # load enableProfilerReference from previous stack" "\n"
+ "lw $9," STRINGIZE_VALUE_OF(STACK_LENGTH + 20) "($29) # load globalData from previous stack" "\n"
+ "sw $8," STRINGIZE_VALUE_OF(ENABLE_PROFILER_REFERENCE_OFFSET) "($29) # store enableProfilerReference to current stack" "\n"
+ "jalr $25" "\n"
+ "sw $9," STRINGIZE_VALUE_OF(GLOBAL_DATA_OFFSET) "($29) # store globalData to current stack" "\n"
+ "lw $16," STRINGIZE_VALUE_OF(PRESERVED_S0_OFFSET) "($29)" "\n"
+ "lw $17," STRINGIZE_VALUE_OF(PRESERVED_S1_OFFSET) "($29)" "\n"
+ "lw $18," STRINGIZE_VALUE_OF(PRESERVED_S2_OFFSET) "($29)" "\n"
+ "lw $31," STRINGIZE_VALUE_OF(PRESERVED_RETURN_ADDRESS_OFFSET) "($29)" "\n"
+ "jr $31" "\n"
+ "addiu $29,$29," STRINGIZE_VALUE_OF(STACK_LENGTH) "\n"
+".set reorder" "\n"
+".set macro" "\n"
+".end " SYMBOL_STRING(ctiTrampoline) "\n"
+);
+
+asm volatile(
+".text" "\n"
+".align 2" "\n"
+".set noreorder" "\n"
+".set nomacro" "\n"
+".set nomips16" "\n"
+".globl " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
+".ent " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
+SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
+#if WTF_MIPS_PIC
+ "lw $28," STRINGIZE_VALUE_OF(PRESERVED_GP_OFFSET) "($29)" "\n"
+".set macro" "\n"
+ "la $25," SYMBOL_STRING(cti_vm_throw) "\n"
+".set nomacro" "\n"
+ "bal " SYMBOL_STRING(cti_vm_throw) "\n"
+ "move $4,$29" "\n"
+#else
+ "jal " SYMBOL_STRING(cti_vm_throw) "\n"
+ "move $4,$29" "\n"
+#endif
+ "lw $16," STRINGIZE_VALUE_OF(PRESERVED_S0_OFFSET) "($29)" "\n"
+ "lw $17," STRINGIZE_VALUE_OF(PRESERVED_S1_OFFSET) "($29)" "\n"
+ "lw $18," STRINGIZE_VALUE_OF(PRESERVED_S2_OFFSET) "($29)" "\n"
+ "lw $31," STRINGIZE_VALUE_OF(PRESERVED_RETURN_ADDRESS_OFFSET) "($29)" "\n"
+ "jr $31" "\n"
+ "addiu $29,$29," STRINGIZE_VALUE_OF(STACK_LENGTH) "\n"
+".set reorder" "\n"
+".set macro" "\n"
+".end " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
+);
+
+asm volatile(
+".text" "\n"
+".align 2" "\n"
+".set noreorder" "\n"
+".set nomacro" "\n"
+".set nomips16" "\n"
+".globl " SYMBOL_STRING(ctiOpThrowNotCaught) "\n"
+".ent " SYMBOL_STRING(ctiOpThrowNotCaught) "\n"
+SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
+ "lw $16," STRINGIZE_VALUE_OF(PRESERVED_S0_OFFSET) "($29)" "\n"
+ "lw $17," STRINGIZE_VALUE_OF(PRESERVED_S1_OFFSET) "($29)" "\n"
+ "lw $18," STRINGIZE_VALUE_OF(PRESERVED_S2_OFFSET) "($29)" "\n"
+ "lw $31," STRINGIZE_VALUE_OF(PRESERVED_RETURN_ADDRESS_OFFSET) "($29)" "\n"
+ "jr $31" "\n"
+ "addiu $29,$29," STRINGIZE_VALUE_OF(STACK_LENGTH) "\n"
+".set reorder" "\n"
+".set macro" "\n"
+".end " SYMBOL_STRING(ctiOpThrowNotCaught) "\n"
+);
#endif
-asm volatile (
+#if COMPILER(GCC) && CPU(ARM_THUMB2)
+
+asm volatile(
".text" "\n"
".align 2" "\n"
".globl " SYMBOL_STRING(ctiTrampoline) "\n"
@@ -551,26 +647,26 @@ HIDE_SYMBOL(ctiTrampoline) "\n"
".thumb" "\n"
".thumb_func " THUMB_FUNC_PARAM(ctiTrampoline) "\n"
SYMBOL_STRING(ctiTrampoline) ":" "\n"
- "sub sp, sp, #0x40" "\n"
- "str lr, [sp, #0x20]" "\n"
- "str r4, [sp, #0x24]" "\n"
- "str r5, [sp, #0x28]" "\n"
- "str r6, [sp, #0x2c]" "\n"
- "str r1, [sp, #0x30]" "\n"
- "str r2, [sp, #0x34]" "\n"
- "str r3, [sp, #0x38]" "\n"
+ "sub sp, sp, #" STRINGIZE_VALUE_OF(ENABLE_PROFILER_REFERENCE_OFFSET) "\n"
+ "str lr, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_RETURN_ADDRESS_OFFSET) "]" "\n"
+ "str r4, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R4_OFFSET) "]" "\n"
+ "str r5, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R5_OFFSET) "]" "\n"
+ "str r6, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R6_OFFSET) "]" "\n"
+ "str r1, [sp, #" STRINGIZE_VALUE_OF(REGISTER_FILE_OFFSET) "]" "\n"
+ "str r2, [sp, #" STRINGIZE_VALUE_OF(CALLFRAME_OFFSET) "]" "\n"
+ "str r3, [sp, #" STRINGIZE_VALUE_OF(EXCEPTION_OFFSET) "]" "\n"
"cpy r5, r2" "\n"
"mov r6, #512" "\n"
"blx r0" "\n"
- "ldr r6, [sp, #0x2c]" "\n"
- "ldr r5, [sp, #0x28]" "\n"
- "ldr r4, [sp, #0x24]" "\n"
- "ldr lr, [sp, #0x20]" "\n"
- "add sp, sp, #0x40" "\n"
+ "ldr r6, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R6_OFFSET) "]" "\n"
+ "ldr r5, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R5_OFFSET) "]" "\n"
+ "ldr r4, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R4_OFFSET) "]" "\n"
+ "ldr lr, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_RETURN_ADDRESS_OFFSET) "]" "\n"
+ "add sp, sp, #" STRINGIZE_VALUE_OF(ENABLE_PROFILER_REFERENCE_OFFSET) "\n"
"bx lr" "\n"
);
-asm volatile (
+asm volatile(
".text" "\n"
".align 2" "\n"
".globl " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
@@ -580,15 +676,15 @@ HIDE_SYMBOL(ctiVMThrowTrampoline) "\n"
SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
"cpy r0, sp" "\n"
"bl " SYMBOL_STRING_RELOCATION(cti_vm_throw) "\n"
- "ldr r6, [sp, #0x2c]" "\n"
- "ldr r5, [sp, #0x28]" "\n"
- "ldr r4, [sp, #0x24]" "\n"
- "ldr lr, [sp, #0x20]" "\n"
- "add sp, sp, #0x40" "\n"
+ "ldr r6, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R6_OFFSET) "]" "\n"
+ "ldr r5, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R5_OFFSET) "]" "\n"
+ "ldr r4, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R4_OFFSET) "]" "\n"
+ "ldr lr, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_RETURN_ADDRESS_OFFSET) "]" "\n"
+ "add sp, sp, #" STRINGIZE_VALUE_OF(ENABLE_PROFILER_REFERENCE_OFFSET) "\n"
"bx lr" "\n"
);
-asm volatile (
+asm volatile(
".text" "\n"
".align 2" "\n"
".globl " SYMBOL_STRING(ctiOpThrowNotCaught) "\n"
@@ -596,46 +692,46 @@ HIDE_SYMBOL(ctiOpThrowNotCaught) "\n"
".thumb" "\n"
".thumb_func " THUMB_FUNC_PARAM(ctiOpThrowNotCaught) "\n"
SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
- "ldr r6, [sp, #0x2c]" "\n"
- "ldr r5, [sp, #0x28]" "\n"
- "ldr r4, [sp, #0x24]" "\n"
- "ldr lr, [sp, #0x20]" "\n"
- "add sp, sp, #0x3c" "\n"
+ "ldr r6, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R6_OFFSET) "]" "\n"
+ "ldr r5, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R5_OFFSET) "]" "\n"
+ "ldr r4, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R4_OFFSET) "]" "\n"
+ "ldr lr, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_RETURN_ADDRESS_OFFSET) "]" "\n"
+ "add sp, sp, #" STRINGIZE_VALUE_OF(ENABLE_PROFILER_REFERENCE_OFFSET) "\n"
"bx lr" "\n"
);
#elif COMPILER(GCC) && CPU(ARM_TRADITIONAL)
-asm volatile (
-".text\n"
+asm volatile(
".globl " SYMBOL_STRING(ctiTrampoline) "\n"
HIDE_SYMBOL(ctiTrampoline) "\n"
SYMBOL_STRING(ctiTrampoline) ":" "\n"
"stmdb sp!, {r1-r3}" "\n"
"stmdb sp!, {r4-r8, lr}" "\n"
- "sub sp, sp, #36" "\n"
+ "sub sp, sp, #" STRINGIZE_VALUE_OF(PRESERVEDR4_OFFSET) "\n"
"mov r4, r2" "\n"
"mov r5, #512" "\n"
+ // r0 contains the code
"mov lr, pc" "\n"
"mov pc, r0" "\n"
- "add sp, sp, #36" "\n"
+ "add sp, sp, #" STRINGIZE_VALUE_OF(PRESERVEDR4_OFFSET) "\n"
"ldmia sp!, {r4-r8, lr}" "\n"
"add sp, sp, #12" "\n"
"mov pc, lr" "\n"
);
-asm volatile (
+asm volatile(
".globl " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
HIDE_SYMBOL(ctiVMThrowTrampoline) "\n"
SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
"mov r0, sp" "\n"
- "bl " SYMBOL_STRING_RELOCATION(cti_vm_throw) "\n"
+ "bl " SYMBOL_STRING(cti_vm_throw) "\n"
// Both share the same return sequence
".globl " SYMBOL_STRING(ctiOpThrowNotCaught) "\n"
HIDE_SYMBOL(ctiOpThrowNotCaught) "\n"
SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
- "add sp, sp, #36" "\n"
+ "add sp, sp, #" STRINGIZE_VALUE_OF(PRESERVEDR4_OFFSET) "\n"
"ldmia sp!, {r4-r8, lr}" "\n"
"add sp, sp, #12" "\n"
"mov pc, lr" "\n"
@@ -643,17 +739,17 @@ SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
#elif COMPILER(RVCT) && CPU(ARM_TRADITIONAL)
-__asm EncodedJSValue ctiTrampoline(void*, RegisterFile*, CallFrame*, JSValue*, Profiler**, JSGlobalData*)
+__asm EncodedJSValue ctiTrampoline(void*, RegisterFile*, CallFrame*, void* /*unused1*/, Profiler**, JSGlobalData*)
{
ARM
stmdb sp!, {r1-r3}
stmdb sp!, {r4-r8, lr}
- sub sp, sp, #36
+ sub sp, sp, # PRESERVEDR4_OFFSET
mov r4, r2
mov r5, #512
mov lr, pc
bx r0
- add sp, sp, #36
+ add sp, sp, # PRESERVEDR4_OFFSET
ldmia sp!, {r4-r8, lr}
add sp, sp, #12
bx lr
@@ -665,7 +761,7 @@ __asm void ctiVMThrowTrampoline()
PRESERVE8
mov r0, sp
bl cti_vm_throw
- add sp, sp, #36
+ add sp, sp, # PRESERVEDR4_OFFSET
ldmia sp!, {r4-r8, lr}
add sp, sp, #12
bx lr
@@ -674,81 +770,13 @@ __asm void ctiVMThrowTrampoline()
__asm void ctiOpThrowNotCaught()
{
ARM
- add sp, sp, #36
+ add sp, sp, # PRESERVEDR4_OFFSET
ldmia sp!, {r4-r8, lr}
add sp, sp, #12
bx lr
}
-
-#elif COMPILER(MSVC) && CPU(X86)
-
-#if USE(JIT_STUB_ARGUMENT_VA_LIST)
-#error "JIT_STUB_ARGUMENT_VA_LIST configuration not supported on MSVC."
-#endif
-
-// These ASSERTs remind you that, if you change the layout of JITStackFrame, you
-// need to change the assembly trampolines below to match.
-COMPILE_ASSERT(offsetof(struct JITStackFrame, callFrame) == 0x38, JITStackFrame_callFrame_offset_matches_ctiTrampoline);
-COMPILE_ASSERT(offsetof(struct JITStackFrame, code) == 0x30, JITStackFrame_code_offset_matches_ctiTrampoline);
-COMPILE_ASSERT(offsetof(struct JITStackFrame, savedEBX) == 0x1c, JITStackFrame_stub_argument_space_matches_ctiTrampoline);
-
-extern "C" {
-
- __declspec(naked) EncodedJSValue ctiTrampoline(void* code, RegisterFile*, CallFrame*, JSValue* exception, Profiler**, JSGlobalData*)
- {
- __asm {
- push ebp;
- mov ebp, esp;
- push esi;
- push edi;
- push ebx;
- sub esp, 0x1c;
- mov esi, 512;
- mov ecx, esp;
- mov edi, [esp + 0x38];
- call [esp + 0x30];
- add esp, 0x1c;
- pop ebx;
- pop edi;
- pop esi;
- pop ebp;
- ret;
- }
- }
-
- __declspec(naked) void ctiVMThrowTrampoline()
- {
- __asm {
- mov ecx, esp;
- call cti_vm_throw;
- add esp, 0x1c;
- pop ebx;
- pop edi;
- pop esi;
- pop ebp;
- ret;
- }
- }
-
- __declspec(naked) void ctiOpThrowNotCaught()
- {
- __asm {
- add esp, 0x1c;
- pop ebx;
- pop edi;
- pop esi;
- pop ebp;
- ret;
- }
- }
-}
-
-#else
- #error "JIT not supported on this platform."
#endif
-#endif // USE(JSVALUE32_64)
-
#if ENABLE(OPCODE_SAMPLING)
#define CTI_SAMPLER stackFrame.globalData->interpreter->sampler()
#else
@@ -757,30 +785,56 @@ extern "C" {
JITThunks::JITThunks(JSGlobalData* globalData)
{
- JIT::compileCTIMachineTrampolines(globalData, &m_executablePool, &m_trampolineStructure);
+ if (!globalData->executableAllocator.isValid())
+ return;
+ JIT::compileCTIMachineTrampolines(globalData, &m_executablePool, &m_trampolineStructure);
+ ASSERT(m_executablePool);
#if CPU(ARM_THUMB2)
    // Unfortunately the ARM compiler does not like the use of offsetof on JITStackFrame (since it contains non-POD types),
    // and the OBJECT_OFFSETOF macro does not appear constantish enough for it to be happy with its use in COMPILE_ASSERT
    // macros.
- ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedReturnAddress) == 0x20);
- ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedR4) == 0x24);
- ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedR5) == 0x28);
- ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedR6) == 0x2c);
-
- ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, registerFile) == 0x30);
- ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, callFrame) == 0x34);
- ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, exception) == 0x38);
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedReturnAddress) == PRESERVED_RETURN_ADDRESS_OFFSET);
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedR4) == PRESERVED_R4_OFFSET);
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedR5) == PRESERVED_R5_OFFSET);
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedR6) == PRESERVED_R6_OFFSET);
+
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, registerFile) == REGISTER_FILE_OFFSET);
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, callFrame) == CALLFRAME_OFFSET);
// The fifth argument is the first item already on the stack.
- ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, enabledProfilerReference) == 0x40);
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, enabledProfilerReference) == ENABLE_PROFILER_REFERENCE_OFFSET);
+
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, thunkReturnAddress) == THUNK_RETURN_ADDRESS_OFFSET);
+
+#elif CPU(ARM_TRADITIONAL)
+
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, thunkReturnAddress) == THUNK_RETURN_ADDRESS_OFFSET);
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedR4) == PRESERVEDR4_OFFSET);
+
+
+#elif CPU(MIPS)
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedGP) == PRESERVED_GP_OFFSET);
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedS0) == PRESERVED_S0_OFFSET);
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedS1) == PRESERVED_S1_OFFSET);
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedS2) == PRESERVED_S2_OFFSET);
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedReturnAddress) == PRESERVED_RETURN_ADDRESS_OFFSET);
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, thunkReturnAddress) == THUNK_RETURN_ADDRESS_OFFSET);
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, registerFile) == REGISTER_FILE_OFFSET);
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, callFrame) == CALLFRAME_OFFSET);
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, exception) == EXCEPTION_OFFSET);
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, enabledProfilerReference) == ENABLE_PROFILER_REFERENCE_OFFSET);
+ ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, globalData) == GLOBAL_DATA_OFFSET);
- ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, thunkReturnAddress) == 0x1C);
#endif
}
+JITThunks::~JITThunks()
+{
+}
+
#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
-NEVER_INLINE void JITThunks::tryCachePutByID(CallFrame* callFrame, CodeBlock* codeBlock, ReturnAddressPtr returnAddress, JSValue baseValue, const PutPropertySlot& slot, StructureStubInfo* stubInfo)
+NEVER_INLINE void JITThunks::tryCachePutByID(CallFrame* callFrame, CodeBlock* codeBlock, ReturnAddressPtr returnAddress, JSValue baseValue, const PutPropertySlot& slot, StructureStubInfo* stubInfo, bool direct)
{
// The interpreter checks for recursion here; I do not believe this can occur in CTI.
@@ -789,21 +843,21 @@ NEVER_INLINE void JITThunks::tryCachePutByID(CallFrame* callFrame, CodeBlock* co
// Uncacheable: give up.
if (!slot.isCacheable()) {
- ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(cti_op_put_by_id_generic));
+ ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(direct ? cti_op_put_by_id_direct_generic : cti_op_put_by_id_generic));
return;
}
- JSCell* baseCell = asCell(baseValue);
+ JSCell* baseCell = baseValue.asCell();
Structure* structure = baseCell->structure();
if (structure->isUncacheableDictionary()) {
- ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(cti_op_put_by_id_generic));
+ ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(direct ? cti_op_put_by_id_direct_generic : cti_op_put_by_id_generic));
return;
}
// If baseCell != base, then baseCell must be a proxy for another object.
if (baseCell != slot.base()) {
- ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(cti_op_put_by_id_generic));
+ ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(direct ? cti_op_put_by_id_direct_generic : cti_op_put_by_id_generic));
return;
}
@@ -812,7 +866,7 @@ NEVER_INLINE void JITThunks::tryCachePutByID(CallFrame* callFrame, CodeBlock* co
// Structure transition, cache transition info
if (slot.type() == PutPropertySlot::NewProperty) {
if (structure->isDictionary()) {
- ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(cti_op_put_by_id_generic));
+ ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(direct ? cti_op_put_by_id_direct_generic : cti_op_put_by_id_generic));
return;
}
@@ -821,13 +875,13 @@ NEVER_INLINE void JITThunks::tryCachePutByID(CallFrame* callFrame, CodeBlock* co
StructureChain* prototypeChain = structure->prototypeChain(callFrame);
stubInfo->initPutByIdTransition(structure->previousID(), structure, prototypeChain);
- JIT::compilePutByIdTransition(callFrame->scopeChain()->globalData, codeBlock, stubInfo, structure->previousID(), structure, slot.cachedOffset(), prototypeChain, returnAddress);
+ JIT::compilePutByIdTransition(callFrame->scopeChain()->globalData, codeBlock, stubInfo, structure->previousID(), structure, slot.cachedOffset(), prototypeChain, returnAddress, direct);
return;
}
stubInfo->initPutByIdReplace(structure);
- JIT::patchPutByIdReplace(codeBlock, stubInfo, structure, slot.cachedOffset(), returnAddress);
+ JIT::patchPutByIdReplace(codeBlock, stubInfo, structure, slot.cachedOffset(), returnAddress, direct);
}
NEVER_INLINE void JITThunks::tryCacheGetByID(CallFrame* callFrame, CodeBlock* codeBlock, ReturnAddressPtr returnAddress, JSValue baseValue, const Identifier& propertyName, const PropertySlot& slot, StructureStubInfo* stubInfo)
@@ -851,7 +905,7 @@ NEVER_INLINE void JITThunks::tryCacheGetByID(CallFrame* callFrame, CodeBlock* co
if (isJSString(globalData, baseValue) && propertyName == callFrame->propertyNames().length) {
        // The tradeoff of compiling a patched inline string length access routine does not seem
// to pay off, so we currently only do this for arrays.
- ctiPatchCallByReturnAddress(codeBlock, returnAddress, globalData->jitStubs.ctiStringLengthTrampoline());
+ ctiPatchCallByReturnAddress(codeBlock, returnAddress, globalData->jitStubs->ctiStringLengthTrampoline());
return;
}
@@ -861,7 +915,7 @@ NEVER_INLINE void JITThunks::tryCacheGetByID(CallFrame* callFrame, CodeBlock* co
return;
}
- JSCell* baseCell = asCell(baseValue);
+ JSCell* baseCell = baseValue.asCell();
Structure* structure = baseCell->structure();
if (structure->isUncacheableDictionary()) {
@@ -874,8 +928,10 @@ NEVER_INLINE void JITThunks::tryCacheGetByID(CallFrame* callFrame, CodeBlock* co
if (slot.slotBase() == baseValue) {
        // Set this up so derefStructures can do its job.
stubInfo->initGetByIdSelf(structure);
-
- JIT::patchGetByIdSelf(codeBlock, stubInfo, structure, slot.cachedOffset(), returnAddress);
+ if (slot.cachedPropertyType() != PropertySlot::Value)
+ ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(cti_op_get_by_id_self_fail));
+ else
+ JIT::patchGetByIdSelf(codeBlock, stubInfo, structure, slot.cachedOffset(), returnAddress);
return;
}
@@ -901,7 +957,7 @@ NEVER_INLINE void JITThunks::tryCacheGetByID(CallFrame* callFrame, CodeBlock* co
ASSERT(!structure->isDictionary());
ASSERT(!slotBaseObject->structure()->isDictionary());
- JIT::compileGetByIdProto(callFrame->scopeChain()->globalData, callFrame, codeBlock, stubInfo, structure, slotBaseObject->structure(), offset, returnAddress);
+ JIT::compileGetByIdProto(callFrame->scopeChain()->globalData, callFrame, codeBlock, stubInfo, structure, slotBaseObject->structure(), propertyName, slot, offset, returnAddress);
return;
}
@@ -914,17 +970,11 @@ NEVER_INLINE void JITThunks::tryCacheGetByID(CallFrame* callFrame, CodeBlock* co
StructureChain* prototypeChain = structure->prototypeChain(callFrame);
stubInfo->initGetByIdChain(structure, prototypeChain);
- JIT::compileGetByIdChain(callFrame->scopeChain()->globalData, callFrame, codeBlock, stubInfo, structure, prototypeChain, count, offset, returnAddress);
+ JIT::compileGetByIdChain(callFrame->scopeChain()->globalData, callFrame, codeBlock, stubInfo, structure, prototypeChain, count, propertyName, slot, offset, returnAddress);
}
#endif // ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
-#if USE(JIT_STUB_ARGUMENT_VA_LIST)
-#define SETUP_VA_LISTL_ARGS va_list vl_args; va_start(vl_args, args)
-#else
-#define SETUP_VA_LISTL_ARGS
-#endif
-
#ifndef NDEBUG
extern "C" {
@@ -955,13 +1005,13 @@ struct StackHack {
ReturnAddressPtr savedReturnAddress;
};
-#define STUB_INIT_STACK_FRAME(stackFrame) SETUP_VA_LISTL_ARGS; JITStackFrame& stackFrame = *reinterpret_cast<JITStackFrame*>(STUB_ARGS); StackHack stackHack(stackFrame)
+#define STUB_INIT_STACK_FRAME(stackFrame) JITStackFrame& stackFrame = *reinterpret_cast_ptr<JITStackFrame*>(STUB_ARGS); StackHack stackHack(stackFrame)
#define STUB_SET_RETURN_ADDRESS(returnAddress) stackHack.savedReturnAddress = ReturnAddressPtr(returnAddress)
#define STUB_RETURN_ADDRESS stackHack.savedReturnAddress
#else
-#define STUB_INIT_STACK_FRAME(stackFrame) SETUP_VA_LISTL_ARGS; JITStackFrame& stackFrame = *reinterpret_cast<JITStackFrame*>(STUB_ARGS)
+#define STUB_INIT_STACK_FRAME(stackFrame) JITStackFrame& stackFrame = *reinterpret_cast_ptr<JITStackFrame*>(STUB_ARGS)
#define STUB_SET_RETURN_ADDRESS(returnAddress) *stackFrame.returnAddressSlot() = ReturnAddressPtr(returnAddress)
#define STUB_RETURN_ADDRESS *stackFrame.returnAddressSlot()
@@ -990,7 +1040,9 @@ static NEVER_INLINE void throwStackOverflowError(CallFrame* callFrame, JSGlobalD
return 0; \
} while (0)
#define VM_THROW_EXCEPTION_AT_END() \
- returnToThrowTrampoline(stackFrame.globalData, STUB_RETURN_ADDRESS, STUB_RETURN_ADDRESS)
+    do { \
+        returnToThrowTrampoline(stackFrame.globalData, STUB_RETURN_ADDRESS, STUB_RETURN_ADDRESS); \
+    } while (0)
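
Wrapping the macro body in do { ... } while (0) makes the expansion a single statement, so a trailing semicolon and an unbraced if/else around the call both behave. A minimal demonstration (BAD_MACRO/GOOD_MACRO are illustrative):

    #include <cstdio>

    #define BAD_MACRO()  std::puts("throw"); std::puts("unwind")
    #define GOOD_MACRO() do { std::puts("throw"); std::puts("unwind"); } while (0)

    int main()
    {
        bool failed = false;
        // BAD_MACRO would leave "unwind" outside the if, and adding an
        // else would not even compile; GOOD_MACRO expands to exactly one
        // statement, so both the semicolon and the else bind correctly.
        if (failed)
            GOOD_MACRO();
        else
            std::puts("ok");
        return 0;
    }
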
#define CHECK_FOR_EXCEPTION() \
do { \
@@ -1010,13 +1062,32 @@ static NEVER_INLINE void throwStackOverflowError(CallFrame* callFrame, JSGlobalD
} \
} while (0)
+struct ExceptionHandler {
+ void* catchRoutine;
+ CallFrame* callFrame;
+};
+static ExceptionHandler jitThrow(JSGlobalData* globalData, CallFrame* callFrame, JSValue exceptionValue, ReturnAddressPtr faultLocation)
+{
+ ASSERT(exceptionValue);
+
+ unsigned vPCIndex = callFrame->codeBlock()->bytecodeOffset(faultLocation);
+ globalData->exception = JSValue();
+ HandlerInfo* handler = globalData->interpreter->throwException(callFrame, exceptionValue, vPCIndex); // This may update callFrame & exceptionValue!
+ globalData->exception = exceptionValue;
+
+ void* catchRoutine = handler ? handler->nativeCode.executableAddress() : FunctionPtr(ctiOpThrowNotCaught).value();
+ ASSERT(catchRoutine);
+ ExceptionHandler exceptionHandler = { catchRoutine, callFrame };
+ return exceptionHandler;
+}
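
A throwing stub uses jitThrow to convert the C++-side exception into a jump target: it installs catchRoutine as its return address and resumes with the (possibly unwound) callFrame. The dispatch decision alone is mirrored by this self-contained sketch with hypothetical types:

    #include <cstdio>

    struct HandlerModel {
        void (*nativeCode)();
    };

    void notCaughtTrampoline() { std::puts("left JIT code: ctiOpThrowNotCaught"); }
    void catchRoutineBody()    { std::puts("entered the handler's catch code"); }

    // A handler found by the interpreter yields its native catch code;
    // otherwise control leaves JIT code through the not-caught trampoline.
    void (*dispatch(HandlerModel* handler))()
    {
        return handler ? handler->nativeCode : notCaughtTrampoline;
    }

    int main()
    {
        HandlerModel handler = { catchRoutineBody };
        dispatch(&handler)();  // entered the handler's catch code
        dispatch(nullptr)();   // left JIT code: ctiOpThrowNotCaught
        return 0;
    }
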
+
#if CPU(ARM_THUMB2)
#define DEFINE_STUB_FUNCTION(rtype, op) \
extern "C" { \
rtype JITStubThunked_##op(STUB_ARGS_DECLARATION); \
}; \
- asm volatile ( \
+ asm ( \
".text" "\n" \
".align 2" "\n" \
".globl " SYMBOL_STRING(cti_##op) "\n" \
@@ -1024,28 +1095,79 @@ static NEVER_INLINE void throwStackOverflowError(CallFrame* callFrame, JSGlobalD
".thumb" "\n" \
".thumb_func " THUMB_FUNC_PARAM(cti_##op) "\n" \
SYMBOL_STRING(cti_##op) ":" "\n" \
- "str lr, [sp, #0x1c]" "\n" \
+ "str lr, [sp, #" STRINGIZE_VALUE_OF(THUNK_RETURN_ADDRESS_OFFSET) "]" "\n" \
"bl " SYMBOL_STRING(JITStubThunked_##op) "\n" \
- "ldr lr, [sp, #0x1c]" "\n" \
+ "ldr lr, [sp, #" STRINGIZE_VALUE_OF(THUNK_RETURN_ADDRESS_OFFSET) "]" "\n" \
"bx lr" "\n" \
); \
rtype JITStubThunked_##op(STUB_ARGS_DECLARATION) \
-#elif CPU(ARM_TRADITIONAL) && COMPILER(GCC)
+#elif CPU(MIPS)
+#if WTF_MIPS_PIC
+#define DEFINE_STUB_FUNCTION(rtype, op) \
+ extern "C" { \
+ rtype JITStubThunked_##op(STUB_ARGS_DECLARATION); \
+ }; \
+ asm volatile( \
+ ".text" "\n" \
+ ".align 2" "\n" \
+ ".set noreorder" "\n" \
+ ".set nomacro" "\n" \
+ ".set nomips16" "\n" \
+ ".globl " SYMBOL_STRING(cti_##op) "\n" \
+ ".ent " SYMBOL_STRING(cti_##op) "\n" \
+ SYMBOL_STRING(cti_##op) ":" "\n" \
+ "lw $28," STRINGIZE_VALUE_OF(PRESERVED_GP_OFFSET) "($29)" "\n" \
+ "sw $31," STRINGIZE_VALUE_OF(THUNK_RETURN_ADDRESS_OFFSET) "($29)" "\n" \
+ ".set macro" "\n" \
+ "la $25," SYMBOL_STRING(JITStubThunked_##op) "\n" \
+ ".set nomacro" "\n" \
+ "bal " SYMBOL_STRING(JITStubThunked_##op) "\n" \
+ "nop" "\n" \
+ "lw $31," STRINGIZE_VALUE_OF(THUNK_RETURN_ADDRESS_OFFSET) "($29)" "\n" \
+ "jr $31" "\n" \
+ "nop" "\n" \
+ ".set reorder" "\n" \
+ ".set macro" "\n" \
+ ".end " SYMBOL_STRING(cti_##op) "\n" \
+ ); \
+ rtype JITStubThunked_##op(STUB_ARGS_DECLARATION)
+
+#else // WTF_MIPS_PIC
+#define DEFINE_STUB_FUNCTION(rtype, op) \
+ extern "C" { \
+ rtype JITStubThunked_##op(STUB_ARGS_DECLARATION); \
+ }; \
+ asm volatile( \
+ ".text" "\n" \
+ ".align 2" "\n" \
+ ".set noreorder" "\n" \
+ ".set nomacro" "\n" \
+ ".set nomips16" "\n" \
+ ".globl " SYMBOL_STRING(cti_##op) "\n" \
+ ".ent " SYMBOL_STRING(cti_##op) "\n" \
+ SYMBOL_STRING(cti_##op) ":" "\n" \
+ "sw $31," STRINGIZE_VALUE_OF(THUNK_RETURN_ADDRESS_OFFSET) "($29)" "\n" \
+ "jal " SYMBOL_STRING(JITStubThunked_##op) "\n" \
+ "nop" "\n" \
+ "lw $31," STRINGIZE_VALUE_OF(THUNK_RETURN_ADDRESS_OFFSET) "($29)" "\n" \
+ "jr $31" "\n" \
+ "nop" "\n" \
+ ".set reorder" "\n" \
+ ".set macro" "\n" \
+ ".end " SYMBOL_STRING(cti_##op) "\n" \
+ ); \
+ rtype JITStubThunked_##op(STUB_ARGS_DECLARATION)
-#if USE(JSVALUE32_64)
-#define THUNK_RETURN_ADDRESS_OFFSET 64
-#else
-#define THUNK_RETURN_ADDRESS_OFFSET 32
#endif
-COMPILE_ASSERT(offsetof(struct JITStackFrame, thunkReturnAddress) == THUNK_RETURN_ADDRESS_OFFSET, JITStackFrame_thunkReturnAddress_offset_mismatch);
+#elif CPU(ARM_TRADITIONAL) && COMPILER(GCC)
#define DEFINE_STUB_FUNCTION(rtype, op) \
extern "C" { \
rtype JITStubThunked_##op(STUB_ARGS_DECLARATION); \
}; \
- asm volatile ( \
+ asm ( \
".globl " SYMBOL_STRING(cti_##op) "\n" \
SYMBOL_STRING(cti_##op) ":" "\n" \
"str lr, [sp, #" STRINGIZE_VALUE_OF(THUNK_RETURN_ADDRESS_OFFSET) "]" "\n" \
@@ -1070,9 +1192,9 @@ RVCT(__asm #rtype# cti_#op#(STUB_ARGS_DECLARATION))
RVCT({)
RVCT( ARM)
RVCT( IMPORT JITStubThunked_#op#)
-RVCT( str lr, [sp, #32])
+RVCT( str lr, [sp, # THUNK_RETURN_ADDRESS_OFFSET])
RVCT( bl JITStubThunked_#op#)
-RVCT( ldr lr, [sp, #32])
+RVCT( ldr lr, [sp, # THUNK_RETURN_ADDRESS_OFFSET])
RVCT( bx lr)
RVCT(})
RVCT()
@@ -1081,10 +1203,88 @@ RVCT()
/* Include the generated file */
#include "GeneratedJITStubs_RVCT.h"
+#elif CPU(ARM_TRADITIONAL) && COMPILER(MSVC)
+
+#define DEFINE_STUB_FUNCTION(rtype, op) extern "C" rtype JITStubThunked_##op(STUB_ARGS_DECLARATION)
+
+/* The following is a workaround for the MSVC toolchain, which does not support inline assembly. */
+
+/* The following section is a template used to generate code for GeneratedJITStubs_MSVC.asm. */
+/* The pattern "#xxx#" will be replaced with "xxx". */
+
+/*
+MSVC_BEGIN( AREA Trampoline, CODE)
+MSVC_BEGIN()
+MSVC_BEGIN( EXPORT ctiTrampoline)
+MSVC_BEGIN( EXPORT ctiVMThrowTrampoline)
+MSVC_BEGIN( EXPORT ctiOpThrowNotCaught)
+MSVC_BEGIN()
+MSVC_BEGIN(ctiTrampoline PROC)
+MSVC_BEGIN( stmdb sp!, {r1-r3})
+MSVC_BEGIN( stmdb sp!, {r4-r8, lr})
+MSVC_BEGIN( sub sp, sp, # THUNK_RETURN_ADDRESS_OFFSET + 4)
+MSVC_BEGIN( mov r4, r2)
+MSVC_BEGIN( mov r5, #512)
+MSVC_BEGIN( ; r0 contains the code)
+MSVC_BEGIN( mov lr, pc)
+MSVC_BEGIN( bx r0)
+MSVC_BEGIN( add sp, sp, # THUNK_RETURN_ADDRESS_OFFSET + 4)
+MSVC_BEGIN( ldmia sp!, {r4-r8, lr})
+MSVC_BEGIN( add sp, sp, #12)
+MSVC_BEGIN( bx lr)
+MSVC_BEGIN(ctiTrampoline ENDP)
+MSVC_BEGIN()
+MSVC_BEGIN(ctiVMThrowTrampoline PROC)
+MSVC_BEGIN( mov r0, sp)
+MSVC_BEGIN( mov lr, pc)
+MSVC_BEGIN( bl cti_vm_throw)
+MSVC_BEGIN(ctiOpThrowNotCaught)
+MSVC_BEGIN( add sp, sp, # THUNK_RETURN_ADDRESS_OFFSET + 4)
+MSVC_BEGIN( ldmia sp!, {r4-r8, lr})
+MSVC_BEGIN( add sp, sp, #12)
+MSVC_BEGIN( bx lr)
+MSVC_BEGIN(ctiVMThrowTrampoline ENDP)
+MSVC_BEGIN()
+
+MSVC( EXPORT cti_#op#)
+MSVC( IMPORT JITStubThunked_#op#)
+MSVC(cti_#op# PROC)
+MSVC( str lr, [sp, # THUNK_RETURN_ADDRESS_OFFSET])
+MSVC( bl JITStubThunked_#op#)
+MSVC( ldr lr, [sp, # THUNK_RETURN_ADDRESS_OFFSET])
+MSVC( bx lr)
+MSVC(cti_#op# ENDP)
+MSVC()
+
+MSVC_END( END)
+*/
+
#else
#define DEFINE_STUB_FUNCTION(rtype, op) rtype JIT_STUB cti_##op(STUB_ARGS_DECLARATION)
#endif
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_create_this)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+ CallFrame* callFrame = stackFrame.callFrame;
+
+ JSFunction* constructor = asFunction(callFrame->callee());
+#if !ASSERT_DISABLED
+ ConstructData constructData;
+ ASSERT(constructor->getConstructData(constructData) == ConstructTypeJS);
+#endif
+
+ Structure* structure;
+ JSValue proto = stackFrame.args[0].jsValue();
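+    // inheritorID() is the cached Structure for objects created with 'proto' as
+    // their prototype, so repeated construction reuses a single Structure.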
+ if (proto.isObject())
+ structure = asObject(proto)->inheritorID();
+ else
+ structure = constructor->scope().node()->globalObject->emptyObjectStructure();
+ JSValue result = new (&callFrame->globalData()) JSObject(structure);
+
+ return JSValue::encode(result);
+}
+
DEFINE_STUB_FUNCTION(EncodedJSValue, op_convert_this)
{
STUB_INIT_STACK_FRAME(stackFrame);
@@ -1097,6 +1297,18 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_convert_this)
return JSValue::encode(result);
}
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_convert_this_strict)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue v1 = stackFrame.args[0].jsValue();
+ CallFrame* callFrame = stackFrame.callFrame;
+ ASSERT(v1.asCell()->structure()->typeInfo().needsThisConversion());
+ JSValue result = v1.toStrictThisObject(callFrame);
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
DEFINE_STUB_FUNCTION(void, op_end)
{
STUB_INIT_STACK_FRAME(stackFrame);
@@ -1124,7 +1336,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_add)
double left = 0.0, right;
if (v1.getNumber(left) && v2.getNumber(right))
- return JSValue::encode(jsNumber(stackFrame.globalData, left + right));
+ return JSValue::encode(jsNumber(left + right));
// All other cases are pretty uncommon
JSValue result = jsAddSlowCase(callFrame, v1, v2);
@@ -1139,7 +1351,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_pre_inc)
JSValue v = stackFrame.args[0].jsValue();
CallFrame* callFrame = stackFrame.callFrame;
- JSValue result = jsNumber(stackFrame.globalData, v.toNumber(callFrame) + 1);
+ JSValue result = jsNumber(v.toNumber(callFrame) + 1);
CHECK_FOR_EXCEPTION_AT_END();
return JSValue::encode(result);
}
@@ -1147,30 +1359,36 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_pre_inc)
DEFINE_STUB_FUNCTION(int, timeout_check)
{
STUB_INIT_STACK_FRAME(stackFrame);
-
+
JSGlobalData* globalData = stackFrame.globalData;
TimeoutChecker& timeoutChecker = globalData->timeoutChecker;
- if (timeoutChecker.didTimeOut(stackFrame.callFrame)) {
+ if (globalData->terminator.shouldTerminate()) {
+ globalData->exception = createTerminatedExecutionException(globalData);
+ VM_THROW_EXCEPTION_AT_END();
+ } else if (timeoutChecker.didTimeOut(stackFrame.callFrame)) {
globalData->exception = createInterruptedExecutionException(globalData);
VM_THROW_EXCEPTION_AT_END();
}
-
+
return timeoutChecker.ticksUntilNextCheck();
}
-DEFINE_STUB_FUNCTION(void, register_file_check)
+DEFINE_STUB_FUNCTION(void*, register_file_check)
{
STUB_INIT_STACK_FRAME(stackFrame);
+ CallFrame* callFrame = stackFrame.callFrame;
- if (LIKELY(stackFrame.registerFile->grow(&stackFrame.callFrame->registers()[stackFrame.callFrame->codeBlock()->m_numCalleeRegisters])))
- return;
+ if (UNLIKELY(!stackFrame.registerFile->grow(&callFrame->registers()[callFrame->codeBlock()->m_numCalleeRegisters]))) {
+ // Rewind to the previous call frame because op_call already optimistically
+ // moved the call frame forward.
+ CallFrame* oldCallFrame = callFrame->callerFrame();
+ ExceptionHandler handler = jitThrow(stackFrame.globalData, oldCallFrame, createStackOverflowError(oldCallFrame), ReturnAddressPtr(oldCallFrame->returnPC()));
+ STUB_SET_RETURN_ADDRESS(handler.catchRoutine);
+ callFrame = handler.callFrame;
+ }
- // Rewind to the previous call frame because op_call already optimistically
- // moved the call frame forward.
- CallFrame* oldCallFrame = stackFrame.callFrame->callerFrame();
- stackFrame.callFrame = oldCallFrame;
- throwStackOverflowError(oldCallFrame, stackFrame.globalData, ReturnAddressPtr(oldCallFrame->returnPC()), STUB_RETURN_ADDRESS);
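+    // Return the call frame execution should continue in: unchanged on success,
+    // the handler's frame after the stack-overflow throw.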
+ return callFrame;
}
DEFINE_STUB_FUNCTION(int, op_loop_if_lesseq)
@@ -1197,11 +1415,20 @@ DEFINE_STUB_FUNCTION(void, op_put_by_id_generic)
{
STUB_INIT_STACK_FRAME(stackFrame);
- PutPropertySlot slot;
+ PutPropertySlot slot(stackFrame.callFrame->codeBlock()->isStrictMode());
stackFrame.args[0].jsValue().put(stackFrame.callFrame, stackFrame.args[1].identifier(), stackFrame.args[2].jsValue(), slot);
CHECK_FOR_EXCEPTION_AT_END();
}
+DEFINE_STUB_FUNCTION(void, op_put_by_id_direct_generic)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ PutPropertySlot slot(stackFrame.callFrame->codeBlock()->isStrictMode());
+ stackFrame.args[0].jsValue().putDirect(stackFrame.callFrame, stackFrame.args[1].identifier(), stackFrame.args[2].jsValue(), slot);
+ CHECK_FOR_EXCEPTION_AT_END();
+}
+
DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_generic)
{
STUB_INIT_STACK_FRAME(stackFrame);
@@ -1224,17 +1451,36 @@ DEFINE_STUB_FUNCTION(void, op_put_by_id)
STUB_INIT_STACK_FRAME(stackFrame);
CallFrame* callFrame = stackFrame.callFrame;
Identifier& ident = stackFrame.args[1].identifier();
-
- PutPropertySlot slot;
+
+ PutPropertySlot slot(callFrame->codeBlock()->isStrictMode());
stackFrame.args[0].jsValue().put(callFrame, ident, stackFrame.args[2].jsValue(), slot);
-
+
CodeBlock* codeBlock = stackFrame.callFrame->codeBlock();
StructureStubInfo* stubInfo = &codeBlock->getStubInfo(STUB_RETURN_ADDRESS);
if (!stubInfo->seenOnce())
stubInfo->setSeen();
else
- JITThunks::tryCachePutByID(callFrame, codeBlock, STUB_RETURN_ADDRESS, stackFrame.args[0].jsValue(), slot, stubInfo);
+ JITThunks::tryCachePutByID(callFrame, codeBlock, STUB_RETURN_ADDRESS, stackFrame.args[0].jsValue(), slot, stubInfo, false);
+
+ CHECK_FOR_EXCEPTION_AT_END();
+}
+DEFINE_STUB_FUNCTION(void, op_put_by_id_direct)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+ CallFrame* callFrame = stackFrame.callFrame;
+ Identifier& ident = stackFrame.args[1].identifier();
+
+ PutPropertySlot slot(callFrame->codeBlock()->isStrictMode());
+ stackFrame.args[0].jsValue().putDirect(callFrame, ident, stackFrame.args[2].jsValue(), slot);
+
+ CodeBlock* codeBlock = stackFrame.callFrame->codeBlock();
+ StructureStubInfo* stubInfo = &codeBlock->getStubInfo(STUB_RETURN_ADDRESS);
+ if (!stubInfo->seenOnce())
+ stubInfo->setSeen();
+ else
+ JITThunks::tryCachePutByID(callFrame, codeBlock, STUB_RETURN_ADDRESS, stackFrame.args[0].jsValue(), slot, stubInfo, true);
+
CHECK_FOR_EXCEPTION_AT_END();
}
@@ -1244,13 +1490,26 @@ DEFINE_STUB_FUNCTION(void, op_put_by_id_fail)
CallFrame* callFrame = stackFrame.callFrame;
Identifier& ident = stackFrame.args[1].identifier();
-
- PutPropertySlot slot;
+
+ PutPropertySlot slot(callFrame->codeBlock()->isStrictMode());
stackFrame.args[0].jsValue().put(callFrame, ident, stackFrame.args[2].jsValue(), slot);
CHECK_FOR_EXCEPTION_AT_END();
}
+DEFINE_STUB_FUNCTION(void, op_put_by_id_direct_fail)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ Identifier& ident = stackFrame.args[1].identifier();
+
+ PutPropertySlot slot(callFrame->codeBlock()->isStrictMode());
+ stackFrame.args[0].jsValue().putDirect(callFrame, ident, stackFrame.args[2].jsValue(), slot);
+
+ CHECK_FOR_EXCEPTION_AT_END();
+}
+
DEFINE_STUB_FUNCTION(JSObject*, op_put_by_id_transition_realloc)
{
STUB_INIT_STACK_FRAME(stackFrame);
@@ -1289,7 +1548,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_method_check)
// If we successfully got something, then the base from which it is being accessed must
    // be an object. (Assertion to ensure the asObject() call below is safe, which comes after
    // an isCacheable() check.)
- ASSERT(!slot.isCacheable() || slot.slotBase().isObject());
+ ASSERT(!slot.isCacheableValue() || slot.slotBase().isObject());
// Check that:
// * We're dealing with a JSCell,
@@ -1300,8 +1559,8 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_method_check)
JSCell* specific;
JSObject* slotBaseObject;
if (baseValue.isCell()
- && slot.isCacheable()
- && !(structure = asCell(baseValue)->structure())->isUncacheableDictionary()
+ && slot.isCacheableValue()
+ && !(structure = baseValue.asCell()->structure())->isUncacheableDictionary()
&& (slotBaseObject = asObject(slot.slotBase()))->getPropertySpecificValue(callFrame, ident, specific)
&& specific
) {
@@ -1375,7 +1634,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_self_fail)
if (baseValue.isCell()
&& slot.isCacheable()
- && !asCell(baseValue)->structure()->isUncacheableDictionary()
+ && !baseValue.asCell()->structure()->isUncacheableDictionary()
&& slot.slotBase() == baseValue) {
CodeBlock* codeBlock = callFrame->codeBlock();
@@ -1389,17 +1648,18 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_self_fail)
if (stubInfo->accessType == access_get_by_id_self) {
ASSERT(!stubInfo->stubRoutine);
polymorphicStructureList = new PolymorphicAccessStructureList(CodeLocationLabel(), stubInfo->u.getByIdSelf.baseObjectStructure);
- stubInfo->initGetByIdSelfList(polymorphicStructureList, 2);
+ stubInfo->initGetByIdSelfList(polymorphicStructureList, 1);
} else {
polymorphicStructureList = stubInfo->u.getByIdSelfList.structureList;
listIndex = stubInfo->u.getByIdSelfList.listSize;
- stubInfo->u.getByIdSelfList.listSize++;
}
+ if (listIndex < POLYMORPHIC_LIST_CACHE_SIZE) {
+ stubInfo->u.getByIdSelfList.listSize++;
+ JIT::compileGetByIdSelfList(callFrame->scopeChain()->globalData, codeBlock, stubInfo, polymorphicStructureList, listIndex, baseValue.asCell()->structure(), ident, slot, slot.cachedOffset());
- JIT::compileGetByIdSelfList(callFrame->scopeChain()->globalData, codeBlock, stubInfo, polymorphicStructureList, listIndex, asCell(baseValue)->structure(), slot.cachedOffset());
-
- if (listIndex == (POLYMORPHIC_LIST_CACHE_SIZE - 1))
- ctiPatchCallByReturnAddress(codeBlock, STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_generic));
+ if (listIndex == (POLYMORPHIC_LIST_CACHE_SIZE - 1))
+ ctiPatchCallByReturnAddress(codeBlock, STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_generic));
+ }
} else
ctiPatchCallByReturnAddress(callFrame->codeBlock(), STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_generic));
return JSValue::encode(result);
@@ -1424,16 +1684,48 @@ static PolymorphicAccessStructureList* getPolymorphicAccessStructureListSlot(Str
case access_get_by_id_proto_list:
prototypeStructureList = stubInfo->u.getByIdProtoList.structureList;
listIndex = stubInfo->u.getByIdProtoList.listSize;
- stubInfo->u.getByIdProtoList.listSize++;
+ if (listIndex < POLYMORPHIC_LIST_CACHE_SIZE)
+ stubInfo->u.getByIdProtoList.listSize++;
break;
default:
ASSERT_NOT_REACHED();
}
- ASSERT(listIndex < POLYMORPHIC_LIST_CACHE_SIZE);
+ ASSERT(listIndex <= POLYMORPHIC_LIST_CACHE_SIZE);
return prototypeStructureList;
}
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_getter_stub)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+ CallFrame* callFrame = stackFrame.callFrame;
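+    // Invoke a cached JS getter: args[1] is the base object used as 'this', and
+    // args[2] is the return address used to find the handler if the getter throws.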
+ GetterSetter* getterSetter = asGetterSetter(stackFrame.args[0].jsObject());
+ if (!getterSetter->getter())
+ return JSValue::encode(jsUndefined());
+ JSObject* getter = asObject(getterSetter->getter());
+ CallData callData;
+ CallType callType = getter->getCallData(callData);
+ JSValue result = call(callFrame, getter, callType, callData, stackFrame.args[1].jsObject(), ArgList());
+ if (callFrame->hadException())
+ returnToThrowTrampoline(&callFrame->globalData(), stackFrame.args[2].returnAddress(), STUB_RETURN_ADDRESS);
+
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_custom_stub)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSObject* slotBase = stackFrame.args[0].jsObject();
+ PropertySlot::GetValueFunc getter = reinterpret_cast<PropertySlot::GetValueFunc>(stackFrame.args[1].asPointer);
+ const Identifier& ident = stackFrame.args[2].identifier();
+ JSValue result = getter(callFrame, slotBase, ident);
+ if (callFrame->hadException())
+ returnToThrowTrampoline(&callFrame->globalData(), stackFrame.args[3].returnAddress(), STUB_RETURN_ADDRESS);
+
+ return JSValue::encode(result);
+}
+
DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_proto_list)
{
STUB_INIT_STACK_FRAME(stackFrame);
@@ -1447,12 +1739,12 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_proto_list)
CHECK_FOR_EXCEPTION();
- if (!baseValue.isCell() || !slot.isCacheable() || asCell(baseValue)->structure()->isDictionary()) {
+ if (!baseValue.isCell() || !slot.isCacheable() || baseValue.asCell()->structure()->isDictionary()) {
ctiPatchCallByReturnAddress(callFrame->codeBlock(), STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_proto_fail));
return JSValue::encode(result);
}
- Structure* structure = asCell(baseValue)->structure();
+ Structure* structure = baseValue.asCell()->structure();
CodeBlock* codeBlock = callFrame->codeBlock();
StructureStubInfo* stubInfo = &codeBlock->getStubInfo(STUB_RETURN_ADDRESS);
@@ -1463,8 +1755,8 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_proto_list)
if (slot.slotBase() == baseValue)
ctiPatchCallByReturnAddress(codeBlock, STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_proto_fail));
- else if (slot.slotBase() == asCell(baseValue)->structure()->prototypeForLookup(callFrame)) {
- ASSERT(!asCell(baseValue)->structure()->isDictionary());
+ else if (slot.slotBase() == baseValue.asCell()->structure()->prototypeForLookup(callFrame)) {
+ ASSERT(!baseValue.asCell()->structure()->isDictionary());
// Since we're accessing a prototype in a loop, it's a good bet that it
// should not be treated as a dictionary.
if (slotBaseObject->structure()->isDictionary()) {
@@ -1474,21 +1766,24 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_proto_list)
int listIndex;
PolymorphicAccessStructureList* prototypeStructureList = getPolymorphicAccessStructureListSlot(stubInfo, listIndex);
+ if (listIndex < POLYMORPHIC_LIST_CACHE_SIZE) {
+ JIT::compileGetByIdProtoList(callFrame->scopeChain()->globalData, callFrame, codeBlock, stubInfo, prototypeStructureList, listIndex, structure, slotBaseObject->structure(), propertyName, slot, offset);
- JIT::compileGetByIdProtoList(callFrame->scopeChain()->globalData, callFrame, codeBlock, stubInfo, prototypeStructureList, listIndex, structure, slotBaseObject->structure(), offset);
-
- if (listIndex == (POLYMORPHIC_LIST_CACHE_SIZE - 1))
- ctiPatchCallByReturnAddress(codeBlock, STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_proto_list_full));
+ if (listIndex == (POLYMORPHIC_LIST_CACHE_SIZE - 1))
+ ctiPatchCallByReturnAddress(codeBlock, STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_proto_list_full));
+ }
} else if (size_t count = normalizePrototypeChain(callFrame, baseValue, slot.slotBase(), propertyName, offset)) {
- ASSERT(!asCell(baseValue)->structure()->isDictionary());
+ ASSERT(!baseValue.asCell()->structure()->isDictionary());
int listIndex;
PolymorphicAccessStructureList* prototypeStructureList = getPolymorphicAccessStructureListSlot(stubInfo, listIndex);
+
+ if (listIndex < POLYMORPHIC_LIST_CACHE_SIZE) {
+ StructureChain* protoChain = structure->prototypeChain(callFrame);
+ JIT::compileGetByIdChainList(callFrame->scopeChain()->globalData, callFrame, codeBlock, stubInfo, prototypeStructureList, listIndex, structure, protoChain, count, propertyName, slot, offset);
- StructureChain* protoChain = structure->prototypeChain(callFrame);
- JIT::compileGetByIdChainList(callFrame->scopeChain()->globalData, callFrame, codeBlock, stubInfo, prototypeStructureList, listIndex, structure, protoChain, count, offset);
-
- if (listIndex == (POLYMORPHIC_LIST_CACHE_SIZE - 1))
- ctiPatchCallByReturnAddress(codeBlock, STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_proto_list_full));
+ if (listIndex == (POLYMORPHIC_LIST_CACHE_SIZE - 1))
+ ctiPatchCallByReturnAddress(codeBlock, STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_proto_list_full));
+ }
} else
ctiPatchCallByReturnAddress(codeBlock, STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_proto_fail));
@@ -1545,6 +1840,23 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_string_fail)
#endif // ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+DEFINE_STUB_FUNCTION(void, op_check_has_instance)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSValue baseVal = stackFrame.args[0].jsValue();
+
+ // ECMA-262 15.3.5.3:
+    // Throw an exception either if baseVal is not an object, or if it does not implement 'HasInstance' (i.e. is not a function).
+#ifndef NDEBUG
+ TypeInfo typeInfo(UnspecifiedType);
+ ASSERT(!baseVal.isObject() || !(typeInfo = asObject(baseVal)->structure()->typeInfo()).implementsHasInstance());
+#endif
+ stackFrame.globalData->exception = createInvalidParamError(callFrame, "instanceof", baseVal);
+ VM_THROW_EXCEPTION_AT_END();
+}
+
DEFINE_STUB_FUNCTION(EncodedJSValue, op_instanceof)
{
STUB_INIT_STACK_FRAME(stackFrame);
@@ -1564,10 +1876,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_instanceof)
    // Throw an exception either if baseVal is not an object, or if it does not implement 'HasInstance' (i.e. is not a function).
TypeInfo typeInfo(UnspecifiedType);
if (!baseVal.isObject() || !(typeInfo = asObject(baseVal)->structure()->typeInfo()).implementsHasInstance()) {
- CallFrame* callFrame = stackFrame.callFrame;
- CodeBlock* codeBlock = callFrame->codeBlock();
- unsigned vPCIndex = codeBlock->getBytecodeIndex(callFrame, STUB_RETURN_ADDRESS);
- stackFrame.globalData->exception = createInvalidParamError(callFrame, "instanceof", baseVal, vPCIndex, codeBlock);
+ stackFrame.globalData->exception = createInvalidParamError(stackFrame.callFrame, "instanceof", baseVal);
VM_THROW_EXCEPTION();
}
ASSERT(typeInfo.type() != UnspecifiedType);
@@ -1577,7 +1886,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_instanceof)
return JSValue::encode(jsBoolean(false));
if (!proto.isObject()) {
- throwError(callFrame, TypeError, "instanceof called on an object with an invalid prototype property.");
+ throwError(callFrame, createTypeError(callFrame, "instanceof called on an object with an invalid prototype property."));
VM_THROW_EXCEPTION();
}
}
@@ -1596,7 +1905,11 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_del_by_id)
JSObject* baseObj = stackFrame.args[0].jsValue().toObject(callFrame);
- JSValue result = jsBoolean(baseObj->deleteProperty(callFrame, stackFrame.args[1].identifier()));
+ bool couldDelete = baseObj->deleteProperty(callFrame, stackFrame.args[1].identifier());
+ JSValue result = jsBoolean(couldDelete);
+ if (!couldDelete && callFrame->codeBlock()->isStrictMode())
+ stackFrame.globalData->exception = createTypeError(stackFrame.callFrame, "Unable to delete property.");
+
CHECK_FOR_EXCEPTION_AT_END();
return JSValue::encode(result);
}
@@ -1611,10 +1924,10 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_mul)
double left;
double right;
if (src1.getNumber(left) && src2.getNumber(right))
- return JSValue::encode(jsNumber(stackFrame.globalData, left * right));
+ return JSValue::encode(jsNumber(left * right));
CallFrame* callFrame = stackFrame.callFrame;
- JSValue result = jsNumber(stackFrame.globalData, src1.toNumber(callFrame) * src2.toNumber(callFrame));
+ JSValue result = jsNumber(src1.toNumber(callFrame) * src2.toNumber(callFrame));
CHECK_FOR_EXCEPTION_AT_END();
return JSValue::encode(result);
}
@@ -1622,94 +1935,238 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_mul)
DEFINE_STUB_FUNCTION(JSObject*, op_new_func)
{
STUB_INIT_STACK_FRAME(stackFrame);
-
+
+ ASSERT(stackFrame.callFrame->codeBlock()->codeType() != FunctionCode || !stackFrame.callFrame->codeBlock()->needsFullScopeChain() || stackFrame.callFrame->uncheckedR(stackFrame.callFrame->codeBlock()->activationRegister()).jsValue());
return stackFrame.args[0].function()->make(stackFrame.callFrame, stackFrame.callFrame->scopeChain());
}
-DEFINE_STUB_FUNCTION(void*, op_call_JSFunction)
+DEFINE_STUB_FUNCTION(void*, op_call_jitCompile)
{
STUB_INIT_STACK_FRAME(stackFrame);
#if !ASSERT_DISABLED
CallData callData;
- ASSERT(stackFrame.args[0].jsValue().getCallData(callData) == CallTypeJS);
+ ASSERT(stackFrame.callFrame->callee()->getCallData(callData) == CallTypeJS);
#endif
- JSFunction* function = asFunction(stackFrame.args[0].jsValue());
+ JSFunction* function = asFunction(stackFrame.callFrame->callee());
ASSERT(!function->isHostFunction());
FunctionExecutable* executable = function->jsExecutable();
ScopeChainNode* callDataScopeChain = function->scope().node();
- executable->jitCode(stackFrame.callFrame, callDataScopeChain);
+ JSObject* error = executable->compileForCall(stackFrame.callFrame, callDataScopeChain);
+ if (error) {
+ stackFrame.callFrame->globalData().exception = error;
+ return 0;
+ }
+ return function;
+}
+
+DEFINE_STUB_FUNCTION(void*, op_construct_jitCompile)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+#if !ASSERT_DISABLED
+ ConstructData constructData;
+ ASSERT(asFunction(stackFrame.callFrame->callee())->getConstructData(constructData) == ConstructTypeJS);
+#endif
+
+ JSFunction* function = asFunction(stackFrame.callFrame->callee());
+ ASSERT(!function->isHostFunction());
+ FunctionExecutable* executable = function->jsExecutable();
+ ScopeChainNode* callDataScopeChain = function->scope().node();
+ JSObject* error = executable->compileForConstruct(stackFrame.callFrame, callDataScopeChain);
+ if (error) {
+ stackFrame.callFrame->globalData().exception = error;
+ return 0;
+ }
return function;
}
-DEFINE_STUB_FUNCTION(VoidPtrPair, op_call_arityCheck)
+DEFINE_STUB_FUNCTION(void*, op_call_arityCheck)
{
STUB_INIT_STACK_FRAME(stackFrame);
CallFrame* callFrame = stackFrame.callFrame;
- JSFunction* callee = asFunction(stackFrame.args[0].jsValue());
+ JSFunction* callee = asFunction(callFrame->callee());
ASSERT(!callee->isHostFunction());
- CodeBlock* newCodeBlock = &callee->jsExecutable()->generatedBytecode();
- int argCount = stackFrame.args[2].int32();
+ CodeBlock* newCodeBlock = &callee->jsExecutable()->generatedBytecodeForCall();
+ int argCount = callFrame->argumentCountIncludingThis();
+ ReturnAddressPtr pc = callFrame->returnPC();
ASSERT(argCount != newCodeBlock->m_numParameters);
CallFrame* oldCallFrame = callFrame->callerFrame();
+ Register* r;
if (argCount > newCodeBlock->m_numParameters) {
size_t numParameters = newCodeBlock->m_numParameters;
- Register* r = callFrame->registers() + numParameters;
+ r = callFrame->registers() + numParameters;
+ Register* newEnd = r + newCodeBlock->m_numCalleeRegisters;
+ if (!stackFrame.registerFile->grow(newEnd)) {
+ // Rewind to the previous call frame because op_call already optimistically
+ // moved the call frame forward.
+ ExceptionHandler handler = jitThrow(stackFrame.globalData, oldCallFrame, createStackOverflowError(oldCallFrame), pc);
+ STUB_SET_RETURN_ADDRESS(handler.catchRoutine);
+ return handler.callFrame;
+ }
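+        // Too many arguments: the frame is moved up past the declared parameters,
+        // so copy the first numParameters arguments into the new frame's
+        // parameter slots.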
Register* argv = r - RegisterFile::CallFrameHeaderSize - numParameters - argCount;
for (size_t i = 0; i < numParameters; ++i)
argv[i + argCount] = argv[i];
-
- callFrame = CallFrame::create(r);
- callFrame->setCallerFrame(oldCallFrame);
} else {
size_t omittedArgCount = newCodeBlock->m_numParameters - argCount;
- Register* r = callFrame->registers() + omittedArgCount;
+ r = callFrame->registers() + omittedArgCount;
Register* newEnd = r + newCodeBlock->m_numCalleeRegisters;
if (!stackFrame.registerFile->grow(newEnd)) {
// Rewind to the previous call frame because op_call already optimistically
// moved the call frame forward.
- stackFrame.callFrame = oldCallFrame;
- throwStackOverflowError(oldCallFrame, stackFrame.globalData, stackFrame.args[1].returnAddress(), STUB_RETURN_ADDRESS);
- RETURN_POINTER_PAIR(0, 0);
+ ExceptionHandler handler = jitThrow(stackFrame.globalData, oldCallFrame, createStackOverflowError(oldCallFrame), pc);
+ STUB_SET_RETURN_ADDRESS(handler.catchRoutine);
+ return handler.callFrame;
}
Register* argv = r - RegisterFile::CallFrameHeaderSize - omittedArgCount;
for (size_t i = 0; i < omittedArgCount; ++i)
argv[i] = jsUndefined();
+ }
+
+ callFrame = CallFrame::create(r);
+ callFrame->setCallerFrame(oldCallFrame);
+ callFrame->setArgumentCountIncludingThis(argCount);
+ callFrame->setCallee(callee);
+ callFrame->setScopeChain(callee->scope().node());
+ callFrame->setReturnPC(pc.value());
+
+ ASSERT((void*)callFrame <= stackFrame.registerFile->end());
+ return callFrame;
+}
+
+DEFINE_STUB_FUNCTION(void*, op_construct_arityCheck)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSFunction* callee = asFunction(callFrame->callee());
+ ASSERT(!callee->isHostFunction());
+ CodeBlock* newCodeBlock = &callee->jsExecutable()->generatedBytecodeForConstruct();
+ int argCount = callFrame->argumentCountIncludingThis();
+ ReturnAddressPtr pc = callFrame->returnPC();
+
+ ASSERT(argCount != newCodeBlock->m_numParameters);
+
+ CallFrame* oldCallFrame = callFrame->callerFrame();
+
+ Register* r;
+ if (argCount > newCodeBlock->m_numParameters) {
+ size_t numParameters = newCodeBlock->m_numParameters;
+ r = callFrame->registers() + numParameters;
+ Register* newEnd = r + newCodeBlock->m_numCalleeRegisters;
+ if (!stackFrame.registerFile->grow(newEnd)) {
+ // Rewind to the previous call frame because op_call already optimistically
+ // moved the call frame forward.
+ ExceptionHandler handler = jitThrow(stackFrame.globalData, oldCallFrame, createStackOverflowError(oldCallFrame), pc);
+ STUB_SET_RETURN_ADDRESS(handler.catchRoutine);
+ return handler.callFrame;
+ }
- callFrame = CallFrame::create(r);
- callFrame->setCallerFrame(oldCallFrame);
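+        // As in op_call_arityCheck: copy the declared parameters up into the new
+        // frame's parameter slots.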
+ Register* argv = r - RegisterFile::CallFrameHeaderSize - numParameters - argCount;
+ for (size_t i = 0; i < numParameters; ++i)
+ argv[i + argCount] = argv[i];
+ } else {
+ size_t omittedArgCount = newCodeBlock->m_numParameters - argCount;
+ r = callFrame->registers() + omittedArgCount;
+ Register* newEnd = r + newCodeBlock->m_numCalleeRegisters;
+ if (!stackFrame.registerFile->grow(newEnd)) {
+ // Rewind to the previous call frame because op_call already optimistically
+ // moved the call frame forward.
+ ExceptionHandler handler = jitThrow(stackFrame.globalData, oldCallFrame, createStackOverflowError(oldCallFrame), pc);
+ STUB_SET_RETURN_ADDRESS(handler.catchRoutine);
+ return handler.callFrame;
+ }
+
+ Register* argv = r - RegisterFile::CallFrameHeaderSize - omittedArgCount;
+ for (size_t i = 0; i < omittedArgCount; ++i)
+ argv[i] = jsUndefined();
}
- RETURN_POINTER_PAIR(callee, callFrame);
+ callFrame = CallFrame::create(r);
+ callFrame->setCallerFrame(oldCallFrame);
+ callFrame->setArgumentCountIncludingThis(argCount);
+ callFrame->setCallee(callee);
+ callFrame->setScopeChain(callee->scope().node());
+ callFrame->setReturnPC(pc.value());
+
+ ASSERT((void*)callFrame <= stackFrame.registerFile->end());
+ return callFrame;
}
#if ENABLE(JIT_OPTIMIZE_CALL)
DEFINE_STUB_FUNCTION(void*, vm_lazyLinkCall)
{
STUB_INIT_STACK_FRAME(stackFrame);
- JSFunction* callee = asFunction(stackFrame.args[0].jsValue());
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSFunction* callee = asFunction(callFrame->callee());
ExecutableBase* executable = callee->executable();
- JITCode& jitCode = executable->generatedJITCode();
-
+
+ MacroAssemblerCodePtr codePtr;
+ CodeBlock* codeBlock = 0;
+ if (executable->isHostFunction())
+ codePtr = executable->generatedJITCodeForCall().addressForCall();
+ else {
+ FunctionExecutable* functionExecutable = static_cast<FunctionExecutable*>(executable);
+ JSObject* error = functionExecutable->compileForCall(callFrame, callee->scope().node());
+ if (error) {
+ callFrame->globalData().exception = createStackOverflowError(callFrame);
+ return 0;
+ }
+ codeBlock = &functionExecutable->generatedBytecodeForCall();
+ if (callFrame->argumentCountIncludingThis() == static_cast<size_t>(codeBlock->m_numParameters))
+ codePtr = functionExecutable->generatedJITCodeForCall().addressForCall();
+ else
+ codePtr = functionExecutable->generatedJITCodeForCallWithArityCheck();
+ }
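+    // Link lazily: the first call through this stub only marks the site as seen;
+    // a second call patches the site to jump straight to the target code.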
+ CallLinkInfo* callLinkInfo = &stackFrame.callFrame->callerFrame()->codeBlock()->getCallLinkInfo(callFrame->returnPC());
+
+ if (!callLinkInfo->seenOnce())
+ callLinkInfo->setSeen();
+ else
+ JIT::linkCall(callee, stackFrame.callFrame->callerFrame()->codeBlock(), codeBlock, codePtr, callLinkInfo, callFrame->argumentCountIncludingThis(), stackFrame.globalData);
+
+ return codePtr.executableAddress();
+}
+
+DEFINE_STUB_FUNCTION(void*, vm_lazyLinkConstruct)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSFunction* callee = asFunction(callFrame->callee());
+ ExecutableBase* executable = callee->executable();
+
+ MacroAssemblerCodePtr codePtr;
CodeBlock* codeBlock = 0;
- if (!executable->isHostFunction())
- codeBlock = &static_cast<FunctionExecutable*>(executable)->bytecode(stackFrame.callFrame, callee->scope().node());
- CallLinkInfo* callLinkInfo = &stackFrame.callFrame->callerFrame()->codeBlock()->getCallLinkInfo(stackFrame.args[1].returnAddress());
+ if (executable->isHostFunction())
+ codePtr = executable->generatedJITCodeForConstruct().addressForCall();
+ else {
+ FunctionExecutable* functionExecutable = static_cast<FunctionExecutable*>(executable);
+ JSObject* error = functionExecutable->compileForConstruct(callFrame, callee->scope().node());
+ if (error) {
+ throwStackOverflowError(callFrame, stackFrame.globalData, ReturnAddressPtr(callFrame->returnPC()), STUB_RETURN_ADDRESS);
+ return 0;
+ }
+ codeBlock = &functionExecutable->generatedBytecodeForConstruct();
+ if (callFrame->argumentCountIncludingThis() == static_cast<size_t>(codeBlock->m_numParameters))
+ codePtr = functionExecutable->generatedJITCodeForConstruct().addressForCall();
+ else
+ codePtr = functionExecutable->generatedJITCodeForConstructWithArityCheck();
+ }
+ CallLinkInfo* callLinkInfo = &stackFrame.callFrame->callerFrame()->codeBlock()->getCallLinkInfo(callFrame->returnPC());
if (!callLinkInfo->seenOnce())
callLinkInfo->setSeen();
else
- JIT::linkCall(callee, stackFrame.callFrame->callerFrame()->codeBlock(), codeBlock, jitCode, callLinkInfo, stackFrame.args[2].int32(), stackFrame.globalData);
+ JIT::linkConstruct(callee, stackFrame.callFrame->callerFrame()->codeBlock(), codeBlock, codePtr, callLinkInfo, callFrame->argumentCountIncludingThis(), stackFrame.globalData);
- return jitCode.addressForCall().executableAddress();
+ return codePtr.executableAddress();
}
#endif // !ENABLE(JIT_OPTIMIZE_CALL)
@@ -1729,7 +2186,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_call_NotJSFunction)
JSValue funcVal = stackFrame.args[0].jsValue();
CallData callData;
- CallType callType = funcVal.getCallData(callData);
+ CallType callType = getCallData(funcVal, callData);
ASSERT(callType != CallTypeJS);
@@ -1738,55 +2195,43 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_call_NotJSFunction)
int argCount = stackFrame.args[2].int32();
CallFrame* previousCallFrame = stackFrame.callFrame;
CallFrame* callFrame = CallFrame::create(previousCallFrame->registers() + registerOffset);
+ if (!stackFrame.registerFile->grow(callFrame->registers())) {
+ throwStackOverflowError(previousCallFrame, stackFrame.globalData, callFrame->returnPC(), STUB_RETURN_ADDRESS);
+ VM_THROW_EXCEPTION();
+ }
- callFrame->init(0, static_cast<Instruction*>((STUB_RETURN_ADDRESS).value()), previousCallFrame->scopeChain(), previousCallFrame, 0, argCount, 0);
- stackFrame.callFrame = callFrame;
-
- Register* argv = stackFrame.callFrame->registers() - RegisterFile::CallFrameHeaderSize - argCount;
- ArgList argList(argv + 1, argCount - 1);
+ callFrame->init(0, static_cast<Instruction*>((STUB_RETURN_ADDRESS).value()), previousCallFrame->scopeChain(), previousCallFrame, argCount, asObject(funcVal));
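+    // Host functions now take the CallFrame directly and read their arguments from it.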
- JSValue returnValue;
+ EncodedJSValue returnValue;
{
SamplingTool::HostCallRecord callRecord(CTI_SAMPLER);
-
- // FIXME: All host methods should be calling toThisObject, but this is not presently the case.
- JSValue thisValue = argv[0].jsValue();
- if (thisValue == jsNull())
- thisValue = callFrame->globalThisValue();
-
- returnValue = callData.native.function(callFrame, asObject(funcVal), thisValue, argList);
+ returnValue = callData.native.function(callFrame);
}
- stackFrame.callFrame = previousCallFrame;
- CHECK_FOR_EXCEPTION();
- return JSValue::encode(returnValue);
+ CHECK_FOR_EXCEPTION_AT_END();
+ return returnValue;
}
ASSERT(callType == CallTypeNone);
- CallFrame* callFrame = stackFrame.callFrame;
- CodeBlock* codeBlock = callFrame->codeBlock();
- unsigned vPCIndex = codeBlock->getBytecodeIndex(callFrame, STUB_RETURN_ADDRESS);
- stackFrame.globalData->exception = createNotAFunctionError(stackFrame.callFrame, funcVal, vPCIndex, codeBlock);
+ stackFrame.globalData->exception = createNotAFunctionError(stackFrame.callFrame, funcVal);
VM_THROW_EXCEPTION();
}
-DEFINE_STUB_FUNCTION(void, op_create_arguments)
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_create_arguments)
{
STUB_INIT_STACK_FRAME(stackFrame);
Arguments* arguments = new (stackFrame.globalData) Arguments(stackFrame.callFrame);
- stackFrame.callFrame->setCalleeArguments(arguments);
- stackFrame.callFrame[RegisterFile::ArgumentsRegister] = JSValue(arguments);
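+    // The caller is now responsible for storing the result into the arguments register.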
+ return JSValue::encode(JSValue(arguments));
}
-DEFINE_STUB_FUNCTION(void, op_create_arguments_no_params)
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_create_arguments_no_params)
{
STUB_INIT_STACK_FRAME(stackFrame);
Arguments* arguments = new (stackFrame.globalData) Arguments(stackFrame.callFrame, Arguments::NoParameters);
- stackFrame.callFrame->setCalleeArguments(arguments);
- stackFrame.callFrame[RegisterFile::ArgumentsRegister] = JSValue(arguments);
+ return JSValue::encode(JSValue(arguments));
}
DEFINE_STUB_FUNCTION(void, op_tear_off_activation)
@@ -1794,7 +2239,20 @@ DEFINE_STUB_FUNCTION(void, op_tear_off_activation)
STUB_INIT_STACK_FRAME(stackFrame);
ASSERT(stackFrame.callFrame->codeBlock()->needsFullScopeChain());
- asActivation(stackFrame.args[0].jsValue())->copyRegisters(stackFrame.callFrame->optionalCalleeArguments());
+ JSValue activationValue = stackFrame.args[0].jsValue();
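+    // With no activation, a non-strict arguments object (if present) copies the
+    // registers itself; otherwise the activation takes the registers and the
+    // arguments object is redirected to it.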
+ if (!activationValue) {
+ if (JSValue v = stackFrame.args[1].jsValue()) {
+ if (!stackFrame.callFrame->codeBlock()->isStrictMode())
+ asArguments(v)->copyRegisters();
+ }
+ return;
+ }
+ JSActivation* activation = asActivation(stackFrame.args[0].jsValue());
+ activation->copyRegisters();
+ if (JSValue v = stackFrame.args[1].jsValue()) {
+ if (!stackFrame.callFrame->codeBlock()->isStrictMode())
+ asArguments(v)->setActivation(activation);
+ }
}
DEFINE_STUB_FUNCTION(void, op_tear_off_arguments)
@@ -1802,8 +2260,7 @@ DEFINE_STUB_FUNCTION(void, op_tear_off_arguments)
STUB_INIT_STACK_FRAME(stackFrame);
ASSERT(stackFrame.callFrame->codeBlock()->usesArguments() && !stackFrame.callFrame->codeBlock()->needsFullScopeChain());
- if (stackFrame.callFrame->optionalCalleeArguments())
- stackFrame.callFrame->optionalCalleeArguments()->copyRegisters();
+ asArguments(stackFrame.args[0].jsValue())->copyRegisters();
}
DEFINE_STUB_FUNCTION(void, op_profile_will_call)
@@ -1860,69 +2317,46 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_resolve)
}
} while (++iter != end);
- CodeBlock* codeBlock = callFrame->codeBlock();
- unsigned vPCIndex = codeBlock->getBytecodeIndex(callFrame, STUB_RETURN_ADDRESS);
- stackFrame.globalData->exception = createUndefinedVariableError(callFrame, ident, vPCIndex, codeBlock);
+ stackFrame.globalData->exception = createUndefinedVariableError(callFrame, ident);
VM_THROW_EXCEPTION();
}
-DEFINE_STUB_FUNCTION(JSObject*, op_construct_JSConstruct)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSFunction* constructor = asFunction(stackFrame.args[0].jsValue());
- if (constructor->isHostFunction()) {
- CallFrame* callFrame = stackFrame.callFrame;
- CodeBlock* codeBlock = callFrame->codeBlock();
- unsigned vPCIndex = codeBlock->getBytecodeIndex(callFrame, STUB_RETURN_ADDRESS);
- stackFrame.globalData->exception = createNotAConstructorError(callFrame, constructor, vPCIndex, codeBlock);
- VM_THROW_EXCEPTION();
- }
-
-#if !ASSERT_DISABLED
- ConstructData constructData;
- ASSERT(constructor->getConstructData(constructData) == ConstructTypeJS);
-#endif
-
- Structure* structure;
- if (stackFrame.args[3].jsValue().isObject())
- structure = asObject(stackFrame.args[3].jsValue())->inheritorID();
- else
- structure = constructor->scope().node()->globalObject->emptyObjectStructure();
- return new (stackFrame.globalData) JSObject(structure);
-}
-
DEFINE_STUB_FUNCTION(EncodedJSValue, op_construct_NotJSConstruct)
{
STUB_INIT_STACK_FRAME(stackFrame);
- CallFrame* callFrame = stackFrame.callFrame;
-
JSValue constrVal = stackFrame.args[0].jsValue();
- int argCount = stackFrame.args[2].int32();
- int thisRegister = stackFrame.args[4].int32();
ConstructData constructData;
- ConstructType constructType = constrVal.getConstructData(constructData);
+ ConstructType constructType = getConstructData(constrVal, constructData);
+
+ ASSERT(constructType != ConstructTypeJS);
if (constructType == ConstructTypeHost) {
- ArgList argList(callFrame->registers() + thisRegister + 1, argCount - 1);
+ int registerOffset = stackFrame.args[1].int32();
+ int argCount = stackFrame.args[2].int32();
+ CallFrame* previousCallFrame = stackFrame.callFrame;
+ CallFrame* callFrame = CallFrame::create(previousCallFrame->registers() + registerOffset);
+ if (!stackFrame.registerFile->grow(callFrame->registers())) {
+ throwStackOverflowError(previousCallFrame, stackFrame.globalData, callFrame->returnPC(), STUB_RETURN_ADDRESS);
+ VM_THROW_EXCEPTION();
+ }
+
+ callFrame->init(0, static_cast<Instruction*>((STUB_RETURN_ADDRESS).value()), previousCallFrame->scopeChain(), previousCallFrame, argCount, asObject(constrVal));
- JSValue returnValue;
+ EncodedJSValue returnValue;
{
SamplingTool::HostCallRecord callRecord(CTI_SAMPLER);
- returnValue = constructData.native.function(callFrame, asObject(constrVal), argList);
+ returnValue = constructData.native.function(callFrame);
}
- CHECK_FOR_EXCEPTION();
- return JSValue::encode(returnValue);
+ CHECK_FOR_EXCEPTION_AT_END();
+ return returnValue;
}
ASSERT(constructType == ConstructTypeNone);
- CodeBlock* codeBlock = callFrame->codeBlock();
- unsigned vPCIndex = codeBlock->getBytecodeIndex(callFrame, STUB_RETURN_ADDRESS);
- stackFrame.globalData->exception = createNotAConstructorError(callFrame, constrVal, vPCIndex, codeBlock);
+ stackFrame.globalData->exception = createNotAConstructorError(stackFrame.callFrame, constrVal);
VM_THROW_EXCEPTION();
}
@@ -1936,31 +2370,38 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_val)
JSValue baseValue = stackFrame.args[0].jsValue();
JSValue subscript = stackFrame.args[1].jsValue();
- JSValue result;
+ if (LIKELY(baseValue.isCell() && subscript.isString())) {
+ Identifier propertyName(callFrame, asString(subscript)->value(callFrame));
+ PropertySlot slot(baseValue.asCell());
+        // JSString::value may have thrown, but we shouldn't find a property with a null identifier,
+        // so we will miss this case and wind up in the CHECK_FOR_EXCEPTION_AT_END below.
+ if (baseValue.asCell()->fastGetOwnPropertySlot(callFrame, propertyName, slot)) {
+ JSValue result = slot.getValue(callFrame, propertyName);
+ CHECK_FOR_EXCEPTION();
+ return JSValue::encode(result);
+ }
+ }
- if (LIKELY(subscript.isUInt32())) {
+ if (subscript.isUInt32()) {
uint32_t i = subscript.asUInt32();
- if (isJSArray(globalData, baseValue)) {
- JSArray* jsArray = asArray(baseValue);
- if (jsArray->canGetIndex(i))
- result = jsArray->getIndex(i);
- else
- result = jsArray->JSArray::get(callFrame, i);
- } else if (isJSString(globalData, baseValue) && asString(baseValue)->canGetIndex(i)) {
- // All fast byte array accesses are safe from exceptions so return immediately to avoid exception checks.
+ if (isJSString(globalData, baseValue) && asString(baseValue)->canGetIndex(i)) {
ctiPatchCallByReturnAddress(callFrame->codeBlock(), STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_val_string));
- result = asString(baseValue)->getIndex(callFrame, i);
- } else if (isJSByteArray(globalData, baseValue) && asByteArray(baseValue)->canAccessIndex(i)) {
+ JSValue result = asString(baseValue)->getIndex(callFrame, i);
+ CHECK_FOR_EXCEPTION();
+ return JSValue::encode(result);
+ }
+ if (isJSByteArray(globalData, baseValue) && asByteArray(baseValue)->canAccessIndex(i)) {
// All fast byte array accesses are safe from exceptions so return immediately to avoid exception checks.
ctiPatchCallByReturnAddress(callFrame->codeBlock(), STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_val_byte_array));
return JSValue::encode(asByteArray(baseValue)->getIndex(callFrame, i));
- } else
- result = baseValue.get(callFrame, i);
- } else {
- Identifier property(callFrame, subscript.toString(callFrame));
- result = baseValue.get(callFrame, property);
+ }
+ JSValue result = baseValue.get(callFrame, i);
+ CHECK_FOR_EXCEPTION();
+ return JSValue::encode(result);
}
-
+
+ Identifier property(callFrame, subscript.toString(callFrame));
+ JSValue result = baseValue.get(callFrame, property);
CHECK_FOR_EXCEPTION_AT_END();
return JSValue::encode(result);
}
@@ -2036,10 +2477,10 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_sub)
double left;
double right;
if (src1.getNumber(left) && src2.getNumber(right))
- return JSValue::encode(jsNumber(stackFrame.globalData, left - right));
+ return JSValue::encode(jsNumber(left - right));
CallFrame* callFrame = stackFrame.callFrame;
- JSValue result = jsNumber(stackFrame.globalData, src1.toNumber(callFrame) - src2.toNumber(callFrame));
+ JSValue result = jsNumber(src1.toNumber(callFrame) - src2.toNumber(callFrame));
CHECK_FOR_EXCEPTION_AT_END();
return JSValue::encode(result);
}
@@ -2084,7 +2525,7 @@ DEFINE_STUB_FUNCTION(void, op_put_by_val)
} else {
Identifier property(callFrame, subscript.toString(callFrame));
if (!stackFrame.globalData->exception) { // Don't put to an object if toString threw an exception.
- PutPropertySlot slot;
+ PutPropertySlot slot(callFrame->codeBlock()->isStrictMode());
baseValue.put(callFrame, property, value, slot);
}
}
@@ -2127,7 +2568,7 @@ DEFINE_STUB_FUNCTION(void, op_put_by_val_byte_array)
} else {
Identifier property(callFrame, subscript.toString(callFrame));
if (!stackFrame.globalData->exception) { // Don't put to an object if toString threw an exception.
- PutPropertySlot slot;
+ PutPropertySlot slot(callFrame->codeBlock()->isStrictMode());
baseValue.put(callFrame, property, value, slot);
}
}
@@ -2157,13 +2598,14 @@ DEFINE_STUB_FUNCTION(int, op_load_varargs)
if (!arguments) {
int providedParams = callFrame->registers()[RegisterFile::ArgumentCount].i() - 1;
argCount = providedParams;
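+        // Clamp to Arguments::MaxArguments (here and for each case below) so a huge
+        // argument count cannot force an enormous register-file allocation.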
+ argCount = min(argCount, static_cast<uint32_t>(Arguments::MaxArguments));
int32_t sizeDelta = argsOffset + argCount + RegisterFile::CallFrameHeaderSize;
Register* newEnd = callFrame->registers() + sizeDelta;
if (!registerFile->grow(newEnd) || ((newEnd - callFrame->registers()) != sizeDelta)) {
stackFrame.globalData->exception = createStackOverflowError(callFrame);
VM_THROW_EXCEPTION();
}
- int32_t expectedParams = callFrame->callee()->jsExecutable()->parameterCount();
+ int32_t expectedParams = asFunction(callFrame->callee())->jsExecutable()->parameterCount();
int32_t inplaceArgs = min(providedParams, expectedParams);
Register* inplaceArgsDst = callFrame->registers() + argsOffset;
@@ -2184,14 +2626,13 @@ DEFINE_STUB_FUNCTION(int, op_load_varargs)
} else if (!arguments.isUndefinedOrNull()) {
if (!arguments.isObject()) {
- CodeBlock* codeBlock = callFrame->codeBlock();
- unsigned vPCIndex = codeBlock->getBytecodeIndex(callFrame, STUB_RETURN_ADDRESS);
- stackFrame.globalData->exception = createInvalidParamError(callFrame, "Function.prototype.apply", arguments, vPCIndex, codeBlock);
+ stackFrame.globalData->exception = createInvalidParamError(callFrame, "Function.prototype.apply", arguments);
VM_THROW_EXCEPTION();
}
if (asObject(arguments)->classInfo() == &Arguments::info) {
Arguments* argsObject = asArguments(arguments);
argCount = argsObject->numProvidedArguments(callFrame);
+ argCount = min(argCount, static_cast<uint32_t>(Arguments::MaxArguments));
int32_t sizeDelta = argsOffset + argCount + RegisterFile::CallFrameHeaderSize;
Register* newEnd = callFrame->registers() + sizeDelta;
if (!registerFile->grow(newEnd) || ((newEnd - callFrame->registers()) != sizeDelta)) {
@@ -2202,6 +2643,7 @@ DEFINE_STUB_FUNCTION(int, op_load_varargs)
} else if (isJSArray(&callFrame->globalData(), arguments)) {
JSArray* array = asArray(arguments);
argCount = array->length();
+ argCount = min(argCount, static_cast<uint32_t>(Arguments::MaxArguments));
int32_t sizeDelta = argsOffset + argCount + RegisterFile::CallFrameHeaderSize;
Register* newEnd = callFrame->registers() + sizeDelta;
if (!registerFile->grow(newEnd) || ((newEnd - callFrame->registers()) != sizeDelta)) {
@@ -2212,6 +2654,7 @@ DEFINE_STUB_FUNCTION(int, op_load_varargs)
} else if (asObject(arguments)->inherits(&JSArray::info)) {
JSObject* argObject = asObject(arguments);
argCount = argObject->get(callFrame, callFrame->propertyNames().length).toUInt32(callFrame);
+ argCount = min(argCount, static_cast<uint32_t>(Arguments::MaxArguments));
int32_t sizeDelta = argsOffset + argCount + RegisterFile::CallFrameHeaderSize;
Register* newEnd = callFrame->registers() + sizeDelta;
if (!registerFile->grow(newEnd) || ((newEnd - callFrame->registers()) != sizeDelta)) {
@@ -2224,9 +2667,7 @@ DEFINE_STUB_FUNCTION(int, op_load_varargs)
CHECK_FOR_EXCEPTION();
}
} else {
- CodeBlock* codeBlock = callFrame->codeBlock();
- unsigned vPCIndex = codeBlock->getBytecodeIndex(callFrame, STUB_RETURN_ADDRESS);
- stackFrame.globalData->exception = createInvalidParamError(callFrame, "Function.prototype.apply", arguments, vPCIndex, codeBlock);
+ stackFrame.globalData->exception = createInvalidParamError(callFrame, "Function.prototype.apply", arguments);
VM_THROW_EXCEPTION();
}
}
@@ -2242,10 +2683,10 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_negate)
double v;
if (src.getNumber(v))
- return JSValue::encode(jsNumber(stackFrame.globalData, -v));
+ return JSValue::encode(jsNumber(-v));
CallFrame* callFrame = stackFrame.callFrame;
- JSValue result = jsNumber(stackFrame.globalData, -src.toNumber(callFrame));
+ JSValue result = jsNumber(-src.toNumber(callFrame));
CHECK_FOR_EXCEPTION_AT_END();
return JSValue::encode(result);
}
@@ -2254,9 +2695,35 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_resolve_base)
{
STUB_INIT_STACK_FRAME(stackFrame);
- return JSValue::encode(JSC::resolveBase(stackFrame.callFrame, stackFrame.args[0].identifier(), stackFrame.callFrame->scopeChain()));
+ return JSValue::encode(JSC::resolveBase(stackFrame.callFrame, stackFrame.args[0].identifier(), stackFrame.callFrame->scopeChain(), false));
}
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_resolve_base_strict_put)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+ JSValue base = JSC::resolveBase(stackFrame.callFrame, stackFrame.args[0].identifier(), stackFrame.callFrame->scopeChain(), true);
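+    // With the strict flag set, resolveBase returns an empty value rather than
+    // defaulting to the global object, so an unresolvable assignment throws here.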
+ if (!base) {
+ stackFrame.globalData->exception = createErrorForInvalidGlobalAssignment(stackFrame.callFrame, stackFrame.args[0].identifier().ustring());
+ VM_THROW_EXCEPTION();
+ }
+ return JSValue::encode(base);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_ensure_property_exists)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+ JSValue base = stackFrame.callFrame->r(stackFrame.args[0].int32()).jsValue();
+ JSObject* object = asObject(base);
+ PropertySlot slot(object);
+ ASSERT(stackFrame.callFrame->codeBlock()->isStrictMode());
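+    // In strict mode, assigning to a global property that does not already exist
+    // must throw instead of silently creating it.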
+ if (!object->getPropertySlot(stackFrame.callFrame, stackFrame.args[1].identifier(), slot)) {
+ stackFrame.globalData->exception = createErrorForInvalidGlobalAssignment(stackFrame.callFrame, stackFrame.args[1].identifier().ustring());
+ VM_THROW_EXCEPTION();
+ }
+
+ return JSValue::encode(base);
+}
+
DEFINE_STUB_FUNCTION(EncodedJSValue, op_resolve_skip)
{
STUB_INIT_STACK_FRAME(stackFrame);
@@ -2269,6 +2736,13 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_resolve_skip)
ScopeChainIterator iter = scopeChain->begin();
ScopeChainIterator end = scopeChain->end();
ASSERT(iter != end);
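+    // For function code that needs a full scope chain, the activation may not have
+    // been created yet; only skip the top of the chain if it actually exists.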
+ CodeBlock* codeBlock = callFrame->codeBlock();
+ bool checkTopLevel = codeBlock->codeType() == FunctionCode && codeBlock->needsFullScopeChain();
+ ASSERT(skip || !checkTopLevel);
+ if (checkTopLevel && skip--) {
+ if (callFrame->uncheckedR(codeBlock->activationRegister()).jsValue())
+ ++iter;
+ }
while (skip--) {
++iter;
ASSERT(iter != end);
@@ -2284,9 +2758,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_resolve_skip)
}
} while (++iter != end);
- CodeBlock* codeBlock = callFrame->codeBlock();
- unsigned vPCIndex = codeBlock->getBytecodeIndex(callFrame, STUB_RETURN_ADDRESS);
- stackFrame.globalData->exception = createUndefinedVariableError(callFrame, ident, vPCIndex, codeBlock);
+ stackFrame.globalData->exception = createUndefinedVariableError(callFrame, ident);
VM_THROW_EXCEPTION();
}
@@ -2295,16 +2767,17 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_resolve_global)
STUB_INIT_STACK_FRAME(stackFrame);
CallFrame* callFrame = stackFrame.callFrame;
- JSGlobalObject* globalObject = stackFrame.args[0].globalObject();
- Identifier& ident = stackFrame.args[1].identifier();
- unsigned globalResolveInfoIndex = stackFrame.args[2].int32();
+ CodeBlock* codeBlock = callFrame->codeBlock();
+ JSGlobalObject* globalObject = codeBlock->globalObject();
+ Identifier& ident = stackFrame.args[0].identifier();
+ unsigned globalResolveInfoIndex = stackFrame.args[1].int32();
ASSERT(globalObject->isGlobalObject());
PropertySlot slot(globalObject);
if (globalObject->getPropertySlot(callFrame, ident, slot)) {
JSValue result = slot.getValue(callFrame, ident);
- if (slot.isCacheable() && !globalObject->structure()->isUncacheableDictionary() && slot.slotBase() == globalObject) {
- GlobalResolveInfo& globalResolveInfo = callFrame->codeBlock()->globalResolveInfo(globalResolveInfoIndex);
+ if (slot.isCacheableValue() && !globalObject->structure()->isUncacheableDictionary() && slot.slotBase() == globalObject) {
+ GlobalResolveInfo& globalResolveInfo = codeBlock->globalResolveInfo(globalResolveInfoIndex);
if (globalResolveInfo.structure)
globalResolveInfo.structure->deref();
globalObject->structure()->ref();
@@ -2317,8 +2790,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_resolve_global)
return JSValue::encode(result);
}
- unsigned vPCIndex = callFrame->codeBlock()->getBytecodeIndex(callFrame, STUB_RETURN_ADDRESS);
- stackFrame.globalData->exception = createUndefinedVariableError(callFrame, ident, vPCIndex, callFrame->codeBlock());
+ stackFrame.globalData->exception = createUndefinedVariableError(callFrame, ident);
VM_THROW_EXCEPTION();
}
@@ -2332,10 +2804,10 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_div)
double left;
double right;
if (src1.getNumber(left) && src2.getNumber(right))
- return JSValue::encode(jsNumber(stackFrame.globalData, left / right));
+ return JSValue::encode(jsNumber(left / right));
CallFrame* callFrame = stackFrame.callFrame;
- JSValue result = jsNumber(stackFrame.globalData, src1.toNumber(callFrame) / src2.toNumber(callFrame));
+ JSValue result = jsNumber(src1.toNumber(callFrame) / src2.toNumber(callFrame));
CHECK_FOR_EXCEPTION_AT_END();
return JSValue::encode(result);
}
@@ -2347,7 +2819,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_pre_dec)
JSValue v = stackFrame.args[0].jsValue();
CallFrame* callFrame = stackFrame.callFrame;
- JSValue result = jsNumber(stackFrame.globalData, v.toNumber(callFrame) - 1);
+ JSValue result = jsNumber(v.toNumber(callFrame) - 1);
CHECK_FOR_EXCEPTION_AT_END();
return JSValue::encode(result);
}
@@ -2415,7 +2887,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_post_inc)
JSValue number = v.toJSNumber(callFrame);
CHECK_FOR_EXCEPTION_AT_END();
- callFrame->registers()[stackFrame.args[1].int32()] = jsNumber(stackFrame.globalData, number.uncheckedGetNumber() + 1);
+ callFrame->registers()[stackFrame.args[1].int32()] = jsNumber(number.uncheckedGetNumber() + 1);
return JSValue::encode(number);
}
@@ -2430,14 +2902,14 @@ DEFINE_STUB_FUNCTION(int, op_eq)
start:
if (src2.isUndefined()) {
return src1.isNull() ||
- (src1.isCell() && asCell(src1)->structure()->typeInfo().masqueradesAsUndefined()) ||
- src1.isUndefined();
+ (src1.isCell() && src1.asCell()->structure()->typeInfo().masqueradesAsUndefined())
+ || src1.isUndefined();
}
if (src2.isNull()) {
return src1.isUndefined() ||
- (src1.isCell() && asCell(src1)->structure()->typeInfo().masqueradesAsUndefined()) ||
- src1.isNull();
+ (src1.isCell() && src1.asCell()->structure()->typeInfo().masqueradesAsUndefined())
+ || src1.isNull();
}
if (src1.isInt32()) {
@@ -2473,27 +2945,27 @@ DEFINE_STUB_FUNCTION(int, op_eq)
}
if (src1.isUndefined())
- return src2.isCell() && asCell(src2)->structure()->typeInfo().masqueradesAsUndefined();
+ return src2.isCell() && src2.asCell()->structure()->typeInfo().masqueradesAsUndefined();
if (src1.isNull())
- return src2.isCell() && asCell(src2)->structure()->typeInfo().masqueradesAsUndefined();
+ return src2.isCell() && src2.asCell()->structure()->typeInfo().masqueradesAsUndefined();
- JSCell* cell1 = asCell(src1);
+ JSCell* cell1 = src1.asCell();
if (cell1->isString()) {
if (src2.isInt32())
- return static_cast<JSString*>(cell1)->value(stackFrame.callFrame).toDouble() == src2.asInt32();
+ return jsToNumber(static_cast<JSString*>(cell1)->value(stackFrame.callFrame)) == src2.asInt32();
if (src2.isDouble())
- return static_cast<JSString*>(cell1)->value(stackFrame.callFrame).toDouble() == src2.asDouble();
+ return jsToNumber(static_cast<JSString*>(cell1)->value(stackFrame.callFrame)) == src2.asDouble();
if (src2.isTrue())
- return static_cast<JSString*>(cell1)->value(stackFrame.callFrame).toDouble() == 1.0;
+ return jsToNumber(static_cast<JSString*>(cell1)->value(stackFrame.callFrame)) == 1.0;
if (src2.isFalse())
- return static_cast<JSString*>(cell1)->value(stackFrame.callFrame).toDouble() == 0.0;
+ return jsToNumber(static_cast<JSString*>(cell1)->value(stackFrame.callFrame)) == 0.0;
- JSCell* cell2 = asCell(src2);
+ JSCell* cell2 = src2.asCell();
if (cell2->isString())
return static_cast<JSString*>(cell1)->value(stackFrame.callFrame) == static_cast<JSString*>(cell2)->value(stackFrame.callFrame);
@@ -2517,10 +2989,9 @@ DEFINE_STUB_FUNCTION(int, op_eq)
#endif // USE(JSVALUE32_64)
}
-#if USE(JSVALUE32_64)
-
DEFINE_STUB_FUNCTION(int, op_eq_strings)
{
+#if USE(JSVALUE32_64)
STUB_INIT_STACK_FRAME(stackFrame);
JSString* string1 = stackFrame.args[0].jsString();
@@ -2529,9 +3000,12 @@ DEFINE_STUB_FUNCTION(int, op_eq_strings)
ASSERT(string1->isString());
ASSERT(string2->isString());
return string1->value(stackFrame.callFrame) == string2->value(stackFrame.callFrame);
-}
-
+#else
+ UNUSED_PARAM(args);
+ ASSERT_NOT_REACHED();
+ return 0;
#endif
+}
DEFINE_STUB_FUNCTION(EncodedJSValue, op_lshift)
{
@@ -2541,7 +3015,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_lshift)
JSValue shift = stackFrame.args[1].jsValue();
CallFrame* callFrame = stackFrame.callFrame;
- JSValue result = jsNumber(stackFrame.globalData, (val.toInt32(callFrame)) << (shift.toUInt32(callFrame) & 0x1f));
+ JSValue result = jsNumber((val.toInt32(callFrame)) << (shift.toUInt32(callFrame) & 0x1f));
CHECK_FOR_EXCEPTION_AT_END();
return JSValue::encode(result);
}
@@ -2555,7 +3029,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_bitand)
ASSERT(!src1.isInt32() || !src2.isInt32());
CallFrame* callFrame = stackFrame.callFrame;
- JSValue result = jsNumber(stackFrame.globalData, src1.toInt32(callFrame) & src2.toInt32(callFrame));
+ JSValue result = jsNumber(src1.toInt32(callFrame) & src2.toInt32(callFrame));
CHECK_FOR_EXCEPTION_AT_END();
return JSValue::encode(result);
}
@@ -2568,7 +3042,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_rshift)
JSValue shift = stackFrame.args[1].jsValue();
CallFrame* callFrame = stackFrame.callFrame;
- JSValue result = jsNumber(stackFrame.globalData, (val.toInt32(callFrame)) >> (shift.toUInt32(callFrame) & 0x1f));
+ JSValue result = jsNumber((val.toInt32(callFrame)) >> (shift.toUInt32(callFrame) & 0x1f));
CHECK_FOR_EXCEPTION_AT_END();
return JSValue::encode(result);
@@ -2582,7 +3056,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_bitnot)
ASSERT(!src.isInt32());
CallFrame* callFrame = stackFrame.callFrame;
- JSValue result = jsNumber(stackFrame.globalData, ~src.toInt32(callFrame));
+ JSValue result = jsNumber(~src.toInt32(callFrame));
CHECK_FOR_EXCEPTION_AT_END();
return JSValue::encode(result);
}
@@ -2616,9 +3090,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_resolve_with_base)
++iter;
} while (iter != end);
- CodeBlock* codeBlock = callFrame->codeBlock();
- unsigned vPCIndex = codeBlock->getBytecodeIndex(callFrame, STUB_RETURN_ADDRESS);
- stackFrame.globalData->exception = createUndefinedVariableError(callFrame, ident, vPCIndex, codeBlock);
+ stackFrame.globalData->exception = createUndefinedVariableError(callFrame, ident);
VM_THROW_EXCEPTION_AT_END();
return JSValue::encode(JSValue());
}
@@ -2630,6 +3102,7 @@ DEFINE_STUB_FUNCTION(JSObject*, op_new_func_exp)
FunctionExecutable* function = stackFrame.args[0].function();
JSFunction* func = function->make(callFrame, callFrame->scopeChain());
+ ASSERT(callFrame->codeBlock()->codeType() != FunctionCode || !callFrame->codeBlock()->needsFullScopeChain() || callFrame->uncheckedR(callFrame->codeBlock()->activationRegister()).jsValue());
/*
The Identifier in a FunctionExpression can be referenced from inside
@@ -2655,7 +3128,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_mod)
CallFrame* callFrame = stackFrame.callFrame;
double d = dividendValue.toNumber(callFrame);
- JSValue result = jsNumber(stackFrame.globalData, fmod(d, divisorValue.toNumber(callFrame)));
+ JSValue result = jsNumber(fmod(d, divisorValue.toNumber(callFrame)));
CHECK_FOR_EXCEPTION_AT_END();
return JSValue::encode(result);
}
@@ -2681,7 +3154,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_post_dec)
JSValue number = v.toJSNumber(callFrame);
CHECK_FOR_EXCEPTION_AT_END();
- callFrame->registers()[stackFrame.args[1].int32()] = jsNumber(stackFrame.globalData, number.uncheckedGetNumber() - 1);
+ callFrame->registers()[stackFrame.args[1].int32()] = jsNumber(number.uncheckedGetNumber() - 1);
return JSValue::encode(number);
}
@@ -2693,7 +3166,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_urshift)
JSValue shift = stackFrame.args[1].jsValue();
CallFrame* callFrame = stackFrame.callFrame;
- JSValue result = jsNumber(stackFrame.globalData, (val.toUInt32(callFrame)) >> (shift.toUInt32(callFrame) & 0x1f));
+ JSValue result = jsNumber((val.toUInt32(callFrame)) >> (shift.toUInt32(callFrame) & 0x1f));
CHECK_FOR_EXCEPTION_AT_END();
return JSValue::encode(result);
}
@@ -2707,7 +3180,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_bitxor)
CallFrame* callFrame = stackFrame.callFrame;
- JSValue result = jsNumber(stackFrame.globalData, src1.toInt32(callFrame) ^ src2.toInt32(callFrame));
+ JSValue result = jsNumber(src1.toInt32(callFrame) ^ src2.toInt32(callFrame));
CHECK_FOR_EXCEPTION_AT_END();
return JSValue::encode(result);
}
@@ -2716,7 +3189,7 @@ DEFINE_STUB_FUNCTION(JSObject*, op_new_regexp)
{
STUB_INIT_STACK_FRAME(stackFrame);
- return new (stackFrame.globalData) RegExpObject(stackFrame.callFrame->lexicalGlobalObject()->regExpStructure(), stackFrame.args[0].regExp());
+ return new (stackFrame.globalData) RegExpObject(stackFrame.callFrame->lexicalGlobalObject(), stackFrame.callFrame->lexicalGlobalObject()->regExpStructure(), stackFrame.args[0].regExp());
}
DEFINE_STUB_FUNCTION(EncodedJSValue, op_bitor)
@@ -2728,7 +3201,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_bitor)
CallFrame* callFrame = stackFrame.callFrame;
- JSValue result = jsNumber(stackFrame.globalData, src1.toInt32(callFrame) | src2.toInt32(callFrame));
+ JSValue result = jsNumber(src1.toInt32(callFrame) | src2.toInt32(callFrame));
CHECK_FOR_EXCEPTION_AT_END();
return JSValue::encode(result);
}
@@ -2736,6 +3209,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_bitor)
DEFINE_STUB_FUNCTION(EncodedJSValue, op_call_eval)
{
STUB_INIT_STACK_FRAME(stackFrame);
+ ASSERT(stackFrame.callFrame->codeBlock()->codeType() != FunctionCode || !stackFrame.callFrame->codeBlock()->needsFullScopeChain() || stackFrame.callFrame->uncheckedR(stackFrame.callFrame->codeBlock()->activationRegister()).jsValue());
CallFrame* callFrame = stackFrame.callFrame;
RegisterFile* registerFile = stackFrame.registerFile;
@@ -2748,47 +3222,24 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_call_eval)
Register* newCallFrame = callFrame->registers() + registerOffset;
Register* argv = newCallFrame - RegisterFile::CallFrameHeaderSize - argCount;
- JSValue thisValue = argv[0].jsValue();
+ JSValue baseValue = argv[0].jsValue();
JSGlobalObject* globalObject = callFrame->scopeChain()->globalObject;
- if (thisValue == globalObject && funcVal == globalObject->evalFunction()) {
- JSValue exceptionValue;
- JSValue result = interpreter->callEval(callFrame, registerFile, argv, argCount, registerOffset, exceptionValue);
- if (UNLIKELY(exceptionValue)) {
- stackFrame.globalData->exception = exceptionValue;
- VM_THROW_EXCEPTION_AT_END();
- }
+ if (baseValue == globalObject && funcVal == globalObject->evalFunction()) {
+ JSValue result = interpreter->callEval(callFrame, registerFile, argv, argCount, registerOffset);
+ CHECK_FOR_EXCEPTION_AT_END();
return JSValue::encode(result);
}
return JSValue::encode(JSValue());
}
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_throw)
+DEFINE_STUB_FUNCTION(void*, op_throw)
{
STUB_INIT_STACK_FRAME(stackFrame);
-
- CallFrame* callFrame = stackFrame.callFrame;
- CodeBlock* codeBlock = callFrame->codeBlock();
-
- unsigned vPCIndex = codeBlock->getBytecodeIndex(callFrame, STUB_RETURN_ADDRESS);
-
- JSValue exceptionValue = stackFrame.args[0].jsValue();
- ASSERT(exceptionValue);
-
- HandlerInfo* handler = stackFrame.globalData->interpreter->throwException(callFrame, exceptionValue, vPCIndex, true);
-
- if (!handler) {
- *stackFrame.exception = exceptionValue;
- STUB_SET_RETURN_ADDRESS(FunctionPtr(ctiOpThrowNotCaught).value());
- return JSValue::encode(jsNull());
- }
-
- stackFrame.callFrame = callFrame;
- void* catchRoutine = handler->nativeCode.executableAddress();
- ASSERT(catchRoutine);
- STUB_SET_RETURN_ADDRESS(catchRoutine);
- return JSValue::encode(exceptionValue);
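+    // jitThrow unwinds to the nearest handler (or out of JIT code entirely) and
+    // hands back both the catch routine and the call frame to resume in.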
+ ExceptionHandler handler = jitThrow(stackFrame.globalData, stackFrame.callFrame, stackFrame.args[0].jsValue(), STUB_RETURN_ADDRESS);
+ STUB_SET_RETURN_ADDRESS(handler.catchRoutine);
+ return handler.callFrame;
}
DEFINE_STUB_FUNCTION(JSPropertyNameIterator*, op_get_pnames)
@@ -2810,7 +3261,9 @@ DEFINE_STUB_FUNCTION(int, has_property)
JSObject* base = stackFrame.args[0].jsObject();
JSString* property = stackFrame.args[1].jsString();
- return base->hasProperty(stackFrame.callFrame, Identifier(stackFrame.callFrame, property->value(stackFrame.callFrame)));
+ int result = base->hasProperty(stackFrame.callFrame, Identifier(stackFrame.callFrame, property->value(stackFrame.callFrame)));
+ CHECK_FOR_EXCEPTION_AT_END();
+ return result;
}
DEFINE_STUB_FUNCTION(JSObject*, op_push_scope)
@@ -2887,7 +3340,9 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_stricteq)
JSValue src1 = stackFrame.args[0].jsValue();
JSValue src2 = stackFrame.args[1].jsValue();
- return JSValue::encode(jsBoolean(JSValue::strictEqual(stackFrame.callFrame, src1, src2)));
+ bool result = JSValue::strictEqual(stackFrame.callFrame, src1, src2);
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(jsBoolean(result));
}
DEFINE_STUB_FUNCTION(EncodedJSValue, op_to_primitive)
@@ -2913,7 +3368,9 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_nstricteq)
JSValue src1 = stackFrame.args[0].jsValue();
JSValue src2 = stackFrame.args[1].jsValue();
- return JSValue::encode(jsBoolean(!JSValue::strictEqual(stackFrame.callFrame, src1, src2)));
+ bool result = !JSValue::strictEqual(stackFrame.callFrame, src1, src2);
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(jsBoolean(result));
}
DEFINE_STUB_FUNCTION(EncodedJSValue, op_to_jsnumber)
@@ -2936,10 +3393,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_in)
JSValue baseVal = stackFrame.args[1].jsValue();
if (!baseVal.isObject()) {
- CallFrame* callFrame = stackFrame.callFrame;
- CodeBlock* codeBlock = callFrame->codeBlock();
- unsigned vPCIndex = codeBlock->getBytecodeIndex(callFrame, STUB_RETURN_ADDRESS);
- stackFrame.globalData->exception = createInvalidParamError(callFrame, "in", baseVal, vPCIndex, codeBlock);
+ stackFrame.globalData->exception = createInvalidParamError(stackFrame.callFrame, "in", baseVal);
VM_THROW_EXCEPTION();
}
@@ -3022,11 +3476,12 @@ DEFINE_STUB_FUNCTION(void*, op_switch_char)
void* result = codeBlock->characterSwitchJumpTable(tableIndex).ctiDefault.executableAddress();
if (scrutinee.isString()) {
- UString::Rep* value = asString(scrutinee)->value(callFrame).rep();
- if (value->size() == 1)
- result = codeBlock->characterSwitchJumpTable(tableIndex).ctiForValue(value->data()[0]).executableAddress();
+ StringImpl* value = asString(scrutinee)->value(callFrame).impl();
+ if (value->length() == 1)
+ result = codeBlock->characterSwitchJumpTable(tableIndex).ctiForValue(value->characters()[0]).executableAddress();
}
+ CHECK_FOR_EXCEPTION_AT_END();
return result;
}
@@ -3042,10 +3497,11 @@ DEFINE_STUB_FUNCTION(void*, op_switch_string)
void* result = codeBlock->stringSwitchJumpTable(tableIndex).ctiDefault.executableAddress();
if (scrutinee.isString()) {
- UString::Rep* value = asString(scrutinee)->value(callFrame).rep();
+ StringImpl* value = asString(scrutinee)->value(callFrame).impl();
result = codeBlock->stringSwitchJumpTable(tableIndex).ctiForValue(value).executableAddress();
}
+ CHECK_FOR_EXCEPTION_AT_END();
return result;
}
@@ -3059,19 +3515,22 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_del_by_val)
JSObject* baseObj = baseValue.toObject(callFrame); // may throw
JSValue subscript = stackFrame.args[1].jsValue();
- JSValue result;
+ bool result;
uint32_t i;
if (subscript.getUInt32(i))
- result = jsBoolean(baseObj->deleteProperty(callFrame, i));
+ result = baseObj->deleteProperty(callFrame, i);
else {
CHECK_FOR_EXCEPTION();
Identifier property(callFrame, subscript.toString(callFrame));
CHECK_FOR_EXCEPTION();
- result = jsBoolean(baseObj->deleteProperty(callFrame, property));
+ result = baseObj->deleteProperty(callFrame, property);
}
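+    // ES5 strict mode: a failed delete raises a TypeError rather than just returning false.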
+ if (!result && callFrame->codeBlock()->isStrictMode())
+ stackFrame.globalData->exception = createTypeError(stackFrame.callFrame, "Unable to delete property.");
+
CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(result);
+ return JSValue::encode(jsBoolean(result));
}
DEFINE_STUB_FUNCTION(void, op_put_getter)
@@ -3098,18 +3557,24 @@ DEFINE_STUB_FUNCTION(void, op_put_setter)
baseObj->defineSetter(callFrame, stackFrame.args[1].identifier(), asObject(stackFrame.args[2].jsValue()));
}
-DEFINE_STUB_FUNCTION(JSObject*, op_new_error)
+DEFINE_STUB_FUNCTION(void, op_throw_reference_error)
{
STUB_INIT_STACK_FRAME(stackFrame);
CallFrame* callFrame = stackFrame.callFrame;
- CodeBlock* codeBlock = callFrame->codeBlock();
- unsigned type = stackFrame.args[0].int32();
- JSValue message = stackFrame.args[1].jsValue();
- unsigned bytecodeOffset = stackFrame.args[2].int32();
+ UString message = stackFrame.args[0].jsValue().toString(callFrame);
+ stackFrame.globalData->exception = createReferenceError(callFrame, message);
+ VM_THROW_EXCEPTION_AT_END();
+}
+
+DEFINE_STUB_FUNCTION(void, op_throw_syntax_error)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
- unsigned lineNumber = codeBlock->lineNumberForBytecodeOffset(callFrame, bytecodeOffset);
- return Error::create(callFrame, static_cast<ErrorType>(type), message.toString(callFrame), lineNumber, codeBlock->ownerExecutable()->sourceID(), codeBlock->ownerExecutable()->sourceURL());
+ CallFrame* callFrame = stackFrame.callFrame;
+ UString message = stackFrame.args[0].jsValue().toString(callFrame);
+ stackFrame.globalData->exception = createSyntaxError(callFrame, message);
+ VM_THROW_EXCEPTION_AT_END();
}
DEFINE_STUB_FUNCTION(void, op_debug)
@@ -3125,32 +3590,13 @@ DEFINE_STUB_FUNCTION(void, op_debug)
stackFrame.globalData->interpreter->debug(callFrame, static_cast<DebugHookID>(debugHookID), firstLine, lastLine);
}
-DEFINE_STUB_FUNCTION(EncodedJSValue, vm_throw)
+DEFINE_STUB_FUNCTION(void*, vm_throw)
{
STUB_INIT_STACK_FRAME(stackFrame);
-
- CallFrame* callFrame = stackFrame.callFrame;
- CodeBlock* codeBlock = callFrame->codeBlock();
JSGlobalData* globalData = stackFrame.globalData;
-
- unsigned vPCIndex = codeBlock->getBytecodeIndex(callFrame, globalData->exceptionLocation);
-
- JSValue exceptionValue = globalData->exception;
- ASSERT(exceptionValue);
- globalData->exception = JSValue();
-
- HandlerInfo* handler = globalData->interpreter->throwException(callFrame, exceptionValue, vPCIndex, false);
-
- if (!handler) {
- *stackFrame.exception = exceptionValue;
- return JSValue::encode(jsNull());
- }
-
- stackFrame.callFrame = callFrame;
- void* catchRoutine = handler->nativeCode.executableAddress();
- ASSERT(catchRoutine);
- STUB_SET_RETURN_ADDRESS(catchRoutine);
- return JSValue::encode(exceptionValue);
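+    // Same unwinding path as cti_op_throw, except the exception and its location
+    // come from globalData rather than a stub argument.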
+ ExceptionHandler handler = jitThrow(globalData, stackFrame.callFrame, globalData->exception, globalData->exceptionLocation);
+ STUB_SET_RETURN_ADDRESS(handler.catchRoutine);
+ return handler.callFrame;
}
DEFINE_STUB_FUNCTION(EncodedJSValue, to_object)
@@ -3161,6 +3607,32 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, to_object)
return JSValue::encode(stackFrame.args[0].jsValue().toObject(callFrame));
}
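+// Lazily compiles trampolines: each ThunkGenerator runs at most once per VM, and
+// the resulting code is cached in m_ctiStubMap for reuse on later requests.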
+MacroAssemblerCodePtr JITThunks::ctiStub(JSGlobalData* globalData, ThunkGenerator generator)
+{
+ std::pair<CTIStubMap::iterator, bool> entry = m_ctiStubMap.add(generator, MacroAssemblerCodePtr());
+ if (entry.second)
+ entry.first->second = generator(globalData, m_executablePool.get());
+ return entry.first->second;
+}
+
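+// Host functions share one NativeExecutable per NativeFunction, so repeated
+// lookups return the same cached stub instead of compiling a new one.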
+PassRefPtr<NativeExecutable> JITThunks::hostFunctionStub(JSGlobalData* globalData, NativeFunction function)
+{
+ std::pair<HostFunctionStubMap::iterator, bool> entry = m_hostFunctionStubMap.add(function, 0);
+ if (entry.second)
+ entry.first->second = NativeExecutable::create(JIT::compileCTINativeCall(globalData, m_executablePool, function), function, ctiNativeConstruct(), callHostFunctionAsConstructor);
+ return entry.first->second;
+}
+
+PassRefPtr<NativeExecutable> JITThunks::hostFunctionStub(JSGlobalData* globalData, NativeFunction function, ThunkGenerator generator)
+{
+ std::pair<HostFunctionStubMap::iterator, bool> entry = m_hostFunctionStubMap.add(function, 0);
+ if (entry.second) {
+ MacroAssemblerCodePtr code = globalData->canUseJIT() ? generator(globalData, m_executablePool.get()) : MacroAssemblerCodePtr();
+ entry.first->second = NativeExecutable::create(code, function, ctiNativeConstruct(), callHostFunctionAsConstructor);
+ }
+ return entry.first->second;
+}
+
} // namespace JSC
#endif // ENABLE(JIT)
diff --git a/JavaScriptCore/jit/JITStubs.h b/JavaScriptCore/jit/JITStubs.h
index 17fd0d9..937134b 100644
--- a/JavaScriptCore/jit/JITStubs.h
+++ b/JavaScriptCore/jit/JITStubs.h
@@ -1,5 +1,6 @@
/*
* Copyright (C) 2008 Apple Inc. All rights reserved.
+ * Copyright (C) Research In Motion Limited 2010. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -29,10 +30,11 @@
#ifndef JITStubs_h
#define JITStubs_h
-#include <wtf/Platform.h>
-
+#include "CallData.h"
#include "MacroAssemblerCodeRef.h"
#include "Register.h"
+#include "ThunkGenerators.h"
+#include <wtf/HashMap.h>
#if ENABLE(JIT)
@@ -45,16 +47,16 @@ namespace JSC {
class FunctionExecutable;
class Identifier;
class JSGlobalData;
- class JSGlobalData;
+ class JSGlobalObject;
class JSObject;
class JSPropertyNameIterator;
class JSValue;
class JSValueEncodedAsPointer;
+ class NativeExecutable;
class Profiler;
class PropertySlot;
class PutPropertySlot;
class RegisterFile;
- class JSGlobalObject;
class RegExp;
union JITStubArg {
@@ -78,8 +80,11 @@ namespace JSC {
struct TrampolineStructure {
MacroAssemblerCodePtr ctiStringLengthTrampoline;
MacroAssemblerCodePtr ctiVirtualCallLink;
+ MacroAssemblerCodePtr ctiVirtualConstructLink;
MacroAssemblerCodePtr ctiVirtualCall;
- MacroAssemblerCodePtr ctiNativeCallThunk;
+ MacroAssemblerCodePtr ctiVirtualConstruct;
+ MacroAssemblerCodePtr ctiNativeCall;
+ MacroAssemblerCodePtr ctiNativeConstruct;
MacroAssemblerCodePtr ctiSoftModulo;
};
@@ -92,7 +97,7 @@ namespace JSC {
void* code;
RegisterFile* registerFile;
CallFrame* callFrame;
- JSValue* exception;
+ void* unused1;
Profiler** enabledProfilerReference;
JSGlobalData* globalData;
@@ -108,10 +113,10 @@ namespace JSC {
ReturnAddressPtr* returnAddressSlot() { return reinterpret_cast<ReturnAddressPtr*>(this) - 1; }
};
#elif CPU(X86)
-#if COMPILER(MSVC)
+#if COMPILER(MSVC) || (OS(WINDOWS) && COMPILER(GCC))
#pragma pack(push)
#pragma pack(4)
-#endif // COMPILER(MSVC)
+#endif // COMPILER(MSVC) || (OS(WINDOWS) && COMPILER(GCC))
struct JITStackFrame {
void* reserved; // Unused
JITStubArg args[6];
@@ -128,22 +133,22 @@ namespace JSC {
void* code;
RegisterFile* registerFile;
CallFrame* callFrame;
- JSValue* exception;
+ void* unused1;
Profiler** enabledProfilerReference;
JSGlobalData* globalData;
// When JIT code makes a call, it pushes its return address just below the rest of the stack.
ReturnAddressPtr* returnAddressSlot() { return reinterpret_cast<ReturnAddressPtr*>(this) - 1; }
};
-#if COMPILER(MSVC)
+#if COMPILER(MSVC) || (OS(WINDOWS) && COMPILER(GCC))
#pragma pack(pop)
-#endif // COMPILER(MSVC)
+#endif // COMPILER(MSVC) || (OS(WINDOWS) && COMPILER(GCC))
#elif CPU(ARM_THUMB2)
struct JITStackFrame {
- void* reserved; // Unused
+ JITStubArg reserved; // Unused
JITStubArg args[6];
-#if USE(JSVALUE32_64)
- void* padding[2]; // Maintain 16-byte stack alignment.
+#if USE(JSVALUE64)
+ void* padding; // Maintain 16-byte stack alignment.
#endif
ReturnAddressPtr thunkReturnAddress;
@@ -156,9 +161,7 @@ namespace JSC {
 // These arguments are passed in r1..r3 (r0 contained the entry code pointer, which is not preserved)
RegisterFile* registerFile;
CallFrame* callFrame;
- JSValue* exception;
-
- void* padding2;
+ void* unused1;
// These arguments passed on the stack.
Profiler** enabledProfilerReference;
@@ -167,6 +170,10 @@ namespace JSC {
ReturnAddressPtr* returnAddressSlot() { return &thunkReturnAddress; }
};
#elif CPU(ARM_TRADITIONAL)
+#if COMPILER(MSVC)
+#pragma pack(push)
+#pragma pack(4)
+#endif // COMPILER(MSVC)
struct JITStackFrame {
JITStubArg padding; // Unused
JITStubArg args[7];
@@ -182,7 +189,7 @@ namespace JSC {
RegisterFile* registerFile;
CallFrame* callFrame;
- JSValue* exception;
+ void* unused1;
// These arguments passed on the stack.
Profiler** enabledProfilerReference;
@@ -191,69 +198,88 @@ namespace JSC {
// When JIT code makes a call, it pushes its return address just below the rest of the stack.
ReturnAddressPtr* returnAddressSlot() { return &thunkReturnAddress; }
};
+#if COMPILER(MSVC)
+#pragma pack(pop)
+#endif // COMPILER(MSVC)
+#elif CPU(MIPS)
+ struct JITStackFrame {
+ JITStubArg reserved; // Unused
+ JITStubArg args[6];
+
+#if USE(JSVALUE32_64)
+ void* padding; // Make the overall stack length 8-byte aligned.
+#endif
+
+ void* preservedGP; // Store GP when using PIC code.
+ void* preservedS0;
+ void* preservedS1;
+ void* preservedS2;
+ void* preservedReturnAddress;
+
+ ReturnAddressPtr thunkReturnAddress;
+
+ // These arguments are passed in a1..a3 (a0 contained the entry code pointer, which is not preserved)
+ RegisterFile* registerFile;
+ CallFrame* callFrame;
+ void* unused1;
+
+ // These arguments are passed on the stack.
+ Profiler** enabledProfilerReference;
+ JSGlobalData* globalData;
+
+ ReturnAddressPtr* returnAddressSlot() { return &thunkReturnAddress; }
+ };
#else
#error "JITStackFrame not defined for this platform."
#endif
#define JITSTACKFRAME_ARGS_INDEX (OBJECT_OFFSETOF(JITStackFrame, args) / sizeof(void*))
-#if USE(JIT_STUB_ARGUMENT_VA_LIST)
- #define STUB_ARGS_DECLARATION void* args, ...
- #define STUB_ARGS (reinterpret_cast<void**>(vl_args) - 1)
+#define STUB_ARGS_DECLARATION void** args
+#define STUB_ARGS (args)
+#if CPU(X86)
#if COMPILER(MSVC)
- #define JIT_STUB __cdecl
- #else
- #define JIT_STUB
- #endif
-#else
- #define STUB_ARGS_DECLARATION void** args
- #define STUB_ARGS (args)
-
- #if CPU(X86) && COMPILER(MSVC)
#define JIT_STUB __fastcall
- #elif CPU(X86) && COMPILER(GCC)
+ #elif COMPILER(GCC)
#define JIT_STUB __attribute__ ((fastcall))
#else
- #define JIT_STUB
+ #error "JIT_STUB function calls require fastcall conventions on x86, add appropriate directive/attribute here for your compiler!"
#endif
-#endif
-
-#if CPU(X86_64)
- struct VoidPtrPair {
- void* first;
- void* second;
- };
- #define RETURN_POINTER_PAIR(a,b) VoidPtrPair pair = { a, b }; return pair
#else
- // MSVC doesn't support returning a two-value struct in two registers, so
- // we cast the struct to int64_t instead.
- typedef uint64_t VoidPtrPair;
- union VoidPtrPairUnion {
- struct { void* first; void* second; } s;
- VoidPtrPair i;
- };
- #define RETURN_POINTER_PAIR(a,b) VoidPtrPairUnion pair = {{ a, b }}; return pair.i
+ #define JIT_STUB
#endif
extern "C" void ctiVMThrowTrampoline();
extern "C" void ctiOpThrowNotCaught();
- extern "C" EncodedJSValue ctiTrampoline(void* code, RegisterFile*, CallFrame*, JSValue* exception, Profiler**, JSGlobalData*);
+ extern "C" EncodedJSValue ctiTrampoline(void* code, RegisterFile*, CallFrame*, void* /*unused1*/, Profiler**, JSGlobalData*);
class JITThunks {
public:
JITThunks(JSGlobalData*);
+ ~JITThunks();
static void tryCacheGetByID(CallFrame*, CodeBlock*, ReturnAddressPtr returnAddress, JSValue baseValue, const Identifier& propertyName, const PropertySlot&, StructureStubInfo* stubInfo);
- static void tryCachePutByID(CallFrame*, CodeBlock*, ReturnAddressPtr returnAddress, JSValue baseValue, const PutPropertySlot&, StructureStubInfo* stubInfo);
+ static void tryCachePutByID(CallFrame*, CodeBlock*, ReturnAddressPtr returnAddress, JSValue baseValue, const PutPropertySlot&, StructureStubInfo* stubInfo, bool direct);
MacroAssemblerCodePtr ctiStringLengthTrampoline() { return m_trampolineStructure.ctiStringLengthTrampoline; }
MacroAssemblerCodePtr ctiVirtualCallLink() { return m_trampolineStructure.ctiVirtualCallLink; }
+ MacroAssemblerCodePtr ctiVirtualConstructLink() { return m_trampolineStructure.ctiVirtualConstructLink; }
MacroAssemblerCodePtr ctiVirtualCall() { return m_trampolineStructure.ctiVirtualCall; }
- MacroAssemblerCodePtr ctiNativeCallThunk() { return m_trampolineStructure.ctiNativeCallThunk; }
+ MacroAssemblerCodePtr ctiVirtualConstruct() { return m_trampolineStructure.ctiVirtualConstruct; }
+ MacroAssemblerCodePtr ctiNativeCall() { return m_trampolineStructure.ctiNativeCall; }
+ MacroAssemblerCodePtr ctiNativeConstruct() { return m_trampolineStructure.ctiNativeConstruct; }
MacroAssemblerCodePtr ctiSoftModulo() { return m_trampolineStructure.ctiSoftModulo; }
+ MacroAssemblerCodePtr ctiStub(JSGlobalData* globalData, ThunkGenerator generator);
+
+ PassRefPtr<NativeExecutable> hostFunctionStub(JSGlobalData* globalData, NativeFunction func);
+ PassRefPtr<NativeExecutable> hostFunctionStub(JSGlobalData* globalData, NativeFunction func, ThunkGenerator generator);
private:
+ typedef HashMap<ThunkGenerator, MacroAssemblerCodePtr> CTIStubMap;
+ CTIStubMap m_ctiStubMap;
+ typedef HashMap<NativeFunction, RefPtr<NativeExecutable> > HostFunctionStubMap;
+ HostFunctionStubMap m_hostFunctionStubMap;
RefPtr<ExecutablePool> m_executablePool;
TrampolineStructure m_trampolineStructure;
@@ -268,13 +294,19 @@ extern "C" {
EncodedJSValue JIT_STUB cti_op_call_NotJSFunction(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_call_eval(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_construct_NotJSConstruct(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_create_this(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_convert_this(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_convert_this_strict(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_create_arguments(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_create_arguments_no_params(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_del_by_id(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_del_by_val(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_div(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_get_by_id(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_get_by_id_array_fail(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_get_by_id_custom_stub(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_get_by_id_generic(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_get_by_id_getter_stub(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_get_by_id_method_check(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_get_by_id_proto_fail(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_get_by_id_proto_list(STUB_ARGS_DECLARATION);
@@ -306,23 +338,22 @@ extern "C" {
EncodedJSValue JIT_STUB cti_op_pre_inc(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_resolve(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_resolve_base(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_resolve_base_strict_put(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_ensure_property_exists(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_resolve_global(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_resolve_global_dynamic(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_resolve_skip(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_resolve_with_base(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_rshift(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_strcat(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_stricteq(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_sub(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_op_throw(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_to_jsnumber(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_to_primitive(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_typeof(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_op_urshift(STUB_ARGS_DECLARATION);
- EncodedJSValue JIT_STUB cti_vm_throw(STUB_ARGS_DECLARATION);
EncodedJSValue JIT_STUB cti_to_object(STUB_ARGS_DECLARATION);
- JSObject* JIT_STUB cti_op_construct_JSConstruct(STUB_ARGS_DECLARATION);
JSObject* JIT_STUB cti_op_new_array(STUB_ARGS_DECLARATION);
- JSObject* JIT_STUB cti_op_new_error(STUB_ARGS_DECLARATION);
JSObject* JIT_STUB cti_op_new_func(STUB_ARGS_DECLARATION);
JSObject* JIT_STUB cti_op_new_func_exp(STUB_ARGS_DECLARATION);
JSObject* JIT_STUB cti_op_new_object(STUB_ARGS_DECLARATION);
@@ -332,11 +363,8 @@ extern "C" {
JSObject* JIT_STUB cti_op_push_scope(STUB_ARGS_DECLARATION);
JSObject* JIT_STUB cti_op_put_by_id_transition_realloc(STUB_ARGS_DECLARATION);
JSPropertyNameIterator* JIT_STUB cti_op_get_pnames(STUB_ARGS_DECLARATION);
- VoidPtrPair JIT_STUB cti_op_call_arityCheck(STUB_ARGS_DECLARATION);
int JIT_STUB cti_op_eq(STUB_ARGS_DECLARATION);
-#if USE(JSVALUE32_64)
int JIT_STUB cti_op_eq_strings(STUB_ARGS_DECLARATION);
-#endif
int JIT_STUB cti_op_jless(STUB_ARGS_DECLARATION);
int JIT_STUB cti_op_jlesseq(STUB_ARGS_DECLARATION);
int JIT_STUB cti_op_jtrue(STUB_ARGS_DECLARATION);
@@ -344,8 +372,7 @@ extern "C" {
int JIT_STUB cti_op_loop_if_lesseq(STUB_ARGS_DECLARATION);
int JIT_STUB cti_timeout_check(STUB_ARGS_DECLARATION);
int JIT_STUB cti_has_property(STUB_ARGS_DECLARATION);
- void JIT_STUB cti_op_create_arguments(STUB_ARGS_DECLARATION);
- void JIT_STUB cti_op_create_arguments_no_params(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_check_has_instance(STUB_ARGS_DECLARATION);
void JIT_STUB cti_op_debug(STUB_ARGS_DECLARATION);
void JIT_STUB cti_op_end(STUB_ARGS_DECLARATION);
void JIT_STUB cti_op_jmp_scopes(STUB_ARGS_DECLARATION);
@@ -355,6 +382,9 @@ extern "C" {
void JIT_STUB cti_op_put_by_id(STUB_ARGS_DECLARATION);
void JIT_STUB cti_op_put_by_id_fail(STUB_ARGS_DECLARATION);
void JIT_STUB cti_op_put_by_id_generic(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_put_by_id_direct(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_put_by_id_direct_fail(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_put_by_id_direct_generic(STUB_ARGS_DECLARATION);
void JIT_STUB cti_op_put_by_index(STUB_ARGS_DECLARATION);
void JIT_STUB cti_op_put_by_val(STUB_ARGS_DECLARATION);
void JIT_STUB cti_op_put_by_val_byte_array(STUB_ARGS_DECLARATION);
@@ -363,12 +393,20 @@ extern "C" {
void JIT_STUB cti_op_ret_scopeChain(STUB_ARGS_DECLARATION);
void JIT_STUB cti_op_tear_off_activation(STUB_ARGS_DECLARATION);
void JIT_STUB cti_op_tear_off_arguments(STUB_ARGS_DECLARATION);
- void JIT_STUB cti_register_file_check(STUB_ARGS_DECLARATION);
- void* JIT_STUB cti_op_call_JSFunction(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_throw_reference_error(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_throw_syntax_error(STUB_ARGS_DECLARATION);
+ void* JIT_STUB cti_op_call_arityCheck(STUB_ARGS_DECLARATION);
+ void* JIT_STUB cti_op_construct_arityCheck(STUB_ARGS_DECLARATION);
+ void* JIT_STUB cti_op_call_jitCompile(STUB_ARGS_DECLARATION);
+ void* JIT_STUB cti_op_construct_jitCompile(STUB_ARGS_DECLARATION);
void* JIT_STUB cti_op_switch_char(STUB_ARGS_DECLARATION);
void* JIT_STUB cti_op_switch_imm(STUB_ARGS_DECLARATION);
void* JIT_STUB cti_op_switch_string(STUB_ARGS_DECLARATION);
+ void* JIT_STUB cti_op_throw(STUB_ARGS_DECLARATION);
+ void* JIT_STUB cti_register_file_check(STUB_ARGS_DECLARATION);
void* JIT_STUB cti_vm_lazyLinkCall(STUB_ARGS_DECLARATION);
+ void* JIT_STUB cti_vm_lazyLinkConstruct(STUB_ARGS_DECLARATION);
+ void* JIT_STUB cti_vm_throw(STUB_ARGS_DECLARATION);
} // extern "C"
} // namespace JSC
diff --git a/JavaScriptCore/jit/JSInterfaceJIT.h b/JavaScriptCore/jit/JSInterfaceJIT.h
new file mode 100644
index 0000000..6453bab
--- /dev/null
+++ b/JavaScriptCore/jit/JSInterfaceJIT.h
@@ -0,0 +1,292 @@
+/*
+ * Copyright (C) 2010 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef JSInterfaceJIT_h
+#define JSInterfaceJIT_h
+
+#include "JITCode.h"
+#include "JITStubs.h"
+#include "JSImmediate.h"
+#include "MacroAssembler.h"
+#include "RegisterFile.h"
+#include <wtf/AlwaysInline.h>
+#include <wtf/Vector.h>
+
+namespace JSC {
+ class JSInterfaceJIT : public MacroAssembler {
+ public:
+ // NOTES:
+ //
+ // regT0 has two special meanings. The return value from a stub
+ // call will always be in regT0, and by default (unless
+ // a register is specified) emitPutVirtualRegister() will store
+ // the value from regT0.
+ //
+ // regT3 is required to be callee-preserved.
+ //
+ // tempRegister2 has no such dependencies. It is important that
+ // on x86/x86-64 it is ecx for performance reasons, since the
+ // MacroAssembler will need to plant register swaps if it is not -
+ // however the code will still function correctly.
+#if CPU(X86_64)
+ static const RegisterID returnValueRegister = X86Registers::eax;
+ static const RegisterID cachedResultRegister = X86Registers::eax;
+ static const RegisterID firstArgumentRegister = X86Registers::edi;
+
+ static const RegisterID timeoutCheckRegister = X86Registers::r12;
+ static const RegisterID callFrameRegister = X86Registers::r13;
+ static const RegisterID tagTypeNumberRegister = X86Registers::r14;
+ static const RegisterID tagMaskRegister = X86Registers::r15;
+
+ static const RegisterID regT0 = X86Registers::eax;
+ static const RegisterID regT1 = X86Registers::edx;
+ static const RegisterID regT2 = X86Registers::ecx;
+ static const RegisterID regT3 = X86Registers::ebx;
+
+ static const FPRegisterID fpRegT0 = X86Registers::xmm0;
+ static const FPRegisterID fpRegT1 = X86Registers::xmm1;
+ static const FPRegisterID fpRegT2 = X86Registers::xmm2;
+ static const FPRegisterID fpRegT3 = X86Registers::xmm3;
+#elif CPU(X86)
+ static const RegisterID returnValueRegister = X86Registers::eax;
+ static const RegisterID cachedResultRegister = X86Registers::eax;
+ // On x86 we always use fastcall conventions - but on
+ // OS X it might make more sense to just use regparm.
+ static const RegisterID firstArgumentRegister = X86Registers::ecx;
+
+ static const RegisterID timeoutCheckRegister = X86Registers::esi;
+ static const RegisterID callFrameRegister = X86Registers::edi;
+
+ static const RegisterID regT0 = X86Registers::eax;
+ static const RegisterID regT1 = X86Registers::edx;
+ static const RegisterID regT2 = X86Registers::ecx;
+ static const RegisterID regT3 = X86Registers::ebx;
+
+ static const FPRegisterID fpRegT0 = X86Registers::xmm0;
+ static const FPRegisterID fpRegT1 = X86Registers::xmm1;
+ static const FPRegisterID fpRegT2 = X86Registers::xmm2;
+ static const FPRegisterID fpRegT3 = X86Registers::xmm3;
+#elif CPU(ARM_THUMB2)
+ static const RegisterID returnValueRegister = ARMRegisters::r0;
+ static const RegisterID cachedResultRegister = ARMRegisters::r0;
+ static const RegisterID firstArgumentRegister = ARMRegisters::r0;
+
+ static const RegisterID regT0 = ARMRegisters::r0;
+ static const RegisterID regT1 = ARMRegisters::r1;
+ static const RegisterID regT2 = ARMRegisters::r2;
+ static const RegisterID regT3 = ARMRegisters::r4;
+
+ static const RegisterID callFrameRegister = ARMRegisters::r5;
+ static const RegisterID timeoutCheckRegister = ARMRegisters::r6;
+
+ static const FPRegisterID fpRegT0 = ARMRegisters::d0;
+ static const FPRegisterID fpRegT1 = ARMRegisters::d1;
+ static const FPRegisterID fpRegT2 = ARMRegisters::d2;
+ static const FPRegisterID fpRegT3 = ARMRegisters::d3;
+#elif CPU(ARM_TRADITIONAL)
+ static const RegisterID returnValueRegister = ARMRegisters::r0;
+ static const RegisterID cachedResultRegister = ARMRegisters::r0;
+ static const RegisterID firstArgumentRegister = ARMRegisters::r0;
+
+ static const RegisterID timeoutCheckRegister = ARMRegisters::r5;
+ static const RegisterID callFrameRegister = ARMRegisters::r4;
+
+ static const RegisterID regT0 = ARMRegisters::r0;
+ static const RegisterID regT1 = ARMRegisters::r1;
+ static const RegisterID regT2 = ARMRegisters::r2;
+ // Callee preserved
+ static const RegisterID regT3 = ARMRegisters::r7;
+
+ static const RegisterID regS0 = ARMRegisters::S0;
+ // Callee preserved
+ static const RegisterID regS1 = ARMRegisters::S1;
+
+ static const RegisterID regStackPtr = ARMRegisters::sp;
+ static const RegisterID regLink = ARMRegisters::lr;
+
+ static const FPRegisterID fpRegT0 = ARMRegisters::d0;
+ static const FPRegisterID fpRegT1 = ARMRegisters::d1;
+ static const FPRegisterID fpRegT2 = ARMRegisters::d2;
+ static const FPRegisterID fpRegT3 = ARMRegisters::d3;
+#elif CPU(MIPS)
+ static const RegisterID returnValueRegister = MIPSRegisters::v0;
+ static const RegisterID cachedResultRegister = MIPSRegisters::v0;
+ static const RegisterID firstArgumentRegister = MIPSRegisters::a0;
+
+ // regT0 must be v0 for returning a 32-bit value.
+ static const RegisterID regT0 = MIPSRegisters::v0;
+
+ // regT1 must be v1 for returning a pair of 32-bit values.
+ static const RegisterID regT1 = MIPSRegisters::v1;
+
+ static const RegisterID regT2 = MIPSRegisters::t4;
+
+ // regT3 must be callee-saved, so use an S register.
+ static const RegisterID regT3 = MIPSRegisters::s2;
+
+ static const RegisterID callFrameRegister = MIPSRegisters::s0;
+ static const RegisterID timeoutCheckRegister = MIPSRegisters::s1;
+
+ static const FPRegisterID fpRegT0 = MIPSRegisters::f4;
+ static const FPRegisterID fpRegT1 = MIPSRegisters::f6;
+ static const FPRegisterID fpRegT2 = MIPSRegisters::f8;
+ static const FPRegisterID fpRegT3 = MIPSRegisters::f10;
+#else
+#error "JIT not supported on this platform."
+#endif
+
+ inline Jump emitLoadJSCell(unsigned virtualRegisterIndex, RegisterID payload);
+ inline Jump emitLoadInt32(unsigned virtualRegisterIndex, RegisterID dst);
+ inline Jump emitLoadDouble(unsigned virtualRegisterIndex, FPRegisterID dst, RegisterID scratch);
+
+#if USE(JSVALUE32_64)
+ inline Jump emitJumpIfNotJSCell(unsigned virtualRegisterIndex);
+ inline Address tagFor(unsigned index, RegisterID base = callFrameRegister);
+#endif
+
+#if USE(JSVALUE64)
+ Jump emitJumpIfImmediateNumber(RegisterID reg);
+ Jump emitJumpIfNotImmediateNumber(RegisterID reg);
+ void emitFastArithImmToInt(RegisterID reg);
+#endif
+
+ inline Address payloadFor(unsigned index, RegisterID base = callFrameRegister);
+ inline Address addressFor(unsigned index, RegisterID base = callFrameRegister);
+ };
+
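+ // Offsets into JSString and StringImpl used by hand-written thunks, gathered
+ // here so the layout assumptions are explicit in one place.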
+ struct ThunkHelpers {
+ static unsigned stringImplDataOffset() { return StringImpl::dataOffset(); }
+ static unsigned jsStringLengthOffset() { return OBJECT_OFFSETOF(JSString, m_length); }
+ static unsigned jsStringValueOffset() { return OBJECT_OFFSETOF(JSString, m_value); }
+ };
+
+#if USE(JSVALUE32_64)
+ inline JSInterfaceJIT::Jump JSInterfaceJIT::emitLoadJSCell(unsigned virtualRegisterIndex, RegisterID payload)
+ {
+ loadPtr(payloadFor(virtualRegisterIndex), payload);
+ return emitJumpIfNotJSCell(virtualRegisterIndex);
+ }
+
+ inline JSInterfaceJIT::Jump JSInterfaceJIT::emitJumpIfNotJSCell(unsigned virtualRegisterIndex)
+ {
+ ASSERT(static_cast<int>(virtualRegisterIndex) < FirstConstantRegisterIndex);
+ return branch32(NotEqual, tagFor(virtualRegisterIndex), Imm32(JSValue::CellTag));
+ }
+
+ inline JSInterfaceJIT::Jump JSInterfaceJIT::emitLoadInt32(unsigned virtualRegisterIndex, RegisterID dst)
+ {
+ ASSERT(static_cast<int>(virtualRegisterIndex) < FirstConstantRegisterIndex);
+ loadPtr(payloadFor(virtualRegisterIndex), dst);
+ return branch32(NotEqual, tagFor(virtualRegisterIndex), Imm32(JSValue::Int32Tag));
+ }
+
+ inline JSInterfaceJIT::Address JSInterfaceJIT::tagFor(unsigned virtualRegisterIndex, RegisterID base)
+ {
+ ASSERT(static_cast<int>(virtualRegisterIndex) < FirstConstantRegisterIndex);
+ return Address(base, (virtualRegisterIndex * sizeof(Register)) + OBJECT_OFFSETOF(JSValue, u.asBits.tag));
+ }
+
+ inline JSInterfaceJIT::Address JSInterfaceJIT::payloadFor(unsigned virtualRegisterIndex, RegisterID base)
+ {
+ ASSERT(static_cast<int>(virtualRegisterIndex) < FirstConstantRegisterIndex);
+ return Address(base, (virtualRegisterIndex * sizeof(Register)) + OBJECT_OFFSETOF(JSValue, u.asBits.payload));
+ }
+
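+ // Returns a jump taken when the register holds neither an int32 nor a double;
+ // int32 payloads are converted to double, true doubles are loaded directly.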
+ inline JSInterfaceJIT::Jump JSInterfaceJIT::emitLoadDouble(unsigned virtualRegisterIndex, FPRegisterID dst, RegisterID scratch)
+ {
+ ASSERT(static_cast<int>(virtualRegisterIndex) < FirstConstantRegisterIndex);
+ loadPtr(tagFor(virtualRegisterIndex), scratch);
+ Jump isDouble = branch32(Below, scratch, Imm32(JSValue::LowestTag));
+ Jump notInt = branch32(NotEqual, scratch, Imm32(JSValue::Int32Tag));
+ loadPtr(payloadFor(virtualRegisterIndex), scratch);
+ convertInt32ToDouble(scratch, dst);
+ Jump done = jump();
+ isDouble.link(this);
+ loadDouble(addressFor(virtualRegisterIndex), dst);
+ done.link(this);
+ return notInt;
+ }
+#endif
+
+#if USE(JSVALUE64)
+ ALWAYS_INLINE JSInterfaceJIT::Jump JSInterfaceJIT::emitJumpIfImmediateNumber(RegisterID reg)
+ {
+ return branchTestPtr(NonZero, reg, tagTypeNumberRegister);
+ }
+ ALWAYS_INLINE JSInterfaceJIT::Jump JSInterfaceJIT::emitJumpIfNotImmediateNumber(RegisterID reg)
+ {
+ return branchTestPtr(Zero, reg, tagTypeNumberRegister);
+ }
+ inline JSInterfaceJIT::Jump JSInterfaceJIT::emitLoadJSCell(unsigned virtualRegisterIndex, RegisterID dst)
+ {
+ loadPtr(addressFor(virtualRegisterIndex), dst);
+ return branchTestPtr(NonZero, dst, tagMaskRegister);
+ }
+
+ inline JSInterfaceJIT::Jump JSInterfaceJIT::emitLoadInt32(unsigned virtualRegisterIndex, RegisterID dst)
+ {
+ loadPtr(addressFor(virtualRegisterIndex), dst);
+ Jump result = branchPtr(Below, dst, tagTypeNumberRegister);
+ zeroExtend32ToPtr(dst, dst);
+ return result;
+ }
+
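+ // On JSVALUE64, boxed doubles are stored offset by tagTypeNumberRegister;
+ // adding it back recovers the raw IEEE 754 bit pattern.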
+ inline JSInterfaceJIT::Jump JSInterfaceJIT::emitLoadDouble(unsigned virtualRegisterIndex, FPRegisterID dst, RegisterID scratch)
+ {
+ loadPtr(addressFor(virtualRegisterIndex), scratch);
+ Jump notNumber = emitJumpIfNotImmediateNumber(scratch);
+ Jump notInt = branchPtr(Below, scratch, tagTypeNumberRegister);
+ convertInt32ToDouble(scratch, dst);
+ Jump done = jump();
+ notInt.link(this);
+ addPtr(tagTypeNumberRegister, scratch);
+ movePtrToDouble(scratch, dst);
+ done.link(this);
+ return notNumber;
+ }
+
+ ALWAYS_INLINE void JSInterfaceJIT::emitFastArithImmToInt(RegisterID)
+ {
+ }
+
+#endif
+
+#if USE(JSVALUE64)
+ inline JSInterfaceJIT::Address JSInterfaceJIT::payloadFor(unsigned virtualRegisterIndex, RegisterID base)
+ {
+ ASSERT(static_cast<int>(virtualRegisterIndex) < FirstConstantRegisterIndex);
+ return addressFor(virtualRegisterIndex, base);
+ }
+#endif
+
+ inline JSInterfaceJIT::Address JSInterfaceJIT::addressFor(unsigned virtualRegisterIndex, RegisterID base)
+ {
+ ASSERT(static_cast<int>(virtualRegisterIndex) < FirstConstantRegisterIndex);
+ return Address(base, (virtualRegisterIndex * sizeof(Register)));
+ }
+
+}
+
+#endif // JSInterfaceJIT_h
diff --git a/JavaScriptCore/jit/SpecializedThunkJIT.h b/JavaScriptCore/jit/SpecializedThunkJIT.h
new file mode 100644
index 0000000..5c593d9
--- /dev/null
+++ b/JavaScriptCore/jit/SpecializedThunkJIT.h
@@ -0,0 +1,165 @@
+/*
+ * Copyright (C) 2010 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef SpecializedThunkJIT_h
+#define SpecializedThunkJIT_h
+
+#if ENABLE(JIT)
+
+#include "Executable.h"
+#include "JSInterfaceJIT.h"
+#include "LinkBuffer.h"
+
+namespace JSC {
+
+ class SpecializedThunkJIT : public JSInterfaceJIT {
+ public:
+ static const int ThisArgument = -1;
+ SpecializedThunkJIT(int expectedArgCount, JSGlobalData* globalData, ExecutablePool* pool)
+ : m_expectedArgCount(expectedArgCount)
+ , m_globalData(globalData)
+ , m_pool(pool)
+ {
+ // Check that we have the expected number of arguments
+ m_failures.append(branch32(NotEqual, Address(callFrameRegister, RegisterFile::ArgumentCount * (int)sizeof(Register)), Imm32(expectedArgCount + 1)));
+ }
+
+ void loadDoubleArgument(int argument, FPRegisterID dst, RegisterID scratch)
+ {
+ unsigned src = argumentToVirtualRegister(argument);
+ m_failures.append(emitLoadDouble(src, dst, scratch));
+ }
+
+ void loadCellArgument(int argument, RegisterID dst)
+ {
+ unsigned src = argumentToVirtualRegister(argument);
+ m_failures.append(emitLoadJSCell(src, dst));
+ }
+
+ void loadJSStringArgument(int argument, RegisterID dst)
+ {
+ loadCellArgument(argument, dst);
+ m_failures.append(branchPtr(NotEqual, Address(dst, 0), ImmPtr(m_globalData->jsStringVPtr)));
+ m_failures.append(branchTest32(NonZero, Address(dst, OBJECT_OFFSETOF(JSString, m_fiberCount))));
+ }
+
+ void loadInt32Argument(int argument, RegisterID dst, Jump& failTarget)
+ {
+ unsigned src = argumentToVirtualRegister(argument);
+ failTarget = emitLoadInt32(src, dst);
+ }
+
+ void loadInt32Argument(int argument, RegisterID dst)
+ {
+ Jump conversionFailed;
+ loadInt32Argument(argument, dst, conversionFailed);
+ m_failures.append(conversionFailed);
+ }
+
+ void appendFailure(const Jump& failure)
+ {
+ m_failures.append(failure);
+ }
+
+ void returnJSValue(RegisterID src)
+ {
+ if (src != regT0)
+ move(src, regT0);
+ loadPtr(Address(callFrameRegister, RegisterFile::CallerFrame * (int)sizeof(Register)), callFrameRegister);
+ ret();
+ }
+
+ void returnDouble(FPRegisterID src)
+ {
+#if USE(JSVALUE64)
+ moveDoubleToPtr(src, regT0);
+ subPtr(tagTypeNumberRegister, regT0);
+#else
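+ // JSVALUE32_64: spill the double just below the stack pointer, then reload
+ // its tag and payload halves into regT1/regT0 for the return convention.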
+ storeDouble(src, Address(stackPointerRegister, -(int)sizeof(double)));
+ loadPtr(Address(stackPointerRegister, OBJECT_OFFSETOF(JSValue, u.asBits.tag) - sizeof(double)), regT1);
+ loadPtr(Address(stackPointerRegister, OBJECT_OFFSETOF(JSValue, u.asBits.payload) - sizeof(double)), regT0);
+#endif
+ loadPtr(Address(callFrameRegister, RegisterFile::CallerFrame * (int)sizeof(Register)), callFrameRegister);
+ ret();
+ }
+
+ void returnInt32(RegisterID src)
+ {
+ if (src != regT0)
+ move(src, regT0);
+ tagReturnAsInt32();
+ loadPtr(Address(callFrameRegister, RegisterFile::CallerFrame * (int)sizeof(Register)), callFrameRegister);
+ ret();
+ }
+
+ void returnJSCell(RegisterID src)
+ {
+ if (src != regT0)
+ move(src, regT0);
+ tagReturnAsJSCell();
+ loadPtr(Address(callFrameRegister, RegisterFile::CallerFrame * (int)sizeof(Register)), callFrameRegister);
+ ret();
+ }
+
+ MacroAssemblerCodePtr finalize(MacroAssemblerCodePtr fallback)
+ {
+ LinkBuffer patchBuffer(this, m_pool.get(), 0);
+ patchBuffer.link(m_failures, CodeLocationLabel(fallback));
+ return patchBuffer.finalizeCode().m_code;
+ }
+
+ private:
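+ // Arguments are addressed at negative offsets below the call frame header;
+ // 'this' (index -1) lies furthest from the header.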
+ int argumentToVirtualRegister(unsigned argument)
+ {
+ return -static_cast<int>(RegisterFile::CallFrameHeaderSize + (m_expectedArgCount - argument));
+ }
+
+ void tagReturnAsInt32()
+ {
+#if USE(JSVALUE64)
+ orPtr(tagTypeNumberRegister, regT0);
+#else
+ move(Imm32(JSValue::Int32Tag), regT1);
+#endif
+ }
+
+ void tagReturnAsJSCell()
+ {
+#if USE(JSVALUE32_64)
+ move(Imm32(JSValue::CellTag), regT1);
+#endif
+ }
+
+ int m_expectedArgCount;
+ JSGlobalData* m_globalData;
+ RefPtr<ExecutablePool> m_pool;
+ MacroAssembler::JumpList m_failures;
+ };
+
+}
+
+#endif // ENABLE(JIT)
+
+#endif // SpecializedThunkJIT_h
diff --git a/JavaScriptCore/jit/ThunkGenerators.cpp b/JavaScriptCore/jit/ThunkGenerators.cpp
new file mode 100644
index 0000000..9b40f12
--- /dev/null
+++ b/JavaScriptCore/jit/ThunkGenerators.cpp
@@ -0,0 +1,162 @@
+/*
+ * Copyright (C) 2010 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "ThunkGenerators.h"
+
+#include "CodeBlock.h"
+#include <wtf/text/StringImpl.h>
+#include "SpecializedThunkJIT.h"
+
+#if ENABLE(JIT)
+
+namespace JSC {
+
+static void stringCharLoad(SpecializedThunkJIT& jit)
+{
+ // load string
+ jit.loadJSStringArgument(SpecializedThunkJIT::ThisArgument, SpecializedThunkJIT::regT0);
+ // regT0 now contains this, and is a non-rope JSString*
+
+ // Load string length to regT2, and start the process of loading the data pointer into regT0
+ jit.load32(MacroAssembler::Address(SpecializedThunkJIT::regT0, ThunkHelpers::jsStringLengthOffset()), SpecializedThunkJIT::regT2);
+ jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, ThunkHelpers::jsStringValueOffset()), SpecializedThunkJIT::regT0);
+ jit.loadPtr(MacroAssembler::Address(SpecializedThunkJIT::regT0, ThunkHelpers::stringImplDataOffset()), SpecializedThunkJIT::regT0);
+
+ // load index
+ jit.loadInt32Argument(0, SpecializedThunkJIT::regT1); // regT1 contains the index
+
+ // Do an unsigned compare to simultaneously filter negative indices as well as indices that are too large
+ jit.appendFailure(jit.branch32(MacroAssembler::AboveOrEqual, SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT2));
+
+ // Load the character
+ jit.load16(MacroAssembler::BaseIndex(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1, MacroAssembler::TimesTwo, 0), SpecializedThunkJIT::regT0);
+}
+
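+// Maps a character code to the VM's preallocated single-character string; fails
+// for codes >= 0x100 or for strings that have not been created yet.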
+static void charToString(SpecializedThunkJIT& jit, JSGlobalData* globalData, MacroAssembler::RegisterID src, MacroAssembler::RegisterID dst, MacroAssembler::RegisterID scratch)
+{
+ jit.appendFailure(jit.branch32(MacroAssembler::AboveOrEqual, src, MacroAssembler::Imm32(0x100)));
+ jit.move(MacroAssembler::ImmPtr(globalData->smallStrings.singleCharacterStrings()), scratch);
+ jit.loadPtr(MacroAssembler::BaseIndex(scratch, src, MacroAssembler::ScalePtr, 0), dst);
+ jit.appendFailure(jit.branchTestPtr(MacroAssembler::Zero, dst));
+}
+
+MacroAssemblerCodePtr charCodeAtThunkGenerator(JSGlobalData* globalData, ExecutablePool* pool)
+{
+ SpecializedThunkJIT jit(1, globalData, pool);
+ stringCharLoad(jit);
+ jit.returnInt32(SpecializedThunkJIT::regT0);
+ return jit.finalize(globalData->jitStubs->ctiNativeCall());
+}
+
+MacroAssemblerCodePtr charAtThunkGenerator(JSGlobalData* globalData, ExecutablePool* pool)
+{
+ SpecializedThunkJIT jit(1, globalData, pool);
+ stringCharLoad(jit);
+ charToString(jit, globalData, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
+ jit.returnJSCell(SpecializedThunkJIT::regT0);
+ return jit.finalize(globalData->jitStubs->ctiNativeCall());
+}
+
+MacroAssemblerCodePtr fromCharCodeThunkGenerator(JSGlobalData* globalData, ExecutablePool* pool)
+{
+ SpecializedThunkJIT jit(1, globalData, pool);
+ // load char code
+ jit.loadInt32Argument(0, SpecializedThunkJIT::regT0);
+ charToString(jit, globalData, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
+ jit.returnJSCell(SpecializedThunkJIT::regT0);
+ return jit.finalize(globalData->jitStubs->ctiNativeCall());
+}
+
+MacroAssemblerCodePtr sqrtThunkGenerator(JSGlobalData* globalData, ExecutablePool* pool)
+{
+ SpecializedThunkJIT jit(1, globalData, pool);
+ if (!jit.supportsFloatingPointSqrt())
+ return globalData->jitStubs->ctiNativeCall();
+
+ jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
+ jit.sqrtDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
+ jit.returnDouble(SpecializedThunkJIT::fpRegT0);
+ return jit.finalize(globalData->jitStubs->ctiNativeCall());
+}
+
+static const double oneConstant = 1.0;
+static const double negativeHalfConstant = -0.5;
+
+MacroAssemblerCodePtr powThunkGenerator(JSGlobalData* globalData, ExecutablePool* pool)
+{
+ SpecializedThunkJIT jit(2, globalData, pool);
+ if (!jit.supportsFloatingPoint())
+ return globalData->jitStubs->ctiNativeCall();
+
+ jit.loadDouble(&oneConstant, SpecializedThunkJIT::fpRegT1);
+ jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
+ MacroAssembler::Jump nonIntExponent;
+ jit.loadInt32Argument(1, SpecializedThunkJIT::regT0, nonIntExponent);
+ jit.appendFailure(jit.branch32(MacroAssembler::LessThan, SpecializedThunkJIT::regT0, MacroAssembler::Imm32(0)));
+
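+ // Non-negative integer exponent: exponentiation by squaring. fpRegT1
+ // accumulates the result while fpRegT0 is repeatedly squared and the
+ // exponent is shifted right.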
+ MacroAssembler::Jump exponentIsZero = jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT0);
+ MacroAssembler::Label startLoop(jit.label());
+
+ MacroAssembler::Jump exponentIsEven = jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT0, MacroAssembler::Imm32(1));
+ jit.mulDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
+ exponentIsEven.link(&jit);
+ jit.mulDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
+ jit.rshift32(MacroAssembler::Imm32(1), SpecializedThunkJIT::regT0);
+ jit.branchTest32(MacroAssembler::NonZero, SpecializedThunkJIT::regT0).linkTo(startLoop, &jit);
+
+ exponentIsZero.link(&jit);
+
+ {
+ SpecializedThunkJIT::JumpList doubleResult;
+ jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT1, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT0);
+ jit.returnInt32(SpecializedThunkJIT::regT0);
+ doubleResult.link(&jit);
+ jit.returnDouble(SpecializedThunkJIT::fpRegT1);
+ }
+
+ if (jit.supportsFloatingPointSqrt()) {
+ nonIntExponent.link(&jit);
+ jit.loadDouble(&negativeHalfConstant, SpecializedThunkJIT::fpRegT3);
+ jit.loadDoubleArgument(1, SpecializedThunkJIT::fpRegT2, SpecializedThunkJIT::regT0);
+ jit.appendFailure(jit.branchDouble(MacroAssembler::DoubleLessThanOrEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
+ jit.appendFailure(jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, SpecializedThunkJIT::fpRegT2, SpecializedThunkJIT::fpRegT3));
+ jit.sqrtDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
+ jit.divDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
+
+ SpecializedThunkJIT::JumpList doubleResult;
+ jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT1, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT0);
+ jit.returnInt32(SpecializedThunkJIT::regT0);
+ doubleResult.link(&jit);
+ jit.returnDouble(SpecializedThunkJIT::fpRegT1);
+ } else
+ jit.appendFailure(nonIntExponent);
+
+ return jit.finalize(globalData->jitStubs->ctiNativeCall());
+}
+
+}
+
+#endif // ENABLE(JIT)
diff --git a/JavaScriptCore/jit/ThunkGenerators.h b/JavaScriptCore/jit/ThunkGenerators.h
new file mode 100644
index 0000000..15261f7
--- /dev/null
+++ b/JavaScriptCore/jit/ThunkGenerators.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2010 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef ThunkGenerators_h
+#define ThunkGenerators_h
+
+#if ENABLE(JIT)
+namespace JSC {
+ class ExecutablePool;
+ class JSGlobalData;
+ class NativeExecutable;
+ class MacroAssemblerCodePtr;
+
+ typedef MacroAssemblerCodePtr (*ThunkGenerator)(JSGlobalData*, ExecutablePool*);
+ MacroAssemblerCodePtr charCodeAtThunkGenerator(JSGlobalData*, ExecutablePool*);
+ MacroAssemblerCodePtr charAtThunkGenerator(JSGlobalData*, ExecutablePool*);
+ MacroAssemblerCodePtr fromCharCodeThunkGenerator(JSGlobalData*, ExecutablePool*);
+ MacroAssemblerCodePtr sqrtThunkGenerator(JSGlobalData*, ExecutablePool*);
+ MacroAssemblerCodePtr powThunkGenerator(JSGlobalData*, ExecutablePool*);
+}
+#endif
+
+#endif // ThunkGenerators_h