author    Feng Qian <fqian@google.com>    2009-06-17 12:12:20 -0700
committer Feng Qian <fqian@google.com>    2009-06-17 12:12:20 -0700
commit    5f1ab04193ad0130ca8204aadaceae083aca9881 (patch)
tree      5a92cd389e2cfe7fb67197ce14b38469462379f8 /JavaScriptCore/jit
parent    194315e5a908cc8ed67d597010544803eef1ac59 (diff)
Get WebKit r44544.
Diffstat (limited to 'JavaScriptCore/jit')
-rw-r--r--  JavaScriptCore/jit/ExecutableAllocator.h              |   87
-rw-r--r--  JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp |  447
-rw-r--r--  JavaScriptCore/jit/ExecutableAllocatorPosix.cpp       |   30
-rw-r--r--  JavaScriptCore/jit/ExecutableAllocatorWin.cpp         |    4
-rw-r--r--  JavaScriptCore/jit/JIT.cpp                            | 1988
-rw-r--r--  JavaScriptCore/jit/JIT.h                              |  466
-rw-r--r--  JavaScriptCore/jit/JITArithmetic.cpp                  | 1200
-rw-r--r--  JavaScriptCore/jit/JITCall.cpp                        |  142
-rw-r--r--  JavaScriptCore/jit/JITCode.h                          |   64
-rw-r--r--  JavaScriptCore/jit/JITInlineMethods.h                 |  179
-rw-r--r--  JavaScriptCore/jit/JITOpcodes.cpp                     | 1183
-rw-r--r--  JavaScriptCore/jit/JITPropertyAccess.cpp              |  587
-rw-r--r--  JavaScriptCore/jit/JITStubCall.h                      |  170
-rw-r--r--  JavaScriptCore/jit/JITStubs.cpp                       | 1992
-rw-r--r--  JavaScriptCore/jit/JITStubs.h                         |  410
15 files changed, 5604 insertions, 3345 deletions
diff --git a/JavaScriptCore/jit/ExecutableAllocator.h b/JavaScriptCore/jit/ExecutableAllocator.h
index 0cb78ad..a545b0c 100644
--- a/JavaScriptCore/jit/ExecutableAllocator.h
+++ b/JavaScriptCore/jit/ExecutableAllocator.h
@@ -26,18 +26,29 @@
#ifndef ExecutableAllocator_h
#define ExecutableAllocator_h
-#if ENABLE(ASSEMBLER)
-
+#include <limits>
#include <wtf/Assertions.h>
#include <wtf/PassRefPtr.h>
#include <wtf/RefCounted.h>
+#include <wtf/UnusedParam.h>
#include <wtf/Vector.h>
-#include <limits>
+#if PLATFORM(IPHONE)
+#include <libkern/OSCacheControl.h>
+#include <sys/mman.h>
+#endif
#define JIT_ALLOCATOR_PAGE_SIZE (ExecutableAllocator::pageSize)
#define JIT_ALLOCATOR_LARGE_ALLOC_SIZE (ExecutableAllocator::pageSize * 4)
+#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
+#define PROTECTION_FLAGS_RW (PROT_READ | PROT_WRITE)
+#define PROTECTION_FLAGS_RX (PROT_READ | PROT_EXEC)
+#define INITIAL_PROTECTION_FLAGS PROTECTION_FLAGS_RX
+#else
+#define INITIAL_PROTECTION_FLAGS (PROT_READ | PROT_WRITE | PROT_EXEC)
+#endif
+
namespace JSC {
inline size_t roundUpAllocationSize(size_t request, size_t granularity)
@@ -52,6 +63,12 @@ inline size_t roundUpAllocationSize(size_t request, size_t granularity)
return size;
}
+}
+
+#if ENABLE(ASSEMBLER)
+
+namespace JSC {
+
class ExecutablePool : public RefCounted<ExecutablePool> {
private:
struct Allocation {
@@ -108,6 +125,8 @@ private:
};
class ExecutableAllocator {
+ enum ProtectionSeting { Writable, Executable };
+
public:
static size_t pageSize;
ExecutableAllocator()
@@ -137,7 +156,69 @@ public:
return pool.release();
}
+#if ENABLE(ASSEMBLER_WX_EXCLUSIVE) || !(PLATFORM(X86) || PLATFORM(X86_64))
+ static void makeWritable(void* start, size_t size)
+ {
+ reprotectRegion(start, size, Writable);
+ }
+
+ static void makeExecutable(void* start, size_t size)
+ {
+ reprotectRegion(start, size, Executable);
+ cacheFlush(start, size);
+ }
+
+ // If ASSEMBLER_WX_EXCLUSIVE protection is turned on, or on non-x86 platforms,
+ // we need to track start & size so we can makeExecutable/cacheFlush at the end.
+ class MakeWritable {
+ public:
+ MakeWritable(void* start, size_t size)
+ : m_start(start)
+ , m_size(size)
+ {
+ makeWritable(start, size);
+ }
+
+ ~MakeWritable()
+ {
+ makeExecutable(m_start, m_size);
+ }
+
+ private:
+ void* m_start;
+ size_t m_size;
+ };
+#else
+ static void makeWritable(void*, size_t) {}
+ static void makeExecutable(void*, size_t) {}
+
+ // On x86, without ASSEMBLER_WX_EXCLUSIVE, there is nothing to do here.
+ class MakeWritable { public: MakeWritable(void*, size_t) {} };
+#endif
+
private:
+
+#if ENABLE(ASSEMBLER_WX_EXCLUSIVE) || !(PLATFORM(X86) || PLATFORM(X86_64))
+#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
+ static void reprotectRegion(void*, size_t, ProtectionSeting);
+#else
+ static void reprotectRegion(void*, size_t, ProtectionSeting) {}
+#endif
+
+ static void cacheFlush(void* code, size_t size)
+ {
+#if PLATFORM(X86) || PLATFORM(X86_64)
+ UNUSED_PARAM(code);
+ UNUSED_PARAM(size);
+#elif PLATFORM(ARM_V7) && PLATFORM(IPHONE)
+ sys_dcache_flush(code, size);
+ sys_icache_invalidate(code, size);
+#else
+#error "ExecutableAllocator::cacheFlush not implemented on this platform."
+#endif
+ }
+#endif
+
RefPtr<ExecutablePool> m_smallAllocationPool;
static void intializePageSize();
};
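
A note on usage may help here: MakeWritable is an RAII guard, so a code-patching routine holds it for the duration of the write and the destructor restores execute permission. A minimal sketch (patchOneInstruction and its arguments are hypothetical; only the guard itself comes from this header):

    // Hypothetical helper: rewrite generated code in place under W^X.
    void patchOneInstruction(void* where, size_t length)
    {
        // Drops the region to RW (a no-op on plain x86 builds).
        ExecutableAllocator::MakeWritable guard(where, length);

        // ... overwrite up to 'length' bytes of machine code here ...

        // ~MakeWritable() runs makeExecutable(), reprotecting the region
        // to RX and flushing the instruction cache where required.
    }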
diff --git a/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp b/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp
new file mode 100644
index 0000000..7682b9c
--- /dev/null
+++ b/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp
@@ -0,0 +1,447 @@
+/*
+ * Copyright (C) 2009 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#include "ExecutableAllocator.h"
+
+#include <errno.h>
+
+#if ENABLE(ASSEMBLER) && PLATFORM(MAC) && PLATFORM(X86_64)
+
+#include "TCSpinLock.h"
+#include <mach/mach_init.h>
+#include <mach/vm_map.h>
+#include <sys/mman.h>
+#include <unistd.h>
+#include <wtf/AVLTree.h>
+#include <wtf/VMTags.h>
+
+using namespace WTF;
+
+namespace JSC {
+
+#define TWO_GB (2u * 1024u * 1024u * 1024u)
+#define SIXTEEN_MB (16u * 1024u * 1024u)
+
+// FreeListEntry describes a free chunk of memory, stored in the freeList.
+struct FreeListEntry {
+ FreeListEntry(void* pointer, size_t size)
+ : pointer(pointer)
+ , size(size)
+ , nextEntry(0)
+ , less(0)
+ , greater(0)
+ , balanceFactor(0)
+ {
+ }
+
+ // All entries of the same size share a single entry
+ // in the AVLTree, and are linked together in a linked
+ // list, using nextEntry.
+ void* pointer;
+ size_t size;
+ FreeListEntry* nextEntry;
+
+ // These fields are used by AVLTree.
+ FreeListEntry* less;
+ FreeListEntry* greater;
+ int balanceFactor;
+};
+
+// Abstractor class for use in AVLTree.
+// Nodes in the AVLTree are of type FreeListEntry, keyed on
+// (and thus sorted by) their size.
+struct AVLTreeAbstractorForFreeList {
+ typedef FreeListEntry* handle;
+ typedef int32_t size;
+ typedef size_t key;
+
+ handle get_less(handle h) { return h->less; }
+ void set_less(handle h, handle lh) { h->less = lh; }
+ handle get_greater(handle h) { return h->greater; }
+ void set_greater(handle h, handle gh) { h->greater = gh; }
+ int get_balance_factor(handle h) { return h->balanceFactor; }
+ void set_balance_factor(handle h, int bf) { h->balanceFactor = bf; }
+
+ static handle null() { return 0; }
+
+ int compare_key_key(key va, key vb) { return va - vb; }
+ int compare_key_node(key k, handle h) { return compare_key_key(k, h->size); }
+ int compare_node_node(handle h1, handle h2) { return compare_key_key(h1->size, h2->size); }
+};
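+
+Since FreeListEntry doubles as the AVL node and the key is the chunk size, a best-fit lookup is just a tree search. A condensed sketch of how alloc() below uses this abstractor (search() and its EQUAL/GREATER_EQUAL modes are the wtf/AVLTree.h calls already exercised in this file):
+
+    // Find the smallest free chunk that can satisfy 'size' bytes.
+    FreeListEntry* entry = m_freeList.search(size, m_freeList.GREATER_EQUAL);
+
+    // Equal-sized chunks share one tree node; extras hang off nextEntry,
+    // so a hit may represent a whole chain of identically sized chunks.
+    if (entry && entry->nextEntry) {
+        // Take entry->nextEntry and leave 'entry' in the tree, as alloc()
+        // does below, avoiding a remove/re-insert of the shared node.
+    }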
+
+// Used to reverse sort an array of FreeListEntry pointers.
+static int reverseSortFreeListEntriesByPointer(const void* leftPtr, const void* rightPtr)
+{
+ FreeListEntry* left = *(FreeListEntry**)leftPtr;
+ FreeListEntry* right = *(FreeListEntry**)rightPtr;
+
+ return (intptr_t)(right->pointer) - (intptr_t)(left->pointer);
+}
+
+// Used to reverse sort an array of pointers.
+static int reverseSortCommonSizedAllocations(const void* leftPtr, const void* rightPtr)
+{
+ void* left = *(void**)leftPtr;
+ void* right = *(void**)rightPtr;
+
+ return (intptr_t)right - (intptr_t)left;
+}
+
+class FixedVMPoolAllocator
+{
+ // The free list is stored in a sorted tree.
+ typedef AVLTree<AVLTreeAbstractorForFreeList, 40> SizeSortedFreeTree;
+
+ // Use madvise as appropriate to prevent freed pages from being spilled,
+ // and to attempt to ensure that used memory is reported correctly.
+#if HAVE(MADV_FREE_REUSE)
+ void release(void* position, size_t size)
+ {
+ while (madvise(position, size, MADV_FREE_REUSABLE) == -1 && errno == EAGAIN) { }
+ }
+
+ void reuse(void* position, size_t size)
+ {
+ while (madvise(position, size, MADV_FREE_REUSE) == -1 && errno == EAGAIN) { }
+ }
+#elif HAVE(MADV_DONTNEED)
+ void release(void* position, size_t size)
+ {
+ while (madvise(position, size, MADV_DONTNEED) == -1 && errno == EAGAIN) { }
+ }
+
+ void reuse(void*, size_t) {}
+#else
+ void release(void*, size_t) {}
+ void reuse(void*, size_t) {}
+#endif
+
+ // All additions to the free list should go through this method, rather than
+ // calling insert directly, to avoid multiple entries being added with the
+ // same key. All nodes being added should be singletons; they should not
+ // already be a part of a chain.
+ void addToFreeList(FreeListEntry* entry)
+ {
+ ASSERT(!entry->nextEntry);
+
+ if (entry->size == m_commonSize) {
+ m_commonSizedAllocations.append(entry->pointer);
+ delete entry;
+ } else if (FreeListEntry* entryInFreeList = m_freeList.search(entry->size, m_freeList.EQUAL)) {
+ // m_freeList already contains an entry for this size - insert this node into the chain.
+ entry->nextEntry = entryInFreeList->nextEntry;
+ entryInFreeList->nextEntry = entry;
+ } else
+ m_freeList.insert(entry);
+ }
+
+ // We do not attempt to coalesce on addition, which may lead to fragmentation;
+ // instead we periodically perform a sweep to try to coalesce neighboring
+ // entries in m_freeList. Presently this is triggered once 16MB
+ // of memory has been released.
+ void coalesceFreeSpace()
+ {
+ Vector<FreeListEntry*> freeListEntries;
+ SizeSortedFreeTree::Iterator iter;
+ iter.start_iter_least(m_freeList);
+
+ // Empty m_freeList into a Vector.
+ for (FreeListEntry* entry; (entry = *iter); ++iter) {
+ // Each entry in m_freeList might correspond to multiple
+ // free chunks of memory (of the same size). Walk the chain
+ // (this chain will likely, of course, be only one entry long!), adding
+ // each entry to the Vector (and resetting the next-in-chain
+ // pointer to separate each node out).
+ FreeListEntry* next;
+ do {
+ next = entry->nextEntry;
+ entry->nextEntry = 0;
+ freeListEntries.append(entry);
+ } while ((entry = next));
+ }
+ // All entries are now in the Vector; purge the tree.
+ m_freeList.purge();
+
+ // Reverse-sort the freeListEntries and m_commonSizedAllocations Vectors.
+ // We reverse-sort so that we can logically work forwards through memory,
+ // whilst popping items off the end of the Vectors using last() and removeLast().
+ qsort(freeListEntries.begin(), freeListEntries.size(), sizeof(FreeListEntry*), reverseSortFreeListEntriesByPointer);
+ qsort(m_commonSizedAllocations.begin(), m_commonSizedAllocations.size(), sizeof(void*), reverseSortCommonSizedAllocations);
+
+ // The entries from m_commonSizedAllocations that cannot be
+ // coalesced into larger chunks will be temporarily stored here.
+ Vector<void*> newCommonSizedAllocations;
+
+ // Keep processing so long as entries remain in either of the vectors.
+ while (freeListEntries.size() || m_commonSizedAllocations.size()) {
+ // We're going to try to find a FreeListEntry node that we can coalesce onto.
+ FreeListEntry* coalescionEntry = 0;
+
+ // Is the lowest addressed chunk of free memory of common-size, or is it in the free list?
+ if (m_commonSizedAllocations.size() && (!freeListEntries.size() || (m_commonSizedAllocations.last() < freeListEntries.last()->pointer))) {
+ // Pop an item from the m_commonSizedAllocations vector - this is the lowest
+ // addressed free chunk. Find out the begin and end addresses of the memory chunk.
+ void* begin = m_commonSizedAllocations.last();
+ void* end = (void*)((intptr_t)begin + m_commonSize);
+ m_commonSizedAllocations.removeLast();
+
+ // Try to find another free chunk abutting onto the end of the one we have already found.
+ if (freeListEntries.size() && (freeListEntries.last()->pointer == end)) {
+ // There is an existing FreeListEntry for the next chunk of memory!
+ // We can reuse this. Pop it off the end of the freeListEntries Vector.
+ coalescionEntry = freeListEntries.last();
+ freeListEntries.removeLast();
+ // Update the existing node to include the common-sized chunk that we also found.
+ coalescionEntry->pointer = (void*)((intptr_t)coalescionEntry->pointer - m_commonSize);
+ coalescionEntry->size += m_commonSize;
+ } else if (m_commonSizedAllocations.size() && (m_commonSizedAllocations.last() == end)) {
+ // There is a second common-sized chunk that can be coalesced.
+ // Allocate a new node.
+ m_commonSizedAllocations.removeLast();
+ coalescionEntry = new FreeListEntry(begin, 2 * m_commonSize);
+ } else {
+ // Nope - this poor little guy is all on his own. :-(
+ // Add him into the newCommonSizedAllocations vector for now, we're
+ // going to end up adding him back into the m_commonSizedAllocations
+ // list when we're done.
+ newCommonSizedAllocations.append(begin);
+ continue;
+ }
+ } else {
+ ASSERT(freeListEntries.size());
+ ASSERT(!m_commonSizedAllocations.size() || (freeListEntries.last()->pointer < m_commonSizedAllocations.last()));
+ // The lowest addressed item is from m_freeList; pop it from the Vector.
+ coalescionEntry = freeListEntries.last();
+ freeListEntries.removeLast();
+ }
+
+ // Right, we have a FreeListEntry; we just need to check if there is anything else
+ // to coalesce onto the end.
+ ASSERT(coalescionEntry);
+ while (true) {
+ // Calculate the end address of the chunk we have found so far.
+ void* end = (void*)((intptr_t)coalescionEntry->pointer + coalescionEntry->size);
+
+ // Is there another chunk adjacent to the one we already have?
+ if (freeListEntries.size() && (freeListEntries.last()->pointer == end)) {
+ // Yes - another FreeListEntry - pop it from the list.
+ FreeListEntry* coalescee = freeListEntries.last();
+ freeListEntries.removeLast();
+ // Add its size onto our existing node.
+ coalescionEntry->size += coalescee->size;
+ delete coalescee;
+ } else if (m_commonSizedAllocations.size() && (m_commonSizedAllocations.last() == end)) {
+ // We can coalesce the next common-sized chunk.
+ m_commonSizedAllocations.removeLast();
+ coalescionEntry->size += m_commonSize;
+ } else
+ break; // Nope, nothing to be added - stop here.
+ }
+
+ // We've coalesced everything we can onto the current chunk.
+ // Add it back into m_freeList.
+ addToFreeList(coalescionEntry);
+ }
+
+ // All chunks of free memory larger than m_commonSize should be
+ // back in m_freeList by now. All that remains to be done is to
+ // copy the contents of the newCommonSizedAllocations Vector back into
+ // the m_commonSizedAllocations Vector.
+ ASSERT(m_commonSizedAllocations.size() == 0);
+ m_commonSizedAllocations.append(newCommonSizedAllocations);
+ }
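+
+To make the sweep concrete, here is a minimal, self-contained model of the same coalescing pass over bare address ranges (illustrative only; the real code additionally maintains the AVL tree, the common-size fast list, and the reverse-sorted pop-from-the-back traversal):
+
+    #include <algorithm>
+    #include <cstdint>
+    #include <vector>
+
+    struct Range { uintptr_t begin; size_t size; };
+
+    // Merge abutting free ranges, walking memory in address order.
+    std::vector<Range> coalesce(std::vector<Range> free)
+    {
+        std::sort(free.begin(), free.end(),
+                  [](const Range& a, const Range& b) { return a.begin < b.begin; });
+        std::vector<Range> merged;
+        for (const Range& r : free) {
+            if (!merged.empty() && merged.back().begin + merged.back().size == r.begin)
+                merged.back().size += r.size; // abuts the previous chunk: coalesce
+            else
+                merged.push_back(r);
+        }
+        return merged;
+    }
+
+For example, one 32KB free-list chunk at 0x10000 plus common-sized (16KB) chunks at 0x18000 and 0x1C000 collapse into a single 64KB entry at 0x10000, which is exactly what the loop above hands back to addToFreeList().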
+
+public:
+
+ FixedVMPoolAllocator(size_t commonSize, size_t totalHeapSize)
+ : m_commonSize(commonSize)
+ , m_countFreedSinceLastCoalesce(0)
+ , m_totalHeapSize(totalHeapSize)
+ {
+ // Cook up an address to allocate at, using the following recipe:
+ // 17 bits of zero, stay in userspace kids.
+ // 26 bits of randomness for ASLR.
+ // 21 bits of zero, at least stay aligned within one level of the pagetables.
+ //
+ // But! - as a temporary workaround for some plugin problems (rdar://problem/6812854),
+ // for now, instead of 26 bits of ASLR let's stick with 25 bits of randomization plus
+ // 2^24, which should put us somewhere in the middle of userspace (in the address range
+ // 0x200000000000 .. 0x5fffffffffff).
+ intptr_t randomLocation = arc4random() & ((1 << 25) - 1);
+ randomLocation += (1 << 24);
+ randomLocation <<= 21;
+ m_base = mmap(reinterpret_cast<void*>(randomLocation), m_totalHeapSize, INITIAL_PROTECTION_FLAGS, MAP_PRIVATE | MAP_ANON, VM_TAG_FOR_EXECUTABLEALLOCATOR_MEMORY, 0);
+ if (m_base == MAP_FAILED) // mmap reports failure as MAP_FAILED, not 0
+ CRASH();
+
+ // For simplicity, we keep all memory in m_freeList in a 'released' state.
+ // This means that we can simply reuse all memory when allocating, without
+ // worrying about its previous state, and also makes coalescing m_freeList
+ // simpler since we need not worry about the possibility of coalescing released
+ // chunks with non-released ones.
+ release(m_base, m_totalHeapSize);
+ m_freeList.insert(new FreeListEntry(m_base, m_totalHeapSize));
+ }
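+
+The claimed range can be verified by plugging in the extremes of the 25 random bits (a sketch for a 64-bit build; the constants mirror the recipe above):
+
+    #include <cstdint>
+
+    // Smallest base: random bits all zero -> (0 + 2^24) << 21 == 2^45.
+    constexpr uintptr_t minBase = (uintptr_t(1) << 24) << 21;
+
+    // Largest base: all 25 random bits set -> ((2^25 - 1) + 2^24) << 21.
+    constexpr uintptr_t maxBase =
+        (((uintptr_t(1) << 25) - 1) + (uintptr_t(1) << 24)) << 21;
+
+    static_assert(minBase == 0x200000000000, "bottom of the claimed range");
+    static_assert(maxBase == 0x5FFFFFE00000, "within 0x5fffffffffff");
+
+Every candidate base therefore falls in 0x200000000000 .. 0x5fffffffffff and, since the low 21 bits are zero, is 2MB-aligned.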
+
+ void* alloc(size_t size)
+ {
+ void* result;
+
+ // Freed allocations of the common size are not stored back into the main
+ // m_freeList, but are instead stored in a separate vector. If the request
+ // is for a common sized allocation, check this list.
+ if ((size == m_commonSize) && m_commonSizedAllocations.size()) {
+ result = m_commonSizedAllocations.last();
+ m_commonSizedAllocations.removeLast();
+ } else {
+ // Search m_freeList for a suitably sized chunk to allocate memory from.
+ FreeListEntry* entry = m_freeList.search(size, m_freeList.GREATER_EQUAL);
+
+ // This would be bad news.
+ if (!entry) {
+ // Errk! Let's take a last-ditch desperation attempt at defragmentation...
+ coalesceFreeSpace();
+ // Did that free up a large enough chunk?
+ entry = m_freeList.search(size, m_freeList.GREATER_EQUAL);
+ // No?... *BOOM!*
+ if (!entry)
+ CRASH();
+ }
+ ASSERT(entry->size != m_commonSize);
+
+ // Remove the entry from m_freeList. But! -
+ // Each entry in the tree may represent a chain of multiple chunks of the
+ // same size, and we only want to remove one of them. So, if this entry
+ // does have a chain, just remove the first-but-one item from the chain.
+ if (FreeListEntry* next = entry->nextEntry) {
+ // We're going to leave 'entry' in the tree; remove 'next' from its chain.
+ entry->nextEntry = next->nextEntry;
+ next->nextEntry = 0;
+ entry = next;
+ } else
+ m_freeList.remove(entry->size);
+
+ // Whoo! We have a result!
+ ASSERT(entry->size >= size);
+ result = entry->pointer;
+
+ // If the allocation exactly fits the chunk we found in
+ // m_freeList, then the FreeListEntry node is no longer needed.
+ if (entry->size == size)
+ delete entry;
+ else {
+ // There is memory left over, and it is not of the common size.
+ // We can reuse the existing FreeListEntry node to add this back
+ // into m_freeList.
+ entry->pointer = (void*)((intptr_t)entry->pointer + size);
+ entry->size -= size;
+ addToFreeList(entry);
+ }
+ }
+
+ // Call reuse to report to the operating system that this memory is in use.
+ ASSERT(isWithinVMPool(result, size));
+ reuse(result, size);
+ return result;
+ }
+
+ void free(void* pointer, size_t size)
+ {
+ // Call release to report to the operating system that this
+ // memory is no longer in use, and need not be paged out.
+ ASSERT(isWithinVMPool(pointer, size));
+ release(pointer, size);
+
+ // Common-sized allocations are stored in the m_commonSizedAllocations
+ // vector; all other freed chunks are added to m_freeList.
+ if (size == m_commonSize)
+ m_commonSizedAllocations.append(pointer);
+ else
+ addToFreeList(new FreeListEntry(pointer, size));
+
+ // Do some housekeeping. Every time another 16MB of
+ // allocations has been freed, sweep m_freeList,
+ // coalescing any neighboring fragments.
+ m_countFreedSinceLastCoalesce += size;
+ if (m_countFreedSinceLastCoalesce >= SIXTEEN_MB) {
+ m_countFreedSinceLastCoalesce = 0;
+ coalesceFreeSpace();
+ }
+ }
+
+private:
+
+#ifndef NDEBUG
+ bool isWithinVMPool(void* pointer, size_t size)
+ {
+ return pointer >= m_base && (reinterpret_cast<char*>(pointer) + size <= reinterpret_cast<char*>(m_base) + m_totalHeapSize);
+ }
+#endif
+
+ // Freed space from the most common-sized allocations will be held in this Vector, ...
+ const size_t m_commonSize;
+ Vector<void*> m_commonSizedAllocations;
+
+ // ... and all other freed allocations are held in m_freeList.
+ SizeSortedFreeTree m_freeList;
+
+ // This is used for housekeeping, to trigger defragmentation of the freed lists.
+ size_t m_countFreedSinceLastCoalesce;
+
+ void* m_base;
+ size_t m_totalHeapSize;
+};
+
+void ExecutableAllocator::intializePageSize()
+{
+ ExecutableAllocator::pageSize = getpagesize();
+}
+
+static FixedVMPoolAllocator* allocator = 0;
+static SpinLock spinlock = SPINLOCK_INITIALIZER;
+
+ExecutablePool::Allocation ExecutablePool::systemAlloc(size_t size)
+{
+ SpinLockHolder lock_holder(&spinlock);
+
+ if (!allocator)
+ allocator = new FixedVMPoolAllocator(JIT_ALLOCATOR_LARGE_ALLOC_SIZE, TWO_GB);
+ ExecutablePool::Allocation alloc = {reinterpret_cast<char*>(allocator->alloc(size)), size};
+ return alloc;
+}
+
+void ExecutablePool::systemRelease(const ExecutablePool::Allocation& allocation)
+{
+ SpinLockHolder lock_holder(&spinlock);
+
+ ASSERT(allocator);
+ allocator->free(allocation.pages, allocation.size);
+}
+
+}
+
+#endif // ENABLE(ASSEMBLER) && PLATFORM(MAC) && PLATFORM(X86_64)
diff --git a/JavaScriptCore/jit/ExecutableAllocatorPosix.cpp b/JavaScriptCore/jit/ExecutableAllocatorPosix.cpp
index 21955d7..4bd5a2c 100644
--- a/JavaScriptCore/jit/ExecutableAllocatorPosix.cpp
+++ b/JavaScriptCore/jit/ExecutableAllocatorPosix.cpp
@@ -31,9 +31,12 @@
#include <sys/mman.h>
#include <unistd.h>
+#include <wtf/VMTags.h>
namespace JSC {
+#if !(PLATFORM(MAC) && PLATFORM(X86_64))
+
void ExecutableAllocator::intializePageSize()
{
ExecutableAllocator::pageSize = getpagesize();
@@ -41,16 +44,39 @@ void ExecutableAllocator::intializePageSize()
ExecutablePool::Allocation ExecutablePool::systemAlloc(size_t n)
{
- ExecutablePool::Allocation alloc = {reinterpret_cast<char*>(mmap(NULL, n, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE | MAP_ANON, -1, 0)), n};
+ ExecutablePool::Allocation alloc = { reinterpret_cast<char*>(mmap(NULL, n, INITIAL_PROTECTION_FLAGS, MAP_PRIVATE | MAP_ANON, VM_TAG_FOR_EXECUTABLEALLOCATOR_MEMORY, 0)), n };
return alloc;
}
-void ExecutablePool::systemRelease(const ExecutablePool::Allocation& alloc)
+void ExecutablePool::systemRelease(const ExecutablePool::Allocation& alloc)
{
int result = munmap(alloc.pages, alloc.size);
ASSERT_UNUSED(result, !result);
}
+#endif // !(PLATFORM(MAC) && PLATFORM(X86_64))
+
+#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
+void ExecutableAllocator::reprotectRegion(void* start, size_t size, ProtectionSeting setting)
+{
+ if (!pageSize)
+ intializePageSize();
+
+ // Calculate the start of the page containing this region,
+ // and account for this extra memory within size.
+ intptr_t startPtr = reinterpret_cast<intptr_t>(start);
+ intptr_t pageStartPtr = startPtr & ~(pageSize - 1);
+ void* pageStart = reinterpret_cast<void*>(pageStartPtr);
+ size += (startPtr - pageStartPtr);
+
+ // Round size up to a whole number of pages.
+ size += (pageSize - 1);
+ size &= ~(pageSize - 1);
+
+ mprotect(pageStart, size, (setting == Writable) ? PROTECTION_FLAGS_RW : PROTECTION_FLAGS_RX);
+}
+#endif
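+
+As a worked instance of the rounding above, assume pageSize is 0x1000 and a caller reprotects 0x100 bytes at 0x12345:
+
+    pageStartPtr = 0x12345 & ~0xFFF             == 0x12000
+    size         = 0x100 + (0x12345 - 0x12000)  == 0x445    // cover the lead-in
+    size         = (0x445 + 0xFFF) & ~0xFFF     == 0x1000   // round up to pages
+
+so mprotect() is applied to exactly the one whole page, [0x12000, 0x13000), that contains the region.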
+
}
#endif // HAVE(ASSEMBLER)
diff --git a/JavaScriptCore/jit/ExecutableAllocatorWin.cpp b/JavaScriptCore/jit/ExecutableAllocatorWin.cpp
index 7467f81..a9ba7d0 100644
--- a/JavaScriptCore/jit/ExecutableAllocatorWin.cpp
+++ b/JavaScriptCore/jit/ExecutableAllocatorWin.cpp
@@ -51,6 +51,10 @@ void ExecutablePool::systemRelease(const ExecutablePool::Allocation& alloc)
VirtualFree(alloc.pages, 0, MEM_RELEASE);
}
+#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
+#error "ASSEMBLER_WX_EXCLUSIVE not yet suported on this platform."
+#endif
+
}
#endif // HAVE(ASSEMBLER)
diff --git a/JavaScriptCore/jit/JIT.cpp b/JavaScriptCore/jit/JIT.cpp
index e6113fc..0cfb535 100644
--- a/JavaScriptCore/jit/JIT.cpp
+++ b/JavaScriptCore/jit/JIT.cpp
@@ -1,3 +1,4 @@
+
/*
* Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
*
@@ -29,10 +30,11 @@
#if ENABLE(JIT)
#include "CodeBlock.h"
+#include "Interpreter.h"
#include "JITInlineMethods.h"
+#include "JITStubCall.h"
#include "JSArray.h"
#include "JSFunction.h"
-#include "Interpreter.h"
#include "ResultType.h"
#include "SamplingTool.h"
@@ -44,175 +46,19 @@ using namespace std;
namespace JSC {
-#if COMPILER(GCC) && PLATFORM(X86)
-
-COMPILE_ASSERT(STUB_ARGS_code == 0x0C, STUB_ARGS_code_is_0x0C);
-COMPILE_ASSERT(STUB_ARGS_callFrame == 0x0E, STUB_ARGS_callFrame_is_0x0E);
-
-#if PLATFORM(DARWIN)
-#define SYMBOL_STRING(name) "_" #name
-#else
-#define SYMBOL_STRING(name) #name
-#endif
-
-asm(
-".globl " SYMBOL_STRING(ctiTrampoline) "\n"
-SYMBOL_STRING(ctiTrampoline) ":" "\n"
- "pushl %ebp" "\n"
- "movl %esp, %ebp" "\n"
- "pushl %esi" "\n"
- "pushl %edi" "\n"
- "pushl %ebx" "\n"
- "subl $0x1c, %esp" "\n"
- "movl $512, %esi" "\n"
- "movl 0x38(%esp), %edi" "\n" // Ox38 = 0x0E * 4, 0x0E = STUB_ARGS_callFrame (see assertion above)
- "call *0x30(%esp)" "\n" // Ox30 = 0x0C * 4, 0x0C = STUB_ARGS_code (see assertion above)
- "addl $0x1c, %esp" "\n"
- "popl %ebx" "\n"
- "popl %edi" "\n"
- "popl %esi" "\n"
- "popl %ebp" "\n"
- "ret" "\n"
-);
-
-asm(
-".globl " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
-SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
-#if USE(JIT_STUB_ARGUMENT_VA_LIST)
- "call " SYMBOL_STRING(_ZN3JSC8JITStubs12cti_vm_throwEPvz) "\n"
-#else
-#if USE(JIT_STUB_ARGUMENT_REGISTER)
- "movl %esp, %ecx" "\n"
-#else // JIT_STUB_ARGUMENT_STACK
- "movl %esp, 0(%esp)" "\n"
-#endif
- "call " SYMBOL_STRING(_ZN3JSC8JITStubs12cti_vm_throwEPPv) "\n"
-#endif
- "addl $0x1c, %esp" "\n"
- "popl %ebx" "\n"
- "popl %edi" "\n"
- "popl %esi" "\n"
- "popl %ebp" "\n"
- "ret" "\n"
-);
-
-#elif COMPILER(GCC) && PLATFORM(X86_64)
-
-COMPILE_ASSERT(STUB_ARGS_code == 0x10, STUB_ARGS_code_is_0x10);
-COMPILE_ASSERT(STUB_ARGS_callFrame == 0x12, STUB_ARGS_callFrame_is_0x12);
-
-#if PLATFORM(DARWIN)
-#define SYMBOL_STRING(name) "_" #name
-#else
-#define SYMBOL_STRING(name) #name
-#endif
-
-asm(
-".globl " SYMBOL_STRING(ctiTrampoline) "\n"
-SYMBOL_STRING(ctiTrampoline) ":" "\n"
- "pushq %rbp" "\n"
- "movq %rsp, %rbp" "\n"
- "pushq %r12" "\n"
- "pushq %r13" "\n"
- "pushq %r14" "\n"
- "pushq %r15" "\n"
- "pushq %rbx" "\n"
- "subq $0x48, %rsp" "\n"
- "movq $512, %r12" "\n"
- "movq $0xFFFF000000000000, %r14" "\n"
- "movq $0xFFFF000000000002, %r15" "\n"
- "movq 0x90(%rsp), %r13" "\n" // Ox90 = 0x12 * 8, 0x12 = STUB_ARGS_callFrame (see assertion above)
- "call *0x80(%rsp)" "\n" // Ox80 = 0x10 * 8, 0x10 = STUB_ARGS_code (see assertion above)
- "addq $0x48, %rsp" "\n"
- "popq %rbx" "\n"
- "popq %r15" "\n"
- "popq %r14" "\n"
- "popq %r13" "\n"
- "popq %r12" "\n"
- "popq %rbp" "\n"
- "ret" "\n"
-);
-
-asm(
-".globl " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
-SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
-#if USE(JIT_STUB_ARGUMENT_REGISTER)
- "movq %rsp, %rdi" "\n"
- "call " SYMBOL_STRING(_ZN3JSC8JITStubs12cti_vm_throwEPPv) "\n"
-#else // JIT_STUB_ARGUMENT_VA_LIST or JIT_STUB_ARGUMENT_STACK
-#error "JIT_STUB_ARGUMENT configuration not supported."
-#endif
- "addq $0x48, %rsp" "\n"
- "popq %rbx" "\n"
- "popq %r15" "\n"
- "popq %r14" "\n"
- "popq %r13" "\n"
- "popq %r12" "\n"
- "popq %rbp" "\n"
- "ret" "\n"
-);
-
-#elif COMPILER(MSVC)
-
-extern "C" {
-
- __declspec(naked) JSValueEncodedAsPointer* ctiTrampoline(void* code, RegisterFile*, CallFrame*, JSValuePtr* exception, Profiler**, JSGlobalData*)
- {
- __asm {
- push ebp;
- mov ebp, esp;
- push esi;
- push edi;
- push ebx;
- sub esp, 0x1c;
- mov esi, 512;
- mov ecx, esp;
- mov edi, [esp + 0x38];
- call [esp + 0x30]; // Ox30 = 0x0C * 4, 0x0C = STUB_ARGS_code (see assertion above)
- add esp, 0x1c;
- pop ebx;
- pop edi;
- pop esi;
- pop ebp;
- ret;
- }
- }
-
- __declspec(naked) void ctiVMThrowTrampoline()
- {
- __asm {
-#if USE(JIT_STUB_ARGUMENT_REGISTER)
- mov ecx, esp;
-#else // JIT_STUB_ARGUMENT_VA_LIST or JIT_STUB_ARGUMENT_STACK
-#error "JIT_STUB_ARGUMENT configuration not supported."
-#endif
- call JSC::JITStubs::cti_vm_throw;
- add esp, 0x1c;
- pop ebx;
- pop edi;
- pop esi;
- pop ebp;
- ret;
- }
- }
-
-}
-
-#endif
-
-void ctiSetReturnAddress(void** addressOfReturnAddress, void* newDestinationToReturnTo)
+void ctiPatchNearCallByReturnAddress(MacroAssembler::ProcessorReturnAddress returnAddress, MacroAssemblerCodePtr newCalleeFunction)
{
- *addressOfReturnAddress = newDestinationToReturnTo;
+ returnAddress.relinkNearCallerToTrampoline(newCalleeFunction);
}
-void ctiPatchCallByReturnAddress(MacroAssembler::ProcessorReturnAddress returnAddress, void* newCalleeFunction)
+void ctiPatchCallByReturnAddress(MacroAssembler::ProcessorReturnAddress returnAddress, MacroAssemblerCodePtr newCalleeFunction)
{
- returnAddress.relinkCallerToFunction(newCalleeFunction);
+ returnAddress.relinkCallerToTrampoline(newCalleeFunction);
}
-void ctiPatchNearCallByReturnAddress(MacroAssembler::ProcessorReturnAddress returnAddress, void* newCalleeFunction)
+void ctiPatchCallByReturnAddress(MacroAssembler::ProcessorReturnAddress returnAddress, FunctionPtr newCalleeFunction)
{
- returnAddress.relinkNearCallerToFunction(newCalleeFunction);
+ returnAddress.relinkCallerToFunction(newCalleeFunction);
}
JIT::JIT(JSGlobalData* globalData, CodeBlock* codeBlock)
@@ -253,8 +99,7 @@ void JIT::compileOpStrictEq(Instruction* currentInstruction, CompileOpStrictEqTy
void JIT::emitTimeoutCheck()
{
Jump skipTimeout = branchSub32(NonZero, Imm32(1), timeoutCheckRegister);
- emitCTICall(JITStubs::cti_timeout_check);
- move(regT0, timeoutCheckRegister);
+ JITStubCall(this, JITStubs::cti_timeout_check).call(timeoutCheckRegister);
skipTimeout.link(this);
killLastResultRegister();
@@ -265,33 +110,43 @@ void JIT::emitTimeoutCheck()
m_bytecodeIndex += OPCODE_LENGTH(name); \
break;
-#define CTI_COMPILE_BINARY_OP(name) \
+#define DEFINE_BINARY_OP(name) \
case name: { \
- emitPutJITStubArgFromVirtualRegister(currentInstruction[2].u.operand, 1, regT2); \
- emitPutJITStubArgFromVirtualRegister(currentInstruction[3].u.operand, 2, regT2); \
- emitCTICall(JITStubs::cti_##name); \
- emitPutVirtualRegister(currentInstruction[1].u.operand); \
+ JITStubCall stubCall(this, JITStubs::cti_##name); \
+ stubCall.addArgument(currentInstruction[2].u.operand, regT2); \
+ stubCall.addArgument(currentInstruction[3].u.operand, regT2); \
+ stubCall.call(currentInstruction[1].u.operand); \
NEXT_OPCODE(name); \
}
-#define CTI_COMPILE_UNARY_OP(name) \
+#define DEFINE_UNARY_OP(name) \
case name: { \
- emitPutJITStubArgFromVirtualRegister(currentInstruction[2].u.operand, 1, regT2); \
- emitCTICall(JITStubs::cti_##name); \
- emitPutVirtualRegister(currentInstruction[1].u.operand); \
+ JITStubCall stubCall(this, JITStubs::cti_##name); \
+ stubCall.addArgument(currentInstruction[2].u.operand, regT2); \
+ stubCall.call(currentInstruction[1].u.operand); \
NEXT_OPCODE(name); \
}
-#define RECORD_JUMP_TARGET(targetOffset) \
- do { m_labels[m_bytecodeIndex + (targetOffset)].used(); } while (false)
+#define DEFINE_OP(name) \
+ case name: { \
+ emit_##name(currentInstruction); \
+ NEXT_OPCODE(name); \
+ }
+
+#define DEFINE_SLOWCASE_OP(name) \
+ case name: { \
+ emitSlow_##name(currentInstruction, iter); \
+ NEXT_OPCODE(name); \
+ }
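+
+These macros capture the calling convention this commit introduces: instead of marshalling stub arguments by hand and storing the result separately, call sites build a JITStubCall (declared in the new JITStubCall.h listed in the diffstat). A sketch for a stub such as cti_op_lesseq, where src1, src2 and dst stand for the usual virtual-register operands:
+
+    // Old form, as removed throughout this file:
+    //   emitPutJITStubArgFromVirtualRegister(src1, 1, regT2);
+    //   emitPutJITStubArgFromVirtualRegister(src2, 2, regT2);
+    //   emitCTICall(JITStubs::cti_op_lesseq);
+    //   emitPutVirtualRegister(dst);
+
+    // New form: one object owns the argument slots and the result store.
+    JITStubCall stubCall(this, JITStubs::cti_op_lesseq);
+    stubCall.addArgument(src1, regT2); // regT2 is the scratch register
+    stubCall.addArgument(src2, regT2);
+    stubCall.call(dst);                // emits the call and writes dst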
void JIT::privateCompileMainPass()
{
Instruction* instructionsBegin = m_codeBlock->instructions().begin();
unsigned instructionCount = m_codeBlock->instructions().size();
- unsigned propertyAccessInstructionIndex = 0;
- unsigned globalResolveInfoIndex = 0;
- unsigned callLinkInfoIndex = 0;
+
+ m_propertyAccessInstructionIndex = 0;
+ m_globalResolveInfoIndex = 0;
+ m_callLinkInfoIndex = 0;
for (m_bytecodeIndex = 0; m_bytecodeIndex < instructionCount; ) {
Instruction* currentInstruction = instructionsBegin + m_bytecodeIndex;
@@ -306,947 +161,119 @@ void JIT::privateCompileMainPass()
killLastResultRegister();
m_labels[m_bytecodeIndex] = label();
- OpcodeID opcodeID = m_interpreter->getOpcodeID(currentInstruction->u.opcode);
-
- switch (opcodeID) {
- case op_mov: {
- int src = currentInstruction[2].u.operand;
- int dst = currentInstruction[1].u.operand;
-
- if (m_codeBlock->isConstantRegisterIndex(src)) {
- storePtr(ImmPtr(JSValuePtr::encode(getConstantOperand(src))), Address(callFrameRegister, dst * sizeof(Register)));
- if (dst == m_lastResultBytecodeRegister)
- killLastResultRegister();
- } else if ((src == m_lastResultBytecodeRegister) || (dst == m_lastResultBytecodeRegister)) {
- // If either the src or dst is the cached register go though
- // get/put registers to make sure we track this correctly.
- emitGetVirtualRegister(src, regT0);
- emitPutVirtualRegister(dst);
- } else {
- // Perform the copy via regT1; do not disturb any mapping in regT0.
- loadPtr(Address(callFrameRegister, src * sizeof(Register)), regT1);
- storePtr(regT1, Address(callFrameRegister, dst * sizeof(Register)));
- }
- NEXT_OPCODE(op_mov);
- }
- case op_add: {
- compileFastArith_op_add(currentInstruction);
- NEXT_OPCODE(op_add);
- }
- case op_end: {
- if (m_codeBlock->needsFullScopeChain())
- emitCTICall(JITStubs::cti_op_end);
- ASSERT(returnValueRegister != callFrameRegister);
- emitGetVirtualRegister(currentInstruction[1].u.operand, returnValueRegister);
- push(Address(callFrameRegister, RegisterFile::ReturnPC * static_cast<int>(sizeof(Register))));
- ret();
- NEXT_OPCODE(op_end);
- }
- case op_jmp: {
- unsigned target = currentInstruction[1].u.operand;
- addJump(jump(), target + 1);
- RECORD_JUMP_TARGET(target + 1);
- NEXT_OPCODE(op_jmp);
- }
- case op_pre_inc: {
- compileFastArith_op_pre_inc(currentInstruction[1].u.operand);
- NEXT_OPCODE(op_pre_inc);
- }
- case op_loop: {
- emitTimeoutCheck();
-
- unsigned target = currentInstruction[1].u.operand;
- addJump(jump(), target + 1);
- NEXT_OPCODE(op_end);
- }
- case op_loop_if_less: {
- emitTimeoutCheck();
-
- unsigned op1 = currentInstruction[1].u.operand;
- unsigned op2 = currentInstruction[2].u.operand;
- unsigned target = currentInstruction[3].u.operand;
- if (isOperandConstantImmediateInt(op2)) {
- emitGetVirtualRegister(op1, regT0);
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
-#if USE(ALTERNATE_JSIMMEDIATE)
- int32_t op2imm = getConstantOperandImmediateInt(op2);
-#else
- int32_t op2imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)));
-#endif
- addJump(branch32(LessThan, regT0, Imm32(op2imm)), target + 3);
- } else {
- emitGetVirtualRegisters(op1, regT0, op2, regT1);
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
- emitJumpSlowCaseIfNotImmediateInteger(regT1);
- addJump(branch32(LessThan, regT0, regT1), target + 3);
- }
- NEXT_OPCODE(op_loop_if_less);
- }
- case op_loop_if_lesseq: {
- emitTimeoutCheck();
-
- unsigned op1 = currentInstruction[1].u.operand;
- unsigned op2 = currentInstruction[2].u.operand;
- unsigned target = currentInstruction[3].u.operand;
- if (isOperandConstantImmediateInt(op2)) {
- emitGetVirtualRegister(op1, regT0);
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
-#if USE(ALTERNATE_JSIMMEDIATE)
- int32_t op2imm = getConstantOperandImmediateInt(op2);
-#else
- int32_t op2imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)));
-#endif
- addJump(branch32(LessThanOrEqual, regT0, Imm32(op2imm)), target + 3);
- } else {
- emitGetVirtualRegisters(op1, regT0, op2, regT1);
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
- emitJumpSlowCaseIfNotImmediateInteger(regT1);
- addJump(branch32(LessThanOrEqual, regT0, regT1), target + 3);
- }
- NEXT_OPCODE(op_loop_if_less);
- }
- case op_new_object: {
- emitCTICall(JITStubs::cti_op_new_object);
- emitPutVirtualRegister(currentInstruction[1].u.operand);
- NEXT_OPCODE(op_new_object);
- }
- case op_put_by_id: {
- compilePutByIdHotPath(currentInstruction[1].u.operand, &(m_codeBlock->identifier(currentInstruction[2].u.operand)), currentInstruction[3].u.operand, propertyAccessInstructionIndex++);
- NEXT_OPCODE(op_put_by_id);
- }
- case op_get_by_id: {
- compileGetByIdHotPath(currentInstruction[1].u.operand, currentInstruction[2].u.operand, &(m_codeBlock->identifier(currentInstruction[3].u.operand)), propertyAccessInstructionIndex++);
- NEXT_OPCODE(op_get_by_id);
- }
- case op_instanceof: {
- emitGetVirtualRegister(currentInstruction[2].u.operand, regT0); // value
- emitGetVirtualRegister(currentInstruction[3].u.operand, regT2); // baseVal
- emitGetVirtualRegister(currentInstruction[4].u.operand, regT1); // proto
-
- // check if any are immediates
- move(regT0, regT3);
- orPtr(regT2, regT3);
- orPtr(regT1, regT3);
- emitJumpSlowCaseIfNotJSCell(regT3);
-
- // check that all are object type - this is a bit of a bithack to avoid excess branching;
- // we check that the sum of the three type codes from Structures is exactly 3 * ObjectType,
- // this works because NumberType and StringType are smaller
- move(Imm32(3 * ObjectType), regT3);
- loadPtr(Address(regT0, FIELD_OFFSET(JSCell, m_structure)), regT0);
- loadPtr(Address(regT2, FIELD_OFFSET(JSCell, m_structure)), regT2);
- loadPtr(Address(regT1, FIELD_OFFSET(JSCell, m_structure)), regT1);
- sub32(Address(regT0, FIELD_OFFSET(Structure, m_typeInfo.m_type)), regT3);
- sub32(Address(regT2, FIELD_OFFSET(Structure, m_typeInfo.m_type)), regT3);
- addSlowCase(branch32(NotEqual, Address(regT1, FIELD_OFFSET(Structure, m_typeInfo.m_type)), regT3));
-
- // check that baseVal's flags include ImplementsHasInstance but not OverridesHasInstance
- load32(Address(regT2, FIELD_OFFSET(Structure, m_typeInfo.m_flags)), regT2);
- and32(Imm32(ImplementsHasInstance | OverridesHasInstance), regT2);
- addSlowCase(branch32(NotEqual, regT2, Imm32(ImplementsHasInstance)));
-
- emitGetVirtualRegister(currentInstruction[2].u.operand, regT2); // reload value
- emitGetVirtualRegister(currentInstruction[4].u.operand, regT1); // reload proto
-
- // optimistically load true result
- move(ImmPtr(JSValuePtr::encode(jsBoolean(true))), regT0);
-
- Label loop(this);
-
- // load value's prototype
- loadPtr(Address(regT2, FIELD_OFFSET(JSCell, m_structure)), regT2);
- loadPtr(Address(regT2, FIELD_OFFSET(Structure, m_prototype)), regT2);
-
- Jump exit = branchPtr(Equal, regT2, regT1);
-
- branchPtr(NotEqual, regT2, ImmPtr(JSValuePtr::encode(jsNull())), loop);
-
- move(ImmPtr(JSValuePtr::encode(jsBoolean(false))), regT0);
-
- exit.link(this);
-
- emitPutVirtualRegister(currentInstruction[1].u.operand);
-
- NEXT_OPCODE(op_instanceof);
- }
- case op_del_by_id: {
- emitPutJITStubArgFromVirtualRegister(currentInstruction[2].u.operand, 1, regT2);
- Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
- emitPutJITStubArgConstant(ident, 2);
- emitCTICall(JITStubs::cti_op_del_by_id);
- emitPutVirtualRegister(currentInstruction[1].u.operand);
- NEXT_OPCODE(op_del_by_id);
- }
- case op_mul: {
- compileFastArith_op_mul(currentInstruction);
- NEXT_OPCODE(op_mul);
- }
- case op_new_func: {
- FuncDeclNode* func = m_codeBlock->function(currentInstruction[2].u.operand);
- emitPutJITStubArgConstant(func, 1);
- emitCTICall(JITStubs::cti_op_new_func);
- emitPutVirtualRegister(currentInstruction[1].u.operand);
- NEXT_OPCODE(op_new_func);
- }
- case op_call: {
- compileOpCall(opcodeID, currentInstruction, callLinkInfoIndex++);
- NEXT_OPCODE(op_call);
- }
- case op_call_eval: {
- compileOpCall(opcodeID, currentInstruction, callLinkInfoIndex++);
- NEXT_OPCODE(op_call_eval);
- }
- case op_construct: {
- compileOpCall(opcodeID, currentInstruction, callLinkInfoIndex++);
- NEXT_OPCODE(op_construct);
- }
- case op_get_global_var: {
- JSVariableObject* globalObject = static_cast<JSVariableObject*>(currentInstruction[2].u.jsCell);
- move(ImmPtr(globalObject), regT0);
- emitGetVariableObjectRegister(regT0, currentInstruction[3].u.operand, regT0);
- emitPutVirtualRegister(currentInstruction[1].u.operand);
- NEXT_OPCODE(op_get_global_var);
- }
- case op_put_global_var: {
- emitGetVirtualRegister(currentInstruction[3].u.operand, regT1);
- JSVariableObject* globalObject = static_cast<JSVariableObject*>(currentInstruction[1].u.jsCell);
- move(ImmPtr(globalObject), regT0);
- emitPutVariableObjectRegister(regT1, regT0, currentInstruction[2].u.operand);
- NEXT_OPCODE(op_put_global_var);
- }
- case op_get_scoped_var: {
- int skip = currentInstruction[3].u.operand + m_codeBlock->needsFullScopeChain();
-
- emitGetFromCallFrameHeader(RegisterFile::ScopeChain, regT0);
- while (skip--)
- loadPtr(Address(regT0, FIELD_OFFSET(ScopeChainNode, next)), regT0);
-
- loadPtr(Address(regT0, FIELD_OFFSET(ScopeChainNode, object)), regT0);
- emitGetVariableObjectRegister(regT0, currentInstruction[2].u.operand, regT0);
- emitPutVirtualRegister(currentInstruction[1].u.operand);
- NEXT_OPCODE(op_get_scoped_var);
- }
- case op_put_scoped_var: {
- int skip = currentInstruction[2].u.operand + m_codeBlock->needsFullScopeChain();
-
- emitGetFromCallFrameHeader(RegisterFile::ScopeChain, regT1);
- emitGetVirtualRegister(currentInstruction[3].u.operand, regT0);
- while (skip--)
- loadPtr(Address(regT1, FIELD_OFFSET(ScopeChainNode, next)), regT1);
-
- loadPtr(Address(regT1, FIELD_OFFSET(ScopeChainNode, object)), regT1);
- emitPutVariableObjectRegister(regT0, regT1, currentInstruction[1].u.operand);
- NEXT_OPCODE(op_put_scoped_var);
- }
- case op_tear_off_activation: {
- emitPutJITStubArgFromVirtualRegister(currentInstruction[1].u.operand, 1, regT2);
- emitCTICall(JITStubs::cti_op_tear_off_activation);
- NEXT_OPCODE(op_tear_off_activation);
- }
- case op_tear_off_arguments: {
- emitCTICall(JITStubs::cti_op_tear_off_arguments);
- NEXT_OPCODE(op_tear_off_arguments);
- }
- case op_ret: {
- // We could JIT generate the deref, only calling out to C when the refcount hits zero.
- if (m_codeBlock->needsFullScopeChain())
- emitCTICall(JITStubs::cti_op_ret_scopeChain);
-
- ASSERT(callFrameRegister != regT1);
- ASSERT(regT1 != returnValueRegister);
- ASSERT(returnValueRegister != callFrameRegister);
-
- // Return the result in %eax.
- emitGetVirtualRegister(currentInstruction[1].u.operand, returnValueRegister);
-
- // Grab the return address.
- emitGetFromCallFrameHeader(RegisterFile::ReturnPC, regT1);
-
- // Restore our caller's "r".
- emitGetFromCallFrameHeader(RegisterFile::CallerFrame, callFrameRegister);
-
- // Return.
- push(regT1);
- ret();
-
- NEXT_OPCODE(op_ret);
- }
- case op_new_array: {
- emitPutJITStubArgConstant(currentInstruction[2].u.operand, 1);
- emitPutJITStubArgConstant(currentInstruction[3].u.operand, 2);
- emitCTICall(JITStubs::cti_op_new_array);
- emitPutVirtualRegister(currentInstruction[1].u.operand);
- NEXT_OPCODE(op_new_array);
- }
- case op_resolve: {
- Identifier* ident = &(m_codeBlock->identifier(currentInstruction[2].u.operand));
- emitPutJITStubArgConstant(ident, 1);
- emitCTICall(JITStubs::cti_op_resolve);
- emitPutVirtualRegister(currentInstruction[1].u.operand);
- NEXT_OPCODE(op_resolve);
- }
- case op_construct_verify: {
- emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
-
- emitJumpSlowCaseIfNotJSCell(regT0);
- loadPtr(Address(regT0, FIELD_OFFSET(JSCell, m_structure)), regT2);
- addSlowCase(branch32(NotEqual, Address(regT2, FIELD_OFFSET(Structure, m_typeInfo) + FIELD_OFFSET(TypeInfo, m_type)), Imm32(ObjectType)));
-
- NEXT_OPCODE(op_construct_verify);
- }
- case op_get_by_val: {
- emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
- emitJumpSlowCaseIfNotImmediateInteger(regT1);
-#if USE(ALTERNATE_JSIMMEDIATE)
- // This is technically incorrect - we're zero-extending an int32. On the hot path this doesn't matter.
- // We check the value as if it was a uint32 against the m_fastAccessCutoff - which will always fail if
- // number was signed since m_fastAccessCutoff is always less than intmax (since the total allocation
- // size is always less than 4Gb). As such zero extending wil have been correct (and extending the value
- // to 64-bits is necessary since it's used in the address calculation. We zero extend rather than sign
- // extending since it makes it easier to re-tag the value in the slow case.
- zeroExtend32ToPtr(regT1, regT1);
-#else
- emitFastArithImmToInt(regT1);
-#endif
- emitJumpSlowCaseIfNotJSCell(regT0);
- addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));
-
- // This is an array; get the m_storage pointer into ecx, then check if the index is below the fast cutoff
- loadPtr(Address(regT0, FIELD_OFFSET(JSArray, m_storage)), regT2);
- addSlowCase(branch32(AboveOrEqual, regT1, Address(regT0, FIELD_OFFSET(JSArray, m_fastAccessCutoff))));
-
- // Get the value from the vector
- loadPtr(BaseIndex(regT2, regT1, ScalePtr, FIELD_OFFSET(ArrayStorage, m_vector[0])), regT0);
- emitPutVirtualRegister(currentInstruction[1].u.operand);
- NEXT_OPCODE(op_get_by_val);
- }
- case op_resolve_func: {
- Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
- emitPutJITStubArgConstant(ident, 1);
- emitCTICall(JITStubs::cti_op_resolve_func);
- emitPutVirtualRegister(currentInstruction[2].u.operand, regT1);
- emitPutVirtualRegister(currentInstruction[1].u.operand);
- NEXT_OPCODE(op_resolve_func);
- }
- case op_sub: {
- compileFastArith_op_sub(currentInstruction);
- NEXT_OPCODE(op_sub);
- }
- case op_put_by_val: {
- emitGetVirtualRegisters(currentInstruction[1].u.operand, regT0, currentInstruction[2].u.operand, regT1);
- emitJumpSlowCaseIfNotImmediateInteger(regT1);
-#if USE(ALTERNATE_JSIMMEDIATE)
- // See comment in op_get_by_val.
- zeroExtend32ToPtr(regT1, regT1);
-#else
- emitFastArithImmToInt(regT1);
-#endif
- emitJumpSlowCaseIfNotJSCell(regT0);
- addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));
-
- // This is an array; get the m_storage pointer into ecx, then check if the index is below the fast cutoff
- loadPtr(Address(regT0, FIELD_OFFSET(JSArray, m_storage)), regT2);
- Jump inFastVector = branch32(Below, regT1, Address(regT0, FIELD_OFFSET(JSArray, m_fastAccessCutoff)));
- // No; oh well, check if the access if within the vector - if so, we may still be okay.
- addSlowCase(branch32(AboveOrEqual, regT1, Address(regT2, FIELD_OFFSET(ArrayStorage, m_vectorLength))));
-
- // This is a write to the slow part of the vector; first, we have to check if this would be the first write to this location.
- // FIXME: should be able to handle initial write to array; increment the the number of items in the array, and potentially update fast access cutoff.
- addSlowCase(branchTestPtr(Zero, BaseIndex(regT2, regT1, ScalePtr, FIELD_OFFSET(ArrayStorage, m_vector[0]))));
-
- // All good - put the value into the array.
- inFastVector.link(this);
- emitGetVirtualRegister(currentInstruction[3].u.operand, regT0);
- storePtr(regT0, BaseIndex(regT2, regT1, ScalePtr, FIELD_OFFSET(ArrayStorage, m_vector[0])));
- NEXT_OPCODE(op_put_by_val);
- }
- CTI_COMPILE_BINARY_OP(op_lesseq)
- case op_loop_if_true: {
- emitTimeoutCheck();
-
- unsigned target = currentInstruction[2].u.operand;
- emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
-
- Jump isZero = branchPtr(Equal, regT0, ImmPtr(JSValuePtr::encode(js0())));
- addJump(emitJumpIfImmediateInteger(regT0), target + 2);
-
- addJump(branchPtr(Equal, regT0, ImmPtr(JSValuePtr::encode(jsBoolean(true)))), target + 2);
- addSlowCase(branchPtr(NotEqual, regT0, ImmPtr(JSValuePtr::encode(jsBoolean(false)))));
-
- isZero.link(this);
- NEXT_OPCODE(op_loop_if_true);
- };
- case op_resolve_base: {
- Identifier* ident = &(m_codeBlock->identifier(currentInstruction[2].u.operand));
- emitPutJITStubArgConstant(ident, 1);
- emitCTICall(JITStubs::cti_op_resolve_base);
- emitPutVirtualRegister(currentInstruction[1].u.operand);
- NEXT_OPCODE(op_resolve_base);
- }
- case op_negate: {
- emitPutJITStubArgFromVirtualRegister(currentInstruction[2].u.operand, 1, regT2);
- emitCTICall(JITStubs::cti_op_negate);
- emitPutVirtualRegister(currentInstruction[1].u.operand);
- NEXT_OPCODE(op_negate);
- }
- case op_resolve_skip: {
- Identifier* ident = &(m_codeBlock->identifier(currentInstruction[2].u.operand));
- emitPutJITStubArgConstant(ident, 1);
- emitPutJITStubArgConstant(currentInstruction[3].u.operand + m_codeBlock->needsFullScopeChain(), 2);
- emitCTICall(JITStubs::cti_op_resolve_skip);
- emitPutVirtualRegister(currentInstruction[1].u.operand);
- NEXT_OPCODE(op_resolve_skip);
- }
- case op_resolve_global: {
- // Fast case
- void* globalObject = currentInstruction[2].u.jsCell;
- Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
-
- unsigned currentIndex = globalResolveInfoIndex++;
- void* structureAddress = &(m_codeBlock->globalResolveInfo(currentIndex).structure);
- void* offsetAddr = &(m_codeBlock->globalResolveInfo(currentIndex).offset);
-
- // Check Structure of global object
- move(ImmPtr(globalObject), regT0);
- loadPtr(structureAddress, regT1);
- Jump noMatch = branchPtr(NotEqual, regT1, Address(regT0, FIELD_OFFSET(JSCell, m_structure))); // Structures don't match
-
- // Load cached property
- loadPtr(Address(regT0, FIELD_OFFSET(JSGlobalObject, m_propertyStorage)), regT0);
- load32(offsetAddr, regT1);
- loadPtr(BaseIndex(regT0, regT1, ScalePtr), regT0);
- emitPutVirtualRegister(currentInstruction[1].u.operand);
- Jump end = jump();
-
- // Slow case
- noMatch.link(this);
- emitPutJITStubArgConstant(globalObject, 1);
- emitPutJITStubArgConstant(ident, 2);
- emitPutJITStubArgConstant(currentIndex, 3);
- emitCTICall(JITStubs::cti_op_resolve_global);
- emitPutVirtualRegister(currentInstruction[1].u.operand);
- end.link(this);
- NEXT_OPCODE(op_resolve_global);
- }
- CTI_COMPILE_BINARY_OP(op_div)
- case op_pre_dec: {
- compileFastArith_op_pre_dec(currentInstruction[1].u.operand);
- NEXT_OPCODE(op_pre_dec);
- }
- case op_jnless: {
- unsigned op1 = currentInstruction[1].u.operand;
- unsigned op2 = currentInstruction[2].u.operand;
- unsigned target = currentInstruction[3].u.operand;
- if (isOperandConstantImmediateInt(op2)) {
- emitGetVirtualRegister(op1, regT0);
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
-#if USE(ALTERNATE_JSIMMEDIATE)
- int32_t op2imm = getConstantOperandImmediateInt(op2);
-#else
- int32_t op2imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)));
-#endif
- addJump(branch32(GreaterThanOrEqual, regT0, Imm32(op2imm)), target + 3);
- } else {
- emitGetVirtualRegisters(op1, regT0, op2, regT1);
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
- emitJumpSlowCaseIfNotImmediateInteger(regT1);
- addJump(branch32(GreaterThanOrEqual, regT0, regT1), target + 3);
- }
- RECORD_JUMP_TARGET(target + 3);
- NEXT_OPCODE(op_jnless);
- }
- case op_not: {
- emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);
- xorPtr(Imm32(static_cast<int32_t>(JSImmediate::FullTagTypeBool)), regT0);
- addSlowCase(branchTestPtr(NonZero, regT0, Imm32(static_cast<int32_t>(~JSImmediate::ExtendedPayloadBitBoolValue))));
- xorPtr(Imm32(static_cast<int32_t>(JSImmediate::FullTagTypeBool | JSImmediate::ExtendedPayloadBitBoolValue)), regT0);
- emitPutVirtualRegister(currentInstruction[1].u.operand);
- NEXT_OPCODE(op_not);
- }
- case op_jfalse: {
- unsigned target = currentInstruction[2].u.operand;
- emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
-
- addJump(branchPtr(Equal, regT0, ImmPtr(JSValuePtr::encode(js0()))), target + 2);
- Jump isNonZero = emitJumpIfImmediateInteger(regT0);
-
- addJump(branchPtr(Equal, regT0, ImmPtr(JSValuePtr::encode(jsBoolean(false)))), target + 2);
- addSlowCase(branchPtr(NotEqual, regT0, ImmPtr(JSValuePtr::encode(jsBoolean(true)))));
-
- isNonZero.link(this);
- RECORD_JUMP_TARGET(target + 2);
- NEXT_OPCODE(op_jfalse);
- };
- case op_jeq_null: {
- unsigned src = currentInstruction[1].u.operand;
- unsigned target = currentInstruction[2].u.operand;
-
- emitGetVirtualRegister(src, regT0);
- Jump isImmediate = emitJumpIfNotJSCell(regT0);
-
- // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
- loadPtr(Address(regT0, FIELD_OFFSET(JSCell, m_structure)), regT2);
- addJump(branchTest32(NonZero, Address(regT2, FIELD_OFFSET(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined)), target + 2);
- Jump wasNotImmediate = jump();
-
- // Now handle the immediate cases - undefined & null
- isImmediate.link(this);
- andPtr(Imm32(~JSImmediate::ExtendedTagBitUndefined), regT0);
- addJump(branchPtr(Equal, regT0, ImmPtr(JSValuePtr::encode(jsNull()))), target + 2);
-
- wasNotImmediate.link(this);
- RECORD_JUMP_TARGET(target + 2);
- NEXT_OPCODE(op_jeq_null);
- };
- case op_jneq_null: {
- unsigned src = currentInstruction[1].u.operand;
- unsigned target = currentInstruction[2].u.operand;
-
- emitGetVirtualRegister(src, regT0);
- Jump isImmediate = emitJumpIfNotJSCell(regT0);
-
- // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
- loadPtr(Address(regT0, FIELD_OFFSET(JSCell, m_structure)), regT2);
- addJump(branchTest32(Zero, Address(regT2, FIELD_OFFSET(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined)), target + 2);
- Jump wasNotImmediate = jump();
-
- // Now handle the immediate cases - undefined & null
- isImmediate.link(this);
- andPtr(Imm32(~JSImmediate::ExtendedTagBitUndefined), regT0);
- addJump(branchPtr(NotEqual, regT0, ImmPtr(JSValuePtr::encode(jsNull()))), target + 2);
-
- wasNotImmediate.link(this);
- RECORD_JUMP_TARGET(target + 2);
- NEXT_OPCODE(op_jneq_null);
- }
- case op_post_inc: {
- compileFastArith_op_post_inc(currentInstruction[1].u.operand, currentInstruction[2].u.operand);
- NEXT_OPCODE(op_post_inc);
- }
- case op_unexpected_load: {
- JSValuePtr v = m_codeBlock->unexpectedConstant(currentInstruction[2].u.operand);
- move(ImmPtr(JSValuePtr::encode(v)), regT0);
- emitPutVirtualRegister(currentInstruction[1].u.operand);
- NEXT_OPCODE(op_unexpected_load);
- }
- case op_jsr: {
- int retAddrDst = currentInstruction[1].u.operand;
- int target = currentInstruction[2].u.operand;
- DataLabelPtr storeLocation = storePtrWithPatch(Address(callFrameRegister, sizeof(Register) * retAddrDst));
- addJump(jump(), target + 2);
- m_jsrSites.append(JSRInfo(storeLocation, label()));
- killLastResultRegister();
- RECORD_JUMP_TARGET(target + 2);
- NEXT_OPCODE(op_jsr);
- }
- case op_sret: {
- jump(Address(callFrameRegister, sizeof(Register) * currentInstruction[1].u.operand));
- killLastResultRegister();
- NEXT_OPCODE(op_sret);
- }
- case op_eq: {
- emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
- emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);
- set32(Equal, regT1, regT0, regT0);
- emitTagAsBoolImmediate(regT0);
- emitPutVirtualRegister(currentInstruction[1].u.operand);
- NEXT_OPCODE(op_eq);
- }
- case op_lshift: {
- compileFastArith_op_lshift(currentInstruction[1].u.operand, currentInstruction[2].u.operand, currentInstruction[3].u.operand);
- NEXT_OPCODE(op_lshift);
- }
- case op_bitand: {
- compileFastArith_op_bitand(currentInstruction[1].u.operand, currentInstruction[2].u.operand, currentInstruction[3].u.operand);
- NEXT_OPCODE(op_bitand);
- }
- case op_rshift: {
- compileFastArith_op_rshift(currentInstruction[1].u.operand, currentInstruction[2].u.operand, currentInstruction[3].u.operand);
- NEXT_OPCODE(op_rshift);
- }
- case op_bitnot: {
- emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
-#if USE(ALTERNATE_JSIMMEDIATE)
- not32(regT0);
- emitFastArithIntToImmNoCheck(regT0, regT0);
-#else
- xorPtr(Imm32(~JSImmediate::TagTypeNumber), regT0);
-#endif
- emitPutVirtualRegister(currentInstruction[1].u.operand);
- NEXT_OPCODE(op_bitnot);
- }
- case op_resolve_with_base: {
- Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
- emitPutJITStubArgConstant(ident, 1);
- emitCTICall(JITStubs::cti_op_resolve_with_base);
- emitPutVirtualRegister(currentInstruction[2].u.operand, regT1);
- emitPutVirtualRegister(currentInstruction[1].u.operand);
- NEXT_OPCODE(op_resolve_with_base);
- }
- case op_new_func_exp: {
- FuncExprNode* func = m_codeBlock->functionExpression(currentInstruction[2].u.operand);
- emitPutJITStubArgConstant(func, 1);
- emitCTICall(JITStubs::cti_op_new_func_exp);
- emitPutVirtualRegister(currentInstruction[1].u.operand);
- NEXT_OPCODE(op_new_func_exp);
- }
- case op_mod: {
- compileFastArith_op_mod(currentInstruction[1].u.operand, currentInstruction[2].u.operand, currentInstruction[3].u.operand);
- NEXT_OPCODE(op_mod);
- }
- case op_jtrue: {
- unsigned target = currentInstruction[2].u.operand;
- emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
-
- Jump isZero = branchPtr(Equal, regT0, ImmPtr(JSValuePtr::encode(js0())));
- addJump(emitJumpIfImmediateInteger(regT0), target + 2);
-
- addJump(branchPtr(Equal, regT0, ImmPtr(JSValuePtr::encode(jsBoolean(true)))), target + 2);
- addSlowCase(branchPtr(NotEqual, regT0, ImmPtr(JSValuePtr::encode(jsBoolean(false)))));
-
- isZero.link(this);
- RECORD_JUMP_TARGET(target + 2);
- NEXT_OPCODE(op_jtrue);
- }
- CTI_COMPILE_BINARY_OP(op_less)
- case op_neq: {
- emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
- emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);
- set32(NotEqual, regT1, regT0, regT0);
- emitTagAsBoolImmediate(regT0);
-
- emitPutVirtualRegister(currentInstruction[1].u.operand);
-
- NEXT_OPCODE(op_neq);
- }
- case op_post_dec: {
- compileFastArith_op_post_dec(currentInstruction[1].u.operand, currentInstruction[2].u.operand);
- NEXT_OPCODE(op_post_dec);
- }
- CTI_COMPILE_BINARY_OP(op_urshift)
- case op_bitxor: {
- emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
- emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);
- xorPtr(regT1, regT0);
- emitFastArithReTagImmediate(regT0, regT0);
- emitPutVirtualRegister(currentInstruction[1].u.operand);
- NEXT_OPCODE(op_bitxor);
- }
- case op_new_regexp: {
- RegExp* regExp = m_codeBlock->regexp(currentInstruction[2].u.operand);
- emitPutJITStubArgConstant(regExp, 1);
- emitCTICall(JITStubs::cti_op_new_regexp);
- emitPutVirtualRegister(currentInstruction[1].u.operand);
- NEXT_OPCODE(op_new_regexp);
- }
- case op_bitor: {
- emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
- emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);
- orPtr(regT1, regT0);
- emitPutVirtualRegister(currentInstruction[1].u.operand);
- NEXT_OPCODE(op_bitor);
- }
- case op_throw: {
- emitPutJITStubArgFromVirtualRegister(currentInstruction[1].u.operand, 1, regT2);
- emitCTICall(JITStubs::cti_op_throw);
- ASSERT(regT0 == returnValueRegister);
-#if PLATFORM(X86_64)
- addPtr(Imm32(0x48), X86::esp);
- pop(X86::ebx);
- pop(X86::r15);
- pop(X86::r14);
- pop(X86::r13);
- pop(X86::r12);
- pop(X86::ebp);
- ret();
-#else
- addPtr(Imm32(0x1c), X86::esp);
- pop(X86::ebx);
- pop(X86::edi);
- pop(X86::esi);
- pop(X86::ebp);
- ret();
-#endif
- NEXT_OPCODE(op_throw);
- }
- case op_get_pnames: {
- emitPutJITStubArgFromVirtualRegister(currentInstruction[2].u.operand, 1, regT2);
- emitCTICall(JITStubs::cti_op_get_pnames);
- emitPutVirtualRegister(currentInstruction[1].u.operand);
- NEXT_OPCODE(op_get_pnames);
- }
- case op_next_pname: {
- emitPutJITStubArgFromVirtualRegister(currentInstruction[2].u.operand, 1, regT2);
- unsigned target = currentInstruction[3].u.operand;
- emitCTICall(JITStubs::cti_op_next_pname);
- Jump endOfIter = branchTestPtr(Zero, regT0);
- emitPutVirtualRegister(currentInstruction[1].u.operand);
- addJump(jump(), target + 3);
- endOfIter.link(this);
- NEXT_OPCODE(op_next_pname);
- }
- case op_push_scope: {
- emitPutJITStubArgFromVirtualRegister(currentInstruction[1].u.operand, 1, regT2);
- emitCTICall(JITStubs::cti_op_push_scope);
- emitPutVirtualRegister(currentInstruction[1].u.operand);
- NEXT_OPCODE(op_push_scope);
- }
- case op_pop_scope: {
- emitCTICall(JITStubs::cti_op_pop_scope);
- NEXT_OPCODE(op_pop_scope);
- }
- CTI_COMPILE_UNARY_OP(op_typeof)
- CTI_COMPILE_UNARY_OP(op_is_undefined)
- CTI_COMPILE_UNARY_OP(op_is_boolean)
- CTI_COMPILE_UNARY_OP(op_is_number)
- CTI_COMPILE_UNARY_OP(op_is_string)
- CTI_COMPILE_UNARY_OP(op_is_object)
- CTI_COMPILE_UNARY_OP(op_is_function)
- case op_stricteq: {
- compileOpStrictEq(currentInstruction, OpStrictEq);
- NEXT_OPCODE(op_stricteq);
- }
- case op_nstricteq: {
- compileOpStrictEq(currentInstruction, OpNStrictEq);
- NEXT_OPCODE(op_nstricteq);
- }
- case op_to_jsnumber: {
- int srcVReg = currentInstruction[2].u.operand;
- emitGetVirtualRegister(srcVReg, regT0);
-
- Jump wasImmediate = emitJumpIfImmediateInteger(regT0);
-
- emitJumpSlowCaseIfNotJSCell(regT0, srcVReg);
- loadPtr(Address(regT0, FIELD_OFFSET(JSCell, m_structure)), regT2);
- addSlowCase(branch32(NotEqual, Address(regT2, FIELD_OFFSET(Structure, m_typeInfo.m_type)), Imm32(NumberType)));
-
- wasImmediate.link(this);
-
- emitPutVirtualRegister(currentInstruction[1].u.operand);
- NEXT_OPCODE(op_to_jsnumber);
- }
- CTI_COMPILE_BINARY_OP(op_in)
- case op_push_new_scope: {
- Identifier* ident = &(m_codeBlock->identifier(currentInstruction[2].u.operand));
- emitPutJITStubArgConstant(ident, 1);
- emitPutJITStubArgFromVirtualRegister(currentInstruction[3].u.operand, 2, regT2);
- emitCTICall(JITStubs::cti_op_push_new_scope);
- emitPutVirtualRegister(currentInstruction[1].u.operand);
- NEXT_OPCODE(op_push_new_scope);
- }
- case op_catch: {
- emitGetCTIParam(STUB_ARGS_callFrame, callFrameRegister);
- emitPutVirtualRegister(currentInstruction[1].u.operand);
- NEXT_OPCODE(op_catch);
- }
- case op_jmp_scopes: {
- unsigned count = currentInstruction[1].u.operand;
- emitPutJITStubArgConstant(count, 1);
- emitCTICall(JITStubs::cti_op_jmp_scopes);
- unsigned target = currentInstruction[2].u.operand;
- addJump(jump(), target + 2);
- RECORD_JUMP_TARGET(target + 2);
- NEXT_OPCODE(op_jmp_scopes);
- }
- case op_put_by_index: {
- emitPutJITStubArgFromVirtualRegister(currentInstruction[1].u.operand, 1, regT2);
- emitPutJITStubArgConstant(currentInstruction[2].u.operand, 2);
- emitPutJITStubArgFromVirtualRegister(currentInstruction[3].u.operand, 3, regT2);
- emitCTICall(JITStubs::cti_op_put_by_index);
- NEXT_OPCODE(op_put_by_index);
- }
- case op_switch_imm: {
- unsigned tableIndex = currentInstruction[1].u.operand;
- unsigned defaultOffset = currentInstruction[2].u.operand;
- unsigned scrutinee = currentInstruction[3].u.operand;
-
- // create jump table for switch destinations, track this switch statement.
- SimpleJumpTable* jumpTable = &m_codeBlock->immediateSwitchJumpTable(tableIndex);
- m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset, SwitchRecord::Immediate));
- jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
-
- emitPutJITStubArgFromVirtualRegister(scrutinee, 1, regT2);
- emitPutJITStubArgConstant(tableIndex, 2);
- emitCTICall(JITStubs::cti_op_switch_imm);
- jump(regT0);
- NEXT_OPCODE(op_switch_imm);
- }
- case op_switch_char: {
- unsigned tableIndex = currentInstruction[1].u.operand;
- unsigned defaultOffset = currentInstruction[2].u.operand;
- unsigned scrutinee = currentInstruction[3].u.operand;
-
- // create jump table for switch destinations, track this switch statement.
- SimpleJumpTable* jumpTable = &m_codeBlock->characterSwitchJumpTable(tableIndex);
- m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset, SwitchRecord::Character));
- jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
-
- emitPutJITStubArgFromVirtualRegister(scrutinee, 1, regT2);
- emitPutJITStubArgConstant(tableIndex, 2);
- emitCTICall(JITStubs::cti_op_switch_char);
- jump(regT0);
- NEXT_OPCODE(op_switch_char);
- }
- case op_switch_string: {
- unsigned tableIndex = currentInstruction[1].u.operand;
- unsigned defaultOffset = currentInstruction[2].u.operand;
- unsigned scrutinee = currentInstruction[3].u.operand;
-
- // create jump table for switch destinations, track this switch statement.
- StringJumpTable* jumpTable = &m_codeBlock->stringSwitchJumpTable(tableIndex);
- m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset));
-
- emitPutJITStubArgFromVirtualRegister(scrutinee, 1, regT2);
- emitPutJITStubArgConstant(tableIndex, 2);
- emitCTICall(JITStubs::cti_op_switch_string);
- jump(regT0);
- NEXT_OPCODE(op_switch_string);
- }
- case op_del_by_val: {
- emitPutJITStubArgFromVirtualRegister(currentInstruction[2].u.operand, 1, regT2);
- emitPutJITStubArgFromVirtualRegister(currentInstruction[3].u.operand, 2, regT2);
- emitCTICall(JITStubs::cti_op_del_by_val);
- emitPutVirtualRegister(currentInstruction[1].u.operand);
- NEXT_OPCODE(op_del_by_val);
- }
- case op_put_getter: {
- emitPutJITStubArgFromVirtualRegister(currentInstruction[1].u.operand, 1, regT2);
- Identifier* ident = &(m_codeBlock->identifier(currentInstruction[2].u.operand));
- emitPutJITStubArgConstant(ident, 2);
- emitPutJITStubArgFromVirtualRegister(currentInstruction[3].u.operand, 3, regT2);
- emitCTICall(JITStubs::cti_op_put_getter);
- NEXT_OPCODE(op_put_getter);
- }
- case op_put_setter: {
- emitPutJITStubArgFromVirtualRegister(currentInstruction[1].u.operand, 1, regT2);
- Identifier* ident = &(m_codeBlock->identifier(currentInstruction[2].u.operand));
- emitPutJITStubArgConstant(ident, 2);
- emitPutJITStubArgFromVirtualRegister(currentInstruction[3].u.operand, 3, regT2);
- emitCTICall(JITStubs::cti_op_put_setter);
- NEXT_OPCODE(op_put_setter);
- }
- case op_new_error: {
- JSValuePtr message = m_codeBlock->unexpectedConstant(currentInstruction[3].u.operand);
- emitPutJITStubArgConstant(currentInstruction[2].u.operand, 1);
- emitPutJITStubArgConstant(JSValuePtr::encode(message), 2);
- emitPutJITStubArgConstant(m_bytecodeIndex, 3);
- emitCTICall(JITStubs::cti_op_new_error);
- emitPutVirtualRegister(currentInstruction[1].u.operand);
- NEXT_OPCODE(op_new_error);
- }
- case op_debug: {
- emitPutJITStubArgConstant(currentInstruction[1].u.operand, 1);
- emitPutJITStubArgConstant(currentInstruction[2].u.operand, 2);
- emitPutJITStubArgConstant(currentInstruction[3].u.operand, 3);
- emitCTICall(JITStubs::cti_op_debug);
- NEXT_OPCODE(op_debug);
- }
- case op_eq_null: {
- unsigned dst = currentInstruction[1].u.operand;
- unsigned src1 = currentInstruction[2].u.operand;
-
- emitGetVirtualRegister(src1, regT0);
- Jump isImmediate = emitJumpIfNotJSCell(regT0);
-
- loadPtr(Address(regT0, FIELD_OFFSET(JSCell, m_structure)), regT2);
- setTest32(NonZero, Address(regT2, FIELD_OFFSET(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined), regT0);
-
- Jump wasNotImmediate = jump();
-
- isImmediate.link(this);
- andPtr(Imm32(~JSImmediate::ExtendedTagBitUndefined), regT0);
- setPtr(Equal, regT0, Imm32(JSImmediate::FullTagTypeNull), regT0);
+ switch (m_interpreter->getOpcodeID(currentInstruction->u.opcode)) {
+ DEFINE_BINARY_OP(op_del_by_val)
+ DEFINE_BINARY_OP(op_div)
+ DEFINE_BINARY_OP(op_in)
+ DEFINE_BINARY_OP(op_less)
+ DEFINE_BINARY_OP(op_lesseq)
+ DEFINE_BINARY_OP(op_urshift)
+ DEFINE_UNARY_OP(op_get_pnames)
+ DEFINE_UNARY_OP(op_is_boolean)
+ DEFINE_UNARY_OP(op_is_function)
+ DEFINE_UNARY_OP(op_is_number)
+ DEFINE_UNARY_OP(op_is_object)
+ DEFINE_UNARY_OP(op_is_string)
+ DEFINE_UNARY_OP(op_is_undefined)
+ DEFINE_UNARY_OP(op_negate)
+ DEFINE_UNARY_OP(op_typeof)
+
+ DEFINE_OP(op_add)
+ DEFINE_OP(op_bitand)
+ DEFINE_OP(op_bitnot)
+ DEFINE_OP(op_bitor)
+ DEFINE_OP(op_bitxor)
+ DEFINE_OP(op_call)
+ DEFINE_OP(op_call_eval)
+ DEFINE_OP(op_call_varargs)
+ DEFINE_OP(op_catch)
+ DEFINE_OP(op_construct)
+ DEFINE_OP(op_construct_verify)
+ DEFINE_OP(op_convert_this)
+ DEFINE_OP(op_init_arguments)
+ DEFINE_OP(op_create_arguments)
+ DEFINE_OP(op_debug)
+ DEFINE_OP(op_del_by_id)
+ DEFINE_OP(op_end)
+ DEFINE_OP(op_enter)
+ DEFINE_OP(op_enter_with_activation)
+ DEFINE_OP(op_eq)
+ DEFINE_OP(op_eq_null)
+ DEFINE_OP(op_get_by_id)
+ DEFINE_OP(op_get_by_val)
+ DEFINE_OP(op_get_global_var)
+ DEFINE_OP(op_get_scoped_var)
+ DEFINE_OP(op_instanceof)
+ DEFINE_OP(op_jeq_null)
+ DEFINE_OP(op_jfalse)
+ DEFINE_OP(op_jmp)
+ DEFINE_OP(op_jmp_scopes)
+ DEFINE_OP(op_jneq_null)
+ DEFINE_OP(op_jneq_ptr)
+ DEFINE_OP(op_jnless)
+ DEFINE_OP(op_jnlesseq)
+ DEFINE_OP(op_jsr)
+ DEFINE_OP(op_jtrue)
+ DEFINE_OP(op_load_varargs)
+ DEFINE_OP(op_loop)
+ DEFINE_OP(op_loop_if_less)
+ DEFINE_OP(op_loop_if_lesseq)
+ DEFINE_OP(op_loop_if_true)
+ DEFINE_OP(op_lshift)
+ DEFINE_OP(op_method_check)
+ DEFINE_OP(op_mod)
+ DEFINE_OP(op_mov)
+ DEFINE_OP(op_mul)
+ DEFINE_OP(op_neq)
+ DEFINE_OP(op_neq_null)
+ DEFINE_OP(op_new_array)
+ DEFINE_OP(op_new_error)
+ DEFINE_OP(op_new_func)
+ DEFINE_OP(op_new_func_exp)
+ DEFINE_OP(op_new_object)
+ DEFINE_OP(op_new_regexp)
+ DEFINE_OP(op_next_pname)
+ DEFINE_OP(op_not)
+ DEFINE_OP(op_nstricteq)
+ DEFINE_OP(op_pop_scope)
+ DEFINE_OP(op_post_dec)
+ DEFINE_OP(op_post_inc)
+ DEFINE_OP(op_pre_dec)
+ DEFINE_OP(op_pre_inc)
+ DEFINE_OP(op_profile_did_call)
+ DEFINE_OP(op_profile_will_call)
+ DEFINE_OP(op_push_new_scope)
+ DEFINE_OP(op_push_scope)
+ DEFINE_OP(op_put_by_id)
+ DEFINE_OP(op_put_by_index)
+ DEFINE_OP(op_put_by_val)
+ DEFINE_OP(op_put_getter)
+ DEFINE_OP(op_put_global_var)
+ DEFINE_OP(op_put_scoped_var)
+ DEFINE_OP(op_put_setter)
+ DEFINE_OP(op_resolve)
+ DEFINE_OP(op_resolve_base)
+ DEFINE_OP(op_resolve_func)
+ DEFINE_OP(op_resolve_global)
+ DEFINE_OP(op_resolve_skip)
+ DEFINE_OP(op_resolve_with_base)
+ DEFINE_OP(op_ret)
+ DEFINE_OP(op_rshift)
+ DEFINE_OP(op_sret)
+ DEFINE_OP(op_strcat)
+ DEFINE_OP(op_stricteq)
+ DEFINE_OP(op_sub)
+ DEFINE_OP(op_switch_char)
+ DEFINE_OP(op_switch_imm)
+ DEFINE_OP(op_switch_string)
+ DEFINE_OP(op_tear_off_activation)
+ DEFINE_OP(op_tear_off_arguments)
+ DEFINE_OP(op_throw)
+ DEFINE_OP(op_to_jsnumber)
+ DEFINE_OP(op_to_primitive)
+ DEFINE_OP(op_unexpected_load)
- wasNotImmediate.link(this);
-
- emitTagAsBoolImmediate(regT0);
- emitPutVirtualRegister(dst);
-
- NEXT_OPCODE(op_eq_null);
- }
- case op_neq_null: {
- unsigned dst = currentInstruction[1].u.operand;
- unsigned src1 = currentInstruction[2].u.operand;
-
- emitGetVirtualRegister(src1, regT0);
- Jump isImmediate = emitJumpIfNotJSCell(regT0);
-
- loadPtr(Address(regT0, FIELD_OFFSET(JSCell, m_structure)), regT2);
- setTest32(Zero, Address(regT2, FIELD_OFFSET(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined), regT0);
-
- Jump wasNotImmediate = jump();
-
- isImmediate.link(this);
-
- andPtr(Imm32(~JSImmediate::ExtendedTagBitUndefined), regT0);
- setPtr(NotEqual, regT0, Imm32(JSImmediate::FullTagTypeNull), regT0);
-
- wasNotImmediate.link(this);
-
- emitTagAsBoolImmediate(regT0);
- emitPutVirtualRegister(dst);
-
- NEXT_OPCODE(op_neq_null);
- }
- case op_enter: {
- // Even though CTI doesn't use them, we initialize our constant
- // registers to zap stale pointers, to avoid unnecessarily prolonging
- // object lifetime and increasing GC pressure.
- size_t count = m_codeBlock->m_numVars + m_codeBlock->numberOfConstantRegisters();
- for (size_t j = 0; j < count; ++j)
- emitInitRegister(j);
-
- NEXT_OPCODE(op_enter);
- }
- case op_enter_with_activation: {
- // Even though CTI doesn't use them, we initialize our constant
- // registers to zap stale pointers, to avoid unnecessarily prolonging
- // object lifetime and increasing GC pressure.
- size_t count = m_codeBlock->m_numVars + m_codeBlock->numberOfConstantRegisters();
- for (size_t j = 0; j < count; ++j)
- emitInitRegister(j);
-
- emitCTICall(JITStubs::cti_op_push_activation);
- emitPutVirtualRegister(currentInstruction[1].u.operand);
-
- NEXT_OPCODE(op_enter_with_activation);
- }
- case op_create_arguments: {
- if (m_codeBlock->m_numParameters == 1)
- emitCTICall(JITStubs::cti_op_create_arguments_no_params);
- else
- emitCTICall(JITStubs::cti_op_create_arguments);
- NEXT_OPCODE(op_create_arguments);
- }
- case op_convert_this: {
- emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
-
- emitJumpSlowCaseIfNotJSCell(regT0);
- loadPtr(Address(regT0, FIELD_OFFSET(JSCell, m_structure)), regT1);
- addSlowCase(branchTest32(NonZero, Address(regT1, FIELD_OFFSET(Structure, m_typeInfo.m_flags)), Imm32(NeedsThisConversion)));
-
- NEXT_OPCODE(op_convert_this);
- }
- case op_profile_will_call: {
- emitGetCTIParam(STUB_ARGS_profilerReference, regT0);
- Jump noProfiler = branchTestPtr(Zero, Address(regT0));
- emitPutJITStubArgFromVirtualRegister(currentInstruction[1].u.operand, 1, regT0);
- emitCTICall(JITStubs::cti_op_profile_will_call);
- noProfiler.link(this);
-
- NEXT_OPCODE(op_profile_will_call);
- }
- case op_profile_did_call: {
- emitGetCTIParam(STUB_ARGS_profilerReference, regT0);
- Jump noProfiler = branchTestPtr(Zero, Address(regT0));
- emitPutJITStubArgFromVirtualRegister(currentInstruction[1].u.operand, 1, regT0);
- emitCTICall(JITStubs::cti_op_profile_did_call);
- noProfiler.link(this);
-
- NEXT_OPCODE(op_profile_did_call);
- }
case op_get_array_length:
case op_get_by_id_chain:
case op_get_by_id_generic:
@@ -1262,11 +289,11 @@ void JIT::privateCompileMainPass()
}
}
- ASSERT(propertyAccessInstructionIndex == m_codeBlock->numberOfStructureStubInfos());
- ASSERT(callLinkInfoIndex == m_codeBlock->numberOfCallLinkInfos());
+ ASSERT(m_propertyAccessInstructionIndex == m_codeBlock->numberOfStructureStubInfos());
+ ASSERT(m_callLinkInfoIndex == m_codeBlock->numberOfCallLinkInfos());
#ifndef NDEBUG
- // reset this, in order to guard it's use with asserts
+ // Reset this, in order to guard its use with ASSERTs.
m_bytecodeIndex = (unsigned)-1;
#endif
}
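
The DEFINE_OP entries above replace the hand-written switch cases this hunk removes, but the macro's expansion is not shown in the patch. A plausible sketch, assuming the per-opcode emitter methods are named emit_<opcode> (an assumption, not confirmed by this diff):

    // Hypothetical expansion; emit_##name is assumed, not shown in this patch.
    #define DEFINE_OP(name) \
        case name: { \
            emit_##name(currentInstruction); \
            NEXT_OPCODE(name); \
        }
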
@@ -1283,8 +310,9 @@ void JIT::privateCompileLinkPass()
void JIT::privateCompileSlowCases()
{
Instruction* instructionsBegin = m_codeBlock->instructions().begin();
- unsigned propertyAccessInstructionIndex = 0;
- unsigned callLinkInfoIndex = 0;
+
+ m_propertyAccessInstructionIndex = 0;
+ m_callLinkInfoIndex = 0;
for (Vector<SlowCaseEntry>::iterator iter = m_slowCases.begin(); iter != m_slowCases.end();) {
// FIXME: enable peephole optimizations for slow cases when applicable
@@ -1296,310 +324,47 @@ void JIT::privateCompileSlowCases()
#endif
Instruction* currentInstruction = instructionsBegin + m_bytecodeIndex;
- switch (OpcodeID opcodeID = m_interpreter->getOpcodeID(currentInstruction->u.opcode)) {
- case op_convert_this: {
- linkSlowCase(iter);
- linkSlowCase(iter);
- emitPutJITStubArg(regT0, 1);
- emitCTICall(JITStubs::cti_op_convert_this);
- emitPutVirtualRegister(currentInstruction[1].u.operand);
- NEXT_OPCODE(op_convert_this);
- }
- case op_add: {
- compileFastArithSlow_op_add(currentInstruction, iter);
- NEXT_OPCODE(op_add);
- }
- case op_construct_verify: {
- linkSlowCase(iter);
- linkSlowCase(iter);
- emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);
- emitPutVirtualRegister(currentInstruction[1].u.operand);
-
- NEXT_OPCODE(op_construct_verify);
- }
- case op_get_by_val: {
- // The slow case that handles accesses to arrays (below) may jump back up to here.
- Label beginGetByValSlow(this);
-
- Jump notImm = getSlowCase(iter);
- linkSlowCase(iter);
- linkSlowCase(iter);
- emitFastArithIntToImmNoCheck(regT1, regT1);
- notImm.link(this);
- emitPutJITStubArg(regT0, 1);
- emitPutJITStubArg(regT1, 2);
- emitCTICall(JITStubs::cti_op_get_by_val);
- emitPutVirtualRegister(currentInstruction[1].u.operand);
- emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_get_by_val));
-
- // This is slow case that handles accesses to arrays above the fast cut-off.
- // First, check if this is an access to the vector
- linkSlowCase(iter);
- branch32(AboveOrEqual, regT1, Address(regT2, FIELD_OFFSET(ArrayStorage, m_vectorLength)), beginGetByValSlow);
-
- // okay, missed the fast region, but it is still in the vector. Get the value.
- loadPtr(BaseIndex(regT2, regT1, ScalePtr, FIELD_OFFSET(ArrayStorage, m_vector[0])), regT2);
- // Check whether the value loaded is zero; if so we need to return undefined.
- branchTestPtr(Zero, regT2, beginGetByValSlow);
- move(regT2, regT0);
- emitPutVirtualRegister(currentInstruction[1].u.operand, regT0);
-
- NEXT_OPCODE(op_get_by_val);
- }
- case op_sub: {
- compileFastArithSlow_op_sub(currentInstruction, iter);
- NEXT_OPCODE(op_sub);
- }
- case op_rshift: {
- compileFastArithSlow_op_rshift(currentInstruction[1].u.operand, currentInstruction[2].u.operand, currentInstruction[3].u.operand, iter);
- NEXT_OPCODE(op_rshift);
- }
- case op_lshift: {
- compileFastArithSlow_op_lshift(currentInstruction[1].u.operand, currentInstruction[2].u.operand, currentInstruction[3].u.operand, iter);
- NEXT_OPCODE(op_lshift);
- }
- case op_loop_if_less: {
- unsigned op2 = currentInstruction[2].u.operand;
- unsigned target = currentInstruction[3].u.operand;
- if (isOperandConstantImmediateInt(op2)) {
- linkSlowCase(iter);
- emitPutJITStubArg(regT0, 1);
- emitPutJITStubArgFromVirtualRegister(op2, 2, regT2);
- emitCTICall(JITStubs::cti_op_loop_if_less);
- emitJumpSlowToHot(branchTest32(NonZero, regT0), target + 3);
- } else {
- linkSlowCase(iter);
- linkSlowCase(iter);
- emitPutJITStubArg(regT0, 1);
- emitPutJITStubArg(regT1, 2);
- emitCTICall(JITStubs::cti_op_loop_if_less);
- emitJumpSlowToHot(branchTest32(NonZero, regT0), target + 3);
- }
- NEXT_OPCODE(op_loop_if_less);
- }
- case op_put_by_id: {
- compilePutByIdSlowCase(currentInstruction[1].u.operand, &(m_codeBlock->identifier(currentInstruction[2].u.operand)), currentInstruction[3].u.operand, iter, propertyAccessInstructionIndex++);
- NEXT_OPCODE(op_put_by_id);
- }
- case op_get_by_id: {
- compileGetByIdSlowCase(currentInstruction[1].u.operand, currentInstruction[2].u.operand, &(m_codeBlock->identifier(currentInstruction[3].u.operand)), iter, propertyAccessInstructionIndex++);
- NEXT_OPCODE(op_get_by_id);
- }
- case op_loop_if_lesseq: {
- unsigned op2 = currentInstruction[2].u.operand;
- unsigned target = currentInstruction[3].u.operand;
- if (isOperandConstantImmediateInt(op2)) {
- linkSlowCase(iter);
- emitPutJITStubArg(regT0, 1);
- emitPutJITStubArgFromVirtualRegister(currentInstruction[2].u.operand, 2, regT2);
- emitCTICall(JITStubs::cti_op_loop_if_lesseq);
- emitJumpSlowToHot(branchTest32(NonZero, regT0), target + 3);
- } else {
- linkSlowCase(iter);
- linkSlowCase(iter);
- emitPutJITStubArg(regT0, 1);
- emitPutJITStubArg(regT1, 2);
- emitCTICall(JITStubs::cti_op_loop_if_lesseq);
- emitJumpSlowToHot(branchTest32(NonZero, regT0), target + 3);
- }
- NEXT_OPCODE(op_loop_if_lesseq);
- }
- case op_pre_inc: {
- compileFastArithSlow_op_pre_inc(currentInstruction[1].u.operand, iter);
- NEXT_OPCODE(op_pre_inc);
- }
- case op_put_by_val: {
- // Normal slow cases - either is not an immediate imm, or is an array.
- Jump notImm = getSlowCase(iter);
- linkSlowCase(iter);
- linkSlowCase(iter);
- emitFastArithIntToImmNoCheck(regT1, regT1);
- notImm.link(this);
- emitGetVirtualRegister(currentInstruction[3].u.operand, regT2);
- emitPutJITStubArg(regT0, 1);
- emitPutJITStubArg(regT1, 2);
- emitPutJITStubArg(regT2, 3);
- emitCTICall(JITStubs::cti_op_put_by_val);
- emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_put_by_val));
-
- // slow cases for immediate int accesses to arrays
- linkSlowCase(iter);
- linkSlowCase(iter);
- emitGetVirtualRegister(currentInstruction[3].u.operand, regT2);
- emitPutJITStubArg(regT0, 1);
- emitPutJITStubArg(regT1, 2);
- emitPutJITStubArg(regT2, 3);
- emitCTICall(JITStubs::cti_op_put_by_val_array);
-
- NEXT_OPCODE(op_put_by_val);
- }
- case op_loop_if_true: {
- linkSlowCase(iter);
- emitPutJITStubArg(regT0, 1);
- emitCTICall(JITStubs::cti_op_jtrue);
- unsigned target = currentInstruction[2].u.operand;
- emitJumpSlowToHot(branchTest32(NonZero, regT0), target + 2);
- NEXT_OPCODE(op_loop_if_true);
- }
- case op_pre_dec: {
- compileFastArithSlow_op_pre_dec(currentInstruction[1].u.operand, iter);
- NEXT_OPCODE(op_pre_dec);
- }
- case op_jnless: {
- unsigned op2 = currentInstruction[2].u.operand;
- unsigned target = currentInstruction[3].u.operand;
- if (isOperandConstantImmediateInt(op2)) {
- linkSlowCase(iter);
- emitPutJITStubArg(regT0, 1);
- emitPutJITStubArgFromVirtualRegister(currentInstruction[2].u.operand, 2, regT2);
- emitCTICall(JITStubs::cti_op_jless);
- emitJumpSlowToHot(branchTest32(Zero, regT0), target + 3);
- } else {
- linkSlowCase(iter);
- linkSlowCase(iter);
- emitPutJITStubArg(regT0, 1);
- emitPutJITStubArg(regT1, 2);
- emitCTICall(JITStubs::cti_op_jless);
- emitJumpSlowToHot(branchTest32(Zero, regT0), target + 3);
- }
- NEXT_OPCODE(op_jnless);
- }
- case op_not: {
- linkSlowCase(iter);
- xorPtr(Imm32(static_cast<int32_t>(JSImmediate::FullTagTypeBool)), regT0);
- emitPutJITStubArg(regT0, 1);
- emitCTICall(JITStubs::cti_op_not);
- emitPutVirtualRegister(currentInstruction[1].u.operand);
- NEXT_OPCODE(op_not);
- }
- case op_jfalse: {
- linkSlowCase(iter);
- emitPutJITStubArg(regT0, 1);
- emitCTICall(JITStubs::cti_op_jtrue);
- unsigned target = currentInstruction[2].u.operand;
- emitJumpSlowToHot(branchTest32(Zero, regT0), target + 2); // inverted!
- NEXT_OPCODE(op_jfalse);
- }
- case op_post_inc: {
- compileFastArithSlow_op_post_inc(currentInstruction[1].u.operand, currentInstruction[2].u.operand, iter);
- NEXT_OPCODE(op_post_inc);
- }
- case op_bitnot: {
- linkSlowCase(iter);
- emitPutJITStubArg(regT0, 1);
- emitCTICall(JITStubs::cti_op_bitnot);
- emitPutVirtualRegister(currentInstruction[1].u.operand);
- NEXT_OPCODE(op_bitnot);
- }
- case op_bitand: {
- compileFastArithSlow_op_bitand(currentInstruction[1].u.operand, currentInstruction[2].u.operand, currentInstruction[3].u.operand, iter);
- NEXT_OPCODE(op_bitand);
- }
- case op_jtrue: {
- linkSlowCase(iter);
- emitPutJITStubArg(regT0, 1);
- emitCTICall(JITStubs::cti_op_jtrue);
- unsigned target = currentInstruction[2].u.operand;
- emitJumpSlowToHot(branchTest32(NonZero, regT0), target + 2);
- NEXT_OPCODE(op_jtrue);
- }
- case op_post_dec: {
- compileFastArithSlow_op_post_dec(currentInstruction[1].u.operand, currentInstruction[2].u.operand, iter);
- NEXT_OPCODE(op_post_dec);
- }
- case op_bitxor: {
- linkSlowCase(iter);
- emitPutJITStubArg(regT0, 1);
- emitPutJITStubArg(regT1, 2);
- emitCTICall(JITStubs::cti_op_bitxor);
- emitPutVirtualRegister(currentInstruction[1].u.operand);
- NEXT_OPCODE(op_bitxor);
- }
- case op_bitor: {
- linkSlowCase(iter);
- emitPutJITStubArg(regT0, 1);
- emitPutJITStubArg(regT1, 2);
- emitCTICall(JITStubs::cti_op_bitor);
- emitPutVirtualRegister(currentInstruction[1].u.operand);
- NEXT_OPCODE(op_bitor);
- }
- case op_eq: {
- linkSlowCase(iter);
- emitPutJITStubArg(regT0, 1);
- emitPutJITStubArg(regT1, 2);
- emitCTICall(JITStubs::cti_op_eq);
- emitPutVirtualRegister(currentInstruction[1].u.operand);
- NEXT_OPCODE(op_eq);
- }
- case op_neq: {
- linkSlowCase(iter);
- emitPutJITStubArg(regT0, 1);
- emitPutJITStubArg(regT1, 2);
- emitCTICall(JITStubs::cti_op_neq);
- emitPutVirtualRegister(currentInstruction[1].u.operand);
- NEXT_OPCODE(op_neq);
- }
- case op_stricteq: {
- linkSlowCase(iter);
- linkSlowCase(iter);
- emitPutJITStubArg(regT0, 1);
- emitPutJITStubArg(regT1, 2);
- emitCTICall(JITStubs::cti_op_stricteq);
- emitPutVirtualRegister(currentInstruction[1].u.operand);
- NEXT_OPCODE(op_stricteq);
- }
- case op_nstricteq: {
- linkSlowCase(iter);
- linkSlowCase(iter);
- emitPutJITStubArg(regT0, 1);
- emitPutJITStubArg(regT1, 2);
- emitCTICall(JITStubs::cti_op_nstricteq);
- emitPutVirtualRegister(currentInstruction[1].u.operand);
- NEXT_OPCODE(op_nstricteq);
- }
- case op_instanceof: {
- linkSlowCase(iter);
- linkSlowCase(iter);
- linkSlowCase(iter);
- emitPutJITStubArgFromVirtualRegister(currentInstruction[2].u.operand, 1, regT2);
- emitPutJITStubArgFromVirtualRegister(currentInstruction[3].u.operand, 2, regT2);
- emitPutJITStubArgFromVirtualRegister(currentInstruction[4].u.operand, 3, regT2);
- emitCTICall(JITStubs::cti_op_instanceof);
- emitPutVirtualRegister(currentInstruction[1].u.operand);
- NEXT_OPCODE(op_instanceof);
- }
- case op_mod: {
- compileFastArithSlow_op_mod(currentInstruction[1].u.operand, currentInstruction[2].u.operand, currentInstruction[3].u.operand, iter);
- NEXT_OPCODE(op_mod);
- }
- case op_mul: {
- compileFastArithSlow_op_mul(currentInstruction, iter);
- NEXT_OPCODE(op_mul);
- }
-
- case op_call: {
- compileOpCallSlowCase(currentInstruction, iter, callLinkInfoIndex++, opcodeID);
- NEXT_OPCODE(op_call);
- }
- case op_call_eval: {
- compileOpCallSlowCase(currentInstruction, iter, callLinkInfoIndex++, opcodeID);
- NEXT_OPCODE(op_call_eval);
- }
- case op_construct: {
- compileOpCallSlowCase(currentInstruction, iter, callLinkInfoIndex++, opcodeID);
- NEXT_OPCODE(op_construct);
- }
- case op_to_jsnumber: {
- linkSlowCaseIfNotJSCell(iter, currentInstruction[2].u.operand);
- linkSlowCase(iter);
-
- emitPutJITStubArg(regT0, 1);
- emitCTICall(JITStubs::cti_op_to_jsnumber);
-
- emitPutVirtualRegister(currentInstruction[1].u.operand);
- NEXT_OPCODE(op_to_jsnumber);
- }
-
+ switch (m_interpreter->getOpcodeID(currentInstruction->u.opcode)) {
+ DEFINE_SLOWCASE_OP(op_add)
+ DEFINE_SLOWCASE_OP(op_bitand)
+ DEFINE_SLOWCASE_OP(op_bitnot)
+ DEFINE_SLOWCASE_OP(op_bitor)
+ DEFINE_SLOWCASE_OP(op_bitxor)
+ DEFINE_SLOWCASE_OP(op_call)
+ DEFINE_SLOWCASE_OP(op_call_eval)
+ DEFINE_SLOWCASE_OP(op_call_varargs)
+ DEFINE_SLOWCASE_OP(op_construct)
+ DEFINE_SLOWCASE_OP(op_construct_verify)
+ DEFINE_SLOWCASE_OP(op_convert_this)
+ DEFINE_SLOWCASE_OP(op_eq)
+ DEFINE_SLOWCASE_OP(op_get_by_id)
+ DEFINE_SLOWCASE_OP(op_get_by_val)
+ DEFINE_SLOWCASE_OP(op_instanceof)
+ DEFINE_SLOWCASE_OP(op_jfalse)
+ DEFINE_SLOWCASE_OP(op_jnless)
+ DEFINE_SLOWCASE_OP(op_jnlesseq)
+ DEFINE_SLOWCASE_OP(op_jtrue)
+ DEFINE_SLOWCASE_OP(op_loop_if_less)
+ DEFINE_SLOWCASE_OP(op_loop_if_lesseq)
+ DEFINE_SLOWCASE_OP(op_loop_if_true)
+ DEFINE_SLOWCASE_OP(op_lshift)
+ DEFINE_SLOWCASE_OP(op_mod)
+ DEFINE_SLOWCASE_OP(op_mul)
+ DEFINE_SLOWCASE_OP(op_method_check)
+ DEFINE_SLOWCASE_OP(op_neq)
+ DEFINE_SLOWCASE_OP(op_not)
+ DEFINE_SLOWCASE_OP(op_nstricteq)
+ DEFINE_SLOWCASE_OP(op_post_dec)
+ DEFINE_SLOWCASE_OP(op_post_inc)
+ DEFINE_SLOWCASE_OP(op_pre_dec)
+ DEFINE_SLOWCASE_OP(op_pre_inc)
+ DEFINE_SLOWCASE_OP(op_put_by_id)
+ DEFINE_SLOWCASE_OP(op_put_by_val)
+ DEFINE_SLOWCASE_OP(op_rshift)
+ DEFINE_SLOWCASE_OP(op_stricteq)
+ DEFINE_SLOWCASE_OP(op_sub)
+ DEFINE_SLOWCASE_OP(op_to_jsnumber)
+ DEFINE_SLOWCASE_OP(op_to_primitive)
default:
ASSERT_NOT_REACHED();
}
@@ -1611,12 +376,12 @@ void JIT::privateCompileSlowCases()
}
#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
- ASSERT(propertyAccessInstructionIndex == m_codeBlock->numberOfStructureStubInfos());
+ ASSERT(m_propertyAccessInstructionIndex == m_codeBlock->numberOfStructureStubInfos());
#endif
- ASSERT(callLinkInfoIndex == m_codeBlock->numberOfCallLinkInfos());
+ ASSERT(m_callLinkInfoIndex == m_codeBlock->numberOfCallLinkInfos());
#ifndef NDEBUG
- // reset this, in order to guard it's use with asserts
+ // Reset this, in order to guard its use with ASSERTs.
m_bytecodeIndex = (unsigned)-1;
#endif
}
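
The slow-case dispatch gets the same macro treatment. A minimal sketch of what DEFINE_SLOWCASE_OP might expand to, assuming emitSlow_<opcode> methods that take the slow-case iterator (both names are assumptions about code outside this diff):

    // Hypothetical expansion; emitSlow_##name is assumed, not shown in this patch.
    #define DEFINE_SLOWCASE_OP(name) \
        case name: { \
            emitSlow_##name(currentInstruction, iter); \
            NEXT_OPCODE(name); \
        }
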
@@ -1629,7 +394,7 @@ void JIT::privateCompile()
#endif
// Could use a pop_m, but would need to offset the following instruction if so.
- pop(regT2);
+ preserveReturnAddressAfterCall(regT2);
emitPutToCallFrameHeader(regT2, RegisterFile::ReturnPC);
Jump slowRegisterFileCheck;
@@ -1638,10 +403,10 @@ void JIT::privateCompile()
// In the case of a fast linked call, we do not set this up in the caller.
emitPutImmediateToCallFrameHeader(m_codeBlock, RegisterFile::CodeBlock);
- emitGetCTIParam(STUB_ARGS_registerFile, regT0);
+ peek(regT0, FIELD_OFFSET(JITStackFrame, registerFile) / sizeof (void*));
addPtr(Imm32(m_codeBlock->m_numCalleeRegisters * sizeof(Register)), callFrameRegister, regT1);
-
- slowRegisterFileCheck = branch32(GreaterThan, regT1, Address(regT0, FIELD_OFFSET(RegisterFile, m_end)));
+
+ slowRegisterFileCheck = branchPtr(Above, regT1, Address(regT0, FIELD_OFFSET(RegisterFile, m_end)));
afterRegisterFileCheck = label();
}
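
The change from branch32(GreaterThan, ...) to branchPtr(Above, ...) is a correctness fix: addresses must be compared as unsigned quantities, and a signed 32-bit compare misbehaves once the register file lives above 0x80000000. A self-contained illustration with hypothetical addresses:

    // Standalone demonstration (values are hypothetical; assumes two's complement).
    #include <cassert>
    #include <cstdint>

    int main()
    {
        uint32_t end = 0x90000000u; // pretend RegisterFile::m_end lives here
        uint32_t top = 0x7fffff00u; // new frame top, genuinely below m_end

        // Unsigned compare, as branchPtr(Above) performs: correctly in bounds.
        assert(!(top > end));

        // Signed compare, as branch32(GreaterThan) performed: top converts to a
        // positive int32_t while end converts to a negative one, so the check
        // spuriously reports overflow and takes the slow path.
        assert(static_cast<int32_t>(top) > static_cast<int32_t>(end));
        return 0;
    }
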
@@ -1651,25 +416,17 @@ void JIT::privateCompile()
if (m_codeBlock->codeType() == FunctionCode) {
slowRegisterFileCheck.link(this);
- m_bytecodeIndex = 0; // emitCTICall will add to the map, but doesn't actually need this...
- emitCTICall(JITStubs::cti_register_file_check);
+ m_bytecodeIndex = 0;
+ JITStubCall(this, JITStubs::cti_register_file_check).call();
#ifndef NDEBUG
- // reset this, in order to guard it's use with asserts
- m_bytecodeIndex = (unsigned)-1;
+ m_bytecodeIndex = (unsigned)-1; // Reset this, in order to guard its use with ASSERTs.
#endif
jump(afterRegisterFileCheck);
}
ASSERT(m_jmpTable.isEmpty());
- RefPtr<ExecutablePool> allocator = m_globalData->executableAllocator.poolForSize(m_assembler.size());
- void* code = m_assembler.executableCopy(allocator.get());
- JITCodeRef codeRef(code, allocator);
-#ifndef NDEBUG
- codeRef.codeSize = m_assembler.size();
-#endif
-
- PatchBuffer patchBuffer(code);
+ PatchBuffer patchBuffer(this, m_globalData->executableAllocator.poolForSize(m_assembler.size()));
// Translate vPC offsets into addresses in JIT generated code, for switch tables.
for (unsigned i = 0; i < m_switches.size(); ++i) {
@@ -1706,7 +463,7 @@ void JIT::privateCompile()
for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
if (iter->to)
- patchBuffer.link(iter->from, iter->to);
+ patchBuffer.link(iter->from, FunctionPtr(iter->to));
}
if (m_codeBlock->hasExceptionInfo()) {
@@ -1735,11 +492,18 @@ void JIT::privateCompile()
info.coldPathOther = patchBuffer.locationOf(m_callStructureStubCompilationInfo[i].coldPathOther);
}
#endif
+ unsigned methodCallCount = m_methodCallCompilationInfo.size();
+ m_codeBlock->addMethodCallLinkInfos(methodCallCount);
+ for (unsigned i = 0; i < methodCallCount; ++i) {
+ MethodCallLinkInfo& info = m_codeBlock->methodCallLinkInfo(i);
+ info.structureLabel = patchBuffer.locationOf(m_methodCallCompilationInfo[i].structureToCompare);
+ info.callReturnLocation = m_codeBlock->structureStubInfo(m_methodCallCompilationInfo[i].propertyAccessIndex).callReturnLocation;
+ }
- m_codeBlock->setJITCode(codeRef);
+ m_codeBlock->setJITCode(patchBuffer.finalizeCode());
}
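
The loop above fills in MethodCallLinkInfo records that this file does not define. A sketch of the shape implied by the two stores (field types are assumptions based on the MacroAssembler location types used elsewhere in this patch; the real struct may carry extra caching members):

    // Assumed layout; only the members written by the loop above are shown.
    struct MethodCallLinkInfo {
        CodeLocationDataLabelPtr structureLabel; // inline Structure* compare, repatched when the check links
        CodeLocationCall callReturnLocation;     // shared with the paired get_by_id structure stub
    };
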
-void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executablePool, void** ctiArrayLengthTrampoline, void** ctiStringLengthTrampoline, void** ctiVirtualCallPreLink, void** ctiVirtualCallLink, void** ctiVirtualCall)
+void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executablePool, JSGlobalData* globalData, CodePtr* ctiArrayLengthTrampoline, CodePtr* ctiStringLengthTrampoline, CodePtr* ctiVirtualCallPreLink, CodePtr* ctiVirtualCallLink, CodePtr* ctiVirtualCall, CodePtr* ctiNativeCallThunk)
{
#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
// (1) The first function provides fast property access for array length
@@ -1779,29 +543,28 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
ret();
#endif
-#if !(PLATFORM(X86) || PLATFORM(X86_64))
-#error "This code is less portable than it looks this code assumes that regT3 is callee preserved, which happens to be true on x86/x86-64."
-#endif
-
// (3) Trampolines for the slow cases of op_call / op_call_eval / op_construct.
Label virtualCallPreLinkBegin = align();
// Load the callee CodeBlock* into eax
- loadPtr(Address(regT2, FIELD_OFFSET(JSFunction, m_body)), regT0);
- loadPtr(Address(regT0, FIELD_OFFSET(FunctionBodyNode, m_code)), regT0);
+ loadPtr(Address(regT2, FIELD_OFFSET(JSFunction, m_body)), regT3);
+ loadPtr(Address(regT3, FIELD_OFFSET(FunctionBodyNode, m_code)), regT0);
Jump hasCodeBlock1 = branchTestPtr(NonZero, regT0);
- pop(regT3);
+ // If m_code is null and m_jitCode is not, then we have a native function, so arity is irrelevant
+ loadPtr(Address(regT3, FIELD_OFFSET(FunctionBodyNode, m_jitCode)), regT0);
+ Jump isNativeFunc1 = branchTestPtr(NonZero, regT0);
+ preserveReturnAddressAfterCall(regT3);
restoreArgumentReference();
Call callJSFunction1 = call();
emitGetJITStubArg(1, regT2);
emitGetJITStubArg(3, regT1);
- push(regT3);
+ restoreReturnAddressBeforeReturn(regT3);
hasCodeBlock1.link(this);
// Check argCount matches callee arity.
Jump arityCheckOkay1 = branch32(Equal, Address(regT0, FIELD_OFFSET(CodeBlock, m_numParameters)), regT1);
- pop(regT3);
+ preserveReturnAddressAfterCall(regT3);
emitPutJITStubArg(regT3, 2);
emitPutJITStubArg(regT0, 4);
restoreArgumentReference();
@@ -1809,36 +572,41 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
move(regT1, callFrameRegister);
emitGetJITStubArg(1, regT2);
emitGetJITStubArg(3, regT1);
- push(regT3);
+ restoreReturnAddressBeforeReturn(regT3);
arityCheckOkay1.link(this);
+ isNativeFunc1.link(this);
compileOpCallInitializeCallFrame();
- pop(regT3);
+ preserveReturnAddressAfterCall(regT3);
emitPutJITStubArg(regT3, 2);
restoreArgumentReference();
Call callDontLazyLinkCall = call();
- push(regT3);
+ emitGetJITStubArg(1, regT2);
+ restoreReturnAddressBeforeReturn(regT3);
jump(regT0);
Label virtualCallLinkBegin = align();
// Load the callee CodeBlock* into eax
- loadPtr(Address(regT2, FIELD_OFFSET(JSFunction, m_body)), regT0);
- loadPtr(Address(regT0, FIELD_OFFSET(FunctionBodyNode, m_code)), regT0);
+ loadPtr(Address(regT2, FIELD_OFFSET(JSFunction, m_body)), regT3);
+ loadPtr(Address(regT3, FIELD_OFFSET(FunctionBodyNode, m_code)), regT0);
Jump hasCodeBlock2 = branchTestPtr(NonZero, regT0);
- pop(regT3);
+ // If m_code is null and m_jitCode is not, then we have a native function, so arity is irrelevant
+ loadPtr(Address(regT3, FIELD_OFFSET(FunctionBodyNode, m_jitCode)), regT0);
+ Jump isNativeFunc2 = branchTestPtr(NonZero, regT0);
+ preserveReturnAddressAfterCall(regT3);
restoreArgumentReference();
Call callJSFunction2 = call();
emitGetJITStubArg(1, regT2);
emitGetJITStubArg(3, regT1);
- push(regT3);
+ restoreReturnAddressBeforeReturn(regT3);
hasCodeBlock2.link(this);
// Check argCount matches callee arity.
Jump arityCheckOkay2 = branch32(Equal, Address(regT0, FIELD_OFFSET(CodeBlock, m_numParameters)), regT1);
- pop(regT3);
+ preserveReturnAddressAfterCall(regT3);
emitPutJITStubArg(regT3, 2);
emitPutJITStubArg(regT0, 4);
restoreArgumentReference();
@@ -1846,36 +614,41 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
move(regT1, callFrameRegister);
emitGetJITStubArg(1, regT2);
emitGetJITStubArg(3, regT1);
- push(regT3);
+ restoreReturnAddressBeforeReturn(regT3);
arityCheckOkay2.link(this);
+ isNativeFunc2.link(this);
compileOpCallInitializeCallFrame();
- pop(regT3);
+ preserveReturnAddressAfterCall(regT3);
emitPutJITStubArg(regT3, 2);
restoreArgumentReference();
Call callLazyLinkCall = call();
- push(regT3);
+ restoreReturnAddressBeforeReturn(regT3);
jump(regT0);
Label virtualCallBegin = align();
// Load the callee CodeBlock* into eax
- loadPtr(Address(regT2, FIELD_OFFSET(JSFunction, m_body)), regT0);
- loadPtr(Address(regT0, FIELD_OFFSET(FunctionBodyNode, m_code)), regT0);
+ loadPtr(Address(regT2, FIELD_OFFSET(JSFunction, m_body)), regT3);
+ loadPtr(Address(regT3, FIELD_OFFSET(FunctionBodyNode, m_code)), regT0);
Jump hasCodeBlock3 = branchTestPtr(NonZero, regT0);
- pop(regT3);
+ // If m_code is null and m_jitCode is not, then we have a native function, so arity is irrelevant
+ loadPtr(Address(regT3, FIELD_OFFSET(FunctionBodyNode, m_jitCode)), regT0);
+ Jump isNativeFunc3 = branchTestPtr(NonZero, regT0);
+ preserveReturnAddressAfterCall(regT3);
restoreArgumentReference();
Call callJSFunction3 = call();
emitGetJITStubArg(1, regT2);
emitGetJITStubArg(3, regT1);
- push(regT3);
+ restoreReturnAddressBeforeReturn(regT3);
+ loadPtr(Address(regT2, FIELD_OFFSET(JSFunction, m_body)), regT3); // reload the function body node, so we can reload the code pointer.
hasCodeBlock3.link(this);
// Check argCount matches callee arity.
Jump arityCheckOkay3 = branch32(Equal, Address(regT0, FIELD_OFFSET(CodeBlock, m_numParameters)), regT1);
- pop(regT3);
+ preserveReturnAddressAfterCall(regT3);
emitPutJITStubArg(regT3, 2);
emitPutJITStubArg(regT0, 4);
restoreArgumentReference();
@@ -1883,15 +656,188 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
move(regT1, callFrameRegister);
emitGetJITStubArg(1, regT2);
emitGetJITStubArg(3, regT1);
- push(regT3);
+ restoreReturnAddressBeforeReturn(regT3);
+ loadPtr(Address(regT2, FIELD_OFFSET(JSFunction, m_body)), regT3); // reload the function body node, so we can reload the code pointer.
arityCheckOkay3.link(this);
+ // load ctiCode from the new codeBlock.
+ loadPtr(Address(regT3, FIELD_OFFSET(FunctionBodyNode, m_jitCode)), regT0);
+ isNativeFunc3.link(this);
compileOpCallInitializeCallFrame();
+ jump(regT0);
- // load ctiCode from the new codeBlock.
- loadPtr(Address(regT0, FIELD_OFFSET(CodeBlock, m_jitCode)), regT0);
+
+ Label nativeCallThunk = align();
+ preserveReturnAddressAfterCall(regT0);
+ emitPutToCallFrameHeader(regT0, RegisterFile::ReturnPC); // Push return address
+
+ // Load caller frame's scope chain into this callframe so that whatever we call can
+ // get to its global data.
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, regT1);
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1, regT1);
+ emitPutToCallFrameHeader(regT1, RegisterFile::ScopeChain);
+
- jump(regT0);
+#if PLATFORM(X86_64)
+ emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, X86::ecx);
+
+ // Allocate stack space for our arglist
+ subPtr(Imm32(sizeof(ArgList)), stackPointerRegister);
+ COMPILE_ASSERT((sizeof(ArgList) & 0xf) == 0, ArgList_should_be_16byte_aligned);
+
+ // Set up arguments
+ subPtr(Imm32(1), X86::ecx); // Don't include 'this' in argcount
+
+ // Push argcount
+ storePtr(X86::ecx, Address(stackPointerRegister, FIELD_OFFSET(ArgList, m_argCount)));
+
+ // Calculate the start of the callframe header, and store in edx
+ addPtr(Imm32(-RegisterFile::CallFrameHeaderSize * (int32_t)sizeof(Register)), callFrameRegister, X86::edx);
+
+ // Calculate start of arguments as callframe header - sizeof(Register) * argcount (ecx)
+ mul32(Imm32(sizeof(Register)), X86::ecx, X86::ecx);
+ subPtr(X86::ecx, X86::edx);
+
+ // push pointer to arguments
+ storePtr(X86::edx, Address(stackPointerRegister, FIELD_OFFSET(ArgList, m_args)));
+
+ // ArgList is passed by reference; the ArgList we built lives at the stack pointer, so pass that
+ move(stackPointerRegister, X86::ecx);
+
+ // edx currently points to the first argument, edx-sizeof(Register) points to 'this'
+ loadPtr(Address(X86::edx, -(int32_t)sizeof(Register)), X86::edx);
+
+ emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, X86::esi);
+
+ move(callFrameRegister, X86::edi);
+
+ call(Address(X86::esi, FIELD_OFFSET(JSFunction, m_data)));
+
+ addPtr(Imm32(sizeof(ArgList)), stackPointerRegister);
+#elif PLATFORM(X86)
+ emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT0);
+
+ /* We use two structs to describe the stack frame we set up for our
+ * call to native code. NativeCallFrameStructure describes how we set up the stack
+ * in advance of the call. NativeFunctionCalleeSignature describes the callframe
+ * as the native code expects it. We need both because we use the fastcall calling
+ * convention, which has the callee pop its arguments off the stack, but
+ * not the rest of the callframe, so we need a reliable way to increment the
+ * stack pointer by the right amount after the call. (See the sketch of the
+ * assumed native function signature after this function.)
+ */
+#if COMPILER(MSVC) || PLATFORM(LINUX)
+ struct NativeCallFrameStructure {
+ // CallFrame* callFrame; // passed in EDX
+ JSObject* callee;
+ JSValue thisValue;
+ ArgList* argPointer;
+ ArgList args;
+ JSValue result;
+ };
+ struct NativeFunctionCalleeSignature {
+ JSObject* callee;
+ JSValue thisValue;
+ ArgList* argPointer;
+ };
+#else
+ struct NativeCallFrameStructure {
+ // CallFrame* callFrame; // passed in ECX
+ // JSObject* callee; // passed in EDX
+ JSValue thisValue;
+ ArgList* argPointer;
+ ArgList args;
+ };
+ struct NativeFunctionCalleeSignature {
+ JSValue thisValue;
+ ArgList* argPointer;
+ };
+#endif
+ const int NativeCallFrameSize = (sizeof(NativeCallFrameStructure) + 15) & ~15;
+ // Allocate system stack frame
+ subPtr(Imm32(NativeCallFrameSize), stackPointerRegister);
+
+ // Set up arguments
+ subPtr(Imm32(1), regT0); // Don't include 'this' in argcount
+
+ // push argcount
+ storePtr(regT0, Address(stackPointerRegister, FIELD_OFFSET(NativeCallFrameStructure, args) + FIELD_OFFSET(ArgList, m_argCount)));
+
+ // Calculate the start of the callframe header, and store in regT1
+ addPtr(Imm32(-RegisterFile::CallFrameHeaderSize * (int)sizeof(Register)), callFrameRegister, regT1);
+
+ // Calculate start of arguments as callframe header - sizeof(Register) * argcount (regT0)
+ mul32(Imm32(sizeof(Register)), regT0, regT0);
+ subPtr(regT0, regT1);
+ storePtr(regT1, Address(stackPointerRegister, FIELD_OFFSET(NativeCallFrameStructure, args) + FIELD_OFFSET(ArgList, m_args)));
+
+ // ArgList is passed by reference, so store a pointer to the args member we just populated
+ addPtr(Imm32(FIELD_OFFSET(NativeCallFrameStructure, args)), stackPointerRegister, regT0);
+ storePtr(regT0, Address(stackPointerRegister, FIELD_OFFSET(NativeCallFrameStructure, argPointer)));
+
+ // regT1 currently points to the first argument, regT1 - sizeof(Register) points to 'this'
+ loadPtr(Address(regT1, -(int)sizeof(Register)), regT1);
+ storePtr(regT1, Address(stackPointerRegister, FIELD_OFFSET(NativeCallFrameStructure, thisValue)));
+
+#if COMPILER(MSVC) || PLATFORM(LINUX)
+ // JSValue is returned through a hidden pointer here (it is a non-POD type); plant the result slot's address in ecx
+ addPtr(Imm32(FIELD_OFFSET(NativeCallFrameStructure, result)), stackPointerRegister, X86::ecx);
+
+ // Plant callee
+ emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, X86::eax);
+ storePtr(X86::eax, Address(stackPointerRegister, FIELD_OFFSET(NativeCallFrameStructure, callee)));
+
+ // Plant callframe
+ move(callFrameRegister, X86::edx);
+
+ call(Address(X86::eax, FIELD_OFFSET(JSFunction, m_data)));
+
+ // JSValue is a non-POD type
+ loadPtr(Address(X86::eax), X86::eax);
+#else
+ // Plant callee
+ emitGetFromCallFrameHeaderPtr(RegisterFile::Callee, X86::edx);
+
+ // Plant callframe
+ move(callFrameRegister, X86::ecx);
+ call(Address(X86::edx, FIELD_OFFSET(JSFunction, m_data)));
+#endif
+
+ // We've put a few temporaries on the stack in addition to the actual arguments
+ // so pull them off now
+ addPtr(Imm32(NativeCallFrameSize - sizeof(NativeFunctionCalleeSignature)), stackPointerRegister);
+
+#elif ENABLE(JIT_OPTIMIZE_NATIVE_CALL)
+#error "JIT_OPTIMIZE_NATIVE_CALL not yet supported on this platform."
+#else
+ breakpoint();
+#endif
+
+ // Check for an exception
+ loadPtr(&(globalData->exception), regT2);
+ Jump exceptionHandler = branchTestPtr(NonZero, regT2);
+
+ // Grab the return address.
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT1);
+
+ // Restore our caller's "r".
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
+
+ // Return.
+ restoreReturnAddressBeforeReturn(regT1);
+ ret();
+
+ // Handle an exception
+ exceptionHandler.link(this);
+ // Grab the return address.
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT1);
+ move(ImmPtr(&globalData->exceptionLocation), regT2);
+ storePtr(regT1, regT2);
+ move(ImmPtr(reinterpret_cast<void*>(ctiVMThrowTrampoline)), regT2);
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
+ poke(callFrameRegister, offsetof(struct JITStackFrame, callFrame) / sizeof (void*));
+ restoreReturnAddressBeforeReturn(regT2);
+ ret();
+
#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
Call array_failureCases1Call = makeTailRecursiveCall(array_failureCases1);
@@ -1903,36 +849,39 @@ void JIT::privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executable
#endif
// All trampolines constructed! copy the code, link up calls, and set the pointers on the Machine object.
- *executablePool = m_globalData->executableAllocator.poolForSize(m_assembler.size());
- void* code = m_assembler.executableCopy((*executablePool).get());
+ PatchBuffer patchBuffer(this, m_globalData->executableAllocator.poolForSize(m_assembler.size()));
- PatchBuffer patchBuffer(code);
#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
- patchBuffer.link(array_failureCases1Call, JITStubs::cti_op_get_by_id_array_fail);
- patchBuffer.link(array_failureCases2Call, JITStubs::cti_op_get_by_id_array_fail);
- patchBuffer.link(array_failureCases3Call, JITStubs::cti_op_get_by_id_array_fail);
- patchBuffer.link(string_failureCases1Call, JITStubs::cti_op_get_by_id_string_fail);
- patchBuffer.link(string_failureCases2Call, JITStubs::cti_op_get_by_id_string_fail);
- patchBuffer.link(string_failureCases3Call, JITStubs::cti_op_get_by_id_string_fail);
-
- *ctiArrayLengthTrampoline = patchBuffer.trampolineAt(arrayLengthBegin);
- *ctiStringLengthTrampoline = patchBuffer.trampolineAt(stringLengthBegin);
+ patchBuffer.link(array_failureCases1Call, FunctionPtr(JITStubs::cti_op_get_by_id_array_fail));
+ patchBuffer.link(array_failureCases2Call, FunctionPtr(JITStubs::cti_op_get_by_id_array_fail));
+ patchBuffer.link(array_failureCases3Call, FunctionPtr(JITStubs::cti_op_get_by_id_array_fail));
+ patchBuffer.link(string_failureCases1Call, FunctionPtr(JITStubs::cti_op_get_by_id_string_fail));
+ patchBuffer.link(string_failureCases2Call, FunctionPtr(JITStubs::cti_op_get_by_id_string_fail));
+ patchBuffer.link(string_failureCases3Call, FunctionPtr(JITStubs::cti_op_get_by_id_string_fail));
+#endif
+ patchBuffer.link(callArityCheck1, FunctionPtr(JITStubs::cti_op_call_arityCheck));
+ patchBuffer.link(callArityCheck2, FunctionPtr(JITStubs::cti_op_call_arityCheck));
+ patchBuffer.link(callArityCheck3, FunctionPtr(JITStubs::cti_op_call_arityCheck));
+ patchBuffer.link(callJSFunction1, FunctionPtr(JITStubs::cti_op_call_JSFunction));
+ patchBuffer.link(callJSFunction2, FunctionPtr(JITStubs::cti_op_call_JSFunction));
+ patchBuffer.link(callJSFunction3, FunctionPtr(JITStubs::cti_op_call_JSFunction));
+ patchBuffer.link(callDontLazyLinkCall, FunctionPtr(JITStubs::cti_vm_dontLazyLinkCall));
+ patchBuffer.link(callLazyLinkCall, FunctionPtr(JITStubs::cti_vm_lazyLinkCall));
+
+ CodeRef finalCode = patchBuffer.finalizeCode();
+ *executablePool = finalCode.m_executablePool;
+
+ *ctiVirtualCallPreLink = trampolineAt(finalCode, virtualCallPreLinkBegin);
+ *ctiVirtualCallLink = trampolineAt(finalCode, virtualCallLinkBegin);
+ *ctiVirtualCall = trampolineAt(finalCode, virtualCallBegin);
+ *ctiNativeCallThunk = trampolineAt(finalCode, nativeCallThunk);
+#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+ *ctiArrayLengthTrampoline = trampolineAt(finalCode, arrayLengthBegin);
+ *ctiStringLengthTrampoline = trampolineAt(finalCode, stringLengthBegin);
#else
UNUSED_PARAM(ctiArrayLengthTrampoline);
UNUSED_PARAM(ctiStringLengthTrampoline);
#endif
- patchBuffer.link(callArityCheck1, JITStubs::cti_op_call_arityCheck);
- patchBuffer.link(callArityCheck2, JITStubs::cti_op_call_arityCheck);
- patchBuffer.link(callArityCheck3, JITStubs::cti_op_call_arityCheck);
- patchBuffer.link(callJSFunction1, JITStubs::cti_op_call_JSFunction);
- patchBuffer.link(callJSFunction2, JITStubs::cti_op_call_JSFunction);
- patchBuffer.link(callJSFunction3, JITStubs::cti_op_call_JSFunction);
- patchBuffer.link(callDontLazyLinkCall, JITStubs::cti_vm_dontLazyLinkCall);
- patchBuffer.link(callLazyLinkCall, JITStubs::cti_vm_lazyLinkCall);
-
- *ctiVirtualCallPreLink = patchBuffer.trampolineAt(virtualCallPreLinkBegin);
- *ctiVirtualCallLink = patchBuffer.trampolineAt(virtualCallLinkBegin);
- *ctiVirtualCall = patchBuffer.trampolineAt(virtualCallBegin);
}
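
The native-call thunk above marshals callFrame, callee, thisValue, and an ArgList pointer before calling through JSFunction::m_data. On x86-64 those four land in rdi, rsi, rdx, and rcx, matching the System V argument registers; on x86 the fastcall convention plus the two structs described earlier carry the same payload. A sketch of the host-function signature the thunk appears to target (the typedef name and JSC_HOST_CALL are assumptions, not shown in this diff):

    // Assumed host call signature; argument order inferred from the register
    // setup in the thunk (callFrame, callee, thisValue, args).
    typedef JSValue (JSC_HOST_CALL *NativeFunction)(ExecState* exec, JSObject* callee,
                                                    JSValue thisValue, const ArgList& args);
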
void JIT::emitGetVariableObjectRegister(RegisterID variableObject, int index, RegisterID dst)
@@ -1949,6 +898,41 @@ void JIT::emitPutVariableObjectRegister(RegisterID src, RegisterID variableObjec
storePtr(src, Address(variableObject, index * sizeof(Register)));
}
+void JIT::unlinkCall(CallLinkInfo* callLinkInfo)
+{
+ // When the JSFunction is deleted the pointer embedded in the instruction stream will no longer be valid
+ // (and, if a new JSFunction happened to be constructed at the same location, we could get a false positive
+ // match). Reset the check so it no longer matches.
+ callLinkInfo->hotPathBegin.repatch(JSValue::encode(JSValue()));
+}
+
+void JIT::linkCall(JSFunction* callee, CodeBlock* calleeCodeBlock, JITCode& code, CallLinkInfo* callLinkInfo, int callerArgCount)
+{
+ // Currently we only link calls with the exact number of arguments.
+ // If this is a native call, calleeCodeBlock is null, so the number of parameters is unimportant.
+ if (!calleeCodeBlock || callerArgCount == calleeCodeBlock->m_numParameters) {
+ ASSERT(!callLinkInfo->isLinked());
+
+ if (calleeCodeBlock)
+ calleeCodeBlock->addCaller(callLinkInfo);
+
+ callLinkInfo->hotPathBegin.repatch(callee);
+ callLinkInfo->hotPathOther.relink(code.addressForCall());
+ }
+
+ // Patch the instruction that jumps out to the cold path, so that we only try to link once.
+ callLinkInfo->hotPathBegin.jumpAtOffset(patchOffsetOpCallCompareToJump).relink(callLinkInfo->coldPathOther);
+}
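
linkCall() and unlinkCall() manipulate three patchable locations per call site. A sketch of the CallLinkInfo members they rely on (assumed layout; only the fields referenced above are shown, and the location types are inferred from the repatch/relink calls):

    // Assumed layout of the call-site record used above.
    struct CallLinkInfo {
        CodeLocationDataLabelPtr hotPathBegin; // inline JSFunction* compare; repatched on link/unlink
        CodeLocationNearCall hotPathOther;     // the near call; relinked to code.addressForCall()
        CodeLocationLabel coldPathOther;       // slow-path entry the guard jump is redirected to
        // isLinked() presumably tests whether hotPathBegin currently holds a callee.
    };
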
+
} // namespace JSC
#endif // ENABLE(JIT)
+
+// This probably does not belong here; adding here for now as a quick Windows build fix.
+#if ENABLE(ASSEMBLER)
+
+#if PLATFORM(X86) && !PLATFORM(MAC)
+JSC::MacroAssemblerX86Common::SSE2CheckState JSC::MacroAssemblerX86Common::s_sse2CheckState = NotCheckedSSE2;
+#endif
+
+#endif
diff --git a/JavaScriptCore/jit/JIT.h b/JavaScriptCore/jit/JIT.h
index 25c7825..81f804a 100644
--- a/JavaScriptCore/jit/JIT.h
+++ b/JavaScriptCore/jit/JIT.h
@@ -30,8 +30,15 @@
#if ENABLE(JIT)
-#define WTF_USE_CTI_REPATCH_PIC 1
+// We've run into some problems where changing the size of the class JIT leads to
+// performance fluctuations. Try forcing alignment in an attempt to stabilize this.
+#if COMPILER(GCC)
+#define JIT_CLASS_ALIGNMENT __attribute__ ((aligned (32)))
+#else
+#define JIT_CLASS_ALIGNMENT
+#endif
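
A sketch of how the alignment macro is presumably applied: GCC accepts an attribute between a class definition's closing brace and the semicolon, so the macro can trail the class body (placement assumed, not shown in this hunk):

    class JIT : private MacroAssembler {
        // ... members ...
    } JIT_CLASS_ALIGNMENT;
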
+#include "CodeBlock.h"
#include "Interpreter.h"
#include "JITCode.h"
#include "JITStubs.h"
@@ -43,54 +50,10 @@
#include <wtf/AlwaysInline.h>
#include <wtf/Vector.h>
-#if PLATFORM(X86_64)
-#define STUB_ARGS_offset 0x10
-#else
-#define STUB_ARGS_offset 0x0C
-#endif
-
-#define STUB_ARGS_code (STUB_ARGS_offset)
-#define STUB_ARGS_registerFile (STUB_ARGS_offset + 1)
-#define STUB_ARGS_callFrame (STUB_ARGS_offset + 2)
-#define STUB_ARGS_exception (STUB_ARGS_offset + 3)
-#define STUB_ARGS_profilerReference (STUB_ARGS_offset + 4)
-#define STUB_ARGS_globalData (STUB_ARGS_offset + 5)
-
-#define ARG_callFrame static_cast<CallFrame*>(ARGS[STUB_ARGS_callFrame])
-#define ARG_registerFile static_cast<RegisterFile*>(ARGS[STUB_ARGS_registerFile])
-#define ARG_exception static_cast<JSValuePtr*>(ARGS[STUB_ARGS_exception])
-#define ARG_profilerReference static_cast<Profiler**>(ARGS[STUB_ARGS_profilerReference])
-#define ARG_globalData static_cast<JSGlobalData*>(ARGS[STUB_ARGS_globalData])
-
-#define ARG_setCallFrame(newCallFrame) (ARGS[STUB_ARGS_callFrame] = (newCallFrame))
-
-#define ARG_src1 JSValuePtr::decode(static_cast<JSValueEncodedAsPointer*>(ARGS[1]))
-#define ARG_src2 JSValuePtr::decode(static_cast<JSValueEncodedAsPointer*>(ARGS[2]))
-#define ARG_src3 JSValuePtr::decode(static_cast<JSValueEncodedAsPointer*>(ARGS[3]))
-#define ARG_src4 JSValuePtr::decode(static_cast<JSValueEncodedAsPointer*>(ARGS[4]))
-#define ARG_src5 JSValuePtr::decode(static_cast<JSValueEncodedAsPointer*>(ARGS[5]))
-#define ARG_id1 static_cast<Identifier*>(ARGS[1])
-#define ARG_id2 static_cast<Identifier*>(ARGS[2])
-#define ARG_id3 static_cast<Identifier*>(ARGS[3])
-#define ARG_id4 static_cast<Identifier*>(ARGS[4])
-#define ARG_int1 static_cast<int32_t>(reinterpret_cast<intptr_t>(ARGS[1]))
-#define ARG_int2 static_cast<int32_t>(reinterpret_cast<intptr_t>(ARGS[2]))
-#define ARG_int3 static_cast<int32_t>(reinterpret_cast<intptr_t>(ARGS[3]))
-#define ARG_int4 static_cast<int32_t>(reinterpret_cast<intptr_t>(ARGS[4]))
-#define ARG_int5 static_cast<int32_t>(reinterpret_cast<intptr_t>(ARGS[5]))
-#define ARG_int6 static_cast<int32_t>(reinterpret_cast<intptr_t>(ARGS[6]))
-#define ARG_func1 static_cast<FuncDeclNode*>(ARGS[1])
-#define ARG_funcexp1 static_cast<FuncExprNode*>(ARGS[1])
-#define ARG_regexp1 static_cast<RegExp*>(ARGS[1])
-#define ARG_pni1 static_cast<JSPropertyNameIterator*>(ARGS[1])
-#define ARG_returnAddress2 static_cast<void*>(ARGS[2])
-#define ARG_codeBlock4 static_cast<CodeBlock*>(ARGS[4])
-
-#define STUB_RETURN_ADDRESS_SLOT (ARGS[-1])
-
namespace JSC {
class CodeBlock;
+ class JIT;
class JSPropertyNameIterator;
class Interpreter;
class Register;
@@ -106,14 +69,6 @@ namespace JSC {
struct PolymorphicAccessStructureList;
struct StructureStubInfo;
- typedef JSValueEncodedAsPointer* (JIT_STUB *CTIHelper_j)(STUB_ARGS);
- typedef JSObject* (JIT_STUB *CTIHelper_o)(STUB_ARGS);
- typedef JSPropertyNameIterator* (JIT_STUB *CTIHelper_p)(STUB_ARGS);
- typedef void (JIT_STUB *CTIHelper_v)(STUB_ARGS);
- typedef void* (JIT_STUB *CTIHelper_s)(STUB_ARGS);
- typedef int (JIT_STUB *CTIHelper_b)(STUB_ARGS);
- typedef VoidPtrPair (JIT_STUB *CTIHelper_2)(STUB_ARGS);
-
struct CallRecord {
MacroAssembler::Call from;
unsigned bytecodeIndex;
@@ -201,15 +156,25 @@ namespace JSC {
MacroAssembler::Label coldPathOther;
};
- extern "C" {
- void ctiVMThrowTrampoline();
+ struct MethodCallCompilationInfo {
+ MethodCallCompilationInfo(unsigned propertyAccessIndex)
+ : propertyAccessIndex(propertyAccessIndex)
+ {
+ }
+
+ MacroAssembler::DataLabelPtr structureToCompare;
+ unsigned propertyAccessIndex;
};
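
A plausible emit-time use of MethodCallCompilationInfo, recording the patchable structure compare so privateCompile() can later convert it into a MethodCallLinkInfo (a sketch; moveWithPatch and the surrounding emitter are assumptions about code outside this hunk):

    // Hypothetical usage inside the op_method_check emitter.
    m_methodCallCompilationInfo.append(MethodCallCompilationInfo(m_propertyAccessInstructionIndex));
    MethodCallCompilationInfo& info = m_methodCallCompilationInfo.last();
    info.structureToCompare = moveWithPatch(ImmPtr(0), regT1); // placeholder Structure*, patched on link
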
- void ctiSetReturnAddress(void** addressOfReturnAddress, void* newDestinationToReturnTo);
- void ctiPatchCallByReturnAddress(MacroAssembler::ProcessorReturnAddress returnAddress, void* newCalleeFunction);
- void ctiPatchNearCallByReturnAddress(MacroAssembler::ProcessorReturnAddress returnAddress, void* newCalleeFunction);
+ // Near calls can only be patched to other JIT code; regular calls can be patched to JIT code or relinked to stub functions.
+ void ctiPatchNearCallByReturnAddress(MacroAssembler::ProcessorReturnAddress returnAddress, MacroAssemblerCodePtr newCalleeFunction);
+ void ctiPatchCallByReturnAddress(MacroAssembler::ProcessorReturnAddress returnAddress, MacroAssemblerCodePtr newCalleeFunction);
+ void ctiPatchCallByReturnAddress(MacroAssembler::ProcessorReturnAddress returnAddress, FunctionPtr newCalleeFunction);
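// A minimal sketch of how the patching entry points above are used (the stub
// and code-pointer names are illustrative, not part of this patch): once a
// cacheable operation has a generated stub, the caller's call instruction is
// re-pointed so subsequent executions bypass the generic path, e.g.:
//
//     ctiPatchCallByReturnAddress(returnAddress, stubRoutineCode);      // patch to JIT code
//     ctiPatchCallByReturnAddress(returnAddress,
//         FunctionPtr(JITStubs::cti_op_get_by_id_generic));             // relink to a stub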
class JIT : private MacroAssembler {
+ friend class JITStubCall;
+ friend class CallEvalJITStub;
+
using MacroAssembler::Jump;
using MacroAssembler::JumpList;
using MacroAssembler::Label;
@@ -221,6 +186,8 @@ namespace JSC {
// a register is specified) emitPutVirtualRegister() will store
// the value from regT0.
//
+ // regT3 is required to be callee-preserved.
+ //
// tempRegister2 has no such dependencies. It is important that
// on x86/x86-64 it is ecx for performance reasons, since the
// MacroAssembler will need to plant register swaps if it is not -
@@ -238,8 +205,11 @@ namespace JSC {
static const RegisterID regT0 = X86::eax;
static const RegisterID regT1 = X86::edx;
static const RegisterID regT2 = X86::ecx;
- // NOTE: privateCompileCTIMachineTrampolines() relies on this being callee preserved; this should be considered non-interface.
static const RegisterID regT3 = X86::ebx;
+
+ static const FPRegisterID fpRegT0 = X86::xmm0;
+ static const FPRegisterID fpRegT1 = X86::xmm1;
+ static const FPRegisterID fpRegT2 = X86::xmm2;
#elif PLATFORM(X86)
static const RegisterID returnValueRegister = X86::eax;
static const RegisterID cachedResultRegister = X86::eax;
@@ -253,8 +223,27 @@ namespace JSC {
static const RegisterID regT0 = X86::eax;
static const RegisterID regT1 = X86::edx;
static const RegisterID regT2 = X86::ecx;
- // NOTE: privateCompileCTIMachineTrampolines() relies on this being callee preserved; this should be considered non-interface.
static const RegisterID regT3 = X86::ebx;
+
+ static const FPRegisterID fpRegT0 = X86::xmm0;
+ static const FPRegisterID fpRegT1 = X86::xmm1;
+ static const FPRegisterID fpRegT2 = X86::xmm2;
+#elif PLATFORM(ARM_V7)
+ static const RegisterID returnValueRegister = ARM::r0;
+ static const RegisterID cachedResultRegister = ARM::r0;
+ static const RegisterID firstArgumentRegister = ARM::r0;
+
+ static const RegisterID regT0 = ARM::r0;
+ static const RegisterID regT1 = ARM::r1;
+ static const RegisterID regT2 = ARM::r2;
+ static const RegisterID regT3 = ARM::r4;
+
+ static const RegisterID callFrameRegister = ARM::r5;
+ static const RegisterID timeoutCheckRegister = ARM::r6;
+
+ static const FPRegisterID fpRegT0 = ARM::d0;
+ static const FPRegisterID fpRegT1 = ARM::d1;
+ static const FPRegisterID fpRegT2 = ARM::d2;
#else
#error "JIT not supported on this platform."
#endif
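// Note on the ARMv7 assignments above: under the AAPCS, r0-r3 are caller-saved
// argument/scratch registers while r4-r11 are callee-saved, so regT3 = r4
// meets the callee-preserved requirement documented earlier, and the call
// frame / timeout registers in r5/r6 survive calls out to C helpers without
// explicit spills.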
@@ -264,48 +253,79 @@ namespace JSC {
// will compress the displacement, and we may not be able to fit a patched offset.
static const int patchGetByIdDefaultOffset = 256;
-#if USE(JIT_STUB_ARGUMENT_REGISTER)
-#if PLATFORM(X86_64)
- static const int ctiArgumentInitSize = 6;
-#else
- static const int ctiArgumentInitSize = 2;
-#endif
-#elif USE(JIT_STUB_ARGUMENT_STACK)
- static const int ctiArgumentInitSize = 4;
-#else // JIT_STUB_ARGUMENT_VA_LIST
- static const int ctiArgumentInitSize = 0;
-#endif
-
#if PLATFORM(X86_64)
// These architecture-specific values are used to enable patching - see comment on op_put_by_id.
static const int patchOffsetPutByIdStructure = 10;
+ static const int patchOffsetPutByIdExternalLoad = 20;
+ static const int patchLengthPutByIdExternalLoad = 4;
static const int patchOffsetPutByIdPropertyMapOffset = 31;
// These architecture-specific values are used to enable patching - see comment on op_get_by_id.
static const int patchOffsetGetByIdStructure = 10;
static const int patchOffsetGetByIdBranchToSlowCase = 20;
+ static const int patchOffsetGetByIdExternalLoad = 20;
+ static const int patchLengthGetByIdExternalLoad = 4;
static const int patchOffsetGetByIdPropertyMapOffset = 31;
static const int patchOffsetGetByIdPutResult = 31;
#if ENABLE(OPCODE_SAMPLING)
- static const int patchOffsetGetByIdSlowCaseCall = 61 + ctiArgumentInitSize;
+ static const int patchOffsetGetByIdSlowCaseCall = 66;
#else
- static const int patchOffsetGetByIdSlowCaseCall = 38 + ctiArgumentInitSize;
+ static const int patchOffsetGetByIdSlowCaseCall = 44;
#endif
static const int patchOffsetOpCallCompareToJump = 9;
-#else
+
+ static const int patchOffsetMethodCheckProtoObj = 20;
+ static const int patchOffsetMethodCheckProtoStruct = 30;
+ static const int patchOffsetMethodCheckPutFunction = 50;
+#elif PLATFORM(X86)
// These architecture-specific values are used to enable patching - see comment on op_put_by_id.
static const int patchOffsetPutByIdStructure = 7;
+ static const int patchOffsetPutByIdExternalLoad = 13;
+ static const int patchLengthPutByIdExternalLoad = 3;
static const int patchOffsetPutByIdPropertyMapOffset = 22;
// These architecture-specific values are used to enable patching - see comment on op_get_by_id.
static const int patchOffsetGetByIdStructure = 7;
static const int patchOffsetGetByIdBranchToSlowCase = 13;
+ static const int patchOffsetGetByIdExternalLoad = 13;
+ static const int patchLengthGetByIdExternalLoad = 3;
static const int patchOffsetGetByIdPropertyMapOffset = 22;
static const int patchOffsetGetByIdPutResult = 22;
-#if ENABLE(OPCODE_SAMPLING)
- static const int patchOffsetGetByIdSlowCaseCall = 31 + ctiArgumentInitSize;
+#if ENABLE(OPCODE_SAMPLING) && USE(JIT_STUB_ARGUMENT_VA_LIST)
+ static const int patchOffsetGetByIdSlowCaseCall = 31;
+#elif ENABLE(OPCODE_SAMPLING)
+ static const int patchOffsetGetByIdSlowCaseCall = 33;
+#elif USE(JIT_STUB_ARGUMENT_VA_LIST)
+ static const int patchOffsetGetByIdSlowCaseCall = 21;
#else
- static const int patchOffsetGetByIdSlowCaseCall = 21 + ctiArgumentInitSize;
+ static const int patchOffsetGetByIdSlowCaseCall = 23;
#endif
static const int patchOffsetOpCallCompareToJump = 6;
+
+ static const int patchOffsetMethodCheckProtoObj = 11;
+ static const int patchOffsetMethodCheckProtoStruct = 18;
+ static const int patchOffsetMethodCheckPutFunction = 29;
+#elif PLATFORM(ARM_V7)
+ // These architecture-specific values are used to enable patching - see comment on op_put_by_id.
+ static const int patchOffsetPutByIdStructure = 10;
+ static const int patchOffsetPutByIdExternalLoad = 20;
+ static const int patchLengthPutByIdExternalLoad = 12;
+ static const int patchOffsetPutByIdPropertyMapOffset = 40;
+ // These architecture-specific values are used to enable patching - see comment on op_get_by_id.
+ static const int patchOffsetGetByIdStructure = 10;
+ static const int patchOffsetGetByIdBranchToSlowCase = 20;
+ static const int patchOffsetGetByIdExternalLoad = 20;
+ static const int patchLengthGetByIdExternalLoad = 12;
+ static const int patchOffsetGetByIdPropertyMapOffset = 40;
+ static const int patchOffsetGetByIdPutResult = 44;
+#if ENABLE(OPCODE_SAMPLING)
+ static const int patchOffsetGetByIdSlowCaseCall = 0; // FIXME
+#else
+ static const int patchOffsetGetByIdSlowCaseCall = 28;
+#endif
+ static const int patchOffsetOpCallCompareToJump = 10;
+
+ static const int patchOffsetMethodCheckProtoObj = 18;
+ static const int patchOffsetMethodCheckProtoStruct = 28;
+ static const int patchOffsetMethodCheckPutFunction = 46;
#endif
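// How the constants above are consumed (a sketch; the pointer arithmetic is
// conceptual, and the real code goes through the patching layer rather than
// raw stores): the hot path records a label at the start of each cacheable
// sequence, and each constant is the fixed byte distance from that label to a
// patchable field, e.g.:
//
//     void* structureSlot = hotPathBegin + patchOffsetGetByIdStructure;
//     // ...overwrite the Structure* immediate baked into the compare...
//
// The values are per-architecture because instruction encodings differ, so the
// same logical field lands at a different byte offset on each target.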
public:
@@ -315,19 +335,12 @@ namespace JSC {
jit.privateCompile();
}
- static void compileGetByIdSelf(JSGlobalData* globalData, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ProcessorReturnAddress returnAddress)
- {
- JIT jit(globalData, codeBlock);
- jit.privateCompileGetByIdSelf(stubInfo, structure, cachedOffset, returnAddress);
- }
-
static void compileGetByIdProto(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, ProcessorReturnAddress returnAddress)
{
JIT jit(globalData, codeBlock);
jit.privateCompileGetByIdProto(stubInfo, structure, prototypeStructure, cachedOffset, returnAddress, callFrame);
}
-#if USE(CTI_REPATCH_PIC)
static void compileGetByIdSelfList(JSGlobalData* globalData, CodeBlock* codeBlock, StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, size_t cachedOffset)
{
JIT jit(globalData, codeBlock);
@@ -343,19 +356,12 @@ namespace JSC {
JIT jit(globalData, codeBlock);
jit.privateCompileGetByIdChainList(stubInfo, prototypeStructureList, currentIndex, structure, chain, count, cachedOffset, callFrame);
}
-#endif
static void compileGetByIdChain(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, ProcessorReturnAddress returnAddress)
{
JIT jit(globalData, codeBlock);
jit.privateCompileGetByIdChain(stubInfo, structure, chain, count, cachedOffset, returnAddress, callFrame);
}
-
- static void compilePutByIdReplace(JSGlobalData* globalData, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ProcessorReturnAddress returnAddress)
- {
- JIT jit(globalData, codeBlock);
- jit.privateCompilePutByIdReplace(stubInfo, structure, cachedOffset, returnAddress);
- }
static void compilePutByIdTransition(JSGlobalData* globalData, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ProcessorReturnAddress returnAddress)
{
@@ -363,15 +369,15 @@ namespace JSC {
jit.privateCompilePutByIdTransition(stubInfo, oldStructure, newStructure, cachedOffset, chain, returnAddress);
}
- static void compileCTIMachineTrampolines(JSGlobalData* globalData, RefPtr<ExecutablePool>* executablePool, void** ctiArrayLengthTrampoline, void** ctiStringLengthTrampoline, void** ctiVirtualCallPreLink, void** ctiVirtualCallLink, void** ctiVirtualCall)
-
+ static void compileCTIMachineTrampolines(JSGlobalData* globalData, RefPtr<ExecutablePool>* executablePool, CodePtr* ctiArrayLengthTrampoline, CodePtr* ctiStringLengthTrampoline, CodePtr* ctiVirtualCallPreLink, CodePtr* ctiVirtualCallLink, CodePtr* ctiVirtualCall, CodePtr* ctiNativeCallThunk)
{
JIT jit(globalData);
- jit.privateCompileCTIMachineTrampolines(executablePool, ctiArrayLengthTrampoline, ctiStringLengthTrampoline, ctiVirtualCallPreLink, ctiVirtualCallLink, ctiVirtualCall);
+ jit.privateCompileCTIMachineTrampolines(executablePool, globalData, ctiArrayLengthTrampoline, ctiStringLengthTrampoline, ctiVirtualCallPreLink, ctiVirtualCallLink, ctiVirtualCall, ctiNativeCallThunk);
}
static void patchGetByIdSelf(StructureStubInfo*, Structure*, size_t cachedOffset, ProcessorReturnAddress returnAddress);
static void patchPutByIdReplace(StructureStubInfo*, Structure*, size_t cachedOffset, ProcessorReturnAddress returnAddress);
+ static void patchMethodCallProto(MethodCallLinkInfo&, JSFunction*, Structure*, JSObject*);
static void compilePatchGetArrayLength(JSGlobalData* globalData, CodeBlock* codeBlock, ProcessorReturnAddress returnAddress)
{
@@ -379,69 +385,200 @@ namespace JSC {
return jit.privateCompilePatchGetArrayLength(returnAddress);
}
- static void linkCall(JSFunction* callee, CodeBlock* calleeCodeBlock, JITCode ctiCode, CallLinkInfo* callLinkInfo, int callerArgCount);
+ static void linkCall(JSFunction* callee, CodeBlock* calleeCodeBlock, JITCode&, CallLinkInfo*, int callerArgCount);
static void unlinkCall(CallLinkInfo*);
private:
+ struct JSRInfo {
+ DataLabelPtr storeLocation;
+ Label target;
+
+ JSRInfo(DataLabelPtr storeLocation, Label targetLocation)
+ : storeLocation(storeLocation)
+ , target(targetLocation)
+ {
+ }
+ };
+
JIT(JSGlobalData*, CodeBlock* = 0);
void privateCompileMainPass();
void privateCompileLinkPass();
void privateCompileSlowCases();
void privateCompile();
- void privateCompileGetByIdSelf(StructureStubInfo*, Structure*, size_t cachedOffset, ProcessorReturnAddress returnAddress);
void privateCompileGetByIdProto(StructureStubInfo*, Structure*, Structure* prototypeStructure, size_t cachedOffset, ProcessorReturnAddress returnAddress, CallFrame* callFrame);
-#if USE(CTI_REPATCH_PIC)
void privateCompileGetByIdSelfList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, size_t cachedOffset);
void privateCompileGetByIdProtoList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, Structure* prototypeStructure, size_t cachedOffset, CallFrame* callFrame);
void privateCompileGetByIdChainList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, StructureChain* chain, size_t count, size_t cachedOffset, CallFrame* callFrame);
-#endif
void privateCompileGetByIdChain(StructureStubInfo*, Structure*, StructureChain*, size_t count, size_t cachedOffset, ProcessorReturnAddress returnAddress, CallFrame* callFrame);
- void privateCompilePutByIdReplace(StructureStubInfo*, Structure*, size_t cachedOffset, ProcessorReturnAddress returnAddress);
void privateCompilePutByIdTransition(StructureStubInfo*, Structure*, Structure*, size_t cachedOffset, StructureChain*, ProcessorReturnAddress returnAddress);
- void privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executablePool, void** ctiArrayLengthTrampoline, void** ctiStringLengthTrampoline, void** ctiVirtualCallPreLink, void** ctiVirtualCallLink, void** ctiVirtualCall);
+ void privateCompileCTIMachineTrampolines(RefPtr<ExecutablePool>* executablePool, JSGlobalData* data, CodePtr* ctiArrayLengthTrampoline, CodePtr* ctiStringLengthTrampoline, CodePtr* ctiVirtualCallPreLink, CodePtr* ctiVirtualCallLink, CodePtr* ctiVirtualCall, CodePtr* ctiNativeCallThunk);
void privateCompilePatchGetArrayLength(ProcessorReturnAddress returnAddress);
void addSlowCase(Jump);
void addJump(Jump, int);
void emitJumpSlowToHot(Jump, int);
+#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
void compileGetByIdHotPath(int resultVReg, int baseVReg, Identifier* ident, unsigned propertyAccessInstructionIndex);
- void compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, unsigned propertyAccessInstructionIndex);
- void compilePutByIdHotPath(int baseVReg, Identifier* ident, int valueVReg, unsigned propertyAccessInstructionIndex);
- void compilePutByIdSlowCase(int baseVReg, Identifier* ident, int valueVReg, Vector<SlowCaseEntry>::iterator& iter, unsigned propertyAccessInstructionIndex);
+ void compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, unsigned propertyAccessInstructionIndex, bool isMethodCheck = false);
+#endif
void compileOpCall(OpcodeID, Instruction* instruction, unsigned callLinkInfoIndex);
+ void compileOpCallVarargs(Instruction* instruction);
void compileOpCallInitializeCallFrame();
void compileOpCallSetupArgs(Instruction*);
- void compileOpCallEvalSetupArgs(Instruction*);
+ void compileOpCallVarargsSetupArgs(Instruction*);
void compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter, unsigned callLinkInfoIndex, OpcodeID opcodeID);
+ void compileOpCallVarargsSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter);
void compileOpConstructSetupArgs(Instruction*);
enum CompileOpStrictEqType { OpStrictEq, OpNStrictEq };
void compileOpStrictEq(Instruction* instruction, CompileOpStrictEqType type);
- void compileFastArith_op_add(Instruction*);
- void compileFastArith_op_sub(Instruction*);
- void compileFastArith_op_mul(Instruction*);
- void compileFastArith_op_mod(unsigned result, unsigned op1, unsigned op2);
- void compileFastArith_op_bitand(unsigned result, unsigned op1, unsigned op2);
- void compileFastArith_op_lshift(unsigned result, unsigned op1, unsigned op2);
- void compileFastArith_op_rshift(unsigned result, unsigned op1, unsigned op2);
- void compileFastArith_op_pre_inc(unsigned srcDst);
- void compileFastArith_op_pre_dec(unsigned srcDst);
- void compileFastArith_op_post_inc(unsigned result, unsigned srcDst);
- void compileFastArith_op_post_dec(unsigned result, unsigned srcDst);
- void compileFastArithSlow_op_add(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void compileFastArithSlow_op_sub(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void compileFastArithSlow_op_mul(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void compileFastArithSlow_op_mod(unsigned result, unsigned op1, unsigned op2, Vector<SlowCaseEntry>::iterator&);
- void compileFastArithSlow_op_bitand(unsigned result, unsigned op1, unsigned op2, Vector<SlowCaseEntry>::iterator&);
- void compileFastArithSlow_op_lshift(unsigned result, unsigned op1, unsigned op2, Vector<SlowCaseEntry>::iterator&);
- void compileFastArithSlow_op_rshift(unsigned result, unsigned op1, unsigned op2, Vector<SlowCaseEntry>::iterator&);
- void compileFastArithSlow_op_pre_inc(unsigned srcDst, Vector<SlowCaseEntry>::iterator&);
- void compileFastArithSlow_op_pre_dec(unsigned srcDst, Vector<SlowCaseEntry>::iterator&);
- void compileFastArithSlow_op_post_inc(unsigned result, unsigned srcDst, Vector<SlowCaseEntry>::iterator&);
- void compileFastArithSlow_op_post_dec(unsigned result, unsigned srcDst, Vector<SlowCaseEntry>::iterator&);
+ void compileGetDirectOffset(RegisterID base, RegisterID result, Structure* structure, size_t cachedOffset);
+ void compileGetDirectOffset(JSObject* base, RegisterID result, size_t cachedOffset);
+ void compilePutDirectOffset(RegisterID base, RegisterID value, Structure* structure, size_t cachedOffset);
+
+ // Arithmetic Ops
+
+ void emit_op_add(Instruction*);
+ void emit_op_sub(Instruction*);
+ void emit_op_mul(Instruction*);
+ void emit_op_mod(Instruction*);
+ void emit_op_bitand(Instruction*);
+ void emit_op_lshift(Instruction*);
+ void emit_op_rshift(Instruction*);
+ void emit_op_jnless(Instruction*);
+ void emit_op_jnlesseq(Instruction*);
+ void emit_op_pre_inc(Instruction*);
+ void emit_op_pre_dec(Instruction*);
+ void emit_op_post_inc(Instruction*);
+ void emit_op_post_dec(Instruction*);
+ void emitSlow_op_add(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_sub(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_mul(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_mod(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_bitand(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_lshift(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_rshift(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_jnless(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_jnlesseq(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_pre_inc(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_pre_dec(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_post_inc(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_post_dec(Instruction*, Vector<SlowCaseEntry>::iterator&);
+
+ void emit_op_get_by_val(Instruction*);
+ void emit_op_put_by_val(Instruction*);
+ void emit_op_put_by_index(Instruction*);
+ void emit_op_put_getter(Instruction*);
+ void emit_op_put_setter(Instruction*);
+ void emit_op_del_by_id(Instruction*);
+
+ void emit_op_mov(Instruction*);
+ void emit_op_end(Instruction*);
+ void emit_op_jmp(Instruction*);
+ void emit_op_loop(Instruction*);
+ void emit_op_loop_if_less(Instruction*);
+ void emit_op_loop_if_lesseq(Instruction*);
+ void emit_op_new_object(Instruction*);
+ void emit_op_put_by_id(Instruction*);
+ void emit_op_get_by_id(Instruction*);
+ void emit_op_instanceof(Instruction*);
+ void emit_op_new_func(Instruction*);
+ void emit_op_call(Instruction*);
+ void emit_op_call_eval(Instruction*);
+ void emit_op_method_check(Instruction*);
+ void emit_op_load_varargs(Instruction*);
+ void emit_op_call_varargs(Instruction*);
+ void emit_op_construct(Instruction*);
+ void emit_op_get_global_var(Instruction*);
+ void emit_op_put_global_var(Instruction*);
+ void emit_op_get_scoped_var(Instruction*);
+ void emit_op_put_scoped_var(Instruction*);
+ void emit_op_tear_off_activation(Instruction*);
+ void emit_op_tear_off_arguments(Instruction*);
+ void emit_op_ret(Instruction*);
+ void emit_op_new_array(Instruction*);
+ void emit_op_resolve(Instruction*);
+ void emit_op_construct_verify(Instruction*);
+ void emit_op_to_primitive(Instruction*);
+ void emit_op_strcat(Instruction*);
+ void emit_op_resolve_func(Instruction*);
+ void emit_op_loop_if_true(Instruction*);
+ void emit_op_resolve_base(Instruction*);
+ void emit_op_resolve_skip(Instruction*);
+ void emit_op_resolve_global(Instruction*);
+ void emit_op_not(Instruction*);
+ void emit_op_jfalse(Instruction*);
+ void emit_op_jeq_null(Instruction*);
+ void emit_op_jneq_null(Instruction*);
+ void emit_op_jneq_ptr(Instruction*);
+ void emit_op_unexpected_load(Instruction*);
+ void emit_op_jsr(Instruction*);
+ void emit_op_sret(Instruction*);
+ void emit_op_eq(Instruction*);
+ void emit_op_bitnot(Instruction*);
+ void emit_op_resolve_with_base(Instruction*);
+ void emit_op_new_func_exp(Instruction*);
+ void emit_op_jtrue(Instruction*);
+ void emit_op_neq(Instruction*);
+ void emit_op_bitxor(Instruction*);
+ void emit_op_new_regexp(Instruction*);
+ void emit_op_bitor(Instruction*);
+ void emit_op_throw(Instruction*);
+ void emit_op_next_pname(Instruction*);
+ void emit_op_push_scope(Instruction*);
+ void emit_op_pop_scope(Instruction*);
+ void emit_op_stricteq(Instruction*);
+ void emit_op_nstricteq(Instruction*);
+ void emit_op_to_jsnumber(Instruction*);
+ void emit_op_push_new_scope(Instruction*);
+ void emit_op_catch(Instruction*);
+ void emit_op_jmp_scopes(Instruction*);
+ void emit_op_switch_imm(Instruction*);
+ void emit_op_switch_char(Instruction*);
+ void emit_op_switch_string(Instruction*);
+ void emit_op_new_error(Instruction*);
+ void emit_op_debug(Instruction*);
+ void emit_op_eq_null(Instruction*);
+ void emit_op_neq_null(Instruction*);
+ void emit_op_enter(Instruction*);
+ void emit_op_enter_with_activation(Instruction*);
+ void emit_op_init_arguments(Instruction*);
+ void emit_op_create_arguments(Instruction*);
+ void emit_op_convert_this(Instruction*);
+ void emit_op_profile_will_call(Instruction*);
+ void emit_op_profile_did_call(Instruction*);
+
+ void emitSlow_op_convert_this(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_construct_verify(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_to_primitive(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_get_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_loop_if_less(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_put_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_get_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_loop_if_lesseq(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_put_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_loop_if_true(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_not(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_jfalse(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_bitnot(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_jtrue(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_bitxor(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_bitor(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_eq(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_neq(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_stricteq(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_nstricteq(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_instanceof(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_call(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_call_eval(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_call_varargs(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_construct(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_to_jsnumber(Instruction*, Vector<SlowCaseEntry>::iterator&);
+
#if ENABLE(JIT_OPTIMIZE_ARITHMETIC)
void compileBinaryArithOp(OpcodeID, unsigned dst, unsigned src1, unsigned src2, OperandTypes opi);
void compileBinaryArithOpSlowCase(OpcodeID, Vector<SlowCaseEntry>::iterator&, unsigned dst, unsigned src1, unsigned src2, OperandTypes opi);
@@ -459,15 +596,12 @@ namespace JSC {
void emitInitRegister(unsigned dst);
- void emitPutCTIParam(void* value, unsigned name);
- void emitPutCTIParam(RegisterID from, unsigned name);
- void emitGetCTIParam(unsigned name, RegisterID to);
-
void emitPutToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry entry);
void emitPutImmediateToCallFrameHeader(void* value, RegisterFile::CallFrameHeaderEntry entry);
- void emitGetFromCallFrameHeader(RegisterFile::CallFrameHeaderEntry entry, RegisterID to);
+ void emitGetFromCallFrameHeaderPtr(RegisterFile::CallFrameHeaderEntry entry, RegisterID to, RegisterID from = callFrameRegister);
+ void emitGetFromCallFrameHeader32(RegisterFile::CallFrameHeaderEntry entry, RegisterID to, RegisterID from = callFrameRegister);
- JSValuePtr getConstantOperand(unsigned src);
+ JSValue getConstantOperand(unsigned src);
int32_t getConstantOperandImmediateInt(unsigned src);
bool isOperandConstantImmediateInt(unsigned src);
@@ -524,15 +658,10 @@ namespace JSC {
void restoreArgumentReference();
void restoreArgumentReferenceForTrampoline();
- Call emitNakedCall(void* function);
- Call emitCTICall_internal(void*);
- Call emitCTICall(CTIHelper_j helper) { return emitCTICall_internal(reinterpret_cast<void*>(helper)); }
- Call emitCTICall(CTIHelper_o helper) { return emitCTICall_internal(reinterpret_cast<void*>(helper)); }
- Call emitCTICall(CTIHelper_p helper) { return emitCTICall_internal(reinterpret_cast<void*>(helper)); }
- Call emitCTICall(CTIHelper_v helper) { return emitCTICall_internal(reinterpret_cast<void*>(helper)); }
- Call emitCTICall(CTIHelper_s helper) { return emitCTICall_internal(reinterpret_cast<void*>(helper)); }
- Call emitCTICall(CTIHelper_b helper) { return emitCTICall_internal(reinterpret_cast<void*>(helper)); }
- Call emitCTICall(CTIHelper_2 helper) { return emitCTICall_internal(reinterpret_cast<void*>(helper)); }
+ Call emitNakedCall(CodePtr function = CodePtr());
+ void preverveReturnAddressAfterCall(RegisterID);
+ void restoreReturnAddressBeforeReturn(RegisterID);
+ void restoreReturnAddressBeforeReturn(Address);
void emitGetVariableObjectRegister(RegisterID variableObject, int index, RegisterID dst);
void emitPutVariableObjectRegister(RegisterID src, RegisterID variableObject, int index);
@@ -544,32 +673,24 @@ namespace JSC {
void killLastResultRegister();
-#if ENABLE(CODEBLOCK_SAMPLING)
- void sampleCodeBlock(CodeBlock* codeBlock)
- {
-#if PLATFORM(X86_64)
- move(ImmPtr(m_interpreter->sampler()->codeBlockSlot()), X86::ecx);
- storePtr(ImmPtr(codeBlock), X86::ecx);
-#else
- storePtr(ImmPtr(codeBlock), m_interpreter->sampler()->codeBlockSlot());
+
+#if ENABLE(SAMPLING_FLAGS)
+ void setSamplingFlag(int32_t);
+ void clearSamplingFlag(int32_t);
#endif
- }
-#else
- void sampleCodeBlock(CodeBlock*) {}
+
+#if ENABLE(SAMPLING_COUNTERS)
+ void emitCount(AbstractSamplingCounter&, uint32_t = 1);
#endif
#if ENABLE(OPCODE_SAMPLING)
- void sampleInstruction(Instruction* instruction, bool inHostFunction=false)
- {
-#if PLATFORM(X86_64)
- move(ImmPtr(m_interpreter->sampler()->sampleSlot()), X86::ecx);
- storePtr(ImmPtr(m_interpreter->sampler()->encodeSample(instruction, inHostFunction)), X86::ecx);
-#else
- storePtr(ImmPtr(m_interpreter->sampler()->encodeSample(instruction, inHostFunction)), m_interpreter->sampler()->sampleSlot());
+ void sampleInstruction(Instruction*, bool = false);
#endif
- }
+
+#if ENABLE(CODEBLOCK_SAMPLING)
+ void sampleCodeBlock(CodeBlock*);
#else
- void sampleInstruction(Instruction*, bool) {}
+ void sampleCodeBlock(CodeBlock*) {}
#endif
Interpreter* m_interpreter;
@@ -580,19 +701,9 @@ namespace JSC {
Vector<Label> m_labels;
Vector<PropertyStubCompilationInfo> m_propertyAccessCompilationInfo;
Vector<StructureStubCompilationInfo> m_callStructureStubCompilationInfo;
+ Vector<MethodCallCompilationInfo> m_methodCallCompilationInfo;
Vector<JumpTable> m_jmpTable;
- struct JSRInfo {
- DataLabelPtr storeLocation;
- Label target;
-
- JSRInfo(DataLabelPtr storeLocation, Label targetLocation)
- : storeLocation(storeLocation)
- , target(targetLocation)
- {
- }
- };
-
unsigned m_bytecodeIndex;
Vector<JSRInfo> m_jsrSites;
Vector<SlowCaseEntry> m_slowCases;
@@ -600,7 +711,12 @@ namespace JSC {
int m_lastResultBytecodeRegister;
unsigned m_jumpTargetsPosition;
- };
+
+ unsigned m_propertyAccessInstructionIndex;
+ unsigned m_globalResolveInfoIndex;
+ unsigned m_callLinkInfoIndex;
+ } JIT_CLASS_ALIGNMENT;
+
}
#endif // ENABLE(JIT)
diff --git a/JavaScriptCore/jit/JITArithmetic.cpp b/JavaScriptCore/jit/JITArithmetic.cpp
index 8fe245e..86c01d9 100644
--- a/JavaScriptCore/jit/JITArithmetic.cpp
+++ b/JavaScriptCore/jit/JITArithmetic.cpp
@@ -30,6 +30,7 @@
#include "CodeBlock.h"
#include "JITInlineMethods.h"
+#include "JITStubCall.h"
#include "JSArray.h"
#include "JSFunction.h"
#include "Interpreter.h"
@@ -40,14 +41,17 @@
#include <stdio.h>
#endif
-#define __ m_assembler.
using namespace std;
namespace JSC {
-void JIT::compileFastArith_op_lshift(unsigned result, unsigned op1, unsigned op2)
+void JIT::emit_op_lshift(Instruction* currentInstruction)
{
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
emitGetVirtualRegisters(op1, regT0, op2, regT2);
// FIXME: would we be better off using 'emitJumpSlowCaseIfNotImmediateIntegers'? - we *probably* ought to be consistent.
emitJumpSlowCaseIfNotImmediateInteger(regT0);
@@ -67,8 +71,13 @@ void JIT::compileFastArith_op_lshift(unsigned result, unsigned op1, unsigned op2
emitFastArithReTagImmediate(regT0, regT0);
emitPutVirtualRegister(result);
}
-void JIT::compileFastArithSlow_op_lshift(unsigned result, unsigned op1, unsigned op2, Vector<SlowCaseEntry>::iterator& iter)
+
+void JIT::emitSlow_op_lshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
#if USE(ALTERNATE_JSIMMEDIATE)
UNUSED_PARAM(op1);
UNUSED_PARAM(op2);
@@ -83,15 +92,20 @@ void JIT::compileFastArithSlow_op_lshift(unsigned result, unsigned op1, unsigned
notImm1.link(this);
notImm2.link(this);
#endif
- emitPutJITStubArg(regT0, 1);
- emitPutJITStubArg(regT2, 2);
- emitCTICall(JITStubs::cti_op_lshift);
- emitPutVirtualRegister(result);
+ JITStubCall stubCall(this, JITStubs::cti_op_lshift);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(regT2);
+ stubCall.call(result);
}
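// The sequence above is the pattern this patch applies throughout: construct a
// JITStubCall with the target stub, marshal arguments with addArgument(), then
// call() with an optional destination virtual register - replacing the old
// emitPutJITStubArg()/emitCTICall()/emitPutVirtualRegister() triple. In sketch
// form (cti_op_foo is a placeholder):
//
//     JITStubCall stubCall(this, JITStubs::cti_op_foo);
//     stubCall.addArgument(regT0);       // value already in a register
//     stubCall.addArgument(op2, regT2);  // virtual register, loaded via regT2
//     stubCall.call(result);             // call the stub, store to 'result'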
-void JIT::compileFastArith_op_rshift(unsigned result, unsigned op1, unsigned op2)
+void JIT::emit_op_rshift(Instruction* currentInstruction)
{
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
if (isOperandConstantImmediateInt(op2)) {
+ // isOperandConstantImmediateInt(op2) => 1 SlowCase
emitGetVirtualRegister(op1, regT0);
emitJumpSlowCaseIfNotImmediateInteger(regT0);
// Mask with 0x1f as per ecma-262 11.7.2 step 7.
@@ -102,8 +116,28 @@ void JIT::compileFastArith_op_rshift(unsigned result, unsigned op1, unsigned op2
#endif
} else {
emitGetVirtualRegisters(op1, regT0, op2, regT2);
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
- emitJumpSlowCaseIfNotImmediateInteger(regT2);
+ if (supportsFloatingPointTruncate()) {
+ Jump lhsIsInt = emitJumpIfImmediateInteger(regT0);
+#if USE(ALTERNATE_JSIMMEDIATE)
+ // supportsFloatingPointTruncate() && USE(ALTERNATE_JSIMMEDIATE) => 3 SlowCases
+ addSlowCase(emitJumpIfNotImmediateNumber(regT0));
+ movePtrToDouble(regT0, fpRegT0);
+ addSlowCase(branchTruncateDoubleToInt32(fpRegT0, regT0));
+#else
+ // supportsFloatingPointTruncate() && !USE(ALTERNATE_JSIMMEDIATE) => 5 SlowCases (of which 1 IfNotJSCell)
+ emitJumpSlowCaseIfNotJSCell(regT0, op1);
+ addSlowCase(checkStructure(regT0, m_globalData->numberStructure.get()));
+ loadDouble(Address(regT0, FIELD_OFFSET(JSNumberCell, m_value)), fpRegT0);
+ addSlowCase(branchTruncateDoubleToInt32(fpRegT0, regT0));
+ addSlowCase(branchAdd32(Overflow, regT0, regT0));
+#endif
+ lhsIsInt.link(this);
+ emitJumpSlowCaseIfNotImmediateInteger(regT2);
+ } else {
+ // !supportsFloatingPointTruncate() => 2 SlowCases
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT2);
+ }
emitFastArithImmToInt(regT2);
#if !PLATFORM(X86)
// Mask with 0x1f as per ecma-262 11.7.2 step 7.
@@ -123,23 +157,424 @@ void JIT::compileFastArith_op_rshift(unsigned result, unsigned op1, unsigned op2
#endif
emitPutVirtualRegister(result);
}
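// The 0x1f masking above follows ECMA-262 11.7.2 step 7: only the low five
// bits of the shift count are used, so e.g. (42 >> 37) evaluates as
// 42 >> (37 & 0x1f) == 42 >> 5 == 1. On x86 the mask is skipped because the
// hardware shift already reduces a count held in CL modulo 32.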
-void JIT::compileFastArithSlow_op_rshift(unsigned result, unsigned, unsigned op2, Vector<SlowCaseEntry>::iterator& iter)
+
+void JIT::emitSlow_op_rshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- linkSlowCase(iter);
- if (isOperandConstantImmediateInt(op2))
- emitPutJITStubArgFromVirtualRegister(op2, 2, regT2);
- else {
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ JITStubCall stubCall(this, JITStubs::cti_op_rshift);
+
+ if (isOperandConstantImmediateInt(op2)) {
linkSlowCase(iter);
- emitPutJITStubArg(regT2, 2);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(op2, regT2);
+ } else {
+ if (supportsFloatingPointTruncate()) {
+#if USE(ALTERNATE_JSIMMEDIATE)
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+#else
+ linkSlowCaseIfNotJSCell(iter, op1);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+#endif
+ // We're reloading op1 to regT0 as we can no longer guarantee that
+ // we have not munged the operand. It may have already been shifted
+ // correctly, but it still will not have been tagged.
+ stubCall.addArgument(op1, regT0);
+ stubCall.addArgument(regT2);
+ } else {
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(regT2);
+ }
}
- emitPutJITStubArg(regT0, 1);
- emitCTICall(JITStubs::cti_op_rshift);
- emitPutVirtualRegister(result);
+ stubCall.call(result);
+}
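// Bookkeeping note: the "=> N SlowCases" comments in emit_op_rshift are
// load-bearing. Each emitJumpSlowCaseIfNot*()/addSlowCase() in the fast path
// appends one slow-case entry, and the slow path above must consume exactly
// that many via linkSlowCase(iter), in order, for whichever configuration
// (constant op2, floating-point truncation support, immediate encoding) the
// fast path was compiled under.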
+
+void JIT::emit_op_jnless(Instruction* currentInstruction)
+{
+ unsigned op1 = currentInstruction[1].u.operand;
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+
+ // We generate inline code for the following cases in the fast path:
+ // - int immediate to constant int immediate
+ // - constant int immediate to int immediate
+ // - int immediate to int immediate
+
+ if (isOperandConstantImmediateInt(op2)) {
+ emitGetVirtualRegister(op1, regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+#if USE(ALTERNATE_JSIMMEDIATE)
+ int32_t op2imm = getConstantOperandImmediateInt(op2);
+#else
+ int32_t op2imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)));
+#endif
+ addJump(branch32(GreaterThanOrEqual, regT0, Imm32(op2imm)), target + 3);
+ } else if (isOperandConstantImmediateInt(op1)) {
+ emitGetVirtualRegister(op2, regT1);
+ emitJumpSlowCaseIfNotImmediateInteger(regT1);
+#if USE(ALTERNATE_JSIMMEDIATE)
+ int32_t op1imm = getConstantOperandImmediateInt(op1);
+#else
+ int32_t op1imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op1)));
+#endif
+ addJump(branch32(LessThanOrEqual, regT1, Imm32(op1imm)), target + 3);
+ } else {
+ emitGetVirtualRegisters(op1, regT0, op2, regT1);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT1);
+
+ addJump(branch32(GreaterThanOrEqual, regT0, regT1), target + 3);
+ }
+}
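// op_jnless is the inverted branch the bytecode generator emits for constructs
// like "if (a < b) ...": the fast path falls through when the integer
// comparison a < b holds and jumps to 'target' when it does not. For example,
// with op1 == 5 and constant op2 == 10, branch32(GreaterThanOrEqual, regT0,
// Imm32(10)) is not taken, so execution continues into the "less" arm;
// non-integer operands fall to the slow path below.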
+
+void JIT::emitSlow_op_jnless(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned op1 = currentInstruction[1].u.operand;
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+
+ // We generate inline code for the following cases in the slow path:
+ // - floating-point number to constant int immediate
+ // - constant int immediate to floating-point number
+ // - floating-point number to floating-point number.
+
+ if (isOperandConstantImmediateInt(op2)) {
+ linkSlowCase(iter);
+
+ if (supportsFloatingPoint()) {
+#if USE(ALTERNATE_JSIMMEDIATE)
+ Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
+ addPtr(tagTypeNumberRegister, regT0);
+ movePtrToDouble(regT0, fpRegT0);
+#else
+ Jump fail1;
+ if (!m_codeBlock->isKnownNotImmediate(op1))
+ fail1 = emitJumpIfNotJSCell(regT0);
+
+ Jump fail2 = checkStructure(regT0, m_globalData->numberStructure.get());
+ loadDouble(Address(regT0, FIELD_OFFSET(JSNumberCell, m_value)), fpRegT0);
+#endif
+
+ int32_t op2imm = getConstantOperand(op2).getInt32Fast();
+
+ move(Imm32(op2imm), regT1);
+ convertInt32ToDouble(regT1, fpRegT1);
+
+ emitJumpSlowToHot(branchDouble(DoubleLessThanOrEqual, fpRegT1, fpRegT0), target + 3);
+
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));
+
+#if USE(ALTERNATE_JSIMMEDIATE)
+ fail1.link(this);
+#else
+ if (!m_codeBlock->isKnownNotImmediate(op1))
+ fail1.link(this);
+ fail2.link(this);
+#endif
+ }
+
+ JITStubCall stubCall(this, JITStubs::cti_op_jless);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(op2, regT2);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(Zero, regT0), target + 3);
+
+ } else if (isOperandConstantImmediateInt(op1)) {
+ linkSlowCase(iter);
+
+ if (supportsFloatingPoint()) {
+#if USE(ALTERNATE_JSIMMEDIATE)
+ Jump fail1 = emitJumpIfNotImmediateNumber(regT1);
+ addPtr(tagTypeNumberRegister, regT1);
+ movePtrToDouble(regT1, fpRegT1);
+#else
+ Jump fail1;
+ if (!m_codeBlock->isKnownNotImmediate(op2))
+ fail1 = emitJumpIfNotJSCell(regT1);
+
+ Jump fail2 = checkStructure(regT1, m_globalData->numberStructure.get());
+ loadDouble(Address(regT1, FIELD_OFFSET(JSNumberCell, m_value)), fpRegT1);
+#endif
+
+ int32_t op1imm = getConstantOperand(op1).getInt32Fast();
+
+ move(Imm32(op1imm), regT0);
+ convertInt32ToDouble(regT0, fpRegT0);
+
+ emitJumpSlowToHot(branchDouble(DoubleLessThanOrEqual, fpRegT1, fpRegT0), target + 3);
+
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));
+
+#if USE(ALTERNATE_JSIMMEDIATE)
+ fail1.link(this);
+#else
+ if (!m_codeBlock->isKnownNotImmediate(op2))
+ fail1.link(this);
+ fail2.link(this);
+#endif
+ }
+
+ JITStubCall stubCall(this, JITStubs::cti_op_jless);
+ stubCall.addArgument(op1, regT2);
+ stubCall.addArgument(regT1);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(Zero, regT0), target + 3);
+
+ } else {
+ linkSlowCase(iter);
+
+ if (supportsFloatingPoint()) {
+#if USE(ALTERNATE_JSIMMEDIATE)
+ Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
+ Jump fail2 = emitJumpIfNotImmediateNumber(regT1);
+ Jump fail3 = emitJumpIfImmediateInteger(regT1);
+ addPtr(tagTypeNumberRegister, regT0);
+ addPtr(tagTypeNumberRegister, regT1);
+ movePtrToDouble(regT0, fpRegT0);
+ movePtrToDouble(regT1, fpRegT1);
+#else
+ Jump fail1;
+ if (!m_codeBlock->isKnownNotImmediate(op1))
+ fail1 = emitJumpIfNotJSCell(regT0);
+
+ Jump fail2;
+ if (!m_codeBlock->isKnownNotImmediate(op2))
+ fail2 = emitJumpIfNotJSCell(regT1);
+
+ Jump fail3 = checkStructure(regT0, m_globalData->numberStructure.get());
+ Jump fail4 = checkStructure(regT1, m_globalData->numberStructure.get());
+ loadDouble(Address(regT0, FIELD_OFFSET(JSNumberCell, m_value)), fpRegT0);
+ loadDouble(Address(regT1, FIELD_OFFSET(JSNumberCell, m_value)), fpRegT1);
+#endif
+
+ emitJumpSlowToHot(branchDouble(DoubleLessThanOrEqual, fpRegT1, fpRegT0), target + 3);
+
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));
+
+#if USE(ALTERNATE_JSIMMEDIATE)
+ fail1.link(this);
+ fail2.link(this);
+ fail3.link(this);
+#else
+ if (!m_codeBlock->isKnownNotImmediate(op1))
+ fail1.link(this);
+ if (!m_codeBlock->isKnownNotImmediate(op2))
+ fail2.link(this);
+ fail3.link(this);
+ fail4.link(this);
+#endif
+ }
+
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, JITStubs::cti_op_jless);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(regT1);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(Zero, regT0), target + 3);
+ }
+}
+
+void JIT::emit_op_jnlesseq(Instruction* currentInstruction)
+{
+ unsigned op1 = currentInstruction[1].u.operand;
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+
+ // We generate inline code for the following cases in the fast path:
+ // - int immediate to constant int immediate
+ // - constant int immediate to int immediate
+ // - int immediate to int immediate
+
+ if (isOperandConstantImmediateInt(op2)) {
+ emitGetVirtualRegister(op1, regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+#if USE(ALTERNATE_JSIMMEDIATE)
+ int32_t op2imm = getConstantOperandImmediateInt(op2);
+#else
+ int32_t op2imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)));
+#endif
+ addJump(branch32(GreaterThan, regT0, Imm32(op2imm)), target + 3);
+ } else if (isOperandConstantImmediateInt(op1)) {
+ emitGetVirtualRegister(op2, regT1);
+ emitJumpSlowCaseIfNotImmediateInteger(regT1);
+#if USE(ALTERNATE_JSIMMEDIATE)
+ int32_t op1imm = getConstantOperandImmediateInt(op1);
+#else
+ int32_t op1imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op1)));
+#endif
+ addJump(branch32(LessThan, regT1, Imm32(op1imm)), target + 3);
+ } else {
+ emitGetVirtualRegisters(op1, regT0, op2, regT1);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT1);
+
+ addJump(branch32(GreaterThan, regT0, regT1), target + 3);
+ }
+}
+
+void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned op1 = currentInstruction[1].u.operand;
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+
+ // We generate inline code for the following cases in the slow path:
+ // - floating-point number to constant int immediate
+ // - constant int immediate to floating-point number
+ // - floating-point number to floating-point number.
+
+ if (isOperandConstantImmediateInt(op2)) {
+ linkSlowCase(iter);
+
+ if (supportsFloatingPoint()) {
+#if USE(ALTERNATE_JSIMMEDIATE)
+ Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
+ addPtr(tagTypeNumberRegister, regT0);
+ movePtrToDouble(regT0, fpRegT0);
+#else
+ Jump fail1;
+ if (!m_codeBlock->isKnownNotImmediate(op1))
+ fail1 = emitJumpIfNotJSCell(regT0);
+
+ Jump fail2 = checkStructure(regT0, m_globalData->numberStructure.get());
+ loadDouble(Address(regT0, FIELD_OFFSET(JSNumberCell, m_value)), fpRegT0);
+#endif
+
+ int32_t op2imm = getConstantOperand(op2).getInt32Fast();
+
+ move(Imm32(op2imm), regT1);
+ convertInt32ToDouble(regT1, fpRegT1);
+
+ emitJumpSlowToHot(branchDouble(DoubleLessThan, fpRegT1, fpRegT0), target + 3);
+
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnlesseq));
+
+#if USE(ALTERNATE_JSIMMEDIATE)
+ fail1.link(this);
+#else
+ if (!m_codeBlock->isKnownNotImmediate(op1))
+ fail1.link(this);
+ fail2.link(this);
+#endif
+ }
+
+ JITStubCall stubCall(this, JITStubs::cti_op_jlesseq);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(op2, regT2);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(Zero, regT0), target + 3);
+
+ } else if (isOperandConstantImmediateInt(op1)) {
+ linkSlowCase(iter);
+
+ if (supportsFloatingPoint()) {
+#if USE(ALTERNATE_JSIMMEDIATE)
+ Jump fail1 = emitJumpIfNotImmediateNumber(regT1);
+ addPtr(tagTypeNumberRegister, regT1);
+ movePtrToDouble(regT1, fpRegT1);
+#else
+ Jump fail1;
+ if (!m_codeBlock->isKnownNotImmediate(op2))
+ fail1 = emitJumpIfNotJSCell(regT1);
+
+ Jump fail2 = checkStructure(regT1, m_globalData->numberStructure.get());
+ loadDouble(Address(regT1, FIELD_OFFSET(JSNumberCell, m_value)), fpRegT1);
+#endif
+
+ int32_t op1imm = getConstantOperand(op1).getInt32Fast();
+
+ move(Imm32(op1imm), regT0);
+ convertInt32ToDouble(regT0, fpRegT0);
+
+ emitJumpSlowToHot(branchDouble(DoubleLessThan, fpRegT1, fpRegT0), target + 3);
+
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnlesseq));
+
+#if USE(ALTERNATE_JSIMMEDIATE)
+ fail1.link(this);
+#else
+ if (!m_codeBlock->isKnownNotImmediate(op2))
+ fail1.link(this);
+ fail2.link(this);
+#endif
+ }
+
+ JITStubCall stubCall(this, JITStubs::cti_op_jlesseq);
+ stubCall.addArgument(op1, regT2);
+ stubCall.addArgument(regT1);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(Zero, regT0), target + 3);
+
+ } else {
+ linkSlowCase(iter);
+
+ if (supportsFloatingPoint()) {
+#if USE(ALTERNATE_JSIMMEDIATE)
+ Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
+ Jump fail2 = emitJumpIfNotImmediateNumber(regT1);
+ Jump fail3 = emitJumpIfImmediateInteger(regT1);
+ addPtr(tagTypeNumberRegister, regT0);
+ addPtr(tagTypeNumberRegister, regT1);
+ movePtrToDouble(regT0, fpRegT0);
+ movePtrToDouble(regT1, fpRegT1);
+#else
+ Jump fail1;
+ if (!m_codeBlock->isKnownNotImmediate(op1))
+ fail1 = emitJumpIfNotJSCell(regT0);
+
+ Jump fail2;
+ if (!m_codeBlock->isKnownNotImmediate(op2))
+ fail2 = emitJumpIfNotJSCell(regT1);
+
+ Jump fail3 = checkStructure(regT0, m_globalData->numberStructure.get());
+ Jump fail4 = checkStructure(regT1, m_globalData->numberStructure.get());
+ loadDouble(Address(regT0, FIELD_OFFSET(JSNumberCell, m_value)), fpRegT0);
+ loadDouble(Address(regT1, FIELD_OFFSET(JSNumberCell, m_value)), fpRegT1);
+#endif
+
+ emitJumpSlowToHot(branchDouble(DoubleLessThan, fpRegT1, fpRegT0), target + 3);
+
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnlesseq));
+
+#if USE(ALTERNATE_JSIMMEDIATE)
+ fail1.link(this);
+ fail2.link(this);
+ fail3.link(this);
+#else
+ if (!m_codeBlock->isKnownNotImmediate(op1))
+ fail1.link(this);
+ if (!m_codeBlock->isKnownNotImmediate(op2))
+ fail2.link(this);
+ fail3.link(this);
+ fail4.link(this);
+#endif
+ }
+
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, JITStubs::cti_op_jlesseq);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(regT1);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(Zero, regT0), target + 3);
+ }
}
-void JIT::compileFastArith_op_bitand(unsigned result, unsigned op1, unsigned op2)
+void JIT::emit_op_bitand(Instruction* currentInstruction)
{
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
if (isOperandConstantImmediateInt(op1)) {
emitGetVirtualRegister(op2, regT0);
emitJumpSlowCaseIfNotImmediateInteger(regT0);
@@ -169,79 +604,37 @@ void JIT::compileFastArith_op_bitand(unsigned result, unsigned op1, unsigned op2
}
emitPutVirtualRegister(result);
}
-void JIT::compileFastArithSlow_op_bitand(unsigned result, unsigned op1, unsigned op2, Vector<SlowCaseEntry>::iterator& iter)
+
+void JIT::emitSlow_op_bitand(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
linkSlowCase(iter);
if (isOperandConstantImmediateInt(op1)) {
- emitPutJITStubArgFromVirtualRegister(op1, 1, regT2);
- emitPutJITStubArg(regT0, 2);
+ JITStubCall stubCall(this, JITStubs::cti_op_bitand);
+ stubCall.addArgument(op1, regT2);
+ stubCall.addArgument(regT0);
+ stubCall.call(result);
} else if (isOperandConstantImmediateInt(op2)) {
- emitPutJITStubArg(regT0, 1);
- emitPutJITStubArgFromVirtualRegister(op2, 2, regT2);
+ JITStubCall stubCall(this, JITStubs::cti_op_bitand);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(op2, regT2);
+ stubCall.call(result);
} else {
- emitPutJITStubArgFromVirtualRegister(op1, 1, regT2);
- emitPutJITStubArg(regT1, 2);
+ JITStubCall stubCall(this, JITStubs::cti_op_bitand);
+ stubCall.addArgument(op1, regT2);
+ stubCall.addArgument(regT1);
+ stubCall.call(result);
}
- emitCTICall(JITStubs::cti_op_bitand);
- emitPutVirtualRegister(result);
}
-#if PLATFORM(X86) || PLATFORM(X86_64)
-void JIT::compileFastArith_op_mod(unsigned result, unsigned op1, unsigned op2)
+void JIT::emit_op_post_inc(Instruction* currentInstruction)
{
- emitGetVirtualRegisters(op1, X86::eax, op2, X86::ecx);
- emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
- emitJumpSlowCaseIfNotImmediateInteger(X86::ecx);
-#if USE(ALTERNATE_JSIMMEDIATE)
- addSlowCase(branchPtr(Equal, X86::ecx, ImmPtr(JSValuePtr::encode(js0()))));
- m_assembler.cdq();
- m_assembler.idivl_r(X86::ecx);
-#else
- emitFastArithDeTagImmediate(X86::eax);
- addSlowCase(emitFastArithDeTagImmediateJumpIfZero(X86::ecx));
- m_assembler.cdq();
- m_assembler.idivl_r(X86::ecx);
- signExtend32ToPtr(X86::edx, X86::edx);
-#endif
- emitFastArithReTagImmediate(X86::edx, X86::eax);
- emitPutVirtualRegister(result);
-}
-void JIT::compileFastArithSlow_op_mod(unsigned result, unsigned, unsigned, Vector<SlowCaseEntry>::iterator& iter)
-{
-#if USE(ALTERNATE_JSIMMEDIATE)
- linkSlowCase(iter);
- linkSlowCase(iter);
- linkSlowCase(iter);
-#else
- Jump notImm1 = getSlowCase(iter);
- Jump notImm2 = getSlowCase(iter);
- linkSlowCase(iter);
- emitFastArithReTagImmediate(X86::eax, X86::eax);
- emitFastArithReTagImmediate(X86::ecx, X86::ecx);
- notImm1.link(this);
- notImm2.link(this);
-#endif
- emitPutJITStubArg(X86::eax, 1);
- emitPutJITStubArg(X86::ecx, 2);
- emitCTICall(JITStubs::cti_op_mod);
- emitPutVirtualRegister(result);
-}
-#else
-void JIT::compileFastArith_op_mod(unsigned result, unsigned op1, unsigned op2)
-{
- emitPutJITStubArgFromVirtualRegister(op1, 1, regT2);
- emitPutJITStubArgFromVirtualRegister(op2, 2, regT2);
- emitCTICall(JITStubs::cti_op_mod);
- emitPutVirtualRegister(result);
-}
-void JIT::compileFastArithSlow_op_mod(unsigned, unsigned, unsigned, Vector<SlowCaseEntry>::iterator&)
-{
- ASSERT_NOT_REACHED();
-}
-#endif
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned srcDst = currentInstruction[2].u.operand;
-void JIT::compileFastArith_op_post_inc(unsigned result, unsigned srcDst)
-{
emitGetVirtualRegister(srcDst, regT0);
move(regT0, regT1);
emitJumpSlowCaseIfNotImmediateInteger(regT0);
@@ -255,18 +648,25 @@ void JIT::compileFastArith_op_post_inc(unsigned result, unsigned srcDst)
emitPutVirtualRegister(srcDst, regT1);
emitPutVirtualRegister(result);
}
-void JIT::compileFastArithSlow_op_post_inc(unsigned result, unsigned srcDst, Vector<SlowCaseEntry>::iterator& iter)
+
+void JIT::emitSlow_op_post_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned srcDst = currentInstruction[2].u.operand;
+
linkSlowCase(iter);
linkSlowCase(iter);
- emitPutJITStubArg(regT0, 1);
- emitCTICall(JITStubs::cti_op_post_inc);
- emitPutVirtualRegister(srcDst, regT1);
- emitPutVirtualRegister(result);
+ JITStubCall stubCall(this, JITStubs::cti_op_post_inc);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(Imm32(srcDst));
+ stubCall.call(result);
}
-void JIT::compileFastArith_op_post_dec(unsigned result, unsigned srcDst)
+void JIT::emit_op_post_dec(Instruction* currentInstruction)
{
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned srcDst = currentInstruction[2].u.operand;
+
emitGetVirtualRegister(srcDst, regT0);
move(regT0, regT1);
emitJumpSlowCaseIfNotImmediateInteger(regT0);
@@ -280,18 +680,24 @@ void JIT::compileFastArith_op_post_dec(unsigned result, unsigned srcDst)
emitPutVirtualRegister(srcDst, regT1);
emitPutVirtualRegister(result);
}
-void JIT::compileFastArithSlow_op_post_dec(unsigned result, unsigned srcDst, Vector<SlowCaseEntry>::iterator& iter)
+
+void JIT::emitSlow_op_post_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned srcDst = currentInstruction[2].u.operand;
+
linkSlowCase(iter);
linkSlowCase(iter);
- emitPutJITStubArg(regT0, 1);
- emitCTICall(JITStubs::cti_op_post_dec);
- emitPutVirtualRegister(srcDst, regT1);
- emitPutVirtualRegister(result);
+ JITStubCall stubCall(this, JITStubs::cti_op_post_dec);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(Imm32(srcDst));
+ stubCall.call(result);
}
-void JIT::compileFastArith_op_pre_inc(unsigned srcDst)
+void JIT::emit_op_pre_inc(Instruction* currentInstruction)
{
+ unsigned srcDst = currentInstruction[1].u.operand;
+
emitGetVirtualRegister(srcDst, regT0);
emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(ALTERNATE_JSIMMEDIATE)
@@ -303,19 +709,24 @@ void JIT::compileFastArith_op_pre_inc(unsigned srcDst)
#endif
emitPutVirtualRegister(srcDst);
}
-void JIT::compileFastArithSlow_op_pre_inc(unsigned srcDst, Vector<SlowCaseEntry>::iterator& iter)
+
+void JIT::emitSlow_op_pre_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
+ unsigned srcDst = currentInstruction[1].u.operand;
+
Jump notImm = getSlowCase(iter);
linkSlowCase(iter);
emitGetVirtualRegister(srcDst, regT0);
notImm.link(this);
- emitPutJITStubArg(regT0, 1);
- emitCTICall(JITStubs::cti_op_pre_inc);
- emitPutVirtualRegister(srcDst);
+ JITStubCall stubCall(this, JITStubs::cti_op_pre_inc);
+ stubCall.addArgument(regT0);
+ stubCall.call(srcDst);
}
-void JIT::compileFastArith_op_pre_dec(unsigned srcDst)
+void JIT::emit_op_pre_dec(Instruction* currentInstruction)
{
+ unsigned srcDst = currentInstruction[1].u.operand;
+
emitGetVirtualRegister(srcDst, regT0);
emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(ALTERNATE_JSIMMEDIATE)
@@ -327,88 +738,171 @@ void JIT::compileFastArith_op_pre_dec(unsigned srcDst)
#endif
emitPutVirtualRegister(srcDst);
}
-void JIT::compileFastArithSlow_op_pre_dec(unsigned srcDst, Vector<SlowCaseEntry>::iterator& iter)
+
+void JIT::emitSlow_op_pre_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
+ unsigned srcDst = currentInstruction[1].u.operand;
+
Jump notImm = getSlowCase(iter);
linkSlowCase(iter);
emitGetVirtualRegister(srcDst, regT0);
notImm.link(this);
- emitPutJITStubArg(regT0, 1);
- emitCTICall(JITStubs::cti_op_pre_dec);
- emitPutVirtualRegister(srcDst);
+ JITStubCall stubCall(this, JITStubs::cti_op_pre_dec);
+ stubCall.addArgument(regT0);
+ stubCall.call(srcDst);
}
+/* ------------------------------ BEGIN: OP_MOD ------------------------------ */
-#if !ENABLE(JIT_OPTIMIZE_ARITHMETIC)
+#if PLATFORM(X86) || PLATFORM(X86_64)
-void JIT::compileFastArith_op_add(Instruction* currentInstruction)
+void JIT::emit_op_mod(Instruction* currentInstruction)
{
unsigned result = currentInstruction[1].u.operand;
unsigned op1 = currentInstruction[2].u.operand;
unsigned op2 = currentInstruction[3].u.operand;
- emitPutJITStubArgFromVirtualRegister(op1, 1, regT2);
- emitPutJITStubArgFromVirtualRegister(op2, 2, regT2);
- emitCTICall(JITStubs::cti_op_add);
+ emitGetVirtualRegisters(op1, X86::eax, op2, X86::ecx);
+ emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
+ emitJumpSlowCaseIfNotImmediateInteger(X86::ecx);
+#if USE(ALTERNATE_JSIMMEDIATE)
+ addSlowCase(branchPtr(Equal, X86::ecx, ImmPtr(JSValue::encode(jsNumber(m_globalData, 0)))));
+ m_assembler.cdq();
+ m_assembler.idivl_r(X86::ecx);
+#else
+ emitFastArithDeTagImmediate(X86::eax);
+ addSlowCase(emitFastArithDeTagImmediateJumpIfZero(X86::ecx));
+ m_assembler.cdq();
+ m_assembler.idivl_r(X86::ecx);
+ signExtend32ToPtr(X86::edx, X86::edx);
+#endif
+ emitFastArithReTagImmediate(X86::edx, X86::eax);
emitPutVirtualRegister(result);
}
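// x86 background for the sequence above: cdq sign-extends eax into edx:eax,
// and idivl divides edx:eax by the operand, leaving the quotient in eax and
// the remainder in edx - hence the result is re-tagged from edx. The extra
// slow case guards the divide-by-zero hazard: idiv raises #DE rather than
// producing the NaN that JavaScript's % requires, so a zero divisor must take
// the stub path.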
-void JIT::compileFastArithSlow_op_add(Instruction*, Vector<SlowCaseEntry>::iterator&)
+
+void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned result = currentInstruction[1].u.operand;
+
+#if USE(ALTERNATE_JSIMMEDIATE)
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+#else
+ Jump notImm1 = getSlowCase(iter);
+ Jump notImm2 = getSlowCase(iter);
+ linkSlowCase(iter);
+ emitFastArithReTagImmediate(X86::eax, X86::eax);
+ emitFastArithReTagImmediate(X86::ecx, X86::ecx);
+ notImm1.link(this);
+ notImm2.link(this);
+#endif
+ JITStubCall stubCall(this, JITStubs::cti_op_mod);
+ stubCall.addArgument(X86::eax);
+ stubCall.addArgument(X86::ecx);
+ stubCall.call(result);
+}
+
+#else // PLATFORM(X86) || PLATFORM(X86_64)
+
+void JIT::emit_op_mod(Instruction* currentInstruction)
+{
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ JITStubCall stubCall(this, JITStubs::cti_op_mod);
+ stubCall.addArgument(op1, regT2);
+ stubCall.addArgument(op2, regT2);
+ stubCall.call(result);
+}
+
+void JIT::emitSlow_op_mod(Instruction*, Vector<SlowCaseEntry>::iterator&)
{
ASSERT_NOT_REACHED();
}
-void JIT::compileFastArith_op_mul(Instruction* currentInstruction)
+#endif // PLATFORM(X86) || PLATFORM(X86_64)
+
+/* ------------------------------ END: OP_MOD ------------------------------ */
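As an aside on what the x86 fast path above computes: idivl leaves the remainder in edx, and the generated code retags only that remainder. The following is a minimal C++ sketch of the semantics, assuming a (value << 1) | 1 immediate-int tagging purely for illustration; the real JSImmediate layout differs by build configuration.

    #include <cstdint>

    // Assumed tagging helpers -- stand-ins for emitFastArithDeTagImmediate /
    // emitFastArithReTagImmediate, not the real JSImmediate encoding.
    static inline int32_t untagInt(int32_t imm) { return imm >> 1; }
    static inline int32_t retagInt(int32_t n)
    {
        return static_cast<int32_t>((static_cast<uint32_t>(n) << 1) | 1u);
    }

    // Mirrors the OP_MOD fast path: bail to cti_op_mod on a zero divisor,
    // otherwise compute the remainder (cdq + idivl leaves it in edx) and retag.
    bool fastMod(int32_t taggedOp1, int32_t taggedOp2, int32_t& taggedResult)
    {
        int32_t divisor = untagInt(taggedOp2);
        if (divisor == 0)
            return false; // slow case: the stub handles this
        taggedResult = retagInt(untagInt(taggedOp1) % divisor);
        return true;
    }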
+
+#if !ENABLE(JIT_OPTIMIZE_ARITHMETIC)
+
+/* ------------------------------ BEGIN: !ENABLE(JIT_OPTIMIZE_ARITHMETIC) (OP_ADD, OP_SUB, OP_MUL) ------------------------------ */
+
+void JIT::emit_op_add(Instruction* currentInstruction)
{
unsigned result = currentInstruction[1].u.operand;
unsigned op1 = currentInstruction[2].u.operand;
unsigned op2 = currentInstruction[3].u.operand;
- emitPutJITStubArgFromVirtualRegister(op1, 1, regT2);
- emitPutJITStubArgFromVirtualRegister(op2, 2, regT2);
- emitCTICall(JITStubs::cti_op_mul);
- emitPutVirtualRegister(result);
+ JITStubCall stubCall(this, JITStubs::cti_op_add);
+ stubCall.addArgument(op1, regT2);
+ stubCall.addArgument(op2, regT2);
+ stubCall.call(result);
}
-void JIT::compileFastArithSlow_op_mul(Instruction*, Vector<SlowCaseEntry>::iterator&)
+
+void JIT::emitSlow_op_add(Instruction*, Vector<SlowCaseEntry>::iterator&)
{
ASSERT_NOT_REACHED();
}
-void JIT::compileFastArith_op_sub(Instruction* currentInstruction)
+void JIT::emit_op_mul(Instruction* currentInstruction)
{
unsigned result = currentInstruction[1].u.operand;
unsigned op1 = currentInstruction[2].u.operand;
unsigned op2 = currentInstruction[3].u.operand;
- emitPutJITStubArgFromVirtualRegister(op1, 1, regT2);
- emitPutJITStubArgFromVirtualRegister(op2, 2, regT2);
- emitCTICall(JITStubs::cti_op_sub);
- emitPutVirtualRegister(result);
+ JITStubCall stubCall(this, JITStubs::cti_op_mul);
+ stubCall.addArgument(op1, regT2);
+ stubCall.addArgument(op2, regT2);
+ stubCall.call(result);
}
-void JIT::compileFastArithSlow_op_sub(Instruction*, Vector<SlowCaseEntry>::iterator&)
+
+void JIT::emitSlow_op_mul(Instruction*, Vector<SlowCaseEntry>::iterator&)
+{
+ ASSERT_NOT_REACHED();
+}
+
+void JIT::emit_op_sub(Instruction* currentInstruction)
+{
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned op1 = currentInstruction[2].u.operand;
+ unsigned op2 = currentInstruction[3].u.operand;
+
+ JITStubCall stubCall(this, JITStubs::cti_op_sub);
+ stubCall.addArgument(op1, regT2);
+ stubCall.addArgument(op2, regT2);
+ stubCall.call(result);
+}
+
+void JIT::emitSlow_op_sub(Instruction*, Vector<SlowCaseEntry>::iterator&)
{
ASSERT_NOT_REACHED();
}
#elif USE(ALTERNATE_JSIMMEDIATE) // *AND* ENABLE(JIT_OPTIMIZE_ARITHMETIC)
+/* ------------------------------ BEGIN: USE(ALTERNATE_JSIMMEDIATE) (OP_ADD, OP_SUB, OP_MUL) ------------------------------ */
+
void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned, unsigned op1, unsigned op2, OperandTypes)
{
- emitGetVirtualRegisters(op1, X86::eax, op2, X86::edx);
- emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
- emitJumpSlowCaseIfNotImmediateInteger(X86::edx);
+ emitGetVirtualRegisters(op1, regT0, op2, regT1);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT1);
if (opcodeID == op_add)
- addSlowCase(branchAdd32(Overflow, X86::edx, X86::eax));
+ addSlowCase(branchAdd32(Overflow, regT1, regT0));
else if (opcodeID == op_sub)
- addSlowCase(branchSub32(Overflow, X86::edx, X86::eax));
+ addSlowCase(branchSub32(Overflow, regT1, regT0));
else {
ASSERT(opcodeID == op_mul);
- addSlowCase(branchMul32(Overflow, X86::edx, X86::eax));
- addSlowCase(branchTest32(Zero, X86::eax));
+ addSlowCase(branchMul32(Overflow, regT1, regT0));
+ addSlowCase(branchTest32(Zero, regT0));
}
- emitFastArithIntToImmNoCheck(X86::eax, X86::eax);
+ emitFastArithIntToImmNoCheck(regT0, regT0);
}
-void JIT::compileBinaryArithOpSlowCase(OpcodeID opcodeID, Vector<SlowCaseEntry>::iterator& iter, unsigned, unsigned op1, unsigned, OperandTypes types)
+void JIT::compileBinaryArithOpSlowCase(OpcodeID opcodeID, Vector<SlowCaseEntry>::iterator& iter, unsigned result, unsigned op1, unsigned, OperandTypes types)
{
// We assume that subtracting TagTypeNumber is equivalent to adding DoubleEncodeOffset.
COMPILE_ASSERT(((JSImmediate::TagTypeNumber + JSImmediate::DoubleEncodeOffset) == 0), TagTypeNumber_PLUS_DoubleEncodeOffset_EQUALS_0);
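The identity behind that COMPILE_ASSERT, spelled out with assumed JSVALUE64-style constants (illustrative values, not quoted from this patch): the two masks sum to zero modulo 2^64, so the addPtr(tagTypeNumberRegister, ...) used below to unbox a double is exactly a subtraction of DoubleEncodeOffset.

    #include <cassert>
    #include <cstdint>

    int main()
    {
        // Assumed encoding constants for illustration:
        const uint64_t tagTypeNumber      = 0xFFFF000000000000ull;
        const uint64_t doubleEncodeOffset = 0x0001000000000000ull;

        // They cancel modulo 2^64, which is what the COMPILE_ASSERT checks.
        assert(tagTypeNumber + doubleEncodeOffset == 0);

        // Hence adding one equals subtracting the other in 64-bit arithmetic.
        uint64_t boxed = 0x0123456789ABCDEFull;
        assert(boxed + tagTypeNumber == boxed - doubleEncodeOffset);
        return 0;
    }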
@@ -419,58 +913,53 @@ void JIT::compileBinaryArithOpSlowCase(OpcodeID opcodeID, Vector<SlowCaseEntry>:
linkSlowCase(iter); // Integer overflow case - we could handle this in JIT code, but this is likely rare.
if (opcodeID == op_mul) // op_mul has an extra slow case to handle 0 * negative number.
linkSlowCase(iter);
- emitGetVirtualRegister(op1, X86::eax);
+ emitGetVirtualRegister(op1, regT0);
Label stubFunctionCall(this);
- emitPutJITStubArg(X86::eax, 1);
- emitPutJITStubArg(X86::edx, 2);
- if (opcodeID == op_add)
- emitCTICall(JITStubs::cti_op_add);
- else if (opcodeID == op_sub)
- emitCTICall(JITStubs::cti_op_sub);
- else {
- ASSERT(opcodeID == op_mul);
- emitCTICall(JITStubs::cti_op_mul);
- }
+ JITStubCall stubCall(this, opcodeID == op_add ? JITStubs::cti_op_add : opcodeID == op_sub ? JITStubs::cti_op_sub : JITStubs::cti_op_mul);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(regT1);
+ stubCall.call(result);
Jump end = jump();
// if we get here, eax is not an int32, edx not yet checked.
notImm1.link(this);
if (!types.first().definitelyIsNumber())
- emitJumpIfNotImmediateNumber(X86::eax).linkTo(stubFunctionCall, this);
+ emitJumpIfNotImmediateNumber(regT0).linkTo(stubFunctionCall, this);
if (!types.second().definitelyIsNumber())
- emitJumpIfNotImmediateNumber(X86::edx).linkTo(stubFunctionCall, this);
- addPtr(tagTypeNumberRegister, X86::eax);
- m_assembler.movq_rr(X86::eax, X86::xmm1);
- Jump op2isDouble = emitJumpIfNotImmediateInteger(X86::edx);
- m_assembler.cvtsi2sd_rr(X86::edx, X86::xmm2);
+ emitJumpIfNotImmediateNumber(regT1).linkTo(stubFunctionCall, this);
+ addPtr(tagTypeNumberRegister, regT0);
+ movePtrToDouble(regT0, fpRegT1);
+ Jump op2isDouble = emitJumpIfNotImmediateInteger(regT1);
+ convertInt32ToDouble(regT1, fpRegT2);
Jump op2wasInteger = jump();
// if we get here, eax IS an int32, edx is not.
notImm2.link(this);
if (!types.second().definitelyIsNumber())
- emitJumpIfNotImmediateNumber(X86::edx).linkTo(stubFunctionCall, this);
- m_assembler.cvtsi2sd_rr(X86::eax, X86::xmm1);
+ emitJumpIfNotImmediateNumber(regT1).linkTo(stubFunctionCall, this);
+ convertInt32ToDouble(regT0, fpRegT1);
op2isDouble.link(this);
- addPtr(tagTypeNumberRegister, X86::edx);
- m_assembler.movq_rr(X86::edx, X86::xmm2);
+ addPtr(tagTypeNumberRegister, regT1);
+ movePtrToDouble(regT1, fpRegT2);
op2wasInteger.link(this);
if (opcodeID == op_add)
- m_assembler.addsd_rr(X86::xmm2, X86::xmm1);
+ addDouble(fpRegT2, fpRegT1);
else if (opcodeID == op_sub)
- m_assembler.subsd_rr(X86::xmm2, X86::xmm1);
+ subDouble(fpRegT2, fpRegT1);
else {
ASSERT(opcodeID == op_mul);
- m_assembler.mulsd_rr(X86::xmm2, X86::xmm1);
+ mulDouble(fpRegT2, fpRegT1);
}
- m_assembler.movq_rr(X86::xmm1, X86::eax);
- subPtr(tagTypeNumberRegister, X86::eax);
+ moveDoubleToPtr(fpRegT1, regT0);
+ subPtr(tagTypeNumberRegister, regT0);
+ emitPutVirtualRegister(result, regT0);
end.link(this);
}
-void JIT::compileFastArith_op_add(Instruction* currentInstruction)
+void JIT::emit_op_add(Instruction* currentInstruction)
{
unsigned result = currentInstruction[1].u.operand;
unsigned op1 = currentInstruction[2].u.operand;
@@ -478,54 +967,47 @@ void JIT::compileFastArith_op_add(Instruction* currentInstruction)
OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
- emitPutJITStubArgFromVirtualRegister(op1, 1, X86::ecx);
- emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx);
- emitCTICall(JITStubs::cti_op_add);
- emitPutVirtualRegister(result);
+ JITStubCall stubCall(this, JITStubs::cti_op_add);
+ stubCall.addArgument(op1, regT2);
+ stubCall.addArgument(op2, regT2);
+ stubCall.call(result);
return;
}
if (isOperandConstantImmediateInt(op1)) {
- emitGetVirtualRegister(op2, X86::eax);
- emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
- addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op1)), X86::eax));
- emitFastArithIntToImmNoCheck(X86::eax, X86::eax);
+ emitGetVirtualRegister(op2, regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op1)), regT0));
+ emitFastArithIntToImmNoCheck(regT0, regT0);
} else if (isOperandConstantImmediateInt(op2)) {
- emitGetVirtualRegister(op1, X86::eax);
- emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
- addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op2)), X86::eax));
- emitFastArithIntToImmNoCheck(X86::eax, X86::eax);
+ emitGetVirtualRegister(op1, regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op2)), regT0));
+ emitFastArithIntToImmNoCheck(regT0, regT0);
} else
compileBinaryArithOp(op_add, result, op1, op2, types);
emitPutVirtualRegister(result);
}
-void JIT::compileFastArithSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+
+void JIT::emitSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
unsigned result = currentInstruction[1].u.operand;
unsigned op1 = currentInstruction[2].u.operand;
unsigned op2 = currentInstruction[3].u.operand;
- OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
- if (isOperandConstantImmediateInt(op1)) {
- linkSlowCase(iter);
- linkSlowCase(iter);
- emitPutJITStubArgFromVirtualRegister(op1, 1, X86::ecx);
- emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx);
- emitCTICall(JITStubs::cti_op_add);
- } else if (isOperandConstantImmediateInt(op2)) {
+ if (isOperandConstantImmediateInt(op1) || isOperandConstantImmediateInt(op2)) {
linkSlowCase(iter);
linkSlowCase(iter);
- emitPutJITStubArgFromVirtualRegister(op1, 1, X86::ecx);
- emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx);
- emitCTICall(JITStubs::cti_op_add);
+ JITStubCall stubCall(this, JITStubs::cti_op_add);
+ stubCall.addArgument(op1, regT2);
+ stubCall.addArgument(op2, regT2);
+ stubCall.call(result);
} else
- compileBinaryArithOpSlowCase(op_add, iter, result, op1, op2, types);
-
- emitPutVirtualRegister(result);
+ compileBinaryArithOpSlowCase(op_add, iter, result, op1, op2, OperandTypes::fromInt(currentInstruction[4].u.operand));
}
-void JIT::compileFastArith_op_mul(Instruction* currentInstruction)
+void JIT::emit_op_mul(Instruction* currentInstruction)
{
unsigned result = currentInstruction[1].u.operand;
unsigned op1 = currentInstruction[2].u.operand;
@@ -535,21 +1017,22 @@ void JIT::compileFastArith_op_mul(Instruction* currentInstruction)
// For now, only plant a fast int case if the constant operand is greater than zero.
int32_t value;
if (isOperandConstantImmediateInt(op1) && ((value = getConstantOperandImmediateInt(op1)) > 0)) {
- emitGetVirtualRegister(op2, X86::eax);
- emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
- addSlowCase(branchMul32(Overflow, Imm32(value), X86::eax, X86::eax));
- emitFastArithReTagImmediate(X86::eax, X86::eax);
+ emitGetVirtualRegister(op2, regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT0));
+ emitFastArithReTagImmediate(regT0, regT0);
} else if (isOperandConstantImmediateInt(op2) && ((value = getConstantOperandImmediateInt(op2)) > 0)) {
- emitGetVirtualRegister(op1, X86::eax);
- emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
- addSlowCase(branchMul32(Overflow, Imm32(value), X86::eax, X86::eax));
- emitFastArithReTagImmediate(X86::eax, X86::eax);
+ emitGetVirtualRegister(op1, regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT0));
+ emitFastArithReTagImmediate(regT0, regT0);
} else
compileBinaryArithOp(op_mul, result, op1, op2, types);
emitPutVirtualRegister(result);
}
-void JIT::compileFastArithSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+
+void JIT::emitSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
unsigned result = currentInstruction[1].u.operand;
unsigned op1 = currentInstruction[2].u.operand;
@@ -561,16 +1044,15 @@ void JIT::compileFastArithSlow_op_mul(Instruction* currentInstruction, Vector<Sl
linkSlowCase(iter);
linkSlowCase(iter);
// There is an extra slow case for (op1 * -N) or (-N * op2), to check for 0 since this should produce a result of -0.
- emitPutJITStubArgFromVirtualRegister(op1, 1, X86::ecx);
- emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx);
- emitCTICall(JITStubs::cti_op_mul);
+ JITStubCall stubCall(this, JITStubs::cti_op_mul);
+ stubCall.addArgument(op1, regT2);
+ stubCall.addArgument(op2, regT2);
+ stubCall.call(result);
} else
compileBinaryArithOpSlowCase(op_mul, iter, result, op1, op2, types);
-
- emitPutVirtualRegister(result);
}
-void JIT::compileFastArith_op_sub(Instruction* currentInstruction)
+void JIT::emit_op_sub(Instruction* currentInstruction)
{
unsigned result = currentInstruction[1].u.operand;
unsigned op1 = currentInstruction[2].u.operand;
@@ -581,7 +1063,8 @@ void JIT::compileFastArith_op_sub(Instruction* currentInstruction)
emitPutVirtualRegister(result);
}
-void JIT::compileFastArithSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+
+void JIT::emitSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
unsigned result = currentInstruction[1].u.operand;
unsigned op1 = currentInstruction[2].u.operand;
@@ -589,218 +1072,153 @@ void JIT::compileFastArithSlow_op_sub(Instruction* currentInstruction, Vector<Sl
OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
compileBinaryArithOpSlowCase(op_sub, iter, result, op1, op2, types);
-
- emitPutVirtualRegister(result);
-}
-
-#else
-
-typedef X86Assembler::JmpSrc JmpSrc;
-typedef X86Assembler::JmpDst JmpDst;
-typedef X86Assembler::XMMRegisterID XMMRegisterID;
-
-#if PLATFORM(MAC)
-
-static inline bool isSSE2Present()
-{
- return true; // All X86 Macs are guaranteed to support at least SSE2
}
-#else
-
-static bool isSSE2Present()
-{
- static const int SSE2FeatureBit = 1 << 26;
- struct SSE2Check {
- SSE2Check()
- {
- int flags;
-#if COMPILER(MSVC)
- _asm {
- mov eax, 1 // cpuid function 1 gives us the standard feature set
- cpuid;
- mov flags, edx;
- }
-#elif COMPILER(GCC)
- asm (
- "movl $0x1, %%eax;"
- "pushl %%ebx;"
- "cpuid;"
- "popl %%ebx;"
- "movl %%edx, %0;"
- : "=g" (flags)
- :
- : "%eax", "%ecx", "%edx"
- );
-#else
- flags = 0;
-#endif
- present = (flags & SSE2FeatureBit) != 0;
- }
- bool present;
- };
- static SSE2Check check;
- return check.present;
-}
+#else // !ENABLE(JIT_OPTIMIZE_ARITHMETIC)
-#endif
+/* ------------------------------ BEGIN: ENABLE(JIT_OPTIMIZE_ARITHMETIC) (OP_ADD, OP_SUB, OP_MUL) ------------------------------ */
void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned dst, unsigned src1, unsigned src2, OperandTypes types)
{
Structure* numberStructure = m_globalData->numberStructure.get();
- JmpSrc wasJSNumberCell1;
- JmpSrc wasJSNumberCell2;
+ Jump wasJSNumberCell1;
+ Jump wasJSNumberCell2;
- emitGetVirtualRegisters(src1, X86::eax, src2, X86::edx);
+ emitGetVirtualRegisters(src1, regT0, src2, regT1);
- if (types.second().isReusable() && isSSE2Present()) {
+ if (types.second().isReusable() && supportsFloatingPoint()) {
ASSERT(types.second().mightBeNumber());
// Check op2 is a number
- __ testl_i32r(JSImmediate::TagTypeNumber, X86::edx);
- JmpSrc op2imm = __ jne();
+ Jump op2imm = emitJumpIfImmediateInteger(regT1);
if (!types.second().definitelyIsNumber()) {
- emitJumpSlowCaseIfNotJSCell(X86::edx, src2);
- __ cmpl_im(reinterpret_cast<unsigned>(numberStructure), FIELD_OFFSET(JSCell, m_structure), X86::edx);
- addSlowCase(__ jne());
+ emitJumpSlowCaseIfNotJSCell(regT1, src2);
+ addSlowCase(checkStructure(regT1, numberStructure));
}
// (1) In this case src2 is a reusable number cell.
// Slow case if src1 is not a number type.
- __ testl_i32r(JSImmediate::TagTypeNumber, X86::eax);
- JmpSrc op1imm = __ jne();
+ Jump op1imm = emitJumpIfImmediateInteger(regT0);
if (!types.first().definitelyIsNumber()) {
- emitJumpSlowCaseIfNotJSCell(X86::eax, src1);
- __ cmpl_im(reinterpret_cast<unsigned>(numberStructure), FIELD_OFFSET(JSCell, m_structure), X86::eax);
- addSlowCase(__ jne());
+ emitJumpSlowCaseIfNotJSCell(regT0, src1);
+ addSlowCase(checkStructure(regT0, numberStructure));
}
// (1a) if we get here, src1 is also a number cell
- __ movsd_mr(FIELD_OFFSET(JSNumberCell, m_value), X86::eax, X86::xmm0);
- JmpSrc loadedDouble = __ jmp();
+ loadDouble(Address(regT0, FIELD_OFFSET(JSNumberCell, m_value)), fpRegT0);
+ Jump loadedDouble = jump();
// (1b) if we get here, src1 is an immediate
- __ linkJump(op1imm, __ label());
- emitFastArithImmToInt(X86::eax);
- __ cvtsi2sd_rr(X86::eax, X86::xmm0);
+ op1imm.link(this);
+ emitFastArithImmToInt(regT0);
+ convertInt32ToDouble(regT0, fpRegT0);
// (1c)
- __ linkJump(loadedDouble, __ label());
+ loadedDouble.link(this);
if (opcodeID == op_add)
- __ addsd_mr(FIELD_OFFSET(JSNumberCell, m_value), X86::edx, X86::xmm0);
+ addDouble(Address(regT1, FIELD_OFFSET(JSNumberCell, m_value)), fpRegT0);
else if (opcodeID == op_sub)
- __ subsd_mr(FIELD_OFFSET(JSNumberCell, m_value), X86::edx, X86::xmm0);
+ subDouble(Address(regT1, FIELD_OFFSET(JSNumberCell, m_value)), fpRegT0);
else {
ASSERT(opcodeID == op_mul);
- __ mulsd_mr(FIELD_OFFSET(JSNumberCell, m_value), X86::edx, X86::xmm0);
+ mulDouble(Address(regT1, FIELD_OFFSET(JSNumberCell, m_value)), fpRegT0);
}
// Store the result to the JSNumberCell and jump.
- __ movsd_rm(X86::xmm0, FIELD_OFFSET(JSNumberCell, m_value), X86::edx);
- __ movl_rr(X86::edx, X86::eax);
+ storeDouble(fpRegT0, Address(regT1, FIELD_OFFSET(JSNumberCell, m_value)));
+ move(regT1, regT0);
emitPutVirtualRegister(dst);
- wasJSNumberCell2 = __ jmp();
+ wasJSNumberCell2 = jump();
// (2) This handles cases where src2 is an immediate number.
// Two slow cases - either src1 isn't an immediate, or the subtract overflows.
- __ linkJump(op2imm, __ label());
- emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
- } else if (types.first().isReusable() && isSSE2Present()) {
+ op2imm.link(this);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ } else if (types.first().isReusable() && supportsFloatingPoint()) {
ASSERT(types.first().mightBeNumber());
// Check op1 is a number
- __ testl_i32r(JSImmediate::TagTypeNumber, X86::eax);
- JmpSrc op1imm = __ jne();
+ Jump op1imm = emitJumpIfImmediateInteger(regT0);
if (!types.first().definitelyIsNumber()) {
- emitJumpSlowCaseIfNotJSCell(X86::eax, src1);
- __ cmpl_im(reinterpret_cast<unsigned>(numberStructure), FIELD_OFFSET(JSCell, m_structure), X86::eax);
- addSlowCase(__ jne());
+ emitJumpSlowCaseIfNotJSCell(regT0, src1);
+ addSlowCase(checkStructure(regT0, numberStructure));
}
// (1) In this case src1 is a reusable number cell.
// Slow case if src2 is not a number type.
- __ testl_i32r(JSImmediate::TagTypeNumber, X86::edx);
- JmpSrc op2imm = __ jne();
+ Jump op2imm = emitJumpIfImmediateInteger(regT1);
if (!types.second().definitelyIsNumber()) {
- emitJumpSlowCaseIfNotJSCell(X86::edx, src2);
- __ cmpl_im(reinterpret_cast<unsigned>(numberStructure), FIELD_OFFSET(JSCell, m_structure), X86::edx);
- addSlowCase(__ jne());
+ emitJumpSlowCaseIfNotJSCell(regT1, src2);
+ addSlowCase(checkStructure(regT1, numberStructure));
}
// (1a) if we get here, src2 is also a number cell
- __ movsd_mr(FIELD_OFFSET(JSNumberCell, m_value), X86::edx, X86::xmm1);
- JmpSrc loadedDouble = __ jmp();
+ loadDouble(Address(regT1, FIELD_OFFSET(JSNumberCell, m_value)), fpRegT1);
+ Jump loadedDouble = jump();
// (1b) if we get here, src2 is an immediate
- __ linkJump(op2imm, __ label());
- emitFastArithImmToInt(X86::edx);
- __ cvtsi2sd_rr(X86::edx, X86::xmm1);
+ op2imm.link(this);
+ emitFastArithImmToInt(regT1);
+ convertInt32ToDouble(regT1, fpRegT1);
// (1c)
- __ linkJump(loadedDouble, __ label());
- __ movsd_mr(FIELD_OFFSET(JSNumberCell, m_value), X86::eax, X86::xmm0);
+ loadedDouble.link(this);
+ loadDouble(Address(regT0, FIELD_OFFSET(JSNumberCell, m_value)), fpRegT0);
if (opcodeID == op_add)
- __ addsd_rr(X86::xmm1, X86::xmm0);
+ addDouble(fpRegT1, fpRegT0);
else if (opcodeID == op_sub)
- __ subsd_rr(X86::xmm1, X86::xmm0);
+ subDouble(fpRegT1, fpRegT0);
else {
ASSERT(opcodeID == op_mul);
- __ mulsd_rr(X86::xmm1, X86::xmm0);
+ mulDouble(fpRegT1, fpRegT0);
}
- __ movsd_rm(X86::xmm0, FIELD_OFFSET(JSNumberCell, m_value), X86::eax);
+ storeDouble(fpRegT0, Address(regT0, FIELD_OFFSET(JSNumberCell, m_value)));
emitPutVirtualRegister(dst);
// Store the result to the JSNumberCell and jump.
- __ movsd_rm(X86::xmm0, FIELD_OFFSET(JSNumberCell, m_value), X86::eax);
+ storeDouble(fpRegT0, Address(regT0, FIELD_OFFSET(JSNumberCell, m_value)));
emitPutVirtualRegister(dst);
- wasJSNumberCell1 = __ jmp();
+ wasJSNumberCell1 = jump();
// (2) This handles cases where src1 is an immediate number.
// Two slow cases - either src2 isn't an immediate, or the subtract overflows.
- __ linkJump(op1imm, __ label());
- emitJumpSlowCaseIfNotImmediateInteger(X86::edx);
+ op1imm.link(this);
+ emitJumpSlowCaseIfNotImmediateInteger(regT1);
} else
- emitJumpSlowCaseIfNotImmediateIntegers(X86::eax, X86::edx, X86::ecx);
+ emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);
if (opcodeID == op_add) {
- emitFastArithDeTagImmediate(X86::eax);
- __ addl_rr(X86::edx, X86::eax);
- addSlowCase(__ jo());
+ emitFastArithDeTagImmediate(regT0);
+ addSlowCase(branchAdd32(Overflow, regT1, regT0));
} else if (opcodeID == op_sub) {
- __ subl_rr(X86::edx, X86::eax);
- addSlowCase(__ jo());
- signExtend32ToPtr(X86::eax, X86::eax);
- emitFastArithReTagImmediate(X86::eax, X86::eax);
+ addSlowCase(branchSub32(Overflow, regT1, regT0));
+ signExtend32ToPtr(regT0, regT0);
+ emitFastArithReTagImmediate(regT0, regT0);
} else {
ASSERT(opcodeID == op_mul);
// convert eax & edx from JSImmediates to ints, and check if either is zero
- emitFastArithImmToInt(X86::edx);
- Jump op1Zero = emitFastArithDeTagImmediateJumpIfZero(X86::eax);
- __ testl_rr(X86::edx, X86::edx);
- JmpSrc op2NonZero = __ jne();
+ emitFastArithImmToInt(regT1);
+ Jump op1Zero = emitFastArithDeTagImmediateJumpIfZero(regT0);
+ Jump op2NonZero = branchTest32(NonZero, regT1);
op1Zero.link(this);
// if either input is zero, add the two together, and check if the result is < 0.
// If it is, we have a problem: for N < 0, (N * 0) == -0, which is not representable as a JSImmediate.
- __ movl_rr(X86::eax, X86::ecx);
- __ addl_rr(X86::edx, X86::ecx);
- addSlowCase(__ js());
+ move(regT0, regT2);
+ addSlowCase(branchAdd32(Signed, regT1, regT2));
// Skip the above check if neither input is zero
- __ linkJump(op2NonZero, __ label());
- __ imull_rr(X86::edx, X86::eax);
- addSlowCase(__ jo());
- signExtend32ToPtr(X86::eax, X86::eax);
- emitFastArithReTagImmediate(X86::eax, X86::eax);
+ op2NonZero.link(this);
+ addSlowCase(branchMul32(Overflow, regT1, regT0));
+ signExtend32ToPtr(regT0, regT0);
+ emitFastArithReTagImmediate(regT0, regT0);
}
emitPutVirtualRegister(dst);
- if (types.second().isReusable() && isSSE2Present()) {
- __ linkJump(wasJSNumberCell2, __ label());
- }
- else if (types.first().isReusable() && isSSE2Present()) {
- __ linkJump(wasJSNumberCell1, __ label());
- }
+ if (types.second().isReusable() && supportsFloatingPoint())
+ wasJSNumberCell2.link(this);
+ else if (types.first().isReusable() && supportsFloatingPoint())
+ wasJSNumberCell1.link(this);
}
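The zero test in the multiply path above exists because JavaScript distinguishes -0 from +0: a product that is arithmetically zero must be -0 when exactly one operand is negative, and -0 is not representable as an immediate integer. A small sketch of the condition (hypothetical helper, same trick as the branchAdd32(Signed, ...) above):

    #include <cstdint>

    // True when an int32 multiply must take the slow path because the result
    // would be -0. When one operand is zero, a + b is just the other operand,
    // so a negative sum means the product's sign would be negative.
    bool mulNeedsNegativeZeroCheck(int32_t a, int32_t b)
    {
        if (a != 0 && b != 0)
            return false; // a nonzero product can never be -0
        return (a + b) < 0;
    }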
void JIT::compileBinaryArithOpSlowCase(OpcodeID opcodeID, Vector<SlowCaseEntry>::iterator& iter, unsigned dst, unsigned src1, unsigned src2, OperandTypes types)
{
linkSlowCase(iter);
- if (types.second().isReusable() && isSSE2Present()) {
+ if (types.second().isReusable() && supportsFloatingPoint()) {
if (!types.first().definitelyIsNumber()) {
linkSlowCaseIfNotJSCell(iter, src1);
linkSlowCase(iter);
@@ -809,7 +1227,7 @@ void JIT::compileBinaryArithOpSlowCase(OpcodeID opcodeID, Vector<SlowCaseEntry>:
linkSlowCaseIfNotJSCell(iter, src2);
linkSlowCase(iter);
}
- } else if (types.first().isReusable() && isSSE2Present()) {
+ } else if (types.first().isReusable() && supportsFloatingPoint()) {
if (!types.first().definitelyIsNumber()) {
linkSlowCaseIfNotJSCell(iter, src1);
linkSlowCase(iter);
@@ -825,50 +1243,44 @@ void JIT::compileBinaryArithOpSlowCase(OpcodeID opcodeID, Vector<SlowCaseEntry>:
if (opcodeID == op_mul)
linkSlowCase(iter);
- emitPutJITStubArgFromVirtualRegister(src1, 1, X86::ecx);
- emitPutJITStubArgFromVirtualRegister(src2, 2, X86::ecx);
- if (opcodeID == op_add)
- emitCTICall(JITStubs::cti_op_add);
- else if (opcodeID == op_sub)
- emitCTICall(JITStubs::cti_op_sub);
- else {
- ASSERT(opcodeID == op_mul);
- emitCTICall(JITStubs::cti_op_mul);
- }
- emitPutVirtualRegister(dst);
+ JITStubCall stubCall(this, opcodeID == op_add ? JITStubs::cti_op_add : opcodeID == op_sub ? JITStubs::cti_op_sub : JITStubs::cti_op_mul);
+ stubCall.addArgument(src1, regT2);
+ stubCall.addArgument(src2, regT2);
+ stubCall.call(dst);
}
-void JIT::compileFastArith_op_add(Instruction* currentInstruction)
+void JIT::emit_op_add(Instruction* currentInstruction)
{
unsigned result = currentInstruction[1].u.operand;
unsigned op1 = currentInstruction[2].u.operand;
unsigned op2 = currentInstruction[3].u.operand;
if (isOperandConstantImmediateInt(op1)) {
- emitGetVirtualRegister(op2, X86::eax);
- emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
- addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op1) << JSImmediate::IntegerPayloadShift), X86::eax));
- signExtend32ToPtr(X86::eax, X86::eax);
+ emitGetVirtualRegister(op2, regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op1) << JSImmediate::IntegerPayloadShift), regT0));
+ signExtend32ToPtr(regT0, regT0);
emitPutVirtualRegister(result);
} else if (isOperandConstantImmediateInt(op2)) {
- emitGetVirtualRegister(op1, X86::eax);
- emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
- addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op2) << JSImmediate::IntegerPayloadShift), X86::eax));
- signExtend32ToPtr(X86::eax, X86::eax);
+ emitGetVirtualRegister(op1, regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op2) << JSImmediate::IntegerPayloadShift), regT0));
+ signExtend32ToPtr(regT0, regT0);
emitPutVirtualRegister(result);
} else {
OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
if (types.first().mightBeNumber() && types.second().mightBeNumber())
compileBinaryArithOp(op_add, result, op1, op2, OperandTypes::fromInt(currentInstruction[4].u.operand));
else {
- emitPutJITStubArgFromVirtualRegister(op1, 1, X86::ecx);
- emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx);
- emitCTICall(JITStubs::cti_op_add);
- emitPutVirtualRegister(result);
+ JITStubCall stubCall(this, JITStubs::cti_op_add);
+ stubCall.addArgument(op1, regT2);
+ stubCall.addArgument(op2, regT2);
+ stubCall.call(result);
}
}
}
-void JIT::compileFastArithSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+
+void JIT::emitSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
unsigned result = currentInstruction[1].u.operand;
unsigned op1 = currentInstruction[2].u.operand;
@@ -877,21 +1289,21 @@ void JIT::compileFastArithSlow_op_add(Instruction* currentInstruction, Vector<Sl
if (isOperandConstantImmediateInt(op1)) {
Jump notImm = getSlowCase(iter);
linkSlowCase(iter);
- sub32(Imm32(getConstantOperandImmediateInt(op1) << JSImmediate::IntegerPayloadShift), X86::eax);
+ sub32(Imm32(getConstantOperandImmediateInt(op1) << JSImmediate::IntegerPayloadShift), regT0);
notImm.link(this);
- emitPutJITStubArgFromVirtualRegister(op1, 1, X86::ecx);
- emitPutJITStubArg(X86::eax, 2);
- emitCTICall(JITStubs::cti_op_add);
- emitPutVirtualRegister(result);
+ JITStubCall stubCall(this, JITStubs::cti_op_add);
+ stubCall.addArgument(op1, regT2);
+ stubCall.addArgument(regT0);
+ stubCall.call(result);
} else if (isOperandConstantImmediateInt(op2)) {
Jump notImm = getSlowCase(iter);
linkSlowCase(iter);
- sub32(Imm32(getConstantOperandImmediateInt(op2) << JSImmediate::IntegerPayloadShift), X86::eax);
+ sub32(Imm32(getConstantOperandImmediateInt(op2) << JSImmediate::IntegerPayloadShift), regT0);
notImm.link(this);
- emitPutJITStubArg(X86::eax, 1);
- emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx);
- emitCTICall(JITStubs::cti_op_add);
- emitPutVirtualRegister(result);
+ JITStubCall stubCall(this, JITStubs::cti_op_add);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(op2, regT2);
+ stubCall.call(result);
} else {
OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
ASSERT(types.first().mightBeNumber() && types.second().mightBeNumber());
@@ -899,7 +1311,7 @@ void JIT::compileFastArithSlow_op_add(Instruction* currentInstruction, Vector<Sl
}
}
-void JIT::compileFastArith_op_mul(Instruction* currentInstruction)
+void JIT::emit_op_mul(Instruction* currentInstruction)
{
unsigned result = currentInstruction[1].u.operand;
unsigned op1 = currentInstruction[2].u.operand;
@@ -908,25 +1320,26 @@ void JIT::compileFastArith_op_mul(Instruction* currentInstruction)
// For now, only plant a fast int case if the constant operand is greater than zero.
int32_t value;
if (isOperandConstantImmediateInt(op1) && ((value = getConstantOperandImmediateInt(op1)) > 0)) {
- emitGetVirtualRegister(op2, X86::eax);
- emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
- emitFastArithDeTagImmediate(X86::eax);
- addSlowCase(branchMul32(Overflow, Imm32(value), X86::eax, X86::eax));
- signExtend32ToPtr(X86::eax, X86::eax);
- emitFastArithReTagImmediate(X86::eax, X86::eax);
+ emitGetVirtualRegister(op2, regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ emitFastArithDeTagImmediate(regT0);
+ addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT0));
+ signExtend32ToPtr(regT0, regT0);
+ emitFastArithReTagImmediate(regT0, regT0);
emitPutVirtualRegister(result);
} else if (isOperandConstantImmediateInt(op2) && ((value = getConstantOperandImmediateInt(op2)) > 0)) {
- emitGetVirtualRegister(op1, X86::eax);
- emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
- emitFastArithDeTagImmediate(X86::eax);
- addSlowCase(branchMul32(Overflow, Imm32(value), X86::eax, X86::eax));
- signExtend32ToPtr(X86::eax, X86::eax);
- emitFastArithReTagImmediate(X86::eax, X86::eax);
+ emitGetVirtualRegister(op1, regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ emitFastArithDeTagImmediate(regT0);
+ addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT0));
+ signExtend32ToPtr(regT0, regT0);
+ emitFastArithReTagImmediate(regT0, regT0);
emitPutVirtualRegister(result);
} else
compileBinaryArithOp(op_mul, result, op1, op2, OperandTypes::fromInt(currentInstruction[4].u.operand));
}
-void JIT::compileFastArithSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+
+void JIT::emitSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
unsigned result = currentInstruction[1].u.operand;
unsigned op1 = currentInstruction[2].u.operand;
@@ -937,24 +1350,27 @@ void JIT::compileFastArithSlow_op_mul(Instruction* currentInstruction, Vector<Sl
linkSlowCase(iter);
linkSlowCase(iter);
// There is an extra slow case for (op1 * -N) or (-N * op2), to check for 0 since this should produce a result of -0.
- emitPutJITStubArgFromVirtualRegister(op1, 1, X86::ecx);
- emitPutJITStubArgFromVirtualRegister(op2, 2, X86::ecx);
- emitCTICall(JITStubs::cti_op_mul);
- emitPutVirtualRegister(result);
+ JITStubCall stubCall(this, JITStubs::cti_op_mul);
+ stubCall.addArgument(op1, regT2);
+ stubCall.addArgument(op2, regT2);
+ stubCall.call(result);
} else
compileBinaryArithOpSlowCase(op_mul, iter, result, op1, op2, OperandTypes::fromInt(currentInstruction[4].u.operand));
}
-void JIT::compileFastArith_op_sub(Instruction* currentInstruction)
+void JIT::emit_op_sub(Instruction* currentInstruction)
{
compileBinaryArithOp(op_sub, currentInstruction[1].u.operand, currentInstruction[2].u.operand, currentInstruction[3].u.operand, OperandTypes::fromInt(currentInstruction[4].u.operand));
}
-void JIT::compileFastArithSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+
+void JIT::emitSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
compileBinaryArithOpSlowCase(op_sub, iter, currentInstruction[1].u.operand, currentInstruction[2].u.operand, currentInstruction[3].u.operand, OperandTypes::fromInt(currentInstruction[4].u.operand));
}
-#endif
+#endif // !ENABLE(JIT_OPTIMIZE_ARITHMETIC)
+
+/* ------------------------------ END: OP_ADD, OP_SUB, OP_MUL ------------------------------ */
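One pattern worth calling out across all three tiers above: the integer fast path mutates the operand register in place (e.g. branchAdd32), so the slow case must first undo that mutation (the sub32 calls) before passing the operands to the stub. A hedged sketch of why that recovery is exact:

    #include <cassert>
    #include <cstdint>

    // Sketch of the speculate-then-recover shape: the fast path adds in place
    // and branches out on signed overflow; the slow case subtracts the constant
    // back out so the stub sees the original operand. Wrapping (unsigned)
    // arithmetic makes the recovery exact even after overflow.
    int32_t addWithRecovery(int32_t lhs, int32_t rhs, bool& overflowed)
    {
        uint32_t wrapped = static_cast<uint32_t>(lhs) + static_cast<uint32_t>(rhs);
        int64_t exact = static_cast<int64_t>(lhs) + static_cast<int64_t>(rhs);
        overflowed = (exact != static_cast<int32_t>(wrapped));
        if (!overflowed)
            return static_cast<int32_t>(wrapped);
        int32_t recovered = static_cast<int32_t>(wrapped - static_cast<uint32_t>(rhs));
        assert(recovered == lhs); // operand restored for the stub call
        return recovered;
    }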
} // namespace JSC
diff --git a/JavaScriptCore/jit/JITCall.cpp b/JavaScriptCore/jit/JITCall.cpp
index 62c7149..cf852be 100644
--- a/JavaScriptCore/jit/JITCall.cpp
+++ b/JavaScriptCore/jit/JITCall.cpp
@@ -30,6 +30,7 @@
#include "CodeBlock.h"
#include "JITInlineMethods.h"
+#include "JITStubCall.h"
#include "JSArray.h"
#include "JSFunction.h"
#include "Interpreter.h"
@@ -44,38 +45,13 @@ using namespace std;
namespace JSC {
-void JIT::unlinkCall(CallLinkInfo* callLinkInfo)
-{
- // When the JSFunction is deleted the pointer embedded in the instruction stream will no longer be valid
- // (and, if a new JSFunction happened to be constructed at the same location, we could get a false positive
- // match). Reset the check so it no longer matches.
- callLinkInfo->hotPathBegin.repatch(JSValuePtr::encode(jsImpossibleValue()));
-}
-
-//void JIT::linkCall(JSFunction* , CodeBlock* , JITCode , CallLinkInfo* callLinkInfo, int )
-void JIT::linkCall(JSFunction* callee, CodeBlock* calleeCodeBlock, JITCode ctiCode, CallLinkInfo* callLinkInfo, int callerArgCount)
-{
- // Currently we only link calls with the exact number of arguments.
- if (callerArgCount == calleeCodeBlock->m_numParameters) {
- ASSERT(!callLinkInfo->isLinked());
-
- calleeCodeBlock->addCaller(callLinkInfo);
-
- callLinkInfo->hotPathBegin.repatch(callee);
- callLinkInfo->hotPathOther.relink(ctiCode.addressForCall());
- }
-
- // patch the instruction that jumps out to the cold path, so that we only try to link once.
- callLinkInfo->hotPathBegin.jumpAtOffset(patchOffsetOpCallCompareToJump).relink(callLinkInfo->coldPathOther);
-}
-
void JIT::compileOpCallInitializeCallFrame()
{
store32(regT1, Address(callFrameRegister, RegisterFile::ArgumentCount * static_cast<int>(sizeof(Register))));
- loadPtr(Address(regT2, FIELD_OFFSET(JSFunction, m_scopeChain) + FIELD_OFFSET(ScopeChain, m_node)), regT1); // newScopeChain
+ loadPtr(Address(regT2, FIELD_OFFSET(JSFunction, m_data) + FIELD_OFFSET(ScopeChain, m_node)), regT1); // newScopeChain
- storePtr(ImmPtr(JSValuePtr::encode(noValue())), Address(callFrameRegister, RegisterFile::OptionalCalleeArguments * static_cast<int>(sizeof(Register))));
+ storePtr(ImmPtr(JSValue::encode(JSValue())), Address(callFrameRegister, RegisterFile::OptionalCalleeArguments * static_cast<int>(sizeof(Register))));
storePtr(regT2, Address(callFrameRegister, RegisterFile::Callee * static_cast<int>(sizeof(Register))));
storePtr(regT1, Address(callFrameRegister, RegisterFile::ScopeChain * static_cast<int>(sizeof(Register))));
}
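For readers new to the register-file layout: the function above fills four header slots of the callee's frame. The struct below is only an illustrative mirror; the authoritative offsets come from RegisterFile::CallFrameHeaderEntry.

    #include <cstdint>

    // Illustrative mirror of the slots written by compileOpCallInitializeCallFrame.
    // Field order and types here are assumptions for readability, not the layout.
    struct CallFrameHeaderSketch {
        int32_t argumentCount;         // store32(regT1, ... ArgumentCount ...)
        void* optionalCalleeArguments; // cleared with JSValue::encode(JSValue())
        void* callee;                  // regT2, the JSFunction being called
        void* scopeChain;              // loaded from the callee's m_data
    };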
@@ -87,19 +63,19 @@ void JIT::compileOpCallSetupArgs(Instruction* instruction)
// ecx holds func
emitPutJITStubArg(regT2, 1);
- emitPutJITStubArgConstant(registerOffset, 2);
emitPutJITStubArgConstant(argCount, 3);
+ emitPutJITStubArgConstant(registerOffset, 2);
}
-
-void JIT::compileOpCallEvalSetupArgs(Instruction* instruction)
+
+void JIT::compileOpCallVarargsSetupArgs(Instruction* instruction)
{
- int argCount = instruction[3].u.operand;
int registerOffset = instruction[4].u.operand;
-
+
// ecx holds func
emitPutJITStubArg(regT2, 1);
- emitPutJITStubArgConstant(registerOffset, 2);
- emitPutJITStubArgConstant(argCount, 3);
+ emitPutJITStubArg(regT1, 3);
+ addPtr(Imm32(registerOffset), regT1, regT0);
+ emitPutJITStubArg(regT0, 2);
}
void JIT::compileOpConstructSetupArgs(Instruction* instruction)
@@ -117,8 +93,51 @@ void JIT::compileOpConstructSetupArgs(Instruction* instruction)
emitPutJITStubArgConstant(thisRegister, 5);
}
+void JIT::compileOpCallVarargs(Instruction* instruction)
+{
+ int dst = instruction[1].u.operand;
+ int callee = instruction[2].u.operand;
+ int argCountRegister = instruction[3].u.operand;
+
+ emitGetVirtualRegister(argCountRegister, regT1);
+ emitGetVirtualRegister(callee, regT2);
+ compileOpCallVarargsSetupArgs(instruction);
+
+ // Check for JSFunctions.
+ emitJumpSlowCaseIfNotJSCell(regT2);
+ addSlowCase(branchPtr(NotEqual, Address(regT2), ImmPtr(m_globalData->jsFunctionVPtr)));
+
+ // Speculatively roll the callframe, assuming argCount will match the arity.
+ mul32(Imm32(sizeof(Register)), regT0, regT0);
+ intptr_t offset = (intptr_t)sizeof(Register) * (intptr_t)RegisterFile::CallerFrame;
+ addPtr(Imm32((int32_t)offset), regT0, regT3);
+ addPtr(callFrameRegister, regT3);
+ storePtr(callFrameRegister, regT3);
+ addPtr(regT0, callFrameRegister);
+ emitNakedCall(m_globalData->jitStubs.ctiVirtualCall());
+
+ // Put the return value in dst. In the interpreter, op_ret does this.
+ emitPutVirtualRegister(dst);
+
+ sampleCodeBlock(m_codeBlock);
+}
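The speculative frame roll above in plain terms: regT0 already holds registerOffset + argCount (from compileOpCallVarargsSetupArgs), which is scaled to bytes; the new frame's CallerFrame slot is linked back to the old frame; then the frame pointer is advanced. A hedged sketch with hypothetical names:

    #include <cstddef>

    // 'slots' stands for registerOffset + argCount; 'callerFrameSlot' for
    // RegisterFile::CallerFrame. Both names are illustrative.
    char* rollCallFrame(char* callFrame, ptrdiff_t slots, ptrdiff_t callerFrameSlot,
                        size_t registerSize)
    {
        char* newFrame = callFrame + slots * registerSize;
        // Link back to the caller before moving the frame pointer (the storePtr
        // through regT3 above).
        *reinterpret_cast<char**>(newFrame + callerFrameSlot * registerSize) = callFrame;
        return newFrame; // the callee runs with this as callFrameRegister
    }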
+
+void JIT::compileOpCallVarargsSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ int dst = instruction[1].u.operand;
+
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, JITStubs::cti_op_call_NotJSFunction);
+ stubCall.call(dst); // In the interpreter, the callee puts the return value in dst.
+
+ sampleCodeBlock(m_codeBlock);
+}
+
#if !ENABLE(JIT_OPTIMIZE_CALL)
+/* ------------------------------ BEGIN: !ENABLE(JIT_OPTIMIZE_CALL) ------------------------------ */
+
void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned)
{
int dst = instruction[1].u.operand;
@@ -129,11 +148,8 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned)
// Handle eval
Jump wasEval;
if (opcodeID == op_call_eval) {
- emitGetVirtualRegister(callee, regT2);
- compileOpCallEvalSetupArgs(instruction);
-
- emitCTICall(JITStubs::cti_op_call_eval);
- wasEval = branchPtr(NotEqual, regT0, ImmPtr(JSValuePtr::encode(jsImpossibleValue())));
+ CallEvalJITStub(this, instruction).call();
+ wasEval = branchPtr(NotEqual, regT0, ImmPtr(JSValue::encode(JSValue())));
}
emitGetVirtualRegister(callee, regT2);
@@ -149,8 +165,7 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned)
// First, in the case of a construct, allocate the new object.
if (opcodeID == op_construct) {
- emitCTICall(JITStubs::cti_op_construct_JSConstruct);
- emitPutVirtualRegister(registerOffset - RegisterFile::CallFrameHeaderSize - argCount);
+ JITStubCall(this, JITStubs::cti_op_construct_JSConstruct).call(registerOffset - RegisterFile::CallFrameHeaderSize - argCount);
emitGetVirtualRegister(callee, regT2);
}
@@ -176,16 +191,15 @@ void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>:
linkSlowCase(iter);
linkSlowCase(iter);
-
- // This handles host functions
- emitCTICall(((opcodeID == op_construct) ? JITStubs::cti_op_construct_NotJSConstruct : JITStubs::cti_op_call_NotJSFunction));
- // Put the return value in dst. In the interpreter, op_ret does this.
- emitPutVirtualRegister(dst);
+ JITStubCall stubCall(this, opcodeID == op_construct ? JITStubs::cti_op_construct_NotJSConstruct : JITStubs::cti_op_call_NotJSFunction);
+ stubCall.call(dst); // In the interpreter, the callee puts the return value in dst.
sampleCodeBlock(m_codeBlock);
}
-#else
+#else // !ENABLE(JIT_OPTIMIZE_CALL)
+
+/* ------------------------------ BEGIN: ENABLE(JIT_OPTIMIZE_CALL) ------------------------------ */
void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned callLinkInfoIndex)
{
@@ -197,18 +211,15 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned ca
// Handle eval
Jump wasEval;
if (opcodeID == op_call_eval) {
- emitGetVirtualRegister(callee, regT2);
- compileOpCallEvalSetupArgs(instruction);
-
- emitCTICall(JITStubs::cti_op_call_eval);
- wasEval = branchPtr(NotEqual, regT0, ImmPtr(JSValuePtr::encode(jsImpossibleValue())));
+ CallEvalJITStub(this, instruction).call();
+ wasEval = branchPtr(NotEqual, regT0, ImmPtr(JSValue::encode(JSValue())));
}
// This plants a check for a cached JSFunction value, so we can plant a fast link to the callee.
// This deliberately leaves the callee in ecx, used when setting up the stack frame below
emitGetVirtualRegister(callee, regT2);
DataLabelPtr addressOfLinkedFunctionCheck;
- Jump jumpToSlow = branchPtrWithPatch(NotEqual, regT2, addressOfLinkedFunctionCheck, ImmPtr(JSValuePtr::encode(jsImpossibleValue())));
+ Jump jumpToSlow = branchPtrWithPatch(NotEqual, regT2, addressOfLinkedFunctionCheck, ImmPtr(JSValue::encode(JSValue())));
addSlowCase(jumpToSlow);
ASSERT(differenceBetween(addressOfLinkedFunctionCheck, jumpToSlow) == patchOffsetOpCallCompareToJump);
m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck;
@@ -222,23 +233,23 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned ca
emitPutJITStubArg(regT2, 1);
emitPutJITStubArgFromVirtualRegister(proto, 4, regT0);
- emitCTICall(JITStubs::cti_op_construct_JSConstruct);
- emitPutVirtualRegister(thisRegister);
+ JITStubCall stubCall(this, JITStubs::cti_op_construct_JSConstruct);
+ stubCall.call(thisRegister);
emitGetVirtualRegister(callee, regT2);
}
// Fast version of stack frame initialization, directly relative to edi.
// Note that this omits to set up RegisterFile::CodeBlock, which is set in the callee
- storePtr(ImmPtr(JSValuePtr::encode(noValue())), Address(callFrameRegister, (registerOffset + RegisterFile::OptionalCalleeArguments) * static_cast<int>(sizeof(Register))));
+ storePtr(ImmPtr(JSValue::encode(JSValue())), Address(callFrameRegister, (registerOffset + RegisterFile::OptionalCalleeArguments) * static_cast<int>(sizeof(Register))));
storePtr(regT2, Address(callFrameRegister, (registerOffset + RegisterFile::Callee) * static_cast<int>(sizeof(Register))));
- loadPtr(Address(regT2, FIELD_OFFSET(JSFunction, m_scopeChain) + FIELD_OFFSET(ScopeChain, m_node)), regT1); // newScopeChain
+ loadPtr(Address(regT2, FIELD_OFFSET(JSFunction, m_data) + FIELD_OFFSET(ScopeChain, m_node)), regT1); // newScopeChain
store32(Imm32(argCount), Address(callFrameRegister, (registerOffset + RegisterFile::ArgumentCount) * static_cast<int>(sizeof(Register))));
storePtr(callFrameRegister, Address(callFrameRegister, (registerOffset + RegisterFile::CallerFrame) * static_cast<int>(sizeof(Register))));
storePtr(regT1, Address(callFrameRegister, (registerOffset + RegisterFile::ScopeChain) * static_cast<int>(sizeof(Register))));
addPtr(Imm32(registerOffset * sizeof(Register)), callFrameRegister);
// Call to the callee
- m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedCall(reinterpret_cast<void*>(0));
+ m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedCall();
if (opcodeID == op_call_eval)
wasEval.link(this);
@@ -270,8 +281,7 @@ void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>:
// First, in the case of a construct, allocate the new object.
if (opcodeID == op_construct) {
- emitCTICall(JITStubs::cti_op_construct_JSConstruct);
- emitPutVirtualRegister(registerOffset - RegisterFile::CallFrameHeaderSize - argCount);
+ JITStubCall(this, JITStubs::cti_op_construct_JSConstruct).call(registerOffset - RegisterFile::CallFrameHeaderSize - argCount);
emitGetVirtualRegister(callee, regT2);
}
@@ -286,7 +296,6 @@ void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>:
Jump storeResultForFirstRun = jump();
-// FIXME: this label can be removed, since it is a fixed offset from 'callReturnLocation'.
// This is the address for the cold path *after* the first run (which tries to link the call).
m_callStructureStubCompilationInfo[callLinkInfoIndex].coldPathOther = MacroAssembler::Label(this);
@@ -304,7 +313,8 @@ void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>:
isNotObject.link(this);
callLinkFailNotObject.link(this);
callLinkFailNotJSFunction.link(this);
- emitCTICall(((opcodeID == op_construct) ? JITStubs::cti_op_construct_NotJSConstruct : JITStubs::cti_op_call_NotJSFunction));
+ JITStubCall stubCall(this, opcodeID == op_construct ? JITStubs::cti_op_construct_NotJSConstruct : JITStubs::cti_op_call_NotJSFunction);
+ stubCall.call();
Jump wasNotJSFunction = jump();
// Next, handle JSFunctions...
@@ -312,8 +322,8 @@ void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>:
// First, in the case of a construct, allocate the new object.
if (opcodeID == op_construct) {
- emitCTICall(JITStubs::cti_op_construct_JSConstruct);
- emitPutVirtualRegister(registerOffset - RegisterFile::CallFrameHeaderSize - argCount);
+ JITStubCall stubCall(this, JITStubs::cti_op_construct_JSConstruct);
+ stubCall.call(registerOffset - RegisterFile::CallFrameHeaderSize - argCount);
emitGetVirtualRegister(callee, regT2);
}
@@ -332,7 +342,9 @@ void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>:
sampleCodeBlock(m_codeBlock);
}
-#endif
+/* ------------------------------ END: !ENABLE / ENABLE(JIT_OPTIMIZE_CALL) ------------------------------ */
+
+#endif // !ENABLE(JIT_OPTIMIZE_CALL)
} // namespace JSC
diff --git a/JavaScriptCore/jit/JITCode.h b/JavaScriptCore/jit/JITCode.h
index 0490d0e..7ee644b 100644
--- a/JavaScriptCore/jit/JITCode.h
+++ b/JavaScriptCore/jit/JITCode.h
@@ -32,6 +32,7 @@
#include "CallFrame.h"
#include "JSValue.h"
+#include "MacroAssemblerCodeRef.h"
#include "Profiler.h"
namespace JSC {
@@ -39,31 +40,27 @@ namespace JSC {
class JSGlobalData;
class RegisterFile;
- extern "C" {
- JSValueEncodedAsPointer* ctiTrampoline(
-#if PLATFORM(X86_64)
- // FIXME: (bug #22910) this will force all arguments onto the stack (regparm(0) does not appear to have any effect).
- // We can allow register passing here, and move the writes of these values into the trampoline.
- void*, void*, void*, void*, void*, void*,
-#endif
- void* code, RegisterFile*, CallFrame*, JSValuePtr* exception, Profiler**, JSGlobalData*);
- };
-
class JITCode {
+ typedef MacroAssemblerCodeRef CodeRef;
+ typedef MacroAssemblerCodePtr CodePtr;
public:
- JITCode(void* code)
- : code(code)
+ JITCode()
+ {
+ }
+
+ JITCode(const CodeRef ref)
+ : m_ref(ref)
{
}
- operator bool()
+ bool operator !() const
{
- return code != 0;
+ return !m_ref.m_code.executableAddress();
}
- void* addressForCall()
+ CodePtr addressForCall()
{
- return code;
+ return m_ref.m_code;
}
// This function returns the offset in bytes of 'pointerIntoCode' into
@@ -71,23 +68,48 @@ namespace JSC {
// block of code. It is ASSERTed that no codeblock is >4GB in size.
unsigned offsetOf(void* pointerIntoCode)
{
- intptr_t result = reinterpret_cast<intptr_t>(pointerIntoCode) - reinterpret_cast<intptr_t>(code);
+ intptr_t result = reinterpret_cast<intptr_t>(pointerIntoCode) - reinterpret_cast<intptr_t>(m_ref.m_code.executableAddress());
ASSERT(static_cast<intptr_t>(static_cast<unsigned>(result)) == result);
return static_cast<unsigned>(result);
}
// Execute the code!
- inline JSValuePtr execute(RegisterFile* registerFile, CallFrame* callFrame, JSGlobalData* globalData, JSValuePtr* exception)
+ inline JSValue execute(RegisterFile* registerFile, CallFrame* callFrame, JSGlobalData* globalData, JSValue* exception)
{
- return JSValuePtr::decode(ctiTrampoline(
+ return JSValue::decode(ctiTrampoline(
#if PLATFORM(X86_64)
0, 0, 0, 0, 0, 0,
#endif
- code, registerFile, callFrame, exception, Profiler::enabledProfilerReference(), globalData));
+ m_ref.m_code.executableAddress(), registerFile, callFrame, exception, Profiler::enabledProfilerReference(), globalData));
+ }
+
+#ifndef NDEBUG
+ size_t size()
+ {
+ ASSERT(m_ref.m_code.executableAddress());
+ return m_ref.m_size;
+ }
+#endif
+
+ ExecutablePool* getExecutablePool()
+ {
+ return m_ref.m_executablePool.get();
+ }
+
+ // Host functions are a bit special; they have an m_code pointer but they
+ // do not individually ref the executable pool containing the trampoline.
+ static JITCode HostFunction(CodePtr code)
+ {
+ return JITCode(code.dataLocation(), 0, 0);
}
private:
- void* code;
+ JITCode(void* code, PassRefPtr<ExecutablePool> executablePool, size_t size)
+ : m_ref(code, executablePool, size)
+ {
+ }
+
+ CodeRef m_ref;
};
};
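A hedged usage sketch of the reworked class (the call site is assumed and relies only on the JITCode API shown above; the surrounding JSC headers are presumed included):

    // Illustrates the new CodeRef-backed API: operator! now tests the
    // CodeRef's executable address rather than a bare code pointer.
    JSValue runIfCompiled(JITCode& code, RegisterFile* registerFile, CallFrame* callFrame,
                          JSGlobalData* globalData, JSValue* exception)
    {
        if (!code)
            return JSValue();
        return code.execute(registerFile, callFrame, globalData, exception);
    }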
diff --git a/JavaScriptCore/jit/JITInlineMethods.h b/JavaScriptCore/jit/JITInlineMethods.h
index 684c404..b3dc418 100644
--- a/JavaScriptCore/jit/JITInlineMethods.h
+++ b/JavaScriptCore/jit/JITInlineMethods.h
@@ -53,8 +53,8 @@ ALWAYS_INLINE void JIT::emitGetVirtualRegister(int src, RegisterID dst)
// TODO: we want to reuse values that are already in registers if we can - add a register allocator!
if (m_codeBlock->isConstantRegisterIndex(src)) {
- JSValuePtr value = m_codeBlock->getConstant(src);
- move(ImmPtr(JSValuePtr::encode(value)), dst);
+ JSValue value = m_codeBlock->getConstant(src);
+ move(ImmPtr(JSValue::encode(value)), dst);
killLastResultRegister();
return;
}
@@ -112,7 +112,7 @@ ALWAYS_INLINE void JIT::emitGetJITStubArg(unsigned argumentNumber, RegisterID ds
peek(dst, argumentNumber);
}
-ALWAYS_INLINE JSValuePtr JIT::getConstantOperand(unsigned src)
+ALWAYS_INLINE JSValue JIT::getConstantOperand(unsigned src)
{
ASSERT(m_codeBlock->isConstantRegisterIndex(src));
return m_codeBlock->getConstant(src);
@@ -132,8 +132,8 @@ ALWAYS_INLINE bool JIT::isOperandConstantImmediateInt(unsigned src)
ALWAYS_INLINE void JIT::emitPutJITStubArgFromVirtualRegister(unsigned src, unsigned argumentNumber, RegisterID scratch)
{
if (m_codeBlock->isConstantRegisterIndex(src)) {
- JSValuePtr value = m_codeBlock->getConstant(src);
- emitPutJITStubArgConstant(JSValuePtr::encode(value), argumentNumber);
+ JSValue value = m_codeBlock->getConstant(src);
+ emitPutJITStubArgConstant(JSValue::encode(value), argumentNumber);
} else {
loadPtr(Address(callFrameRegister, src * sizeof(Register)), scratch);
emitPutJITStubArg(scratch, argumentNumber);
@@ -142,22 +142,6 @@ ALWAYS_INLINE void JIT::emitPutJITStubArgFromVirtualRegister(unsigned src, unsig
killLastResultRegister();
}
-ALWAYS_INLINE void JIT::emitPutCTIParam(void* value, unsigned name)
-{
- poke(ImmPtr(value), name);
-}
-
-ALWAYS_INLINE void JIT::emitPutCTIParam(RegisterID from, unsigned name)
-{
- poke(from, name);
-}
-
-ALWAYS_INLINE void JIT::emitGetCTIParam(unsigned name, RegisterID to)
-{
- peek(to, name);
- killLastResultRegister();
-}
-
ALWAYS_INLINE void JIT::emitPutToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry entry)
{
storePtr(from, Address(callFrameRegister, entry * sizeof(Register)));
@@ -168,9 +152,15 @@ ALWAYS_INLINE void JIT::emitPutImmediateToCallFrameHeader(void* value, RegisterF
storePtr(ImmPtr(value), Address(callFrameRegister, entry * sizeof(Register)));
}
-ALWAYS_INLINE void JIT::emitGetFromCallFrameHeader(RegisterFile::CallFrameHeaderEntry entry, RegisterID to)
+ALWAYS_INLINE void JIT::emitGetFromCallFrameHeaderPtr(RegisterFile::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
{
- loadPtr(Address(callFrameRegister, entry * sizeof(Register)), to);
+ loadPtr(Address(from, entry * sizeof(Register)), to);
+ killLastResultRegister();
+}
+
+ALWAYS_INLINE void JIT::emitGetFromCallFrameHeader32(RegisterFile::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
+{
+ load32(Address(from, entry * sizeof(Register)), to);
killLastResultRegister();
}
@@ -183,65 +173,78 @@ ALWAYS_INLINE void JIT::emitPutVirtualRegister(unsigned dst, RegisterID from)
ALWAYS_INLINE void JIT::emitInitRegister(unsigned dst)
{
- storePtr(ImmPtr(JSValuePtr::encode(jsUndefined())), Address(callFrameRegister, dst * sizeof(Register)));
+ storePtr(ImmPtr(JSValue::encode(jsUndefined())), Address(callFrameRegister, dst * sizeof(Register)));
// FIXME: #ifndef NDEBUG, Write the correct m_type to the register.
}
-ALWAYS_INLINE JIT::Call JIT::emitNakedCall(void* function)
+ALWAYS_INLINE JIT::Call JIT::emitNakedCall(CodePtr function)
{
ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
Call nakedCall = nearCall();
- m_calls.append(CallRecord(nakedCall, m_bytecodeIndex, function));
+ m_calls.append(CallRecord(nakedCall, m_bytecodeIndex, function.executableAddress()));
return nakedCall;
}
-#if USE(JIT_STUB_ARGUMENT_REGISTER)
-ALWAYS_INLINE void JIT::restoreArgumentReference()
+#if PLATFORM(X86) || PLATFORM(X86_64)
+
+ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg)
{
- move(stackPointerRegister, firstArgumentRegister);
- emitPutCTIParam(callFrameRegister, STUB_ARGS_callFrame);
+ pop(reg);
}
-ALWAYS_INLINE void JIT::restoreArgumentReferenceForTrampoline()
+
+ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(RegisterID reg)
{
- // In the trampoline on x86-64, the first argument register is not overwritten.
-#if !PLATFORM(X86_64)
- move(stackPointerRegister, firstArgumentRegister);
- addPtr(Imm32(sizeof(void*)), firstArgumentRegister);
-#endif
+ push(reg);
}
-#elif USE(JIT_STUB_ARGUMENT_STACK)
-ALWAYS_INLINE void JIT::restoreArgumentReference()
+
+ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
{
- poke(stackPointerRegister);
- emitPutCTIParam(callFrameRegister, STUB_ARGS_callFrame);
+ push(address);
}
-ALWAYS_INLINE void JIT::restoreArgumentReferenceForTrampoline() {}
-#else // JIT_STUB_ARGUMENT_VA_LIST
-ALWAYS_INLINE void JIT::restoreArgumentReference()
+
+#elif PLATFORM(ARM_V7)
+
+ALWAYS_INLINE void JIT::preserveReturnAddressAfterCall(RegisterID reg)
{
- emitPutCTIParam(callFrameRegister, STUB_ARGS_callFrame);
+ move(linkRegister, reg);
}
-ALWAYS_INLINE void JIT::restoreArgumentReferenceForTrampoline() {}
-#endif
-ALWAYS_INLINE JIT::Call JIT::emitCTICall_internal(void* helper)
+ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(RegisterID reg)
{
- ASSERT(m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
+ move(reg, linkRegister);
+}
+
+ALWAYS_INLINE void JIT::restoreReturnAddressBeforeReturn(Address address)
+{
+ loadPtr(address, linkRegister);
+}
-#if ENABLE(OPCODE_SAMPLING)
- sampleInstruction(m_codeBlock->instructions().begin() + m_bytecodeIndex, true);
-#endif
- restoreArgumentReference();
- Call ctiCall = call();
- m_calls.append(CallRecord(ctiCall, m_bytecodeIndex, helper));
-#if ENABLE(OPCODE_SAMPLING)
- sampleInstruction(m_codeBlock->instructions().begin() + m_bytecodeIndex, false);
#endif
- killLastResultRegister();
- return ctiCall;
+#if USE(JIT_STUB_ARGUMENT_VA_LIST)
+ALWAYS_INLINE void JIT::restoreArgumentReference()
+{
+ poke(callFrameRegister, offsetof(struct JITStackFrame, callFrame) / sizeof (void*));
}
+ALWAYS_INLINE void JIT::restoreArgumentReferenceForTrampoline() {}
+#else
+ALWAYS_INLINE void JIT::restoreArgumentReference()
+{
+ move(stackPointerRegister, firstArgumentRegister);
+ poke(callFrameRegister, offsetof(struct JITStackFrame, callFrame) / sizeof (void*));
+}
+ALWAYS_INLINE void JIT::restoreArgumentReferenceForTrampoline()
+{
+#if PLATFORM(X86)
+ // Within a trampoline the return address will be on the stack at this point.
+ addPtr(Imm32(sizeof(void*)), stackPointerRegister, firstArgumentRegister);
+#elif PLATFORM(ARM_V7)
+ move(stackPointerRegister, firstArgumentRegister);
+#endif
+ // In the trampoline on x86-64, the first argument register is not overwritten.
+}
+#endif
ALWAYS_INLINE JIT::Jump JIT::checkStructure(RegisterID reg, Structure* structure)
{
@@ -414,6 +417,66 @@ ALWAYS_INLINE void JIT::emitJumpSlowToHot(Jump jump, int relativeOffset)
jump.linkTo(m_labels[m_bytecodeIndex + relativeOffset], this);
}
+#if ENABLE(SAMPLING_FLAGS)
+ALWAYS_INLINE void JIT::setSamplingFlag(int32_t flag)
+{
+ ASSERT(flag >= 1);
+ ASSERT(flag <= 32);
+ or32(Imm32(1u << (flag - 1)), AbsoluteAddress(&SamplingFlags::s_flags));
+}
+
+ALWAYS_INLINE void JIT::clearSamplingFlag(int32_t flag)
+{
+ ASSERT(flag >= 1);
+ ASSERT(flag <= 32);
+ and32(Imm32(~(1u << (flag - 1))), AbsoluteAddress(&SamplingFlags::s_flags));
+}
+#endif
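The flag-to-bit mapping used by the two helpers above, in standalone form (flag n drives bit n - 1):

    #include <cassert>
    #include <cstdint>

    int main()
    {
        uint32_t flags = 0;
        const int32_t flag = 3;       // valid flags are 1..32, as the ASSERTs require
        flags |= 1u << (flag - 1);    // setSamplingFlag: or32 of the bit mask
        assert(flags == 0x4u);
        flags &= ~(1u << (flag - 1)); // clearSamplingFlag: and32 of its complement
        assert(flags == 0u);
        return 0;
    }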
+
+#if ENABLE(SAMPLING_COUNTERS)
+ALWAYS_INLINE void JIT::emitCount(AbstractSamplingCounter& counter, uint32_t count)
+{
+#if PLATFORM(X86_64) // Or any other 64-bit platform.
+ addPtr(Imm32(count), AbsoluteAddress(&counter.m_counter));
+#elif PLATFORM(X86) // Or any other little-endian 32-bit platform.
+ intptr_t hiWord = reinterpret_cast<intptr_t>(&counter.m_counter) + sizeof(int32_t);
+ add32(Imm32(count), AbsoluteAddress(&counter.m_counter));
+ addWithCarry32(Imm32(0), AbsoluteAddress(reinterpret_cast<void*>(hiWord)));
+#else
+#error "SAMPLING_COUNTERS not implemented on this platform."
+#endif
+}
+#endif
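// On a little-endian 32-bit target the 64-bit counter is bumped as two
// 32-bit halves at adjacent addresses; the add32/addWithCarry32 pair above
// is the machine form of this sketch (aliasing the counter just as the
// generated code does):
#include <cstdint>
static inline void emitCountSketch(int64_t& counter, uint32_t count)
{
    uint32_t* lo = reinterpret_cast<uint32_t*>(&counter); // low word comes first
    uint32_t* hi = lo + 1;
    uint32_t old = *lo;
    *lo += count;
    if (*lo < old) // unsigned wrap == carry out of the low word
        *hi += 1;
}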
+
+#if ENABLE(OPCODE_SAMPLING)
+#if PLATFORM(X86_64)
+ALWAYS_INLINE void JIT::sampleInstruction(Instruction* instruction, bool inHostFunction)
+{
+ move(ImmPtr(m_interpreter->sampler()->sampleSlot()), X86::ecx);
+ storePtr(ImmPtr(m_interpreter->sampler()->encodeSample(instruction, inHostFunction)), X86::ecx);
+}
+#else
+ALWAYS_INLINE void JIT::sampleInstruction(Instruction* instruction, bool inHostFunction)
+{
+ storePtr(ImmPtr(m_interpreter->sampler()->encodeSample(instruction, inHostFunction)), m_interpreter->sampler()->sampleSlot());
+}
+#endif
+#endif
+
+#if ENABLE(CODEBLOCK_SAMPLING)
+#if PLATFORM(X86_64)
+ALWAYS_INLINE void JIT::sampleCodeBlock(CodeBlock* codeBlock)
+{
+ move(ImmPtr(m_interpreter->sampler()->codeBlockSlot()), X86::ecx);
+ storePtr(ImmPtr(codeBlock), X86::ecx);
+}
+#else
+ALWAYS_INLINE void JIT::sampleCodeBlock(CodeBlock* codeBlock)
+{
+ storePtr(ImmPtr(codeBlock), m_interpreter->sampler()->codeBlockSlot());
+}
+#endif
+#endif
}
#endif // ENABLE(JIT)
diff --git a/JavaScriptCore/jit/JITOpcodes.cpp b/JavaScriptCore/jit/JITOpcodes.cpp
new file mode 100644
index 0000000..1737551
--- /dev/null
+++ b/JavaScriptCore/jit/JITOpcodes.cpp
@@ -0,0 +1,1183 @@
+/*
+ * Copyright (C) 2009 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "JIT.h"
+
+#if ENABLE(JIT)
+
+#include "JITInlineMethods.h"
+#include "JITStubCall.h"
+#include "JSArray.h"
+#include "JSCell.h"
+
+namespace JSC {
+
+#define RECORD_JUMP_TARGET(targetOffset) \
+ do { m_labels[m_bytecodeIndex + (targetOffset)].used(); } while (false)
+
+void JIT::emit_op_mov(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int src = currentInstruction[2].u.operand;
+
+ if (m_codeBlock->isConstantRegisterIndex(src)) {
+ storePtr(ImmPtr(JSValue::encode(getConstantOperand(src))), Address(callFrameRegister, dst * sizeof(Register)));
+ if (dst == m_lastResultBytecodeRegister)
+ killLastResultRegister();
+ } else if ((src == m_lastResultBytecodeRegister) || (dst == m_lastResultBytecodeRegister)) {
+        // If either the src or dst is the cached register, go through the
+        // get/put register helpers to make sure we track this correctly.
+ emitGetVirtualRegister(src, regT0);
+ emitPutVirtualRegister(dst);
+ } else {
+ // Perform the copy via regT1; do not disturb any mapping in regT0.
+ loadPtr(Address(callFrameRegister, src * sizeof(Register)), regT1);
+ storePtr(regT1, Address(callFrameRegister, dst * sizeof(Register)));
+ }
+}
+
+void JIT::emit_op_end(Instruction* currentInstruction)
+{
+ if (m_codeBlock->needsFullScopeChain())
+ JITStubCall(this, JITStubs::cti_op_end).call();
+ ASSERT(returnValueRegister != callFrameRegister);
+ emitGetVirtualRegister(currentInstruction[1].u.operand, returnValueRegister);
+ restoreReturnAddressBeforeReturn(Address(callFrameRegister, RegisterFile::ReturnPC * static_cast<int>(sizeof(Register))));
+ ret();
+}
+
+void JIT::emit_op_jmp(Instruction* currentInstruction)
+{
+ unsigned target = currentInstruction[1].u.operand;
+ addJump(jump(), target + 1);
+ RECORD_JUMP_TARGET(target + 1);
+}
+
+void JIT::emit_op_loop(Instruction* currentInstruction)
+{
+ emitTimeoutCheck();
+
+ unsigned target = currentInstruction[1].u.operand;
+ addJump(jump(), target + 1);
+}
+
+void JIT::emit_op_loop_if_less(Instruction* currentInstruction)
+{
+ emitTimeoutCheck();
+
+ unsigned op1 = currentInstruction[1].u.operand;
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+ if (isOperandConstantImmediateInt(op2)) {
+ emitGetVirtualRegister(op1, regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+#if USE(ALTERNATE_JSIMMEDIATE)
+ int32_t op2imm = getConstantOperandImmediateInt(op2);
+#else
+ int32_t op2imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)));
+#endif
+ addJump(branch32(LessThan, regT0, Imm32(op2imm)), target + 3);
+ } else if (isOperandConstantImmediateInt(op1)) {
+ emitGetVirtualRegister(op2, regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+#if USE(ALTERNATE_JSIMMEDIATE)
+ int32_t op1imm = getConstantOperandImmediateInt(op1);
+#else
+ int32_t op1imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op1)));
+#endif
+ addJump(branch32(GreaterThan, regT0, Imm32(op1imm)), target + 3);
+ } else {
+ emitGetVirtualRegisters(op1, regT0, op2, regT1);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT1);
+ addJump(branch32(LessThan, regT0, regT1), target + 3);
+ }
+}
+
+void JIT::emit_op_loop_if_lesseq(Instruction* currentInstruction)
+{
+ emitTimeoutCheck();
+
+ unsigned op1 = currentInstruction[1].u.operand;
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+ if (isOperandConstantImmediateInt(op2)) {
+ emitGetVirtualRegister(op1, regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+#if USE(ALTERNATE_JSIMMEDIATE)
+ int32_t op2imm = getConstantOperandImmediateInt(op2);
+#else
+ int32_t op2imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)));
+#endif
+ addJump(branch32(LessThanOrEqual, regT0, Imm32(op2imm)), target + 3);
+ } else {
+ emitGetVirtualRegisters(op1, regT0, op2, regT1);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT1);
+ addJump(branch32(LessThanOrEqual, regT0, regT1), target + 3);
+ }
+}
+
+void JIT::emit_op_new_object(Instruction* currentInstruction)
+{
+ JITStubCall(this, JITStubs::cti_op_new_object).call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_instanceof(Instruction* currentInstruction)
+{
+ // Load the operands (baseVal, proto, and value respectively) into registers.
+ // We use regT0 for baseVal since we will be done with this first, and we can then use it for the result.
+ emitGetVirtualRegister(currentInstruction[3].u.operand, regT0);
+ emitGetVirtualRegister(currentInstruction[4].u.operand, regT1);
+ emitGetVirtualRegister(currentInstruction[2].u.operand, regT2);
+
+ // Check that baseVal & proto are cells.
+ emitJumpSlowCaseIfNotJSCell(regT0);
+ emitJumpSlowCaseIfNotJSCell(regT1);
+
+ // Check that baseVal is an object, that it 'ImplementsHasInstance' but that it does not 'OverridesHasInstance'.
+ loadPtr(Address(regT0, FIELD_OFFSET(JSCell, m_structure)), regT0);
+ addSlowCase(branch32(NotEqual, Address(regT0, FIELD_OFFSET(Structure, m_typeInfo.m_type)), Imm32(ObjectType)));
+ addSlowCase(branchTest32(Zero, Address(regT0, FIELD_OFFSET(Structure, m_typeInfo.m_flags)), Imm32(ImplementsDefaultHasInstance)));
+
+ // If value is not an Object, return false.
+ Jump valueIsImmediate = emitJumpIfNotJSCell(regT2);
+ loadPtr(Address(regT2, FIELD_OFFSET(JSCell, m_structure)), regT0);
+ Jump valueIsNotObject = branch32(NotEqual, Address(regT0, FIELD_OFFSET(Structure, m_typeInfo.m_type)), Imm32(ObjectType));
+
+ // Check proto is object.
+ loadPtr(Address(regT1, FIELD_OFFSET(JSCell, m_structure)), regT0);
+ addSlowCase(branch32(NotEqual, Address(regT0, FIELD_OFFSET(Structure, m_typeInfo.m_type)), Imm32(ObjectType)));
+
+ // Optimistically load the result true, and start looping.
+ // Initially, regT1 still contains proto and regT2 still contains value.
+ // As we loop regT2 will be updated with its prototype, recursively walking the prototype chain.
+ move(ImmPtr(JSValue::encode(jsBoolean(true))), regT0);
+ Label loop(this);
+
+ // Load the prototype of the object in regT2. If this is equal to regT1 - WIN!
+ // Otherwise, check if we've hit null - if we have then drop out of the loop, if not go again.
+ loadPtr(Address(regT2, FIELD_OFFSET(JSCell, m_structure)), regT2);
+ loadPtr(Address(regT2, FIELD_OFFSET(Structure, m_prototype)), regT2);
+ Jump isInstance = branchPtr(Equal, regT2, regT1);
+ branchPtr(NotEqual, regT2, ImmPtr(JSValue::encode(jsNull())), loop);
+
+ // We get here either by dropping out of the loop, or if value was not an Object. Result is false.
+ valueIsImmediate.link(this);
+ valueIsNotObject.link(this);
+ move(ImmPtr(JSValue::encode(jsBoolean(false))), regT0);
+
+ // isInstance jumps right down to here, to skip setting the result to false (it has already set true).
+ isInstance.link(this);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+}
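// The generated loop walks value's prototype chain, succeeding if it reaches
// proto and failing once it hits null. Over an illustrative object type (not
// the real JSC classes), the computation is:
struct ObjectSketch { ObjectSketch* prototype; }; // chain ends at null

static bool isInstanceSketch(ObjectSketch* value, ObjectSketch* proto)
{
    for (ObjectSketch* o = value->prototype; o; o = o->prototype) {
        if (o == proto)
            return true; // the 'isInstance' jump above
    }
    return false; // dropped out of the loop at null
}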
+
+void JIT::emit_op_new_func(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, JITStubs::cti_op_new_func);
+ stubCall.addArgument(ImmPtr(m_codeBlock->function(currentInstruction[2].u.operand)));
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_call(Instruction* currentInstruction)
+{
+ compileOpCall(op_call, currentInstruction, m_callLinkInfoIndex++);
+}
+
+void JIT::emit_op_call_eval(Instruction* currentInstruction)
+{
+ compileOpCall(op_call_eval, currentInstruction, m_callLinkInfoIndex++);
+}
+
+void JIT::emit_op_load_varargs(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, JITStubs::cti_op_load_varargs);
+ stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_call_varargs(Instruction* currentInstruction)
+{
+ compileOpCallVarargs(currentInstruction);
+}
+
+void JIT::emit_op_construct(Instruction* currentInstruction)
+{
+ compileOpCall(op_construct, currentInstruction, m_callLinkInfoIndex++);
+}
+
+void JIT::emit_op_get_global_var(Instruction* currentInstruction)
+{
+ JSVariableObject* globalObject = static_cast<JSVariableObject*>(currentInstruction[2].u.jsCell);
+ move(ImmPtr(globalObject), regT0);
+ emitGetVariableObjectRegister(regT0, currentInstruction[3].u.operand, regT0);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_put_global_var(Instruction* currentInstruction)
+{
+ emitGetVirtualRegister(currentInstruction[3].u.operand, regT1);
+ JSVariableObject* globalObject = static_cast<JSVariableObject*>(currentInstruction[1].u.jsCell);
+ move(ImmPtr(globalObject), regT0);
+ emitPutVariableObjectRegister(regT1, regT0, currentInstruction[2].u.operand);
+}
+
+void JIT::emit_op_get_scoped_var(Instruction* currentInstruction)
+{
+ int skip = currentInstruction[3].u.operand + m_codeBlock->needsFullScopeChain();
+
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT0);
+ while (skip--)
+ loadPtr(Address(regT0, FIELD_OFFSET(ScopeChainNode, next)), regT0);
+
+ loadPtr(Address(regT0, FIELD_OFFSET(ScopeChainNode, object)), regT0);
+ emitGetVariableObjectRegister(regT0, currentInstruction[2].u.operand, regT0);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_put_scoped_var(Instruction* currentInstruction)
+{
+ int skip = currentInstruction[2].u.operand + m_codeBlock->needsFullScopeChain();
+
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1);
+ emitGetVirtualRegister(currentInstruction[3].u.operand, regT0);
+ while (skip--)
+ loadPtr(Address(regT1, FIELD_OFFSET(ScopeChainNode, next)), regT1);
+
+ loadPtr(Address(regT1, FIELD_OFFSET(ScopeChainNode, object)), regT1);
+ emitPutVariableObjectRegister(regT0, regT1, currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_tear_off_activation(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, JITStubs::cti_op_tear_off_activation);
+ stubCall.addArgument(currentInstruction[1].u.operand, regT2);
+ stubCall.call();
+}
+
+void JIT::emit_op_tear_off_arguments(Instruction*)
+{
+ JITStubCall(this, JITStubs::cti_op_tear_off_arguments).call();
+}
+
+void JIT::emit_op_ret(Instruction* currentInstruction)
+{
+ // We could JIT generate the deref, only calling out to C when the refcount hits zero.
+ if (m_codeBlock->needsFullScopeChain())
+ JITStubCall(this, JITStubs::cti_op_ret_scopeChain).call();
+
+ ASSERT(callFrameRegister != regT1);
+ ASSERT(regT1 != returnValueRegister);
+ ASSERT(returnValueRegister != callFrameRegister);
+
+    // Return the result in the return value register (%eax on x86).
+ emitGetVirtualRegister(currentInstruction[1].u.operand, returnValueRegister);
+
+ // Grab the return address.
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, regT1);
+
+ // Restore our caller's "r".
+ emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, callFrameRegister);
+
+ // Return.
+ restoreReturnAddressBeforeReturn(regT1);
+ ret();
+}
+
+void JIT::emit_op_new_array(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, JITStubs::cti_op_new_array);
+ stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
+ stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_resolve(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, JITStubs::cti_op_resolve);
+ stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_construct_verify(Instruction* currentInstruction)
+{
+ emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
+
+ emitJumpSlowCaseIfNotJSCell(regT0);
+ loadPtr(Address(regT0, FIELD_OFFSET(JSCell, m_structure)), regT2);
+ addSlowCase(branch32(NotEqual, Address(regT2, FIELD_OFFSET(Structure, m_typeInfo) + FIELD_OFFSET(TypeInfo, m_type)), Imm32(ObjectType)));
+}
+
+void JIT::emit_op_to_primitive(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int src = currentInstruction[2].u.operand;
+
+ emitGetVirtualRegister(src, regT0);
+
+ Jump isImm = emitJumpIfNotJSCell(regT0);
+ addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr)));
+ isImm.link(this);
+
+ if (dst != src)
+ emitPutVirtualRegister(dst);
+}
+
+void JIT::emit_op_strcat(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, JITStubs::cti_op_strcat);
+ stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
+ stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_resolve_func(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, JITStubs::cti_op_resolve_func);
+ stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
+ stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
+ stubCall.call(currentInstruction[2].u.operand);
+}
+
+void JIT::emit_op_loop_if_true(Instruction* currentInstruction)
+{
+ emitTimeoutCheck();
+
+ unsigned target = currentInstruction[2].u.operand;
+ emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
+
+ Jump isZero = branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsNumber(m_globalData, 0))));
+ addJump(emitJumpIfImmediateInteger(regT0), target + 2);
+
+ addJump(branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsBoolean(true)))), target + 2);
+ addSlowCase(branchPtr(NotEqual, regT0, ImmPtr(JSValue::encode(jsBoolean(false)))));
+
+ isZero.link(this);
+}
+
+void JIT::emit_op_resolve_base(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, JITStubs::cti_op_resolve_base);
+ stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_resolve_skip(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, JITStubs::cti_op_resolve_skip);
+ stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
+ stubCall.addArgument(Imm32(currentInstruction[3].u.operand + m_codeBlock->needsFullScopeChain()));
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_resolve_global(Instruction* currentInstruction)
+{
+ // Fast case
+ void* globalObject = currentInstruction[2].u.jsCell;
+ Identifier* ident = &m_codeBlock->identifier(currentInstruction[3].u.operand);
+
+ unsigned currentIndex = m_globalResolveInfoIndex++;
+ void* structureAddress = &(m_codeBlock->globalResolveInfo(currentIndex).structure);
+ void* offsetAddr = &(m_codeBlock->globalResolveInfo(currentIndex).offset);
+
+ // Check Structure of global object
+ move(ImmPtr(globalObject), regT0);
+ loadPtr(structureAddress, regT1);
+ Jump noMatch = branchPtr(NotEqual, regT1, Address(regT0, FIELD_OFFSET(JSCell, m_structure))); // Structures don't match
+
+ // Load cached property
+ // Assume that the global object always uses external storage.
+ loadPtr(Address(regT0, FIELD_OFFSET(JSGlobalObject, m_externalStorage)), regT0);
+ load32(offsetAddr, regT1);
+ loadPtr(BaseIndex(regT0, regT1, ScalePtr), regT0);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+ Jump end = jump();
+
+ // Slow case
+ noMatch.link(this);
+ JITStubCall stubCall(this, JITStubs::cti_op_resolve_global);
+ stubCall.addArgument(ImmPtr(globalObject));
+ stubCall.addArgument(ImmPtr(ident));
+ stubCall.addArgument(Imm32(currentIndex));
+ stubCall.call(currentInstruction[1].u.operand);
+ end.link(this);
+}
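// The fast/slow split above, in scalar form: a structure check guards a
// cached offset into the global object's external storage; a miss calls the
// stub, which repopulates the cache. Types here are illustrative:
struct GlobalResolveCacheSketch { void* structure; unsigned offset; };

static void* resolveGlobalSketch(void* objectStructure, void** externalStorage,
                                 GlobalResolveCacheSketch& cache, void* (*slowPath)())
{
    if (objectStructure == cache.structure)   // structure check hit
        return externalStorage[cache.offset]; // cached property load
    return slowPath();                        // cti_op_resolve_global refills the cache
}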
+
+void JIT::emit_op_not(Instruction* currentInstruction)
+{
+ emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);
+ xorPtr(Imm32(static_cast<int32_t>(JSImmediate::FullTagTypeBool)), regT0);
+ addSlowCase(branchTestPtr(NonZero, regT0, Imm32(static_cast<int32_t>(~JSImmediate::ExtendedPayloadBitBoolValue))));
+ xorPtr(Imm32(static_cast<int32_t>(JSImmediate::FullTagTypeBool | JSImmediate::ExtendedPayloadBitBoolValue)), regT0);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+}
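// On a genuine boolean the two xors cancel the tag bits and flip only the
// payload bit; the intermediate value also serves as the "really a boolean?"
// test. Sketch with stand-in constants (the real values are JSImmediate's):
#include <cstdint>
constexpr intptr_t kBoolTagSketch    = 0x06; // stands in for FullTagTypeBool
constexpr intptr_t kPayloadBitSketch = 0x01; // stands in for ExtendedPayloadBitBoolValue

static inline bool isBoolSketch(intptr_t v)      { return ((v ^ kBoolTagSketch) & ~kPayloadBitSketch) == 0; }
static inline intptr_t notBoolSketch(intptr_t v) { return v ^ kPayloadBitSketch; } // net effect of both xors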
+
+void JIT::emit_op_jfalse(Instruction* currentInstruction)
+{
+ unsigned target = currentInstruction[2].u.operand;
+ emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
+
+ addJump(branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsNumber(m_globalData, 0)))), target + 2);
+ Jump isNonZero = emitJumpIfImmediateInteger(regT0);
+
+ addJump(branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsBoolean(false)))), target + 2);
+ addSlowCase(branchPtr(NotEqual, regT0, ImmPtr(JSValue::encode(jsBoolean(true)))));
+
+ isNonZero.link(this);
+ RECORD_JUMP_TARGET(target + 2);
+}
+
+void JIT::emit_op_jeq_null(Instruction* currentInstruction)
+{
+ unsigned src = currentInstruction[1].u.operand;
+ unsigned target = currentInstruction[2].u.operand;
+
+ emitGetVirtualRegister(src, regT0);
+ Jump isImmediate = emitJumpIfNotJSCell(regT0);
+
+ // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
+ loadPtr(Address(regT0, FIELD_OFFSET(JSCell, m_structure)), regT2);
+ addJump(branchTest32(NonZero, Address(regT2, FIELD_OFFSET(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined)), target + 2);
+ Jump wasNotImmediate = jump();
+
+ // Now handle the immediate cases - undefined & null
+ isImmediate.link(this);
+ andPtr(Imm32(~JSImmediate::ExtendedTagBitUndefined), regT0);
+ addJump(branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsNull()))), target + 2);
+
+ wasNotImmediate.link(this);
+ RECORD_JUMP_TARGET(target + 2);
+}
+
+void JIT::emit_op_jneq_null(Instruction* currentInstruction)
+{
+ unsigned src = currentInstruction[1].u.operand;
+ unsigned target = currentInstruction[2].u.operand;
+
+ emitGetVirtualRegister(src, regT0);
+ Jump isImmediate = emitJumpIfNotJSCell(regT0);
+
+ // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
+ loadPtr(Address(regT0, FIELD_OFFSET(JSCell, m_structure)), regT2);
+ addJump(branchTest32(Zero, Address(regT2, FIELD_OFFSET(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined)), target + 2);
+ Jump wasNotImmediate = jump();
+
+ // Now handle the immediate cases - undefined & null
+ isImmediate.link(this);
+ andPtr(Imm32(~JSImmediate::ExtendedTagBitUndefined), regT0);
+ addJump(branchPtr(NotEqual, regT0, ImmPtr(JSValue::encode(jsNull()))), target + 2);
+
+ wasNotImmediate.link(this);
+ RECORD_JUMP_TARGET(target + 2);
+}
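// In this encoding undefined differs from null by a single tag bit, so
// masking that bit off lets one compare against null catch both immediates.
// Stand-in constants again (the real ones are JSImmediate's):
#include <cstdint>
constexpr intptr_t kNullSketch     = 0x02; // stands in for the encoded null
constexpr intptr_t kUndefBitSketch = 0x08; // stands in for ExtendedTagBitUndefined

static inline bool isNullOrUndefinedSketch(intptr_t v)
{
    return (v & ~kUndefBitSketch) == kNullSketch; // the andPtr + compare above
}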
+
+void JIT::emit_op_jneq_ptr(Instruction* currentInstruction)
+{
+ unsigned src = currentInstruction[1].u.operand;
+ JSCell* ptr = currentInstruction[2].u.jsCell;
+ unsigned target = currentInstruction[3].u.operand;
+
+ emitGetVirtualRegister(src, regT0);
+ addJump(branchPtr(NotEqual, regT0, ImmPtr(JSValue::encode(JSValue(ptr)))), target + 3);
+
+ RECORD_JUMP_TARGET(target + 3);
+}
+
+void JIT::emit_op_unexpected_load(Instruction* currentInstruction)
+{
+ JSValue v = m_codeBlock->unexpectedConstant(currentInstruction[2].u.operand);
+ move(ImmPtr(JSValue::encode(v)), regT0);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_jsr(Instruction* currentInstruction)
+{
+ int retAddrDst = currentInstruction[1].u.operand;
+ int target = currentInstruction[2].u.operand;
+ DataLabelPtr storeLocation = storePtrWithPatch(ImmPtr(0), Address(callFrameRegister, sizeof(Register) * retAddrDst));
+ addJump(jump(), target + 2);
+ m_jsrSites.append(JSRInfo(storeLocation, label()));
+ killLastResultRegister();
+ RECORD_JUMP_TARGET(target + 2);
+}
+
+void JIT::emit_op_sret(Instruction* currentInstruction)
+{
+ jump(Address(callFrameRegister, sizeof(Register) * currentInstruction[1].u.operand));
+ killLastResultRegister();
+}
+
+void JIT::emit_op_eq(Instruction* currentInstruction)
+{
+ emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
+ emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);
+ set32(Equal, regT1, regT0, regT0);
+ emitTagAsBoolImmediate(regT0);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_bitnot(Instruction* currentInstruction)
+{
+ emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+#if USE(ALTERNATE_JSIMMEDIATE)
+ not32(regT0);
+ emitFastArithIntToImmNoCheck(regT0, regT0);
+#else
+ xorPtr(Imm32(~JSImmediate::TagTypeNumber), regT0);
+#endif
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+}
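// In the non-ALTERNATE encoding an integer n is stored as (n << 1) | 1, so
// xoring with ~TagTypeNumber flips every payload bit while leaving the tag
// intact - which is exactly a tagged ~n:
//   encode(n) ^ ~1  ==  (~n << 1) | 1  ==  encode(~n)
#include <cstdint>
static inline intptr_t bitnotTaggedSketch(intptr_t encoded)
{
    return encoded ^ ~static_cast<intptr_t>(1); // assumes TagTypeNumber == 1
}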
+
+void JIT::emit_op_resolve_with_base(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, JITStubs::cti_op_resolve_with_base);
+ stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
+ stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
+ stubCall.call(currentInstruction[2].u.operand);
+}
+
+void JIT::emit_op_new_func_exp(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, JITStubs::cti_op_new_func_exp);
+ stubCall.addArgument(ImmPtr(m_codeBlock->functionExpression(currentInstruction[2].u.operand)));
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_jtrue(Instruction* currentInstruction)
+{
+ unsigned target = currentInstruction[2].u.operand;
+ emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
+
+ Jump isZero = branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsNumber(m_globalData, 0))));
+ addJump(emitJumpIfImmediateInteger(regT0), target + 2);
+
+ addJump(branchPtr(Equal, regT0, ImmPtr(JSValue::encode(jsBoolean(true)))), target + 2);
+ addSlowCase(branchPtr(NotEqual, regT0, ImmPtr(JSValue::encode(jsBoolean(false)))));
+
+ isZero.link(this);
+ RECORD_JUMP_TARGET(target + 2);
+}
+
+void JIT::emit_op_neq(Instruction* currentInstruction)
+{
+ emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
+ emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);
+ set32(NotEqual, regT1, regT0, regT0);
+ emitTagAsBoolImmediate(regT0);
+    emitPutVirtualRegister(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_bitxor(Instruction* currentInstruction)
+{
+ emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
+ emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);
+ xorPtr(regT1, regT0);
+ emitFastArithReTagImmediate(regT0, regT0);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_new_regexp(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, JITStubs::cti_op_new_regexp);
+ stubCall.addArgument(ImmPtr(m_codeBlock->regexp(currentInstruction[2].u.operand)));
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_bitor(Instruction* currentInstruction)
+{
+ emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
+ emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);
+ orPtr(regT1, regT0);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_throw(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, JITStubs::cti_op_throw);
+ stubCall.addArgument(currentInstruction[1].u.operand, regT2);
+ stubCall.call();
+ ASSERT(regT0 == returnValueRegister);
+#ifndef NDEBUG
+    // cti_op_throw always changes its return address, so
+    // this point in the code should never be reached.
+ breakpoint();
+#endif
+}
+
+void JIT::emit_op_next_pname(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, JITStubs::cti_op_next_pname);
+ stubCall.addArgument(currentInstruction[2].u.operand, regT2);
+ stubCall.call();
+ Jump endOfIter = branchTestPtr(Zero, regT0);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+ addJump(jump(), currentInstruction[3].u.operand + 3);
+ endOfIter.link(this);
+}
+
+void JIT::emit_op_push_scope(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, JITStubs::cti_op_push_scope);
+ stubCall.addArgument(currentInstruction[1].u.operand, regT2);
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_pop_scope(Instruction*)
+{
+ JITStubCall(this, JITStubs::cti_op_pop_scope).call();
+}
+
+void JIT::emit_op_stricteq(Instruction* currentInstruction)
+{
+ compileOpStrictEq(currentInstruction, OpStrictEq);
+}
+
+void JIT::emit_op_nstricteq(Instruction* currentInstruction)
+{
+ compileOpStrictEq(currentInstruction, OpNStrictEq);
+}
+
+void JIT::emit_op_to_jsnumber(Instruction* currentInstruction)
+{
+ int srcVReg = currentInstruction[2].u.operand;
+ emitGetVirtualRegister(srcVReg, regT0);
+
+ Jump wasImmediate = emitJumpIfImmediateInteger(regT0);
+
+ emitJumpSlowCaseIfNotJSCell(regT0, srcVReg);
+ loadPtr(Address(regT0, FIELD_OFFSET(JSCell, m_structure)), regT2);
+ addSlowCase(branch32(NotEqual, Address(regT2, FIELD_OFFSET(Structure, m_typeInfo.m_type)), Imm32(NumberType)));
+
+ wasImmediate.link(this);
+
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_push_new_scope(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, JITStubs::cti_op_push_new_scope);
+ stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
+ stubCall.addArgument(currentInstruction[3].u.operand, regT2);
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_catch(Instruction* currentInstruction)
+{
+ killLastResultRegister(); // FIXME: Implicitly treat op_catch as a labeled statement, and remove this line of code.
+ peek(callFrameRegister, offsetof(struct JITStackFrame, callFrame) / sizeof (void*));
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_jmp_scopes(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, JITStubs::cti_op_jmp_scopes);
+ stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
+ stubCall.call();
+ addJump(jump(), currentInstruction[2].u.operand + 2);
+ RECORD_JUMP_TARGET(currentInstruction[2].u.operand + 2);
+}
+
+void JIT::emit_op_switch_imm(Instruction* currentInstruction)
+{
+ unsigned tableIndex = currentInstruction[1].u.operand;
+ unsigned defaultOffset = currentInstruction[2].u.operand;
+ unsigned scrutinee = currentInstruction[3].u.operand;
+
+ // create jump table for switch destinations, track this switch statement.
+ SimpleJumpTable* jumpTable = &m_codeBlock->immediateSwitchJumpTable(tableIndex);
+ m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset, SwitchRecord::Immediate));
+ jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
+
+ JITStubCall stubCall(this, JITStubs::cti_op_switch_imm);
+ stubCall.addArgument(scrutinee, regT2);
+ stubCall.addArgument(Imm32(tableIndex));
+ stubCall.call();
+ jump(regT0);
+}
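// How the stub picks the target: SimpleJumpTable pairs a minimum case value
// with per-case native-code addresses, and the jump(regT0) above goes through
// whatever address the stub returns. Scalar sketch; the container type and
// field names are illustrative:
#include <cstdint>
#include <vector>
static void* switchImmTargetSketch(int32_t scrutinee, int32_t tableMin,
                                   const std::vector<void*>& ctiOffsets, void* defaultTarget)
{
    uint32_t index = static_cast<uint32_t>(scrutinee - tableMin);
    if (index < ctiOffsets.size() && ctiOffsets[index])
        return ctiOffsets[index]; // patched case target
    return defaultTarget;         // the defaultOffset destination
}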
+
+void JIT::emit_op_switch_char(Instruction* currentInstruction)
+{
+ unsigned tableIndex = currentInstruction[1].u.operand;
+ unsigned defaultOffset = currentInstruction[2].u.operand;
+ unsigned scrutinee = currentInstruction[3].u.operand;
+
+ // create jump table for switch destinations, track this switch statement.
+ SimpleJumpTable* jumpTable = &m_codeBlock->characterSwitchJumpTable(tableIndex);
+ m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset, SwitchRecord::Character));
+ jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
+
+ JITStubCall stubCall(this, JITStubs::cti_op_switch_char);
+ stubCall.addArgument(scrutinee, regT2);
+ stubCall.addArgument(Imm32(tableIndex));
+ stubCall.call();
+ jump(regT0);
+}
+
+void JIT::emit_op_switch_string(Instruction* currentInstruction)
+{
+ unsigned tableIndex = currentInstruction[1].u.operand;
+ unsigned defaultOffset = currentInstruction[2].u.operand;
+ unsigned scrutinee = currentInstruction[3].u.operand;
+
+ // create jump table for switch destinations, track this switch statement.
+ StringJumpTable* jumpTable = &m_codeBlock->stringSwitchJumpTable(tableIndex);
+ m_switches.append(SwitchRecord(jumpTable, m_bytecodeIndex, defaultOffset));
+
+ JITStubCall stubCall(this, JITStubs::cti_op_switch_string);
+ stubCall.addArgument(scrutinee, regT2);
+ stubCall.addArgument(Imm32(tableIndex));
+ stubCall.call();
+ jump(regT0);
+}
+
+void JIT::emit_op_new_error(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, JITStubs::cti_op_new_error);
+ stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
+ stubCall.addArgument(ImmPtr(JSValue::encode(m_codeBlock->unexpectedConstant(currentInstruction[3].u.operand))));
+ stubCall.addArgument(Imm32(m_bytecodeIndex));
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_debug(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, JITStubs::cti_op_debug);
+ stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
+ stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
+ stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
+ stubCall.call();
+}
+
+void JIT::emit_op_eq_null(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src1 = currentInstruction[2].u.operand;
+
+ emitGetVirtualRegister(src1, regT0);
+ Jump isImmediate = emitJumpIfNotJSCell(regT0);
+
+ loadPtr(Address(regT0, FIELD_OFFSET(JSCell, m_structure)), regT2);
+ setTest32(NonZero, Address(regT2, FIELD_OFFSET(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined), regT0);
+
+ Jump wasNotImmediate = jump();
+
+ isImmediate.link(this);
+
+ andPtr(Imm32(~JSImmediate::ExtendedTagBitUndefined), regT0);
+ setPtr(Equal, regT0, Imm32(JSImmediate::FullTagTypeNull), regT0);
+
+ wasNotImmediate.link(this);
+
+ emitTagAsBoolImmediate(regT0);
+ emitPutVirtualRegister(dst);
+}
+
+void JIT::emit_op_neq_null(Instruction* currentInstruction)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned src1 = currentInstruction[2].u.operand;
+
+ emitGetVirtualRegister(src1, regT0);
+ Jump isImmediate = emitJumpIfNotJSCell(regT0);
+
+ loadPtr(Address(regT0, FIELD_OFFSET(JSCell, m_structure)), regT2);
+ setTest32(Zero, Address(regT2, FIELD_OFFSET(Structure, m_typeInfo.m_flags)), Imm32(MasqueradesAsUndefined), regT0);
+
+ Jump wasNotImmediate = jump();
+
+ isImmediate.link(this);
+
+ andPtr(Imm32(~JSImmediate::ExtendedTagBitUndefined), regT0);
+ setPtr(NotEqual, regT0, Imm32(JSImmediate::FullTagTypeNull), regT0);
+
+ wasNotImmediate.link(this);
+
+ emitTagAsBoolImmediate(regT0);
+ emitPutVirtualRegister(dst);
+}
+
+void JIT::emit_op_enter(Instruction*)
+{
+ // Even though CTI doesn't use them, we initialize our constant
+ // registers to zap stale pointers, to avoid unnecessarily prolonging
+ // object lifetime and increasing GC pressure.
+ size_t count = m_codeBlock->m_numVars + m_codeBlock->numberOfConstantRegisters();
+ for (size_t j = 0; j < count; ++j)
+ emitInitRegister(j);
+}
+
+void JIT::emit_op_enter_with_activation(Instruction* currentInstruction)
+{
+ // Even though CTI doesn't use them, we initialize our constant
+ // registers to zap stale pointers, to avoid unnecessarily prolonging
+ // object lifetime and increasing GC pressure.
+ size_t count = m_codeBlock->m_numVars + m_codeBlock->numberOfConstantRegisters();
+ for (size_t j = 0; j < count; ++j)
+ emitInitRegister(j);
+
+ JITStubCall(this, JITStubs::cti_op_push_activation).call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_create_arguments(Instruction*)
+{
+ Jump argsCreated = branchTestPtr(NonZero, Address(callFrameRegister, sizeof(Register) * RegisterFile::ArgumentsRegister));
+ if (m_codeBlock->m_numParameters == 1)
+ JITStubCall(this, JITStubs::cti_op_create_arguments_no_params).call();
+ else
+ JITStubCall(this, JITStubs::cti_op_create_arguments).call();
+ argsCreated.link(this);
+}
+
+void JIT::emit_op_init_arguments(Instruction*)
+{
+ storePtr(ImmPtr(0), Address(callFrameRegister, sizeof(Register) * RegisterFile::ArgumentsRegister));
+}
+
+void JIT::emit_op_convert_this(Instruction* currentInstruction)
+{
+ emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
+
+ emitJumpSlowCaseIfNotJSCell(regT0);
+ loadPtr(Address(regT0, FIELD_OFFSET(JSCell, m_structure)), regT1);
+ addSlowCase(branchTest32(NonZero, Address(regT1, FIELD_OFFSET(Structure, m_typeInfo.m_flags)), Imm32(NeedsThisConversion)));
+}
+
+void JIT::emit_op_profile_will_call(Instruction* currentInstruction)
+{
+ peek(regT1, FIELD_OFFSET(JITStackFrame, enabledProfilerReference) / sizeof (void*));
+ Jump noProfiler = branchTestPtr(Zero, Address(regT1));
+
+ JITStubCall stubCall(this, JITStubs::cti_op_profile_will_call);
+ stubCall.addArgument(currentInstruction[1].u.operand, regT1);
+ stubCall.call();
+ noProfiler.link(this);
+}
+
+void JIT::emit_op_profile_did_call(Instruction* currentInstruction)
+{
+ peek(regT1, FIELD_OFFSET(JITStackFrame, enabledProfilerReference) / sizeof (void*));
+ Jump noProfiler = branchTestPtr(Zero, Address(regT1));
+
+ JITStubCall stubCall(this, JITStubs::cti_op_profile_did_call);
+ stubCall.addArgument(currentInstruction[1].u.operand, regT1);
+ stubCall.call();
+ noProfiler.link(this);
+}
+
+// Slow cases
+
+void JIT::emitSlow_op_convert_this(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, JITStubs::cti_op_convert_this);
+ stubCall.addArgument(regT0);
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emitSlow_op_construct_verify(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+}
+
+void JIT::emitSlow_op_to_primitive(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, JITStubs::cti_op_to_primitive);
+ stubCall.addArgument(regT0);
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+    // The slow case that handles accesses to arrays (below) may jump back up to here.
+ Label beginGetByValSlow(this);
+
+ Jump notImm = getSlowCase(iter);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ emitFastArithIntToImmNoCheck(regT1, regT1);
+
+ notImm.link(this);
+ JITStubCall stubCall(this, JITStubs::cti_op_get_by_val);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(regT1);
+ stubCall.call(currentInstruction[1].u.operand);
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_get_by_val));
+
+    // This is the slow case that handles accesses to arrays above the fast cut-off.
+ // First, check if this is an access to the vector
+ linkSlowCase(iter);
+ branch32(AboveOrEqual, regT1, Address(regT2, FIELD_OFFSET(ArrayStorage, m_vectorLength)), beginGetByValSlow);
+
+    // Okay, we missed the fast region, but the access is still within the vector. Get the value.
+ loadPtr(BaseIndex(regT2, regT1, ScalePtr, FIELD_OFFSET(ArrayStorage, m_vector[0])), regT2);
+ // Check whether the value loaded is zero; if so we need to return undefined.
+ branchTestPtr(Zero, regT2, beginGetByValSlow);
+ move(regT2, regT0);
+ emitPutVirtualRegister(currentInstruction[1].u.operand, regT0);
+}
+
+void JIT::emitSlow_op_loop_if_less(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned op1 = currentInstruction[1].u.operand;
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+ if (isOperandConstantImmediateInt(op2)) {
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, JITStubs::cti_op_loop_if_less);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(op2, regT2);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(NonZero, regT0), target + 3);
+ } else if (isOperandConstantImmediateInt(op1)) {
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, JITStubs::cti_op_loop_if_less);
+ stubCall.addArgument(op1, regT2);
+ stubCall.addArgument(regT0);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(NonZero, regT0), target + 3);
+ } else {
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, JITStubs::cti_op_loop_if_less);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(regT1);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(NonZero, regT0), target + 3);
+ }
+}
+
+void JIT::emitSlow_op_loop_if_lesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned op2 = currentInstruction[2].u.operand;
+ unsigned target = currentInstruction[3].u.operand;
+ if (isOperandConstantImmediateInt(op2)) {
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, JITStubs::cti_op_loop_if_lesseq);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(currentInstruction[2].u.operand, regT2);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(NonZero, regT0), target + 3);
+ } else {
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, JITStubs::cti_op_loop_if_lesseq);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(regT1);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(NonZero, regT0), target + 3);
+ }
+}
+
+void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+    // Normal slow cases - either the subscript is not an immediate int, or the base is an array.
+ Jump notImm = getSlowCase(iter);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ emitFastArithIntToImmNoCheck(regT1, regT1);
+
+ notImm.link(this); {
+ JITStubCall stubCall(this, JITStubs::cti_op_put_by_val);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(regT1);
+ stubCall.addArgument(currentInstruction[3].u.operand, regT2);
+ stubCall.call();
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_put_by_val));
+ }
+
+ // slow cases for immediate int accesses to arrays
+ linkSlowCase(iter);
+ linkSlowCase(iter); {
+ JITStubCall stubCall(this, JITStubs::cti_op_put_by_val_array);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(regT1);
+ stubCall.addArgument(currentInstruction[3].u.operand, regT2);
+ stubCall.call();
+ }
+}
+
+void JIT::emitSlow_op_loop_if_true(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, JITStubs::cti_op_jtrue);
+ stubCall.addArgument(regT0);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(NonZero, regT0), currentInstruction[2].u.operand + 2);
+}
+
+void JIT::emitSlow_op_not(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCase(iter);
+ xorPtr(Imm32(static_cast<int32_t>(JSImmediate::FullTagTypeBool)), regT0);
+ JITStubCall stubCall(this, JITStubs::cti_op_not);
+ stubCall.addArgument(regT0);
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emitSlow_op_jfalse(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, JITStubs::cti_op_jtrue);
+ stubCall.addArgument(regT0);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(Zero, regT0), currentInstruction[2].u.operand + 2); // inverted!
+}
+
+void JIT::emitSlow_op_bitnot(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, JITStubs::cti_op_bitnot);
+ stubCall.addArgument(regT0);
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emitSlow_op_jtrue(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, JITStubs::cti_op_jtrue);
+ stubCall.addArgument(regT0);
+ stubCall.call();
+ emitJumpSlowToHot(branchTest32(NonZero, regT0), currentInstruction[2].u.operand + 2);
+}
+
+void JIT::emitSlow_op_bitxor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, JITStubs::cti_op_bitxor);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(regT1);
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emitSlow_op_bitor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, JITStubs::cti_op_bitor);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(regT1);
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emitSlow_op_eq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, JITStubs::cti_op_eq);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(regT1);
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emitSlow_op_neq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, JITStubs::cti_op_neq);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(regT1);
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emitSlow_op_stricteq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, JITStubs::cti_op_stricteq);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(regT1);
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emitSlow_op_nstricteq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, JITStubs::cti_op_nstricteq);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(regT1);
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emitSlow_op_instanceof(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ JITStubCall stubCall(this, JITStubs::cti_op_instanceof);
+ stubCall.addArgument(currentInstruction[2].u.operand, regT2);
+ stubCall.addArgument(currentInstruction[3].u.operand, regT2);
+ stubCall.addArgument(currentInstruction[4].u.operand, regT2);
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emitSlow_op_call(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ compileOpCallSlowCase(currentInstruction, iter, m_callLinkInfoIndex++, op_call);
+}
+
+void JIT::emitSlow_op_call_eval(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ compileOpCallSlowCase(currentInstruction, iter, m_callLinkInfoIndex++, op_call_eval);
+}
+
+void JIT::emitSlow_op_call_varargs(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ compileOpCallVarargsSlowCase(currentInstruction, iter);
+}
+
+void JIT::emitSlow_op_construct(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ compileOpCallSlowCase(currentInstruction, iter, m_callLinkInfoIndex++, op_construct);
+}
+
+void JIT::emitSlow_op_to_jsnumber(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCaseIfNotJSCell(iter, currentInstruction[2].u.operand);
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, JITStubs::cti_op_to_jsnumber);
+ stubCall.addArgument(regT0);
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
diff --git a/JavaScriptCore/jit/JITPropertyAccess.cpp b/JavaScriptCore/jit/JITPropertyAccess.cpp
index ce90ee4..3a6f9b3 100644
--- a/JavaScriptCore/jit/JITPropertyAccess.cpp
+++ b/JavaScriptCore/jit/JITPropertyAccess.cpp
@@ -30,6 +30,7 @@
#include "CodeBlock.h"
#include "JITInlineMethods.h"
+#include "JITStubCall.h"
#include "JSArray.h"
#include "JSFunction.h"
#include "Interpreter.h"
@@ -44,59 +45,240 @@ using namespace std;
namespace JSC {
+void JIT::emit_op_get_by_val(Instruction* currentInstruction)
+{
+ emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
+ emitJumpSlowCaseIfNotImmediateInteger(regT1);
+#if USE(ALTERNATE_JSIMMEDIATE)
+    // This is technically incorrect - we're zero-extending an int32. On the hot path this doesn't matter.
+    // We check the value as if it were a uint32 against m_fastAccessCutoff - which will always fail if the
+    // number was negative, since m_fastAccessCutoff is always less than intmax (the total allocation
+    // size is always less than 4GB). As such, zero-extending will have been correct (and extending the
+    // value to 64 bits is necessary since it's used in the address calculation). We zero-extend rather
+    // than sign-extend since it makes it easier to re-tag the value in the slow case.
+ zeroExtend32ToPtr(regT1, regT1);
+#else
+ emitFastArithImmToInt(regT1);
+#endif
+ emitJumpSlowCaseIfNotJSCell(regT0);
+ addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));
+
+    // This is an array; get the m_storage pointer into regT2, then check if the index is below the fast cutoff.
+ loadPtr(Address(regT0, FIELD_OFFSET(JSArray, m_storage)), regT2);
+ addSlowCase(branch32(AboveOrEqual, regT1, Address(regT0, FIELD_OFFSET(JSArray, m_fastAccessCutoff))));
+
+ // Get the value from the vector
+ loadPtr(BaseIndex(regT2, regT1, ScalePtr, FIELD_OFFSET(ArrayStorage, m_vector[0])), regT0);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+}
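// The hot path above in scalar form. Zero-extending the index means a
// negative int32 becomes a huge unsigned value and fails the cutoff compare,
// so no separate sign check is needed. Names follow the code; the signature
// is illustrative:
#include <cstdint>
static void* getByValFastSketch(void** vector, uint64_t zeroExtendedIndex,
                                uint32_t fastAccessCutoff)
{
    if (zeroExtendedIndex >= fastAccessCutoff)
        return 0; // take the slow case
    return vector[zeroExtendedIndex]; // the BaseIndex load from m_vector[0]
}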
+
+void JIT::emit_op_put_by_val(Instruction* currentInstruction)
+{
+ emitGetVirtualRegisters(currentInstruction[1].u.operand, regT0, currentInstruction[2].u.operand, regT1);
+ emitJumpSlowCaseIfNotImmediateInteger(regT1);
+#if USE(ALTERNATE_JSIMMEDIATE)
+ // See comment in op_get_by_val.
+ zeroExtend32ToPtr(regT1, regT1);
+#else
+ emitFastArithImmToInt(regT1);
+#endif
+ emitJumpSlowCaseIfNotJSCell(regT0);
+ addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));
+
+    // This is an array; get the m_storage pointer into regT2, then check if the index is below the fast cutoff.
+ loadPtr(Address(regT0, FIELD_OFFSET(JSArray, m_storage)), regT2);
+ Jump inFastVector = branch32(Below, regT1, Address(regT0, FIELD_OFFSET(JSArray, m_fastAccessCutoff)));
+    // No; oh well, check if the access is within the vector - if so, we may still be okay.
+ addSlowCase(branch32(AboveOrEqual, regT1, Address(regT2, FIELD_OFFSET(ArrayStorage, m_vectorLength))));
+
+    // This is a write to the slow part of the vector; first, we have to check if this would be the first write to this location.
+    // FIXME: should be able to handle an initial write to the array; increment the number of items in the array, and potentially update the fast access cutoff.
+ addSlowCase(branchTestPtr(Zero, BaseIndex(regT2, regT1, ScalePtr, FIELD_OFFSET(ArrayStorage, m_vector[0]))));
+
+ // All good - put the value into the array.
+ inFastVector.link(this);
+ emitGetVirtualRegister(currentInstruction[3].u.operand, regT0);
+ storePtr(regT0, BaseIndex(regT2, regT1, ScalePtr, FIELD_OFFSET(ArrayStorage, m_vector[0])));
+}
+
+void JIT::emit_op_put_by_index(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, JITStubs::cti_op_put_by_index);
+ stubCall.addArgument(currentInstruction[1].u.operand, regT2);
+ stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
+ stubCall.addArgument(currentInstruction[3].u.operand, regT2);
+ stubCall.call();
+}
+
+void JIT::emit_op_put_getter(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, JITStubs::cti_op_put_getter);
+ stubCall.addArgument(currentInstruction[1].u.operand, regT2);
+ stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
+ stubCall.addArgument(currentInstruction[3].u.operand, regT2);
+ stubCall.call();
+}
+
+void JIT::emit_op_put_setter(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, JITStubs::cti_op_put_setter);
+ stubCall.addArgument(currentInstruction[1].u.operand, regT2);
+ stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
+ stubCall.addArgument(currentInstruction[3].u.operand, regT2);
+ stubCall.call();
+}
+
+void JIT::emit_op_del_by_id(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, JITStubs::cti_op_del_by_id);
+ stubCall.addArgument(currentInstruction[2].u.operand, regT2);
+ stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+
#if !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
-void JIT::compileGetByIdHotPath(int resultVReg, int baseVReg, Identifier* ident, unsigned)
+/* ------------------------------ BEGIN: !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
+
+// Treat these as nops - the call will be handled as a regular get_by_id/op_call pair.
+void JIT::emit_op_method_check(Instruction*) {}
+void JIT::emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&) { ASSERT_NOT_REACHED(); }
+#if ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
+#error "JIT_OPTIMIZE_METHOD_CALLS requires JIT_OPTIMIZE_PROPERTY_ACCESS"
+#endif
+
+void JIT::emit_op_get_by_id(Instruction* currentInstruction)
{
- // As for put_by_id, get_by_id requires the offset of the Structure and the offset of the access to be patched.
- // Additionally, for get_by_id we need patch the offset of the branch to the slow case (we patch this to jump
- // to array-length / prototype access tranpolines, and finally we also the the property-map access offset as a label
- // to jump back to if one of these trampolies finds a match.
+ unsigned resultVReg = currentInstruction[1].u.operand;
+ unsigned baseVReg = currentInstruction[2].u.operand;
+ Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
emitGetVirtualRegister(baseVReg, regT0);
+ JITStubCall stubCall(this, JITStubs::cti_op_get_by_id_generic);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(ImmPtr(ident));
+ stubCall.call(resultVReg);
- emitPutJITStubArg(regT0, 1);
- emitPutJITStubArgConstant(ident, 2);
- emitCTICall(JITStubs::cti_op_get_by_id_generic);
- emitPutVirtualRegister(resultVReg);
+ m_propertyAccessInstructionIndex++;
}
-
-void JIT::compileGetByIdSlowCase(int, int, Identifier*, Vector<SlowCaseEntry>::iterator&, unsigned)
+void JIT::emitSlow_op_get_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&)
{
ASSERT_NOT_REACHED();
}
-void JIT::compilePutByIdHotPath(int baseVReg, Identifier* ident, int valueVReg, unsigned)
+void JIT::emit_op_put_by_id(Instruction* currentInstruction)
{
- // In order to be able to patch both the Structure, and the object offset, we store one pointer,
- // to just after the arguments have been loaded into registers 'hotPathBegin', and we generate code
- // such that the Structure & offset are always at the same distance from this.
+ unsigned baseVReg = currentInstruction[1].u.operand;
+ Identifier* ident = &(m_codeBlock->identifier(currentInstruction[2].u.operand));
+ unsigned valueVReg = currentInstruction[3].u.operand;
emitGetVirtualRegisters(baseVReg, regT0, valueVReg, regT1);
- emitPutJITStubArgConstant(ident, 2);
- emitPutJITStubArg(regT0, 1);
- emitPutJITStubArg(regT1, 3);
- emitCTICall(JITStubs::cti_op_put_by_id_generic);
+ JITStubCall stubCall(this, JITStubs::cti_op_put_by_id_generic);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(ImmPtr(ident));
+ stubCall.addArgument(regT1);
+ stubCall.call();
+
+ m_propertyAccessInstructionIndex++;
}
-void JIT::compilePutByIdSlowCase(int, Identifier*, int, Vector<SlowCaseEntry>::iterator&, unsigned)
+void JIT::emitSlow_op_put_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&)
{
ASSERT_NOT_REACHED();
}
-#else
+#else // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+
+/* ------------------------------ BEGIN: ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
-void JIT::compileGetByIdHotPath(int resultVReg, int baseVReg, Identifier*, unsigned propertyAccessInstructionIndex)
+#if ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
+
+void JIT::emit_op_method_check(Instruction* currentInstruction)
+{
+ // Assert that the following instruction is a get_by_id.
+ ASSERT(m_interpreter->getOpcodeID((currentInstruction + OPCODE_LENGTH(op_method_check))->u.opcode) == op_get_by_id);
+
+ currentInstruction += OPCODE_LENGTH(op_method_check);
+ unsigned resultVReg = currentInstruction[1].u.operand;
+ unsigned baseVReg = currentInstruction[2].u.operand;
+ Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
+
+ emitGetVirtualRegister(baseVReg, regT0);
+
+ // Do the method check - check the object & its prototype's structure inline (this is the common case).
+ m_methodCallCompilationInfo.append(MethodCallCompilationInfo(m_propertyAccessInstructionIndex));
+ MethodCallCompilationInfo& info = m_methodCallCompilationInfo.last();
+ Jump notCell = emitJumpIfNotJSCell(regT0);
+ Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, FIELD_OFFSET(JSCell, m_structure)), info.structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
+ DataLabelPtr protoStructureToCompare, protoObj = moveWithPatch(ImmPtr(0), regT1);
+ Jump protoStructureCheck = branchPtrWithPatch(NotEqual, Address(regT1, FIELD_OFFSET(JSCell, m_structure)), protoStructureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
+
+    // This will be relinked to materialize the function directly, without doing a load.
+ DataLabelPtr putFunction = moveWithPatch(ImmPtr(0), regT0);
+ Jump match = jump();
+
+ ASSERT(differenceBetween(info.structureToCompare, protoObj) == patchOffsetMethodCheckProtoObj);
+ ASSERT(differenceBetween(info.structureToCompare, protoStructureToCompare) == patchOffsetMethodCheckProtoStruct);
+ ASSERT(differenceBetween(info.structureToCompare, putFunction) == patchOffsetMethodCheckPutFunction);
+
+ // Link the failure cases here.
+ notCell.link(this);
+ structureCheck.link(this);
+ protoStructureCheck.link(this);
+
+    // Do a regular(ish) get_by_id (the slow case will be linked to
+    // cti_op_get_by_id_method_check instead of cti_op_get_by_id).
+ compileGetByIdHotPath(resultVReg, baseVReg, ident, m_propertyAccessInstructionIndex++);
+
+ match.link(this);
+ emitPutVirtualRegister(resultVReg);
+
+ // We've already generated the following get_by_id, so make sure it's skipped over.
+ m_bytecodeIndex += OPCODE_LENGTH(op_get_by_id);
+}
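// Shape of the patched fast path: two structure checks guard a cached
// function constant, so a hit skips the property load entirely. The cached
// values correspond to the patchable immediates above; types here are
// illustrative:
struct CellSketch { void* structure; };

static void* methodCheckSketch(CellSketch* base, CellSketch* cachedProto,
                               void* cachedStructure, void* cachedProtoStructure,
                               void* cachedFunction, void* (*getByIdSlow)(CellSketch*))
{
    if (base->structure == cachedStructure
            && cachedProto->structure == cachedProtoStructure)
        return cachedFunction; // patched-in constant; no load at all
    return getByIdSlow(base);  // fall back to the regular(ish) get_by_id
}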
+
+void JIT::emitSlow_op_method_check(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ currentInstruction += OPCODE_LENGTH(op_method_check);
+ unsigned resultVReg = currentInstruction[1].u.operand;
+ unsigned baseVReg = currentInstruction[2].u.operand;
+ Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
+
+ compileGetByIdSlowCase(resultVReg, baseVReg, ident, iter, m_propertyAccessInstructionIndex++, true);
+
+ // We've already generated the following get_by_id, so make sure it's skipped over.
+ m_bytecodeIndex += OPCODE_LENGTH(op_get_by_id);
+}
+
+#else //!ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
+
+// Treat these as nops - the call will be handled as a regular get_by_id/op_call pair.
+void JIT::emit_op_method_check(Instruction*) {}
+void JIT::emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&) { ASSERT_NOT_REACHED(); }
+
+#endif
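
For orientation, the fast path planted above reduces to two pointer comparisons against patched constants. A minimal standalone sketch of that logic (not part of the patch; the struct and names are illustrative - the real cache lives in patched machine-code immediates, not a struct):

    // Sketch only: conceptual shape of the method-check inline cache.
    struct MethodCheckICSketch {
        void* structure;       // patched: the base object's Structure
        void* protoStructure;  // patched: the prototype's Structure
        void* cachedFunction;  // patched: the resolved function
    };

    // Returns the cached function when both checks pass; a null return
    // models falling through to the regular get_by_id path.
    inline void* methodCheckFastPath(void* baseStructure, void* protoStructureNow, const MethodCheckICSketch& ic)
    {
        if (baseStructure == ic.structure && protoStructureNow == ic.protoStructure)
            return ic.cachedFunction;
        return 0; // slow path: regular get_by_id
    }
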
+
+void JIT::emit_op_get_by_id(Instruction* currentInstruction)
+{
+ unsigned resultVReg = currentInstruction[1].u.operand;
+ unsigned baseVReg = currentInstruction[2].u.operand;
+ Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
+
+ emitGetVirtualRegister(baseVReg, regT0);
+ compileGetByIdHotPath(resultVReg, baseVReg, ident, m_propertyAccessInstructionIndex++);
+ emitPutVirtualRegister(resultVReg);
+}
+
+void JIT::compileGetByIdHotPath(int, int baseVReg, Identifier*, unsigned propertyAccessInstructionIndex)
{
// As for put_by_id, get_by_id requires the offset of the Structure and the offset of the access to be patched.
// Additionally, for get_by_id we need to patch the offset of the branch to the slow case (we patch this to jump
// to array-length / prototype access trampolines), and we also record the property-map access offset as a label
// to jump back to if one of these trampolines finds a match.
- emitGetVirtualRegister(baseVReg, regT0);
-
emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);
Label hotPathBegin(this);
@@ -108,17 +290,28 @@ void JIT::compileGetByIdHotPath(int resultVReg, int baseVReg, Identifier*, unsig
ASSERT(differenceBetween(hotPathBegin, structureToCompare) == patchOffsetGetByIdStructure);
ASSERT(differenceBetween(hotPathBegin, structureCheck) == patchOffsetGetByIdBranchToSlowCase);
- loadPtr(Address(regT0, FIELD_OFFSET(JSObject, m_propertyStorage)), regT0);
+ Label externalLoad = loadPtrWithPatchToLEA(Address(regT0, FIELD_OFFSET(JSObject, m_externalStorage)), regT0);
+ Label externalLoadComplete(this);
+ ASSERT(differenceBetween(hotPathBegin, externalLoad) == patchOffsetGetByIdExternalLoad);
+ ASSERT(differenceBetween(externalLoad, externalLoadComplete) == patchLengthGetByIdExternalLoad);
+
DataLabel32 displacementLabel = loadPtrWithAddressOffsetPatch(Address(regT0, patchGetByIdDefaultOffset), regT0);
ASSERT(differenceBetween(hotPathBegin, displacementLabel) == patchOffsetGetByIdPropertyMapOffset);
Label putResult(this);
ASSERT(differenceBetween(hotPathBegin, putResult) == patchOffsetGetByIdPutResult);
- emitPutVirtualRegister(resultVReg);
}
+void JIT::emitSlow_op_get_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned resultVReg = currentInstruction[1].u.operand;
+ unsigned baseVReg = currentInstruction[2].u.operand;
+ Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
+
+ compileGetByIdSlowCase(resultVReg, baseVReg, ident, iter, m_propertyAccessInstructionIndex++, false);
+}
-void JIT::compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, unsigned propertyAccessInstructionIndex)
+void JIT::compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, unsigned propertyAccessInstructionIndex, bool isMethodCheck)
{
// As for the hot path of get_by_id, above, we ensure that we can use an architecture specific offset
// so that we only need to track one pointer into the slow case code - we track a pointer to the location
@@ -132,10 +325,10 @@ void JIT::compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident
#ifndef NDEBUG
Label coldPathBegin(this);
#endif
- emitPutJITStubArg(regT0, 1);
- emitPutJITStubArgConstant(ident, 2);
- Call call = emitCTICall(JITStubs::cti_op_get_by_id);
- emitPutVirtualRegister(resultVReg);
+ JITStubCall stubCall(this, isMethodCheck ? JITStubs::cti_op_get_by_id_method_check : JITStubs::cti_op_get_by_id);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(ImmPtr(ident));
+ Call call = stubCall.call(resultVReg);
ASSERT(differenceBetween(coldPathBegin, call) == patchOffsetGetByIdSlowCaseCall);
@@ -143,8 +336,13 @@ void JIT::compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident
m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].callReturnLocation = call;
}
-void JIT::compilePutByIdHotPath(int baseVReg, Identifier*, int valueVReg, unsigned propertyAccessInstructionIndex)
+void JIT::emit_op_put_by_id(Instruction* currentInstruction)
{
+ unsigned baseVReg = currentInstruction[1].u.operand;
+ unsigned valueVReg = currentInstruction[3].u.operand;
+
+ unsigned propertyAccessInstructionIndex = m_propertyAccessInstructionIndex++;
+
// In order to be able to patch both the Structure and the object offset, we store one pointer
// to just after the point where the arguments have been loaded into registers ('hotPathBegin'), and we generate code
// such that the Structure & offset are always at the same distance from this.
@@ -163,34 +361,64 @@ void JIT::compilePutByIdHotPath(int baseVReg, Identifier*, int valueVReg, unsign
ASSERT(differenceBetween(hotPathBegin, structureToCompare) == patchOffsetPutByIdStructure);
// Plant a load from a bogus offset in the object's property map; we will patch this later, if it is to be used.
- loadPtr(Address(regT0, FIELD_OFFSET(JSObject, m_propertyStorage)), regT0);
+ Label externalLoad = loadPtrWithPatchToLEA(Address(regT0, FIELD_OFFSET(JSObject, m_externalStorage)), regT0);
+ Label externalLoadComplete(this);
+ ASSERT(differenceBetween(hotPathBegin, externalLoad) == patchOffsetPutByIdExternalLoad);
+ ASSERT(differenceBetween(externalLoad, externalLoadComplete) == patchLengthPutByIdExternalLoad);
+
DataLabel32 displacementLabel = storePtrWithAddressOffsetPatch(regT1, Address(regT0, patchGetByIdDefaultOffset));
ASSERT(differenceBetween(hotPathBegin, displacementLabel) == patchOffsetPutByIdPropertyMapOffset);
}
-void JIT::compilePutByIdSlowCase(int baseVReg, Identifier* ident, int, Vector<SlowCaseEntry>::iterator& iter, unsigned propertyAccessInstructionIndex)
+void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
+ unsigned baseVReg = currentInstruction[1].u.operand;
+ Identifier* ident = &(m_codeBlock->identifier(currentInstruction[2].u.operand));
+
+ unsigned propertyAccessInstructionIndex = m_propertyAccessInstructionIndex++;
+
linkSlowCaseIfNotJSCell(iter, baseVReg);
linkSlowCase(iter);
- emitPutJITStubArgConstant(ident, 2);
- emitPutJITStubArg(regT0, 1);
- emitPutJITStubArg(regT1, 3);
- Call call = emitCTICall(JITStubs::cti_op_put_by_id);
+ JITStubCall stubCall(this, JITStubs::cti_op_put_by_id);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(ImmPtr(ident));
+ stubCall.addArgument(regT1);
+ Call call = stubCall.call();
// Track the location of the call; this will be used to recover patch information.
m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].callReturnLocation = call;
}
-static JSObject* resizePropertyStorage(JSObject* baseObject, int32_t oldSize, int32_t newSize)
+// Compile a store into an object's property storage. May overwrite base.
+void JIT::compilePutDirectOffset(RegisterID base, RegisterID value, Structure* structure, size_t cachedOffset)
+{
+ int offset = cachedOffset * sizeof(JSValue);
+ if (structure->isUsingInlineStorage())
+ offset += FIELD_OFFSET(JSObject, m_inlineStorage);
+ else
+ loadPtr(Address(base, FIELD_OFFSET(JSObject, m_externalStorage)), base);
+ storePtr(value, Address(base, offset));
+}
+
+// Compile a load from an object's property storage. May overwrite base.
+void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, Structure* structure, size_t cachedOffset)
{
- baseObject->allocatePropertyStorage(oldSize, newSize);
- return baseObject;
+ int offset = cachedOffset * sizeof(JSValue);
+ if (structure->isUsingInlineStorage())
+ offset += FIELD_OFFSET(JSObject, m_inlineStorage);
+ else
+ loadPtr(Address(base, FIELD_OFFSET(JSObject, m_externalStorage)), base);
+ loadPtr(Address(base, offset), result);
}
-static inline bool transitionWillNeedStorageRealloc(Structure* oldStructure, Structure* newStructure)
+void JIT::compileGetDirectOffset(JSObject* base, RegisterID result, size_t cachedOffset)
{
- return oldStructure->propertyStorageCapacity() != newStructure->propertyStorageCapacity();
+ if (base->isUsingInlineStorage())
+ loadPtr(static_cast<void*>(&base->m_inlineStorage[cachedOffset]), result);
+ else
+ loadPtr(static_cast<void*>(&base->m_externalStorage[cachedOffset]), result);
}
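
Both helpers encode the same addressing rule: inline storage is addressed relative to the object itself, while external storage costs one extra pointer load. A hedged standalone sketch of the computation (slotSize stands in for sizeof(JSValue); not part of the patch):

    // Sketch only: address of property slot `cachedOffset`.
    #include <stddef.h>

    char* propertySlotAddress(char* object, size_t inlineStorageOffset,
                              char* externalStorage /* already loaded from the object */,
                              bool usingInlineStorage, size_t cachedOffset, size_t slotSize)
    {
        if (usingInlineStorage)
            return object + inlineStorageOffset + cachedOffset * slotSize; // no extra load
        return externalStorage + cachedOffset * slotSize;                  // one extra indirection
    }
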
void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ProcessorReturnAddress returnAddress)
@@ -201,7 +429,7 @@ void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure
failureCases.append(branchPtr(NotEqual, Address(regT0, FIELD_OFFSET(JSCell, m_structure)), ImmPtr(oldStructure)));
JumpList successCases;
- // ecx = baseObject
+ // ecx = baseObject
loadPtr(Address(regT0, FIELD_OFFSET(JSCell, m_structure)), regT2);
// proto(ecx) = baseObject->structure()->prototype()
failureCases.append(branch32(NotEqual, Address(regT2, FIELD_OFFSET(Structure, m_typeInfo) + FIELD_OFFSET(TypeInfo, m_type)), Imm32(ObjectType)));
@@ -211,7 +439,7 @@ void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure
// ecx = baseObject->m_structure
for (RefPtr<Structure>* it = chain->head(); *it; ++it) {
// null check the prototype
- successCases.append(branchPtr(Equal, regT2, ImmPtr(JSValuePtr::encode(jsNull()))));
+ successCases.append(branchPtr(Equal, regT2, ImmPtr(JSValue::encode(jsNull()))));
// Check the structure id
failureCases.append(branchPtr(NotEqual, Address(regT2, FIELD_OFFSET(JSCell, m_structure)), ImmPtr(it->get())));
@@ -226,23 +454,21 @@ void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure
Call callTarget;
// emit a call only if storage realloc is needed
- bool willNeedStorageRealloc = transitionWillNeedStorageRealloc(oldStructure, newStructure);
+ bool willNeedStorageRealloc = oldStructure->propertyStorageCapacity() != newStructure->propertyStorageCapacity();
if (willNeedStorageRealloc) {
- pop(X86::ebx);
-#if PLATFORM(X86_64)
- move(Imm32(newStructure->propertyStorageCapacity()), regT1);
- move(Imm32(oldStructure->propertyStorageCapacity()), X86::esi);
- move(regT0, X86::edi);
- callTarget = call();
-#else
- push(Imm32(newStructure->propertyStorageCapacity()));
- push(Imm32(oldStructure->propertyStorageCapacity()));
- push(regT0);
- callTarget = call();
- addPtr(Imm32(3 * sizeof(void*)), X86::esp);
-#endif
- emitGetJITStubArg(3, regT1);
- push(X86::ebx);
+ // This trampoline was called like a JIT stub; before we can call again we need to
+ // remove the return address from the stack, to prevent the stack from becoming misaligned.
+ preserveReturnAddressAfterCall(regT3);
+
+ JITStubCall stubCall(this, JITStubs::cti_op_put_by_id_transition_realloc);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(Imm32(oldStructure->propertyStorageCapacity()));
+ stubCall.addArgument(Imm32(newStructure->propertyStorageCapacity()));
+ stubCall.addArgument(regT1); // This argument is not used in the stub; we set it up on the stack so that it can be restored, below.
+ stubCall.call(regT0);
+ emitGetJITStubArg(4, regT1);
+
+ restoreReturnAddressBeforeReturn(regT3);
}
// Assumes m_refCount can be decremented easily, refcount decrement is safe as
@@ -252,8 +478,7 @@ void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure
storePtr(ImmPtr(newStructure), Address(regT0, FIELD_OFFSET(JSCell, m_structure)));
// write the value
- loadPtr(Address(regT0, FIELD_OFFSET(JSObject, m_propertyStorage)), regT0);
- storePtr(regT1, Address(regT0, cachedOffset * sizeof(JSValuePtr)));
+ compilePutDirectOffset(regT0, regT1, newStructure, cachedOffset);
ret();
@@ -262,47 +487,74 @@ void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure
restoreArgumentReferenceForTrampoline();
Call failureCall = tailRecursiveCall();
- void* code = m_assembler.executableCopy(m_codeBlock->executablePool());
- PatchBuffer patchBuffer(code);
+ PatchBuffer patchBuffer(this, m_codeBlock->executablePool());
- patchBuffer.link(failureCall, JITStubs::cti_op_put_by_id_fail);
+ patchBuffer.link(failureCall, FunctionPtr(JITStubs::cti_op_put_by_id_fail));
- if (willNeedStorageRealloc)
- patchBuffer.link(callTarget, resizePropertyStorage);
+ if (willNeedStorageRealloc) {
+ ASSERT(m_calls.size() == 1);
+ patchBuffer.link(m_calls[0].from, FunctionPtr(JITStubs::cti_op_put_by_id_transition_realloc));
+ }
- stubInfo->stubRoutine = patchBuffer.entry();
-
- returnAddress.relinkCallerToFunction(code);
+ CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
+ stubInfo->stubRoutine = entryLabel;
+ returnAddress.relinkCallerToTrampoline(entryLabel);
}
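
Stripped of codegen details, the transition stub above performs a check / realloc / commit / write sequence. A rough standalone model under hypothetical types (SketchObject and SketchStructure are not WebKit types, and inline storage is simplified away; sketch only):

    #include <stddef.h>
    #include <stdlib.h>

    // Sketch only: runtime behaviour the generated transition stub encodes.
    struct SketchStructure { size_t capacity; };
    struct SketchObject { SketchStructure* structure; void** storage; };

    bool putByIdTransitionSketch(SketchObject* o, SketchStructure* oldS, SketchStructure* newS,
                                 size_t cachedOffset, void* value)
    {
        if (o->structure != oldS)
            return false;                       // bail to the slow path (failureCases)
        if (oldS->capacity != newS->capacity)   // willNeedStorageRealloc
            o->storage = (void**)realloc(o->storage, newS->capacity * sizeof(void*));
        o->structure = newS;                    // commit the Structure transition
        o->storage[cachedOffset] = value;       // write the value
        return true;
    }
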
void JIT::patchGetByIdSelf(StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ProcessorReturnAddress returnAddress)
{
// We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
// Should probably go to JITStubs::cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
- returnAddress.relinkCallerToFunction(JITStubs::cti_op_get_by_id_self_fail);
+ returnAddress.relinkCallerToFunction(FunctionPtr(JITStubs::cti_op_get_by_id_self_fail));
+
+ int offset = sizeof(JSValue) * cachedOffset;
+
+ // If we're patching to use inline storage, convert the initial load to a lea; this avoids the extra load
+ // and makes the subsequent load's offset automatically correct.
+ if (structure->isUsingInlineStorage())
+ stubInfo->hotPathBegin.instructionAtOffset(patchOffsetGetByIdExternalLoad).repatchLoadPtrToLEA();
// Patch the offset into the property map to load from, then patch the Structure to look for.
stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetGetByIdStructure).repatch(structure);
- stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetGetByIdPropertyMapOffset).repatch(cachedOffset * sizeof(JSValuePtr));
+ stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetGetByIdPropertyMapOffset).repatch(offset);
+}
+
+void JIT::patchMethodCallProto(MethodCallLinkInfo& methodCallLinkInfo, JSFunction* callee, Structure* structure, JSObject* proto)
+{
+ ASSERT(!methodCallLinkInfo.cachedStructure);
+ methodCallLinkInfo.cachedStructure = structure;
+ structure->ref();
+
+ methodCallLinkInfo.structureLabel.repatch(structure);
+ methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoObj).repatch(proto);
+ methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoStruct).repatch(proto->structure());
+ methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckPutFunction).repatch(callee);
}
void JIT::patchPutByIdReplace(StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ProcessorReturnAddress returnAddress)
{
// We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
// Should probably go to JITStubs::cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
- returnAddress.relinkCallerToFunction(JITStubs::cti_op_put_by_id_generic);
+ returnAddress.relinkCallerToFunction(FunctionPtr(JITStubs::cti_op_put_by_id_generic));
+
+ int offset = sizeof(JSValue) * cachedOffset;
+
+ // If we're patching to use inline storage, convert the initial load to a lea; this avoids the extra load
+ // and makes the subsequent load's offset automatically correct.
+ if (structure->isUsingInlineStorage())
+ stubInfo->hotPathBegin.instructionAtOffset(patchOffsetPutByIdExternalLoad).repatchLoadPtrToLEA();
// Patch the offset into the property map to load from, then patch the Structure to look for.
stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetPutByIdStructure).repatch(structure);
- stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset).repatch(cachedOffset * sizeof(JSValuePtr));
+ stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset).repatch(offset);
}
void JIT::privateCompilePatchGetArrayLength(ProcessorReturnAddress returnAddress)
{
- StructureStubInfo* stubInfo = &m_codeBlock->getStubInfo(returnAddress);
+ StructureStubInfo* stubInfo = &m_codeBlock->getStubInfo(returnAddress.addressForLookup());
// We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
- returnAddress.relinkCallerToFunction(JITStubs::cti_op_get_by_id_array_fail);
+ returnAddress.relinkCallerToFunction(FunctionPtr(JITStubs::cti_op_get_by_id_array_fail));
// Check eax is an array
Jump failureCases1 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr));
@@ -316,8 +568,7 @@ void JIT::privateCompilePatchGetArrayLength(ProcessorReturnAddress returnAddress
emitFastArithIntToImmNoCheck(regT2, regT0);
Jump success = jump();
- void* code = m_assembler.executableCopy(m_codeBlock->executablePool());
- PatchBuffer patchBuffer(code);
+ PatchBuffer patchBuffer(this, m_codeBlock->executablePool());
// Use the patch information to link the failure cases back to the original slow case routine.
CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
@@ -328,7 +579,7 @@ void JIT::privateCompilePatchGetArrayLength(ProcessorReturnAddress returnAddress
patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
// Track the stub we have created so that it will be deleted later.
- CodeLocationLabel entryLabel = patchBuffer.entry();
+ CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
stubInfo->stubRoutine = entryLabel;
// Finally patch the jump to slow case back in the hot path to jump here instead.
@@ -336,42 +587,14 @@ void JIT::privateCompilePatchGetArrayLength(ProcessorReturnAddress returnAddress
jumpLocation.relink(entryLabel);
}
-void JIT::privateCompileGetByIdSelf(StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ProcessorReturnAddress returnAddress)
-{
- // Check eax is an object of the right Structure.
- Jump failureCases1 = emitJumpIfNotJSCell(regT0);
- Jump failureCases2 = checkStructure(regT0, structure);
-
- // Checks out okay! - getDirectOffset
- loadPtr(Address(regT0, FIELD_OFFSET(JSObject, m_propertyStorage)), regT0);
- loadPtr(Address(regT0, cachedOffset * sizeof(JSValuePtr)), regT0);
- ret();
-
- Call failureCases1Call = makeTailRecursiveCall(failureCases1);
- Call failureCases2Call = makeTailRecursiveCall(failureCases2);
-
- void* code = m_assembler.executableCopy(m_codeBlock->executablePool());
- PatchBuffer patchBuffer(code);
-
- patchBuffer.link(failureCases1Call, JITStubs::cti_op_get_by_id_self_fail);
- patchBuffer.link(failureCases2Call, JITStubs::cti_op_get_by_id_self_fail);
-
- stubInfo->stubRoutine = patchBuffer.entry();
-
- returnAddress.relinkCallerToFunction(code);
-}
-
void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, ProcessorReturnAddress returnAddress, CallFrame* callFrame)
{
-#if USE(CTI_REPATCH_PIC)
// We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
- returnAddress.relinkCallerToFunction(JITStubs::cti_op_get_by_id_proto_list);
+ returnAddress.relinkCallerToFunction(FunctionPtr(JITStubs::cti_op_get_by_id_proto_list));
// The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
// referencing the prototype object - let's speculatively load its table nice and early!)
JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
- PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
- loadPtr(static_cast<void*>(protoPropertyStorage), regT1);
// Check eax is an object of the right Structure.
Jump failureCases1 = checkStructure(regT0, structure);
@@ -386,12 +609,11 @@ void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* str
#endif
// Checks out okay! - getDirectOffset
- loadPtr(Address(regT1, cachedOffset * sizeof(JSValuePtr)), regT0);
+ compileGetDirectOffset(protoObject, regT0, cachedOffset);
Jump success = jump();
- void* code = m_assembler.executableCopy(m_codeBlock->executablePool());
- PatchBuffer patchBuffer(code);
+ PatchBuffer patchBuffer(this, m_codeBlock->executablePool());
// Use the patch information to link the failure cases back to the original slow case routine.
CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
@@ -402,56 +624,21 @@ void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* str
patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
// Track the stub we have created so that it will be deleted later.
- CodeLocationLabel entryLabel = patchBuffer.entry();
+ CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
stubInfo->stubRoutine = entryLabel;
// Finally patch the jump to slow case back in the hot path to jump here instead.
CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
jumpLocation.relink(entryLabel);
-#else
- // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
- // referencing the prototype object - let's speculatively load it's table nice and early!)
- JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
- PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
- loadPtr(protoPropertyStorage, regT1);
-
- // Check eax is an object of the right Structure.
- Jump failureCases1 = emitJumpIfNotJSCell(regT0);
- Jump failureCases2 = checkStructure(regT0, structure);
-
- // Check the prototype object's Structure had not changed.
- Structure** prototypeStructureAddress = &(protoObject->m_structure);
- Jump failureCases3 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
-
- // Checks out okay! - getDirectOffset
- loadPtr(Address(regT1, cachedOffset * sizeof(JSValuePtr)), regT0);
-
- ret();
-
- void* code = m_assembler.executableCopy(m_codeBlock->executablePool());
- PatchBuffer patchBuffer(code);
-
- patchBuffer.link(failureCases1, JITStubs::cti_op_get_by_id_proto_fail);
- patchBuffer.link(failureCases2, JITStubs::cti_op_get_by_id_proto_fail);
- patchBuffer.link(failureCases3, JITStubs::cti_op_get_by_id_proto_fail);
-
- stubInfo->stubRoutine = patchBuffer.entry();
-
- returnAddress.relinkCallerToFunction(code);
-#endif
}
-#if USE(CTI_REPATCH_PIC)
void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, size_t cachedOffset)
{
Jump failureCase = checkStructure(regT0, structure);
- loadPtr(Address(regT0, FIELD_OFFSET(JSObject, m_propertyStorage)), regT0);
- loadPtr(Address(regT0, cachedOffset * sizeof(JSValuePtr)), regT0);
+ compileGetDirectOffset(regT0, regT0, structure, cachedOffset);
Jump success = jump();
- void* code = m_assembler.executableCopy(m_codeBlock->executablePool());
- ASSERT(code);
- PatchBuffer patchBuffer(code);
+ PatchBuffer patchBuffer(this, m_codeBlock->executablePool());
// Use the patch information to link the failure cases back to the original slow case routine.
CodeLocationLabel lastProtoBegin = polymorphicStructures->list[currentIndex - 1].stubRoutine;
@@ -463,7 +650,7 @@ void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, Polymorphic
// On success return back to the hot path code, at a point where it will perform the store to dest for us.
patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
- CodeLocationLabel entryLabel = patchBuffer.entry();
+ CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
structure->ref();
polymorphicStructures->list[currentIndex].set(entryLabel, structure);
@@ -478,8 +665,6 @@ void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, Polymorphi
// The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
// referencing the prototype object - let's speculatively load its table nice and early!)
JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
- PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
- loadPtr(protoPropertyStorage, regT1);
// Check eax is an object of the right Structure.
Jump failureCases1 = checkStructure(regT0, structure);
@@ -494,12 +679,11 @@ void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, Polymorphi
#endif
// Checks out okay! - getDirectOffset
- loadPtr(Address(regT1, cachedOffset * sizeof(JSValuePtr)), regT0);
+ compileGetDirectOffset(protoObject, regT0, cachedOffset);
Jump success = jump();
- void* code = m_assembler.executableCopy(m_codeBlock->executablePool());
- PatchBuffer patchBuffer(code);
+ PatchBuffer patchBuffer(this, m_codeBlock->executablePool());
// Use the patch information to link the failure cases back to the original slow case routine.
CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
@@ -509,7 +693,7 @@ void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, Polymorphi
// On success return back to the hot path code, at a point where it will perform the store to dest for us.
patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
- CodeLocationLabel entryLabel = patchBuffer.entry();
+ CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
structure->ref();
prototypeStructure->ref();
@@ -548,13 +732,10 @@ void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, Polymorphi
}
ASSERT(protoObject);
- PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
- loadPtr(protoPropertyStorage, regT1);
- loadPtr(Address(regT1, cachedOffset * sizeof(JSValuePtr)), regT0);
+ compileGetDirectOffset(protoObject, regT0, cachedOffset);
Jump success = jump();
- void* code = m_assembler.executableCopy(m_codeBlock->executablePool());
- PatchBuffer patchBuffer(code);
+ PatchBuffer patchBuffer(this, m_codeBlock->executablePool());
// Use the patch information to link the failure cases back to the original slow case routine.
CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
@@ -564,7 +745,7 @@ void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, Polymorphi
// On success return back to the hot path code, at a point where it will perform the store to dest for us.
patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
- CodeLocationLabel entryLabel = patchBuffer.entry();
+ CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
// Track the stub we have created so that it will be deleted later.
structure->ref();
@@ -575,13 +756,11 @@ void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, Polymorphi
CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
jumpLocation.relink(entryLabel);
}
-#endif
void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, ProcessorReturnAddress returnAddress, CallFrame* callFrame)
{
-#if USE(CTI_REPATCH_PIC)
// We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
- returnAddress.relinkCallerToFunction(JITStubs::cti_op_get_by_id_proto_list);
+ returnAddress.relinkCallerToFunction(FunctionPtr(JITStubs::cti_op_get_by_id_proto_list));
ASSERT(count);
@@ -608,13 +787,10 @@ void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* str
}
ASSERT(protoObject);
- PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
- loadPtr(protoPropertyStorage, regT1);
- loadPtr(Address(regT1, cachedOffset * sizeof(JSValuePtr)), regT0);
+ compileGetDirectOffset(protoObject, regT0, cachedOffset);
Jump success = jump();
- void* code = m_assembler.executableCopy(m_codeBlock->executablePool());
- PatchBuffer patchBuffer(code);
+ PatchBuffer patchBuffer(this, m_codeBlock->executablePool());
// Use the patch information to link the failure cases back to the original slow case routine.
patchBuffer.link(bucketsOfFail, stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall));
@@ -623,80 +799,17 @@ void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* str
patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
// Track the stub we have created so that it will be deleted later.
- CodeLocationLabel entryLabel = patchBuffer.entry();
+ CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
stubInfo->stubRoutine = entryLabel;
// Finally patch the jump to slow case back in the hot path to jump here instead.
CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
jumpLocation.relink(entryLabel);
-#else
- ASSERT(count);
-
- JumpList bucketsOfFail;
-
- // Check eax is an object of the right Structure.
- bucketsOfFail.append(emitJumpIfNotJSCell(regT0));
- bucketsOfFail.append(checkStructure(regT0, structure));
-
- Structure* currStructure = structure;
- RefPtr<Structure>* chainEntries = chain->head();
- JSObject* protoObject = 0;
- for (unsigned i = 0; i < count; ++i) {
- protoObject = asObject(currStructure->prototypeForLookup(callFrame));
- currStructure = chainEntries[i].get();
-
- // Check the prototype object's Structure had not changed.
- Structure** prototypeStructureAddress = &(protoObject->m_structure);
-#if PLATFORM(X86_64)
- move(ImmPtr(currStructure), regT3);
- bucketsOfFail.append(branchPtr(NotEqual, regT3, AbsoluteAddress(prototypeStructureAddress)));
-#else
- bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
-#endif
- }
- ASSERT(protoObject);
-
- PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
- loadPtr(protoPropertyStorage, regT1);
- loadPtr(Address(regT1, cachedOffset * sizeof(JSValuePtr)), regT0);
- ret();
-
- void* code = m_assembler.executableCopy(m_codeBlock->executablePool());
-
- patchBuffer.link(bucketsOfFail, JITStubs::cti_op_get_by_id_proto_fail);
-
- stubInfo->stubRoutine = patchBuffer.entry();
-
- returnAddress.relinkCallerToFunction(code);
-#endif
}
-void JIT::privateCompilePutByIdReplace(StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ProcessorReturnAddress returnAddress)
-{
- // Check eax is an object of the right Structure.
- Jump failureCases1 = emitJumpIfNotJSCell(regT0);
- Jump failureCases2 = checkStructure(regT0, structure);
-
- // checks out okay! - putDirectOffset
- loadPtr(Address(regT0, FIELD_OFFSET(JSObject, m_propertyStorage)), regT0);
- storePtr(regT1, Address(regT0, cachedOffset * sizeof(JSValuePtr)));
- ret();
-
- Call failureCases1Call = makeTailRecursiveCall(failureCases1);
- Call failureCases2Call = makeTailRecursiveCall(failureCases2);
+/* ------------------------------ END: !ENABLE / ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
- void* code = m_assembler.executableCopy(m_codeBlock->executablePool());
- PatchBuffer patchBuffer(code);
-
- patchBuffer.link(failureCases1Call, JITStubs::cti_op_put_by_id_fail);
- patchBuffer.link(failureCases2Call, JITStubs::cti_op_put_by_id_fail);
-
- stubInfo->stubRoutine = patchBuffer.entry();
-
- returnAddress.relinkCallerToFunction(code);
-}
-
-#endif
+#endif // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
} // namespace JSC
diff --git a/JavaScriptCore/jit/JITStubCall.h b/JavaScriptCore/jit/JITStubCall.h
new file mode 100644
index 0000000..6c9ccc1
--- /dev/null
+++ b/JavaScriptCore/jit/JITStubCall.h
@@ -0,0 +1,170 @@
+/*
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef JITStubCall_h
+#define JITStubCall_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(JIT)
+
+namespace JSC {
+
+ class JITStubCall {
+ public:
+ JITStubCall(JIT* jit, JSObject* (JIT_STUB *stub)(STUB_ARGS_DECLARATION))
+ : m_jit(jit)
+ , m_stub(reinterpret_cast<void*>(stub))
+ , m_returnType(Value)
+ , m_argumentIndex(1) // Index 0 is reserved for restoreArgumentReference();
+ {
+ }
+
+ JITStubCall(JIT* jit, JSPropertyNameIterator* (JIT_STUB *stub)(STUB_ARGS_DECLARATION))
+ : m_jit(jit)
+ , m_stub(reinterpret_cast<void*>(stub))
+ , m_returnType(Value)
+ , m_argumentIndex(1) // Index 0 is reserved for restoreArgumentReference();
+ {
+ }
+
+ JITStubCall(JIT* jit, void* (JIT_STUB *stub)(STUB_ARGS_DECLARATION))
+ : m_jit(jit)
+ , m_stub(reinterpret_cast<void*>(stub))
+ , m_returnType(Value)
+ , m_argumentIndex(1) // Index 0 is reserved for restoreArgumentReference();
+ {
+ }
+
+ JITStubCall(JIT* jit, int (JIT_STUB *stub)(STUB_ARGS_DECLARATION))
+ : m_jit(jit)
+ , m_stub(reinterpret_cast<void*>(stub))
+ , m_returnType(Value)
+ , m_argumentIndex(1) // Index 0 is reserved for restoreArgumentReference();
+ {
+ }
+
+ JITStubCall(JIT* jit, void (JIT_STUB *stub)(STUB_ARGS_DECLARATION))
+ : m_jit(jit)
+ , m_stub(reinterpret_cast<void*>(stub))
+ , m_returnType(Void)
+ , m_argumentIndex(1) // Index 0 is reserved for restoreArgumentReference();
+ {
+ }
+
+ // Arguments are added first to last.
+
+ void addArgument(JIT::Imm32 argument)
+ {
+ m_jit->poke(argument, m_argumentIndex);
+ ++m_argumentIndex;
+ }
+
+ void addArgument(JIT::ImmPtr argument)
+ {
+ m_jit->poke(argument, m_argumentIndex);
+ ++m_argumentIndex;
+ }
+
+ void addArgument(JIT::RegisterID argument)
+ {
+ m_jit->poke(argument, m_argumentIndex);
+ ++m_argumentIndex;
+ }
+
+ void addArgument(unsigned src, JIT::RegisterID scratchRegister) // src is a virtual register.
+ {
+ if (m_jit->m_codeBlock->isConstantRegisterIndex(src))
+ addArgument(JIT::ImmPtr(JSValue::encode(m_jit->m_codeBlock->getConstant(src))));
+ else {
+ m_jit->loadPtr(JIT::Address(JIT::callFrameRegister, src * sizeof(Register)), scratchRegister);
+ addArgument(scratchRegister);
+ }
+ m_jit->killLastResultRegister();
+ }
+
+ JIT::Call call()
+ {
+ ASSERT(m_jit->m_bytecodeIndex != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeIndex is set.
+
+#if ENABLE(OPCODE_SAMPLING)
+ m_jit->sampleInstruction(m_jit->m_codeBlock->instructions().begin() + m_jit->m_bytecodeIndex, true);
+#endif
+
+ m_jit->restoreArgumentReference();
+ JIT::Call call = m_jit->call();
+ m_jit->m_calls.append(CallRecord(call, m_jit->m_bytecodeIndex, m_stub));
+
+#if ENABLE(OPCODE_SAMPLING)
+ m_jit->sampleInstruction(m_jit->m_codeBlock->instructions().begin() + m_jit->m_bytecodeIndex, false);
+#endif
+
+ m_jit->killLastResultRegister();
+ return call;
+ }
+
+ JIT::Call call(unsigned dst) // dst is a virtual register.
+ {
+ ASSERT(m_returnType == Value);
+ JIT::Call call = this->call();
+ m_jit->emitPutVirtualRegister(dst);
+ return call;
+ }
+
+ JIT::Call call(JIT::RegisterID dst)
+ {
+ ASSERT(m_returnType == Value);
+ JIT::Call call = this->call();
+ if (dst != JIT::returnValueRegister)
+ m_jit->move(JIT::returnValueRegister, dst);
+ return call;
+ }
+
+ private:
+ JIT* m_jit;
+ void* m_stub;
+ enum { Value, Void } m_returnType;
+ size_t m_argumentIndex;
+ };
+
+ class CallEvalJITStub : public JITStubCall {
+ public:
+ CallEvalJITStub(JIT* jit, Instruction* instruction)
+ : JITStubCall(jit, JITStubs::cti_op_call_eval)
+ {
+ int callee = instruction[2].u.operand;
+ int argCount = instruction[3].u.operand;
+ int registerOffset = instruction[4].u.operand;
+
+ addArgument(callee, JIT::regT2);
+ addArgument(JIT::Imm32(registerOffset));
+ addArgument(JIT::Imm32(argCount));
+ }
+ };
+}
+
+#endif // ENABLE(JIT)
+
+#endif // JITStubCall_h
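
A representative call site, mirroring emitSlow_op_put_by_id earlier in this patch, shows the intended flow: construct, push arguments first to last, then call (call(dst) would additionally write the result into a virtual register). Fragment sketch, not part of the patch:

    // Usage sketch (inside a JIT emit function; see emitSlow_op_put_by_id above):
    JITStubCall stubCall(this, JITStubs::cti_op_put_by_id);
    stubCall.addArgument(regT0);          // base object
    stubCall.addArgument(ImmPtr(ident));  // property name
    stubCall.addArgument(regT1);          // value
    Call call = stubCall.call();          // void-returning stub, so plain call()
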
diff --git a/JavaScriptCore/jit/JITStubs.cpp b/JavaScriptCore/jit/JITStubs.cpp
index de528a5..a40d1ba 100644
--- a/JavaScriptCore/jit/JITStubs.cpp
+++ b/JavaScriptCore/jit/JITStubs.cpp
@@ -62,25 +62,267 @@ using namespace std;
namespace JSC {
+
+#if PLATFORM(DARWIN) || PLATFORM(WIN_OS)
+#define SYMBOL_STRING(name) "_" #name
+#else
+#define SYMBOL_STRING(name) #name
+#endif
+
+#if COMPILER(GCC) && PLATFORM(X86)
+
+// These ASSERTs remind you that, if you change the layout of JITStackFrame, you
+// need to change the assembly trampolines below to match.
+COMPILE_ASSERT(offsetof(struct JITStackFrame, callFrame) == 0x38, JITStackFrame_callFrame_offset_matches_ctiTrampoline);
+COMPILE_ASSERT(offsetof(struct JITStackFrame, code) == 0x30, JITStackFrame_code_offset_matches_ctiTrampoline);
+COMPILE_ASSERT(offsetof(struct JITStackFrame, savedEBX) == 0x1c, JITStackFrame_stub_argument_space_matches_ctiTrampoline);
+
+asm(
+".globl " SYMBOL_STRING(ctiTrampoline) "\n"
+SYMBOL_STRING(ctiTrampoline) ":" "\n"
+ "pushl %ebp" "\n"
+ "movl %esp, %ebp" "\n"
+ "pushl %esi" "\n"
+ "pushl %edi" "\n"
+ "pushl %ebx" "\n"
+ "subl $0x1c, %esp" "\n"
+ "movl $512, %esi" "\n"
+ "movl 0x38(%esp), %edi" "\n"
+ "call *0x30(%esp)" "\n"
+ "addl $0x1c, %esp" "\n"
+ "popl %ebx" "\n"
+ "popl %edi" "\n"
+ "popl %esi" "\n"
+ "popl %ebp" "\n"
+ "ret" "\n"
+);
+
+asm(
+".globl " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
+SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
+#if !USE(JIT_STUB_ARGUMENT_VA_LIST)
+ "movl %esp, %ecx" "\n"
+#endif
+ "call " SYMBOL_STRING(cti_vm_throw) "\n"
+".globl " SYMBOL_STRING(ctiOpThrowNotCaught) "\n"
+SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
+ "addl $0x1c, %esp" "\n"
+ "popl %ebx" "\n"
+ "popl %edi" "\n"
+ "popl %esi" "\n"
+ "popl %ebp" "\n"
+ "ret" "\n"
+);
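
The magic offsets in these two trampolines pin down the x86 JITStackFrame layout. Read together with the COMPILE_ASSERTs above, the stack after the prologue looks roughly like this (fields past callFrame are inferred from ctiTrampoline's C signature in the MSVC section below, so treat this as a sketch, not the header's definition):

    // Sketch only: the x86 stack as a stub sees it, relative to %esp.
    struct X86StackFrameSketch {
        void* stubArgs[7];         // 0x00-0x1b: stub argument space ("subl $0x1c, %esp")
        void* savedEBX;            // 0x1c  \
        void* savedEDI;            // 0x20   | callee-saved registers
        void* savedESI;            // 0x24   |
        void* savedEBP;            // 0x28  /
        void* returnAddress;       // 0x2c: back into ctiTrampoline's caller
        void* code;                // 0x30: "call *0x30(%esp)"
        void* registerFile;        // 0x34
        void* callFrame;           // 0x38: "movl 0x38(%esp), %edi"
        void* exception;           // 0x3c (inferred)
        void* enabledProfilerRef;  // 0x40 (inferred)
        void* globalData;          // 0x44 (inferred)
    };
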
+
+#elif COMPILER(GCC) && PLATFORM(X86_64)
+
+#if USE(JIT_STUB_ARGUMENT_VA_LIST)
+#error "JIT_STUB_ARGUMENT_VA_LIST not supported on x86-64."
+#endif
+
+// These ASSERTs remind you that, if you change the layout of JITStackFrame, you
+// need to change the assembly trampolines below to match.
+COMPILE_ASSERT(offsetof(struct JITStackFrame, callFrame) == 0x90, JITStackFrame_callFrame_offset_matches_ctiTrampoline);
+COMPILE_ASSERT(offsetof(struct JITStackFrame, code) == 0x80, JITStackFrame_code_offset_matches_ctiTrampoline);
+COMPILE_ASSERT(offsetof(struct JITStackFrame, savedRBX) == 0x48, JITStackFrame_stub_argument_space_matches_ctiTrampoline);
+
+asm(
+".globl " SYMBOL_STRING(ctiTrampoline) "\n"
+SYMBOL_STRING(ctiTrampoline) ":" "\n"
+ "pushq %rbp" "\n"
+ "movq %rsp, %rbp" "\n"
+ "pushq %r12" "\n"
+ "pushq %r13" "\n"
+ "pushq %r14" "\n"
+ "pushq %r15" "\n"
+ "pushq %rbx" "\n"
+ "subq $0x48, %rsp" "\n"
+ "movq $512, %r12" "\n"
+ "movq $0xFFFF000000000000, %r14" "\n"
+ "movq $0xFFFF000000000002, %r15" "\n"
+ "movq 0x90(%rsp), %r13" "\n"
+ "call *0x80(%rsp)" "\n"
+ "addq $0x48, %rsp" "\n"
+ "popq %rbx" "\n"
+ "popq %r15" "\n"
+ "popq %r14" "\n"
+ "popq %r13" "\n"
+ "popq %r12" "\n"
+ "popq %rbp" "\n"
+ "ret" "\n"
+);
+
+asm(
+".globl " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
+SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
+ "movq %rsp, %rdi" "\n"
+ "call " SYMBOL_STRING(cti_vm_throw) "\n"
+".globl " SYMBOL_STRING(ctiOpThrowNotCaught) "\n"
+SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
+ "addq $0x48, %rsp" "\n"
+ "popq %rbx" "\n"
+ "popq %r15" "\n"
+ "popq %r14" "\n"
+ "popq %r13" "\n"
+ "popq %r12" "\n"
+ "popq %rbp" "\n"
+ "ret" "\n"
+);
+
+#elif COMPILER(GCC) && PLATFORM(ARM_V7)
+
+#if USE(JIT_STUB_ARGUMENT_VA_LIST)
+#error "JIT_STUB_ARGUMENT_VA_LIST not supported on ARMv7."
+#endif
+
+COMPILE_ASSERT(offsetof(struct JITStackFrame, preservedReturnAddress) == 0x20, JITStackFrame_outerReturnAddress_offset_matches_ctiTrampoline);
+COMPILE_ASSERT(offsetof(struct JITStackFrame, preservedR4) == 0x24, JITStackFrame_outerReturnAddress_offset_matches_ctiTrampoline);
+COMPILE_ASSERT(offsetof(struct JITStackFrame, preservedR5) == 0x28, JITStackFrame_outerReturnAddress_offset_matches_ctiTrampoline);
+COMPILE_ASSERT(offsetof(struct JITStackFrame, preservedR6) == 0x2c, JITStackFrame_outerReturnAddress_offset_matches_ctiTrampoline);
+
+COMPILE_ASSERT(offsetof(struct JITStackFrame, registerFile) == 0x30, JITStackFrame_registerFile_offset_matches_ctiTrampoline);
+COMPILE_ASSERT(offsetof(struct JITStackFrame, callFrame) == 0x34, JITStackFrame_callFrame_offset_matches_ctiTrampoline);
+COMPILE_ASSERT(offsetof(struct JITStackFrame, exception) == 0x38, JITStackFrame_exception_offset_matches_ctiTrampoline);
+// The fifth argument is the first item already on the stack.
+COMPILE_ASSERT(offsetof(struct JITStackFrame, enabledProfilerReference) == 0x3c, JITStackFrame_enabledProfilerReference_offset_matches_ctiTrampoline);
+
+asm volatile (
+".text" "\n"
+".align 2" "\n"
+".globl " SYMBOL_STRING(ctiTrampoline) "\n"
+".thumb" "\n"
+".thumb_func " SYMBOL_STRING(ctiTrampoline) "\n"
+SYMBOL_STRING(ctiTrampoline) ":" "\n"
+ "sub sp, sp, #0x3c" "\n"
+ "str lr, [sp, #0x20]" "\n"
+ "str r4, [sp, #0x24]" "\n"
+ "str r5, [sp, #0x28]" "\n"
+ "str r6, [sp, #0x2c]" "\n"
+ "str r1, [sp, #0x30]" "\n"
+ "str r2, [sp, #0x34]" "\n"
+ "str r3, [sp, #0x38]" "\n"
+ "cpy r5, r2" "\n"
+ "mov r6, #512" "\n"
+ "blx r0" "\n"
+ "ldr r6, [sp, #0x2c]" "\n"
+ "ldr r5, [sp, #0x28]" "\n"
+ "ldr r4, [sp, #0x24]" "\n"
+ "ldr lr, [sp, #0x20]" "\n"
+ "add sp, sp, #0x3c" "\n"
+ "bx lr" "\n"
+);
+
+asm volatile (
+".text" "\n"
+".align 2" "\n"
+".globl " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
+".thumb" "\n"
+".thumb_func " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
+SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
+ "cpy r0, sp" "\n"
+ "bl " SYMBOL_STRING(cti_vm_throw) "\n"
+ "ldr r6, [sp, #0x2c]" "\n"
+ "ldr r5, [sp, #0x28]" "\n"
+ "ldr r4, [sp, #0x24]" "\n"
+ "ldr lr, [sp, #0x20]" "\n"
+ "add sp, sp, #0x3c" "\n"
+ "bx lr" "\n"
+);
+
+asm volatile (
+".text" "\n"
+".align 2" "\n"
+".globl " SYMBOL_STRING(ctiOpThrowNotCaught) "\n"
+".thumb" "\n"
+".thumb_func " SYMBOL_STRING(ctiOpThrowNotCaught) "\n"
+SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
+ "ldr r6, [sp, #0x2c]" "\n"
+ "ldr r5, [sp, #0x28]" "\n"
+ "ldr r4, [sp, #0x24]" "\n"
+ "ldr lr, [sp, #0x20]" "\n"
+ "add sp, sp, #0x3c" "\n"
+ "bx lr" "\n"
+);
+
+#elif COMPILER(MSVC)
+
+#if USE(JIT_STUB_ARGUMENT_VA_LIST)
+#error "JIT_STUB_ARGUMENT_VA_LIST configuration not supported on MSVC."
+#endif
+
+// These ASSERTs remind you that, if you change the layout of JITStackFrame, you
+// need to change the assembly trampolines below to match.
+COMPILE_ASSERT(offsetof(struct JITStackFrame, callFrame) == 0x38, JITStackFrame_callFrame_offset_matches_ctiTrampoline);
+COMPILE_ASSERT(offsetof(struct JITStackFrame, code) == 0x30, JITStackFrame_code_offset_matches_ctiTrampoline);
+COMPILE_ASSERT(offsetof(struct JITStackFrame, savedEBX) == 0x1c, JITStackFrame_stub_argument_space_matches_ctiTrampoline);
+
+extern "C" {
+
+ __declspec(naked) EncodedJSValue ctiTrampoline(void* code, RegisterFile*, CallFrame*, JSValue* exception, Profiler**, JSGlobalData*)
+ {
+ __asm {
+ push ebp;
+ mov ebp, esp;
+ push esi;
+ push edi;
+ push ebx;
+ sub esp, 0x1c;
+ mov esi, 512;
+ mov ecx, esp;
+ mov edi, [esp + 0x38];
+ call [esp + 0x30];
+ add esp, 0x1c;
+ pop ebx;
+ pop edi;
+ pop esi;
+ pop ebp;
+ ret;
+ }
+ }
+
+ __declspec(naked) void ctiVMThrowTrampoline()
+ {
+ __asm {
+ mov ecx, esp;
+ call JITStubs::cti_vm_throw;
+ add esp, 0x1c;
+ pop ebx;
+ pop edi;
+ pop esi;
+ pop ebp;
+ ret;
+ }
+ }
+
+ __declspec(naked) void ctiOpThrowNotCaught()
+ {
+ __asm {
+ add esp, 0x1c;
+ pop ebx;
+ pop edi;
+ pop esi;
+ pop ebp;
+ ret;
+ }
+ }
+}
+
+#endif
+
#if ENABLE(OPCODE_SAMPLING)
- #define CTI_SAMPLER ARG_globalData->interpreter->sampler()
+ #define CTI_SAMPLER stackFrame.globalData->interpreter->sampler()
#else
#define CTI_SAMPLER 0
#endif
-JITStubs::JITStubs(JSGlobalData* globalData)
- : m_ctiArrayLengthTrampoline(0)
- , m_ctiStringLengthTrampoline(0)
- , m_ctiVirtualCallPreLink(0)
- , m_ctiVirtualCallLink(0)
- , m_ctiVirtualCall(0)
+JITThunks::JITThunks(JSGlobalData* globalData)
{
- JIT::compileCTIMachineTrampolines(globalData, &m_executablePool, &m_ctiArrayLengthTrampoline, &m_ctiStringLengthTrampoline, &m_ctiVirtualCallPreLink, &m_ctiVirtualCallLink, &m_ctiVirtualCall);
+ JIT::compileCTIMachineTrampolines(globalData, &m_executablePool, &m_ctiArrayLengthTrampoline, &m_ctiStringLengthTrampoline, &m_ctiVirtualCallPreLink, &m_ctiVirtualCallLink, &m_ctiVirtualCall, &m_ctiNativeCallThunk);
}
#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
-NEVER_INLINE void JITStubs::tryCachePutByID(CallFrame* callFrame, CodeBlock* codeBlock, void* returnAddress, JSValuePtr baseValue, const PutPropertySlot& slot)
+NEVER_INLINE void JITThunks::tryCachePutByID(CallFrame* callFrame, CodeBlock* codeBlock, void* returnAddress, JSValue baseValue, const PutPropertySlot& slot)
{
// The interpreter checks for recursion here; I do not believe this can occur in CTI.
@@ -89,7 +331,7 @@ NEVER_INLINE void JITStubs::tryCachePutByID(CallFrame* callFrame, CodeBlock* cod
// Uncacheable: give up.
if (!slot.isCacheable()) {
- ctiPatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(JITStubs::cti_op_put_by_id_generic));
+ ctiPatchCallByReturnAddress(returnAddress, FunctionPtr(JITStubs::cti_op_put_by_id_generic));
return;
}
@@ -97,13 +339,13 @@ NEVER_INLINE void JITStubs::tryCachePutByID(CallFrame* callFrame, CodeBlock* cod
Structure* structure = baseCell->structure();
if (structure->isDictionary()) {
- ctiPatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(JITStubs::cti_op_put_by_id_generic));
+ ctiPatchCallByReturnAddress(returnAddress, FunctionPtr(JITStubs::cti_op_put_by_id_generic));
return;
}
// If baseCell != base, then baseCell must be a proxy for another object.
if (baseCell != slot.base()) {
- ctiPatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(JITStubs::cti_op_put_by_id_generic));
+ ctiPatchCallByReturnAddress(returnAddress, FunctionPtr(JITStubs::cti_op_put_by_id_generic));
return;
}
@@ -121,32 +363,24 @@ NEVER_INLINE void JITStubs::tryCachePutByID(CallFrame* callFrame, CodeBlock* cod
stubInfo->initPutByIdReplace(structure);
-#if USE(CTI_REPATCH_PIC)
JIT::patchPutByIdReplace(stubInfo, structure, slot.cachedOffset(), returnAddress);
-#else
- JIT::compilePutByIdReplace(callFrame->scopeChain()->globalData, callFrame, codeBlock, stubInfo, structure, slot.cachedOffset(), returnAddress);
-#endif
}
-NEVER_INLINE void JITStubs::tryCacheGetByID(CallFrame* callFrame, CodeBlock* codeBlock, void* returnAddress, JSValuePtr baseValue, const Identifier& propertyName, const PropertySlot& slot)
+NEVER_INLINE void JITThunks::tryCacheGetByID(CallFrame* callFrame, CodeBlock* codeBlock, void* returnAddress, JSValue baseValue, const Identifier& propertyName, const PropertySlot& slot)
{
// FIXME: Write a test that proves we need to check for recursion here just
// like the interpreter does, then add a check for recursion.
// FIXME: Cache property access for immediates.
if (!baseValue.isCell()) {
- ctiPatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(JITStubs::cti_op_get_by_id_generic));
+ ctiPatchCallByReturnAddress(returnAddress, FunctionPtr(JITStubs::cti_op_get_by_id_generic));
return;
}
JSGlobalData* globalData = &callFrame->globalData();
if (isJSArray(globalData, baseValue) && propertyName == callFrame->propertyNames().length) {
-#if USE(CTI_REPATCH_PIC)
JIT::compilePatchGetArrayLength(callFrame->scopeChain()->globalData, codeBlock, returnAddress);
-#else
- ctiPatchCallByReturnAddress(returnAddress, globalData->jitStubs.ctiArrayLengthTrampoline());
-#endif
return;
}
@@ -159,7 +393,7 @@ NEVER_INLINE void JITStubs::tryCacheGetByID(CallFrame* callFrame, CodeBlock* cod
// Uncacheable: give up.
if (!slot.isCacheable()) {
- ctiPatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(JITStubs::cti_op_get_by_id_generic));
+ ctiPatchCallByReturnAddress(returnAddress, FunctionPtr(JITStubs::cti_op_get_by_id_generic));
return;
}
@@ -167,7 +401,7 @@ NEVER_INLINE void JITStubs::tryCacheGetByID(CallFrame* callFrame, CodeBlock* cod
Structure* structure = baseCell->structure();
if (structure->isDictionary()) {
- ctiPatchCallByReturnAddress(returnAddress, reinterpret_cast<void*>(JITStubs::cti_op_get_by_id_generic));
+ ctiPatchCallByReturnAddress(returnAddress, FunctionPtr(JITStubs::cti_op_get_by_id_generic));
return;
}
@@ -181,12 +415,8 @@ NEVER_INLINE void JITStubs::tryCacheGetByID(CallFrame* callFrame, CodeBlock* cod
if (slot.slotBase() == baseValue) {
// set this up, so derefStructures can do its job.
stubInfo->initGetByIdSelf(structure);
-
-#if USE(CTI_REPATCH_PIC)
+
JIT::patchGetByIdSelf(stubInfo, structure, slot.cachedOffset(), returnAddress);
-#else
- JIT::compileGetByIdSelf(callFrame->scopeChain()->globalData, callFrame, codeBlock, stubInfo, structure, slot.cachedOffset(), returnAddress);
-#endif
return;
}
@@ -221,7 +451,7 @@ NEVER_INLINE void JITStubs::tryCacheGetByID(CallFrame* callFrame, CodeBlock* cod
#if USE(JIT_STUB_ARGUMENT_VA_LIST)
#define SETUP_VA_LISTL_ARGS va_list vl_args; va_start(vl_args, args)
-#else // JIT_STUB_ARGUMENT_REGISTER or JIT_STUB_ARGUMENT_STACK
+#else
#define SETUP_VA_LISTL_ARGS
#endif
@@ -231,7 +461,7 @@ extern "C" {
static void jscGeneratedNativeCode()
{
- // When executing a CTI function (which might do an allocation), we hack the return address
+ // When executing a JIT stub function (which might do an allocation), we hack the return address
// to pretend to be executing this function, to keep stack logging tools from blowing out
// memory.
}
@@ -239,30 +469,31 @@ static void jscGeneratedNativeCode()
}
struct StackHack {
- ALWAYS_INLINE StackHack(void** location)
- {
- returnAddressLocation = location;
- savedReturnAddress = *returnAddressLocation;
- ctiSetReturnAddress(returnAddressLocation, reinterpret_cast<void*>(jscGeneratedNativeCode));
+ ALWAYS_INLINE StackHack(JITStackFrame& stackFrame)
+ : stackFrame(stackFrame)
+ , savedReturnAddress(*stackFrame.returnAddressSlot())
+ {
+ *stackFrame.returnAddressSlot() = reinterpret_cast<void*>(jscGeneratedNativeCode);
}
+
ALWAYS_INLINE ~StackHack()
{
- ctiSetReturnAddress(returnAddressLocation, savedReturnAddress);
+ *stackFrame.returnAddressSlot() = savedReturnAddress;
}
- void** returnAddressLocation;
+ JITStackFrame& stackFrame;
void* savedReturnAddress;
};
-#define BEGIN_STUB_FUNCTION() SETUP_VA_LISTL_ARGS; StackHack stackHack(&STUB_RETURN_ADDRESS_SLOT)
-#define STUB_SET_RETURN_ADDRESS(address) stackHack.savedReturnAddress = address
+#define STUB_INIT_STACK_FRAME(stackFrame) SETUP_VA_LISTL_ARGS; JITStackFrame& stackFrame = *reinterpret_cast<JITStackFrame*>(STUB_ARGS); StackHack stackHack(stackFrame)
+#define STUB_SET_RETURN_ADDRESS(returnAddress) stackHack.savedReturnAddress = returnAddress
#define STUB_RETURN_ADDRESS stackHack.savedReturnAddress
#else
-#define BEGIN_STUB_FUNCTION() SETUP_VA_LISTL_ARGS
-#define STUB_SET_RETURN_ADDRESS(address) ctiSetReturnAddress(&STUB_RETURN_ADDRESS_SLOT, address);
-#define STUB_RETURN_ADDRESS STUB_RETURN_ADDRESS_SLOT
+#define STUB_INIT_STACK_FRAME(stackFrame) SETUP_VA_LISTL_ARGS; JITStackFrame& stackFrame = *reinterpret_cast<JITStackFrame*>(STUB_ARGS)
+#define STUB_SET_RETURN_ADDRESS(returnAddress) *stackFrame.returnAddressSlot() = returnAddress
+#define STUB_RETURN_ADDRESS *stackFrame.returnAddressSlot()
#endif
@@ -274,7 +505,7 @@ static NEVER_INLINE void returnToThrowTrampoline(JSGlobalData* globalData, void*
{
ASSERT(globalData->exception);
globalData->exceptionLocation = exceptionLocation;
- ctiSetReturnAddress(&returnAddressSlot, reinterpret_cast<void*>(ctiVMThrowTrampoline));
+ returnAddressSlot = reinterpret_cast<void*>(ctiVMThrowTrampoline);
}
static NEVER_INLINE void throwStackOverflowError(CallFrame* callFrame, JSGlobalData* globalData, void* exceptionLocation, void*& returnAddressSlot)
@@ -288,68 +519,91 @@ static NEVER_INLINE void throwStackOverflowError(CallFrame* callFrame, JSGlobalD
VM_THROW_EXCEPTION_AT_END(); \
return 0; \
} while (0)
-#define VM_THROW_EXCEPTION_2() \
- do { \
- VM_THROW_EXCEPTION_AT_END(); \
- RETURN_PAIR(0, 0); \
- } while (0)
#define VM_THROW_EXCEPTION_AT_END() \
- returnToThrowTrampoline(ARG_globalData, STUB_RETURN_ADDRESS, STUB_RETURN_ADDRESS)
+ returnToThrowTrampoline(stackFrame.globalData, STUB_RETURN_ADDRESS, STUB_RETURN_ADDRESS)
#define CHECK_FOR_EXCEPTION() \
do { \
- if (UNLIKELY(ARG_globalData->exception != noValue())) \
+ if (UNLIKELY(stackFrame.globalData->exception != JSValue())) \
VM_THROW_EXCEPTION(); \
} while (0)
#define CHECK_FOR_EXCEPTION_AT_END() \
do { \
- if (UNLIKELY(ARG_globalData->exception != noValue())) \
+ if (UNLIKELY(stackFrame.globalData->exception != JSValue())) \
VM_THROW_EXCEPTION_AT_END(); \
} while (0)
#define CHECK_FOR_EXCEPTION_VOID() \
do { \
- if (UNLIKELY(ARG_globalData->exception != noValue())) { \
+ if (UNLIKELY(stackFrame.globalData->exception != JSValue())) { \
VM_THROW_EXCEPTION_AT_END(); \
return; \
} \
} while (0)
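
These macros all assume a local `stackFrame` is in scope, which is why STUB_INIT_STACK_FRAME must be the first statement of every stub. The canonical shape of a value-returning stub (a sketch modelled on op_pre_inc further down; doSomething is a placeholder, not a real function):

    // Sketch only:
    // DEFINE_STUB_FUNCTION(EncodedJSValue, op_example)
    // {
    //     STUB_INIT_STACK_FRAME(stackFrame);
    //     JSValue v = stackFrame.args[0].jsValue();
    //     JSValue result = doSomething(stackFrame.callFrame, v); // may throw
    //     CHECK_FOR_EXCEPTION_AT_END(); // on exception, reroute the return to ctiVMThrowTrampoline
    //     return JSValue::encode(result);
    // }
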
-JSObject* JITStubs::cti_op_convert_this(STUB_ARGS)
+namespace JITStubs {
+
+#if PLATFORM(ARM_V7)
+
+COMPILE_ASSERT(offsetof(struct JITStackFrame, thunkReturnAddress) == 0x1C, JITStackFrame_outerReturnAddress_offset_matches_ctiTrampoline);
+
+#define DEFINE_STUB_FUNCTION(rtype, op) \
+ extern "C" { \
+ rtype JITStubThunked_##op(STUB_ARGS_DECLARATION); \
+ }; \
+ asm volatile ( \
+ ".text" "\n" \
+ ".align 2" "\n" \
+ ".globl " SYMBOL_STRING(cti_##op) "\n" \
+ ".thumb" "\n" \
+ ".thumb_func " SYMBOL_STRING(cti_##op) "\n" \
+ SYMBOL_STRING(cti_##op) ":" "\n" \
+ "str lr, [sp, #0x1c]" "\n" \
+ "bl " SYMBOL_STRING(JITStubThunked_##op) "\n" \
+ "ldr lr, [sp, #0x1c]" "\n" \
+ "bx lr" "\n" \
+ ); \
+ rtype JITStubThunked_##op(STUB_ARGS_DECLARATION) \
+
+#else
+#define DEFINE_STUB_FUNCTION(rtype, op) rtype JIT_STUB cti_##op(STUB_ARGS_DECLARATION)
+#endif
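
Concretely, on the non-ARMv7 path a definition like DEFINE_STUB_FUNCTION(void, op_end) expands to an ordinary stub function, while on ARMv7 the same macro first emits a Thumb thunk that spills and reloads lr around the call into the real body. Expansion sketch for the simple case:

    // Sketch only: non-ARMv7 expansion of DEFINE_STUB_FUNCTION(void, op_end).
    void JIT_STUB cti_op_end(STUB_ARGS_DECLARATION)
    {
        // ... stub body ...
    }
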
+
+DEFINE_STUB_FUNCTION(JSObject*, op_convert_this)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- JSValuePtr v1 = ARG_src1;
- CallFrame* callFrame = ARG_callFrame;
+ JSValue v1 = stackFrame.args[0].jsValue();
+ CallFrame* callFrame = stackFrame.callFrame;
JSObject* result = v1.toThisObject(callFrame);
CHECK_FOR_EXCEPTION_AT_END();
return result;
}
-void JITStubs::cti_op_end(STUB_ARGS)
+DEFINE_STUB_FUNCTION(void, op_end)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- ScopeChainNode* scopeChain = ARG_callFrame->scopeChain();
+ ScopeChainNode* scopeChain = stackFrame.callFrame->scopeChain();
ASSERT(scopeChain->refCount > 1);
scopeChain->deref();
}
-JSValueEncodedAsPointer* JITStubs::cti_op_add(STUB_ARGS)
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_add)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- JSValuePtr v1 = ARG_src1;
- JSValuePtr v2 = ARG_src2;
+ JSValue v1 = stackFrame.args[0].jsValue();
+ JSValue v2 = stackFrame.args[1].jsValue();
double left;
double right = 0.0;
bool rightIsNumber = v2.getNumber(right);
if (rightIsNumber && v1.getNumber(left))
- return JSValuePtr::encode(jsNumber(ARG_globalData, left + right));
+ return JSValue::encode(jsNumber(stackFrame.globalData, left + right));
- CallFrame* callFrame = ARG_callFrame;
+ CallFrame* callFrame = stackFrame.callFrame;
bool leftIsString = v1.isString();
if (leftIsString && v2.isString()) {
@@ -359,7 +613,7 @@ JSValueEncodedAsPointer* JITStubs::cti_op_add(STUB_ARGS)
VM_THROW_EXCEPTION();
}
- return JSValuePtr::encode(jsString(ARG_globalData, value.release()));
+ return JSValue::encode(jsString(stackFrame.globalData, value.release()));
}
if (rightIsNumber & leftIsString) {
@@ -371,35 +625,35 @@ JSValueEncodedAsPointer* JITStubs::cti_op_add(STUB_ARGS)
throwOutOfMemoryError(callFrame);
VM_THROW_EXCEPTION();
}
- return JSValuePtr::encode(jsString(ARG_globalData, value.release()));
+ return JSValue::encode(jsString(stackFrame.globalData, value.release()));
}
// All other cases are pretty uncommon
- JSValuePtr result = jsAddSlowCase(callFrame, v1, v2);
+ JSValue result = jsAddSlowCase(callFrame, v1, v2);
CHECK_FOR_EXCEPTION_AT_END();
- return JSValuePtr::encode(result);
+ return JSValue::encode(result);
}
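As an aside, the tiering in op_add (numeric fast path, string-concatenation fast path, slow case for everything else) can be restated as a small standalone program. The Value type below is a toy, not JSC's JSValue, and addSlowCase merely stands in for jsAddSlowCase's toPrimitive handling:

    #include <cstdio>
    #include <string>

    struct Value {
        enum Kind { Number, String, Other } kind;
        double num;
        std::string str;
    };

    static Value addSlowCase(const Value&, const Value&)
    {
        return Value{ Value::Other, 0, std::string() }; // toPrimitive etc. elided
    }

    static Value add(const Value& v1, const Value& v2)
    {
        if (v1.kind == Value::Number && v2.kind == Value::Number)
            return Value{ Value::Number, v1.num + v2.num, std::string() }; // numeric fast path
        if (v1.kind == Value::String && v2.kind == Value::String)
            return Value{ Value::String, 0, v1.str + v2.str };             // concatenation fast path
        return addSlowCase(v1, v2); // "All other cases are pretty uncommon"
    }

    int main()
    {
        std::printf("%g\n", add(Value{ Value::Number, 2, "" }, Value{ Value::Number, 40, "" }).num);
        std::printf("%s\n", add(Value{ Value::String, 0, "foo" }, Value{ Value::String, 0, "bar" }).str.c_str());
        return 0;
    }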
-JSValueEncodedAsPointer* JITStubs::cti_op_pre_inc(STUB_ARGS)
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_pre_inc)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- JSValuePtr v = ARG_src1;
+ JSValue v = stackFrame.args[0].jsValue();
- CallFrame* callFrame = ARG_callFrame;
- JSValuePtr result = jsNumber(ARG_globalData, v.toNumber(callFrame) + 1);
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSValue result = jsNumber(stackFrame.globalData, v.toNumber(callFrame) + 1);
CHECK_FOR_EXCEPTION_AT_END();
- return JSValuePtr::encode(result);
+ return JSValue::encode(result);
}
-int JITStubs::cti_timeout_check(STUB_ARGS)
+DEFINE_STUB_FUNCTION(int, timeout_check)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- JSGlobalData* globalData = ARG_globalData;
+ JSGlobalData* globalData = stackFrame.globalData;
TimeoutChecker& timeoutChecker = globalData->timeoutChecker;
- if (timeoutChecker.didTimeOut(ARG_callFrame)) {
+ if (timeoutChecker.didTimeOut(stackFrame.callFrame)) {
globalData->exception = createInterruptedExecutionException(globalData);
VM_THROW_EXCEPTION_AT_END();
}
@@ -407,161 +661,258 @@ int JITStubs::cti_timeout_check(STUB_ARGS)
return timeoutChecker.ticksUntilNextCheck();
}
-void JITStubs::cti_register_file_check(STUB_ARGS)
+DEFINE_STUB_FUNCTION(void, register_file_check)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- if (LIKELY(ARG_registerFile->grow(ARG_callFrame + ARG_callFrame->codeBlock()->m_numCalleeRegisters)))
+ if (LIKELY(stackFrame.registerFile->grow(&stackFrame.callFrame->registers()[stackFrame.callFrame->codeBlock()->m_numCalleeRegisters])))
return;
// Rewind to the previous call frame because op_call already optimistically
// moved the call frame forward.
- CallFrame* oldCallFrame = ARG_callFrame->callerFrame();
- ARG_setCallFrame(oldCallFrame);
- throwStackOverflowError(oldCallFrame, ARG_globalData, oldCallFrame->returnPC(), STUB_RETURN_ADDRESS);
+ CallFrame* oldCallFrame = stackFrame.callFrame->callerFrame();
+ stackFrame.callFrame = oldCallFrame;
+ throwStackOverflowError(oldCallFrame, stackFrame.globalData, oldCallFrame->returnPC(), STUB_RETURN_ADDRESS);
}
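RegisterFile::grow either extends the live register region or fails so the stub can throw a stack-overflow error. A toy model of that contract (not JSC's actual class):

    #include <cstddef>

    class ToyRegisterFile {
    public:
        ToyRegisterFile(int* start, size_t capacity)
            : m_end(start), m_max(start + capacity) {}

        bool grow(int* newEnd)
        {
            if (newEnd > m_max)
                return false;   // caller throws a stack-overflow error
            if (newEnd > m_end)
                m_end = newEnd; // commit the larger live region
            return true;
        }

    private:
        int* m_end;
        int* m_max;
    };

    int main()
    {
        int buffer[64];
        ToyRegisterFile rf(buffer, 64);
        bool ok = rf.grow(buffer + 32);        // fits within capacity
        bool overflow = rf.grow(buffer + 128); // exceeds capacity: refused
        return (ok && !overflow) ? 0 : 1;
    }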
-int JITStubs::cti_op_loop_if_less(STUB_ARGS)
+DEFINE_STUB_FUNCTION(int, op_loop_if_less)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- JSValuePtr src1 = ARG_src1;
- JSValuePtr src2 = ARG_src2;
- CallFrame* callFrame = ARG_callFrame;
+ JSValue src1 = stackFrame.args[0].jsValue();
+ JSValue src2 = stackFrame.args[1].jsValue();
+ CallFrame* callFrame = stackFrame.callFrame;
bool result = jsLess(callFrame, src1, src2);
CHECK_FOR_EXCEPTION_AT_END();
return result;
}
-int JITStubs::cti_op_loop_if_lesseq(STUB_ARGS)
+DEFINE_STUB_FUNCTION(int, op_loop_if_lesseq)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- JSValuePtr src1 = ARG_src1;
- JSValuePtr src2 = ARG_src2;
- CallFrame* callFrame = ARG_callFrame;
+ JSValue src1 = stackFrame.args[0].jsValue();
+ JSValue src2 = stackFrame.args[1].jsValue();
+ CallFrame* callFrame = stackFrame.callFrame;
bool result = jsLessEq(callFrame, src1, src2);
CHECK_FOR_EXCEPTION_AT_END();
return result;
}
-JSObject* JITStubs::cti_op_new_object(STUB_ARGS)
+DEFINE_STUB_FUNCTION(JSObject*, op_new_object)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- return constructEmptyObject(ARG_callFrame);
+ return constructEmptyObject(stackFrame.callFrame);
}
-void JITStubs::cti_op_put_by_id_generic(STUB_ARGS)
+DEFINE_STUB_FUNCTION(void, op_put_by_id_generic)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
PutPropertySlot slot;
- ARG_src1.put(ARG_callFrame, *ARG_id2, ARG_src3, slot);
+ stackFrame.args[0].jsValue().put(stackFrame.callFrame, stackFrame.args[1].identifier(), stackFrame.args[2].jsValue(), slot);
CHECK_FOR_EXCEPTION_AT_END();
}
-JSValueEncodedAsPointer* JITStubs::cti_op_get_by_id_generic(STUB_ARGS)
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_generic)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- CallFrame* callFrame = ARG_callFrame;
- Identifier& ident = *ARG_id2;
+ CallFrame* callFrame = stackFrame.callFrame;
+ Identifier& ident = stackFrame.args[1].identifier();
- JSValuePtr baseValue = ARG_src1;
+ JSValue baseValue = stackFrame.args[0].jsValue();
PropertySlot slot(baseValue);
- JSValuePtr result = baseValue.get(callFrame, ident, slot);
+ JSValue result = baseValue.get(callFrame, ident, slot);
CHECK_FOR_EXCEPTION_AT_END();
- return JSValuePtr::encode(result);
+ return JSValue::encode(result);
}
#if ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
-void JITStubs::cti_op_put_by_id(STUB_ARGS)
+DEFINE_STUB_FUNCTION(void, op_put_by_id)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- CallFrame* callFrame = ARG_callFrame;
- Identifier& ident = *ARG_id2;
+ CallFrame* callFrame = stackFrame.callFrame;
+ Identifier& ident = stackFrame.args[1].identifier();
PutPropertySlot slot;
- ARG_src1.put(callFrame, ident, ARG_src3, slot);
+ stackFrame.args[0].jsValue().put(callFrame, ident, stackFrame.args[2].jsValue(), slot);
- ctiPatchCallByReturnAddress(STUB_RETURN_ADDRESS, reinterpret_cast<void*>(cti_op_put_by_id_second));
+ ctiPatchCallByReturnAddress(STUB_RETURN_ADDRESS, FunctionPtr(cti_op_put_by_id_second));
CHECK_FOR_EXCEPTION_AT_END();
}
-void JITStubs::cti_op_put_by_id_second(STUB_ARGS)
+DEFINE_STUB_FUNCTION(void, op_put_by_id_second)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
PutPropertySlot slot;
- ARG_src1.put(ARG_callFrame, *ARG_id2, ARG_src3, slot);
- tryCachePutByID(ARG_callFrame, ARG_callFrame->codeBlock(), STUB_RETURN_ADDRESS, ARG_src1, slot);
+ stackFrame.args[0].jsValue().put(stackFrame.callFrame, stackFrame.args[1].identifier(), stackFrame.args[2].jsValue(), slot);
+ JITThunks::tryCachePutByID(stackFrame.callFrame, stackFrame.callFrame->codeBlock(), STUB_RETURN_ADDRESS, stackFrame.args[0].jsValue(), slot);
CHECK_FOR_EXCEPTION_AT_END();
}
-void JITStubs::cti_op_put_by_id_fail(STUB_ARGS)
+DEFINE_STUB_FUNCTION(void, op_put_by_id_fail)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- CallFrame* callFrame = ARG_callFrame;
- Identifier& ident = *ARG_id2;
+ CallFrame* callFrame = stackFrame.callFrame;
+ Identifier& ident = stackFrame.args[1].identifier();
PutPropertySlot slot;
- ARG_src1.put(callFrame, ident, ARG_src3, slot);
+ stackFrame.args[0].jsValue().put(callFrame, ident, stackFrame.args[2].jsValue(), slot);
CHECK_FOR_EXCEPTION_AT_END();
}
-JSValueEncodedAsPointer* JITStubs::cti_op_get_by_id(STUB_ARGS)
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_put_by_id_transition_realloc)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue baseValue = stackFrame.args[0].jsValue();
+ int32_t oldSize = stackFrame.args[1].int32();
+ int32_t newSize = stackFrame.args[2].int32();
+
+ ASSERT(baseValue.isObject());
+ asObject(baseValue)->allocatePropertyStorage(oldSize, newSize);
+
+ return JSValue::encode(baseValue);
+}
+
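Conceptually, allocatePropertyStorage grows the object's out-of-line property array and carries the old values across. A hypothetical sketch of that operation (not JSC's implementation; allocation-failure handling elided):

    #include <cstdlib>
    #include <cstring>

    typedef void* PropertyStorageSlot; // stand-in for one property storage cell

    PropertyStorageSlot* growPropertyStorage(PropertyStorageSlot* oldStorage,
                                             int oldSize, int newSize)
    {
        PropertyStorageSlot* newStorage = static_cast<PropertyStorageSlot*>(
            std::malloc(newSize * sizeof(PropertyStorageSlot)));
        std::memcpy(newStorage, oldStorage, oldSize * sizeof(PropertyStorageSlot));
        std::free(oldStorage);
        return newStorage; // the caller re-points the object at the larger array
    }

    int main()
    {
        PropertyStorageSlot* storage = static_cast<PropertyStorageSlot*>(
            std::calloc(4, sizeof(PropertyStorageSlot)));
        storage = growPropertyStorage(storage, 4, 8);
        std::free(storage);
        return 0;
    }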
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- CallFrame* callFrame = ARG_callFrame;
- Identifier& ident = *ARG_id2;
+ CallFrame* callFrame = stackFrame.callFrame;
+ Identifier& ident = stackFrame.args[1].identifier();
- JSValuePtr baseValue = ARG_src1;
+ JSValue baseValue = stackFrame.args[0].jsValue();
PropertySlot slot(baseValue);
- JSValuePtr result = baseValue.get(callFrame, ident, slot);
+ JSValue result = baseValue.get(callFrame, ident, slot);
- ctiPatchCallByReturnAddress(STUB_RETURN_ADDRESS, reinterpret_cast<void*>(cti_op_get_by_id_second));
+ ctiPatchCallByReturnAddress(STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_second));
CHECK_FOR_EXCEPTION_AT_END();
- return JSValuePtr::encode(result);
+ return JSValue::encode(result);
}
-JSValueEncodedAsPointer* JITStubs::cti_op_get_by_id_second(STUB_ARGS)
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_method_check)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- CallFrame* callFrame = ARG_callFrame;
- Identifier& ident = *ARG_id2;
+ CallFrame* callFrame = stackFrame.callFrame;
+ Identifier& ident = stackFrame.args[1].identifier();
- JSValuePtr baseValue = ARG_src1;
+ JSValue baseValue = stackFrame.args[0].jsValue();
PropertySlot slot(baseValue);
- JSValuePtr result = baseValue.get(callFrame, ident, slot);
+ JSValue result = baseValue.get(callFrame, ident, slot);
- tryCacheGetByID(callFrame, callFrame->codeBlock(), STUB_RETURN_ADDRESS, baseValue, ident, slot);
+ ctiPatchCallByReturnAddress(STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_method_check_second));
CHECK_FOR_EXCEPTION_AT_END();
- return JSValuePtr::encode(result);
+ return JSValue::encode(result);
}
-JSValueEncodedAsPointer* JITStubs::cti_op_get_by_id_self_fail(STUB_ARGS)
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_method_check_second)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- CallFrame* callFrame = ARG_callFrame;
- Identifier& ident = *ARG_id2;
+ CallFrame* callFrame = stackFrame.callFrame;
+ Identifier& ident = stackFrame.args[1].identifier();
- JSValuePtr baseValue = ARG_src1;
+ JSValue baseValue = stackFrame.args[0].jsValue();
PropertySlot slot(baseValue);
- JSValuePtr result = baseValue.get(callFrame, ident, slot);
+ JSValue result = baseValue.get(callFrame, ident, slot);
+
+ CHECK_FOR_EXCEPTION();
+
+    // If we successfully got something, then the base from which it is being accessed must
+    // be an object. (This assertion ensures the asObject() call below is safe; it comes
+    // after an isCacheable() check.)
+    ASSERT(!slot.isCacheable() || slot.slotBase().isObject());
+
+    // Check that:
+    //   * we're dealing with a JSCell,
+    //   * the property is cacheable,
+    //   * the structure is not a dictionary, and
+    //   * there is a function cached.
+ Structure* structure;
+ JSCell* specific;
+ JSObject* slotBaseObject;
+ if (baseValue.isCell()
+ && slot.isCacheable()
+ && !(structure = asCell(baseValue)->structure())->isDictionary()
+ && (slotBaseObject = asObject(slot.slotBase()))->getPropertySpecificValue(callFrame, ident, specific)
+ && specific
+ ) {
+
+ JSFunction* callee = (JSFunction*)specific;
+
+ // Since we're accessing a prototype in a loop, it's a good bet that it
+ // should not be treated as a dictionary.
+ if (slotBaseObject->structure()->isDictionary())
+ slotBaseObject->setStructure(Structure::fromDictionaryTransition(slotBaseObject->structure()));
+
+ // The result fetched should always be the callee!
+ ASSERT(result == JSValue(callee));
+ MethodCallLinkInfo& methodCallLinkInfo = callFrame->codeBlock()->getMethodCallLinkInfo(STUB_RETURN_ADDRESS);
+
+ // Check to see if the function is on the object's prototype. Patch up the code to optimize.
+ if (slot.slotBase() == structure->prototypeForLookup(callFrame))
+ JIT::patchMethodCallProto(methodCallLinkInfo, callee, structure, slotBaseObject);
+ // Check to see if the function is on the object itself.
+    // Since we generate the method-check to check both the structure and a prototype-structure (since this
+    // is the common case) we have a problem - we need to patch the prototype structure check to do something
+    // useful. We could try to nop it out altogether, but that's a little messy, so let's do something simpler
+    // for now: perform the check against a special object on the global object that is used only for this
+    // purpose. The object is in no way exposed, so the check will always pass.
+ else if (slot.slotBase() == baseValue)
+ JIT::patchMethodCallProto(methodCallLinkInfo, callee, structure, callFrame->scopeChain()->globalObject()->methodCallDummy());
+
+ // For now let any other case be cached as a normal get_by_id.
+ }
+
+    // Revert the get_by_id op to being a regular get_by_id - allow it to cache like normal, if it needs to.
+ ctiPatchCallByReturnAddress(STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id));
+
+ return JSValue::encode(result);
+}
+
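The cache written by patchMethodCallProto stays valid only while both structures it recorded remain unchanged; a toy restatement of that guard, with illustrative names that are not JSC's:

    struct Structure {};
    struct CachedFunction {};

    struct MethodCallCache {
        const Structure* structure;       // receiver's structure at patch time
        const Structure* protoStructure;  // prototype's structure at patch time
        CachedFunction* function;         // the callee recorded by the patch
    };

    CachedFunction* tryFastMethodLookup(const MethodCallCache& cache,
                                        const Structure* structure,
                                        const Structure* protoStructure)
    {
        if (structure == cache.structure && protoStructure == cache.protoStructure)
            return cache.function;   // both guards hold: skip the property lookup
        return 0;                    // fall back to a full get_by_id and re-patch
    }

    int main()
    {
        Structure s, p;
        CachedFunction f;
        MethodCallCache cache = { &s, &p, &f };
        return tryFastMethodLookup(cache, &s, &p) == &f ? 0 : 1;
    }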
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_second)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ Identifier& ident = stackFrame.args[1].identifier();
+
+ JSValue baseValue = stackFrame.args[0].jsValue();
+ PropertySlot slot(baseValue);
+ JSValue result = baseValue.get(callFrame, ident, slot);
+
+ JITThunks::tryCacheGetByID(callFrame, callFrame->codeBlock(), STUB_RETURN_ADDRESS, baseValue, ident, slot);
+
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_self_fail)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ Identifier& ident = stackFrame.args[1].identifier();
+
+ JSValue baseValue = stackFrame.args[0].jsValue();
+ PropertySlot slot(baseValue);
+ JSValue result = baseValue.get(callFrame, ident, slot);
CHECK_FOR_EXCEPTION();
@@ -591,11 +942,11 @@ JSValueEncodedAsPointer* JITStubs::cti_op_get_by_id_self_fail(STUB_ARGS)
JIT::compileGetByIdSelfList(callFrame->scopeChain()->globalData, codeBlock, stubInfo, polymorphicStructureList, listIndex, asCell(baseValue)->structure(), slot.cachedOffset());
if (listIndex == (POLYMORPHIC_LIST_CACHE_SIZE - 1))
- ctiPatchCallByReturnAddress(STUB_RETURN_ADDRESS, reinterpret_cast<void*>(cti_op_get_by_id_generic));
+ ctiPatchCallByReturnAddress(STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_generic));
} else {
- ctiPatchCallByReturnAddress(STUB_RETURN_ADDRESS, reinterpret_cast<void*>(cti_op_get_by_id_generic));
+ ctiPatchCallByReturnAddress(STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_generic));
}
- return JSValuePtr::encode(result);
+ return JSValue::encode(result);
}
static PolymorphicAccessStructureList* getPolymorphicAccessStructureListSlot(StructureStubInfo* stubInfo, int& listIndex)
@@ -627,21 +978,21 @@ static PolymorphicAccessStructureList* getPolymorphicAccessStructureListSlot(Str
return prototypeStructureList;
}
-JSValueEncodedAsPointer* JITStubs::cti_op_get_by_id_proto_list(STUB_ARGS)
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_proto_list)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- CallFrame* callFrame = ARG_callFrame;
+ CallFrame* callFrame = stackFrame.callFrame;
- JSValuePtr baseValue = ARG_src1;
+ JSValue baseValue = stackFrame.args[0].jsValue();
PropertySlot slot(baseValue);
- JSValuePtr result = baseValue.get(callFrame, *ARG_id2, slot);
+ JSValue result = baseValue.get(callFrame, stackFrame.args[1].identifier(), slot);
CHECK_FOR_EXCEPTION();
if (!baseValue.isCell() || !slot.isCacheable() || asCell(baseValue)->structure()->isDictionary()) {
- ctiPatchCallByReturnAddress(STUB_RETURN_ADDRESS, reinterpret_cast<void*>(cti_op_get_by_id_proto_fail));
- return JSValuePtr::encode(result);
+ ctiPatchCallByReturnAddress(STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_proto_fail));
+ return JSValue::encode(result);
}
Structure* structure = asCell(baseValue)->structure();
@@ -652,7 +1003,7 @@ JSValueEncodedAsPointer* JITStubs::cti_op_get_by_id_proto_list(STUB_ARGS)
JSObject* slotBaseObject = asObject(slot.slotBase());
if (slot.slotBase() == baseValue)
- ctiPatchCallByReturnAddress(STUB_RETURN_ADDRESS, reinterpret_cast<void*>(cti_op_get_by_id_proto_fail));
+ ctiPatchCallByReturnAddress(STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_proto_fail));
else if (slot.slotBase() == asCell(baseValue)->structure()->prototypeForLookup(callFrame)) {
// Since we're accessing a prototype in a loop, it's a good bet that it
// should not be treated as a dictionary.
@@ -665,176 +1016,175 @@ JSValueEncodedAsPointer* JITStubs::cti_op_get_by_id_proto_list(STUB_ARGS)
JIT::compileGetByIdProtoList(callFrame->scopeChain()->globalData, callFrame, codeBlock, stubInfo, prototypeStructureList, listIndex, structure, slotBaseObject->structure(), slot.cachedOffset());
if (listIndex == (POLYMORPHIC_LIST_CACHE_SIZE - 1))
- ctiPatchCallByReturnAddress(STUB_RETURN_ADDRESS, reinterpret_cast<void*>(cti_op_get_by_id_proto_list_full));
+ ctiPatchCallByReturnAddress(STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_proto_list_full));
} else if (size_t count = countPrototypeChainEntriesAndCheckForProxies(callFrame, baseValue, slot)) {
int listIndex;
PolymorphicAccessStructureList* prototypeStructureList = getPolymorphicAccessStructureListSlot(stubInfo, listIndex);
JIT::compileGetByIdChainList(callFrame->scopeChain()->globalData, callFrame, codeBlock, stubInfo, prototypeStructureList, listIndex, structure, structure->prototypeChain(callFrame), count, slot.cachedOffset());
if (listIndex == (POLYMORPHIC_LIST_CACHE_SIZE - 1))
- ctiPatchCallByReturnAddress(STUB_RETURN_ADDRESS, reinterpret_cast<void*>(cti_op_get_by_id_proto_list_full));
+ ctiPatchCallByReturnAddress(STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_proto_list_full));
} else
- ctiPatchCallByReturnAddress(STUB_RETURN_ADDRESS, reinterpret_cast<void*>(cti_op_get_by_id_proto_fail));
+ ctiPatchCallByReturnAddress(STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_proto_fail));
- return JSValuePtr::encode(result);
+ return JSValue::encode(result);
}
-JSValueEncodedAsPointer* JITStubs::cti_op_get_by_id_proto_list_full(STUB_ARGS)
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_proto_list_full)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- JSValuePtr baseValue = ARG_src1;
+ JSValue baseValue = stackFrame.args[0].jsValue();
PropertySlot slot(baseValue);
- JSValuePtr result = baseValue.get(ARG_callFrame, *ARG_id2, slot);
+ JSValue result = baseValue.get(stackFrame.callFrame, stackFrame.args[1].identifier(), slot);
CHECK_FOR_EXCEPTION_AT_END();
- return JSValuePtr::encode(result);
+ return JSValue::encode(result);
}
-JSValueEncodedAsPointer* JITStubs::cti_op_get_by_id_proto_fail(STUB_ARGS)
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_proto_fail)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- JSValuePtr baseValue = ARG_src1;
+ JSValue baseValue = stackFrame.args[0].jsValue();
PropertySlot slot(baseValue);
- JSValuePtr result = baseValue.get(ARG_callFrame, *ARG_id2, slot);
+ JSValue result = baseValue.get(stackFrame.callFrame, stackFrame.args[1].identifier(), slot);
CHECK_FOR_EXCEPTION_AT_END();
- return JSValuePtr::encode(result);
+ return JSValue::encode(result);
}
-JSValueEncodedAsPointer* JITStubs::cti_op_get_by_id_array_fail(STUB_ARGS)
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_array_fail)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- JSValuePtr baseValue = ARG_src1;
+ JSValue baseValue = stackFrame.args[0].jsValue();
PropertySlot slot(baseValue);
- JSValuePtr result = baseValue.get(ARG_callFrame, *ARG_id2, slot);
+ JSValue result = baseValue.get(stackFrame.callFrame, stackFrame.args[1].identifier(), slot);
CHECK_FOR_EXCEPTION_AT_END();
- return JSValuePtr::encode(result);
+ return JSValue::encode(result);
}
-JSValueEncodedAsPointer* JITStubs::cti_op_get_by_id_string_fail(STUB_ARGS)
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_string_fail)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- JSValuePtr baseValue = ARG_src1;
+ JSValue baseValue = stackFrame.args[0].jsValue();
PropertySlot slot(baseValue);
- JSValuePtr result = baseValue.get(ARG_callFrame, *ARG_id2, slot);
+ JSValue result = baseValue.get(stackFrame.callFrame, stackFrame.args[1].identifier(), slot);
CHECK_FOR_EXCEPTION_AT_END();
- return JSValuePtr::encode(result);
+ return JSValue::encode(result);
}
#endif
-JSValueEncodedAsPointer* JITStubs::cti_op_instanceof(STUB_ARGS)
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_instanceof)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- CallFrame* callFrame = ARG_callFrame;
- JSValuePtr value = ARG_src1;
- JSValuePtr baseVal = ARG_src2;
- JSValuePtr proto = ARG_src3;
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSValue value = stackFrame.args[0].jsValue();
+ JSValue baseVal = stackFrame.args[1].jsValue();
+ JSValue proto = stackFrame.args[2].jsValue();
- // at least one of these checks must have failed to get to the slow case
+ // At least one of these checks must have failed to get to the slow case.
ASSERT(!value.isCell() || !baseVal.isCell() || !proto.isCell()
|| !value.isObject() || !baseVal.isObject() || !proto.isObject()
|| (asObject(baseVal)->structure()->typeInfo().flags() & (ImplementsHasInstance | OverridesHasInstance)) != ImplementsHasInstance);
- if (!baseVal.isObject()) {
- CallFrame* callFrame = ARG_callFrame;
+
+ // ECMA-262 15.3.5.3:
+    // Throw an exception either if baseVal is not an object, or if it does not implement 'HasInstance' (i.e. is not a function).
+ TypeInfo typeInfo(UnspecifiedType, 0);
+ if (!baseVal.isObject() || !(typeInfo = asObject(baseVal)->structure()->typeInfo()).implementsHasInstance()) {
+ CallFrame* callFrame = stackFrame.callFrame;
CodeBlock* codeBlock = callFrame->codeBlock();
unsigned vPCIndex = codeBlock->getBytecodeIndex(callFrame, STUB_RETURN_ADDRESS);
- ARG_globalData->exception = createInvalidParamError(callFrame, "instanceof", baseVal, vPCIndex, codeBlock);
+ stackFrame.globalData->exception = createInvalidParamError(callFrame, "instanceof", baseVal, vPCIndex, codeBlock);
VM_THROW_EXCEPTION();
}
-
- JSObject* baseObj = asObject(baseVal);
- TypeInfo typeInfo = baseObj->structure()->typeInfo();
- if (!typeInfo.implementsHasInstance())
- return JSValuePtr::encode(jsBoolean(false));
+ ASSERT(typeInfo.type() != UnspecifiedType);
if (!typeInfo.overridesHasInstance()) {
+ if (!value.isObject())
+ return JSValue::encode(jsBoolean(false));
+
if (!proto.isObject()) {
throwError(callFrame, TypeError, "instanceof called on an object with an invalid prototype property.");
VM_THROW_EXCEPTION();
}
-
- if (!value.isObject())
- return JSValuePtr::encode(jsBoolean(false));
}
- JSValuePtr result = jsBoolean(baseObj->hasInstance(callFrame, value, proto));
+ JSValue result = jsBoolean(asObject(baseVal)->hasInstance(callFrame, value, proto));
CHECK_FOR_EXCEPTION_AT_END();
- return JSValuePtr::encode(result);
+ return JSValue::encode(result);
}
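A compact restatement of the branch structure above, covering only the default (non-overriding) hasInstance path and using toy object types rather than JSC's:

    #include <stdexcept>

    // Toy object model; 'callable' models implementsHasInstance.
    struct ToyObject {
        ToyObject* prototype;
        bool callable;
    };

    bool instanceOf(ToyObject* value, ToyObject* baseVal, ToyObject* proto)
    {
        if (!baseVal || !baseVal->callable)
            throw std::runtime_error("instanceof: right-hand side is not callable");
        if (!value)
            return false; // a primitive left-hand side is never an instance
        if (!proto)
            throw std::runtime_error("instanceof: invalid 'prototype' property");
        for (ToyObject* p = value->prototype; p; p = p->prototype) {
            if (p == proto)
                return true; // found proto on value's prototype chain
        }
        return false;
    }

    int main()
    {
        ToyObject proto = { 0, false };
        ToyObject obj = { &proto, false };
        ToyObject ctor = { 0, true };
        return instanceOf(&obj, &ctor, &proto) ? 0 : 1;
    }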
-JSValueEncodedAsPointer* JITStubs::cti_op_del_by_id(STUB_ARGS)
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_del_by_id)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- CallFrame* callFrame = ARG_callFrame;
+ CallFrame* callFrame = stackFrame.callFrame;
- JSObject* baseObj = ARG_src1.toObject(callFrame);
+ JSObject* baseObj = stackFrame.args[0].jsValue().toObject(callFrame);
- JSValuePtr result = jsBoolean(baseObj->deleteProperty(callFrame, *ARG_id2));
+ JSValue result = jsBoolean(baseObj->deleteProperty(callFrame, stackFrame.args[1].identifier()));
CHECK_FOR_EXCEPTION_AT_END();
- return JSValuePtr::encode(result);
+ return JSValue::encode(result);
}
-JSValueEncodedAsPointer* JITStubs::cti_op_mul(STUB_ARGS)
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_mul)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- JSValuePtr src1 = ARG_src1;
- JSValuePtr src2 = ARG_src2;
+ JSValue src1 = stackFrame.args[0].jsValue();
+ JSValue src2 = stackFrame.args[1].jsValue();
double left;
double right;
if (src1.getNumber(left) && src2.getNumber(right))
- return JSValuePtr::encode(jsNumber(ARG_globalData, left * right));
+ return JSValue::encode(jsNumber(stackFrame.globalData, left * right));
- CallFrame* callFrame = ARG_callFrame;
- JSValuePtr result = jsNumber(ARG_globalData, src1.toNumber(callFrame) * src2.toNumber(callFrame));
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSValue result = jsNumber(stackFrame.globalData, src1.toNumber(callFrame) * src2.toNumber(callFrame));
CHECK_FOR_EXCEPTION_AT_END();
- return JSValuePtr::encode(result);
+ return JSValue::encode(result);
}
-JSObject* JITStubs::cti_op_new_func(STUB_ARGS)
+DEFINE_STUB_FUNCTION(JSObject*, op_new_func)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- return ARG_func1->makeFunction(ARG_callFrame, ARG_callFrame->scopeChain());
+ return stackFrame.args[0].funcDeclNode()->makeFunction(stackFrame.callFrame, stackFrame.callFrame->scopeChain());
}
-void* JITStubs::cti_op_call_JSFunction(STUB_ARGS)
+DEFINE_STUB_FUNCTION(void*, op_call_JSFunction)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
#ifndef NDEBUG
CallData callData;
- ASSERT(ARG_src1.getCallData(callData) == CallTypeJS);
+ ASSERT(stackFrame.args[0].jsValue().getCallData(callData) == CallTypeJS);
#endif
- ScopeChainNode* callDataScopeChain = asFunction(ARG_src1)->scope().node();
- CodeBlock* newCodeBlock = &asFunction(ARG_src1)->body()->bytecode(callDataScopeChain);
+ JSFunction* function = asFunction(stackFrame.args[0].jsValue());
+ FunctionBodyNode* body = function->body();
+ ScopeChainNode* callDataScopeChain = function->scope().node();
+ body->jitCode(callDataScopeChain);
- if (!newCodeBlock->jitCode())
- JIT::compile(ARG_globalData, newCodeBlock);
-
- return newCodeBlock;
+ return &(body->generatedBytecode());
}
-VoidPtrPair JITStubs::cti_op_call_arityCheck(STUB_ARGS)
+DEFINE_STUB_FUNCTION(VoidPtrPair, op_call_arityCheck)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- CallFrame* callFrame = ARG_callFrame;
- CodeBlock* newCodeBlock = ARG_codeBlock4;
- int argCount = ARG_int3;
+ CallFrame* callFrame = stackFrame.callFrame;
+ CodeBlock* newCodeBlock = stackFrame.args[3].codeBlock();
+ int argCount = stackFrame.args[2].int32();
ASSERT(argCount != newCodeBlock->m_numParameters);
@@ -854,12 +1204,12 @@ VoidPtrPair JITStubs::cti_op_call_arityCheck(STUB_ARGS)
size_t omittedArgCount = newCodeBlock->m_numParameters - argCount;
Register* r = callFrame->registers() + omittedArgCount;
Register* newEnd = r + newCodeBlock->m_numCalleeRegisters;
- if (!ARG_registerFile->grow(newEnd)) {
+ if (!stackFrame.registerFile->grow(newEnd)) {
// Rewind to the previous call frame because op_call already optimistically
// moved the call frame forward.
- ARG_setCallFrame(oldCallFrame);
- throwStackOverflowError(oldCallFrame, ARG_globalData, ARG_returnAddress2, STUB_RETURN_ADDRESS);
- RETURN_PAIR(0, 0);
+ stackFrame.callFrame = oldCallFrame;
+ throwStackOverflowError(oldCallFrame, stackFrame.globalData, stackFrame.args[1].returnAddress(), STUB_RETURN_ADDRESS);
+ RETURN_POINTER_PAIR(0, 0);
}
Register* argv = r - RegisterFile::CallFrameHeaderSize - omittedArgCount;
@@ -870,53 +1220,52 @@ VoidPtrPair JITStubs::cti_op_call_arityCheck(STUB_ARGS)
callFrame->setCallerFrame(oldCallFrame);
}
- RETURN_PAIR(newCodeBlock, callFrame);
+ RETURN_POINTER_PAIR(newCodeBlock, callFrame);
}
-void* JITStubs::cti_vm_dontLazyLinkCall(STUB_ARGS)
+DEFINE_STUB_FUNCTION(void*, vm_dontLazyLinkCall)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- JSGlobalData* globalData = ARG_globalData;
- JSFunction* callee = asFunction(ARG_src1);
- CodeBlock* codeBlock = &callee->body()->bytecode(callee->scope().node());
- if (!codeBlock->jitCode())
- JIT::compile(globalData, codeBlock);
+ JSGlobalData* globalData = stackFrame.globalData;
+ JSFunction* callee = asFunction(stackFrame.args[0].jsValue());
- ctiPatchNearCallByReturnAddress(ARG_returnAddress2, globalData->jitStubs.ctiVirtualCallLink());
+ ctiPatchNearCallByReturnAddress(stackFrame.args[1].returnAddress(), globalData->jitStubs.ctiVirtualCallLink());
- return codeBlock->jitCode().addressForCall();
+ return callee->body()->generatedJITCode().addressForCall().executableAddress();
}
-void* JITStubs::cti_vm_lazyLinkCall(STUB_ARGS)
+DEFINE_STUB_FUNCTION(void*, vm_lazyLinkCall)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- JSFunction* callee = asFunction(ARG_src1);
- CodeBlock* codeBlock = &callee->body()->bytecode(callee->scope().node());
- if (!codeBlock->jitCode())
- JIT::compile(ARG_globalData, codeBlock);
+ JSFunction* callee = asFunction(stackFrame.args[0].jsValue());
+ JITCode& jitCode = callee->body()->generatedJITCode();
+
+ CodeBlock* codeBlock = 0;
+ if (!callee->isHostFunction())
+ codeBlock = &callee->body()->bytecode(callee->scope().node());
- CallLinkInfo* callLinkInfo = &ARG_callFrame->callerFrame()->codeBlock()->getCallLinkInfo(ARG_returnAddress2);
- JIT::linkCall(callee, codeBlock, codeBlock->jitCode(), callLinkInfo, ARG_int3);
+ CallLinkInfo* callLinkInfo = &stackFrame.callFrame->callerFrame()->codeBlock()->getCallLinkInfo(stackFrame.args[1].returnAddress());
+ JIT::linkCall(callee, codeBlock, jitCode, callLinkInfo, stackFrame.args[2].int32());
- return codeBlock->jitCode().addressForCall();
+ return jitCode.addressForCall().executableAddress();
}
-JSObject* JITStubs::cti_op_push_activation(STUB_ARGS)
+DEFINE_STUB_FUNCTION(JSObject*, op_push_activation)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- JSActivation* activation = new (ARG_globalData) JSActivation(ARG_callFrame, static_cast<FunctionBodyNode*>(ARG_callFrame->codeBlock()->ownerNode()));
- ARG_callFrame->setScopeChain(ARG_callFrame->scopeChain()->copy()->push(activation));
+ JSActivation* activation = new (stackFrame.globalData) JSActivation(stackFrame.callFrame, static_cast<FunctionBodyNode*>(stackFrame.callFrame->codeBlock()->ownerNode()));
+ stackFrame.callFrame->setScopeChain(stackFrame.callFrame->scopeChain()->copy()->push(activation));
return activation;
}
-JSValueEncodedAsPointer* JITStubs::cti_op_call_NotJSFunction(STUB_ARGS)
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_call_NotJSFunction)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- JSValuePtr funcVal = ARG_src1;
+ JSValue funcVal = stackFrame.args[0].jsValue();
CallData callData;
CallType callType = funcVal.getCallData(callData);
@@ -924,163 +1273,173 @@ JSValueEncodedAsPointer* JITStubs::cti_op_call_NotJSFunction(STUB_ARGS)
ASSERT(callType != CallTypeJS);
if (callType == CallTypeHost) {
- int registerOffset = ARG_int2;
- int argCount = ARG_int3;
- CallFrame* previousCallFrame = ARG_callFrame;
+ int registerOffset = stackFrame.args[1].int32();
+ int argCount = stackFrame.args[2].int32();
+ CallFrame* previousCallFrame = stackFrame.callFrame;
CallFrame* callFrame = CallFrame::create(previousCallFrame->registers() + registerOffset);
callFrame->init(0, static_cast<Instruction*>(STUB_RETURN_ADDRESS), previousCallFrame->scopeChain(), previousCallFrame, 0, argCount, 0);
- ARG_setCallFrame(callFrame);
+ stackFrame.callFrame = callFrame;
- Register* argv = ARG_callFrame->registers() - RegisterFile::CallFrameHeaderSize - argCount;
+ Register* argv = stackFrame.callFrame->registers() - RegisterFile::CallFrameHeaderSize - argCount;
ArgList argList(argv + 1, argCount - 1);
- JSValuePtr returnValue;
+ JSValue returnValue;
{
SamplingTool::HostCallRecord callRecord(CTI_SAMPLER);
// FIXME: All host methods should be calling toThisObject, but this is not presently the case.
- JSValuePtr thisValue = argv[0].jsValue(callFrame);
+ JSValue thisValue = argv[0].jsValue();
if (thisValue == jsNull())
thisValue = callFrame->globalThisValue();
returnValue = callData.native.function(callFrame, asObject(funcVal), thisValue, argList);
}
- ARG_setCallFrame(previousCallFrame);
+ stackFrame.callFrame = previousCallFrame;
CHECK_FOR_EXCEPTION();
- return JSValuePtr::encode(returnValue);
+ return JSValue::encode(returnValue);
}
ASSERT(callType == CallTypeNone);
- CallFrame* callFrame = ARG_callFrame;
+ CallFrame* callFrame = stackFrame.callFrame;
CodeBlock* codeBlock = callFrame->codeBlock();
unsigned vPCIndex = codeBlock->getBytecodeIndex(callFrame, STUB_RETURN_ADDRESS);
- ARG_globalData->exception = createNotAFunctionError(ARG_callFrame, funcVal, vPCIndex, codeBlock);
+ stackFrame.globalData->exception = createNotAFunctionError(stackFrame.callFrame, funcVal, vPCIndex, codeBlock);
VM_THROW_EXCEPTION();
}
-void JITStubs::cti_op_create_arguments(STUB_ARGS)
+DEFINE_STUB_FUNCTION(void, op_create_arguments)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- Arguments* arguments = new (ARG_globalData) Arguments(ARG_callFrame);
- ARG_callFrame->setCalleeArguments(arguments);
- ARG_callFrame[RegisterFile::ArgumentsRegister] = arguments;
+ Arguments* arguments = new (stackFrame.globalData) Arguments(stackFrame.callFrame);
+ stackFrame.callFrame->setCalleeArguments(arguments);
+ stackFrame.callFrame[RegisterFile::ArgumentsRegister] = arguments;
}
-void JITStubs::cti_op_create_arguments_no_params(STUB_ARGS)
+DEFINE_STUB_FUNCTION(void, op_create_arguments_no_params)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- Arguments* arguments = new (ARG_globalData) Arguments(ARG_callFrame, Arguments::NoParameters);
- ARG_callFrame->setCalleeArguments(arguments);
- ARG_callFrame[RegisterFile::ArgumentsRegister] = arguments;
+ Arguments* arguments = new (stackFrame.globalData) Arguments(stackFrame.callFrame, Arguments::NoParameters);
+ stackFrame.callFrame->setCalleeArguments(arguments);
+ stackFrame.callFrame[RegisterFile::ArgumentsRegister] = arguments;
}
-void JITStubs::cti_op_tear_off_activation(STUB_ARGS)
+DEFINE_STUB_FUNCTION(void, op_tear_off_activation)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- ASSERT(ARG_callFrame->codeBlock()->needsFullScopeChain());
- asActivation(ARG_src1)->copyRegisters(ARG_callFrame->optionalCalleeArguments());
+ ASSERT(stackFrame.callFrame->codeBlock()->needsFullScopeChain());
+ asActivation(stackFrame.args[0].jsValue())->copyRegisters(stackFrame.callFrame->optionalCalleeArguments());
}
-void JITStubs::cti_op_tear_off_arguments(STUB_ARGS)
+DEFINE_STUB_FUNCTION(void, op_tear_off_arguments)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- ASSERT(ARG_callFrame->codeBlock()->usesArguments() && !ARG_callFrame->codeBlock()->needsFullScopeChain());
- ARG_callFrame->optionalCalleeArguments()->copyRegisters();
+ ASSERT(stackFrame.callFrame->codeBlock()->usesArguments() && !stackFrame.callFrame->codeBlock()->needsFullScopeChain());
+ if (stackFrame.callFrame->optionalCalleeArguments())
+ stackFrame.callFrame->optionalCalleeArguments()->copyRegisters();
}
-void JITStubs::cti_op_profile_will_call(STUB_ARGS)
+DEFINE_STUB_FUNCTION(void, op_profile_will_call)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- ASSERT(*ARG_profilerReference);
- (*ARG_profilerReference)->willExecute(ARG_callFrame, ARG_src1);
+ ASSERT(*stackFrame.enabledProfilerReference);
+ (*stackFrame.enabledProfilerReference)->willExecute(stackFrame.callFrame, stackFrame.args[0].jsValue());
}
-void JITStubs::cti_op_profile_did_call(STUB_ARGS)
+DEFINE_STUB_FUNCTION(void, op_profile_did_call)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- ASSERT(*ARG_profilerReference);
- (*ARG_profilerReference)->didExecute(ARG_callFrame, ARG_src1);
+ ASSERT(*stackFrame.enabledProfilerReference);
+ (*stackFrame.enabledProfilerReference)->didExecute(stackFrame.callFrame, stackFrame.args[0].jsValue());
}
-void JITStubs::cti_op_ret_scopeChain(STUB_ARGS)
+DEFINE_STUB_FUNCTION(void, op_ret_scopeChain)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- ASSERT(ARG_callFrame->codeBlock()->needsFullScopeChain());
- ARG_callFrame->scopeChain()->deref();
+ ASSERT(stackFrame.callFrame->codeBlock()->needsFullScopeChain());
+ stackFrame.callFrame->scopeChain()->deref();
}
-JSObject* JITStubs::cti_op_new_array(STUB_ARGS)
+DEFINE_STUB_FUNCTION(JSObject*, op_new_array)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- ArgList argList(&ARG_callFrame->registers()[ARG_int1], ARG_int2);
- return constructArray(ARG_callFrame, argList);
+ ArgList argList(&stackFrame.callFrame->registers()[stackFrame.args[0].int32()], stackFrame.args[1].int32());
+ return constructArray(stackFrame.callFrame, argList);
}
-JSValueEncodedAsPointer* JITStubs::cti_op_resolve(STUB_ARGS)
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_resolve)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- CallFrame* callFrame = ARG_callFrame;
+ CallFrame* callFrame = stackFrame.callFrame;
ScopeChainNode* scopeChain = callFrame->scopeChain();
ScopeChainIterator iter = scopeChain->begin();
ScopeChainIterator end = scopeChain->end();
ASSERT(iter != end);
- Identifier& ident = *ARG_id1;
+ Identifier& ident = stackFrame.args[0].identifier();
do {
JSObject* o = *iter;
PropertySlot slot(o);
if (o->getPropertySlot(callFrame, ident, slot)) {
- JSValuePtr result = slot.getValue(callFrame, ident);
+ JSValue result = slot.getValue(callFrame, ident);
CHECK_FOR_EXCEPTION_AT_END();
- return JSValuePtr::encode(result);
+ return JSValue::encode(result);
}
} while (++iter != end);
CodeBlock* codeBlock = callFrame->codeBlock();
unsigned vPCIndex = codeBlock->getBytecodeIndex(callFrame, STUB_RETURN_ADDRESS);
- ARG_globalData->exception = createUndefinedVariableError(callFrame, ident, vPCIndex, codeBlock);
+ stackFrame.globalData->exception = createUndefinedVariableError(callFrame, ident, vPCIndex, codeBlock);
VM_THROW_EXCEPTION();
}
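The resolve loop above is a plain scope-chain walk: try each scope object from innermost to outermost, and treat an exhausted chain as an undefined-variable error. A toy version under illustrative types:

    #include <map>
    #include <stdexcept>
    #include <string>

    struct ScopeNode {
        std::map<std::string, int> properties; // stands in for a JSObject's slots
        ScopeNode* next;                       // enclosing scope
    };

    int resolve(ScopeNode* scopeChain, const std::string& ident)
    {
        for (ScopeNode* node = scopeChain; node; node = node->next) {
            std::map<std::string, int>::const_iterator it = node->properties.find(ident);
            if (it != node->properties.end())
                return it->second; // first hit wins, innermost scope first
        }
        throw std::runtime_error("undefined variable: " + ident);
    }

    int main()
    {
        ScopeNode global = { std::map<std::string, int>(), 0 };
        global.properties["x"] = 42;
        ScopeNode local = { std::map<std::string, int>(), &global };
        return resolve(&local, "x") == 42 ? 0 : 1;
    }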
-JSObject* JITStubs::cti_op_construct_JSConstruct(STUB_ARGS)
+DEFINE_STUB_FUNCTION(JSObject*, op_construct_JSConstruct)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSFunction* constructor = asFunction(stackFrame.args[0].jsValue());
+ if (constructor->isHostFunction()) {
+ CallFrame* callFrame = stackFrame.callFrame;
+ CodeBlock* codeBlock = callFrame->codeBlock();
+ unsigned vPCIndex = codeBlock->getBytecodeIndex(callFrame, STUB_RETURN_ADDRESS);
+ stackFrame.globalData->exception = createNotAConstructorError(callFrame, constructor, vPCIndex, codeBlock);
+ VM_THROW_EXCEPTION();
+ }
#ifndef NDEBUG
ConstructData constructData;
- ASSERT(asFunction(ARG_src1)->getConstructData(constructData) == ConstructTypeJS);
+ ASSERT(constructor->getConstructData(constructData) == ConstructTypeJS);
#endif
Structure* structure;
- if (ARG_src4.isObject())
- structure = asObject(ARG_src4)->inheritorID();
+ if (stackFrame.args[3].jsValue().isObject())
+ structure = asObject(stackFrame.args[3].jsValue())->inheritorID();
else
- structure = asFunction(ARG_src1)->scope().node()->globalObject()->emptyObjectStructure();
- return new (ARG_globalData) JSObject(structure);
+ structure = constructor->scope().node()->globalObject()->emptyObjectStructure();
+ return new (stackFrame.globalData) JSObject(structure);
}
-JSValueEncodedAsPointer* JITStubs::cti_op_construct_NotJSConstruct(STUB_ARGS)
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_construct_NotJSConstruct)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- CallFrame* callFrame = ARG_callFrame;
+ CallFrame* callFrame = stackFrame.callFrame;
- JSValuePtr constrVal = ARG_src1;
- int argCount = ARG_int3;
- int thisRegister = ARG_int5;
+ JSValue constrVal = stackFrame.args[0].jsValue();
+ int argCount = stackFrame.args[2].int32();
+ int thisRegister = stackFrame.args[4].int32();
ConstructData constructData;
ConstructType constructType = constrVal.getConstructData(constructData);
@@ -1088,35 +1447,35 @@ JSValueEncodedAsPointer* JITStubs::cti_op_construct_NotJSConstruct(STUB_ARGS)
if (constructType == ConstructTypeHost) {
ArgList argList(callFrame->registers() + thisRegister + 1, argCount - 1);
- JSValuePtr returnValue;
+ JSValue returnValue;
{
SamplingTool::HostCallRecord callRecord(CTI_SAMPLER);
returnValue = constructData.native.function(callFrame, asObject(constrVal), argList);
}
CHECK_FOR_EXCEPTION();
- return JSValuePtr::encode(returnValue);
+ return JSValue::encode(returnValue);
}
ASSERT(constructType == ConstructTypeNone);
CodeBlock* codeBlock = callFrame->codeBlock();
unsigned vPCIndex = codeBlock->getBytecodeIndex(callFrame, STUB_RETURN_ADDRESS);
- ARG_globalData->exception = createNotAConstructorError(callFrame, constrVal, vPCIndex, codeBlock);
+ stackFrame.globalData->exception = createNotAConstructorError(callFrame, constrVal, vPCIndex, codeBlock);
VM_THROW_EXCEPTION();
}
-JSValueEncodedAsPointer* JITStubs::cti_op_get_by_val(STUB_ARGS)
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_val)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- CallFrame* callFrame = ARG_callFrame;
- JSGlobalData* globalData = ARG_globalData;
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSGlobalData* globalData = stackFrame.globalData;
- JSValuePtr baseValue = ARG_src1;
- JSValuePtr subscript = ARG_src2;
+ JSValue baseValue = stackFrame.args[0].jsValue();
+ JSValue subscript = stackFrame.args[1].jsValue();
- JSValuePtr result;
+ JSValue result;
if (LIKELY(subscript.isUInt32Fast())) {
uint32_t i = subscript.getUInt32Fast();
@@ -1126,12 +1485,14 @@ JSValueEncodedAsPointer* JITStubs::cti_op_get_by_val(STUB_ARGS)
result = jsArray->getIndex(i);
else
result = jsArray->JSArray::get(callFrame, i);
- } else if (isJSString(globalData, baseValue) && asString(baseValue)->canGetIndex(i))
- result = asString(baseValue)->getIndex(ARG_globalData, i);
- else if (isJSByteArray(globalData, baseValue) && asByteArray(baseValue)->canAccessIndex(i)) {
+ } else if (isJSString(globalData, baseValue) && asString(baseValue)->canGetIndex(i)) {
// All fast byte array accesses are safe from exceptions so return immediately to avoid exception checks.
- ctiPatchCallByReturnAddress(STUB_RETURN_ADDRESS, reinterpret_cast<void*>(cti_op_get_by_val_byte_array));
- return JSValuePtr::encode(asByteArray(baseValue)->getIndex(callFrame, i));
+ ctiPatchCallByReturnAddress(STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_val_string));
+ result = asString(baseValue)->getIndex(stackFrame.globalData, i);
+ } else if (isJSByteArray(globalData, baseValue) && asByteArray(baseValue)->canAccessIndex(i)) {
+ // All fast byte array accesses are safe from exceptions so return immediately to avoid exception checks.
+ ctiPatchCallByReturnAddress(STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_val_byte_array));
+ return JSValue::encode(asByteArray(baseValue)->getIndex(callFrame, i));
} else
result = baseValue.get(callFrame, i);
} else {
@@ -1140,45 +1501,76 @@ JSValueEncodedAsPointer* JITStubs::cti_op_get_by_val(STUB_ARGS)
}
CHECK_FOR_EXCEPTION_AT_END();
- return JSValuePtr::encode(result);
+ return JSValue::encode(result);
}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_val_string)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSGlobalData* globalData = stackFrame.globalData;
+
+ JSValue baseValue = stackFrame.args[0].jsValue();
+ JSValue subscript = stackFrame.args[1].jsValue();
+
+ JSValue result;
+
+ if (LIKELY(subscript.isUInt32Fast())) {
+ uint32_t i = subscript.getUInt32Fast();
+ if (isJSString(globalData, baseValue) && asString(baseValue)->canGetIndex(i))
+ result = asString(baseValue)->getIndex(stackFrame.globalData, i);
+ else {
+ result = baseValue.get(callFrame, i);
+ if (!isJSString(globalData, baseValue))
+ ctiPatchCallByReturnAddress(STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_val));
+ }
+ } else {
+ Identifier property(callFrame, subscript.toString(callFrame));
+ result = baseValue.get(callFrame, property);
+ }
+
+ CHECK_FOR_EXCEPTION_AT_END();
+ return JSValue::encode(result);
+}
+
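This patch-to-specialize / patch-back protocol (via ctiPatchCallByReturnAddress) recurs throughout the file: a call site is rewritten to point at a specialized stub, and the stub rewrites it back to the generic one when its speculation fails. A runnable toy model, with a function pointer standing in for the patched call instruction:

    #include <cstdio>

    typedef int (*StubFn)(int);

    static StubFn g_callSite; // stands in for the patchable call in JIT code

    static int genericStub(int v) { return v + 1; }

    static int specializedStub(int v)
    {
        if (v < 0) {
            // Speculation failed: patch the call site back to the generic stub.
            g_callSite = genericStub;
            return genericStub(v);
        }
        return v + 1; // fast path
    }

    int main()
    {
        g_callSite = specializedStub;    // the generic stub patched us in earlier
        std::printf("%d\n", g_callSite(41)); // fast path
        std::printf("%d\n", g_callSite(-2)); // repatches to genericStub
        std::printf("%d\n", g_callSite(0));  // now goes through genericStub
        return 0;
    }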
-JSValueEncodedAsPointer* JITStubs::cti_op_get_by_val_byte_array(STUB_ARGS)
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_val_byte_array)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- CallFrame* callFrame = ARG_callFrame;
- JSGlobalData* globalData = ARG_globalData;
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSGlobalData* globalData = stackFrame.globalData;
- JSValuePtr baseValue = ARG_src1;
- JSValuePtr subscript = ARG_src2;
+ JSValue baseValue = stackFrame.args[0].jsValue();
+ JSValue subscript = stackFrame.args[1].jsValue();
- JSValuePtr result;
+ JSValue result;
if (LIKELY(subscript.isUInt32Fast())) {
uint32_t i = subscript.getUInt32Fast();
if (isJSByteArray(globalData, baseValue) && asByteArray(baseValue)->canAccessIndex(i)) {
// All fast byte array accesses are safe from exceptions so return immediately to avoid exception checks.
- return JSValuePtr::encode(asByteArray(baseValue)->getIndex(callFrame, i));
+ return JSValue::encode(asByteArray(baseValue)->getIndex(callFrame, i));
}
result = baseValue.get(callFrame, i);
if (!isJSByteArray(globalData, baseValue))
- ctiPatchCallByReturnAddress(STUB_RETURN_ADDRESS, reinterpret_cast<void*>(cti_op_get_by_val));
+ ctiPatchCallByReturnAddress(STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_val));
} else {
Identifier property(callFrame, subscript.toString(callFrame));
result = baseValue.get(callFrame, property);
}
CHECK_FOR_EXCEPTION_AT_END();
- return JSValuePtr::encode(result);
+ return JSValue::encode(result);
}
-VoidPtrPair JITStubs::cti_op_resolve_func(STUB_ARGS)
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_resolve_func)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- CallFrame* callFrame = ARG_callFrame;
+ CallFrame* callFrame = stackFrame.callFrame;
ScopeChainNode* scopeChain = callFrame->scopeChain();
ScopeChainIterator iter = scopeChain->begin();
@@ -1188,7 +1580,7 @@ VoidPtrPair JITStubs::cti_op_resolve_func(STUB_ARGS)
ASSERT(iter != end);
- Identifier& ident = *ARG_id1;
+ Identifier& ident = stackFrame.args[0].identifier();
JSObject* base;
do {
base = *iter;
@@ -1202,48 +1594,50 @@ VoidPtrPair JITStubs::cti_op_resolve_func(STUB_ARGS)
// that in host objects you always get a valid object for this.
// We also handle wrapper substitution for the global object at the same time.
JSObject* thisObj = base->toThisObject(callFrame);
- JSValuePtr result = slot.getValue(callFrame, ident);
+ JSValue result = slot.getValue(callFrame, ident);
CHECK_FOR_EXCEPTION_AT_END();
- RETURN_PAIR(thisObj, JSValuePtr::encode(result));
+ callFrame->registers()[stackFrame.args[1].int32()] = JSValue(thisObj);
+ return JSValue::encode(result);
}
++iter;
} while (iter != end);
CodeBlock* codeBlock = callFrame->codeBlock();
unsigned vPCIndex = codeBlock->getBytecodeIndex(callFrame, STUB_RETURN_ADDRESS);
- ARG_globalData->exception = createUndefinedVariableError(callFrame, ident, vPCIndex, codeBlock);
- VM_THROW_EXCEPTION_2();
+ stackFrame.globalData->exception = createUndefinedVariableError(callFrame, ident, vPCIndex, codeBlock);
+ VM_THROW_EXCEPTION_AT_END();
+ return JSValue::encode(JSValue());
}
-JSValueEncodedAsPointer* JITStubs::cti_op_sub(STUB_ARGS)
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_sub)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- JSValuePtr src1 = ARG_src1;
- JSValuePtr src2 = ARG_src2;
+ JSValue src1 = stackFrame.args[0].jsValue();
+ JSValue src2 = stackFrame.args[1].jsValue();
double left;
double right;
if (src1.getNumber(left) && src2.getNumber(right))
- return JSValuePtr::encode(jsNumber(ARG_globalData, left - right));
+ return JSValue::encode(jsNumber(stackFrame.globalData, left - right));
- CallFrame* callFrame = ARG_callFrame;
- JSValuePtr result = jsNumber(ARG_globalData, src1.toNumber(callFrame) - src2.toNumber(callFrame));
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSValue result = jsNumber(stackFrame.globalData, src1.toNumber(callFrame) - src2.toNumber(callFrame));
CHECK_FOR_EXCEPTION_AT_END();
- return JSValuePtr::encode(result);
+ return JSValue::encode(result);
}
-void JITStubs::cti_op_put_by_val(STUB_ARGS)
+DEFINE_STUB_FUNCTION(void, op_put_by_val)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- CallFrame* callFrame = ARG_callFrame;
- JSGlobalData* globalData = ARG_globalData;
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSGlobalData* globalData = stackFrame.globalData;
- JSValuePtr baseValue = ARG_src1;
- JSValuePtr subscript = ARG_src2;
- JSValuePtr value = ARG_src3;
+ JSValue baseValue = stackFrame.args[0].jsValue();
+ JSValue subscript = stackFrame.args[1].jsValue();
+ JSValue value = stackFrame.args[2].jsValue();
if (LIKELY(subscript.isUInt32Fast())) {
uint32_t i = subscript.getUInt32Fast();
@@ -1255,7 +1649,7 @@ void JITStubs::cti_op_put_by_val(STUB_ARGS)
jsArray->JSArray::put(callFrame, i, value);
} else if (isJSByteArray(globalData, baseValue) && asByteArray(baseValue)->canAccessIndex(i)) {
JSByteArray* jsByteArray = asByteArray(baseValue);
- ctiPatchCallByReturnAddress(STUB_RETURN_ADDRESS, reinterpret_cast<void*>(cti_op_put_by_val_byte_array));
+ ctiPatchCallByReturnAddress(STUB_RETURN_ADDRESS, FunctionPtr(cti_op_put_by_val_byte_array));
// All fast byte array accesses are safe from exceptions so return immediately to avoid exception checks.
if (value.isInt32Fast()) {
jsByteArray->setIndex(i, value.getInt32Fast());
@@ -1273,7 +1667,7 @@ void JITStubs::cti_op_put_by_val(STUB_ARGS)
baseValue.put(callFrame, i, value);
} else {
Identifier property(callFrame, subscript.toString(callFrame));
- if (!ARG_globalData->exception) { // Don't put to an object if toString threw an exception.
+ if (!stackFrame.globalData->exception) { // Don't put to an object if toString threw an exception.
PutPropertySlot slot;
baseValue.put(callFrame, property, value, slot);
}
@@ -1282,25 +1676,25 @@ void JITStubs::cti_op_put_by_val(STUB_ARGS)
CHECK_FOR_EXCEPTION_AT_END();
}
-void JITStubs::cti_op_put_by_val_array(STUB_ARGS)
+DEFINE_STUB_FUNCTION(void, op_put_by_val_array)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- CallFrame* callFrame = ARG_callFrame;
- JSValuePtr baseValue = ARG_src1;
- int i = ARG_int2;
- JSValuePtr value = ARG_src3;
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSValue baseValue = stackFrame.args[0].jsValue();
+ int i = stackFrame.args[1].int32();
+ JSValue value = stackFrame.args[2].jsValue();
- ASSERT(isJSArray(ARG_globalData, baseValue));
+ ASSERT(isJSArray(stackFrame.globalData, baseValue));
if (LIKELY(i >= 0))
asArray(baseValue)->JSArray::put(callFrame, i, value);
else {
// This should work since we're re-boxing an immediate unboxed in JIT code.
- ASSERT(JSValuePtr::makeInt32Fast(i));
- Identifier property(callFrame, JSValuePtr::makeInt32Fast(i).toString(callFrame));
+ ASSERT(JSValue::makeInt32Fast(i));
+ Identifier property(callFrame, JSValue::makeInt32Fast(i).toString(callFrame));
// FIXME: can toString throw an exception here?
- if (!ARG_globalData->exception) { // Don't put to an object if toString threw an exception.
+ if (!stackFrame.globalData->exception) { // Don't put to an object if toString threw an exception.
PutPropertySlot slot;
baseValue.put(callFrame, property, value, slot);
}
@@ -1309,16 +1703,16 @@ void JITStubs::cti_op_put_by_val_array(STUB_ARGS)
CHECK_FOR_EXCEPTION_AT_END();
}
-void JITStubs::cti_op_put_by_val_byte_array(STUB_ARGS)
+DEFINE_STUB_FUNCTION(void, op_put_by_val_byte_array)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- CallFrame* callFrame = ARG_callFrame;
- JSGlobalData* globalData = ARG_globalData;
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSGlobalData* globalData = stackFrame.globalData;
- JSValuePtr baseValue = ARG_src1;
- JSValuePtr subscript = ARG_src2;
- JSValuePtr value = ARG_src3;
+ JSValue baseValue = stackFrame.args[0].jsValue();
+ JSValue subscript = stackFrame.args[1].jsValue();
+ JSValue value = stackFrame.args[2].jsValue();
if (LIKELY(subscript.isUInt32Fast())) {
uint32_t i = subscript.getUInt32Fast();
@@ -1339,11 +1733,11 @@ void JITStubs::cti_op_put_by_val_byte_array(STUB_ARGS)
}
if (!isJSByteArray(globalData, baseValue))
- ctiPatchCallByReturnAddress(STUB_RETURN_ADDRESS, reinterpret_cast<void*>(cti_op_put_by_val));
+ ctiPatchCallByReturnAddress(STUB_RETURN_ADDRESS, FunctionPtr(cti_op_put_by_val));
baseValue.put(callFrame, i, value);
} else {
Identifier property(callFrame, subscript.toString(callFrame));
- if (!ARG_globalData->exception) { // Don't put to an object if toString threw an exception.
+ if (!stackFrame.globalData->exception) { // Don't put to an object if toString threw an exception.
PutPropertySlot slot;
baseValue.put(callFrame, property, value, slot);
}
@@ -1352,60 +1746,148 @@ void JITStubs::cti_op_put_by_val_byte_array(STUB_ARGS)
CHECK_FOR_EXCEPTION_AT_END();
}
-JSValueEncodedAsPointer* JITStubs::cti_op_lesseq(STUB_ARGS)
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_lesseq)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- CallFrame* callFrame = ARG_callFrame;
- JSValuePtr result = jsBoolean(jsLessEq(callFrame, ARG_src1, ARG_src2));
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSValue result = jsBoolean(jsLessEq(callFrame, stackFrame.args[0].jsValue(), stackFrame.args[1].jsValue()));
CHECK_FOR_EXCEPTION_AT_END();
- return JSValuePtr::encode(result);
+ return JSValue::encode(result);
}
-int JITStubs::cti_op_loop_if_true(STUB_ARGS)
+DEFINE_STUB_FUNCTION(int, op_loop_if_true)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- JSValuePtr src1 = ARG_src1;
+ JSValue src1 = stackFrame.args[0].jsValue();
- CallFrame* callFrame = ARG_callFrame;
+ CallFrame* callFrame = stackFrame.callFrame;
bool result = src1.toBoolean(callFrame);
CHECK_FOR_EXCEPTION_AT_END();
return result;
}
+
+DEFINE_STUB_FUNCTION(int, op_load_varargs)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+ CallFrame* callFrame = stackFrame.callFrame;
+ RegisterFile* registerFile = stackFrame.registerFile;
+ int argsOffset = stackFrame.args[0].int32();
+ JSValue arguments = callFrame->registers()[argsOffset].jsValue();
+ uint32_t argCount = 0;
+ if (!arguments) {
+ int providedParams = callFrame->registers()[RegisterFile::ArgumentCount].i() - 1;
+ argCount = providedParams;
+ int32_t sizeDelta = argsOffset + argCount + RegisterFile::CallFrameHeaderSize;
+ Register* newEnd = callFrame->registers() + sizeDelta;
+ if (!registerFile->grow(newEnd) || ((newEnd - callFrame->registers()) != sizeDelta)) {
+ stackFrame.globalData->exception = createStackOverflowError(callFrame);
+ VM_THROW_EXCEPTION();
+ }
+ int32_t expectedParams = asFunction(callFrame->registers()[RegisterFile::Callee].jsValue())->body()->parameterCount();
+ int32_t inplaceArgs = min(providedParams, expectedParams);
+
+ Register* inplaceArgsDst = callFrame->registers() + argsOffset;
+
+ Register* inplaceArgsEnd = inplaceArgsDst + inplaceArgs;
+ Register* inplaceArgsEnd2 = inplaceArgsDst + providedParams;
+
+ Register* inplaceArgsSrc = callFrame->registers() - RegisterFile::CallFrameHeaderSize - expectedParams;
+ Register* inplaceArgsSrc2 = inplaceArgsSrc - providedParams - 1 + inplaceArgs;
+
+    // The first step is to copy the "expected" parameters from their normal location relative to the call frame.
+ while (inplaceArgsDst < inplaceArgsEnd)
+ *inplaceArgsDst++ = *inplaceArgsSrc++;
+
+    // Then we copy any additional arguments that may be further up the stack ('-1' to account for 'this').
+ while (inplaceArgsDst < inplaceArgsEnd2)
+ *inplaceArgsDst++ = *inplaceArgsSrc2++;
+
+ } else if (!arguments.isUndefinedOrNull()) {
+ if (!arguments.isObject()) {
+ CodeBlock* codeBlock = callFrame->codeBlock();
+ unsigned vPCIndex = codeBlock->getBytecodeIndex(callFrame, STUB_RETURN_ADDRESS);
+ stackFrame.globalData->exception = createInvalidParamError(callFrame, "Function.prototype.apply", arguments, vPCIndex, codeBlock);
+ VM_THROW_EXCEPTION();
+ }
+ if (asObject(arguments)->classInfo() == &Arguments::info) {
+ Arguments* argsObject = asArguments(arguments);
+ argCount = argsObject->numProvidedArguments(callFrame);
+ int32_t sizeDelta = argsOffset + argCount + RegisterFile::CallFrameHeaderSize;
+ Register* newEnd = callFrame->registers() + sizeDelta;
+ if (!registerFile->grow(newEnd) || ((newEnd - callFrame->registers()) != sizeDelta)) {
+ stackFrame.globalData->exception = createStackOverflowError(callFrame);
+ VM_THROW_EXCEPTION();
+ }
+ argsObject->copyToRegisters(callFrame, callFrame->registers() + argsOffset, argCount);
+ } else if (isJSArray(&callFrame->globalData(), arguments)) {
+ JSArray* array = asArray(arguments);
+ argCount = array->length();
+ int32_t sizeDelta = argsOffset + argCount + RegisterFile::CallFrameHeaderSize;
+ Register* newEnd = callFrame->registers() + sizeDelta;
+ if (!registerFile->grow(newEnd) || ((newEnd - callFrame->registers()) != sizeDelta)) {
+ stackFrame.globalData->exception = createStackOverflowError(callFrame);
+ VM_THROW_EXCEPTION();
+ }
+ array->copyToRegisters(callFrame, callFrame->registers() + argsOffset, argCount);
+ } else if (asObject(arguments)->inherits(&JSArray::info)) {
+ JSObject* argObject = asObject(arguments);
+ argCount = argObject->get(callFrame, callFrame->propertyNames().length).toUInt32(callFrame);
+ int32_t sizeDelta = argsOffset + argCount + RegisterFile::CallFrameHeaderSize;
+ Register* newEnd = callFrame->registers() + sizeDelta;
+ if (!registerFile->grow(newEnd) || ((newEnd - callFrame->registers()) != sizeDelta)) {
+ stackFrame.globalData->exception = createStackOverflowError(callFrame);
+ VM_THROW_EXCEPTION();
+ }
+ Register* argsBuffer = callFrame->registers() + argsOffset;
+ for (unsigned i = 0; i < argCount; ++i) {
+ argsBuffer[i] = asObject(arguments)->get(callFrame, i);
+ CHECK_FOR_EXCEPTION();
+ }
+ } else {
+ CodeBlock* codeBlock = callFrame->codeBlock();
+ unsigned vPCIndex = codeBlock->getBytecodeIndex(callFrame, STUB_RETURN_ADDRESS);
+ stackFrame.globalData->exception = createInvalidParamError(callFrame, "Function.prototype.apply", arguments, vPCIndex, codeBlock);
+ VM_THROW_EXCEPTION();
+ }
+ }
-JSValueEncodedAsPointer* JITStubs::cti_op_negate(STUB_ARGS)
+ return argCount + 1;
+}
+
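The loop pair above stitches the argument vector for Function.prototype.apply together from two regions of the register file: the first min(providedParams, expectedParams) values sit in their canonical parameter slots below the call frame header, while any overflow arguments live at a separate offset (the extra '-1' skips the 'this' slot). A minimal sketch of the same two-pointer copy over plain arrays; names and types here are illustrative, not engine code:

    #include <algorithm>

    // Stitch a destination argument buffer together from two source blocks:
    // the canonically placed parameters first, then the overflow arguments.
    void stitchArguments(const int* expectedBlock, const int* overflowBlock,
                         int providedParams, int expectedParams, int* dst)
    {
        const int inplaceArgs = std::min(providedParams, expectedParams);
        int* cursor = dst;
        int* end1 = dst + inplaceArgs;
        int* end2 = dst + providedParams;

        while (cursor < end1)
            *cursor++ = *expectedBlock++;   // "expected" parameter slots
        while (cursor < end2)
            *cursor++ = *overflowBlock++;   // extras beyond the declared arity
    }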
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_negate)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- JSValuePtr src = ARG_src1;
+ JSValue src = stackFrame.args[0].jsValue();
double v;
if (src.getNumber(v))
- return JSValuePtr::encode(jsNumber(ARG_globalData, -v));
+ return JSValue::encode(jsNumber(stackFrame.globalData, -v));
- CallFrame* callFrame = ARG_callFrame;
- JSValuePtr result = jsNumber(ARG_globalData, -src.toNumber(callFrame));
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSValue result = jsNumber(stackFrame.globalData, -src.toNumber(callFrame));
CHECK_FOR_EXCEPTION_AT_END();
- return JSValuePtr::encode(result);
+ return JSValue::encode(result);
}
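op_negate is the first stub shown in its fully converted form, and it follows the pattern applied throughout this patch: instead of per-slot ARG_src1/ARG_int2 macros, each stub overlays the platform's JITStackFrame struct (declared in JITStubs.h below) on the incoming argument block and reads stackFrame.args[n] under the statically known type. A rough sketch of what the two macros amount to; these expansions are assumptions for illustration, since the real definitions also handle the va_list calling convention and return-address plumbing:

    // Assumed, simplified expansions -- not the actual macro bodies.
    #define DEFINE_STUB_FUNCTION(rtype, op) \
        rtype JIT_STUB cti_##op(STUB_ARGS_DECLARATION)

    #define STUB_INIT_STACK_FRAME(stackFrame) \
        JITStackFrame& stackFrame = *reinterpret_cast<JITStackFrame*>(STUB_ARGS)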
-JSValueEncodedAsPointer* JITStubs::cti_op_resolve_base(STUB_ARGS)
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_resolve_base)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- return JSValuePtr::encode(JSC::resolveBase(ARG_callFrame, *ARG_id1, ARG_callFrame->scopeChain()));
+ return JSValue::encode(JSC::resolveBase(stackFrame.callFrame, stackFrame.args[0].identifier(), stackFrame.callFrame->scopeChain()));
}
-JSValueEncodedAsPointer* JITStubs::cti_op_resolve_skip(STUB_ARGS)
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_resolve_skip)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- CallFrame* callFrame = ARG_callFrame;
+ CallFrame* callFrame = stackFrame.callFrame;
ScopeChainNode* scopeChain = callFrame->scopeChain();
- int skip = ARG_int2;
+ int skip = stackFrame.args[1].int32();
ScopeChainIterator iter = scopeChain->begin();
ScopeChainIterator end = scopeChain->end();
@@ -1414,232 +1896,246 @@ JSValueEncodedAsPointer* JITStubs::cti_op_resolve_skip(STUB_ARGS)
++iter;
ASSERT(iter != end);
}
- Identifier& ident = *ARG_id1;
+ Identifier& ident = stackFrame.args[0].identifier();
do {
JSObject* o = *iter;
PropertySlot slot(o);
if (o->getPropertySlot(callFrame, ident, slot)) {
- JSValuePtr result = slot.getValue(callFrame, ident);
+ JSValue result = slot.getValue(callFrame, ident);
CHECK_FOR_EXCEPTION_AT_END();
- return JSValuePtr::encode(result);
+ return JSValue::encode(result);
}
} while (++iter != end);
CodeBlock* codeBlock = callFrame->codeBlock();
unsigned vPCIndex = codeBlock->getBytecodeIndex(callFrame, STUB_RETURN_ADDRESS);
- ARG_globalData->exception = createUndefinedVariableError(callFrame, ident, vPCIndex, codeBlock);
+ stackFrame.globalData->exception = createUndefinedVariableError(callFrame, ident, vPCIndex, codeBlock);
VM_THROW_EXCEPTION();
}
-JSValueEncodedAsPointer* JITStubs::cti_op_resolve_global(STUB_ARGS)
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_resolve_global)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- CallFrame* callFrame = ARG_callFrame;
- JSGlobalObject* globalObject = asGlobalObject(ARG_src1);
- Identifier& ident = *ARG_id2;
- unsigned globalResolveInfoIndex = ARG_int3;
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSGlobalObject* globalObject = asGlobalObject(stackFrame.args[0].jsValue());
+ Identifier& ident = stackFrame.args[1].identifier();
+ unsigned globalResolveInfoIndex = stackFrame.args[2].int32();
ASSERT(globalObject->isGlobalObject());
PropertySlot slot(globalObject);
if (globalObject->getPropertySlot(callFrame, ident, slot)) {
- JSValuePtr result = slot.getValue(callFrame, ident);
- if (slot.isCacheable() && !globalObject->structure()->isDictionary()) {
+ JSValue result = slot.getValue(callFrame, ident);
+ if (slot.isCacheable() && !globalObject->structure()->isDictionary() && slot.slotBase() == globalObject) {
GlobalResolveInfo& globalResolveInfo = callFrame->codeBlock()->globalResolveInfo(globalResolveInfoIndex);
if (globalResolveInfo.structure)
globalResolveInfo.structure->deref();
globalObject->structure()->ref();
globalResolveInfo.structure = globalObject->structure();
globalResolveInfo.offset = slot.cachedOffset();
- return JSValuePtr::encode(result);
+ return JSValue::encode(result);
}
CHECK_FOR_EXCEPTION_AT_END();
- return JSValuePtr::encode(result);
+ return JSValue::encode(result);
}
unsigned vPCIndex = callFrame->codeBlock()->getBytecodeIndex(callFrame, STUB_RETURN_ADDRESS);
- ARG_globalData->exception = createUndefinedVariableError(callFrame, ident, vPCIndex, callFrame->codeBlock());
+ stackFrame.globalData->exception = createUndefinedVariableError(callFrame, ident, vPCIndex, callFrame->codeBlock());
VM_THROW_EXCEPTION();
}
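The new slot.slotBase() == globalObject guard in op_resolve_global is load-bearing: the cache records a (structure, offset) pair for the global object itself, so a lookup satisfied further down the prototype chain must not be cached, or later cache hits would read the wrong object's storage. The guard restated as a predicate, using only calls visible above (a sketch, not a proposed helper):

    // True only when the cached (structure, offset) pair would stay valid.
    static bool canCacheGlobalResolve(const PropertySlot& slot, JSGlobalObject* globalObject)
    {
        return slot.isCacheable()
            && !globalObject->structure()->isDictionary()
            && slot.slotBase() == globalObject;
    }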
-JSValueEncodedAsPointer* JITStubs::cti_op_div(STUB_ARGS)
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_div)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- JSValuePtr src1 = ARG_src1;
- JSValuePtr src2 = ARG_src2;
+ JSValue src1 = stackFrame.args[0].jsValue();
+ JSValue src2 = stackFrame.args[1].jsValue();
double left;
double right;
if (src1.getNumber(left) && src2.getNumber(right))
- return JSValuePtr::encode(jsNumber(ARG_globalData, left / right));
+ return JSValue::encode(jsNumber(stackFrame.globalData, left / right));
- CallFrame* callFrame = ARG_callFrame;
- JSValuePtr result = jsNumber(ARG_globalData, src1.toNumber(callFrame) / src2.toNumber(callFrame));
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSValue result = jsNumber(stackFrame.globalData, src1.toNumber(callFrame) / src2.toNumber(callFrame));
CHECK_FOR_EXCEPTION_AT_END();
- return JSValuePtr::encode(result);
+ return JSValue::encode(result);
}
-JSValueEncodedAsPointer* JITStubs::cti_op_pre_dec(STUB_ARGS)
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_pre_dec)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- JSValuePtr v = ARG_src1;
+ JSValue v = stackFrame.args[0].jsValue();
- CallFrame* callFrame = ARG_callFrame;
- JSValuePtr result = jsNumber(ARG_globalData, v.toNumber(callFrame) - 1);
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSValue result = jsNumber(stackFrame.globalData, v.toNumber(callFrame) - 1);
CHECK_FOR_EXCEPTION_AT_END();
- return JSValuePtr::encode(result);
+ return JSValue::encode(result);
}
-int JITStubs::cti_op_jless(STUB_ARGS)
+DEFINE_STUB_FUNCTION(int, op_jless)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- JSValuePtr src1 = ARG_src1;
- JSValuePtr src2 = ARG_src2;
- CallFrame* callFrame = ARG_callFrame;
+ JSValue src1 = stackFrame.args[0].jsValue();
+ JSValue src2 = stackFrame.args[1].jsValue();
+ CallFrame* callFrame = stackFrame.callFrame;
bool result = jsLess(callFrame, src1, src2);
CHECK_FOR_EXCEPTION_AT_END();
return result;
}
-JSValueEncodedAsPointer* JITStubs::cti_op_not(STUB_ARGS)
+DEFINE_STUB_FUNCTION(int, op_jlesseq)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue src1 = stackFrame.args[0].jsValue();
+ JSValue src2 = stackFrame.args[1].jsValue();
+ CallFrame* callFrame = stackFrame.callFrame;
+
+ bool result = jsLessEq(callFrame, src1, src2);
+ CHECK_FOR_EXCEPTION_AT_END();
+ return result;
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_not)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- JSValuePtr src = ARG_src1;
+ JSValue src = stackFrame.args[0].jsValue();
- CallFrame* callFrame = ARG_callFrame;
+ CallFrame* callFrame = stackFrame.callFrame;
- JSValuePtr result = jsBoolean(!src.toBoolean(callFrame));
+ JSValue result = jsBoolean(!src.toBoolean(callFrame));
CHECK_FOR_EXCEPTION_AT_END();
- return JSValuePtr::encode(result);
+ return JSValue::encode(result);
}
-int JITStubs::cti_op_jtrue(STUB_ARGS)
+DEFINE_STUB_FUNCTION(int, op_jtrue)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- JSValuePtr src1 = ARG_src1;
+ JSValue src1 = stackFrame.args[0].jsValue();
- CallFrame* callFrame = ARG_callFrame;
+ CallFrame* callFrame = stackFrame.callFrame;
bool result = src1.toBoolean(callFrame);
CHECK_FOR_EXCEPTION_AT_END();
return result;
}
-VoidPtrPair JITStubs::cti_op_post_inc(STUB_ARGS)
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_post_inc)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- JSValuePtr v = ARG_src1;
+ JSValue v = stackFrame.args[0].jsValue();
- CallFrame* callFrame = ARG_callFrame;
+ CallFrame* callFrame = stackFrame.callFrame;
- JSValuePtr number = v.toJSNumber(callFrame);
+ JSValue number = v.toJSNumber(callFrame);
CHECK_FOR_EXCEPTION_AT_END();
- RETURN_PAIR(JSValuePtr::encode(number), JSValuePtr::encode(jsNumber(ARG_globalData, number.uncheckedGetNumber() + 1)));
+ callFrame->registers()[stackFrame.args[1].int32()] = jsNumber(stackFrame.globalData, number.uncheckedGetNumber() + 1);
+ return JSValue::encode(number);
}
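op_post_inc also shows this patch's new return convention: stubs that previously returned a VoidPtrPair now take an extra int32 argument naming the destination register, write the secondary result straight into the register file, and return only the primary value. The same shape in miniature, with illustrative types that are not engine API:

    struct Registers { long slots[16]; };

    // The old style returned (value, value + 1) as a pair; the new style
    // writes the incremented number into its destination slot and returns
    // the original value through the normal return path.
    long postIncrement(Registers& regs, int dstIndex, long value)
    {
        regs.slots[dstIndex] = value + 1;
        return value;
    }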
-JSValueEncodedAsPointer* JITStubs::cti_op_eq(STUB_ARGS)
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_eq)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- JSValuePtr src1 = ARG_src1;
- JSValuePtr src2 = ARG_src2;
+ JSValue src1 = stackFrame.args[0].jsValue();
+ JSValue src2 = stackFrame.args[1].jsValue();
- CallFrame* callFrame = ARG_callFrame;
+ CallFrame* callFrame = stackFrame.callFrame;
- ASSERT(!JSValuePtr::areBothInt32Fast(src1, src2));
- JSValuePtr result = jsBoolean(JSValuePtr::equalSlowCaseInline(callFrame, src1, src2));
+ ASSERT(!JSValue::areBothInt32Fast(src1, src2));
+ JSValue result = jsBoolean(JSValue::equalSlowCaseInline(callFrame, src1, src2));
CHECK_FOR_EXCEPTION_AT_END();
- return JSValuePtr::encode(result);
+ return JSValue::encode(result);
}
-JSValueEncodedAsPointer* JITStubs::cti_op_lshift(STUB_ARGS)
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_lshift)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- JSValuePtr val = ARG_src1;
- JSValuePtr shift = ARG_src2;
+ JSValue val = stackFrame.args[0].jsValue();
+ JSValue shift = stackFrame.args[1].jsValue();
int32_t left;
uint32_t right;
- if (JSValuePtr::areBothInt32Fast(val, shift))
- return JSValuePtr::encode(jsNumber(ARG_globalData, val.getInt32Fast() << (shift.getInt32Fast() & 0x1f)));
+ if (JSValue::areBothInt32Fast(val, shift))
+ return JSValue::encode(jsNumber(stackFrame.globalData, val.getInt32Fast() << (shift.getInt32Fast() & 0x1f)));
if (val.numberToInt32(left) && shift.numberToUInt32(right))
- return JSValuePtr::encode(jsNumber(ARG_globalData, left << (right & 0x1f)));
+ return JSValue::encode(jsNumber(stackFrame.globalData, left << (right & 0x1f)));
- CallFrame* callFrame = ARG_callFrame;
- JSValuePtr result = jsNumber(ARG_globalData, (val.toInt32(callFrame)) << (shift.toUInt32(callFrame) & 0x1f));
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSValue result = jsNumber(stackFrame.globalData, (val.toInt32(callFrame)) << (shift.toUInt32(callFrame) & 0x1f));
CHECK_FOR_EXCEPTION_AT_END();
- return JSValuePtr::encode(result);
+ return JSValue::encode(result);
}
-JSValueEncodedAsPointer* JITStubs::cti_op_bitand(STUB_ARGS)
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_bitand)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- JSValuePtr src1 = ARG_src1;
- JSValuePtr src2 = ARG_src2;
+ JSValue src1 = stackFrame.args[0].jsValue();
+ JSValue src2 = stackFrame.args[1].jsValue();
int32_t left;
int32_t right;
if (src1.numberToInt32(left) && src2.numberToInt32(right))
- return JSValuePtr::encode(jsNumber(ARG_globalData, left & right));
+ return JSValue::encode(jsNumber(stackFrame.globalData, left & right));
- CallFrame* callFrame = ARG_callFrame;
- JSValuePtr result = jsNumber(ARG_globalData, src1.toInt32(callFrame) & src2.toInt32(callFrame));
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSValue result = jsNumber(stackFrame.globalData, src1.toInt32(callFrame) & src2.toInt32(callFrame));
CHECK_FOR_EXCEPTION_AT_END();
- return JSValuePtr::encode(result);
+ return JSValue::encode(result);
}
-JSValueEncodedAsPointer* JITStubs::cti_op_rshift(STUB_ARGS)
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_rshift)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- JSValuePtr val = ARG_src1;
- JSValuePtr shift = ARG_src2;
+ JSValue val = stackFrame.args[0].jsValue();
+ JSValue shift = stackFrame.args[1].jsValue();
int32_t left;
uint32_t right;
if (JSFastMath::canDoFastRshift(val, shift))
- return JSValuePtr::encode(JSFastMath::rightShiftImmediateNumbers(val, shift));
+ return JSValue::encode(JSFastMath::rightShiftImmediateNumbers(val, shift));
if (val.numberToInt32(left) && shift.numberToUInt32(right))
- return JSValuePtr::encode(jsNumber(ARG_globalData, left >> (right & 0x1f)));
+ return JSValue::encode(jsNumber(stackFrame.globalData, left >> (right & 0x1f)));
- CallFrame* callFrame = ARG_callFrame;
- JSValuePtr result = jsNumber(ARG_globalData, (val.toInt32(callFrame)) >> (shift.toUInt32(callFrame) & 0x1f));
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSValue result = jsNumber(stackFrame.globalData, (val.toInt32(callFrame)) >> (shift.toUInt32(callFrame) & 0x1f));
CHECK_FOR_EXCEPTION_AT_END();
- return JSValuePtr::encode(result);
+ return JSValue::encode(result);
}
-JSValueEncodedAsPointer* JITStubs::cti_op_bitnot(STUB_ARGS)
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_bitnot)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- JSValuePtr src = ARG_src1;
+ JSValue src = stackFrame.args[0].jsValue();
int value;
if (src.numberToInt32(value))
- return JSValuePtr::encode(jsNumber(ARG_globalData, ~value));
+ return JSValue::encode(jsNumber(stackFrame.globalData, ~value));
- CallFrame* callFrame = ARG_callFrame;
- JSValuePtr result = jsNumber(ARG_globalData, ~src.toInt32(callFrame));
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSValue result = jsNumber(stackFrame.globalData, ~src.toInt32(callFrame));
CHECK_FOR_EXCEPTION_AT_END();
- return JSValuePtr::encode(result);
+ return JSValue::encode(result);
}
-VoidPtrPair JITStubs::cti_op_resolve_with_base(STUB_ARGS)
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_resolve_with_base)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- CallFrame* callFrame = ARG_callFrame;
+ CallFrame* callFrame = stackFrame.callFrame;
ScopeChainNode* scopeChain = callFrame->scopeChain();
ScopeChainIterator iter = scopeChain->begin();
@@ -1649,357 +2145,375 @@ VoidPtrPair JITStubs::cti_op_resolve_with_base(STUB_ARGS)
ASSERT(iter != end);
- Identifier& ident = *ARG_id1;
+ Identifier& ident = stackFrame.args[0].identifier();
JSObject* base;
do {
base = *iter;
PropertySlot slot(base);
if (base->getPropertySlot(callFrame, ident, slot)) {
- JSValuePtr result = slot.getValue(callFrame, ident);
+ JSValue result = slot.getValue(callFrame, ident);
CHECK_FOR_EXCEPTION_AT_END();
- RETURN_PAIR(base, JSValuePtr::encode(result));
+ callFrame->registers()[stackFrame.args[1].int32()] = JSValue(base);
+ return JSValue::encode(result);
}
++iter;
} while (iter != end);
CodeBlock* codeBlock = callFrame->codeBlock();
unsigned vPCIndex = codeBlock->getBytecodeIndex(callFrame, STUB_RETURN_ADDRESS);
- ARG_globalData->exception = createUndefinedVariableError(callFrame, ident, vPCIndex, codeBlock);
- VM_THROW_EXCEPTION_2();
+ stackFrame.globalData->exception = createUndefinedVariableError(callFrame, ident, vPCIndex, codeBlock);
+ VM_THROW_EXCEPTION_AT_END();
+ return JSValue::encode(JSValue());
}
-JSObject* JITStubs::cti_op_new_func_exp(STUB_ARGS)
+DEFINE_STUB_FUNCTION(JSObject*, op_new_func_exp)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- return ARG_funcexp1->makeFunction(ARG_callFrame, ARG_callFrame->scopeChain());
+ return stackFrame.args[0].funcExprNode()->makeFunction(stackFrame.callFrame, stackFrame.callFrame->scopeChain());
}
-JSValueEncodedAsPointer* JITStubs::cti_op_mod(STUB_ARGS)
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_mod)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- JSValuePtr dividendValue = ARG_src1;
- JSValuePtr divisorValue = ARG_src2;
+ JSValue dividendValue = stackFrame.args[0].jsValue();
+ JSValue divisorValue = stackFrame.args[1].jsValue();
- CallFrame* callFrame = ARG_callFrame;
+ CallFrame* callFrame = stackFrame.callFrame;
double d = dividendValue.toNumber(callFrame);
- JSValuePtr result = jsNumber(ARG_globalData, fmod(d, divisorValue.toNumber(callFrame)));
+ JSValue result = jsNumber(stackFrame.globalData, fmod(d, divisorValue.toNumber(callFrame)));
CHECK_FOR_EXCEPTION_AT_END();
- return JSValuePtr::encode(result);
+ return JSValue::encode(result);
}
-JSValueEncodedAsPointer* JITStubs::cti_op_less(STUB_ARGS)
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_less)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- CallFrame* callFrame = ARG_callFrame;
- JSValuePtr result = jsBoolean(jsLess(callFrame, ARG_src1, ARG_src2));
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSValue result = jsBoolean(jsLess(callFrame, stackFrame.args[0].jsValue(), stackFrame.args[1].jsValue()));
CHECK_FOR_EXCEPTION_AT_END();
- return JSValuePtr::encode(result);
+ return JSValue::encode(result);
}
-JSValueEncodedAsPointer* JITStubs::cti_op_neq(STUB_ARGS)
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_neq)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- JSValuePtr src1 = ARG_src1;
- JSValuePtr src2 = ARG_src2;
+ JSValue src1 = stackFrame.args[0].jsValue();
+ JSValue src2 = stackFrame.args[1].jsValue();
- ASSERT(!JSValuePtr::areBothInt32Fast(src1, src2));
+ ASSERT(!JSValue::areBothInt32Fast(src1, src2));
- CallFrame* callFrame = ARG_callFrame;
- JSValuePtr result = jsBoolean(!JSValuePtr::equalSlowCaseInline(callFrame, src1, src2));
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSValue result = jsBoolean(!JSValue::equalSlowCaseInline(callFrame, src1, src2));
CHECK_FOR_EXCEPTION_AT_END();
- return JSValuePtr::encode(result);
+ return JSValue::encode(result);
}
-VoidPtrPair JITStubs::cti_op_post_dec(STUB_ARGS)
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_post_dec)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- JSValuePtr v = ARG_src1;
+ JSValue v = stackFrame.args[0].jsValue();
- CallFrame* callFrame = ARG_callFrame;
+ CallFrame* callFrame = stackFrame.callFrame;
- JSValuePtr number = v.toJSNumber(callFrame);
+ JSValue number = v.toJSNumber(callFrame);
CHECK_FOR_EXCEPTION_AT_END();
- RETURN_PAIR(JSValuePtr::encode(number), JSValuePtr::encode(jsNumber(ARG_globalData, number.uncheckedGetNumber() - 1)));
+ callFrame->registers()[stackFrame.args[1].int32()] = jsNumber(stackFrame.globalData, number.uncheckedGetNumber() - 1);
+ return JSValue::encode(number);
}
-JSValueEncodedAsPointer* JITStubs::cti_op_urshift(STUB_ARGS)
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_urshift)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- JSValuePtr val = ARG_src1;
- JSValuePtr shift = ARG_src2;
+ JSValue val = stackFrame.args[0].jsValue();
+ JSValue shift = stackFrame.args[1].jsValue();
- CallFrame* callFrame = ARG_callFrame;
+ CallFrame* callFrame = stackFrame.callFrame;
if (JSFastMath::canDoFastUrshift(val, shift))
- return JSValuePtr::encode(JSFastMath::rightShiftImmediateNumbers(val, shift));
+ return JSValue::encode(JSFastMath::rightShiftImmediateNumbers(val, shift));
else {
- JSValuePtr result = jsNumber(ARG_globalData, (val.toUInt32(callFrame)) >> (shift.toUInt32(callFrame) & 0x1f));
+ JSValue result = jsNumber(stackFrame.globalData, (val.toUInt32(callFrame)) >> (shift.toUInt32(callFrame) & 0x1f));
CHECK_FOR_EXCEPTION_AT_END();
- return JSValuePtr::encode(result);
+ return JSValue::encode(result);
}
}
-JSValueEncodedAsPointer* JITStubs::cti_op_bitxor(STUB_ARGS)
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_bitxor)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- JSValuePtr src1 = ARG_src1;
- JSValuePtr src2 = ARG_src2;
+ JSValue src1 = stackFrame.args[0].jsValue();
+ JSValue src2 = stackFrame.args[1].jsValue();
- CallFrame* callFrame = ARG_callFrame;
+ CallFrame* callFrame = stackFrame.callFrame;
- JSValuePtr result = jsNumber(ARG_globalData, src1.toInt32(callFrame) ^ src2.toInt32(callFrame));
+ JSValue result = jsNumber(stackFrame.globalData, src1.toInt32(callFrame) ^ src2.toInt32(callFrame));
CHECK_FOR_EXCEPTION_AT_END();
- return JSValuePtr::encode(result);
+ return JSValue::encode(result);
}
-JSObject* JITStubs::cti_op_new_regexp(STUB_ARGS)
+DEFINE_STUB_FUNCTION(JSObject*, op_new_regexp)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- return new (ARG_globalData) RegExpObject(ARG_callFrame->lexicalGlobalObject()->regExpStructure(), ARG_regexp1);
+ return new (stackFrame.globalData) RegExpObject(stackFrame.callFrame->lexicalGlobalObject()->regExpStructure(), stackFrame.args[0].regExp());
}
-JSValueEncodedAsPointer* JITStubs::cti_op_bitor(STUB_ARGS)
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_bitor)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- JSValuePtr src1 = ARG_src1;
- JSValuePtr src2 = ARG_src2;
+ JSValue src1 = stackFrame.args[0].jsValue();
+ JSValue src2 = stackFrame.args[1].jsValue();
- CallFrame* callFrame = ARG_callFrame;
+ CallFrame* callFrame = stackFrame.callFrame;
- JSValuePtr result = jsNumber(ARG_globalData, src1.toInt32(callFrame) | src2.toInt32(callFrame));
+ JSValue result = jsNumber(stackFrame.globalData, src1.toInt32(callFrame) | src2.toInt32(callFrame));
CHECK_FOR_EXCEPTION_AT_END();
- return JSValuePtr::encode(result);
+ return JSValue::encode(result);
}
-JSValueEncodedAsPointer* JITStubs::cti_op_call_eval(STUB_ARGS)
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_call_eval)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- CallFrame* callFrame = ARG_callFrame;
- RegisterFile* registerFile = ARG_registerFile;
+ CallFrame* callFrame = stackFrame.callFrame;
+ RegisterFile* registerFile = stackFrame.registerFile;
- Interpreter* interpreter = ARG_globalData->interpreter;
+ Interpreter* interpreter = stackFrame.globalData->interpreter;
- JSValuePtr funcVal = ARG_src1;
- int registerOffset = ARG_int2;
- int argCount = ARG_int3;
+ JSValue funcVal = stackFrame.args[0].jsValue();
+ int registerOffset = stackFrame.args[1].int32();
+ int argCount = stackFrame.args[2].int32();
Register* newCallFrame = callFrame->registers() + registerOffset;
Register* argv = newCallFrame - RegisterFile::CallFrameHeaderSize - argCount;
- JSValuePtr thisValue = argv[0].jsValue(callFrame);
+ JSValue thisValue = argv[0].jsValue();
JSGlobalObject* globalObject = callFrame->scopeChain()->globalObject();
if (thisValue == globalObject && funcVal == globalObject->evalFunction()) {
- JSValuePtr exceptionValue = noValue();
- JSValuePtr result = interpreter->callEval(callFrame, registerFile, argv, argCount, registerOffset, exceptionValue);
- if (UNLIKELY(exceptionValue != noValue())) {
- ARG_globalData->exception = exceptionValue;
+ JSValue exceptionValue;
+ JSValue result = interpreter->callEval(callFrame, registerFile, argv, argCount, registerOffset, exceptionValue);
+ if (UNLIKELY(exceptionValue != JSValue())) {
+ stackFrame.globalData->exception = exceptionValue;
VM_THROW_EXCEPTION_AT_END();
}
- return JSValuePtr::encode(result);
+ return JSValue::encode(result);
}
- return JSValuePtr::encode(jsImpossibleValue());
+ return JSValue::encode(JSValue());
}
-JSValueEncodedAsPointer* JITStubs::cti_op_throw(STUB_ARGS)
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_throw)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- CallFrame* callFrame = ARG_callFrame;
+ CallFrame* callFrame = stackFrame.callFrame;
CodeBlock* codeBlock = callFrame->codeBlock();
unsigned vPCIndex = codeBlock->getBytecodeIndex(callFrame, STUB_RETURN_ADDRESS);
- JSValuePtr exceptionValue = ARG_src1;
+ JSValue exceptionValue = stackFrame.args[0].jsValue();
ASSERT(exceptionValue);
- HandlerInfo* handler = ARG_globalData->interpreter->throwException(callFrame, exceptionValue, vPCIndex, true);
+ HandlerInfo* handler = stackFrame.globalData->interpreter->throwException(callFrame, exceptionValue, vPCIndex, true);
if (!handler) {
- *ARG_exception = exceptionValue;
- return JSValuePtr::encode(jsNull());
+ *stackFrame.exception = exceptionValue;
+ STUB_SET_RETURN_ADDRESS(reinterpret_cast<void*>(ctiOpThrowNotCaught));
+ return JSValue::encode(jsNull());
}
- ARG_setCallFrame(callFrame);
+ stackFrame.callFrame = callFrame;
void* catchRoutine = handler->nativeCode.addressForExceptionHandler();
ASSERT(catchRoutine);
STUB_SET_RETURN_ADDRESS(catchRoutine);
- return JSValuePtr::encode(exceptionValue);
+ return JSValue::encode(exceptionValue);
}
-JSPropertyNameIterator* JITStubs::cti_op_get_pnames(STUB_ARGS)
+DEFINE_STUB_FUNCTION(JSPropertyNameIterator*, op_get_pnames)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- return JSPropertyNameIterator::create(ARG_callFrame, ARG_src1);
+ return JSPropertyNameIterator::create(stackFrame.callFrame, stackFrame.args[0].jsValue());
}
-JSValueEncodedAsPointer* JITStubs::cti_op_next_pname(STUB_ARGS)
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_next_pname)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- JSPropertyNameIterator* it = ARG_pni1;
- JSValuePtr temp = it->next(ARG_callFrame);
+ JSPropertyNameIterator* it = stackFrame.args[0].propertyNameIterator();
+ JSValue temp = it->next(stackFrame.callFrame);
if (!temp)
it->invalidate();
- return JSValuePtr::encode(temp);
+ return JSValue::encode(temp);
}
-JSObject* JITStubs::cti_op_push_scope(STUB_ARGS)
+DEFINE_STUB_FUNCTION(JSObject*, op_push_scope)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- JSObject* o = ARG_src1.toObject(ARG_callFrame);
+ JSObject* o = stackFrame.args[0].jsValue().toObject(stackFrame.callFrame);
CHECK_FOR_EXCEPTION();
- ARG_callFrame->setScopeChain(ARG_callFrame->scopeChain()->push(o));
+ stackFrame.callFrame->setScopeChain(stackFrame.callFrame->scopeChain()->push(o));
return o;
}
-void JITStubs::cti_op_pop_scope(STUB_ARGS)
+DEFINE_STUB_FUNCTION(void, op_pop_scope)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- ARG_callFrame->setScopeChain(ARG_callFrame->scopeChain()->pop());
+ stackFrame.callFrame->setScopeChain(stackFrame.callFrame->scopeChain()->pop());
}
-JSValueEncodedAsPointer* JITStubs::cti_op_typeof(STUB_ARGS)
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_typeof)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- return JSValuePtr::encode(jsTypeStringForValue(ARG_callFrame, ARG_src1));
+ return JSValue::encode(jsTypeStringForValue(stackFrame.callFrame, stackFrame.args[0].jsValue()));
}
-JSValueEncodedAsPointer* JITStubs::cti_op_is_undefined(STUB_ARGS)
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_is_undefined)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- JSValuePtr v = ARG_src1;
- return JSValuePtr::encode(jsBoolean(v.isCell() ? v.asCell()->structure()->typeInfo().masqueradesAsUndefined() : v.isUndefined()));
+ JSValue v = stackFrame.args[0].jsValue();
+ return JSValue::encode(jsBoolean(v.isCell() ? v.asCell()->structure()->typeInfo().masqueradesAsUndefined() : v.isUndefined()));
}
-JSValueEncodedAsPointer* JITStubs::cti_op_is_boolean(STUB_ARGS)
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_is_boolean)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- return JSValuePtr::encode(jsBoolean(ARG_src1.isBoolean()));
+ return JSValue::encode(jsBoolean(stackFrame.args[0].jsValue().isBoolean()));
}
-JSValueEncodedAsPointer* JITStubs::cti_op_is_number(STUB_ARGS)
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_is_number)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- return JSValuePtr::encode(jsBoolean(ARG_src1.isNumber()));
+ return JSValue::encode(jsBoolean(stackFrame.args[0].jsValue().isNumber()));
}
-JSValueEncodedAsPointer* JITStubs::cti_op_is_string(STUB_ARGS)
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_is_string)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- return JSValuePtr::encode(jsBoolean(isJSString(ARG_globalData, ARG_src1)));
+ return JSValue::encode(jsBoolean(isJSString(stackFrame.globalData, stackFrame.args[0].jsValue())));
}
-JSValueEncodedAsPointer* JITStubs::cti_op_is_object(STUB_ARGS)
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_is_object)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- return JSValuePtr::encode(jsBoolean(jsIsObjectType(ARG_src1)));
+ return JSValue::encode(jsBoolean(jsIsObjectType(stackFrame.args[0].jsValue())));
}
-JSValueEncodedAsPointer* JITStubs::cti_op_is_function(STUB_ARGS)
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_is_function)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- return JSValuePtr::encode(jsBoolean(jsIsFunctionType(ARG_src1)));
+ return JSValue::encode(jsBoolean(jsIsFunctionType(stackFrame.args[0].jsValue())));
}
-JSValueEncodedAsPointer* JITStubs::cti_op_stricteq(STUB_ARGS)
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_stricteq)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- JSValuePtr src1 = ARG_src1;
- JSValuePtr src2 = ARG_src2;
+ JSValue src1 = stackFrame.args[0].jsValue();
+ JSValue src2 = stackFrame.args[1].jsValue();
- return JSValuePtr::encode(jsBoolean(JSValuePtr::strictEqual(src1, src2)));
+ return JSValue::encode(jsBoolean(JSValue::strictEqual(src1, src2)));
}
-JSValueEncodedAsPointer* JITStubs::cti_op_nstricteq(STUB_ARGS)
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_to_primitive)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- JSValuePtr src1 = ARG_src1;
- JSValuePtr src2 = ARG_src2;
+ return JSValue::encode(stackFrame.args[0].jsValue().toPrimitive(stackFrame.callFrame));
+}
- return JSValuePtr::encode(jsBoolean(!JSValuePtr::strictEqual(src1, src2)));
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_strcat)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ return JSValue::encode(concatenateStrings(stackFrame.callFrame, &stackFrame.callFrame->registers()[stackFrame.args[0].int32()], stackFrame.args[1].int32()));
}
-JSValueEncodedAsPointer* JITStubs::cti_op_to_jsnumber(STUB_ARGS)
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_nstricteq)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- JSValuePtr src = ARG_src1;
- CallFrame* callFrame = ARG_callFrame;
+ JSValue src1 = stackFrame.args[0].jsValue();
+ JSValue src2 = stackFrame.args[1].jsValue();
- JSValuePtr result = src.toJSNumber(callFrame);
+ return JSValue::encode(jsBoolean(!JSValue::strictEqual(src1, src2)));
+}
+
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_to_jsnumber)
+{
+ STUB_INIT_STACK_FRAME(stackFrame);
+
+ JSValue src = stackFrame.args[0].jsValue();
+ CallFrame* callFrame = stackFrame.callFrame;
+
+ JSValue result = src.toJSNumber(callFrame);
CHECK_FOR_EXCEPTION_AT_END();
- return JSValuePtr::encode(result);
+ return JSValue::encode(result);
}
-JSValueEncodedAsPointer* JITStubs::cti_op_in(STUB_ARGS)
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_in)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- CallFrame* callFrame = ARG_callFrame;
- JSValuePtr baseVal = ARG_src2;
+ CallFrame* callFrame = stackFrame.callFrame;
+ JSValue baseVal = stackFrame.args[1].jsValue();
if (!baseVal.isObject()) {
- CallFrame* callFrame = ARG_callFrame;
+ CallFrame* callFrame = stackFrame.callFrame;
CodeBlock* codeBlock = callFrame->codeBlock();
unsigned vPCIndex = codeBlock->getBytecodeIndex(callFrame, STUB_RETURN_ADDRESS);
- ARG_globalData->exception = createInvalidParamError(callFrame, "in", baseVal, vPCIndex, codeBlock);
+ stackFrame.globalData->exception = createInvalidParamError(callFrame, "in", baseVal, vPCIndex, codeBlock);
VM_THROW_EXCEPTION();
}
- JSValuePtr propName = ARG_src1;
+ JSValue propName = stackFrame.args[0].jsValue();
JSObject* baseObj = asObject(baseVal);
uint32_t i;
if (propName.getUInt32(i))
- return JSValuePtr::encode(jsBoolean(baseObj->hasProperty(callFrame, i)));
+ return JSValue::encode(jsBoolean(baseObj->hasProperty(callFrame, i)));
Identifier property(callFrame, propName.toString(callFrame));
CHECK_FOR_EXCEPTION();
- return JSValuePtr::encode(jsBoolean(baseObj->hasProperty(callFrame, property)));
+ return JSValue::encode(jsBoolean(baseObj->hasProperty(callFrame, property)));
}
-JSObject* JITStubs::cti_op_push_new_scope(STUB_ARGS)
+DEFINE_STUB_FUNCTION(JSObject*, op_push_new_scope)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- JSObject* scope = new (ARG_globalData) JSStaticScopeObject(ARG_callFrame, *ARG_id1, ARG_src2, DontDelete);
+ JSObject* scope = new (stackFrame.globalData) JSStaticScopeObject(stackFrame.callFrame, stackFrame.args[0].identifier(), stackFrame.args[1].jsValue(), DontDelete);
- CallFrame* callFrame = ARG_callFrame;
+ CallFrame* callFrame = stackFrame.callFrame;
callFrame->setScopeChain(callFrame->scopeChain()->push(scope));
return scope;
}
-void JITStubs::cti_op_jmp_scopes(STUB_ARGS)
+DEFINE_STUB_FUNCTION(void, op_jmp_scopes)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- unsigned count = ARG_int1;
- CallFrame* callFrame = ARG_callFrame;
+ unsigned count = stackFrame.args[0].int32();
+ CallFrame* callFrame = stackFrame.callFrame;
ScopeChainNode* tmp = callFrame->scopeChain();
while (count--)
@@ -2007,23 +2521,23 @@ void JITStubs::cti_op_jmp_scopes(STUB_ARGS)
callFrame->setScopeChain(tmp);
}
-void JITStubs::cti_op_put_by_index(STUB_ARGS)
+DEFINE_STUB_FUNCTION(void, op_put_by_index)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- CallFrame* callFrame = ARG_callFrame;
- unsigned property = ARG_int2;
+ CallFrame* callFrame = stackFrame.callFrame;
+ unsigned property = stackFrame.args[1].int32();
- ARG_src1.put(callFrame, property, ARG_src3);
+ stackFrame.args[0].jsValue().put(callFrame, property, stackFrame.args[2].jsValue());
}
-void* JITStubs::cti_op_switch_imm(STUB_ARGS)
+DEFINE_STUB_FUNCTION(void*, op_switch_imm)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- JSValuePtr scrutinee = ARG_src1;
- unsigned tableIndex = ARG_int2;
- CallFrame* callFrame = ARG_callFrame;
+ JSValue scrutinee = stackFrame.args[0].jsValue();
+ unsigned tableIndex = stackFrame.args[1].int32();
+ CallFrame* callFrame = stackFrame.callFrame;
CodeBlock* codeBlock = callFrame->codeBlock();
if (scrutinee.isInt32Fast())
@@ -2038,13 +2552,13 @@ void* JITStubs::cti_op_switch_imm(STUB_ARGS)
}
}
-void* JITStubs::cti_op_switch_char(STUB_ARGS)
+DEFINE_STUB_FUNCTION(void*, op_switch_char)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- JSValuePtr scrutinee = ARG_src1;
- unsigned tableIndex = ARG_int2;
- CallFrame* callFrame = ARG_callFrame;
+ JSValue scrutinee = stackFrame.args[0].jsValue();
+ unsigned tableIndex = stackFrame.args[1].int32();
+ CallFrame* callFrame = stackFrame.callFrame;
CodeBlock* codeBlock = callFrame->codeBlock();
void* result = codeBlock->characterSwitchJumpTable(tableIndex).ctiDefault.addressForSwitch();
@@ -2058,13 +2572,13 @@ void* JITStubs::cti_op_switch_char(STUB_ARGS)
return result;
}
-void* JITStubs::cti_op_switch_string(STUB_ARGS)
+DEFINE_STUB_FUNCTION(void*, op_switch_string)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- JSValuePtr scrutinee = ARG_src1;
- unsigned tableIndex = ARG_int2;
- CallFrame* callFrame = ARG_callFrame;
+ JSValue scrutinee = stackFrame.args[0].jsValue();
+ unsigned tableIndex = stackFrame.args[1].int32();
+ CallFrame* callFrame = stackFrame.callFrame;
CodeBlock* codeBlock = callFrame->codeBlock();
void* result = codeBlock->stringSwitchJumpTable(tableIndex).ctiDefault.addressForSwitch();
@@ -2077,17 +2591,17 @@ void* JITStubs::cti_op_switch_string(STUB_ARGS)
return result;
}
-JSValueEncodedAsPointer* JITStubs::cti_op_del_by_val(STUB_ARGS)
+DEFINE_STUB_FUNCTION(EncodedJSValue, op_del_by_val)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- CallFrame* callFrame = ARG_callFrame;
+ CallFrame* callFrame = stackFrame.callFrame;
- JSValuePtr baseValue = ARG_src1;
+ JSValue baseValue = stackFrame.args[0].jsValue();
JSObject* baseObj = baseValue.toObject(callFrame); // may throw
- JSValuePtr subscript = ARG_src2;
- JSValuePtr result;
+ JSValue subscript = stackFrame.args[1].jsValue();
+ JSValue result;
uint32_t i;
if (subscript.getUInt32(i))
result = jsBoolean(baseObj->deleteProperty(callFrame, i));
@@ -2099,97 +2613,89 @@ JSValueEncodedAsPointer* JITStubs::cti_op_del_by_val(STUB_ARGS)
}
CHECK_FOR_EXCEPTION_AT_END();
- return JSValuePtr::encode(result);
+ return JSValue::encode(result);
}
-void JITStubs::cti_op_put_getter(STUB_ARGS)
+DEFINE_STUB_FUNCTION(void, op_put_getter)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- CallFrame* callFrame = ARG_callFrame;
+ CallFrame* callFrame = stackFrame.callFrame;
- ASSERT(ARG_src1.isObject());
- JSObject* baseObj = asObject(ARG_src1);
- ASSERT(ARG_src3.isObject());
- baseObj->defineGetter(callFrame, *ARG_id2, asObject(ARG_src3));
+ ASSERT(stackFrame.args[0].jsValue().isObject());
+ JSObject* baseObj = asObject(stackFrame.args[0].jsValue());
+ ASSERT(stackFrame.args[2].jsValue().isObject());
+ baseObj->defineGetter(callFrame, stackFrame.args[1].identifier(), asObject(stackFrame.args[2].jsValue()));
}
-void JITStubs::cti_op_put_setter(STUB_ARGS)
+DEFINE_STUB_FUNCTION(void, op_put_setter)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- CallFrame* callFrame = ARG_callFrame;
+ CallFrame* callFrame = stackFrame.callFrame;
- ASSERT(ARG_src1.isObject());
- JSObject* baseObj = asObject(ARG_src1);
- ASSERT(ARG_src3.isObject());
- baseObj->defineSetter(callFrame, *ARG_id2, asObject(ARG_src3));
+ ASSERT(stackFrame.args[0].jsValue().isObject());
+ JSObject* baseObj = asObject(stackFrame.args[0].jsValue());
+ ASSERT(stackFrame.args[2].jsValue().isObject());
+ baseObj->defineSetter(callFrame, stackFrame.args[1].identifier(), asObject(stackFrame.args[2].jsValue()));
}
-JSObject* JITStubs::cti_op_new_error(STUB_ARGS)
+DEFINE_STUB_FUNCTION(JSObject*, op_new_error)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- CallFrame* callFrame = ARG_callFrame;
+ CallFrame* callFrame = stackFrame.callFrame;
CodeBlock* codeBlock = callFrame->codeBlock();
- unsigned type = ARG_int1;
- JSValuePtr message = ARG_src2;
- unsigned bytecodeOffset = ARG_int3;
+ unsigned type = stackFrame.args[0].int32();
+ JSValue message = stackFrame.args[1].jsValue();
+ unsigned bytecodeOffset = stackFrame.args[2].int32();
unsigned lineNumber = codeBlock->lineNumberForBytecodeOffset(callFrame, bytecodeOffset);
return Error::create(callFrame, static_cast<ErrorType>(type), message.toString(callFrame), lineNumber, codeBlock->ownerNode()->sourceID(), codeBlock->ownerNode()->sourceURL());
}
-void JITStubs::cti_op_debug(STUB_ARGS)
+DEFINE_STUB_FUNCTION(void, op_debug)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- CallFrame* callFrame = ARG_callFrame;
+ CallFrame* callFrame = stackFrame.callFrame;
- int debugHookID = ARG_int1;
- int firstLine = ARG_int2;
- int lastLine = ARG_int3;
+ int debugHookID = stackFrame.args[0].int32();
+ int firstLine = stackFrame.args[1].int32();
+ int lastLine = stackFrame.args[2].int32();
- ARG_globalData->interpreter->debug(callFrame, static_cast<DebugHookID>(debugHookID), firstLine, lastLine);
+ stackFrame.globalData->interpreter->debug(callFrame, static_cast<DebugHookID>(debugHookID), firstLine, lastLine);
}
-JSValueEncodedAsPointer* JITStubs::cti_vm_throw(STUB_ARGS)
+DEFINE_STUB_FUNCTION(EncodedJSValue, vm_throw)
{
- BEGIN_STUB_FUNCTION();
+ STUB_INIT_STACK_FRAME(stackFrame);
- CallFrame* callFrame = ARG_callFrame;
+ CallFrame* callFrame = stackFrame.callFrame;
CodeBlock* codeBlock = callFrame->codeBlock();
- JSGlobalData* globalData = ARG_globalData;
+ JSGlobalData* globalData = stackFrame.globalData;
unsigned vPCIndex = codeBlock->getBytecodeIndex(callFrame, globalData->exceptionLocation);
- JSValuePtr exceptionValue = globalData->exception;
+ JSValue exceptionValue = globalData->exception;
ASSERT(exceptionValue);
- globalData->exception = noValue();
+ globalData->exception = JSValue();
HandlerInfo* handler = globalData->interpreter->throwException(callFrame, exceptionValue, vPCIndex, false);
if (!handler) {
- *ARG_exception = exceptionValue;
- return JSValuePtr::encode(jsNull());
+ *stackFrame.exception = exceptionValue;
+ return JSValue::encode(jsNull());
}
- ARG_setCallFrame(callFrame);
+ stackFrame.callFrame = callFrame;
void* catchRoutine = handler->nativeCode.addressForExceptionHandler();
ASSERT(catchRoutine);
STUB_SET_RETURN_ADDRESS(catchRoutine);
- return JSValuePtr::encode(exceptionValue);
-}
-
-#undef STUB_RETURN_ADDRESS
-#undef STUB_SET_RETURN_ADDRESS
-#undef BEGIN_STUB_FUNCTION
-#undef CHECK_FOR_EXCEPTION
-#undef CHECK_FOR_EXCEPTION_AT_END
-#undef CHECK_FOR_EXCEPTION_VOID
-#undef VM_THROW_EXCEPTION
-#undef VM_THROW_EXCEPTION_2
-#undef VM_THROW_EXCEPTION_AT_END
+ return JSValue::encode(exceptionValue);
+}
+
+} // namespace JITStubs
} // namespace JSC
diff --git a/JavaScriptCore/jit/JITStubs.h b/JavaScriptCore/jit/JITStubs.h
index b7b8f35..8e81ade 100644
--- a/JavaScriptCore/jit/JITStubs.h
+++ b/JavaScriptCore/jit/JITStubs.h
@@ -29,196 +29,312 @@
#ifndef JITStubs_h
#define JITStubs_h
-#include "Register.h"
#include <wtf/Platform.h>
+#include "MacroAssemblerCodeRef.h"
+#include "Register.h"
+
#if ENABLE(JIT)
namespace JSC {
+ class CodeBlock;
class ExecutablePool;
+ class Identifier;
class JSGlobalData;
class JSObject;
class JSPropertyNameIterator;
+ class JSValue;
class JSValueEncodedAsPointer;
- class CodeBlock;
- class JSValuePtr;
- class Identifier;
+ class Profiler;
class PropertySlot;
class PutPropertySlot;
+ class RegisterFile;
+ class FuncDeclNode;
+ class FuncExprNode;
+ class RegExp;
-#if USE(JIT_STUB_ARGUMENT_VA_LIST)
- #define STUB_ARGS void* args, ...
- #define ARGS (reinterpret_cast<void**>(vl_args) - 1)
-#else // JIT_STUB_ARGUMENT_REGISTER or JIT_STUB_ARGUMENT_STACK
- #define STUB_ARGS void** args
- #define ARGS (args)
+ union JITStubArg {
+ void* asPointer;
+ EncodedJSValue asEncodedJSValue;
+ int32_t asInt32;
+
+ JSValue jsValue() { return JSValue::decode(asEncodedJSValue); }
+ Identifier& identifier() { return *static_cast<Identifier*>(asPointer); }
+ int32_t int32() { return asInt32; }
+ CodeBlock* codeBlock() { return static_cast<CodeBlock*>(asPointer); }
+ FuncDeclNode* funcDeclNode() { return static_cast<FuncDeclNode*>(asPointer); }
+ FuncExprNode* funcExprNode() { return static_cast<FuncExprNode*>(asPointer); }
+ RegExp* regExp() { return static_cast<RegExp*>(asPointer); }
+ JSPropertyNameIterator* propertyNameIterator() { return static_cast<JSPropertyNameIterator*>(asPointer); }
+ void* returnAddress() { return asPointer; }
+ };
+
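JITStubArg carries no tag: every slot is pointer-sized, the JIT stores each argument as a raw word, and the stub reads it back under the type the bytecode op is known to have passed. A short usage sketch with made-up values:

    JITStubArg arg;

    arg.asInt32 = 42;
    int n = arg.int32();                          // reads back 42

    arg.asEncodedJSValue = JSValue::encode(jsNull());
    JSValue v = arg.jsValue();                    // reads back null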
+#if PLATFORM(X86_64)
+ struct JITStackFrame {
+ JITStubArg padding; // Unused
+ JITStubArg args[8];
+
+ void* savedRBX;
+ void* savedR15;
+ void* savedR14;
+ void* savedR13;
+ void* savedR12;
+ void* savedRBP;
+ void* savedRIP;
+
+ void* code;
+ RegisterFile* registerFile;
+ CallFrame* callFrame;
+ JSValue* exception;
+ Profiler** enabledProfilerReference;
+ JSGlobalData* globalData;
+
+ // When JIT code makes a call, it pushes its return address just below the rest of the stack.
+ void** returnAddressSlot() { return reinterpret_cast<void**>(this) - 1; }
+ };
+#elif PLATFORM(X86)
+ struct JITStackFrame {
+ JITStubArg padding; // Unused
+ JITStubArg args[6];
+
+ void* savedEBX;
+ void* savedEDI;
+ void* savedESI;
+ void* savedEBP;
+ void* savedEIP;
+
+ void* code;
+ RegisterFile* registerFile;
+ CallFrame* callFrame;
+ JSValue* exception;
+ Profiler** enabledProfilerReference;
+ JSGlobalData* globalData;
+
+ // When JIT code makes a call, it pushes its return address just below the rest of the stack.
+ void** returnAddressSlot() { return reinterpret_cast<void**>(this) - 1; }
+ };
+#elif PLATFORM(ARM_V7)
+ struct JITStackFrame {
+ JITStubArg padding; // Unused
+ JITStubArg args[6];
+
+ void* thunkReturnAddress;
+
+ void* preservedReturnAddress;
+ void* preservedR4;
+ void* preservedR5;
+ void* preservedR6;
+
+ // These arguments are passed in r1..r3 (r0 contained the entry code pointer, which is not preserved)
+ RegisterFile* registerFile;
+ CallFrame* callFrame;
+ JSValue* exception;
+
+ // These arguments are passed on the stack.
+ Profiler** enabledProfilerReference;
+ JSGlobalData* globalData;
+
+ void** returnAddressSlot() { return &thunkReturnAddress; }
+ };
+#else
+#error "JITStackFrame not defined for this platform."
#endif
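Field order in these structs is part of the ABI: each one is a raw overlay of exactly what the hand-written ctiTrampoline pushes for its platform, which is why the saved-register and argument fields differ per target and why returnAddressSlot() is computed differently on ARMv7. One could pin the layout with WTF's COMPILE_ASSERT; the offset below is an assumption for the sketch, not the real contract:

    #include <stddef.h>
    #include <wtf/Assertions.h>

    // Illustrative layout guard: args must start one JITStubArg past the
    // padding slot, or the assembly and the C++ overlay disagree.
    COMPILE_ASSERT(offsetof(JITStackFrame, args) == sizeof(JITStubArg), JITStackFrame_layout);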
-#if USE(JIT_STUB_ARGUMENT_REGISTER)
- #if PLATFORM(X86_64)
+#if USE(JIT_STUB_ARGUMENT_VA_LIST)
+ #define STUB_ARGS_DECLARATION void* args, ...
+ #define STUB_ARGS (reinterpret_cast<void**>(vl_args) - 1)
+
+ #if COMPILER(MSVC)
+ #define JIT_STUB __cdecl
+ #else
#define JIT_STUB
- #elif COMPILER(MSVC)
+ #endif
+#else
+ #define STUB_ARGS_DECLARATION void** args
+ #define STUB_ARGS (args)
+
+ #if PLATFORM(X86) && COMPILER(MSVC)
#define JIT_STUB __fastcall
- #elif COMPILER(GCC)
+ #elif PLATFORM(X86) && COMPILER(GCC)
#define JIT_STUB __attribute__ ((fastcall))
#else
- #error Need to support register calling convention in this compiler
- #endif
-#else // JIT_STUB_ARGUMENT_VA_LIST or JIT_STUB_ARGUMENT_STACK
- #if COMPILER(MSVC)
- #define JIT_STUB __cdecl
- #else
#define JIT_STUB
#endif
#endif
-// The Mac compilers are fine with this,
-#if PLATFORM(MAC)
+#if PLATFORM(X86_64)
struct VoidPtrPair {
void* first;
void* second;
};
-#define RETURN_PAIR(a,b) VoidPtrPair pair = { a, b }; return pair
+ #define RETURN_POINTER_PAIR(a,b) VoidPtrPair pair = { a, b }; return pair
#else
+ // MSVC doesn't support returning a two-value struct in two registers, so
+ // we cast the struct to int64_t instead.
typedef uint64_t VoidPtrPair;
- union VoidPtrPairValue {
+ union VoidPtrPairUnion {
struct { void* first; void* second; } s;
VoidPtrPair i;
};
-#define RETURN_PAIR(a,b) VoidPtrPairValue pair = {{ a, b }}; return pair.i
+ #define RETURN_POINTER_PAIR(a,b) VoidPtrPairUnion pair = {{ a, b }}; return pair.i
#endif
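The RETURN_POINTER_PAIR fallback works because on 32-bit x86 a 64-bit integer return value comes back in the EDX:EAX register pair, i.e. two pointer-sized registers, which MSVC will do even though it will not split a two-member struct the same way. A standalone sketch of the packing, not engine code:

    #include <stdint.h>

    // Pack two 32-bit pointers into one uint64_t so a single integer return
    // delivers both halves in EDX:EAX on a 32-bit x86 target.
    uint64_t packPair(void* first, void* second)
    {
        return static_cast<uint64_t>(reinterpret_cast<uintptr_t>(first))
             | (static_cast<uint64_t>(reinterpret_cast<uintptr_t>(second)) << 32);
    }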
- class JITStubs {
+ extern "C" void ctiVMThrowTrampoline();
+ extern "C" void ctiOpThrowNotCaught();
+ extern "C" EncodedJSValue ctiTrampoline(
+#if PLATFORM(X86_64)
+ // FIXME: (bug #22910) this will force all arguments onto the stack (regparm(0) does not appear to have any effect).
+ // We can allow register passing here, and move the writes of these values into the trampoline.
+ void*, void*, void*, void*, void*, void*,
+#endif
+ void* code, RegisterFile*, CallFrame*, JSValue* exception, Profiler**, JSGlobalData*);
+
+ class JITThunks {
public:
- JITStubs(JSGlobalData*);
-
- static JSObject* JIT_STUB cti_op_construct_JSConstruct(STUB_ARGS);
- static JSObject* JIT_STUB cti_op_convert_this(STUB_ARGS);
- static JSObject* JIT_STUB cti_op_new_array(STUB_ARGS);
- static JSObject* JIT_STUB cti_op_new_error(STUB_ARGS);
- static JSObject* JIT_STUB cti_op_new_func(STUB_ARGS);
- static JSObject* JIT_STUB cti_op_new_func_exp(STUB_ARGS);
- static JSObject* JIT_STUB cti_op_new_object(STUB_ARGS);
- static JSObject* JIT_STUB cti_op_new_regexp(STUB_ARGS);
- static JSObject* JIT_STUB cti_op_push_activation(STUB_ARGS);
- static JSObject* JIT_STUB cti_op_push_new_scope(STUB_ARGS);
- static JSObject* JIT_STUB cti_op_push_scope(STUB_ARGS);
- static JSPropertyNameIterator* JIT_STUB cti_op_get_pnames(STUB_ARGS);
- static JSValueEncodedAsPointer* JIT_STUB cti_op_add(STUB_ARGS);
- static JSValueEncodedAsPointer* JIT_STUB cti_op_bitand(STUB_ARGS);
- static JSValueEncodedAsPointer* JIT_STUB cti_op_bitnot(STUB_ARGS);
- static JSValueEncodedAsPointer* JIT_STUB cti_op_bitor(STUB_ARGS);
- static JSValueEncodedAsPointer* JIT_STUB cti_op_bitxor(STUB_ARGS);
- static JSValueEncodedAsPointer* JIT_STUB cti_op_call_NotJSFunction(STUB_ARGS);
- static JSValueEncodedAsPointer* JIT_STUB cti_op_call_eval(STUB_ARGS);
- static JSValueEncodedAsPointer* JIT_STUB cti_op_construct_NotJSConstruct(STUB_ARGS);
- static JSValueEncodedAsPointer* JIT_STUB cti_op_del_by_id(STUB_ARGS);
- static JSValueEncodedAsPointer* JIT_STUB cti_op_del_by_val(STUB_ARGS);
- static JSValueEncodedAsPointer* JIT_STUB cti_op_div(STUB_ARGS);
- static JSValueEncodedAsPointer* JIT_STUB cti_op_eq(STUB_ARGS);
- static JSValueEncodedAsPointer* JIT_STUB cti_op_get_by_id(STUB_ARGS);
- static JSValueEncodedAsPointer* JIT_STUB cti_op_get_by_id_array_fail(STUB_ARGS);
- static JSValueEncodedAsPointer* JIT_STUB cti_op_get_by_id_generic(STUB_ARGS);
- static JSValueEncodedAsPointer* JIT_STUB cti_op_get_by_id_proto_fail(STUB_ARGS);
- static JSValueEncodedAsPointer* JIT_STUB cti_op_get_by_id_proto_list(STUB_ARGS);
- static JSValueEncodedAsPointer* JIT_STUB cti_op_get_by_id_proto_list_full(STUB_ARGS);
- static JSValueEncodedAsPointer* JIT_STUB cti_op_get_by_id_second(STUB_ARGS);
- static JSValueEncodedAsPointer* JIT_STUB cti_op_get_by_id_self_fail(STUB_ARGS);
- static JSValueEncodedAsPointer* JIT_STUB cti_op_get_by_id_string_fail(STUB_ARGS);
- static JSValueEncodedAsPointer* JIT_STUB cti_op_get_by_val(STUB_ARGS);
- static JSValueEncodedAsPointer* JIT_STUB cti_op_get_by_val_byte_array(STUB_ARGS);
- static JSValueEncodedAsPointer* JIT_STUB cti_op_in(STUB_ARGS);
- static JSValueEncodedAsPointer* JIT_STUB cti_op_instanceof(STUB_ARGS);
- static JSValueEncodedAsPointer* JIT_STUB cti_op_is_boolean(STUB_ARGS);
- static JSValueEncodedAsPointer* JIT_STUB cti_op_is_function(STUB_ARGS);
- static JSValueEncodedAsPointer* JIT_STUB cti_op_is_number(STUB_ARGS);
- static JSValueEncodedAsPointer* JIT_STUB cti_op_is_object(STUB_ARGS);
- static JSValueEncodedAsPointer* JIT_STUB cti_op_is_string(STUB_ARGS);
- static JSValueEncodedAsPointer* JIT_STUB cti_op_is_undefined(STUB_ARGS);
- static JSValueEncodedAsPointer* JIT_STUB cti_op_less(STUB_ARGS);
- static JSValueEncodedAsPointer* JIT_STUB cti_op_lesseq(STUB_ARGS);
- static JSValueEncodedAsPointer* JIT_STUB cti_op_lshift(STUB_ARGS);
- static JSValueEncodedAsPointer* JIT_STUB cti_op_mod(STUB_ARGS);
- static JSValueEncodedAsPointer* JIT_STUB cti_op_mul(STUB_ARGS);
- static JSValueEncodedAsPointer* JIT_STUB cti_op_negate(STUB_ARGS);
- static JSValueEncodedAsPointer* JIT_STUB cti_op_neq(STUB_ARGS);
- static JSValueEncodedAsPointer* JIT_STUB cti_op_next_pname(STUB_ARGS);
- static JSValueEncodedAsPointer* JIT_STUB cti_op_not(STUB_ARGS);
- static JSValueEncodedAsPointer* JIT_STUB cti_op_nstricteq(STUB_ARGS);
- static JSValueEncodedAsPointer* JIT_STUB cti_op_pre_dec(STUB_ARGS);
- static JSValueEncodedAsPointer* JIT_STUB cti_op_pre_inc(STUB_ARGS);
- static JSValueEncodedAsPointer* JIT_STUB cti_op_resolve(STUB_ARGS);
- static JSValueEncodedAsPointer* JIT_STUB cti_op_resolve_base(STUB_ARGS);
- static JSValueEncodedAsPointer* JIT_STUB cti_op_resolve_global(STUB_ARGS);
- static JSValueEncodedAsPointer* JIT_STUB cti_op_resolve_skip(STUB_ARGS);
- static JSValueEncodedAsPointer* JIT_STUB cti_op_rshift(STUB_ARGS);
- static JSValueEncodedAsPointer* JIT_STUB cti_op_stricteq(STUB_ARGS);
- static JSValueEncodedAsPointer* JIT_STUB cti_op_sub(STUB_ARGS);
- static JSValueEncodedAsPointer* JIT_STUB cti_op_throw(STUB_ARGS);
- static JSValueEncodedAsPointer* JIT_STUB cti_op_to_jsnumber(STUB_ARGS);
- static JSValueEncodedAsPointer* JIT_STUB cti_op_typeof(STUB_ARGS);
- static JSValueEncodedAsPointer* JIT_STUB cti_op_urshift(STUB_ARGS);
- static JSValueEncodedAsPointer* JIT_STUB cti_vm_throw(STUB_ARGS);
- static VoidPtrPair JIT_STUB cti_op_call_arityCheck(STUB_ARGS);
- static VoidPtrPair JIT_STUB cti_op_post_dec(STUB_ARGS);
- static VoidPtrPair JIT_STUB cti_op_post_inc(STUB_ARGS);
- static VoidPtrPair JIT_STUB cti_op_resolve_func(STUB_ARGS);
- static VoidPtrPair JIT_STUB cti_op_resolve_with_base(STUB_ARGS);
- static int JIT_STUB cti_op_jless(STUB_ARGS);
- static int JIT_STUB cti_op_jtrue(STUB_ARGS);
- static int JIT_STUB cti_op_loop_if_less(STUB_ARGS);
- static int JIT_STUB cti_op_loop_if_lesseq(STUB_ARGS);
- static int JIT_STUB cti_op_loop_if_true(STUB_ARGS);
- static int JIT_STUB cti_timeout_check(STUB_ARGS);
- static void JIT_STUB cti_op_create_arguments(STUB_ARGS);
- static void JIT_STUB cti_op_create_arguments_no_params(STUB_ARGS);
- static void JIT_STUB cti_op_debug(STUB_ARGS);
- static void JIT_STUB cti_op_end(STUB_ARGS);
- static void JIT_STUB cti_op_jmp_scopes(STUB_ARGS);
- static void JIT_STUB cti_op_pop_scope(STUB_ARGS);
- static void JIT_STUB cti_op_profile_did_call(STUB_ARGS);
- static void JIT_STUB cti_op_profile_will_call(STUB_ARGS);
- static void JIT_STUB cti_op_put_by_id(STUB_ARGS);
- static void JIT_STUB cti_op_put_by_id_fail(STUB_ARGS);
- static void JIT_STUB cti_op_put_by_id_generic(STUB_ARGS);
- static void JIT_STUB cti_op_put_by_id_second(STUB_ARGS);
- static void JIT_STUB cti_op_put_by_index(STUB_ARGS);
- static void JIT_STUB cti_op_put_by_val(STUB_ARGS);
- static void JIT_STUB cti_op_put_by_val_array(STUB_ARGS);
- static void JIT_STUB cti_op_put_by_val_byte_array(STUB_ARGS);
- static void JIT_STUB cti_op_put_getter(STUB_ARGS);
- static void JIT_STUB cti_op_put_setter(STUB_ARGS);
- static void JIT_STUB cti_op_ret_scopeChain(STUB_ARGS);
- static void JIT_STUB cti_op_tear_off_activation(STUB_ARGS);
- static void JIT_STUB cti_op_tear_off_arguments(STUB_ARGS);
- static void JIT_STUB cti_register_file_check(STUB_ARGS);
- static void* JIT_STUB cti_op_call_JSFunction(STUB_ARGS);
- static void* JIT_STUB cti_op_switch_char(STUB_ARGS);
- static void* JIT_STUB cti_op_switch_imm(STUB_ARGS);
- static void* JIT_STUB cti_op_switch_string(STUB_ARGS);
- static void* JIT_STUB cti_vm_dontLazyLinkCall(STUB_ARGS);
- static void* JIT_STUB cti_vm_lazyLinkCall(STUB_ARGS);
-
- static void tryCacheGetByID(CallFrame*, CodeBlock*, void* returnAddress, JSValuePtr baseValue, const Identifier& propertyName, const PropertySlot&);
- static void tryCachePutByID(CallFrame*, CodeBlock*, void* returnAddress, JSValuePtr baseValue, const PutPropertySlot&);
+ JITThunks(JSGlobalData*);
+
+ static void tryCacheGetByID(CallFrame*, CodeBlock*, void* returnAddress, JSValue baseValue, const Identifier& propertyName, const PropertySlot&);
+ static void tryCachePutByID(CallFrame*, CodeBlock*, void* returnAddress, JSValue baseValue, const PutPropertySlot&);
- void* ctiArrayLengthTrampoline() { return m_ctiArrayLengthTrampoline; }
- void* ctiStringLengthTrampoline() { return m_ctiStringLengthTrampoline; }
- void* ctiVirtualCallPreLink() { return m_ctiVirtualCallPreLink; }
- void* ctiVirtualCallLink() { return m_ctiVirtualCallLink; }
- void* ctiVirtualCall() { return m_ctiVirtualCall; }
+ MacroAssemblerCodePtr ctiArrayLengthTrampoline() { return m_ctiArrayLengthTrampoline; }
+ MacroAssemblerCodePtr ctiStringLengthTrampoline() { return m_ctiStringLengthTrampoline; }
+ MacroAssemblerCodePtr ctiVirtualCallPreLink() { return m_ctiVirtualCallPreLink; }
+ MacroAssemblerCodePtr ctiVirtualCallLink() { return m_ctiVirtualCallLink; }
+ MacroAssemblerCodePtr ctiVirtualCall() { return m_ctiVirtualCall; }
+ MacroAssemblerCodePtr ctiNativeCallThunk() { return m_ctiNativeCallThunk; }
private:
RefPtr<ExecutablePool> m_executablePool;
- void* m_ctiArrayLengthTrampoline;
- void* m_ctiStringLengthTrampoline;
- void* m_ctiVirtualCallPreLink;
- void* m_ctiVirtualCallLink;
- void* m_ctiVirtualCall;
+ MacroAssemblerCodePtr m_ctiArrayLengthTrampoline;
+ MacroAssemblerCodePtr m_ctiStringLengthTrampoline;
+ MacroAssemblerCodePtr m_ctiVirtualCallPreLink;
+ MacroAssemblerCodePtr m_ctiVirtualCallLink;
+ MacroAssemblerCodePtr m_ctiVirtualCall;
+ MacroAssemblerCodePtr m_ctiNativeCallThunk;
};
+namespace JITStubs { extern "C" {
+
+ void JIT_STUB cti_op_create_arguments(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_create_arguments_no_params(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_debug(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_end(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_jmp_scopes(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_pop_scope(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_profile_did_call(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_profile_will_call(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_put_by_id(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_put_by_id_fail(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_put_by_id_generic(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_put_by_id_second(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_put_by_index(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_put_by_val(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_put_by_val_array(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_put_by_val_byte_array(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_put_getter(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_put_setter(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_ret_scopeChain(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_tear_off_activation(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_op_tear_off_arguments(STUB_ARGS_DECLARATION);
+ void JIT_STUB cti_register_file_check(STUB_ARGS_DECLARATION);
+ int JIT_STUB cti_op_jless(STUB_ARGS_DECLARATION);
+ int JIT_STUB cti_op_jlesseq(STUB_ARGS_DECLARATION);
+ int JIT_STUB cti_op_jtrue(STUB_ARGS_DECLARATION);
+ int JIT_STUB cti_op_load_varargs(STUB_ARGS_DECLARATION);
+ int JIT_STUB cti_op_loop_if_less(STUB_ARGS_DECLARATION);
+ int JIT_STUB cti_op_loop_if_lesseq(STUB_ARGS_DECLARATION);
+ int JIT_STUB cti_op_loop_if_true(STUB_ARGS_DECLARATION);
+ int JIT_STUB cti_timeout_check(STUB_ARGS_DECLARATION);
+ void* JIT_STUB cti_op_call_JSFunction(STUB_ARGS_DECLARATION);
+ void* JIT_STUB cti_op_switch_char(STUB_ARGS_DECLARATION);
+ void* JIT_STUB cti_op_switch_imm(STUB_ARGS_DECLARATION);
+ void* JIT_STUB cti_op_switch_string(STUB_ARGS_DECLARATION);
+ void* JIT_STUB cti_vm_dontLazyLinkCall(STUB_ARGS_DECLARATION);
+ void* JIT_STUB cti_vm_lazyLinkCall(STUB_ARGS_DECLARATION);
+ JSObject* JIT_STUB cti_op_construct_JSConstruct(STUB_ARGS_DECLARATION);
+ JSObject* JIT_STUB cti_op_convert_this(STUB_ARGS_DECLARATION);
+ JSObject* JIT_STUB cti_op_new_array(STUB_ARGS_DECLARATION);
+ JSObject* JIT_STUB cti_op_new_error(STUB_ARGS_DECLARATION);
+ JSObject* JIT_STUB cti_op_new_func(STUB_ARGS_DECLARATION);
+ JSObject* JIT_STUB cti_op_new_func_exp(STUB_ARGS_DECLARATION);
+ JSObject* JIT_STUB cti_op_new_object(STUB_ARGS_DECLARATION);
+ JSObject* JIT_STUB cti_op_new_regexp(STUB_ARGS_DECLARATION);
+ JSObject* JIT_STUB cti_op_push_activation(STUB_ARGS_DECLARATION);
+ JSObject* JIT_STUB cti_op_push_new_scope(STUB_ARGS_DECLARATION);
+ JSObject* JIT_STUB cti_op_push_scope(STUB_ARGS_DECLARATION);
+ JSPropertyNameIterator* JIT_STUB cti_op_get_pnames(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_add(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_bitand(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_bitnot(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_bitor(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_bitxor(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_call_NotJSFunction(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_call_eval(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_construct_NotJSConstruct(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_del_by_id(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_del_by_val(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_div(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_eq(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_get_by_id(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_get_by_id_method_check(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_get_by_id_method_check_second(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_get_by_id_array_fail(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_get_by_id_generic(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_get_by_id_proto_fail(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_get_by_id_proto_list(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_get_by_id_proto_list_full(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_get_by_id_second(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_get_by_id_self_fail(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_get_by_id_string_fail(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_get_by_val(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_get_by_val_byte_array(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_get_by_val_string(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_put_by_id_transition_realloc(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_in(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_instanceof(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_is_boolean(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_is_function(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_is_number(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_is_object(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_is_string(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_is_undefined(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_less(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_lesseq(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_lshift(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_mod(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_mul(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_negate(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_neq(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_next_pname(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_not(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_nstricteq(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_pre_dec(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_pre_inc(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_resolve(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_resolve_base(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_resolve_global(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_resolve_skip(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_rshift(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_strcat(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_stricteq(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_sub(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_throw(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_to_jsnumber(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_to_primitive(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_typeof(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_urshift(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_vm_throw(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_post_dec(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_post_inc(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_resolve_func(STUB_ARGS_DECLARATION);
+ EncodedJSValue JIT_STUB cti_op_resolve_with_base(STUB_ARGS_DECLARATION);
+ VoidPtrPair JIT_STUB cti_op_call_arityCheck(STUB_ARGS_DECLARATION);
+
+} } // extern "C" namespace JITStubs
+
} // namespace JSC
#endif // ENABLE(JIT)
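
Every declaration above follows the pattern ReturnType JIT_STUB cti_xxx(STUB_ARGS_DECLARATION). Below is a minimal sketch of how those pieces fit together, assuming simplified macro definitions; the real JITStubs.h selects among several per-platform variants, and cti_op_example is a hypothetical stub invented for illustration.

// Sketch only: on x86 the stubs use the fastcall convention so JIT'd code
// can pass the argument block in a register; elsewhere JIT_STUB is empty.
#if PLATFORM(X86) && COMPILER(MSVC)
#define JIT_STUB __fastcall
#elif PLATFORM(X86) && COMPILER(GCC)
#define JIT_STUB __attribute__ ((fastcall))
#else
#define JIT_STUB
#endif

// The JIT'd code builds a block of arguments in memory and passes its address.
#define STUB_ARGS_DECLARATION void** args

// Hypothetical stub following the declared shape: decode an operand from the
// argument block, do the slow-path work in C++, then re-encode the result so
// it travels back to JIT'd code as a single machine word.
EncodedJSValue JIT_STUB cti_op_example(STUB_ARGS_DECLARATION)
{
    JSValue v = JSValue::decode(reinterpret_cast<EncodedJSValue>(args[1]));
    return JSValue::encode(v); // identity operation, purely illustrative
}

The one VoidPtrPair return type in the list, on cti_op_call_arityCheck, exists because that stub needs to hand two values back to its JIT'd caller in a single return; the other return types map one C++ result onto one machine word.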