Diffstat (limited to 'JavaScriptCore/runtime')
-rw-r--r--  JavaScriptCore/runtime/AlignedMemoryAllocator.h    239
-rw-r--r--  JavaScriptCore/runtime/Collector.cpp               148
-rw-r--r--  JavaScriptCore/runtime/Collector.h                  52
-rw-r--r--  JavaScriptCore/runtime/CollectorHeapIterator.h       6
-rw-r--r--  JavaScriptCore/runtime/ExceptionHelpers.cpp          5
-rw-r--r--  JavaScriptCore/runtime/ExceptionHelpers.h            1
-rw-r--r--  JavaScriptCore/runtime/Executable.cpp               35
-rw-r--r--  JavaScriptCore/runtime/GCActivityCallback.cpp       50
-rw-r--r--  JavaScriptCore/runtime/GCActivityCallback.h         70
-rw-r--r--  JavaScriptCore/runtime/GCActivityCallbackCF.cpp     83
-rw-r--r--  JavaScriptCore/runtime/GCHandle.cpp                 91
-rw-r--r--  JavaScriptCore/runtime/GCHandle.h                  120
-rw-r--r--  JavaScriptCore/runtime/JSArray.cpp                 260
-rw-r--r--  JavaScriptCore/runtime/JSArray.h                    30
-rw-r--r--  JavaScriptCore/runtime/JSGlobalData.cpp              2
-rw-r--r--  JavaScriptCore/runtime/JSGlobalData.h                2
-rw-r--r--  JavaScriptCore/runtime/JSLock.cpp                    6
-rw-r--r--  JavaScriptCore/runtime/JSLock.h                      2
-rw-r--r--  JavaScriptCore/runtime/UStringImpl.h                 2
-rw-r--r--  JavaScriptCore/runtime/WeakGCPtr.h                  46
20 files changed, 982 insertions, 268 deletions
diff --git a/JavaScriptCore/runtime/AlignedMemoryAllocator.h b/JavaScriptCore/runtime/AlignedMemoryAllocator.h
new file mode 100644
index 0000000..e682eb3
--- /dev/null
+++ b/JavaScriptCore/runtime/AlignedMemoryAllocator.h
@@ -0,0 +1,239 @@
+/*
+ * Copyright (C) 2010 Apple Inc. All rights reserved.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ *
+ */
+
+#ifndef AlignedMemoryAllocator_h
+#define AlignedMemoryAllocator_h
+
+#include <wtf/Bitmap.h>
+#include <wtf/PageReservation.h>
+
+namespace JSC {
+
+struct AlignedMemoryAllocatorConstants {
+// Set sane defaults if -D<flagname=value> wasn't provided via compiler args
+#if defined(JSCCOLLECTOR_VIRTUALMEM_RESERVATION)
+ // Keep backwards compatibility with symbian build system
+ static const size_t virtualMemoryReservation = JSCCOLLECTOR_VIRTUALMEM_RESERVATION;
+#elif defined(__WINS__)
+ // Emulator has limited virtual address space
+ static const size_t virtualMemoryReservation = 0x400000;
+#else
+ // HW has plenty of virtual addresses
+ static const size_t virtualMemoryReservation = 0x8000000;
+#endif
+};
+
+template<size_t blockSize> class AlignedMemory;
+template<size_t blockSize> class AlignedMemoryAllocator;
+
+#if HAVE(PAGE_ALLOCATE_ALIGNED)
+
+template<size_t blockSize>
+class AlignedMemoryAllocator;
+
+template<size_t blockSize>
+class AlignedMemory {
+public:
+ void deallocate();
+ void* base();
+
+private:
+ friend class AlignedMemoryAllocator<blockSize>;
+
+ AlignedMemory(PageAllocation);
+
+ PageAllocation m_allocation;
+};
+
+template<size_t blockSize>
+class AlignedMemoryAllocator {
+public:
+ void destroy();
+ AlignedMemory<blockSize> allocate();
+};
+
+template<size_t blockSize>
+inline void AlignedMemoryAllocator<blockSize>::destroy()
+{
+}
+
+template<size_t blockSize>
+inline AlignedMemory<blockSize> AlignedMemoryAllocator<blockSize>::allocate()
+{
+ return AlignedMemory<blockSize>(PageAllocation::allocateAligned(blockSize, PageAllocation::JSGCHeapPages));
+}
+
+template<size_t blockSize>
+inline void AlignedMemory<blockSize>::deallocate()
+{
+ m_allocation.deallocate();
+}
+
+template<size_t blockSize>
+inline void* AlignedMemory<blockSize>::base()
+{
+ return m_allocation.base();
+}
+
+template<size_t blockSize>
+inline AlignedMemory<blockSize>::AlignedMemory(PageAllocation allocation)
+ : m_allocation(allocation)
+{
+}
+
+#else
+
+template<size_t blockSize>
+class AlignedMemory {
+public:
+ void deallocate();
+ void* base();
+
+private:
+ friend class AlignedMemoryAllocator<blockSize>;
+
+ AlignedMemory(void* base, AlignedMemoryAllocator<blockSize>* allocator);
+
+ void* m_base;
+ AlignedMemoryAllocator<blockSize>* m_allocator;
+};
+
+template<size_t blockSize>
+class AlignedMemoryAllocator {
+public:
+ AlignedMemoryAllocator();
+ ~AlignedMemoryAllocator();
+
+ void destroy();
+ AlignedMemory<blockSize> allocate();
+ void free(AlignedMemory<blockSize>);
+
+private:
+ static const size_t reservationSize = AlignedMemoryAllocatorConstants::virtualMemoryReservation;
+ static const size_t bitmapSize = reservationSize / blockSize;
+
+ PageReservation m_reservation;
+ size_t m_nextFree;
+ uintptr_t m_reservationBase;
+ WTF::Bitmap<bitmapSize> m_bitmap;
+};
+
+template<size_t blockSize>
+AlignedMemoryAllocator<blockSize>::AlignedMemoryAllocator()
+ : m_reservation(PageReservation::reserve(reservationSize + blockSize, PageAllocation::JSGCHeapPages))
+ , m_nextFree(0)
+{
+ // check that blockSize and reservationSize are powers of two
+ ASSERT(!(blockSize & (blockSize - 1)));
+ ASSERT(!(reservationSize & (reservationSize - 1)));
+
+ // check that blockSize is a multiple of pageSize and that
+ // reservationSize is a multiple of blockSize
+ ASSERT(!(blockSize & (PageAllocation::pageSize() - 1)));
+ ASSERT(!(reservationSize & (blockSize - 1)));
+
+ ASSERT(m_reservation);
+
+ m_reservationBase = reinterpret_cast<uintptr_t>(m_reservation.base());
+ m_reservationBase = (m_reservationBase + blockSize) & ~(blockSize - 1);
+}
+
+template<size_t blockSize>
+AlignedMemoryAllocator<blockSize>::~AlignedMemoryAllocator()
+{
+ destroy();
+ m_reservation.deallocate();
+}
+
+template<size_t blockSize>
+inline void AlignedMemoryAllocator<blockSize>::destroy()
+{
+ for (unsigned i = 0; i < bitmapSize; ++i) {
+ if (m_bitmap.get(i)) {
+ void* blockAddress = reinterpret_cast<void*>(m_reservationBase + i * blockSize);
+ m_reservation.decommit(blockAddress, blockSize);
+
+ m_bitmap.clear(i);
+ }
+ }
+}
+
+template<size_t blockSize>
+AlignedMemory<blockSize> AlignedMemoryAllocator<blockSize>::allocate()
+{
+ while (m_nextFree < bitmapSize) {
+ if (!m_bitmap.get(m_nextFree)) {
+ void* blockAddress = reinterpret_cast<void*>(m_reservationBase + m_nextFree * blockSize);
+ m_reservation.commit(blockAddress, blockSize);
+
+ m_bitmap.set(m_nextFree);
+ ++m_nextFree;
+
+ return AlignedMemory<blockSize>(blockAddress, this);
+ }
+ m_bitmap.advanceToNextFreeBit(m_nextFree);
+ }
+
+ if (m_bitmap.isFull())
+ return AlignedMemory<blockSize>(0, this);
+
+ m_nextFree = 0;
+
+ return allocate();
+}
+
+template<size_t blockSize>
+void AlignedMemoryAllocator<blockSize>::free(AlignedMemory<blockSize> allocation)
+{
+ ASSERT(allocation.base());
+ m_reservation.decommit(allocation.base(), blockSize);
+
+ size_t diff = (reinterpret_cast<uintptr_t>(allocation.base()) - m_reservationBase);
+ ASSERT(!(diff & (blockSize - 1)));
+
+ size_t i = diff / blockSize;
+ ASSERT(m_bitmap.get(i));
+
+ m_bitmap.clear(i);
+}
+
+template<size_t blockSize>
+inline void AlignedMemory<blockSize>::deallocate()
+{
+ m_allocator->free(*this);
+}
+
+template<size_t blockSize>
+inline void* AlignedMemory<blockSize>::base()
+{
+ return m_base;
+}
+
+template<size_t blockSize>
+AlignedMemory<blockSize>::AlignedMemory(void* base, AlignedMemoryAllocator<blockSize>* allocator)
+ : m_base(base)
+ , m_allocator(allocator)
+{
+}
+
+#endif
+
+}
+
+#endif
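
Note: the constructor above over-reserves by one block and rounds m_reservationBase up with (base + blockSize) & ~(blockSize - 1). A minimal standalone sketch of that arithmetic, with hypothetical addresses not taken from the patch:

    #include <cassert>
    #include <cstdint>

    int main()
    {
        const uintptr_t blockSize = 0x10000; // 64KB; must be a power of two
        assert(!(blockSize & (blockSize - 1)));

        uintptr_t base = 0x12345678; // unaligned reservation base
        // Round up to the next blockSize boundary, as the constructor does
        // for m_reservationBase. Adding a full block before masking is why
        // it reserves reservationSize + blockSize bytes.
        uintptr_t aligned = (base + blockSize) & ~(blockSize - 1);

        assert(!(aligned & (blockSize - 1))); // now block-aligned
        assert(aligned >= base);              // still inside the reservation
        return 0;
    }
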
diff --git a/JavaScriptCore/runtime/Collector.cpp b/JavaScriptCore/runtime/Collector.cpp
index 38f3ce5..4a81913 100644
--- a/JavaScriptCore/runtime/Collector.cpp
+++ b/JavaScriptCore/runtime/Collector.cpp
@@ -25,6 +25,7 @@
#include "CallFrame.h"
#include "CodeBlock.h"
#include "CollectorHeapIterator.h"
+#include "GCActivityCallback.h"
#include "Interpreter.h"
#include "JSArray.h"
#include "JSGlobalObject.h"
@@ -135,14 +136,13 @@ Heap::Heap(JSGlobalData* globalData)
, m_registeredThreads(0)
, m_currentThreadRegistrar(0)
#endif
-#if OS(SYMBIAN)
- , m_blockallocator(JSCCOLLECTOR_VIRTUALMEM_RESERVATION, BLOCK_SIZE)
-#endif
, m_globalData(globalData)
{
ASSERT(globalData);
memset(&m_heap, 0, sizeof(CollectorHeap));
allocateBlock();
+ m_activityCallback = DefaultGCActivityCallback::create(this);
+ (*m_activityCallback)();
}
Heap::~Heap()
@@ -170,6 +170,9 @@ void Heap::destroy()
freeBlocks();
+ for (unsigned i = 0; i < m_weakGCHandlePools.size(); ++i)
+ m_weakGCHandlePools[i].deallocate();
+
#if ENABLE(JSC_MULTIPLE_THREADS)
if (m_currentThreadRegistrar) {
int error = pthread_key_delete(m_currentThreadRegistrar);
@@ -183,63 +186,19 @@ void Heap::destroy()
t = next;
}
#endif
-#if OS(SYMBIAN)
m_blockallocator.destroy();
-#endif
m_globalData = 0;
}
NEVER_INLINE CollectorBlock* Heap::allocateBlock()
{
-#if OS(DARWIN)
- vm_address_t address = 0;
- vm_map(current_task(), &address, BLOCK_SIZE, BLOCK_OFFSET_MASK, VM_FLAGS_ANYWHERE | VM_TAG_FOR_COLLECTOR_MEMORY, MEMORY_OBJECT_NULL, 0, FALSE, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
-#elif OS(SYMBIAN)
- void* address = m_blockallocator.alloc();
- if (!address)
+ AlignedCollectorBlock allocation = m_blockallocator.allocate();
+ CollectorBlock* block = static_cast<CollectorBlock*>(allocation.base());
+ if (!block)
CRASH();
-#elif OS(WINCE)
- void* address = VirtualAlloc(NULL, BLOCK_SIZE, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
-#elif OS(WINDOWS)
-#if COMPILER(MINGW) && !COMPILER(MINGW64)
- void* address = __mingw_aligned_malloc(BLOCK_SIZE, BLOCK_SIZE);
-#else
- void* address = _aligned_malloc(BLOCK_SIZE, BLOCK_SIZE);
-#endif
- memset(address, 0, BLOCK_SIZE);
-#elif HAVE(POSIX_MEMALIGN)
- void* address;
- posix_memalign(&address, BLOCK_SIZE, BLOCK_SIZE);
-#else
-
-#if ENABLE(JSC_MULTIPLE_THREADS)
-#error Need to initialize pagesize safely.
-#endif
- static size_t pagesize = getpagesize();
-
- size_t extra = 0;
- if (BLOCK_SIZE > pagesize)
- extra = BLOCK_SIZE - pagesize;
-
- void* mmapResult = mmap(NULL, BLOCK_SIZE + extra, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
- uintptr_t address = reinterpret_cast<uintptr_t>(mmapResult);
-
- size_t adjust = 0;
- if ((address & BLOCK_OFFSET_MASK) != 0)
- adjust = BLOCK_SIZE - (address & BLOCK_OFFSET_MASK);
-
- if (adjust > 0)
- munmap(reinterpret_cast<char*>(address), adjust);
-
- if (adjust < extra)
- munmap(reinterpret_cast<char*>(address + adjust + BLOCK_SIZE), extra - adjust);
-
- address += adjust;
-#endif
// Initialize block.
- CollectorBlock* block = reinterpret_cast<CollectorBlock*>(address);
block->heap = this;
clearMarkBits(block);
@@ -251,14 +210,14 @@ NEVER_INLINE CollectorBlock* Heap::allocateBlock()
size_t numBlocks = m_heap.numBlocks;
if (m_heap.usedBlocks == numBlocks) {
- static const size_t maxNumBlocks = ULONG_MAX / sizeof(CollectorBlock*) / GROWTH_FACTOR;
+ static const size_t maxNumBlocks = ULONG_MAX / sizeof(AlignedCollectorBlock) / GROWTH_FACTOR;
if (numBlocks > maxNumBlocks)
CRASH();
numBlocks = max(MIN_ARRAY_SIZE, numBlocks * GROWTH_FACTOR);
m_heap.numBlocks = numBlocks;
- m_heap.blocks = static_cast<CollectorBlock**>(fastRealloc(m_heap.blocks, numBlocks * sizeof(CollectorBlock*)));
+ m_heap.blocks = static_cast<AlignedCollectorBlock*>(fastRealloc(m_heap.blocks, numBlocks * sizeof(AlignedCollectorBlock)));
}
- m_heap.blocks[m_heap.usedBlocks++] = block;
+ m_heap.blocks[m_heap.usedBlocks++] = allocation;
return block;
}
@@ -271,7 +230,7 @@ NEVER_INLINE void Heap::freeBlock(size_t block)
ObjectIterator end(m_heap, block + 1);
for ( ; it != end; ++it)
(*it)->~JSCell();
- freeBlockPtr(m_heap.blocks[block]);
+ m_heap.blocks[block].deallocate();
// swap with the last block so we compact as we go
m_heap.blocks[block] = m_heap.blocks[m_heap.usedBlocks - 1];
@@ -279,31 +238,10 @@ NEVER_INLINE void Heap::freeBlock(size_t block)
if (m_heap.numBlocks > MIN_ARRAY_SIZE && m_heap.usedBlocks < m_heap.numBlocks / LOW_WATER_FACTOR) {
m_heap.numBlocks = m_heap.numBlocks / GROWTH_FACTOR;
- m_heap.blocks = static_cast<CollectorBlock**>(fastRealloc(m_heap.blocks, m_heap.numBlocks * sizeof(CollectorBlock*)));
+ m_heap.blocks = static_cast<AlignedCollectorBlock*>(fastRealloc(m_heap.blocks, m_heap.numBlocks * sizeof(AlignedCollectorBlock)));
}
}
-NEVER_INLINE void Heap::freeBlockPtr(CollectorBlock* block)
-{
-#if OS(DARWIN)
- vm_deallocate(current_task(), reinterpret_cast<vm_address_t>(block), BLOCK_SIZE);
-#elif OS(SYMBIAN)
- m_blockallocator.free(reinterpret_cast<void*>(block));
-#elif OS(WINCE)
- VirtualFree(block, 0, MEM_RELEASE);
-#elif OS(WINDOWS)
-#if COMPILER(MINGW) && !COMPILER(MINGW64)
- __mingw_aligned_free(block);
-#else
- _aligned_free(block);
-#endif
-#elif HAVE(POSIX_MEMALIGN)
- free(block);
-#else
- munmap(reinterpret_cast<char*>(block), BLOCK_SIZE);
-#endif
-}
-
void Heap::freeBlocks()
{
ProtectCountSet protectedValuesCopy = m_protectedValues;
@@ -327,7 +265,7 @@ void Heap::freeBlocks()
it->first->~JSCell();
for (size_t block = 0; block < m_heap.usedBlocks; ++block)
- freeBlockPtr(m_heap.blocks[block]);
+ m_heap.blocks[block].deallocate();
fastFree(m_heap.blocks);
@@ -380,7 +318,7 @@ allocate:
do {
ASSERT(m_heap.nextBlock < m_heap.usedBlocks);
- Block* block = reinterpret_cast<Block*>(m_heap.blocks[m_heap.nextBlock]);
+ Block* block = m_heap.collectorBlock(m_heap.nextBlock);
do {
ASSERT(m_heap.nextCell < HeapConstants::cellsPerBlock);
if (!block->marked.get(m_heap.nextCell)) { // Always false for the last cell in the block
@@ -435,10 +373,10 @@ void Heap::shrinkBlocks(size_t neededBlocks)
// Clear the always-on last bit, so isEmpty() isn't fooled by it.
for (size_t i = 0; i < m_heap.usedBlocks; ++i)
- m_heap.blocks[i]->marked.clear(HeapConstants::cellsPerBlock - 1);
+ m_heap.collectorBlock(i)->marked.clear(HeapConstants::cellsPerBlock - 1);
for (size_t i = 0; i != m_heap.usedBlocks && m_heap.usedBlocks != neededBlocks; ) {
- if (m_heap.blocks[i]->marked.isEmpty()) {
+ if (m_heap.collectorBlock(i)->marked.isEmpty()) {
freeBlock(i);
} else
++i;
@@ -446,7 +384,7 @@ void Heap::shrinkBlocks(size_t neededBlocks)
// Reset the always-on last bit.
for (size_t i = 0; i < m_heap.usedBlocks; ++i)
- m_heap.blocks[i]->marked.set(HeapConstants::cellsPerBlock - 1);
+ m_heap.collectorBlock(i)->marked.set(HeapConstants::cellsPerBlock - 1);
}
#if OS(WINCE)
@@ -741,7 +679,6 @@ void Heap::markConservatively(MarkStack& markStack, void* start, void* end)
char** p = static_cast<char**>(start);
char** e = static_cast<char**>(end);
- CollectorBlock** blocks = m_heap.blocks;
while (p != e) {
char* x = *p++;
if (isPossibleCell(x)) {
@@ -757,7 +694,7 @@ void Heap::markConservatively(MarkStack& markStack, void* start, void* end)
CollectorBlock* blockAddr = reinterpret_cast<CollectorBlock*>(xAsBits - offset);
usedBlocks = m_heap.usedBlocks;
for (size_t block = 0; block < usedBlocks; block++) {
- if (blocks[block] != blockAddr)
+ if (m_heap.collectorBlock(block) != blockAddr)
continue;
markStack.append(reinterpret_cast<JSCell*>(xAsBits));
markStack.drain();
@@ -972,6 +909,36 @@ void Heap::markStackObjectsConservatively(MarkStack& markStack)
#endif
}
+void Heap::updateWeakGCHandles()
+{
+ for (unsigned i = 0; i < m_weakGCHandlePools.size(); ++i)
+ weakGCHandlePool(i)->update();
+}
+
+void WeakGCHandlePool::update()
+{
+ for (unsigned i = 1; i < WeakGCHandlePool::numPoolEntries; ++i) {
+ if (m_entries[i].isValidPtr()) {
+ JSCell* cell = m_entries[i].get();
+ if (!cell || !Heap::isCellMarked(cell))
+ m_entries[i].invalidate();
+ }
+ }
+}
+
+WeakGCHandle* Heap::addWeakGCHandle(JSCell* ptr)
+{
+ for (unsigned i = 0; i < m_weakGCHandlePools.size(); ++i)
+ if (!weakGCHandlePool(i)->isFull())
+ return weakGCHandlePool(i)->allocate(ptr);
+
+ AlignedMemory<WeakGCHandlePool::poolSize> allocation = m_weakGCHandlePoolAllocator.allocate();
+ m_weakGCHandlePools.append(allocation);
+
+ WeakGCHandlePool* pool = new (allocation) WeakGCHandlePool();
+ return pool->allocate(ptr);
+}
+
void Heap::protect(JSValue k)
{
ASSERT(k);
@@ -1006,7 +973,7 @@ void Heap::markProtectedObjects(MarkStack& markStack)
void Heap::clearMarkBits()
{
for (size_t i = 0; i < m_heap.usedBlocks; ++i)
- clearMarkBits(m_heap.blocks[i]);
+ clearMarkBits(m_heap.collectorBlock(i));
}
void Heap::clearMarkBits(CollectorBlock* block)
@@ -1025,9 +992,9 @@ size_t Heap::markedCells(size_t startBlock, size_t startCell) const
return 0;
size_t result = 0;
- result += m_heap.blocks[startBlock]->marked.count(startCell);
+ result += m_heap.collectorBlock(startBlock)->marked.count(startCell);
for (size_t i = startBlock + 1; i < m_heap.usedBlocks; ++i)
- result += m_heap.blocks[i]->marked.count();
+ result += m_heap.collectorBlock(i)->marked.count();
return result;
}
@@ -1108,6 +1075,8 @@ void Heap::markRoots()
markStack.drain();
markStack.compact();
+ updateWeakGCHandles();
+
m_heap.operationInProgress = NoOperation;
}
@@ -1236,6 +1205,8 @@ void Heap::reset()
resizeBlocks();
JAVASCRIPTCORE_GC_END();
+
+ (*m_activityCallback)();
}
void Heap::collectAllGarbage()
@@ -1272,4 +1243,9 @@ LiveObjectIterator Heap::primaryHeapEnd()
return LiveObjectIterator(m_heap, m_heap.usedBlocks);
}
+void Heap::setActivityCallback(PassOwnPtr<GCActivityCallback> activityCallback)
+{
+ m_activityCallback = activityCallback;
+}
+
} // namespace JSC
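
Note: the new updateWeakGCHandles() / WeakGCHandlePool::update() pass runs after marking and invalidates, rather than frees, entries whose cells were not marked. A reduced model of that sweep, using hypothetical types rather than the patch's classes:

    #include <vector>

    struct Cell { bool marked; };

    struct WeakEntry {
        Cell* cell; // null after invalidation
        bool isValid() const { return cell; }
        void invalidate() { cell = nullptr; }
    };

    // Mirrors WeakGCHandlePool::update(): unmarked cells are about to be
    // swept, so their weak entries flip to an invalid state that callers
    // can test instead of dereferencing a dangling pointer.
    void updateWeakEntries(std::vector<WeakEntry>& entries)
    {
        for (WeakEntry& e : entries) {
            if (e.isValid() && !e.cell->marked)
                e.invalidate();
        }
    }
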
diff --git a/JavaScriptCore/runtime/Collector.h b/JavaScriptCore/runtime/Collector.h
index 1dc9445..38c178b 100644
--- a/JavaScriptCore/runtime/Collector.h
+++ b/JavaScriptCore/runtime/Collector.h
@@ -22,13 +22,18 @@
#ifndef Collector_h
#define Collector_h
+#include "AlignedMemoryAllocator.h"
+#include "GCHandle.h"
#include <stddef.h>
#include <string.h>
+#include <wtf/Bitmap.h>
#include <wtf/FixedArray.h>
#include <wtf/HashCountedSet.h>
#include <wtf/HashSet.h>
#include <wtf/Noncopyable.h>
#include <wtf/OwnPtr.h>
+#include <wtf/PageAllocation.h>
+#include <wtf/PassOwnPtr.h>
#include <wtf/StdLibExtras.h>
#include <wtf/Threading.h>
@@ -36,15 +41,12 @@
#include <pthread.h>
#endif
-#if OS(SYMBIAN)
-#include <wtf/symbian/BlockAllocatorSymbian.h>
-#endif
-
#define ASSERT_CLASS_FITS_IN_CELL(class) COMPILE_ASSERT(sizeof(class) <= CELL_SIZE, class_fits_in_cell)
namespace JSC {
class CollectorBlock;
+ class GCActivityCallback;
class JSCell;
class JSGlobalData;
class JSValue;
@@ -55,10 +57,19 @@ namespace JSC {
class LiveObjectIterator;
+#if OS(WINCE) || OS(SYMBIAN)
+ const size_t BLOCK_SIZE = 64 * 1024; // 64k
+#else
+ const size_t BLOCK_SIZE = 256 * 1024; // 256k
+#endif
+
+ typedef AlignedMemoryAllocator<BLOCK_SIZE> CollectorBlockAllocator;
+ typedef AlignedMemory<BLOCK_SIZE> AlignedCollectorBlock;
+
struct CollectorHeap {
size_t nextBlock;
size_t nextCell;
- CollectorBlock** blocks;
+ AlignedCollectorBlock* blocks;
void* nextNumber;
@@ -69,6 +80,11 @@ namespace JSC {
bool didShrink;
OperationInProgress operationInProgress;
+
+ CollectorBlock* collectorBlock(size_t index) const
+ {
+ return static_cast<CollectorBlock*>(blocks[index].base());
+ }
};
class Heap : public Noncopyable {
@@ -82,6 +98,7 @@ namespace JSC {
bool isBusy(); // true if an allocation or collection is in progress
void collectAllGarbage();
+ void setActivityCallback(PassOwnPtr<GCActivityCallback>);
static const size_t minExtraCost = 256;
static const size_t maxExtraCost = 1024 * 1024;
@@ -115,6 +132,8 @@ namespace JSC {
static bool isCellMarked(const JSCell*);
static void markCell(JSCell*);
+ WeakGCHandle* addWeakGCHandle(JSCell*);
+
void markConservatively(MarkStack&, void* start, void* end);
HashSet<MarkedArgumentBuffer*>& markListSet() { if (!m_markListSet) m_markListSet = new HashSet<MarkedArgumentBuffer*>; return *m_markListSet; }
@@ -137,7 +156,6 @@ namespace JSC {
NEVER_INLINE CollectorBlock* allocateBlock();
NEVER_INLINE void freeBlock(size_t);
- NEVER_INLINE void freeBlockPtr(CollectorBlock*);
void freeBlocks();
void resizeBlocks();
void growBlocks(size_t neededBlocks);
@@ -157,14 +175,20 @@ namespace JSC {
void markOtherThreadConservatively(MarkStack&, Thread*);
void markStackObjectsConservatively(MarkStack&);
+ void updateWeakGCHandles();
+ WeakGCHandlePool* weakGCHandlePool(size_t index);
+
typedef HashCountedSet<JSCell*> ProtectCountSet;
CollectorHeap m_heap;
ProtectCountSet m_protectedValues;
+ WTF::Vector<AlignedMemory<WeakGCHandlePool::poolSize> > m_weakGCHandlePools;
HashSet<MarkedArgumentBuffer*>* m_markListSet;
+ OwnPtr<GCActivityCallback> m_activityCallback;
+
#if ENABLE(JSC_MULTIPLE_THREADS)
void makeUsableFromMultipleThreads();
@@ -176,21 +200,14 @@ namespace JSC {
pthread_key_t m_currentThreadRegistrar;
#endif
-#if OS(SYMBIAN)
// Allocates collector blocks with correct alignment
- WTF::AlignedBlockAllocator m_blockallocator;
-#endif
+ CollectorBlockAllocator m_blockallocator;
+ WeakGCHandlePool::Allocator m_weakGCHandlePoolAllocator;
JSGlobalData* m_globalData;
};
// tunable parameters
-#if OS(WINCE) || OS(SYMBIAN)
- const size_t BLOCK_SIZE = 64 * 1024; // 64k
-#else
- const size_t BLOCK_SIZE = 64 * 4096; // 256k
-#endif
-
// derived constants
const size_t BLOCK_OFFSET_MASK = BLOCK_SIZE - 1;
const size_t BLOCK_MASK = ~BLOCK_OFFSET_MASK;
@@ -294,6 +311,11 @@ namespace JSC {
return result;
}
+
+ inline WeakGCHandlePool* Heap::weakGCHandlePool(size_t index)
+ {
+ return static_cast<WeakGCHandlePool*>(m_weakGCHandlePools[index].base());
+ }
} // namespace JSC
#endif /* Collector_h */
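
Note: moving BLOCK_SIZE ahead of CollectorHeap lets blocks be stored as AlignedCollectorBlock values, with collectorBlock(i) recovering the raw CollectorBlock*. Block alignment is also what the derived BLOCK_OFFSET_MASK / BLOCK_MASK constants rely on; a small sketch with hypothetical addresses:

    #include <cassert>
    #include <cstdint>

    int main()
    {
        const uintptr_t BLOCK_SIZE = 256 * 1024; // non-WINCE/Symbian value
        const uintptr_t BLOCK_OFFSET_MASK = BLOCK_SIZE - 1;
        const uintptr_t BLOCK_MASK = ~BLOCK_OFFSET_MASK;

        uintptr_t blockBase = 0x40000000;    // allocator guarantees alignment
        uintptr_t cell = blockBase + 0x1230; // interior pointer to a cell

        // markConservatively() recovers the owning block exactly this way:
        // strip the low-bit offset, then compare against allocated blocks.
        assert((cell & BLOCK_MASK) == blockBase);
        assert(cell - (cell & BLOCK_OFFSET_MASK) == blockBase);
        return 0;
    }
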
diff --git a/JavaScriptCore/runtime/CollectorHeapIterator.h b/JavaScriptCore/runtime/CollectorHeapIterator.h
index 9a3327c..9d107b7 100644
--- a/JavaScriptCore/runtime/CollectorHeapIterator.h
+++ b/JavaScriptCore/runtime/CollectorHeapIterator.h
@@ -77,7 +77,7 @@ namespace JSC {
inline JSCell* CollectorHeapIterator::operator*() const
{
- return reinterpret_cast<JSCell*>(&m_heap.blocks[m_block]->cells[m_cell]);
+ return reinterpret_cast<JSCell*>(&m_heap.collectorBlock(m_block)->cells[m_cell]);
}
// Iterators advance up to the next-to-last -- and not the last -- cell in a
@@ -103,7 +103,7 @@ namespace JSC {
if (m_block < m_heap.nextBlock || (m_block == m_heap.nextBlock && m_cell < m_heap.nextCell))
return *this;
- while (m_block < m_heap.usedBlocks && !m_heap.blocks[m_block]->marked.get(m_cell))
+ while (m_block < m_heap.usedBlocks && !m_heap.collectorBlock(m_block)->marked.get(m_cell))
advance(HeapConstants::cellsPerBlock - 1);
return *this;
}
@@ -119,7 +119,7 @@ namespace JSC {
do {
advance(HeapConstants::cellsPerBlock - 1);
ASSERT(m_block > m_heap.nextBlock || (m_block == m_heap.nextBlock && m_cell >= m_heap.nextCell));
- } while (m_block < m_heap.usedBlocks && m_heap.blocks[m_block]->marked.get(m_cell));
+ } while (m_block < m_heap.usedBlocks && m_heap.collectorBlock(m_block)->marked.get(m_cell));
return *this;
}
diff --git a/JavaScriptCore/runtime/ExceptionHelpers.cpp b/JavaScriptCore/runtime/ExceptionHelpers.cpp
index ebde320..3e0b70c 100644
--- a/JavaScriptCore/runtime/ExceptionHelpers.cpp
+++ b/JavaScriptCore/runtime/ExceptionHelpers.cpp
@@ -186,6 +186,11 @@ JSObject* createNotAnObjectError(ExecState* exec, JSNotAnObjectErrorStub* error,
return exception;
}
+JSObject* createOutOfMemoryError(JSGlobalObject* globalObject)
+{
+ return createError(globalObject, "Out of memory");
+}
+
JSValue throwOutOfMemoryError(ExecState* exec)
{
return throwError(exec, createError(exec, "Out of memory"));
diff --git a/JavaScriptCore/runtime/ExceptionHelpers.h b/JavaScriptCore/runtime/ExceptionHelpers.h
index 3e6de86..e4c94fb 100644
--- a/JavaScriptCore/runtime/ExceptionHelpers.h
+++ b/JavaScriptCore/runtime/ExceptionHelpers.h
@@ -53,6 +53,7 @@ namespace JSC {
JSObject* createNotAConstructorError(ExecState*, JSValue, unsigned bytecodeOffset, CodeBlock*);
JSValue createNotAFunctionError(ExecState*, JSValue, unsigned bytecodeOffset, CodeBlock*);
JSObject* createNotAnObjectError(ExecState*, JSNotAnObjectErrorStub*, unsigned bytecodeOffset, CodeBlock*);
+ JSObject* createOutOfMemoryError(JSGlobalObject*);
JSValue throwOutOfMemoryError(ExecState*);
} // namespace JSC
diff --git a/JavaScriptCore/runtime/Executable.cpp b/JavaScriptCore/runtime/Executable.cpp
index 229588b..058a091 100644
--- a/JavaScriptCore/runtime/Executable.cpp
+++ b/JavaScriptCore/runtime/Executable.cpp
@@ -116,6 +116,10 @@ JSObject* EvalExecutable::compileInternal(ExecState* exec, ScopeChainNode* scope
#if ENABLE(JIT)
if (exec->globalData().canUseJIT()) {
m_jitCodeForCall = JIT::compile(scopeChainNode->globalData, m_evalCodeBlock.get());
+ if (UNLIKELY(!m_jitCodeForCall)) {
+ m_evalCodeBlock.clear();
+ return createOutOfMemoryError(globalObject);
+ }
#if !ENABLE(OPCODE_SAMPLING)
if (!BytecodeGenerator::dumpsGeneratedCode())
m_evalCodeBlock->discardBytecode();
@@ -164,6 +168,10 @@ JSObject* ProgramExecutable::compileInternal(ExecState* exec, ScopeChainNode* sc
#if ENABLE(JIT)
if (exec->globalData().canUseJIT()) {
m_jitCodeForCall = JIT::compile(scopeChainNode->globalData, m_programCodeBlock.get());
+ if (UNLIKELY(!m_jitCodeForCall)) {
+ m_programCodeBlock.clear();
+ return createOutOfMemoryError(globalObject);
+ }
#if !ENABLE(OPCODE_SAMPLING)
if (!BytecodeGenerator::dumpsGeneratedCode())
m_programCodeBlock->discardBytecode();
@@ -192,7 +200,7 @@ JSObject* FunctionExecutable::compileForCallInternal(ExecState* exec, ScopeChain
JSGlobalObject* globalObject = scopeChain.globalObject();
ASSERT(!m_codeBlockForCall);
- m_codeBlockForCall = adoptPtr(new FunctionCodeBlock(this, FunctionCode, source().provider(), source().startOffset(), false));
+ m_codeBlockForCall = adoptPtr(new FunctionCodeBlock(this, FunctionCode, globalObject, source().provider(), source().startOffset(), false));
OwnPtr<BytecodeGenerator> generator(adoptPtr(new BytecodeGenerator(body.get(), globalObject->debugger(), scopeChain, m_codeBlockForCall->symbolTable(), m_codeBlockForCall.get())));
generator->generate();
m_numParametersForCall = m_codeBlockForCall->m_numParameters;
@@ -205,6 +213,10 @@ JSObject* FunctionExecutable::compileForCallInternal(ExecState* exec, ScopeChain
#if ENABLE(JIT)
if (exec->globalData().canUseJIT()) {
m_jitCodeForCall = JIT::compile(scopeChainNode->globalData, m_codeBlockForCall.get(), &m_jitCodeForCallWithArityCheck);
+ if (UNLIKELY(!m_jitCodeForCall)) {
+ m_codeBlockForCall.clear();
+ return createOutOfMemoryError(globalObject);
+ }
#if !ENABLE(OPCODE_SAMPLING)
if (!BytecodeGenerator::dumpsGeneratedCode())
m_codeBlockForCall->discardBytecode();
@@ -233,7 +245,7 @@ JSObject* FunctionExecutable::compileForConstructInternal(ExecState* exec, Scope
JSGlobalObject* globalObject = scopeChain.globalObject();
ASSERT(!m_codeBlockForConstruct);
- m_codeBlockForConstruct = adoptPtr(new FunctionCodeBlock(this, FunctionCode, source().provider(), source().startOffset(), true));
+ m_codeBlockForConstruct = adoptPtr(new FunctionCodeBlock(this, FunctionCode, globalObject, source().provider(), source().startOffset(), true));
OwnPtr<BytecodeGenerator> generator(adoptPtr(new BytecodeGenerator(body.get(), globalObject->debugger(), scopeChain, m_codeBlockForConstruct->symbolTable(), m_codeBlockForConstruct.get())));
generator->generate();
m_numParametersForConstruct = m_codeBlockForConstruct->m_numParameters;
@@ -246,6 +258,10 @@ JSObject* FunctionExecutable::compileForConstructInternal(ExecState* exec, Scope
#if ENABLE(JIT)
if (exec->globalData().canUseJIT()) {
m_jitCodeForConstruct = JIT::compile(scopeChainNode->globalData, m_codeBlockForConstruct.get(), &m_jitCodeForConstructWithArityCheck);
+ if (UNLIKELY(!m_jitCodeForConstruct)) {
+ m_codeBlockForConstruct.clear();
+ return createOutOfMemoryError(globalObject);
+ }
#if !ENABLE(OPCODE_SAMPLING)
if (!BytecodeGenerator::dumpsGeneratedCode())
m_codeBlockForConstruct->discardBytecode();
@@ -277,7 +293,7 @@ PassOwnPtr<ExceptionInfo> FunctionExecutable::reparseExceptionInfo(JSGlobalData*
ScopeChain scopeChain(scopeChainNode);
JSGlobalObject* globalObject = scopeChain.globalObject();
- OwnPtr<CodeBlock> newCodeBlock(adoptPtr(new FunctionCodeBlock(this, FunctionCode, source().provider(), source().startOffset(), codeBlock->m_isConstructor)));
+ OwnPtr<CodeBlock> newCodeBlock(adoptPtr(new FunctionCodeBlock(this, FunctionCode, globalObject, source().provider(), source().startOffset(), codeBlock->m_isConstructor)));
globalData->functionCodeBlockBeingReparsed = newCodeBlock.get();
OwnPtr<BytecodeGenerator> generator(adoptPtr(new BytecodeGenerator(newFunctionBody.get(), globalObject->debugger(), scopeChain, newCodeBlock->symbolTable(), newCodeBlock.get())));
@@ -288,13 +304,16 @@ PassOwnPtr<ExceptionInfo> FunctionExecutable::reparseExceptionInfo(JSGlobalData*
#if ENABLE(JIT)
if (globalData->canUseJIT()) {
- JITCode newJITCode = JIT::compile(globalData, newCodeBlock.get());
+ JITCode newJITCode = JIT::compile(globalData, newCodeBlock.get(), 0, codeBlock->m_isConstructor ? generatedJITCodeForConstruct().start() : generatedJITCodeForCall().start());
+ if (!newJITCode) {
+ globalData->functionCodeBlockBeingReparsed = 0;
+ return PassOwnPtr<ExceptionInfo>();
+ }
ASSERT(codeBlock->m_isConstructor ? newJITCode.size() == generatedJITCodeForConstruct().size() : newJITCode.size() == generatedJITCodeForCall().size());
}
#endif
globalData->functionCodeBlockBeingReparsed = 0;
-
return newCodeBlock->extractExceptionInfo();
}
@@ -318,7 +337,11 @@ PassOwnPtr<ExceptionInfo> EvalExecutable::reparseExceptionInfo(JSGlobalData* glo
#if ENABLE(JIT)
if (globalData->canUseJIT()) {
- JITCode newJITCode = JIT::compile(globalData, newCodeBlock.get());
+ JITCode newJITCode = JIT::compile(globalData, newCodeBlock.get(), 0, generatedJITCodeForCall().start());
+ if (!newJITCode) {
+ globalData->functionCodeBlockBeingReparsed = 0;
+ return PassOwnPtr<ExceptionInfo>();
+ }
ASSERT(newJITCode.size() == generatedJITCodeForCall().size());
}
#endif
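
Note: each compileInternal() change above follows the same shape: if JIT::compile() comes back empty, drop the freshly built code block and return an out-of-memory error object instead of continuing with a null code pointer. A generic sketch of the pattern, with all names hypothetical:

    #include <memory>

    struct JITCode {
        bool valid;
        explicit operator bool() const { return valid; }
    };
    struct CodeBlock {};
    struct ErrorObject {};

    JITCode jitCompile(CodeBlock*) { return JITCode{false}; } // stubbed failure

    // Returns an error object on failure, null on success, mirroring the
    // m_codeBlockForCall.clear() / createOutOfMemoryError() pairing above.
    ErrorObject* compileChecked(std::unique_ptr<CodeBlock>& block, ErrorObject* oom)
    {
        JITCode code = jitCompile(block.get());
        if (!code) {
            block.reset(); // discard the half-initialized code block
            return oom;
        }
        return nullptr;
    }
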
diff --git a/JavaScriptCore/runtime/GCActivityCallback.cpp b/JavaScriptCore/runtime/GCActivityCallback.cpp
new file mode 100644
index 0000000..2f2c079
--- /dev/null
+++ b/JavaScriptCore/runtime/GCActivityCallback.cpp
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2010 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "GCActivityCallback.h"
+
+namespace JSC {
+
+struct DefaultGCActivityCallbackPlatformData {
+};
+
+DefaultGCActivityCallback::DefaultGCActivityCallback(Heap* heap)
+{
+}
+
+DefaultGCActivityCallback::~DefaultGCActivityCallback()
+{
+}
+
+void DefaultGCActivityCallback::operator()()
+{
+}
+
+}
+
diff --git a/JavaScriptCore/runtime/GCActivityCallback.h b/JavaScriptCore/runtime/GCActivityCallback.h
new file mode 100644
index 0000000..66d56e8
--- /dev/null
+++ b/JavaScriptCore/runtime/GCActivityCallback.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2010 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef GCActivityCallback_h
+#define GCActivityCallback_h
+
+#include <wtf/OwnPtr.h>
+#include <wtf/PassOwnPtr.h>
+
+namespace JSC {
+
+class Heap;
+
+class GCActivityCallback {
+public:
+ virtual ~GCActivityCallback() {}
+ virtual void operator()() {}
+
+protected:
+ GCActivityCallback() {}
+};
+
+struct DefaultGCActivityCallbackPlatformData;
+
+class DefaultGCActivityCallback : public GCActivityCallback {
+public:
+ static PassOwnPtr<DefaultGCActivityCallback> create(Heap*);
+
+ DefaultGCActivityCallback(Heap*);
+ ~DefaultGCActivityCallback();
+
+ void operator()();
+
+private:
+ OwnPtr<DefaultGCActivityCallbackPlatformData> d;
+};
+
+inline PassOwnPtr<DefaultGCActivityCallback> DefaultGCActivityCallback::create(Heap* heap)
+{
+ return adoptPtr(new DefaultGCActivityCallback(heap));
+}
+
+}
+
+#endif
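
Note: Heap owns its callback through PassOwnPtr, so embedders can swap in their own policy via the new Heap::setActivityCallback(). A hypothetical subclass, assuming the WTF adoptPtr() helper and the header above are available; this class is not part of the patch:

    #include "GCActivityCallback.h"
    #include <cstdio>

    // A callback that just logs each heap-activity notification. The
    // default CF implementation instead re-arms a deferred-GC timer.
    class LoggingGCActivityCallback : public JSC::GCActivityCallback {
    public:
        virtual void operator()() { std::fprintf(stderr, "GC activity\n"); }
    };

    // Usage (heap is a JSC::Heap*):
    //     heap->setActivityCallback(adoptPtr(new LoggingGCActivityCallback));
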
diff --git a/JavaScriptCore/runtime/GCActivityCallbackCF.cpp b/JavaScriptCore/runtime/GCActivityCallbackCF.cpp
new file mode 100644
index 0000000..06d4210
--- /dev/null
+++ b/JavaScriptCore/runtime/GCActivityCallbackCF.cpp
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2010 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "GCActivityCallback.h"
+
+#include "Collector.h"
+#include "JSLock.h"
+#include <wtf/RetainPtr.h>
+#include <CoreFoundation/CoreFoundation.h>
+
+#if !PLATFORM(CF)
+#error "This file should only be used on CF platforms."
+#endif
+
+namespace JSC {
+
+struct DefaultGCActivityCallbackPlatformData {
+ static void trigger(CFRunLoopTimerRef, void *info);
+
+ RetainPtr<CFRunLoopTimerRef> timer;
+ CFRunLoopTimerContext context;
+};
+
+const CFTimeInterval decade = 60 * 60 * 24 * 365 * 10;
+
+void DefaultGCActivityCallbackPlatformData::trigger(CFRunLoopTimerRef, void *info)
+{
+ Heap* heap = static_cast<Heap*>(info);
+ JSLock lock(heap->globalData());
+
+ heap->collectAllGarbage();
+}
+
+DefaultGCActivityCallback::DefaultGCActivityCallback(Heap* heap)
+{
+ d = adoptPtr(new DefaultGCActivityCallbackPlatformData);
+
+ memset(&d->context, '\0', sizeof(CFRunLoopTimerContext));
+ d->context.info = heap;
+ d->timer.adoptCF(CFRunLoopTimerCreate(0, decade, decade, 0, 0, DefaultGCActivityCallbackPlatformData::trigger, &d->context));
+ CFRunLoopAddTimer(CFRunLoopGetCurrent(), d->timer.get(), kCFRunLoopCommonModes);
+}
+
+DefaultGCActivityCallback::~DefaultGCActivityCallback()
+{
+ CFRunLoopRemoveTimer(CFRunLoopGetCurrent(), d->timer.get(), kCFRunLoopCommonModes);
+ CFRunLoopTimerInvalidate(d->timer.get());
+ d->context.info = 0;
+ d->timer = 0;
+}
+
+void DefaultGCActivityCallback::operator()()
+{
+ CFRunLoopTimerSetNextFireDate(d->timer.get(), CFAbsoluteTimeGetCurrent() + 2);
+}
+
+}
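
Note: the CF callback arms its timer a decade out and, on every activity notification, pulls the fire date in to now + 2 seconds, so collectAllGarbage() runs roughly two seconds after the heap goes quiet. The same debounce, modelled without CoreFoundation in a hypothetical sketch:

    #include <chrono>

    struct GCDebounce {
        std::chrono::steady_clock::time_point deadline;

        // Analogous to DefaultGCActivityCallback::operator()(): each call
        // pushes the GC deadline two seconds past the latest activity.
        void onActivity()
        {
            deadline = std::chrono::steady_clock::now() + std::chrono::seconds(2);
        }

        // Poll in place of the run loop timer firing.
        bool shouldCollect() const
        {
            return std::chrono::steady_clock::now() >= deadline;
        }
    };
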
diff --git a/JavaScriptCore/runtime/GCHandle.cpp b/JavaScriptCore/runtime/GCHandle.cpp
new file mode 100644
index 0000000..3331517
--- /dev/null
+++ b/JavaScriptCore/runtime/GCHandle.cpp
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2010 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "GCHandle.h"
+
+namespace JSC {
+
+WeakGCHandlePool* WeakGCHandle::pool()
+{
+ uintptr_t pool = (reinterpret_cast<uintptr_t>(this) & WeakGCHandlePool::poolMask);
+ return reinterpret_cast<WeakGCHandlePool*>(pool);
+}
+
+WeakGCHandlePool::WeakGCHandlePool()
+{
+ ASSERT(sizeof(WeakGCHandlePool) <= WeakGCHandlePool::poolSize);
+ m_entriesSize = 0;
+ m_initialAlloc = 1;
+ m_entries[0].setNextInFreeList(0);
+}
+
+WeakGCHandle* WeakGCHandlePool::allocate(JSCell* cell)
+{
+ ASSERT(cell);
+ ASSERT(m_entries[0].isNext());
+ unsigned freeList = m_entries[0].getNextInFreeList();
+ ASSERT(freeList < WeakGCHandlePool::numPoolEntries);
+ ASSERT(m_entriesSize < WeakGCHandlePool::numPoolEntries);
+
+ if (m_entriesSize == WeakGCHandlePool::numPoolEntries - 1)
+ return 0;
+
+ if (freeList) {
+ unsigned i = freeList;
+ freeList = m_entries[i].getNextInFreeList();
+ m_entries[i].set(cell);
+ m_entries[0].setNextInFreeList(freeList);
+ ++m_entriesSize;
+ return &m_entries[i];
+ }
+
+ ASSERT(m_initialAlloc < WeakGCHandlePool::numPoolEntries);
+
+ unsigned i = m_initialAlloc;
+ ++m_initialAlloc;
+ m_entries[i].set(cell);
+ ++m_entriesSize;
+ return &m_entries[i];
+
+}
+
+void WeakGCHandlePool::free(WeakGCHandle* handle)
+{
+ ASSERT(handle->pool() == this);
+ ASSERT(m_entries[0].isNext());
+ unsigned freeList = m_entries[0].getNextInFreeList();
+ ASSERT(freeList < WeakGCHandlePool::numPoolEntries);
+ handle->setNextInFreeList(freeList);
+ m_entries[0].setNextInFreeList(handle - m_entries);
+ --m_entriesSize;
+}
+
+void* WeakGCHandlePool::operator new(size_t, AlignedMemory<WeakGCHandlePool::poolSize>& allocation)
+{
+ return allocation.base();
+}
+
+}
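
Note: WeakGCHandle::pool() works only because every pool comes from the aligned allocator: masking off the low bits of a handle's own address lands on the 32KB pool that contains it. In isolation, with hypothetical addresses:

    #include <cassert>
    #include <cstdint>

    int main()
    {
        const uintptr_t poolSize = 32 * 1024;
        const uintptr_t poolMask = ~(poolSize - 1);

        uintptr_t poolBase = 0x00208000;         // 32KB-aligned by the allocator
        uintptr_t handleAddr = poolBase + 0x440; // a handle inside that pool

        // WeakGCHandle::pool() is exactly this mask applied to "this".
        assert((handleAddr & poolMask) == poolBase);
        return 0;
    }
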
diff --git a/JavaScriptCore/runtime/GCHandle.h b/JavaScriptCore/runtime/GCHandle.h
new file mode 100644
index 0000000..38a7be9
--- /dev/null
+++ b/JavaScriptCore/runtime/GCHandle.h
@@ -0,0 +1,120 @@
+/*
+ * Copyright (C) 2010 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+ * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef GCHandle_h
+#define GCHandle_h
+
+#include "AlignedMemoryAllocator.h"
+
+namespace JSC {
+
+class Heap;
+class JSCell;
+class WeakGCHandle;
+class WeakGCHandlePool;
+
+class WeakGCHandle {
+ friend class WeakGCHandlePool;
+
+public:
+ // Because JSCell objects are aligned, we can use the lower two bits as
+ // status flags. The least significant bit is set when the handle is not a
+ // pointer, i.e. when it's used as an offset for the free list in
+ // WeakGCHandlePool. The second least significant bit is set when the
+ // object the pointer corresponds to has been deleted by garbage collection.
+
+ bool isValidPtr() { return !(m_ptr & 3); }
+ bool isPtr() { return !(m_ptr & 1); }
+ bool isNext() { return (m_ptr & 3) == 1; }
+
+ void invalidate()
+ {
+ ASSERT(isValidPtr());
+ m_ptr |= 2;
+ }
+
+ JSCell* get()
+ {
+ ASSERT(isPtr());
+ return reinterpret_cast<JSCell*>(m_ptr & ~3);
+ }
+
+ void set(JSCell* p)
+ {
+ m_ptr = reinterpret_cast<uintptr_t>(p);
+ ASSERT(isPtr());
+ }
+
+ WeakGCHandlePool* pool();
+
+private:
+ uintptr_t getNextInFreeList()
+ {
+ ASSERT(isNext());
+ return m_ptr >> 2;
+ }
+
+ void setNextInFreeList(uintptr_t n)
+ {
+ m_ptr = (n << 2) | 1;
+ ASSERT(isNext());
+ }
+
+ uintptr_t m_ptr;
+};
+
+class WeakGCHandlePool {
+public:
+ static const size_t poolSize = 32 * 1024; // 32k
+ static const size_t poolMask = ~(poolSize - 1);
+ static const size_t numPoolEntries = (poolSize - sizeof(Heap*) - 3 * sizeof(unsigned)) / sizeof(WeakGCHandle);
+
+ typedef AlignedMemoryAllocator<WeakGCHandlePool::poolSize> Allocator;
+
+ WeakGCHandlePool();
+
+ WeakGCHandle* allocate(JSCell* cell);
+ void free(WeakGCHandle*);
+
+ bool isFull()
+ {
+ ASSERT(m_entriesSize < WeakGCHandlePool::numPoolEntries);
+ return m_entriesSize == WeakGCHandlePool::numPoolEntries - 1;
+ }
+
+ void update();
+
+ void* operator new(size_t, AlignedMemory<WeakGCHandlePool::poolSize>&);
+
+private:
+ Heap* m_heap;
+ unsigned m_entriesSize;
+ unsigned m_initialAlloc;
+
+ WeakGCHandle m_entries[WeakGCHandlePool::numPoolEntries];
+};
+
+}
+#endif
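
Note: the comment in WeakGCHandle spells out the tag encoding; concretely, the three states look like this (hypothetical values, matching the accessors above):

    #include <cassert>
    #include <cstdint>

    int main()
    {
        uintptr_t cell = 0x1000;   // JSCell addresses have bits 0-1 clear

        uintptr_t valid = cell;    // xx00: live pointer
        assert(!(valid & 3));

        uintptr_t dead = cell | 2; // xx10: invalidate() after collection
        assert(!(dead & 1) && (dead & 2));

        uintptr_t next = 7;        // free-list entry: (n << 2) | 1
        uintptr_t freeEntry = (next << 2) | 1;
        assert((freeEntry & 3) == 1);
        assert(freeEntry >> 2 == next);
        return 0;
    }
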
diff --git a/JavaScriptCore/runtime/JSArray.cpp b/JavaScriptCore/runtime/JSArray.cpp
index 99e1a10..0db0a63 100644
--- a/JavaScriptCore/runtime/JSArray.cpp
+++ b/JavaScriptCore/runtime/JSArray.cpp
@@ -126,17 +126,36 @@ inline void JSArray::checkConsistency(ConsistencyCheckType)
#endif
+JSArray::JSArray(VPtrStealingHackType)
+ : JSObject(createStructure(jsNull()))
+{
+ unsigned initialCapacity = 0;
+
+ m_storage = static_cast<ArrayStorage*>(fastZeroedMalloc(storageSize(initialCapacity)));
+ m_storage->m_allocBase = m_storage;
+ m_indexBias = 0;
+ m_vectorLength = initialCapacity;
+
+ checkConsistency();
+
+ // It's not safe to call Heap::heap(this) in order to report extra memory
+ // cost here, because the VPtrStealingHackType JSArray is not allocated on
+ // the heap. For the same reason, it's OK not to report extra cost.
+}
+
JSArray::JSArray(NonNullPassRefPtr<Structure> structure)
: JSObject(structure)
{
unsigned initialCapacity = 0;
- ArrayStorage* storage = static_cast<ArrayStorage*>(fastZeroedMalloc(storageSize(initialCapacity)));
+ m_storage = static_cast<ArrayStorage*>(fastZeroedMalloc(storageSize(initialCapacity)));
+ m_storage->m_allocBase = m_storage;
m_indexBias = 0;
- setArrayStorage(storage);
m_vectorLength = initialCapacity;
checkConsistency();
+
+ Heap::heap(this)->reportExtraMemoryCost(storageSize(0));
}
JSArray::JSArray(NonNullPassRefPtr<Structure> structure, unsigned initialLength, ArrayCreationMode creationMode)
@@ -148,35 +167,35 @@ JSArray::JSArray(NonNullPassRefPtr<Structure> structure, unsigned initialLength,
else
initialCapacity = min(BASE_VECTOR_LEN, MIN_SPARSE_ARRAY_INDEX);
- ArrayStorage* storage = static_cast<ArrayStorage*>(fastMalloc(storageSize(initialCapacity)));
- storage->m_length = initialLength;
+ m_storage = static_cast<ArrayStorage*>(fastMalloc(storageSize(initialCapacity)));
+ m_storage->m_allocBase = m_storage;
+ m_storage->m_length = initialLength;
m_indexBias = 0;
m_vectorLength = initialCapacity;
- setArrayStorage(storage);
- storage->m_sparseValueMap = 0;
- storage->subclassData = 0;
- storage->reportedMapCapacity = 0;
+ m_storage->m_sparseValueMap = 0;
+ m_storage->subclassData = 0;
+ m_storage->reportedMapCapacity = 0;
if (creationMode == CreateCompact) {
#if CHECK_ARRAY_CONSISTENCY
- storage->m_inCompactInitialization = !!initialCapacity;
+ m_storage->m_inCompactInitialization = !!initialCapacity;
#endif
- storage->m_length = 0;
- storage->m_numValuesInVector = initialCapacity;
+ m_storage->m_length = 0;
+ m_storage->m_numValuesInVector = initialCapacity;
} else {
#if CHECK_ARRAY_CONSISTENCY
- storage->m_inCompactInitialization = false;
+ m_storage->m_inCompactInitialization = false;
#endif
- storage->m_length = initialLength;
- storage->m_numValuesInVector = 0;
- JSValue* vector = m_vector;
+ m_storage->m_length = initialLength;
+ m_storage->m_numValuesInVector = 0;
+ JSValue* vector = m_storage->m_vector;
for (size_t i = 0; i < initialCapacity; ++i)
vector[i] = JSValue();
}
checkConsistency();
- Heap::heap(this)->reportExtraMemoryCost(initialCapacity * sizeof(JSValue));
+ Heap::heap(this)->reportExtraMemoryCost(storageSize(initialCapacity));
}
JSArray::JSArray(NonNullPassRefPtr<Structure> structure, const ArgList& list)
@@ -184,21 +203,21 @@ JSArray::JSArray(NonNullPassRefPtr<Structure> structure, const ArgList& list)
{
unsigned initialCapacity = list.size();
- ArrayStorage* storage = static_cast<ArrayStorage*>(fastMalloc(storageSize(initialCapacity)));
+ m_storage = static_cast<ArrayStorage*>(fastMalloc(storageSize(initialCapacity)));
+ m_storage->m_allocBase = m_storage;
m_indexBias = 0;
- storage->m_length = initialCapacity;
+ m_storage->m_length = initialCapacity;
m_vectorLength = initialCapacity;
- storage->m_numValuesInVector = initialCapacity;
- storage->m_sparseValueMap = 0;
- storage->subclassData = 0;
- storage->reportedMapCapacity = 0;
+ m_storage->m_numValuesInVector = initialCapacity;
+ m_storage->m_sparseValueMap = 0;
+ m_storage->subclassData = 0;
+ m_storage->reportedMapCapacity = 0;
#if CHECK_ARRAY_CONSISTENCY
- storage->m_inCompactInitialization = false;
+ m_storage->m_inCompactInitialization = false;
#endif
- setArrayStorage(storage);
size_t i = 0;
- JSValue* vector = m_vector;
+ JSValue* vector = m_storage->m_vector;
ArgList::const_iterator end = list.end();
for (ArgList::const_iterator it = list.begin(); it != end; ++it, ++i)
vector[i] = *it;
@@ -213,15 +232,13 @@ JSArray::~JSArray()
ASSERT(vptr() == JSGlobalData::jsArrayVPtr);
checkConsistency(DestructorConsistencyCheck);
- ArrayStorage* storage = arrayStorage();
- delete storage->m_sparseValueMap;
- char* realStorage = reinterpret_cast<char*>(storage) - (m_indexBias * sizeof(JSValue));
- fastFree(realStorage);
+ delete m_storage->m_sparseValueMap;
+ fastFree(m_storage->m_allocBase);
}
bool JSArray::getOwnPropertySlot(ExecState* exec, unsigned i, PropertySlot& slot)
{
- ArrayStorage* storage = arrayStorage();
+ ArrayStorage* storage = m_storage;
if (i >= storage->m_length) {
if (i > MAX_ARRAY_INDEX)
@@ -230,7 +247,7 @@ bool JSArray::getOwnPropertySlot(ExecState* exec, unsigned i, PropertySlot& slot
}
if (i < m_vectorLength) {
- JSValue& valueSlot = m_vector[i];
+ JSValue& valueSlot = storage->m_vector[i];
if (valueSlot) {
slot.setValueSlot(&valueSlot);
return true;
@@ -270,7 +287,7 @@ bool JSArray::getOwnPropertyDescriptor(ExecState* exec, const Identifier& proper
return true;
}
- ArrayStorage* storage = arrayStorage();
+ ArrayStorage* storage = m_storage;
bool isArrayIndex;
unsigned i = propertyName.toArrayIndex(&isArrayIndex);
@@ -278,7 +295,7 @@ bool JSArray::getOwnPropertyDescriptor(ExecState* exec, const Identifier& proper
if (i >= storage->m_length)
return false;
if (i < m_vectorLength) {
- JSValue& value = m_vector[i];
+ JSValue& value = storage->m_vector[i];
if (value) {
descriptor.setDescriptor(value, 0);
return true;
@@ -323,7 +340,7 @@ void JSArray::put(ExecState* exec, unsigned i, JSValue value)
{
checkConsistency();
- ArrayStorage* storage = arrayStorage();
+ ArrayStorage* storage = m_storage;
unsigned length = storage->m_length;
if (i >= length && i <= MAX_ARRAY_INDEX) {
@@ -332,7 +349,7 @@ void JSArray::put(ExecState* exec, unsigned i, JSValue value)
}
if (i < m_vectorLength) {
- JSValue& valueSlot = m_vector[i];
+ JSValue& valueSlot = storage->m_vector[i];
if (valueSlot) {
valueSlot = value;
checkConsistency();
@@ -349,7 +366,7 @@ void JSArray::put(ExecState* exec, unsigned i, JSValue value)
NEVER_INLINE void JSArray::putSlowCase(ExecState* exec, unsigned i, JSValue value)
{
- ArrayStorage* storage = arrayStorage();
+ ArrayStorage* storage = m_storage;
SparseArrayValueMap* map = storage->m_sparseValueMap;
@@ -387,8 +404,9 @@ NEVER_INLINE void JSArray::putSlowCase(ExecState* exec, unsigned i, JSValue valu
// Fast case is when there is no sparse map, so we can increase the vector size without moving values from it.
if (!map || map->isEmpty()) {
if (increaseVectorLength(i + 1)) {
- m_vector[i] = value;
- ++arrayStorage()->m_numValuesInVector;
+ storage = m_storage;
+ storage->m_vector[i] = value;
+ ++storage->m_numValuesInVector;
checkConsistency();
} else
throwOutOfMemoryError(exec);
@@ -416,29 +434,30 @@ NEVER_INLINE void JSArray::putSlowCase(ExecState* exec, unsigned i, JSValue valu
}
}
- int baseBias = m_indexBias * sizeof(JSValue);
- char* baseStorage = reinterpret_cast<char*>(storage - baseBias);
+ void* baseStorage = storage->m_allocBase;
if (!tryFastRealloc(baseStorage, storageSize(newVectorLength + m_indexBias)).getValue(baseStorage)) {
throwOutOfMemoryError(exec);
return;
}
- storage = reinterpret_cast<ArrayStorage*>(baseStorage + baseBias);
- setArrayStorage(storage);
+ m_storage = reinterpret_cast<ArrayStorage*>(static_cast<char*>(baseStorage) + m_indexBias * sizeof(JSValue));
+ m_storage->m_allocBase = baseStorage;
+ storage = m_storage;
unsigned vectorLength = m_vectorLength;
+ JSValue* vector = storage->m_vector;
if (newNumValuesInVector == storage->m_numValuesInVector + 1) {
for (unsigned j = vectorLength; j < newVectorLength; ++j)
- m_vector[j] = JSValue();
+ vector[j] = JSValue();
if (i > MIN_SPARSE_ARRAY_INDEX)
map->remove(i);
} else {
for (unsigned j = vectorLength; j < max(vectorLength, MIN_SPARSE_ARRAY_INDEX); ++j)
- m_vector[j] = JSValue();
+ vector[j] = JSValue();
for (unsigned j = max(vectorLength, MIN_SPARSE_ARRAY_INDEX); j < newVectorLength; ++j)
- m_vector[j] = map->take(j);
+ vector[j] = map->take(j);
}
ASSERT(i < newVectorLength);
@@ -446,7 +465,7 @@ NEVER_INLINE void JSArray::putSlowCase(ExecState* exec, unsigned i, JSValue valu
m_vectorLength = newVectorLength;
storage->m_numValuesInVector = newNumValuesInVector;
- m_vector[i] = value;
+ storage->m_vector[i] = value;
checkConsistency();
@@ -470,10 +489,10 @@ bool JSArray::deleteProperty(ExecState* exec, unsigned i)
{
checkConsistency();
- ArrayStorage* storage = arrayStorage();
+ ArrayStorage* storage = m_storage;
if (i < m_vectorLength) {
- JSValue& valueSlot = m_vector[i];
+ JSValue& valueSlot = storage->m_vector[i];
if (!valueSlot) {
checkConsistency();
return false;
@@ -509,11 +528,11 @@ void JSArray::getOwnPropertyNames(ExecState* exec, PropertyNameArray& propertyNa
// is incredibly inefficient for large arrays. We need a different approach,
// which almost certainly means a different structure for PropertyNameArray.
- ArrayStorage* storage = arrayStorage();
+ ArrayStorage* storage = m_storage;
unsigned usedVectorLength = min(storage->m_length, m_vectorLength);
for (unsigned i = 0; i < usedVectorLength; ++i) {
- if (m_vector[i])
+ if (storage->m_vector[i])
propertyNames.add(Identifier::from(exec, i));
}
@@ -534,7 +553,7 @@ ALWAYS_INLINE unsigned JSArray::getNewVectorLength(unsigned desiredLength)
ASSERT(desiredLength <= MAX_STORAGE_VECTOR_LENGTH);
unsigned increasedLength;
- unsigned length = arrayStorage()->m_length;
+ unsigned length = m_storage->m_length;
if (desiredLength < length)
increasedLength = length;
@@ -561,22 +580,21 @@ bool JSArray::increaseVectorLength(unsigned newLength)
// This function leaves the array in an internally inconsistent state, because it does not move any values from sparse value map
// to the vector. Callers have to account for that, because they can do it more efficiently.
- ArrayStorage* storage = arrayStorage();
+ ArrayStorage* storage = m_storage;
unsigned vectorLength = m_vectorLength;
ASSERT(newLength > vectorLength);
ASSERT(newLength <= MAX_STORAGE_VECTOR_INDEX);
unsigned newVectorLength = getNewVectorLength(newLength);
- int baseBias = m_indexBias * sizeof(JSValue);
- char* baseStorage = reinterpret_cast<char*>(storage) - baseBias;
+ void* baseStorage = storage->m_allocBase;
if (!tryFastRealloc(baseStorage, storageSize(newVectorLength + m_indexBias)).getValue(baseStorage))
return false;
-
- storage = reinterpret_cast<ArrayStorage*>(baseStorage + baseBias);
- setArrayStorage(storage);
- JSValue* vector = m_vector;
+ storage = m_storage = reinterpret_cast<ArrayStorage*>(static_cast<char*>(baseStorage) + m_indexBias * sizeof(JSValue));
+ m_storage->m_allocBase = baseStorage;
+
+ JSValue* vector = storage->m_vector;
for (unsigned i = vectorLength; i < newVectorLength; ++i)
vector[i] = JSValue();
@@ -592,33 +610,29 @@ bool JSArray::increaseVectorPrefixLength(unsigned newLength)
// This function leaves the array in an internally inconsistent state, because it does not move any values from sparse value map
// to the vector. Callers have to account for that, because they can do it more efficiently.
- ArrayStorage* storage = arrayStorage();
- ArrayStorage* newStorage;
+ ArrayStorage* storage = m_storage;
unsigned vectorLength = m_vectorLength;
ASSERT(newLength > vectorLength);
ASSERT(newLength <= MAX_STORAGE_VECTOR_INDEX);
unsigned newVectorLength = getNewVectorLength(newLength);
- char* baseStorage = reinterpret_cast<char*>(storage) - (m_indexBias * sizeof(JSValue));
-
- char* newBaseStorage = reinterpret_cast<char*>(fastMalloc(storageSize(newVectorLength + m_indexBias)));
+
+ void* newBaseStorage = fastMalloc(storageSize(newVectorLength + m_indexBias));
if (!newBaseStorage)
return false;
m_indexBias += newVectorLength - newLength;
- int newStorageOffset = m_indexBias * sizeof(JSValue);
-
- newStorage = reinterpret_cast<ArrayStorage*>(newBaseStorage + newStorageOffset);
- memcpy(newStorage, storage, storageSize(0));
- memcpy(&newStorage->m_vector[newLength - m_vectorLength], &storage->m_vector[0], storage->m_length * sizeof(JSValue));
+ m_storage = reinterpret_cast<ArrayStorage*>(static_cast<char*>(newBaseStorage) + m_indexBias * sizeof(JSValue));
+
+ memcpy(m_storage, storage, storageSize(0));
+ memcpy(&m_storage->m_vector[newLength - m_vectorLength], &storage->m_vector[0], vectorLength * sizeof(JSValue));
+ m_storage->m_allocBase = newBaseStorage;
m_vectorLength = newLength;
- fastFree(baseStorage);
+ fastFree(storage->m_allocBase);
- setArrayStorage(newStorage);
-
Heap::heap(this)->reportExtraMemoryCost(storageSize(newVectorLength) - storageSize(vectorLength));
return true;
@@ -627,21 +641,21 @@ bool JSArray::increaseVectorPrefixLength(unsigned newLength)
void JSArray::setLength(unsigned newLength)
{
+ ArrayStorage* storage = m_storage;
+
#if CHECK_ARRAY_CONSISTENCY
- if (!m_storage->m_inCompactInitialization)
+ if (!storage->m_inCompactInitialization)
checkConsistency();
else
- m_storage->m_inCompactInitialization = false;
+ storage->m_inCompactInitialization = false;
#endif
- ArrayStorage* storage = arrayStorage();
-
unsigned length = storage->m_length;
if (newLength < length) {
unsigned usedVectorLength = min(length, m_vectorLength);
for (unsigned i = newLength; i < usedVectorLength; ++i) {
- JSValue& valueSlot = m_vector[i];
+ JSValue& valueSlot = storage->m_vector[i];
bool hadValue = valueSlot;
valueSlot = JSValue();
storage->m_numValuesInVector -= hadValue;
@@ -670,7 +684,7 @@ JSValue JSArray::pop()
{
checkConsistency();
- ArrayStorage* storage = arrayStorage();
+ ArrayStorage* storage = m_storage;
unsigned length = storage->m_length;
if (!length)
@@ -681,7 +695,7 @@ JSValue JSArray::pop()
JSValue result;
if (length < m_vectorLength) {
- JSValue& valueSlot = m_vector[length];
+ JSValue& valueSlot = storage->m_vector[length];
if (valueSlot) {
--storage->m_numValuesInVector;
result = valueSlot;
@@ -714,10 +728,10 @@ void JSArray::push(ExecState* exec, JSValue value)
{
checkConsistency();
- ArrayStorage* storage = arrayStorage();
+ ArrayStorage* storage = m_storage;
if (storage->m_length < m_vectorLength) {
- m_vector[storage->m_length] = value;
+ storage->m_vector[storage->m_length] = value;
++storage->m_numValuesInVector;
++storage->m_length;
checkConsistency();
@@ -728,8 +742,8 @@ void JSArray::push(ExecState* exec, JSValue value)
SparseArrayValueMap* map = storage->m_sparseValueMap;
if (!map || map->isEmpty()) {
if (increaseVectorLength(storage->m_length + 1)) {
- storage = arrayStorage();
- m_vector[storage->m_length] = value;
+ storage = m_storage;
+ storage->m_vector[storage->m_length] = value;
++storage->m_numValuesInVector;
++storage->m_length;
checkConsistency();
@@ -748,7 +762,7 @@ void JSArray::shiftCount(ExecState* exec, int count)
{
ASSERT(count > 0);
- ArrayStorage* storage = arrayStorage();
+ ArrayStorage* storage = m_storage;
unsigned oldLength = storage->m_length;
@@ -761,7 +775,7 @@ void JSArray::shiftCount(ExecState* exec, int count)
// slots and then fill them with possible properties. See ECMA spec.
// 15.4.4.9 steps 11 through 13.
for (unsigned i = count; i < oldLength; ++i) {
- if ((i >= m_vectorLength) || (!m_vector[i])) {
+ if ((i >= m_vectorLength) || (!m_storage->m_vector[i])) {
PropertySlot slot(this);
JSValue p = prototype();
if ((!p.isNull()) && (asObject(p)->getPropertySlot(exec, i, slot)))
@@ -769,11 +783,11 @@ void JSArray::shiftCount(ExecState* exec, int count)
}
}
- storage = arrayStorage(); // The put() above could have grown the vector and realloc'ed storage.
+ storage = m_storage; // The put() above could have grown the vector and realloc'ed storage.
        // Need to decrement numValuesInVector based on the number of real entries
for (unsigned i = 0; i < (unsigned)count; ++i)
- if ((i < m_vectorLength) && (m_vector[i]))
+ if ((i < m_vectorLength) && (storage->m_vector[i]))
--storage->m_numValuesInVector;
} else
storage->m_numValuesInVector -= count;
@@ -788,17 +802,16 @@ void JSArray::shiftCount(ExecState* exec, int count)
if (m_vectorLength) {
char* newBaseStorage = reinterpret_cast<char*>(storage) + count * sizeof(JSValue);
memmove(newBaseStorage, storage, storageSize(0));
- storage = reinterpret_cast<ArrayStorage*>(newBaseStorage);
+ m_storage = reinterpret_cast<ArrayStorage*>(newBaseStorage);
m_indexBias += count;
- setArrayStorage(storage);
}
}
}
void JSArray::unshiftCount(ExecState* exec, int count)
{
- ArrayStorage* storage = arrayStorage();
+ ArrayStorage* storage = m_storage;
ASSERT(m_indexBias >= 0);
ASSERT(count >= 0);
@@ -811,7 +824,7 @@ void JSArray::unshiftCount(ExecState* exec, int count)
// slots and then fill them with possible properties. See ECMA spec.
// 15.4.4.13 steps 8 through 10.
for (unsigned i = 0; i < length; ++i) {
- if ((i >= m_vectorLength) || (!m_vector[i])) {
+ if ((i >= m_vectorLength) || (!m_storage->m_vector[i])) {
PropertySlot slot(this);
JSValue p = prototype();
if ((!p.isNull()) && (asObject(p)->getPropertySlot(exec, i, slot)))
@@ -820,19 +833,22 @@ void JSArray::unshiftCount(ExecState* exec, int count)
}
}
- storage = arrayStorage(); // The put() above could have grown the vector and realloc'ed storage.
+ storage = m_storage; // The put() above could have grown the vector and realloc'ed storage.
if (m_indexBias >= count) {
m_indexBias -= count;
char* newBaseStorage = reinterpret_cast<char*>(storage) - count * sizeof(JSValue);
memmove(newBaseStorage, storage, storageSize(0));
- storage = reinterpret_cast<ArrayStorage*>(newBaseStorage);
- setArrayStorage(storage);
+ m_storage = reinterpret_cast<ArrayStorage*>(newBaseStorage);
m_vectorLength += count;
- } else if ((!m_indexBias) && (!increaseVectorPrefixLength(m_vectorLength + count))) {
+ } else if (!increaseVectorPrefixLength(m_vectorLength + count)) {
throwOutOfMemoryError(exec);
return;
}
+
+ JSValue* vector = m_storage->m_vector;
+ for (int i = 0; i < count; i++)
+ vector[i] = JSValue();
}
void JSArray::markChildren(MarkStack& markStack)
@@ -858,7 +874,7 @@ static int compareByStringPairForQSort(const void* a, const void* b)
void JSArray::sortNumeric(ExecState* exec, JSValue compareFunction, CallType callType, const CallData& callData)
{
- ArrayStorage* storage = arrayStorage();
+ ArrayStorage* storage = m_storage;
unsigned lengthNotIncludingUndefined = compactForSorting();
if (storage->m_sparseValueMap) {
@@ -872,7 +888,7 @@ void JSArray::sortNumeric(ExecState* exec, JSValue compareFunction, CallType cal
bool allValuesAreNumbers = true;
size_t size = storage->m_numValuesInVector;
for (size_t i = 0; i < size; ++i) {
- if (!m_vector[i].isNumber()) {
+ if (!storage->m_vector[i].isNumber()) {
allValuesAreNumbers = false;
break;
}
@@ -884,14 +900,14 @@ void JSArray::sortNumeric(ExecState* exec, JSValue compareFunction, CallType cal
// For numeric comparison, which is fast, qsort is faster than mergesort. We
// also don't require mergesort's stability, since there's no user visible
// side-effect from swapping the order of equal primitive values.
- qsort(m_vector, size, sizeof(JSValue), compareNumbersForQSort);
+ qsort(storage->m_vector, size, sizeof(JSValue), compareNumbersForQSort);
checkConsistency(SortConsistencyCheck);
}
void JSArray::sort(ExecState* exec)
{
- ArrayStorage* storage = arrayStorage();
+ ArrayStorage* storage = m_storage;
unsigned lengthNotIncludingUndefined = compactForSorting();
if (storage->m_sparseValueMap) {
@@ -914,7 +930,7 @@ void JSArray::sort(ExecState* exec)
}
for (size_t i = 0; i < lengthNotIncludingUndefined; i++) {
- JSValue value = m_vector[i];
+ JSValue value = storage->m_vector[i];
ASSERT(!value.isUndefined());
values[i].first = value;
}
@@ -946,7 +962,7 @@ void JSArray::sort(ExecState* exec)
// modifying the vector incorrectly.
for (size_t i = 0; i < lengthNotIncludingUndefined; i++)
- m_vector[i] = values[i].first;
+ storage->m_vector[i] = values[i].first;
checkConsistency(SortConsistencyCheck);
}
@@ -1033,7 +1049,7 @@ void JSArray::sort(ExecState* exec, JSValue compareFunction, CallType callType,
{
checkConsistency();
- ArrayStorage* storage = arrayStorage();
+ ArrayStorage* storage = m_storage;
// FIXME: This ignores exceptions raised in the compare function or in toNumber.
@@ -1072,14 +1088,14 @@ void JSArray::sort(ExecState* exec, JSValue compareFunction, CallType callType,
// Iterate over the array, ignoring missing values, counting undefined ones, and inserting all other ones into the tree.
for (; numDefined < usedVectorLength; ++numDefined) {
- JSValue v = m_vector[numDefined];
+ JSValue v = storage->m_vector[numDefined];
if (!v || v.isUndefined())
break;
tree.abstractor().m_nodes[numDefined].value = v;
tree.insert(numDefined);
}
for (unsigned i = numDefined; i < usedVectorLength; ++i) {
- JSValue v = m_vector[i];
+ JSValue v = storage->m_vector[i];
if (v) {
if (v.isUndefined())
++numUndefined;
@@ -1103,7 +1119,7 @@ void JSArray::sort(ExecState* exec, JSValue compareFunction, CallType callType,
}
}
- storage = arrayStorage();
+ storage = m_storage;
SparseArrayValueMap::iterator end = map->end();
for (SparseArrayValueMap::iterator it = map->begin(); it != end; ++it) {
@@ -1125,17 +1141,17 @@ void JSArray::sort(ExecState* exec, JSValue compareFunction, CallType callType,
AVLTree<AVLTreeAbstractorForArrayCompare, 44>::Iterator iter;
iter.start_iter_least(tree);
for (unsigned i = 0; i < numDefined; ++i) {
- m_vector[i] = tree.abstractor().m_nodes[*iter].value;
+ storage->m_vector[i] = tree.abstractor().m_nodes[*iter].value;
++iter;
}
// Put undefined values back in.
for (unsigned i = numDefined; i < newUsedVectorLength; ++i)
- m_vector[i] = jsUndefined();
+ storage->m_vector[i] = jsUndefined();
// Ensure that unused values in the vector are zeroed out.
for (unsigned i = newUsedVectorLength; i < usedVectorLength; ++i)
- m_vector[i] = JSValue();
+ storage->m_vector[i] = JSValue();
storage->m_numValuesInVector = newUsedVectorLength;
@@ -1144,7 +1160,7 @@ void JSArray::sort(ExecState* exec, JSValue compareFunction, CallType callType,
void JSArray::fillArgList(ExecState* exec, MarkedArgumentBuffer& args)
{
- ArrayStorage* storage = arrayStorage();
+ ArrayStorage* storage = m_storage;
JSValue* vector = storage->m_vector;
unsigned vectorEnd = min(storage->m_length, m_vectorLength);
@@ -1162,9 +1178,9 @@ void JSArray::fillArgList(ExecState* exec, MarkedArgumentBuffer& args)
void JSArray::copyToRegisters(ExecState* exec, Register* buffer, uint32_t maxSize)
{
- ASSERT(arrayStorage()->m_length >= maxSize);
+ ASSERT(m_storage->m_length >= maxSize);
UNUSED_PARAM(maxSize);
- JSValue* vector = m_vector;
+ JSValue* vector = m_storage->m_vector;
unsigned vectorEnd = min(maxSize, m_vectorLength);
unsigned i = 0;
for (; i < vectorEnd; ++i) {
@@ -1182,7 +1198,7 @@ unsigned JSArray::compactForSorting()
{
checkConsistency();
- ArrayStorage* storage = arrayStorage();
+ ArrayStorage* storage = m_storage;
unsigned usedVectorLength = min(storage->m_length, m_vectorLength);
@@ -1190,17 +1206,17 @@ unsigned JSArray::compactForSorting()
unsigned numUndefined = 0;
for (; numDefined < usedVectorLength; ++numDefined) {
- JSValue v = m_vector[numDefined];
+ JSValue v = storage->m_vector[numDefined];
if (!v || v.isUndefined())
break;
}
for (unsigned i = numDefined; i < usedVectorLength; ++i) {
- JSValue v = m_vector[i];
+ JSValue v = storage->m_vector[i];
if (v) {
if (v.isUndefined())
++numUndefined;
else
- m_vector[numDefined++] = v;
+ storage->m_vector[numDefined++] = v;
}
}
@@ -1214,21 +1230,21 @@ unsigned JSArray::compactForSorting()
if ((newUsedVectorLength > MAX_STORAGE_VECTOR_LENGTH) || !increaseVectorLength(newUsedVectorLength))
return 0;
- storage = arrayStorage();
+ storage = m_storage;
}
SparseArrayValueMap::iterator end = map->end();
for (SparseArrayValueMap::iterator it = map->begin(); it != end; ++it)
- m_vector[numDefined++] = it->second;
+ storage->m_vector[numDefined++] = it->second;
delete map;
storage->m_sparseValueMap = 0;
}
for (unsigned i = numDefined; i < newUsedVectorLength; ++i)
- m_vector[i] = jsUndefined();
+ storage->m_vector[i] = jsUndefined();
for (unsigned i = newUsedVectorLength; i < usedVectorLength; ++i)
- m_vector[i] = JSValue();
+ storage->m_vector[i] = JSValue();
storage->m_numValuesInVector = newUsedVectorLength;
@@ -1239,19 +1255,19 @@ unsigned JSArray::compactForSorting()
void* JSArray::subclassData() const
{
- return arrayStorage()->subclassData;
+ return m_storage->subclassData;
}
void JSArray::setSubclassData(void* d)
{
- arrayStorage()->subclassData = d;
+ m_storage->subclassData = d;
}
#if CHECK_ARRAY_CONSISTENCY
void JSArray::checkConsistency(ConsistencyCheckType type)
{
- ArrayStorage* storage = arrayStorage();
+ ArrayStorage* storage = m_storage;
ASSERT(storage);
if (type == SortConsistencyCheck)
@@ -1259,7 +1275,7 @@ void JSArray::checkConsistency(ConsistencyCheckType type)
unsigned numValuesInVector = 0;
for (unsigned i = 0; i < m_vectorLength; ++i) {
- if (JSValue value = m_vector[i]) {
+ if (JSValue value = storage->m_vector[i]) {
ASSERT(i < storage->m_length);
if (type != DestructorConsistencyCheck)
value.isUndefined(); // Likely to crash if the object was deallocated.
@@ -1277,7 +1293,7 @@ void JSArray::checkConsistency(ConsistencyCheckType type)
for (SparseArrayValueMap::iterator it = storage->m_sparseValueMap->begin(); it != end; ++it) {
unsigned index = it->first;
ASSERT(index < storage->m_length);
             ASSERT(index >= m_vectorLength);
ASSERT(index <= MAX_ARRAY_INDEX);
ASSERT(it->second);
if (type != DestructorConsistencyCheck)
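The pointer arithmetic repeated through this file follows one invariant: the malloc'ed block recorded in m_allocBase starts with m_indexBias spare JSValue slots, and the ArrayStorage header plus its inline vector sit immediately after them. A minimal stand-alone sketch of that layout math, using stand-in types rather than the real JSC definitions:

    #include <stddef.h>

    struct JSValue { void* bits; }; // stand-in for JSC::JSValue

    struct ArrayStorage { // stand-in; the real struct is in JSArray.h
        unsigned m_length;
        unsigned m_numValuesInVector;
        void* m_allocBase; // base address returned by malloc()
        JSValue m_vector[1]; // grows into the tail of the allocation
    };

    // storageSize(n): the header plus (n - 1) additional vector slots.
    inline size_t storageSize(unsigned vectorLength)
    {
        return sizeof(ArrayStorage) + (vectorLength - 1) * sizeof(JSValue);
    }

    // The invariant increaseVectorLength() and increaseVectorPrefixLength()
    // re-establish after every (re)allocation of
    // storageSize(vectorLength + indexBias) bytes:
    inline ArrayStorage* storageFromBase(void* allocBase, int indexBias)
    {
        return reinterpret_cast<ArrayStorage*>(
            static_cast<char*>(allocBase) + indexBias * sizeof(JSValue));
    }

shiftCount() and unshiftCount() then trade slots across that boundary by moving only the fixed-size header (storageSize(0) bytes) and adjusting m_indexBias, which is why removing elements from the front no longer touches the whole vector.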
diff --git a/JavaScriptCore/runtime/JSArray.h b/JavaScriptCore/runtime/JSArray.h
index a7ce328..f718d7e 100644
--- a/JavaScriptCore/runtime/JSArray.h
+++ b/JavaScriptCore/runtime/JSArray.h
@@ -39,6 +39,7 @@ namespace JSC {
unsigned m_numValuesInVector;
SparseArrayValueMap* m_sparseValueMap;
void* subclassData; // A JSArray subclass can use this to fill the vector lazily.
+        void* m_allocBase; // Pointer to the base address returned by malloc(); keeping it eliminates false positives from the leak detector.
size_t reportedMapCapacity;
#if CHECK_ARRAY_CONSISTENCY
bool m_inCompactInitialization;
@@ -61,6 +62,9 @@ namespace JSC {
friend class Walker;
public:
+ enum VPtrStealingHackType { VPtrStealingHack };
+ JSArray(VPtrStealingHackType);
+
explicit JSArray(NonNullPassRefPtr<Structure>);
JSArray(NonNullPassRefPtr<Structure>, unsigned initialLength, ArrayCreationMode);
JSArray(NonNullPassRefPtr<Structure>, const ArgList& initialValues);
@@ -73,7 +77,7 @@ namespace JSC {
static JS_EXPORTDATA const ClassInfo info;
- unsigned length() const { return arrayStorage()->m_length; }
+ unsigned length() const { return m_storage->m_length; }
void setLength(unsigned); // OK to use on new arrays, but not if it might be a RegExpMatchArray.
void sort(ExecState*);
@@ -86,11 +90,11 @@ namespace JSC {
void shiftCount(ExecState*, int count);
void unshiftCount(ExecState*, int count);
- bool canGetIndex(unsigned i) { return i < m_vectorLength && m_vector[i]; }
+ bool canGetIndex(unsigned i) { return i < m_vectorLength && m_storage->m_vector[i]; }
JSValue getIndex(unsigned i)
{
ASSERT(canGetIndex(i));
- return m_vector[i];
+ return m_storage->m_vector[i];
}
bool canSetIndex(unsigned i) { return i < m_vectorLength; }
@@ -98,9 +102,9 @@ namespace JSC {
{
ASSERT(canSetIndex(i));
- JSValue& x = m_vector[i];
+ JSValue& x = m_storage->m_vector[i];
if (!x) {
- ArrayStorage *storage = arrayStorage();
+            ArrayStorage* storage = m_storage;
++storage->m_numValuesInVector;
if (i >= storage->m_length)
storage->m_length = i + 1;
@@ -111,7 +115,7 @@ namespace JSC {
void uncheckedSetIndex(unsigned i, JSValue v)
{
ASSERT(canSetIndex(i));
- ArrayStorage *storage = arrayStorage();
+            ArrayStorage* storage = m_storage;
#if CHECK_ARRAY_CONSISTENCY
ASSERT(storage->m_inCompactInitialization);
#endif
@@ -139,16 +143,6 @@ namespace JSC {
void* subclassData() const;
void setSubclassData(void*);
- inline ArrayStorage *arrayStorage() const
- {
- return reinterpret_cast<ArrayStorage*>(reinterpret_cast<char*>(m_vector) - (sizeof(ArrayStorage) - sizeof(JSValue)));
- }
-
- inline void setArrayStorage(ArrayStorage *storage)
- {
- m_vector = &storage->m_vector[0];
- }
-
private:
virtual const ClassInfo* classInfo() const { return &info; }
@@ -166,7 +160,7 @@ namespace JSC {
unsigned m_vectorLength; // The valid length of m_vector
int m_indexBias; // The number of JSValue sized blocks before ArrayStorage.
- JSValue* m_vector; // Copy of ArrayStorage.m_vector. Used for quick vector access and to materialize ArrayStorage ptr.
+        ArrayStorage* m_storage;
};
JSArray* asArray(JSValue);
@@ -192,7 +186,7 @@ namespace JSC {
{
JSObject::markChildrenDirect(markStack);
- ArrayStorage* storage = arrayStorage();
+ ArrayStorage* storage = m_storage;
unsigned usedVectorLength = std::min(storage->m_length, m_vectorLength);
markStack.appendValues(storage->m_vector, usedVectorLength, MayContainNullValues);
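The inline fast paths above keep their single-dereference cost after the rewrite: where the old code cached m_vector directly on the array, the new code loads m_storage->m_vector, one indirection from the same object. A hypothetical caller shaped like the interpreter's fast path (the slow path is elided):

    // Hypothetical fast-path caller: check, then read, with no intervening
    // operation that could reallocate the storage in between.
    JSValue getOrUndefined(JSArray* array, unsigned i)
    {
        if (array->canGetIndex(i)) // i < m_vectorLength and the slot is non-empty
            return array->getIndex(i);
        return jsUndefined(); // holes and out-of-vector indices go slow
    }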
diff --git a/JavaScriptCore/runtime/JSGlobalData.cpp b/JavaScriptCore/runtime/JSGlobalData.cpp
index 065cbe1..abb2db2 100644
--- a/JavaScriptCore/runtime/JSGlobalData.cpp
+++ b/JavaScriptCore/runtime/JSGlobalData.cpp
@@ -85,7 +85,7 @@ void JSGlobalData::storeVPtrs()
void* storage = &cell;
COMPILE_ASSERT(sizeof(JSArray) <= sizeof(CollectorCell), sizeof_JSArray_must_be_less_than_CollectorCell);
- JSCell* jsArray = new (storage) JSArray(JSArray::createStructure(jsNull()));
+ JSCell* jsArray = new (storage) JSArray(JSArray::VPtrStealingHack);
JSGlobalData::jsArrayVPtr = jsArray->vptr();
jsArray->~JSCell();
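storeVPtrs() needs only JSArray's vtable pointer, and after this patch the normal constructors allocate a real ArrayStorage, so the VPtrStealingHack constructor lets it placement-new an object that touches no heap state. The trick in isolation, with a hypothetical Probe class standing in for JSArray (reading the first word as the vptr relies on the usual but unspecified object layout, exactly as the real code does):

    #include <new>

    struct Probe {
        enum ConstructionMode { DontAllocate };
        Probe(ConstructionMode) { } // initializes nothing but the vptr
        virtual ~Probe() { }
    };

    void* captureVPtr()
    {
        void* buffer[(sizeof(Probe) + sizeof(void*) - 1) / sizeof(void*)]; // pointer-aligned scratch
        Probe* probe = new (buffer) Probe(Probe::DontAllocate); // placement new, no heap
        void* vptr = *reinterpret_cast<void**>(probe); // first word holds the vptr
        probe->~Probe();
        return vptr;
    }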
diff --git a/JavaScriptCore/runtime/JSGlobalData.h b/JavaScriptCore/runtime/JSGlobalData.h
index 63f9ad8..1928f77 100644
--- a/JavaScriptCore/runtime/JSGlobalData.h
+++ b/JavaScriptCore/runtime/JSGlobalData.h
@@ -220,7 +220,9 @@ namespace JSC {
RegExpCache* m_regExpCache;
+#if ENABLE(YARR)
BumpPointerAllocator m_regexAllocator;
+#endif
#ifndef NDEBUG
ThreadIdentifier exclusiveThread;
diff --git a/JavaScriptCore/runtime/JSLock.cpp b/JavaScriptCore/runtime/JSLock.cpp
index a1cffbd..10f4f3f 100644
--- a/JavaScriptCore/runtime/JSLock.cpp
+++ b/JavaScriptCore/runtime/JSLock.cpp
@@ -65,6 +65,12 @@ JSLock::JSLock(ExecState* exec)
lock(m_lockBehavior);
}
+JSLock::JSLock(JSGlobalData* globalData)
+ : m_lockBehavior(globalData->isSharedInstance() ? LockForReal : SilenceAssertionsOnly)
+{
+ lock(m_lockBehavior);
+}
+
void JSLock::lock(JSLockBehavior lockBehavior)
{
#ifdef NDEBUG
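The new overload mirrors the ExecState* constructor for callers that hold only a JSGlobalData; it still takes the real lock only for the shared instance. A hypothetical call site (collectAllGarbage() is used as a plausible example of work that needs the lock):

    void collectNow(JSC::JSGlobalData* globalData)
    {
        JSC::JSLock lock(globalData); // LockForReal iff this is the shared instance
        globalData->heap.collectAllGarbage();
    }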
diff --git a/JavaScriptCore/runtime/JSLock.h b/JavaScriptCore/runtime/JSLock.h
index 8b015c4..05b388c 100644
--- a/JavaScriptCore/runtime/JSLock.h
+++ b/JavaScriptCore/runtime/JSLock.h
@@ -49,12 +49,14 @@ namespace JSC {
// assertions working, so that clients that use the shared context don't break.
class ExecState;
+ class JSGlobalData;
enum JSLockBehavior { SilenceAssertionsOnly, LockForReal };
class JSLock : public Noncopyable {
public:
JSLock(ExecState*);
+ JSLock(JSGlobalData*);
JSLock(JSLockBehavior lockBehavior)
: m_lockBehavior(lockBehavior)
diff --git a/JavaScriptCore/runtime/UStringImpl.h b/JavaScriptCore/runtime/UStringImpl.h
index 08f1fa5..6401d3b 100644
--- a/JavaScriptCore/runtime/UStringImpl.h
+++ b/JavaScriptCore/runtime/UStringImpl.h
@@ -25,6 +25,6 @@
// FIXME: Remove this redundant name!
#include <wtf/text/StringImpl.h>
-namespace JSC { typedef WebCore::StringImpl UStringImpl; }
+namespace JSC { typedef StringImpl UStringImpl; }
#endif
diff --git a/JavaScriptCore/runtime/WeakGCPtr.h b/JavaScriptCore/runtime/WeakGCPtr.h
index 9dce858..ac77cf3 100644
--- a/JavaScriptCore/runtime/WeakGCPtr.h
+++ b/JavaScriptCore/runtime/WeakGCPtr.h
@@ -27,6 +27,7 @@
#define WeakGCPtr_h
#include "Collector.h"
+#include "GCHandle.h"
#include <wtf/Noncopyable.h>
namespace JSC {
@@ -34,23 +35,34 @@ namespace JSC {
// A smart pointer whose get() function returns 0 for cells awaiting destruction.
template <typename T> class WeakGCPtr : Noncopyable {
public:
- WeakGCPtr() : m_ptr(0) { }
+ WeakGCPtr()
+ : m_ptr(0)
+ {
+ }
+
WeakGCPtr(T* ptr) { assign(ptr); }
+ ~WeakGCPtr()
+ {
+ if (m_ptr)
+ m_ptr->pool()->free(m_ptr);
+ }
+
T* get() const
{
- if (!m_ptr || !Heap::isCellMarked(m_ptr))
- return 0;
- return m_ptr;
+ if (m_ptr && m_ptr->isValidPtr())
+ return static_cast<T*>(m_ptr->get());
+ return 0;
}
- bool clear(JSCell* ptr)
+ bool clear(JSCell* p)
{
- if (ptr == m_ptr) {
- m_ptr = 0;
- return true;
- }
- return false;
+ if (!m_ptr || m_ptr->get() != p)
+ return false;
+
+ m_ptr->pool()->free(m_ptr);
+ m_ptr = 0;
+ return true;
}
T& operator*() const { return *get(); }
@@ -62,7 +74,7 @@ public:
#if COMPILER(WINSCW)
operator bool() const { return m_ptr; }
#else
- typedef T* WeakGCPtr::*UnspecifiedBoolType;
+ typedef WeakGCHandle* WeakGCPtr::*UnspecifiedBoolType;
operator UnspecifiedBoolType() const { return get() ? &WeakGCPtr::m_ptr : 0; }
#endif
@@ -73,14 +85,16 @@ public:
#endif
private:
- void assign(T* ptr)
+ void assign(JSCell* ptr)
{
ASSERT(ptr);
- Heap::markCell(ptr);
- m_ptr = ptr;
+ if (m_ptr)
+ m_ptr->set(ptr);
+ else
+ m_ptr = Heap::heap(ptr)->addWeakGCHandle(ptr);
}
- T* m_ptr;
+ WeakGCHandle* m_ptr;
};
template <typename T> inline WeakGCPtr<T>& WeakGCPtr<T>::operator=(T* optr)
@@ -129,7 +143,7 @@ template <typename T, typename U> inline WeakGCPtr<T> const_pointer_cast(const W
return WeakGCPtr<T>(const_cast<T*>(p.get()));
}
-template <typename T> inline T* getPtr(const WeakGCPtr<T>& p)
+template <typename T> inline T* get(const WeakGCPtr<T>& p)
{
return p.get();
}
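After this rewrite a WeakGCPtr no longer stores the cell itself: it rents a WeakGCHandle from the cell's heap, and the collector invalidates that handle when the cell is swept, so get() starts returning 0 without the owner calling clear(). A sketch of the intended usage pattern (the cache around it is hypothetical):

    // Hypothetical cache that must not extend the function's lifetime.
    static JSC::WeakGCPtr<JSC::JSFunction> s_cachedCallback;

    void setCallback(JSC::JSFunction* function)
    {
        s_cachedCallback = function; // allocates or reuses a WeakGCHandle
    }

    JSC::JSFunction* callbackIfAlive()
    {
        return s_cachedCallback.get(); // 0 once the collector reclaims the cell
    }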