Diffstat (limited to 'JavaScriptCore/runtime/Collector.cpp')
-rw-r--r-- | JavaScriptCore/runtime/Collector.cpp | 255
1 file changed, 97 insertions, 158 deletions
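
Beyond the raw hunks, the headline addition in this change is a GC activity callback: the Heap constructor now creates a DefaultGCActivityCallback, the heap invokes it after every collection, and a new Heap::setActivityCallback() lets an embedder swap in its own callback. A minimal sketch of how that entry point might be used, shown here for orientation; the helper function is hypothetical, and direct access to JSGlobalData's heap member is an assumption, not something this patch adds:

// Hypothetical embedder helper (illustration only): re-install the default
// activity callback using the entry points added in the diff below.
#include "GCActivityCallback.h"
#include "JSGlobalData.h"

static void resetGCActivityCallback(JSC::JSGlobalData& globalData)
{
    JSC::Heap* heap = &globalData.heap; // assumption: the usual public heap member
    heap->setActivityCallback(JSC::DefaultGCActivityCallback::create(heap));
}
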
diff --git a/JavaScriptCore/runtime/Collector.cpp b/JavaScriptCore/runtime/Collector.cpp
index 2873e0b..3fbd278 100644
--- a/JavaScriptCore/runtime/Collector.cpp
+++ b/JavaScriptCore/runtime/Collector.cpp
@@ -25,6 +25,7 @@
 #include "CallFrame.h"
 #include "CodeBlock.h"
 #include "CollectorHeapIterator.h"
+#include "GCActivityCallback.h"
 #include "Interpreter.h"
 #include "JSArray.h"
 #include "JSGlobalObject.h"
@@ -42,6 +43,7 @@
 #include <stdlib.h>
 #include <wtf/FastMalloc.h>
 #include <wtf/HashCountedSet.h>
+#include <wtf/WTFThreadData.h>
 #include <wtf/UnusedParam.h>
 #include <wtf/VMTags.h>
 
@@ -53,11 +55,6 @@
 #include <mach/thread_act.h>
 #include <mach/vm_map.h>
 
-#elif OS(SYMBIAN)
-#include <e32std.h>
-#include <e32cmn.h>
-#include <unistd.h>
-
 #elif OS(WINDOWS)
 
 #include <windows.h>
@@ -109,11 +106,6 @@ const size_t ALLOCATIONS_PER_COLLECTION = 3600;
 // a PIC branch in Mach-O binaries, see <rdar://problem/5971391>.
 #define MIN_ARRAY_SIZE (static_cast<size_t>(14))
 
-#if OS(SYMBIAN)
-const size_t MAX_NUM_BLOCKS = 256; // Max size of collector heap set to 16 MB
-static RHeap* userChunk = 0;
-#endif
-
 #if ENABLE(JSC_MULTIPLE_THREADS)
 
 #if OS(DARWIN)
@@ -148,29 +140,10 @@ Heap::Heap(JSGlobalData* globalData)
     , m_globalData(globalData)
 {
     ASSERT(globalData);
-
-#if OS(SYMBIAN)
-    // Symbian OpenC supports mmap but currently not the MAP_ANON flag.
-    // Using fastMalloc() does not properly align blocks on 64k boundaries
-    // and previous implementation was flawed/incomplete.
-    // UserHeap::ChunkHeap allows allocation of continuous memory and specification
-    // of alignment value for (symbian) cells within that heap.
-    //
-    // Clarification and mapping of terminology:
-    // RHeap (created by UserHeap::ChunkHeap below) is continuos memory chunk,
-    // which can dynamically grow up to 8 MB,
-    // that holds all CollectorBlocks of this session (static).
-    // Each symbian cell within RHeap maps to a 64kb aligned CollectorBlock.
-    // JSCell objects are maintained as usual within CollectorBlocks.
-    if (!userChunk) {
-        userChunk = UserHeap::ChunkHeap(0, 0, MAX_NUM_BLOCKS * BLOCK_SIZE, BLOCK_SIZE, BLOCK_SIZE);
-        if (!userChunk)
-            CRASH();
-    }
-#endif // OS(SYMBIAN)
-
     memset(&m_heap, 0, sizeof(CollectorHeap));
     allocateBlock();
+    m_activityCallback = DefaultGCActivityCallback::create(this);
+    (*m_activityCallback)();
 }
 
 Heap::~Heap()
@@ -198,6 +171,9 @@ void Heap::destroy()
     freeBlocks();
 
+    for (unsigned i = 0; i < m_weakGCHandlePools.size(); ++i)
+        m_weakGCHandlePools[i].deallocate();
+
 #if ENABLE(JSC_MULTIPLE_THREADS)
     if (m_currentThreadRegistrar) {
         int error = pthread_key_delete(m_currentThreadRegistrar);
@@ -211,82 +187,38 @@ void Heap::destroy()
         t = next;
     }
 #endif
-
+    m_blockallocator.destroy();
     m_globalData = 0;
 }
 
 NEVER_INLINE CollectorBlock* Heap::allocateBlock()
 {
-#if OS(DARWIN)
-    vm_address_t address = 0;
-    vm_map(current_task(), &address, BLOCK_SIZE, BLOCK_OFFSET_MASK, VM_FLAGS_ANYWHERE | VM_TAG_FOR_COLLECTOR_MEMORY, MEMORY_OBJECT_NULL, 0, FALSE, VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
-#elif OS(SYMBIAN)
-    // Allocate a 64 kb aligned CollectorBlock
-    unsigned char* mask = reinterpret_cast<unsigned char*>(userChunk->Alloc(BLOCK_SIZE));
-    if (!mask)
+    AlignedCollectorBlock allocation = m_blockallocator.allocate();
+    CollectorBlock* block = static_cast<CollectorBlock*>(allocation.base());
+    if (!block)
         CRASH();
-    uintptr_t address = reinterpret_cast<uintptr_t>(mask);
-#elif OS(WINCE)
-    void* address = VirtualAlloc(NULL, BLOCK_SIZE, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
-#elif OS(WINDOWS)
-#if COMPILER(MINGW)
-    void* address = __mingw_aligned_malloc(BLOCK_SIZE, BLOCK_SIZE);
-#else
-    void* address = _aligned_malloc(BLOCK_SIZE, BLOCK_SIZE);
-#endif
-    memset(address, 0, BLOCK_SIZE);
-#elif HAVE(POSIX_MEMALIGN)
-    void* address;
-    posix_memalign(&address, BLOCK_SIZE, BLOCK_SIZE);
-#else
-
-#if ENABLE(JSC_MULTIPLE_THREADS)
-#error Need to initialize pagesize safely.
-#endif
-    static size_t pagesize = getpagesize();
-
-    size_t extra = 0;
-    if (BLOCK_SIZE > pagesize)
-        extra = BLOCK_SIZE - pagesize;
-
-    void* mmapResult = mmap(NULL, BLOCK_SIZE + extra, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
-    uintptr_t address = reinterpret_cast<uintptr_t>(mmapResult);
-
-    size_t adjust = 0;
-    if ((address & BLOCK_OFFSET_MASK) != 0)
-        adjust = BLOCK_SIZE - (address & BLOCK_OFFSET_MASK);
-
-    if (adjust > 0)
-        munmap(reinterpret_cast<char*>(address), adjust);
-
-    if (adjust < extra)
-        munmap(reinterpret_cast<char*>(address + adjust + BLOCK_SIZE), extra - adjust);
-
-    address += adjust;
-#endif
 
     // Initialize block.
-    CollectorBlock* block = reinterpret_cast<CollectorBlock*>(address);
     block->heap = this;
     clearMarkBits(block);
 
     Structure* dummyMarkableCellStructure = m_globalData->dummyMarkableCellStructure.get();
     for (size_t i = 0; i < HeapConstants::cellsPerBlock; ++i)
-        new (block->cells + i) JSCell(dummyMarkableCellStructure);
+        new (&block->cells[i]) JSCell(dummyMarkableCellStructure);
 
     // Add block to blocks vector.
     size_t numBlocks = m_heap.numBlocks;
     if (m_heap.usedBlocks == numBlocks) {
-        static const size_t maxNumBlocks = ULONG_MAX / sizeof(CollectorBlock*) / GROWTH_FACTOR;
+        static const size_t maxNumBlocks = ULONG_MAX / sizeof(AlignedCollectorBlock) / GROWTH_FACTOR;
         if (numBlocks > maxNumBlocks)
             CRASH();
         numBlocks = max(MIN_ARRAY_SIZE, numBlocks * GROWTH_FACTOR);
         m_heap.numBlocks = numBlocks;
-        m_heap.blocks = static_cast<CollectorBlock**>(fastRealloc(m_heap.blocks, numBlocks * sizeof(CollectorBlock*)));
+        m_heap.blocks = static_cast<AlignedCollectorBlock*>(fastRealloc(m_heap.blocks, numBlocks * sizeof(AlignedCollectorBlock)));
     }
-    m_heap.blocks[m_heap.usedBlocks++] = block;
+    m_heap.blocks[m_heap.usedBlocks++] = allocation;
 
     return block;
 }
@@ -299,7 +231,7 @@ NEVER_INLINE void Heap::freeBlock(size_t block)
     ObjectIterator end(m_heap, block + 1);
     for ( ; it != end; ++it)
         (*it)->~JSCell();
-    freeBlockPtr(m_heap.blocks[block]);
+    m_heap.blocks[block].deallocate();
 
     // swap with the last block so we compact as we go
     m_heap.blocks[block] = m_heap.blocks[m_heap.usedBlocks - 1];
@@ -307,31 +239,10 @@
     if (m_heap.numBlocks > MIN_ARRAY_SIZE && m_heap.usedBlocks < m_heap.numBlocks / LOW_WATER_FACTOR) {
         m_heap.numBlocks = m_heap.numBlocks / GROWTH_FACTOR;
-        m_heap.blocks = static_cast<CollectorBlock**>(fastRealloc(m_heap.blocks, m_heap.numBlocks * sizeof(CollectorBlock*)));
+        m_heap.blocks = static_cast<AlignedCollectorBlock*>(fastRealloc(m_heap.blocks, m_heap.numBlocks * sizeof(AlignedCollectorBlock)));
     }
 }
 
-NEVER_INLINE void Heap::freeBlockPtr(CollectorBlock* block)
-{
-#if OS(DARWIN)
-    vm_deallocate(current_task(), reinterpret_cast<vm_address_t>(block), BLOCK_SIZE);
-#elif OS(SYMBIAN)
-    userChunk->Free(reinterpret_cast<TAny*>(block));
-#elif OS(WINCE)
-    VirtualFree(block, 0, MEM_RELEASE);
-#elif OS(WINDOWS)
-#if COMPILER(MINGW)
-    __mingw_aligned_free(block);
-#else
-    _aligned_free(block);
-#endif
-#elif HAVE(POSIX_MEMALIGN)
-    free(block);
-#else
-    munmap(reinterpret_cast<char*>(block), BLOCK_SIZE);
-#endif
-}
-
 void Heap::freeBlocks()
 {
     ProtectCountSet protectedValuesCopy = m_protectedValues;
@@ -355,7 +266,7 @@ void Heap::freeBlocks()
         it->first->~JSCell();
 
     for (size_t block = 0; block < m_heap.usedBlocks; ++block)
-        freeBlockPtr(m_heap.blocks[block]);
+        m_heap.blocks[block].deallocate();
 
     fastFree(m_heap.blocks);
 
@@ -388,6 +299,7 @@ void Heap::recordExtraCost(size_t cost)
 
 void* Heap::allocate(size_t s)
 {
+    ASSERT(globalData()->identifierTable == wtfThreadData().currentIdentifierTable());
     typedef HeapConstants::Block Block;
     typedef HeapConstants::Cell Cell;
 
@@ -408,11 +320,11 @@ allocate:
 
     do {
         ASSERT(m_heap.nextBlock < m_heap.usedBlocks);
-        Block* block = reinterpret_cast<Block*>(m_heap.blocks[m_heap.nextBlock]);
+        Block* block = m_heap.collectorBlock(m_heap.nextBlock);
        do {
            ASSERT(m_heap.nextCell < HeapConstants::cellsPerBlock);
            if (!block->marked.get(m_heap.nextCell)) { // Always false for the last cell in the block
-                Cell* cell = block->cells + m_heap.nextCell;
+                Cell* cell = &block->cells[m_heap.nextCell];
 
                 m_heap.operationInProgress = Allocation;
                 JSCell* imp = reinterpret_cast<JSCell*>(cell);
@@ -422,7 +334,8 @@ allocate:
                 ++m_heap.nextCell;
                 return cell;
             }
-        } while (++m_heap.nextCell != HeapConstants::cellsPerBlock);
+            block->marked.advanceToNextPossibleFreeCell(m_heap.nextCell);
+        } while (m_heap.nextCell != HeapConstants::cellsPerBlock);
         m_heap.nextCell = 0;
     } while (++m_heap.nextBlock != m_heap.usedBlocks);
 
@@ -462,10 +375,10 @@ void Heap::shrinkBlocks(size_t neededBlocks)
     // Clear the always-on last bit, so isEmpty() isn't fooled by it.
     for (size_t i = 0; i < m_heap.usedBlocks; ++i)
-        m_heap.blocks[i]->marked.clear(HeapConstants::cellsPerBlock - 1);
+        m_heap.collectorBlock(i)->marked.clear(HeapConstants::cellsPerBlock - 1);
 
     for (size_t i = 0; i != m_heap.usedBlocks && m_heap.usedBlocks != neededBlocks; ) {
-        if (m_heap.blocks[i]->marked.isEmpty()) {
+        if (m_heap.collectorBlock(i)->marked.isEmpty()) {
             freeBlock(i);
         } else
             ++i;
@@ -473,11 +386,11 @@ void Heap::shrinkBlocks(size_t neededBlocks)
 
     // Reset the always-on last bit.
     for (size_t i = 0; i < m_heap.usedBlocks; ++i)
-        m_heap.blocks[i]->marked.set(HeapConstants::cellsPerBlock - 1);
+        m_heap.collectorBlock(i)->marked.set(HeapConstants::cellsPerBlock - 1);
 }
 
 #if OS(WINCE)
-void* g_stackBase = 0;
+JS_EXPORTDATA void* g_stackBase = 0;
 
 inline bool isPageWritable(void* page)
 {
@@ -574,10 +487,6 @@ static inline void* currentThreadStackBase()
         MOV pTib, EAX
     }
     return static_cast<void*>(pTib->StackBase);
-#elif OS(WINDOWS) && CPU(X86_64) && COMPILER(MSVC)
-    // FIXME: why only for MSVC?
-    PNT_TIB64 pTib = reinterpret_cast<PNT_TIB64>(NtCurrentTeb());
-    return reinterpret_cast<void*>(pTib->StackBase);
 #elif OS(WINDOWS) && CPU(X86) && COMPILER(GCC)
     // offset 0x18 from the FS segment register gives a pointer to
     // the thread information block for the current thread
@@ -586,7 +495,12 @@ static inline void* currentThreadStackBase()
           : "=r" (pTib)
         );
     return static_cast<void*>(pTib->StackBase);
+#elif OS(WINDOWS) && CPU(X86_64)
+    PNT_TIB64 pTib = reinterpret_cast<PNT_TIB64>(NtCurrentTeb());
+    return reinterpret_cast<void*>(pTib->StackBase);
 #elif OS(QNX)
+    AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex);
+    MutexLocker locker(mutex);
     return currentThreadStackBaseQNX();
 #elif OS(SOLARIS)
     stack_t s;
@@ -598,19 +512,17 @@ static inline void* currentThreadStackBase()
     pthread_stackseg_np(thread, &stack);
     return stack.ss_sp;
 #elif OS(SYMBIAN)
-    static void* stackBase = 0;
-    if (stackBase == 0) {
-        TThreadStackInfo info;
-        RThread thread;
-        thread.StackInfo(info);
-        stackBase = (void*)info.iBase;
-    }
-    return (void*)stackBase;
+    TThreadStackInfo info;
+    RThread thread;
+    thread.StackInfo(info);
+    return (void*)info.iBase;
 #elif OS(HAIKU)
     thread_info threadInfo;
     get_thread_info(find_thread(NULL), &threadInfo);
     return threadInfo.stack_end;
 #elif OS(UNIX)
+    AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex);
+    MutexLocker locker(mutex);
     static void* stackBase = 0;
     static size_t stackSize = 0;
     static pthread_t stackThread;
@@ -633,6 +545,8 @@ static inline void* currentThreadStackBase()
     }
     return static_cast<char*>(stackBase) + stackSize;
 #elif OS(WINCE)
+    AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex);
+    MutexLocker locker(mutex);
     if (g_stackBase)
         return g_stackBase;
     else {
@@ -667,7 +581,7 @@ void Heap::makeUsableFromMultipleThreads()
 
 void Heap::registerThread()
 {
-    ASSERT(!m_globalData->mainThreadOnly || isMainThread());
+    ASSERT(!m_globalData->exclusiveThread || m_globalData->exclusiveThread == currentThread());
 
     if (!m_currentThreadRegistrar || pthread_getspecific(m_currentThreadRegistrar))
         return;
@@ -728,19 +642,6 @@ inline bool isPointerAligned(void* p)
 // Cell size needs to be a power of two for isPossibleCell to be valid.
 COMPILE_ASSERT(sizeof(CollectorCell) % 2 == 0, Collector_cell_size_is_power_of_two);
 
-#if USE(JSVALUE32)
-static bool isHalfCellAligned(void *p)
-{
-    return (((intptr_t)(p) & (CELL_MASK >> 1)) == 0);
-}
-
-static inline bool isPossibleCell(void* p)
-{
-    return isHalfCellAligned(p) && p;
-}
-
-#else
-
 static inline bool isCellAligned(void *p)
 {
     return (((intptr_t)(p) & CELL_MASK) == 0);
@@ -750,7 +651,6 @@ static inline bool isPossibleCell(void* p)
 {
     return isCellAligned(p) && p;
 }
-#endif // USE(JSVALUE32)
 
 void Heap::markConservatively(MarkStack& markStack, void* start, void* end)
 {
@@ -767,7 +667,6 @@ void Heap::markConservatively(MarkStack& markStack, void* start, void* end)
     char** p = static_cast<char**>(start);
     char** e = static_cast<char**>(end);
 
-    CollectorBlock** blocks = m_heap.blocks;
     while (p != e) {
         char* x = *p++;
         if (isPossibleCell(x)) {
@@ -783,7 +682,7 @@ void Heap::markConservatively(MarkStack& markStack, void* start, void* end)
             CollectorBlock* blockAddr = reinterpret_cast<CollectorBlock*>(xAsBits - offset);
             usedBlocks = m_heap.usedBlocks;
             for (size_t block = 0; block < usedBlocks; block++) {
-                if (blocks[block] != blockAddr)
+                if (m_heap.collectorBlock(block) != blockAddr)
                     continue;
                 markStack.append(reinterpret_cast<JSCell*>(xAsBits));
                 markStack.drain();
@@ -998,10 +897,40 @@ void Heap::markStackObjectsConservatively(MarkStack& markStack)
 #endif
 }
 
+void Heap::updateWeakGCHandles()
+{
+    for (unsigned i = 0; i < m_weakGCHandlePools.size(); ++i)
+        weakGCHandlePool(i)->update();
+}
+
+void WeakGCHandlePool::update()
+{
+    for (unsigned i = 1; i < WeakGCHandlePool::numPoolEntries; ++i) {
+        if (m_entries[i].isValidPtr()) {
+            JSCell* cell = m_entries[i].get();
+            if (!cell || !Heap::isCellMarked(cell))
+                m_entries[i].invalidate();
+        }
+    }
+}
+
+WeakGCHandle* Heap::addWeakGCHandle(JSCell* ptr)
+{
+    for (unsigned i = 0; i < m_weakGCHandlePools.size(); ++i)
+        if (!weakGCHandlePool(i)->isFull())
+            return weakGCHandlePool(i)->allocate(ptr);
+
+    AlignedMemory<WeakGCHandlePool::poolSize> allocation = m_weakGCHandlePoolAllocator.allocate();
+    m_weakGCHandlePools.append(allocation);
+
+    WeakGCHandlePool* pool = new (allocation) WeakGCHandlePool();
+    return pool->allocate(ptr);
+}
+
 void Heap::protect(JSValue k)
 {
     ASSERT(k);
-    ASSERT(JSLock::currentThreadIsHoldingLock() || !m_globalData->isSharedInstance);
+    ASSERT(JSLock::currentThreadIsHoldingLock() || !m_globalData->isSharedInstance());
 
     if (!k.isCell())
         return;
@@ -1009,15 +938,15 @@ void Heap::protect(JSValue k)
     m_protectedValues.add(k.asCell());
 }
 
-void Heap::unprotect(JSValue k)
+bool Heap::unprotect(JSValue k)
 {
     ASSERT(k);
-    ASSERT(JSLock::currentThreadIsHoldingLock() || !m_globalData->isSharedInstance);
+    ASSERT(JSLock::currentThreadIsHoldingLock() || !m_globalData->isSharedInstance());
 
     if (!k.isCell())
-        return;
+        return false;
 
-    m_protectedValues.remove(k.asCell());
+    return m_protectedValues.remove(k.asCell());
 }
 
 void Heap::markProtectedObjects(MarkStack& markStack)
@@ -1032,7 +961,7 @@
 void Heap::clearMarkBits()
 {
     for (size_t i = 0; i < m_heap.usedBlocks; ++i)
-        clearMarkBits(m_heap.blocks[i]);
+        clearMarkBits(m_heap.collectorBlock(i));
 }
 
 void Heap::clearMarkBits(CollectorBlock* block)
@@ -1051,9 +980,9 @@ size_t Heap::markedCells(size_t startBlock, size_t startCell) const
         return 0;
 
     size_t result = 0;
-    result += m_heap.blocks[startBlock]->marked.count(startCell);
+    result += m_heap.collectorBlock(startBlock)->marked.count(startCell);
     for (size_t i = startBlock + 1; i < m_heap.usedBlocks; ++i)
-        result += m_heap.blocks[i]->marked.count();
+        result += m_heap.collectorBlock(i)->marked.count();
 
     return result;
 }
@@ -1093,7 +1022,7 @@ void Heap::sweep()
 void Heap::markRoots()
 {
 #ifndef NDEBUG
-    if (m_globalData->isSharedInstance) {
+    if (m_globalData->isSharedInstance()) {
         ASSERT(JSLock::lockCount() > 0);
         ASSERT(JSLock::currentThreadIsHoldingLock());
     }
@@ -1122,8 +1051,6 @@ void Heap::markRoots()
     MarkedArgumentBuffer::markLists(markStack, *m_markListSet);
     if (m_globalData->exception)
         markStack.append(m_globalData->exception);
-    if (m_globalData->functionCodeBlockBeingReparsed)
-        m_globalData->functionCodeBlockBeingReparsed->markAggregate(markStack);
     if (m_globalData->firstStringifierToMark)
         JSONObject::markStringifiers(markStack, m_globalData->firstStringifierToMark);
 
@@ -1134,6 +1061,8 @@ void Heap::markRoots()
     markStack.drain();
     markStack.compact();
 
+    updateWeakGCHandles();
+
     m_heap.operationInProgress = NoOperation;
 }
 
@@ -1158,6 +1087,11 @@ Heap::Statistics Heap::statistics() const
     return statistics;
 }
 
+size_t Heap::size() const
+{
+    return m_heap.usedBlocks * BLOCK_SIZE;
+}
+
 size_t Heap::globalObjectCount()
 {
     size_t count = 0;
@@ -1195,10 +1129,6 @@ static const char* typeName(JSCell* cell)
 {
     if (cell->isString())
         return "string";
-#if USE(JSVALUE32)
-    if (cell->isNumber())
-        return "number";
-#endif
     if (cell->isGetterSetter())
         return "Getter-Setter";
     if (cell->isAPIValueWrapper())
@@ -1241,6 +1171,7 @@ bool Heap::isBusy()
 
 void Heap::reset()
 {
+    ASSERT(globalData()->identifierTable == wtfThreadData().currentIdentifierTable());
     JAVASCRIPTCORE_GC_BEGIN();
 
     markRoots();
@@ -1257,10 +1188,13 @@ void Heap::reset()
     resizeBlocks();
 
     JAVASCRIPTCORE_GC_END();
+
+    (*m_activityCallback)();
 }
 
 void Heap::collectAllGarbage()
 {
+    ASSERT(globalData()->identifierTable == wtfThreadData().currentIdentifierTable());
     JAVASCRIPTCORE_GC_BEGIN();
 
     // If the last iteration through the heap deallocated blocks, we need
@@ -1293,4 +1227,9 @@ LiveObjectIterator Heap::primaryHeapEnd()
     return LiveObjectIterator(m_heap, m_heap.usedBlocks);
 }
 
+void Heap::setActivityCallback(PassOwnPtr<GCActivityCallback> activityCallback)
+{
+    m_activityCallback = activityCallback;
+}
+
 } // namespace JSC
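
For reference, the other sizable addition above is the weak-handle machinery: after marking, Heap::updateWeakGCHandles() walks each WeakGCHandlePool, and WeakGCHandlePool::update() invalidates every entry whose cell was not marked. The sketch below mirrors that update pass in a standalone form; Cell, WeakEntry, and Pool are simplified stand-ins, not the actual JavaScriptCore types, and the real pool is the fixed-size, placement-new'd WeakGCHandlePool shown in the diff:

#include <vector>

// Stand-in for a collector cell; 'marked' plays the role of the mark bit.
struct Cell { bool marked; };

// Stand-in for WeakGCHandle: a pointer that can be invalidated once its
// referent dies.
struct WeakEntry {
    Cell* cell;
    bool isValid() const { return cell != 0; }
    void invalidate() { cell = 0; }
};

// Stand-in for WeakGCHandlePool: after marking, drop every handle whose
// cell was not reached, mirroring WeakGCHandlePool::update() in the diff.
struct Pool {
    std::vector<WeakEntry> entries;

    void update()
    {
        for (size_t i = 0; i < entries.size(); ++i) {
            if (entries[i].isValid() && !entries[i].cell->marked)
                entries[i].invalidate();
        }
    }
};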
