Diffstat (limited to 'JavaScriptCore/runtime')
29 files changed, 314 insertions, 586 deletions
diff --git a/JavaScriptCore/runtime/AlignedMemoryAllocator.h b/JavaScriptCore/runtime/AlignedMemoryAllocator.h deleted file mode 100644 index e682eb3..0000000 --- a/JavaScriptCore/runtime/AlignedMemoryAllocator.h +++ /dev/null @@ -1,239 +0,0 @@ -/* - * Copyright (C) 2010 Apple Inc. All rights reserved. - * - * This library is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2 of the License, or (at your option) any later version. - * - * This library is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with this library; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - * - */ - -#ifndef AlignedMemoryAllocator_h -#define AlignedMemoryAllocator_h - -#include <wtf/Bitmap.h> -#include <wtf/PageReservation.h> - -namespace JSC { - -struct AlignedMemoryAllocatorConstants { -// Set sane defaults if -D<flagname=value> wasn't provided via compiler args -#if defined(JSCCOLLECTOR_VIRTUALMEM_RESERVATION) - // Keep backwards compatibility with symbian build system - static const size_t virtualMemoryReservation = JSCCOLLECTOR_VIRTUALMEM_RESERVATION; -#elif defined(__WINS__) - // Emulator has limited virtual address space - static const size_t virtualMemoryReservation = 0x400000; -#else - // HW has plenty of virtual addresses - static const size_t virtualMemoryReservation = 0x8000000; -#endif -}; - -template<size_t blockSize> class AlignedMemory; -template<size_t blockSize> class AlignedMemoryAllocator; - -#if HAVE(PAGE_ALLOCATE_ALIGNED) - -template<size_t blockSize> -class AlignedMemoryAllocator; - -template<size_t blockSize> -class AlignedMemory { -public: - void deallocate(); - void* base(); - -private: - friend class AlignedMemoryAllocator<blockSize>; - - AlignedMemory(PageAllocation); - - PageAllocation m_allocation; -}; - -template<size_t blockSize> -class AlignedMemoryAllocator { -public: - void destroy(); - AlignedMemory<blockSize> allocate(); -}; - -template<size_t blockSize> -inline void AlignedMemoryAllocator<blockSize>::destroy() -{ -} - -template<size_t blockSize> -inline AlignedMemory<blockSize> AlignedMemoryAllocator<blockSize>::allocate() -{ - return AlignedMemory<blockSize>(PageAllocation::allocateAligned(blockSize, PageAllocation::JSGCHeapPages)); -} - -template<size_t blockSize> -inline void AlignedMemory<blockSize>::deallocate() -{ - m_allocation.deallocate(); -} - -template<size_t blockSize> -inline void* AlignedMemory<blockSize>::base() -{ - return m_allocation.base(); -} - -template<size_t blockSize> -inline AlignedMemory<blockSize>::AlignedMemory(PageAllocation allocation) - : m_allocation(allocation) -{ -} - -#else - -template<size_t blockSize> -class AlignedMemory { -public: - void deallocate(); - void* base(); - -private: - friend class AlignedMemoryAllocator<blockSize>; - - AlignedMemory(void* base, AlignedMemoryAllocator<blockSize>* allocator); - - void* m_base; - AlignedMemoryAllocator<blockSize>* m_allocator; -}; - -template<size_t blockSize> -class AlignedMemoryAllocator { -public: - AlignedMemoryAllocator(); - ~AlignedMemoryAllocator(); - - void destroy(); - AlignedMemory<blockSize> allocate(); - 
void free(AlignedMemory<blockSize>); - -private: - static const size_t reservationSize = AlignedMemoryAllocatorConstants::virtualMemoryReservation; - static const size_t bitmapSize = reservationSize / blockSize; - - PageReservation m_reservation; - size_t m_nextFree; - uintptr_t m_reservationBase; - WTF::Bitmap<bitmapSize> m_bitmap; -}; - -template<size_t blockSize> -AlignedMemoryAllocator<blockSize>::AlignedMemoryAllocator() - : m_reservation(PageReservation::reserve(reservationSize + blockSize, PageAllocation::JSGCHeapPages)) - , m_nextFree(0) -{ - // check that blockSize and reservationSize are powers of two - ASSERT(!(blockSize & (blockSize - 1))); - ASSERT(!(reservationSize & (reservationSize - 1))); - - // check that blockSize is a multiple of pageSize and that - // reservationSize is a multiple of blockSize - ASSERT(!(blockSize & (PageAllocation::pageSize() - 1))); - ASSERT(!(reservationSize & (blockSize - 1))); - - ASSERT(m_reservation); - - m_reservationBase = reinterpret_cast<uintptr_t>(m_reservation.base()); - m_reservationBase = (m_reservationBase + blockSize) & ~(blockSize - 1); -} - -template<size_t blockSize> -AlignedMemoryAllocator<blockSize>::~AlignedMemoryAllocator() -{ - destroy(); - m_reservation.deallocate(); -} - -template<size_t blockSize> -inline void AlignedMemoryAllocator<blockSize>::destroy() -{ - for (unsigned i = 0; i < bitmapSize; ++i) { - if (m_bitmap.get(i)) { - void* blockAddress = reinterpret_cast<void*>(m_reservationBase + m_nextFree * blockSize); - m_reservation.decommit(blockAddress, blockSize); - - m_bitmap.clear(i); - } - } -} - -template<size_t blockSize> -AlignedMemory<blockSize> AlignedMemoryAllocator<blockSize>::allocate() -{ - while (m_nextFree < bitmapSize) { - if (!m_bitmap.get(m_nextFree)) { - void* blockAddress = reinterpret_cast<void*>(m_reservationBase + m_nextFree * blockSize); - m_reservation.commit(blockAddress, blockSize); - - m_bitmap.set(m_nextFree); - ++m_nextFree; - - return AlignedMemory<blockSize>(blockAddress, this); - } - m_bitmap.advanceToNextFreeBit(m_nextFree); - } - - if (m_bitmap.isFull()) - return AlignedMemory<blockSize>(0, this); - - m_nextFree = 0; - - return allocate(); -} - -template<size_t blockSize> -void AlignedMemoryAllocator<blockSize>::free(AlignedMemory<blockSize> allocation) -{ - ASSERT(allocation.base()); - m_reservation.decommit(allocation.base(), blockSize); - - size_t diff = (reinterpret_cast<uintptr_t>(allocation.base()) - m_reservationBase); - ASSERT(!(diff & (blockSize - 1))); - - size_t i = diff / blockSize; - ASSERT(m_bitmap.get(i)); - - m_bitmap.clear(i); -} - -template<size_t blockSize> -inline void AlignedMemory<blockSize>::deallocate() -{ - m_allocator->free(*this); -} - -template<size_t blockSize> -inline void* AlignedMemory<blockSize>::base() -{ - return m_base; -} - -template<size_t blockSize> -AlignedMemory<blockSize>::AlignedMemory(void* base, AlignedMemoryAllocator<blockSize>* allocator) - : m_base(base) - , m_allocator(allocator) -{ -} - -#endif - -} - -#endif diff --git a/JavaScriptCore/runtime/Collector.cpp b/JavaScriptCore/runtime/Collector.cpp index 3fbd278..38845ce 100644 --- a/JavaScriptCore/runtime/Collector.cpp +++ b/JavaScriptCore/runtime/Collector.cpp @@ -171,9 +171,6 @@ void Heap::destroy() freeBlocks(); - for (unsigned i = 0; i < m_weakGCHandlePools.size(); ++i) - m_weakGCHandlePools[i].deallocate(); - #if ENABLE(JSC_MULTIPLE_THREADS) if (m_currentThreadRegistrar) { int error = pthread_key_delete(m_currentThreadRegistrar); @@ -187,13 +184,12 @@ void Heap::destroy() t = next; } 
#endif - m_blockallocator.destroy(); m_globalData = 0; } NEVER_INLINE CollectorBlock* Heap::allocateBlock() { - AlignedCollectorBlock allocation = m_blockallocator.allocate(); + PageAllocationAligned allocation = PageAllocationAligned::allocate(BLOCK_SIZE, BLOCK_SIZE, OSAllocator::JSGCHeapPages); CollectorBlock* block = static_cast<CollectorBlock*>(allocation.base()); if (!block) CRASH(); @@ -211,12 +207,12 @@ NEVER_INLINE CollectorBlock* Heap::allocateBlock() size_t numBlocks = m_heap.numBlocks; if (m_heap.usedBlocks == numBlocks) { - static const size_t maxNumBlocks = ULONG_MAX / sizeof(AlignedCollectorBlock) / GROWTH_FACTOR; + static const size_t maxNumBlocks = ULONG_MAX / sizeof(PageAllocationAligned) / GROWTH_FACTOR; if (numBlocks > maxNumBlocks) CRASH(); numBlocks = max(MIN_ARRAY_SIZE, numBlocks * GROWTH_FACTOR); m_heap.numBlocks = numBlocks; - m_heap.blocks = static_cast<AlignedCollectorBlock*>(fastRealloc(m_heap.blocks, numBlocks * sizeof(AlignedCollectorBlock))); + m_heap.blocks = static_cast<PageAllocationAligned*>(fastRealloc(m_heap.blocks, numBlocks * sizeof(PageAllocationAligned))); } m_heap.blocks[m_heap.usedBlocks++] = allocation; @@ -239,7 +235,7 @@ NEVER_INLINE void Heap::freeBlock(size_t block) if (m_heap.numBlocks > MIN_ARRAY_SIZE && m_heap.usedBlocks < m_heap.numBlocks / LOW_WATER_FACTOR) { m_heap.numBlocks = m_heap.numBlocks / GROWTH_FACTOR; - m_heap.blocks = static_cast<AlignedCollectorBlock*>(fastRealloc(m_heap.blocks, m_heap.numBlocks * sizeof(AlignedCollectorBlock))); + m_heap.blocks = static_cast<PageAllocationAligned*>(fastRealloc(m_heap.blocks, m_heap.numBlocks * sizeof(PageAllocationAligned))); } } @@ -389,175 +385,6 @@ void Heap::shrinkBlocks(size_t neededBlocks) m_heap.collectorBlock(i)->marked.set(HeapConstants::cellsPerBlock - 1); } -#if OS(WINCE) -JS_EXPORTDATA void* g_stackBase = 0; - -inline bool isPageWritable(void* page) -{ - MEMORY_BASIC_INFORMATION memoryInformation; - DWORD result = VirtualQuery(page, &memoryInformation, sizeof(memoryInformation)); - - // return false on error, including ptr outside memory - if (result != sizeof(memoryInformation)) - return false; - - DWORD protect = memoryInformation.Protect & ~(PAGE_GUARD | PAGE_NOCACHE); - return protect == PAGE_READWRITE - || protect == PAGE_WRITECOPY - || protect == PAGE_EXECUTE_READWRITE - || protect == PAGE_EXECUTE_WRITECOPY; -} - -static void* getStackBase(void* previousFrame) -{ - // find the address of this stack frame by taking the address of a local variable - bool isGrowingDownward; - void* thisFrame = (void*)(&isGrowingDownward); - - isGrowingDownward = previousFrame < &thisFrame; - static DWORD pageSize = 0; - if (!pageSize) { - SYSTEM_INFO systemInfo; - GetSystemInfo(&systemInfo); - pageSize = systemInfo.dwPageSize; - } - - // scan all of memory starting from this frame, and return the last writeable page found - register char* currentPage = (char*)((DWORD)thisFrame & ~(pageSize - 1)); - if (isGrowingDownward) { - while (currentPage > 0) { - // check for underflow - if (currentPage >= (char*)pageSize) - currentPage -= pageSize; - else - currentPage = 0; - if (!isPageWritable(currentPage)) - return currentPage + pageSize; - } - return 0; - } else { - while (true) { - // guaranteed to complete because isPageWritable returns false at end of memory - currentPage += pageSize; - if (!isPageWritable(currentPage)) - return currentPage; - } - } -} -#endif - -#if OS(QNX) -static inline void *currentThreadStackBaseQNX() -{ - static void* stackBase = 0; - static size_t stackSize = 0; - static 
pthread_t stackThread; - pthread_t thread = pthread_self(); - if (stackBase == 0 || thread != stackThread) { - struct _debug_thread_info threadInfo; - memset(&threadInfo, 0, sizeof(threadInfo)); - threadInfo.tid = pthread_self(); - int fd = open("/proc/self", O_RDONLY); - if (fd == -1) { - LOG_ERROR("Unable to open /proc/self (errno: %d)", errno); - return 0; - } - devctl(fd, DCMD_PROC_TIDSTATUS, &threadInfo, sizeof(threadInfo), 0); - close(fd); - stackBase = reinterpret_cast<void*>(threadInfo.stkbase); - stackSize = threadInfo.stksize; - ASSERT(stackBase); - stackThread = thread; - } - return static_cast<char*>(stackBase) + stackSize; -} -#endif - -static inline void* currentThreadStackBase() -{ -#if OS(DARWIN) - pthread_t thread = pthread_self(); - return pthread_get_stackaddr_np(thread); -#elif OS(WINDOWS) && CPU(X86) && COMPILER(MSVC) - // offset 0x18 from the FS segment register gives a pointer to - // the thread information block for the current thread - NT_TIB* pTib; - __asm { - MOV EAX, FS:[18h] - MOV pTib, EAX - } - return static_cast<void*>(pTib->StackBase); -#elif OS(WINDOWS) && CPU(X86) && COMPILER(GCC) - // offset 0x18 from the FS segment register gives a pointer to - // the thread information block for the current thread - NT_TIB* pTib; - asm ( "movl %%fs:0x18, %0\n" - : "=r" (pTib) - ); - return static_cast<void*>(pTib->StackBase); -#elif OS(WINDOWS) && CPU(X86_64) - PNT_TIB64 pTib = reinterpret_cast<PNT_TIB64>(NtCurrentTeb()); - return reinterpret_cast<void*>(pTib->StackBase); -#elif OS(QNX) - AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex); - MutexLocker locker(mutex); - return currentThreadStackBaseQNX(); -#elif OS(SOLARIS) - stack_t s; - thr_stksegment(&s); - return s.ss_sp; -#elif OS(OPENBSD) - pthread_t thread = pthread_self(); - stack_t stack; - pthread_stackseg_np(thread, &stack); - return stack.ss_sp; -#elif OS(SYMBIAN) - TThreadStackInfo info; - RThread thread; - thread.StackInfo(info); - return (void*)info.iBase; -#elif OS(HAIKU) - thread_info threadInfo; - get_thread_info(find_thread(NULL), &threadInfo); - return threadInfo.stack_end; -#elif OS(UNIX) - AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex); - MutexLocker locker(mutex); - static void* stackBase = 0; - static size_t stackSize = 0; - static pthread_t stackThread; - pthread_t thread = pthread_self(); - if (stackBase == 0 || thread != stackThread) { - pthread_attr_t sattr; - pthread_attr_init(&sattr); -#if HAVE(PTHREAD_NP_H) || OS(NETBSD) - // e.g. on FreeBSD 5.4, neundorf@kde.org - pthread_attr_get_np(thread, &sattr); -#else - // FIXME: this function is non-portable; other POSIX systems may have different np alternatives - pthread_getattr_np(thread, &sattr); -#endif - int rc = pthread_attr_getstack(&sattr, &stackBase, &stackSize); - (void)rc; // FIXME: Deal with error code somehow? Seems fatal. 
- ASSERT(stackBase); - pthread_attr_destroy(&sattr); - stackThread = thread; - } - return static_cast<char*>(stackBase) + stackSize; -#elif OS(WINCE) - AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex); - MutexLocker locker(mutex); - if (g_stackBase) - return g_stackBase; - else { - int dummy; - return getStackBase(&dummy); - } -#else -#error Need a way to get the stack base on this platform -#endif -} - #if ENABLE(JSC_MULTIPLE_THREADS) static inline PlatformThread getCurrentPlatformThread() @@ -587,7 +414,7 @@ void Heap::registerThread() return; pthread_setspecific(m_currentThreadRegistrar, this); - Heap::Thread* thread = new Heap::Thread(pthread_self(), getCurrentPlatformThread(), currentThreadStackBase()); + Heap::Thread* thread = new Heap::Thread(pthread_self(), getCurrentPlatformThread(), m_globalData->stack().origin()); MutexLocker lock(m_registeredThreadsMutex); @@ -654,11 +481,15 @@ static inline bool isPossibleCell(void* p) void Heap::markConservatively(MarkStack& markStack, void* start, void* end) { +#if OS(WINCE) if (start > end) { void* tmp = start; start = end; end = tmp; } +#else + ASSERT(start <= end); +#endif ASSERT((static_cast<char*>(end) - static_cast<char*>(start)) < 0x1000000); ASSERT(isPointerAligned(start)); @@ -685,7 +516,6 @@ void Heap::markConservatively(MarkStack& markStack, void* start, void* end) if (m_heap.collectorBlock(block) != blockAddr) continue; markStack.append(reinterpret_cast<JSCell*>(xAsBits)); - markStack.drain(); } } } @@ -693,10 +523,8 @@ void Heap::markConservatively(MarkStack& markStack, void* start, void* end) void NEVER_INLINE Heap::markCurrentThreadConservativelyInternal(MarkStack& markStack) { - void* dummy; - void* stackPointer = &dummy; - void* stackBase = currentThreadStackBase(); - markConservatively(markStack, stackPointer, stackBase); + markConservatively(markStack, m_globalData->stack().current(), m_globalData->stack().origin()); + markStack.drain(); } #if COMPILER(GCC) @@ -859,9 +687,11 @@ void Heap::markOtherThreadConservatively(MarkStack& markStack, Thread* thread) // mark the thread's registers markConservatively(markStack, static_cast<void*>(®s), static_cast<void*>(reinterpret_cast<char*>(®s) + regSize)); + markStack.drain(); void* stackPointer = otherThreadStackPointer(regs); markConservatively(markStack, stackPointer, thread->stackBase); + markStack.drain(); resumeThread(thread->platformThread); } @@ -920,10 +750,10 @@ WeakGCHandle* Heap::addWeakGCHandle(JSCell* ptr) if (!weakGCHandlePool(i)->isFull()) return weakGCHandlePool(i)->allocate(ptr); - AlignedMemory<WeakGCHandlePool::poolSize> allocation = m_weakGCHandlePoolAllocator.allocate(); + PageAllocationAligned allocation = PageAllocationAligned::allocate(WeakGCHandlePool::poolSize, WeakGCHandlePool::poolSize, OSAllocator::JSGCHeapPages); m_weakGCHandlePools.append(allocation); - WeakGCHandlePool* pool = new (allocation) WeakGCHandlePool(); + WeakGCHandlePool* pool = new (allocation.base()) WeakGCHandlePool(); return pool->allocate(ptr); } @@ -958,6 +788,33 @@ void Heap::markProtectedObjects(MarkStack& markStack) } } +void Heap::pushTempSortVector(Vector<ValueStringPair>* tempVector) +{ + m_tempSortingVectors.append(tempVector); +} + +void Heap::popTempSortVector(Vector<ValueStringPair>* tempVector) +{ + ASSERT_UNUSED(tempVector, tempVector == m_tempSortingVectors.last()); + m_tempSortingVectors.removeLast(); +} + +void Heap::markTempSortVectors(MarkStack& markStack) +{ + typedef Vector<Vector<ValueStringPair>* > VectorOfValueStringVectors; + + 
VectorOfValueStringVectors::iterator end = m_tempSortingVectors.end(); + for (VectorOfValueStringVectors::iterator it = m_tempSortingVectors.begin(); it != end; ++it) { + Vector<ValueStringPair>* tempSortingVector = *it; + + Vector<ValueStringPair>::iterator vectorEnd = tempSortingVector->end(); + for (Vector<ValueStringPair>::iterator vectorIt = tempSortingVector->begin(); vectorIt != vectorEnd; ++vectorIt) + if (vectorIt->first) + markStack.append(vectorIt->first); + markStack.drain(); + } +} + void Heap::clearMarkBits() { for (size_t i = 0; i < m_heap.usedBlocks; ++i) @@ -1045,6 +902,9 @@ void Heap::markRoots() // Mark explicitly registered roots. markProtectedObjects(markStack); + + // Mark temporary vector for Array sorting + markTempSortVectors(markStack); // Mark misc. other roots. if (m_markListSet && m_markListSet->size()) @@ -1232,4 +1092,9 @@ void Heap::setActivityCallback(PassOwnPtr<GCActivityCallback> activityCallback) m_activityCallback = activityCallback; } +GCActivityCallback* Heap::activityCallback() +{ + return m_activityCallback.get(); +} + } // namespace JSC diff --git a/JavaScriptCore/runtime/Collector.h b/JavaScriptCore/runtime/Collector.h index 237c139..a4e2fe1 100644 --- a/JavaScriptCore/runtime/Collector.h +++ b/JavaScriptCore/runtime/Collector.h @@ -22,8 +22,8 @@ #ifndef Collector_h #define Collector_h -#include "AlignedMemoryAllocator.h" #include "GCHandle.h" +#include "JSValue.h" #include <stddef.h> #include <string.h> #include <wtf/Bitmap.h> @@ -33,6 +33,7 @@ #include <wtf/Noncopyable.h> #include <wtf/OwnPtr.h> #include <wtf/PageAllocation.h> +#include <wtf/PageAllocationAligned.h> #include <wtf/PassOwnPtr.h> #include <wtf/StdLibExtras.h> #include <wtf/Threading.h> @@ -63,13 +64,10 @@ namespace JSC { const size_t BLOCK_SIZE = 256 * 1024; // 256k #endif - typedef AlignedMemoryAllocator<BLOCK_SIZE> CollectorBlockAllocator; - typedef AlignedMemory<BLOCK_SIZE> AlignedCollectorBlock; - struct CollectorHeap { size_t nextBlock; size_t nextCell; - AlignedCollectorBlock* blocks; + PageAllocationAligned* blocks; void* nextNumber; @@ -98,6 +96,8 @@ namespace JSC { bool isBusy(); // true if an allocation or collection is in progress void collectAllGarbage(); + + GCActivityCallback* activityCallback(); void setActivityCallback(PassOwnPtr<GCActivityCallback>); static const size_t minExtraCost = 256; @@ -137,6 +137,9 @@ namespace JSC { void markConservatively(MarkStack&, void* start, void* end); + void pushTempSortVector(WTF::Vector<ValueStringPair>*); + void popTempSortVector(WTF::Vector<ValueStringPair>*); + HashSet<MarkedArgumentBuffer*>& markListSet() { if (!m_markListSet) m_markListSet = new HashSet<MarkedArgumentBuffer*>; return *m_markListSet; } JSGlobalData* globalData() const { return m_globalData; } @@ -171,6 +174,7 @@ namespace JSC { void markRoots(); void markProtectedObjects(MarkStack&); + void markTempSortVectors(MarkStack&); void markCurrentThreadConservatively(MarkStack&); void markCurrentThreadConservativelyInternal(MarkStack&); void markOtherThreadConservatively(MarkStack&, Thread*); @@ -184,7 +188,8 @@ namespace JSC { CollectorHeap m_heap; ProtectCountSet m_protectedValues; - WTF::Vector<AlignedMemory<WeakGCHandlePool::poolSize> > m_weakGCHandlePools; + WTF::Vector<PageAllocationAligned> m_weakGCHandlePools; + WTF::Vector<WTF::Vector<ValueStringPair>* > m_tempSortingVectors; HashSet<MarkedArgumentBuffer*>* m_markListSet; @@ -201,10 +206,6 @@ namespace JSC { pthread_key_t m_currentThreadRegistrar; #endif - // Allocates collector blocks with correct alignment 
- CollectorBlockAllocator m_blockallocator; - WeakGCHandlePool::Allocator m_weakGCHandlePoolAllocator; - JSGlobalData* m_globalData; }; diff --git a/JavaScriptCore/runtime/Executable.cpp b/JavaScriptCore/runtime/Executable.cpp index f229f96..c7262be 100644 --- a/JavaScriptCore/runtime/Executable.cpp +++ b/JavaScriptCore/runtime/Executable.cpp @@ -264,7 +264,7 @@ void FunctionExecutable::markAggregate(MarkStack& markStack) m_codeBlockForConstruct->markAggregate(markStack); } -void FunctionExecutable::recompile(ExecState*) +void FunctionExecutable::discardCode() { m_codeBlockForCall.clear(); m_codeBlockForConstruct.clear(); diff --git a/JavaScriptCore/runtime/Executable.h b/JavaScriptCore/runtime/Executable.h index 14ed927..544e487 100644 --- a/JavaScriptCore/runtime/Executable.h +++ b/JavaScriptCore/runtime/Executable.h @@ -348,7 +348,7 @@ namespace JSC { UString paramString() const; SharedSymbolTable* symbolTable() const { return m_symbolTable; } - void recompile(ExecState*); + void discardCode(); void markAggregate(MarkStack&); static PassRefPtr<FunctionExecutable> fromGlobalCode(const Identifier&, ExecState*, Debugger*, const SourceCode&, JSObject** exception); diff --git a/JavaScriptCore/runtime/GCActivityCallback.cpp b/JavaScriptCore/runtime/GCActivityCallback.cpp index 2f2c079..161abfb 100644 --- a/JavaScriptCore/runtime/GCActivityCallback.cpp +++ b/JavaScriptCore/runtime/GCActivityCallback.cpp @@ -46,5 +46,9 @@ void DefaultGCActivityCallback::operator()() { } +void DefaultGCActivityCallback::synchronize() +{ +} + } diff --git a/JavaScriptCore/runtime/GCActivityCallback.h b/JavaScriptCore/runtime/GCActivityCallback.h index 66d56e8..862b4df 100644 --- a/JavaScriptCore/runtime/GCActivityCallback.h +++ b/JavaScriptCore/runtime/GCActivityCallback.h @@ -40,6 +40,7 @@ class GCActivityCallback { public: virtual ~GCActivityCallback() {} virtual void operator()() {} + virtual void synchronize() {} protected: GCActivityCallback() {} @@ -55,6 +56,7 @@ public: ~DefaultGCActivityCallback(); void operator()(); + void synchronize(); private: OwnPtr<DefaultGCActivityCallbackPlatformData*> d; diff --git a/JavaScriptCore/runtime/GCActivityCallbackCF.cpp b/JavaScriptCore/runtime/GCActivityCallbackCF.cpp index 45329ca..7168a05 100644 --- a/JavaScriptCore/runtime/GCActivityCallbackCF.cpp +++ b/JavaScriptCore/runtime/GCActivityCallbackCF.cpp @@ -47,10 +47,12 @@ struct DefaultGCActivityCallbackPlatformData { static void trigger(CFRunLoopTimerRef, void *info); RetainPtr<CFRunLoopTimerRef> timer; + RetainPtr<CFRunLoopRef> runLoop; CFRunLoopTimerContext context; }; const CFTimeInterval decade = 60 * 60 * 24 * 365 * 10; +const CFTimeInterval triggerInterval = 2; // seconds void DefaultGCActivityCallbackPlatformData::trigger(CFRunLoopTimerRef, void *info) { @@ -65,21 +67,32 @@ DefaultGCActivityCallback::DefaultGCActivityCallback(Heap* heap) memset(&d->context, '\0', sizeof(CFRunLoopTimerContext)); d->context.info = heap; + d->runLoop = CFRunLoopGetCurrent(); d->timer.adoptCF(CFRunLoopTimerCreate(0, decade, decade, 0, 0, DefaultGCActivityCallbackPlatformData::trigger, &d->context)); - CFRunLoopAddTimer(CFRunLoopGetCurrent(), d->timer.get(), kCFRunLoopCommonModes); + CFRunLoopAddTimer(d->runLoop.get(), d->timer.get(), kCFRunLoopCommonModes); } DefaultGCActivityCallback::~DefaultGCActivityCallback() { - CFRunLoopRemoveTimer(CFRunLoopGetCurrent(), d->timer.get(), kCFRunLoopCommonModes); + CFRunLoopRemoveTimer(d->runLoop.get(), d->timer.get(), kCFRunLoopCommonModes); CFRunLoopTimerInvalidate(d->timer.get()); 
d->context.info = 0; + d->runLoop = 0; d->timer = 0; } void DefaultGCActivityCallback::operator()() { - CFRunLoopTimerSetNextFireDate(d->timer.get(), CFAbsoluteTimeGetCurrent() + 2); + CFRunLoopTimerSetNextFireDate(d->timer.get(), CFAbsoluteTimeGetCurrent() + triggerInterval); +} + +void DefaultGCActivityCallback::synchronize() +{ + if (CFRunLoopGetCurrent() == d->runLoop.get()) + return; + CFRunLoopRemoveTimer(d->runLoop.get(), d->timer.get(), kCFRunLoopCommonModes); + d->runLoop = CFRunLoopGetCurrent(); + CFRunLoopAddTimer(d->runLoop.get(), d->timer.get(), kCFRunLoopCommonModes); } } diff --git a/JavaScriptCore/runtime/GCHandle.cpp b/JavaScriptCore/runtime/GCHandle.cpp index 3331517..297de38 100644 --- a/JavaScriptCore/runtime/GCHandle.cpp +++ b/JavaScriptCore/runtime/GCHandle.cpp @@ -83,9 +83,4 @@ void WeakGCHandlePool::free(WeakGCHandle* handle) --m_entriesSize; } -void* WeakGCHandlePool::operator new(size_t, AlignedMemory<WeakGCHandlePool::poolSize>& allocation) -{ - return allocation.base(); -} - } diff --git a/JavaScriptCore/runtime/GCHandle.h b/JavaScriptCore/runtime/GCHandle.h index 38a7be9..8818f79 100644 --- a/JavaScriptCore/runtime/GCHandle.h +++ b/JavaScriptCore/runtime/GCHandle.h @@ -26,7 +26,7 @@ #ifndef GCHandle_h #define GCHandle_h -#include "AlignedMemoryAllocator.h" +#include <wtf/Assertions.h> namespace JSC { @@ -91,8 +91,6 @@ public: static const size_t poolMask = ~(poolSize - 1); static const size_t numPoolEntries = (poolSize - sizeof(Heap*) - 3 * sizeof(unsigned)) / sizeof(WeakGCHandle); - typedef AlignedMemoryAllocator<WeakGCHandlePool::poolSize> Allocator; - WeakGCHandlePool(); WeakGCHandle* allocate(JSCell* cell); @@ -106,8 +104,6 @@ public: void update(); - void* operator new(size_t, AlignedMemory<WeakGCHandlePool::poolSize>&); - private: Heap* m_heap; unsigned m_entriesSize; diff --git a/JavaScriptCore/runtime/JSArray.cpp b/JavaScriptCore/runtime/JSArray.cpp index b8b92f4..556a16e 100644 --- a/JavaScriptCore/runtime/JSArray.cpp +++ b/JavaScriptCore/runtime/JSArray.cpp @@ -874,8 +874,6 @@ static int compareNumbersForQSort(const void* a, const void* b) return (da > db) - (da < db); } -typedef std::pair<JSValue, UString> ValueStringPair; - static int compareByStringPairForQSort(const void* a, const void* b) { const ValueStringPair* va = static_cast<const ValueStringPair*>(a); @@ -939,6 +937,8 @@ void JSArray::sort(ExecState* exec) throwOutOfMemoryError(exec); return; } + + Heap::heap(this)->pushTempSortVector(&values); for (size_t i = 0; i < lengthNotIncludingUndefined; i++) { JSValue value = storage->m_vector[i]; @@ -946,9 +946,6 @@ void JSArray::sort(ExecState* exec) values[i].first = value; } - // FIXME: While calling these toString functions, the array could be mutated. - // In that case, objects pointed to by values in this vector might get garbage-collected! - // FIXME: The following loop continues to call toString on subsequent values even after // a toString call raises an exception. @@ -969,12 +966,18 @@ void JSArray::sort(ExecState* exec) qsort(values.begin(), values.size(), sizeof(ValueStringPair), compareByStringPairForQSort); #endif - // FIXME: If the toString function changed the length of the array, this might be - // modifying the vector incorrectly. - + // If the toString function changed the length of the array or vector storage, + // increase the length to handle the orignal number of actual values. 
+ if (m_vectorLength < lengthNotIncludingUndefined) + increaseVectorLength(lengthNotIncludingUndefined); + if (storage->m_length < lengthNotIncludingUndefined) + storage->m_length = lengthNotIncludingUndefined; + for (size_t i = 0; i < lengthNotIncludingUndefined; i++) storage->m_vector[i] = values[i].first; + Heap::heap(this)->popTempSortVector(&values); + checkConsistency(SortConsistencyCheck); } diff --git a/JavaScriptCore/runtime/JSArray.h b/JavaScriptCore/runtime/JSArray.h index 9e155d8..de28b65 100644 --- a/JavaScriptCore/runtime/JSArray.h +++ b/JavaScriptCore/runtime/JSArray.h @@ -222,6 +222,10 @@ namespace JSC { inline void MarkStack::drain() { +#if !ASSERT_DISABLED + ASSERT(!m_isDraining); + m_isDraining = true; +#endif while (!m_markSets.isEmpty() || !m_values.isEmpty()) { while (!m_markSets.isEmpty() && m_values.size() < 50) { ASSERT(!m_markSets.isEmpty()); @@ -260,6 +264,9 @@ namespace JSC { while (!m_values.isEmpty()) markChildren(m_values.removeLast()); } +#if !ASSERT_DISABLED + m_isDraining = false; +#endif } // Rule from ECMA 15.2 about what an array index is. diff --git a/JavaScriptCore/runtime/JSGlobalData.cpp b/JavaScriptCore/runtime/JSGlobalData.cpp index 9948877..aca995a 100644 --- a/JavaScriptCore/runtime/JSGlobalData.cpp +++ b/JavaScriptCore/runtime/JSGlobalData.cpp @@ -31,6 +31,7 @@ #include "ArgList.h" #include "Collector.h" +#include "CollectorHeapIterator.h" #include "CommonIdentifiers.h" #include "FunctionConstructor.h" #include "GetterSetter.h" @@ -153,6 +154,9 @@ JSGlobalData::JSGlobalData(GlobalDataType globalDataType, ThreadStackType thread , exclusiveThread(0) #endif { + if (globalDataType == Default) + m_stack = wtfThreadData().stack(); + #if PLATFORM(MAC) startProfilerServerIfNeeded(); #endif @@ -308,6 +312,22 @@ void JSGlobalData::dumpSampleData(ExecState* exec) interpreter->dumpSampleData(exec); } +void JSGlobalData::recompileAllJSFunctions() +{ + // If JavaScript is running, it's not safe to recompile, since we'll end + // up throwing away code that is live on the stack. + ASSERT(!dynamicGlobalObject); + + LiveObjectIterator it = heap.primaryHeapBegin(); + LiveObjectIterator heapEnd = heap.primaryHeapEnd(); + for ( ; it != heapEnd; ++it) { + if ((*it)->inherits(&JSFunction::info)) { + JSFunction* function = asFunction(*it); + if (!function->executable()->isHostFunction()) + function->jsExecutable()->discardCode(); + } + } +} #if ENABLE(REGEXP_TRACING) void JSGlobalData::addRegExpToTrace(PassRefPtr<RegExp> regExp) diff --git a/JavaScriptCore/runtime/JSGlobalData.h b/JavaScriptCore/runtime/JSGlobalData.h index 1819a0c..699f975 100644 --- a/JavaScriptCore/runtime/JSGlobalData.h +++ b/JavaScriptCore/runtime/JSGlobalData.h @@ -46,6 +46,7 @@ #include <wtf/HashMap.h> #include <wtf/RefCounted.h> #include <wtf/ThreadSpecific.h> +#include <wtf/WTFThreadData.h> #if ENABLE(REGEXP_TRACING) #include <wtf/ListHashSet.h> #endif @@ -169,6 +170,7 @@ namespace JSC { #if ENABLE(ASSEMBLER) ExecutableAllocator executableAllocator; + ExecutableAllocator regexAllocator; #endif #if !ENABLE(JIT) @@ -178,6 +180,14 @@ namespace JSC { #else bool canUseJIT() { return m_canUseJIT; } #endif + + const StackBounds& stack() + { + return (globalDataType == Default) + ? 
m_stack + : wtfThreadData().stack(); + } + Lexer* lexer; Parser* parser; Interpreter* interpreter; @@ -238,6 +248,7 @@ namespace JSC { void startSampling(); void stopSampling(); void dumpSampleData(ExecState* exec); + void recompileAllJSFunctions(); RegExpCache* regExpCache() { return m_regExpCache; } #if ENABLE(REGEXP_TRACING) void addRegExpToTrace(PassRefPtr<RegExp> regExp); @@ -250,6 +261,7 @@ namespace JSC { #if ENABLE(JIT) && ENABLE(INTERPRETER) bool m_canUseJIT; #endif + StackBounds m_stack; }; } // namespace JSC diff --git a/JavaScriptCore/runtime/JSGlobalObject.cpp b/JavaScriptCore/runtime/JSGlobalObject.cpp index a8fb7bf..408aea7 100644 --- a/JavaScriptCore/runtime/JSGlobalObject.cpp +++ b/JavaScriptCore/runtime/JSGlobalObject.cpp @@ -455,4 +455,22 @@ void JSGlobalObject::destroyJSGlobalObjectData(void* jsGlobalObjectData) delete static_cast<JSGlobalObjectData*>(jsGlobalObjectData); } +DynamicGlobalObjectScope::DynamicGlobalObjectScope(CallFrame* callFrame, JSGlobalObject* dynamicGlobalObject) + : m_dynamicGlobalObjectSlot(callFrame->globalData().dynamicGlobalObject) + , m_savedDynamicGlobalObject(m_dynamicGlobalObjectSlot) +{ + if (!m_dynamicGlobalObjectSlot) { +#if ENABLE(ASSEMBLER) + if (ExecutableAllocator::underMemoryPressure()) + callFrame->globalData().recompileAllJSFunctions(); +#endif + + m_dynamicGlobalObjectSlot = dynamicGlobalObject; + + // Reset the date cache between JS invocations to force the VM + // to observe time zone changes. + callFrame->globalData().resetDateCache(); + } +} + } // namespace JSC diff --git a/JavaScriptCore/runtime/JSGlobalObject.h b/JavaScriptCore/runtime/JSGlobalObject.h index 714999f..a22b0aa 100644 --- a/JavaScriptCore/runtime/JSGlobalObject.h +++ b/JavaScriptCore/runtime/JSGlobalObject.h @@ -467,18 +467,7 @@ namespace JSC { class DynamicGlobalObjectScope : public Noncopyable { public: - DynamicGlobalObjectScope(CallFrame* callFrame, JSGlobalObject* dynamicGlobalObject) - : m_dynamicGlobalObjectSlot(callFrame->globalData().dynamicGlobalObject) - , m_savedDynamicGlobalObject(m_dynamicGlobalObjectSlot) - { - if (!m_dynamicGlobalObjectSlot) { - m_dynamicGlobalObjectSlot = dynamicGlobalObject; - - // Reset the date cache between JS invocations to force the VM - // to observe time zone changes. - callFrame->globalData().resetDateCache(); - } - } + DynamicGlobalObjectScope(CallFrame* callFrame, JSGlobalObject* dynamicGlobalObject); ~DynamicGlobalObjectScope() { diff --git a/JavaScriptCore/runtime/JSString.cpp b/JavaScriptCore/runtime/JSString.cpp index 340a898..ba28139 100644 --- a/JavaScriptCore/runtime/JSString.cpp +++ b/JavaScriptCore/runtime/JSString.cpp @@ -31,6 +31,8 @@ #include "StringPrototype.h" namespace JSC { + +static const unsigned substringFromRopeCutoff = 4; // Overview: this methods converts a JSString from holding a string in rope form // down to a simple UString representation. It does so by building up the string @@ -105,6 +107,60 @@ void JSString::resolveRope(ExecState* exec) const } } } + +// This function construsts a substring out of a rope without flattening by reusing the existing fibers. +// This can reduce memory usage substantially. Since traversing ropes is slow the function will revert +// back to flattening if the rope turns out to be long. 
+JSString* JSString::substringFromRope(ExecState* exec, unsigned substringStart, unsigned substringLength) +{ + ASSERT(isRope()); + ASSERT(substringLength); + + JSGlobalData* globalData = &exec->globalData(); + + UString substringFibers[3]; + + unsigned fiberCount = 0; + unsigned substringFiberCount = 0; + unsigned substringEnd = substringStart + substringLength; + unsigned fiberEnd = 0; + + RopeIterator end; + for (RopeIterator it(m_other.m_fibers.data(), m_fiberCount); it != end; ++it) { + ++fiberCount; + StringImpl* fiberString = *it; + unsigned fiberStart = fiberEnd; + fiberEnd = fiberStart + fiberString->length(); + if (fiberEnd <= substringStart) + continue; + unsigned copyStart = std::max(substringStart, fiberStart); + unsigned copyEnd = std::min(substringEnd, fiberEnd); + if (copyStart == fiberStart && copyEnd == fiberEnd) + substringFibers[substringFiberCount++] = UString(fiberString); + else + substringFibers[substringFiberCount++] = UString(StringImpl::create(fiberString, copyStart - fiberStart, copyEnd - copyStart)); + if (fiberEnd >= substringEnd) + break; + if (fiberCount > substringFromRopeCutoff || substringFiberCount >= 3) { + // This turned out to be a really inefficient rope. Just flatten it. + resolveRope(exec); + return jsSubstring(&exec->globalData(), m_value, substringStart, substringLength); + } + } + ASSERT(substringFiberCount && substringFiberCount <= 3); + + if (substringLength == 1) { + ASSERT(substringFiberCount == 1); + UChar c = substringFibers[0].characters()[0]; + if (c <= 0xFF) + return globalData->smallStrings.singleCharacterString(globalData, c); + } + if (substringFiberCount == 1) + return new (globalData) JSString(globalData, substringFibers[0]); + if (substringFiberCount == 2) + return new (globalData) JSString(globalData, substringFibers[0], substringFibers[1]); + return new (globalData) JSString(globalData, substringFibers[0], substringFibers[1], substringFibers[2]); +} JSValue JSString::replaceCharacter(ExecState* exec, UChar character, const UString& replacement) { diff --git a/JavaScriptCore/runtime/JSString.h b/JavaScriptCore/runtime/JSString.h index 51b9f2d..fefffde 100644 --- a/JavaScriptCore/runtime/JSString.h +++ b/JavaScriptCore/runtime/JSString.h @@ -356,6 +356,7 @@ namespace JSC { } void resolveRope(ExecState*) const; + JSString* substringFromRope(ExecState*, unsigned offset, unsigned length); void appendStringInConstruct(unsigned& index, const UString& string) { @@ -435,6 +436,7 @@ namespace JSC { friend JSValue jsString(ExecState* exec, Register* strings, unsigned count); friend JSValue jsString(ExecState* exec, JSValue thisValue); friend JSString* jsStringWithFinalizer(ExecState*, const UString&, JSStringFinalizerCallback callback, void* context); + friend JSString* jsSubstring(ExecState* exec, JSString* s, unsigned offset, unsigned length); }; JSString* asString(JSValue); @@ -519,6 +521,19 @@ namespace JSC { JSGlobalData* globalData = &exec->globalData(); return fixupVPtr(globalData, new (globalData) JSString(globalData, s, callback, context)); } + + inline JSString* jsSubstring(ExecState* exec, JSString* s, unsigned offset, unsigned length) + { + ASSERT(offset <= static_cast<unsigned>(s->length())); + ASSERT(length <= static_cast<unsigned>(s->length())); + ASSERT(offset + length <= static_cast<unsigned>(s->length())); + JSGlobalData* globalData = &exec->globalData(); + if (!length) + return globalData->smallStrings.emptyString(globalData); + if (s->isRope()) + return s->substringFromRope(exec, offset, length); + return 
jsSubstring(globalData, s->m_value, offset, length); + } inline JSString* jsSubstring(JSGlobalData* globalData, const UString& s, unsigned offset, unsigned length) { diff --git a/JavaScriptCore/runtime/JSValue.h b/JavaScriptCore/runtime/JSValue.h index cad9662..dc54f40 100644 --- a/JavaScriptCore/runtime/JSValue.h +++ b/JavaScriptCore/runtime/JSValue.h @@ -763,7 +763,8 @@ namespace JSC { return asValue() == jsNull(); } #endif // USE(JSVALUE32_64) - + + typedef std::pair<JSValue, UString> ValueStringPair; } // namespace JSC #endif // JSValue_h diff --git a/JavaScriptCore/runtime/MarkStack.h b/JavaScriptCore/runtime/MarkStack.h index c551bac..7bccadf 100644 --- a/JavaScriptCore/runtime/MarkStack.h +++ b/JavaScriptCore/runtime/MarkStack.h @@ -28,6 +28,7 @@ #include "JSValue.h" #include <wtf/Noncopyable.h> +#include <wtf/OSAllocator.h> namespace JSC { @@ -40,8 +41,9 @@ namespace JSC { public: MarkStack(void* jsArrayVPtr) : m_jsArrayVPtr(jsArrayVPtr) -#ifndef NDEBUG +#if !ASSERT_DISABLED , m_isCheckingForDefaultMarkViolation(false) + , m_isDraining(false) #endif { } @@ -85,8 +87,8 @@ namespace JSC { MarkSetProperties m_properties; }; - static void* allocateStack(size_t size); - static void releaseStack(void* addr, size_t size); + static void* allocateStack(size_t size) { return OSAllocator::reserveAndCommit(size); } + static void releaseStack(void* addr, size_t size) { OSAllocator::decommitAndRelease(addr, size); } static void initializePagesize(); static size_t pageSize() @@ -177,9 +179,10 @@ namespace JSC { MarkStackArray<JSCell*> m_values; static size_t s_pageSize; -#ifndef NDEBUG +#if !ASSERT_DISABLED public: bool m_isCheckingForDefaultMarkViolation; + bool m_isDraining; #endif }; } diff --git a/JavaScriptCore/runtime/MarkStackNone.cpp b/JavaScriptCore/runtime/MarkStackNone.cpp deleted file mode 100644 index b1ff48b..0000000 --- a/JavaScriptCore/runtime/MarkStackNone.cpp +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Copyright (C) 2009 Company 100, Inc. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY - * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -#include "config.h" - -#include "MarkStack.h" - -#include "FastMalloc.h" - -namespace JSC { - -void MarkStack::initializePagesize() -{ - MarkStack::s_pageSize = 4096; -} - -void* MarkStack::allocateStack(size_t size) -{ - return fastMalloc(size); -} - -void MarkStack::releaseStack(void* addr, size_t) -{ - return fastFree(addr); -} - -} diff --git a/JavaScriptCore/runtime/MarkStackPosix.cpp b/JavaScriptCore/runtime/MarkStackPosix.cpp index c28bc0d..2a5b298 100644 --- a/JavaScriptCore/runtime/MarkStackPosix.cpp +++ b/JavaScriptCore/runtime/MarkStackPosix.cpp @@ -38,15 +38,6 @@ void MarkStack::initializePagesize() MarkStack::s_pageSize = getpagesize(); } -void* MarkStack::allocateStack(size_t size) -{ - return mmap(0, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0); -} -void MarkStack::releaseStack(void* addr, size_t size) -{ - munmap(addr, size); -} - } #endif diff --git a/JavaScriptCore/runtime/MarkStackSymbian.cpp b/JavaScriptCore/runtime/MarkStackSymbian.cpp index bda14ac..a3893d7 100644 --- a/JavaScriptCore/runtime/MarkStackSymbian.cpp +++ b/JavaScriptCore/runtime/MarkStackSymbian.cpp @@ -33,16 +33,6 @@ void MarkStack::initializePagesize() MarkStack::s_pageSize = page_size; } -void* MarkStack::allocateStack(size_t size) -{ - return fastMalloc(size); -} - -void MarkStack::releaseStack(void* addr, size_t size) -{ - return fastFree(addr); -} - } #endif diff --git a/JavaScriptCore/runtime/MarkStackWin.cpp b/JavaScriptCore/runtime/MarkStackWin.cpp index a171c78..2d2a1b3 100644 --- a/JavaScriptCore/runtime/MarkStackWin.cpp +++ b/JavaScriptCore/runtime/MarkStackWin.cpp @@ -39,17 +39,6 @@ void MarkStack::initializePagesize() MarkStack::s_pageSize = system_info.dwPageSize; } -void* MarkStack::allocateStack(size_t size) -{ - return VirtualAlloc(0, size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE); -} -void MarkStack::releaseStack(void* addr, size_t) -{ - // According to http://msdn.microsoft.com/en-us/library/aa366892(VS.85).aspx, - // dwSize must be 0 if dwFreeType is MEM_RELEASE. - VirtualFree(addr, 0, MEM_RELEASE); -} - } #endif diff --git a/JavaScriptCore/runtime/RegExp.cpp b/JavaScriptCore/runtime/RegExp.cpp index a33fa91..9b2e3f3 100644 --- a/JavaScriptCore/runtime/RegExp.cpp +++ b/JavaScriptCore/runtime/RegExp.cpp @@ -2,6 +2,7 @@ * Copyright (C) 1999-2001, 2004 Harri Porten (porten@kde.org) * Copyright (c) 2007, 2008 Apple Inc. All rights reserved. * Copyright (C) 2009 Torch Mobile, Inc. 
+ * Copyright (C) 2010 Peter Varga (pvarga@inf.u-szeged.hu), University of Szeged * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public @@ -29,24 +30,21 @@ #include <wtf/OwnArrayPtr.h> #include "yarr/RegexCompiler.h" -#if ENABLE(YARR_JIT) #include "yarr/RegexJIT.h" -#else #include "yarr/RegexInterpreter.h" -#endif +#include "yarr/RegexPattern.h" namespace JSC { struct RegExpRepresentation { #if ENABLE(YARR_JIT) Yarr::RegexCodeBlock m_regExpJITCode; -#else - OwnPtr<Yarr::BytecodePattern> m_regExpBytecode; #endif + OwnPtr<Yarr::BytecodePattern> m_regExpBytecode; }; -inline RegExp::RegExp(JSGlobalData* globalData, const UString& pattern, const UString& flags) - : m_pattern(pattern) +inline RegExp::RegExp(JSGlobalData* globalData, const UString& patternString, const UString& flags) + : m_patternString(patternString) , m_flagBits(0) , m_constructionError(0) , m_numSubpatterns(0) @@ -66,29 +64,42 @@ inline RegExp::RegExp(JSGlobalData* globalData, const UString& pattern, const US if (flags.find('m') != notFound) m_flagBits |= Multiline; } - compile(globalData); + + m_state = compile(globalData); } RegExp::~RegExp() { } -PassRefPtr<RegExp> RegExp::create(JSGlobalData* globalData, const UString& pattern, const UString& flags) +PassRefPtr<RegExp> RegExp::create(JSGlobalData* globalData, const UString& patternString, const UString& flags) { - RefPtr<RegExp> res = adoptRef(new RegExp(globalData, pattern, flags)); + RefPtr<RegExp> res = adoptRef(new RegExp(globalData, patternString, flags)); #if ENABLE(REGEXP_TRACING) globalData->addRegExpToTrace(res); #endif return res.release(); } -void RegExp::compile(JSGlobalData* globalData) +RegExp::RegExpState RegExp::compile(JSGlobalData* globalData) { + Yarr::RegexPattern pattern(ignoreCase(), multiline()); + + if ((m_constructionError = Yarr::compileRegex(m_patternString, pattern))) + return ParseError; + + m_numSubpatterns = pattern.m_numSubpatterns; + #if ENABLE(YARR_JIT) - Yarr::jitCompileRegex(globalData, m_representation->m_regExpJITCode, m_pattern, m_numSubpatterns, m_constructionError, &globalData->m_regexAllocator, ignoreCase(), multiline()); -#else - m_representation->m_regExpBytecode = Yarr::byteCompileRegex(m_pattern, m_numSubpatterns, m_constructionError, &globalData->m_regexAllocator, ignoreCase(), multiline()); + if (!pattern.m_containsBackreferences && globalData->canUseJIT()) { + Yarr::jitCompileRegex(pattern, globalData, m_representation->m_regExpJITCode); + if (!m_representation->m_regExpJITCode.isFallBack()) + return JITCode; + } #endif + + m_representation->m_regExpBytecode = Yarr::byteCompileRegex(pattern, &globalData->m_regexAllocator); + return ByteCode; } int RegExp::match(const UString& s, int startOffset, Vector<int, 32>* ovector) @@ -103,11 +114,7 @@ int RegExp::match(const UString& s, int startOffset, Vector<int, 32>* ovector) if (static_cast<unsigned>(startOffset) > s.length() || s.isNull()) return -1; -#if ENABLE(YARR_JIT) - if (!!m_representation->m_regExpJITCode) { -#else - if (m_representation->m_regExpBytecode) { -#endif + if (m_state != ParseError) { int offsetVectorSize = (m_numSubpatterns + 1) * 2; int* offsetVector; Vector<int, 32> nonReturnedOvector; @@ -126,11 +133,13 @@ int RegExp::match(const UString& s, int startOffset, Vector<int, 32>* ovector) for (unsigned j = 0, i = 0; i < m_numSubpatterns + 1; j += 2, i++) offsetVector[j] = -1; + int result; #if ENABLE(YARR_JIT) - int result = Yarr::executeRegex(m_representation->m_regExpJITCode, 
s.characters(), startOffset, s.length(), offsetVector); -#else - int result = Yarr::interpretRegex(m_representation->m_regExpBytecode.get(), s.characters(), startOffset, s.length(), offsetVector); + if (m_state == JITCode) + result = Yarr::executeRegex(m_representation->m_regExpJITCode, s.characters(), startOffset, s.length(), offsetVector); + else #endif + result = Yarr::interpretRegex(m_representation->m_regExpBytecode.get(), s.characters(), startOffset, s.length(), offsetVector); ASSERT(result >= -1);; @@ -162,7 +171,7 @@ int RegExp::match(const UString& s, int startOffset, Vector<int, 32>* ovector) Yarr::RegexCodeBlock& codeBlock = m_representation->m_regExpJITCode; char jitAddr[20]; - if (codeBlock.getFallback()) + if (m_state == JITCode) sprintf(jitAddr, "fallback"); else sprintf(jitAddr, "0x%014lx", reinterpret_cast<unsigned long int>(codeBlock.getAddr())); diff --git a/JavaScriptCore/runtime/RegExp.h b/JavaScriptCore/runtime/RegExp.h index e6e2fbc..8f33f57 100644 --- a/JavaScriptCore/runtime/RegExp.h +++ b/JavaScriptCore/runtime/RegExp.h @@ -41,7 +41,7 @@ namespace JSC { bool ignoreCase() const { return m_flagBits & IgnoreCase; } bool multiline() const { return m_flagBits & Multiline; } - const UString& pattern() const { return m_pattern; } + const UString& pattern() const { return m_patternString; } bool isValid() const { return !m_constructionError; } const char* errorMessage() const { return m_constructionError; } @@ -56,11 +56,16 @@ namespace JSC { private: RegExp(JSGlobalData* globalData, const UString& pattern, const UString& flags); - void compile(JSGlobalData*); + enum RegExpState { + ParseError, + JITCode, + ByteCode + } m_state; - enum FlagBits { Global = 1, IgnoreCase = 2, Multiline = 4 }; + RegExpState compile(JSGlobalData*); - UString m_pattern; // FIXME: Just decompile m_regExp instead of storing this. + enum FlagBits { Global = 1, IgnoreCase = 2, Multiline = 4 }; + UString m_patternString; int m_flagBits; const char* m_constructionError; unsigned m_numSubpatterns; diff --git a/JavaScriptCore/runtime/RegExpCache.h b/JavaScriptCore/runtime/RegExpCache.h index e897b43..b5b637f 100644 --- a/JavaScriptCore/runtime/RegExpCache.h +++ b/JavaScriptCore/runtime/RegExpCache.h @@ -47,7 +47,14 @@ public: private: static const unsigned maxCacheablePatternLength = 256; + +#if PLATFORM(IOS) + // The RegExpCache can currently hold onto multiple Mb of memory; + // as a short-term fix some embedded platforms may wish to reduce the cache size. 
+ static const int maxCacheableEntries = 32; +#else static const int maxCacheableEntries = 256; +#endif FixedArray<RegExpKey, maxCacheableEntries> patternKeyArray; RegExpCacheMap m_cacheMap; diff --git a/JavaScriptCore/runtime/StringPrototype.cpp b/JavaScriptCore/runtime/StringPrototype.cpp index b5ea8fa..8b3d056 100644 --- a/JavaScriptCore/runtime/StringPrototype.cpp +++ b/JavaScriptCore/runtime/StringPrototype.cpp @@ -772,8 +772,16 @@ EncodedJSValue JSC_HOST_CALL stringProtoFuncSubstr(ExecState* exec) JSValue thisValue = exec->hostThisValue(); if (thisValue.isUndefinedOrNull()) // CheckObjectCoercible return throwVMTypeError(exec); - UString s = thisValue.toThisString(exec); - int len = s.length(); + unsigned len; + JSString* jsString = 0; + UString uString; + if (thisValue.isString()) { + jsString = static_cast<JSString*>(thisValue.asCell()); + len = jsString->length(); + } else { + uString = thisValue.toThisObject(exec)->toString(exec); + len = uString.length(); + } JSValue a0 = exec->argument(0); JSValue a1 = exec->argument(1); @@ -789,7 +797,11 @@ EncodedJSValue JSC_HOST_CALL stringProtoFuncSubstr(ExecState* exec) } if (start + length > len) length = len - start; - return JSValue::encode(jsSubstring(exec, s, static_cast<unsigned>(start), static_cast<unsigned>(length))); + unsigned substringStart = static_cast<unsigned>(start); + unsigned substringLength = static_cast<unsigned>(length); + if (jsString) + return JSValue::encode(jsSubstring(exec, jsString, substringStart, substringLength)); + return JSValue::encode(jsSubstring(exec, uString, substringStart, substringLength)); } EncodedJSValue JSC_HOST_CALL stringProtoFuncSubstring(ExecState* exec) @@ -797,8 +809,16 @@ EncodedJSValue JSC_HOST_CALL stringProtoFuncSubstring(ExecState* exec) JSValue thisValue = exec->hostThisValue(); if (thisValue.isUndefinedOrNull()) // CheckObjectCoercible return throwVMTypeError(exec); - UString s = thisValue.toThisString(exec); - int len = s.length(); + int len; + JSString* jsString = 0; + UString uString; + if (thisValue.isString()) { + jsString = static_cast<JSString*>(thisValue.asCell()); + len = jsString->length(); + } else { + uString = thisValue.toThisObject(exec)->toString(exec); + len = uString.length(); + } JSValue a0 = exec->argument(0); JSValue a1 = exec->argument(1); @@ -823,7 +843,11 @@ EncodedJSValue JSC_HOST_CALL stringProtoFuncSubstring(ExecState* exec) end = start; start = temp; } - return JSValue::encode(jsSubstring(exec, s, static_cast<unsigned>(start), static_cast<unsigned>(end) - static_cast<unsigned>(start))); + unsigned substringStart = static_cast<unsigned>(start); + unsigned substringLength = static_cast<unsigned>(end) - substringStart; + if (jsString) + return JSValue::encode(jsSubstring(exec, jsString, substringStart, substringLength)); + return JSValue::encode(jsSubstring(exec, uString, substringStart, substringLength)); } EncodedJSValue JSC_HOST_CALL stringProtoFuncToLowerCase(ExecState* exec) diff --git a/JavaScriptCore/runtime/Structure.cpp b/JavaScriptCore/runtime/Structure.cpp index d06a239..0179eed 100644 --- a/JavaScriptCore/runtime/Structure.cpp +++ b/JavaScriptCore/runtime/Structure.cpp @@ -233,6 +233,7 @@ Structure::Structure(JSValue prototype, const TypeInfo& typeInfo, unsigned anony , m_dictionaryKind(NoneDictionaryKind) , m_isPinnedPropertyTable(false) , m_hasGetterSetterProperties(false) + , m_hasNonEnumerableProperties(false) , m_attributesInPrevious(0) , m_specificFunctionThrashCount(0) , m_anonymousSlotCount(anonymousSlotCount) |
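The first change above removes the custom AlignedMemoryAllocator and has Heap::allocateBlock call PageAllocationAligned::allocate(BLOCK_SIZE, BLOCK_SIZE, OSAllocator::JSGCHeapPages) directly. The alignment matters because the conservative marker maps an arbitrary interior pointer back to its collector block by masking off the low bits. A minimal standalone sketch of that invariant, using std::aligned_alloc as a stand-in for the page allocator (illustrative only, not JSC code):

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstdlib>

static const std::size_t BLOCK_SIZE = 256 * 1024; // matches Collector.h

int main()
{
    // std::aligned_alloc stands in for PageAllocationAligned::allocate here;
    // both return a block whose address is a multiple of BLOCK_SIZE.
    void* block = std::aligned_alloc(BLOCK_SIZE, BLOCK_SIZE);
    if (!block)
        return 1;

    // A "cell" pointer somewhere inside the block.
    char* cell = static_cast<char*>(block) + 4096;

    // Masking the low bits recovers the owning block, which is what the
    // conservative marker relies on when testing candidate pointers.
    uintptr_t mask = ~(static_cast<uintptr_t>(BLOCK_SIZE) - 1);
    void* owner = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(cell) & mask);
    std::printf("block %p, recovered %p, match %d\n", block, owner, owner == block);

    std::free(block);
    return 0;
}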
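Heap::markConservatively now expects an already-ordered range (the start/end swap is kept only for WINCE) taken from the thread's StackBounds via m_globalData->stack(), and the callers drain the mark stack afterwards instead of draining per candidate. A simplified, self-contained sketch of the conservative scan itself, with a plain buffer standing in for the machine stack and an ad-hoc isPossibleCell check (names here are illustrative, not JSC's):

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

// Stand-in for Heap::isPossibleCell: inside the heap range and pointer-aligned.
bool isPossibleCell(void* heapBase, std::size_t heapSize, uintptr_t word)
{
    uintptr_t base = reinterpret_cast<uintptr_t>(heapBase);
    return word >= base && word < base + heapSize && !(word & (sizeof(void*) - 1));
}

void markConservatively(void* heapBase, std::size_t heapSize,
                        const uintptr_t* start, const uintptr_t* end,
                        std::vector<uintptr_t>& roots)
{
    // The caller guarantees start <= end (taken from StackBounds), so no swap
    // is needed here; everything that could be a cell pointer becomes a root.
    for (const uintptr_t* p = start; p != end; ++p)
        if (isPossibleCell(heapBase, heapSize, *p))
            roots.push_back(*p);
}

int main()
{
    alignas(sizeof(void*)) static char heap[1024];
    uintptr_t fakeStack[4] = {
        0,
        reinterpret_cast<uintptr_t>(heap) + 64,  // looks like a cell: marked
        12345,                                   // misaligned small integer: ignored
        reinterpret_cast<uintptr_t>(heap) + 512, // looks like a cell: marked
    };

    std::vector<uintptr_t> roots;
    markConservatively(heap, sizeof(heap), fakeStack, fakeStack + 4, roots);
    std::cout << roots.size() << " conservative roots\n"; // prints 2
    return 0;
}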
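JSArray::sort previously held its ValueStringPair vector where a collection triggered from a toString call could not see it; the diff registers the vector with the heap via pushTempSortVector/popTempSortVector and walks it in markTempSortVectors during marking. A simplified standalone model of that rooting pattern, with plain structs in place of JSValue and MarkStack (assumptions, not JSC's real types):

#include <iostream>
#include <string>
#include <utility>
#include <vector>

struct Value { int id; bool marked; };
typedef std::pair<Value*, std::string> ValueStringPair;

struct Heap {
    std::vector<std::vector<ValueStringPair>*> tempSortingVectors;

    void pushTempSortVector(std::vector<ValueStringPair>* v) { tempSortingVectors.push_back(v); }

    void popTempSortVector(std::vector<ValueStringPair>* v)
    {
        // Push/pop must nest, mirroring the ASSERT in Heap::popTempSortVector.
        if (!tempSortingVectors.empty() && tempSortingVectors.back() == v)
            tempSortingVectors.pop_back();
    }

    // Called from the mark phase: values held by an in-progress sort stay live.
    void markTempSortVectors()
    {
        for (std::vector<ValueStringPair>* vec : tempSortingVectors)
            for (ValueStringPair& pair : *vec)
                if (pair.first)
                    pair.first->marked = true;
    }
};

int main()
{
    Heap heap;
    Value a = { 1, false };
    Value b = { 2, false };
    std::vector<ValueStringPair> values;
    values.push_back(ValueStringPair(&a, std::string()));
    values.push_back(ValueStringPair(&b, std::string()));

    heap.pushTempSortVector(&values);
    heap.markTempSortVectors(); // a collection during toString would still mark a and b
    heap.popTempSortVector(&values);

    std::cout << a.marked << b.marked << "\n"; // prints 11
    return 0;
}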
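JSString::substringFromRope builds a substring by reusing up to three existing rope fibers, trimmed at the edges, and falls back to flattening once the walk passes substringFromRopeCutoff fibers. A standalone sketch of the same strategy over std::string fibers (the real code additionally special-cases single characters and routes through the JSGlobalData small-string cache):

#include <algorithm>
#include <iostream>
#include <string>
#include <vector>

static const unsigned substringFromRopeCutoff = 4;

std::string flatten(const std::vector<std::string>& fibers)
{
    std::string s;
    for (std::size_t i = 0; i < fibers.size(); ++i)
        s += fibers[i];
    return s;
}

std::vector<std::string> substringFromRope(const std::vector<std::string>& fibers,
                                           unsigned start, unsigned length)
{
    std::vector<std::string> result;
    unsigned end = start + length;
    unsigned fiberEnd = 0;
    unsigned fiberCount = 0;

    for (std::size_t i = 0; i < fibers.size(); ++i) {
        const std::string& fiber = fibers[i];
        ++fiberCount;
        unsigned fiberStart = fiberEnd;
        fiberEnd = fiberStart + static_cast<unsigned>(fiber.size());
        if (fiberEnd <= start)
            continue;
        unsigned copyStart = std::max(start, fiberStart);
        unsigned copyEnd = std::min(end, fiberEnd);
        // Reuse the fiber, trimmed to the part that overlaps the request.
        result.push_back(fiber.substr(copyStart - fiberStart, copyEnd - copyStart));
        if (fiberEnd >= end)
            return result;
        if (fiberCount > substringFromRopeCutoff || result.size() >= 3)
            return std::vector<std::string>(1, flatten(fibers).substr(start, length)); // inefficient rope: flatten
    }
    return result;
}

int main()
{
    std::vector<std::string> rope;
    rope.push_back("Java");
    rope.push_back("Script");
    rope.push_back("Core");
    std::vector<std::string> sub = substringFromRope(rope, 2, 8);
    for (std::size_t i = 0; i < sub.size(); ++i)
        std::cout << '[' << sub[i] << ']';
    std::cout << '\n'; // prints [va][Script]
    return 0;
}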
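RegExp::compile now records one of three outcomes (ParseError, JITCode, ByteCode): the YARR JIT is attempted only when the pattern parses, contains no backreferences, and the JIT is available, and the bytecode interpreter remains the execution path otherwise. A standalone sketch of that dispatch, with stub predicates standing in for the YARR API (illustrative assumptions only):

#include <iostream>
#include <string>

enum RegExpState { ParseError, JITCode, ByteCode };

struct PatternInfo {
    bool parsed;
    bool containsBackreferences;
};

PatternInfo parsePattern(const std::string& pattern)
{
    // Stand-in for Yarr::compileRegex: treat an empty pattern as a parse error
    // and a "\1" occurrence as a backreference.
    PatternInfo info = { !pattern.empty(), pattern.find("\\1") != std::string::npos };
    return info;
}

RegExpState compile(const std::string& patternString, bool canUseJIT)
{
    PatternInfo pattern = parsePattern(patternString);
    if (!pattern.parsed)
        return ParseError;
    if (!pattern.containsBackreferences && canUseJIT)
        return JITCode;   // jit-compile; the real code still falls back if that fails
    return ByteCode;      // byteCompileRegex path
}

int main()
{
    std::cout << compile("", true) << ' '        // 0: ParseError
              << compile("(a)\\1", true) << ' '  // 2: ByteCode (backreference)
              << compile("a+b", true) << '\n';   // 1: JITCode
    return 0;
}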
