Diffstat (limited to 'JavaScriptCore/wtf/FastMalloc.cpp')
-rw-r--r--   JavaScriptCore/wtf/FastMalloc.cpp | 411
1 file changed, 236 insertions(+), 175 deletions(-)
diff --git a/JavaScriptCore/wtf/FastMalloc.cpp b/JavaScriptCore/wtf/FastMalloc.cpp
index 79d2bfb..bbbdaf2 100644
--- a/JavaScriptCore/wtf/FastMalloc.cpp
+++ b/JavaScriptCore/wtf/FastMalloc.cpp
@@ -82,6 +82,7 @@
 #if ENABLE(JSC_MULTIPLE_THREADS)
 #include <pthread.h>
 #endif
+#include <wtf/StdLibExtras.h>
 
 #ifndef NO_TCMALLOC_SAMPLES
 #ifdef WTF_CHANGES
@@ -204,6 +205,16 @@ TryMallocReturnValue tryFastZeroedMalloc(size_t n)
 
 #if FORCE_SYSTEM_MALLOC
 
+#if PLATFORM(BREWMP)
+#include "brew/SystemMallocBrew.h"
+#endif
+
+#if OS(DARWIN)
+#include <malloc/malloc.h>
+#elif COMPILER(MSVC)
+#include <malloc.h>
+#endif
+
 namespace WTF {
 
 TryMallocReturnValue tryFastMalloc(size_t n)
@@ -365,10 +376,22 @@ void releaseFastMallocFreeMemory() { }
 
 FastMallocStatistics fastMallocStatistics()
 {
-    FastMallocStatistics statistics = { 0, 0, 0, 0 };
+    FastMallocStatistics statistics = { 0, 0, 0 };
     return statistics;
 }
 
+size_t fastMallocSize(const void* p)
+{
+#if OS(DARWIN)
+    return malloc_size(p);
+#elif COMPILER(MSVC) && !PLATFORM(BREWMP)
+    // Brew MP uses its own memory allocator, so _msize does not work on the Brew MP simulator.
+    return _msize(const_cast<void*>(p));
+#else
+    return 1;
+#endif
+}
+
 } // namespace WTF
 
 #if OS(DARWIN)
@@ -394,24 +417,25 @@ extern "C" const int jscore_fastmalloc_introspection = 0;
 #include "TCSpinLock.h"
 #include "TCSystemAlloc.h"
 #include <algorithm>
-#include <errno.h>
 #include <limits>
-#include <new>
 #include <pthread.h>
 #include <stdarg.h>
 #include <stddef.h>
 #include <stdio.h>
+#if HAVE(ERRNO_H)
+#include <errno.h>
+#endif
 #if OS(UNIX)
 #include <unistd.h>
 #endif
-#if COMPILER(MSVC)
+#if OS(WINDOWS)
 #ifndef WIN32_LEAN_AND_MEAN
 #define WIN32_LEAN_AND_MEAN
 #endif
 #include <windows.h>
 #endif
 
-#if WTF_CHANGES
+#ifdef WTF_CHANGES
 
 #if OS(DARWIN)
 #include "MallocZoneSupport.h"
@@ -460,7 +484,7 @@ namespace WTF {
 #define CHECK_CONDITION ASSERT
 
 #if OS(DARWIN)
-class Span;
+struct Span;
 class TCMalloc_Central_FreeListPadded;
 class TCMalloc_PageHeap;
 class TCMalloc_ThreadCache;
@@ -995,7 +1019,7 @@ class PageHeapAllocator {
       if (!new_allocation)
         CRASH();
 
-      *(void**)new_allocation = allocated_regions_;
+      *reinterpret_cast_ptr<void**>(new_allocation) = allocated_regions_;
       allocated_regions_ = new_allocation;
       free_area_ = new_allocation + kAlignedSize;
       free_avail_ = kAllocIncrement - kAlignedSize;
@@ -1232,29 +1256,34 @@ template <> class MapSelector<32> {
 // -------------------------------------------------------------------------
 
 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
-// The central page heap collects spans of memory that have been deleted but are still committed until they are released
-// back to the system. We use a background thread to periodically scan the list of free spans and release some back to the
-// system. Every 5 seconds, the background thread wakes up and does the following:
-// - Check if we needed to commit memory in the last 5 seconds. If so, skip this scavenge because it's a sign that we are short
-// of free committed pages and so we should not release them back to the system yet.
-// - Otherwise, go through the list of free spans (from largest to smallest) and release up to a fraction of the free committed pages
-// back to the system.
-// - If the number of free committed pages reaches kMinimumFreeCommittedPageCount, we can stop the scavenging and block the
-// scavenging thread until the number of free committed pages goes above kMinimumFreeCommittedPageCount.
-
-// Background thread wakes up every 5 seconds to scavenge as long as there is memory available to return to the system.
-static const int kScavengeTimerDelayInSeconds = 5;
-
-// Number of free committed pages that we want to keep around.
-static const size_t kMinimumFreeCommittedPageCount = 512;
-
-// During a scavenge, we'll release up to a fraction of the free committed pages.
-#if OS(WINDOWS)
-// We are slightly less aggressive in releasing memory on Windows due to performance reasons.
-static const int kMaxScavengeAmountFactor = 3;
-#else
-static const int kMaxScavengeAmountFactor = 2;
-#endif
+// The page heap maintains a free list for spans that are no longer in use by
+// the central cache or any thread caches. We use a background thread to
+// periodically scan the free list and release a percentage of it back to the OS.
+
+// If free_committed_pages_ exceeds kMinimumFreeCommittedPageCount, the
+// background thread:
+// - wakes up
+// - pauses for kScavengeDelayInSeconds
+// - returns to the OS a percentage of the memory that remained unused during
+//   that pause (kScavengePercentage * min_free_committed_pages_since_last_scavenge_)
+// The goal of this strategy is to reduce memory pressure in a timely fashion
+// while avoiding thrashing the OS allocator.
+
+// Time delay before the page heap scavenger will consider returning pages to
+// the OS.
+static const int kScavengeDelayInSeconds = 2;
+
+// Approximate percentage of free committed pages to return to the OS in one
+// scavenge.
+static const float kScavengePercentage = .5f;
+
+// number of span lists to keep spans in when memory is returned.
+static const int kMinSpanListsWithSpans = 32;
+
+// Number of free committed pages that we want to keep around. The minimum number of pages used when there
+// is 1 span in each of the first kMinSpanListsWithSpans spanlists. Currently 528 pages.
+static const size_t kMinimumFreeCommittedPageCount = kMinSpanListsWithSpans * ((1.0f+kMinSpanListsWithSpans) / 2.0f);
+
 #endif
 
 class TCMalloc_PageHeap {
@@ -1360,8 +1389,9 @@ class TCMalloc_PageHeap {
   // Number of pages kept in free lists that are still committed.
   Length free_committed_pages_;
 
-  // Number of pages that we committed in the last scavenge wait interval.
-  Length pages_committed_since_last_scavenge_;
+  // Minimum number of free committed pages since last scavenge. (Can be 0 if
+  // we've committed new pages since the last scavenge.)
+  Length min_free_committed_pages_since_last_scavenge_;
 #endif
 
   bool GrowHeap(Length n);
@@ -1406,13 +1436,13 @@ class TCMalloc_PageHeap {
   void initializeScavenger();
   ALWAYS_INLINE void signalScavenger();
   void scavenge();
-  ALWAYS_INLINE bool shouldContinueScavenging() const;
+  ALWAYS_INLINE bool shouldScavenge() const;
 
 #if !HAVE(DISPATCH_H)
-  static NO_RETURN void* runScavengerThread(void*);
+  static NO_RETURN_WITH_VALUE void* runScavengerThread(void*);
   NO_RETURN void scavengerThread();
 
-  // Keeps track of whether the background thread is actively scavenging memory every kScavengeTimerDelayInSeconds, or
+  // Keeps track of whether the background thread is actively scavenging memory every kScavengeDelayInSeconds, or
   // it's blocked waiting for more pages to be deleted.
 
 bool m_scavengeThreadActive;
@@ -1438,7 +1468,7 @@ void TCMalloc_PageHeap::init()
 
 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
   free_committed_pages_ = 0;
-  pages_committed_since_last_scavenge_ = 0;
+  min_free_committed_pages_since_last_scavenge_ = 0;
 #endif // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
 
   scavenge_counter_ = 0;
@@ -1463,11 +1493,23 @@ void TCMalloc_PageHeap::init()
 
 void TCMalloc_PageHeap::initializeScavenger()
 {
-    pthread_mutex_init(&m_scavengeMutex, 0);
-    pthread_cond_init(&m_scavengeCondition, 0);
-    m_scavengeThreadActive = true;
-    pthread_t thread;
-    pthread_create(&thread, 0, runScavengerThread, this);
+    // Create a non-recursive mutex.
+#if !defined(PTHREAD_MUTEX_NORMAL) || PTHREAD_MUTEX_NORMAL == PTHREAD_MUTEX_DEFAULT
+    pthread_mutex_init(&m_scavengeMutex, 0);
+#else
+    pthread_mutexattr_t attr;
+    pthread_mutexattr_init(&attr);
+    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_NORMAL);
+
+    pthread_mutex_init(&m_scavengeMutex, &attr);
+
+    pthread_mutexattr_destroy(&attr);
+#endif
+
+    pthread_cond_init(&m_scavengeCondition, 0);
+    m_scavengeThreadActive = true;
+    pthread_t thread;
+    pthread_create(&thread, 0, runScavengerThread, this);
 }
 
 void* TCMalloc_PageHeap::runScavengerThread(void* context)
@@ -1481,8 +1523,10 @@ void* TCMalloc_PageHeap::runScavengerThread(void* context)
 
 ALWAYS_INLINE void TCMalloc_PageHeap::signalScavenger()
 {
-    if (!m_scavengeThreadActive && shouldContinueScavenging())
-        pthread_cond_signal(&m_scavengeCondition);
+    // m_scavengeMutex should be held before accessing m_scavengeThreadActive.
+    ASSERT(pthread_mutex_trylock(m_scavengeMutex));
+    if (!m_scavengeThreadActive && shouldScavenge())
+        pthread_cond_signal(&m_scavengeCondition);
 }
 
 #else // !HAVE(DISPATCH_H)
 
@@ -1491,58 +1535,55 @@ void TCMalloc_PageHeap::initializeScavenger()
 {
     m_scavengeQueue = dispatch_queue_create("com.apple.JavaScriptCore.FastMallocSavenger", NULL);
     m_scavengeTimer = dispatch_source_create(DISPATCH_SOURCE_TYPE_TIMER, 0, 0, m_scavengeQueue);
-    dispatch_time_t startTime = dispatch_time(DISPATCH_TIME_NOW, kScavengeTimerDelayInSeconds * NSEC_PER_SEC);
-    dispatch_source_set_timer(m_scavengeTimer, startTime, kScavengeTimerDelayInSeconds * NSEC_PER_SEC, 1000 * NSEC_PER_USEC);
+    dispatch_time_t startTime = dispatch_time(DISPATCH_TIME_NOW, kScavengeDelayInSeconds * NSEC_PER_SEC);
+    dispatch_source_set_timer(m_scavengeTimer, startTime, kScavengeDelayInSeconds * NSEC_PER_SEC, 1000 * NSEC_PER_USEC);
     dispatch_source_set_event_handler(m_scavengeTimer, ^{ periodicScavenge(); });
     m_scavengingScheduled = false;
 }
 
 ALWAYS_INLINE void TCMalloc_PageHeap::signalScavenger()
 {
-    if (!m_scavengingScheduled && shouldContinueScavenging()) {
-        m_scavengingScheduled = true;
-        dispatch_resume(m_scavengeTimer);
-    }
+    ASSERT(IsHeld(pageheap_lock));
+    if (!m_scavengingScheduled && shouldScavenge()) {
+        m_scavengingScheduled = true;
+        dispatch_resume(m_scavengeTimer);
+    }
 }
 
 #endif
 
-void TCMalloc_PageHeap::scavenge() 
+void TCMalloc_PageHeap::scavenge()
 {
-    // If we have to commit memory in the last 5 seconds, it means we don't have enough free committed pages
-    // for the amount of allocations that we do. So hold off on releasing memory back to the system.
-    if (pages_committed_since_last_scavenge_ > 0) {
-        pages_committed_since_last_scavenge_ = 0;
-        return;
-    }
-    Length pagesDecommitted = 0;
-    for (int i = kMaxPages; i >= 0; i--) {
-        SpanList* slist = (static_cast<size_t>(i) == kMaxPages) ? &large_ : &free_[i];
-        if (!DLL_IsEmpty(&slist->normal)) {
-            // Release the last span on the normal portion of this list
-            Span* s = slist->normal.prev;
-            // Only decommit up to a fraction of the free committed pages if pages_allocated_since_last_scavenge_ > 0.
-            if ((pagesDecommitted + s->length) * kMaxScavengeAmountFactor > free_committed_pages_)
-                continue;
-            DLL_Remove(s);
-            TCMalloc_SystemRelease(reinterpret_cast<void*>(s->start << kPageShift),
-                                   static_cast<size_t>(s->length << kPageShift));
-            if (!s->decommitted) {
-                pagesDecommitted += s->length;
-                s->decommitted = true;
+    size_t pagesToRelease = min_free_committed_pages_since_last_scavenge_ * kScavengePercentage;
+    size_t targetPageCount = std::max<size_t>(kMinimumFreeCommittedPageCount, free_committed_pages_ - pagesToRelease);
+
+    while (free_committed_pages_ > targetPageCount) {
+        for (int i = kMaxPages; i > 0 && free_committed_pages_ >= targetPageCount; i--) {
+            SpanList* slist = (static_cast<size_t>(i) == kMaxPages) ? &large_ : &free_[i];
+            // If the span size is bigger than kMinSpanListsWithSpans pages return all the spans in the list, else return all but 1 span.
+            // Return only 50% of a spanlist at a time so spans of size 1 are not the only ones left.
+            size_t length = DLL_Length(&slist->normal);
+            size_t numSpansToReturn = (i > kMinSpanListsWithSpans) ? length : length / 2;
+            for (int j = 0; static_cast<size_t>(j) < numSpansToReturn && !DLL_IsEmpty(&slist->normal) && free_committed_pages_ > targetPageCount; j++) {
+                Span* s = slist->normal.prev;
+                DLL_Remove(s);
+                ASSERT(!s->decommitted);
+                if (!s->decommitted) {
+                    TCMalloc_SystemRelease(reinterpret_cast<void*>(s->start << kPageShift),
+                                           static_cast<size_t>(s->length << kPageShift));
+                    ASSERT(free_committed_pages_ >= s->length);
+                    free_committed_pages_ -= s->length;
+                    s->decommitted = true;
+                }
+                DLL_Prepend(&slist->returned, s);
             }
-            DLL_Prepend(&slist->returned, s);
-            // We can stop scavenging if the number of free committed pages left is less than or equal to the minimum number we want to keep around.
-            if (free_committed_pages_ <= kMinimumFreeCommittedPageCount + pagesDecommitted)
-                break;
         }
     }
-    pages_committed_since_last_scavenge_ = 0;
-    ASSERT(free_committed_pages_ >= pagesDecommitted);
-    free_committed_pages_ -= pagesDecommitted;
+
+    min_free_committed_pages_since_last_scavenge_ = free_committed_pages_;
 }
 
-ALWAYS_INLINE bool TCMalloc_PageHeap::shouldContinueScavenging() const
+ALWAYS_INLINE bool TCMalloc_PageHeap::shouldScavenge() const
 {
     return free_committed_pages_ > kMinimumFreeCommittedPageCount;
 }
@@ -1571,20 +1612,13 @@ inline Span* TCMalloc_PageHeap::New(Length n) {
     Span* result = ll->next;
     Carve(result, n, released);
-    if (result->decommitted) {
-      TCMalloc_SystemCommit(reinterpret_cast<void*>(result->start << kPageShift), static_cast<size_t>(n << kPageShift));
-      result->decommitted = false;
 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
-      pages_committed_since_last_scavenge_ += n;
-#endif
-    }
-#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
-    else {
-      // The newly allocated memory is from a span that's in the normal span list (already committed). Update the
-      // free committed pages count.
-      ASSERT(free_committed_pages_ >= n);
-      free_committed_pages_ -= n;
-    }
+    // The newly allocated memory is from a span that's in the normal span list (already committed). Update the
+    // free committed pages count.
+    ASSERT(free_committed_pages_ >= n);
+    free_committed_pages_ -= n;
+    if (free_committed_pages_ < min_free_committed_pages_since_last_scavenge_)
+      min_free_committed_pages_since_last_scavenge_ = free_committed_pages_;
 #endif // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
     ASSERT(Check());
     free_pages_ -= n;
@@ -1642,20 +1676,13 @@ Span* TCMalloc_PageHeap::AllocLarge(Length n) {
   if (best != NULL) {
     Carve(best, n, from_released);
-    if (best->decommitted) {
-      TCMalloc_SystemCommit(reinterpret_cast<void*>(best->start << kPageShift), static_cast<size_t>(n << kPageShift));
-      best->decommitted = false;
 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
-      pages_committed_since_last_scavenge_ += n;
-#endif
-    }
-#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
-    else {
-      // The newly allocated memory is from a span that's in the normal span list (already committed). Update the
-      // free committed pages count.
-      ASSERT(free_committed_pages_ >= n);
-      free_committed_pages_ -= n;
-    }
+    // The newly allocated memory is from a span that's in the normal span list (already committed). Update the
+    // free committed pages count.
+    ASSERT(free_committed_pages_ >= n);
+    free_committed_pages_ -= n;
+    if (free_committed_pages_ < min_free_committed_pages_since_last_scavenge_)
+      min_free_committed_pages_since_last_scavenge_ = free_committed_pages_;
 #endif // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
     ASSERT(Check());
     free_pages_ -= n;
@@ -1681,29 +1708,34 @@ Span* TCMalloc_PageHeap::Split(Span* span, Length n) {
   return leftover;
 }
 
-static ALWAYS_INLINE void propagateDecommittedState(Span* destination, Span* source)
-{
-    destination->decommitted = source->decommitted;
-}
-
 inline void TCMalloc_PageHeap::Carve(Span* span, Length n, bool released) {
   ASSERT(n > 0);
   DLL_Remove(span);
   span->free = 0;
   Event(span, 'A', n);
+  if (released) {
+    // If the span chosen to carve from is decommited, commit the entire span at once to avoid committing spans 1 page at a time.
+    ASSERT(span->decommitted);
+    TCMalloc_SystemCommit(reinterpret_cast<void*>(span->start << kPageShift), static_cast<size_t>(span->length << kPageShift));
+    span->decommitted = false;
+#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
+    free_committed_pages_ += span->length;
+#endif
+  }
+
   const int extra = static_cast<int>(span->length - n);
   ASSERT(extra >= 0);
   if (extra > 0) {
     Span* leftover = NewSpan(span->start + n, extra);
     leftover->free = 1;
-    propagateDecommittedState(leftover, span);
+    leftover->decommitted = false;
    Event(leftover, 'S', extra);
     RecordSpan(leftover);
 
     // Place leftover span on appropriate free list
     SpanList* listpair = (static_cast<size_t>(extra) < kMaxPages) ? &free_[extra] : &large_;
-    Span* dst = released ? &listpair->returned : &listpair->normal;
+    Span* dst = &listpair->normal;
     DLL_Prepend(dst, leftover);
 
     span->length = n;
@@ -1798,6 +1830,8 @@ inline void TCMalloc_PageHeap::Delete(Span* span) {
     // If the merged span is decommitted, that means we decommitted any neighboring spans that were
    // committed. Update the free committed pages count.
     free_committed_pages_ -= neighboringCommittedSpansLength;
+    if (free_committed_pages_ < min_free_committed_pages_since_last_scavenge_)
+      min_free_committed_pages_since_last_scavenge_ = free_committed_pages_;
   } else {
     // If the merged span remains committed, add the deleted span's size to the free committed pages count.
     free_committed_pages_ += n;
@@ -1962,10 +1996,6 @@ bool TCMalloc_PageHeap::GrowHeap(Length n) {
   }
   ask = actual_size >> kPageShift;
 
-#if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
-  pages_committed_since_last_scavenge_ += ask;
-#endif
-
   uint64_t old_system_bytes = system_bytes_;
   system_bytes_ += (ask << kPageShift);
   const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
@@ -2155,10 +2185,10 @@ class TCMalloc_ThreadCache {
   // Total byte size in cache
   size_t Size() const { return size_; }
 
-  void* Allocate(size_t size);
+  ALWAYS_INLINE void* Allocate(size_t size);
   void Deallocate(void* ptr, size_t size_class);
 
-  void FetchFromCentralCache(size_t cl, size_t allocationSize);
+  ALWAYS_INLINE void FetchFromCentralCache(size_t cl, size_t allocationSize);
   void ReleaseToCentralCache(size_t cl, int N);
   void Scavenge();
   void Print() const;
@@ -2259,12 +2289,12 @@ class TCMalloc_Central_FreeList {
   // REQUIRES: lock_ is held
   // Release an object to spans.
   // May temporarily release lock_.
-  void ReleaseToSpans(void* object);
+  ALWAYS_INLINE void ReleaseToSpans(void* object);
 
   // REQUIRES: lock_ is held
   // Populate cache by fetching from the page heap.
   // May temporarily release lock_.
-  void Populate();
+  ALWAYS_INLINE void Populate();
 
   // REQUIRES: lock is held.
   // Tries to make room for a TCEntry. If the cache is full it will try to
@@ -2277,7 +2307,7 @@ class TCMalloc_Central_FreeList {
   // just iterates over the sizeclasses but does so without taking a lock.
   // Returns true on success.
   // May temporarily lock a "random" size class.
-  static bool EvictRandomSizeClass(size_t locked_size_class, bool force);
+  static ALWAYS_INLINE bool EvictRandomSizeClass(size_t locked_size_class, bool force);
 
   // REQUIRES: lock_ is *not* held.
   // Tries to shrink the Cache. If force is true it will relase objects to
@@ -2328,7 +2358,7 @@ static TCMalloc_Central_FreeListPadded central_cache[kNumClasses];
 
 // Page-level allocator
 static SpinLock pageheap_lock = SPINLOCK_INITIALIZER;
-static void* pageheap_memory[(sizeof(TCMalloc_PageHeap) + sizeof(void*) - 1) / sizeof(void*)];
+static AllocAlignmentInteger pageheap_memory[(sizeof(TCMalloc_PageHeap) + sizeof(AllocAlignmentInteger) - 1) / sizeof(AllocAlignmentInteger)];
 static bool phinited = false;
 
 // Avoid extra level of indirection by making "pageheap" be just an alias
@@ -2363,15 +2393,15 @@ void TCMalloc_PageHeap::scavengerThread()
 #endif
 
     while (1) {
-        if (!shouldContinueScavenging()) {
+        if (!shouldScavenge()) {
             pthread_mutex_lock(&m_scavengeMutex);
             m_scavengeThreadActive = false;
-            // Block until there are enough freed pages to release back to the system.
+            // Block until there are enough free committed pages to release back to the system.
             pthread_cond_wait(&m_scavengeCondition, &m_scavengeMutex);
             m_scavengeThreadActive = true;
             pthread_mutex_unlock(&m_scavengeMutex);
         }
-        sleep(kScavengeTimerDelayInSeconds);
+        sleep(kScavengeDelayInSeconds);
         {
             SpinLockHolder h(&pageheap_lock);
             pageheap->scavenge();
@@ -2383,15 +2413,13 @@ void TCMalloc_PageHeap::periodicScavenge()
 {
-    {
     SpinLockHolder h(&pageheap_lock);
     pageheap->scavenge();
-    }
 
-    if (!shouldContinueScavenging()) {
-        m_scavengingScheduled = false;
-        dispatch_suspend(m_scavengeTimer);
-    }
+    if (!shouldScavenge()) {
+        m_scavengingScheduled = false;
+        dispatch_suspend(m_scavengeTimer);
+    }
 }
 
 #endif // HAVE(DISPATCH_H)
@@ -2677,7 +2705,13 @@ ALWAYS_INLINE void TCMalloc_Central_FreeList::Populate() {
     if (span) pageheap->RegisterSizeClass(span, size_class_);
   }
   if (span == NULL) {
+#if HAVE(ERRNO_H)
    MESSAGE("allocation failed: %d\n", errno);
+#elif OS(WINDOWS)
+    MESSAGE("allocation failed: %d\n", ::GetLastError());
+#else
+    MESSAGE("allocation failed\n");
+#endif
     lock_.Lock();
     return;
   }
@@ -2700,7 +2734,7 @@ ALWAYS_INLINE void TCMalloc_Central_FreeList::Populate() {
   char* nptr;
   while ((nptr = ptr + size) <= limit) {
     *tail = ptr;
-    tail = reinterpret_cast<void**>(ptr);
+    tail = reinterpret_cast_ptr<void**>(ptr);
     ptr = nptr;
     num++;
   }
@@ -3044,7 +3078,7 @@ void TCMalloc_ThreadCache::BecomeIdle() {
   if (heap->in_setspecific_) return; // Do not disturb the active caller
 
   heap->in_setspecific_ = true;
-  pthread_setspecific(heap_key, NULL);
+  setThreadHeap(NULL);
 #ifdef HAVE_TLS
   // Also update the copy in __thread
   threadlocal_heap = NULL;
@@ -3671,7 +3705,7 @@ extern "C" 
 #define do_malloc do_malloc<crashOnFailure>
 
 template <bool crashOnFailure>
-void* malloc(size_t);
+ALWAYS_INLINE void* malloc(size_t);
 
 void* fastMalloc(size_t size)
 {
@@ -3732,7 +3766,7 @@ void free(void* ptr) {
 extern "C" 
 #else
 template <bool crashOnFailure>
-void* calloc(size_t, size_t);
+ALWAYS_INLINE void* calloc(size_t, size_t);
 
 void* fastCalloc(size_t n, size_t elem_size)
 {
@@ -3796,7 +3830,7 @@ void cfree(void* ptr) {
 extern "C" 
 #else
 template <bool crashOnFailure>
-void* realloc(void*, size_t);
+ALWAYS_INLINE void* realloc(void*, size_t);
 
 void* fastRealloc(void* old_ptr, size_t new_size)
 {
@@ -3933,6 +3967,8 @@ static inline void* cpp_alloc(size_t size, bool nothrow) {
   }
 }
 
+#if ENABLE(GLOBAL_FASTMALLOC_NEW)
+
 void* operator new(size_t size) {
   void* p = cpp_alloc(size, false);
   // We keep this next instruction out of cpp_alloc for a reason: when
@@ -3987,6 +4023,8 @@ void operator delete[](void* p, const std::nothrow_t&) __THROW {
   do_free(p);
 }
 
+#endif
+
 extern "C" void* memalign(size_t align, size_t size) __THROW {
   void* result = do_memalign(align, size);
   MallocHook::InvokeNewHook(result, size);
@@ -4102,7 +4140,62 @@ void *(*__memalign_hook)(size_t, size_t, const void *) = MemalignOverride;
 #endif
 
-#if defined(WTF_CHANGES) && OS(DARWIN)
+#ifdef WTF_CHANGES
+void releaseFastMallocFreeMemory()
+{
+    // Flush free pages in the current thread cache back to the page heap.
+    // Low watermark mechanism in Scavenge() prevents full return on the first pass.
+    // The second pass flushes everything.
+    if (TCMalloc_ThreadCache* threadCache = TCMalloc_ThreadCache::GetCacheIfPresent()) {
+        threadCache->Scavenge();
+        threadCache->Scavenge();
+    }
+
+    SpinLockHolder h(&pageheap_lock);
+    pageheap->ReleaseFreePages();
+}
+
+FastMallocStatistics fastMallocStatistics()
+{
+    FastMallocStatistics statistics;
+
+    SpinLockHolder lockHolder(&pageheap_lock);
+    statistics.reservedVMBytes = static_cast<size_t>(pageheap->SystemBytes());
+    statistics.committedVMBytes = statistics.reservedVMBytes - pageheap->ReturnedBytes();
+
+    statistics.freeListBytes = 0;
+    for (unsigned cl = 0; cl < kNumClasses; ++cl) {
+        const int length = central_cache[cl].length();
+        const int tc_length = central_cache[cl].tc_length();
+
+        statistics.freeListBytes += ByteSizeForClass(cl) * (length + tc_length);
+    }
+    for (TCMalloc_ThreadCache* threadCache = thread_heaps; threadCache ; threadCache = threadCache->next_)
+        statistics.freeListBytes += threadCache->Size();
+
+    return statistics;
+}
+
+size_t fastMallocSize(const void* ptr)
+{
+    const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
+    Span* span = pageheap->GetDescriptorEnsureSafe(p);
+
+    if (!span || span->free)
+        return 0;
+
+    for (void* free = span->objects; free != NULL; free = *((void**) free)) {
+        if (ptr == free)
+            return 0;
+    }
+
+    if (size_t cl = span->sizeclass)
+        return ByteSizeForClass(cl);
+
+    return span->length << kPageShift;
+}
+
+#if OS(DARWIN)
 
 class FreeObjectFinder {
     const RemoteMemoryReader& m_reader;
@@ -4385,9 +4478,12 @@ extern "C" {
 malloc_introspection_t jscore_fastmalloc_introspection = { &FastMallocZone::enumerate, &FastMallocZone::goodSize, &FastMallocZone::check, &FastMallocZone::print, &FastMallocZone::log, &FastMallocZone::forceLock, &FastMallocZone::forceUnlock, &FastMallocZone::statistics
 
-#if !defined(BUILDING_ON_TIGER) && !defined(BUILDING_ON_LEOPARD) && !OS(IPHONE_OS)
+#if !defined(BUILDING_ON_TIGER) && !defined(BUILDING_ON_LEOPARD)
     , 0 // zone_locked will not be called on the zone unless it advertises itself as version five or higher.
 #endif
+#if !defined(BUILDING_ON_TIGER) && !defined(BUILDING_ON_LEOPARD) && !defined(BUILDING_ON_SNOW_LEOPARD)
+    , 0, 0, 0, 0 // These members will not be used unless the zone advertises itself as version seven or higher.
+#endif
 
 };
 }
@@ -4419,44 +4515,9 @@ void FastMallocZone::init()
 {
     static FastMallocZone zone(pageheap, &thread_heaps, static_cast<TCMalloc_Central_FreeListPadded*>(central_cache), &span_allocator, &threadheap_allocator);
 }
 
-#endif
-
-#if WTF_CHANGES
-void releaseFastMallocFreeMemory()
-{
-    // Flush free pages in the current thread cache back to the page heap.
-    // Low watermark mechanism in Scavenge() prevents full return on the first pass.
-    // The second pass flushes everything.
-    if (TCMalloc_ThreadCache* threadCache = TCMalloc_ThreadCache::GetCacheIfPresent()) {
-        threadCache->Scavenge();
-        threadCache->Scavenge();
-    }
-
-    SpinLockHolder h(&pageheap_lock);
-    pageheap->ReleaseFreePages();
-}
-
-FastMallocStatistics fastMallocStatistics()
-{
-    {
-        SpinLockHolder lockHolder(&pageheap_lock);
-        statistics.heapSize = static_cast<size_t>(pageheap->SystemBytes());
-        statistics.freeSizeInHeap = static_cast<size_t>(pageheap->FreeBytes());
-        statistics.returnedSize = pageheap->ReturnedBytes();
-        statistics.freeSizeInCaches = 0;
-        for (TCMalloc_ThreadCache* threadCache = thread_heaps; threadCache ; threadCache = threadCache->next_)
-            statistics.freeSizeInCaches += threadCache->Size();
-    }
-    for (unsigned cl = 0; cl < kNumClasses; ++cl) {
-        const int length = central_cache[cl].length();
-        const int tc_length = central_cache[cl].tc_length();
-        statistics.freeSizeInCaches += ByteSizeForClass(cl) * (length + tc_length);
-    }
-    return statistics;
-}
+#endif // OS(DARWIN)
 
 } // namespace WTF
 
-#endif
+#endif // WTF_CHANGES
 
 #endif // FORCE_SYSTEM_MALLOC
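
For readers of this change, here is a minimal standalone sketch (not part of the patch) of how the constants introduced above combine into the scavenger's per-pass target. The helper function scavengeTargetPageCount and the sample numbers are hypothetical; the constants and the max() clamp mirror the rewritten TCMalloc_PageHeap::scavenge() under the assumption that free_committed_pages_ and the per-interval minimum are tracked as in the patch.

// Illustrative sketch only: derive the scavenge target from the new constants.
#include <algorithm>
#include <cstddef>
#include <cstdio>

static const int kMinSpanListsWithSpans = 32;
// 32 * (1 + 32) / 2 = 528 pages kept committed as a floor.
static const size_t kMinimumFreeCommittedPageCount = kMinSpanListsWithSpans * ((1.0f + kMinSpanListsWithSpans) / 2.0f);
static const float kScavengePercentage = .5f;

// Number of committed free pages the scavenger aims to keep after one pass,
// given the current count and the minimum observed since the last scavenge.
static size_t scavengeTargetPageCount(size_t freeCommittedPages, size_t minFreeCommittedPagesSinceLastScavenge)
{
    size_t pagesToRelease = minFreeCommittedPagesSinceLastScavenge * kScavengePercentage;
    return std::max<size_t>(kMinimumFreeCommittedPageCount, freeCommittedPages - pagesToRelease);
}

int main()
{
    // Example: 2000 free committed pages, of which at least 1200 stayed unused
    // since the last scavenge -> release about 600, keeping 1400 (above the 528-page floor).
    std::printf("target = %zu pages\n", scavengeTargetPageCount(2000, 1200));
    return 0;
}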
