diff options
Diffstat (limited to 'JavaScriptCore/wtf/FastMalloc.cpp')
-rw-r--r--  JavaScriptCore/wtf/FastMalloc.cpp  |  256
1 files changed, 229 insertions, 27 deletions
diff --git a/JavaScriptCore/wtf/FastMalloc.cpp b/JavaScriptCore/wtf/FastMalloc.cpp index 69df95e..8f7d5ef 100644 --- a/JavaScriptCore/wtf/FastMalloc.cpp +++ b/JavaScriptCore/wtf/FastMalloc.cpp @@ -1,5 +1,6 @@ // Copyright (c) 2005, 2007, The Android Open Source Project // All rights reserved. +// Copyright (C) 2005, 2006, 2007, 2008 Apple Inc. All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are @@ -77,7 +78,7 @@ #include "FastMalloc.h" #include "Assertions.h" -#if USE(MULTIPLE_THREADS) +#if ENABLE(JSC_MULTIPLE_THREADS) #include <pthread.h> #endif @@ -93,10 +94,12 @@ #define FORCE_SYSTEM_MALLOC 1 #endif +#define TCMALLOC_TRACK_DECOMMITED_SPANS (HAVE(VIRTUALALLOC)) + #ifndef NDEBUG namespace WTF { -#if USE(MULTIPLE_THREADS) +#if ENABLE(JSC_MULTIPLE_THREADS) static pthread_key_t isForbiddenKey; static pthread_once_t isForbiddenKeyOnce = PTHREAD_ONCE_INIT; static void initializeIsForbiddenKey() @@ -139,7 +142,7 @@ void fastMallocAllow() { staticIsForbidden = false; } -#endif // USE(MULTIPLE_THREADS) +#endif // ENABLE(JSC_MULTIPLE_THREADS) } // namespace WTF #endif // NDEBUG @@ -147,53 +150,111 @@ void fastMallocAllow() #include <string.h> namespace WTF { -void *fastZeroedMalloc(size_t n) + +void* fastZeroedMalloc(size_t n) { - void *result = fastMalloc(n); - if (!result) - return 0; + void* result = fastMalloc(n); memset(result, 0, n); -#ifndef WTF_CHANGES - MallocHook::InvokeNewHook(result, n); -#endif return result; } +void* tryFastZeroedMalloc(size_t n) +{ + void* result = tryFastMalloc(n); + if (!result) + return 0; + memset(result, 0, n); + return result; } +} // namespace WTF + #if FORCE_SYSTEM_MALLOC #include <stdlib.h> #if !PLATFORM(WIN_OS) #include <pthread.h> +#else + #include "windows.h" #endif namespace WTF { - -void *fastMalloc(size_t n) + +void* tryFastMalloc(size_t n) { ASSERT(!isForbidden()); return malloc(n); } -void *fastCalloc(size_t 
n_elements, size_t element_size) +void* fastMalloc(size_t n) +{ + ASSERT(!isForbidden()); + void* result = malloc(n); + if (!result) + abort(); + return result; +} + +void* tryFastCalloc(size_t n_elements, size_t element_size) { ASSERT(!isForbidden()); return calloc(n_elements, element_size); } +void* fastCalloc(size_t n_elements, size_t element_size) +{ + ASSERT(!isForbidden()); + void* result = calloc(n_elements, element_size); + if (!result) + abort(); + return result; +} + void fastFree(void* p) { ASSERT(!isForbidden()); free(p); } -void *fastRealloc(void* p, size_t n) +void* tryFastRealloc(void* p, size_t n) { ASSERT(!isForbidden()); return realloc(p, n); } +void* fastRealloc(void* p, size_t n) +{ + ASSERT(!isForbidden()); + void* result = realloc(p, n); + if (!result) + abort(); + return result; +} + +void releaseFastMallocFreeMemory() { } + +#if HAVE(VIRTUALALLOC) +void* fastMallocExecutable(size_t n) +{ + return VirtualAlloc(0, n, MEM_COMMIT | MEM_RESERVE, PAGE_EXECUTE_READWRITE); +} + +void fastFreeExecutable(void* p) +{ + VirtualFree(p, 0, MEM_RELEASE); +} +#else +void* fastMallocExecutable(size_t n) +{ + return fastMalloc(n); +} + +void fastFreeExecutable(void* p) +{ + fastFree(p); +} +#endif + } // namespace WTF #if PLATFORM(DARWIN) @@ -202,7 +263,7 @@ void *fastRealloc(void* p, size_t n) extern "C" const int jscore_fastmalloc_introspection = 0; #endif -#else +#else // FORCE_SYSTEM_MALLOC #if HAVE(STDINT_H) #include <stdint.h> @@ -236,6 +297,7 @@ extern "C" const int jscore_fastmalloc_introspection = 0; #if PLATFORM(DARWIN) #include "MallocZoneSupport.h" +#include <wtf/HashSet.h> #endif #ifndef PRIuS @@ -290,7 +352,7 @@ public: static void log(malloc_zone_t*, void*) { } static void forceLock(malloc_zone_t*) { } static void forceUnlock(malloc_zone_t*) { } - static void statistics(malloc_zone_t*, malloc_statistics_t*) { } + static void statistics(malloc_zone_t*, malloc_statistics_t* stats) { memset(stats, 0, sizeof(malloc_statistics_t)); } private: 
FastMallocZone(TCMalloc_PageHeap*, TCMalloc_ThreadCache**, TCMalloc_Central_FreeListPadded*); @@ -859,9 +921,12 @@ struct Span { Span* prev; // Used when in link list void* objects; // Linked list of free objects unsigned int free : 1; // Is the span free +#ifndef NO_TCMALLOC_SAMPLES unsigned int sample : 1; // Sampled object? +#endif unsigned int sizeclass : 8; // Size-class for small objects (or 0) unsigned int refcount : 11; // Number of non-free objects + bool decommitted : 1; #undef SPAN_HISTORY #ifdef SPAN_HISTORY @@ -872,6 +937,12 @@ struct Span { #endif }; +#if TCMALLOC_TRACK_DECOMMITED_SPANS +#define ASSERT_SPAN_COMMITTED(span) ASSERT(!span->decommitted) +#else +#define ASSERT_SPAN_COMMITTED(span) +#endif + #ifdef SPAN_HISTORY void Event(Span* span, char op, int v = 0) { span->history[span->nexthistory] = op; @@ -1173,13 +1244,22 @@ inline Span* TCMalloc_PageHeap::New(Length n) { Span* result = ll->next; Carve(result, n, released); +#if TCMALLOC_TRACK_DECOMMITED_SPANS + if (result->decommitted) { + TCMalloc_SystemCommit(reinterpret_cast<void*>(result->start << kPageShift), static_cast<size_t>(n << kPageShift)); + result->decommitted = false; + } +#endif ASSERT(Check()); free_pages_ -= n; return result; } Span* result = AllocLarge(n); - if (result != NULL) return result; + if (result != NULL) { + ASSERT_SPAN_COMMITTED(result); + return result; + } // Grow the heap and try again if (!GrowHeap(n)) { @@ -1226,6 +1306,12 @@ Span* TCMalloc_PageHeap::AllocLarge(Length n) { if (best != NULL) { Carve(best, n, from_released); +#if TCMALLOC_TRACK_DECOMMITED_SPANS + if (best->decommitted) { + TCMalloc_SystemCommit(reinterpret_cast<void*>(best->start << kPageShift), static_cast<size_t>(n << kPageShift)); + best->decommitted = false; + } +#endif ASSERT(Check()); free_pages_ -= n; return best; @@ -1250,6 +1336,15 @@ Span* TCMalloc_PageHeap::Split(Span* span, Length n) { return leftover; } +#if !TCMALLOC_TRACK_DECOMMITED_SPANS +static ALWAYS_INLINE void 
propagateDecommittedState(Span*, Span*) { } +#else +static ALWAYS_INLINE void propagateDecommittedState(Span* destination, Span* source) +{ + destination->decommitted = source->decommitted; +} +#endif + inline void TCMalloc_PageHeap::Carve(Span* span, Length n, bool released) { ASSERT(n > 0); DLL_Remove(span); @@ -1261,6 +1356,7 @@ inline void TCMalloc_PageHeap::Carve(Span* span, Length n, bool released) { if (extra > 0) { Span* leftover = NewSpan(span->start + n, extra); leftover->free = 1; + propagateDecommittedState(leftover, span); Event(leftover, 'S', extra); RecordSpan(leftover); @@ -1274,6 +1370,16 @@ inline void TCMalloc_PageHeap::Carve(Span* span, Length n, bool released) { } } +#if !TCMALLOC_TRACK_DECOMMITED_SPANS +static ALWAYS_INLINE void mergeDecommittedStates(Span*, Span*) { } +#else +static ALWAYS_INLINE void mergeDecommittedStates(Span* destination, Span* other) +{ + if (other->decommitted) + destination->decommitted = true; +} +#endif + inline void TCMalloc_PageHeap::Delete(Span* span) { ASSERT(Check()); ASSERT(!span->free); @@ -1281,7 +1387,9 @@ inline void TCMalloc_PageHeap::Delete(Span* span) { ASSERT(GetDescriptor(span->start) == span); ASSERT(GetDescriptor(span->start + span->length - 1) == span); span->sizeclass = 0; +#ifndef NO_TCMALLOC_SAMPLES span->sample = 0; +#endif // Coalesce -- we guarantee that "p" != 0, so no bounds checking // necessary. 
We do not bother resetting the stale pagemap @@ -1298,6 +1406,7 @@ inline void TCMalloc_PageHeap::Delete(Span* span) { // Merge preceding span into this span ASSERT(prev->start + prev->length == p); const Length len = prev->length; + mergeDecommittedStates(span, prev); DLL_Remove(prev); DeleteSpan(prev); span->start -= len; @@ -1310,6 +1419,7 @@ inline void TCMalloc_PageHeap::Delete(Span* span) { // Merge next span into this span ASSERT(next->start == p+n); const Length len = next->length; + mergeDecommittedStates(span, next); DLL_Remove(next); DeleteSpan(next); span->length += len; @@ -1350,6 +1460,9 @@ void TCMalloc_PageHeap::IncrementalScavenge(Length n) { DLL_Remove(s); TCMalloc_SystemRelease(reinterpret_cast<void*>(s->start << kPageShift), static_cast<size_t>(s->length << kPageShift)); +#if TCMALLOC_TRACK_DECOMMITED_SPANS + s->decommitted = true; +#endif DLL_Prepend(&slist->returned, s); scavenge_counter_ = std::max<size_t>(64UL, std::min<size_t>(kDefaultReleaseDelay, kDefaultReleaseDelay - (free_pages_ / kDefaultReleaseDelay))); @@ -1456,7 +1569,7 @@ bool TCMalloc_PageHeap::GrowHeap(Length n) { if (n < ask) { // Try growing just "n" pages ask = n; - ptr = TCMalloc_SystemAlloc(ask << kPageShift, &actual_size, kPageSize);; + ptr = TCMalloc_SystemAlloc(ask << kPageShift, &actual_size, kPageSize); } if (ptr == NULL) return false; } @@ -1717,13 +1830,18 @@ class TCMalloc_Central_FreeList { #ifdef WTF_CHANGES template <class Finder, class Reader> - void enumerateFreeObjects(Finder& finder, const Reader& reader) + void enumerateFreeObjects(Finder& finder, const Reader& reader, TCMalloc_Central_FreeList* remoteCentralFreeList) { for (Span* span = &empty_; span && span != &empty_; span = (span->next ? reader(span->next) : 0)) ASSERT(!span->objects); ASSERT(!nonempty_.objects); - for (Span* span = reader(nonempty_.next); span && span != &nonempty_; span = (span->next ? 
reader(span->next) : 0)) { + static const ptrdiff_t nonemptyOffset = reinterpret_cast<const char*>(&nonempty_) - reinterpret_cast<const char*>(this); + + Span* remoteNonempty = reinterpret_cast<Span*>(reinterpret_cast<char*>(remoteCentralFreeList) + nonemptyOffset); + Span* remoteSpan = nonempty_.next; + + for (Span* span = reader(remoteSpan); span && remoteSpan != remoteNonempty; remoteSpan = span->next, span = (span->next ? reader(span->next) : 0)) { for (void* nextObject = span->objects; nextObject; nextObject = *reader(reinterpret_cast<void**>(nextObject))) finder.visit(nextObject); } @@ -2090,6 +2208,7 @@ void* TCMalloc_Central_FreeList::FetchFromSpans() { Span* span = nonempty_.next; ASSERT(span->objects != NULL); + ASSERT_SPAN_COMMITTED(span); span->refcount++; void* result = span->objects; span->objects = *(reinterpret_cast<void**>(result)); @@ -2120,6 +2239,7 @@ ALWAYS_INLINE void TCMalloc_Central_FreeList::Populate() { lock_.Lock(); return; } + ASSERT_SPAN_COMMITTED(span); ASSERT(span->length == npages); // Cache sizeclass info eagerly. Locking is not necessary. // (Instead of being eager, we could just replace any stale info @@ -2896,11 +3016,15 @@ static inline void* CheckedMallocResult(void *result) } static inline void* SpanToMallocResult(Span *span) { + ASSERT_SPAN_COMMITTED(span); pageheap->CacheSizeClass(span->start, 0); return CheckedMallocResult(reinterpret_cast<void*>(span->start << kPageShift)); } +#ifdef WTF_CHANGES +template <bool abortOnFailure> +#endif static ALWAYS_INLINE void* do_malloc(size_t size) { void* ret = NULL; @@ -2930,7 +3054,14 @@ static ALWAYS_INLINE void* do_malloc(size_t size) { // size-appropriate freelist, afer replenishing it if it's empty. ret = CheckedMallocResult(heap->Allocate(size)); } - if (ret == NULL) errno = ENOMEM; + if (!ret) { +#ifdef WTF_CHANGES + if (abortOnFailure) // This branch should be optimized out by the compiler. 
+ abort(); +#else + errno = ENOMEM; +#endif + } return ret; } @@ -2947,7 +3078,9 @@ static ALWAYS_INLINE void do_free(void* ptr) { pageheap->CacheSizeClass(p, cl); } if (cl != 0) { +#ifndef NO_TCMALLOC_SAMPLES ASSERT(!pageheap->GetDescriptor(p)->sample); +#endif TCMalloc_ThreadCache* heap = TCMalloc_ThreadCache::GetCacheIfPresent(); if (heap != NULL) { heap->Deallocate(ptr, cl); @@ -2960,11 +3093,13 @@ static ALWAYS_INLINE void do_free(void* ptr) { SpinLockHolder h(&pageheap_lock); ASSERT(reinterpret_cast<uintptr_t>(ptr) % kPageSize == 0); ASSERT(span != NULL && span->start == p); +#ifndef NO_TCMALLOC_SAMPLES if (span->sample) { DLL_Remove(span); stacktrace_allocator.Delete(reinterpret_cast<StackTrace*>(span->objects)); span->objects = NULL; } +#endif pageheap->Delete(span); } } @@ -3090,6 +3225,24 @@ static inline struct mallinfo do_mallinfo() { #ifndef WTF_CHANGES extern "C" +#else +#define do_malloc do_malloc<abortOnFailure> + +template <bool abortOnFailure> +void* malloc(size_t); + +void* fastMalloc(size_t size) +{ + return malloc<true>(size); +} + +void* tryFastMalloc(size_t size) +{ + return malloc<false>(size); +} + +template <bool abortOnFailure> +ALWAYS_INLINE #endif void* malloc(size_t size) { void* result = do_malloc(size); @@ -3111,6 +3264,22 @@ void free(void* ptr) { #ifndef WTF_CHANGES extern "C" +#else +template <bool abortOnFailure> +void* calloc(size_t, size_t); + +void* fastCalloc(size_t n, size_t elem_size) +{ + return calloc<true>(n, elem_size); +} + +void* tryFastCalloc(size_t n, size_t elem_size) +{ + return calloc<false>(n, elem_size); +} + +template <bool abortOnFailure> +ALWAYS_INLINE #endif void* calloc(size_t n, size_t elem_size) { const size_t totalBytes = n * elem_size; @@ -3141,6 +3310,22 @@ void cfree(void* ptr) { #ifndef WTF_CHANGES extern "C" +#else +template <bool abortOnFailure> +void* realloc(void*, size_t); + +void* fastRealloc(void* old_ptr, size_t new_size) +{ + return realloc<true>(old_ptr, new_size); +} + +void* 
tryFastRealloc(void* old_ptr, size_t new_size) +{ + return realloc<false>(old_ptr, new_size); +} + +template <bool abortOnFailure> +ALWAYS_INLINE #endif void* realloc(void* old_ptr, size_t new_size) { if (old_ptr == NULL) { @@ -3200,7 +3385,19 @@ void* realloc(void* old_ptr, size_t new_size) { } } -#ifndef WTF_CHANGES +void* fastMallocExecutable(size_t n) +{ + return malloc<false>(n); +} + +void fastFreeExecutable(void* p) +{ + free(p); +} + +#ifdef WTF_CHANGES +#undef do_malloc +#else static SpinLock set_new_handler_lock = SPINLOCK_INITIALIZER; @@ -3412,7 +3609,6 @@ void *(*__memalign_hook)(size_t, size_t, const void *) = MemalignOverride; #endif #if defined(WTF_CHANGES) && PLATFORM(DARWIN) -#include <wtf/HashSet.h> class FreeObjectFinder { const RemoteMemoryReader& m_reader; @@ -3431,10 +3627,10 @@ public: threadCache->enumerateFreeObjects(*this, m_reader); } - void findFreeObjects(TCMalloc_Central_FreeListPadded* centralFreeList, size_t numSizes) + void findFreeObjects(TCMalloc_Central_FreeListPadded* centralFreeList, size_t numSizes, TCMalloc_Central_FreeListPadded* remoteCentralFreeList) { for (unsigned i = 0; i < numSizes; i++) - centralFreeList[i].enumerateFreeObjects(*this, m_reader); + centralFreeList[i].enumerateFreeObjects(*this, m_reader, remoteCentralFreeList + i); } }; @@ -3548,7 +3744,7 @@ kern_return_t FastMallocZone::enumerate(task_t task, void* context, unsigned typ FreeObjectFinder finder(memoryReader); finder.findFreeObjects(threadHeaps); - finder.findFreeObjects(centralCaches, kNumClasses); + finder.findFreeObjects(centralCaches, kNumClasses, mzone->m_centralCaches); TCMalloc_PageHeap::PageMap* pageMap = &pageHeap->pagemap_; PageMapFreeObjectFinder pageMapFinder(memoryReader, finder); @@ -3625,8 +3821,14 @@ void FastMallocZone::init() #endif +void releaseFastMallocFreeMemory() +{ + SpinLockHolder h(&pageheap_lock); + pageheap->ReleaseFreePages(); +} + #if WTF_CHANGES } // namespace WTF #endif -#endif // USE_SYSTEM_MALLOC +#endif // 
FORCE_SYSTEM_MALLOC