Diffstat (limited to 'JavaScriptCore/wtf/FastMalloc.cpp')
-rw-r--r--  JavaScriptCore/wtf/FastMalloc.cpp  223
1 file changed, 180 insertions(+), 43 deletions(-)
diff --git a/JavaScriptCore/wtf/FastMalloc.cpp b/JavaScriptCore/wtf/FastMalloc.cpp
index 88c10ca..bcac242 100644
--- a/JavaScriptCore/wtf/FastMalloc.cpp
+++ b/JavaScriptCore/wtf/FastMalloc.cpp
@@ -94,7 +94,7 @@
#define FORCE_SYSTEM_MALLOC 1
#endif
-#define TCMALLOC_TRACK_DECOMMITED_SPANS (HAVE(VIRTUALALLOC))
+#define TCMALLOC_TRACK_DECOMMITED_SPANS (HAVE(VIRTUALALLOC) || HAVE(MADV_FREE_REUSE))
#ifndef NDEBUG
namespace WTF {
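
Note: enabling TCMALLOC_TRACK_DECOMMITED_SPANS for HAVE(MADV_FREE_REUSE) matters because Darwin's reuse advice is meant to be balanced: pages released with MADV_FREE_REUSABLE are expected to be reclaimed with MADV_FREE_REUSE before they are handed out again, so the page heap has to remember which spans are currently decommitted. A rough sketch of what madvise-based release/commit can look like (illustrative helper names, not the actual TCMalloc_SystemRelease/TCMalloc_SystemCommit bodies):

    #include <errno.h>
    #include <sys/mman.h>

    // Illustrative decommit: mark the pages reusable so the kernel may
    // reclaim them while the address range stays mapped.
    static void releasePages(void* start, size_t length)
    {
        while (madvise(start, length, MADV_FREE_REUSABLE) == -1 && errno == EAGAIN) { }
    }

    // Illustrative recommit: take the pages back before the span is used
    // again. Skipping or double-issuing this breaks the kernel's dirty-page
    // accounting, which is why decommitted spans must now be tracked.
    static void recommitPages(void* start, size_t length)
    {
        while (madvise(start, length, MADV_FREE_REUSE) == -1 && errno == EAGAIN) { }
    }
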
@@ -321,9 +321,11 @@ namespace WTF {
#define CHECK_CONDITION ASSERT
#if PLATFORM(DARWIN)
+class Span;
+class TCMalloc_Central_FreeListPadded;
class TCMalloc_PageHeap;
class TCMalloc_ThreadCache;
-class TCMalloc_Central_FreeListPadded;
+template <typename T> class PageHeapAllocator;
class FastMallocZone {
public:
@@ -339,7 +341,7 @@ public:
static void statistics(malloc_zone_t*, malloc_statistics_t* stats) { memset(stats, 0, sizeof(malloc_statistics_t)); }
private:
- FastMallocZone(TCMalloc_PageHeap*, TCMalloc_ThreadCache**, TCMalloc_Central_FreeListPadded*);
+ FastMallocZone(TCMalloc_PageHeap*, TCMalloc_ThreadCache**, TCMalloc_Central_FreeListPadded*, PageHeapAllocator<Span>*, PageHeapAllocator<TCMalloc_ThreadCache>*);
static size_t size(malloc_zone_t*, const void*);
static void* zoneMalloc(malloc_zone_t*, size_t);
static void* zoneCalloc(malloc_zone_t*, size_t numItems, size_t size);
@@ -352,6 +354,8 @@ private:
TCMalloc_PageHeap* m_pageHeap;
TCMalloc_ThreadCache** m_threadHeaps;
TCMalloc_Central_FreeListPadded* m_centralCaches;
+ PageHeapAllocator<Span>* m_spanAllocator;
+ PageHeapAllocator<TCMalloc_ThreadCache>* m_pageHeapAllocator;
};
#endif
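
Note: the two new allocator pointers give the zone enumerator access to the metadata regions (Spans and thread caches) that previously went unreported. For context, FastMallocZone's static members map onto Darwin's malloc zone function table; abbreviated from <malloc/malloc.h> (elisions mine):

    typedef struct _malloc_zone_t {
        /* ... reserved fields ... */
        size_t (*size)(struct _malloc_zone_t* zone, const void* ptr);
        void* (*malloc)(struct _malloc_zone_t* zone, size_t size);
        void* (*calloc)(struct _malloc_zone_t* zone, size_t num_items, size_t size);
        void* (*valloc)(struct _malloc_zone_t* zone, size_t size);
        void (*free)(struct _malloc_zone_t* zone, void* ptr);
        void* (*realloc)(struct _malloc_zone_t* zone, void* ptr, size_t size);
        void (*destroy)(struct _malloc_zone_t* zone);
        const char* zone_name;
        /* ... batch allocation hooks ... */
        struct malloc_introspection_t* introspect;
        unsigned version;
        /* later zone versions append further hooks here */
    } malloc_zone_t;
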
@@ -820,6 +824,9 @@ class PageHeapAllocator {
char* free_area_;
size_t free_avail_;
+ // Linked list of all regions allocated by this allocator
+ void* allocated_regions_;
+
// Free list of already carved objects
void* free_list_;
@@ -830,6 +837,7 @@ class PageHeapAllocator {
void Init() {
ASSERT(kAlignedSize <= kAllocIncrement);
inuse_ = 0;
+ allocated_regions_ = 0;
free_area_ = NULL;
free_avail_ = 0;
free_list_ = NULL;
@@ -844,9 +852,14 @@ class PageHeapAllocator {
} else {
if (free_avail_ < kAlignedSize) {
// Need more room
- free_area_ = reinterpret_cast<char*>(MetaDataAlloc(kAllocIncrement));
- if (free_area_ == NULL) CRASH();
- free_avail_ = kAllocIncrement;
+ char* new_allocation = reinterpret_cast<char*>(MetaDataAlloc(kAllocIncrement));
+ if (!new_allocation)
+ CRASH();
+
+ *(void**)new_allocation = allocated_regions_;
+ allocated_regions_ = new_allocation;
+ free_area_ = new_allocation + kAlignedSize;
+ free_avail_ = kAllocIncrement - kAlignedSize;
}
result = free_area_;
free_area_ += kAlignedSize;
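
Note: the new bookkeeping threads every kAllocIncrement-sized metadata region through its own first aligned slot, giving the allocator a zero-footprint list of regions for the heap enumerator to report later. A minimal standalone sketch of the pattern (hypothetical BumpArena type; the real allocator also maintains a free list of carved objects):

    #include <cstdlib>
    #include <cstring>

    struct BumpArena {
        static const size_t kIncrement = 32 * 1024; // grab memory in 32KB chunks
        static const size_t kAligned = sizeof(void*);

        void* regions;   // head of the intrusive region list
        char* freeArea;
        size_t freeAvail;

        BumpArena() : regions(0), freeArea(0), freeAvail(0) { }

        // Assumes bytes <= kIncrement - kAligned, as the real allocator
        // asserts for its fixed object size.
        void* allocate(size_t bytes)
        {
            if (freeAvail < bytes) {
                char* region = static_cast<char*>(malloc(kIncrement));
                if (!region)
                    abort();
                memcpy(region, &regions, sizeof(void*)); // first word links to the old head
                regions = region;
                freeArea = region + kAligned;            // carve objects after the link word
                freeAvail = kIncrement - kAligned;
            }
            void* result = freeArea;
            freeArea += bytes;
            freeAvail -= bytes;
            return result;
        }
    };
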
@@ -863,6 +876,18 @@ class PageHeapAllocator {
}
int inuse() const { return inuse_; }
+
+#if defined(WTF_CHANGES) && PLATFORM(DARWIN)
+ template <class Recorder>
+ void recordAdministrativeRegions(Recorder& recorder, const RemoteMemoryReader& reader)
+ {
+ vm_address_t adminAllocation = reinterpret_cast<vm_address_t>(allocated_regions_);
+ while (adminAllocation) {
+ recorder.recordRegion(adminAllocation, kAllocIncrement);
+ adminAllocation = *reader(reinterpret_cast<vm_address_t*>(adminAllocation));
+ }
+ }
+#endif
};
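
Note: recordAdministrativeRegions walks that list from the analyzing process, mapping each node through the RemoteMemoryReader before dereferencing it. In-process, where the reader is effectively an identity mapping, the walk reduces to following first-word links; continuing the hypothetical BumpArena sketch above:

    size_t countRegions(const BumpArena& arena)
    {
        size_t count = 0;
        void* region = arena.regions;
        while (region) {
            ++count;
            void* next;
            memcpy(&next, region, sizeof(void*)); // the first word is the next-region link
            region = next;
        }
        return count;
    }
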
// -------------------------------------------------------------------------
@@ -1378,8 +1403,14 @@ static ALWAYS_INLINE void mergeDecommittedStates(Span*, Span*) { }
#else
static ALWAYS_INLINE void mergeDecommittedStates(Span* destination, Span* other)
{
- if (other->decommitted)
+ if (destination->decommitted && !other->decommitted) {
+ TCMalloc_SystemRelease(reinterpret_cast<void*>(other->start << kPageShift),
+ static_cast<size_t>(other->length << kPageShift));
+ } else if (other->decommitted && !destination->decommitted) {
+ TCMalloc_SystemRelease(reinterpret_cast<void*>(destination->start << kPageShift),
+ static_cast<size_t>(destination->length << kPageShift));
destination->decommitted = true;
+ }
}
#endif
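
Note: the old code only copied the decommitted flag, so merging a committed span with a decommitted one could leave a span whose flag no longer described all of its pages. The fix releases the committed half whenever the two halves disagree, preserving the invariant that the flag is uniform across the merged span. Schematically (hypothetical types; release stands in for TCMalloc_SystemRelease):

    struct SpanState { bool decommitted; };

    // After merging, destination's flag must describe every page it now owns:
    //   both committed or both decommitted -> nothing to do
    //   destination decommitted, other committed -> release other's pages
    //   destination committed, other decommitted -> release destination's pages, set flag
    static void mergeStates(SpanState& destination, SpanState& other, void (*release)(SpanState&))
    {
        if (destination.decommitted && !other.decommitted)
            release(other);
        else if (other.decommitted && !destination.decommitted) {
            release(destination);
            destination.decommitted = true;
        }
    }
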
@@ -3571,7 +3602,7 @@ extern "C" struct mallinfo mallinfo(void) {
#if defined(__GLIBC__)
extern "C" {
-# if defined(__GNUC__) && !defined(__MACH__) && defined(HAVE___ATTRIBUTE__)
+#if COMPILER(GCC) && !defined(__MACH__) && defined(HAVE___ATTRIBUTE__)
// Potentially faster variants that use the gcc alias extension.
// Mach-O (Darwin) does not support weak aliases, hence the __MACH__ check.
# define ALIAS(x) __attribute__ ((weak, alias (x)))
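
Note: with the alias in place, the public C entry points bind directly to the tcmalloc implementations at link time instead of going through a forwarding call. A minimal illustration of the extension (hypothetical function names; ELF targets only, which is why Mach-O is excluded):

    #include <cstddef>
    #include <cstdlib>

    extern "C" void* my_alloc(size_t size) { return calloc(1, size); }

    // my_malloc becomes a weak alias: calls resolve straight to my_alloc,
    // and a strong definition elsewhere can still override it.
    extern "C" void* my_malloc(size_t) __attribute__((weak, alias("my_alloc")));
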
@@ -3630,6 +3661,7 @@ public:
void visit(void* ptr) { m_freeObjects.add(ptr); }
bool isFreeObject(void* ptr) const { return m_freeObjects.contains(ptr); }
+ bool isFreeObject(vm_address_t ptr) const { return isFreeObject(reinterpret_cast<void*>(ptr)); }
size_t freeObjectCount() const { return m_freeObjects.size(); }
void findFreeObjects(TCMalloc_ThreadCache* threadCache)
@@ -3680,7 +3712,9 @@ class PageMapMemoryUsageRecorder {
vm_range_recorder_t* m_recorder;
const RemoteMemoryReader& m_reader;
const FreeObjectFinder& m_freeObjectFinder;
- mutable HashSet<void*> m_seenPointers;
+
+ HashSet<void*> m_seenPointers;
+ Vector<Span*> m_coalescedSpans;
public:
PageMapMemoryUsageRecorder(task_t task, void* context, unsigned typeMask, vm_range_recorder_t* recorder, const RemoteMemoryReader& reader, const FreeObjectFinder& freeObjectFinder)
@@ -3692,51 +3726,133 @@ public:
, m_freeObjectFinder(freeObjectFinder)
{ }
- int visit(void* ptr) const
+ ~PageMapMemoryUsageRecorder()
+ {
+ ASSERT(!m_coalescedSpans.size());
+ }
+
+ void recordPendingRegions()
+ {
+ Span* lastSpan = m_coalescedSpans[m_coalescedSpans.size() - 1];
+ vm_range_t ptrRange = { m_coalescedSpans[0]->start << kPageShift, 0 };
+ ptrRange.size = (lastSpan->start << kPageShift) - ptrRange.address + (lastSpan->length * kPageSize);
+
+ // Mark the memory region the spans represent as a candidate for containing pointers
+ if (m_typeMask & MALLOC_PTR_REGION_RANGE_TYPE)
+ (*m_recorder)(m_task, m_context, MALLOC_PTR_REGION_RANGE_TYPE, &ptrRange, 1);
+
+ if (!(m_typeMask & MALLOC_PTR_IN_USE_RANGE_TYPE)) {
+ m_coalescedSpans.clear();
+ return;
+ }
+
+ Vector<vm_range_t, 1024> allocatedPointers;
+ for (size_t i = 0; i < m_coalescedSpans.size(); ++i) {
+ Span *theSpan = m_coalescedSpans[i];
+ if (theSpan->free)
+ continue;
+
+ vm_address_t spanStartAddress = theSpan->start << kPageShift;
+ vm_size_t spanSizeInBytes = theSpan->length * kPageSize;
+
+ if (!theSpan->sizeclass) {
+ // If it's an allocated large object span, mark it as in use
+ if (!m_freeObjectFinder.isFreeObject(spanStartAddress))
+ allocatedPointers.append((vm_range_t){spanStartAddress, spanSizeInBytes});
+ } else {
+ const size_t objectSize = ByteSizeForClass(theSpan->sizeclass);
+
+ // Mark each allocated small object within the span as in use
+ const vm_address_t endOfSpan = spanStartAddress + spanSizeInBytes;
+ for (vm_address_t object = spanStartAddress; object + objectSize <= endOfSpan; object += objectSize) {
+ if (!m_freeObjectFinder.isFreeObject(object))
+ allocatedPointers.append((vm_range_t){object, objectSize});
+ }
+ }
+ }
+
+ (*m_recorder)(m_task, m_context, MALLOC_PTR_IN_USE_RANGE_TYPE, allocatedPointers.data(), allocatedPointers.size());
+
+ m_coalescedSpans.clear();
+ }
+
+ int visit(void* ptr)
{
if (!ptr)
return 1;
Span* span = m_reader(reinterpret_cast<Span*>(ptr));
+ if (!span->start)
+ return 1;
+
if (m_seenPointers.contains(ptr))
return span->length;
m_seenPointers.add(ptr);
- // Mark the memory used for the Span itself as an administrative region
- vm_range_t ptrRange = { reinterpret_cast<vm_address_t>(ptr), sizeof(Span) };
- if (m_typeMask & (MALLOC_PTR_REGION_RANGE_TYPE | MALLOC_ADMIN_REGION_RANGE_TYPE))
- (*m_recorder)(m_task, m_context, MALLOC_ADMIN_REGION_RANGE_TYPE, &ptrRange, 1);
+ if (!m_coalescedSpans.size()) {
+ m_coalescedSpans.append(span);
+ return span->length;
+ }
- ptrRange.address = span->start << kPageShift;
- ptrRange.size = span->length * kPageSize;
+ Span* previousSpan = m_coalescedSpans[m_coalescedSpans.size() - 1];
+ vm_address_t previousSpanStartAddress = previousSpan->start << kPageShift;
+ vm_size_t previousSpanSizeInBytes = previousSpan->length * kPageSize;
- // Mark the memory region the span represents as candidates for containing pointers
- if (m_typeMask & (MALLOC_PTR_REGION_RANGE_TYPE | MALLOC_ADMIN_REGION_RANGE_TYPE))
- (*m_recorder)(m_task, m_context, MALLOC_PTR_REGION_RANGE_TYPE, &ptrRange, 1);
+ // If the new span is adjacent to the previous span, do nothing for now.
+ vm_address_t spanStartAddress = span->start << kPageShift;
+ if (spanStartAddress == previousSpanStartAddress + previousSpanSizeInBytes) {
+ m_coalescedSpans.append(span);
+ return span->length;
+ }
- if (!span->free && (m_typeMask & MALLOC_PTR_IN_USE_RANGE_TYPE)) {
- // If it's an allocated large object span, mark it as in use
- if (span->sizeclass == 0 && !m_freeObjectFinder.isFreeObject(reinterpret_cast<void*>(ptrRange.address)))
- (*m_recorder)(m_task, m_context, MALLOC_PTR_IN_USE_RANGE_TYPE, &ptrRange, 1);
- else if (span->sizeclass) {
- const size_t byteSize = ByteSizeForClass(span->sizeclass);
- unsigned totalObjects = (span->length << kPageShift) / byteSize;
- ASSERT(span->refcount <= totalObjects);
- char* ptr = reinterpret_cast<char*>(span->start << kPageShift);
+ // New span is not adjacent to previous span, so record the spans coalesced so far.
+ recordPendingRegions();
+ m_coalescedSpans.append(span);
- // Mark each allocated small object within the span as in use
- for (unsigned i = 0; i < totalObjects; i++) {
- char* thisObject = ptr + (i * byteSize);
- if (m_freeObjectFinder.isFreeObject(thisObject))
- continue;
+ return span->length;
+ }
+};
- vm_range_t objectRange = { reinterpret_cast<vm_address_t>(thisObject), byteSize };
- (*m_recorder)(m_task, m_context, MALLOC_PTR_IN_USE_RANGE_TYPE, &objectRange, 1);
- }
- }
+class AdminRegionRecorder {
+ task_t m_task;
+ void* m_context;
+ unsigned m_typeMask;
+ vm_range_recorder_t* m_recorder;
+ const RemoteMemoryReader& m_reader;
+
+ Vector<vm_range_t, 1024> m_pendingRegions;
+
+public:
+ AdminRegionRecorder(task_t task, void* context, unsigned typeMask, vm_range_recorder_t* recorder, const RemoteMemoryReader& reader)
+ : m_task(task)
+ , m_context(context)
+ , m_typeMask(typeMask)
+ , m_recorder(recorder)
+ , m_reader(reader)
+ { }
+
+ void recordRegion(vm_address_t ptr, size_t size)
+ {
+ if (m_typeMask & MALLOC_ADMIN_REGION_RANGE_TYPE)
+ m_pendingRegions.append((vm_range_t){ ptr, size });
+ }
+
+ void visit(void *ptr, size_t size)
+ {
+ recordRegion(reinterpret_cast<vm_address_t>(ptr), size);
+ }
+
+ void recordPendingRegions()
+ {
+ if (m_pendingRegions.size()) {
+ (*m_recorder)(m_task, m_context, MALLOC_ADMIN_REGION_RANGE_TYPE, m_pendingRegions.data(), m_pendingRegions.size());
+ m_pendingRegions.clear();
}
+ }
- return span->length;
+ ~AdminRegionRecorder()
+ {
+ ASSERT(!m_pendingRegions.size());
}
};
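
Note: instead of issuing one recorder callback per span, the recorder now buffers runs of adjacent spans and flushes each run as a single coalesced range (Span metadata itself is reported separately by AdminRegionRecorder). The buffering pattern, reduced to its essentials (hypothetical types):

    #include <cstdint>
    #include <vector>

    struct Range { uintptr_t start; size_t length; };

    struct Coalescer {
        std::vector<Range> pending;

        void flush()
        {
            if (pending.empty())
                return;
            // A single report covering pending.front().start through the end
            // of pending.back() would be emitted here.
            pending.clear();
        }

        void visit(const Range& range)
        {
            if (!pending.empty()) {
                const Range& last = pending.back();
                if (range.start != last.start + last.length)
                    flush(); // gap found: emit the buffered run first
            }
            pending.push_back(range);
        }
    };
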
@@ -3759,10 +3875,22 @@ kern_return_t FastMallocZone::enumerate(task_t task, void* context, unsigned typ
TCMalloc_PageHeap::PageMap* pageMap = &pageHeap->pagemap_;
PageMapFreeObjectFinder pageMapFinder(memoryReader, finder);
- pageMap->visit(pageMapFinder, memoryReader);
+ pageMap->visitValues(pageMapFinder, memoryReader);
PageMapMemoryUsageRecorder usageRecorder(task, context, typeMask, recorder, memoryReader, finder);
- pageMap->visit(usageRecorder, memoryReader);
+ pageMap->visitValues(usageRecorder, memoryReader);
+ usageRecorder.recordPendingRegions();
+
+ AdminRegionRecorder adminRegionRecorder(task, context, typeMask, recorder, memoryReader);
+ pageMap->visitAllocations(adminRegionRecorder, memoryReader);
+
+ PageHeapAllocator<Span>* spanAllocator = memoryReader(mzone->m_spanAllocator);
+ PageHeapAllocator<TCMalloc_ThreadCache>* pageHeapAllocator = memoryReader(mzone->m_pageHeapAllocator);
+
+ spanAllocator->recordAdministrativeRegions(adminRegionRecorder, memoryReader);
+ pageHeapAllocator->recordAdministrativeRegions(adminRegionRecorder, memoryReader);
+
+ adminRegionRecorder.recordPendingRegions();
return 0;
}
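
Note: this enumerator is invoked through the zone's introspection table by out-of-process heap tools, which supply their own memory_reader_t and vm_range_recorder_t. An in-process usage sketch (identity reader; typedef signatures from <malloc/malloc.h>):

    #include <malloc/malloc.h>
    #include <mach/mach.h>
    #include <cstdio>

    static kern_return_t identityReader(task_t, vm_address_t address, vm_size_t, void** local)
    {
        *local = reinterpret_cast<void*>(address); // same task: no copying needed
        return KERN_SUCCESS;
    }

    static void printRanges(task_t, void*, unsigned type, vm_range_t* ranges, unsigned count)
    {
        for (unsigned i = 0; i < count; ++i)
            printf("type %u: %lx + %lu\n", type,
                   (unsigned long)ranges[i].address, (unsigned long)ranges[i].size);
    }

    // For a zone pointer 'zone' in the current task:
    //   zone->introspect->enumerator(mach_task_self(), 0,
    //       MALLOC_PTR_IN_USE_RANGE_TYPE,
    //       reinterpret_cast<vm_address_t>(zone), identityReader, printRanges);
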
@@ -3803,15 +3931,24 @@ void* FastMallocZone::zoneRealloc(malloc_zone_t*, void*, size_t)
extern "C" {
malloc_introspection_t jscore_fastmalloc_introspection = { &FastMallocZone::enumerate, &FastMallocZone::goodSize, &FastMallocZone::check, &FastMallocZone::print,
- &FastMallocZone::log, &FastMallocZone::forceLock, &FastMallocZone::forceUnlock, &FastMallocZone::statistics };
+ &FastMallocZone::log, &FastMallocZone::forceLock, &FastMallocZone::forceUnlock, &FastMallocZone::statistics
+
+#if !defined(BUILDING_ON_TIGER) && !defined(BUILDING_ON_LEOPARD)
+ , 0 // zone_locked will not be called on the zone unless it advertises itself as version five or higher.
+#endif
+
+ };
}
-FastMallocZone::FastMallocZone(TCMalloc_PageHeap* pageHeap, TCMalloc_ThreadCache** threadHeaps, TCMalloc_Central_FreeListPadded* centralCaches)
+FastMallocZone::FastMallocZone(TCMalloc_PageHeap* pageHeap, TCMalloc_ThreadCache** threadHeaps, TCMalloc_Central_FreeListPadded* centralCaches, PageHeapAllocator<Span>* spanAllocator, PageHeapAllocator<TCMalloc_ThreadCache>* pageHeapAllocator)
: m_pageHeap(pageHeap)
, m_threadHeaps(threadHeaps)
, m_centralCaches(centralCaches)
+ , m_spanAllocator(spanAllocator)
+ , m_pageHeapAllocator(pageHeapAllocator)
{
memset(&m_zone, 0, sizeof(m_zone));
+ m_zone.version = 4;
m_zone.zone_name = "JavaScriptCore FastMalloc";
m_zone.size = &FastMallocZone::size;
m_zone.malloc = &FastMallocZone::zoneMalloc;
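
Note: advertising version 4 is what makes omitting zone_locked safe: a well-behaved client checks the zone version before touching fields that later versions append to the introspection table. A sketch of the guard a consumer would perform (field names from <malloc/malloc.h>):

    #include <malloc/malloc.h>

    static bool zoneIsLocked(malloc_zone_t* zone)
    {
        if (zone->version < 5)
            return false; // pre-version-5 zones may not have the field at all
        return zone->introspect->zone_locked && zone->introspect->zone_locked(zone);
    }
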
@@ -3827,7 +3964,7 @@ FastMallocZone::FastMallocZone(TCMalloc_PageHeap* pageHeap, TCMalloc_ThreadCache
void FastMallocZone::init()
{
- static FastMallocZone zone(pageheap, &thread_heaps, static_cast<TCMalloc_Central_FreeListPadded*>(central_cache));
+ static FastMallocZone zone(pageheap, &thread_heaps, static_cast<TCMalloc_Central_FreeListPadded*>(central_cache), &span_allocator, &threadheap_allocator);
}
#endif
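
Note: for a zone like this to be visible to the introspection machinery at all, it must be registered with the system allocator; the usual call is malloc_zone_register from <malloc/malloc.h>. Whether FastMallocZone's constructor does exactly this is outside the lines shown above, so treat this as a hedged sketch:

    #include <malloc/malloc.h>

    // Once registered, the zone appears in malloc_get_all_zones() and heap
    // tools will call its enumerator.
    static void installZone(malloc_zone_t* zone)
    {
        malloc_zone_register(zone);
    }
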