authorMel Gorman <mel@csn.ul.ie>2007-10-16 01:25:52 -0700
committerLinus Torvalds <torvalds@woody.linux-foundation.org>2007-10-16 09:43:00 -0700
commite12ba74d8ff3e2f73a583500d7095e406df4d093 (patch)
treea0d3385b65f0b3e1e00b0bbf11b75e7538a93edb /mm
parentc361be55b3128474aa66d31092db330b07539103 (diff)
Group short-lived and reclaimable kernel allocations
This patch marks a number of allocations that are either short-lived, such as network buffers, or reclaimable, such as inode allocations. When something like updatedb is called, long-lived and unmovable kernel allocations tend to be spread throughout the address space, which increases fragmentation.

This patch groups these allocations together as much as possible by adding a new MIGRATE_TYPE. The MIGRATE_RECLAIMABLE type is for allocations that can be reclaimed on demand, but not moved; i.e. they can be migrated by deleting them and re-reading the information from elsewhere.

Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Cc: Andy Whitcroft <apw@shadowen.org>
Cc: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
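For illustration (not part of the commit): the first page_alloc.c hunk below encodes the two GFP mobility bits into one of three migrate types, with MIGRATE_UNMOVABLE = 0, MIGRATE_RECLAIMABLE = 1 and MIGRATE_MOVABLE = 2. Here is a minimal standalone model of that encoding; the bit values are made up for the sketch and are not the kernel's real __GFP_* values:

/*
 * Standalone model of gfpflags_to_migratetype() from this patch.
 * Bit values are illustrative only; the kernel defines the real
 * __GFP_RECLAIMABLE and __GFP_MOVABLE bits in include/linux/gfp.h.
 */
#include <stdio.h>

#define __GFP_RECLAIMABLE 0x1u	/* illustrative bit */
#define __GFP_MOVABLE	  0x2u	/* illustrative bit */

enum {
	MIGRATE_UNMOVABLE,	/* 0: neither bit set */
	MIGRATE_RECLAIMABLE,	/* 1: __GFP_RECLAIMABLE set */
	MIGRATE_MOVABLE,	/* 2: __GFP_MOVABLE set */
	MIGRATE_TYPES
};

static int gfpflags_to_migratetype(unsigned int gfp_flags)
{
	/* (movable << 1) | reclaimable; setting both bits is invalid */
	return (((gfp_flags & __GFP_MOVABLE) != 0) << 1) |
	       ((gfp_flags & __GFP_RECLAIMABLE) != 0);
}

int main(void)
{
	printf("plain       -> %d\n", gfpflags_to_migratetype(0));
	printf("reclaimable -> %d\n",
	       gfpflags_to_migratetype(__GFP_RECLAIMABLE));
	printf("movable     -> %d\n",
	       gfpflags_to_migratetype(__GFP_MOVABLE));
	return 0;
}

This prints 0, 1 and 2 respectively, matching the enum order; the WARN_ON in the hunk guards against a caller setting both bits, which has no valid encoding.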
Diffstat (limited to 'mm')
-rw-r--r--mm/page_alloc.c10
-rw-r--r--mm/shmem.c4
-rw-r--r--mm/slab.c2
-rw-r--r--mm/slub.c3
4 files changed, 14 insertions, 5 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d575a3e..29f4de1 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -172,7 +172,10 @@ static void set_pageblock_migratetype(struct page *page, int migratetype)
static inline int gfpflags_to_migratetype(gfp_t gfp_flags)
{
- return ((gfp_flags & __GFP_MOVABLE) != 0);
+ WARN_ON((gfp_flags & GFP_MOVABLE_MASK) == GFP_MOVABLE_MASK);
+
+ return (((gfp_flags & __GFP_MOVABLE) != 0) << 1) |
+ ((gfp_flags & __GFP_RECLAIMABLE) != 0);
}
#else
@@ -676,8 +679,9 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
* the free lists for the desirable migrate type are depleted
*/
static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
- [MIGRATE_UNMOVABLE] = { MIGRATE_MOVABLE },
- [MIGRATE_MOVABLE] = { MIGRATE_UNMOVABLE },
+ [MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE },
+ [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE },
+ [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE },
};
/*
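Again for illustration only: when the free list for the requested migrate type is empty, the allocator walks the fallbacks table above in order. The standalone model below is a simplification of that fallback walk, loosely based on the fallback path in mm/page_alloc.c, not a copy of the kernel code:

/*
 * Simplified model of consulting the fallbacks table: try the
 * requested migrate type first, then its fallbacks in table order.
 */
#include <stdio.h>

enum { MIGRATE_UNMOVABLE, MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_TYPES };

static const int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES - 1] = {
	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE },
	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE },
	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE },
};

/* Pretend free-list state: only MIGRATE_MOVABLE has free pages. */
static int freelist_empty[MIGRATE_TYPES] = { 1, 1, 0 };

static int pick_migratetype(int start)
{
	int i;

	if (!freelist_empty[start])
		return start;
	for (i = 0; i < MIGRATE_TYPES - 1; i++)
		if (!freelist_empty[fallbacks[start][i]])
			return fallbacks[start][i];
	return -1;	/* allocation would fail */
}

int main(void)
{
	/* A reclaimable request falls back to UNMOVABLE, then MOVABLE. */
	printf("served from migratetype %d\n",
	       pick_migratetype(MIGRATE_RECLAIMABLE));
	return 0;
}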
diff --git a/mm/shmem.c b/mm/shmem.c
index 855b93b..76ecbac 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -95,9 +95,9 @@ static inline struct page *shmem_dir_alloc(gfp_t gfp_mask)
* BLOCKS_PER_PAGE on indirect pages, assume PAGE_CACHE_SIZE:
* might be reconsidered if it ever diverges from PAGE_SIZE.
*
- * __GFP_MOVABLE is masked out as swap vectors cannot move
+ * Mobility flags are masked out as swap vectors cannot move
*/
- return alloc_pages((gfp_mask & ~__GFP_MOVABLE) | __GFP_ZERO,
+ return alloc_pages((gfp_mask & ~GFP_MOVABLE_MASK) | __GFP_ZERO,
PAGE_CACHE_SHIFT-PAGE_SHIFT);
}
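A note on GFP_MOVABLE_MASK, which this patch introduces in include/linux/gfp.h and which therefore does not appear in the 'mm' diffstat above: to the best of my knowledge it covers both mobility bits, so clearing it makes gfpflags_to_migratetype() return MIGRATE_UNMOVABLE, which is what shmem's non-movable swap vector pages require:

/* Believed definition from this patch's gfp.h change (not shown in
 * the 'mm'-limited diff above): */
#define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE | __GFP_MOVABLE)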
diff --git a/mm/slab.c b/mm/slab.c
index 8fb56ae..e34bcb8 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1643,6 +1643,8 @@ static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
#endif
flags |= cachep->gfpflags;
+ if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
+ flags |= __GFP_RECLAIMABLE;
page = alloc_pages_node(nodeid, flags, cachep->gfporder);
if (!page)
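For context (not part of the diff): SLAB_RECLAIM_ACCOUNT is an existing cache-creation flag used by reclaimable caches such as the inode caches. With the slab.c hunk above, pages backing such caches are now also tagged __GFP_RECLAIMABLE. The kernel-context sketch below shows a hypothetical cache that would pick this up; struct example_obj and example_cachep are made-up names, and the five-argument kmem_cache_create() form is the one I believe kernels of this vintage use:

/*
 * Kernel-context sketch, not from this patch: a hypothetical
 * reclaimable cache. With SLAB_RECLAIM_ACCOUNT set, kmem_getpages()
 * above now ORs __GFP_RECLAIMABLE into the allocation flags, so the
 * slab's pages land in MIGRATE_RECLAIMABLE pageblocks.
 */
struct example_obj {
	int data;
};

static struct kmem_cache *example_cachep;

static int __init example_init(void)
{
	example_cachep = kmem_cache_create("example_cache",
					   sizeof(struct example_obj), 0,
					   SLAB_RECLAIM_ACCOUNT, NULL);
	return example_cachep ? 0 : -ENOMEM;
}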
diff --git a/mm/slub.c b/mm/slub.c
index 19d3202..a90c4ff 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1055,6 +1055,9 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
if (s->flags & SLAB_CACHE_DMA)
flags |= SLUB_DMA;
+ if (s->flags & SLAB_RECLAIM_ACCOUNT)
+ flags |= __GFP_RECLAIMABLE;
+
if (node == -1)
page = alloc_pages(flags, s->order);
else
page = alloc_pages_node(node, flags, s->order);
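Finally, an illustrative note (not part of the commit): the slub.c hunk mirrors the slab.c one, and callers that allocate pages directly rather than through a slab cache could request the same grouping by passing the bit themselves. A minimal kernel-context sketch:

/* Kernel-context sketch, not from this patch: request a single page
 * whose contents can be reclaimed and re-read on demand, so it is
 * grouped into a MIGRATE_RECLAIMABLE pageblock. */
struct page *page = alloc_pages(GFP_KERNEL | __GFP_RECLAIMABLE, 0);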