author		Marek Szyprowski <m.szyprowski@samsung.com>	2012-01-25 12:09:52 +0100
committer	Ziyan <jaraidaniel@gmail.com>	2016-04-03 14:56:06 +0200
commit		64fcbe4a21994ed867dbff7b18dd6172a23dc601 (patch)
tree		6c1ce0aff3fcec90e2fff19973bba83f66a75042 /mm
parent		b81bc2d2bbf06b1794be8720563dcd29c2b7ff3f (diff)
mm: extract reclaim code from __alloc_pages_direct_reclaim()
This patch extracts the common reclaim code from the
__alloc_pages_direct_reclaim() function into a separate function,
__perform_reclaim(), which can later be reused by alloc_contig_range().
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
Cc: Michal Nazarewicz <mina86@mina86.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Tested-by: Rob Clark <rob.clark@linaro.org>
Tested-by: Ohad Ben-Cohen <ohad@wizery.com>
Tested-by: Benjamin Gaignard <benjamin.gaignard@linaro.org>
Tested-by: Robert Nelson <robertcnelson@gmail.com>
Tested-by: Barry Song <Baohua.Song@csr.com>
Diffstat (limited to 'mm')
-rw-r--r--	mm/page_alloc.c	30
1 file changed, 21 insertions(+), 9 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 87961bf..3667c36 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2061,16 +2061,13 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 }
 #endif /* CONFIG_COMPACTION */
 
-/* The really slow allocator path where we enter direct reclaim */
-static inline struct page *
-__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
-	struct zonelist *zonelist, enum zone_type high_zoneidx,
-	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
-	int migratetype, unsigned long *did_some_progress)
+/* Perform direct synchronous page reclaim */
+static int
+__perform_reclaim(gfp_t gfp_mask, unsigned int order, struct zonelist *zonelist,
+		  nodemask_t *nodemask)
 {
-	struct page *page = NULL;
 	struct reclaim_state reclaim_state;
-	bool drained = false;
+	int progress;
 
 	cond_resched();
 
@@ -2081,7 +2078,7 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
 	reclaim_state.reclaimed_slab = 0;
 	current->reclaim_state = &reclaim_state;
 
-	*did_some_progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask);
+	progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask);
 
 	current->reclaim_state = NULL;
 	lockdep_clear_current_reclaim_state();
@@ -2089,6 +2086,21 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
 
 	cond_resched();
 
+	return progress;
+}
+
+/* The really slow allocator path where we enter direct reclaim */
+static inline struct page *
+__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
+	struct zonelist *zonelist, enum zone_type high_zoneidx,
+	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
+	int migratetype, unsigned long *did_some_progress)
+{
+	struct page *page = NULL;
+	bool drained = false;
+
+	*did_some_progress = __perform_reclaim(gfp_mask, order, zonelist,
+					       nodemask);
 	if (unlikely(!(*did_some_progress)))
 		return NULL;
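For reference, the point of the extraction is that __perform_reclaim() now
returns its progress count instead of writing through the caller's
did_some_progress pointer, which makes it callable outside the allocator
slow path. The sketch below shows how a follow-up patch in the CMA series
could reuse the helper before attempting a contiguous allocation; the
__reclaim_pages() wrapper, its watermark loop, and its signature are
illustrative assumptions, not code from this commit.

/*
 * Illustrative sketch only (assumed helper, not part of this commit):
 * loop on __perform_reclaim() until the zone's low watermark is met.
 * Assumes it lives in mm/page_alloc.c, where __perform_reclaim() is
 * visible; zone_watermark_ok(), low_wmark_pages(), gfp_zone() and
 * node_zonelist() are existing kernel interfaces of this era.
 */
static int __reclaim_pages(struct zone *zone, gfp_t gfp_mask)
{
	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
	struct zonelist *zonelist = node_zonelist(0, gfp_mask);
	int did_some_progress = 0;
	int order = 1;

	/* Reclaim until the zone satisfies its low watermark. */
	while (!zone_watermark_ok(zone, 0, low_wmark_pages(zone),
				  high_zoneidx, 0)) {
		did_some_progress = __perform_reclaim(gfp_mask, order,
						      zonelist, NULL);
		if (!did_some_progress)
			break;	/* no progress; avoid spinning forever */
	}

	return did_some_progress;
}

Passing a NULL nodemask should be valid here, since try_to_free_pages()
treats a NULL nodemask as placing no restriction on which nodes are
scanned.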