path: root/mm/vmscan.c
author     Christoph Lameter <clameter@engr.sgi.com>   2006-02-01 03:05:32 -0800
committer  Linus Torvalds <torvalds@g5.osdl.org>       2006-02-01 08:53:16 -0800
commit     a92f71263af9d0ab77c260f709c0c079656221aa (patch)
tree       93aa7bf968ba108cc893b0dcc4de36fbf3b733bf /mm/vmscan.c
parent     aa3f18b3391ac305baa01faead3fdf9147daf54b (diff)
[PATCH] zone_reclaim: partial scans instead of full scan
Instead of scanning all the pages in a zone, imitate real swap and scan only a portion of the pages, gradually scanning more if we do not free up enough pages. This avoids a zone suddenly losing all unused pagecache pages (we may after all access some of these again, so they deserve another chance), but it still frees up large chunks of memory if a zone only contains unused pagecache pages.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
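The scanning policy described above can be illustrated with a small user-space sketch. This is illustrative only: the helper fake_shrink() and all page counts are invented for the example and are not part of the patch; only the loop shape mirrors the change to zone_reclaim().

#include <stdio.h>

#define ZONE_RECLAIM_PRIORITY 4

/* Hypothetical stand-in for shrink_zone(): pretend one in eight of the
 * pages scanned at this priority turns out to be freeable. */
static unsigned long fake_shrink(unsigned long zone_pages, int priority)
{
	unsigned long scanned = zone_pages >> priority;

	return scanned / 8;
}

int main(void)
{
	unsigned long zone_pages = 1UL << 20;	/* a zone of ~1M pages */
	unsigned long nr_pages = 1UL << 15;	/* how many we want freed */
	unsigned long reclaimed = 0;
	int priority = ZONE_RECLAIM_PRIORITY + 1;

	/* Same shape as the loop added by the patch: start with a small
	 * window (1/16th of the zone) and double it each pass until
	 * enough pages are freed or priority 0 (a full scan) is reached. */
	do {
		priority--;
		reclaimed += fake_shrink(zone_pages, priority);
		printf("priority %d: window 1/%lu of zone, freed so far %lu\n",
		       priority, 1UL << priority, reclaimed);
	} while (reclaimed < nr_pages && priority > 0);

	return 0;
}

With these invented numbers the loop stops after three passes (priorities 4, 3 and 2), having freed more than the requested nr_pages without touching the rest of the zone.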
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--  mm/vmscan.c  20
1 file changed, 18 insertions(+), 2 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 8277f93..f8b94ea 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1596,6 +1596,14 @@ int zone_reclaim_mode __read_mostly;
  * Mininum time between zone reclaim scans
  */
 #define ZONE_RECLAIM_INTERVAL 30*HZ
+
+/*
+ * Priority for ZONE_RECLAIM. This determines the fraction of pages
+ * of a node considered for each zone_reclaim. 4 scans 1/16th of
+ * a zone.
+ */
+#define ZONE_RECLAIM_PRIORITY 4
+
 /*
  * Try to free up some pages from this zone through reclaim.
  */
@@ -1626,7 +1634,7 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 	sc.may_swap = 0;
 	sc.nr_scanned = 0;
 	sc.nr_reclaimed = 0;
-	sc.priority = 0;
+	sc.priority = ZONE_RECLAIM_PRIORITY + 1;
 	sc.nr_mapped = read_page_state(nr_mapped);
 	sc.gfp_mask = gfp_mask;
@@ -1643,7 +1651,15 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 	reclaim_state.reclaimed_slab = 0;
 	p->reclaim_state = &reclaim_state;

-	shrink_zone(zone, &sc);
+	/*
+	 * Free memory by calling shrink zone with increasing priorities
+	 * until we have enough memory freed.
+	 */
+	do {
+		sc.priority--;
+		shrink_zone(zone, &sc);
+
+	} while (sc.nr_reclaimed < nr_pages && sc.priority > 0);

 	p->reclaim_state = NULL;
 	current->flags &= ~PF_MEMALLOC;
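For context on why a larger sc.priority value means a smaller scan: shrink_zone() derives its per-pass scan window by right-shifting the zone's LRU page counts by the priority, so priority 4 looks at roughly 1/16th of the zone and priority 0 at all of it. The sketch below only illustrates that shift relationship; struct toy_zone and pages_to_scan() are invented for the example and are not the kernel's data structures or code.

#include <stdio.h>

/* Toy structures; the real struct zone and struct scan_control in
 * mm/vmscan.c of this era carry much more state. */
struct toy_zone {
	unsigned long nr_active;
	unsigned long nr_inactive;
};

/* A right shift by the priority selects roughly 1/2^priority of each
 * LRU list, which is why priority 0 means "scan the whole zone" and
 * ZONE_RECLAIM_PRIORITY (4) means roughly 1/16th of it. */
static unsigned long pages_to_scan(const struct toy_zone *zone, int priority)
{
	return (zone->nr_active >> priority) +
	       (zone->nr_inactive >> priority);
}

int main(void)
{
	struct toy_zone zone = { .nr_active = 1UL << 19, .nr_inactive = 1UL << 19 };
	int priority;

	for (priority = 4; priority >= 0; priority--)
		printf("priority %d -> %lu of %lu pages scanned\n",
		       priority, pages_to_scan(&zone, priority),
		       zone.nr_active + zone.nr_inactive);

	return 0;
}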