author | Andrea Arcangeli <aarcange@redhat.com> | 2011-01-13 15:47:04 -0800
---|---|---
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2011-01-13 17:32:44 -0800
commit | f000565adb770b14cebbafde0a4f3e61a3342a63 (patch) |
tree | ff2884da257e1df1b8cb4817d09c892d4f84727c /mm |
parent | cd7548ab360c462118568eebb8c6da3bc303b02e (diff) |
thp: set recommended min free kbytes
If transparent hugepage support is enabled, initialize min_free_kbytes to an
optimal value by default. This moves the hugeadm algorithm into the kernel.
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
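
As a rough illustration of the heuristic described above, here is a minimal
userspace sketch of the same arithmetic. The concrete inputs (pageblock_nr_pages
= 512, three populated zones, the amount of free lowmem) are assumptions for a
typical x86_64 box, not values taken from the patch:

```c
/*
 * Userspace sketch of the in-kernel heuristic, NOT the kernel code
 * itself.  All inputs are assumptions for illustration: x86_64 with
 * 4 KiB pages and 2 MiB pageblocks (pageblock_nr_pages = 512), three
 * populated zones (DMA, DMA32, Normal), MIGRATE_PCPTYPES = 3, and
 * ~2,000,000 free lowmem pages.
 */
#include <stdio.h>

#define PAGE_SHIFT		12
#define PAGEBLOCK_NR_PAGES	512UL
#define MIGRATE_PCPTYPES	3UL

int main(void)
{
	unsigned long nr_zones = 3;
	unsigned long nr_free_buffer_pages = 2000000;	/* ~7.6 GiB lowmem */
	unsigned long recommended_min;

	/* at least 2 free hugepages per zone for MIGRATE_RESERVE */
	recommended_min = PAGEBLOCK_NR_PAGES * nr_zones * 2;	/* 3072 */

	/* ~2 nearly-free pageblocks per pair of migratetypes for fallbacks */
	recommended_min += PAGEBLOCK_NR_PAGES * nr_zones *
			   MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;	/* +13824 */

	/* cap at 5% of lowmem */
	if (recommended_min > nr_free_buffer_pages / 20)
		recommended_min = nr_free_buffer_pages / 20;

	/* convert pages to kilobytes */
	recommended_min <<= PAGE_SHIFT - 10;

	printf("recommended min_free_kbytes = %lu\n", recommended_min);
	return 0;
}
```

On these assumed inputs the recommendation works out to 16896 pages, i.e.
67584 kB (about 66 MiB), well under the 5% lowmem cap.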
Diffstat (limited to 'mm')
-rw-r--r-- | mm/huge_memory.c | 52
1 file changed, 52 insertions, 0 deletions
```diff
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 7b55fe0..4ed97a2 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -85,6 +85,47 @@ struct khugepaged_scan {
 	.mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
 };
 
+
+static int set_recommended_min_free_kbytes(void)
+{
+	struct zone *zone;
+	int nr_zones = 0;
+	unsigned long recommended_min;
+	extern int min_free_kbytes;
+
+	if (!test_bit(TRANSPARENT_HUGEPAGE_FLAG,
+		      &transparent_hugepage_flags) &&
+	    !test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
+		      &transparent_hugepage_flags))
+		return 0;
+
+	for_each_populated_zone(zone)
+		nr_zones++;
+
+	/* Make sure at least 2 hugepages are free for MIGRATE_RESERVE */
+	recommended_min = pageblock_nr_pages * nr_zones * 2;
+
+	/*
+	 * Make sure that on average at least two pageblocks are almost free
+	 * of another type, one for a migratetype to fall back to and a
+	 * second to avoid subsequent fallbacks of other types. There are 3
+	 * MIGRATE_TYPES we care about.
+	 */
+	recommended_min += pageblock_nr_pages * nr_zones *
+			   MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
+
+	/* don't ever allow to reserve more than 5% of the lowmem */
+	recommended_min = min(recommended_min,
+			      (unsigned long) nr_free_buffer_pages() / 20);
+	recommended_min <<= (PAGE_SHIFT-10);
+
+	if (recommended_min > min_free_kbytes)
+		min_free_kbytes = recommended_min;
+	setup_per_zone_wmarks();
+	return 0;
+}
+late_initcall(set_recommended_min_free_kbytes);
+
 static int start_khugepaged(void)
 {
 	int err = 0;
@@ -108,6 +149,8 @@ static int start_khugepaged(void)
 		mutex_unlock(&khugepaged_mutex);
 		if (wakeup)
 			wake_up_interruptible(&khugepaged_wait);
+
+		set_recommended_min_free_kbytes();
 	} else
 		/* wakeup to exit */
 		wake_up_interruptible(&khugepaged_wait);
@@ -177,6 +220,13 @@ static ssize_t enabled_store(struct kobject *kobj,
 			ret = err;
 	}
 
+	if (ret > 0 &&
+	    (test_bit(TRANSPARENT_HUGEPAGE_FLAG,
+		      &transparent_hugepage_flags) ||
+	     test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
+		      &transparent_hugepage_flags)))
+		set_recommended_min_free_kbytes();
+
 	return ret;
 }
 static struct kobj_attribute enabled_attr =
@@ -464,6 +514,8 @@ static int __init hugepage_init(void)
 
 	start_khugepaged();
 
+	set_recommended_min_free_kbytes();
+
 out:
 	return err;
 }
```
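
Because set_recommended_min_free_kbytes() only ever raises min_free_kbytes
before calling setup_per_zone_wmarks(), its effect can be observed from
userspace after boot. A trivial sketch, assuming a Linux system with procfs
mounted at /proc:

```c
#include <stdio.h>

int main(void)
{
	/* min_free_kbytes is exported via procfs as a single integer */
	FILE *f = fopen("/proc/sys/vm/min_free_kbytes", "r");
	long kbytes;

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fscanf(f, "%ld", &kbytes) == 1)
		printf("min_free_kbytes = %ld\n", kbytes);
	fclose(f);
	return 0;
}
```

With this patch applied and transparent hugepages enabled, the printed value
should be at least the computed recommendation, unless the 5% lowmem cap bound
it or the administrator had already configured a larger value.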