aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--include/linux/page-flags.h11
-rw-r--r--mm/Kconfig10
-rw-r--r--mm/ksm.c33
3 files changed, 52 insertions, 2 deletions
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 40edce1..b38dc8d 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -114,8 +114,15 @@ enum pageflags {
PG_was_active,
#endif
PG_readahead, /* page in a readahead window */
+#ifdef CONFIG_KSM_CHECK_PAGE
+ PG_ksm_scan0, /* page has been scanned by even KSM cycle */
+#endif
__NR_PAGEFLAGS,
+#ifdef CONFIG_KSM_CHECK_PAGE
+ /* page has been scanned by odd KSM cycle */
+ PG_ksm_scan1 = PG_owner_priv_1,
+#endif
/* Filesystems */
PG_checked = PG_owner_priv_1,
@@ -218,6 +225,10 @@ PAGEFLAG(Reserved, reserved) __CLEARPAGEFLAG(Reserved, reserved)
PAGEFLAG(SwapBacked, swapbacked) __CLEARPAGEFLAG(SwapBacked, swapbacked)
__PAGEFLAG(SlobFree, slob_free)
+#ifdef CONFIG_KSM_CHECK_PAGE
+CLEARPAGEFLAG(KsmScan0, ksm_scan0) TESTSETFLAG(KsmScan0, ksm_scan0)
+CLEARPAGEFLAG(KsmScan1, ksm_scan1) TESTSETFLAG(KsmScan1, ksm_scan1)
+#endif
__PAGEFLAG(SlubFrozen, slub_frozen)
diff --git a/mm/Kconfig b/mm/Kconfig
index 264a442..3bf1bfe 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -237,6 +237,16 @@ config KSM
until a program has madvised that an area is MADV_MERGEABLE, and
root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
+config KSM_CHECK_PAGE
+ bool "Check page before scanning"
+ depends on KSM
+ default n
+ help
+ If enabled, check whether a page has already been scanned in the
+ current KSM scan cycle and skip it if so.
+ This is useful in situations where a parent and a child process
+ mark the same memory area for KSM scanning.
+
config DEFAULT_MMAP_MIN_ADDR
int "Low address space to protect from user allocation"
depends on MMU
diff --git a/mm/ksm.c b/mm/ksm.c
index 9a68b0c..b126a7f 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -554,7 +554,9 @@ static void remove_rmap_item_from_tree(struct rmap_item *rmap_item)
* than left over from before.
*/
age = (unsigned char)(ksm_scan.seqnr - rmap_item->address);
+#ifndef CONFIG_KSM_CHECK_PAGE
BUG_ON(age > 1);
+#endif
if (!age)
rb_erase(&rmap_item->node, &root_unstable_tree);
@@ -1403,6 +1405,31 @@ next_mm:
return NULL;
}
+static inline int is_page_scanned(struct page *page)
+{
+#ifdef CONFIG_KSM_CHECK_PAGE
+ /* page is already marked as ksm, so this will be simple merge */
+ if (PageKsm(page))
+ return 0;
+
+ if (ksm_scan.seqnr & 0x1) {
+ /* odd cycle */
+ /* clear even cycle bit */
+ ClearPageKsmScan0(page);
+ /* get old value and mark it scanned */
+ return TestSetPageKsmScan1(page);
+ } else {
+ /* even cycle */
+ /* clear odd cycle bit */
+ ClearPageKsmScan1(page);
+ /* get old value and mark it scanned */
+ return TestSetPageKsmScan0(page);
+ }
+#else
+ return 0;
+#endif
+}
+
/**
* ksm_do_scan - the ksm scanner main worker function.
* @scan_npages - number of pages we want to scan before we return.
@@ -1417,8 +1444,10 @@ static void ksm_do_scan(unsigned int scan_npages)
rmap_item = scan_get_next_rmap_item(&page);
if (!rmap_item)
return;
- if (!PageKsm(page) || !in_stable_tree(rmap_item))
- cmp_and_merge_page(page, rmap_item);
+ if (!PageKsm(page) || !in_stable_tree(rmap_item)) {
+ if (!is_page_scanned(page))
+ cmp_and_merge_page(page, rmap_item);
+ }
put_page(page);
}
}