 arch/arm/Kconfig                      |  1 +
 arch/arm/include/asm/dma-contiguous.h | 16 ++++++++++++++++
 drivers/base/dma-contiguous.c         |  4 ++++
 include/linux/ksm.h                   |  9 +++++++++
 mm/ksm.c                              | 26 ++++++++++++++++++++++++--
 mm/migrate.c                          |  2 +-
 mm/page_alloc.c                       | 18 +++++++++++++---
 7 files changed, 70 insertions(+), 6 deletions(-)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 45e2bee..6f148d5 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -4,6 +4,7 @@ config ARM
select HAVE_AOUT
select HAVE_DMA_API_DEBUG
select HAVE_IDE
+ select HAVE_DMA_CONTIGUOUS if (CPU_V6 || CPU_V6K || CPU_V7)
select HAVE_MEMBLOCK
select RTC_LIB
select SYS_SUPPORTS_APM_EMULATION
diff --git a/arch/arm/include/asm/dma-contiguous.h b/arch/arm/include/asm/dma-contiguous.h
new file mode 100644
index 0000000..3ed37b4
--- /dev/null
+++ b/arch/arm/include/asm/dma-contiguous.h
@@ -0,0 +1,16 @@
+#ifndef ASMARM_DMA_CONTIGUOUS_H
+#define ASMARM_DMA_CONTIGUOUS_H
+
+#ifdef __KERNEL__
+#ifdef CONFIG_CMA
+
+#include <linux/types.h>
+#include <asm-generic/dma-contiguous.h>
+
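+/* Hook for early fixup of the kernel mapping of a reserved CMA area. */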
+void dma_contiguous_early_fixup(phys_addr_t base, unsigned long size);
+
+#endif
+#endif
+
+#endif
diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
index e6fd294..f921338 100644
--- a/drivers/base/dma-contiguous.c
+++ b/drivers/base/dma-contiguous.c
@@ -218,6 +218,9 @@ static int __init cma_init_reserved_areas(void)
}
core_initcall(cma_init_reserved_areas);
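+
+/* Default no-op for the early-fixup hook declared in the ARM header. */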
+void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size) { }
/**
* dma_declare_contiguous() - reserve area for contiguous memory handling
* for particular device
@@ -343,6 +346,7 @@ struct page *dma_alloc_from_contiguous(struct device *dev, int count,
pfn = cma->base_pfn + pageno;
ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
+ pr_debug("base_pfn = 0x%lx, pageno = 0x%lx\n", cma->base_pfn, pageno);
if (ret == 0) {
bitmap_set(cma->bitmap, pageno, count);
break;
diff --git a/include/linux/ksm.h b/include/linux/ksm.h
index 3319a69..3af13d3 100644
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -89,6 +89,15 @@ int try_to_unmap_ksm(struct page *page, enum ttu_flags flags);
int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page *,
struct vm_area_struct *, unsigned long, void *), void *arg);
void ksm_migrate_page(struct page *newpage, struct page *oldpage);
+void ksm_start_migration(void);
+void ksm_finalize_migration(unsigned long start_pfn, unsigned long nr_pages);
+void ksm_abort_migration(void);
#else /* !CONFIG_KSM */
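+
+/* No-op stubs for CONFIG_KSM=n; alloc_contig_range() calls these. */
+static inline void ksm_start_migration(void) { }
+static inline void ksm_finalize_migration(unsigned long start_pfn,
+ unsigned long nr_pages) { }
+static inline void ksm_abort_migration(void) { }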
diff --git a/mm/ksm.c b/mm/ksm.c
index b126a7f..d623e5b 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1786,9 +1786,7 @@ void ksm_migrate_page(struct page *newpage, struct page *oldpage)
stable_node->kpfn = page_to_pfn(newpage);
}
}
-#endif /* CONFIG_MIGRATION */
-#ifdef CONFIG_MEMORY_HOTREMOVE
static struct stable_node *ksm_check_stable_tree(unsigned long start_pfn,
unsigned long end_pfn)
{
@@ -1805,6 +1803,30 @@ static struct stable_node *ksm_check_stable_tree(unsigned long start_pfn,
return NULL;
}
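+/* Take ksm_thread_mutex to keep ksmd away while a range migration runs. */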
+void ksm_start_migration(void)
+{
+ mutex_lock(&ksm_thread_mutex);
+}
+
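+/* Remove stable-tree nodes left pointing into the migrated range, unlock. */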
+void ksm_finalize_migration(unsigned long start_pfn, unsigned long nr_pages)
+{
+ struct stable_node *stable_node;
+ while ((stable_node = ksm_check_stable_tree(start_pfn,
+ start_pfn + nr_pages)) != NULL)
+ remove_node_from_stable_tree(stable_node);
+ mutex_unlock(&ksm_thread_mutex);
+}
+
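+/* Migration failed or was skipped; just drop ksm_thread_mutex. */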
+void ksm_abort_migration(void)
+{
+ mutex_unlock(&ksm_thread_mutex);
+}
+#endif /* CONFIG_MIGRATION */
+
+#ifdef CONFIG_MEMORY_HOTREMOVE
static int ksm_memory_callback(struct notifier_block *self,
unsigned long action, void *arg)
{
diff --git a/mm/migrate.c b/mm/migrate.c
index 3e315a7..76abb9a 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -770,7 +770,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
* File Caches may use write_page() or lock_page() in migration, then,
* just care Anon page here.
*/
- if (PageAnon(page)) {
+ if (PageAnon(page) && !PageKsm(page)) {
/*
* Only page_lock_anon_vma() understands the subtleties of
* getting a hold on an anon_vma from outside one of its mms.
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index fad3396..5a90077 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -58,6 +58,7 @@
#include <linux/memcontrol.h>
#include <linux/prefetch.h>
#include <linux/migrate.h>
+#include <linux/ksm.h>
#include <asm/tlbflush.h>
#include <asm/div64.h>
@@ -5298,7 +5299,6 @@ static void __setup_per_zone_wmarks(void)
zone->watermark[WMARK_MIN] = min;
}
-
zone->watermark[WMARK_LOW] = min_wmark_pages(zone) +
low + (min >> 2);
zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) +
@@ -5898,7 +5898,7 @@ static int __alloc_contig_migrate_range(unsigned long start, unsigned long end)
ret = migrate_pages(&cc.migratepages,
__alloc_contig_migrate_alloc,
- 0, false, MIGRATE_SYNC);
+ 0, true, MIGRATE_SYNC);
}
putback_lru_pages(&cc.migratepages);
@@ -5943,7 +5943,7 @@ static int __reclaim_pages(struct zone *zone, gfp_t gfp_mask, int count)
NULL);
if (!did_some_progress) {
/* Exhausted what can be done so it's blamo time */
- out_of_memory(zonelist, gfp_mask, order, NULL, false);
+ out_of_memory(zonelist, gfp_mask, order, NULL);
}
}
@@ -5978,6 +5978,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
{
struct zone *zone = page_zone(pfn_to_page(start));
unsigned long outer_start, outer_end;
+ bool ksm_migration_started = false;
int ret = 0, order;
/*
@@ -6009,6 +6010,11 @@ int alloc_contig_range(unsigned long start, unsigned long end,
if (ret)
goto done;
+ /* Take the KSM lock so that migrate_pages() can run with
+ * offlining == true and KSM pages can be migrated as well. */
+ ksm_start_migration();
+ ksm_migration_started = true;
+
ret = __alloc_contig_migrate_range(start, end);
if (ret)
goto done;
@@ -6070,9 +6076,15 @@ int alloc_contig_range(unsigned long start, unsigned long end,
if (end != outer_end)
free_contig_range(end, outer_end - end);
+ /* Finalize KSM migration and drop the KSM lock. */
+ ksm_finalize_migration(start, end - start);
+ ksm_migration_started = false;
+
done:
undo_isolate_page_range(pfn_max_align_down(start),
pfn_max_align_up(end), migratetype);
+ if (ksm_migration_started)
+ ksm_abort_migration();
return ret;
}