author    Peter Zijlstra <a.p.zijlstra@chello.nl>  2012-10-25 14:16:43 +0200
committer Mel Gorman <mgorman@suse.de>             2012-12-11 14:42:45 +0000
commit    cbee9f88ec1b8dd6b58f25f54e4f52c82ed77690 (patch)
tree      d4cfbcfa3e89742216cd792d4aa914356406b532 /mm
parent    a720094ded8cbb303111035be91858011d2eac71 (diff)
mm: numa: Add fault driven placement and migration
NOTE: This patch is based on "sched, numa, mm: Add fault driven placement and
migration policy" but as it throws away all the policy to just leave a basic
foundation I had to drop the signed-offs-by.

This patch creates a bare-bones method for setting PTEs pte_numa in the
context of the scheduler that when faulted later will be faulted onto the
node the CPU is running on. In itself this does nothing useful but any
placement policy will fundamentally depend on receiving hints on placement
from fault context and doing something intelligent about it.

Signed-off-by: Mel Gorman <mgorman@suse.de>
Acked-by: Rik van Riel <riel@redhat.com>
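The patch only records fault hints; it deliberately adds no placement policy.
As a rough illustration of how a later policy might consume the per-node hints
that task_numa_fault() reports, here is a minimal, self-contained userspace C
sketch. Only task_numa_fault()'s role comes from the patch; the struct
numa_stats, record_numa_fault() and preferred_node() names are invented for
this example and are not part of the kernel.

    /*
     * Illustrative userspace sketch (not kernel code): accumulate NUMA
     * hinting faults per node and pick a preferred node, mimicking what
     * a policy built on task_numa_fault() hints might do.
     */
    #include <stdio.h>

    #define MAX_NUMNODES 4

    struct numa_stats {
            unsigned long faults[MAX_NUMNODES];     /* faults seen per node */
    };

    /* Mirrors the role of task_numa_fault(node, pages): count a hint. */
    static void record_numa_fault(struct numa_stats *ns, int node, int pages)
    {
            if (node >= 0 && node < MAX_NUMNODES)
                    ns->faults[node] += pages;
    }

    /* A trivial policy: prefer the node that absorbed the most faults. */
    static int preferred_node(const struct numa_stats *ns)
    {
            int node, best = 0;

            for (node = 1; node < MAX_NUMNODES; node++)
                    if (ns->faults[node] > ns->faults[best])
                            best = node;
            return best;
    }

    int main(void)
    {
            struct numa_stats ns = { { 0 } };

            /* Pretend the task faulted 3 base pages on node 1 and a THP on node 2. */
            record_numa_fault(&ns, 1, 1);
            record_numa_fault(&ns, 1, 1);
            record_numa_fault(&ns, 1, 1);
            record_numa_fault(&ns, 2, 512); /* e.g. HPAGE_PMD_NR on x86-64 */

            printf("preferred node: %d\n", preferred_node(&ns));
            return 0;
    }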
Diffstat (limited to 'mm')
-rw-r--r--  mm/huge_memory.c   5
-rw-r--r--  mm/memory.c       14
2 files changed, 17 insertions(+), 2 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index d79f7a5..ee81337 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1046,6 +1046,7 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
*/
split_huge_page(page);
put_page(page);
+
return 0;
clear_pmdnuma:
@@ -1060,8 +1061,10 @@ clear_pmdnuma:
out_unlock:
spin_unlock(&mm->page_table_lock);
- if (page)
+ if (page) {
put_page(page);
+ task_numa_fault(numa_node_id(), HPAGE_PMD_NR);
+ }
return 0;
}
diff --git a/mm/memory.c b/mm/memory.c
index d525426..8012c19 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3454,7 +3454,8 @@ int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
{
struct page *page = NULL;
spinlock_t *ptl;
- int current_nid, target_nid;
+ int current_nid = -1;
+ int target_nid;
/*
* The "pte" at this point cannot be used safely without
@@ -3501,6 +3502,7 @@ int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
current_nid = target_nid;
out:
+ task_numa_fault(current_nid, 1);
return 0;
}
@@ -3537,6 +3539,7 @@ static int do_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
for (addr = _addr + offset; addr < _addr + PMD_SIZE; pte++, addr += PAGE_SIZE) {
pte_t pteval = *pte;
struct page *page;
+ int curr_nid;
if (!pte_present(pteval))
continue;
if (!pte_numa(pteval))
@@ -3554,6 +3557,15 @@ static int do_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
page = vm_normal_page(vma, addr, pteval);
if (unlikely(!page))
continue;
+ /* only check non-shared pages */
+ if (unlikely(page_mapcount(page) != 1))
+ continue;
+ pte_unmap_unlock(pte, ptl);
+
+ curr_nid = page_to_nid(page);
+ task_numa_fault(curr_nid, 1);
+
+ pte = pte_offset_map_lock(mm, pmdp, addr, &ptl);
}
pte_unmap_unlock(orig_pte, ptl);