path: root/mm
author    Andi Kleen <ak@suse.de>  2008-07-23 21:27:45 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>  2008-07-24 10:47:17 -0700
commit    5ced66c901f1cf0b684feb15c2cd8b126e263d07 (patch)
tree      d5faa514fa226a2fcba97aef1673c404369c1ad2 /mm
parent    a3437870160cf2caaac6bdd76c7377a5a4145a8c (diff)
hugetlb: abstract numa round robin selection
Need this as a separate function for a future patch. No behaviour change.

Acked-by: Adam Litke <agl@us.ibm.com>
Acked-by: Nishanth Aravamudan <nacc@us.ibm.com>
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--  mm/hugetlb.c  37
1 file changed, 22 insertions(+), 15 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index bb49ce5..5e620e2 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -565,6 +565,27 @@ static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
return page;
}
+/*
+ * Use a helper variable to find the next node and then
+ * copy it back to hugetlb_next_nid afterwards:
+ * otherwise there's a window in which a racer might
+ * pass invalid nid MAX_NUMNODES to alloc_pages_node.
+ * But we don't need to use a spin_lock here: it really
+ * doesn't matter if occasionally a racer chooses the
+ * same nid as we do. Move nid forward in the mask even
+ * if we just successfully allocated a hugepage so that
+ * the next caller gets hugepages on the next node.
+ */
+static int hstate_next_node(struct hstate *h)
+{
+ int next_nid;
+ next_nid = next_node(h->hugetlb_next_nid, node_online_map);
+ if (next_nid == MAX_NUMNODES)
+ next_nid = first_node(node_online_map);
+ h->hugetlb_next_nid = next_nid;
+ return next_nid;
+}
+
static int alloc_fresh_huge_page(struct hstate *h)
{
struct page *page;
@@ -578,21 +599,7 @@ static int alloc_fresh_huge_page(struct hstate *h)
page = alloc_fresh_huge_page_node(h, h->hugetlb_next_nid);
if (page)
ret = 1;
- /*
- * Use a helper variable to find the next node and then
- * copy it back to hugetlb_next_nid afterwards:
- * otherwise there's a window in which a racer might
- * pass invalid nid MAX_NUMNODES to alloc_pages_node.
- * But we don't need to use a spin_lock here: it really
- * doesn't matter if occasionally a racer chooses the
- * same nid as we do. Move nid forward in the mask even
- * if we just successfully allocated a hugepage so that
- * the next caller gets hugepages on the next node.
- */
- next_nid = next_node(h->hugetlb_next_nid, node_online_map);
- if (next_nid == MAX_NUMNODES)
- next_nid = first_node(node_online_map);
- h->hugetlb_next_nid = next_nid;
+ next_nid = hstate_next_node(h);
} while (!page && h->hugetlb_next_nid != start_nid);
if (ret)
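
The comment block above describes the wrap-around semantics of next_node()/first_node() over node_online_map that hstate_next_node() factors out. The following stand-alone user-space sketch is not part of the patch: the online[] array and the *_sketch helpers are hypothetical stand-ins for the kernel's node mask and nodemask helpers, used only to show the round-robin traversal with wrap-around.

/*
 * Minimal sketch of the round-robin advance implemented by
 * hstate_next_node(): walk the set of "online" node ids and wrap
 * to the first one when the end of the mask is reached. The fixed
 * online[] array stands in for node_online_map; this is an
 * illustration, not kernel code.
 */
#include <stdio.h>

#define MAX_NUMNODES 8

/* Pretend nodes 0, 2 and 5 are online. */
static const int online[MAX_NUMNODES] = { 1, 0, 1, 0, 0, 1, 0, 0 };

/* Analogous to next_node(): first online node strictly after 'nid'. */
static int next_node_sketch(int nid)
{
	for (int n = nid + 1; n < MAX_NUMNODES; n++)
		if (online[n])
			return n;
	return MAX_NUMNODES;	/* past the end; caller must wrap */
}

/* Analogous to first_node(): lowest online node id. */
static int first_node_sketch(void)
{
	for (int n = 0; n < MAX_NUMNODES; n++)
		if (online[n])
			return n;
	return MAX_NUMNODES;
}

int main(void)
{
	int nid = first_node_sketch();

	/* Eight allocations round-robin as 0, 2, 5, 0, 2, 5, 0, 2. */
	for (int i = 0; i < 8; i++) {
		int next;

		printf("allocate on node %d\n", nid);
		next = next_node_sketch(nid);
		if (next == MAX_NUMNODES)
			next = first_node_sketch();
		nid = next;
	}
	return 0;
}

As in the patch, the advance happens unconditionally after every allocation attempt, successful or not, so consecutive callers spread their huge pages across nodes.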