author		Yasunori Goto <y-goto@jp.fujitsu.com>	2007-10-16 01:26:14 -0700
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-10-16 09:43:02 -0700
commit		98f3cfc1dc7a53b629d43b7844a9b3f786213048 (patch)
tree		de98ff0e3c25b01863c31abc0f3054de978b9ef8 /mm
parent		48e94196a533dbee17c252bf80d0310fb8c8c2eb (diff)
memory hotplug: Hot-add with sparsemem-vmemmap
This patch avoids a panic when memory hot-add is executed with
sparsemem-vmemmap. The current sparsemem-vmemmap code doesn't support memory
hot-add: the vmemmap must be populated at hot-add time. This is for
2.6.23-rc2-mm2.
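
In outline, the fix routes hot-add through the same population routine that
boot-time initialization uses. The sketch below is a simplified view of the
call chain this patch sets up under CONFIG_SPARSEMEM_VMEMMAP (the names come
from the diff further down; the bodies are trimmed to the essential calls, so
treat it as an illustration rather than the exact code):

	/* Simplified sketch (not the verbatim patch) of the hot-add memmap
	 * allocation path under CONFIG_SPARSEMEM_VMEMMAP. */
	struct page * __meminit sparse_mem_map_populate(unsigned long pnum, int nid)
	{
		/* First struct page of this section inside the virtual memmap. */
		struct page *map = pfn_to_page(pnum * PAGES_PER_SECTION);

		/* Allocate backing pages and map this vmemmap range on node nid. */
		if (vmemmap_populate(map, PAGES_PER_SECTION, nid))
			return NULL;
		return map;
	}

	/* Hot-add (sparse_add_one_section) now goes through this wrapper
	 * instead of the boot-time-only allocator. */
	static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
							  unsigned long nr_pages)
	{
		return sparse_mem_map_populate(pnum, nid);
	}

Keeping kmalloc_section_memmap() as the single entry point lets
sparse_add_one_section() stay unchanged whether or not sparsemem uses vmemmap.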
Todo:
 # Even if this patch is applied, the message "[xxxx-xxxx] potential
   offnode page_structs" is still displayed. To allocate the memmap on its
   own node, the memmap (and pgdat) would have to be initialized first,
   which is a chicken-and-egg relationship.
 # vmemmap_unpopulate will be necessary for the following (a hypothetical
   sketch of its shape follows below):
   - cancelling a hot-add due to an error
   - unplug
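
A teardown counterpart does not exist yet; the stub below is purely a
hypothetical illustration of what the second Todo item asks for (the name is
taken from the Todo, the signature and body are invented here), not code from
this or any other patch:

	/* Hypothetical sketch only -- nothing like this is implemented by
	 * this patch.  A real version would walk the section's vmemmap
	 * range, tear down the page-table entries that vmemmap_populate()
	 * created and free the pages backing them, so a failed or cancelled
	 * hot-add can be undone. */
	static void vmemmap_unpopulate(struct page *start_page,
				       unsigned long nr_pages, int nid)
	{
		/* XXX: unmap [start_page, start_page + nr_pages) and free
		 *      the backing pages allocated on node nid. */
	}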
Signed-off-by: Yasunori Goto <y-goto@jp.fujitsu.com>
Cc: Andy Whitcroft <apw@shadowen.org>
Cc: Christoph Lameter <clameter@sgi.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--	mm/sparse-vmemmap.c	2
-rw-r--r--	mm/sparse.c	25
2 files changed, 23 insertions, 4 deletions
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index 4f2d485..d3b718b 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -137,7 +137,7 @@ int __meminit vmemmap_populate_basepages(struct page *start_page,
 	return 0;
 }
 
-struct page __init *sparse_early_mem_map_populate(unsigned long pnum, int nid)
+struct page * __meminit sparse_mem_map_populate(unsigned long pnum, int nid)
 {
 	struct page *map = pfn_to_page(pnum * PAGES_PER_SECTION);
 	int error = vmemmap_populate(map, PAGES_PER_SECTION, nid);
diff --git a/mm/sparse.c b/mm/sparse.c
index 1f4dbb8..08fb14f 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -259,7 +259,7 @@ static unsigned long *sparse_early_usemap_alloc(unsigned long pnum)
 }
 
 #ifndef CONFIG_SPARSEMEM_VMEMMAP
-struct page __init *sparse_early_mem_map_populate(unsigned long pnum, int nid)
+struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid)
 {
 	struct page *map;
 
@@ -284,7 +284,7 @@ struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
 	struct mem_section *ms = __nr_to_section(pnum);
 	int nid = sparse_early_nid(ms);
 
-	map = sparse_early_mem_map_populate(pnum, nid);
+	map = sparse_mem_map_populate(pnum, nid);
 	if (map)
 		return map;
 
@@ -322,6 +322,18 @@ void __init sparse_init(void)
 }
 
 #ifdef CONFIG_MEMORY_HOTPLUG
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
+						  unsigned long nr_pages)
+{
+	/* This will make the necessary allocations eventually. */
+	return sparse_mem_map_populate(pnum, nid);
+}
+static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
+{
+	return; /* XXX: Not implemented yet */
+}
+#else
 static struct page *__kmalloc_section_memmap(unsigned long nr_pages)
 {
 	struct page *page, *ret;
@@ -344,6 +356,12 @@ got_map_ptr:
 	return ret;
 }
 
+static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
+						  unsigned long nr_pages)
+{
+	return __kmalloc_section_memmap(nr_pages);
+}
+
 static int vaddr_in_vmalloc_area(void *addr)
 {
 	if (addr >= (void *)VMALLOC_START &&
@@ -360,6 +378,7 @@ static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
 		free_pages((unsigned long)memmap,
 			   get_order(sizeof(struct page) * nr_pages));
 }
+#endif /* CONFIG_SPARSEMEM_VMEMMAP */
 
 /*
  * returns the number of sections whose mem_maps were properly
@@ -382,7 +401,7 @@ int sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
 	 * plus, it does a kmalloc
 	 */
 	sparse_index_init(section_nr, pgdat->node_id);
-	memmap = __kmalloc_section_memmap(nr_pages);
+	memmap = kmalloc_section_memmap(section_nr, pgdat->node_id, nr_pages);
 	usemap = __kmalloc_section_usemap();
 
 	pgdat_resize_lock(pgdat, &flags);