From 9a14f653dfe349c0916e6a78c413effa2fa3f001 Mon Sep 17 00:00:00 2001
From: Paul Mundt <lethal@linux-sh.org>
Date: Fri, 24 Dec 2010 11:50:34 +0900
Subject: nommu: Fix up vmalloc_node() symbol export regression.

Commit e1ca778 ("mm: add vzalloc() and vzalloc_node() helpers") ended up
accidentally deleting the vmalloc_node() symbol export, resulting in:

"vmalloc_node" [net/core/pktgen.ko] undefined!
"vmalloc_node" [net/netfilter/x_tables.ko] undefined!

regressions.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
---
 mm/nommu.c | 1 +
 1 file changed, 1 insertion(+)

(limited to 'mm')

diff --git a/mm/nommu.c b/mm/nommu.c
index 27a9ac5..275608c 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -328,6 +328,7 @@ void *vmalloc_node(unsigned long size, int node)
 {
 	return vmalloc(size);
 }
+EXPORT_SYMBOL(vmalloc_node);
 
 /**
  * vzalloc_node - allocate memory on a specific node with zero fill
--
cgit v1.1


From 29c185e5c681ca00d863d161eda7eadb93e32ee5 Mon Sep 17 00:00:00 2001
From: Paul Mundt <lethal@linux-sh.org>
Date: Fri, 24 Dec 2010 12:08:30 +0900
Subject: nommu: Provide stubbed alloc/free_vm_area() implementation.

Now that these have been introduced into the vmalloc API, sync up the
nommu side of things. At present we don't deal with VMAs as such, so
for the time being these will simply BUG() out. In the future it should
be possible to support this interface by layering on top of the
vm_regions.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
---
 mm/nommu.c | 27 ++++++++++++++++++++++++++-
 1 file changed, 26 insertions(+), 1 deletion(-)

(limited to 'mm')

diff --git a/mm/nommu.c b/mm/nommu.c
index 275608c..ef4045d 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -10,7 +10,7 @@
  * Copyright (c) 2000-2003 David McCullough <davidm@snapgear.com>
  * Copyright (c) 2000-2001 D Jeff Dionne <jeff@uClinux.org>
  * Copyright (c) 2002 Greg Ungerer <gerg@snapgear.com>
- * Copyright (c) 2007-2009 Paul Mundt <lethal@linux-sh.org>
+ * Copyright (c) 2007-2010 Paul Mundt <lethal@linux-sh.org>
  */
 
 #include <linux/module.h>
@@ -441,6 +441,31 @@ void __attribute__((weak)) vmalloc_sync_all(void)
 {
 }
 
+/**
+ * alloc_vm_area - allocate a range of kernel address space
+ * @size: size of the area
+ *
+ * Returns: NULL on failure, vm_struct on success
+ *
+ * This function reserves a range of kernel address space, and
+ * allocates pagetables to map that range. No actual mappings
+ * are created. If the kernel address space is not shared
+ * between processes, it syncs the pagetable across all
+ * processes.
+ */
+struct vm_struct *alloc_vm_area(size_t size)
+{
+	BUG();
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(alloc_vm_area);
+
+void free_vm_area(struct vm_struct *area)
+{
+	BUG();
+}
+EXPORT_SYMBOL_GPL(free_vm_area);
+
 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
 		   struct page *page)
 {
--
cgit v1.1
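
The first commit matters because EXPORT_SYMBOL() is what makes a kernel
symbol resolvable when a module is loaded; built-in callers were
unaffected, but any module calling vmalloc_node() failed to link, as the
pktgen and x_tables errors quoted above show. A minimal sketch of such a
caller follows; the module name and allocation size are invented for
illustration, and on nommu kernels the node argument is simply ignored
(the call falls through to vmalloc()).

/* Hypothetical module illustrating the regression: without the
 * EXPORT_SYMBOL(vmalloc_node) restored above, loading this module
 * fails with '"vmalloc_node" [vnode_demo.ko] undefined!'. */
#include <linux/module.h>
#include <linux/vmalloc.h>

static void *buf;

static int __init vnode_demo_init(void)
{
	/* On nommu this degenerates to vmalloc(4096); the node
	 * argument only matters on NUMA-capable MMU kernels. */
	buf = vmalloc_node(4096, 0);
	if (!buf)
		return -ENOMEM;
	return 0;
}

static void __exit vnode_demo_exit(void)
{
	vfree(buf);
}

module_init(vnode_demo_init);
module_exit(vnode_demo_exit);
MODULE_LICENSE("GPL");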
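
For the second commit, the contract being stubbed out is the one in the
kerneldoc above: alloc_vm_area() reserves a range of kernel address
space without populating it, and free_vm_area() releases the
reservation. Below is a rough sketch of how a caller uses the pair on
MMU kernels; the function name map_window is hypothetical, and on nommu
kernels both calls currently hit BUG() until vm_region-backed support
is written.

/* Hypothetical MMU-side caller of the interface stubbed above.  On a
 * nommu kernel this path would currently trip the BUG() stubs. */
#include <linux/mm.h>
#include <linux/vmalloc.h>

static int map_window(void)
{
	struct vm_struct *area;

	/* Reserve one page of kernel address space; pagetables are
	 * allocated for the range, but no pages are mapped yet. */
	area = alloc_vm_area(PAGE_SIZE);
	if (!area)
		return -ENOMEM;

	/*
	 * A real user would now establish mappings at area->addr
	 * (e.g. point the reserved PTEs at foreign or device pages)
	 * and tear them down again before releasing the area.
	 */

	free_vm_area(area);
	return 0;
}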