diff options
author     Christoph Lameter <cl@linux-foundation.org>    2010-01-21 17:43:35 -0600
committer  Pekka Enberg <penberg@cs.helsinki.fi>          2010-01-22 18:33:38 +0200
commit     91efd773c74bb26b5409c85ad755d536448e229c (patch)
tree       b812dadb615ecff08e4d3ebe97483f192d0be27d
parent     7738dd9e8f2bc1c249e00c9c20e018448fac0084 (diff)
download   kernel_samsung_tuna-91efd773c74bb26b5409c85ad755d536448e229c.zip
           kernel_samsung_tuna-91efd773c74bb26b5409c85ad755d536448e229c.tar.gz
           kernel_samsung_tuna-91efd773c74bb26b5409c85ad755d536448e229c.tar.bz2
dma kmalloc handling fixes
1. We need kmalloc_percpu for all of the now extended kmalloc caches
array not just for each shift value.
2. init_kmem_cache_nodes() must assume node 0 locality for statically
allocated dma kmem_cache structures even after boot is complete.
Reported-and-tested-by: Alex Chiang <achiang@hp.com>
Signed-off-by: Christoph Lameter <cl@linux-foundation.org>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
 mm/slub.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)
@@ -2062,7 +2062,7 @@ init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s)
 #endif
 }
 
-static DEFINE_PER_CPU(struct kmem_cache_cpu, kmalloc_percpu[SLUB_PAGE_SHIFT]);
+static DEFINE_PER_CPU(struct kmem_cache_cpu, kmalloc_percpu[KMALLOC_CACHES]);
 
 static inline int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
 {
@@ -2148,7 +2148,8 @@ static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
 	int node;
 	int local_node;
 
-	if (slab_state >= UP)
+	if (slab_state >= UP && (s < kmalloc_caches ||
+			s > kmalloc_caches + KMALLOC_CACHES))
 		local_node = page_to_nid(virt_to_page(s));
 	else
 		local_node = 0;