author	Christoph Lameter <cl@linux.com>	2011-06-01 12:25:46 -0500
committer	Pekka Enberg <penberg@kernel.org>	2011-07-02 13:26:53 +0300
commit	8cb0a5068f4108e8ca60d5e0bcfbe6901adcfaef (patch)
tree	48098dca8f01abd882bfb9771880b2c791604f8a /mm
parent	50d5c41cd151b21ac1dfc98f048210456ccacc20 (diff)
slub: Move page->frozen handling near where the page->freelist handling occurs
This is necessary because the frozen bit has to be handled in the same cmpxchg_double with the freelist and the counters.

Signed-off-by: Christoph Lameter <cl@linux.com>
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
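To make the constraint in the message concrete, below is a minimal user-space sketch of the idea: the freelist pointer and a counters word that carries the frozen bit are kept adjacent so that one double-word compare-and-swap can update both together. The names (slab_state, slab_counters, cmpxchg_double_slab) and the bitfield layout are illustrative assumptions for this sketch, not the actual definitions in mm/slub.c.

/*
 * Illustrative sketch only -- not kernel code. Assumes a GCC/Clang
 * toolchain; the 16-byte compare-exchange may need -mcx16 or -latomic
 * on x86-64.
 */
#include <stdbool.h>
#include <stdint.h>

struct slab_counters {			/* packed into one machine word */
	unsigned inuse   : 16;		/* objects currently allocated */
	unsigned objects : 15;		/* total objects in the slab */
	unsigned frozen  : 1;		/* slab is held by a per-cpu structure */
};

struct slab_state {			/* the two words that are CAS'd as a unit */
	void *freelist;			/* first free object in the slab */
	union {
		struct slab_counters bits;
		uintptr_t counters;	/* raw word used for the exchange */
	};
};

/*
 * Replace (freelist, counters) atomically, or fail if either changed.
 * Because frozen lives inside counters, it cannot be flipped separately
 * from the freelist -- the property the commit message relies on.
 */
static bool cmpxchg_double_slab(struct slab_state *slab,
				void *freelist_old, uintptr_t counters_old,
				void *freelist_new, uintptr_t counters_new)
{
	struct slab_state expected = { .freelist = freelist_old,
				       .counters = counters_old };
	struct slab_state desired  = { .freelist = freelist_new,
				       .counters = counters_new };

	return __atomic_compare_exchange(slab, &expected, &desired, false,
					 __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}

In this sketch a caller would read the current freelist and counters, build new values (for example setting bits.frozen when taking the slab for a CPU), and loop on cmpxchg_double_slab() until it succeeds.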
Diffstat (limited to 'mm')
-rw-r--r--	mm/slub.c	8
1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 82b2d04..5a2d3d8 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1286,6 +1286,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
 
 	page->freelist = start;
 	page->inuse = 0;
+	page->frozen = 1;
 out:
 	return page;
 }
@@ -1424,7 +1425,6 @@ static inline int lock_and_freeze_slab(struct kmem_cache_node *n,
 {
 	if (slab_trylock(page)) {
 		__remove_partial(n, page);
-		page->frozen = 1;
 		return 1;
 	}
 	return 0;
@@ -1538,7 +1538,6 @@ static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
 {
 	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
 
-	page->frozen = 0;
 	if (page->inuse) {
 
 		if (page->freelist) {
@@ -1671,6 +1670,7 @@ static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
 	}
 	c->page = NULL;
 	c->tid = next_tid(c->tid);
+	page->frozen = 0;
 	unfreeze_slab(s, page, tail);
 }
 
@@ -1831,6 +1831,8 @@ static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
 	stat(s, ALLOC_REFILL);
 
 load_freelist:
+	VM_BUG_ON(!page->frozen);
+
 	object = page->freelist;
 	if (unlikely(!object))
 		goto another_slab;
@@ -1854,6 +1856,7 @@ new_slab:
 	page = get_partial(s, gfpflags, node);
 	if (page) {
 		stat(s, ALLOC_FROM_PARTIAL);
+		page->frozen = 1;
 		c->node = page_to_nid(page);
 		c->page = page;
 		goto load_freelist;
@@ -2371,6 +2374,7 @@ static void early_kmem_cache_node_alloc(int node)
 	BUG_ON(!n);
 	page->freelist = get_freepointer(kmem_cache_node, n);
 	page->inuse++;
+	page->frozen = 0;
 	kmem_cache_node->node[node] = n;
 #ifdef CONFIG_SLUB_DEBUG
 	init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);