author	Kevin Hilman <khilman@mvista.com>	2006-12-06 20:32:11 -0800
committer	Linus Torvalds <torvalds@woody.osdl.org>	2006-12-07 08:39:21 -0800
commit	a44b56d354b49f9abb184e5a14f71889856283bb (patch)
tree	36eced3b179038a70463645395e6b31f3a5cc74c /mm/slab.c
parent	cace673d376d97b0c66ffa0a49b8d588a696d5d2 (diff)
[PATCH] slab debug and ARCH_SLAB_MINALIGN don't get along
When CONFIG_SLAB_DEBUG is used in combination with ARCH_SLAB_MINALIGN, some debug flags should be disabled which depend on BYTES_PER_WORD alignment. The disabling of these debug flags is not properly handled when BYTES_PER_WORD < ARCH_SLAB_MINALIGN < cache_line_size().

This patch fixes that and also adds an alignment check to cache_alloc_debugcheck_after() when ARCH_SLAB_MINALIGN is used.

Signed-off-by: Kevin Hilman <khilman@mvista.com>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Christoph Lameter <clameter@engr.sgi.com>
Cc: Manfred Spraul <manfred@colorfullife.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
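For readers skimming the patch, the check added to cache_alloc_debugcheck_after() is the standard power-of-two mask test. Below is a minimal stand-alone sketch of that test in plain user-space C, not kernel code; the is_misaligned() helper, DEMO_SLAB_MINALIGN and the example addresses are illustrative assumptions rather than anything defined in mm/slab.c.

#include <stdio.h>
#include <stdint.h>

/* Illustrative stand-in for the arch-mandated minimum alignment; the
 * real ARCH_SLAB_MINALIGN comes from the architecture headers. */
#define DEMO_SLAB_MINALIGN	8UL

/* An address is aligned to a power-of-two "align" iff its low
 * log2(align) bits are all zero, so masking with (align - 1) exposes
 * any misalignment -- the same test the patch applies to objp. */
static int is_misaligned(uintptr_t addr, uintptr_t align)
{
	return (addr & (align - 1)) != 0;
}

int main(void)
{
	printf("0x1000: %d\n", is_misaligned(0x1000, DEMO_SLAB_MINALIGN)); /* 0: aligned */
	printf("0x1004: %d\n", is_misaligned(0x1004, DEMO_SLAB_MINALIGN)); /* 1: misaligned */
	return 0;
}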
Diffstat (limited to 'mm/slab.c')
-rw-r--r--	mm/slab.c	17
1 file changed, 11 insertions, 6 deletions
diff --git a/mm/slab.c b/mm/slab.c
index 5de8147..ff60a94 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2197,18 +2197,17 @@ kmem_cache_create (const char *name, size_t size, size_t align,
 	if (flags & SLAB_RED_ZONE || flags & SLAB_STORE_USER)
 		ralign = BYTES_PER_WORD;
 
-	/* 2) arch mandated alignment: disables debug if necessary */
+	/* 2) arch mandated alignment */
 	if (ralign < ARCH_SLAB_MINALIGN) {
 		ralign = ARCH_SLAB_MINALIGN;
-		if (ralign > BYTES_PER_WORD)
-			flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
 	}
-	/* 3) caller mandated alignment: disables debug if necessary */
+	/* 3) caller mandated alignment */
 	if (ralign < align) {
 		ralign = align;
-		if (ralign > BYTES_PER_WORD)
-			flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
 	}
+	/* disable debug if necessary */
+	if (ralign > BYTES_PER_WORD)
+		flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
 	/*
 	 * 4) Store it.
 	 */
@@ -3063,6 +3062,12 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
 
 		cachep->ctor(objp, cachep, ctor_flags);
 	}
+#if ARCH_SLAB_MINALIGN
+	if ((u32)objp & (ARCH_SLAB_MINALIGN-1)) {
+		printk(KERN_ERR "0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n",
+		       objp, ARCH_SLAB_MINALIGN);
+	}
+#endif
 	return objp;
 }
 #else
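For context on the first hunk, the patched control flow can be modelled outside the kernel: resolve the largest of the word, arch and caller alignments first, then clear the word-alignment-dependent debug flags in a single place. The sketch below is a rough stand-alone illustration under stated assumptions (a 32-bit BYTES_PER_WORD, an arch minimum of 8, and local stand-in values for the SLAB_* flags); it is not the kernel implementation.

#include <stdio.h>

#define BYTES_PER_WORD		4UL	/* assumed 32-bit word */
#define ARCH_SLAB_MINALIGN	8UL	/* assumed arch minimum */
#define SLAB_RED_ZONE		0x1UL	/* stand-in flag values */
#define SLAB_STORE_USER		0x2UL

/* Mirrors the patched ordering in kmem_cache_create(): pick the final
 * alignment, then disable the debug flags once if it exceeds
 * BYTES_PER_WORD. */
static unsigned long pick_align(unsigned long ralign, unsigned long align,
				unsigned long *flags)
{
	if (ralign < ARCH_SLAB_MINALIGN)	/* 2) arch mandated alignment */
		ralign = ARCH_SLAB_MINALIGN;
	if (ralign < align)			/* 3) caller mandated alignment */
		ralign = align;
	if (ralign > BYTES_PER_WORD)		/* disable debug if necessary */
		*flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
	return ralign;
}

int main(void)
{
	unsigned long flags = SLAB_RED_ZONE | SLAB_STORE_USER;
	unsigned long ralign = pick_align(BYTES_PER_WORD, 0, &flags);

	/* BYTES_PER_WORD < ARCH_SLAB_MINALIGN, so ralign becomes 8 and
	 * both debug flags are cleared. */
	printf("ralign=%lu flags=%#lx\n", ralign, flags);
	return 0;
}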