author    Christoph Lameter <cl@linux.com>    2012-09-05 00:18:32 +0000
committer Pekka Enberg <penberg@kernel.org>  2012-09-05 12:00:36 +0300
commit    cbb79694d592e9a76880f6ef6db8feccaeee1c32 (patch)
tree      6eb60a253bfde73214d4dc07bed860f17f737537 /mm
parent    db265eca77000c5dafc5608975afe8dafb2a02d5 (diff)
mm/sl[aou]b: Do slab aliasing call from common code
The slab aliasing logic causes some strange contortions in slub. So add a
call to deal with aliases to slab_common.c but disable it for other slab
allocators by providing stubs that fail to create aliases. Full general
support for aliases will require additional cleanup passes and more
standardization of fields in kmem_cache.

Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
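For orientation, the resulting flow is: the common kmem_cache_create() path asks the allocator-specific layer for an alias first and only creates a new cache when none is offered, while SLAB and SLOB opt out via a stub that always returns NULL. The sketch below is a minimal user-space model of that alias-first dispatch under those assumptions, not kernel code; struct demo_cache, demo_cache_alias(), demo_cache_create() and the fixed-size registry are illustrative stand-ins for kmem_cache, __kmem_cache_alias(), __kmem_cache_create() and the slab_caches list.

/*
 * Toy model of the alias-first creation flow (illustrative only).
 * Build with: cc -Wall demo.c
 */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_cache {
	const char *name;
	size_t size;
	int refcount;
};

/* Stand-in for the kernel's list of existing caches. */
static struct demo_cache *registry[16];
static int nr_caches;

/*
 * Allocator-specific hook: a SLUB-style implementation merges with a
 * compatible existing cache; a SLAB/SLOB-style stub would just return NULL.
 */
static struct demo_cache *demo_cache_alias(const char *name, size_t size)
{
	int i;

	(void)name;		/* merging here only looks at the object size */
	for (i = 0; i < nr_caches; i++) {
		if (registry[i]->size == size) {
			registry[i]->refcount++;	/* reuse as an alias */
			return registry[i];
		}
	}
	return NULL;		/* no mergeable cache found */
}

static struct demo_cache *demo_cache_create(const char *name, size_t size)
{
	struct demo_cache *s;

	/* Ask for an alias first, as the common code now does. */
	s = demo_cache_alias(name, size);
	if (s)
		return s;

	/* No mergeable cache: create a new one and register it. */
	if (nr_caches >= 16)
		return NULL;
	s = calloc(1, sizeof(*s));
	if (!s)
		return NULL;
	s->name = name;
	s->size = size;
	s->refcount = 1;
	registry[nr_caches++] = s;
	return s;
}

int main(void)
{
	struct demo_cache *a = demo_cache_create("foo", 64);
	struct demo_cache *b = demo_cache_create("bar", 64);

	/* With merging enabled, "bar" becomes an alias of "foo". */
	printf("same cache: %s, refcount %d\n",
	       a == b ? "yes" : "no", a->refcount);
	return 0;
}

In the real patch, a failed sysfs_slab_alias() makes __kmem_cache_alias() drop the refcount again and return NULL, so kmem_cache_create() simply falls through to __kmem_cache_create() as before.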
Diffstat (limited to 'mm')
-rw-r--r--  mm/slab.h         | 10
-rw-r--r--  mm/slab_common.c  |  4
-rw-r--r--  mm/slub.c         | 15
3 files changed, 25 insertions(+), 4 deletions(-)
diff --git a/mm/slab.h b/mm/slab.h
index c4f9a36..84c28f4 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -36,6 +36,16 @@ extern struct kmem_cache *kmem_cache;
 struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
 	size_t align, unsigned long flags, void (*ctor)(void *));
 
+#ifdef CONFIG_SLUB
+struct kmem_cache *__kmem_cache_alias(const char *name, size_t size,
+	size_t align, unsigned long flags, void (*ctor)(void *));
+#else
+static inline struct kmem_cache *__kmem_cache_alias(const char *name, size_t size,
+	size_t align, unsigned long flags, void (*ctor)(void *))
+{ return NULL; }
+#endif
+
+
 int __kmem_cache_shutdown(struct kmem_cache *);
 
 #endif
diff --git a/mm/slab_common.c b/mm/slab_common.c
index f18c06f..adc42b0 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -115,6 +115,10 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align
 		goto out_locked;
 	}
 
+	s = __kmem_cache_alias(name, size, align, flags, ctor);
+	if (s)
+		goto out_locked;
+
 	s = __kmem_cache_create(n, size, align, flags, ctor);
 
 	if (s) {
diff --git a/mm/slub.c b/mm/slub.c
index 91c9a2f..64d445e 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3708,7 +3708,7 @@ void __init kmem_cache_init(void)
 	slub_max_order = 0;
 
 	kmem_size = offsetof(struct kmem_cache, node) +
-				nr_node_ids * sizeof(struct kmem_cache_node *);
+			nr_node_ids * sizeof(struct kmem_cache_node *);
 
 	/* Allocate two kmem_caches from the page allocator */
 	kmalloc_size = ALIGN(kmem_size, cache_line_size());
@@ -3922,7 +3922,7 @@ static struct kmem_cache *find_mergeable(size_t size,
 	return NULL;
 }
 
-struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
+struct kmem_cache *__kmem_cache_alias(const char *name, size_t size,
 		size_t align, unsigned long flags, void (*ctor)(void *))
 {
 	struct kmem_cache *s;
@@ -3939,11 +3939,18 @@ struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
 
 		if (sysfs_slab_alias(s, name)) {
 			s->refcount--;
-			return NULL;
+			s = NULL;
 		}
-		return s;
 	}
 
+	return s;
+}
+
+struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
+		size_t align, unsigned long flags, void (*ctor)(void *))
+{
+	struct kmem_cache *s;
+
 	s = kmem_cache_alloc(kmem_cache, GFP_KERNEL);
 	if (s) {
 		if (kmem_cache_open(s, name,