author    Christoph Lameter <clameter@sgi.com>    2006-12-13 00:34:23 -0800
committer Linus Torvalds <torvalds@woody.osdl.org>    2006-12-13 09:05:49 -0800
commit    2e892f43ccb602e8ffad73396a1000f2040c9e0b (patch)
tree      2f799810eccebeb5d432daed93ed9654238887b6  /include/linux/slab_def.h
parent    872225ca77519a243d7e19270b062b0ac53418d8 (diff)
[PATCH] Cleanup slab headers / API to allow easy addition of new slab allocators
This is a response to an earlier discussion on linux-mm about splitting slab.h components per allocator. Patch is against 2.6.19-git11. See http://marc.theaimsgroup.com/?l=linux-mm&m=116469577431008&w=2

This patch cleans up the slab header definitions. We define the common functions of slob and slab in slab.h and put the extra definitions needed for slab's kmalloc implementation in <linux/slab_def.h>. In order to get a greater set of common functions we add several empty functions to slob.c and also rename slob's kmalloc to __kmalloc.

Slob does not need any special definitions since we introduce a fallback case. If there is no need for a slab implementation to provide its own kmalloc macros then we simply fall back to the __kmalloc functions. That is sufficient for SLOB.

Sort the functions in slab.h according to their functionality: first the functions operating on struct kmem_cache *, then the kmalloc-related functions, followed by special debug and fallback definitions.

Also redo a lot of comments.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
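The fallback case mentioned above means an allocator only provides its own <linux/*_def.h> header if it wants an optimized, inlined kmalloc(); any other allocator gets the generic definitions from <linux/slab.h>. A minimal sketch of that fallback, simplified and not the verbatim contents of slab.h:

/*
 * Simplified sketch of the fallback case in <linux/slab.h>: SLAB pulls in
 * its inline, size-dispatching kmalloc() from slab_def.h, while an
 * allocator without its own macros (SLOB) maps kmalloc() to __kmalloc().
 */
#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#else
static inline void *kmalloc(size_t size, gfp_t flags)
{
	return __kmalloc(size, flags);
}

static inline void *kzalloc(size_t size, gfp_t flags)
{
	return __kzalloc(size, flags);
}
#endif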
Diffstat (limited to 'include/linux/slab_def.h')
-rw-r--r--   include/linux/slab_def.h   100
1 file changed, 100 insertions, 0 deletions
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
new file mode 100644
index 0000000..4b463e6
--- /dev/null
+++ b/include/linux/slab_def.h
@@ -0,0 +1,100 @@
+#ifndef _LINUX_SLAB_DEF_H
+#define _LINUX_SLAB_DEF_H
+
+/*
+ * Definitions unique to the original Linux SLAB allocator.
+ *
+ * What we provide here is a way to optimize the frequent kmalloc
+ * calls in the kernel by selecting the appropriate general cache
+ * if kmalloc was called with a size that can be established at
+ * compile time.
+ */
+
+#include <linux/init.h>
+#include <asm/page.h> /* kmalloc_sizes.h needs PAGE_SIZE */
+#include <asm/cache.h> /* kmalloc_sizes.h needs L1_CACHE_BYTES */
+#include <linux/compiler.h>
+
+/* Size description struct for general caches. */
+struct cache_sizes {
+ size_t cs_size;
+ struct kmem_cache *cs_cachep;
+ struct kmem_cache *cs_dmacachep;
+};
+extern struct cache_sizes malloc_sizes[];
+
+static inline void *kmalloc(size_t size, gfp_t flags)
+{
+ if (__builtin_constant_p(size)) {
+ int i = 0;
+#define CACHE(x) \
+ if (size <= x) \
+ goto found; \
+ else \
+ i++;
+#include "kmalloc_sizes.h"
+#undef CACHE
+ {
+ extern void __you_cannot_kmalloc_that_much(void);
+ __you_cannot_kmalloc_that_much();
+ }
+found:
+ return kmem_cache_alloc((flags & GFP_DMA) ?
+ malloc_sizes[i].cs_dmacachep :
+ malloc_sizes[i].cs_cachep, flags);
+ }
+ return __kmalloc(size, flags);
+}
+
+static inline void *kzalloc(size_t size, gfp_t flags)
+{
+ if (__builtin_constant_p(size)) {
+ int i = 0;
+#define CACHE(x) \
+ if (size <= x) \
+ goto found; \
+ else \
+ i++;
+#include "kmalloc_sizes.h"
+#undef CACHE
+ {
+ extern void __you_cannot_kzalloc_that_much(void);
+ __you_cannot_kzalloc_that_much();
+ }
+found:
+ return kmem_cache_zalloc((flags & GFP_DMA) ?
+ malloc_sizes[i].cs_dmacachep :
+ malloc_sizes[i].cs_cachep, flags);
+ }
+ return __kzalloc(size, flags);
+}
+
+#ifdef CONFIG_NUMA
+extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
+
+static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
+{
+ if (__builtin_constant_p(size)) {
+ int i = 0;
+#define CACHE(x) \
+ if (size <= x) \
+ goto found; \
+ else \
+ i++;
+#include "kmalloc_sizes.h"
+#undef CACHE
+ {
+ extern void __you_cannot_kmalloc_that_much(void);
+ __you_cannot_kmalloc_that_much();
+ }
+found:
+ return kmem_cache_alloc_node((flags & GFP_DMA) ?
+ malloc_sizes[i].cs_dmacachep :
+ malloc_sizes[i].cs_cachep, flags, node);
+ }
+ return __kmalloc_node(size, flags, node);
+}
+
+#endif /* CONFIG_NUMA */
+
+#endif /* _LINUX_SLAB_DEF_H */
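When the kmalloc() above is called with a compile-time-constant size, the if/else chain produced by expanding CACHE() for every entry in kmalloc_sizes.h is folded away by the compiler, leaving a single kmem_cache_alloc() on the matching general cache; a constant size larger than every cache leaves behind a call to the undefined __you_cannot_kmalloc_that_much(), which turns an oversized constant allocation into a link-time error. A hypothetical caller (not part of this patch) showing both the inlined and the fallback path:

#include <linux/list.h>
#include <linux/slab.h>

/* Hypothetical example only: illustrates when the inline size dispatch
 * from slab_def.h applies and when __kmalloc() is used instead. */
struct foo {
	struct list_head list;
	char name[16];
};

static struct foo *alloc_foo(gfp_t flags)
{
	/* sizeof(struct foo) is a compile-time constant: the call becomes
	 * kmem_cache_alloc() on the matching malloc_sizes[] cache. */
	return kmalloc(sizeof(struct foo), flags);
}

static void *alloc_buffer(size_t len, gfp_t flags)
{
	/* len is only known at run time: this goes through __kmalloc(). */
	return kmalloc(len, flags);
}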