author	Pekka Enberg <penberg@cs.helsinki.fi>	2008-04-28 02:12:22 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-04-28 08:58:19 -0700
commit	1b27d05b6e21249d2338be26dfcbe8f8d8ff8a5b (patch)
tree	c5413d5f64efed1aa84bfa0ab718f1e2a2f6f9cb
parent	19fc3f0acde32636529969570055c7e2a744787c (diff)
mm: move cache_line_size() to <linux/cache.h>
Not all architectures define cache_line_size(), so, as suggested by Andrew, move the private implementations in mm/slab.c and mm/slub.c to <linux/cache.h>.

Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Reviewed-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
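For context, a minimal sketch (not part of this commit) of what the move buys a caller: any file that includes <linux/cache.h> can now use cache_line_size(), and on architectures that do not select ARCH_HAS_CACHE_LINE_SIZE it expands to the compile-time L1_CACHE_BYTES constant. The kmem_align_demo() helper below is hypothetical, for illustration only:

/* Hypothetical caller; not part of this commit. */
#include <linux/cache.h>	/* cache_line_size() now lives here */
#include <linux/kernel.h>	/* ALIGN() */

/*
 * Round an object size up to a cache-line boundary, as the slab
 * allocators do for SLAB_HWCACHE_ALIGN caches.
 */
static unsigned int kmem_align_demo(unsigned int size)
{
	/* resolves to L1_CACHE_BYTES on arches without ARCH_HAS_CACHE_LINE_SIZE */
	return ALIGN(size, cache_line_size());
}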
-rw-r--r--	arch/x86/Kconfig	3
-rw-r--r--	include/linux/cache.h	4
-rw-r--r--	mm/slab.c	4
-rw-r--r--	mm/slub.c	5
4 files changed, 7 insertions(+), 9 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index e5790fe..a8ce13a 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -114,6 +114,9 @@ config GENERIC_TIME_VSYSCALL
 config ARCH_HAS_CPU_RELAX
 	def_bool y
 
+config ARCH_HAS_CACHE_LINE_SIZE
+	def_bool y
+
 config HAVE_SETUP_PER_CPU_AREA
 	def_bool X86_64 || (X86_SMP && !X86_VOYAGER)
diff --git a/include/linux/cache.h b/include/linux/cache.h
index 4552504..97e2488 100644
--- a/include/linux/cache.h
+++ b/include/linux/cache.h
@@ -60,4 +60,8 @@
 #endif
 #endif
 
+#ifndef CONFIG_ARCH_HAS_CACHE_LINE_SIZE
+#define cache_line_size() L1_CACHE_BYTES
+#endif
+
 #endif /* __LINUX_CACHE_H */
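The #ifndef guard above is the usual Kconfig override pattern: x86 selects ARCH_HAS_CACHE_LINE_SIZE (see the Kconfig hunk) and supplies its own runtime cache_line_size() in its arch headers, while every other architecture transparently falls back to the L1_CACHE_BYTES macro. A self-contained userspace sketch of the same pattern, with hypothetical names (HAVE_RUNTIME_CACHE_LINE stands in for the Kconfig symbol):

/* Hypothetical illustration; compile with or without -DHAVE_RUNTIME_CACHE_LINE. */
#include <stdio.h>

#define L1_CACHE_BYTES 64	/* stand-in for the per-arch constant */

#ifdef HAVE_RUNTIME_CACHE_LINE
/* the "arch" provides a runtime query instead of the constant */
static unsigned int cache_line_size(void) { return 128; }
#else
#define cache_line_size() L1_CACHE_BYTES
#endif

int main(void)
{
	printf("cache line: %u bytes\n", (unsigned int)cache_line_size());
	return 0;
}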
diff --git a/mm/slab.c b/mm/slab.c
index 7bc4a13..39d20f8 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -139,10 +139,6 @@
 #define BYTES_PER_WORD sizeof(void *)
 #define REDZONE_ALIGN max(BYTES_PER_WORD, __alignof__(unsigned long long))
 
-#ifndef cache_line_size
-#define cache_line_size() L1_CACHE_BYTES
-#endif
-
 #ifndef ARCH_KMALLOC_MINALIGN
 /*
  * Enforce a minimum alignment for the kmalloc caches.
diff --git a/mm/slub.c b/mm/slub.c
index 48fff83..38914bc 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -207,11 +207,6 @@ static inline void ClearSlabDebug(struct page *page)
 #define __KMALLOC_CACHE 0x20000000 /* objects freed using kfree */
 #define __PAGE_ALLOC_FALLBACK 0x10000000 /* Allow fallback to page alloc */
 
-/* Not all arches define cache_line_size */
-#ifndef cache_line_size
-#define cache_line_size() L1_CACHE_BYTES
-#endif
-
 static int kmem_size = sizeof(struct kmem_cache);
 
 #ifdef CONFIG_SMP