-rw-r--r--  arch/blackfin/Kconfig          |  8
-rw-r--r--  arch/frv/Kconfig               |  8
-rw-r--r--  arch/m68knommu/Kconfig         |  8
-rw-r--r--  arch/v850/Kconfig              |  8
-rw-r--r--  include/linux/kmalloc_sizes.h  | 20
-rw-r--r--  include/linux/slab.h           | 15
-rw-r--r--  include/linux/slub_def.h       | 19
-rw-r--r--  mm/slab.c                      | 19
8 files changed, 34 insertions(+), 71 deletions(-)
diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig
index 1a49305..d80e5b1 100644
--- a/arch/blackfin/Kconfig
+++ b/arch/blackfin/Kconfig
@@ -560,14 +560,6 @@ endchoice
source "mm/Kconfig"
-config LARGE_ALLOCS
- bool "Allow allocating large blocks (> 1MB) of memory"
- help
- Allow the slab memory allocator to keep chains for very large
- memory sizes - upto 32MB. You may need this if your system has
- a lot of RAM, and you need to able to allocate very large
- contiguous chunks. If unsure, say N.
-
config BFIN_DMA_5XX
bool "Enable DMA Support"
depends on (BF533 || BF532 || BF531 || BF537 || BF536 || BF534 || BF561)
diff --git a/arch/frv/Kconfig b/arch/frv/Kconfig
index 114738a..74eef71 100644
--- a/arch/frv/Kconfig
+++ b/arch/frv/Kconfig
@@ -102,14 +102,6 @@ config HIGHPTE
with a lot of RAM, this can be wasteful of precious low memory.
Setting this option will put user-space page tables in high memory.
-config LARGE_ALLOCS
- bool "Allow allocating large blocks (> 1MB) of memory"
- help
- Allow the slab memory allocator to keep chains for very large memory
- sizes - up to 32MB. You may need this if your system has a lot of
- RAM, and you need to able to allocate very large contiguous chunks.
- If unsure, say N.
-
source "mm/Kconfig"
choice
diff --git a/arch/m68knommu/Kconfig b/arch/m68knommu/Kconfig
index 823f737..adc64a2 100644
--- a/arch/m68knommu/Kconfig
+++ b/arch/m68knommu/Kconfig
@@ -470,14 +470,6 @@ config AVNET
default y
depends on (AVNET5282)
-config LARGE_ALLOCS
- bool "Allow allocating large blocks (> 1MB) of memory"
- help
- Allow the slab memory allocator to keep chains for very large
- memory sizes - upto 32MB. You may need this if your system has
- a lot of RAM, and you need to able to allocate very large
- contiguous chunks. If unsure, say N.
-
config 4KSTACKS
bool "Use 4Kb for kernel stacks instead of 8Kb"
default y
diff --git a/arch/v850/Kconfig b/arch/v850/Kconfig
index 5f54c12..ace479a 100644
--- a/arch/v850/Kconfig
+++ b/arch/v850/Kconfig
@@ -240,14 +240,6 @@ menu "Processor type and features"
config RESET_GUARD
bool "Reset Guard"
- config LARGE_ALLOCS
- bool "Allow allocating large blocks (> 1MB) of memory"
- help
- Allow the slab memory allocator to keep chains for very large
- memory sizes - upto 32MB. You may need this if your system has
- a lot of RAM, and you need to able to allocate very large
- contiguous chunks. If unsure, say N.
-
source "mm/Kconfig"
endmenu
diff --git a/include/linux/kmalloc_sizes.h b/include/linux/kmalloc_sizes.h
index bda23e0..e576b84 100644
--- a/include/linux/kmalloc_sizes.h
+++ b/include/linux/kmalloc_sizes.h
@@ -19,17 +19,27 @@
CACHE(32768)
CACHE(65536)
CACHE(131072)
-#if (NR_CPUS > 512) || (MAX_NUMNODES > 256) || !defined(CONFIG_MMU)
+#if KMALLOC_MAX_SIZE >= 262144
CACHE(262144)
#endif
-#ifndef CONFIG_MMU
+#if KMALLOC_MAX_SIZE >= 524288
CACHE(524288)
+#endif
+#if KMALLOC_MAX_SIZE >= 1048576
CACHE(1048576)
-#ifdef CONFIG_LARGE_ALLOCS
+#endif
+#if KMALLOC_MAX_SIZE >= 2097152
CACHE(2097152)
+#endif
+#if KMALLOC_MAX_SIZE >= 4194304
CACHE(4194304)
+#endif
+#if KMALLOC_MAX_SIZE >= 8388608
CACHE(8388608)
+#endif
+#if KMALLOC_MAX_SIZE >= 16777216
CACHE(16777216)
+#endif
+#if KMALLOC_MAX_SIZE >= 33554432
CACHE(33554432)
-#endif /* CONFIG_LARGE_ALLOCS */
-#endif /* CONFIG_MMU */
+#endif
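
The header above is an x-macro list: each CACHE(size) line expands to whatever the including allocator defines CACHE() to be, and the new KMALLOC_MAX_SIZE guards simply truncate the table at the configuration's limit instead of keying off CONFIG_MMU and CONFIG_LARGE_ALLOCS. A minimal sketch of the consumer pattern, assumed to mirror the malloc_sizes[] table in mm/slab.c (the struct and array names here are illustrative, not a verbatim copy):

/* Kernel-context sketch: build one table entry per CACHE() line above. */
struct cache_sizes_sketch {
	size_t cs_size;
	struct kmem_cache *cs_cachep;
};

static struct cache_sizes_sketch malloc_sizes_sketch[] = {
#define CACHE(x) { .cs_size = (x) },
#include <linux/kmalloc_sizes.h>
	{ .cs_size = ULONG_MAX }	/* sentinel terminating the table */
#undef CACHE
};
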
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 6fb2ae2..a015236 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -74,6 +74,21 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
#endif
/*
+ * The largest kmalloc size supported by the slab allocators is
+ * 32 megabyte (2^25) or the maximum allocatable page order if that is
+ * less than 32 MB.
+ *
+ * WARNING: It's not easy to increase this value since the allocators have
+ * to do various tricks to work around compiler limitations in order to
+ * ensure proper constant folding.
+ */
+#define KMALLOC_SHIFT_HIGH ((MAX_ORDER + PAGE_SHIFT) <= 25 ? \
+ (MAX_ORDER + PAGE_SHIFT) : 25)
+
+#define KMALLOC_MAX_SIZE (1UL << KMALLOC_SHIFT_HIGH)
+#define KMALLOC_MAX_ORDER (KMALLOC_SHIFT_HIGH - PAGE_SHIFT)
+
+/*
* Common kmalloc functions provided by all allocators
*/
void *__kmalloc(size_t, gfp_t);
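
For a common configuration this is straightforward arithmetic: with 4 KB pages (PAGE_SHIFT = 12) and a default MAX_ORDER of 11, MAX_ORDER + PAGE_SHIFT is 23, below the cap, so KMALLOC_SHIFT_HIGH is 23, KMALLOC_MAX_SIZE is 8 MB and KMALLOC_MAX_ORDER is 11; only configurations where MAX_ORDER + PAGE_SHIFT exceeds 25 get clamped to the 32 MB ceiling. A standalone user-space sketch that evaluates the macros, with the PAGE_SHIFT and MAX_ORDER values assumed purely for illustration:

/* Evaluate the new limits outside the kernel; the two inputs are assumptions. */
#include <stdio.h>

#define PAGE_SHIFT 12	/* assumed: 4 KB pages */
#define MAX_ORDER  11	/* assumed: default buddy allocator limit */

#define KMALLOC_SHIFT_HIGH ((MAX_ORDER + PAGE_SHIFT) <= 25 ? \
				(MAX_ORDER + PAGE_SHIFT) : 25)
#define KMALLOC_MAX_SIZE  (1UL << KMALLOC_SHIFT_HIGH)
#define KMALLOC_MAX_ORDER (KMALLOC_SHIFT_HIGH - PAGE_SHIFT)

int main(void)
{
	/* Prints 23, 8388608 and 11 for the values assumed above. */
	printf("KMALLOC_SHIFT_HIGH = %d\n", KMALLOC_SHIFT_HIGH);
	printf("KMALLOC_MAX_SIZE   = %lu bytes\n", KMALLOC_MAX_SIZE);
	printf("KMALLOC_MAX_ORDER  = %d\n", KMALLOC_MAX_ORDER);
	return 0;
}
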
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index a9fb928..0764c82 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -58,17 +58,6 @@ struct kmem_cache {
*/
#define KMALLOC_SHIFT_LOW 3
-#ifdef CONFIG_LARGE_ALLOCS
-#define KMALLOC_SHIFT_HIGH ((MAX_ORDER + PAGE_SHIFT) =< 25 ? \
- (MAX_ORDER + PAGE_SHIFT - 1) : 25)
-#else
-#if !defined(CONFIG_MMU) || NR_CPUS > 512 || MAX_NUMNODES > 256
-#define KMALLOC_SHIFT_HIGH 20
-#else
-#define KMALLOC_SHIFT_HIGH 18
-#endif
-#endif
-
/*
* We keep the general caches in an array of slab caches that are used for
* 2^x bytes of allocations.
@@ -79,7 +68,7 @@ extern struct kmem_cache kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
* Sorry that the following has to be that ugly but some versions of GCC
* have trouble with constant propagation and loops.
*/
-static inline int kmalloc_index(int size)
+static inline int kmalloc_index(size_t size)
{
/*
* We should return 0 if size == 0 but we use the smallest object
@@ -87,7 +76,7 @@ static inline int kmalloc_index(int size)
*/
WARN_ON_ONCE(size == 0);
- if (size > (1 << KMALLOC_SHIFT_HIGH))
+ if (size > KMALLOC_MAX_SIZE)
return -1;
if (size > 64 && size <= 96)
@@ -110,17 +99,13 @@ static inline int kmalloc_index(int size)
if (size <= 64 * 1024) return 16;
if (size <= 128 * 1024) return 17;
if (size <= 256 * 1024) return 18;
-#if KMALLOC_SHIFT_HIGH > 18
if (size <= 512 * 1024) return 19;
if (size <= 1024 * 1024) return 20;
-#endif
-#if KMALLOC_SHIFT_HIGH > 20
if (size <= 2 * 1024 * 1024) return 21;
if (size <= 4 * 1024 * 1024) return 22;
if (size <= 8 * 1024 * 1024) return 23;
if (size <= 16 * 1024 * 1024) return 24;
if (size <= 32 * 1024 * 1024) return 25;
-#endif
return -1;
/*
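
With the #ifdef blocks gone, kmalloc_index() always compiles the full ladder and relies solely on the KMALLOC_MAX_SIZE comparison to reject oversized requests; widening the parameter from int to size_t also keeps a very large size from wrapping negative before that check. A hedged sketch of how the returned index is typically consumed, modelled on SLUB's per-size cache lookup (the wrapper name is illustrative):

/* Illustrative wrapper: map an allocation size to a general kmalloc cache. */
static inline struct kmem_cache *kmalloc_cache_sketch(size_t size)
{
	int index = kmalloc_index(size);	/* e.g. 7 for a 100-byte request */

	if (index < 0)
		return NULL;	/* size > KMALLOC_MAX_SIZE: no general cache */
	return &kmalloc_caches[index];
}
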
diff --git a/mm/slab.c b/mm/slab.c
index 1dc0ce1..528243e 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -569,21 +569,6 @@ static void **dbg_userword(struct kmem_cache *cachep, void *objp)
#endif
/*
- * Maximum size of an obj (in 2^order pages) and absolute limit for the gfp
- * order.
- */
-#if defined(CONFIG_LARGE_ALLOCS)
-#define MAX_OBJ_ORDER 13 /* up to 32Mb */
-#define MAX_GFP_ORDER 13 /* up to 32Mb */
-#elif defined(CONFIG_MMU)
-#define MAX_OBJ_ORDER 5 /* 32 pages */
-#define MAX_GFP_ORDER 5 /* 32 pages */
-#else
-#define MAX_OBJ_ORDER 8 /* up to 1Mb */
-#define MAX_GFP_ORDER 8 /* up to 1Mb */
-#endif
-
-/*
* Do not go above this order unless 0 objects fit into the slab.
*/
#define BREAK_GFP_ORDER_HI 1
@@ -2002,7 +1987,7 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
size_t left_over = 0;
int gfporder;
- for (gfporder = 0; gfporder <= MAX_GFP_ORDER; gfporder++) {
+ for (gfporder = 0; gfporder <= KMALLOC_MAX_ORDER; gfporder++) {
unsigned int num;
size_t remainder;
@@ -2148,7 +2133,7 @@ kmem_cache_create (const char *name, size_t size, size_t align,
* Sanity checks... these are all serious usage bugs.
*/
if (!name || in_interrupt() || (size < BYTES_PER_WORD) ||
- (size > (1 << MAX_OBJ_ORDER) * PAGE_SIZE) || dtor) {
+ size > KMALLOC_MAX_SIZE || dtor) {
printk(KERN_ERR "%s: Early error in slab %s\n", __FUNCTION__,
name);
BUG();
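
The net effect in mm/slab.c is that the slab order search in calculate_slab_order() and the creation-time sanity check are now bounded by the shared KMALLOC_MAX_ORDER and KMALLOC_MAX_SIZE limits rather than the removed MAX_GFP_ORDER/MAX_OBJ_ORDER pair. A caller-side usage sketch, assuming this kernel's six-argument kmem_cache_create() prototype (the cache name and structure are hypothetical):

/* Hypothetical driver code: the object size must stay at or below
 * KMALLOC_MAX_SIZE, or the early sanity check above triggers BUG().
 */
struct foo_record {
	char payload[4096];		/* comfortably below KMALLOC_MAX_SIZE */
};

static struct kmem_cache *foo_cache;

static int __init foo_cache_init(void)
{
	foo_cache = kmem_cache_create("foo_record", sizeof(struct foo_record),
				      0, SLAB_HWCACHE_ALIGN,
				      NULL /* ctor */, NULL /* dtor */);
	return foo_cache ? 0 : -ENOMEM;
}
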