author    Eric W. Biederman <ebiederm@xmission.com>  2006-09-26 10:52:35 +0200
committer Andi Kleen <andi@basil.nowhere.org>  2006-09-26 10:52:35 +0200
commit    ba4d40bb5c465f0a4dcc30d02dab80c2cb7e1ff3 (patch)
tree      12b10f26f5efddb15ef8a75a1eff38771334365d /include/asm-x86_64
parent    522e93e3fcdbf00ba85c72fde6df28cfc0486a65 (diff)
[PATCH] Auto size the per cpu area.
Now for a completely different but trivial approach. I just boot tested it with 255 CPUs and everything worked.

Currently everything (except module data) that we place in the per cpu area is known at compile time. So instead of allocating a fixed size for the per_cpu area, allocate the number of bytes we need plus a fixed constant reserved for modules. It isn't perfect, but it is much less of a pain to work with than what we are doing now.

AK: fixed warning

Signed-off-by: Eric W. Biederman <ebiederm@xmission.com>
Signed-off-by: Andi Kleen <ak@suse.de>
Diffstat (limited to 'include/asm-x86_64')
-rw-r--r--  include/asm-x86_64/percpu.h  10
1 file changed, 10 insertions(+), 0 deletions(-)
diff --git a/include/asm-x86_64/percpu.h b/include/asm-x86_64/percpu.h
index 08dd9f9..39d2bab 100644
--- a/include/asm-x86_64/percpu.h
+++ b/include/asm-x86_64/percpu.h
@@ -11,6 +11,16 @@
 #include <asm/pda.h>
+#ifdef CONFIG_MODULES
+# define PERCPU_MODULE_RESERVE 8192
+#else
+# define PERCPU_MODULE_RESERVE 0
+#endif
+
+#define PERCPU_ENOUGH_ROOM \
+ (ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES) + \
+ PERCPU_MODULE_RESERVE)
+
 #define __per_cpu_offset(cpu) (cpu_pda(cpu)->data_offset)
 #define __my_cpu_offset() read_pda(data_offset)
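
For illustration only (not part of this diff): the new PERCPU_ENOUGH_ROOM value is consumed by the boot-time per-cpu setup, which carves out one copy of the .data.percpu section per possible CPU plus the fixed module reserve. The sketch below is modeled on the x86-64 setup_per_cpu_areas() of that era; the exact allocator call, loop, and error handling shown here are assumptions, not code introduced by this patch.

/*
 * Sketch: how setup_per_cpu_areas() is expected to use PERCPU_ENOUGH_ROOM.
 * Each CPU gets a private copy of the per-cpu data plus
 * PERCPU_MODULE_RESERVE bytes of headroom for per cpu variables that
 * modules may add later.
 */
void __init setup_per_cpu_areas(void)
{
	unsigned long size = PERCPU_ENOUGH_ROOM;	/* link-time size + module reserve */
	int i;

	for_each_cpu_mask(i, cpu_possible_map) {
		char *ptr = alloc_bootmem_pages(size);	/* one area per possible CPU */

		if (!ptr)
			panic("Cannot allocate cpu data for CPU %d\n", i);

		/* record the offset so per_cpu(var, i) resolves into this copy */
		cpu_pda(i)->data_offset = ptr - __per_cpu_start;

		/* seed the area with the compile-time initial values */
		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
	}
}

The effect of the patch is that the per-CPU area now scales with the amount of per-cpu data actually linked into the kernel (rounded up to SMP_CACHE_BYTES), with only the 8192-byte module headroom fixed, instead of using one fixed size regardless of need.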