author    Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>  2005-11-05 17:25:54 +0100
committer Linus Torvalds <torvalds@g5.osdl.org>  2005-11-14 19:55:17 -0800
commit    bf0f2e23834e2bf7d64b467ef07095b1c7e2c04b (patch)
tree      0a652a8e4cc51f50139d892cee17f7a699c4afa4 /include/asm-x86_64/cache.h
parent    8e0d4f4e9132ae6e353f9cf27261627bcc7c65cc (diff)
[PATCH] x86_64: Set ____cacheline_maxaligned_in_smp alignment to 128 bytes
The current value was correct before the introduction of Intel EM64T support - but now L1_CACHE_SHIFT_MAX can be less than L1_CACHE_SHIFT, which _is_ funny!

Among the few users of ____cacheline_maxaligned_in_smp we also have (for example) rcu_ctrlblk, and struct zone, with zone->{lru_,}lock. I.e. we get a lot of excess cacheline bouncing on them.

No correctness issues, obviously. So this could even be merged for 2.6.14 (I'm not a fan of this idea, though).

CC: Andi Kleen <ak@suse.de>
Signed-off-by: Paolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
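For context: the constant being bumped here feeds the generic ____cacheline_maxaligned_in_smp attribute, which is why an undersized value silently under-aligns its users. Below is a rough, from-memory sketch of how the contemporaneous include/linux/cache.h consumed it; it is not part of this patch and may differ in detail from the actual tree:

/* Sketch of the 2.6-era generic definition (reconstruction, not this patch):
 * on SMP, "max-aligned" objects are padded to the largest cache line the
 * architecture claims to support, i.e. 1 << L1_CACHE_SHIFT_MAX bytes. */
#if defined(CONFIG_SMP)
#define ____cacheline_maxaligned_in_smp \
	__attribute__((__aligned__(1 << (L1_CACHE_SHIFT_MAX))))
#else
#define ____cacheline_maxaligned_in_smp
#endif

With L1_CACHE_SHIFT_MAX stuck at 6, that attribute aligned to 64 bytes even on CPUs whose real line size (L1_CACHE_SHIFT = 7) is 128 bytes, so two "max-aligned" hot objects could still share a line.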
Diffstat (limited to 'include/asm-x86_64/cache.h')
-rw-r--r--  include/asm-x86_64/cache.h | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/include/asm-x86_64/cache.h b/include/asm-x86_64/cache.h
index eda62ba..33e5342 100644
--- a/include/asm-x86_64/cache.h
+++ b/include/asm-x86_64/cache.h
@@ -9,6 +9,6 @@
 /* L1 cache line size */
 #define L1_CACHE_SHIFT	(CONFIG_X86_L1_CACHE_SHIFT)
 #define L1_CACHE_BYTES	(1 << L1_CACHE_SHIFT)
-#define L1_CACHE_SHIFT_MAX 6	/* largest L1 which this arch supports */
+#define L1_CACHE_SHIFT_MAX 7	/* largest L1 which this arch supports */
 
 #endif
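To make the arithmetic concrete, here is a small standalone illustration; this is not kernel code, and the values are hard-coded to mirror the EM64T case described above:

#include <stdio.h>

/* Illustrative values only: on EM64T, CONFIG_X86_L1_CACHE_SHIFT is 7
 * (128-byte lines), but L1_CACHE_SHIFT_MAX was still 6 before this patch. */
#define L1_CACHE_SHIFT          7
#define L1_CACHE_BYTES          (1 << L1_CACHE_SHIFT)
#define L1_CACHE_SHIFT_MAX_OLD  6
#define L1_CACHE_SHIFT_MAX_NEW  7

int main(void)
{
	/* Before the fix, "max-aligned" data got less alignment than one
	 * real cache line, defeating the anti-bouncing padding. */
	printf("real L1 line size:      %d bytes\n", L1_CACHE_BYTES);
	printf("maxaligned before fix:  %d bytes\n", 1 << L1_CACHE_SHIFT_MAX_OLD);
	printf("maxaligned after fix:   %d bytes\n", 1 << L1_CACHE_SHIFT_MAX_NEW);
	return 0;
}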