aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorZach Brown <zach.brown@oracle.com>2006-01-27 14:02:47 -0800
committerSam Ravnborg <sam@mars.ravnborg.org>2006-02-19 09:51:19 +0100
commit379b5441aeb895fe55b877a8a9c187e8728f774c (patch)
tree67916a0f7d8a9a7d1ce186c81ac4f4481e6cc23e
parent8e70c45887a6bbe40393342ea5b426b0dd836dff (diff)
downloadkernel_samsung_crespo-379b5441aeb895fe55b877a8a9c187e8728f774c.zip
kernel_samsung_crespo-379b5441aeb895fe55b877a8a9c187e8728f774c.tar.gz
kernel_samsung_crespo-379b5441aeb895fe55b877a8a9c187e8728f774c.tar.bz2
x86: align per-cpu section to configured cache bytes
This matches the fix for a bug seen on x86-64. Test-booted on old hardware that had 32-byte cachelines to begin with. Signed-off-by: Zach Brown <zach.brown@oracle.com> Signed-off-by: Sam Ravnborg <sam@ravnborg.org>
-rw-r--r--arch/i386/kernel/vmlinux.lds.S3
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/arch/i386/kernel/vmlinux.lds.S b/arch/i386/kernel/vmlinux.lds.S
index 4710195..18f99cc 100644
--- a/arch/i386/kernel/vmlinux.lds.S
+++ b/arch/i386/kernel/vmlinux.lds.S
@@ -7,6 +7,7 @@
#include <asm-generic/vmlinux.lds.h>
#include <asm/thread_info.h>
#include <asm/page.h>
+#include <asm/cache.h>
OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386")
OUTPUT_ARCH(i386)
@@ -115,7 +116,7 @@ SECTIONS
__initramfs_start = .;
.init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) { *(.init.ramfs) }
__initramfs_end = .;
- . = ALIGN(32);
+ . = ALIGN(L1_CACHE_BYTES);
__per_cpu_start = .;
.data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) { *(.data.percpu) }
__per_cpu_end = .;