From da32dac101263fb5b155407507c548e3ac2a6a2a Mon Sep 17 00:00:00 2001
From: Rusty Russell
Date: Thu, 16 Dec 2010 17:03:15 -0600
Subject: lguest: populate initial_page_table

Two x86 patches broke lguest:

1) v2.6.35-492-g72d7c3b, which changed x86 to use the memblock
   allocator.  In lguest, the host places linear page tables at the
   top of mem, which used to be enough to get us up to the
   swapper_pg_dir page tables.  With the first patch, the direct
   mapping tables used that memory:

   Before: kernel direct mapping tables up to 4000000 @ 7000-1a000
   After:  kernel direct mapping tables up to 4000000 @ 3fed000-4000000

   I initially fixed this by lying about the amount of memory we had,
   so the kernel wouldn't blatt the lguest boot pagetables (yuk!), but
   then...

2) v2.6.36-rc8-54-gb40827f, which made x86 boot use initial_page_table.
   This was initialized in a part of head_32.S which isn't executed by
   lguest; it is then copied into swapper_pg_dir.  So we have to
   initialize it; and anyway we switch to it before we blatt the old
   tables, so that fixes the previous damage as well.

For the moment, I cut & pasted the code into lguest's boot code, but
next merge window I will merge them.

Signed-off-by: Rusty Russell
Cc: Jeremy Fitzhardinge
Cc: Konrad Rzeszutek Wilk
To: x86@kernel.org
---
 arch/x86/kernel/head_32.S | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'arch/x86/kernel/head_32.S')

diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index bcece91..f0bea76 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -620,13 +620,13 @@ ENTRY(initial_code)
 __PAGE_ALIGNED_BSS
 	.align PAGE_SIZE_asm
 #ifdef CONFIG_X86_PAE
-initial_pg_pmd:
+ENTRY(initial_pg_pmd)
 	.fill 1024*KPMDS,4,0
 #else
 ENTRY(initial_page_table)
 	.fill 1024,4,0
 #endif
-initial_pg_fixmap:
+ENTRY(initial_pg_fixmap)
 	.fill 1024,4,0
 ENTRY(empty_zero_page)
 	.fill 4096,1,0
--
cgit v1.1
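For context on what the commit message means by populating the boot page
tables: before swapper_pg_dir is live, the early tables map physical memory
both at its identity address and again at __PAGE_OFFSET, with every present
PGD slot pointing at a page of PTEs.  The stand-alone C sketch below only
illustrates that shape; it is NOT the real head_32.S assembly and NOT
lguest's actual boot code, and all demo_* names and constants are invented.

/*
 * Minimal user-space sketch of an x86-32 non-PAE boot page directory:
 * each present PGD entry points at a page of PTEs, and the same entries
 * appear at both the identity and the __PAGE_OFFSET slots.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_PAGE_OFFSET   0xC0000000u  /* assumed 3G/1G split */
#define DEMO_PGDIR_SHIFT   22           /* non-PAE: 4MB per PGD entry */
#define DEMO_PTRS_PER_PGD  1024
#define DEMO_PDE_PRESENT   0x1u
#define DEMO_PDE_RW        0x2u

static uint32_t demo_initial_page_table[DEMO_PTRS_PER_PGD];

/*
 * Map mem_bytes of RAM, whose PTE pages start at physical address
 * pte_phys, at identity addresses and again at DEMO_PAGE_OFFSET.
 */
static void demo_populate(uint32_t pte_phys, uint32_t mem_bytes)
{
	uint32_t nr = (mem_bytes + (1u << DEMO_PGDIR_SHIFT) - 1) >> DEMO_PGDIR_SHIFT;
	uint32_t kidx = DEMO_PAGE_OFFSET >> DEMO_PGDIR_SHIFT;  /* 0x300 */

	for (uint32_t i = 0; i < nr; i++) {
		uint32_t pde = (pte_phys + i * 4096u) | DEMO_PDE_PRESENT | DEMO_PDE_RW;
		demo_initial_page_table[i] = pde;         /* identity map  */
		demo_initial_page_table[kidx + i] = pde;  /* kernel linear */
	}
}

int main(void)
{
	demo_populate(0x8000u, 64u << 20);  /* e.g. a 64MB guest, PTEs at 0x8000 */
	printf("pgd[0x000] = %#x\n", demo_initial_page_table[0]);
	printf("pgd[0x300] = %#x\n", demo_initial_page_table[0x300]);
	return 0;
}

Exporting initial_pg_pmd and initial_pg_fixmap with ENTRY() in the patch
above is what lets lguest's C boot code fill in tables like these itself.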
From 147dd5610c8d1bacb88a6c1dfdaceaf257946ed0 Mon Sep 17 00:00:00 2001
From: "H. Peter Anvin"
Date: Thu, 16 Dec 2010 19:11:09 -0800
Subject: x86-32: Make sure we can map all of lowmem if we need to

A relocatable kernel can be anywhere in lowmem -- and in the case of a
kdump kernel, is likely to be fairly high.  Since the early page tables
map everything from address zero up we need to make sure we allocate
enough brk that we can map all of lowmem if we need to.

Reported-by: Stanislaw Gruszka
Signed-off-by: H. Peter Anvin
Tested-by: Yinghai Lu
LKML-Reference: <4D0AD3ED.8070607@kernel.org>
---
 arch/x86/kernel/head_32.S | 12 +++++++-----
 1 file changed, 7 insertions(+), 5 deletions(-)

(limited to 'arch/x86/kernel/head_32.S')

diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index bcece91..d7cdf5b 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -60,16 +60,18 @@
 #define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
 #endif
 
+/* Number of possible pages in the lowmem region */
+LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
+
 /* Enough space to fit pagetables for the low memory linear map */
-MAPPING_BEYOND_END = \
-	PAGE_TABLE_SIZE(((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT) << PAGE_SHIFT
+MAPPING_BEYOND_END = PAGE_TABLE_SIZE(LOWMEM_PAGES) << PAGE_SHIFT
 
 /*
  * Worst-case size of the kernel mapping we need to make:
- * the worst-case size of the kernel itself, plus the extra we need
- * to map for the linear map.
+ * a relocatable kernel can live anywhere in lowmem, so we need to be able
+ * to map all of lowmem.
  */
-KERNEL_PAGES = (KERNEL_IMAGE_SIZE + MAPPING_BEYOND_END)>>PAGE_SHIFT
+KERNEL_PAGES = LOWMEM_PAGES
 
 INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE_asm
 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
--
cgit v1.1
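To make the worst case concrete: with the usual 3G/1G split, lowmem is at
most 1GiB, i.e. 262144 4K pages; in non-PAE mode one page of PTEs maps 1024
pages, so covering all of lowmem takes 256 page-table pages, a 1MiB brk
reservation.  The stand-alone C program below just re-runs that arithmetic;
it mirrors the assembler constants above under the assumption of a non-PAE
build with PAGE_OFFSET = 0xC0000000, and is not kernel code.

/*
 * Check of the brk reservation computed by KERNEL_PAGES = LOWMEM_PAGES
 * in the patch above, for a non-PAE build with a 3G/1G split.
 */
#include <stdio.h>

#define PAGE_SHIFT    12
#define PAGE_SIZE     (1ul << PAGE_SHIFT)
#define PTRS_PER_PGD  1024           /* non-PAE: PTEs per page-table page */
#define PAGE_OFFSET   0xC0000000ul   /* assumed 3G/1G split */

/* Same formula as the assembler macro: page-table pages needed */
#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)

int main(void)
{
	unsigned long lowmem_pages = ((1ull << 32) - PAGE_OFFSET) >> PAGE_SHIFT;
	unsigned long pt_pages = PAGE_TABLE_SIZE(lowmem_pages);
	unsigned long init_map_size = pt_pages * PAGE_SIZE;

	printf("LOWMEM_PAGES     = %lu\n", lowmem_pages);            /* 262144 */
	printf("page-table pages = %lu\n", pt_pages);                /* 256    */
	printf("INIT_MAP_SIZE    = %lu KiB\n", init_map_size >> 10); /* 1024   */
	return 0;
}

The old formula sized the reservation from KERNEL_IMAGE_SIZE plus
MAPPING_BEYOND_END, which is too small when a relocatable kernel is loaded
high in lowmem and the early tables must still map everything below it.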