Diffstat (limited to 'arch/x86/mm')
-rw-r--r--  arch/x86/mm/hugetlbpage.c |  1
-rw-r--r--  arch/x86/mm/init.c        | 33
-rw-r--r--  arch/x86/mm/init_32.c     |  2
-rw-r--r--  arch/x86/mm/init_64.c     |  1
-rw-r--r--  arch/x86/mm/ioremap.c     | 14
-rw-r--r--  arch/x86/mm/kmmio.c       |  1
-rw-r--r--  arch/x86/mm/mmio-mod.c    |  1
-rw-r--r--  arch/x86/mm/pageattr.c    | 27
-rw-r--r--  arch/x86/mm/pat.c         |  2
-rw-r--r--  arch/x86/mm/pgtable.c     |  1
-rw-r--r--  arch/x86/mm/pgtable_32.c  |  3
-rw-r--r--  arch/x86/mm/srat_64.c     |  3
12 files changed, 75 insertions, 14 deletions
diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
index f46c340..069ce7c 100644
--- a/arch/x86/mm/hugetlbpage.c
+++ b/arch/x86/mm/hugetlbpage.c
@@ -9,7 +9,6 @@
 #include <linux/mm.h>
 #include <linux/hugetlb.h>
 #include <linux/pagemap.h>
-#include <linux/slab.h>
 #include <linux/err.h>
 #include <linux/sysctl.h>
 #include <asm/mman.h>
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index e71c5cb..b278535 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -1,3 +1,4 @@
+#include <linux/gfp.h>
 #include <linux/initrd.h>
 #include <linux/ioport.h>
 #include <linux/swap.h>
@@ -331,11 +332,23 @@ int devmem_is_allowed(unsigned long pagenr)
 
 void free_init_pages(char *what, unsigned long begin, unsigned long end)
 {
-	unsigned long addr = begin;
+	unsigned long addr;
+	unsigned long begin_aligned, end_aligned;
 
-	if (addr >= end)
+	/* Make sure boundaries are page aligned */
+	begin_aligned = PAGE_ALIGN(begin);
+	end_aligned = end & PAGE_MASK;
+
+	if (WARN_ON(begin_aligned != begin || end_aligned != end)) {
+		begin = begin_aligned;
+		end = end_aligned;
+	}
+
+	if (begin >= end)
 		return;
 
+	addr = begin;
+
 	/*
 	 * If debugging page accesses then do not free this memory but
 	 * mark them not present - any buggy init-section access will
@@ -343,7 +356,7 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
 	 */
 #ifdef CONFIG_DEBUG_PAGEALLOC
 	printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
-		begin, PAGE_ALIGN(end));
+		begin, end);
 	set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
 #else
 	/*
@@ -358,8 +371,7 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
 	for (; addr < end; addr += PAGE_SIZE) {
 		ClearPageReserved(virt_to_page(addr));
 		init_page_count(virt_to_page(addr));
-		memset((void *)(addr & ~(PAGE_SIZE-1)),
-		       POISON_FREE_INITMEM, PAGE_SIZE);
+		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
 		free_page(addr);
 		totalram_pages++;
 	}
@@ -376,6 +388,15 @@ void free_initmem(void)
 #ifdef CONFIG_BLK_DEV_INITRD
 void free_initrd_mem(unsigned long start, unsigned long end)
 {
-	free_init_pages("initrd memory", start, end);
+	/*
+	 * end may not be aligned, and we cannot align it here:
+	 * the decompressor could be confused by an aligned initrd_end.
+	 * The end partial page is already reserved earlier in
+	 *   - i386_start_kernel()
+	 *   - x86_64_start_kernel()
+	 *   - relocate_initrd()
+	 * so here we can PAGE_ALIGN() safely to free the partial page.
+	 */
+	free_init_pages("initrd memory", start, PAGE_ALIGN(end));
 }
 #endif
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 5cb3f0f..bca7909 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -25,11 +25,11 @@
 #include <linux/pfn.h>
 #include <linux/poison.h>
 #include <linux/bootmem.h>
-#include <linux/slab.h>
 #include <linux/proc_fs.h>
 #include <linux/memory_hotplug.h>
 #include <linux/initrd.h>
 #include <linux/cpumask.h>
+#include <linux/gfp.h>
 
 #include <asm/asm.h>
 #include <asm/bios_ebda.h>
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index e9b040e..ee41bba 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -29,6 +29,7 @@
 #include <linux/module.h>
 #include <linux/memory_hotplug.h>
 #include <linux/nmi.h>
+#include <linux/gfp.h>
 
 #include <asm/processor.h>
 #include <asm/bios_ebda.h>
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 5eb1ba7..12e4d2d 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -448,6 +448,20 @@ static inline void __init early_clear_fixmap(enum fixed_addresses idx)
 static void __iomem *prev_map[FIX_BTMAPS_SLOTS] __initdata;
 static unsigned long prev_size[FIX_BTMAPS_SLOTS] __initdata;
 
+void __init fixup_early_ioremap(void)
+{
+	int i;
+
+	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
+		if (prev_map[i]) {
+			WARN_ON(1);
+			break;
+		}
+	}
+
+	early_ioremap_init();
+}
+
 static int __init check_early_ioremap_leak(void)
 {
 	int count = 0;
diff --git a/arch/x86/mm/kmmio.c b/arch/x86/mm/kmmio.c
index 536fb68..5d0e67f 100644
--- a/arch/x86/mm/kmmio.c
+++ b/arch/x86/mm/kmmio.c
@@ -21,6 +21,7 @@
 #include <linux/kdebug.h>
 #include <linux/mutex.h>
 #include <linux/io.h>
+#include <linux/slab.h>
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 #include <linux/errno.h>
diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
index 34a3291..3adff7d 100644
--- a/arch/x86/mm/mmio-mod.c
+++ b/arch/x86/mm/mmio-mod.c
@@ -26,6 +26,7 @@
 
 #include <linux/module.h>
 #include <linux/debugfs.h>
+#include <linux/slab.h>
 #include <linux/uaccess.h>
 #include <linux/io.h>
 #include <linux/version.h>
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 1d4eb93..28195c3 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -6,13 +6,13 @@
 #include <linux/bootmem.h>
 #include <linux/module.h>
 #include <linux/sched.h>
-#include <linux/slab.h>
 #include <linux/mm.h>
 #include <linux/interrupt.h>
 #include <linux/seq_file.h>
 #include <linux/debugfs.h>
 #include <linux/pfn.h>
 #include <linux/percpu.h>
+#include <linux/gfp.h>
 
 #include <asm/e820.h>
 #include <asm/processor.h>
@@ -291,8 +291,29 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
 	 */
 	if (kernel_set_to_readonly &&
 	    within(address, (unsigned long)_text,
-		   (unsigned long)__end_rodata_hpage_align))
-		pgprot_val(forbidden) |= _PAGE_RW;
+		   (unsigned long)__end_rodata_hpage_align)) {
+		unsigned int level;
+
+		/*
+		 * Don't enforce the !RW mapping for the kernel text mapping
+		 * if the current mapping is already using small page mapping.
+		 * No need to work hard to preserve large page mappings in this
+		 * case.
+		 *
+		 * This also fixes the Linux Xen paravirt guest boot failure
+		 * (because of unexpected read-only mappings for kernel identity
+		 * mappings). In this paravirt guest case, the kernel text
+		 * mapping and the kernel identity mapping share the same
+		 * page-table pages. Thus we can't really use different
+		 * protections for the kernel text and identity mappings. Also,
+		 * these shared mappings are made of small page mappings.
+		 * Thus, not enforcing the !RW mapping for small-page kernel
+		 * text mappings will help the Linux Xen paravirt guest boot
+		 * as well.
+		 */
+		if (lookup_address(address, &level) && (level != PG_LEVEL_4K))
+			pgprot_val(forbidden) |= _PAGE_RW;
+	}
 #endif
 
 	prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index ae9648e..edc8b95 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -12,7 +12,7 @@
 #include <linux/debugfs.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/gfp.h>
+#include <linux/slab.h>
 #include <linux/mm.h>
 #include <linux/fs.h>
 #include <linux/rbtree.h>
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index c9ba9de..5c4ee42 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -1,4 +1,5 @@
 #include <linux/mm.h>
+#include <linux/gfp.h>
 #include <asm/pgalloc.h>
 #include <asm/pgtable.h>
 #include <asm/tlb.h>
diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
index 46c8834..7928540 100644
--- a/arch/x86/mm/pgtable_32.c
+++ b/arch/x86/mm/pgtable_32.c
@@ -6,7 +6,6 @@
 #include <linux/swap.h>
 #include <linux/smp.h>
 #include <linux/highmem.h>
-#include <linux/slab.h>
 #include <linux/pagemap.h>
 #include <linux/spinlock.h>
 #include <linux/module.h>
@@ -19,6 +18,7 @@
 #include <asm/e820.h>
 #include <asm/tlb.h>
 #include <asm/tlbflush.h>
+#include <asm/io.h>
 
 unsigned int __VMALLOC_RESERVE = 128 << 20;
 
@@ -129,6 +129,7 @@ static int __init parse_reservetop(char *arg)
 
 	address = memparse(arg, &arg);
 	reserve_top_address(address);
+	fixup_early_ioremap();
 	return 0;
 }
 early_param("reservetop", parse_reservetop);
diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c
index 28c6876..38512d0 100644
--- a/arch/x86/mm/srat_64.c
+++ b/arch/x86/mm/srat_64.c
@@ -461,7 +461,8 @@ void __init acpi_fake_nodes(const struct bootnode *fake_nodes, int num_nodes)
 		 * node, it must now point to the fake node ID.
 		 */
 		for (j = 0; j < MAX_LOCAL_APIC; j++)
-			if (apicid_to_node[j] == nid)
+			if (apicid_to_node[j] == nid &&
+			    fake_apicid_to_node[j] == NUMA_NO_NODE)
 				fake_apicid_to_node[j] = i;
 	}
 	for (i = 0; i < num_nodes; i++)
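Note on the free_init_pages() hunk above: the patch makes the boundary handling explicit by rounding begin up with PAGE_ALIGN() and end down with PAGE_MASK, so only whole pages inside the range are poisoned and freed, and a WARN_ON fires if a caller passes unaligned boundaries. The following is a minimal user-space sketch of that rounding arithmetic only; PAGE_SHIFT, PAGE_SIZE, PAGE_MASK and PAGE_ALIGN are re-derived here for illustration (the kernel takes them from <asm/page.h>), and the begin/end values are made-up examples, not taken from the patch.

#include <stdio.h>

/* Illustrative re-definitions; the kernel gets these from <asm/page.h>. */
#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
	unsigned long begin = 0x100234UL;	/* not page aligned */
	unsigned long end   = 0x105800UL;	/* not page aligned */

	/* Round begin up and end down, as the patched free_init_pages() does. */
	unsigned long begin_aligned = PAGE_ALIGN(begin);
	unsigned long end_aligned   = end & PAGE_MASK;

	printf("begin 0x%lx -> 0x%lx\n", begin, begin_aligned);	/* 0x101000 */
	printf("end   0x%lx -> 0x%lx\n", end, end_aligned);		/* 0x105000 */

	/* Only whole pages inside [begin_aligned, end_aligned) get freed. */
	printf("pages freed: %lu\n", (end_aligned - begin_aligned) >> PAGE_SHIFT);
	return 0;
}

With these example values the range shrinks to [0x101000, 0x105000), i.e. 4 whole pages, which is exactly the set of pages the patched free_init_pages() would poison and hand back to the allocator; the free_initrd_mem() hunk goes the other way and widens end with PAGE_ALIGN() because the trailing partial page was reserved earlier during boot.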