Diffstat (limited to 'mm')
-rw-r--r--  mm/bootmem.c         2
-rw-r--r--  mm/memory.c         72
-rw-r--r--  mm/memory_hotplug.c  2
-rw-r--r--  mm/mmap.c            2
-rw-r--r--  mm/mremap.c          2
-rw-r--r--  mm/page_alloc.c      2
6 files changed, 27 insertions, 55 deletions
diff --git a/mm/bootmem.c b/mm/bootmem.c
index e8c5671..16b9465 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -204,6 +204,8 @@ restart_scan:
unsigned long j;
i = find_next_zero_bit(bdata->node_bootmem_map, eidx, i);
i = ALIGN(i, incr);
+ if (i >= eidx)
+ break;
if (test_bit(i, bdata->node_bootmem_map))
continue;
for (j = i + 1; j < i + areasize; ++j) {
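
The two lines added above close a boundary hole: find_next_zero_bit() can return a bit just below eidx, and ALIGN(i, incr) may then round it up past the end of the bitmap, so the index has to be re-checked before it is tested or scanned. A minimal stand-alone sketch of that overrun (plain user-space C with made-up values, not kernel code):

    /* ALIGN() as the usual round-up-to-a-power-of-two macro. */
    #include <stdio.h>

    #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
        unsigned long eidx = 100;  /* one past the last valid bit        */
        unsigned long incr = 8;    /* alignment step, a power of two     */
        unsigned long i = 97;      /* what find_next_zero_bit() returned */

        i = ALIGN(i, incr);        /* rounds up to 104, beyond eidx      */
        if (i >= eidx)
            printf("aligned index %lu is past eidx %lu, must break\n", i, eidx);
        return 0;
    }
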
diff --git a/mm/memory.c b/mm/memory.c
index aa8af0e..d8dde07 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -349,6 +349,11 @@ void print_bad_pte(struct vm_area_struct *vma, pte_t pte, unsigned long vaddr)
dump_stack();
}
+static inline int is_cow_mapping(unsigned int flags)
+{
+ return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
+}
+
/*
* This function gets the "struct page" associated with a pte.
*
@@ -377,6 +382,8 @@ struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr, pte_
unsigned long off = (addr - vma->vm_start) >> PAGE_SHIFT;
if (pfn == vma->vm_pgoff + off)
return NULL;
+ if (!is_cow_mapping(vma->vm_flags))
+ return NULL;
}
/*
@@ -437,7 +444,7 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
* If it's a COW mapping, write protect it both
* in the parent and the child
*/
- if ((vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE) {
+ if (is_cow_mapping(vm_flags)) {
ptep_set_wrprotect(src_mm, addr, src_pte);
pte = *src_pte;
}
@@ -567,7 +574,7 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
* readonly mappings. The tradeoff is that copy_page_range is more
* efficient than faulting.
*/
- if (!(vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_PFNMAP))) {
+ if (!(vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_PFNMAP|VM_INSERTPAGE))) {
if (!vma->anon_vma)
return 0;
}
@@ -1002,7 +1009,7 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
continue;
}
- if (!vma || (vma->vm_flags & VM_IO)
+ if (!vma || (vma->vm_flags & (VM_IO | VM_PFNMAP))
|| !(vm_flags & vma->vm_flags))
return i ? : -EFAULT;
@@ -1221,55 +1228,12 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, struct page *
return -EFAULT;
if (!page_count(page))
return -EINVAL;
+ vma->vm_flags |= VM_INSERTPAGE;
return insert_page(vma->vm_mm, addr, page, vma->vm_page_prot);
}
EXPORT_SYMBOL(vm_insert_page);
/*
- * Somebody does a pfn remapping that doesn't actually work as a vma.
- *
- * Do it as individual pages instead, and warn about it. It's bad form,
- * and very inefficient.
- */
-static int incomplete_pfn_remap(struct vm_area_struct *vma,
- unsigned long start, unsigned long end,
- unsigned long pfn, pgprot_t prot)
-{
- static int warn = 10;
- struct page *page;
- int retval;
-
- if (!(vma->vm_flags & VM_INCOMPLETE)) {
- if (warn) {
- warn--;
- printk("%s does an incomplete pfn remapping", current->comm);
- dump_stack();
- }
- }
- vma->vm_flags |= VM_INCOMPLETE | VM_IO | VM_RESERVED;
-
- if (start < vma->vm_start || end > vma->vm_end)
- return -EINVAL;
-
- if (!pfn_valid(pfn))
- return -EINVAL;
-
- page = pfn_to_page(pfn);
- if (!PageReserved(page))
- return -EINVAL;
-
- retval = 0;
- while (start < end) {
- retval = insert_page(vma->vm_mm, start, page, prot);
- if (retval < 0)
- break;
- start += PAGE_SIZE;
- page++;
- }
- return retval;
-}
-
-/*
* maps a range of physical memory into the requested pages. the old
* mappings are removed. any references to nonexistent pages results
* in null mappings (currently treated as "copy-on-access")
@@ -1343,9 +1307,6 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
struct mm_struct *mm = vma->vm_mm;
int err;
- if (addr != vma->vm_start || end != vma->vm_end)
- return incomplete_pfn_remap(vma, addr, end, pfn, prot);
-
/*
* Physically remapped pages are special. Tell the
* rest of the world about it:
@@ -1359,9 +1320,18 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
* VM_PFNMAP tells the core MM that the base pages are just
* raw PFN mappings, and do not have a "struct page" associated
* with them.
+ *
+ * There's a horrible special case to handle copy-on-write
+ * behaviour that some programs depend on. We mark the "original"
+ * un-COW'ed pages by matching them up with "vma->vm_pgoff".
*/
+ if (is_cow_mapping(vma->vm_flags)) {
+ if (addr != vma->vm_start || end != vma->vm_end)
+ return -EINVAL;
+ vma->vm_pgoff = pfn;
+ }
+
vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
- vma->vm_pgoff = pfn;
BUG_ON(addr >= end);
pfn -= addr >> PAGE_SHIFT;
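
The memory.c changes hang together around the new is_cow_mapping() helper: a mapping is copy-on-write when it may become writable but is not shared, and remap_pfn_range() now insists that such a mapping covers the whole VMA so the original, un-COW'ed pages can later be recognised by comparing the pfn against vma->vm_pgoff. A small stand-alone sketch of the helper's test (the flag values mirror the kernel's VM_SHARED and VM_MAYWRITE bits, but this is plain user-space C, not kernel code):

    #include <stdio.h>

    /* Mock vm_flags bits; the real ones live in <linux/mm.h>. */
    #define VM_SHARED   0x00000008UL
    #define VM_MAYWRITE 0x00000020UL

    static int is_cow_mapping(unsigned long flags)
    {
        return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
    }

    int main(void)
    {
        /* Private, potentially writable mapping: copy-on-write. */
        printf("%d\n", is_cow_mapping(VM_MAYWRITE));
        /* Shared writable mapping: writes go to the backing object. */
        printf("%d\n", is_cow_mapping(VM_SHARED | VM_MAYWRITE));
        /* Mapping that can never be written: nothing to copy. */
        printf("%d\n", is_cow_mapping(0));
        return 0;
    }
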
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 431a64f..f6d4af8 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -104,7 +104,7 @@ static void grow_pgdat_span(struct pglist_data *pgdat,
pgdat->node_start_pfn = start_pfn;
if (end_pfn > old_pgdat_end_pfn)
- pgdat->node_spanned_pages = end_pfn - pgdat->node_spanned_pages;
+ pgdat->node_spanned_pages = end_pfn - pgdat->node_start_pfn;
}
int online_pages(unsigned long pfn, unsigned long nr_pages)
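
The memory_hotplug.c change fixes the span arithmetic in grow_pgdat_span(): the node's span is the distance from its start pfn to the new end pfn, not the new end pfn minus the old span. A stand-alone sketch with made-up pfn values (not kernel code):

    #include <stdio.h>

    int main(void)
    {
        unsigned long node_start_pfn     = 0x10000; /* example values */
        unsigned long node_spanned_pages = 0x08000; /* old span       */
        unsigned long end_pfn            = 0x20000; /* after hot-add  */

        unsigned long buggy   = end_pfn - node_spanned_pages; /* 0x18000, wrong   */
        unsigned long correct = end_pfn - node_start_pfn;     /* 0x10000, correct */

        printf("buggy span %#lx, correct span %#lx\n", buggy, correct);
        return 0;
    }
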
diff --git a/mm/mmap.c b/mm/mmap.c
index 11ca592..64ba4db 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -611,7 +611,7 @@ again: remove_next = 1 + (end > next->vm_end);
* If the vma has a ->close operation then the driver probably needs to release
* per-vma resources, so we don't attempt to merge those.
*/
-#define VM_SPECIAL (VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_RESERVED)
+#define VM_SPECIAL (VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_RESERVED | VM_PFNMAP)
static inline int is_mergeable_vma(struct vm_area_struct *vma,
struct file *file, unsigned long vm_flags)
diff --git a/mm/mremap.c b/mm/mremap.c
index b535438..ddaeee9 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -323,7 +323,7 @@ unsigned long do_mremap(unsigned long addr,
/* We can't remap across vm area boundaries */
if (old_len > vma->vm_end - addr)
goto out;
- if (vma->vm_flags & VM_DONTEXPAND) {
+ if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)) {
if (new_len > old_len)
goto out;
}
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 3b21a13..fe14a8c 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1896,7 +1896,7 @@ static int __devinit pageset_cpuup_callback(struct notifier_block *nfb,
static struct notifier_block pageset_notifier =
{ &pageset_cpuup_callback, NULL, 0 };
-void __init setup_per_cpu_pageset()
+void __init setup_per_cpu_pageset(void)
{
int err;
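
The page_alloc.c change is a prototype cleanup: in C, an empty parameter list leaves the arguments unspecified, while (void) declares that the function takes none, which lets the compiler reject stray arguments. A stand-alone sketch of the difference (plain user-space C):

    #include <stdio.h>

    static void takes_nothing(void)
    {
        puts("ok");
    }

    int main(void)
    {
        takes_nothing();
        /* takes_nothing(42); would now be rejected by the compiler,
         * whereas with an empty "()" parameter list it would not be. */
        return 0;
    }
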