Diffstat (limited to 'mm')
-rw-r--r--  mm/hugetlb.c     22
-rw-r--r--  mm/mempolicy.c    2
-rw-r--r--  mm/page_alloc.c  31
-rw-r--r--  mm/rmap.c         5
-rw-r--r--  mm/shmem_acl.c    2
-rw-r--r--  mm/truncate.c     3
6 files changed, 33 insertions(+), 32 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 1d709ff..2dbec90 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -356,8 +356,8 @@ nomem:
return -ENOMEM;
}
-void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
- unsigned long end)
+void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end)
{
struct mm_struct *mm = vma->vm_mm;
unsigned long address;
@@ -398,6 +398,24 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
}
}
+void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end)
+{
+ /*
+ * It is undesirable to test vma->vm_file as it should be non-NULL
+ * for a valid hugetlb area. However, vm_file will be NULL in the error
+ * cleanup path of do_mmap_pgoff. When the hugetlbfs ->mmap method fails,
+ * do_mmap_pgoff() nullifies vma->vm_file before calling this function
+ * to clean up. Since no pte has actually been set up, it is safe to
+ * do nothing in this case.
+ */
+ if (vma->vm_file) {
+ spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
+ __unmap_hugepage_range(vma, start, end);
+ spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
+ }
+}
+
static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, pte_t *ptep, pte_t pte)
{
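
The hugetlb change splits the unmap path in two: __unmap_hugepage_range() is the worker that assumes the caller already holds i_mmap_lock, and the re-added unmap_hugepage_range() is a wrapper that takes the lock and tolerates the NULL vm_file left behind by do_mmap_pgoff()'s error cleanup. The double-underscore prefix is the usual kernel convention for "lock already held"; callers that already hold the lock can use the __ variant directly. A minimal userspace sketch of the same wrapper-plus-worker locking pattern, with a pthread mutex standing in for i_mmap_lock (all names illustrative, not kernel API; build with -lpthread):

    #include <stdio.h>
    #include <pthread.h>

    struct mapping {
        pthread_mutex_t lock;
        int nr_entries;
    };

    struct area {
        struct mapping *file;   /* NULL on the error-cleanup path */
    };

    /* Worker: caller must already hold file->lock. */
    static void __unmap_range(struct area *a, int start, int end)
    {
        a->file->nr_entries -= end - start;
    }

    /* Wrapper: checks for a missing backing file, then locks and delegates. */
    static void unmap_range(struct area *a, int start, int end)
    {
        if (!a->file)
            return;             /* nothing was ever mapped; safe to do nothing */
        pthread_mutex_lock(&a->file->lock);
        __unmap_range(a, start, end);
        pthread_mutex_unlock(&a->file->lock);
    }

    int main(void)
    {
        struct mapping m = { PTHREAD_MUTEX_INITIALIZER, 10 };
        struct area a = { &m };
        struct area failed = { NULL };      /* mimics the failed-mmap cleanup */

        unmap_range(&a, 0, 4);
        unmap_range(&failed, 0, 4);         /* no-op rather than a NULL deref */
        printf("entries left: %d\n", m.nr_entries);
        return 0;
    }
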
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 25788b1..617fb31 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -727,7 +727,7 @@ int do_migrate_pages(struct mm_struct *mm,
return -ENOSYS;
}
-static struct page *new_vma_page(struct page *page, unsigned long private)
+static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
{
return NULL;
}
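
The mempolicy hunk only widens the !CONFIG_MIGRATION stub so its signature stays in step with the new_page_t callback type that migrate_pages() takes; if the stub and the typedef drift apart, passing one where the other is expected stops compiling, which is exactly the point of keeping a stub. A compile-checked sketch of the pattern (types reduced to the minimum; illustrative, not the kernel's headers):

    #include <stdio.h>

    struct page;    /* opaque here */

    /* Callback type: allocate a destination page for one migrated page. */
    typedef struct page *new_page_t(struct page *page,
                                    unsigned long private, int **result);

    /* Stub for configs without migration: must keep the same three parameters. */
    static struct page *new_vma_page(struct page *page,
                                     unsigned long private, int **result)
    {
        (void)page; (void)private; (void)result;
        return NULL;
    }

    int main(void)
    {
        new_page_t *get_new_page = new_vma_page;    /* type-checks the stub */
        printf("stub returned %p\n", (void *)get_new_page(NULL, 0, NULL));
        return 0;
    }
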
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a8c003e..40db96a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -495,17 +495,16 @@ static void __free_pages_ok(struct page *page, unsigned int order)
int i;
int reserved = 0;
- arch_free_page(page, order);
- if (!PageHighMem(page))
- debug_check_no_locks_freed(page_address(page),
- PAGE_SIZE<<order);
-
for (i = 0 ; i < (1 << order) ; ++i)
reserved += free_pages_check(page + i);
if (reserved)
return;
+ if (!PageHighMem(page))
+ debug_check_no_locks_freed(page_address(page), PAGE_SIZE << order);
+ arch_free_page(page, order);
kernel_map_pages(page, 1 << order, 0);
+
local_irq_save(flags);
__count_vm_events(PGFREE, 1 << order);
free_one_page(page_zone(page), page, order);
@@ -781,13 +780,14 @@ static void fastcall free_hot_cold_page(struct page *page, int cold)
struct per_cpu_pages *pcp;
unsigned long flags;
- arch_free_page(page, 0);
-
if (PageAnon(page))
page->mapping = NULL;
if (free_pages_check(page))
return;
+ if (!PageHighMem(page))
+ debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
+ arch_free_page(page, 0);
kernel_map_pages(page, 1, 0);
pcp = &zone_pcp(zone, get_cpu())->pcp[cold];
@@ -2294,19 +2294,6 @@ unsigned long __init zone_absent_pages_in_node(int nid,
return __absent_pages_in_range(nid, zone_start_pfn, zone_end_pfn);
}
-/* Return the zone index a PFN is in */
-int memmap_zone_idx(struct page *lmem_map)
-{
- int i;
- unsigned long phys_addr = virt_to_phys(lmem_map);
- unsigned long pfn = phys_addr >> PAGE_SHIFT;
-
- for (i = 0; i < MAX_NR_ZONES; i++)
- if (pfn < arch_zone_highest_possible_pfn[i])
- break;
-
- return i;
-}
#else
static inline unsigned long zone_spanned_pages_in_node(int nid,
unsigned long zone_type,
@@ -2325,10 +2312,6 @@ static inline unsigned long zone_absent_pages_in_node(int nid,
return zholes_size[zone_type];
}
-static inline int memmap_zone_idx(struct page *lmem_map)
-{
- return MAX_NR_ZONES;
-}
#endif
static void __init calculate_node_totalpages(struct pglist_data *pgdat,
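
Both page_alloc hunks are about ordering: arch_free_page() and the lock-debugging check used to run before free_pages_check(), i.e. before the page had been validated, and the debug check was missing entirely from the hot/cold path. After the change, a page that fails validation (a reserved or corrupted page) is rejected before any freeing side effect touches it. The removed memmap_zone_idx() helpers were simply dead code. The same validate-before-side-effects shape, as a self-contained analogue with a canary header in place of page flags (names illustrative):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    #define BLOCK_CANARY 0xC0FFEEu

    struct block {
        unsigned int canary;
        size_t size;            /* payload bytes following the header */
    };

    /* Like free_pages_check(): refuse obviously bad frees. */
    static int block_check(const struct block *b)
    {
        if (b->canary != BLOCK_CANARY) {
            fprintf(stderr, "bad free: corrupt or reserved block, leaving it alone\n");
            return 1;
        }
        return 0;
    }

    static void block_free(struct block *b)
    {
        if (block_check(b))
            return;                     /* reject BEFORE any side effects */
        /* Only a validated block is poisoned and released; the unordered
         * original would have scribbled on reserved blocks too. (0x6b
         * mirrors the kernel's slab free-poison value.) */
        memset(b + 1, 0x6b, b->size);
        free(b);
    }

    int main(void)
    {
        struct block *good = malloc(sizeof(*good) + 32);
        good->canary = BLOCK_CANARY;
        good->size = 32;
        block_free(good);               /* validated, poisoned, freed */

        struct block reserved = { 0, 32 };
        block_free(&reserved);          /* rejected untouched */
        return 0;
    }
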
diff --git a/mm/rmap.c b/mm/rmap.c
index e2155d7..a9136d8 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -576,15 +576,14 @@ void page_add_file_rmap(struct page *page)
void page_remove_rmap(struct page *page)
{
if (atomic_add_negative(-1, &page->_mapcount)) {
-#ifdef CONFIG_DEBUG_VM
if (unlikely(page_mapcount(page) < 0)) {
printk (KERN_EMERG "Eeek! page_mapcount(page) went negative! (%d)\n", page_mapcount(page));
printk (KERN_EMERG " page->flags = %lx\n", page->flags);
printk (KERN_EMERG " page->count = %x\n", page_count(page));
printk (KERN_EMERG " page->mapping = %p\n", page->mapping);
+ BUG();
}
-#endif
- BUG_ON(page_mapcount(page) < 0);
+
/*
* It would be tidy to reset the PageAnon mapping here,
* but that might overwrite a racing page_add_anon_rmap
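
The rmap change drops the CONFIG_DEBUG_VM guard around the mapcount diagnostics and folds the separate BUG_ON() into the same branch as the printk()s, so every kernel that hits a negative mapcount first dumps the page state and then dies; previously a non-debug build would BUG() with no context at all. The print-then-die shape in a self-contained sketch, with abort() standing in for BUG() (illustrative names, simplified counting):

    #include <stdio.h>
    #include <stdlib.h>

    struct obj {
        int mapcount;
        unsigned long flags;
    };

    static void obj_unmap(struct obj *o)
    {
        if (--o->mapcount < 0) {
            /* Dump everything useful BEFORE dying, so the crash report
             * carries the state needed to debug the underflow. */
            fprintf(stderr, "Eeek! mapcount went negative! (%d)\n", o->mapcount);
            fprintf(stderr, "  flags = %lx\n", o->flags);
            abort();
        }
    }

    int main(void)
    {
        struct obj o = { 1, 0x5a };
        obj_unmap(&o);                  /* fine: 1 -> 0 */
        printf("mapcount now %d\n", o.mapcount);
        /* obj_unmap(&o);  a second call would print the dump and abort() */
        return 0;
    }
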
diff --git a/mm/shmem_acl.c b/mm/shmem_acl.c
index c946bf4..f5664c5 100644
--- a/mm/shmem_acl.c
+++ b/mm/shmem_acl.c
@@ -35,7 +35,7 @@ shmem_get_acl(struct inode *inode, int type)
}
/**
- * shmem_get_acl - generic_acl_operations->setacl() operation
+ * shmem_set_acl - generic_acl_operations->setacl() operation
*/
static void
shmem_set_acl(struct inode *inode, int type, struct posix_acl *acl)
diff --git a/mm/truncate.c b/mm/truncate.c
index f4edbc1..11ca480 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -302,7 +302,7 @@ invalidate_complete_page2(struct address_space *mapping, struct page *page)
if (page->mapping != mapping)
return 0;
- if (PagePrivate(page) && !try_to_release_page(page, 0))
+ if (PagePrivate(page) && !try_to_release_page(page, GFP_KERNEL))
return 0;
write_lock_irq(&mapping->tree_lock);
@@ -396,6 +396,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
pagevec_release(&pvec);
cond_resched();
}
+ WARN_ON_ONCE(ret);
return ret;
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);
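
The truncate.c changes are two small hardenings. Passing GFP_KERNEL rather than 0 to try_to_release_page() tells the filesystem's releasepage() what it may do while freeing the page's private data; invalidate_inode_pages2_range() runs in process context, so the over-strict mask of 0 (never block, never allocate) could only cause spurious failures. The new WARN_ON_ONCE(ret) then makes any remaining failure visible in the log exactly once. A toy illustration of a permission mask widening what a release callback is allowed to do (illustrative, not the kernel's full gfp semantics):

    #include <stdio.h>

    #define MAY_BLOCK 0x1u  /* caller tolerates the callback sleeping to make progress */

    /* Returns 1 if private data was released, 0 if it could not be. */
    static int release_private(unsigned int mask)
    {
        if (!(mask & MAY_BLOCK))
            return 0;       /* flushing would block, and the mask forbids it */
        /* ... flush and free the private state ... */
        return 1;
    }

    int main(void)
    {
        printf("mask 0:         %s\n", release_private(0) ? "released" : "failed");
        printf("mask MAY_BLOCK: %s\n", release_private(MAY_BLOCK) ? "released" : "failed");
        return 0;
    }
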