Diffstat (limited to 'mm')
-rw-r--r--   mm/filemap.c       6
-rw-r--r--   mm/filemap_xip.c   2
-rw-r--r--   mm/hugetlb.c      10
-rw-r--r--   mm/memory.c       80
-rw-r--r--   mm/shmem.c         8
5 files changed, 52 insertions(+), 54 deletions(-)
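
The hunks below all follow from one change: VM_FAULT_MINOR is retired and the fault return value becomes a bitmask, so 0 now means a plain (minor) successful fault and the remaining codes can be OR-ed together and tested with &. For orientation, the flag block in include/linux/mm.h reads roughly as follows after this patch (quoted from the 2.6.23-era header from memory; treat the exact values as illustrative):

    #define VM_FAULT_MINOR	0	/* for backwards compat; to be removed */

    #define VM_FAULT_OOM	0x0001
    #define VM_FAULT_SIGBUS	0x0002
    #define VM_FAULT_MAJOR	0x0004
    #define VM_FAULT_WRITE	0x0008	/* special case for get_user_pages */

    #define VM_FAULT_NOPAGE	0x0100	/* ->fault installed the pte, not a page */
    #define VM_FAULT_LOCKED	0x0200	/* ->fault locked the returned page */

    #define VM_FAULT_ERROR	(VM_FAULT_OOM | VM_FAULT_SIGBUS)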
diff --git a/mm/filemap.c b/mm/filemap.c
index 0876cc5..4fd9e3f 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1322,9 +1322,7 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
struct page *page;
unsigned long size;
int did_readaround = 0;
- int ret;
-
- ret = VM_FAULT_MINOR;
+ int ret = 0;
size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
if (vmf->pgoff >= size)
@@ -1408,7 +1406,7 @@ retry_find:
*/
mark_page_accessed(page);
vmf->page = page;
- return ret | FAULT_RET_LOCKED;
+ return ret | VM_FAULT_LOCKED;
outside_data_content:
/*
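
The filemap_fault() hunks above show the two caller-visible rules for a ->fault handler under the new scheme: return 0 (not VM_FAULT_MINOR) for an ordinary hit, and OR in VM_FAULT_LOCKED when the page comes back locked. A minimal hypothetical handler for a page-cache-backed file (example_fault and its body are illustrative, not part of this patch):

    static int example_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
    {
    	struct address_space *mapping = vma->vm_file->f_mapping;
    	struct page *page;

    	/* find_lock_page() returns the page locked with a reference held */
    	page = find_lock_page(mapping, vmf->pgoff);
    	if (!page)
    		return VM_FAULT_SIGBUS;	/* a real handler would start I/O
    					 * here and OR in VM_FAULT_MAJOR */

    	vmf->page = page;
    	return VM_FAULT_LOCKED;		/* success (0) plus the locked bit */
    }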
diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c
index 847d5d7..53ee6a2 100644
--- a/mm/filemap_xip.c
+++ b/mm/filemap_xip.c
@@ -252,7 +252,7 @@ static int xip_file_fault(struct vm_area_struct *area, struct vm_fault *vmf)
out:
page_cache_get(page);
vmf->page = page;
- return VM_FAULT_MINOR;
+ return 0;
}
static struct vm_operations_struct xip_file_vm_ops = {
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index aaa7c1a..c4a573b 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -469,7 +469,7 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
avoidcopy = (page_count(old_page) == 1);
if (avoidcopy) {
set_huge_ptep_writable(vma, address, ptep);
- return VM_FAULT_MINOR;
+ return 0;
}
page_cache_get(old_page);
@@ -494,7 +494,7 @@ static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
}
page_cache_release(new_page);
page_cache_release(old_page);
- return VM_FAULT_MINOR;
+ return 0;
}
static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
@@ -551,7 +551,7 @@ retry:
if (idx >= size)
goto backout;
- ret = VM_FAULT_MINOR;
+ ret = 0;
if (!pte_none(*ptep))
goto backout;
@@ -602,7 +602,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
return ret;
}
- ret = VM_FAULT_MINOR;
+ ret = 0;
spin_lock(&mm->page_table_lock);
/* Check for a racing update before calling hugetlb_cow */
@@ -641,7 +641,7 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
spin_unlock(&mm->page_table_lock);
ret = hugetlb_fault(mm, vma, vaddr, 0);
spin_lock(&mm->page_table_lock);
- if (ret == VM_FAULT_MINOR)
+ if (!(ret & VM_FAULT_MAJOR))
continue;
remainder = 0;
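
One subtlety in the follow_hugetlb_page() hunk above: VM_FAULT_OOM and VM_FAULT_SIGBUS also leave the MAJOR bit clear, so `!(ret & VM_FAULT_MAJOR)` now takes the `continue` on error returns, where the old `== VM_FAULT_MINOR` comparison fell through to the failure path. A caller that wants to keep errors on the failure path would test the error mask first, along these lines (a sketch mirroring the loop shape, not what this patch does):

    ret = hugetlb_fault(mm, vma, vaddr, 0);
    spin_lock(&mm->page_table_lock);
    if (!(ret & VM_FAULT_ERROR))
    	continue;	/* fault serviced; retry the PTE lookup */
    remainder = 0;	/* OOM or SIGBUS: report what we have so far */
    break;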
diff --git a/mm/memory.c b/mm/memory.c
index 23c8704..61d51da 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1068,31 +1068,30 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
cond_resched();
while (!(page = follow_page(vma, start, foll_flags))) {
int ret;
- ret = __handle_mm_fault(mm, vma, start,
+ ret = handle_mm_fault(mm, vma, start,
foll_flags & FOLL_WRITE);
+ if (ret & VM_FAULT_ERROR) {
+ if (ret & VM_FAULT_OOM)
+ return i ? i : -ENOMEM;
+ else if (ret & VM_FAULT_SIGBUS)
+ return i ? i : -EFAULT;
+ BUG();
+ }
+ if (ret & VM_FAULT_MAJOR)
+ tsk->maj_flt++;
+ else
+ tsk->min_flt++;
+
/*
- * The VM_FAULT_WRITE bit tells us that do_wp_page has
- * broken COW when necessary, even if maybe_mkwrite
- * decided not to set pte_write. We can thus safely do
- * subsequent page lookups as if they were reads.
+ * The VM_FAULT_WRITE bit tells us that
+ * do_wp_page has broken COW when necessary,
+ * even if maybe_mkwrite decided not to set
+ * pte_write. We can thus safely do subsequent
+ * page lookups as if they were reads.
*/
if (ret & VM_FAULT_WRITE)
foll_flags &= ~FOLL_WRITE;
-
- switch (ret & ~VM_FAULT_WRITE) {
- case VM_FAULT_MINOR:
- tsk->min_flt++;
- break;
- case VM_FAULT_MAJOR:
- tsk->maj_flt++;
- break;
- case VM_FAULT_SIGBUS:
- return i ? i : -EFAULT;
- case VM_FAULT_OOM:
- return i ? i : -ENOMEM;
- default:
- BUG();
- }
+
cond_resched();
}
if (pages) {
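
The switch in get_user_pages() has to go because a bitmask return can carry several facts at once — a write fault that broke COW via I/O reports VM_FAULT_MAJOR and VM_FAULT_WRITE together — and masking bits out before a switch (`switch (ret & ~VM_FAULT_WRITE)`) stops scaling as flags are added. In miniature (hypothetical value, same flag names):

    int ret = VM_FAULT_MAJOR | VM_FAULT_WRITE;	/* both bits set at once */

    if (ret & VM_FAULT_MAJOR)		/* taken: I/O was required */
    	tsk->maj_flt++;
    if (ret & VM_FAULT_WRITE)		/* taken: COW already broken */
    	foll_flags &= ~FOLL_WRITE;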
@@ -1639,7 +1638,7 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
{
struct page *old_page, *new_page;
pte_t entry;
- int reuse = 0, ret = VM_FAULT_MINOR;
+ int reuse = 0, ret = 0;
struct page *dirty_page = NULL;
old_page = vm_normal_page(vma, address, orig_pte);
@@ -1835,8 +1834,8 @@ static int unmap_mapping_range_vma(struct vm_area_struct *vma,
/*
* files that support invalidating or truncating portions of the
* file from under mmaped areas must have their ->fault function
- * return a locked page (and FAULT_RET_LOCKED code). This provides
- * synchronisation against concurrent unmapping here.
+ * return a locked page (and set VM_FAULT_LOCKED in the return).
+ * This provides synchronisation against concurrent unmapping here.
*/
again:
@@ -2140,7 +2139,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
struct page *page;
swp_entry_t entry;
pte_t pte;
- int ret = VM_FAULT_MINOR;
+ int ret = 0;
if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
goto out;
@@ -2208,8 +2207,9 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
unlock_page(page);
if (write_access) {
+ /* XXX: We could OR the do_wp_page code with this one? */
if (do_wp_page(mm, vma, address,
- page_table, pmd, ptl, pte) == VM_FAULT_OOM)
+ page_table, pmd, ptl, pte) & VM_FAULT_OOM)
ret = VM_FAULT_OOM;
goto out;
}
@@ -2280,7 +2280,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
lazy_mmu_prot_update(entry);
unlock:
pte_unmap_unlock(page_table, ptl);
- return VM_FAULT_MINOR;
+ return 0;
release:
page_cache_release(page);
goto unlock;
@@ -2323,11 +2323,11 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
if (likely(vma->vm_ops->fault)) {
ret = vma->vm_ops->fault(vma, &vmf);
- if (unlikely(ret & (VM_FAULT_ERROR | FAULT_RET_NOPAGE)))
- return (ret & VM_FAULT_MASK);
+ if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
+ return ret;
} else {
/* Legacy ->nopage path */
- ret = VM_FAULT_MINOR;
+ ret = 0;
vmf.page = vma->vm_ops->nopage(vma, address & PAGE_MASK, &ret);
/* no page was available -- either SIGBUS or OOM */
if (unlikely(vmf.page == NOPAGE_SIGBUS))
@@ -2340,7 +2340,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
* For consistency in subsequent calls, make the faulted page always
* locked.
*/
- if (unlikely(!(ret & FAULT_RET_LOCKED)))
+ if (unlikely(!(ret & VM_FAULT_LOCKED)))
lock_page(vmf.page);
else
VM_BUG_ON(!PageLocked(vmf.page));
@@ -2356,7 +2356,8 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
ret = VM_FAULT_OOM;
goto out;
}
- page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
+ page = alloc_page_vma(GFP_HIGHUSER_MOVABLE,
+ vma, address);
if (!page) {
ret = VM_FAULT_OOM;
goto out;
@@ -2384,7 +2385,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
* is better done later.
*/
if (!page->mapping) {
- ret = VM_FAULT_MINOR;
+ ret = 0;
anon = 1; /* no anon but release vmf.page */
goto out;
}
@@ -2447,7 +2448,7 @@ out_unlocked:
put_page(dirty_page);
}
- return (ret & VM_FAULT_MASK);
+ return ret;
}
static int do_linear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
@@ -2486,7 +2487,6 @@ static noinline int do_no_pfn(struct mm_struct *mm, struct vm_area_struct *vma,
spinlock_t *ptl;
pte_t entry;
unsigned long pfn;
- int ret = VM_FAULT_MINOR;
pte_unmap(page_table);
BUG_ON(!(vma->vm_flags & VM_PFNMAP));
@@ -2498,7 +2498,7 @@ static noinline int do_no_pfn(struct mm_struct *mm, struct vm_area_struct *vma,
else if (unlikely(pfn == NOPFN_SIGBUS))
return VM_FAULT_SIGBUS;
else if (unlikely(pfn == NOPFN_REFAULT))
- return VM_FAULT_MINOR;
+ return 0;
page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
@@ -2510,7 +2510,7 @@ static noinline int do_no_pfn(struct mm_struct *mm, struct vm_area_struct *vma,
set_pte_at(mm, address, page_table, entry);
}
pte_unmap_unlock(page_table, ptl);
- return ret;
+ return 0;
}
/*
@@ -2531,7 +2531,7 @@ static int do_nonlinear_fault(struct mm_struct *mm, struct vm_area_struct *vma,
pgoff_t pgoff;
if (!pte_unmap_same(mm, pmd, page_table, orig_pte))
- return VM_FAULT_MINOR;
+ return 0;
if (unlikely(!(vma->vm_flags & VM_NONLINEAR) ||
!(vma->vm_flags & VM_CAN_NONLINEAR))) {
@@ -2615,13 +2615,13 @@ static inline int handle_pte_fault(struct mm_struct *mm,
}
unlock:
pte_unmap_unlock(pte, ptl);
- return VM_FAULT_MINOR;
+ return 0;
}
/*
* By the time we get here, we already hold the mm semaphore
*/
-int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, int write_access)
{
pgd_t *pgd;
@@ -2650,7 +2650,7 @@ int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
return handle_pte_fault(mm, vma, address, pte, pmd, write_access);
}
-EXPORT_SYMBOL_GPL(__handle_mm_fault);
+EXPORT_SYMBOL_GPL(handle_mm_fault);
#ifndef __PAGETABLE_PUD_FOLDED
/*
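
The __handle_mm_fault() -> handle_mm_fault() rename (and the updated EXPORT_SYMBOL_GPL) is visible to every architecture's page-fault handler, which calls it directly. The companion arch patches fall outside this mm-only diffstat, but they converge on the same consuming pattern, roughly (a sketch in the style of the i386 handler, not part of this diff):

    	fault = handle_mm_fault(mm, vma, address, write);
    	if (unlikely(fault & VM_FAULT_ERROR)) {
    		if (fault & VM_FAULT_OOM)
    			goto out_of_memory;
    		else if (fault & VM_FAULT_SIGBUS)
    			goto do_sigbus;
    		BUG();
    	}
    	if (fault & VM_FAULT_MAJOR)
    		tsk->maj_flt++;
    	else
    		tsk->min_flt++;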
diff --git a/mm/shmem.c b/mm/shmem.c
index 0a555af..ad155c7 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1103,7 +1103,7 @@ static int shmem_getpage(struct inode *inode, unsigned long idx,
return -EFBIG;
if (type)
- *type = VM_FAULT_MINOR;
+ *type = 0;
/*
* Normally, filepage is NULL on entry, and either found
@@ -1138,9 +1138,9 @@ repeat:
if (!swappage) {
shmem_swp_unmap(entry);
/* here we actually do the io */
- if (type && *type == VM_FAULT_MINOR) {
+ if (type && !(*type & VM_FAULT_MAJOR)) {
__count_vm_event(PGMAJFAULT);
- *type = VM_FAULT_MAJOR;
+ *type |= VM_FAULT_MAJOR;
}
spin_unlock(&info->lock);
swappage = shmem_swapin(info, swap, idx);
@@ -1323,7 +1323,7 @@ static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);
mark_page_accessed(vmf->page);
- return ret | FAULT_RET_LOCKED;
+ return ret | VM_FAULT_LOCKED;
}
#ifdef CONFIG_NUMA
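
In the shmem hunks, the switch from `*type = VM_FAULT_MAJOR` to `*type |= VM_FAULT_MAJOR` follows the same bitmask logic, and the `!(*type & VM_FAULT_MAJOR)` guard keeps PGMAJFAULT counted once even though shmem_getpage() can loop back through its repeat: label and reach the swap-in path more than once. In miniature (standalone sketch):

    int type = 0;			/* caller seeds the fault code with 0 */

    /* first trip through the swap-in path: bit clear, count the event */
    if (!(type & VM_FAULT_MAJOR)) {
    	__count_vm_event(PGMAJFAULT);
    	type |= VM_FAULT_MAJOR;
    }

    /* a retry trip re-runs the test and skips the count: bit already set */
    if (!(type & VM_FAULT_MAJOR))
    	__count_vm_event(PGMAJFAULT);	/* not reached a second time */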