author		Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>	2010-09-10 13:23:04 +0900
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-09-23 17:29:18 -0700
commit		56c9cfb13c9b6516017eea4e8cbe22ea02e07ee6 (patch)
tree		1b09c9350d8f48d81514f274a3a1933dc9f1a2ef /mm
parent		cd67f0d2a9a6b5b9f79f4343dc8805757d9ebae2 (diff)
hugetlb, rmap: fix confusing page locking in hugetlb_cow()
The "if (!trylock_page)" block in the avoidcopy path of hugetlb_cow() looks confusing and is buggy. Originally this trylock_page() was intended to make sure that old_page is locked even when old_page != pagecache_page, because then only pagecache_page is locked. This patch fixes it by moving page locking into hugetlb_fault(). Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com> Acked-by: Rik van Riel <riel@redhat.com> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--	mm/hugetlb.c	22
1 file changed, 12 insertions, 10 deletions
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 9519f3f..c032738 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2324,11 +2324,8 @@ retry_avoidcopy:
 	 * and just make the page writable */
 	avoidcopy = (page_mapcount(old_page) == 1);
 	if (avoidcopy) {
-		if (!trylock_page(old_page)) {
-			if (PageAnon(old_page))
-				page_move_anon_rmap(old_page, vma, address);
-		} else
-			unlock_page(old_page);
+		if (PageAnon(old_page))
+			page_move_anon_rmap(old_page, vma, address);
 		set_huge_ptep_writable(vma, address, ptep);
 		return 0;
 	}
@@ -2631,10 +2628,16 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 							vma, address);
 	}
 
-	if (!pagecache_page) {
-		page = pte_page(entry);
+	/*
+	 * hugetlb_cow() requires page locks of pte_page(entry) and
+	 * pagecache_page, so here we need take the former one
+	 * when page != pagecache_page or !pagecache_page.
+	 * Note that locking order is always pagecache_page -> page,
+	 * so no worry about deadlock.
+	 */
+	page = pte_page(entry);
+	if (page != pagecache_page)
 		lock_page(page);
-	}
 
 	spin_lock(&mm->page_table_lock);
 	/* Check for a racing update before calling hugetlb_cow */
@@ -2661,9 +2664,8 @@ out_page_table_lock:
 	if (pagecache_page) {
 		unlock_page(pagecache_page);
 		put_page(pagecache_page);
-	} else {
-		unlock_page(page);
 	}
+	unlock_page(page);
 
 out_mutex:
 	mutex_unlock(&hugetlb_instantiation_mutex);