author	David Rientjes <rientjes@google.com>	2012-05-29 15:06:17 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-05-29 16:22:19 -0700
commit	edad9d2c337d43278a9d5aeb0ed531c2e838f8a6 (patch)
tree	9cd9aba0647d5d1eedd602451cd34f2d514fd30d /mm/huge_memory.c
parent	aa2e878efa7949c8502c9760f92835222714f090 (diff)
mm, thp: allow fallback when pte_alloc_one() fails for huge pmd
The transparent hugepages feature is careful not to invoke the oom killer
when a hugepage cannot be allocated.

pte_alloc_one() failing in __do_huge_pmd_anonymous_page(), however,
currently results in VM_FAULT_OOM, which invokes the pagefault oom killer
to kill a memory-hogging task.

This is unnecessary, since it is possible to drop the reference to the
hugepage and fall back to allocating a small page.

Signed-off-by: David Rientjes <rientjes@google.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
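For readers skimming the change, here is a minimal annotated sketch (not the literal kernel source; the identifiers come from the diff below, while the surrounding declarations and the out: small-page fallback path are assumed and not shown) of the caller-side error handling this patch introduces in do_huge_pmd_anonymous_page():

	/* Sketch: if the helper fails (e.g. pte_alloc_one() returned NULL),
	 * undo the memcg charge, drop the hugepage reference, and fall
	 * through to the regular small-page fault path instead of
	 * returning VM_FAULT_OOM and triggering the pagefault oom killer. */
	if (unlikely(__do_huge_pmd_anonymous_page(mm, vma, haddr, pmd, page))) {
		mem_cgroup_uncharge_page(page);	/* release the memcg charge */
		put_page(page);			/* drop the hugepage reference */
		goto out;			/* fall back to small pages */
	}
	return 0;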
Diffstat (limited to 'mm/huge_memory.c')
-rw-r--r--	mm/huge_memory.c	13
1 file changed, 8 insertions(+), 5 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 8ab2d24..d7d7165 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -640,11 +640,8 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
 	VM_BUG_ON(!PageCompound(page));
 
 	pgtable = pte_alloc_one(mm, haddr);
-	if (unlikely(!pgtable)) {
-		mem_cgroup_uncharge_page(page);
-		put_page(page);
+	if (unlikely(!pgtable))
 		return VM_FAULT_OOM;
-	}
 
 	clear_huge_page(page, haddr, HPAGE_PMD_NR);
 	__SetPageUptodate(page);
@@ -723,8 +720,14 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			put_page(page);
 			goto out;
 		}
+		if (unlikely(__do_huge_pmd_anonymous_page(mm, vma, haddr, pmd,
+							  page))) {
+			mem_cgroup_uncharge_page(page);
+			put_page(page);
+			goto out;
+		}
 
-		return __do_huge_pmd_anonymous_page(mm, vma, haddr, pmd, page);
+		return 0;
 	}
 out:
 	/*