path: root/arch/um/kernel
author	Hugh Dickins <hugh@veritas.com>	2005-10-29 18:16:24 -0700
committer	Linus Torvalds <torvalds@g5.osdl.org>	2005-10-29 21:40:40 -0700
commit	b462705ac679f6195d1b23a752cda592d9107495 (patch)
tree	c4d9be08f67b0ffdc66c3e170614bd03945f3c42 /arch/um/kernel
parent	c74df32c724a1652ad8399b4891bb02c9d43743a (diff)
[PATCH] mm: arches skip ptlock
Convert those few architectures which are calling pud_alloc, pmd_alloc, pte_alloc_map on a user mm, not to take the page_table_lock first, nor drop it after. Each of these can continue to use pte_alloc_map, no need to change over to pte_alloc_map_lock, they're neither racy nor swappable.

In the sparc64 io_remap_pfn_range, flush_tlb_range then falls outside of the page_table_lock: that's okay, on sparc64 it's like flush_tlb_mm, and that has always been called from outside of page_table_lock in dup_mmap.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
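For illustration only, below is a minimal sketch (not part of the patch) of the allocation walk the commit message describes. The helper name alloc_user_pte is hypothetical; pgd_offset, pud_alloc, pmd_alloc and pte_alloc_map are the kernel interfaces named above. After this change, such a walk on a user mm runs without the caller taking mm->page_table_lock first.

/*
 * Hypothetical helper (sketch, not from the patch): walk and, where
 * needed, allocate the page table levels for one user address.
 * After this change, callers such as init_stub_pte() perform this
 * sequence without first taking mm->page_table_lock; the allocators
 * handle their own locking internally.
 */
static pte_t *alloc_user_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);		/* top level always present */
	pud = pud_alloc(mm, pgd, addr);		/* may allocate a pud page */
	if (!pud)
		return NULL;
	pmd = pmd_alloc(mm, pud, addr);		/* may allocate a pmd page */
	if (!pmd)
		return NULL;
	return pte_alloc_map(mm, pmd, addr);	/* may allocate; returns mapped pte */
}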
Diffstat (limited to 'arch/um/kernel')
-rw-r--r--	arch/um/kernel/skas/mmu.c	3
1 file changed, 0 insertions, 3 deletions
diff --git a/arch/um/kernel/skas/mmu.c b/arch/um/kernel/skas/mmu.c
index 240143b..02cf36e 100644
--- a/arch/um/kernel/skas/mmu.c
+++ b/arch/um/kernel/skas/mmu.c
@@ -28,7 +28,6 @@ static int init_stub_pte(struct mm_struct *mm, unsigned long proc,
 	pmd_t *pmd;
 	pte_t *pte;
-	spin_lock(&mm->page_table_lock);
 	pgd = pgd_offset(mm, proc);
 	pud = pud_alloc(mm, pgd, proc);
 	if (!pud)
@@ -63,7 +62,6 @@ static int init_stub_pte(struct mm_struct *mm, unsigned long proc,
 	*pte = mk_pte(virt_to_page(kernel), __pgprot(_PAGE_PRESENT));
 	*pte = pte_mkexec(*pte);
 	*pte = pte_wrprotect(*pte);
-	spin_unlock(&mm->page_table_lock);
 	return(0);
 out_pmd:
@@ -71,7 +69,6 @@ static int init_stub_pte(struct mm_struct *mm, unsigned long proc,
 out_pte:
 	pmd_free(pmd);
 out:
-	spin_unlock(&mm->page_table_lock);
 	return(-ENOMEM);
 }