| author | Chris Metcalf <cmetcalf@tilera.com> | 2012-03-29 15:50:08 -0400 |
|---|---|---|
| committer | Chris Metcalf <cmetcalf@tilera.com> | 2012-04-02 12:13:22 -0400 |
| commit | 719ea79e330c5e1a17fb7e4cf352a81e4c84cff5 | |
| tree | 08fc6a29a0430599b9e91e9e87945b0b6eb2ef6a | /arch/tile |
| parent | 5f220704127ae70db519fabbda4ece649eadac7f | |
arch/tile: fix up locking in pgtable.c slightly
We should be holding init_mm.page_table_lock in shatter_huge_page(), since we are modifying the kernel page tables. The pgd_lock is needed only for the step that walks the other root page tables to update them.
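To make that split concrete, here is a minimal userspace sketch of the same pattern, with pthread mutexes standing in for the kernel spinlocks. Every name in it (table_lock, registry_lock, shatter_entry) is invented for illustration; this is a model of the locking order, not the kernel code.

```c
#include <pthread.h>
#include <stdio.h>

#define NROOTS 4

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;    /* models init_mm.page_table_lock */
static pthread_mutex_t registry_lock = PTHREAD_MUTEX_INITIALIZER; /* models pgd_lock */
static int primary_entry;       /* models the kernel pmd being shattered */
static int root_copies[NROOTS]; /* models per-process copies of that pmd */

static void shatter_entry(int new_value)
{
	/* Modify the primary table under its own lock. */
	pthread_mutex_lock(&table_lock);
	primary_entry = new_value;

	/* Take the registry lock only around the walk that propagates
	 * the change to every other root table, then drop it. */
	pthread_mutex_lock(&registry_lock);
	for (int i = 0; i < NROOTS; i++)
		root_copies[i] = primary_entry;
	pthread_mutex_unlock(&registry_lock);

	/* Keep holding the table lock until the "TLB flush" (a print
	 * here) is done, mirroring the patch's ordering. */
	printf("flushed; entry is now %d\n", primary_entry);
	pthread_mutex_unlock(&table_lock);
}

int main(void)
{
	shatter_entry(42);
	return 0;
}
```

The point carried over from the patch is the ordering: the global lock becomes the inner, briefly held lock around the list walk instead of covering the entire operation.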
Also add a comment noting that we always take the pgd_lock with interrupts disabled, and that we are therefore not at risk from the tlbflush IPI deadlock seen on x86.
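The deadlock being ruled out is the classic single-CPU one: an interrupt handler spins on a lock that the code it interrupted already holds. Below is a rough userspace model of the rule, using a blocked signal as the "interrupt" and a mutex in place of the spinlock; all names are invented, and taking a mutex in a signal handler is simplified here purely for the sake of the sketch.

```c
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* "Interrupt context": if this ran while the main path held the lock
 * on the same thread, it would deadlock, just like an IRQ handler
 * spinning on a lock held by the code it interrupted. */
static void handler(int sig)
{
	(void)sig;
	pthread_mutex_lock(&lock);
	write(1, "handler got the lock\n", 21);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	sigset_t irq, old;

	signal(SIGALRM, handler);
	sigemptyset(&irq);
	sigaddset(&irq, SIGALRM);

	/* The lock_irqsave() analogue: mask the "interrupt" first,
	 * then take the lock. */
	sigprocmask(SIG_BLOCK, &irq, &old);
	pthread_mutex_lock(&lock);

	raise(SIGALRM); /* stays pending; would deadlock if unmasked */
	puts("critical section");

	/* The unlock_irqrestore() analogue: release the lock, then
	 * unmask; the pending signal is delivered safely now. */
	pthread_mutex_unlock(&lock);
	sigprocmask(SIG_SETMASK, &old, NULL);
	return 0;
}
```

The x86-specific IPI half of the problem does not arise here because, as the new comment in the diff notes, tile uses flush_remote() to have the hypervisor perform TLB flushes regardless of irq disabling.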
Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
Diffstat (limited to 'arch/tile')
| -rw-r--r-- | arch/tile/mm/pgtable.c | 22 |
1 file changed, 12 insertions(+), 10 deletions(-)
```diff
diff --git a/arch/tile/mm/pgtable.c b/arch/tile/mm/pgtable.c
index 6b9a109..2410aa8 100644
--- a/arch/tile/mm/pgtable.c
+++ b/arch/tile/mm/pgtable.c
@@ -177,14 +177,10 @@ void shatter_huge_page(unsigned long addr)
 	if (!pmd_huge_page(*pmd))
 		return;
 
-	/*
-	 * Grab the pgd_lock, since we may need it to walk the pgd_list,
-	 * and since we need some kind of lock here to avoid races.
-	 */
-	spin_lock_irqsave(&pgd_lock, flags);
+	spin_lock_irqsave(&init_mm.page_table_lock, flags);
 	if (!pmd_huge_page(*pmd)) {
 		/* Lost the race to convert the huge page. */
-		spin_unlock_irqrestore(&pgd_lock, flags);
+		spin_unlock_irqrestore(&init_mm.page_table_lock, flags);
 		return;
 	}
 
@@ -194,6 +190,7 @@ void shatter_huge_page(unsigned long addr)
 
 #ifdef __PAGETABLE_PMD_FOLDED
 	/* Walk every pgd on the system and update the pmd there. */
+	spin_lock(&pgd_lock);
 	list_for_each(pos, &pgd_list) {
 		pmd_t *copy_pmd;
 		pgd = list_to_pgd(pos) + pgd_index(addr);
@@ -201,6 +198,7 @@ void shatter_huge_page(unsigned long addr)
 		copy_pmd = pmd_offset(pud, addr);
 		__set_pmd(copy_pmd, *pmd);
 	}
+	spin_unlock(&pgd_lock);
 #endif
 
 	/* Tell every cpu to notice the change. */
@@ -208,7 +206,7 @@ void shatter_huge_page(unsigned long addr)
 			     cpu_possible_mask, NULL, 0);
 
 	/* Hold the lock until the TLB flush is finished to avoid races. */
-	spin_unlock_irqrestore(&pgd_lock, flags);
+	spin_unlock_irqrestore(&init_mm.page_table_lock, flags);
 }
 
 /*
@@ -217,9 +215,13 @@ void shatter_huge_page(unsigned long addr)
  * against pageattr.c; it is the unique case in which a valid change
  * of kernel pagetables can't be lazily synchronized by vmalloc faults.
  * vmalloc faults work because attached pagetables are never freed.
- * The locking scheme was chosen on the basis of manfred's
- * recommendations and having no core impact whatsoever.
- * -- wli
+ *
+ * The lock is always taken with interrupts disabled, unlike on x86
+ * and other platforms, because we need to take the lock in
+ * shatter_huge_page(), which may be called from an interrupt context.
+ * We are not at risk from the tlbflush IPI deadlock that was seen on
+ * x86, since we use the flush_remote() API to have the hypervisor do
+ * the TLB flushes regardless of irq disabling.
  */
 DEFINE_SPINLOCK(pgd_lock);
 LIST_HEAD(pgd_list);
```