author	David Howells <dhowells@redhat.com>	2008-04-10 16:10:45 +0100
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-04-10 13:41:28 -0700
commit	a31b9dd8edc6e4e75b5299fee6093b3c54548446 (patch)
tree	f1f8fe2c4e98e80c505e869cd1c98caef259a079 /include/asm-frv
parent	5d69a029ab13ddef5bdad69174fabafada4a87fd (diff)
FRV: Handle update_mmu_cache() being called when current->mm is NULL [try #2]
Handle update_mmu_cache() being called when current->mm is NULL.

We cache static TLB mappings for the current page table in DAMPR4 and DAMPR5 on the theory that the next data lookup is likely to be in the same general region, and thus is likely to be mapped by the same page table. However, we can't get this information if we can't access the appropriate mm_struct.

If current->mm is NULL, we just clear the cache in the knowledge that the TLB miss handlers will load it.

Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
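As a rough, freestanding sketch of the guard this patch adds (hypothetical, simplified types and helpers stand in for the kernel's mm_struct, the pgd/pud/pmd walk and the AMPR register writes; the authoritative change is the diff below), the new control flow amounts to:

#include <limits.h>

/* Hypothetical stand-in for struct mm_struct; not the kernel definition. */
struct mm_struct {
	unsigned long ste0;	/* pretend first PME entry covering the address */
};

/* Placeholder for the page-table walk plus the xAMPRx_* flag ORs. */
static unsigned long compute_ampr(const struct mm_struct *mm)
{
	return (mm->ste0 & 0xffffff00UL) | 0x1UL;	/* 0x1 stands in for xAMPRx_V etc. */
}

/*
 * Sketch of the guarded update: only walk the page tables when an mm exists;
 * otherwise clear the cached mapping so the TLB miss handlers reload it.
 * The scr/dampr pointers stand in for the movgs writes to scr0/scr1 and
 * dampr4/dampr5 performed by the real function.
 */
static void update_mmu_cache_sketch(struct mm_struct *mm, unsigned long address,
				    unsigned long *scr, unsigned long *dampr)
{
	unsigned long ampr;

	if (mm) {
		ampr = compute_ampr(mm);
	} else {
		address = ULONG_MAX;	/* no region cached */
		ampr = 0;		/* invalid mapping */
	}

	*scr = address;
	*dampr = ampr;
}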
Diffstat (limited to 'include/asm-frv')
-rw-r--r--  include/asm-frv/pgtable.h | 19
1 file changed, 14 insertions, 5 deletions
diff --git a/include/asm-frv/pgtable.h b/include/asm-frv/pgtable.h
index 6c0682e..4e21904 100644
--- a/include/asm-frv/pgtable.h
+++ b/include/asm-frv/pgtable.h
@@ -507,13 +507,22 @@ static inline int pte_file(pte_t pte)
*/
static inline void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
+ struct mm_struct *mm;
unsigned long ampr;
- pgd_t *pge = pgd_offset(current->mm, address);
- pud_t *pue = pud_offset(pge, address);
- pmd_t *pme = pmd_offset(pue, address);
- ampr = pme->ste[0] & 0xffffff00;
- ampr |= xAMPRx_L | xAMPRx_SS_16Kb | xAMPRx_S | xAMPRx_C | xAMPRx_V;
+ mm = current->mm;
+ if (mm) {
+ pgd_t *pge = pgd_offset(mm, address);
+ pud_t *pue = pud_offset(pge, address);
+ pmd_t *pme = pmd_offset(pue, address);
+
+ ampr = pme->ste[0] & 0xffffff00;
+ ampr |= xAMPRx_L | xAMPRx_SS_16Kb | xAMPRx_S | xAMPRx_C |
+ xAMPRx_V;
+ } else {
+ address = ULONG_MAX;
+ ampr = 0;
+ }
asm volatile("movgs %0,scr0\n"
"movgs %0,scr1\n"