Diffstat (limited to 'arch/powerpc/mm')
 arch/powerpc/mm/40x_mmu.c       | 17
 arch/powerpc/mm/44x_mmu.c       |  1
 arch/powerpc/mm/fault.c         |  2
 arch/powerpc/mm/hash_low_64.S   |  5
 arch/powerpc/mm/hash_utils_64.c |  7
 arch/powerpc/mm/init_64.c       | 16
 arch/powerpc/mm/mmu_decl.h      |  4
 arch/powerpc/mm/slb.c           | 35
 arch/powerpc/mm/tlb_64.c        |  2
 9 files changed, 56 insertions(+), 33 deletions(-)
diff --git a/arch/powerpc/mm/40x_mmu.c b/arch/powerpc/mm/40x_mmu.c
index e067df8..3899ea9 100644
--- a/arch/powerpc/mm/40x_mmu.c
+++ b/arch/powerpc/mm/40x_mmu.c
@@ -98,13 +98,12 @@ unsigned long __init mmu_mapin_ram(void)
 	v = KERNELBASE;
 	p = PPC_MEMSTART;
-	s = 0;
+	s = total_lowmem;

-	if (__map_without_ltlbs) {
-		return s;
-	}
+	if (__map_without_ltlbs)
+		return 0;

-	while (s <= (total_lowmem - LARGE_PAGE_SIZE_16M)) {
+	while (s >= LARGE_PAGE_SIZE_16M) {
 		pmd_t *pmdp;
 		unsigned long val = p | _PMD_SIZE_16M | _PAGE_HWEXEC | _PAGE_HWWRITE;
@@ -116,10 +115,10 @@ unsigned long __init mmu_mapin_ram(void)
 		v += LARGE_PAGE_SIZE_16M;
 		p += LARGE_PAGE_SIZE_16M;
-		s += LARGE_PAGE_SIZE_16M;
+		s -= LARGE_PAGE_SIZE_16M;
 	}

-	while (s <= (total_lowmem - LARGE_PAGE_SIZE_4M)) {
+	while (s >= LARGE_PAGE_SIZE_4M) {
 		pmd_t *pmdp;
 		unsigned long val = p | _PMD_SIZE_4M | _PAGE_HWEXEC | _PAGE_HWWRITE;
@@ -128,8 +127,8 @@ unsigned long __init mmu_mapin_ram(void)
 		v += LARGE_PAGE_SIZE_4M;
 		p += LARGE_PAGE_SIZE_4M;
-		s += LARGE_PAGE_SIZE_4M;
+		s -= LARGE_PAGE_SIZE_4M;
 	}

-	return s;
+	return total_lowmem - s;
 }
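Note on the hunk above: the rewritten loops count s down from total_lowmem instead of up from zero. The old condition "s <= (total_lowmem - LARGE_PAGE_SIZE_16M)" is an unsigned comparison, so when total_lowmem is smaller than one large page the subtraction wraps around and the condition stays true; counting down cannot wrap, and "total_lowmem - s" is the number of bytes actually covered. A minimal user-space sketch of the new control flow (illustration only; the constants are stand-ins and the pmd writes are elided):

#include <stdio.h>

/* Stand-ins for the kernel constants. */
#define LARGE_PAGE_SIZE_16M	(16UL << 20)
#define LARGE_PAGE_SIZE_4M	(4UL << 20)

/* Cover as much of total_lowmem as possible with 16M mappings, then
 * 4M mappings, and return the number of bytes mapped.  Counting s
 * down toward zero avoids the unsigned wrap of the old test.
 */
static unsigned long map_with_large_pages(unsigned long total_lowmem)
{
	unsigned long s = total_lowmem;

	while (s >= LARGE_PAGE_SIZE_16M)
		s -= LARGE_PAGE_SIZE_16M;	/* one 16M entry */
	while (s >= LARGE_PAGE_SIZE_4M)
		s -= LARGE_PAGE_SIZE_4M;	/* one 4M entry */

	return total_lowmem - s;
}

int main(void)
{
	/* 9M of lowmem: no 16M page fits, two 4M pages do -> 8M mapped. */
	printf("%luM mapped\n", map_with_large_pages(9UL << 20) >> 20);
	return 0;
}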
diff --git a/arch/powerpc/mm/44x_mmu.c b/arch/powerpc/mm/44x_mmu.c
index c3df504..04dc087 100644
--- a/arch/powerpc/mm/44x_mmu.c
+++ b/arch/powerpc/mm/44x_mmu.c
@@ -35,6 +35,7 @@
  */
 unsigned int tlb_44x_index; /* = 0 */
 unsigned int tlb_44x_hwater = PPC44x_TLB_SIZE - 1 - PPC44x_EARLY_TLBS;
+int icache_44x_need_flush;

 /*
  * "Pins" a 256MB TLB entry in AS0 for kernel lowmem
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index a18fda3..8135da0 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -309,7 +309,7 @@ good_area:
 			set_bit(PG_arch_1, &page->flags);
 		}
 		pte_update(ptep, 0, _PAGE_HWEXEC);
-		_tlbie(address);
+		_tlbie(address, mm->context.id);
 		pte_unmap_unlock(ptep, ptl);
 		up_read(&mm->mmap_sem);
 		return 0;
diff --git a/arch/powerpc/mm/hash_low_64.S b/arch/powerpc/mm/hash_low_64.S
index ad253b9..e935edd 100644
--- a/arch/powerpc/mm/hash_low_64.S
+++ b/arch/powerpc/mm/hash_low_64.S
@@ -331,7 +331,7 @@ htab_pte_insert_failure:
  *****************************************************************************/

 /* _hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
- *		 pte_t *ptep, unsigned long trap, int local)
+ *		 pte_t *ptep, unsigned long trap, int local, int ssize)
  */

 /*
@@ -557,7 +557,8 @@ htab_inval_old_hpte:
 	mr	r4,r31			/* PTE.pte */
 	li	r5,0			/* PTE.hidx */
 	li	r6,MMU_PAGE_64K		/* psize */
-	ld	r7,STK_PARM(r8)(r1)	/* local */
+	ld	r7,STK_PARM(r9)(r1)	/* ssize */
+	ld	r8,STK_PARM(r8)(r1)	/* local */
 	bl	.flush_hash_page
 	b	htab_insert_pte
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index c78dc91..f09730b 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -51,6 +51,7 @@
 #include <asm/cputable.h>
 #include <asm/sections.h>
 #include <asm/spu.h>
+#include <asm/udbg.h>

 #ifdef DEBUG
 #define DBG(fmt...) udbg_printf(fmt)
@@ -791,8 +792,7 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 		}
 		if (user_region) {
 			if (psize != get_paca()->context.user_psize) {
-				get_paca()->context.user_psize =
-					mm->context.user_psize;
+				get_paca()->context = mm->context;
 				slb_flush_and_rebolt();
 			}
 		} else if (get_paca()->vmalloc_sllp !=
@@ -885,6 +885,9 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
 	local_irq_restore(flags);
 }

+/* WARNING: This is called from hash_low_64.S, if you change this prototype,
+ * do not forget to update the assembly call site !
+ */
 void flush_hash_page(unsigned long va, real_pte_t pte, int psize, int ssize,
 		     int local)
 {
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index d9c82d3..c0f5cff 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -19,8 +19,6 @@
  *
  */

-#undef DEBUG
-
 #include <linux/signal.h>
 #include <linux/sched.h>
 #include <linux/kernel.h>
@@ -66,12 +64,6 @@

 #include "mmu_decl.h"

-#ifdef DEBUG
-#define DBG(fmt...) printk(fmt)
-#else
-#define DBG(fmt...)
-#endif
-
 #if PGTABLE_RANGE > USER_VSID_RANGE
 #warning Limited user VSID range means pagetable space is wasted
 #endif
@@ -175,8 +167,8 @@ void pgtable_cache_init(void)
 		int size = pgtable_cache_size[i];
 		const char *name = pgtable_cache_name[i];

-		DBG("Allocating page table cache %s (#%d) "
-		    "for size: %08x...\n", name, i, size);
+		pr_debug("Allocating page table cache %s (#%d) "
+			 "for size: %08x...\n", name, i, size);
 		pgtable_cache[i] = kmem_cache_create(name,
 						     size, size,
 						     SLAB_PANIC,
@@ -239,8 +231,8 @@ int __meminit vmemmap_populate(struct page *start_page,
 		if (!p)
 			return -ENOMEM;

-		printk(KERN_WARNING "vmemmap %08lx allocated at %p, "
-		       "physical %08lx.\n", start, p, __pa(p));
+		pr_debug("vmemmap %08lx allocated at %p, physical %08lx.\n",
+			 start, p, __pa(p));

 		mapped = htab_bolt_mapping(start, start + page_size,
 					   __pa(p), mode_rw,
 					   mmu_linear_psize,
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index c94a64f..eb3a732 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -61,12 +61,12 @@ extern unsigned long total_lowmem;
 #define mmu_mapin_ram()		(0UL)

 #elif defined(CONFIG_4xx)
-#define flush_HPTE(X, va, pg)	_tlbie(va)
+#define flush_HPTE(pid, va, pg)	_tlbie(va, pid)
 extern void MMU_init_hw(void);
 extern unsigned long mmu_mapin_ram(void);

 #elif defined(CONFIG_FSL_BOOKE)
-#define flush_HPTE(X, va, pg)	_tlbie(va)
+#define flush_HPTE(pid, va, pg)	_tlbie(va, pid)
 extern void MMU_init_hw(void);
 extern unsigned long mmu_mapin_ram(void);
 extern void adjust_total_lowmem(void);
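The fault.c, 44x_mmu.c and mmu_decl.h hunks above track a single interface change: on these cores _tlbie() now takes the owning context id (PID) along with the virtual address, so the invalidation is matched against the right address space rather than whichever PID the MMU currently holds. A hypothetical sketch of the call-site change (the real _tlbie is assembly and the id lives in mm->context.id; everything here is a stand-in):

#include <stdio.h>

/* Stand-in for the assembly routine; the point is only that the pid
 * is now an explicit parameter of the invalidation.
 */
static void _tlbie(unsigned long va, unsigned long pid)
{
	printf("invalidate va=%#lx in address space pid=%lu\n", va, pid);
}

int main(void)
{
	unsigned long address = 0x10002000UL, context_id = 42;

	/* old call:  _tlbie(address); */
	_tlbie(address, context_id);
	return 0;
}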
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index bbd2c51..27922df 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -25,6 +25,7 @@
 #include <asm/smp.h>
 #include <asm/firmware.h>
 #include <linux/compiler.h>
+#include <asm/udbg.h>

 #ifdef DEBUG
 #define DBG(fmt...) udbg_printf(fmt)
@@ -148,6 +149,35 @@ void slb_vmalloc_update(void)
 	slb_flush_and_rebolt();
 }

+/* Helper function to compare esids.  There are four cases to handle.
+ * 1. The system is not 1T segment size capable.  Use the GET_ESID compare.
+ * 2. The system is 1T capable, both addresses are < 1T, use the GET_ESID compare.
+ * 3. The system is 1T capable, only one of the two addresses is > 1T.  This is not a match.
+ * 4. The system is 1T capable, both addresses are > 1T, use the GET_ESID_1T macro to compare.
+ */
+static inline int esids_match(unsigned long addr1, unsigned long addr2)
+{
+	int esid_1t_count;
+
+	/* System is not 1T segment size capable. */
+	if (!cpu_has_feature(CPU_FTR_1T_SEGMENT))
+		return (GET_ESID(addr1) == GET_ESID(addr2));
+
+	esid_1t_count = (((addr1 >> SID_SHIFT_1T) != 0) +
+				((addr2 >> SID_SHIFT_1T) != 0));
+
+	/* both addresses are < 1T */
+	if (esid_1t_count == 0)
+		return (GET_ESID(addr1) == GET_ESID(addr2));
+
+	/* One address < 1T, the other > 1T.  Not a match */
+	if (esid_1t_count == 1)
+		return 0;
+
+	/* Both addresses are > 1T. */
+	return (GET_ESID_1T(addr1) == GET_ESID_1T(addr2));
+}
+
 /* Flush all user entries from the segment table of the current processor. */
 void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 {
@@ -193,15 +223,14 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 		return;
 	slb_allocate(pc);

-	if (GET_ESID(pc) == GET_ESID(stack))
+	if (esids_match(pc,stack))
 		return;

 	if (is_kernel_addr(stack))
 		return;
 	slb_allocate(stack);

-	if ((GET_ESID(pc) == GET_ESID(unmapped_base))
-	    || (GET_ESID(stack) == GET_ESID(unmapped_base)))
+	if (esids_match(pc,unmapped_base) || esids_match(stack,unmapped_base))
 		return;

 	if (is_kernel_addr(unmapped_base))
diff --git a/arch/powerpc/mm/tlb_64.c b/arch/powerpc/mm/tlb_64.c
index eafbca5..e2d867c 100644
--- a/arch/powerpc/mm/tlb_64.c
+++ b/arch/powerpc/mm/tlb_64.c
@@ -54,12 +54,10 @@ unsigned long pte_freelist_forced_free;
 	((PAGE_SIZE - sizeof(struct pte_freelist_batch)) \
 	 / sizeof(pgtable_free_t))

-#ifdef CONFIG_SMP
 static void pte_free_smp_sync(void *arg)
 {
 	/* Do nothing, just ensure we sync with all CPUs */
 }
-#endif

 /* This is only called when we are critically out of memory
  * (and fail to get a page in pte_free_tlb).
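The four cases documented on esids_match() are easy to exercise in user space. A small harness under simplifying assumptions (the cpu_has_feature() gate is dropped, i.e. a 1T-capable CPU is assumed; SID_SHIFT/SID_SHIFT_1T are the usual 256M and 1T shifts; unsigned long is 64-bit as on ppc64):

#include <assert.h>

/* Stand-ins for the kernel macros: 256M segments (2^28 bytes) and
 * 1T segments (2^40 bytes).
 */
#define SID_SHIFT	28
#define SID_SHIFT_1T	40
#define GET_ESID(ea)	((ea) >> SID_SHIFT)
#define GET_ESID_1T(ea)	((ea) >> SID_SHIFT_1T)

/* Same logic as the new kernel helper, minus the feature test. */
static int esids_match(unsigned long addr1, unsigned long addr2)
{
	int esid_1t_count = ((addr1 >> SID_SHIFT_1T) != 0) +
			    ((addr2 >> SID_SHIFT_1T) != 0);

	if (esid_1t_count == 0)		/* both below 1T */
		return GET_ESID(addr1) == GET_ESID(addr2);
	if (esid_1t_count == 1)		/* one below, one above: no match */
		return 0;
	return GET_ESID_1T(addr1) == GET_ESID_1T(addr2);
}

int main(void)
{
	assert(esids_match(0x10000000UL, 0x1fffffffUL));	/* same 256M segment */
	assert(!esids_match(0x10000000UL, 0x20000000UL));	/* adjacent 256M segments */
	assert(!esids_match(0x10000000UL, 1UL << 40));		/* straddles the 1T boundary */
	assert(esids_match(1UL << 40, (1UL << 40) + 4096));	/* same 1T segment */
	return 0;
}

The helper matters in switch_slb(): with 1T segments, pc and stack can live in the same 1T segment while differing in their 256M ESIDs, and the old unconditional GET_ESID() compare would then let switch_slb() go on to insert a second SLB entry for a segment that is already present.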