Diffstat (limited to 'arch/powerpc/include/asm')

 arch/powerpc/include/asm/highmem.h        |  2
 arch/powerpc/include/asm/machdep.h        |  2
 arch/powerpc/include/asm/mmu-book3e.h (renamed from arch/powerpc/include/asm/mmu-fsl-booke.h) | 66
 arch/powerpc/include/asm/mmu.h            |  6
 arch/powerpc/include/asm/pgtable-4k.h     |  2
 arch/powerpc/include/asm/pgtable-64k.h    |  2
 arch/powerpc/include/asm/pgtable-ppc32.h  | 58
 arch/powerpc/include/asm/pgtable-ppc64.h  | 29
 arch/powerpc/include/asm/pgtable.h        | 84
 arch/powerpc/include/asm/reg_booke.h      |  1

 10 files changed, 146 insertions(+), 106 deletions(-)
diff --git a/arch/powerpc/include/asm/highmem.h b/arch/powerpc/include/asm/highmem.h
index 04e4a62..a286e47 100644
--- a/arch/powerpc/include/asm/highmem.h
+++ b/arch/powerpc/include/asm/highmem.h
@@ -99,7 +99,7 @@ static inline void *kmap_atomic_prot(struct page *page, enum km_type type, pgpro
 #ifdef CONFIG_DEBUG_HIGHMEM
 	BUG_ON(!pte_none(*(kmap_pte-idx)));
 #endif
-	__set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot));
+	__set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot), 1);
 	local_flush_tlb_page(NULL, vaddr);
 
 	return (void*) vaddr;
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index 2740c44..6c34a0d 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -327,8 +327,6 @@ extern void __devinit smp_generic_take_timebase(void);
  */
 /* Print a boot progress message. */
 void ppc64_boot_msg(unsigned int src, const char *msg);
-/* Print a termination message (print only -- does not stop the kernel) */
-void ppc64_terminate_msg(unsigned int src, const char *msg);
 
 static inline void log_error(char *buf, unsigned int err_type, int fatal)
 {
diff --git a/arch/powerpc/include/asm/mmu-fsl-booke.h b/arch/powerpc/include/asm/mmu-book3e.h
index 3f941c0..c5363c3 100644
--- a/arch/powerpc/include/asm/mmu-fsl-booke.h
+++ b/arch/powerpc/include/asm/mmu-book3e.h
@@ -1,26 +1,42 @@
-#ifndef _ASM_POWERPC_MMU_FSL_BOOKE_H_
-#define _ASM_POWERPC_MMU_FSL_BOOKE_H_
+#ifndef _ASM_POWERPC_MMU_BOOK3E_H_
+#define _ASM_POWERPC_MMU_BOOK3E_H_
 /*
- * Freescale Book-E MMU support
+ * Freescale Book-E/Book-3e (ISA 2.06+) MMU support
  */
 
-/* Book-E defined page sizes */
-#define BOOKE_PAGESZ_1K		0
-#define BOOKE_PAGESZ_4K		1
-#define BOOKE_PAGESZ_16K	2
-#define BOOKE_PAGESZ_64K	3
-#define BOOKE_PAGESZ_256K	4
-#define BOOKE_PAGESZ_1M		5
-#define BOOKE_PAGESZ_4M		6
-#define BOOKE_PAGESZ_16M	7
-#define BOOKE_PAGESZ_64M	8
-#define BOOKE_PAGESZ_256M	9
-#define BOOKE_PAGESZ_1GB	10
-#define BOOKE_PAGESZ_4GB	11
-#define BOOKE_PAGESZ_16GB	12
-#define BOOKE_PAGESZ_64GB	13
-#define BOOKE_PAGESZ_256GB	14
-#define BOOKE_PAGESZ_1TB	15
+/* Book-3e defined page sizes */
+#define BOOK3E_PAGESZ_1K	0
+#define BOOK3E_PAGESZ_2K	1
+#define BOOK3E_PAGESZ_4K	2
+#define BOOK3E_PAGESZ_8K	3
+#define BOOK3E_PAGESZ_16K	4
+#define BOOK3E_PAGESZ_32K	5
+#define BOOK3E_PAGESZ_64K	6
+#define BOOK3E_PAGESZ_128K	7
+#define BOOK3E_PAGESZ_256K	8
+#define BOOK3E_PAGESZ_512K	9
+#define BOOK3E_PAGESZ_1M	10
+#define BOOK3E_PAGESZ_2M	11
+#define BOOK3E_PAGESZ_4M	12
+#define BOOK3E_PAGESZ_8M	13
+#define BOOK3E_PAGESZ_16M	14
+#define BOOK3E_PAGESZ_32M	15
+#define BOOK3E_PAGESZ_64M	16
+#define BOOK3E_PAGESZ_128M	17
+#define BOOK3E_PAGESZ_256M	18
+#define BOOK3E_PAGESZ_512M	19
+#define BOOK3E_PAGESZ_1GB	20
+#define BOOK3E_PAGESZ_2GB	21
+#define BOOK3E_PAGESZ_4GB	22
+#define BOOK3E_PAGESZ_8GB	23
+#define BOOK3E_PAGESZ_16GB	24
+#define BOOK3E_PAGESZ_32GB	25
+#define BOOK3E_PAGESZ_64GB	26
+#define BOOK3E_PAGESZ_128GB	27
+#define BOOK3E_PAGESZ_256GB	28
+#define BOOK3E_PAGESZ_512GB	29
+#define BOOK3E_PAGESZ_1TB	30
+#define BOOK3E_PAGESZ_2TB	31
 
 #define MAS0_TLBSEL(x)		((x << 28) & 0x30000000)
 #define MAS0_ESEL(x)		((x << 16) & 0x0FFF0000)
@@ -29,8 +45,9 @@
 #define MAS1_VALID		0x80000000
 #define MAS1_IPROT		0x40000000
 #define MAS1_TID(x)		((x << 16) & 0x3FFF0000)
+#define MAS1_IND		0x00002000
 #define MAS1_TS			0x00001000
-#define MAS1_TSIZE(x)		((x << 8) & 0x00000F00)
+#define MAS1_TSIZE(x)		((x << 7) & 0x00000F80)
 
 #define MAS2_EPN		0xFFFFF000
 #define MAS2_X0			0x00000040
@@ -40,7 +57,7 @@
 #define MAS2_M			0x00000004
 #define MAS2_G			0x00000002
 #define MAS2_E			0x00000001
-#define MAS2_EPN_MASK(size)	(~0 << (2*(size) + 10))
+#define MAS2_EPN_MASK(size)	(~0 << (size + 10))
 #define MAS2_VAL(addr, size, flags)	((addr) & MAS2_EPN_MASK(size) | (flags))
 
 #define MAS3_RPN		0xFFFFF000
@@ -56,7 +73,7 @@
 #define MAS3_SR			0x00000001
 
 #define MAS4_TLBSELD(x)		MAS0_TLBSEL(x)
-#define MAS4_TIDDSEL		0x000F0000
+#define MAS4_INDD		0x00008000
 #define MAS4_TSIZED(x)		MAS1_TSIZE(x)
 #define MAS4_X0D		0x00000040
 #define MAS4_X1D		0x00000020
@@ -68,6 +85,7 @@
 
 #define MAS6_SPID0		0x3FFF0000
 #define MAS6_SPID1		0x00007FFE
+#define MAS6_ISIZE(x)		MAS1_TSIZE(x)
 #define MAS6_SAS		0x00000001
 #define MAS6_SPID		MAS6_SPID0
 
@@ -82,4 +100,4 @@ typedef struct {
 } mm_context_t;
 #endif /* !__ASSEMBLY__ */
 
-#endif /* _ASM_POWERPC_MMU_FSL_BOOKE_H_ */
+#endif /* _ASM_POWERPC_MMU_BOOK3E_H_ */
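
A worked example of the encoding change above, as a hedged sketch: the old FSL Book-E TSIZE field was 4 bits wide (shift 8, mask 0x00000F00) and counted powers of 4, while the Book-3e field is 5 bits wide (shift 7, mask 0x00000F80) and counts powers of 2, which is why MAS2_EPN_MASK drops from a 2*size shift to a plain size shift. The OLD_/NEW_ macro names below are invented for illustration and are not part of the patch; the check confirms both encodings produce the same EPN mask for a size both can express (16M: BOOKE_PAGESZ_16M == 7, BOOK3E_PAGESZ_16M == 14).

	#include <assert.h>

	#define OLD_EPN_MASK(size)	(~0u << (2 * (size) + 10))	/* size counts 4^n KB pages */
	#define NEW_EPN_MASK(size)	(~0u << ((size) + 10))		/* size counts 2^n KB pages */

	int main(void)
	{
		/* 16M = 2^24 bytes, so both masks must clear the low 24 bits */
		assert(OLD_EPN_MASK(7)  == 0xff000000u);
		assert(NEW_EPN_MASK(14) == 0xff000000u);
		return 0;
	}
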
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index 6e76399..5c78079 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -71,9 +71,9 @@ extern unsigned int __start___mmu_ftr_fixup, __stop___mmu_ftr_fixup;
 #elif defined(CONFIG_44x)
 /* 44x-style software loaded TLB */
 #  include <asm/mmu-44x.h>
-#elif defined(CONFIG_FSL_BOOKE)
-/* Freescale Book-E software loaded TLB */
-#  include <asm/mmu-fsl-booke.h>
+#elif defined(CONFIG_PPC_BOOK3E_MMU)
+/* Freescale Book-E software loaded TLB or Book-3e (ISA 2.06+) MMU */
+#  include <asm/mmu-book3e.h>
 #elif defined (CONFIG_PPC_8xx)
 /* Motorola/Freescale 8xx software loaded TLB */
 #  include <asm/mmu-8xx.h>
diff --git a/arch/powerpc/include/asm/pgtable-4k.h b/arch/powerpc/include/asm/pgtable-4k.h
index 6b18ba9..1dbca4e7 100644
--- a/arch/powerpc/include/asm/pgtable-4k.h
+++ b/arch/powerpc/include/asm/pgtable-4k.h
@@ -60,7 +60,7 @@
 /* It should be preserving the high 48 bits and then specifically */
 /* preserving _PAGE_SECONDARY | _PAGE_GROUP_IX */
 #define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | \
-			 _PAGE_HPTEFLAGS)
+			 _PAGE_HPTEFLAGS | _PAGE_SPECIAL)
 
 /* Bits to mask out from a PMD to get to the PTE page */
 #define PMD_MASKED_BITS		0
diff --git a/arch/powerpc/include/asm/pgtable-64k.h b/arch/powerpc/include/asm/pgtable-64k.h
index 07b0d8f..7389003 100644
--- a/arch/powerpc/include/asm/pgtable-64k.h
+++ b/arch/powerpc/include/asm/pgtable-64k.h
@@ -114,7 +114,7 @@ static inline struct subpage_prot_table *pgd_subpage_prot(pgd_t *pgd)
  * pgprot changes
  */
 #define _PAGE_CHG_MASK	(PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \
-			 _PAGE_ACCESSED)
+			 _PAGE_ACCESSED | _PAGE_SPECIAL)
 
 /* Bits to mask out from a PMD to get to the PTE page */
 #define PMD_MASKED_BITS		0x1ff
diff --git a/arch/powerpc/include/asm/pgtable-ppc32.h b/arch/powerpc/include/asm/pgtable-ppc32.h
index f69a4d9..98bd7c5 100644
--- a/arch/powerpc/include/asm/pgtable-ppc32.h
+++ b/arch/powerpc/include/asm/pgtable-ppc32.h
@@ -429,8 +429,10 @@ extern int icache_44x_need_flush;
 #define PMD_PAGE_SIZE(pmd)	bad_call_to_PMD_PAGE_SIZE()
 #endif
 
-#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
+#define _PAGE_HPTEFLAGS _PAGE_HASHPTE
+#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | \
+			 _PAGE_SPECIAL)
 
 #define PAGE_PROT_BITS	(_PAGE_GUARDED | _PAGE_COHERENT | _PAGE_NO_CACHE | \
 			 _PAGE_WRITETHRU | _PAGE_ENDIAN | \
@@ -667,44 +669,6 @@ static inline unsigned long long pte_update(pte_t *p,
 #endif /* CONFIG_PTE_64BIT */
 
 /*
- * set_pte stores a linux PTE into the linux page table.
- * On machines which use an MMU hash table we avoid changing the
- * _PAGE_HASHPTE bit.
- */
-
-static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
-				pte_t *ptep, pte_t pte)
-{
-#if (_PAGE_HASHPTE != 0) && defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT)
-	pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte) & ~_PAGE_HASHPTE);
-#elif defined(CONFIG_PTE_64BIT) && defined(CONFIG_SMP)
-#if _PAGE_HASHPTE != 0
-	if (pte_val(*ptep) & _PAGE_HASHPTE)
-		flush_hash_entry(mm, ptep, addr);
-#endif
-	__asm__ __volatile__("\
-		stw%U0%X0 %2,%0\n\
-		eieio\n\
-		stw%U0%X0 %L2,%1"
-	: "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
-	: "r" (pte) : "memory");
-#else
-	*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
-		      | (pte_val(pte) & ~_PAGE_HASHPTE));
-#endif
-}
-
-
-static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
-			      pte_t *ptep, pte_t pte)
-{
-#if defined(CONFIG_PTE_64BIT) && defined(CONFIG_SMP) && defined(CONFIG_DEBUG_VM)
-	WARN_ON(pte_present(*ptep));
-#endif
-	__set_pte_at(mm, addr, ptep, pte);
-}
-
-/*
  * 2.6 calls this without flushing the TLB entry; this is wrong
  * for our hash-based implementation, we fix that up here.
  */
@@ -744,24 +708,14 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
 }
 
 
-#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
-static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty)
+static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
 {
 	unsigned long bits = pte_val(entry) &
-		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW);
+		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW |
+		 _PAGE_HWEXEC | _PAGE_EXEC);
 	pte_update(ptep, 0, bits);
 }
 
-#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
-({									   \
-	int __changed = !pte_same(*(__ptep), __entry);			   \
-	if (__changed) {						   \
-		__ptep_set_access_flags(__ptep, __entry, __dirty);	   \
-		flush_tlb_page_nohash(__vma, __address);		   \
-	}								   \
-	__changed;							   \
-})
-
 #define __HAVE_ARCH_PTE_SAME
 #define pte_same(A,B)	(((pte_val(A) ^ pte_val(B)) & ~_PAGE_HASHPTE) == 0)
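
The _PAGE_SPECIAL additions to _PAGE_CHG_MASK in pgtable-4k.h, pgtable-64k.h and pgtable-ppc32.h all fix the same hole: pte_modify() keeps only the bits listed in _PAGE_CHG_MASK when a mapping's protection changes, so any flag missing from the mask is silently dropped by mprotect() and friends. For reference, the usual powerpc definition is roughly the following (a sketch for context, not part of this diff):

	static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
	{
		/* everything outside _PAGE_CHG_MASK is replaced by the new protection */
		return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
	}
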
diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
index b0f18be..c627877 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64.h
@@ -125,6 +125,8 @@
 #define _PTEIDX_SECONDARY	0x8
 #define _PTEIDX_GROUP_IX	0x7
 
+/* To make some generic powerpc code happy */
+#define _PAGE_HWEXEC		0
 
 /*
  * POWER4 and newer have per page execute protection, older chips can only
@@ -285,6 +287,10 @@ static inline unsigned long pte_update(struct mm_struct *mm,
 	: "r" (ptep), "r" (clr), "m" (*ptep), "i" (_PAGE_BUSY)
 	: "cc" );
 
+	/* huge pages use the old page table lock */
+	if (!huge)
+		assert_pte_locked(mm, addr);
+
 	if (old & _PAGE_HASHPTE)
 		hpte_need_flush(mm, addr, ptep, old, huge);
 	return old;
@@ -359,23 +365,11 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
 	pte_update(mm, addr, ptep, ~0UL, 0);
 }
 
-/*
- * set_pte stores a linux PTE into the linux page table.
- */
-static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
-			      pte_t *ptep, pte_t pte)
-{
-	if (pte_present(*ptep))
-		pte_clear(mm, addr, ptep);
-	pte = __pte(pte_val(pte) & ~_PAGE_HPTEFLAGS);
-	*ptep = pte;
-}
 
 /* Set the dirty and/or accessed bits atomically in a linux PTE, this
  * function doesn't need to flush the hash entry
  */
-#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
-static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty)
+static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
 {
 	unsigned long bits = pte_val(entry) &
 		(_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
@@ -392,15 +386,6 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry, int dirty)
 	:"r" (bits), "r" (ptep), "m" (*ptep), "i" (_PAGE_BUSY)
 	:"cc");
 }
-#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
-({									   \
-	int __changed = !pte_same(*(__ptep), __entry);			   \
-	if (__changed) {						   \
-		__ptep_set_access_flags(__ptep, __entry, __dirty);	   \
-		flush_tlb_page_nohash(__vma, __address);		   \
-	}								   \
-	__changed;							   \
-})
 
 #define __HAVE_ARCH_PTE_SAME
 #define pte_same(A,B)	(((pte_val(A) ^ pte_val(B)) & ~_PAGE_HPTEFLAGS) == 0)
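
The new assert_pte_locked() call in pte_update() above is declared further down in pgtable.h; its out-of-line CONFIG_DEBUG_VM body is not part of this diff. A plausible sketch of what it checks, assuming the usual page-table walk helpers and PTE-lock accessors, is that the caller of pte_update() actually holds the PTE page lock:

	void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
	{
		pgd_t *pgd;
		pud_t *pud;
		pmd_t *pmd;

		if (mm == &init_mm)
			return;		/* kernel page tables are not PTE-locked */
		pgd = mm->pgd + pgd_index(addr);
		BUG_ON(pgd_none(*pgd));
		pud = pud_offset(pgd, addr);
		BUG_ON(pud_none(*pud));
		pmd = pmd_offset(pud, addr);
		BUG_ON(!pmd_present(*pmd));
		BUG_ON(!spin_is_locked(pte_lockptr(mm, pmd)));
	}
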
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index 07f55e6..5c1c488 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -6,7 +6,17 @@
 #include <asm/processor.h>		/* For TASK_SIZE */
 #include <asm/mmu.h>
 #include <asm/page.h>
+
 struct mm_struct;
+
+#ifdef CONFIG_DEBUG_VM
+extern void assert_pte_locked(struct mm_struct *mm, unsigned long addr);
+#else /* CONFIG_DEBUG_VM */
+static inline void assert_pte_locked(struct mm_struct *mm, unsigned long addr)
+{
+}
+#endif /* !CONFIG_DEBUG_VM */
+
 #endif /* !__ASSEMBLY__ */
 
 #if defined(CONFIG_PPC64)
@@ -17,6 +27,80 @@ struct mm_struct;
 
 #ifndef __ASSEMBLY__
 
+/* Insert a PTE, top-level function is out of line. It uses an inline
+ * low level function in the respective pgtable-* files
+ */
+extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
+		       pte_t pte);
+
+/* This low level function performs the actual PTE insertion
+ * Setting the PTE depends on the MMU type and other factors. It's
+ * an horrible mess that I'm not going to try to clean up now but
+ * I'm keeping it in one place rather than spread around
+ */
+static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
+				pte_t *ptep, pte_t pte, int percpu)
+{
+#if defined(CONFIG_PPC_STD_MMU_32) && defined(CONFIG_SMP) && !defined(CONFIG_PTE_64BIT)
+	/* First case is 32-bit Hash MMU in SMP mode with 32-bit PTEs. We use the
+	 * helper pte_update() which does an atomic update. We need to do that
+	 * because a concurrent invalidation can clear _PAGE_HASHPTE. If it's a
+	 * per-CPU PTE such as a kmap_atomic, we do a simple update preserving
+	 * the hash bits instead (ie, same as the non-SMP case)
+	 */
+	if (percpu)
+		*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
+			      | (pte_val(pte) & ~_PAGE_HASHPTE));
+	else
+		pte_update(ptep, ~_PAGE_HASHPTE, pte_val(pte));
+
+#elif defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT) && defined(CONFIG_SMP)
+	/* Second case is 32-bit with 64-bit PTE in SMP mode. In this case, we
+	 * can just store as long as we do the two halves in the right order
+	 * with a barrier in between. This is possible because we take care,
+	 * in the hash code, to pre-invalidate if the PTE was already hashed,
+	 * which synchronizes us with any concurrent invalidation.
+	 * In the percpu case, we also fallback to the simple update preserving
+	 * the hash bits
+	 */
+	if (percpu) {
+		*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
+			      | (pte_val(pte) & ~_PAGE_HASHPTE));
+		return;
+	}
+#if _PAGE_HASHPTE != 0
+	if (pte_val(*ptep) & _PAGE_HASHPTE)
+		flush_hash_entry(mm, ptep, addr);
+#endif
+	__asm__ __volatile__("\
+		stw%U0%X0 %2,%0\n\
+		eieio\n\
+		stw%U0%X0 %L2,%1"
+	: "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
+	: "r" (pte) : "memory");
+
+#elif defined(CONFIG_PPC_STD_MMU_32)
+	/* Third case is 32-bit hash table in UP mode, we need to preserve
+	 * the _PAGE_HASHPTE bit since we may not have invalidated the previous
+	 * translation in the hash yet (done in a subsequent flush_tlb_xxx())
+	 * and see we need to keep track that this PTE needs invalidating
+	 */
+	*ptep = __pte((pte_val(*ptep) & _PAGE_HASHPTE)
+		      | (pte_val(pte) & ~_PAGE_HASHPTE));
+
+#else
+	/* Anything else just stores the PTE normally. That covers all 64-bit
+	 * cases, and 32-bit non-hash with 64-bit PTEs in UP mode
+	 */
+	*ptep = pte;
+#endif
+}
+
+
+#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
+				 pte_t *ptep, pte_t entry, int dirty);
+
 /*
  * Macro to mark a page protection value as "uncacheable".
  */
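
Both set_pte_at() and ptep_set_access_flags() are now declared extern here and implemented once, out of line, in a file that is not part of this diff. Their bodies presumably recombine the logic just removed from pgtable-ppc32.h and pgtable-ppc64.h; roughly, as a sketch:

	void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
			pte_t pte)
	{
	#ifdef CONFIG_DEBUG_VM
		WARN_ON(pte_present(*ptep));	/* was the ppc32 SMP debug check */
	#endif
		__set_pte_at(mm, addr, ptep, pte, 0);	/* 0 == not a per-CPU mapping */
	}

	int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
				  pte_t *ptep, pte_t entry, int dirty)
	{
		int changed = !pte_same(*ptep, entry);

		if (changed) {
			__ptep_set_access_flags(ptep, entry);
			flush_tlb_page_nohash(vma, address);	/* as the removed macros did */
		}
		return changed;
	}

Note the percpu argument: strictly per-CPU mappings such as kmap_atomic slots pass 1 and take the simple non-atomic path, as the highmem.h hunk at the top of this patch shows.
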
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
index 6745376..597debe 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -110,6 +110,7 @@
 #define SPRN_L1CSR0	0x3F2	/* L1 Cache Control and Status Register 0 */
 #define SPRN_L1CSR1	0x3F3	/* L1 Cache Control and Status Register 1 */
 #define SPRN_MMUCSR0	0x3F4	/* MMU Control and Status Register 0 */
+#define SPRN_MMUCFG	0x3F7	/* MMU Configuration Register */
 #define SPRN_PIT	0x3DB	/* Programmable Interval Timer */
 #define SPRN_BUCSR	0x3F5	/* Branch Unit Control and Status */
 #define SPRN_L2CSR0	0x3F9	/* L2 Data Cache Control and Status Register 0 */
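
SPRN_MMUCFG gives Book-E/Book-3e boot code a way to discover the MMU geometry at run time. An illustrative read, assuming the kernel's standard mfspr() accessor and the ISA 2.06 field layout (the NTLBS extraction below is an assumption for illustration, not something this patch defines):

	static unsigned int num_tlb_arrays(void)
	{
		unsigned int mmucfg = mfspr(SPRN_MMUCFG);

		/* MMUCFG[NTLBS] (bits 2:3) holds the number of TLB arrays minus one */
		return ((mmucfg >> 2) & 0x3) + 1;
	}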