author    Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-05-09 12:56:01 -0700
committer Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-05-09 12:56:01 -0700
commit    aabded9c3aab5160ae2ca3dd1fa0fa37f3d510e4 (patch)
tree      8544d546735bcb975b8dec296eb9b6dc6531fb2a /include/asm-powerpc
parent    9a9136e270af14da506f66bcafcc506b86a86498 (diff)
parent    f1a1eb299a8422c3e8d41753095bec44b2493398 (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc
* 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc:
  [POWERPC] Further fixes for the removal of 4level-fixup hack from ppc32
  [POWERPC] EEH: log all PCI-X and PCI-E AER registers
  [POWERPC] EEH: capture and log pci state on error
  [POWERPC] EEH: Split up long error msg
  [POWERPC] EEH: log error only after driver notification.
  [POWERPC] fsl_soc: Make mac_addr const in fs_enet_of_init().
  [POWERPC] Don't use SLAB/SLUB for PTE pages
  [POWERPC] Spufs support for 64K LS mappings on 4K kernels
  [POWERPC] Add ability to 4K kernel to hash in 64K pages
  [POWERPC] Introduce address space "slices"
  [POWERPC] Small fixes & cleanups in segment page size demotion
  [POWERPC] iSeries: Make HVC_ISERIES the default
  [POWERPC] iSeries: suppress build warning in lparmap.c
  [POWERPC] Mark pages that don't exist as nosave
  [POWERPC] swsusp: Introduce register_nosave_region_late
Diffstat (limited to 'include/asm-powerpc')
-rw-r--r--  include/asm-powerpc/mmu-hash64.h   11
-rw-r--r--  include/asm-powerpc/paca.h          2
-rw-r--r--  include/asm-powerpc/page_64.h      86
-rw-r--r--  include/asm-powerpc/pgalloc-64.h   31
-rw-r--r--  include/asm-powerpc/pgtable-4k.h    6
-rw-r--r--  include/asm-powerpc/pgtable-64k.h   7
-rw-r--r--  include/asm-powerpc/spu_csa.h      10
7 files changed, 83 insertions(+), 70 deletions(-)
diff --git a/include/asm-powerpc/mmu-hash64.h b/include/asm-powerpc/mmu-hash64.h
index 6739457..e2ca55b 100644
--- a/include/asm-powerpc/mmu-hash64.h
+++ b/include/asm-powerpc/mmu-hash64.h
@@ -350,10 +350,13 @@ typedef unsigned long mm_context_id_t;
typedef struct {
mm_context_id_t id;
- u16 user_psize; /* page size index */
- u16 sllp; /* SLB entry page size encoding */
-#ifdef CONFIG_HUGETLB_PAGE
- u16 low_htlb_areas, high_htlb_areas;
+ u16 user_psize; /* page size index */
+
+#ifdef CONFIG_PPC_MM_SLICES
+ u64 low_slices_psize; /* SLB page size encodings */
+ u64 high_slices_psize; /* 4 bits per slice for now */
+#else
+ u16 sllp; /* SLB page size encoding */
#endif
unsigned long vdso_base;
} mm_context_t;
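
The replacement fields pack one 4-bit page-size index per slice, so each
u64 covers 16 slices.  A minimal sketch of how such a packed word could be
read (illustrative only; the real accessors live in the arch code merged
alongside this header, outside the include/asm-powerpc diffstat):

/* Illustrative: extract the 4-bit MMU page-size index for slice i
 * from a packed word such as low_slices_psize or high_slices_psize. */
static inline unsigned int slice_psize_of(u64 slices_psize, unsigned int i)
{
	return (slices_psize >> (i * 4)) & 0xf;
}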
diff --git a/include/asm-powerpc/paca.h b/include/asm-powerpc/paca.h
index cf95274..c6a5b17 100644
--- a/include/asm-powerpc/paca.h
+++ b/include/asm-powerpc/paca.h
@@ -83,8 +83,8 @@ struct paca_struct {
mm_context_t context;
u16 vmalloc_sllp;
- u16 slb_cache[SLB_CACHE_ENTRIES];
u16 slb_cache_ptr;
+ u16 slb_cache[SLB_CACHE_ENTRIES];
/*
* then miscellaneous read-write fields
diff --git a/include/asm-powerpc/page_64.h b/include/asm-powerpc/page_64.h
index eab779c..3448a3d 100644
--- a/include/asm-powerpc/page_64.h
+++ b/include/asm-powerpc/page_64.h
@@ -88,57 +88,55 @@ extern unsigned int HPAGE_SHIFT;
#endif /* __ASSEMBLY__ */
-#ifdef CONFIG_HUGETLB_PAGE
+#ifdef CONFIG_PPC_MM_SLICES
-#define HTLB_AREA_SHIFT 40
-#define HTLB_AREA_SIZE (1UL << HTLB_AREA_SHIFT)
-#define GET_HTLB_AREA(x) ((x) >> HTLB_AREA_SHIFT)
+#define SLICE_LOW_SHIFT 28
+#define SLICE_HIGH_SHIFT 40
-#define LOW_ESID_MASK(addr, len) \
- (((1U << (GET_ESID(min((addr)+(len)-1, 0x100000000UL))+1)) \
- - (1U << GET_ESID(min((addr), 0x100000000UL)))) & 0xffff)
-#define HTLB_AREA_MASK(addr, len) (((1U << (GET_HTLB_AREA(addr+len-1)+1)) \
- - (1U << GET_HTLB_AREA(addr))) & 0xffff)
+#define SLICE_LOW_TOP (0x100000000ul)
+#define SLICE_NUM_LOW (SLICE_LOW_TOP >> SLICE_LOW_SHIFT)
+#define SLICE_NUM_HIGH (PGTABLE_RANGE >> SLICE_HIGH_SHIFT)
-#define ARCH_HAS_HUGEPAGE_ONLY_RANGE
-#define ARCH_HAS_HUGETLB_FREE_PGD_RANGE
-#define ARCH_HAS_PREPARE_HUGEPAGE_RANGE
-#define ARCH_HAS_SETCLEAR_HUGE_PTE
+#define GET_LOW_SLICE_INDEX(addr) ((addr) >> SLICE_LOW_SHIFT)
+#define GET_HIGH_SLICE_INDEX(addr) ((addr) >> SLICE_HIGH_SHIFT)
-#define touches_hugepage_low_range(mm, addr, len) \
- (((addr) < 0x100000000UL) \
- && (LOW_ESID_MASK((addr), (len)) & (mm)->context.low_htlb_areas))
-#define touches_hugepage_high_range(mm, addr, len) \
- ((((addr) + (len)) > 0x100000000UL) \
- && (HTLB_AREA_MASK((addr), (len)) & (mm)->context.high_htlb_areas))
-
-#define __within_hugepage_low_range(addr, len, segmask) \
- ( (((addr)+(len)) <= 0x100000000UL) \
- && ((LOW_ESID_MASK((addr), (len)) | (segmask)) == (segmask)))
-#define within_hugepage_low_range(addr, len) \
- __within_hugepage_low_range((addr), (len), \
- current->mm->context.low_htlb_areas)
-#define __within_hugepage_high_range(addr, len, zonemask) \
- ( ((addr) >= 0x100000000UL) \
- && ((HTLB_AREA_MASK((addr), (len)) | (zonemask)) == (zonemask)))
-#define within_hugepage_high_range(addr, len) \
- __within_hugepage_high_range((addr), (len), \
- current->mm->context.high_htlb_areas)
-
-#define is_hugepage_only_range(mm, addr, len) \
- (touches_hugepage_high_range((mm), (addr), (len)) || \
- touches_hugepage_low_range((mm), (addr), (len)))
-#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
+#ifndef __ASSEMBLY__
+
+struct slice_mask {
+ u16 low_slices;
+ u16 high_slices;
+};
+
+struct mm_struct;
-#define in_hugepage_area(context, addr) \
- (cpu_has_feature(CPU_FTR_16M_PAGE) && \
- ( ( (addr) >= 0x100000000UL) \
- ? ((1 << GET_HTLB_AREA(addr)) & (context).high_htlb_areas) \
- : ((1 << GET_ESID(addr)) & (context).low_htlb_areas) ) )
+extern unsigned long slice_get_unmapped_area(unsigned long addr,
+ unsigned long len,
+ unsigned long flags,
+ unsigned int psize,
+ int topdown,
+ int use_cache);
-#else /* !CONFIG_HUGETLB_PAGE */
+extern unsigned int get_slice_psize(struct mm_struct *mm,
+ unsigned long addr);
-#define in_hugepage_area(mm, addr) 0
+extern void slice_init_context(struct mm_struct *mm, unsigned int psize);
+extern void slice_set_user_psize(struct mm_struct *mm, unsigned int psize);
+
+#define ARCH_HAS_HUGEPAGE_ONLY_RANGE
+extern int is_hugepage_only_range(struct mm_struct *m,
+ unsigned long addr,
+ unsigned long len);
+
+#endif /* __ASSEMBLY__ */
+#else
+#define slice_init()
+#endif /* CONFIG_PPC_MM_SLICES */
+
+#ifdef CONFIG_HUGETLB_PAGE
+
+#define ARCH_HAS_HUGETLB_FREE_PGD_RANGE
+#define ARCH_HAS_SETCLEAR_HUGE_PTE
+#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
#endif /* CONFIG_HUGETLB_PAGE */
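
Taken together, the new constants carve the user address space into 16 low
slices of 256MB each below 4GB (SLICE_LOW_SHIFT 28) and 1TB high slices
above (SLICE_HIGH_SHIFT 40), with struct slice_mask holding one bit per
slice.  A sketch of how the macros compose to mask an address range (an
assumption: the in-tree helper lives in arch/powerpc/mm, outside this
diffstat, and may differ in detail):

/* Sketch only: build a slice_mask covering [start, start + len). */
static struct slice_mask range_to_slice_mask(unsigned long start,
					     unsigned long len)
{
	unsigned long end = start + len - 1;
	struct slice_mask ret = { 0, 0 };

	if (start < SLICE_LOW_TOP) {
		/* one bit per 256MB slice below 4GB */
		unsigned long mend = min(end, SLICE_LOW_TOP - 1);

		ret.low_slices = (1u << (GET_LOW_SLICE_INDEX(mend) + 1))
			- (1u << GET_LOW_SLICE_INDEX(start));
	}
	if (end >= SLICE_LOW_TOP)
		/* one bit per 1TB slice above 4GB */
		ret.high_slices = (1u << (GET_HIGH_SLICE_INDEX(end) + 1))
			- (1u << GET_HIGH_SLICE_INDEX(start));

	return ret;
}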
diff --git a/include/asm-powerpc/pgalloc-64.h b/include/asm-powerpc/pgalloc-64.h
index 30b50cf..d9a3a8c 100644
--- a/include/asm-powerpc/pgalloc-64.h
+++ b/include/asm-powerpc/pgalloc-64.h
@@ -14,18 +14,11 @@
extern struct kmem_cache *pgtable_cache[];
-#ifdef CONFIG_PPC_64K_PAGES
-#define PTE_CACHE_NUM 0
-#define PMD_CACHE_NUM 1
-#define PGD_CACHE_NUM 2
-#define HUGEPTE_CACHE_NUM 3
-#else
-#define PTE_CACHE_NUM 0
-#define PMD_CACHE_NUM 1
-#define PUD_CACHE_NUM 1
-#define PGD_CACHE_NUM 0
-#define HUGEPTE_CACHE_NUM 2
-#endif
+#define PGD_CACHE_NUM 0
+#define PUD_CACHE_NUM 1
+#define PMD_CACHE_NUM 1
+#define HUGEPTE_CACHE_NUM 2
+#define PTE_NONCACHE_NUM 3 /* from GFP rather than kmem_cache */
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
@@ -91,8 +84,7 @@ static inline void pmd_free(pmd_t *pmd)
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address)
{
- return kmem_cache_alloc(pgtable_cache[PTE_CACHE_NUM],
- GFP_KERNEL|__GFP_REPEAT);
+ return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
}
static inline struct page *pte_alloc_one(struct mm_struct *mm,
@@ -103,12 +95,12 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
static inline void pte_free_kernel(pte_t *pte)
{
- kmem_cache_free(pgtable_cache[PTE_CACHE_NUM], pte);
+ free_page((unsigned long)pte);
}
static inline void pte_free(struct page *ptepage)
{
- pte_free_kernel(page_address(ptepage));
+ __free_page(ptepage);
}
#define PGF_CACHENUM_MASK 0x3
@@ -130,14 +122,17 @@ static inline void pgtable_free(pgtable_free_t pgf)
void *p = (void *)(pgf.val & ~PGF_CACHENUM_MASK);
int cachenum = pgf.val & PGF_CACHENUM_MASK;
- kmem_cache_free(pgtable_cache[cachenum], p);
+ if (cachenum == PTE_NONCACHE_NUM)
+ free_page((unsigned long)p);
+ else
+ kmem_cache_free(pgtable_cache[cachenum], p);
}
extern void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf);
#define __pte_free_tlb(tlb, ptepage) \
pgtable_free_tlb(tlb, pgtable_free_cache(page_address(ptepage), \
- PTE_CACHE_NUM, PTE_TABLE_SIZE-1))
+ PTE_NONCACHE_NUM, PTE_TABLE_SIZE-1))
#define __pmd_free_tlb(tlb, pmd) \
pgtable_free_tlb(tlb, pgtable_free_cache(pmd, \
PMD_CACHE_NUM, PMD_TABLE_SIZE-1))
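
With PTE pages now taken straight from the page allocator, the deferred
free path has to know which allocator owns a given table.  As the
pgtable_free() hunk above shows, that tag rides in the low two bits of the
table's address (PGF_CACHENUM_MASK), which are free because tables are
aligned to their size; PTE_NONCACHE_NUM (3) marks pages from the buddy
allocator.  A sketch of the matching encode side (an assumption: the
actual pgtable_free_cache() helper is defined elsewhere in this header and
is untouched by the diff):

/* Sketch: pack an allocator tag into the low bits of an aligned
 * page-table pointer; pgtable_free() above performs the inverse. */
static inline pgtable_free_t pack_pgtable(void *table, int cachenum,
					  unsigned long size_mask)
{
	pgtable_free_t pgf;

	/* the table's alignment guarantees these low bits are zero */
	pgf.val = ((unsigned long)table & ~size_mask) | cachenum;
	return pgf;
}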
diff --git a/include/asm-powerpc/pgtable-4k.h b/include/asm-powerpc/pgtable-4k.h
index 1744d6a..add5481 100644
--- a/include/asm-powerpc/pgtable-4k.h
+++ b/include/asm-powerpc/pgtable-4k.h
@@ -80,7 +80,11 @@
#define pte_iterate_hashed_end() } while(0)
-#define pte_pagesize_index(pte) MMU_PAGE_4K
+#ifdef CONFIG_PPC_HAS_HASH_64K
+#define pte_pagesize_index(mm, addr, pte) get_slice_psize(mm, addr)
+#else
+#define pte_pagesize_index(mm, addr, pte) MMU_PAGE_4K
+#endif
/*
* 4-level page tables related bits
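
The widened pte_pagesize_index(mm, addr, pte) signature lets a 4K kernel
built with CONFIG_PPC_HAS_HASH_64K resolve the backing page size from the
address-space slice rather than from the PTE itself.  A hypothetical call
site (names illustrative, not from this diff):

/* Hypothetical caller: pick the MMU_PAGE_* index for an effective
 * address before hashing the translation in. */
unsigned int psize = pte_pagesize_index(mm, ea, pte);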
diff --git a/include/asm-powerpc/pgtable-64k.h b/include/asm-powerpc/pgtable-64k.h
index 16ef497..31cbd3d 100644
--- a/include/asm-powerpc/pgtable-64k.h
+++ b/include/asm-powerpc/pgtable-64k.h
@@ -35,6 +35,11 @@
#define _PAGE_HPTE_SUB0 0x08000000 /* combo only: first sub page */
#define _PAGE_COMBO 0x10000000 /* this is a combo 4k page */
#define _PAGE_4K_PFN 0x20000000 /* PFN is for a single 4k page */
+
+/* Note: the full-page bits must be in the same location as for normal
+ * 4k pages, as the same assembly will be used to insert 64K pages
+ * whether the kernel has CONFIG_PPC_64K_PAGES or not.
+ */
#define _PAGE_F_SECOND 0x00008000 /* full page: hidx bits */
#define _PAGE_F_GIX 0x00007000 /* full page: hidx bits */
@@ -88,7 +93,7 @@
#define pte_iterate_hashed_end() } while(0); } } while(0)
-#define pte_pagesize_index(pte) \
+#define pte_pagesize_index(mm, addr, pte) \
(((pte) & _PAGE_COMBO)? MMU_PAGE_4K: MMU_PAGE_64K)
#define remap_4k_pfn(vma, addr, pfn, prot) \
diff --git a/include/asm-powerpc/spu_csa.h b/include/asm-powerpc/spu_csa.h
index 02e56a6..c48ae18 100644
--- a/include/asm-powerpc/spu_csa.h
+++ b/include/asm-powerpc/spu_csa.h
@@ -235,6 +235,12 @@ struct spu_priv2_collapsed {
*/
struct spu_state {
struct spu_lscsa *lscsa;
+#ifdef CONFIG_SPU_FS_64K_LS
+ int use_big_pages;
+ /* One struct page per 64k page */
+#define SPU_LSCSA_NUM_BIG_PAGES (sizeof(struct spu_lscsa) / 0x10000)
+ struct page *lscsa_pages[SPU_LSCSA_NUM_BIG_PAGES];
+#endif
struct spu_problem_collapsed prob;
struct spu_priv1_collapsed priv1;
struct spu_priv2_collapsed priv2;
@@ -247,12 +253,14 @@ struct spu_state {
spinlock_t register_lock;
};
-extern void spu_init_csa(struct spu_state *csa);
+extern int spu_init_csa(struct spu_state *csa);
extern void spu_fini_csa(struct spu_state *csa);
extern int spu_save(struct spu_state *prev, struct spu *spu);
extern int spu_restore(struct spu_state *new, struct spu *spu);
extern int spu_switch(struct spu_state *prev, struct spu_state *new,
struct spu *spu);
+extern int spu_alloc_lscsa(struct spu_state *csa);
+extern void spu_free_lscsa(struct spu_state *csa);
#endif /* !__SPU__ */
#endif /* __KERNEL__ */
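
The spu_init_csa() return type changing from void to int matters because,
with CONFIG_SPU_FS_64K_LS, setting up the context save area can now fail
while allocating the lscsa_pages array (one struct page per 64K page of
the local store, per SPU_LSCSA_NUM_BIG_PAGES above).  A hypothetical
caller sketch under that assumption:

/* Hypothetical caller: propagate allocation failure from the CSA. */
struct spu_state csa;
int ret;

ret = spu_init_csa(&csa);
if (ret)
	return ret;		/* e.g. -ENOMEM from the LS allocation */
/* ... use the saved context ... */
spu_fini_csa(&csa);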