author | David S. Miller <davem@sunset.davemloft.net> | 2005-09-25 16:46:57 -0700
committer | David S. Miller <davem@sunset.davemloft.net> | 2005-09-25 16:46:57 -0700
commit | 56425306517ef28a9b480161cdb96d182172bc1d (patch)
tree | 204cfbef0e5d86954f87b6b40d79d57f8157e5ea /include/asm-sparc64
parent | 52f26deb7c67d5f34910660200b925c1a2b8df8c (diff)
[SPARC64]: Add CONFIG_DEBUG_PAGEALLOC support.
The trick is that we do the kernel linear mapping TLB miss starting
with an instruction sequence like this:
        ba,pt   %xcc, kvmap_load
         xor    %g2, %g4, %g5
succeeded by an instruction sequence which performs a full page table
walk starting at swapper_pg_dir.
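In other words, for the linear mapping no table lookup is needed at all: the PTE is generated from the faulting virtual address with a single xor against a precomputed constant, chosen so that it strips the PAGE_OFFSET bits and sets the kernel mapping attribute bits in one step. A minimal C sketch of that idea (illustrative only; which of %g2/%g4 holds the address and which the constant, and the constant's actual value, are not shown in this commit):

        /* Sketch: generate a linear-mapping PTE arithmetically.
         * pte_xor is a precomputed constant such that
         * vaddr ^ pte_xor == (vaddr - PAGE_OFFSET) | <kernel PTE bits>.
         */
        static inline unsigned long kern_linear_pte(unsigned long vaddr,
                                                    unsigned long pte_xor)
        {
                return vaddr ^ pte_xor;
        }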
We first take over the trap table from the firmware. Then, using this
constant PTE generation for the linear mapping area above, we build
the kernel page tables for the linear mapping.
After this is set up, we patch that branch above into a "nop", which
will cause TLB misses to fall through to the full page table walk.
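A minimal sketch of what that patching step can look like (the function name and placement here are hypothetical, not the actual arch/sparc64 code): write the SPARC nop encoding over the branch and flush the modified word out of the instruction cache so the CPU refetches it.

        static void patch_branch_to_nop(unsigned int *insn)
        {
                *insn = 0x01000000; /* SPARC "nop" == sethi %hi(0), %g0 */
                /* force a refetch of the patched instruction */
                __asm__ __volatile__("flush %0" : : "r" (insn) : "memory");
        }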
With this, the page unmapping for CONFIG_DEBUG_PAGEALLOC is trivial.
Signed-off-by: David S. Miller <davem@davemloft.net>
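The cacheflush.h hunk below declares kernel_map_pages(), the hook the generic CONFIG_DEBUG_PAGEALLOC code calls as kernel pages are allocated and freed. A rough sketch of how that hook can sit on top of the linear-mapping page tables, assuming a hypothetical helper kernel_map_range() that rewrites (or clears) the PTEs covering a physical range; the real implementation lives in arch/sparc64/mm/init.c and is not part of this header diff:

        void kernel_map_pages(struct page *page, int numpages, int enable)
        {
                unsigned long start = page_to_pfn(page) << PAGE_SHIFT;
                unsigned long end = start + numpages * PAGE_SIZE;

                /* enable: restore normal kernel protections,
                 * disable: clear the PTEs so stray accesses trap
                 */
                kernel_map_range(start, end,
                                 enable ? PAGE_KERNEL : __pgprot(0));

                /* shoot down stale TLB entries for that linear range */
                __flush_tlb_kernel_range(PAGE_OFFSET + start,
                                         PAGE_OFFSET + end);
        }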
Diffstat (limited to 'include/asm-sparc64')
-rw-r--r-- | include/asm-sparc64/cacheflush.h | 5
-rw-r--r-- | include/asm-sparc64/pgtable.h | 7
2 files changed, 9 insertions, 3 deletions
diff --git a/include/asm-sparc64/cacheflush.h b/include/asm-sparc64/cacheflush.h
index ededd26..b3f6165 100644
--- a/include/asm-sparc64/cacheflush.h
+++ b/include/asm-sparc64/cacheflush.h
@@ -66,6 +66,11 @@ extern void flush_ptrace_access(struct vm_area_struct *, struct page *,
 #define flush_cache_vmap(start, end) do { } while (0)
 #define flush_cache_vunmap(start, end) do { } while (0)
 
+#ifdef CONFIG_DEBUG_PAGEALLOC
+/* internal debugging function */
+void kernel_map_pages(struct page *page, int numpages, int enable);
+#endif
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* _SPARC64_CACHEFLUSH_H */
diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h
index a297f61..43cbb08 100644
--- a/include/asm-sparc64/pgtable.h
+++ b/include/asm-sparc64/pgtable.h
@@ -60,13 +60,13 @@
  * table can map
  */
 #define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-3))
-#define PMD_SIZE (1UL << PMD_SHIFT)
+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
 #define PMD_MASK (~(PMD_SIZE-1))
 #define PMD_BITS (PAGE_SHIFT - 2)
 
 /* PGDIR_SHIFT determines what a third-level page table entry can map */
 #define PGDIR_SHIFT (PAGE_SHIFT + (PAGE_SHIFT-3) + PMD_BITS)
-#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
+#define PGDIR_SIZE (_AC(1,UL) << PGDIR_SHIFT)
 #define PGDIR_MASK (~(PGDIR_SIZE-1))
 #define PGDIR_BITS (PAGE_SHIFT - 2)
 
@@ -336,7 +336,8 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *p
 #define pte_clear(mm,addr,ptep) \
  set_pte_at((mm), (addr), (ptep), __pte(0UL))
 
-extern pgd_t swapper_pg_dir[1];
+extern pgd_t swapper_pg_dir[2048];
+extern pmd_t swapper_low_pmd_dir[2048];
 
 /* These do nothing with the way I have things setup. */
 #define mmu_lockarea(vaddr, len) (vaddr)
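One note on the pgtable.h hunk: PMD_SIZE and PGDIR_SIZE switch from a plain 1UL to _AC(1,UL) so the same definitions remain usable from assembly sources, where the UL suffix is not accepted. The _AC() macro (defined in an asm header at this point in history, later generalized as include/linux/const.h) expands roughly like this:

        #ifdef __ASSEMBLY__
        #define _AC(X,Y) X
        #else
        #define __AC(X,Y) (X##Y)
        #define _AC(X,Y) __AC(X,Y)
        #endif

The same hunk also grows swapper_pg_dir from a one-entry placeholder to a full 2048-entry pgd and adds swapper_low_pmd_dir, so the full page table walk described in the commit message has real top-level tables to walk.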