author     Linus Torvalds <torvalds@ppc970.osdl.org>   2005-04-16 15:20:36 -0700
committer  Linus Torvalds <torvalds@ppc970.osdl.org>   2005-04-16 15:20:36 -0700
commit     1da177e4c3f41524e886b7f1b8a0c1fc7321cac2
tree       0bba044c4ce775e45a88a51686b5d9f90697ea9d  /include/asm-ppc/page.h
Linux-2.6.12-rc2
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'include/asm-ppc/page.h')
-rw-r--r--   include/asm-ppc/page.h   169
1 file changed, 169 insertions, 0 deletions
diff --git a/include/asm-ppc/page.h b/include/asm-ppc/page.h
new file mode 100644
index 0000000..4789dc0
--- /dev/null
+++ b/include/asm-ppc/page.h
@@ -0,0 +1,169 @@
+#ifndef _PPC_PAGE_H
+#define _PPC_PAGE_H
+
+/* PAGE_SHIFT determines the page size */
+#define PAGE_SHIFT	12
+#define PAGE_SIZE	(1UL << PAGE_SHIFT)
+
+/*
+ * Subtle: this is an int (not an unsigned long) and so it
+ * gets extended to 64 bits the way we want (i.e. with 1s). -- paulus
+ */
+#define PAGE_MASK	(~((1 << PAGE_SHIFT) - 1))
+
+#ifdef __KERNEL__
+#include <linux/config.h>
+
+/* This must match what is in arch/ppc/Makefile */
+#define PAGE_OFFSET	CONFIG_KERNEL_START
+#define KERNELBASE	PAGE_OFFSET
+
+#ifndef __ASSEMBLY__
+
+/*
+ * The basic type of a PTE - 64 bits for those CPUs with > 32 bit
+ * physical addressing. For now this just the IBM PPC440.
+ */
+#ifdef CONFIG_PTE_64BIT
+typedef unsigned long long pte_basic_t;
+#define PTE_SHIFT	(PAGE_SHIFT - 3)	/* 512 ptes per page */
+#define PTE_FMT		"%16Lx"
+#else
+typedef unsigned long pte_basic_t;
+#define PTE_SHIFT	(PAGE_SHIFT - 2)	/* 1024 ptes per page */
+#define PTE_FMT		"%.8lx"
+#endif
+
+#undef STRICT_MM_TYPECHECKS
+
+#ifdef STRICT_MM_TYPECHECKS
+/*
+ * These are used to make use of C type-checking..
+ */
+typedef struct { pte_basic_t pte; } pte_t;
+typedef struct { unsigned long pmd; } pmd_t;
+typedef struct { unsigned long pgd; } pgd_t;
+typedef struct { unsigned long pgprot; } pgprot_t;
+
+#define pte_val(x)	((x).pte)
+#define pmd_val(x)	((x).pmd)
+#define pgd_val(x)	((x).pgd)
+#define pgprot_val(x)	((x).pgprot)
+
+#define __pte(x)	((pte_t) { (x) } )
+#define __pmd(x)	((pmd_t) { (x) } )
+#define __pgd(x)	((pgd_t) { (x) } )
+#define __pgprot(x)	((pgprot_t) { (x) } )
+
+#else
+/*
+ * .. while these make it easier on the compiler
+ */
+typedef pte_basic_t pte_t;
+typedef unsigned long pmd_t;
+typedef unsigned long pgd_t;
+typedef unsigned long pgprot_t;
+
+#define pte_val(x)	(x)
+#define pmd_val(x)	(x)
+#define pgd_val(x)	(x)
+#define pgprot_val(x)	(x)
+
+#define __pte(x)	(x)
+#define __pmd(x)	(x)
+#define __pgd(x)	(x)
+#define __pgprot(x)	(x)
+
+#endif
+
+
+/* align addr on a size boundary - adjust address up if needed -- Cort */
+#define _ALIGN(addr,size)	(((addr)+(size)-1)&(~((size)-1)))
+
+/* to align the pointer to the (next) page boundary */
+#define PAGE_ALIGN(addr)	(((addr)+PAGE_SIZE-1)&PAGE_MASK)
+
+struct page;
+extern void clear_pages(void *page, int order);
+static inline void clear_page(void *page) { clear_pages(page, 0); }
+extern void copy_page(void *to, void *from);
+extern void clear_user_page(void *page, unsigned long vaddr, struct page *pg);
+extern void copy_user_page(void *to, void *from, unsigned long vaddr,
+			   struct page *pg);
+
+#ifndef CONFIG_APUS
+#define PPC_MEMSTART	0
+#define PPC_PGSTART	0
+#define PPC_MEMOFFSET	PAGE_OFFSET
+#else
+extern unsigned long ppc_memstart;
+extern unsigned long ppc_pgstart;
+extern unsigned long ppc_memoffset;
+#define PPC_MEMSTART	ppc_memstart
+#define PPC_PGSTART	ppc_pgstart
+#define PPC_MEMOFFSET	ppc_memoffset
+#endif
+
+#if defined(CONFIG_APUS) && !defined(MODULE)
+/* map phys->virtual and virtual->phys for RAM pages */
+static inline unsigned long ___pa(unsigned long v)
+{
+	unsigned long p;
+	asm volatile ("1: addis %0, %1, %2;"
+		      ".section \".vtop_fixup\",\"aw\";"
+		      ".align  1;"
+		      ".long   1b;"
+		      ".previous;"
+		      : "=r" (p)
+		      : "b" (v), "K" (((-PAGE_OFFSET) >> 16) & 0xffff));
+
+	return p;
+}
+static inline void* ___va(unsigned long p)
+{
+	unsigned long v;
+	asm volatile ("1: addis %0, %1, %2;"
+		      ".section \".ptov_fixup\",\"aw\";"
+		      ".align  1;"
+		      ".long   1b;"
+		      ".previous;"
+		      : "=r" (v)
+		      : "b" (p), "K" (((PAGE_OFFSET) >> 16) & 0xffff));
+
+	return (void*) v;
}
+#else
+#define ___pa(vaddr) ((vaddr)-PPC_MEMOFFSET)
+#define ___va(paddr) ((paddr)+PPC_MEMOFFSET)
+#endif
+
+extern int page_is_ram(unsigned long pfn);
+
+#define __pa(x) ___pa((unsigned long)(x))
+#define __va(x) ((void *)(___va((unsigned long)(x))))
+
+#define pfn_to_page(pfn)	(mem_map + ((pfn) - PPC_PGSTART))
+#define page_to_pfn(page)	((unsigned long)((page) - mem_map) + PPC_PGSTART)
+#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
+#define page_to_virt(page)	__va(page_to_pfn(page) << PAGE_SHIFT)
+
+#define pfn_valid(pfn)		(((pfn) - PPC_PGSTART) < max_mapnr)
+#define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
+
+/* Pure 2^n version of get_order */
+extern __inline__ int get_order(unsigned long size)
+{
+	int lz;
+
+	size = (size-1) >> PAGE_SHIFT;
+	asm ("cntlzw %0,%1" : "=r" (lz) : "r" (size));
+	return 32 - lz;
+}
+
+#endif /* __ASSEMBLY__ */
+
+#define VM_DATA_DEFAULT_FLAGS	(VM_READ | VM_WRITE | VM_EXEC | \
+				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+#endif /* __KERNEL__ */
+#endif /* _PPC_PAGE_H */