author | Jeremy Fitzhardinge <jeremy@goop.org> | 2008-01-30 13:32:58 +0100 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2008-01-30 13:32:58 +0100 |
commit | 195466dc4b9b8a4cc89d37ea1211746f3afbc941 (patch) | |
tree | 38a4dc9e105d54cf285cdcbc141b424a2fc16f41 /include/asm-x86/pgtable_64.h | |
parent | e33287013585e96180c575288bf1db22bee47b52 (diff) | |
x86: pgtable: unify pte accessors
Make various pte accessors common. Rename the raw 64-bit operations to native_*() (native_ptep_get_and_clear, native_ptep_get_and_clear_full, native_pte_clear) and drop the generic ptep helpers, their __HAVE_ARCH_* markers and the asm-generic/pgtable.h include from pgtable_64.h, so the shared definitions can be provided once by the common x86 pgtable header.
Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
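For context, the renamed native_*() operations are the raw, non-paravirtualized accessors; the generic names are then meant to be defined once in the shared x86 pgtable header. A minimal sketch of that mapping, assuming the CONFIG_PARAVIRT-disabled case (illustration only, not the actual contents of include/asm-x86/pgtable.h):

```c
/*
 * Illustration only -- not the real include/asm-x86/pgtable.h.
 * Once the raw operations are named native_*(), a header shared by
 * 32-bit and 64-bit can define the generic pte accessors a single
 * time.  Without CONFIG_PARAVIRT the generic names resolve directly
 * to the native implementations; a paravirt backend could interpose
 * its own hooks instead.
 */
#ifndef CONFIG_PARAVIRT
#define ptep_get_and_clear(mm, addr, ptep)  native_ptep_get_and_clear(ptep)
#define pte_clear(mm, addr, ptep)           native_pte_clear(mm, addr, ptep)
#endif
```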
Diffstat (limited to 'include/asm-x86/pgtable_64.h')
-rw-r--r-- | include/asm-x86/pgtable_64.h | 43 |
1 file changed, 4 insertions(+), 39 deletions(-)
diff --git a/include/asm-x86/pgtable_64.h b/include/asm-x86/pgtable_64.h
index 77038d8..987f51f 100644
--- a/include/asm-x86/pgtable_64.h
+++ b/include/asm-x86/pgtable_64.h
@@ -101,18 +101,18 @@ static inline void pgd_clear (pgd_t * pgd)
 	set_pgd(pgd, __pgd(0));
 }
 
-#define ptep_get_and_clear(mm,addr,xp)	__pte(xchg(&(xp)->pte, 0))
+#define native_ptep_get_and_clear(xp)	__pte(xchg(&(xp)->pte, 0))
 
 struct mm_struct;
 
-static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full)
+static inline pte_t native_ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full)
 {
 	pte_t pte;
 	if (full) {
 		pte = *ptep;
 		*ptep = __pte(0);
 	} else {
-		pte = ptep_get_and_clear(mm, addr, ptep);
+		pte = native_ptep_get_and_clear(ptep);
 	}
 	return pte;
 }
@@ -158,26 +158,12 @@ static inline unsigned long pmd_bad(pmd_t pmd)
 
 #define pte_none(x)	(!pte_val(x))
 #define pte_present(x)	(pte_val(x) & (_PAGE_PRESENT | _PAGE_PROTNONE))
-#define pte_clear(mm,addr,xp)	do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
+#define native_pte_clear(mm,addr,xp)	do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
 
 #define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))	/* FIXME: is this right? */
 #define pte_page(x)	pfn_to_page(pte_pfn(x))
 #define pte_pfn(x)	((pte_val(x) & __PHYSICAL_MASK) >> PAGE_SHIFT)
 
-struct vm_area_struct;
-
-static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep)
-{
-	if (!pte_young(*ptep))
-		return 0;
-	return test_and_clear_bit(_PAGE_BIT_ACCESSED, &ptep->pte);
-}
-
-static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
-{
-	clear_bit(_PAGE_BIT_RW, &ptep->pte);
-}
-
 /*
  * Macro to mark a page protection value as "uncacheable".
  */
@@ -243,22 +229,6 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
 
 #define update_mmu_cache(vma,address,pte) do { } while (0)
 
-/* We only update the dirty/accessed state if we set
- * the dirty bit by hand in the kernel, since the hardware
- * will do the accessed bit for us, and we don't want to
- * race with other CPU's that might be updating the dirty
- * bit at the same time. */
-#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
-#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
-({								\
-	int __changed = !pte_same(*(__ptep), __entry);		\
-	if (__changed && __dirty) {				\
-		set_pte(__ptep, __entry);			\
-		flush_tlb_page(__vma, __address);		\
-	}							\
-	__changed;						\
-})
-
 /* Encode and de-code a swap entry */
 #define __swp_type(x)		(((x).val >> 1) & 0x3f)
 #define __swp_offset(x)		((x).val >> 8)
@@ -290,12 +260,7 @@ pte_t *lookup_address(unsigned long addr);
 #define kc_offset_to_vaddr(o) \
    (((o) & (1UL << (__VIRTUAL_MASK_SHIFT-1))) ? ((o) | (~__VIRTUAL_MASK)) : (o))
 
-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
-#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
-#define __HAVE_ARCH_PTEP_SET_WRPROTECT
 #define __HAVE_ARCH_PTE_SAME
-#include <asm-generic/pgtable.h>
 #endif /* !__ASSEMBLY__ */
 
 #endif /* _X86_64_PGTABLE_H */
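Worth noting is the accessor this patch keeps: native_ptep_get_and_clear() reads and zeroes the PTE with a single xchg, so accessed/dirty bits that another CPU's page-table walker sets concurrently cannot be lost between the read and the clear, while native_ptep_get_and_clear_full() skips the atomic when the whole address space is being torn down and no concurrent updates are possible. A standalone, user-space sketch of the same read-and-clear idiom (C11 atomics on a plain word standing in for the pte; the fake_* names are made up for the illustration):

```c
/* Illustration only: user-space model of the xchg-based pattern in
 * native_ptep_get_and_clear().  The old value is returned and the
 * slot is zeroed in one atomic step. */
#include <stdio.h>
#include <stdatomic.h>

typedef unsigned long fake_pte_t;   /* stand-in for pte_t.pte */

static fake_pte_t fake_ptep_get_and_clear(_Atomic fake_pte_t *ptep)
{
	/* In spirit: __pte(xchg(&ptep->pte, 0)) */
	return atomic_exchange(ptep, 0);
}

int main(void)
{
	/* Example value; the low bits loosely model x86 PTE flag bits. */
	_Atomic fake_pte_t pte = 0x1067UL;
	fake_pte_t old = fake_ptep_get_and_clear(&pte);

	printf("old pte = %#lx, pte now = %#lx\n", old, (fake_pte_t)pte);
	return 0;
}
```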