Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--  arch/powerpc/mm/40x_mmu.c             |  4
-rw-r--r--  arch/powerpc/mm/hash_native_64.c      |  5
-rw-r--r--  arch/powerpc/mm/mmu_context_hash64.c  | 10
-rw-r--r--  arch/powerpc/mm/tlb_low_64e.S         |  2
4 files changed, 10 insertions, 11 deletions
diff --git a/arch/powerpc/mm/40x_mmu.c b/arch/powerpc/mm/40x_mmu.c
index 08dfa8e..65abfcf 100644
--- a/arch/powerpc/mm/40x_mmu.c
+++ b/arch/powerpc/mm/40x_mmu.c
@@ -84,8 +84,8 @@ void __init MMU_init_hw(void)
 	 * vectors and the kernel live in real-mode.
 	 */
 
-	mtspr(SPRN_DCCR, 0xF0000000);	/* 512 MB of data space at 0x0. */
-	mtspr(SPRN_ICCR, 0xF0000000);	/* 512 MB of instr. space at 0x0. */
+	mtspr(SPRN_DCCR, 0xFFFF0000);	/* 2GByte of data space at 0x0. */
+	mtspr(SPRN_ICCR, 0xFFFF0000);	/* 2GByte of instr. space at 0x0. */
 }
 
 #define LARGE_PAGE_SIZE_16M	(1<<24)
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index 056d23a..9e1aa4f 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -122,7 +122,7 @@ static inline void native_lock_hpte(struct hash_pte *hptep)
 	unsigned long *word = &hptep->v;
 
 	while (1) {
-		if (!test_and_set_bit(HPTE_LOCK_BIT, word))
+		if (!test_and_set_bit_lock(HPTE_LOCK_BIT, word))
 			break;
 		while(test_bit(HPTE_LOCK_BIT, word))
 			cpu_relax();
@@ -133,8 +133,7 @@ static inline void native_unlock_hpte(struct hash_pte *hptep)
 {
 	unsigned long *word = &hptep->v;
 
-	asm volatile("lwsync":::"memory");
-	clear_bit(HPTE_LOCK_BIT, word);
+	clear_bit_unlock(HPTE_LOCK_BIT, word);
 }
 
 static long native_hpte_insert(unsigned long hpte_group, unsigned long va,
diff --git a/arch/powerpc/mm/mmu_context_hash64.c b/arch/powerpc/mm/mmu_context_hash64.c
index b910d37..51622da 100644
--- a/arch/powerpc/mm/mmu_context_hash64.c
+++ b/arch/powerpc/mm/mmu_context_hash64.c
@@ -23,7 +23,7 @@
 #include <asm/mmu_context.h>
 
 static DEFINE_SPINLOCK(mmu_context_lock);
-static DEFINE_IDR(mmu_context_idr);
+static DEFINE_IDA(mmu_context_ida);
 
 /*
  * The proto-VSID space has 2^35 - 1 segments available for user mappings.
@@ -39,11 +39,11 @@ int __init_new_context(void)
 	int err;
 
 again:
-	if (!idr_pre_get(&mmu_context_idr, GFP_KERNEL))
+	if (!ida_pre_get(&mmu_context_ida, GFP_KERNEL))
 		return -ENOMEM;
 
 	spin_lock(&mmu_context_lock);
-	err = idr_get_new_above(&mmu_context_idr, NULL, 1, &index);
+	err = ida_get_new_above(&mmu_context_ida, 1, &index);
 	spin_unlock(&mmu_context_lock);
 
 	if (err == -EAGAIN)
@@ -53,7 +53,7 @@ again:
 
 	if (index > MAX_CONTEXT) {
 		spin_lock(&mmu_context_lock);
-		idr_remove(&mmu_context_idr, index);
+		ida_remove(&mmu_context_ida, index);
 		spin_unlock(&mmu_context_lock);
 		return -ENOMEM;
 	}
@@ -85,7 +85,7 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 void __destroy_context(int context_id)
 {
 	spin_lock(&mmu_context_lock);
-	idr_remove(&mmu_context_idr, context_id);
+	ida_remove(&mmu_context_ida, context_id);
 	spin_unlock(&mmu_context_lock);
 }
 EXPORT_SYMBOL_GPL(__destroy_context);
diff --git a/arch/powerpc/mm/tlb_low_64e.S b/arch/powerpc/mm/tlb_low_64e.S
index f288279..8b04c54 100644
--- a/arch/powerpc/mm/tlb_low_64e.S
+++ b/arch/powerpc/mm/tlb_low_64e.S
@@ -1,5 +1,5 @@
 /*
- *  Low leve TLB miss handlers for Book3E
+ *  Low level TLB miss handlers for Book3E
  *
  *  Copyright (C) 2008-2009
  *      Ben. Herrenschmidt (benh@kernel.crashing.org), IBM Corp.
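
Reading aid (not part of the patch): the mmu_context_hash64.c hunks are scattered across the file, so below is a consolidated, illustrative sketch of the context-ID allocation path as it stands after the IDR-to-IDA conversion. IDA fits here because the code never stored a pointer payload (the removed idr_get_new_above() call passed NULL), so an ID-only allocator suffices. Names such as mmu_context_ida, mmu_context_lock and MAX_CONTEXT come from the patched file; this is kernel-style sketch code, not a drop-in replacement for it.

    /* Sketch assembled from the hunks above: allocate/free a context ID with the IDA API. */
    #include <linux/idr.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(mmu_context_lock);
    static DEFINE_IDA(mmu_context_ida);

    int __init_new_context(void)
    {
    	int index;
    	int err;

    again:
    	/* Preload the IDA outside the spinlock; may sleep with GFP_KERNEL. */
    	if (!ida_pre_get(&mmu_context_ida, GFP_KERNEL))
    		return -ENOMEM;

    	/* Grab the lowest free ID >= 1 under the lock. */
    	spin_lock(&mmu_context_lock);
    	err = ida_get_new_above(&mmu_context_ida, 1, &index);
    	spin_unlock(&mmu_context_lock);

    	if (err == -EAGAIN)
    		goto again;	/* preloaded cache was consumed; retry */
    	else if (err)
    		return err;

    	/* IDs past the proto-VSID space are unusable; give the slot back. */
    	if (index > MAX_CONTEXT) {
    		spin_lock(&mmu_context_lock);
    		ida_remove(&mmu_context_ida, index);
    		spin_unlock(&mmu_context_lock);
    		return -ENOMEM;
    	}

    	return index;
    }

    void __destroy_context(int context_id)
    {
    	spin_lock(&mmu_context_lock);
    	ida_remove(&mmu_context_ida, context_id);
    	spin_unlock(&mmu_context_lock);
    }

On the hash_native_64.c side, test_and_set_bit_lock() and clear_bit_unlock() carry acquire and release ordering respectively, which is why the explicit lwsync barrier before clear_bit() can be dropped in native_unlock_hpte().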