author     Thomas Gleixner <tglx@linutronix.de>  2010-02-18 02:22:39 +0000
committer  Benjamin Herrenschmidt <benh@kernel.crashing.org>  2010-02-19 14:52:30 +1100
commit     be833f3371bd9580d9f5a507390d72452577f394 (patch)
tree       329b435c37f9957f9c65a8a7cba11868b1537e57 /arch/powerpc
parent     87d31345c0a90ccdf185feed9923ed14764f45dc (diff)
powerpc: Convert context_lock to raw_spinlock
context_lock needs to be a real spinlock in RT. Convert it to raw_spinlock.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/mm/mmu_context_nohash.c  |  14
1 file changed, 7 insertions(+), 7 deletions(-)
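
Background on the conversion: on PREEMPT_RT, a plain spinlock_t is substituted by a sleeping, rtmutex-based lock, while raw_spinlock_t stays a true spinning lock. Code that runs where sleeping is not allowed, such as the MMU context-switch path protected by context_lock here, therefore has to use the raw_* primitives. The following is a minimal sketch of the locking pattern this patch moves to; the names example_lock, example_counter, example_update and example_update_irqsafe are illustrative only and do not appear in the kernel source, but DEFINE_RAW_SPINLOCK, raw_spin_lock/unlock and raw_spin_lock_irqsave/irqrestore are the real kernel primitives used in the diff below.

/*
 * Sketch only: illustrative names, real raw_spinlock API.
 */
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(example_lock);   /* stays a spinning lock even on RT */
static unsigned int example_counter;

static void example_update(void)
{
	/* plain lock/unlock, as in switch_mmu_context() */
	raw_spin_lock(&example_lock);
	example_counter++;
	raw_spin_unlock(&example_lock);
}

static void example_update_irqsafe(void)
{
	unsigned long flags;

	/* IRQ-safe variant, as in destroy_context() */
	raw_spin_lock_irqsave(&example_lock, flags);
	example_counter--;
	raw_spin_unlock_irqrestore(&example_lock, flags);
}

The conversion is mechanical: the lock definition and every lock/unlock call switch to the raw_ variants, with no change to the critical sections themselves, as the diff shows.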
diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
index 1044a63..dbc6921 100644
--- a/arch/powerpc/mm/mmu_context_nohash.c
+++ b/arch/powerpc/mm/mmu_context_nohash.c
@@ -56,7 +56,7 @@ static unsigned int next_context, nr_free_contexts;
static unsigned long *context_map;
static unsigned long *stale_map[NR_CPUS];
static struct mm_struct **context_mm;
-static DEFINE_SPINLOCK(context_lock);
+static DEFINE_RAW_SPINLOCK(context_lock);
#define CTX_MAP_SIZE \
(sizeof(unsigned long) * (last_context / BITS_PER_LONG + 1))
@@ -121,9 +121,9 @@ static unsigned int steal_context_smp(unsigned int id)
/* This will happen if you have more CPUs than available contexts,
* all we can do here is wait a bit and try again
*/
- spin_unlock(&context_lock);
+ raw_spin_unlock(&context_lock);
cpu_relax();
- spin_lock(&context_lock);
+ raw_spin_lock(&context_lock);
/* This will cause the caller to try again */
return MMU_NO_CONTEXT;
@@ -194,7 +194,7 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
unsigned long *map;
/* No lockless fast path .. yet */
- spin_lock(&context_lock);
+ raw_spin_lock(&context_lock);
pr_hard("[%d] activating context for mm @%p, active=%d, id=%d",
cpu, next, next->context.active, next->context.id);
@@ -278,7 +278,7 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next)
/* Flick the MMU and release lock */
pr_hardcont(" -> %d\n", id);
set_context(id, next->pgd);
- spin_unlock(&context_lock);
+ raw_spin_unlock(&context_lock);
}
/*
@@ -307,7 +307,7 @@ void destroy_context(struct mm_struct *mm)
WARN_ON(mm->context.active != 0);
- spin_lock_irqsave(&context_lock, flags);
+ raw_spin_lock_irqsave(&context_lock, flags);
id = mm->context.id;
if (id != MMU_NO_CONTEXT) {
__clear_bit(id, context_map);
@@ -318,7 +318,7 @@ void destroy_context(struct mm_struct *mm)
context_mm[id] = NULL;
nr_free_contexts++;
}
- spin_unlock_irqrestore(&context_lock, flags);
+ raw_spin_unlock_irqrestore(&context_lock, flags);
}
#ifdef CONFIG_SMP