Diffstat (limited to 'include/asm-ppc/mmu_context.h')
-rw-r--r--  include/asm-ppc/mmu_context.h | 27 ++++++++++++++++-----------
1 file changed, 16 insertions(+), 11 deletions(-)
diff --git a/include/asm-ppc/mmu_context.h b/include/asm-ppc/mmu_context.h
index 94f2bf7..2bc8589 100644
--- a/include/asm-ppc/mmu_context.h
+++ b/include/asm-ppc/mmu_context.h
@@ -70,7 +70,7 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
#else
/* PPC 6xx, 7xx CPUs */
-#define NO_CONTEXT ((mm_context_t) -1)
+#define NO_CONTEXT ((unsigned long) -1)
#define LAST_CONTEXT 32767
#define FIRST_CONTEXT 1
#endif
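
The s/mm_context_t/unsigned long/ substitutions in this patch follow from mm_context_t no longer being a bare integer typedef: it becomes a struct holding both the context id and a per-mm vDSO base address. The new definition lives outside this file (presumably in asm-ppc/mmu.h, which is not part of this diff); a sketch of what it plausibly looks like:

typedef struct {
        unsigned long id;        /* MMU context number; NO_CONTEXT when unassigned */
        unsigned long vdso_base; /* user address of the vDSO mapping, 0 if none */
} mm_context_t;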
@@ -85,7 +85,7 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
* can be used for debugging on all processors (if you happen to have
* an Abatron).
*/
-extern void set_context(mm_context_t context, pgd_t *pgd);
+extern void set_context(unsigned long contextid, pgd_t *pgd);
/*
* Bitmap of contexts in use.
@@ -98,7 +98,7 @@ extern unsigned long context_map[];
* Its use is an optimization only, we can't rely on this context
* number to be free, but it usually will be.
*/
-extern mm_context_t next_mmu_context;
+extern unsigned long next_mmu_context;
/*
* If we don't have sufficient contexts to give one to every task
@@ -117,9 +117,9 @@ extern void steal_context(void);
*/
static inline void get_mmu_context(struct mm_struct *mm)
{
- mm_context_t ctx;
+ unsigned long ctx;
- if (mm->context != NO_CONTEXT)
+ if (mm->context.id != NO_CONTEXT)
return;
#ifdef FEW_CONTEXTS
while (atomic_dec_if_positive(&nr_free_contexts) < 0)
@@ -132,7 +132,7 @@ static inline void get_mmu_context(struct mm_struct *mm)
ctx = 0;
}
next_mmu_context = (ctx + 1) & LAST_CONTEXT;
- mm->context = ctx;
+ mm->context.id = ctx;
#ifdef FEW_CONTEXTS
context_mm[ctx] = mm;
#endif
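
The elided middle of get_mmu_context() (between the FEW_CONTEXTS wait loop and the "ctx = 0;" wrap shown above) presumably scans context_map for a zero bit starting at next_mmu_context. Below is a standalone userspace sketch of that rotating-hint bitmap allocator, using simple linear probing in place of the kernel's find_next_zero_bit(); all *_demo names are invented for this illustration:

#include <stdio.h>

#define LAST_CONTEXT    32767
#define FIRST_CONTEXT   1
#define BITS_PER_WORD   (8 * sizeof(unsigned long))
#define MAP_WORDS       ((LAST_CONTEXT + 1 + BITS_PER_WORD - 1) / BITS_PER_WORD)

static unsigned long context_map_demo[MAP_WORDS];
static unsigned long next_mmu_context_demo = FIRST_CONTEXT;

/* Non-atomic stand-in for the kernel's test_and_set_bit(). */
static int test_and_set_demo(unsigned long nr)
{
        unsigned long mask = 1UL << (nr % BITS_PER_WORD);
        int was_set = (context_map_demo[nr / BITS_PER_WORD] & mask) != 0;

        context_map_demo[nr / BITS_PER_WORD] |= mask;
        return was_set;
}

/* Allocate an unused context id, or return -1 if all are taken. */
static long alloc_context_demo(void)
{
        unsigned long ctx = next_mmu_context_demo;
        unsigned long probes = 0;

        while (test_and_set_demo(ctx)) {
                if (++probes > LAST_CONTEXT)
                        return -1;
                if (++ctx > LAST_CONTEXT)
                        ctx = 0;        /* wrap, as in the hunk above */
        }
        next_mmu_context_demo = (ctx + 1) & LAST_CONTEXT;
        return (long)ctx;
}

int main(void)
{
        test_and_set_demo(0);   /* id 0 reserved: FIRST_CONTEXT is 1 */
        printf("first:  %ld\n", alloc_context_demo());  /* prints 1 */
        printf("second: %ld\n", alloc_context_demo());  /* prints 2 */
        return 0;
}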
@@ -141,7 +141,12 @@ static inline void get_mmu_context(struct mm_struct *mm)
/*
* Set up the context for a new address space.
*/
-#define init_new_context(tsk,mm) (((mm)->context = NO_CONTEXT), 0)
+static inline int init_new_context(struct task_struct *t, struct mm_struct *mm)
+{
+ mm->context.id = NO_CONTEXT;
+ mm->context.vdso_base = 0;
+ return 0;
+}
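
Turning init_new_context() from a macro into a static inline buys type checking and readability now that two fields must be initialized; the equivalent comma-expression macro (hypothetical, not part of the patch) would have been legal C but noticeably uglier:

#define init_new_context(tsk, mm) \
        (((mm)->context.id = NO_CONTEXT), ((mm)->context.vdso_base = 0), 0)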
/*
* We're finished using the context for an address space.
@@ -149,9 +154,9 @@ static inline void get_mmu_context(struct mm_struct *mm)
static inline void destroy_context(struct mm_struct *mm)
{
preempt_disable();
- if (mm->context != NO_CONTEXT) {
- clear_bit(mm->context, context_map);
- mm->context = NO_CONTEXT;
+ if (mm->context.id != NO_CONTEXT) {
+ clear_bit(mm->context.id, context_map);
+ mm->context.id = NO_CONTEXT;
#ifdef FEW_CONTEXTS
atomic_inc(&nr_free_contexts);
#endif
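
The FEW_CONTEXTS paths above lean on a free-id counter and an id-to-mm reverse map that steal_context() uses to revoke a context from its current owner; both are declared elsewhere in this header, presumably along these lines:

extern atomic_t nr_free_contexts;                       /* ids still available */
extern struct mm_struct *context_mm[LAST_CONTEXT + 1];  /* id -> owning mm */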
@@ -179,7 +184,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
/* Setup new userspace context */
get_mmu_context(next);
- set_context(next->context, next->pgd);
+ set_context(next->context.id, next->pgd);
}
#define deactivate_mm(tsk,mm) do { } while (0)
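
Putting the hunks together, the context lifecycle after this patch runs roughly as follows (a paraphrase of the call sequence, not literal code from any one place):

/* fork()/exec(): the new mm starts with no hardware context */
init_new_context(tsk, mm);      /* context.id = NO_CONTEXT, vdso_base = 0 */

/* first switch into the task: allocate an id, program the MMU */
switch_mm(prev, mm, tsk);       /* get_mmu_context(mm);
                                 * set_context(mm->context.id, mm->pgd); */

/* exit(): give the id back to context_map */
destroy_context(mm);            /* clear_bit(context.id, ...); id = NO_CONTEXT */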