author     Stuart Menefy <stuart.menefy@st.com>      2007-11-30 17:06:36 +0900
committer  Paul Mundt <lethal@linux-sh.org>          2008-01-28 13:18:59 +0900
commit     cbaa118ecfd99fc5ed7adbd9c34a30e1c05e3c93
tree       e60db5c0f3573558c97f39cfab78732220a72e6d  /arch/sh/mm/pmb.c
parent     325df7f20467da07901c4f2b006d3457bba0adec
sh: Preparation for uncached jumps through PMB.
Presently most of the 29-bit physical parts do P1/P2 segmentation with a 1:1 cached/uncached mapping, jumping between the two to control the caching behaviour. This provides the basic infrastructure to maintain this behaviour on 32-bit physical parts that don't map P1/P2 at all, using a shiny new linker section and corresponding fixmap entry.

Signed-off-by: Stuart Menefy <stuart.menefy@st.com>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
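The __uses_jump_to_uncached annotation added throughout the diff below only appears here as a function marker. As a rough, non-authoritative sketch of the idea described above (the section name, the CONFIG_32BIT guard and the attribute form are assumptions, not taken verbatim from the tree), such a marker could be defined along these lines:

/*
 * Sketch only: tag a function so the linker collects it into a dedicated
 * ".uncached.text" section, which the new linker section and fixmap entry
 * can then expose through an uncached mapping on 32-bit physical parts.
 * 29-bit parts keep the fixed P1/P2 aliases, so the marker can be empty.
 */
#ifdef CONFIG_32BIT
# define __uses_jump_to_uncached  __attribute__ ((__section__(".uncached.text")))
#else
# define __uses_jump_to_uncached
#endif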
Diffstat (limited to 'arch/sh/mm/pmb.c')
-rw-r--r--  arch/sh/mm/pmb.c | 18 +++++++++---------
1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index ef6ab39..ab81c60 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -163,18 +163,18 @@ repeat:
 	return 0;
 }
 
-int set_pmb_entry(struct pmb_entry *pmbe)
+int __uses_jump_to_uncached set_pmb_entry(struct pmb_entry *pmbe)
 {
 	int ret;
 
-	jump_to_P2();
+	jump_to_uncached();
 	ret = __set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, &pmbe->entry);
-	back_to_P1();
+	back_to_cached();
 
 	return ret;
 }
 
-void clear_pmb_entry(struct pmb_entry *pmbe)
+void __uses_jump_to_uncached clear_pmb_entry(struct pmb_entry *pmbe)
 {
 	unsigned int entry = pmbe->entry;
 	unsigned long addr;
@@ -188,7 +188,7 @@ void clear_pmb_entry(struct pmb_entry *pmbe)
 		     entry >= NR_PMB_ENTRIES))
 		return;
 
-	jump_to_P2();
+	jump_to_uncached();
 
 	/* Clear V-bit */
 	addr = mk_pmb_addr(entry);
@@ -197,7 +197,7 @@ void clear_pmb_entry(struct pmb_entry *pmbe)
 	addr = mk_pmb_data(entry);
 	ctrl_outl(ctrl_inl(addr) & ~PMB_V, addr);
 
-	back_to_P1();
+	back_to_cached();
 
 	clear_bit(entry, &pmb_map);
 }
@@ -302,7 +302,7 @@ static void pmb_cache_ctor(struct kmem_cache *cachep, void *pmb)
 	pmbe->entry = PMB_NO_ENTRY;
 }
 
-static int __init pmb_init(void)
+static int __uses_jump_to_uncached pmb_init(void)
 {
 	unsigned int nr_entries = ARRAY_SIZE(pmb_init_map);
 	unsigned int entry, i;
@@ -312,7 +312,7 @@ static int __init pmb_init(void)
 	pmb_cache = kmem_cache_create("pmb", sizeof(struct pmb_entry), 0,
 				      SLAB_PANIC, pmb_cache_ctor);
 
-	jump_to_P2();
+	jump_to_uncached();
 
 	/*
 	 * Ordering is important, P2 must be mapped in the PMB before we
@@ -335,7 +335,7 @@ static int __init pmb_init(void)
 	i |= MMUCR_TI;
 	ctrl_outl(i, MMUCR);
 
-	back_to_P1();
+	back_to_cached();
 
 	return 0;
 }
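Taken together, the hunks above settle on one calling pattern for code that rewrites PMB entries: tag the function with __uses_jump_to_uncached and bracket the register accesses with jump_to_uncached()/back_to_cached(). A minimal sketch of that pattern, mirroring set_pmb_entry() from the diff (pmb_frob_example is a made-up name used purely for illustration):

/* Illustrative only -- pmb_frob_example is hypothetical. */
int __uses_jump_to_uncached pmb_frob_example(struct pmb_entry *pmbe)
{
	int ret;

	jump_to_uncached();	/* continue from the uncached alias of this code */
	ret = __set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, &pmbe->entry);
	back_to_cached();	/* resume normal cached execution */

	return ret;
}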