author     Stuart Menefy <stuart.menefy@st.com>    2007-11-30 17:06:36 +0900
committer  Paul Mundt <lethal@linux-sh.org>        2008-01-28 13:18:59 +0900
commit     cbaa118ecfd99fc5ed7adbd9c34a30e1c05e3c93 (patch)
tree       e60db5c0f3573558c97f39cfab78732220a72e6d
parent     325df7f20467da07901c4f2b006d3457bba0adec (diff)
sh: Preparation for uncached jumps through PMB.
Presently most of the 29-bit physical parts do P1/P2 segmentation
with a 1:1 cached/uncached mapping, jumping between the two to
control the caching behaviour. This provides the basic infrastructure
to maintain this behaviour on 32-bit physical parts that don't map
P1/P2 at all, using a shiny new linker section and corresponding
fixmap entry.
Signed-off-by: Stuart Menefy <stuart.menefy@st.com>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
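
The trick the message refers to works like this: on 29-bit parts, P1 (cached)
and P2 (uncached) are fixed 512MB windows onto the same physical memory, so
the uncached alias of any kernel address sits at a constant offset
(P2SEG - P1SEG = 0x20000000) from its cached one. The old jump_to_P2()
hard-coded that constant by OR-ing it into the PC; this patch turns it into
the cached_to_uncached variable so that 32-bit parts, which have no such
segments, can point it at the FIX_UNCACHED fixmap alias instead. A minimal
host-side sketch of the address arithmetic, assuming only the P1SEG/P2SEG
values from the SH headers (the sample address in main() is made up):

#include <stdio.h>

#define P1SEG 0x80000000UL	/* cached, 1:1 mapped   */
#define P2SEG 0xa0000000UL	/* uncached, 1:1 mapped */

/* On 29-bit parts this is what paging_init() computes below. */
static unsigned long cached_to_uncached = P2SEG - P1SEG;

/* Uncached alias of a cached kernel address. */
static unsigned long uncached_alias(unsigned long cached)
{
	return cached + cached_to_uncached;
}

int main(void)
{
	unsigned long fn = 0x8c001234UL;	/* hypothetical P1 text address */

	printf("cached:   %#010lx\n", fn);
	printf("uncached: %#010lx\n", uncached_alias(fn));	/* 0xac001234 */
	return 0;
}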
-rw-r--r--  arch/sh/kernel/cpu/init.c        |  6
-rw-r--r--  arch/sh/kernel/cpu/sh3/probe.c   |  6
-rw-r--r--  arch/sh/kernel/vmlinux_32.lds.S  |  9
-rw-r--r--  arch/sh/mm/cache-debugfs.c       |  9
-rw-r--r--  arch/sh/mm/cache-sh4.c           | 14
-rw-r--r--  arch/sh/mm/cache-sh7705.c        | 12
-rw-r--r--  arch/sh/mm/init.c                | 16
-rw-r--r--  arch/sh/mm/pmb.c                 | 18
-rw-r--r--  arch/sh/mm/tlb-sh4.c             |  7
-rw-r--r--  include/asm-sh/fixmap.h          |  1
-rw-r--r--  include/asm-sh/sections.h        |  1
-rw-r--r--  include/asm-sh/system.h          |  2
-rw-r--r--  include/asm-sh/system_32.h       | 36
-rw-r--r--  include/asm-sh/system_64.h       |  7

14 files changed, 88 insertions(+), 56 deletions(-)
diff --git a/arch/sh/kernel/cpu/init.c b/arch/sh/kernel/cpu/init.c
index fd1688e6..0f0c76a 100644
--- a/arch/sh/kernel/cpu/init.c
+++ b/arch/sh/kernel/cpu/init.c
@@ -64,11 +64,11 @@ static void __init speculative_execution_init(void)
  * Generic first-level cache init
  */
 #ifdef CONFIG_SUPERH32
-static void __init cache_init(void)
+static void __uses_jump_to_uncached cache_init(void)
 {
 	unsigned long ccr, flags;
 
-	jump_to_P2();
+	jump_to_uncached();
 	ccr = ctrl_inl(CCR);
 
 	/*
@@ -145,7 +145,7 @@ static void __init cache_init(void)
 #endif
 
 	ctrl_outl(flags, CCR);
-	back_to_P1();
+	back_to_cached();
 }
 #else
 #define cache_init()	do { } while (0)
diff --git a/arch/sh/kernel/cpu/sh3/probe.c b/arch/sh/kernel/cpu/sh3/probe.c
index bf579e0..22070e4 100644
--- a/arch/sh/kernel/cpu/sh3/probe.c
+++ b/arch/sh/kernel/cpu/sh3/probe.c
@@ -16,11 +16,11 @@
 #include <asm/cache.h>
 #include <asm/io.h>
 
-int __init detect_cpu_and_cache_system(void)
+int __uses_jump_to_uncached detect_cpu_and_cache_system(void)
 {
 	unsigned long addr0, addr1, data0, data1, data2, data3;
 
-	jump_to_P2();
+	jump_to_uncached();
 	/*
 	 * Check if the entry shadows or not.
 	 * When shadowed, it's 128-entry system.
@@ -48,7 +48,7 @@ int __init detect_cpu_and_cache_system(void)
 	ctrl_outl(data0&~SH_CACHE_VALID, addr0);
 	ctrl_outl(data2&~SH_CACHE_VALID, addr1);
 
-	back_to_P1();
+	back_to_cached();
 
 	boot_cpu_data.dcache.ways		= 4;
 	boot_cpu_data.dcache.entry_shift	= 4;
diff --git a/arch/sh/kernel/vmlinux_32.lds.S b/arch/sh/kernel/vmlinux_32.lds.S
index 0956fb3..50c69c1 100644
--- a/arch/sh/kernel/vmlinux_32.lds.S
+++ b/arch/sh/kernel/vmlinux_32.lds.S
@@ -43,6 +43,15 @@ SECTIONS
 	NOTES
 	RO_DATA(PAGE_SIZE)
 
+	/*
+	 * Code which must be executed uncached and the associated data
+	 */
+	. = ALIGN(PAGE_SIZE);
+	__uncached_start = .;
+	.uncached.text : { *(.uncached.text) }
+	.uncached.data : { *(.uncached.data) }
+	__uncached_end = .;
+
 	. = ALIGN(THREAD_SIZE);
 	.data : {			/* Data */
 		*(.data.init_task)
diff --git a/arch/sh/mm/cache-debugfs.c b/arch/sh/mm/cache-debugfs.c
index de6d2c9..db6d950 100644
--- a/arch/sh/mm/cache-debugfs.c
+++ b/arch/sh/mm/cache-debugfs.c
@@ -22,7 +22,8 @@ enum cache_type {
 	CACHE_TYPE_UNIFIED,
 };
 
-static int cache_seq_show(struct seq_file *file, void *iter)
+static int __uses_jump_to_uncached cache_seq_show(struct seq_file *file,
+						  void *iter)
 {
 	unsigned int cache_type = (unsigned int)file->private;
 	struct cache_info *cache;
@@ -34,11 +35,11 @@ static int cache_seq_show(struct seq_file *file, void *iter)
 	 * Go uncached immediately so we don't skew the results any
 	 * more than we already are..
 	 */
-	jump_to_P2();
+	jump_to_uncached();
 	ccr = ctrl_inl(CCR);
 
 	if ((ccr & CCR_CACHE_ENABLE) == 0) {
-		back_to_P1();
+		back_to_cached();
 
 		seq_printf(file, "disabled\n");
 		return 0;
@@ -104,7 +105,7 @@ static int cache_seq_show(struct seq_file *file, void *iter)
 		addrstart += cache->way_incr;
 	}
 
-	back_to_P1();
+	back_to_cached();
 
 	return 0;
 }
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index 226b190..43d7ff6b 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -190,7 +190,7 @@ void flush_icache_range(unsigned long start, unsigned long end)
  * .. which happens to be the same behavior as flush_icache_range().
  * So, we simply flush out a line.
  */
-void flush_cache_sigtramp(unsigned long addr)
+void __uses_jump_to_uncached flush_cache_sigtramp(unsigned long addr)
 {
 	unsigned long v, index;
 	unsigned long flags;
@@ -205,13 +205,13 @@ void flush_cache_sigtramp(unsigned long addr)
 		(v & boot_cpu_data.icache.entry_mask);
 
 	local_irq_save(flags);
-	jump_to_P2();
+	jump_to_uncached();
 
 	for (i = 0; i < boot_cpu_data.icache.ways;
 	     i++, index += boot_cpu_data.icache.way_incr)
 		ctrl_outl(0, index);	/* Clear out Valid-bit */
 
-	back_to_P1();
+	back_to_cached();
 	wmb();
 	local_irq_restore(flags);
 }
@@ -256,12 +256,12 @@ void flush_dcache_page(struct page *page)
 }
 
 /* TODO: Selective icache invalidation through IC address array.. */
-static inline void flush_icache_all(void)
+static inline void __uses_jump_to_uncached flush_icache_all(void)
 {
 	unsigned long flags, ccr;
 
 	local_irq_save(flags);
-	jump_to_P2();
+	jump_to_uncached();
 
 	/* Flush I-cache */
 	ccr = ctrl_inl(CCR);
@@ -269,11 +269,11 @@ static inline void flush_icache_all(void)
 	ctrl_outl(ccr, CCR);
 
 	/*
-	 * back_to_P1() will take care of the barrier for us, don't add
+	 * back_to_cached() will take care of the barrier for us, don't add
 	 * another one!
 	 */
-	back_to_P1();
+	back_to_cached();
 	local_irq_restore(flags);
 }
diff --git a/arch/sh/mm/cache-sh7705.c b/arch/sh/mm/cache-sh7705.c
index 4896d73..22dacc7 100644
--- a/arch/sh/mm/cache-sh7705.c
+++ b/arch/sh/mm/cache-sh7705.c
@@ -71,7 +71,7 @@ void flush_icache_range(unsigned long start, unsigned long end)
 /*
  * Writeback&Invalidate the D-cache of the page
  */
-static void __flush_dcache_page(unsigned long phys)
+static void __uses_jump_to_uncached __flush_dcache_page(unsigned long phys)
 {
 	unsigned long ways, waysize, addrstart;
 	unsigned long flags;
@@ -92,7 +92,7 @@ static void __flush_dcache_page(unsigned long phys)
 	 * possible.
 	 */
 	local_irq_save(flags);
-	jump_to_P2();
+	jump_to_uncached();
 
 	ways = current_cpu_data.dcache.ways;
 	waysize = current_cpu_data.dcache.sets;
@@ -118,7 +118,7 @@ static void __flush_dcache_page(unsigned long phys)
 		addrstart += current_cpu_data.dcache.way_incr;
 	} while (--ways);
 
-	back_to_P1();
+	back_to_cached();
 	local_irq_restore(flags);
 }
 
@@ -132,15 +132,15 @@ void flush_dcache_page(struct page *page)
 		__flush_dcache_page(PHYSADDR(page_address(page)));
 }
 
-void flush_cache_all(void)
+void __uses_jump_to_uncached flush_cache_all(void)
 {
 	unsigned long flags;
 
 	local_irq_save(flags);
-	jump_to_P2();
+	jump_to_uncached();
 
 	cache_wback_all();
-	back_to_P1();
+	back_to_cached();
 	local_irq_restore(flags);
 }
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 79c3097..094225e 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -23,6 +23,7 @@ DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 
 pgd_t swapper_pg_dir[PTRS_PER_PGD];
+unsigned long cached_to_uncached = 0;
 
 void show_mem(void)
 {
@@ -99,7 +100,8 @@ static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
 
 	set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));
 
-	flush_tlb_one(get_asid(), addr);
+	if (cached_to_uncached)
+		flush_tlb_one(get_asid(), addr);
 }
 
 /*
@@ -164,6 +166,18 @@ void __init paging_init(void)
 	}
 
 	free_area_init_nodes(max_zone_pfns);
+
+	/* Set up the uncached fixmap */
+	set_fixmap_nocache(FIX_UNCACHED, __pa(&__uncached_start));
+
+#ifdef CONFIG_29BIT
+	/*
+	 * Handle trivial transitions between cached and uncached
+	 * segments, making use of the 1:1 mapping relationship in
+	 * 512MB lowmem.
+	 */
+	cached_to_uncached = P2SEG - P1SEG;
+#endif
 }
 
 static struct kcore_list kcore_mem, kcore_vmalloc;
diff --git a/arch/sh/mm/pmb.c b/arch/sh/mm/pmb.c
index ef6ab39..ab81c60 100644
--- a/arch/sh/mm/pmb.c
+++ b/arch/sh/mm/pmb.c
@@ -163,18 +163,18 @@ repeat:
 	return 0;
 }
 
-int set_pmb_entry(struct pmb_entry *pmbe)
+int __uses_jump_to_uncached set_pmb_entry(struct pmb_entry *pmbe)
 {
 	int ret;
 
-	jump_to_P2();
+	jump_to_uncached();
 	ret = __set_pmb_entry(pmbe->vpn, pmbe->ppn, pmbe->flags, &pmbe->entry);
-	back_to_P1();
+	back_to_cached();
 
 	return ret;
 }
 
-void clear_pmb_entry(struct pmb_entry *pmbe)
+void __uses_jump_to_uncached clear_pmb_entry(struct pmb_entry *pmbe)
 {
 	unsigned int entry = pmbe->entry;
 	unsigned long addr;
@@ -188,7 +188,7 @@ void clear_pmb_entry(struct pmb_entry *pmbe)
 		    entry >= NR_PMB_ENTRIES))
 		return;
 
-	jump_to_P2();
+	jump_to_uncached();
 
 	/* Clear V-bit */
 	addr = mk_pmb_addr(entry);
@@ -197,7 +197,7 @@ void clear_pmb_entry(struct pmb_entry *pmbe)
 	addr = mk_pmb_data(entry);
 	ctrl_outl(ctrl_inl(addr) & ~PMB_V, addr);
 
-	back_to_P1();
+	back_to_cached();
 
 	clear_bit(entry, &pmb_map);
 }
@@ -302,7 +302,7 @@ static void pmb_cache_ctor(struct kmem_cache *cachep, void *pmb)
 	pmbe->entry = PMB_NO_ENTRY;
 }
 
-static int __init pmb_init(void)
+static int __uses_jump_to_uncached pmb_init(void)
 {
 	unsigned int nr_entries = ARRAY_SIZE(pmb_init_map);
 	unsigned int entry, i;
@@ -312,7 +312,7 @@ static int __init pmb_init(void)
 	pmb_cache = kmem_cache_create("pmb", sizeof(struct pmb_entry), 0,
 				      SLAB_PANIC, pmb_cache_ctor);
 
-	jump_to_P2();
+	jump_to_uncached();
 
 	/*
 	 * Ordering is important, P2 must be mapped in the PMB before we
@@ -335,7 +335,7 @@ static int __init pmb_init(void)
 	i |= MMUCR_TI;
 	ctrl_outl(i, MMUCR);
 
-	back_to_P1();
+	back_to_cached();
 
 	return 0;
 }
diff --git a/arch/sh/mm/tlb-sh4.c b/arch/sh/mm/tlb-sh4.c
index 2d1dd60..f0c7b73 100644
--- a/arch/sh/mm/tlb-sh4.c
+++ b/arch/sh/mm/tlb-sh4.c
@@ -79,7 +79,8 @@ void update_mmu_cache(struct vm_area_struct * vma,
 	local_irq_restore(flags);
 }
 
-void local_flush_tlb_one(unsigned long asid, unsigned long page)
+void __uses_jump_to_uncached local_flush_tlb_one(unsigned long asid,
+						 unsigned long page)
 {
 	unsigned long addr, data;
 
@@ -91,7 +92,7 @@ void local_flush_tlb_one(unsigned long asid, unsigned long page)
 	 */
 	addr = MMU_UTLB_ADDRESS_ARRAY | MMU_PAGE_ASSOC_BIT;
 	data = page | asid; /* VALID bit is off */
-	jump_to_P2();
+	jump_to_uncached();
 	ctrl_outl(data, addr);
-	back_to_P1();
+	back_to_cached();
 }
diff --git a/include/asm-sh/fixmap.h b/include/asm-sh/fixmap.h
index 09463cd..721fcc4 100644
--- a/include/asm-sh/fixmap.h
+++ b/include/asm-sh/fixmap.h
@@ -49,6 +49,7 @@ enum fixed_addresses {
 #define FIX_N_COLOURS 16
 	FIX_CMAP_BEGIN,
 	FIX_CMAP_END = FIX_CMAP_BEGIN + FIX_N_COLOURS,
+	FIX_UNCACHED,
 #ifdef CONFIG_HIGHMEM
 	FIX_KMAP_BEGIN,	/* reserved pte's for temporary kernel mappings */
 	FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
diff --git a/include/asm-sh/sections.h b/include/asm-sh/sections.h
index bd9cbc9..8f8f4ad 100644
--- a/include/asm-sh/sections.h
+++ b/include/asm-sh/sections.h
@@ -4,6 +4,7 @@
 #include <asm-generic/sections.h>
 
 extern long __machvec_start, __machvec_end;
+extern char __uncached_start, __uncached_end;
 extern char _ebss[];
 
 #endif /* __ASM_SH_SECTIONS_H */
diff --git a/include/asm-sh/system.h b/include/asm-sh/system.h
index 969f3d4..9bda8d0 100644
--- a/include/asm-sh/system.h
+++ b/include/asm-sh/system.h
@@ -144,6 +144,8 @@ extern unsigned int instruction_size(unsigned int insn);
 #define instruction_size(insn)	(4)
 #endif
 
+extern unsigned long cached_to_uncached;
+
 /* XXX
  * disable hlt during certain critical i/o operations
  */
diff --git a/include/asm-sh/system_32.h b/include/asm-sh/system_32.h
index ad37e8d..e918bac 100644
--- a/include/asm-sh/system_32.h
+++ b/include/asm-sh/system_32.h
@@ -58,29 +58,31 @@ do {								\
 	last = __last;						\
 } while (0)
 
+#define __uses_jump_to_uncached __attribute__ ((__section__ (".uncached.text")))
+
 /*
- * Jump to P2 area.
- * When handling TLB or caches, we need to do it from P2 area.
+ * Jump to uncached area.
+ * When handling TLB or caches, we need to do it from an uncached area.
  */
-#define jump_to_P2()			\
-do {					\
-	unsigned long __dummy;		\
-	__asm__ __volatile__(		\
-		"mov.l	1f, %0\n\t"	\
-		"or	%1, %0\n\t"	\
-		"jmp	@%0\n\t"	\
-		" nop\n\t"		\
-		".balign 4\n"		\
-		"1:	.long 2f\n"	\
-		"2:"			\
-		: "=&r" (__dummy)	\
-		: "r" (0x20000000));	\
+#define jump_to_uncached()			\
+do {						\
+	unsigned long __dummy;			\
+						\
+	__asm__ __volatile__(			\
+		"mova	1f, %0\n\t"		\
+		"add	%1, %0\n\t"		\
+		"jmp	@%0\n\t"		\
+		" nop\n\t"			\
+		".balign 4\n"			\
+		"1:"				\
+		: "=&z" (__dummy)		\
+		: "r" (cached_to_uncached));	\
 } while (0)
 
 /*
- * Back to P1 area.
+ * Back to cached area.
  */
-#define back_to_P1()					\
+#define back_to_cached()				\
 do {							\
 	unsigned long __dummy;				\
 	ctrl_barrier();					\
diff --git a/include/asm-sh/system_64.h b/include/asm-sh/system_64.h
index 0e466e9..943acf5 100644
--- a/include/asm-sh/system_64.h
+++ b/include/asm-sh/system_64.h
@@ -32,8 +32,9 @@ do {								\
 			 &next->thread);			\
 } while (0)
 
-/* No segmentation.. */
-#define jump_to_P2()	do { } while (0)
-#define back_to_P1()	do { } while (0)
+#define __uses_jump_to_uncached
+
+#define jump_to_uncached()	do { } while (0)
+#define back_to_cached()	do { } while (0)
 
 #endif /* __ASM_SH_SYSTEM_64_H */
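
A note on the calling convention the patch establishes: jump_to_uncached()
continues execution at the uncached alias of the current PC, so any caller
must itself live in the new .uncached.text section, which is exactly what the
__uses_jump_to_uncached attribute arranges. The switch from mov.l/or to
mova/add in the asm is what makes the offset dynamic: mova computes a
PC-relative address at run time, and the "=&z" constraint pins the temporary
to r0, the only destination register mova supports. A hedged sketch of the
pattern every converted function above follows (demo_cache_op() and its CCR
fiddling are illustrative, not part of the patch):

void __uses_jump_to_uncached demo_cache_op(void)
{
	unsigned long ccr, flags;

	local_irq_save(flags);
	jump_to_uncached();	/* now executing from the uncached alias */

	/* Safe to poke the cache controller: no fills from this code path. */
	ccr = ctrl_inl(CCR);
	ctrl_outl(ccr | CCR_CACHE_ENABLE, CCR);

	back_to_cached();	/* ctrl_barrier(), then branch back to cached text */
	local_irq_restore(flags);
}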