From cbbe2f68f678a90bebeb30b8a7fcd8aed0614879 Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Sat, 15 Aug 2009 09:30:39 +0900
Subject: sh: rename pg-mmu.c -> cache.c, enable generically.

This builds in the newly created cache.c (renamed from pg-mmu.c) for both
MMU and NOMMU configurations. The kmap_coherent() stubs and the alias
information recorded by each CPU family take care of doing the right thing
while enabling the code to be commonly shared.

Signed-off-by: Paul Mundt
---
 arch/sh/mm/cache.c | 129 ++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 129 insertions(+)
 create mode 100644 arch/sh/mm/cache.c

diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
new file mode 100644
index 0000000..f51d0a4
--- /dev/null
+++ b/arch/sh/mm/cache.c
@@ -0,0 +1,129 @@
+/*
+ * arch/sh/mm/pg-mmu.c
+ *
+ * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
+ * Copyright (C) 2002 - 2009  Paul Mundt
+ *
+ * Released under the terms of the GNU GPL v2.0.
+ */
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/mutex.h>
+#include <linux/fs.h>
+#include <linux/highmem.h>
+#include <linux/module.h>
+#include <asm/mmu_context.h>
+#include <asm/cacheflush.h>
+
+void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
+                       unsigned long vaddr, void *dst, const void *src,
+                       unsigned long len)
+{
+        if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
+            !test_bit(PG_dcache_dirty, &page->flags)) {
+                void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
+                memcpy(vto, src, len);
+                kunmap_coherent();
+        } else {
+                memcpy(dst, src, len);
+                if (boot_cpu_data.dcache.n_aliases)
+                        set_bit(PG_dcache_dirty, &page->flags);
+        }
+
+        if (vma->vm_flags & VM_EXEC)
+                flush_cache_page(vma, vaddr, page_to_pfn(page));
+}
+
+void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
+                         unsigned long vaddr, void *dst, const void *src,
+                         unsigned long len)
+{
+        if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
+            !test_bit(PG_dcache_dirty, &page->flags)) {
+                void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
+                memcpy(dst, vfrom, len);
+                kunmap_coherent();
+        } else {
+                memcpy(dst, src, len);
+                if (boot_cpu_data.dcache.n_aliases)
+                        set_bit(PG_dcache_dirty, &page->flags);
+        }
+}
+
+void copy_user_highpage(struct page *to, struct page *from,
+                        unsigned long vaddr, struct vm_area_struct *vma)
+{
+        void *vfrom, *vto;
+
+        vto = kmap_atomic(to, KM_USER1);
+
+        if (boot_cpu_data.dcache.n_aliases && page_mapped(from) &&
+            !test_bit(PG_dcache_dirty, &from->flags)) {
+                vfrom = kmap_coherent(from, vaddr);
+                copy_page(vto, vfrom);
+                kunmap_coherent();
+        } else {
+                vfrom = kmap_atomic(from, KM_USER0);
+                copy_page(vto, vfrom);
+                kunmap_atomic(vfrom, KM_USER0);
+        }
+
+        if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
+                __flush_wback_region(vto, PAGE_SIZE);
+
+        kunmap_atomic(vto, KM_USER1);
+        /* Make sure this page is cleared on other CPU's too before using it */
+        smp_wmb();
+}
+EXPORT_SYMBOL(copy_user_highpage);
+
+void clear_user_highpage(struct page *page, unsigned long vaddr)
+{
+        void *kaddr = kmap_atomic(page, KM_USER0);
+
+        clear_page(kaddr);
+
+        if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK))
+                __flush_wback_region(kaddr, PAGE_SIZE);
+
+        kunmap_atomic(kaddr, KM_USER0);
+}
+EXPORT_SYMBOL(clear_user_highpage);
+
+void __update_cache(struct vm_area_struct *vma,
+                    unsigned long address, pte_t pte)
+{
+        struct page *page;
+        unsigned long pfn = pte_pfn(pte);
+
+        if (!boot_cpu_data.dcache.n_aliases)
+                return;
+
+        page = pfn_to_page(pfn);
+        if (pfn_valid(pfn) && page_mapping(page)) {
+                int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
+                if (dirty) {
+                        unsigned long addr = (unsigned long)page_address(page);
+
+                        if (pages_do_alias(addr, address & PAGE_MASK))
+                                __flush_wback_region((void *)addr, PAGE_SIZE);
+                }
+        }
+}
+
+void __flush_anon_page(struct page *page, unsigned long vmaddr)
+{
+        unsigned long addr = (unsigned long) page_address(page);
+
+        if (pages_do_alias(addr, vmaddr)) {
+                if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
+                    !test_bit(PG_dcache_dirty, &page->flags)) {
+                        void *kaddr;
+
+                        kaddr = kmap_coherent(page, vmaddr);
+                        __flush_wback_region((void *)kaddr, PAGE_SIZE);
+                        kunmap_coherent();
+                } else
+                        __flush_wback_region((void *)addr, PAGE_SIZE);
+        }
+}
--
cgit v1.1
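
A note on the guard all of these copy routines share: on a virtually indexed
cache, two mappings of the same physical page can land in different cache
lines whenever their index bits above PAGE_SHIFT differ, which is what
n_aliases and pages_do_alias() encode. A minimal standalone sketch of the
check, assuming the usual (addr1 ^ addr2) & alias_mask formulation; the mask
value and the addresses below are illustrative, not probed from real
hardware:

#include <stdio.h>

#define PAGE_SHIFT      12
#define PAGE_SIZE       (1UL << PAGE_SHIFT)

/* Index bits above the page offset; 0x3000 corresponds to a 16KB
 * direct-mapped cache with 4KB pages (two colour bits). */
static unsigned long alias_mask = 0x3000;

/* Two mappings alias when they differ in the colour bits. */
static int pages_do_alias(unsigned long addr1, unsigned long addr2)
{
        return ((addr1 ^ addr2) & alias_mask) != 0;
}

int main(void)
{
        unsigned long kaddr = 0x80001000;       /* kernel-side mapping */
        unsigned long uaddr = 0x00404000;       /* user-side mapping   */

        printf("colour(k)=%lx colour(u)=%lx alias=%s\n",
               (kaddr & alias_mask) >> PAGE_SHIFT,
               (uaddr & alias_mask) >> PAGE_SHIFT,
               pages_do_alias(kaddr, uaddr) ? "yes" : "no");
        return 0;
}

When the two colours differ, the same physical page occupies two distinct
cache lines, and that is precisely the case where the routines above fall
back to kmap_coherent() or mark the page PG_dcache_dirty.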
From ecba1060583635ab55092072441ff903b5e9a659 Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Sat, 15 Aug 2009 11:05:42 +0900
Subject: sh: Centralize the CPU cache initialization routines.

This provides a central point for CPU cache initialization routines.

This replaces the antiquated p3_cache_init() method, which the vast
majority of CPUs never cared about.

Signed-off-by: Paul Mundt
---
 arch/sh/mm/cache.c | 11 +++++++++++
 1 file changed, 11 insertions(+)

diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
index f51d0a4..659981f 100644
--- a/arch/sh/mm/cache.c
+++ b/arch/sh/mm/cache.c
@@ -127,3 +127,14 @@ void __flush_anon_page(struct page *page, unsigned long vmaddr)
                         __flush_wback_region((void *)addr, PAGE_SIZE);
         }
 }
+
+void __init cpu_cache_init(void)
+{
+        if ((boot_cpu_data.family == CPU_FAMILY_SH4) ||
+            (boot_cpu_data.family == CPU_FAMILY_SH4A) ||
+            (boot_cpu_data.family == CPU_FAMILY_SH4AL_DSP)) {
+                extern void __weak sh4_cache_init(void);
+
+                sh4_cache_init();
+        }
+}
--
cgit v1.1

From 27d59ec1709817a90aa3ab7169f60994a89ad2f5 Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Sat, 15 Aug 2009 11:11:16 +0900
Subject: sh: Move alias computation to shared cache init.

This migrates the alias computation and printing of probed cache
parameters from the SH-4 code to the shared cpu_cache_init(). This
permits other platforms with aliases to make use of the same probe
logic without having to roll their own, and also produces consistent
output regardless of platform.

Signed-off-by: Paul Mundt
---
 arch/sh/mm/cache.c | 46 ++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 46 insertions(+)

diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
index 659981f..a31e5c4 100644
--- a/arch/sh/mm/cache.c
+++ b/arch/sh/mm/cache.c
@@ -128,8 +128,52 @@ void __flush_anon_page(struct page *page, unsigned long vmaddr)
         }
 }
 
+static void compute_alias(struct cache_info *c)
+{
+        c->alias_mask = ((c->sets - 1) << c->entry_shift) & ~(PAGE_SIZE - 1);
+        c->n_aliases = c->alias_mask ? (c->alias_mask >> PAGE_SHIFT) + 1 : 0;
+}
+
+static void __init emit_cache_params(void)
+{
+        printk(KERN_NOTICE "I-cache : n_ways=%d n_sets=%d way_incr=%d\n",
+               boot_cpu_data.icache.ways,
+               boot_cpu_data.icache.sets,
+               boot_cpu_data.icache.way_incr);
+        printk(KERN_NOTICE "I-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
+               boot_cpu_data.icache.entry_mask,
+               boot_cpu_data.icache.alias_mask,
+               boot_cpu_data.icache.n_aliases);
+        printk(KERN_NOTICE "D-cache : n_ways=%d n_sets=%d way_incr=%d\n",
+               boot_cpu_data.dcache.ways,
+               boot_cpu_data.dcache.sets,
+               boot_cpu_data.dcache.way_incr);
+        printk(KERN_NOTICE "D-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
+               boot_cpu_data.dcache.entry_mask,
+               boot_cpu_data.dcache.alias_mask,
+               boot_cpu_data.dcache.n_aliases);
+
+        /*
+         * Emit Secondary Cache parameters if the CPU has a probed L2.
+         */
+        if (boot_cpu_data.flags & CPU_HAS_L2_CACHE) {
+                printk(KERN_NOTICE "S-cache : n_ways=%d n_sets=%d way_incr=%d\n",
+                       boot_cpu_data.scache.ways,
+                       boot_cpu_data.scache.sets,
+                       boot_cpu_data.scache.way_incr);
+                printk(KERN_NOTICE "S-cache : entry_mask=0x%08x alias_mask=0x%08x n_aliases=%d\n",
+                       boot_cpu_data.scache.entry_mask,
+                       boot_cpu_data.scache.alias_mask,
+                       boot_cpu_data.scache.n_aliases);
+        }
+}
+
 void __init cpu_cache_init(void)
 {
+        compute_alias(&boot_cpu_data.icache);
+        compute_alias(&boot_cpu_data.dcache);
+        compute_alias(&boot_cpu_data.scache);
+
         if ((boot_cpu_data.family == CPU_FAMILY_SH4) ||
             (boot_cpu_data.family == CPU_FAMILY_SH4A) ||
             (boot_cpu_data.family == CPU_FAMILY_SH4AL_DSP)) {
@@ -137,4 +181,6 @@ void __init cpu_cache_init(void)
 
                 sh4_cache_init();
         }
+
+        emit_cache_params();
 }
--
cgit v1.1
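
To make compute_alias() concrete, here is the arithmetic for a classic
SH-4-style data cache: 16KB, 32-byte lines, direct mapped, giving 512 sets
with an entry_shift of 5. The geometry is illustrative, not read back from
any particular part, but the computation is the one in the patch above:

#include <stdio.h>

#define PAGE_SHIFT      12
#define PAGE_SIZE       (1UL << PAGE_SHIFT)

struct cache_info {
        unsigned int sets;
        unsigned int entry_shift;
        unsigned long alias_mask;
        unsigned int n_aliases;
};

/* Same computation as compute_alias() in the patch above. */
static void compute_alias(struct cache_info *c)
{
        c->alias_mask = ((c->sets - 1) << c->entry_shift) & ~(PAGE_SIZE - 1);
        c->n_aliases = c->alias_mask ? (c->alias_mask >> PAGE_SHIFT) + 1 : 0;
}

int main(void)
{
        /* 16KB / 32-byte lines / direct mapped => 512 sets, shift 5 */
        struct cache_info dcache = { .sets = 512, .entry_shift = 5 };

        compute_alias(&dcache);

        /* (511 << 5) & ~0xfff = 0x3fe0 & ~0xfff = 0x3000, so
         * (0x3000 >> 12) + 1 = 4 page colours. */
        printf("alias_mask=0x%04lx n_aliases=%u\n",
               dcache.alias_mask, dcache.n_aliases);
        return 0;
}

In other words, a 16KB way spans four 4KB pages, so each physical page can
appear at four distinct cache colours; caches whose way size does not exceed
the page size get an alias_mask of zero and n_aliases of zero, which is what
lets the NOMMU and small-cache parts share this file unchanged.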
From 37443ef3f0406e855e169c87ae3f4ffb4b6ff635 Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Sat, 15 Aug 2009 12:29:49 +0900
Subject: sh: Migrate SH-4 cacheflush ops to function pointers.

This paves the way for allowing individual CPUs to overload the
individual flushing routines that they care about without having to
depend on weak aliases. SH-4 is converted over initially, as it wires
up pretty much everything. The majority of the other CPUs will simply
use the default no-op implementation with their own region flushers
wired up.

Signed-off-by: Paul Mundt
---
 arch/sh/mm/cache.c | 70 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 70 insertions(+)

diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
index a31e5c4..da5bc6a 100644
--- a/arch/sh/mm/cache.c
+++ b/arch/sh/mm/cache.c
@@ -15,6 +15,62 @@
 #include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
 
+void (*flush_cache_all)(void);
+void (*flush_cache_mm)(struct mm_struct *mm);
+void (*flush_cache_dup_mm)(struct mm_struct *mm);
+void (*flush_cache_page)(struct vm_area_struct *vma,
+                         unsigned long addr, unsigned long pfn);
+void (*flush_cache_range)(struct vm_area_struct *vma,
+                          unsigned long start, unsigned long end);
+void (*flush_dcache_page)(struct page *page);
+void (*flush_icache_range)(unsigned long start, unsigned long end);
+void (*flush_icache_page)(struct vm_area_struct *vma,
+                          struct page *page);
+void (*flush_cache_sigtramp)(unsigned long address);
+void (*__flush_wback_region)(void *start, int size);
+void (*__flush_purge_region)(void *start, int size);
+void (*__flush_invalidate_region)(void *start, int size);
+
+static inline void noop_flush_cache_all(void)
+{
+}
+
+static inline void noop_flush_cache_mm(struct mm_struct *mm)
+{
+}
+
+static inline void noop_flush_cache_page(struct vm_area_struct *vma,
+                                         unsigned long addr, unsigned long pfn)
+{
+}
+
+static inline void noop_flush_cache_range(struct vm_area_struct *vma,
+                                          unsigned long start,
+                                          unsigned long end)
+{
+}
+
+static inline void noop_flush_dcache_page(struct page *page)
+{
+}
+
+static inline void noop_flush_icache_range(unsigned long start,
+                                           unsigned long end)
+{
+}
+
+static inline void noop_flush_icache_page(struct vm_area_struct *vma,
+                                          struct page *page)
+{
+}
+
+static inline void noop_flush_cache_sigtramp(unsigned long address)
+{
+}
+
+static inline void noop__flush_region(void *start, int size)
+{
+}
+
 void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
                        unsigned long vaddr, void *dst, const void *src,
                        unsigned long len)
@@ -174,6 +230,20 @@ void __init cpu_cache_init(void)
         compute_alias(&boot_cpu_data.dcache);
         compute_alias(&boot_cpu_data.scache);
 
+        flush_cache_all = noop_flush_cache_all;
+        flush_cache_mm = noop_flush_cache_mm;
+        flush_cache_dup_mm = noop_flush_cache_mm;
+        flush_cache_page = noop_flush_cache_page;
+        flush_cache_range = noop_flush_cache_range;
+        flush_dcache_page = noop_flush_dcache_page;
+        flush_icache_range = noop_flush_icache_range;
+        flush_icache_page = noop_flush_icache_page;
+        flush_cache_sigtramp = noop_flush_cache_sigtramp;
+
+        __flush_wback_region = noop__flush_region;
+        __flush_purge_region = noop__flush_region;
+        __flush_invalidate_region = noop__flush_region;
+
         if ((boot_cpu_data.family == CPU_FAMILY_SH4) ||
             (boot_cpu_data.family == CPU_FAMILY_SH4A) ||
             (boot_cpu_data.family == CPU_FAMILY_SH4AL_DSP)) {
--
cgit v1.1
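
The point of the indirection is that a family's init routine can now
overwrite only the pointers it actually cares about, and everything else
stays a no-op. A minimal sketch of what a family-specific init looks like
under this scheme; the "myfamily" names are hypothetical stand-ins, not the
actual SH-4 implementation:

/* Hypothetical per-family flushers; the real ones live in the
 * family's cache-*.c file. */
static void myfamily_flush_dcache_page(struct page *page)
{
        /* ... family-specific writeback/invalidate of this page ... */
}

static void myfamily__flush_wback_region(void *start, int size)
{
        /* ... walk [start, start+size) by cache line, write back ... */
}

void __init myfamily_cache_init(void)
{
        /* Override only what this CPU needs; the rest keeps the
         * noop defaults installed by cpu_cache_init(). */
        flush_dcache_page = myfamily_flush_dcache_page;
        __flush_wback_region = myfamily__flush_wback_region;
}

Compared with weak aliases, where every CPU had to either define each symbol
or accept a global default chosen at link time, this keeps the selection at
boot time and makes partial overrides cheap.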
From 109b44a82a7a8ae32d7fb257480f92f2d96f0daf Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Sat, 15 Aug 2009 12:35:15 +0900
Subject: sh: Convert SH-2 to new cacheflush interface.

Signed-off-by: Paul Mundt
---
 arch/sh/mm/cache.c | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
index da5bc6a..5ac299d 100644
--- a/arch/sh/mm/cache.c
+++ b/arch/sh/mm/cache.c
@@ -244,6 +244,12 @@ void __init cpu_cache_init(void)
         __flush_purge_region = noop__flush_region;
         __flush_invalidate_region = noop__flush_region;
 
+        if (boot_cpu_data.family == CPU_FAMILY_SH2) {
+                extern void __weak sh2_cache_init(void);
+
+                sh2_cache_init();
+        }
+
         if ((boot_cpu_data.family == CPU_FAMILY_SH4) ||
             (boot_cpu_data.family == CPU_FAMILY_SH4A) ||
             (boot_cpu_data.family == CPU_FAMILY_SH4AL_DSP)) {
--
cgit v1.1
From a58e1a2ab4f6334c50dfbda83d3a5c6e0b2b4bee Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Sat, 15 Aug 2009 12:38:29 +0900
Subject: sh: Convert SH-2A to new cacheflush interface.

Signed-off-by: Paul Mundt
---
 arch/sh/mm/cache.c | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
index 5ac299d..b56cce4 100644
--- a/arch/sh/mm/cache.c
+++ b/arch/sh/mm/cache.c
@@ -250,6 +250,12 @@ void __init cpu_cache_init(void)
                 sh2_cache_init();
         }
 
+        if (boot_cpu_data.family == CPU_FAMILY_SH2A) {
+                extern void __weak sh2a_cache_init(void);
+
+                sh2a_cache_init();
+        }
+
         if ((boot_cpu_data.family == CPU_FAMILY_SH4) ||
             (boot_cpu_data.family == CPU_FAMILY_SH4A) ||
             (boot_cpu_data.family == CPU_FAMILY_SH4AL_DSP)) {
--
cgit v1.1

From 79f1c9da5e5fc5f4705836d8c1cee2213fc80640 Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Sat, 15 Aug 2009 12:42:55 +0900
Subject: sh: Convert SH-3 to new cacheflush interface.

Signed-off-by: Paul Mundt
---
 arch/sh/mm/cache.c | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
index b56cce4..c9480b4 100644
--- a/arch/sh/mm/cache.c
+++ b/arch/sh/mm/cache.c
@@ -256,6 +256,12 @@ void __init cpu_cache_init(void)
                 sh2a_cache_init();
         }
 
+        if (boot_cpu_data.family == CPU_FAMILY_SH3) {
+                extern void __weak sh3_cache_init(void);
+
+                sh3_cache_init();
+        }
+
         if ((boot_cpu_data.family == CPU_FAMILY_SH4) ||
             (boot_cpu_data.family == CPU_FAMILY_SH4A) ||
             (boot_cpu_data.family == CPU_FAMILY_SH4AL_DSP)) {
--
cgit v1.1

From 0d051d90bb08b516b9d6c30d25f83d3c6b5b1c1d Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Sat, 15 Aug 2009 12:53:39 +0900
Subject: sh: Convert SH7705 extended mode to new cacheflush interface.

Signed-off-by: Paul Mundt
---
 arch/sh/mm/cache.c | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
index c9480b4..8618ccdc 100644
--- a/arch/sh/mm/cache.c
+++ b/arch/sh/mm/cache.c
@@ -260,6 +260,13 @@ void __init cpu_cache_init(void)
                 extern void __weak sh3_cache_init(void);
 
                 sh3_cache_init();
+
+                if ((boot_cpu_data.type == CPU_SH7705) &&
+                    (boot_cpu_data.dcache.sets == 512)) {
+                        extern void __weak sh7705_cache_init(void);
+
+                        sh7705_cache_init();
+                }
         }
 
         if ((boot_cpu_data.family == CPU_FAMILY_SH4) ||
--
cgit v1.1

From 2b4315185a06414c4ab40fb0db50dce1b534a1d9 Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Sun, 16 Aug 2009 02:16:44 +0900
Subject: sh: Wire up sh5_cache_init().

Now that the SH-5 code is more or less behaving with the new cacheflush
interface, wire up the initialization code.

Signed-off-by: Paul Mundt
---
 arch/sh/mm/cache.c | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
index 8618ccdc..d602394 100644
--- a/arch/sh/mm/cache.c
+++ b/arch/sh/mm/cache.c
@@ -277,5 +277,11 @@ void __init cpu_cache_init(void)
                 sh4_cache_init();
         }
 
+        if (boot_cpu_data.family == CPU_FAMILY_SH5) {
+                extern void __weak sh5_cache_init(void);
+
+                sh5_cache_init();
+        }
+
         emit_cache_params();
 }
--
cgit v1.1
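
All of these family hooks lean on the same linker trick: "extern void __weak
shN_cache_init(void);" declares a weak reference, so a kernel configuration
that never builds a given family's cache code still links cleanly, and the
guarded call is simply never reached at runtime. The same mechanism can be
demonstrated in userspace with GCC/Clang; this standalone snippet is
illustrative only:

#include <stdio.h>

/* A weak *reference*: if no object file defines other_family_init,
 * the linker resolves it to NULL instead of failing the link. */
extern void other_family_init(void) __attribute__((weak));

int main(void)
{
        if (other_family_init)          /* non-NULL only if linked in */
                other_family_init();
        else
                printf("other_family_init() not linked in, skipping\n");
        return 0;
}

The kernel variants above skip the NULL check because the family/type test
guarantees the definition was built in whenever the branch is taken.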
From f26b2a562b46ab186c8383993ab1332673ac4a47 Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Fri, 21 Aug 2009 17:23:14 +0900
Subject: sh: Make cache flushers SMP-aware.

This does a bit of rework to make the cache flushers SMP-aware. The
function-pointer-based flushers are renamed to local variants, and the
exported interface is now implemented commonly, wrapping the local ops
as necessary.

Signed-off-by: Paul Mundt
---
 arch/sh/mm/cache.c | 137 ++++++++++++++++++++++++++++++-----------------------
 1 file changed, 78 insertions(+), 59 deletions(-)

diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
index d602394..411fe60 100644
--- a/arch/sh/mm/cache.c
+++ b/arch/sh/mm/cache.c
@@ -1,5 +1,5 @@
 /*
- * arch/sh/mm/pg-mmu.c
+ * arch/sh/mm/cache.c
  *
  * Copyright (C) 1999, 2000, 2002  Niibe Yutaka
  * Copyright (C) 2002 - 2009  Paul Mundt
@@ -10,63 +10,26 @@
 #include <linux/init.h>
 #include <linux/mutex.h>
 #include <linux/fs.h>
+#include <linux/smp.h>
 #include <linux/highmem.h>
 #include <linux/module.h>
 #include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
 
-void (*flush_cache_all)(void);
-void (*flush_cache_mm)(struct mm_struct *mm);
-void (*flush_cache_dup_mm)(struct mm_struct *mm);
-void (*flush_cache_page)(struct vm_area_struct *vma,
-                         unsigned long addr, unsigned long pfn);
-void (*flush_cache_range)(struct vm_area_struct *vma,
-                          unsigned long start, unsigned long end);
-void (*flush_dcache_page)(struct page *page);
-void (*flush_icache_range)(unsigned long start, unsigned long end);
-void (*flush_icache_page)(struct vm_area_struct *vma,
-                          struct page *page);
-void (*flush_cache_sigtramp)(unsigned long address);
+void (*local_flush_cache_all)(void *args) = cache_noop;
+void (*local_flush_cache_mm)(void *args) = cache_noop;
+void (*local_flush_cache_dup_mm)(void *args) = cache_noop;
+void (*local_flush_cache_page)(void *args) = cache_noop;
+void (*local_flush_cache_range)(void *args) = cache_noop;
+void (*local_flush_dcache_page)(void *args) = cache_noop;
+void (*local_flush_icache_range)(void *args) = cache_noop;
+void (*local_flush_icache_page)(void *args) = cache_noop;
+void (*local_flush_cache_sigtramp)(void *args) = cache_noop;
+
 void (*__flush_wback_region)(void *start, int size);
 void (*__flush_purge_region)(void *start, int size);
 void (*__flush_invalidate_region)(void *start, int size);
 
-static inline void noop_flush_cache_all(void)
-{
-}
-
-static inline void noop_flush_cache_mm(struct mm_struct *mm)
-{
-}
-
-static inline void noop_flush_cache_page(struct vm_area_struct *vma,
-                                         unsigned long addr, unsigned long pfn)
-{
-}
-
-static inline void noop_flush_cache_range(struct vm_area_struct *vma,
-                                          unsigned long start,
-                                          unsigned long end)
-{
-}
-
-static inline void noop_flush_dcache_page(struct page *page)
-{
-}
-
-static inline void noop_flush_icache_range(unsigned long start,
-                                           unsigned long end)
-{
-}
-
-static inline void noop_flush_icache_page(struct vm_area_struct *vma,
-                                          struct page *page)
-{
-}
-
-static inline void noop_flush_cache_sigtramp(unsigned long address)
-{
-}
-
 static inline void noop__flush_region(void *start, int size)
 {
 }
@@ -184,6 +147,72 @@ void __flush_anon_page(struct page *page, unsigned long vmaddr)
         }
 }
 
+void flush_cache_all(void)
+{
+        on_each_cpu(local_flush_cache_all, NULL, 1);
+}
+
+void flush_cache_mm(struct mm_struct *mm)
+{
+        on_each_cpu(local_flush_cache_mm, mm, 1);
+}
+
+void flush_cache_dup_mm(struct mm_struct *mm)
+{
+        on_each_cpu(local_flush_cache_dup_mm, mm, 1);
+}
+
+void flush_cache_page(struct vm_area_struct *vma, unsigned long addr,
+                      unsigned long pfn)
+{
+        struct flusher_data data;
+
+        data.vma = vma;
+        data.addr1 = addr;
+        data.addr2 = pfn;
+
+        on_each_cpu(local_flush_cache_page, (void *)&data, 1);
+}
+
+void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
+                       unsigned long end)
+{
+        struct flusher_data data;
+
+        data.vma = vma;
+        data.addr1 = start;
+        data.addr2 = end;
+
+        on_each_cpu(local_flush_cache_range, (void *)&data, 1);
+}
+
+void flush_dcache_page(struct page *page)
+{
+        on_each_cpu(local_flush_dcache_page, page, 1);
+}
+
+void flush_icache_range(unsigned long start, unsigned long end)
+{
+        struct flusher_data data;
+
+        data.vma = NULL;
+        data.addr1 = start;
+        data.addr2 = end;
+
+        on_each_cpu(local_flush_icache_range, (void *)&data, 1);
+}
+
+void flush_icache_page(struct vm_area_struct *vma, struct page *page)
+{
+        /* Nothing uses the VMA, so just pass the struct page along */
+        on_each_cpu(local_flush_icache_page, page, 1);
+}
+
+void flush_cache_sigtramp(unsigned long address)
+{
+        on_each_cpu(local_flush_cache_sigtramp, (void *)address, 1);
+}
+
 static void compute_alias(struct cache_info *c)
 {
         c->alias_mask = ((c->sets - 1) << c->entry_shift) & ~(PAGE_SIZE - 1);
@@ -230,16 +259,6 @@ void __init cpu_cache_init(void)
         compute_alias(&boot_cpu_data.dcache);
         compute_alias(&boot_cpu_data.scache);
 
-        flush_cache_all = noop_flush_cache_all;
-        flush_cache_mm = noop_flush_cache_mm;
-        flush_cache_dup_mm = noop_flush_cache_mm;
-        flush_cache_page = noop_flush_cache_page;
-        flush_cache_range = noop_flush_cache_range;
-        flush_dcache_page = noop_flush_dcache_page;
-        flush_icache_range = noop_flush_icache_range;
-        flush_icache_page = noop_flush_icache_page;
-        flush_cache_sigtramp = noop_flush_cache_sigtramp;
-
         __flush_wback_region = noop__flush_region;
         __flush_purge_region = noop__flush_region;
         __flush_invalidate_region = noop__flush_region;
--
cgit v1.1
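
Since an smp_call_function()-style cross-call only carries a single void *,
the multi-argument flushes are marshalled through a small struct. Assuming a
struct flusher_data layout matching the wrappers above (one vma pointer plus
two address-sized slots), a family's local handler unpacks it roughly as
follows; the handler name is a hypothetical stand-in for the real
family-specific implementation:

/* As implied by the wrappers above: one vma pointer plus two
 * generic address/argument slots. */
struct flusher_data {
        struct vm_area_struct *vma;
        unsigned long addr1, addr2;
};

/* Sketch of a receiving-side handler: recover the typed arguments
 * from the single void * and do the per-CPU work. */
static void myfamily_local_flush_cache_range(void *args)
{
        struct flusher_data *data = args;
        struct vm_area_struct *vma = data->vma;
        unsigned long start = data->addr1;
        unsigned long end = data->addr2;

        /* ... flush [start, end) for vma on this CPU ... */
        (void)vma; (void)start; (void)end;
}

Note that the callers stack-allocate the struct and pass wait=1, so the data
is guaranteed to stay live until every CPU has finished with it.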
From 6f3795788b030c3c190fa063adfe519e016cc6fd Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Tue, 1 Sep 2009 21:21:36 +0900
Subject: sh: Fix up UP deadlock with SMP-aware cache ops.

This builds on top of the previous reversion and implements a special
on_each_cpu() variant that simply disables preemption across the call
while leaving the interrupt state to the function itself. There were
some unintended consequences with IRQ disabling in some of these paths
on UP that ran into a deadlock scenario with IRQs being missed.

Signed-off-by: Paul Mundt
---
 arch/sh/mm/cache.c | 27 ++++++++++++++++++---------
 1 file changed, 18 insertions(+), 9 deletions(-)

diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
index 411fe60..db2b1c5 100644
--- a/arch/sh/mm/cache.c
+++ b/arch/sh/mm/cache.c
@@ -34,6 +34,15 @@ static inline void noop__flush_region(void *start, int size)
 {
 }
 
+static inline void cacheop_on_each_cpu(void (*func) (void *info), void *info,
+                                       int wait)
+{
+        preempt_disable();
+        smp_call_function(func, info, wait);
+        func(info);
+        preempt_enable();
+}
+
 void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
                        unsigned long vaddr, void *dst, const void *src,
                        unsigned long len)
@@ -149,17 +158,17 @@ void __flush_anon_page(struct page *page, unsigned long vmaddr)
 
 void flush_cache_all(void)
 {
-        on_each_cpu(local_flush_cache_all, NULL, 1);
+        cacheop_on_each_cpu(local_flush_cache_all, NULL, 1);
 }
 
 void flush_cache_mm(struct mm_struct *mm)
 {
-        on_each_cpu(local_flush_cache_mm, mm, 1);
+        cacheop_on_each_cpu(local_flush_cache_mm, mm, 1);
 }
 
 void flush_cache_dup_mm(struct mm_struct *mm)
 {
-        on_each_cpu(local_flush_cache_dup_mm, mm, 1);
+        cacheop_on_each_cpu(local_flush_cache_dup_mm, mm, 1);
 }
 
 void flush_cache_page(struct vm_area_struct *vma, unsigned long addr,
@@ -171,7 +180,7 @@ void flush_cache_page(struct vm_area_struct *vma, unsigned long addr,
         data.addr1 = addr;
         data.addr2 = pfn;
 
-        on_each_cpu(local_flush_cache_page, (void *)&data, 1);
+        cacheop_on_each_cpu(local_flush_cache_page, (void *)&data, 1);
 }
 
 void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
@@ -183,12 +192,12 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
         data.addr1 = start;
         data.addr2 = end;
 
-        on_each_cpu(local_flush_cache_range, (void *)&data, 1);
+        cacheop_on_each_cpu(local_flush_cache_range, (void *)&data, 1);
 }
 
 void flush_dcache_page(struct page *page)
 {
-        on_each_cpu(local_flush_dcache_page, page, 1);
+        cacheop_on_each_cpu(local_flush_dcache_page, page, 1);
 }
 
 void flush_icache_range(unsigned long start, unsigned long end)
@@ -199,18 +208,18 @@ void flush_icache_range(unsigned long start, unsigned long end)
         data.addr1 = start;
         data.addr2 = end;
 
-        on_each_cpu(local_flush_icache_range, (void *)&data, 1);
+        cacheop_on_each_cpu(local_flush_icache_range, (void *)&data, 1);
 }
 
 void flush_icache_page(struct vm_area_struct *vma, struct page *page)
 {
         /* Nothing uses the VMA, so just pass the struct page along */
-        on_each_cpu(local_flush_icache_page, page, 1);
+        cacheop_on_each_cpu(local_flush_icache_page, page, 1);
 }
 
 void flush_cache_sigtramp(unsigned long address)
 {
-        on_each_cpu(local_flush_cache_sigtramp, (void *)address, 1);
+        cacheop_on_each_cpu(local_flush_cache_sigtramp, (void *)address, 1);
 }
 
 static void compute_alias(struct cache_info *c)
--
cgit v1.1
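
The deadlock being fixed comes from the local leg of the cross-call: the
stock on_each_cpu() runs the function on the calling CPU with interrupts
disabled, while cacheop_on_each_cpu() only disables preemption and leaves
the IRQ state to the flusher. A simplified side-by-side, paraphrasing
kernel/smp.c of this era rather than quoting it:

/* Simplified paraphrase -- not the real kernel implementations. */
static void on_each_cpu_style(void (*func)(void *), void *info, int wait)
{
        unsigned long flags;

        preempt_disable();
        smp_call_function(func, info, wait);    /* remote CPUs */
        local_irq_save(flags);
        func(info);             /* local CPU runs with IRQs off */
        local_irq_restore(flags);
        preempt_enable();
}

static void cacheop_style(void (*func)(void *), void *info, int wait)
{
        preempt_disable();
        smp_call_function(func, info, wait);    /* no-op on UP */
        func(info);             /* IRQ state left to the flusher */
        preempt_enable();
}

On UP, smp_call_function() degenerates to nothing, so the only difference is
whether the flusher runs with IRQs forcibly disabled; a flusher that waits
on anything IRQ-driven then deadlocks in the first form but not the second.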
From 0906a3ad33a254094fb74828e3ddb9af8771a6da Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Thu, 3 Sep 2009 17:21:10 +0900
Subject: sh: Fix up and optimize the kmap_coherent() interface.

This fixes up the kmap_coherent/kunmap_coherent() interface for recent
changes both in the page fault path and the shared cache flushers, and
adds some optimizations. One of the key things to note here is that the
TLB flush itself is deferred until the unmap, and the call into
update_mmu_cache() itself goes away, relying on the regular page fault
path to handle the lazy dcache writeback if necessary.

Signed-off-by: Paul Mundt
---
 arch/sh/mm/cache.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
index db2b1c5..8e4a8d1 100644
--- a/arch/sh/mm/cache.c
+++ b/arch/sh/mm/cache.c
@@ -51,7 +51,7 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
             !test_bit(PG_dcache_dirty, &page->flags)) {
                 void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
                 memcpy(vto, src, len);
-                kunmap_coherent();
+                kunmap_coherent(vto);
         } else {
                 memcpy(dst, src, len);
                 if (boot_cpu_data.dcache.n_aliases)
@@ -70,7 +70,7 @@ void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
             !test_bit(PG_dcache_dirty, &page->flags)) {
                 void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
                 memcpy(dst, vfrom, len);
-                kunmap_coherent();
+                kunmap_coherent(vfrom);
         } else {
                 memcpy(dst, src, len);
                 if (boot_cpu_data.dcache.n_aliases)
@@ -89,7 +89,7 @@ void copy_user_highpage(struct page *to, struct page *from,
             !test_bit(PG_dcache_dirty, &from->flags)) {
                 vfrom = kmap_coherent(from, vaddr);
                 copy_page(vto, vfrom);
-                kunmap_coherent();
+                kunmap_coherent(vfrom);
         } else {
                 vfrom = kmap_atomic(from, KM_USER0);
                 copy_page(vto, vfrom);
@@ -150,7 +150,7 @@ void __flush_anon_page(struct page *page, unsigned long vmaddr)
 
                 kaddr = kmap_coherent(page, vmaddr);
                 __flush_wback_region((void *)kaddr, PAGE_SIZE);
-                kunmap_coherent();
+                kunmap_coherent(kaddr);
         } else
                 __flush_wback_region((void *)addr, PAGE_SIZE);
 }
--
cgit v1.1
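
The interface change is visible in every caller above: kunmap_coherent() now
takes the mapped address back, which is what lets the implementation defer
the flush and TLB teardown to unmap time instead of doing them eagerly at
map time. The resulting usage pattern, as a sketch with a hypothetical
caller name (error handling and the atomic-context rules elided):

static void write_to_user_page_sketch(struct page *page, unsigned long vaddr,
                                      const void *src, unsigned long len)
{
        /* Map the page at a kernel address whose cache colour matches
         * the user-space vaddr, so both views hit the same lines. */
        void *vto = kmap_coherent(page, vaddr);

        memcpy(vto, src, len);

        /* The flush and the deferred TLB teardown happen here, keyed
         * off the address handed back by kmap_coherent(). */
        kunmap_coherent(vto);
}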
From 6e4154d4c2dd3d7e61d19ddd2527322ce34c2f5a Mon Sep 17 00:00:00 2001
From: Paul Mundt
Date: Tue, 8 Sep 2009 16:21:00 +0900
Subject: sh: Use more aggressive dcache purging in kmap teardown.

This fixes up a number of outstanding issues observed with old mappings
on the same colour hanging around. This requires some more optimal
handling, but is a safe fallback until all of the corner cases have
been handled.

Signed-off-by: Paul Mundt
---
 arch/sh/mm/cache.c | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
index 8e4a8d1..35c37b7 100644
--- a/arch/sh/mm/cache.c
+++ b/arch/sh/mm/cache.c
@@ -97,7 +97,7 @@ void copy_user_highpage(struct page *to, struct page *from,
         }
 
         if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
-                __flush_wback_region(vto, PAGE_SIZE);
+                __flush_purge_region(vto, PAGE_SIZE);
 
         kunmap_atomic(vto, KM_USER1);
         /* Make sure this page is cleared on other CPU's too before using it */
@@ -112,7 +112,7 @@ void clear_user_highpage(struct page *page, unsigned long vaddr)
         clear_page(kaddr);
 
         if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK))
-                __flush_wback_region(kaddr, PAGE_SIZE);
+                __flush_purge_region(kaddr, PAGE_SIZE);
 
         kunmap_atomic(kaddr, KM_USER0);
 }
@@ -134,7 +134,7 @@ void __update_cache(struct vm_area_struct *vma,
                 unsigned long addr = (unsigned long)page_address(page);
 
                 if (pages_do_alias(addr, address & PAGE_MASK))
-                        __flush_wback_region((void *)addr, PAGE_SIZE);
+                        __flush_purge_region((void *)addr, PAGE_SIZE);
         }
 }
 
@@ -149,10 +149,11 @@ void __flush_anon_page(struct page *page, unsigned long vmaddr)
                         void *kaddr;
 
                         kaddr = kmap_coherent(page, vmaddr);
-                        __flush_wback_region((void *)kaddr, PAGE_SIZE);
+                        /* XXX.. For now kunmap_coherent() does a purge */
+                        /* __flush_purge_region((void *)kaddr, PAGE_SIZE); */
                         kunmap_coherent(kaddr);
                 } else
-                        __flush_wback_region((void *)addr, PAGE_SIZE);
+                        __flush_purge_region((void *)addr, PAGE_SIZE);
         }
 }
--
cgit v1.1
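
Why switching from writeback to purge matters here: both operations push
dirty data out to memory, but a writeback leaves the lines valid in the
cache, so a later access through a different colour can still see a stale
cached copy, while a purge also invalidates, forcing the next access to
refetch from memory. In sketch form, using a hypothetical cache_line model
(illustrative only, not the real region flushers):

/* Hypothetical per-line model, for illustration. */
struct cache_line {
        int valid;
        int dirty;
};

static void write_back(struct cache_line *line)
{
        /* ... push the line's data out to memory ... */
}

/* __flush_wback_region semantics, per line in the region. */
static void wback_line(struct cache_line *line)
{
        if (line->dirty)
                write_back(line);       /* memory is now current */
        line->dirty = 0;                /* but the line stays cached */
}

/* __flush_purge_region semantics, per line in the region. */
static void purge_line(struct cache_line *line)
{
        if (line->dirty)
                write_back(line);
        line->valid = 0;                /* next access must refetch */
}

The purge is the conservative choice for kmap teardown: it costs a refetch
on the next access, but it guarantees no stale copy of the old mapping's
colour survives in the cache.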