Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/Kconfig      |   6
-rw-r--r--  arch/arm/mm/Makefile     |   2
-rw-r--r--  arch/arm/mm/blockops.c   | 185
-rw-r--r--  arch/arm/mm/consistent.c |  13
-rw-r--r--  arch/arm/mm/flush.c      |   7
-rw-r--r--  arch/arm/mm/init.c       |  24
-rw-r--r--  arch/arm/mm/ioremap.c    |   3
7 files changed, 34 insertions(+), 206 deletions(-)
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index e3c14d6..e84fdde 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -102,8 +102,8 @@ config CPU_ARM922T
# ARM925T
config CPU_ARM925T
bool "Support ARM925T processor" if ARCH_OMAP1
- depends on ARCH_OMAP1510
- default y if ARCH_OMAP1510
+ depends on ARCH_OMAP15XX
+ default y if ARCH_OMAP15XX
select CPU_32v4
select CPU_ABRT_EV4T
select CPU_CACHE_V4WT
@@ -242,7 +242,7 @@ config CPU_XSCALE
# ARMv6
config CPU_V6
bool "Support ARM V6 processor"
- depends on ARCH_INTEGRATOR || MACH_REALVIEW_EB
+ depends on ARCH_INTEGRATOR || MACH_REALVIEW_EB || ARCH_OMAP2
select CPU_32v6
select CPU_ABRT_EV6
select CPU_CACHE_V6
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index 59f47d4..ffe73ba 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -51,4 +51,4 @@ obj-$(CONFIG_CPU_ARM1026) += proc-arm1026.o
obj-$(CONFIG_CPU_SA110) += proc-sa110.o
obj-$(CONFIG_CPU_SA1100) += proc-sa1100.o
obj-$(CONFIG_CPU_XSCALE) += proc-xscale.o
-obj-$(CONFIG_CPU_V6) += proc-v6.o blockops.o
+obj-$(CONFIG_CPU_V6) += proc-v6.o
diff --git a/arch/arm/mm/blockops.c b/arch/arm/mm/blockops.c
deleted file mode 100644
index 4f5ee2d..0000000
--- a/arch/arm/mm/blockops.c
+++ /dev/null
@@ -1,185 +0,0 @@
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/errno.h>
-#include <linux/mm.h>
-
-#include <asm/memory.h>
-#include <asm/ptrace.h>
-#include <asm/cacheflush.h>
-#include <asm/traps.h>
-
-extern struct cpu_cache_fns blk_cache_fns;
-
-#define HARVARD_CACHE
-
-/*
- * blk_flush_kern_dcache_page(kaddr)
- *
- * Ensure that the data held in the page kaddr is written back
- * to the page in question.
- *
- * - kaddr - kernel address (guaranteed to be page aligned)
- */
-static void __attribute__((naked))
-blk_flush_kern_dcache_page(void *kaddr)
-{
- asm(
- "add r1, r0, %0 \n\
- sub r1, r1, %1 \n\
-1: .word 0xec401f0e @ mcrr p15, 0, r0, r1, c14, 0 @ blocking \n\
- mov r0, #0 \n\
- mcr p15, 0, r0, c7, c5, 0 \n\
- mcr p15, 0, r0, c7, c10, 4 \n\
- mov pc, lr"
- :
- : "I" (PAGE_SIZE), "I" (L1_CACHE_BYTES));
-}
-
-/*
- * blk_dma_inv_range(start,end)
- *
- * Invalidate the data cache within the specified region; we will
- * be performing a DMA operation in this region and we want to
- * purge old data in the cache.
- *
- * - start - virtual start address of region
- * - end - virtual end address of region
- */
-static void __attribute__((naked))
-blk_dma_inv_range_unified(unsigned long start, unsigned long end)
-{
- asm(
- "tst r0, %0 \n\
- mcrne p15, 0, r0, c7, c11, 1 @ clean unified line \n\
- tst r1, %0 \n\
- mcrne p15, 0, r1, c7, c15, 1 @ clean & invalidate unified line\n\
- .word 0xec401f06 @ mcrr p15, 0, r1, r0, c6, 0 @ blocking \n\
- mov r0, #0 \n\
- mcr p15, 0, r0, c7, c10, 4 @ drain write buffer \n\
- mov pc, lr"
- :
- : "I" (L1_CACHE_BYTES - 1));
-}
-
-static void __attribute__((naked))
-blk_dma_inv_range_harvard(unsigned long start, unsigned long end)
-{
- asm(
- "tst r0, %0 \n\
- mcrne p15, 0, r0, c7, c10, 1 @ clean D line \n\
- tst r1, %0 \n\
- mcrne p15, 0, r1, c7, c14, 1 @ clean & invalidate D line \n\
- .word 0xec401f06 @ mcrr p15, 0, r1, r0, c6, 0 @ blocking \n\
- mov r0, #0 \n\
- mcr p15, 0, r0, c7, c10, 4 @ drain write buffer \n\
- mov pc, lr"
- :
- : "I" (L1_CACHE_BYTES - 1));
-}
-
-/*
- * blk_dma_clean_range(start,end)
- * - start - virtual start address of region
- * - end - virtual end address of region
- */
-static void __attribute__((naked))
-blk_dma_clean_range(unsigned long start, unsigned long end)
-{
- asm(
- ".word 0xec401f0c @ mcrr p15, 0, r1, r0, c12, 0 @ blocking \n\
- mov r0, #0 \n\
- mcr p15, 0, r0, c7, c10, 4 @ drain write buffer \n\
- mov pc, lr");
-}
-
-/*
- * blk_dma_flush_range(start,end)
- * - start - virtual start address of region
- * - end - virtual end address of region
- */
-static void __attribute__((naked))
-blk_dma_flush_range(unsigned long start, unsigned long end)
-{
- asm(
- ".word 0xec401f0e @ mcrr p15, 0, r1, r0, c14, 0 @ blocking \n\
- mov pc, lr");
-}
-
-static int blockops_trap(struct pt_regs *regs, unsigned int instr)
-{
- regs->ARM_r4 |= regs->ARM_r2;
- regs->ARM_pc += 4;
- return 0;
-}
-
-static char *func[] = {
- "Prefetch data range",
- "Clean+Invalidate data range",
- "Clean data range",
- "Invalidate data range",
- "Invalidate instr range"
-};
-
-static struct undef_hook blockops_hook __initdata = {
- .instr_mask = 0x0fffffd0,
- .instr_val = 0x0c401f00,
- .cpsr_mask = PSR_T_BIT,
- .cpsr_val = 0,
- .fn = blockops_trap,
-};
-
-static int __init blockops_check(void)
-{
- register unsigned int err asm("r4") = 0;
- unsigned int err_pos = 1;
- unsigned int cache_type;
- int i;
-
- asm("mrc p15, 0, %0, c0, c0, 1" : "=r" (cache_type));
-
- printk("Checking V6 block cache operations:\n");
- register_undef_hook(&blockops_hook);
-
- __asm__ ("mov r0, %0\n\t"
- "mov r1, %1\n\t"
- "mov r2, #1\n\t"
- ".word 0xec401f2c @ mcrr p15, 0, r1, r0, c12, 2\n\t"
- "mov r2, #2\n\t"
- ".word 0xec401f0e @ mcrr p15, 0, r1, r0, c14, 0\n\t"
- "mov r2, #4\n\t"
- ".word 0xec401f0c @ mcrr p15, 0, r1, r0, c12, 0\n\t"
- "mov r2, #8\n\t"
- ".word 0xec401f06 @ mcrr p15, 0, r1, r0, c6, 0\n\t"
- "mov r2, #16\n\t"
- ".word 0xec401f05 @ mcrr p15, 0, r1, r0, c5, 0\n\t"
- :
- : "r" (PAGE_OFFSET), "r" (PAGE_OFFSET + 128)
- : "r0", "r1", "r2");
-
- unregister_undef_hook(&blockops_hook);
-
- for (i = 0; i < ARRAY_SIZE(func); i++, err_pos <<= 1)
- printk("%30s: %ssupported\n", func[i], err & err_pos ? "not " : "");
-
- if ((err & 8) == 0) {
- printk(" --> Using %s block cache invalidate\n",
- cache_type & (1 << 24) ? "harvard" : "unified");
- if (cache_type & (1 << 24))
- cpu_cache.dma_inv_range = blk_dma_inv_range_harvard;
- else
- cpu_cache.dma_inv_range = blk_dma_inv_range_unified;
- }
- if ((err & 4) == 0) {
- printk(" --> Using block cache clean\n");
- cpu_cache.dma_clean_range = blk_dma_clean_range;
- }
- if ((err & 2) == 0) {
- printk(" --> Using block cache clean+invalidate\n");
- cpu_cache.dma_flush_range = blk_dma_flush_range;
- cpu_cache.flush_kern_dcache_page = blk_flush_kern_dcache_page;
- }
-
- return 0;
-}
-
-__initcall(blockops_check);
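
[The file deleted above implemented boot-time feature probing: each ARMv6 block cache operation (encoded as .word because the assembler did not know the mcrr forms) was executed once under an undefined-instruction hook; blockops_trap() recorded any op that trapped by OR-ing the marker held in r2 into r4 and stepping the pc past the faulting instruction, and only the ops that survived were patched into cpu_cache. The same probe-and-trap idea in a plain userspace C sketch, with SIGILL playing the role of the undef hook and hypothetical helper names:

#include <setjmp.h>
#include <signal.h>

static sigjmp_buf probe_env;

static void probe_sigill(int sig)
{
	/* Jump back to the sigsetjmp() below instead of retrying the insn. */
	siglongjmp(probe_env, 1);
}

/* Run fn() once and report whether it executed without raising SIGILL. */
static int insn_supported(void (*fn)(void))
{
	struct sigaction sa, old;
	int supported = 1;

	sa.sa_handler = probe_sigill;
	sa.sa_flags = 0;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGILL, &sa, &old);

	if (sigsetjmp(probe_env, 1))
		supported = 0;		/* the probe instruction trapped */
	else
		fn();			/* may raise SIGILL */

	sigaction(SIGILL, &old, NULL);
	return supported;
}
]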
diff --git a/arch/arm/mm/consistent.c b/arch/arm/mm/consistent.c
index 47b0b76..dbfe9e8 100644
--- a/arch/arm/mm/consistent.c
+++ b/arch/arm/mm/consistent.c
@@ -66,6 +66,7 @@ struct vm_region {
unsigned long vm_start;
unsigned long vm_end;
struct page *vm_pages;
+ int vm_active;
};
static struct vm_region consistent_head = {
@@ -104,6 +105,7 @@ vm_region_alloc(struct vm_region *head, size_t size, gfp_t gfp)
list_add_tail(&new->vm_list, &c->vm_list);
new->vm_start = addr;
new->vm_end = addr + size;
+ new->vm_active = 1;
spin_unlock_irqrestore(&consistent_lock, flags);
return new;
@@ -120,7 +122,7 @@ static struct vm_region *vm_region_find(struct vm_region *head, unsigned long ad
struct vm_region *c;
list_for_each_entry(c, &head->vm_list, vm_list) {
- if (c->vm_start == addr)
+ if (c->vm_active && c->vm_start == addr)
goto out;
}
c = NULL;
@@ -319,6 +321,7 @@ EXPORT_SYMBOL(dma_mmap_writecombine);
/*
* free a page as defined by the above mapping.
+ * Must not be called with IRQs disabled.
*/
void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
{
@@ -326,14 +329,18 @@ void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr
unsigned long flags, addr;
pte_t *ptep;
+ WARN_ON(irqs_disabled());
+
size = PAGE_ALIGN(size);
spin_lock_irqsave(&consistent_lock, flags);
-
c = vm_region_find(&consistent_head, (unsigned long)cpu_addr);
if (!c)
goto no_area;
+ c->vm_active = 0;
+ spin_unlock_irqrestore(&consistent_lock, flags);
+
if ((c->vm_end - c->vm_start) != size) {
printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n",
__func__, c->vm_end - c->vm_start, size);
@@ -372,8 +379,8 @@ void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr
flush_tlb_kernel_range(c->vm_start, c->vm_end);
+ spin_lock_irqsave(&consistent_lock, flags);
list_del(&c->vm_list);
-
spin_unlock_irqrestore(&consistent_lock, flags);
kfree(c);
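
[The consistent.c hunks close a race in dma_free_coherent(): the teardown (pte clearing, page freeing, TLB flush) used to run with consistent_lock held and IRQs off. The patch instead marks the region !vm_active under the lock, drops the lock for the slow teardown, and retakes it only to unlink the entry; vm_region_find() skips inactive regions, so a concurrent free of the same address cannot see a half-dead region. A minimal userspace sketch of this mark-then-unlock pattern, with hypothetical names and a pthread mutex standing in for the spinlock:

#include <pthread.h>
#include <stdlib.h>

struct region {
	struct region *next;
	unsigned long start;
	int active;			/* cleared once a free is in flight */
};

static pthread_mutex_t region_lock = PTHREAD_MUTEX_INITIALIZER;
static struct region *region_list;

/* Caller holds region_lock. */
static struct region *region_find(unsigned long start)
{
	struct region *c;

	for (c = region_list; c; c = c->next)
		if (c->active && c->start == start)	/* skip dying regions */
			return c;
	return NULL;
}

void region_free(unsigned long start)
{
	struct region *c, **p;

	pthread_mutex_lock(&region_lock);
	c = region_find(start);
	if (!c) {
		pthread_mutex_unlock(&region_lock);
		return;
	}
	c->active = 0;				/* hide it from lookups... */
	pthread_mutex_unlock(&region_lock);	/* ...so the lock can drop */

	/* ...slow teardown runs here without the lock held... */

	pthread_mutex_lock(&region_lock);	/* retake only to unlink */
	for (p = &region_list; *p != c; p = &(*p)->next)
		;
	*p = c->next;
	pthread_mutex_unlock(&region_lock);
	free(c);
}
]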
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index c9a0398..330695b 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -155,14 +155,19 @@ static void __flush_dcache_aliases(struct address_space *mapping, struct page *p
* space mappings, we can be lazy and remember that we may have dirty
* kernel cache lines for later. Otherwise, we assume we have
* aliasing mappings.
+ *
+ * Note that we disable the lazy flush for SMP.
*/
void flush_dcache_page(struct page *page)
{
struct address_space *mapping = page_mapping(page);
+#ifndef CONFIG_SMP
if (mapping && !mapping_mapped(mapping))
set_bit(PG_dcache_dirty, &page->flags);
- else {
+ else
+#endif
+ {
__flush_dcache_page(mapping, page);
if (mapping && cache_is_vivt())
__flush_dcache_aliases(mapping, page);
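
[Assembled from the hunk above, the patched function reads as follows; on SMP builds the preprocessor removes the lazy set_bit() path entirely, so every call flushes immediately instead of deferring via PG_dcache_dirty:

void flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

#ifndef CONFIG_SMP
	if (mapping && !mapping_mapped(mapping))
		set_bit(PG_dcache_dirty, &page->flags);
	else
#endif
	{
		__flush_dcache_page(mapping, page);
		if (mapping && cache_is_vivt())
			__flush_dcache_aliases(mapping, page);
	}
}
]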
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index c168f32..8b276ee 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -420,7 +420,8 @@ static void __init bootmem_init(struct meminfo *mi)
* Set up the device mappings. Since we clear out the page tables for all
* mappings above VMALLOC_END, we will remove any debug device mappings.
* This means you have to be careful how you debug this function, or any
- * called function. (Do it by code inspection!)
+ * called function. This means you can't use any function or debugging
+ * method which may touch any device, otherwise the kernel _will_ crash.
*/
static void __init devicemaps_init(struct machine_desc *mdesc)
{
@@ -428,6 +429,12 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
unsigned long addr;
void *vectors;
+ /*
+ * Allocate the vector page early.
+ */
+ vectors = alloc_bootmem_low_pages(PAGE_SIZE);
+ BUG_ON(!vectors);
+
for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE)
pmd_clear(pmd_off_k(addr));
@@ -461,12 +468,6 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
create_mapping(&map);
#endif
- flush_cache_all();
- local_flush_tlb_all();
-
- vectors = alloc_bootmem_low_pages(PAGE_SIZE);
- BUG_ON(!vectors);
-
/*
* Create a mapping for the machine vectors at the high-vectors
* location (0xffff0000). If we aren't using high-vectors, also
@@ -491,12 +492,13 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
mdesc->map_io();
/*
- * Finally flush the tlb again - this ensures that we're in a
- * consistent state wrt the writebuffer if the writebuffer needs
- * draining. After this point, we can start to touch devices
- * again.
+ * Finally flush the caches and tlb to ensure that we're in a
+ * consistent state wrt the writebuffer. This also ensures that
+ * any write-allocated cache lines in the vector page are written
+ * back. After this point, we can start to touch devices again.
*/
local_flush_tlb_all();
+ flush_cache_all();
}
/*
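
[The net effect of the init.c hunks is a reordering of devicemaps_init(): the vector page must come from bootmem before the page tables above VMALLOC_END are torn down, and the cache flush moves after mdesc->map_io() so that any write-allocated lines in the vector page are written back before a device is touched. An outline of the resulting ordering, reconstructed from the hunks with unrelated steps elided:

static void __init devicemaps_init(struct machine_desc *mdesc)
{
	/* 1. Allocate the vector page while it is still safe to do so. */
	vectors = alloc_bootmem_low_pages(PAGE_SIZE);
	BUG_ON(!vectors);

	/* 2. Clear every mapping above VMALLOC_END; from here on, nothing
	 *    that touches a device may run.
	 */
	for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE)
		pmd_clear(pmd_off_k(addr));

	/* 3. Map the vector page at the high- (and, if needed, low-)
	 *    vectors location, then let the machine map its devices.
	 */
	...
	mdesc->map_io();

	/* 4. Flush the TLB and caches: this drains the write buffer and
	 *    writes back the vector page's cache lines, after which
	 *    devices are safe to touch again.
	 */
	local_flush_tlb_all();
	flush_cache_all();
}
]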
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 0f128c2..1090139 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -130,8 +130,7 @@ remap_area_pages(unsigned long start, unsigned long phys_addr,
* mapping. See include/asm-arm/proc-armv/pgtable.h for more information.
*/
void __iomem *
-__ioremap(unsigned long phys_addr, size_t size, unsigned long flags,
- unsigned long align)
+__ioremap(unsigned long phys_addr, size_t size, unsigned long flags)
{
void * addr;
struct vm_struct * area;
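
[The ioremap.c hunk, shown truncated, only narrows the prototype: __ioremap() loses its trailing align argument, which callers apparently had no real use for since the mapping is set up at page granularity regardless. Direct callers update mechanically; the physical address, size, and variable below are made up for illustration:

void __iomem *regs;

/* before: trailing align argument, conventionally 1 */
regs = __ioremap(0x80000000, SZ_4K, 0, 1);

/* after */
regs = __ioremap(0x80000000, SZ_4K, 0);
]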