author		Russell King <rmk+kernel@arm.linux.org.uk>	2010-10-28 14:42:06 +0100
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2010-10-28 14:42:06 +0100
commit		be6786ac738801d39cfd264ec88c352efd029578 (patch)
tree		3652ce6cfbfdb1dbb8cf5a664e5e84c6d3abd260
parent		f9cef506815386df4bd7e463b59e0a0984ce0355 (diff)
parent		ae6948048c417d429b8a0f85fad13e483f7cc1a3 (diff)
Merge branch 'l2x0-pull-rmk' of git://dev.omapzoom.org/pub/scm/santosh/kernel-omap4-base into devel-stable
-rw-r--r--  arch/arm/include/asm/hardware/cache-l2x0.h |  9 ++++---
-rw-r--r--  arch/arm/include/asm/outercache.h          | 24 ++++++++++
-rw-r--r--  arch/arm/kernel/machine_kexec.c            |  3 +
-rw-r--r--  arch/arm/mach-omap2/omap4-common.c         | 13 +++++
-rw-r--r--  arch/arm/mach-ux500/cpu.c                  | 45 ++++++++++++
-rw-r--r--  arch/arm/mm/Kconfig                        |  8 ++
-rw-r--r--  arch/arm/mm/cache-l2x0.c                   | 78 +++++++++++++++++-
7 files changed, 171 insertions(+), 9 deletions(-)
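In short, this merge adds whole-cache hooks (flush_all, inv_all, disable) to the outer-cache interface, uses them to make kexec safe when an outer L2 is live, derives the L2 size from the auxiliary control register so that over-sized range operations can fall back to way-based ones, and lets OMAP4 and ux500 override the hooks their secure firmware constrains. The kexec path below ends up executing the sequence sketched here; the per-call rationale in the comments is an interpretation, not text from the commit:

    /* machine_kexec() cache teardown after this merge (annotated) */
    flush_cache_all();   /* clean L1 so dirty lines reach L2/RAM */
    outer_flush_all();   /* clean+invalidate every L2 way out to RAM */
    outer_disable();     /* stop the L2 servicing or allocating lines */
    cpu_proc_fin();      /* shut down the CPU-side (L1) caches */
    outer_inv_all();     /* discard anything the L2 caught in between */
    flush_cache_all();   /* leave L1 clean for the reboot code buffer */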
diff --git a/arch/arm/include/asm/hardware/cache-l2x0.h b/arch/arm/include/asm/hardware/cache-l2x0.h
index 351ca8d..cc42d5f 100644
--- a/arch/arm/include/asm/hardware/cache-l2x0.h
+++ b/arch/arm/include/asm/hardware/cache-l2x0.h
@@ -21,9 +21,6 @@
 #define __ASM_ARM_HARDWARE_L2X0_H
 
 #define L2X0_CACHE_ID			0x000
-#define L2X0_CACHE_ID_PART_MASK	(0xf << 6)
-#define L2X0_CACHE_ID_PART_L210	(1 << 6)
-#define L2X0_CACHE_ID_PART_L310	(3 << 6)
 #define L2X0_CACHE_TYPE			0x004
 #define L2X0_CTRL			0x100
 #define L2X0_AUX_CTRL			0x104
@@ -58,6 +55,12 @@
 #define L2X0_DYNAMIC_CLK_GATING_EN	(1 << 1)
 #define L2X0_STNDBY_MODE_EN		(1 << 0)
 
+/* Registers shifts and masks */
+#define L2X0_CACHE_ID_PART_MASK		(0xf << 6)
+#define L2X0_CACHE_ID_PART_L210		(1 << 6)
+#define L2X0_CACHE_ID_PART_L310		(3 << 6)
+#define L2X0_AUX_CTRL_WAY_SIZE_MASK	(0x3 << 17)
+
 #ifndef __ASSEMBLY__
 extern void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask);
 #endif
diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
index 25f76ba..fc19009 100644
--- a/arch/arm/include/asm/outercache.h
+++ b/arch/arm/include/asm/outercache.h
@@ -25,6 +25,9 @@ struct outer_cache_fns {
 	void (*inv_range)(unsigned long, unsigned long);
 	void (*clean_range)(unsigned long, unsigned long);
 	void (*flush_range)(unsigned long, unsigned long);
+	void (*flush_all)(void);
+	void (*inv_all)(void);
+	void (*disable)(void);
 #ifdef CONFIG_OUTER_CACHE_SYNC
 	void (*sync)(void);
 #endif
@@ -50,6 +53,24 @@ static inline void outer_flush_range(unsigned long start, unsigned long end)
 		outer_cache.flush_range(start, end);
 }
 
+static inline void outer_flush_all(void)
+{
+	if (outer_cache.flush_all)
+		outer_cache.flush_all();
+}
+
+static inline void outer_inv_all(void)
+{
+	if (outer_cache.inv_all)
+		outer_cache.inv_all();
+}
+
+static inline void outer_disable(void)
+{
+	if (outer_cache.disable)
+		outer_cache.disable();
+}
+
 #else
 
 static inline void outer_inv_range(unsigned long start, unsigned long end)
@@ -58,6 +79,9 @@ static inline void outer_clean_range(unsigned long start, unsigned long end)
 { }
 static inline void outer_flush_range(unsigned long start, unsigned long end)
 { }
+static inline void outer_flush_all(void) { }
+static inline void outer_inv_all(void) { }
+static inline void outer_disable(void) { }
 
 #endif
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
index 1fc74cb..3a8fd51 100644
--- a/arch/arm/kernel/machine_kexec.c
+++ b/arch/arm/kernel/machine_kexec.c
@@ -78,7 +78,10 @@ void machine_kexec(struct kimage *image)
 	local_fiq_disable();
 	setup_mm_for_reboot(0); /* mode is not used, so just pass 0*/
 	flush_cache_all();
+	outer_flush_all();
+	outer_disable();
 	cpu_proc_fin();
+	outer_inv_all();
 	flush_cache_all();
 	cpu_reset(reboot_code_buffer_phys);
 }
diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c
index 923f9f5..2f89555 100644
--- a/arch/arm/mach-omap2/omap4-common.c
+++ b/arch/arm/mach-omap2/omap4-common.c
@@ -44,6 +44,13 @@ void __init gic_init_irq(void)
 }
 
 #ifdef CONFIG_CACHE_L2X0
+
+static void omap4_l2x0_disable(void)
+{
+	/* Disable PL310 L2 Cache controller */
+	omap_smc1(0x102, 0x0);
+}
+
 static int __init omap_l2_cache_init(void)
 {
 	/*
@@ -70,6 +77,12 @@ static int __init omap_l2_cache_init(void)
 	else
 		l2x0_init(l2cache_base, 0x0e070000, 0xc0000fff);
 
+	/*
+	 * Override default outer_cache.disable with a OMAP4
+	 * specific one
+	 */
+	outer_cache.disable = omap4_l2x0_disable;
+
 	return 0;
 }
 early_initcall(omap_l2_cache_init);
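Note the ordering contract the OMAP4 hunk relies on: l2x0_init() installs the generic l2x0_disable() (added in cache-l2x0.c further down), so a platform override must be assigned after that call returns, exactly as omap_l2_cache_init() does with its secure-monitor service. A minimal sketch of the same pattern for a hypothetical platform whose L2 control register is secure-only; every myplat_* name, the SMC service, and the base address are invented for illustration:

    #ifdef CONFIG_CACHE_L2X0
    static void myplat_l2x0_disable(void)
    {
            /* L2X0_CTRL is writable only from the secure side here,
             * so trap to the monitor (hypothetical helper) instead. */
            myplat_smc(MYPLAT_SVC_L2_DISABLE, 0);
    }

    static int __init myplat_l2_cache_init(void)
    {
            void __iomem *base = ioremap(MYPLAT_L2CC_PHYS, SZ_4K);

            l2x0_init(base, 0x0, 0xffffffff);       /* keep h/w AUX_CTRL */

            /* Must follow l2x0_init(), which installed l2x0_disable() */
            outer_cache.disable = myplat_l2x0_disable;
            return 0;
    }
    early_initcall(myplat_l2_cache_init);
    #endif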
diff --git a/arch/arm/mach-ux500/cpu.c b/arch/arm/mach-ux500/cpu.c
index e0fd747..73fb1a5 100644
--- a/arch/arm/mach-ux500/cpu.c
+++ b/arch/arm/mach-ux500/cpu.c
@@ -10,6 +10,7 @@
 #include <linux/io.h>
 #include <linux/clk.h>
 
+#include <asm/cacheflush.h>
 #include <asm/hardware/cache-l2x0.h>
 #include <asm/hardware/gic.h>
 #include <asm/mach/map.h>
@@ -71,6 +72,46 @@ void __init ux500_init_irq(void)
 }
 
 #ifdef CONFIG_CACHE_L2X0
+static inline void ux500_cache_wait(void __iomem *reg, unsigned long mask)
+{
+	/* wait for the operation to complete */
+	while (readl(reg) & mask)
+		;
+}
+
+static inline void ux500_cache_sync(void)
+{
+	void __iomem *base = __io_address(UX500_L2CC_BASE);
+	writel(0, base + L2X0_CACHE_SYNC);
+	ux500_cache_wait(base + L2X0_CACHE_SYNC, 1);
+}
+
+/*
+ * The L2 cache cannot be turned off in the non-secure world.
+ * Dummy until a secure service is in place.
+ */
+static void ux500_l2x0_disable(void)
+{
+}
+
+/*
+ * This is only called when doing a kexec, just after turning off the L2
+ * and L1 cache, and it is surrounded by a spinlock in the generic version.
+ * However, we're not really turning off the L2 cache right now and the
+ * PL310 does not support exclusive accesses (used to implement the spinlock).
+ * So, the invalidation needs to be done without the spinlock.
+ */
+static void ux500_l2x0_inv_all(void)
+{
+	void __iomem *l2x0_base = __io_address(UX500_L2CC_BASE);
+	uint32_t l2x0_way_mask = (1<<16) - 1;	/* Bitmask of active ways */
+
+	/* invalidate all ways */
+	writel(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
+	ux500_cache_wait(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
+	ux500_cache_sync();
+}
+
 static int ux500_l2x0_init(void)
 {
 	void __iomem *l2x0_base;
@@ -80,6 +121,10 @@ static int ux500_l2x0_init(void)
 	/* 64KB way size, 8 way associativity, force WA */
 	l2x0_init(l2x0_base, 0x3e060000, 0xc0000fff);
 
+	/* Override invalidate function */
+	outer_cache.disable = ux500_l2x0_disable;
+	outer_cache.inv_all = ux500_l2x0_inv_all;
+
 	return 0;
 }
 early_initcall(ux500_l2x0_init);
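As a cross-check of the "64KB way size, 8 way associativity" comment above, the ux500 aux value decodes consistently with the sizing logic added to cache-l2x0.c below (a worked example, not code from the commit):

    uint32_t aux = 0x3e060000;
    uint32_t ws  = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17;  /* = 3 */
    uint32_t way_size_kb = 1 << (ws + 3);                      /* = 64 KB */
    /* On a PL310, AUX_CTRL bit 16 selects 16-way operation; it is
     * clear here, so 8 ways * 64 KB = 512 KB of outer cache. */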
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index a0a2928..4414a01 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -779,6 +779,14 @@ config CACHE_L2X0
 	help
 	  This option enables the L2x0 PrimeCell.
 
+config CACHE_PL310
+	bool
+	depends on CACHE_L2X0
+	default y if CPU_V7 && !CPU_V6
+	help
+	  This option enables optimisations for the PL310 cache
+	  controller.
+
 config CACHE_TAUROS2
 	bool "Enable the Tauros2 L2 cache controller"
 	depends on (ARCH_DOVE || ARCH_MMP)
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 9982eb3..170c9bb 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -28,14 +28,24 @@
 static void __iomem *l2x0_base;
 static DEFINE_SPINLOCK(l2x0_lock);
 static uint32_t l2x0_way_mask;	/* Bitmask of active ways */
+static uint32_t l2x0_size;
 
-static inline void cache_wait(void __iomem *reg, unsigned long mask)
+static inline void cache_wait_way(void __iomem *reg, unsigned long mask)
 {
-	/* wait for the operation to complete */
+	/* wait for cache operation by line or way to complete */
 	while (readl_relaxed(reg) & mask)
 		;
 }
 
+#ifdef CONFIG_CACHE_PL310
+static inline void cache_wait(void __iomem *reg, unsigned long mask)
+{
+	/* cache operations by line are atomic on PL310 */
+}
+#else
+#define cache_wait	cache_wait_way
+#endif
+
 static inline void cache_sync(void)
 {
 	void __iomem *base = l2x0_base;
@@ -103,14 +113,40 @@ static void l2x0_cache_sync(void)
 	spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
-static inline void l2x0_inv_all(void)
+static void l2x0_flush_all(void)
+{
+	unsigned long flags;
+
+	/* clean all ways */
+	spin_lock_irqsave(&l2x0_lock, flags);
+	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_INV_WAY);
+	cache_wait_way(l2x0_base + L2X0_CLEAN_INV_WAY, l2x0_way_mask);
+	cache_sync();
+	spin_unlock_irqrestore(&l2x0_lock, flags);
+}
+
+static void l2x0_clean_all(void)
+{
+	unsigned long flags;
+
+	/* clean all ways */
+	spin_lock_irqsave(&l2x0_lock, flags);
+	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_WAY);
+	cache_wait_way(l2x0_base + L2X0_CLEAN_WAY, l2x0_way_mask);
+	cache_sync();
+	spin_unlock_irqrestore(&l2x0_lock, flags);
+}
+
+static void l2x0_inv_all(void)
 {
 	unsigned long flags;
 
 	/* invalidate all ways */
 	spin_lock_irqsave(&l2x0_lock, flags);
+	/* Invalidating when L2 is enabled is a nono */
+	BUG_ON(readl(l2x0_base + L2X0_CTRL) & 1);
 	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
-	cache_wait(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
+	cache_wait_way(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
 	cache_sync();
 	spin_unlock_irqrestore(&l2x0_lock, flags);
 }
@@ -159,6 +195,11 @@ static void l2x0_clean_range(unsigned long start, unsigned long end)
 	void __iomem *base = l2x0_base;
 	unsigned long flags;
 
+	if ((end - start) >= l2x0_size) {
+		l2x0_clean_all();
+		return;
+	}
+
 	spin_lock_irqsave(&l2x0_lock, flags);
 	start &= ~(CACHE_LINE_SIZE - 1);
 	while (start < end) {
@@ -184,6 +225,11 @@ static void l2x0_flush_range(unsigned long start, unsigned long end)
 	void __iomem *base = l2x0_base;
 	unsigned long flags;
 
+	if ((end - start) >= l2x0_size) {
+		l2x0_flush_all();
+		return;
+	}
+
 	spin_lock_irqsave(&l2x0_lock, flags);
 	start &= ~(CACHE_LINE_SIZE - 1);
 	while (start < end) {
@@ -206,10 +252,20 @@ static void l2x0_flush_range(unsigned long start, unsigned long end)
 	spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
+static void l2x0_disable(void)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&l2x0_lock, flags);
+	writel(0, l2x0_base + L2X0_CTRL);
+	spin_unlock_irqrestore(&l2x0_lock, flags);
+}
+
 void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
 {
 	__u32 aux;
 	__u32 cache_id;
+	__u32 way_size = 0;
 	int ways;
 	const char *type;
 
@@ -244,6 +300,13 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
 	l2x0_way_mask = (1 << ways) - 1;
 
 	/*
+	 * L2 cache Size =  Way size * Number of ways
+	 */
+	way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17;
+	way_size = 1 << (way_size + 3);
+	l2x0_size = ways * way_size * SZ_1K;
+
+	/*
 	 * Check if l2x0 controller is already enabled.
 	 * If you are booting from non-secure mode
 	 * accessing the below registers will fault.
@@ -263,8 +326,11 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
 	outer_cache.clean_range = l2x0_clean_range;
 	outer_cache.flush_range = l2x0_flush_range;
 	outer_cache.sync = l2x0_cache_sync;
+	outer_cache.flush_all = l2x0_flush_all;
+	outer_cache.inv_all = l2x0_inv_all;
+	outer_cache.disable = l2x0_disable;
 
 	printk(KERN_INFO "%s cache controller enabled\n", type);
-	printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x\n",
-			ways, cache_id, aux);
+	printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n",
+			ways, cache_id, aux, l2x0_size);
 }
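The practical effect of the new l2x0_size bound is visible in l2x0_clean_range()/l2x0_flush_range() above: a range covering at least the whole cache becomes a single background way-based operation instead of a line-by-line walk under the spinlock. A standalone sketch mirroring the sizing arithmetic, with the mask and SZ_1K inlined and the ux500 values from this merge plugged in:

    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors the way-size arithmetic l2x0_init() now performs. */
    static uint32_t l2x0_size_bytes(uint32_t aux, int ways)
    {
            uint32_t way_size = (aux & (0x3 << 17)) >> 17; /* WAY_SIZE field */

            way_size = 1 << (way_size + 3);                /* way size in KB */
            return ways * way_size * 1024;                 /* SZ_1K */
    }

    int main(void)
    {
            /* ux500: aux 0x3e060000, 8 ways -> 524288 B (512 KB), so any
             * clean/flush of a range >= 512 KB takes the *_all() path. */
            printf("%u\n", l2x0_size_bytes(0x3e060000, 8));
            return 0;
    }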