author     Dave Martin <dave.martin@linaro.org>      2012-07-17 14:25:42 +0100
committer  Nicolas Pitre <nicolas.pitre@linaro.org>  2013-04-24 10:37:00 -0400
commit     7fe31d28e839f9565c8176ec584676a045970802
tree       c3fcc8b99461e1fbff13eb08f8dbec891dc1baa4 /arch/arm/common
parent     7c2b860534d02d11923dd0504b961f21508173f1
ARM: mcpm: introduce helpers for platform coherency exit/setup
This provides helper methods to coordinate between CPUs coming down
and CPUs going up, as well as documentation of the algorithms used,
so that cluster teardown and setup operations are not performed on
the same cluster simultaneously.
For use in the power_down() implementation (a sketch of a typical
calling sequence follows the list):
* __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster)
* __mcpm_outbound_enter_critical(unsigned int cpu, unsigned int cluster)
* __mcpm_outbound_leave_critical(unsigned int cluster, int state)
* __mcpm_cpu_down(unsigned int cpu, unsigned int cluster)
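For illustration only (not part of this patch), a platform's power_down()
method might use these helpers roughly as follows; the my_plat_*()
functions and the last-man decision are hypothetical placeholders for
platform-specific code:

	static void my_plat_power_down(unsigned int cpu, unsigned int cluster)
	{
		bool last_man;

		/* Commit to tearing this CPU down; its cache is still on here. */
		__mcpm_cpu_going_down(cpu, cluster);

		/* Platform-specific: is this the last CPU still up in the cluster? */
		last_man = my_plat_is_last_man(cpu, cluster);		/* hypothetical */

		if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
			/* Last man won the race: tear down the whole cluster. */
			my_plat_exit_coherency_flush_all();		/* hypothetical */
			__mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
		} else {
			/* An inbound CPU was seen, or we are not last: CPU teardown only. */
			my_plat_exit_coherency_flush_local();		/* hypothetical */
		}

		/* The CPU cache is off by this point. */
		__mcpm_cpu_down(cpu, cluster);

		/* Platform-specific: actually remove power from this CPU. */
	}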
The power_up_setup() helper should do platform-specific setup in
preparation for turning the CPU on, such as invalidating local caches
or entering coherency. It must be assembler for now, since it must
run before the MMU can be switched on. It is passed the affinity level
for which initialization should be performed.
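For reference, the hook is passed to mcpm_sync_init() (added to
mcpm_entry.c below), which records its physical address so that
mcpm_head.S can call it with the MMU off. A minimal registration
sketch, with the platform symbol names being hypothetical:

	/* Implemented in assembler, e.g. in the platform's setup .S file: */
	extern void my_plat_power_up_setup(unsigned int affinity_level);

	static int __init my_plat_mcpm_init(void)
	{
		/*
		 * mcpm_entry_point invokes the hook with affinity_level == 0
		 * for per-CPU setup on every CPU coming up and, on the CPU
		 * that finds the cluster down (the first man), with
		 * affinity_level == 1 for cluster-level setup.
		 */
		return mcpm_sync_init(my_plat_power_up_setup);
	}
	early_initcall(my_plat_mcpm_init);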
Because the mcpm_sync_struct content is looked up and modified
with the cache enabled or disabled depending on the code path, it is
crucial to always ensure proper cache maintenance to update main memory
right away. The sync_cache_*() helpers are used to that end.

Also, in order to prevent a cached writer from interfering with an
adjacent non-cached writer, we ensure each state variable is located in
a separate cache line.
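The resulting pattern, condensed from __mcpm_cpu_going_down() and
__mcpm_outbound_enter_critical() in the patch below: the writer updates
the state variable and immediately pushes it to main memory with
sync_cache_w(), while a reader refetches it with sync_cache_r() before
looking at it:

	/* Writer side (may run with the cache on): push the update to memory. */
	mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_GOING_DOWN;
	sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);

	/* Reader side (may run with the cache off or stale): refetch first. */
	sync_cache_r(&c->inbound);
	if (c->inbound == INBOUND_COMING_UP)
		goto abort;

The one-variable-per-cache-line layout itself is enforced by
__aligned(__CACHE_WRITEBACK_GRANULE) annotations on the struct members
defined in asm/mcpm.h, which is outside this diffstat; mcpm_sync_init()
below only sanity-checks the overall size and alignment.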
Thanks to Nicolas Pitre and Achin Gupta for the help with this
patch.
Signed-off-by: Dave Martin <dave.martin@linaro.org>
Signed-off-by: Nicolas Pitre <nico@linaro.org>
Reviewed-by: Will Deacon <will.deacon@arm.com>
Diffstat (limited to 'arch/arm/common')
-rw-r--r--  arch/arm/common/mcpm_entry.c  150
-rw-r--r--  arch/arm/common/mcpm_head.S   106
2 files changed, 254 insertions(+), 2 deletions(-)
diff --git a/arch/arm/common/mcpm_entry.c b/arch/arm/common/mcpm_entry.c
index 5d72889..370236d 100644
--- a/arch/arm/common/mcpm_entry.c
+++ b/arch/arm/common/mcpm_entry.c
@@ -16,6 +16,7 @@
 #include <asm/mcpm.h>
 #include <asm/cacheflush.h>
 #include <asm/idmap.h>
+#include <asm/cputype.h>
 
 extern unsigned long mcpm_entry_vectors[MAX_NR_CLUSTERS][MAX_CPUS_PER_CLUSTER];
 
@@ -111,3 +112,152 @@ int mcpm_cpu_powered_up(void)
 	platform_ops->powered_up();
 	return 0;
 }
+
+struct sync_struct mcpm_sync;
+
+/*
+ * __mcpm_cpu_going_down: Indicates that the cpu is being torn down.
+ *    This must be called at the point of committing to teardown of a CPU.
+ *    The CPU cache (SCTRL.C bit) is expected to still be active.
+ */
+void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster)
+{
+	mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_GOING_DOWN;
+	sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
+}
+
+/*
+ * __mcpm_cpu_down: Indicates that cpu teardown is complete and that the
+ *    cluster can be torn down without disrupting this CPU.
+ *    To avoid deadlocks, this must be called before a CPU is powered down.
+ *    The CPU cache (SCTRL.C bit) is expected to be off.
+ *    However L2 cache might or might not be active.
+ */
+void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster)
+{
+	dmb();
+	mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_DOWN;
+	sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
+	dsb_sev();
+}
+
+/*
+ * __mcpm_outbound_leave_critical: Leave the cluster teardown critical section.
+ * @state: the final state of the cluster:
+ *     CLUSTER_UP: no destructive teardown was done and the cluster has been
+ *         restored to the previous state (CPU cache still active); or
+ *     CLUSTER_DOWN: the cluster has been torn-down, ready for power-off
+ *         (CPU cache disabled, L2 cache either enabled or disabled).
+ */
+void __mcpm_outbound_leave_critical(unsigned int cluster, int state)
+{
+	dmb();
+	mcpm_sync.clusters[cluster].cluster = state;
+	sync_cache_w(&mcpm_sync.clusters[cluster].cluster);
+	dsb_sev();
+}
+
+/*
+ * __mcpm_outbound_enter_critical: Enter the cluster teardown critical section.
+ * This function should be called by the last man, after local CPU teardown
+ * is complete.  CPU cache expected to be active.
+ *
+ * Returns:
+ *     false: the critical section was not entered because an inbound CPU was
+ *         observed, or the cluster is already being set up;
+ *     true: the critical section was entered: it is now safe to tear down the
+ *         cluster.
+ */
+bool __mcpm_outbound_enter_critical(unsigned int cpu, unsigned int cluster)
+{
+	unsigned int i;
+	struct mcpm_sync_struct *c = &mcpm_sync.clusters[cluster];
+
+	/* Warn inbound CPUs that the cluster is being torn down: */
+	c->cluster = CLUSTER_GOING_DOWN;
+	sync_cache_w(&c->cluster);
+
+	/* Back out if the inbound cluster is already in the critical region: */
+	sync_cache_r(&c->inbound);
+	if (c->inbound == INBOUND_COMING_UP)
+		goto abort;
+
+	/*
+	 * Wait for all CPUs to get out of the GOING_DOWN state, so that local
+	 * teardown is complete on each CPU before tearing down the cluster.
+	 *
+	 * If any CPU has been woken up again from the DOWN state, then we
+	 * shouldn't be taking the cluster down at all: abort in that case.
+	 */
+	sync_cache_r(&c->cpus);
+	for (i = 0; i < MAX_CPUS_PER_CLUSTER; i++) {
+		int cpustate;
+
+		if (i == cpu)
+			continue;
+
+		while (1) {
+			cpustate = c->cpus[i].cpu;
+			if (cpustate != CPU_GOING_DOWN)
+				break;
+
+			wfe();
+			sync_cache_r(&c->cpus[i].cpu);
+		}
+
+		switch (cpustate) {
+		case CPU_DOWN:
+			continue;
+
+		default:
+			goto abort;
+		}
+	}
+
+	return true;
+
+abort:
+	__mcpm_outbound_leave_critical(cluster, CLUSTER_UP);
+	return false;
+}
+
+int __mcpm_cluster_state(unsigned int cluster)
+{
+	sync_cache_r(&mcpm_sync.clusters[cluster].cluster);
+	return mcpm_sync.clusters[cluster].cluster;
+}
+
+extern unsigned long mcpm_power_up_setup_phys;
+
+int __init mcpm_sync_init(
+	void (*power_up_setup)(unsigned int affinity_level))
+{
+	unsigned int i, j, mpidr, this_cluster;
+
+	BUILD_BUG_ON(MCPM_SYNC_CLUSTER_SIZE * MAX_NR_CLUSTERS != sizeof mcpm_sync);
+	BUG_ON((unsigned long)&mcpm_sync & (__CACHE_WRITEBACK_GRANULE - 1));
+
+	/*
+	 * Set initial CPU and cluster states.
+	 * Only one cluster is assumed to be active at this point.
+	 */
+	for (i = 0; i < MAX_NR_CLUSTERS; i++) {
+		mcpm_sync.clusters[i].cluster = CLUSTER_DOWN;
+		mcpm_sync.clusters[i].inbound = INBOUND_NOT_COMING_UP;
+		for (j = 0; j < MAX_CPUS_PER_CLUSTER; j++)
+			mcpm_sync.clusters[i].cpus[j].cpu = CPU_DOWN;
+	}
+	mpidr = read_cpuid_mpidr();
+	this_cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
+	for_each_online_cpu(i)
+		mcpm_sync.clusters[this_cluster].cpus[i].cpu = CPU_UP;
+	mcpm_sync.clusters[this_cluster].cluster = CLUSTER_UP;
+	sync_cache_w(&mcpm_sync);
+
+	if (power_up_setup) {
+		mcpm_power_up_setup_phys = virt_to_phys(power_up_setup);
+		sync_cache_w(&mcpm_power_up_setup_phys);
+	}
+
+	return 0;
+}
diff --git a/arch/arm/common/mcpm_head.S b/arch/arm/common/mcpm_head.S
index 68c9903..7d729bd 100644
--- a/arch/arm/common/mcpm_head.S
+++ b/arch/arm/common/mcpm_head.S
@@ -7,11 +7,19 @@
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
+ *
+ *
+ * Refer to Documentation/arm/cluster-pm-race-avoidance.txt
+ * for details of the synchronisation algorithms used here.
  */
 
 #include <linux/linkage.h>
 #include <asm/mcpm.h>
 
+.if MCPM_SYNC_CLUSTER_CPUS
+.error "cpus must be the first member of struct mcpm_sync_struct"
+.endif
+
 .macro	pr_dbg	string
 #if defined(CONFIG_DEBUG_LL) && defined(DEBUG)
 	b	1901f
@@ -57,24 +65,114 @@ ENTRY(mcpm_entry_point)
 2:	pr_dbg	"kernel mcpm_entry_point\n"
 
 	/*
-	 * MMU is off so we need to get to mcpm_entry_vectors in a
+	 * MMU is off so we need to get to various variables in a
 	 * position independent way.
 	 */
 	adr	r5, 3f
-	ldr	r6, [r5]
+	ldmia	r5, {r6, r7, r8}
 	add	r6, r5, r6		@ r6 = mcpm_entry_vectors
+	ldr	r7, [r5, r7]		@ r7 = mcpm_power_up_setup_phys
+	add	r8, r5, r8		@ r8 = mcpm_sync
+
+	mov	r0, #MCPM_SYNC_CLUSTER_SIZE
+	mla	r8, r0, r10, r8		@ r8 = sync cluster base
+
+	@ Signal that this CPU is coming UP:
+	mov	r0, #CPU_COMING_UP
+	mov	r5, #MCPM_SYNC_CPU_SIZE
+	mla	r5, r9, r5, r8		@ r5 = sync cpu address
+	strb	r0, [r5]
+
+	@ At this point, the cluster cannot unexpectedly enter the GOING_DOWN
+	@ state, because there is at least one active CPU (this CPU).
+
+	@ Note: the following is racy as another CPU might be testing
+	@ the same flag at the same moment.  That'll be fixed later.
+	ldrb	r0, [r8, #MCPM_SYNC_CLUSTER_CLUSTER]
+	cmp	r0, #CLUSTER_UP		@ cluster already up?
+	bne	mcpm_setup		@ if not, set up the cluster
+
+	@ Otherwise, skip setup:
+	b	mcpm_setup_complete
+
+mcpm_setup:
+	@ Control dependency implies strb not observable before previous ldrb.
+
+	@ Signal that the cluster is being brought up:
+	mov	r0, #INBOUND_COMING_UP
+	strb	r0, [r8, #MCPM_SYNC_CLUSTER_INBOUND]
+	dmb
+
+	@ Any CPU trying to take the cluster into CLUSTER_GOING_DOWN from this
+	@ point onwards will observe INBOUND_COMING_UP and abort.
+
+	@ Wait for any previously-pending cluster teardown operations to abort
+	@ or complete:
+mcpm_teardown_wait:
+	ldrb	r0, [r8, #MCPM_SYNC_CLUSTER_CLUSTER]
+	cmp	r0, #CLUSTER_GOING_DOWN
+	bne	first_man_setup
+	wfe
+	b	mcpm_teardown_wait
+
+first_man_setup:
+	dmb
+
+	@ If the outbound gave up before teardown started, skip cluster setup:
+
+	cmp	r0, #CLUSTER_UP
+	beq	mcpm_setup_leave
+
+	@ power_up_setup is now responsible for setting up the cluster:
+
+	cmp	r7, #0
+	mov	r0, #1		@ second (cluster) affinity level
+	blxne	r7		@ Call power_up_setup if defined
+	dmb
+
+	mov	r0, #CLUSTER_UP
+	strb	r0, [r8, #MCPM_SYNC_CLUSTER_CLUSTER]
+	dmb
+
+mcpm_setup_leave:
+	@ Leave the cluster setup critical section:
+
+	mov	r0, #INBOUND_NOT_COMING_UP
+	strb	r0, [r8, #MCPM_SYNC_CLUSTER_INBOUND]
+	dsb
+	sev
+
+mcpm_setup_complete:
+	@ If a platform-specific CPU setup hook is needed, it is
+	@ called from here.
+
+	cmp	r7, #0
+	mov	r0, #0		@ first (CPU) affinity level
+	blxne	r7		@ Call power_up_setup if defined
+	dmb
+
+	@ Mark the CPU as up:
+
+	mov	r0, #CPU_UP
+	strb	r0, [r5]
+
+	@ Observability order of CPU_UP and opening of the gate does not matter.
 mcpm_entry_gated:
 	ldr	r5, [r6, r4, lsl #2]		@ r5 = CPU entry vector
 	cmp	r5, #0
 	wfeeq
 	beq	mcpm_entry_gated
+	dmb
+
 	pr_dbg	"released\n"
 	bx	r5
 
 	.align	2
 
 3:	.word	mcpm_entry_vectors - .
+	.word	mcpm_power_up_setup_phys - 3b
+	.word	mcpm_sync - 3b
 
 ENDPROC(mcpm_entry_point)
 
@@ -84,3 +182,7 @@ ENDPROC(mcpm_entry_point)
 	.type	mcpm_entry_vectors, #object
 ENTRY(mcpm_entry_vectors)
 	.space	4 * MAX_NR_CLUSTERS * MAX_CPUS_PER_CLUSTER
+
+	.type	mcpm_power_up_setup_phys, #object
+ENTRY(mcpm_power_up_setup_phys)
+	.space	4		@ set by mcpm_sync_init()