Diffstat (limited to 'arch/sh/kernel/idle.c')
-rw-r--r-- | arch/sh/kernel/idle.c | 96
1 file changed, 77 insertions, 19 deletions
diff --git a/arch/sh/kernel/idle.c b/arch/sh/kernel/idle.c
index 27ff2dc..425d604 100644
--- a/arch/sh/kernel/idle.c
+++ b/arch/sh/kernel/idle.c
@@ -19,11 +19,11 @@
 #include <asm/pgalloc.h>
 #include <asm/system.h>
 #include <asm/atomic.h>
+#include <asm/smp.h>
+
+void (*pm_idle)(void) = NULL;
 
 static int hlt_counter;
-void (*pm_idle)(void);
-void (*pm_power_off)(void);
-EXPORT_SYMBOL(pm_power_off);
 
 static int __init nohlt_setup(char *__unused)
 {
@@ -39,52 +39,110 @@ static int __init hlt_setup(char *__unused)
 }
 __setup("hlt", hlt_setup);
 
+static inline int hlt_works(void)
+{
+	return !hlt_counter;
+}
+
+/*
+ * On SMP it's slightly faster (but much more power-consuming!)
+ * to poll the ->work.need_resched flag instead of waiting for the
+ * cross-CPU IPI to arrive. Use this option with caution.
+ */
+static void poll_idle(void)
+{
+	local_irq_enable();
+	while (!need_resched())
+		cpu_relax();
+}
+
 void default_idle(void)
 {
-	if (!hlt_counter) {
+	if (hlt_works()) {
 		clear_thread_flag(TIF_POLLING_NRFLAG);
 		smp_mb__after_clear_bit();
-		set_bl_bit();
-		stop_critical_timings();
 
-		while (!need_resched())
+		set_bl_bit();
+		if (!need_resched()) {
+			local_irq_enable();
 			cpu_sleep();
+		} else
+			local_irq_enable();
 
-		start_critical_timings();
-		clear_bl_bit();
 		set_thread_flag(TIF_POLLING_NRFLAG);
+		clear_bl_bit();
 	} else
-		while (!need_resched())
-			cpu_relax();
+		poll_idle();
 }
 
+/*
+ * The idle thread. There's no useful work to be done, so just try to conserve
+ * power and have a low exit latency (ie sit in a loop waiting for somebody to
+ * say that they'd like to reschedule)
+ */
 void cpu_idle(void)
 {
+	unsigned int cpu = smp_processor_id();
+
 	set_thread_flag(TIF_POLLING_NRFLAG);
 
 	/* endless idle loop with no priority at all */
 	while (1) {
-		void (*idle)(void) = pm_idle;
+		tick_nohz_stop_sched_tick(1);
 
-		if (!idle)
-			idle = default_idle;
+		while (!need_resched()) {
+			check_pgt_cache();
+			rmb();
 
-		tick_nohz_stop_sched_tick(1);
-		while (!need_resched())
-			idle();
-		tick_nohz_restart_sched_tick();
+			if (cpu_is_offline(cpu))
+				play_dead();
 
+			local_irq_disable();
+			/* Don't trace irqs off for idle */
+			stop_critical_timings();
+			pm_idle();
+			/*
+			 * Sanity check to ensure that pm_idle() returns
+			 * with IRQs enabled
+			 */
+			WARN_ON(irqs_disabled());
+			start_critical_timings();
+		}
+
+		tick_nohz_restart_sched_tick();
 		preempt_enable_no_resched();
 		schedule();
 		preempt_disable();
-		check_pgt_cache();
 	}
 }
 
+void __init select_idle_routine(void)
+{
+	/*
+	 * If a platform has set its own idle routine, leave it alone.
+	 */
+	if (pm_idle)
+		return;
+
+	if (hlt_works())
+		pm_idle = default_idle;
+	else
+		pm_idle = poll_idle;
+}
+
 static void do_nothing(void *unused)
 {
 }
 
+void stop_this_cpu(void *unused)
+{
+	local_irq_disable();
+	set_cpu_online(smp_processor_id(), false);
+
+	for (;;)
+		cpu_sleep();
+}
+
 /*
  * cpu_idle_wait - Used to ensure that all the CPUs discard old value of
  * pm_idle and update to new pm_idle value. Required while changing pm_idle
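Usage note: with this change, select_idle_routine() only installs a fallback; a handler installed by platform code before it runs is left alone. A minimal sketch of that override path, assuming a hypothetical board file (the names my_board_idle and my_board_setup are invented for illustration, not part of this commit):

/*
 * Hypothetical board-file sketch: installs a custom idle handler
 * before select_idle_routine() runs, so the default_idle/poll_idle
 * fallback selection above is skipped.
 */
#include <linux/init.h>
#include <linux/irqflags.h>
#include <asm/system.h>		/* cpu_sleep() */

extern void (*pm_idle)(void);

static void my_board_idle(void)
{
	/*
	 * cpu_idle() invokes pm_idle() with IRQs disabled and checks
	 * WARN_ON(irqs_disabled()) on return, so the handler must
	 * re-enable interrupts before (or while) sleeping.
	 */
	local_irq_enable();
	cpu_sleep();
}

void __init my_board_setup(void)
{
	pm_idle = my_board_idle;
}

The IRQ contract sketched here (enter with interrupts disabled, return with them enabled) is exactly what the WARN_ON(irqs_disabled()) added to cpu_idle() enforces.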