/*
 * OMAP2PLUS cpufreq driver
 *
 * CPU frequency scaling for OMAP using OPP information
 *
 * Copyright (C) 2005 Nokia Corporation
 * Written by Tony Lindgren
 *
 * Based on cpu-sa1110.c, Copyright (C) 2001 Russell King
 *
 * Copyright (C) 2007-2011 Texas Instruments, Inc.
 * Updated to support OMAP3
 * Rajendra Nayak
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/opp.h>
#include <linux/cpu.h>
#include <linux/module.h>

#include <asm/system.h>
#include <asm/smp_plat.h>
#include <asm/cpu.h>

#include <plat/clock.h>
#include <plat/omap-pm.h>
#include <plat/common.h>
#include <plat/omap_device.h>

#include <mach/hardware.h>

#include "dvfs.h"

#ifdef CONFIG_SMP
struct lpj_info {
	unsigned long	ref;
	unsigned int	freq;
};

static DEFINE_PER_CPU(struct lpj_info, lpj_ref);
static struct lpj_info global_lpj_ref;
#endif

static struct cpufreq_frequency_table *freq_table;
static atomic_t freq_table_users = ATOMIC_INIT(0);
static struct clk *mpu_clk;
static char *mpu_clk_name;
static struct device *mpu_dev;
static DEFINE_MUTEX(omap_cpufreq_lock);

static unsigned int max_thermal;
static unsigned int max_freq;
static unsigned int current_target_freq;
static bool omap_cpufreq_ready;

static unsigned int omap_getspeed(unsigned int cpu)
{
	unsigned long rate;

	if (cpu >= NR_CPUS)
		return 0;

	rate = clk_get_rate(mpu_clk) / 1000;
	return rate;
}
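/*
 * omap_cpufreq_scale() - scale the MPU to the requested frequency (in kHz),
 * clamped to the current thermal ceiling.  Callers must hold
 * omap_cpufreq_lock.  Sends the cpufreq PRE/POSTCHANGE notifications and,
 * on SMP, rescales the per-CPU and global loops_per_jiffy values, since
 * the cpufreq core does not update them for us.
 */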
static int omap_cpufreq_scale(unsigned int target_freq, unsigned int cur_freq)
{
	unsigned int i;
	int ret;
	struct cpufreq_freqs freqs;

	freqs.new = target_freq;
	freqs.old = omap_getspeed(0);

	/*
	 * If the requested frequency is higher than the thermal maximum
	 * allowed frequency, clamp the request to that ceiling before
	 * scaling the mpu device.
	 */
	if (freqs.new > max_thermal)
		freqs.new = max_thermal;

	if ((freqs.old == freqs.new) && (cur_freq == freqs.new))
		return 0;

	/* notifiers */
	for_each_online_cpu(freqs.cpu)
		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);

#ifdef CONFIG_CPU_FREQ_DEBUG
	pr_info("cpufreq-omap: transition: %u --> %u\n", freqs.old, freqs.new);
#endif

	ret = omap_device_scale(mpu_dev, mpu_dev, freqs.new * 1000);

	freqs.new = omap_getspeed(0);

#ifdef CONFIG_SMP
	/*
	 * Note that loops_per_jiffy is not updated on SMP systems in
	 * cpufreq driver. So, update the per-CPU loops_per_jiffy value
	 * on frequency transition. We need to update all dependent CPUs.
	 */
	for_each_possible_cpu(i) {
		struct lpj_info *lpj = &per_cpu(lpj_ref, i);
		if (!lpj->freq) {
			lpj->ref = per_cpu(cpu_data, i).loops_per_jiffy;
			lpj->freq = freqs.old;
		}

		per_cpu(cpu_data, i).loops_per_jiffy =
			cpufreq_scale(lpj->ref, lpj->freq, freqs.new);
	}

	/* And don't forget to adjust the global one */
	if (!global_lpj_ref.freq) {
		global_lpj_ref.ref = loops_per_jiffy;
		global_lpj_ref.freq = freqs.old;
	}
	loops_per_jiffy = cpufreq_scale(global_lpj_ref.ref, global_lpj_ref.freq,
					freqs.new);
#endif

	/* notifiers */
	for_each_online_cpu(freqs.cpu)
		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);

	return ret;
}

static unsigned int omap_thermal_lower_speed(void)
{
	unsigned int max = 0;
	unsigned int curr;
	int i;

	curr = max_thermal;

	for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++)
		if (freq_table[i].frequency > max &&
		    freq_table[i].frequency < curr)
			max = freq_table[i].frequency;

	if (!max)
		return curr;

	return max;
}

void omap_thermal_throttle(void)
{
	unsigned int cur;

	if (!omap_cpufreq_ready) {
		pr_warn_once("%s: Thermal throttle prior to CPUFREQ ready\n",
			     __func__);
		return;
	}

	mutex_lock(&omap_cpufreq_lock);

	max_thermal = omap_thermal_lower_speed();

	pr_warn("%s: temperature too high, cpu throttle at max %u\n",
		__func__, max_thermal);

	cur = omap_getspeed(0);
	if (cur > max_thermal)
		omap_cpufreq_scale(max_thermal, cur);

	mutex_unlock(&omap_cpufreq_lock);
}

void omap_thermal_unthrottle(void)
{
	unsigned int cur;

	if (!omap_cpufreq_ready)
		return;

	mutex_lock(&omap_cpufreq_lock);

	if (max_thermal == max_freq) {
		pr_warn("%s: not throttling\n", __func__);
		goto out;
	}

	max_thermal = max_freq;

	pr_warn("%s: temperature reduced, ending cpu throttling\n", __func__);

	cur = omap_getspeed(0);
	omap_cpufreq_scale(current_target_freq, cur);

out:
	mutex_unlock(&omap_cpufreq_lock);
}

static int omap_verify_speed(struct cpufreq_policy *policy)
{
	if (!freq_table)
		return -EINVAL;
	return cpufreq_frequency_table_verify(policy, freq_table);
}

static int omap_target(struct cpufreq_policy *policy,
		       unsigned int target_freq,
		       unsigned int relation)
{
	unsigned int i;
	int ret = 0;

	if (!freq_table) {
		dev_err(mpu_dev, "%s: cpu%d: no freq table!\n", __func__,
			policy->cpu);
		return -EINVAL;
	}

	ret = cpufreq_frequency_table_target(policy, freq_table, target_freq,
					     relation, &i);
	if (ret) {
		dev_dbg(mpu_dev, "%s: cpu%d: no freq match for %d(ret=%d)\n",
			__func__, policy->cpu, target_freq, ret);
		return ret;
	}

	mutex_lock(&omap_cpufreq_lock);

	current_target_freq = freq_table[i].frequency;

	ret = omap_cpufreq_scale(current_target_freq, policy->cur);

	mutex_unlock(&omap_cpufreq_lock);

	return ret;
}

static inline void freq_table_free(void)
{
	if (atomic_dec_and_test(&freq_table_users))
		opp_free_cpufreq_table(mpu_dev, &freq_table);
}
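/*
 * omap_cpu_init() - per-policy initialization.  Builds the cpufreq
 * frequency table from the MPU OPPs (shared across CPUs and reference
 * counted via freq_table_users) and, on SMP, places all CPUs in the
 * same policy because they share one clock and voltage domain.
 */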
static int __cpuinit omap_cpu_init(struct cpufreq_policy *policy)
{
	int result = 0;
	int i;

	mpu_clk = clk_get(NULL, mpu_clk_name);
	if (IS_ERR(mpu_clk))
		return PTR_ERR(mpu_clk);

	if (policy->cpu >= NR_CPUS) {
		result = -EINVAL;
		goto fail_ck;
	}

	policy->cur = policy->min = policy->max = omap_getspeed(policy->cpu);

	if (atomic_inc_return(&freq_table_users) == 1)
		result = opp_init_cpufreq_table(mpu_dev, &freq_table);

	if (result) {
		dev_err(mpu_dev, "%s: cpu%d: failed creating freq table[%d]\n",
			__func__, policy->cpu, result);
		goto fail_ck;
	}

	result = cpufreq_frequency_table_cpuinfo(policy, freq_table);
	if (result)
		goto fail_table;

	cpufreq_frequency_table_get_attr(freq_table, policy->cpu);

	policy->min = policy->cpuinfo.min_freq;
	policy->max = policy->cpuinfo.max_freq;
	policy->cur = omap_getspeed(policy->cpu);

	for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++)
		max_freq = max(freq_table[i].frequency, max_freq);
	max_thermal = max_freq;

	/*
	 * On OMAP SMP configurations, both processors share the voltage
	 * and clock. So both CPUs need to be scaled together and hence
	 * need software co-ordination. Use the cpufreq affected_cpus
	 * interface to handle this scenario. The additional is_smp()
	 * check is to keep SMP_ON_UP builds working.
	 */
	if (is_smp()) {
		policy->shared_type = CPUFREQ_SHARED_TYPE_ANY;
		cpumask_setall(policy->cpus);
	}

	/* FIXME: what's the actual transition time? */
	policy->cpuinfo.transition_latency = 300 * 1000;

	return 0;

fail_table:
	freq_table_free();
fail_ck:
	clk_put(mpu_clk);
	return result;
}

static int omap_cpu_exit(struct cpufreq_policy *policy)
{
	freq_table_free();
	clk_put(mpu_clk);
	return 0;
}

static struct freq_attr *omap_cpufreq_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	NULL,
};

static struct cpufreq_driver omap_driver = {
	.flags		= CPUFREQ_STICKY,
	.verify		= omap_verify_speed,
	.target		= omap_target,
	.get		= omap_getspeed,
	.init		= omap_cpu_init,
	.exit		= omap_cpu_exit,
	.name		= "omap2plus",
	.attr		= omap_cpufreq_attr,
};

static int __init omap_cpufreq_init(void)
{
	int ret;

	if (cpu_is_omap24xx())
		mpu_clk_name = "virt_prcm_set";
	else if (cpu_is_omap34xx())
		mpu_clk_name = "dpll1_ck";
	else if (cpu_is_omap443x())
		mpu_clk_name = "dpll_mpu_ck";
	else if (cpu_is_omap446x())
		mpu_clk_name = "virt_dpll_mpu_ck";

	if (!mpu_clk_name) {
		pr_err("%s: unsupported Silicon?\n", __func__);
		return -EINVAL;
	}

	mpu_dev = omap2_get_mpuss_device();
	if (!mpu_dev) {
		pr_warning("%s: unable to get the mpu device\n", __func__);
		return -EINVAL;
	}

	ret = cpufreq_register_driver(&omap_driver);
	omap_cpufreq_ready = !ret;

	return ret;
}

static void __exit omap_cpufreq_exit(void)
{
	cpufreq_unregister_driver(&omap_driver);
}

MODULE_DESCRIPTION("cpufreq driver for OMAP2PLUS SOCs");
MODULE_LICENSE("GPL");

late_initcall(omap_cpufreq_init);
module_exit(omap_cpufreq_exit);