Diffstat (limited to 'drivers')
-rw-r--r--  drivers/base/power/opp.c           |  17
-rw-r--r--  drivers/cpufreq/Kconfig            |  33
-rw-r--r--  drivers/cpufreq/Makefile           |   1
-rw-r--r--  drivers/cpufreq/cpufreq_hotplug.c  | 705
-rw-r--r--  drivers/cpufreq/freq_table.c       |  73
-rw-r--r--  drivers/gpio/gpio-omap.c           |  10
-rw-r--r--  drivers/i2c/busses/i2c-omap.c      |  10
-rw-r--r--  drivers/tty/serial/omap-serial.c   |  49
8 files changed, 892 insertions(+), 6 deletions(-)
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index 56a6899..5cc1232 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -625,4 +625,21 @@ int opp_init_cpufreq_table(struct device *dev,
return 0;
}
+
+/**
+ * opp_free_cpufreq_table() - free the cpufreq table
+ * @dev: device for which we do this operation
+ * @table: table to free
+ *
+ * Free up the table allocated by opp_init_cpufreq_table
+ */
+void opp_free_cpufreq_table(struct device *dev,
+ struct cpufreq_frequency_table **table)
+{
+ if (!table)
+ return;
+
+ kfree(*table);
+ *table = NULL;
+}
#endif /* CONFIG_CPU_FREQ */
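
The new helper pairs with opp_init_cpufreq_table(). A minimal usage sketch follows (not part of this patch; the callback names and the CPU-device lookup are assumptions for illustration -- OMAP platforms of this era used their own MPU device getter):

	/* hypothetical cpufreq driver glue; the device lookup is an assumption */
	static struct cpufreq_frequency_table *freq_table;

	static int example_cpufreq_init(struct cpufreq_policy *policy)
	{
		struct device *mpu_dev = get_cpu_device(policy->cpu);
		int ret;

		ret = opp_init_cpufreq_table(mpu_dev, &freq_table);
		if (ret)
			return ret;

		/* hand the OPP-derived table to the cpufreq core */
		return cpufreq_frequency_table_cpuinfo(policy, freq_table);
	}

	static int example_cpufreq_exit(struct cpufreq_policy *policy)
	{
		/* frees the table and NULLs the pointer, so a second call is safe */
		opp_free_cpufreq_table(get_cpu_device(policy->cpu), &freq_table);
		return 0;
	}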
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 9fb8485..07711d1 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -99,6 +99,19 @@ config CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
Be aware that not all cpufreq drivers support the conservative
governor. If unsure have a look at the help section of the
driver. Fallback governor will be the performance governor.
+
+config CPU_FREQ_DEFAULT_GOV_HOTPLUG
+ bool "hotplug"
+ select CPU_FREQ_GOV_HOTPLUG
+ select CPU_FREQ_GOV_PERFORMANCE
+ help
+ Use the CPUFreq governor 'hotplug' as default. This allows you
+ to get a full dynamic frequency capable system with CPU
+ hotplug support by simply loading your cpufreq low-level
+ hardware driver. Be aware that not all cpufreq drivers
+ support the hotplug governor. If unsure have a look at
+ the help section of the driver. Fallback governor will be the
+ performance governor.
endchoice
config CPU_FREQ_GOV_PERFORMANCE
@@ -184,5 +197,25 @@ depends on X86
source "drivers/cpufreq/Kconfig.x86"
endmenu
+config CPU_FREQ_GOV_HOTPLUG
+ tristate "'hotplug' cpufreq governor"
+ depends on CPU_FREQ && NO_HZ && HOTPLUG_CPU
+ help
+ 'hotplug' - this driver mimics the frequency scaling behavior
+ in 'ondemand', but with several key differences. The first is
+ that frequency transitions use the CPUFreq table directly,
+ instead of incrementing by a percentage of the maximum
+ available frequency. Second, 'hotplug' will offline auxiliary
+ CPUs when the system is idle, and online those CPUs once the
+ system becomes busy again. This last feature is needed for
+ architectures which transition to low power states when only
+ the "master" CPU is online, or for thermally constrained
+ devices.
+
+ If you don't have one of these architectures or devices, use
+ 'ondemand' instead.
+
+ If in doubt, say N.
+
endif
endmenu
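
For illustration, a defconfig fragment selecting this governor and its stated dependencies might look like this (a hedged example; the exact set of symbols depends on the rest of the board configuration):

	CONFIG_CPU_FREQ=y
	CONFIG_CPU_FREQ_TABLE=y
	CONFIG_NO_HZ=y
	CONFIG_HOTPLUG_CPU=y
	CONFIG_CPU_FREQ_GOV_HOTPLUG=y
	CONFIG_CPU_FREQ_DEFAULT_GOV_HOTPLUG=y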
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index e2fc2d2..6d6a35c 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_CPU_FREQ_GOV_POWERSAVE) += cpufreq_powersave.o
obj-$(CONFIG_CPU_FREQ_GOV_USERSPACE) += cpufreq_userspace.o
obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND) += cpufreq_ondemand.o
obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o
+obj-$(CONFIG_CPU_FREQ_GOV_HOTPLUG) += cpufreq_hotplug.o
# CPUfreq cross-arch helpers
obj-$(CONFIG_CPU_FREQ_TABLE) += freq_table.o
diff --git a/drivers/cpufreq/cpufreq_hotplug.c b/drivers/cpufreq/cpufreq_hotplug.c
new file mode 100644
index 0000000..85aa6d2
--- /dev/null
+++ b/drivers/cpufreq/cpufreq_hotplug.c
@@ -0,0 +1,705 @@
+/*
+ * CPUFreq hotplug governor
+ *
+ * Copyright (C) 2010 Texas Instruments, Inc.
+ * Mike Turquette <mturquette@ti.com>
+ * Santosh Shilimkar <santosh.shilimkar@ti.com>
+ *
+ * Based on ondemand governor
+ * Copyright (C) 2001 Russell King
+ * (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>,
+ * Jun Nakajima <jun.nakajima@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/cpufreq.h>
+#include <linux/cpu.h>
+#include <linux/jiffies.h>
+#include <linux/kernel_stat.h>
+#include <linux/mutex.h>
+#include <linux/hrtimer.h>
+#include <linux/tick.h>
+#include <linux/ktime.h>
+#include <linux/sched.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+
+/* greater than 80% avg load across online CPUs increases frequency */
+#define DEFAULT_UP_FREQ_MIN_LOAD (80)
+
+/* less than 20% avg load across online CPUs decreases frequency */
+#define DEFAULT_DOWN_FREQ_MAX_LOAD (20)
+
+/* default sampling period (uSec) is bogus; 10x ondemand's default for x86 */
+#define DEFAULT_SAMPLING_PERIOD (100000)
+
+/* default number of sampling periods to average before hotplug-in decision */
+#define DEFAULT_HOTPLUG_IN_SAMPLING_PERIODS (5)
+
+/* default number of sampling periods to average before hotplug-out decision */
+#define DEFAULT_HOTPLUG_OUT_SAMPLING_PERIODS (20)
+
+static void do_dbs_timer(struct work_struct *work);
+static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
+ unsigned int event);
+
+#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_HOTPLUG
+static
+#endif
+struct cpufreq_governor cpufreq_gov_hotplug = {
+ .name = "hotplug",
+ .governor = cpufreq_governor_dbs,
+ .owner = THIS_MODULE,
+};
+
+struct cpu_dbs_info_s {
+ cputime64_t prev_cpu_idle;
+ cputime64_t prev_cpu_wall;
+ cputime64_t prev_cpu_nice;
+ struct cpufreq_policy *cur_policy;
+ struct delayed_work work;
+ struct cpufreq_frequency_table *freq_table;
+ int cpu;
+ /*
+ * percpu mutex that serializes governor limit change with
+ * do_dbs_timer invocation. We do not want do_dbs_timer to run
+ * when user is changing the governor or limits.
+ */
+ struct mutex timer_mutex;
+};
+static DEFINE_PER_CPU(struct cpu_dbs_info_s, hp_cpu_dbs_info);
+
+static unsigned int dbs_enable; /* number of CPUs using this policy */
+
+/*
+ * dbs_mutex protects data in dbs_tuners_ins from concurrent changes on
+ * different CPUs. It protects dbs_enable in governor start/stop.
+ */
+static DEFINE_MUTEX(dbs_mutex);
+
+static struct workqueue_struct *khotplug_wq;
+
+static struct dbs_tuners {
+ unsigned int sampling_rate;
+ unsigned int up_threshold;
+ unsigned int down_threshold;
+ unsigned int hotplug_in_sampling_periods;
+ unsigned int hotplug_out_sampling_periods;
+ unsigned int hotplug_load_index;
+ unsigned int *hotplug_load_history;
+ unsigned int ignore_nice;
+ unsigned int io_is_busy;
+} dbs_tuners_ins = {
+ .sampling_rate = DEFAULT_SAMPLING_PERIOD,
+ .up_threshold = DEFAULT_UP_FREQ_MIN_LOAD,
+ .down_threshold = DEFAULT_DOWN_FREQ_MAX_LOAD,
+ .hotplug_in_sampling_periods = DEFAULT_HOTPLUG_IN_SAMPLING_PERIODS,
+ .hotplug_out_sampling_periods = DEFAULT_HOTPLUG_OUT_SAMPLING_PERIODS,
+ .hotplug_load_index = 0,
+ .ignore_nice = 0,
+ .io_is_busy = 0,
+};
+
+/*
+ * A corner case exists when switching io_is_busy at run-time: comparing idle
+ * times from a non-io_is_busy period to an io_is_busy period (or vice-versa)
+ * will misrepresent the actual change in system idleness. We ignore this
+ * corner case: enabling io_is_busy might cause freq increase and disabling
+ * might cause freq decrease, which probably matches the original intent.
+ */
+static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
+{
+ u64 idle_time;
+ u64 iowait_time;
+
+ /* cpufreq-hotplug always assumes CONFIG_NO_HZ */
+ idle_time = get_cpu_idle_time_us(cpu, wall);
+
+ /* when io_is_busy is set, treat iowait as busy time, not idle */
+ if (dbs_tuners_ins.io_is_busy) {
+ iowait_time = get_cpu_iowait_time_us(cpu, wall);
+ /* cpufreq-hotplug always assumes CONFIG_NO_HZ */
+ if (iowait_time != -1ULL && idle_time >= iowait_time)
+ idle_time -= iowait_time;
+ }
+
+ return idle_time;
+}
+
+/************************** sysfs interface ************************/
+
+/* XXX look at global sysfs macros in cpufreq.h, can those be used here? */
+
+/* cpufreq_hotplug Governor Tunables */
+#define show_one(file_name, object) \
+static ssize_t show_##file_name \
+(struct kobject *kobj, struct attribute *attr, char *buf) \
+{ \
+ return sprintf(buf, "%u\n", dbs_tuners_ins.object); \
+}
+show_one(sampling_rate, sampling_rate);
+show_one(up_threshold, up_threshold);
+show_one(down_threshold, down_threshold);
+show_one(hotplug_in_sampling_periods, hotplug_in_sampling_periods);
+show_one(hotplug_out_sampling_periods, hotplug_out_sampling_periods);
+show_one(ignore_nice_load, ignore_nice);
+show_one(io_is_busy, io_is_busy);
+
+static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
+ const char *buf, size_t count)
+{
+ unsigned int input;
+ int ret;
+ ret = sscanf(buf, "%u", &input);
+ if (ret != 1)
+ return -EINVAL;
+
+ mutex_lock(&dbs_mutex);
+ dbs_tuners_ins.sampling_rate = input;
+ mutex_unlock(&dbs_mutex);
+
+ return count;
+}
+
+static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
+ const char *buf, size_t count)
+{
+ unsigned int input;
+ int ret;
+ ret = sscanf(buf, "%u", &input);
+
+ if (ret != 1 || input <= dbs_tuners_ins.down_threshold)
+ return -EINVAL;
+
+ mutex_lock(&dbs_mutex);
+ dbs_tuners_ins.up_threshold = input;
+ mutex_unlock(&dbs_mutex);
+
+ return count;
+}
+
+static ssize_t store_down_threshold(struct kobject *a, struct attribute *b,
+ const char *buf, size_t count)
+{
+ unsigned int input;
+ int ret;
+ ret = sscanf(buf, "%u", &input);
+
+ if (ret != 1 || input >= dbs_tuners_ins.up_threshold)
+ return -EINVAL;
+
+ mutex_lock(&dbs_mutex);
+ dbs_tuners_ins.down_threshold = input;
+ mutex_unlock(&dbs_mutex);
+
+ return count;
+}
+
+static ssize_t store_hotplug_in_sampling_periods(struct kobject *a,
+ struct attribute *b, const char *buf, size_t count)
+{
+ unsigned int input;
+ unsigned int *temp;
+ unsigned int max_windows;
+ int ret;
+ ret = sscanf(buf, "%u", &input);
+
+ if (ret != 1)
+ return -EINVAL;
+
+ /* already using this value, bail out */
+ if (input == dbs_tuners_ins.hotplug_in_sampling_periods)
+ return count;
+
+ mutex_lock(&dbs_mutex);
+ ret = count;
+ max_windows = max(dbs_tuners_ins.hotplug_in_sampling_periods,
+ dbs_tuners_ins.hotplug_out_sampling_periods);
+
+ /* no need to resize array */
+ if (input <= max_windows) {
+ dbs_tuners_ins.hotplug_in_sampling_periods = input;
+ goto out;
+ }
+
+ /* resize array */
+ temp = kmalloc((sizeof(unsigned int) * input), GFP_KERNEL);
+
+ if (!temp) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ memcpy(temp, dbs_tuners_ins.hotplug_load_history,
+ (max_windows * sizeof(unsigned int)));
+ kfree(dbs_tuners_ins.hotplug_load_history);
+
+ /* replace old buffer, old number of sampling periods & old index */
+ dbs_tuners_ins.hotplug_load_history = temp;
+ dbs_tuners_ins.hotplug_in_sampling_periods = input;
+ dbs_tuners_ins.hotplug_load_index = max_windows;
+out:
+ mutex_unlock(&dbs_mutex);
+
+ return ret;
+}
+
+static ssize_t store_hotplug_out_sampling_periods(struct kobject *a,
+ struct attribute *b, const char *buf, size_t count)
+{
+ unsigned int input;
+ unsigned int *temp;
+ unsigned int max_windows;
+ int ret;
+ ret = sscanf(buf, "%u", &input);
+
+ if (ret != 1)
+ return -EINVAL;
+
+ /* already using this value, bail out */
+ if (input == dbs_tuners_ins.hotplug_out_sampling_periods)
+ return count;
+
+ mutex_lock(&dbs_mutex);
+ ret = count;
+ max_windows = max(dbs_tuners_ins.hotplug_in_sampling_periods,
+ dbs_tuners_ins.hotplug_out_sampling_periods);
+
+ /* no need to resize array */
+ if (input <= max_windows) {
+ dbs_tuners_ins.hotplug_out_sampling_periods = input;
+ goto out;
+ }
+
+ /* resize array */
+ temp = kmalloc((sizeof(unsigned int) * input), GFP_KERNEL);
+
+ if (!temp) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ memcpy(temp, dbs_tuners_ins.hotplug_load_history,
+ (max_windows * sizeof(unsigned int)));
+ kfree(dbs_tuners_ins.hotplug_load_history);
+
+ /* replace old buffer, old number of sampling periods & old index */
+ dbs_tuners_ins.hotplug_load_history = temp;
+ dbs_tuners_ins.hotplug_out_sampling_periods = input;
+ dbs_tuners_ins.hotplug_load_index = max_windows;
+out:
+ mutex_unlock(&dbs_mutex);
+
+ return ret;
+}
+
+static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
+ const char *buf, size_t count)
+{
+ unsigned int input;
+ int ret;
+
+ unsigned int j;
+
+ ret = sscanf(buf, "%u", &input);
+ if (ret != 1)
+ return -EINVAL;
+
+ if (input > 1)
+ input = 1;
+
+ mutex_lock(&dbs_mutex);
+ if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
+ mutex_unlock(&dbs_mutex);
+ return count;
+ }
+ dbs_tuners_ins.ignore_nice = input;
+
+ /* we need to re-evaluate prev_cpu_idle */
+ for_each_online_cpu(j) {
+ struct cpu_dbs_info_s *dbs_info;
+ dbs_info = &per_cpu(hp_cpu_dbs_info, j);
+ dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
+ &dbs_info->prev_cpu_wall);
+ if (dbs_tuners_ins.ignore_nice)
+ dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
+
+ }
+ mutex_unlock(&dbs_mutex);
+
+ return count;
+}
+
+static ssize_t store_io_is_busy(struct kobject *a, struct attribute *b,
+ const char *buf, size_t count)
+{
+ unsigned int input;
+ int ret;
+
+ ret = sscanf(buf, "%u", &input);
+ if (ret != 1)
+ return -EINVAL;
+
+ mutex_lock(&dbs_mutex);
+ dbs_tuners_ins.io_is_busy = !!input;
+ mutex_unlock(&dbs_mutex);
+
+ return count;
+}
+
+define_one_global_rw(sampling_rate);
+define_one_global_rw(up_threshold);
+define_one_global_rw(down_threshold);
+define_one_global_rw(hotplug_in_sampling_periods);
+define_one_global_rw(hotplug_out_sampling_periods);
+define_one_global_rw(ignore_nice_load);
+define_one_global_rw(io_is_busy);
+
+static struct attribute *dbs_attributes[] = {
+ &sampling_rate.attr,
+ &up_threshold.attr,
+ &down_threshold.attr,
+ &hotplug_in_sampling_periods.attr,
+ &hotplug_out_sampling_periods.attr,
+ &ignore_nice_load.attr,
+ &io_is_busy.attr,
+ NULL
+};
+
+static struct attribute_group dbs_attr_group = {
+ .attrs = dbs_attributes,
+ .name = "hotplug",
+};
+
+/************************** sysfs end ************************/
+
+static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
+{
+ /* combined load of all enabled CPUs */
+ unsigned int total_load = 0;
+ /* single largest CPU load */
+ unsigned int max_load = 0;
+ /* average load across all enabled CPUs */
+ unsigned int avg_load = 0;
+ /* average load across multiple sampling periods for hotplug events */
+ unsigned int hotplug_in_avg_load = 0;
+ unsigned int hotplug_out_avg_load = 0;
+ /* number of sampling periods averaged for hotplug decisions */
+ unsigned int periods;
+
+ struct cpufreq_policy *policy;
+ unsigned int index = 0;
+ unsigned int i, j;
+
+ policy = this_dbs_info->cur_policy;
+
+ /*
+ * cpu load accounting
+ * get highest load, total load and average load across all CPUs
+ */
+ for_each_cpu(j, policy->cpus) {
+ unsigned int load;
+ unsigned int idle_time, wall_time;
+ cputime64_t cur_wall_time, cur_idle_time;
+ struct cpu_dbs_info_s *j_dbs_info;
+
+ j_dbs_info = &per_cpu(hp_cpu_dbs_info, j);
+
+ /* update both cur_idle_time and cur_wall_time */
+ cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
+
+ /* how much wall time has passed since last iteration? */
+ wall_time = (unsigned int) cputime64_sub(cur_wall_time,
+ j_dbs_info->prev_cpu_wall);
+ j_dbs_info->prev_cpu_wall = cur_wall_time;
+
+ /* how much idle time has passed since last iteration? */
+ idle_time = (unsigned int) cputime64_sub(cur_idle_time,
+ j_dbs_info->prev_cpu_idle);
+ j_dbs_info->prev_cpu_idle = cur_idle_time;
+
+ if (unlikely(!wall_time || wall_time < idle_time))
+ continue;
+
+ /* load is the percentage of time not spent in idle */
+ load = 100 * (wall_time - idle_time) / wall_time;
+
+ /* keep track of combined load across all CPUs */
+ total_load += load;
+
+ /* keep track of highest single load across all CPUs */
+ if (load > max_load)
+ max_load = load;
+ }
+
+ /* calculate the average load across all related CPUs */
+ avg_load = total_load / num_online_cpus();
+
+
+ /*
+ * hotplug load accounting
+ * average load over multiple sampling periods
+ */
+
+ /* how many sampling periods do we use for hotplug decisions? */
+ periods = max(dbs_tuners_ins.hotplug_in_sampling_periods,
+ dbs_tuners_ins.hotplug_out_sampling_periods);
+
+ /* store avg_load in the circular buffer */
+ dbs_tuners_ins.hotplug_load_history[dbs_tuners_ins.hotplug_load_index]
+ = avg_load;
+
+ /* compute average load across in & out sampling periods */
+ for (i = 0, j = dbs_tuners_ins.hotplug_load_index;
+ i < periods; i++, j--) {
+ if (i < dbs_tuners_ins.hotplug_in_sampling_periods)
+ hotplug_in_avg_load +=
+ dbs_tuners_ins.hotplug_load_history[j];
+ if (i < dbs_tuners_ins.hotplug_out_sampling_periods)
+ hotplug_out_avg_load +=
+ dbs_tuners_ins.hotplug_load_history[j];
+
+ if (j == 0)
+ j = periods;
+ }
+
+ hotplug_in_avg_load = hotplug_in_avg_load /
+ dbs_tuners_ins.hotplug_in_sampling_periods;
+
+ hotplug_out_avg_load = hotplug_out_avg_load /
+ dbs_tuners_ins.hotplug_out_sampling_periods;
+
+ /* return to first element if we're at the circular buffer's end */
+ if (++dbs_tuners_ins.hotplug_load_index == periods)
+ dbs_tuners_ins.hotplug_load_index = 0;
+
+ /* check for frequency increase */
+ if (avg_load > dbs_tuners_ins.up_threshold) {
+ /* should we enable auxiliary CPUs? */
+ if (num_online_cpus() < 2 && hotplug_in_avg_load >
+ dbs_tuners_ins.up_threshold) {
+ /*
+ * hotplug with cpufreq is nasty; a call to
+ * cpufreq_governor_dbs() may cause a lockup.
+ * The wq is not running here, so it's safe.
+ */
+ mutex_unlock(&this_dbs_info->timer_mutex);
+ cpu_up(1);
+ mutex_lock(&this_dbs_info->timer_mutex);
+ goto out;
+ }
+
+ /* increase to highest frequency supported */
+ if (policy->cur < policy->max)
+ __cpufreq_driver_target(policy, policy->max,
+ CPUFREQ_RELATION_H);
+
+ goto out;
+ }
+
+ /* check for frequency decrease */
+ if (avg_load < dbs_tuners_ins.down_threshold) {
+ /* are we at the minimum frequency already? */
+ if (policy->cur == policy->min) {
+ /* should we disable auxiliary CPUs? */
+ if (num_online_cpus() > 1 && hotplug_out_avg_load <
+ dbs_tuners_ins.down_threshold) {
+ mutex_unlock(&this_dbs_info->timer_mutex);
+ cpu_down(1);
+ mutex_lock(&this_dbs_info->timer_mutex);
+ }
+ goto out;
+ }
+
+ /* bump down to the next lowest frequency in the table */
+ if (cpufreq_frequency_table_next_lowest(policy,
+ this_dbs_info->freq_table, &index)) {
+ pr_err("%s: failed to get next lowest frequency\n",
+ __func__);
+ goto out;
+ }
+
+ __cpufreq_driver_target(policy,
+ this_dbs_info->freq_table[index].frequency,
+ CPUFREQ_RELATION_L);
+ }
+out:
+ return;
+}
+
+static void do_dbs_timer(struct work_struct *work)
+{
+ struct cpu_dbs_info_s *dbs_info =
+ container_of(work, struct cpu_dbs_info_s, work.work);
+ unsigned int cpu = dbs_info->cpu;
+
+ /* We want all related CPUs to sample on nearly the same jiffy */
+ int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
+
+ mutex_lock(&dbs_info->timer_mutex);
+ dbs_check_cpu(dbs_info);
+ queue_delayed_work_on(cpu, khotplug_wq, &dbs_info->work, delay);
+ mutex_unlock(&dbs_info->timer_mutex);
+}
+
+static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
+{
+ /* We want all related CPUs to sample on nearly the same jiffy */
+ int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
+ delay -= jiffies % delay;
+
+ INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
+ queue_delayed_work_on(dbs_info->cpu, khotplug_wq, &dbs_info->work,
+ delay);
+}
+
+static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
+{
+ cancel_delayed_work_sync(&dbs_info->work);
+}
+
+static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
+ unsigned int event)
+{
+ unsigned int cpu = policy->cpu;
+ struct cpu_dbs_info_s *this_dbs_info;
+ unsigned int i, j, max_periods;
+ int rc;
+
+ this_dbs_info = &per_cpu(hp_cpu_dbs_info, cpu);
+
+ switch (event) {
+ case CPUFREQ_GOV_START:
+ if ((!cpu_online(cpu)) || (!policy->cur))
+ return -EINVAL;
+
+ mutex_lock(&dbs_mutex);
+ dbs_enable++;
+ for_each_cpu(j, policy->cpus) {
+ struct cpu_dbs_info_s *j_dbs_info;
+ j_dbs_info = &per_cpu(hp_cpu_dbs_info, j);
+ j_dbs_info->cur_policy = policy;
+
+ j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
+ &j_dbs_info->prev_cpu_wall);
+ if (dbs_tuners_ins.ignore_nice) {
+ j_dbs_info->prev_cpu_nice =
+ kstat_cpu(j).cpustat.nice;
+ }
+ }
+
+ /* allocate the shared load history once, not per CPU in the policy */
+ max_periods = max(DEFAULT_HOTPLUG_IN_SAMPLING_PERIODS,
+ DEFAULT_HOTPLUG_OUT_SAMPLING_PERIODS);
+ dbs_tuners_ins.hotplug_load_history = kmalloc(
+ (sizeof(unsigned int) * max_periods),
+ GFP_KERNEL);
+ if (!dbs_tuners_ins.hotplug_load_history) {
+ WARN_ON(1);
+ dbs_enable--;
+ mutex_unlock(&dbs_mutex);
+ return -ENOMEM;
+ }
+ /* seed the history at 50% load to avoid spurious hotplug at start */
+ for (i = 0; i < max_periods; i++)
+ dbs_tuners_ins.hotplug_load_history[i] = 50;
+ this_dbs_info->cpu = cpu;
+ this_dbs_info->freq_table = cpufreq_frequency_get_table(cpu);
+ /*
+ * Start the timer schedule work when this governor
+ * is used for the first time
+ */
+ if (dbs_enable == 1) {
+ rc = sysfs_create_group(cpufreq_global_kobject,
+ &dbs_attr_group);
+ if (rc) {
+ mutex_unlock(&dbs_mutex);
+ return rc;
+ }
+ }
+ mutex_unlock(&dbs_mutex);
+
+ mutex_init(&this_dbs_info->timer_mutex);
+ dbs_timer_init(this_dbs_info);
+ break;
+
+ case CPUFREQ_GOV_STOP:
+ dbs_timer_exit(this_dbs_info);
+
+ mutex_lock(&dbs_mutex);
+ mutex_destroy(&this_dbs_info->timer_mutex);
+ dbs_enable--;
+ mutex_unlock(&dbs_mutex);
+ if (!dbs_enable) {
+ sysfs_remove_group(cpufreq_global_kobject,
+ &dbs_attr_group);
+ /* last user gone: drop the shared load history */
+ kfree(dbs_tuners_ins.hotplug_load_history);
+ dbs_tuners_ins.hotplug_load_history = NULL;
+ }
+ /*
+ * XXX BIG CAVEAT: Stopping the governor with CPU1 offline
+ * will result in it remaining offline until the user onlines
+ * it again. It is up to the user to do this (for now).
+ */
+ break;
+
+ case CPUFREQ_GOV_LIMITS:
+ mutex_lock(&this_dbs_info->timer_mutex);
+ if (policy->max < this_dbs_info->cur_policy->cur)
+ __cpufreq_driver_target(this_dbs_info->cur_policy,
+ policy->max, CPUFREQ_RELATION_H);
+ else if (policy->min > this_dbs_info->cur_policy->cur)
+ __cpufreq_driver_target(this_dbs_info->cur_policy,
+ policy->min, CPUFREQ_RELATION_L);
+ mutex_unlock(&this_dbs_info->timer_mutex);
+ break;
+ }
+ return 0;
+}
+
+static int __init cpufreq_gov_dbs_init(void)
+{
+ int err;
+ cputime64_t wall;
+ u64 idle_time;
+ int cpu = get_cpu();
+
+ idle_time = get_cpu_idle_time_us(cpu, &wall);
+ put_cpu();
+ if (idle_time != -1ULL) {
+ dbs_tuners_ins.up_threshold = DEFAULT_UP_FREQ_MIN_LOAD;
+ } else {
+ pr_err("cpufreq-hotplug: %s: assumes CONFIG_NO_HZ\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ khotplug_wq = create_workqueue("khotplug");
+ if (!khotplug_wq) {
+ pr_err("Creation of khotplug failed\n");
+ return -ENOMEM;
+ }
+ err = cpufreq_register_governor(&cpufreq_gov_hotplug);
+ if (err)
+ destroy_workqueue(khotplug_wq);
+
+ return err;
+}
+
+static void __exit cpufreq_gov_dbs_exit(void)
+{
+ cpufreq_unregister_governor(&cpufreq_gov_hotplug);
+ destroy_workqueue(khotplug_wq);
+}
+
+MODULE_AUTHOR("Mike Turquette <mturquette@ti.com>");
+MODULE_DESCRIPTION("'cpufreq_hotplug' - cpufreq governor for dynamic frequency scaling and CPU hotplugging");
+MODULE_LICENSE("GPL");
+
+#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_HOTPLUG
+fs_initcall(cpufreq_gov_dbs_init);
+#else
+module_init(cpufreq_gov_dbs_init);
+#endif
+module_exit(cpufreq_gov_dbs_exit);
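
The dual moving average over the circular load history is the heart of the hotplug decision in dbs_check_cpu(). The following standalone userspace model (an illustration, not kernel code; the constants mirror the governor's defaults) shows how one buffer yields both the short "in" average and the longer "out" average:

	#include <stdio.h>

	#define PERIODS 20 /* max(in, out) windows, matching the defaults */

	static unsigned int history[PERIODS];
	static unsigned int idx;

	/* model of the governor's dual moving average over one circular buffer */
	static void record_and_average(unsigned int avg_load,
				       unsigned int in_periods,
				       unsigned int out_periods,
				       unsigned int *in_avg, unsigned int *out_avg)
	{
		unsigned int i, j;

		history[idx] = avg_load;
		*in_avg = 0;
		*out_avg = 0;

		for (i = 0, j = idx; i < PERIODS; i++, j--) {
			if (i < in_periods)
				*in_avg += history[j];
			if (i < out_periods)
				*out_avg += history[j];
			if (j == 0)
				j = PERIODS; /* the loop's j-- lands on PERIODS-1 */
		}
		*in_avg /= in_periods;
		*out_avg /= out_periods;

		if (++idx == PERIODS)
			idx = 0;
	}

	int main(void)
	{
		unsigned int in_avg, out_avg, t;

		/* heavy load for 20 ticks, then idle: the short "in" average
		 * reacts quickly while the long "out" average lags behind */
		for (t = 0; t < 40; t++) {
			record_and_average(t < 20 ? 90 : 10, 5, 20,
					   &in_avg, &out_avg);
			printf("t=%2u in_avg=%3u out_avg=%3u\n",
			       t, in_avg, out_avg);
		}
		return 0;
	}

The asymmetry (5 windows to plug in, 20 to plug out) makes the governor quick to online the auxiliary CPU under load but reluctant to offline it, which damps hotplug thrashing.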
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index 90431cb..54c4de4 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -13,6 +13,7 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
+#include <linux/err.h>
/*********************************************************************
* FREQUENCY TABLE HELPERS *
@@ -171,6 +172,78 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
}
EXPORT_SYMBOL_GPL(cpufreq_frequency_table_target);
+int cpufreq_frequency_table_next_lowest(struct cpufreq_policy *policy,
+ struct cpufreq_frequency_table *table, int *index)
+{
+ unsigned int cur_freq;
+ unsigned int next_lowest_freq;
+ int optimal_index = -1;
+ int i = 0;
+
+ if (!policy || !table || !index)
+ return -EINVAL;
+
+ cur_freq = policy->cur;
+ next_lowest_freq = policy->min;
+
+ /* we're at the lowest frequency in the table already, bail out */
+ if (cur_freq == policy->min)
+ return -EINVAL;
+
+ /* walk the list, find closest freq to cur_freq that is below it */
+ while (table[i].frequency != CPUFREQ_TABLE_END) {
+ if (table[i].frequency < cur_freq &&
+ table[i].frequency >= next_lowest_freq) {
+ next_lowest_freq = table[i].frequency;
+ optimal_index = table[i].index;
+ }
+
+ i++;
+ }
+
+ *index = optimal_index;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cpufreq_frequency_table_next_lowest);
+
+int cpufreq_frequency_table_next_highest(struct cpufreq_policy *policy,
+ struct cpufreq_frequency_table *table, int *index)
+{
+ unsigned int cur_freq;
+ unsigned int next_higher_freq;
+ int optimal_index = -1;
+ int i = 0;
+
+ if (!policy || !table || !index)
+ return -EINVAL;
+
+ cur_freq = policy->cur;
+ next_higher_freq = policy->max;
+
+ /* we're at the highest frequency in the table already, bail out */
+ if (cur_freq == policy->max)
+ return -EINVAL;
+
+ /* walk the list, find closest freq to cur_freq that is above it */
+ while (table[i].frequency != CPUFREQ_TABLE_END) {
+ if (table[i].frequency > cur_freq &&
+ table[i].frequency <= next_higher_freq) {
+ next_higher_freq = table[i].frequency;
+ optimal_index = table[i].index;
+ }
+
+ i++;
+ }
+
+ *index = optimal_index;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(cpufreq_frequency_table_next_highest);
+
static DEFINE_PER_CPU(struct cpufreq_frequency_table *, cpufreq_show_table);
/**
* show_available_freqs - show available frequencies for the specified CPU
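
To make the table-walk semantics concrete, here is a userspace model of cpufreq_frequency_table_next_lowest() (a sketch under the assumption that the table may be unsorted, which is why the helper scans every entry rather than stopping early):

	#include <stdio.h>

	#define TABLE_END (~0u)

	struct freq_entry {
		unsigned int index;
		unsigned int frequency;
	};

	/* find the closest table frequency strictly below cur, bounded by min */
	static int next_lowest(const struct freq_entry *t, unsigned int cur,
			       unsigned int min, int *out)
	{
		unsigned int best = min;
		int best_idx = -1;
		int i;

		if (cur == min)
			return -1; /* already at the floor; mirrors -EINVAL */

		for (i = 0; t[i].frequency != TABLE_END; i++) {
			if (t[i].frequency < cur && t[i].frequency >= best) {
				best = t[i].frequency;
				best_idx = (int)t[i].index;
			}
		}

		*out = best_idx;
		return 0;
	}

	int main(void)
	{
		const struct freq_entry table[] = {
			{ 0,  300000 },
			{ 1,  600000 },
			{ 2,  800000 },
			{ 3, 1008000 },
			{ 0, TABLE_END },
		};
		int idx;

		if (!next_lowest(table, 800000, 300000, &idx))
			printf("next lowest index: %d\n", idx); /* prints 1 */
		return 0;
	}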
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index 01f74a8..13ca564 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -73,6 +73,8 @@ struct omap3_gpio_regs {
static struct omap3_gpio_regs gpio_context[OMAP34XX_NR_GPIOS];
#endif
+#include <plat/omap_device.h>
+
/*
* TODO: Cleanup gpio_bank usage as it is having information
* related to all instances of the device
@@ -1748,6 +1750,7 @@ void omap2_gpio_prepare_for_idle(int off_mode)
{
int i, c = 0;
int min = 0;
+ struct platform_device *pdev;
if (cpu_is_omap34xx())
min = 1;
@@ -1760,6 +1763,9 @@ void omap2_gpio_prepare_for_idle(int off_mode)
for (j = 0; j < hweight_long(bank->dbck_enable_mask); j++)
clk_disable(bank->dbck);
+ pdev = to_platform_device(bank->dev);
+ omap_device_idle(pdev);
+
if (!off_mode)
continue;
@@ -1817,6 +1823,7 @@ void omap2_gpio_resume_after_idle(void)
{
int i;
int min = 0;
+ struct platform_device *pdev;
if (cpu_is_omap34xx())
min = 1;
@@ -1825,6 +1832,9 @@ void omap2_gpio_resume_after_idle(void)
u32 l = 0, gen, gen0, gen1;
int j;
+ pdev = to_platform_device(bank->dev);
+ omap_device_enable(pdev);
+
for (j = 0; j < hweight_long(bank->dbck_enable_mask); j++)
clk_enable(bank->dbck);
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 58a58c7..ab9833b 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -1142,18 +1142,16 @@ omap_i2c_remove(struct platform_device *pdev)
#ifdef CONFIG_SUSPEND
static int omap_i2c_suspend(struct device *dev)
{
- if (!pm_runtime_suspended(dev))
- if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend)
- dev->bus->pm->runtime_suspend(dev);
+ if (dev->power.runtime_auto == false)
+ pm_runtime_put_sync(dev);
return 0;
}
static int omap_i2c_resume(struct device *dev)
{
- if (!pm_runtime_suspended(dev))
- if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_resume)
- dev->bus->pm->runtime_resume(dev);
+ if (dev->power.runtime_auto == false)
+ pm_runtime_get_sync(dev);
return 0;
}
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index 47cadf4..0275c28 100644
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -1335,6 +1335,55 @@ static struct platform_driver serial_omap_driver = {
},
};
+int omap_uart_active(int num, u32 timeout)
+{
+ struct uart_omap_port *up;
+ struct circ_buf *xmit;
+ unsigned int status;
+
+ if (num >= OMAP_MAX_HSUART_PORTS)
+ return 0;
+
+ /* Once the UARTs are initialised this can never happen,
+ * but during initialisation the "ui" structure may not
+ * yet be populated when the timer kicks in, which would
+ * leave a NULL pointer here and cause a crash.
+ */
+ up = ui[num];
+ if (up == NULL)
+ return 0;
+
+ if (!up->port_activity)
+ return 1;
+
+ /* Check for recent driver activity. If the time delta from
+ * now to the last activity is less than the "uart idle
+ * timeout", keep the clocks on.
+ */
+ if ((jiffies - up->port_activity) < timeout)
+ return 1;
+
+ xmit = &up->port.state->xmit;
+ if (!(uart_circ_empty(xmit) || uart_tx_stopped(&up->port)))
+ return 1;
+
+ status = serial_in(up, UART_LSR);
+ /* TX hardware not empty */
+ if (!(status & (UART_LSR_TEMT | UART_LSR_THRE)))
+ return 1;
+
+ /* Any rx activity? */
+ if (status & UART_LSR_DR)
+ return 1;
+
+ /* Check if DMA channels are active */
+ if (up->use_dma && (up->uart_dma.rx_dma_channel != OMAP_UART_DMA_CH_FREE ||
+ up->uart_dma.tx_dma_channel != OMAP_UART_DMA_CH_FREE))
+ return 1;
+ return 0;
+}
+EXPORT_SYMBOL(omap_uart_active);
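+
+/*
+ * Hedged usage sketch (not part of this patch): how OMAP PM idle code
+ * might consult omap_uart_active() before gating UART clocks. The
+ * function name and the 5 second window are illustrative assumptions.
+ *
+ * static int omap_uart_can_sleep_example(void)
+ * {
+ *	u32 timeout = msecs_to_jiffies(5000); // illustrative idle window
+ *	int i;
+ *
+ *	for (i = 0; i < OMAP_MAX_HSUART_PORTS; i++)
+ *		if (omap_uart_active(i, timeout))
+ *			return 0; // a port is busy: keep clocks running
+ *
+ *	return 1; // all ports idle: safe to gate UART clocks
+ * }
+ */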
+
static int __init serial_omap_init(void)
{
int ret;