author     Dave Jones <davej@redhat.com>	2006-12-12 17:41:41 -0500
committer  Dave Jones <davej@redhat.com>	2006-12-12 17:41:41 -0500
commit     c4366889dda8110247be59ca41fddb82951a8c26 (patch)
tree       705c1a996bed8fd48ce94ff33ec9fd00f9b94875 /drivers/cpufreq
parent     db2fb9db5735cc532fd4fc55e94b9a3c3750378e (diff)
parent     e1036502e5263851259d147771226161e5ccc85a (diff)
Merge ../linus

Conflicts:
	drivers/cpufreq/cpufreq.c
Diffstat (limited to 'drivers/cpufreq')
-rw-r--r--  drivers/cpufreq/Kconfig                |  1
-rw-r--r--  drivers/cpufreq/cpufreq.c              | 17
-rw-r--r--  drivers/cpufreq/cpufreq_conservative.c |  7
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c     | 28
4 files changed, 29 insertions(+), 24 deletions(-)
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 2cc71b6..491779a 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -107,6 +107,7 @@ config CPU_FREQ_GOV_USERSPACE
config CPU_FREQ_GOV_ONDEMAND
tristate "'ondemand' cpufreq policy governor"
+ select CPU_FREQ_TABLE
help
'ondemand' - This driver adds a dynamic cpufreq policy governor.
The governor does a periodic polling and
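
Note: the new "select CPU_FREQ_TABLE" line makes sure the frequency-table helpers (built only when CONFIG_CPU_FREQ_TABLE is set) are available to ondemand, whose powersave_bias path works against a cpufreq_frequency_table (see the freq_table/freq_lo fields added further down in this patch). A minimal sketch of that kind of table lookup, with a hypothetical helper name, not code from this patch:

#include <linux/cpufreq.h>

/* Hypothetical example: map a requested frequency onto a table entry.
 * cpufreq_frequency_table_target() is only built when CONFIG_CPU_FREQ_TABLE
 * is enabled, which is what the select above guarantees for ondemand. */
static unsigned int pick_table_freq(struct cpufreq_policy *policy,
				    struct cpufreq_frequency_table *table,
				    unsigned int requested_khz)
{
	unsigned int index = 0;

	if (cpufreq_frequency_table_target(policy, table, requested_khz,
					   CPUFREQ_RELATION_L, &index))
		return policy->cur;		/* lookup failed, keep current */

	return table[index].frequency;		/* nearest supported frequency */
}
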
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 0c18ac2..9fb2edf 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -42,9 +42,8 @@ static struct cpufreq_policy *cpufreq_cpu_data[NR_CPUS];
static DEFINE_SPINLOCK(cpufreq_driver_lock);
/* internal prototypes */
-static int __cpufreq_governor(struct cpufreq_policy *policy,
- unsigned int event);
-static void handle_update(void *data);
+static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event);
+static void handle_update(struct work_struct *work);
/**
* Two notifier lists: the "policy" list is involved in the
@@ -61,7 +60,7 @@ static int __init init_cpufreq_transition_notifier_list(void)
srcu_init_notifier_head(&cpufreq_transition_notifier_list);
return 0;
}
-core_initcall(init_cpufreq_transition_notifier_list);
+pure_initcall(init_cpufreq_transition_notifier_list);
static LIST_HEAD(cpufreq_governor_list);
static DEFINE_MUTEX (cpufreq_governor_mutex);
@@ -695,7 +694,7 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
mutex_init(&policy->lock);
mutex_lock(&policy->lock);
init_completion(&policy->kobj_unregister);
- INIT_WORK(&policy->update, handle_update, (void *)(long)cpu);
+ INIT_WORK(&policy->update, handle_update);
/* call driver. From then on the cpufreq must be able
* to accept all calls to ->verify and ->setpolicy for this CPU
@@ -925,9 +924,11 @@ static int cpufreq_remove_dev (struct sys_device * sys_dev)
}
-static void handle_update(void *data)
+static void handle_update(struct work_struct *work)
{
- unsigned int cpu = (unsigned int)(long)data;
+ struct cpufreq_policy *policy =
+ container_of(work, struct cpufreq_policy, update);
+ unsigned int cpu = policy->cpu;
dprintk("handle_update for cpu %u called\n", cpu);
cpufreq_update_policy(cpu);
}
@@ -1599,7 +1600,6 @@ int cpufreq_update_policy(unsigned int cpu)
}
EXPORT_SYMBOL(cpufreq_update_policy);
-#ifdef CONFIG_HOTPLUG_CPU
static int cpufreq_cpu_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
@@ -1639,7 +1639,6 @@ static struct notifier_block __cpuinitdata cpufreq_cpu_notifier =
{
.notifier_call = cpufreq_cpu_callback,
};
-#endif /* CONFIG_HOTPLUG_CPU */
/*********************************************************************
* REGISTER / UNREGISTER CPUFREQ DRIVER *
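
The cpufreq.c hunks above follow the 2.6.20-era workqueue API conversion: a work handler now receives the struct work_struct itself and recovers its context with container_of(), instead of being handed a void *data cookie, and INIT_WORK() loses its third argument. A minimal sketch of that pattern, using hypothetical names (my_ctx, my_handler), not code from this patch:

#include <linux/workqueue.h>
#include <linux/kernel.h>

struct my_ctx {
	unsigned int cpu;
	struct work_struct update;
};

/* The handler gets the work item itself and climbs back to the
 * enclosing object, just as handle_update() now does above. */
static void my_handler(struct work_struct *work)
{
	struct my_ctx *ctx = container_of(work, struct my_ctx, update);

	printk(KERN_INFO "update for cpu %u\n", ctx->cpu);
}

static void my_ctx_start(struct my_ctx *ctx, unsigned int cpu)
{
	ctx->cpu = cpu;
	INIT_WORK(&ctx->update, my_handler);	/* no data argument any more */
	schedule_work(&ctx->update);
}
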
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 29905b4..eef0270 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -61,7 +61,7 @@ static unsigned int def_sampling_rate;
#define MAX_SAMPLING_DOWN_FACTOR (10)
#define TRANSITION_LATENCY_LIMIT (10 * 1000)
-static void do_dbs_timer(void *data);
+static void do_dbs_timer(struct work_struct *work);
struct cpu_dbs_info_s {
struct cpufreq_policy *cur_policy;
@@ -84,7 +84,7 @@ static unsigned int dbs_enable; /* number of CPUs using this policy */
* is recursive for the same process. -Venki
*/
static DEFINE_MUTEX (dbs_mutex);
-static DECLARE_WORK (dbs_work, do_dbs_timer, NULL);
+static DECLARE_DELAYED_WORK(dbs_work, do_dbs_timer);
struct dbs_tuners {
unsigned int sampling_rate;
@@ -427,7 +427,7 @@ static void dbs_check_cpu(int cpu)
}
}
-static void do_dbs_timer(void *data)
+static void do_dbs_timer(struct work_struct *work)
{
int i;
lock_cpu_hotplug();
@@ -442,7 +442,6 @@ static void do_dbs_timer(void *data)
static inline void dbs_timer_init(void)
{
- INIT_WORK(&dbs_work, do_dbs_timer, NULL);
schedule_delayed_work(&dbs_work,
usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
return;
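
In the conservative governor the sampling timer becomes a statically declared delayed work item, so the explicit INIT_WORK() in dbs_timer_init() goes away and the handler simply re-arms itself. A minimal self-rescheduling sketch under the same API, with hypothetical names (sample_work, sample_fn, sampling_rate_us):

#include <linux/workqueue.h>
#include <linux/jiffies.h>

static void sample_fn(struct work_struct *work);
static DECLARE_DELAYED_WORK(sample_work, sample_fn);

static unsigned int sampling_rate_us = 100000;	/* assumed tunable */

static void sample_fn(struct work_struct *work)
{
	/* ... sample CPU load and adjust the frequency here ... */

	/* re-arm for the next sampling period, as do_dbs_timer() does */
	schedule_delayed_work(&sample_work,
			      usecs_to_jiffies(sampling_rate_us));
}

static void sampling_start(void)
{
	schedule_delayed_work(&sample_work,
			      usecs_to_jiffies(sampling_rate_us));
}
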
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 048ec8b..f697449 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -49,13 +49,17 @@ static unsigned int def_sampling_rate;
#define DEF_SAMPLING_RATE_LATENCY_MULTIPLIER (1000)
#define TRANSITION_LATENCY_LIMIT (10 * 1000)
-static void do_dbs_timer(void *data);
+static void do_dbs_timer(struct work_struct *work);
+
+/* Sampling types */
+enum dbs_sample {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};
struct cpu_dbs_info_s {
cputime64_t prev_cpu_idle;
cputime64_t prev_cpu_wall;
struct cpufreq_policy *cur_policy;
- struct work_struct work;
+ struct delayed_work work;
+ enum dbs_sample sample_type;
unsigned int enable;
struct cpufreq_frequency_table *freq_table;
unsigned int freq_lo;
@@ -417,30 +421,31 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
}
}
-/* Sampling types */
-enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};
-
-static void do_dbs_timer(void *data)
+static void do_dbs_timer(struct work_struct *work)
{
unsigned int cpu = smp_processor_id();
struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu);
+ enum dbs_sample sample_type = dbs_info->sample_type;
/* We want all CPUs to do sampling nearly on same jiffy */
int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
+
+ /* Permit rescheduling of this work item */
+ work_release(work);
+
delay -= jiffies % delay;
if (!dbs_info->enable)
return;
/* Common NORMAL_SAMPLE setup */
- INIT_WORK(&dbs_info->work, do_dbs_timer, (void *)DBS_NORMAL_SAMPLE);
+ dbs_info->sample_type = DBS_NORMAL_SAMPLE;
if (!dbs_tuners_ins.powersave_bias ||
- (unsigned long) data == DBS_NORMAL_SAMPLE) {
+ sample_type == DBS_NORMAL_SAMPLE) {
lock_cpu_hotplug();
dbs_check_cpu(dbs_info);
unlock_cpu_hotplug();
if (dbs_info->freq_lo) {
/* Setup timer for SUB_SAMPLE */
- INIT_WORK(&dbs_info->work, do_dbs_timer,
- (void *)DBS_SUB_SAMPLE);
+ dbs_info->sample_type = DBS_SUB_SAMPLE;
delay = dbs_info->freq_hi_jiffies;
}
} else {
@@ -459,7 +464,8 @@ static inline void dbs_timer_init(unsigned int cpu)
delay -= jiffies % delay;
ondemand_powersave_bias_init();
- INIT_WORK(&dbs_info->work, do_dbs_timer, NULL);
+ INIT_DELAYED_WORK_NAR(&dbs_info->work, do_dbs_timer);
+ dbs_info->sample_type = DBS_NORMAL_SAMPLE;
queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay);
}
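
For ondemand, the state that used to ride in the work item's data pointer now lives in the per-CPU struct (sample_type), and the work item becomes a delayed_work queued on its own CPU. A compact sketch of that shape, with hypothetical names (struct sampler, sample_fn, sampler_wq) and plain INIT_DELAYED_WORK() rather than the INIT_DELAYED_WORK_NAR()/work_release() pair used in the patch:

#include <linux/workqueue.h>
#include <linux/percpu.h>
#include <linux/jiffies.h>
#include <linux/smp.h>

enum sample_kind { NORMAL_SAMPLE, SUB_SAMPLE };

struct sampler {
	enum sample_kind kind;		/* state kept in the struct, not in a data cookie */
	struct delayed_work work;
};

static DEFINE_PER_CPU(struct sampler, samplers);
static struct workqueue_struct *sampler_wq;	/* assumed created elsewhere */

static void sample_fn(struct work_struct *work)
{
	unsigned int cpu = smp_processor_id();
	struct sampler *s = &per_cpu(samplers, cpu);
	unsigned long delay = usecs_to_jiffies(100000);	/* assumed sampling rate */

	if (s->kind == SUB_SAMPLE) {
		/* ... second half of a split, powersave_bias-style sample ... */
	}
	s->kind = NORMAL_SAMPLE;	/* default for the next run */

	/* re-queue on the same CPU, as do_dbs_timer() does above */
	queue_delayed_work_on(cpu, sampler_wq, &s->work, delay);
}

static void sampler_start(unsigned int cpu)
{
	struct sampler *s = &per_cpu(samplers, cpu);

	s->kind = NORMAL_SAMPLE;
	INIT_DELAYED_WORK(&s->work, sample_fn);
	queue_delayed_work_on(cpu, sampler_wq, &s->work,
			      usecs_to_jiffies(100000));
}
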