author     Colin Cross <ccross@android.com>    2011-07-15 15:56:51 -0700
committer  Colin Cross <ccross@android.com>    2011-07-15 15:56:51 -0700
commit     7ab2d7fb28c252bb4d4cb0de2a1848b969665b03 (patch)
tree       9229a213d7d0ec99ad1b9cfc414596862ffbfa6b
parent     6594a2ed5d43b8a11add52012447f1bc86041616 (diff)
parent     adce689ff77b097142cf49e76a232e59126da017 (diff)
Merge branch 'android-3.0' into android-omap-3.0
-rw-r--r--  Documentation/cpu-freq/governors.txt  |   2
-rw-r--r--  arch/arm/kernel/leds.c                |  27
-rw-r--r--  arch/arm/kernel/process.c             |   5
-rw-r--r--  arch/x86/include/asm/idle.h           |   7
-rw-r--r--  arch/x86/kernel/process_64.c          |  18
-rw-r--r--  drivers/cpufreq/Kconfig               |  11
-rw-r--r--  drivers/cpufreq/cpufreq_interactive.c | 260
-rw-r--r--  drivers/usb/gadget/f_mtp.c            |  46
-rw-r--r--  include/linux/cpu.h                   |   7
-rw-r--r--  include/linux/usb/f_mtp.h             |  28
-rw-r--r--  kernel/cpu.c                          |  20
11 files changed, 222 insertions(+), 209 deletions(-)
diff --git a/Documentation/cpu-freq/governors.txt b/Documentation/cpu-freq/governors.txt
index a33f9d4..e1083b0 100644
--- a/Documentation/cpu-freq/governors.txt
+++ b/Documentation/cpu-freq/governors.txt
@@ -212,7 +212,7 @@ idle. When the cpu comes out of idle, a timer is configured to fire
within 1-2 ticks. If the cpu is very busy between exiting idle and
when the timer fires then we assume the cpu is underpowered and ramp
to MAX speed.
-
+
If the cpu was not sufficiently busy to immediately ramp to MAX speed,
then the governor evaluates the cpu load since the last speed adjustment,
choosing the highest value between that longer-term load and the
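
The passage above describes a two-stage decision: ramp straight to MAX if the cpu was busy right after idle exit, otherwise scale by the higher of the short-term and longer-term load estimates. A minimal sketch of that selection step, simplified from the cpufreq_interactive_timer() logic in the driver diff further below (the choose_freq() name and the final scaling formula here are illustrative, not the driver's exact code):

#include <linux/cpufreq.h>

/* Illustrative sketch only -- the real handler is
 * cpufreq_interactive_timer() in cpufreq_interactive.c.  Loads are in
 * percent; go_maxspeed_load is the governor's existing tunable. */
static unsigned long go_maxspeed_load;	/* set from DEFAULT_GO_MAXSPEED_LOAD */

static unsigned int choose_freq(unsigned int short_term_load,
				unsigned int longterm_load,
				struct cpufreq_policy *policy)
{
	/* Very busy right after idle exit: assume underpowered, go to MAX. */
	if (short_term_load >= go_maxspeed_load)
		return policy->max;

	/* Otherwise scale by the higher of the two load estimates. */
	if (longterm_load > short_term_load)
		short_term_load = longterm_load;

	return policy->cur * short_term_load / 100;
}
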
diff --git a/arch/arm/kernel/leds.c b/arch/arm/kernel/leds.c
index 0f107dc..136e837 100644
--- a/arch/arm/kernel/leds.c
+++ b/arch/arm/kernel/leds.c
@@ -9,6 +9,8 @@
*/
#include <linux/module.h>
#include <linux/init.h>
+#include <linux/notifier.h>
+#include <linux/cpu.h>
#include <linux/sysdev.h>
#include <linux/syscore_ops.h>
@@ -101,6 +103,25 @@ static struct syscore_ops leds_syscore_ops = {
.resume = leds_resume,
};
+static int leds_idle_notifier(struct notifier_block *nb, unsigned long val,
+ void *data)
+{
+ switch (val) {
+ case IDLE_START:
+ leds_event(led_idle_start);
+ break;
+ case IDLE_END:
+ leds_event(led_idle_end);
+ break;
+ }
+
+ return 0;
+}
+
+static struct notifier_block leds_idle_nb = {
+ .notifier_call = leds_idle_notifier,
+};
+
static int __init leds_init(void)
{
int ret;
@@ -109,8 +130,12 @@ static int __init leds_init(void)
ret = sysdev_register(&leds_device);
if (ret == 0)
ret = sysdev_create_file(&leds_device, &attr_event);
- if (ret == 0)
+
+ if (ret == 0) {
register_syscore_ops(&leds_syscore_ops);
+ idle_notifier_register(&leds_idle_nb);
+ }
+
return ret;
}
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 34ea864..919de7c 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -32,7 +32,6 @@
#include <linux/hw_breakpoint.h>
#include <asm/cacheflush.h>
-#include <asm/leds.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/thread_notify.h>
@@ -183,7 +182,7 @@ void cpu_idle(void)
/* endless idle loop with no priority at all */
while (1) {
tick_nohz_stop_sched_tick(1);
- leds_event(led_idle_start);
+ idle_notifier_call_chain(IDLE_START);
while (!need_resched()) {
#ifdef CONFIG_HOTPLUG_CPU
if (cpu_is_offline(smp_processor_id()))
@@ -207,7 +206,7 @@ void cpu_idle(void)
local_irq_enable();
}
}
- leds_event(led_idle_end);
+ idle_notifier_call_chain(IDLE_END);
tick_nohz_restart_sched_tick();
preempt_enable_no_resched();
schedule();
diff --git a/arch/x86/include/asm/idle.h b/arch/x86/include/asm/idle.h
index f49253d7..f1e4268 100644
--- a/arch/x86/include/asm/idle.h
+++ b/arch/x86/include/asm/idle.h
@@ -1,13 +1,6 @@
#ifndef _ASM_X86_IDLE_H
#define _ASM_X86_IDLE_H
-#define IDLE_START 1
-#define IDLE_END 2
-
-struct notifier_block;
-void idle_notifier_register(struct notifier_block *n);
-void idle_notifier_unregister(struct notifier_block *n);
-
#ifdef CONFIG_X86_64
void enter_idle(void);
void exit_idle(void);
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index ca6f7ab..63c8aed 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -56,31 +56,17 @@ asmlinkage extern void ret_from_fork(void);
DEFINE_PER_CPU(unsigned long, old_rsp);
static DEFINE_PER_CPU(unsigned char, is_idle);
-static ATOMIC_NOTIFIER_HEAD(idle_notifier);
-
-void idle_notifier_register(struct notifier_block *n)
-{
- atomic_notifier_chain_register(&idle_notifier, n);
-}
-EXPORT_SYMBOL_GPL(idle_notifier_register);
-
-void idle_notifier_unregister(struct notifier_block *n)
-{
- atomic_notifier_chain_unregister(&idle_notifier, n);
-}
-EXPORT_SYMBOL_GPL(idle_notifier_unregister);
-
void enter_idle(void)
{
percpu_write(is_idle, 1);
- atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
+ idle_notifier_call_chain(IDLE_START);
}
static void __exit_idle(void)
{
if (x86_test_and_clear_bit_percpu(0, is_idle) == 0)
return;
- atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
+ idle_notifier_call_chain(IDLE_END);
}
/* Called from interrupts to signify idle end */
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index e9aaa1d..7e07d94 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -185,6 +185,17 @@ config CPU_FREQ_GOV_INTERACTIVE
'interactive' - This driver adds a dynamic cpufreq policy governor
designed for latency-sensitive workloads.
+ This governor attempts to reduce the latency of clock
+ increases so that the system is more responsive to
+ interactive workloads.
+
+ To compile this driver as a module, choose M here: the
+ module will be called cpufreq_interactive.
+
+ For details, take a look at linux/Documentation/cpu-freq.
+
+ If in doubt, say N.
+
config CPU_FREQ_GOV_CONSERVATIVE
tristate "'conservative' cpufreq governor"
depends on CPU_FREQ
diff --git a/drivers/cpufreq/cpufreq_interactive.c b/drivers/cpufreq/cpufreq_interactive.c
index bcbb7ac..d9c6d5b 100644
--- a/drivers/cpufreq/cpufreq_interactive.c
+++ b/drivers/cpufreq/cpufreq_interactive.c
@@ -28,7 +28,6 @@
#include <asm/cputime.h>
-static void (*pm_idle_old)(void);
static atomic_t active_count = ATOMIC_INIT(0);
struct cpufreq_interactive_cpuinfo {
@@ -67,92 +66,11 @@ static unsigned long go_maxspeed_load;
#define DEFAULT_MIN_SAMPLE_TIME 80000;
static unsigned long min_sample_time;
-#define DEBUG 0
-#define BUFSZ 128
-
-#if DEBUG
-#include <linux/proc_fs.h>
-
-struct dbgln {
- int cpu;
- unsigned long jiffy;
- unsigned long run;
- char buf[BUFSZ];
-};
-
-#define NDBGLNS 256
-
-static struct dbgln dbgbuf[NDBGLNS];
-static int dbgbufs;
-static int dbgbufe;
-static struct proc_dir_entry *dbg_proc;
-static spinlock_t dbgpr_lock;
-
-static u64 up_request_time;
-static unsigned int up_max_latency;
-
-static void dbgpr(char *fmt, ...)
-{
- va_list args;
- int n;
- unsigned long flags;
-
- spin_lock_irqsave(&dbgpr_lock, flags);
- n = dbgbufe;
- va_start(args, fmt);
- vsnprintf(dbgbuf[n].buf, BUFSZ, fmt, args);
- va_end(args);
- dbgbuf[n].cpu = smp_processor_id();
- dbgbuf[n].run = nr_running();
- dbgbuf[n].jiffy = jiffies;
-
- if (++dbgbufe >= NDBGLNS)
- dbgbufe = 0;
-
- if (dbgbufe == dbgbufs)
- if (++dbgbufs >= NDBGLNS)
- dbgbufs = 0;
-
- spin_unlock_irqrestore(&dbgpr_lock, flags);
-}
-
-static void dbgdump(void)
-{
- int i, j;
- unsigned long flags;
- static struct dbgln prbuf[NDBGLNS];
-
- spin_lock_irqsave(&dbgpr_lock, flags);
- i = dbgbufs;
- j = dbgbufe;
- memcpy(prbuf, dbgbuf, sizeof(dbgbuf));
- dbgbufs = 0;
- dbgbufe = 0;
- spin_unlock_irqrestore(&dbgpr_lock, flags);
-
- while (i != j)
- {
- printk("%lu %d %lu %s",
- prbuf[i].jiffy, prbuf[i].cpu, prbuf[i].run,
- prbuf[i].buf);
- if (++i == NDBGLNS)
- i = 0;
- }
-}
-
-static int dbg_proc_read(char *buffer, char **start, off_t offset,
- int count, int *peof, void *dat)
-{
- printk("max up_task latency=%uus\n", up_max_latency);
- dbgdump();
- *peof = 1;
- return 0;
-}
-
-
-#else
-#define dbgpr(...) do {} while (0)
-#endif
+/*
+ * The sample rate of the timer used to increase frequency
+ */
+#define DEFAULT_TIMER_RATE 30000;
+static unsigned long timer_rate;
static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
unsigned int event);
@@ -202,16 +120,8 @@ static void cpufreq_interactive_timer(unsigned long data)
smp_wmb();
/* If we raced with cancelling a timer, skip. */
- if (!idle_exit_time) {
- dbgpr("timer %d: no valid idle exit sample\n", (int) data);
+ if (!idle_exit_time)
goto exit;
- }
-
-#if DEBUG
- if ((int) jiffies - (int) pcpu->cpu_timer.expires >= 10)
- dbgpr("timer %d: late by %d ticks\n",
- (int) data, jiffies - pcpu->cpu_timer.expires);
-#endif
delta_idle = (unsigned int) cputime64_sub(now_idle, time_in_idle);
delta_time = (unsigned int) cputime64_sub(pcpu->timer_run_time,
@@ -220,11 +130,8 @@ static void cpufreq_interactive_timer(unsigned long data)
/*
* If timer ran less than 1ms after short-term sample started, retry.
*/
- if (delta_time < 1000) {
- dbgpr("timer %d: time delta %u too short exit=%llu now=%llu\n", (int) data,
- delta_time, idle_exit_time, pcpu->timer_run_time);
+ if (delta_time < 1000)
goto rearm;
- }
if (delta_idle > delta_time)
cpu_load = 0;
@@ -232,7 +139,7 @@ static void cpufreq_interactive_timer(unsigned long data)
cpu_load = 100 * (delta_time - delta_idle) / delta_time;
delta_idle = (unsigned int) cputime64_sub(now_idle,
- pcpu->freq_change_time_in_idle);
+ pcpu->freq_change_time_in_idle);
delta_time = (unsigned int) cputime64_sub(pcpu->timer_run_time,
pcpu->freq_change_time);
@@ -258,32 +165,26 @@ static void cpufreq_interactive_timer(unsigned long data)
if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
new_freq, CPUFREQ_RELATION_H,
&index)) {
- dbgpr("timer %d: cpufreq_frequency_table_target error\n", (int) data);
+ pr_warn_once("timer %d: cpufreq_frequency_table_target error\n",
+ (int) data);
goto rearm;
}
new_freq = pcpu->freq_table[index].frequency;
if (pcpu->target_freq == new_freq)
- {
- dbgpr("timer %d: load=%d, already at %d\n", (int) data, cpu_load, new_freq);
goto rearm_if_notmax;
- }
/*
* Do not scale down unless we have been at this frequency for the
* minimum sample time.
*/
if (new_freq < pcpu->target_freq) {
- if (cputime64_sub(pcpu->timer_run_time, pcpu->freq_change_time) <
- min_sample_time) {
- dbgpr("timer %d: load=%d cur=%d tgt=%d not yet\n", (int) data, cpu_load, pcpu->target_freq, new_freq);
+ if (cputime64_sub(pcpu->timer_run_time, pcpu->freq_change_time)
+ < min_sample_time)
goto rearm;
- }
}
- dbgpr("timer %d: load=%d cur=%d tgt=%d queue\n", (int) data, cpu_load, pcpu->target_freq, new_freq);
-
if (new_freq < pcpu->target_freq) {
pcpu->target_freq = new_freq;
spin_lock_irqsave(&down_cpumask_lock, flags);
@@ -292,9 +193,6 @@ static void cpufreq_interactive_timer(unsigned long data)
queue_work(down_wq, &freq_scale_down_work);
} else {
pcpu->target_freq = new_freq;
-#if DEBUG
- up_request_time = ktime_to_us(ktime_get());
-#endif
spin_lock_irqsave(&up_cpumask_lock, flags);
cpumask_set_cpu(data, &up_cpumask);
spin_unlock_irqrestore(&up_cpumask_lock, flags);
@@ -319,34 +217,29 @@ rearm:
if (pcpu->target_freq == pcpu->policy->min) {
smp_rmb();
- if (pcpu->idling) {
- dbgpr("timer %d: cpu idle, don't re-arm\n", (int) data);
+ if (pcpu->idling)
goto exit;
- }
pcpu->timer_idlecancel = 1;
}
pcpu->time_in_idle = get_cpu_idle_time_us(
data, &pcpu->idle_exit_time);
- mod_timer(&pcpu->cpu_timer, jiffies + 2);
- dbgpr("timer %d: set timer for %lu exit=%llu\n", (int) data, pcpu->cpu_timer.expires, pcpu->idle_exit_time);
+ mod_timer(&pcpu->cpu_timer, jiffies + usecs_to_jiffies(timer_rate));
}
exit:
return;
}
-static void cpufreq_interactive_idle(void)
+static void cpufreq_interactive_idle_start(void)
{
struct cpufreq_interactive_cpuinfo *pcpu =
&per_cpu(cpuinfo, smp_processor_id());
int pending;
- if (!pcpu->governor_enabled) {
- pm_idle_old();
+ if (!pcpu->governor_enabled)
return;
- }
pcpu->idling = 1;
smp_wmb();
@@ -366,10 +259,7 @@ static void cpufreq_interactive_idle(void)
pcpu->time_in_idle = get_cpu_idle_time_us(
smp_processor_id(), &pcpu->idle_exit_time);
pcpu->timer_idlecancel = 0;
- mod_timer(&pcpu->cpu_timer, jiffies + 2);
- dbgpr("idle: enter at %d, set timer for %lu exit=%llu\n",
- pcpu->target_freq, pcpu->cpu_timer.expires,
- pcpu->idle_exit_time);
+ mod_timer(&pcpu->cpu_timer, jiffies + usecs_to_jiffies(timer_rate));
}
#endif
} else {
@@ -380,7 +270,6 @@ static void cpufreq_interactive_idle(void)
* CPU didn't go busy; we'll recheck things upon idle exit.
*/
if (pending && pcpu->timer_idlecancel) {
- dbgpr("idle: cancel timer for %lu\n", pcpu->cpu_timer.expires);
del_timer(&pcpu->cpu_timer);
/*
* Ensure last timer run time is after current idle
@@ -392,7 +281,13 @@ static void cpufreq_interactive_idle(void)
}
}
- pm_idle_old();
+}
+
+static void cpufreq_interactive_idle_end(void)
+{
+ struct cpufreq_interactive_cpuinfo *pcpu =
+ &per_cpu(cpuinfo, smp_processor_id());
+
pcpu->idling = 0;
smp_wmb();
@@ -414,14 +309,7 @@ static void cpufreq_interactive_idle(void)
get_cpu_idle_time_us(smp_processor_id(),
&pcpu->idle_exit_time);
pcpu->timer_idlecancel = 0;
- mod_timer(&pcpu->cpu_timer, jiffies + 2);
- dbgpr("idle: exit, set timer for %lu exit=%llu\n", pcpu->cpu_timer.expires, pcpu->idle_exit_time);
-#if DEBUG
- } else if (timer_pending(&pcpu->cpu_timer) == 0 &&
- pcpu->timer_run_time < pcpu->idle_exit_time) {
- dbgpr("idle: timer not run yet: exit=%llu tmrrun=%llu\n",
- pcpu->idle_exit_time, pcpu->timer_run_time);
-#endif
+ mod_timer(&pcpu->cpu_timer, jiffies + usecs_to_jiffies(timer_rate));
}
}
@@ -433,12 +321,6 @@ static int cpufreq_interactive_up_task(void *data)
unsigned long flags;
struct cpufreq_interactive_cpuinfo *pcpu;
-#if DEBUG
- u64 now;
- u64 then;
- unsigned int lat;
-#endif
-
while (1) {
set_current_state(TASK_INTERRUPTIBLE);
spin_lock_irqsave(&up_cpumask_lock, flags);
@@ -455,18 +337,6 @@ static int cpufreq_interactive_up_task(void *data)
set_current_state(TASK_RUNNING);
-#if DEBUG
- then = up_request_time;
- now = ktime_to_us(ktime_get());
-
- if (now > then) {
- lat = ktime_to_us(ktime_get()) - then;
-
- if (lat > up_max_latency)
- up_max_latency = lat;
- }
-#endif
-
tmp_mask = up_cpumask;
cpumask_clear(&up_cpumask);
spin_unlock_irqrestore(&up_cpumask_lock, flags);
@@ -474,11 +344,6 @@ static int cpufreq_interactive_up_task(void *data)
for_each_cpu(cpu, &tmp_mask) {
pcpu = &per_cpu(cpuinfo, cpu);
- if (nr_running() == 1) {
- dbgpr("up %d: tgt=%d nothing else running\n", cpu,
- pcpu->target_freq);
- }
-
smp_rmb();
if (!pcpu->governor_enabled)
@@ -490,7 +355,6 @@ static int cpufreq_interactive_up_task(void *data)
pcpu->freq_change_time_in_idle =
get_cpu_idle_time_us(cpu,
&pcpu->freq_change_time);
- dbgpr("up %d: set tgt=%d (actual=%d)\n", cpu, pcpu->target_freq, pcpu->policy->cur);
}
}
@@ -523,7 +387,6 @@ static void cpufreq_interactive_freq_down(struct work_struct *work)
pcpu->freq_change_time_in_idle =
get_cpu_idle_time_us(cpu,
&pcpu->freq_change_time);
- dbgpr("down %d: set tgt=%d (actual=%d)\n", cpu, pcpu->target_freq, pcpu->policy->cur);
}
}
@@ -536,7 +399,14 @@ static ssize_t show_go_maxspeed_load(struct kobject *kobj,
static ssize_t store_go_maxspeed_load(struct kobject *kobj,
struct attribute *attr, const char *buf, size_t count)
{
- return strict_strtoul(buf, 0, &go_maxspeed_load);
+ int ret;
+ unsigned long val;
+
+ ret = strict_strtoul(buf, 0, &val);
+ if (ret < 0)
+ return ret;
+ go_maxspeed_load = val;
+ return count;
}
static struct global_attr go_maxspeed_load_attr = __ATTR(go_maxspeed_load, 0644,
@@ -551,15 +421,45 @@ static ssize_t show_min_sample_time(struct kobject *kobj,
static ssize_t store_min_sample_time(struct kobject *kobj,
struct attribute *attr, const char *buf, size_t count)
{
- return strict_strtoul(buf, 0, &min_sample_time);
+ int ret;
+ unsigned long val;
+
+ ret = strict_strtoul(buf, 0, &val);
+ if (ret < 0)
+ return ret;
+ min_sample_time = val;
+ return count;
}
static struct global_attr min_sample_time_attr = __ATTR(min_sample_time, 0644,
show_min_sample_time, store_min_sample_time);
+static ssize_t show_timer_rate(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ return sprintf(buf, "%lu\n", timer_rate);
+}
+
+static ssize_t store_timer_rate(struct kobject *kobj,
+ struct attribute *attr, const char *buf, size_t count)
+{
+ int ret;
+ unsigned long val;
+
+ ret = strict_strtoul(buf, 0, &val);
+ if (ret < 0)
+ return ret;
+ timer_rate = val;
+ return count;
+}
+
+static struct global_attr timer_rate_attr = __ATTR(timer_rate, 0644,
+ show_timer_rate, store_timer_rate);
+
static struct attribute *interactive_attributes[] = {
&go_maxspeed_load_attr.attr,
&min_sample_time_attr.attr,
+ &timer_rate_attr.attr,
NULL,
};
@@ -608,8 +508,6 @@ static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
if (rc)
return rc;
- pm_idle_old = pm_idle;
- pm_idle = cpufreq_interactive_idle;
break;
case CPUFREQ_GOV_STOP:
@@ -635,7 +533,6 @@ static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
sysfs_remove_group(cpufreq_global_kobject,
&interactive_attr_group);
- pm_idle = pm_idle_old;
break;
case CPUFREQ_GOV_LIMITS:
@@ -650,6 +547,26 @@ static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
return 0;
}
+static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
+ unsigned long val,
+ void *data)
+{
+ switch (val) {
+ case IDLE_START:
+ cpufreq_interactive_idle_start();
+ break;
+ case IDLE_END:
+ cpufreq_interactive_idle_end();
+ break;
+ }
+
+ return 0;
+}
+
+static struct notifier_block cpufreq_interactive_idle_nb = {
+ .notifier_call = cpufreq_interactive_idle_notifier,
+};
+
static int __init cpufreq_interactive_init(void)
{
unsigned int i;
@@ -658,6 +575,7 @@ static int __init cpufreq_interactive_init(void)
go_maxspeed_load = DEFAULT_GO_MAXSPEED_LOAD;
min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
+ timer_rate = DEFAULT_TIMER_RATE;
	/* Initialize per-cpu timers */
for_each_possible_cpu(i) {
@@ -679,7 +597,7 @@ static int __init cpufreq_interactive_init(void)
warm cache (probably doesn't matter much). */
down_wq = alloc_workqueue("knteractive_down", 0, 1);
- if (! down_wq)
+ if (!down_wq)
goto err_freeuptask;
INIT_WORK(&freq_scale_down_work,
@@ -688,11 +606,7 @@ static int __init cpufreq_interactive_init(void)
spin_lock_init(&up_cpumask_lock);
spin_lock_init(&down_cpumask_lock);
-#if DEBUG
- spin_lock_init(&dbgpr_lock);
- dbg_proc = create_proc_entry("igov", S_IWUSR | S_IRUGO, NULL);
- dbg_proc->read_proc = dbg_proc_read;
-#endif
+ idle_notifier_register(&cpufreq_interactive_idle_nb);
return cpufreq_register_governor(&cpufreq_gov_interactive);
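
With the new timer_rate tunable replacing the hard-coded jiffies + 2 re-arm interval, the sampling period becomes adjustable at run time through sysfs. A hedged userspace sketch of tuning it (the /sys path is an assumption: the attribute group is registered on cpufreq_global_kobject and conventionally appears under .../cpufreq/interactive/, but the group name is not visible in this hunk):

#include <stdio.h>

/* Illustrative only: set the governor sample period, in microseconds. */
int set_timer_rate(unsigned long usecs)
{
	FILE *f = fopen("/sys/devices/system/cpu/cpufreq/interactive/timer_rate", "w");

	if (!f)
		return -1;
	fprintf(f, "%lu\n", usecs);
	return fclose(f);
}
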
diff --git a/drivers/usb/gadget/f_mtp.c b/drivers/usb/gadget/f_mtp.c
index a5b6463..a383bc5 100644
--- a/drivers/usb/gadget/f_mtp.c
+++ b/drivers/usb/gadget/f_mtp.c
@@ -94,8 +94,8 @@ struct mtp_dev {
struct usb_request *rx_req[RX_REQ_MAX];
int rx_done;
- /* for processing MTP_SEND_FILE and MTP_RECEIVE_FILE
- * ioctls on a work queue
+ /* for processing MTP_SEND_FILE, MTP_RECEIVE_FILE and
+ * MTP_SEND_FILE_WITH_HEADER ioctls on a work queue
*/
struct workqueue_struct *wq;
struct work_struct send_file_work;
@@ -103,6 +103,9 @@ struct mtp_dev {
struct file *xfer_file;
loff_t xfer_file_offset;
int64_t xfer_file_length;
+ unsigned xfer_send_header;
+ uint16_t xfer_command;
+ uint32_t xfer_transaction_id;
int xfer_result;
};
@@ -629,10 +632,11 @@ static void send_file_work(struct work_struct *data) {
struct mtp_dev *dev = container_of(data, struct mtp_dev, send_file_work);
struct usb_composite_dev *cdev = dev->cdev;
struct usb_request *req = 0;
+ struct mtp_data_header *header;
struct file *filp;
loff_t offset;
int64_t count;
- int xfer, ret;
+ int xfer, ret, hdr_size;
int r = 0;
int sendZLP = 0;
@@ -644,10 +648,17 @@ static void send_file_work(struct work_struct *data) {
DBG(cdev, "send_file_work(%lld %lld)\n", offset, count);
+ if (dev->xfer_send_header) {
+ hdr_size = sizeof(struct mtp_data_header);
+ count += hdr_size;
+ } else {
+ hdr_size = 0;
+ }
+
/* we need to send a zero length packet to signal the end of transfer
* if the transfer size is aligned to a packet boundary.
*/
- if ((dev->xfer_file_length & (dev->ep_in->maxpacket - 1)) == 0) {
+ if ((count & (dev->ep_in->maxpacket - 1)) == 0) {
sendZLP = 1;
}
@@ -674,12 +685,23 @@ static void send_file_work(struct work_struct *data) {
xfer = MTP_BULK_BUFFER_SIZE;
else
xfer = count;
- ret = vfs_read(filp, req->buf, xfer, &offset);
+
+ if (hdr_size) {
+ /* prepend MTP data header */
+ header = (struct mtp_data_header *)req->buf;
+ header->length = __cpu_to_le32(count);
+ header->type = __cpu_to_le16(2); /* data packet */
+ header->command = __cpu_to_le16(dev->xfer_command);
+ header->transaction_id = __cpu_to_le32(dev->xfer_transaction_id);
+ }
+
+ ret = vfs_read(filp, req->buf + hdr_size, xfer - hdr_size, &offset);
if (ret < 0) {
r = ret;
break;
}
- xfer = ret;
+ xfer = ret + hdr_size;
+ hdr_size = 0;
req->length = xfer;
ret = usb_ep_queue(dev->ep_in, req, GFP_KERNEL);
@@ -829,6 +851,7 @@ static long mtp_ioctl(struct file *fp, unsigned code, unsigned long value)
switch (code) {
case MTP_SEND_FILE:
case MTP_RECEIVE_FILE:
+ case MTP_SEND_FILE_WITH_HEADER:
{
struct mtp_file_range mfr;
struct work_struct *work;
@@ -866,10 +889,17 @@ static long mtp_ioctl(struct file *fp, unsigned code, unsigned long value)
dev->xfer_file_length = mfr.length;
smp_wmb();
- if (code == MTP_SEND_FILE)
+ if (code == MTP_SEND_FILE_WITH_HEADER) {
work = &dev->send_file_work;
- else
+ dev->xfer_send_header = 1;
+ dev->xfer_command = mfr.command;
+ dev->xfer_transaction_id = mfr.transaction_id;
+ } else if (code == MTP_SEND_FILE) {
+ work = &dev->send_file_work;
+ dev->xfer_send_header = 0;
+ } else {
work = &dev->receive_file_work;
+ }
/* We do the file transfer on a work queue so it will run
* in kernel context, which is necessary for vfs_read and
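
Note that the zero-length-packet test above now keys off count rather than the raw file length, because the 12-byte header also travels on the wire. The bitmask form relies on USB bulk maxpacket sizes being powers of two; a worked instance (assuming a 512-byte high-speed bulk-in endpoint):

#include <stdint.h>

/* Returns nonzero when the transfer ends exactly on a packet boundary,
 * in which case a zero-length packet must follow to mark the end. */
static int needs_zlp(int64_t count, unsigned int maxpacket)
{
	/* Valid only because maxpacket is a power of two (64 full-speed,
	 * 512 high-speed), making maxpacket - 1 an all-ones mask. */
	return (count & (maxpacket - 1)) == 0;
}

/* Example: 12-byte MTP header + 1012 file bytes = 1024 = 2 * 512,
 * so needs_zlp(1024, 512) is true and a ZLP is queued. */
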
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 5f09323..97f1ca7 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -174,4 +174,11 @@ static inline int disable_nonboot_cpus(void) { return 0; }
static inline void enable_nonboot_cpus(void) {}
#endif /* !CONFIG_PM_SLEEP_SMP */
+#define IDLE_START 1
+#define IDLE_END 2
+
+void idle_notifier_register(struct notifier_block *n);
+void idle_notifier_unregister(struct notifier_block *n);
+void idle_notifier_call_chain(unsigned long val);
+
#endif /* _LINUX_CPU_H_ */
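
These declarations make the formerly x86-only idle notifier API generic; kernel/cpu.c below supplies the implementation. A minimal sketch of a client module (the idle_stats_* names are illustrative, not part of this patch; the chain is atomic, so the callback must not sleep):

#include <linux/atomic.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/notifier.h>

static atomic_t idle_entries = ATOMIC_INIT(0);

static int idle_stats_notifier(struct notifier_block *nb,
			       unsigned long val, void *data)
{
	/* Runs from the idle path: just count transitions, never sleep. */
	if (val == IDLE_START)
		atomic_inc(&idle_entries);
	return NOTIFY_OK;
}

static struct notifier_block idle_stats_nb = {
	.notifier_call = idle_stats_notifier,
};

static int __init idle_stats_init(void)
{
	idle_notifier_register(&idle_stats_nb);
	return 0;
}

static void __exit idle_stats_exit(void)
{
	idle_notifier_unregister(&idle_stats_nb);
}

module_init(idle_stats_init);
module_exit(idle_stats_exit);
MODULE_LICENSE("GPL");
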
diff --git a/include/linux/usb/f_mtp.h b/include/linux/usb/f_mtp.h
index fdf828c..7422b17 100644
--- a/include/linux/usb/f_mtp.h
+++ b/include/linux/usb/f_mtp.h
@@ -18,6 +18,22 @@
#ifndef __LINUX_USB_F_MTP_H
#define __LINUX_USB_F_MTP_H
+#include <linux/ioctl.h>
+
+#ifdef __KERNEL__
+
+struct mtp_data_header {
+ /* length of packet, including this header */
+ uint32_t length;
+ /* container type (2 for data packet) */
+ uint16_t type;
+ /* MTP command code */
+ uint16_t command;
+ /* MTP transaction ID */
+ uint32_t transaction_id;
+};
+
+#endif /* __KERNEL__ */
struct mtp_file_range {
/* file descriptor for file to transfer */
@@ -26,6 +42,14 @@ struct mtp_file_range {
loff_t offset;
/* number of bytes to transfer */
int64_t length;
+ /* MTP command ID for data header,
+ * used only for MTP_SEND_FILE_WITH_HEADER
+ */
+ uint16_t command;
+ /* MTP transaction ID for data header,
+ * used only for MTP_SEND_FILE_WITH_HEADER
+ */
+ uint32_t transaction_id;
};
struct mtp_event {
@@ -43,5 +67,9 @@ struct mtp_event {
#define MTP_RECEIVE_FILE _IOW('M', 1, struct mtp_file_range)
/* Sends an event to the host via the interrupt endpoint */
#define MTP_SEND_EVENT _IOW('M', 3, struct mtp_event)
+/* Sends the specified file range to the host,
+ * with a 12 byte MTP data packet header at the beginning.
+ */
+#define MTP_SEND_FILE_WITH_HEADER _IOW('M', 4, struct mtp_file_range)
#endif /* __LINUX_USB_F_MTP_H */
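
A hedged userspace sketch of the new ioctl (the mtp_fd/file_fd names and the send_object() wrapper are illustrative; only the ioctl number and the struct layout come from this header):

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/usb/f_mtp.h>

/* mtp_fd is the opened MTP gadget device; file_fd is the object to send. */
int send_object(int mtp_fd, int file_fd, int64_t length,
		uint16_t command, uint32_t transaction_id)
{
	struct mtp_file_range mfr = {
		.fd		= file_fd,
		.offset		= 0,
		.length		= length,	/* payload; driver prepends header */
		.command	= command,	/* echoed into the data header */
		.transaction_id	= transaction_id,
	};

	return ioctl(mtp_fd, MTP_SEND_FILE_WITH_HEADER, &mfr);
}
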
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 12b7458..4047707 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -594,3 +594,23 @@ void init_cpu_online(const struct cpumask *src)
{
cpumask_copy(to_cpumask(cpu_online_bits), src);
}
+
+static ATOMIC_NOTIFIER_HEAD(idle_notifier);
+
+void idle_notifier_register(struct notifier_block *n)
+{
+ atomic_notifier_chain_register(&idle_notifier, n);
+}
+EXPORT_SYMBOL_GPL(idle_notifier_register);
+
+void idle_notifier_unregister(struct notifier_block *n)
+{
+ atomic_notifier_chain_unregister(&idle_notifier, n);
+}
+EXPORT_SYMBOL_GPL(idle_notifier_unregister);
+
+void idle_notifier_call_chain(unsigned long val)
+{
+ atomic_notifier_call_chain(&idle_notifier, val, NULL);
+}
+EXPORT_SYMBOL_GPL(idle_notifier_call_chain);