author    KalimochoAz <calimochoazucarado@gmail.com>	2012-10-14 21:11:17 +0200
committer KalimochoAz <calimochoazucarado@gmail.com>	2012-10-14 21:11:17 +0200
commit    08c142dc92ac8075a74ffd41c7d0fff0d135ba45 (patch)
tree      5fec9becb3a5b9653510d5ab63ac317330afb7c7 /kernel
parent    123dc2ce092ee26ac57c2b3962aeb8ed9b367fb6 (diff)
parent    776a41b87e94f6942793c3268a49809a6691e4e2 (diff)
Merge commit '776a41b' into HEAD
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/cpuset.c	 3
-rw-r--r--	kernel/sched.c	40
2 files changed, 39 insertions, 4 deletions
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index b2e84bd..6cbe033 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -2080,6 +2080,9 @@ static void scan_for_empty_cpusets(struct cpuset *root)
  * (of no affect) on systems that are actively using CPU hotplug
  * but making no active use of cpusets.
  *
+ * The only exception to this is suspend/resume, where we don't
+ * modify cpusets at all.
+ *
  * This routine ensures that top_cpuset.cpus_allowed tracks
  * cpu_active_mask on each CPU hotplug (cpuhp) event.
  *
diff --git a/kernel/sched.c b/kernel/sched.c
index d759d33..e788b66 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -7778,34 +7778,66 @@ int __init sched_create_sysfs_power_savings_entries(struct sysdev_class *cls)
 }
 #endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
 
+static int num_cpus_frozen;	/* used to mark begin/end of suspend/resume */
+
 /*
  * Update cpusets according to cpu_active mask. If cpusets are
  * disabled, cpuset_update_active_cpus() becomes a simple wrapper
  * around partition_sched_domains().
+ *
+ * If we come here as part of a suspend/resume, don't touch cpusets because we
+ * want to restore it back to its original state upon resume anyway.
  */
 static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action,
 			     void *hcpu)
 {
-	switch (action & ~CPU_TASKS_FROZEN) {
+	switch (action) {
+	case CPU_ONLINE_FROZEN:
+	case CPU_DOWN_FAILED_FROZEN:
+
+		/*
+		 * num_cpus_frozen tracks how many CPUs are involved in suspend
+		 * resume sequence. As long as this is not the last online
+		 * operation in the resume sequence, just build a single sched
+		 * domain, ignoring cpusets.
+		 */
+		num_cpus_frozen--;
+		if (likely(num_cpus_frozen)) {
+			partition_sched_domains(1, NULL, NULL);
+			break;
+		}
+
+		/*
+		 * This is the last CPU online operation. So fall through and
+		 * restore the original sched domains by considering the
+		 * cpuset configurations.
+		 */
+
 	case CPU_ONLINE:
 	case CPU_DOWN_FAILED:
 		cpuset_update_active_cpus();
-		return NOTIFY_OK;
+		break;
 	default:
 		return NOTIFY_DONE;
 	}
+	return NOTIFY_OK;
 }
 
 static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action,
 			       void *hcpu)
 {
-	switch (action & ~CPU_TASKS_FROZEN) {
+	switch (action) {
 	case CPU_DOWN_PREPARE:
 		cpuset_update_active_cpus();
-		return NOTIFY_OK;
+		break;
+	case CPU_DOWN_PREPARE_FROZEN:
+		num_cpus_frozen++;
+		partition_sched_domains(1, NULL, NULL);
+		break;
 	default:
 		return NOTIFY_DONE;
 	}
+	return NOTIFY_OK;
 }
 
 static int update_runtime(struct notifier_block *nfb,
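
For context, a small standalone C sketch (not part of the patch) of the frozen-CPU bookkeeping the sched.c hunk introduces: each CPU taken down during suspend increments num_cpus_frozen and keeps only a single scheduler domain, and only the last CPU brought back up during resume triggers the cpuset-aware rebuild. The build_single_sched_domain() and rebuild_domains_from_cpusets() helpers are illustrative stand-ins, not kernel APIs.

#include <stdio.h>

static int num_cpus_frozen;	/* mirrors the counter added in kernel/sched.c */

/* Stand-ins for partition_sched_domains(1, NULL, NULL) and
 * cpuset_update_active_cpus(); they only print what the kernel would do. */
static void build_single_sched_domain(void)    { printf("  -> single sched domain\n"); }
static void rebuild_domains_from_cpusets(void) { printf("  -> rebuild from cpusets\n"); }

/* Suspend path: one CPU_DOWN_PREPARE_FROZEN per non-boot CPU. */
static void cpu_down_frozen(int cpu)
{
	num_cpus_frozen++;
	printf("suspend: cpu%d down, num_cpus_frozen=%d\n", cpu, num_cpus_frozen);
	build_single_sched_domain();
}

/* Resume path: one CPU_ONLINE_FROZEN per CPU coming back. */
static void cpu_up_frozen(int cpu)
{
	num_cpus_frozen--;
	printf("resume: cpu%d up, num_cpus_frozen=%d\n", cpu, num_cpus_frozen);
	if (num_cpus_frozen)
		build_single_sched_domain();	/* not the last online op yet */
	else
		rebuild_domains_from_cpusets();	/* last one: restore cpuset state */
}

int main(void)
{
	int cpu;

	for (cpu = 3; cpu >= 1; cpu--)	/* CPUs 1..3 go offline; CPU 0 stays up */
		cpu_down_frozen(cpu);
	for (cpu = 1; cpu <= 3; cpu++)
		cpu_up_frozen(cpu);
	return 0;
}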