path: root/mm
author    Rafael J. Wysocki <rjw@sisk.pl>    2007-05-09 02:35:10 -0700
committer Linus Torvalds <torvalds@woody.linux-foundation.org>    2007-05-09 12:30:56 -0700
commit    8bb7844286fb8c9fce6f65d8288aeb09d03a5e0d (patch)
tree      f4e305edaedbde05774bb1e4acd89a9475661d2e /mm
parent    f37bc2712b54ec641e0c0c8634f1a4b61d9956c0 (diff)
Add suspend-related notifications for CPU hotplug
Since nonboot CPUs are now disabled after tasks and devices have been frozen and the CPU hotplug infrastructure is used for this purpose, we need special CPU hotplug notifications that will help the CPU-hotplug-aware subsystems distinguish normal CPU hotplug events from CPU hotplug events related to a system-wide suspend or resume operation in progress. This patch introduces such notifications and causes them to be used during suspend and resume transitions.

It also changes all of the CPU-hotplug-aware subsystems to take these notifications into consideration (for now they are handled in the same way as the corresponding "normal" ones).

[oleg@tv-sign.ru: cleanups]
Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
Cc: Gautham R Shenoy <ego@in.ibm.com>
Cc: Pavel Machek <pavel@ucw.cz>
Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
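For reference, the pattern applied throughout the mm/ hunks below looks roughly like the following sketch. It is a hypothetical notifier, not code from this patch: example_cpu_callback and its printk messages are invented for illustration, while the CPU_*_FROZEN case labels and the fall-through handling mirror what the patch actually does in each subsystem.

#include <linux/kernel.h>
#include <linux/cpu.h>
#include <linux/notifier.h>

/*
 * Hypothetical hotplug callback showing how a subsystem treats the
 * new *_FROZEN events (raised while nonboot CPUs are disabled or
 * re-enabled during suspend/resume) the same way as the normal
 * hotplug events, via case fall-through.
 */
static int __cpuinit example_cpu_callback(struct notifier_block *nfb,
					  unsigned long action, void *hcpu)
{
	int cpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:	/* suspend/resume variant */
		printk(KERN_DEBUG "example: preparing CPU %d\n", cpu);
		break;
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		printk(KERN_DEBUG "example: cleaning up CPU %d\n", cpu);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

Elsewhere in the same commit the *_FROZEN codes are defined as the normal event codes with an extra CPU_TASKS_FROZEN bit set, so a subsystem that genuinely needed to distinguish the two kinds of event could test that bit instead of listing both cases.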
Diffstat (limited to 'mm')
-rw-r--r--  mm/page_alloc.c  5
-rw-r--r--  mm/slab.c        6
-rw-r--r--  mm/slub.c        2
-rw-r--r--  mm/swap.c        2
-rw-r--r--  mm/vmscan.c      2
-rw-r--r--  mm/vmstat.c      3
6 files changed, 17 insertions(+), 3 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 6fd0b74..d53cbf8 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2148,11 +2148,14 @@ static int __cpuinit pageset_cpuup_callback(struct notifier_block *nfb,
 
 	switch (action) {
 	case CPU_UP_PREPARE:
+	case CPU_UP_PREPARE_FROZEN:
 		if (process_zones(cpu))
 			ret = NOTIFY_BAD;
 		break;
 	case CPU_UP_CANCELED:
+	case CPU_UP_CANCELED_FROZEN:
 	case CPU_DEAD:
+	case CPU_DEAD_FROZEN:
 		free_zone_pagesets(cpu);
 		break;
 	default:
@@ -3012,7 +3015,7 @@ static int page_alloc_cpu_notify(struct notifier_block *self,
 {
 	int cpu = (unsigned long)hcpu;
 
-	if (action == CPU_DEAD) {
+	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
 		local_irq_disable();
 		__drain_pages(cpu);
 		vm_events_fold_cpu(cpu);
diff --git a/mm/slab.c b/mm/slab.c
index 1a7a10d..6f3d6e2 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1190,6 +1190,7 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
 		mutex_lock(&cache_chain_mutex);
 		break;
 	case CPU_UP_PREPARE:
+	case CPU_UP_PREPARE_FROZEN:
 		/*
 		 * We need to do this right in the beginning since
 		 * alloc_arraycache's are going to use this list.
@@ -1276,10 +1277,12 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
 		}
 		break;
 	case CPU_ONLINE:
+	case CPU_ONLINE_FROZEN:
 		start_cpu_timer(cpu);
 		break;
 #ifdef CONFIG_HOTPLUG_CPU
 	case CPU_DOWN_PREPARE:
+	case CPU_DOWN_PREPARE_FROZEN:
 		/*
 		 * Shutdown cache reaper. Note that the cache_chain_mutex is
 		 * held so that if cache_reap() is invoked it cannot do
@@ -1291,9 +1294,11 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
 		per_cpu(reap_work, cpu).work.func = NULL;
 		break;
 	case CPU_DOWN_FAILED:
+	case CPU_DOWN_FAILED_FROZEN:
 		start_cpu_timer(cpu);
 		break;
 	case CPU_DEAD:
+	case CPU_DEAD_FROZEN:
 		/*
 		 * Even if all the cpus of a node are down, we don't free the
 		 * kmem_list3 of any cache. This to avoid a race between
@@ -1305,6 +1310,7 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
 		/* fall thru */
 #endif
 	case CPU_UP_CANCELED:
+	case CPU_UP_CANCELED_FROZEN:
 		list_for_each_entry(cachep, &cache_chain, next) {
 			struct array_cache *nc;
 			struct array_cache *shared;
diff --git a/mm/slub.c b/mm/slub.c
index f7c120b..a581fa8 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2514,7 +2514,9 @@ static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
 
 	switch (action) {
 	case CPU_UP_CANCELED:
+	case CPU_UP_CANCELED_FROZEN:
 	case CPU_DEAD:
+	case CPU_DEAD_FROZEN:
 		for_all_slabs(__flush_cpu_slab, cpu);
 		break;
 	default:
diff --git a/mm/swap.c b/mm/swap.c
index 218c52a..d3cb966 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -488,7 +488,7 @@ static int cpu_swap_callback(struct notifier_block *nfb,
 	long *committed;
 
 	committed = &per_cpu(committed_space, (long)hcpu);
-	if (action == CPU_DEAD) {
+	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
 		atomic_add(*committed, &vm_committed_space);
 		*committed = 0;
 		__lru_add_drain((long)hcpu);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 1c8e75a..1be5a63 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1528,7 +1528,7 @@ static int __devinit cpu_callback(struct notifier_block *nfb,
 	pg_data_t *pgdat;
 	cpumask_t mask;
 
-	if (action == CPU_ONLINE) {
+	if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
 		for_each_online_pgdat(pgdat) {
 			mask = node_to_cpumask(pgdat->node_id);
 			if (any_online_cpu(mask) != NR_CPUS)
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 6c488d6..9a66dc4 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -650,8 +650,11 @@ static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,
 {
 	switch (action) {
 	case CPU_UP_PREPARE:
+	case CPU_UP_PREPARE_FROZEN:
 	case CPU_UP_CANCELED:
+	case CPU_UP_CANCELED_FROZEN:
 	case CPU_DEAD:
+	case CPU_DEAD_FROZEN:
 		refresh_zone_stat_thresholds();
 		break;
 	default: