author      Chandra Seetharaman <sekharan@us.ibm.com>  2006-06-27 02:54:07 -0700
committer   Linus Torvalds <torvalds@g5.osdl.org>      2006-06-27 17:32:40 -0700
commit      9c7b216d23e820e0e148d5be01bbb5bd2d8378fe (patch)
tree        53e6c1e4870db49b4999b4053862d3f63375773f /mm
parent      6ac12dfe9c2027cd3c5ed603f11d1bb4f04906fe (diff)
download    kernel_samsung_espresso10-9c7b216d23e820e0e148d5be01bbb5bd2d8378fe.zip
            kernel_samsung_espresso10-9c7b216d23e820e0e148d5be01bbb5bd2d8378fe.tar.gz
            kernel_samsung_espresso10-9c7b216d23e820e0e148d5be01bbb5bd2d8378fe.tar.bz2
[PATCH] cpu hotplug: revert init patch submitted for 2.6.17
In 2.6.17, there was a problem with cpu_notifiers and XFS.  I provided a band-aid solution to solve that problem.  In the process, I undid all the changes you both were making to ensure that these notifiers were available only at init time (unless CONFIG_HOTPLUG_CPU is defined).

We deferred the real fix to 2.6.18.  Here is a set of patches that fixes the XFS problem cleanly and makes the cpu notifiers available only at init time (unless CONFIG_HOTPLUG_CPU is defined).

If CONFIG_HOTPLUG_CPU is defined, then cpu notifiers are available at run time.

This patch reverts the notifier_call changes made in 2.6.17.

Signed-off-by: Chandra Seetharaman <sekharan@us.ibm.com>
Cc: Ashok Raj <ashok.raj@intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
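For context on what the __cpuinit/__devinit markings buy, here is a minimal sketch of the CPU-notifier pattern this series restores: the callback is annotated so that, without CONFIG_HOTPLUG_CPU, it is discarded together with other init-only code after boot, while with CONFIG_HOTPLUG_CPU it stays available at run time. All identifiers (my_cpu_callback, my_per_cpu_setup, my_cpu_notifier) are hypothetical; this is illustrative code assuming the 2.6.x register_cpu_notifier()/CPU_UP_PREPARE interfaces, not code from this patch.

#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/notifier.h>

/* Hypothetical per-CPU setup helper, not taken from this patch. */
static void __cpuinit my_per_cpu_setup(long cpu)
{
	/* allocate/initialise per-CPU state for 'cpu' here */
}

/*
 * Marked __cpuinit: when CONFIG_HOTPLUG_CPU is not set, the callback is
 * only needed while CPUs are brought up during boot, so it can be freed
 * with the rest of the init text afterwards.  With CONFIG_HOTPLUG_CPU
 * the annotation expands to nothing and the callback remains available
 * for run-time CPU hotplug events.
 */
static int __cpuinit my_cpu_callback(struct notifier_block *nfb,
				     unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;

	switch (action) {
	case CPU_UP_PREPARE:
		my_per_cpu_setup(cpu);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block my_cpu_notifier = {
	.notifier_call = my_cpu_callback,
};

static int __init my_cpu_notifier_init(void)
{
	register_cpu_notifier(&my_cpu_notifier);
	return 0;
}
core_initcall(my_cpu_notifier_init);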
Diffstat (limited to 'mm')
-rw-r--r--  mm/page_alloc.c  2
-rw-r--r--  mm/slab.c        2
-rw-r--r--  mm/vmscan.c      2
3 files changed, 3 insertions, 3 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 9f86191..e9fb2d4 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2009,7 +2009,7 @@ static inline void free_zone_pagesets(int cpu)
}
}
-static int pageset_cpuup_callback(struct notifier_block *nfb,
+static int __cpuinit pageset_cpuup_callback(struct notifier_block *nfb,
unsigned long action,
void *hcpu)
{
diff --git a/mm/slab.c b/mm/slab.c
index 47982c2..631c0fe 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1073,7 +1073,7 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
#endif
-static int cpuup_callback(struct notifier_block *nfb,
+static int __devinit cpuup_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
long cpu = (long)hcpu;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index f03da33..eeacb0d 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1450,7 +1450,7 @@ out:
not required for correctness. So if the last cpu in a node goes
away, we get changed to run anywhere: as the first one comes back,
restore their cpu bindings. */
-static int cpu_callback(struct notifier_block *nfb,
+static int __devinit cpu_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
pg_data_t *pgdat;