| author | KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> | 2006-03-28 01:56:37 -0800 |
|---|---|---|
| committer | Linus Torvalds <torvalds@g5.osdl.org> | 2006-03-28 09:16:05 -0800 |
| commit | 0a945022778f100115d0cb6234eb28fc1b15ccaf (patch) | |
| tree | 85df4b5f7dd8bf59557091379c59b23b09115bf6 | |
| parent | 631d6747e1d877a4baa924cb373b8b9511a53e5e (diff) | |
[PATCH] for_each_possible_cpu: fixes for generic part
Replaces for_each_cpu() with for_each_possible_cpu() in the generic (non-architecture-specific) parts of the kernel.
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
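
For readers unfamiliar with the cpumask iterators: for_each_possible_cpu() walks every CPU that could ever be brought up, while for_each_online_cpu() walks only the CPUs that are currently up. The old for_each_cpu() name also iterated the possible set, so this rename changes no behaviour; it only makes the intent of each loop explicit. A minimal, illustrative kernel-style sketch of the pattern being converted (the per-CPU variable demo_count and helper demo_count_sum() are hypothetical, not part of this patch):

```c
#include <linux/percpu.h>
#include <linux/cpumask.h>

/* Hypothetical per-CPU counter, in the spirit of blk_cpu_done or kstat. */
static DEFINE_PER_CPU(unsigned long, demo_count);

/*
 * Sum the counter over every CPU that could ever be brought online.
 * Iterating the possible mask (rather than the online mask) keeps the
 * walk in sync with per-CPU storage, which exists for all possible CPUs.
 */
static unsigned long demo_count_sum(void)
{
	unsigned long sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += per_cpu(demo_count, cpu);
	return sum;
}
```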
-rw-r--r-- | block/ll_rw_blk.c | 2
-rw-r--r-- | fs/file.c | 2
-rw-r--r-- | fs/proc/proc_misc.c | 2
-rw-r--r-- | include/asm-generic/percpu.h | 2
-rw-r--r-- | include/linux/genhd.h | 4
-rw-r--r-- | include/linux/kernel_stat.h | 2
-rw-r--r-- | init/main.c | 2
-rw-r--r-- | kernel/rcutorture.c | 4
-rw-r--r-- | kernel/sched.c | 8
-rw-r--r-- | mm/slab.c | 4
-rw-r--r-- | mm/swap.c | 2
11 files changed, 17 insertions, 17 deletions
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 82469db..5a19e2e 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -3514,7 +3514,7 @@ int __init blk_dev_init(void)
 	iocontext_cachep = kmem_cache_create("blkdev_ioc",
 			sizeof(struct io_context), 0, SLAB_PANIC, NULL, NULL);
-	for_each_cpu(i)
+	for_each_possible_cpu(i)
 		INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
 	open_softirq(BLOCK_SOFTIRQ, blk_done_softirq, NULL);
diff --git a/fs/file.c b/fs/file.c
--- a/fs/file.c
+++ b/fs/file.c
@@ -373,6 +373,6 @@ static void __devinit fdtable_defer_list_init(int cpu)
 void __init files_defer_init(void)
 {
 	int i;
-	for_each_cpu(i)
+	for_each_possible_cpu(i)
 		fdtable_defer_list_init(i);
 }
diff --git a/fs/proc/proc_misc.c b/fs/proc/proc_misc.c
index 1e9ea37..1edce0c 100644
--- a/fs/proc/proc_misc.c
+++ b/fs/proc/proc_misc.c
@@ -534,7 +534,7 @@ static int show_stat(struct seq_file *p, void *v)
 	if (wall_to_monotonic.tv_nsec)
 		--jif;
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		int j;
 		user = cputime64_add(user, kstat_cpu(i).cpustat.user);
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index 78cf455..c0caf43 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -19,7 +19,7 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
 #define percpu_modcopy(pcpudst, src, size)	\
 do {	\
 	unsigned int __i;	\
-	for_each_cpu(__i)	\
+	for_each_possible_cpu(__i)	\
 		memcpy((pcpudst)+__per_cpu_offset[__i],	\
 		       (src), (size));	\
 } while (0)
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 3c1b029..10a27f2 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -152,14 +152,14 @@ struct disk_attribute {
 ({	\
 	typeof(gendiskp->dkstats->field) res = 0;	\
 	int i;	\
-	for_each_cpu(i)	\
+	for_each_possible_cpu(i)	\
 		res += per_cpu_ptr(gendiskp->dkstats, i)->field;	\
 	res;	\
 })
 static inline void disk_stat_set_all(struct gendisk *gendiskp, int value) {
 	int i;
-	for_each_cpu(i)
+	for_each_possible_cpu(i)
 		memset(per_cpu_ptr(gendiskp->dkstats, i), value,
 				sizeof (struct disk_stats));
 }
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
index a484572..b462490 100644
--- a/include/linux/kernel_stat.h
+++ b/include/linux/kernel_stat.h
@@ -46,7 +46,7 @@ static inline int kstat_irqs(int irq)
 {
 	int cpu, sum = 0;
-	for_each_cpu(cpu)
+	for_each_possible_cpu(cpu)
 		sum += kstat_cpu(cpu).irqs[irq];
 	return sum;
diff --git a/init/main.c b/init/main.c
index 64466ea..4a2f089 100644
--- a/init/main.c
+++ b/init/main.c
@@ -341,7 +341,7 @@ static void __init setup_per_cpu_areas(void)
 #endif
 	ptr = alloc_bootmem(size * nr_possible_cpus);
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		__per_cpu_offset[i] = ptr - __per_cpu_start;
 		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
 		ptr += size;
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index b4b362b..8154e75 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -301,7 +301,7 @@ rcu_torture_printk(char *page)
 	long pipesummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
 	long batchsummary[RCU_TORTURE_PIPE_LEN + 1] = { 0 };
-	for_each_cpu(cpu) {
+	for_each_possible_cpu(cpu) {
 		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
 			pipesummary[i] += per_cpu(rcu_torture_count, cpu)[i];
 			batchsummary[i] += per_cpu(rcu_torture_batch, cpu)[i];
@@ -535,7 +535,7 @@ rcu_torture_init(void)
 	atomic_set(&n_rcu_torture_error, 0);
 	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
 		atomic_set(&rcu_torture_wcount[i], 0);
-	for_each_cpu(cpu) {
+	for_each_possible_cpu(cpu) {
 		for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
 			per_cpu(rcu_torture_count, cpu)[i] = 0;
 			per_cpu(rcu_torture_batch, cpu)[i] = 0;
diff --git a/kernel/sched.c b/kernel/sched.c
index 7854ee5..a9ecac3 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1625,7 +1625,7 @@ unsigned long nr_uninterruptible(void)
 {
 	unsigned long i, sum = 0;
-	for_each_cpu(i)
+	for_each_possible_cpu(i)
 		sum += cpu_rq(i)->nr_uninterruptible;
 	/*
@@ -1642,7 +1642,7 @@ unsigned long long nr_context_switches(void)
 {
 	unsigned long long i, sum = 0;
-	for_each_cpu(i)
+	for_each_possible_cpu(i)
 		sum += cpu_rq(i)->nr_switches;
 	return sum;
@@ -1652,7 +1652,7 @@ unsigned long nr_iowait(void)
 {
 	unsigned long i, sum = 0;
-	for_each_cpu(i)
+	for_each_possible_cpu(i)
 		sum += atomic_read(&cpu_rq(i)->nr_iowait);
 	return sum;
@@ -6080,7 +6080,7 @@ void __init sched_init(void)
 	runqueue_t *rq;
 	int i, j, k;
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		prio_array_t *array;
 		rq = cpu_rq(i);
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3311,7 +3311,7 @@ void *__alloc_percpu(size_t size)
 	 * and we have no way of figuring out how to fix the array
 	 * that we have allocated then....
 	 */
-	for_each_cpu(i) {
+	for_each_possible_cpu(i) {
 		int node = cpu_to_node(i);
 		if (node_online(node))
@@ -3398,7 +3398,7 @@ void free_percpu(const void *objp)
 	/*
 	 * We allocate for all cpus so we cannot use for online cpu here.
 	 */
-	for_each_cpu(i)
+	for_each_possible_cpu(i)
 		kfree(p->ptrs[i]);
 	kfree(p);
 }
diff --git a/mm/swap.c b/mm/swap.c
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -512,7 +512,7 @@ long percpu_counter_sum(struct percpu_counter *fbc)
 	spin_lock(&fbc->lock);
 	ret = fbc->count;
-	for_each_cpu(cpu) {
+	for_each_possible_cpu(cpu) {
 		long *pcount = per_cpu_ptr(fbc->counters, cpu);
 		ret += *pcount;
 	}
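
The mm/slab.c and mm/swap.c hunks make the underlying rule visible: per-CPU storage is allocated for every possible CPU (see the free_percpu() comment above), so any pass that touches each slot, whether summing counters or freeing memory, has to walk the same possible set even while some of those CPUs are offline. A hedged sketch of that pattern, loosely modelled on the percpu_counter_sum() hunk (struct demo_counter and its fields are illustrative, not from this patch):

```c
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>

/* Illustrative counter, loosely modelled on the 2.6.16-era percpu_counter. */
struct demo_counter {
	spinlock_t lock;
	long count;     /* slow-path global value */
	long *counters; /* from alloc_percpu(): one slot per possible CPU */
};

static long demo_counter_sum(struct demo_counter *dc)
{
	long ret;
	int cpu;

	spin_lock(&dc->lock);
	ret = dc->count;
	/*
	 * counters was allocated for all possible CPUs, and a CPU that has
	 * gone offline may still hold a non-zero delta, so the possible
	 * mask is the one to iterate here, not the online mask.
	 */
	for_each_possible_cpu(cpu)
		ret += *per_cpu_ptr(dc->counters, cpu);
	spin_unlock(&dc->lock);
	return ret;
}
```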