author		hawkes@sgi.com <hawkes@sgi.com>		2005-10-10 08:43:26 -0700
committer	Tony Luck <tony.luck@intel.com>		2005-10-25 15:10:08 -0700
commit		dc565b525d4b7091a3abb6616d210c8a896a11d7 (patch)
tree		6bd7e8f5cd4e912bfe7f704b1a7b084bfc3cc123
parent		444d1d9bb5b724f03344c9317bc01d54a9b39073 (diff)
[IA64] wider use of for_each_cpu_mask() in arch/ia64
In arch/ia64 change the explicit use of for-loops and NR_CPUS into the general for_each_cpu() or for_each_online_cpu() constructs, as appropriate. This widens the scope of potential future optimizations of the general constructs, as well as takes advantage of the existing optimizations of first_cpu() and next_cpu().

Signed-off-by: John Hawkes <hawkes@sgi.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
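As a rough illustration of the pattern this patch applies (the do_something() helper and the local variable below are hypothetical, not taken from the patch itself), an open-coded loop of the old form

	unsigned int i;

	/* old style: walk every index up to NR_CPUS and filter by hand */
	for (i = 0; i < NR_CPUS; i++)
		if (cpu_online(i))
			do_something(i);

becomes

	/* new style: the generic iterator skips offline CPUs itself,
	 * internally built on first_cpu()/next_cpu() over cpu_online_map */
	for_each_online_cpu(i)
		do_something(i);

The same substitution with for_each_cpu() covers loops that filtered on cpu_possible(i) instead, as in the module.c hunk below.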
-rw-r--r--	arch/ia64/kernel/irq.c		12
-rw-r--r--	arch/ia64/kernel/module.c	6
-rw-r--r--	arch/ia64/kernel/smp.c		10
-rw-r--r--	arch/ia64/kernel/smpboot.c	6
-rw-r--r--	arch/ia64/mm/tlb.c		5
5 files changed, 20 insertions, 19 deletions
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
index 205d980..d33244c 100644
--- a/arch/ia64/kernel/irq.c
+++ b/arch/ia64/kernel/irq.c
@@ -57,9 +57,9 @@ int show_interrupts(struct seq_file *p, void *v)
 	if (i == 0) {
 		seq_printf(p, " ");
-		for (j=0; j<NR_CPUS; j++)
-			if (cpu_online(j))
-				seq_printf(p, "CPU%d ",j);
+		for_each_online_cpu(j) {
+			seq_printf(p, "CPU%d ",j);
+		}
 		seq_putc(p, '\n');
 	}
@@ -72,9 +72,9 @@ int show_interrupts(struct seq_file *p, void *v)
 #ifndef CONFIG_SMP
 		seq_printf(p, "%10u ", kstat_irqs(i));
 #else
-		for (j = 0; j < NR_CPUS; j++)
-			if (cpu_online(j))
-				seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
+		for_each_online_cpu(j) {
+			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
+		}
 #endif
 		seq_printf(p, " %14s", irq_desc[i].handler->typename);
 		seq_printf(p, " %s", action->name);
diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
index f1aca7c..7a2f0a7 100644
--- a/arch/ia64/kernel/module.c
+++ b/arch/ia64/kernel/module.c
@@ -947,8 +947,8 @@ void
 percpu_modcopy (void *pcpudst, const void *src, unsigned long size)
 {
 	unsigned int i;
-	for (i = 0; i < NR_CPUS; i++)
-		if (cpu_possible(i))
-			memcpy(pcpudst + __per_cpu_offset[i], src, size);
+	for_each_cpu(i) {
+		memcpy(pcpudst + __per_cpu_offset[i], src, size);
+	}
 }
 #endif /* CONFIG_SMP */
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index 0166a98..657ac99 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -185,8 +185,8 @@ send_IPI_allbutself (int op)
 {
 	unsigned int i;
-	for (i = 0; i < NR_CPUS; i++) {
-		if (cpu_online(i) && i != smp_processor_id())
+	for_each_online_cpu(i) {
+		if (i != smp_processor_id())
 			send_IPI_single(i, op);
 	}
 }
@@ -199,9 +199,9 @@ send_IPI_all (int op)
 {
 	int i;
-	for (i = 0; i < NR_CPUS; i++)
-		if (cpu_online(i))
-			send_IPI_single(i, op);
+	for_each_online_cpu(i) {
+		send_IPI_single(i, op);
+	}
 }
 /*
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 7d72c0d..400a489 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -694,9 +694,9 @@ smp_cpus_done (unsigned int dummy)
 	 * Allow the user to impress friends.
 	 */
-	for (cpu = 0; cpu < NR_CPUS; cpu++)
-		if (cpu_online(cpu))
-			bogosum += cpu_data(cpu)->loops_per_jiffy;
+	for_each_online_cpu(cpu) {
+		bogosum += cpu_data(cpu)->loops_per_jiffy;
+	}
 	printk(KERN_INFO "Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
 	       (int)num_online_cpus(), bogosum/(500000/HZ), (bogosum/(5000/HZ))%100);
diff --git a/arch/ia64/mm/tlb.c b/arch/ia64/mm/tlb.c
index 464557e..987fb75 100644
--- a/arch/ia64/mm/tlb.c
+++ b/arch/ia64/mm/tlb.c
@@ -77,9 +77,10 @@ wrap_mmu_context (struct mm_struct *mm)
 	/* can't call flush_tlb_all() here because of race condition with O(1) scheduler [EF] */
 	{
 		int cpu = get_cpu(); /* prevent preemption/migration */
-		for (i = 0; i < NR_CPUS; ++i)
-			if (cpu_online(i) && (i != cpu))
+		for_each_online_cpu(i) {
+			if (i != cpu)
 				per_cpu(ia64_need_tlb_flush, i) = 1;
+		}
 		put_cpu();
 	}
 	local_flush_tlb_all();