| author | David S. Miller <davem@davemloft.net> | 2008-08-03 23:56:28 -0700 |
|---|---|---|
| committer | David S. Miller <davem@davemloft.net> | 2008-08-04 13:51:39 -0700 |
| commit | 199266305311d060b6e057fa5c7de01f218bb911 (patch) | |
| tree | 062c97729ec6c89eab3b4b2c8ff173df7b0e3031 /arch/sparc64/kernel | |
| parent | cd5bc89debb4045d55eeffe325b97f2dfba4ddea (diff) | |
sparc64: Call xcall_deliver() directly in some cases.
For these cases the callers make sure:
1) The cpus indicated are online.
2) The current cpu is not in the list of indicated cpus.
Therefore we can pass a pointer to the mask directly.
One of the motivations for this transformation is to make use of
"&cpumask_of_cpu(cpu)", which evaluates to a pointer to constant
data in the kernel and therefore takes up no stack space.
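To illustrate that point, here is a minimal user-space sketch of the idea; the names `single_cpu_mask` and `deliver` are invented for illustration and this is not the kernel's actual cpumask implementation. Taking the address of a precomputed constant mask means no cpumask_t ever has to be built on the caller's stack:

```c
#include <stdio.h>

#define NR_CPUS 64

typedef struct {
	unsigned long bits[NR_CPUS / (8 * sizeof(unsigned long))];
} cpumask_t;

/* Precomputed read-only single-CPU masks.  &single_cpu_mask[cpu]
 * is a pointer into constant data, so callers need no stack copy. */
static const cpumask_t single_cpu_mask[NR_CPUS] = {
	[0] = { { 1UL << 0 } },
	[1] = { { 1UL << 1 } },
	/* ... one entry per cpu ... */
};

/* Callee takes a const pointer, analogous to xcall_deliver()
 * taking a const cpumask_t pointer. */
static void deliver(const cpumask_t *mask)
{
	printf("first mask word: %#lx\n", mask->bits[0]);
}

int main(void)
{
	deliver(&single_cpu_mask[1]);	/* like &cpumask_of_cpu(1) */
	return 0;
}
```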
Hopefully someone in the future will change the interface of
arch_send_call_function_ipi() so that it passes a const cpumask_t
pointer, which would let this optimize even further.
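For reference, the wished-for interface change would look roughly like this. This is a sketch only; `arch_send_call_function_ipi_mask` is a hypothetical name for the shape such an interface could take, not a signature taken from this tree:

```c
/* Today: the whole cpumask_t is copied by value onto the stack. */
void arch_send_call_function_ipi(cpumask_t mask);

/* Hoped for: a const pointer, so callers can hand over
 * &cpumask_of_cpu(cpu) or any other constant mask with no copy. */
void arch_send_call_function_ipi_mask(const cpumask_t *mask);
```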
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch/sparc64/kernel')
-rw-r--r-- | arch/sparc64/kernel/smp.c | 33 |
1 file changed, 10 insertions(+), 23 deletions(-)
```diff
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index 868625e..47b0738 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -792,16 +792,15 @@ extern unsigned long xcall_call_function;
 
 void arch_send_call_function_ipi(cpumask_t mask)
 {
-	smp_cross_call_masked(&xcall_call_function, 0, 0, 0, mask);
+	xcall_deliver((u64) &xcall_call_function, 0, 0, &mask);
 }
 
 extern unsigned long xcall_call_function_single;
 
 void arch_send_call_function_single_ipi(int cpu)
 {
-	cpumask_t mask = cpumask_of_cpu(cpu);
-
-	smp_cross_call_masked(&xcall_call_function_single, 0, 0, 0, mask);
+	xcall_deliver((u64) &xcall_call_function_single, 0, 0,
+		      &cpumask_of_cpu(cpu));
 }
 
 /* Send cross call to all processors except self. */
@@ -959,24 +958,6 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
 	put_cpu();
 }
 
-static void __smp_receive_signal_mask(cpumask_t mask)
-{
-	smp_cross_call_masked(&xcall_receive_signal, 0, 0, 0, mask);
-}
-
-void smp_receive_signal(int cpu)
-{
-	cpumask_t mask = cpumask_of_cpu(cpu);
-
-	if (cpu_online(cpu))
-		__smp_receive_signal_mask(mask);
-}
-
-void smp_receive_signal_client(int irq, struct pt_regs *regs)
-{
-	clear_softint(1 << irq);
-}
-
 void smp_new_mmu_context_version_client(int irq, struct pt_regs *regs)
 {
 	struct mm_struct *mm;
@@ -1374,7 +1355,13 @@ void __init smp_cpus_done(unsigned int max_cpus)
 
 void smp_send_reschedule(int cpu)
 {
-	smp_receive_signal(cpu);
+	xcall_deliver((u64) &xcall_receive_signal, 0, 0,
+		      &cpumask_of_cpu(cpu));
+}
+
+void smp_receive_signal_client(int irq, struct pt_regs *regs)
+{
+	clear_softint(1 << irq);
 }
 
 /* This is a nop because we capture all other cpus
```