author | Linus Torvalds <torvalds@linux-foundation.org> | 2008-04-18 09:44:55 -0700 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2008-04-18 09:44:55 -0700 |
commit | 4786b4ee22de6304e841b12ee22b849230d7fba3 (patch) | |
tree | 08793b8fbcd63204d5d3355ac755745adcfef170 /arch/ia64/kernel/smp.c | |
parent | 253ba4e79edc695b2925bd2ef34de06ff4d4070c (diff) | |
parent | 71b264f85ff50c14fe945ffff06ae0d5e9a9124e (diff) | |
Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6
* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/aegl/linux-2.6: (27 commits)
[IA64] kdump: Add crash_save_vmcoreinfo for INIT
[IA64] Fix NUMA configuration issue
[IA64] Itanium Spec updates
[IA64] Untangle sync_icache_dcache() page size determination
[IA64] arch/ia64/kernel/: use time_* macros
[IA64] remove redundant display of free swap space in show_mem()
[IA64] make IOMMU respect the segment boundary limits
[IA64] kprobes: kprobe-booster for ia64
[IA64] fix getpid and set_tid_address fast system calls for pid namespaces
[IA64] Replace explicit jiffies tests with time_* macros.
[IA64] use goto to jump out do/while_each_thread
[IA64] Fix unlock ordering in smp_callin
[IA64] pgd_offset() constfication.
[IA64] kdump: crash.c coding style fix
[IA64] kdump: add kdump_on_fatal_mca
[IA64] Minimize per_cpu reservations.
[IA64] Correct pernodesize calculation.
[IA64] Kernel parameter for max number of concurrent global TLB purges
[IA64] Multiple outstanding ptc.g instruction support
[IA64] Implement smp_call_function_mask for ia64
...
Diffstat (limited to 'arch/ia64/kernel/smp.c')
-rw-r--r-- | arch/ia64/kernel/smp.c | 82 |
1 files changed, 82 insertions, 0 deletions
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index 4e446aa..9a9d4c4 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -213,6 +213,19 @@ send_IPI_allbutself (int op)
  * Called with preemption disabled.
  */
 static inline void
+send_IPI_mask(cpumask_t mask, int op)
+{
+	unsigned int cpu;
+
+	for_each_cpu_mask(cpu, mask) {
+		send_IPI_single(cpu, op);
+	}
+}
+
+/*
+ * Called with preemption disabled.
+ */
+static inline void
 send_IPI_all (int op)
 {
 	int i;
@@ -401,6 +414,75 @@ smp_call_function_single (int cpuid, void (*func) (void *info), void *info, int
 }
 EXPORT_SYMBOL(smp_call_function_single);
 
+/**
+ * smp_call_function_mask(): Run a function on a set of other CPUs.
+ * <mask>	The set of cpus to run on.  Must not include the current cpu.
+ * <func>	The function to run. This must be fast and non-blocking.
+ * <info>	An arbitrary pointer to pass to the function.
+ * <wait>	If true, wait (atomically) until function
+ *		has completed on other CPUs.
+ *
+ * Returns 0 on success, else a negative status code.
+ *
+ * If @wait is true, then returns once @func has returned; otherwise
+ * it returns just before the target cpu calls @func.
+ *
+ * You must not call this function with disabled interrupts or from a
+ * hardware interrupt handler or from a bottom half handler.
+ */
+int smp_call_function_mask(cpumask_t mask,
+			   void (*func)(void *), void *info,
+			   int wait)
+{
+	struct call_data_struct data;
+	cpumask_t allbutself;
+	int cpus;
+
+	spin_lock(&call_lock);
+	allbutself = cpu_online_map;
+	cpu_clear(smp_processor_id(), allbutself);
+
+	cpus_and(mask, mask, allbutself);
+	cpus = cpus_weight(mask);
+	if (!cpus) {
+		spin_unlock(&call_lock);
+		return 0;
+	}
+
+	/* Can deadlock when called with interrupts disabled */
+	WARN_ON(irqs_disabled());
+
+	data.func = func;
+	data.info = info;
+	atomic_set(&data.started, 0);
+	data.wait = wait;
+	if (wait)
+		atomic_set(&data.finished, 0);
+
+	call_data = &data;
+	mb(); /* ensure store to call_data precedes setting of IPI_CALL_FUNC*/
+
+	/* Send a message to other CPUs */
+	if (cpus_equal(mask, allbutself))
+		send_IPI_allbutself(IPI_CALL_FUNC);
+	else
+		send_IPI_mask(mask, IPI_CALL_FUNC);
+
+	/* Wait for response */
+	while (atomic_read(&data.started) != cpus)
+		cpu_relax();
+
+	if (wait)
+		while (atomic_read(&data.finished) != cpus)
+			cpu_relax();
+	call_data = NULL;
+
+	spin_unlock(&call_lock);
+	return 0;
+
+}
+EXPORT_SYMBOL(smp_call_function_mask);
+
 /*
  * this function sends a 'generic call function' IPI to all other CPUs
  * in the system.
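For context, here is a minimal sketch of how a caller might use the smp_call_function_mask() added above. It is illustrative only, not part of the patch: the helper names my_flush_local_cache() and my_flush_some_cpus() and the chosen CPU numbers are made up, and the 2.6.25-era cpumask accessors (CPU_MASK_NONE, cpu_set, cpu_clear) are assumed.

```c
/*
 * Illustrative only -- not part of the patch.  my_flush_local_cache()
 * and my_flush_some_cpus() are hypothetical names; the 2.6.25-era
 * cpumask_t API (CPU_MASK_NONE, cpu_set, cpu_clear) is assumed.
 */
#include <linux/cpumask.h>
#include <linux/preempt.h>
#include <linux/smp.h>

/* Runs on every CPU in the mask; must be fast and non-blocking. */
static void my_flush_local_cache(void *info)
{
	(void)info;
}

static int my_flush_some_cpus(void)
{
	cpumask_t mask = CPU_MASK_NONE;
	int ret;

	cpu_set(1, mask);	/* target CPUs 1 and 2, for example */
	cpu_set(2, mask);

	preempt_disable();	/* keep smp_processor_id() stable */
	cpu_clear(smp_processor_id(), mask);	/* must not include this CPU */

	/* wait=1: return only after every targeted CPU has run the function */
	ret = smp_call_function_mask(mask, my_flush_local_cache, NULL, 1);
	preempt_enable();

	return ret;
}
```

As the diff shows, the passed-in mask is ANDed with the online map minus the current CPU, and when the result equals "all but self" the cheaper send_IPI_allbutself() path is taken instead of sending per-CPU IPIs via the new send_IPI_mask().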