| author | Xiantao Zhang <xiantao.zhang@intel.com> | 2008-04-03 14:39:43 -0400 |
|---|---|---|
| committer | Tony Luck <tony.luck@intel.com> | 2008-04-03 14:39:43 -0400 |
| commit | 31a6b11fed6ceec07ec4bdfefae56b8252d450cf (patch) | |
| tree | 96bb152459c41fe602c463f08e4fd979a0f87fe3 | |
| parent | 96651896b8d9ad4244a1c3ed9691faa3e382f503 (diff) | |
[IA64] Implement smp_call_function_mask for ia64

This interface provides more flexible functionality for the smp
infrastructure ... e.g. KVM frequently needs to operate on
a subset of cpus. A hypothetical usage sketch follows the
diffstat below.

Signed-off-by: Xiantao Zhang <xiantao.zhang@intel.com>
Signed-off-by: Tony Luck <tony.luck@intel.com>
| -rw-r--r-- | arch/ia64/kernel/smp.c | 82 |
|---|---|---|
| -rw-r--r-- | include/asm-ia64/smp.h | 3 |

2 files changed, 85 insertions, 0 deletions
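
Before the patch itself, a minimal sketch of how a caller such as KVM might use the new interface. Only smp_call_function_mask() comes from this patch; the callback, the caller's name, and the mask handling are illustrative assumptions:

```c
#include <linux/smp.h>
#include <linux/cpumask.h>

/* Hypothetical callback: it runs in IPI context on each target cpu,
 * so it must be fast and non-blocking (see the kernel-doc below). */
static void flush_vcpu_state(void *info)
{
	/* ... per-cpu work on behalf of the caller ... */
}

/* Hypothetical caller: run flush_vcpu_state() on a subset of cpus. */
static int flush_on_cpus(cpumask_t target_cpus, void *info)
{
	int ret;

	/* The mask must not include the current cpu, so drop it.
	 * get_cpu() also pins us to this cpu for the duration. */
	cpu_clear(get_cpu(), target_cpus);

	/* wait=1: return only after flush_vcpu_state() has completed
	 * on every online cpu remaining in target_cpus. */
	ret = smp_call_function_mask(target_cpus, flush_vcpu_state, info, 1);

	put_cpu();
	return ret;
}
```

With wait=1 as here, stack-allocated data behind info is safe; with wait=0 the call returns as soon as every target has picked up the request, so whatever info points at must outlive the callbacks.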
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index 4e446aa5f4ac..9a9d4c489330 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -213,6 +213,19 @@ send_IPI_allbutself (int op)
  * Called with preemption disabled.
  */
 static inline void
+send_IPI_mask(cpumask_t mask, int op)
+{
+	unsigned int cpu;
+
+	for_each_cpu_mask(cpu, mask) {
+		send_IPI_single(cpu, op);
+	}
+}
+
+/*
+ * Called with preemption disabled.
+ */
+static inline void
 send_IPI_all (int op)
 {
 	int i;
@@ -401,6 +414,75 @@ smp_call_function_single (int cpuid, void (*func) (void *info), void *info, int
 }
 EXPORT_SYMBOL(smp_call_function_single);
 
+/**
+ * smp_call_function_mask(): Run a function on a set of other CPUs.
+ * <mask>	The set of cpus to run on.  Must not include the current cpu.
+ * <func>	The function to run.  This must be fast and non-blocking.
+ * <info>	An arbitrary pointer to pass to the function.
+ * <wait>	If true, wait (atomically) until function
+ *		has completed on other CPUs.
+ *
+ * Returns 0 on success, else a negative status code.
+ *
+ * If @wait is true, then returns once @func has returned; otherwise
+ * it returns just before the target cpu calls @func.
+ *
+ * You must not call this function with disabled interrupts or from a
+ * hardware interrupt handler or from a bottom half handler.
+ */
+int smp_call_function_mask(cpumask_t mask,
+			   void (*func)(void *), void *info,
+			   int wait)
+{
+	struct call_data_struct data;
+	cpumask_t allbutself;
+	int cpus;
+
+	spin_lock(&call_lock);
+	allbutself = cpu_online_map;
+	cpu_clear(smp_processor_id(), allbutself);
+
+	cpus_and(mask, mask, allbutself);
+	cpus = cpus_weight(mask);
+	if (!cpus) {
+		spin_unlock(&call_lock);
+		return 0;
+	}
+
+	/* Can deadlock when called with interrupts disabled */
+	WARN_ON(irqs_disabled());
+
+	data.func = func;
+	data.info = info;
+	atomic_set(&data.started, 0);
+	data.wait = wait;
+	if (wait)
+		atomic_set(&data.finished, 0);
+
+	call_data = &data;
+	mb();	/* ensure store to call_data precedes setting of IPI_CALL_FUNC */
+
+	/* Send a message to other CPUs */
+	if (cpus_equal(mask, allbutself))
+		send_IPI_allbutself(IPI_CALL_FUNC);
+	else
+		send_IPI_mask(mask, IPI_CALL_FUNC);
+
+	/* Wait for response */
+	while (atomic_read(&data.started) != cpus)
+		cpu_relax();
+
+	if (wait)
+		while (atomic_read(&data.finished) != cpus)
+			cpu_relax();
+	call_data = NULL;
+
+	spin_unlock(&call_lock);
+	return 0;
+
+}
+EXPORT_SYMBOL(smp_call_function_mask);
+
 /*
  * this function sends a 'generic call function' IPI to all other CPUs
  * in the system.
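
The started/finished counters only make sense together with the IPI receive path, which this patch does not touch. As a rough sketch (paraphrased from the IPI_CALL_FUNC branch of handle_IPI() in this same file; the standalone function name is hypothetical), each target cpu runs the mirror image of the spin loops above:

```c
/* Sketch of what each target cpu does when IPI_CALL_FUNC arrives;
 * an assumption paraphrasing handle_IPI() in arch/ia64/kernel/smp.c,
 * under a hypothetical name.  call_data and struct call_data_struct
 * are the file-local globals that smp_call_function_mask() publishes. */
static void handle_call_func_ipi(void)
{
	struct call_data_struct *data = call_data;
	void (*func)(void *) = data->func;
	void *info = data->info;
	int wait = data->wait;

	mb();				/* read all fields before signalling */
	atomic_inc(&data->started);	/* caller's first spin loop advances */

	(*func)(info);			/* run the requested function */

	if (wait) {
		mb();			/* make func's stores visible first */
		atomic_inc(&data->finished);	/* caller's second loop advances */
	}
}
```

This pairing also explains the mb() before the IPIs go out: call_data must be globally visible before any target cpu can take IPI_CALL_FUNC and dereference it.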
diff --git a/include/asm-ia64/smp.h b/include/asm-ia64/smp.h
index 4fa733dd417a..ec5f355fb7e3 100644
--- a/include/asm-ia64/smp.h
+++ b/include/asm-ia64/smp.h
@@ -38,6 +38,9 @@ ia64_get_lid (void)
 	return lid.f.id << 8 | lid.f.eid;
 }
 
+extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *),
+				  void *info, int wait);
+
 #define hard_smp_processor_id()	ia64_get_lid()
 
 #ifdef CONFIG_SMP
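
The header change exposes the call to any ia64 code, which makes the WARN_ON(irqs_disabled()) in the implementation worth spelling out. A hypothetical anti-example (name assumed) of the cross-call deadlock it exists to catch:

```c
/* Hypothetical MIS-use that WARN_ON(irqs_disabled()) flags.  With
 * interrupts off, this cpu can no longer take IPI_CALL_FUNC; if
 * another cpu concurrently targets us with wait=1 while holding
 * call_lock, it spins forever on data.started (we never service
 * its IPI) and we spin forever trying to take call_lock. */
static void broken_cross_call(cpumask_t peers, void (*fn)(void *), void *arg)
{
	unsigned long flags;

	local_irq_save(flags);		/* BUG: IPIs now blocked on this cpu */
	smp_call_function_mask(peers, fn, arg, 1);	/* may never return */
	local_irq_restore(flags);
}
```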
