diff options
author | Laurent Vivier <Laurent.Vivier@bull.net> | 2007-10-19 14:35:03 -0400 |
---|---|---|
committer | Thomas Gleixner <tglx@linutronix.de> | 2007-10-19 14:35:03 -0400 |
commit | 66d16ed45d19600abd72dbd55bd2018437b24b73 (patch) | |
tree | 3b9cf5ef34044bb4fa01c81b5aa1869813fc9fff /arch | |
parent | 9d1c6e7c86ddc366d67f0c5fa77be9b93710037a (diff) |
x86: implement missing x86_64 function smp_call_function_mask()
This patch defines the missing function smp_call_function_mask() for x86_64;
it is more or less a cut-and-paste of the i386 function. It also removes
some duplicated code.
This function is needed by KVM to execute a function on a set of CPUs.
AK: Fixed description
AK: Moved WARN_ON(irqs_disabled) one level up to not warn in the panic case.
[ tglx: arch/x86 adaptation ]
Signed-off-by: Laurent Vivier <Laurent.Vivier@bull.net>
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch')
-rw-r--r-- | arch/x86/kernel/smp_64.c | 119 |
1 files changed, 63 insertions, 56 deletions
diff --git a/arch/x86/kernel/smp_64.c b/arch/x86/kernel/smp_64.c index 5c2964727d19..03fa6ed559c6 100644 --- a/arch/x86/kernel/smp_64.c +++ b/arch/x86/kernel/smp_64.c | |||
@@ -322,17 +322,27 @@ void unlock_ipi_call_lock(void) | |||
322 | } | 322 | } |
323 | 323 | ||
324 | /* | 324 | /* |
325 | * this function sends a 'generic call function' IPI to one other CPU | 325 | * this function sends a 'generic call function' IPI to all other CPU |
326 | * in the system. | 326 | * of the system defined in the mask. |
327 | * | ||
328 | * cpu is a standard Linux logical CPU number. | ||
329 | */ | 327 | */ |
330 | static void | 328 | |
331 | __smp_call_function_single(int cpu, void (*func) (void *info), void *info, | 329 | static int |
332 | int nonatomic, int wait) | 330 | __smp_call_function_mask(cpumask_t mask, |
331 | void (*func)(void *), void *info, | ||
332 | int wait) | ||
333 | { | 333 | { |
334 | struct call_data_struct data; | 334 | struct call_data_struct data; |
335 | int cpus = 1; | 335 | cpumask_t allbutself; |
336 | int cpus; | ||
337 | |||
338 | allbutself = cpu_online_map; | ||
339 | cpu_clear(smp_processor_id(), allbutself); | ||
340 | |||
341 | cpus_and(mask, mask, allbutself); | ||
342 | cpus = cpus_weight(mask); | ||
343 | |||
344 | if (!cpus) | ||
345 | return 0; | ||
336 | 346 | ||
337 | data.func = func; | 347 | data.func = func; |
338 | data.info = info; | 348 | data.info = info; |
@@ -343,19 +353,55 @@ __smp_call_function_single(int cpu, void (*func) (void *info), void *info, | |||
343 | 353 | ||
344 | call_data = &data; | 354 | call_data = &data; |
345 | wmb(); | 355 | wmb(); |
346 | /* Send a message to all other CPUs and wait for them to respond */ | 356 | |
347 | send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNCTION_VECTOR); | 357 | /* Send a message to other CPUs */ |
358 | if (cpus_equal(mask, allbutself)) | ||
359 | send_IPI_allbutself(CALL_FUNCTION_VECTOR); | ||
360 | else | ||
361 | send_IPI_mask(mask, CALL_FUNCTION_VECTOR); | ||
348 | 362 | ||
349 | /* Wait for response */ | 363 | /* Wait for response */ |
350 | while (atomic_read(&data.started) != cpus) | 364 | while (atomic_read(&data.started) != cpus) |
351 | cpu_relax(); | 365 | cpu_relax(); |
352 | 366 | ||
353 | if (!wait) | 367 | if (!wait) |
354 | return; | 368 | return 0; |
355 | 369 | ||
356 | while (atomic_read(&data.finished) != cpus) | 370 | while (atomic_read(&data.finished) != cpus) |
357 | cpu_relax(); | 371 | cpu_relax(); |
372 | |||
373 | return 0; | ||
374 | } | ||
375 | /** | ||
376 | * smp_call_function_mask(): Run a function on a set of other CPUs. | ||
377 | * @mask: The set of cpus to run on. Must not include the current cpu. | ||
378 | * @func: The function to run. This must be fast and non-blocking. | ||
379 | * @info: An arbitrary pointer to pass to the function. | ||
380 | * @wait: If true, wait (atomically) until function has completed on other CPUs. | ||
381 | * | ||
382 | * Returns 0 on success, else a negative status code. | ||
383 | * | ||
384 | * If @wait is true, then returns once @func has returned; otherwise | ||
385 | * it returns just before the target cpu calls @func. | ||
386 | * | ||
387 | * You must not call this function with disabled interrupts or from a | ||
388 | * hardware interrupt handler or from a bottom half handler. | ||
389 | */ | ||
390 | int smp_call_function_mask(cpumask_t mask, | ||
391 | void (*func)(void *), void *info, | ||
392 | int wait) | ||
393 | { | ||
394 | int ret; | ||
395 | |||
396 | /* Can deadlock when called with interrupts disabled */ | ||
397 | WARN_ON(irqs_disabled()); | ||
398 | |||
399 | spin_lock(&call_lock); | ||
400 | ret = __smp_call_function_mask(mask, func, info, wait); | ||
401 | spin_unlock(&call_lock); | ||
402 | return ret; | ||
358 | } | 403 | } |
404 | EXPORT_SYMBOL(smp_call_function_mask); | ||
359 | 405 | ||
360 | /* | 406 | /* |
361 | * smp_call_function_single - Run a function on a specific CPU | 407 | * smp_call_function_single - Run a function on a specific CPU |
@@ -374,6 +420,7 @@ int smp_call_function_single (int cpu, void (*func) (void *info), void *info, | |||
374 | int nonatomic, int wait) | 420 | int nonatomic, int wait) |
375 | { | 421 | { |
376 | /* prevent preemption and reschedule on another processor */ | 422 | /* prevent preemption and reschedule on another processor */ |
423 | int ret; | ||
377 | int me = get_cpu(); | 424 | int me = get_cpu(); |
378 | 425 | ||
379 | /* Can deadlock when called with interrupts disabled */ | 426 | /* Can deadlock when called with interrupts disabled */ |
@@ -387,51 +434,14 @@ int smp_call_function_single (int cpu, void (*func) (void *info), void *info, | |||
387 | return 0; | 434 | return 0; |
388 | } | 435 | } |
389 | 436 | ||
390 | spin_lock(&call_lock); | 437 | ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, wait); |
391 | __smp_call_function_single(cpu, func, info, nonatomic, wait); | 438 | |
392 | spin_unlock(&call_lock); | ||
393 | put_cpu(); | 439 | put_cpu(); |
394 | return 0; | 440 | return ret; |
395 | } | 441 | } |
396 | EXPORT_SYMBOL(smp_call_function_single); | 442 | EXPORT_SYMBOL(smp_call_function_single); |
397 | 443 | ||
398 | /* | 444 | /* |
399 | * this function sends a 'generic call function' IPI to all other CPUs | ||
400 | * in the system. | ||
401 | */ | ||
402 | static void __smp_call_function (void (*func) (void *info), void *info, | ||
403 | int nonatomic, int wait) | ||
404 | { | ||
405 | struct call_data_struct data; | ||
406 | int cpus = num_online_cpus()-1; | ||
407 | |||
408 | if (!cpus) | ||
409 | return; | ||
410 | |||
411 | data.func = func; | ||
412 | data.info = info; | ||
413 | atomic_set(&data.started, 0); | ||
414 | data.wait = wait; | ||
415 | if (wait) | ||
416 | atomic_set(&data.finished, 0); | ||
417 | |||
418 | call_data = &data; | ||
419 | wmb(); | ||
420 | /* Send a message to all other CPUs and wait for them to respond */ | ||
421 | send_IPI_allbutself(CALL_FUNCTION_VECTOR); | ||
422 | |||
423 | /* Wait for response */ | ||
424 | while (atomic_read(&data.started) != cpus) | ||
425 | cpu_relax(); | ||
426 | |||
427 | if (!wait) | ||
428 | return; | ||
429 | |||
430 | while (atomic_read(&data.finished) != cpus) | ||
431 | cpu_relax(); | ||
432 | } | ||
433 | |||
434 | /* | ||
435 | * smp_call_function - run a function on all other CPUs. | 445 | * smp_call_function - run a function on all other CPUs. |
436 | * @func: The function to run. This must be fast and non-blocking. | 446 | * @func: The function to run. This must be fast and non-blocking. |
437 | * @info: An arbitrary pointer to pass to the function. | 447 | * @info: An arbitrary pointer to pass to the function. |
@@ -449,10 +459,7 @@ static void __smp_call_function (void (*func) (void *info), void *info, | |||
449 | int smp_call_function (void (*func) (void *info), void *info, int nonatomic, | 459 | int smp_call_function (void (*func) (void *info), void *info, int nonatomic, |
450 | int wait) | 460 | int wait) |
451 | { | 461 | { |
452 | spin_lock(&call_lock); | 462 | return smp_call_function_mask(cpu_online_map, func, info, wait); |
453 | __smp_call_function(func,info,nonatomic,wait); | ||
454 | spin_unlock(&call_lock); | ||
455 | return 0; | ||
456 | } | 463 | } |
457 | EXPORT_SYMBOL(smp_call_function); | 464 | EXPORT_SYMBOL(smp_call_function); |
458 | 465 | ||
@@ -479,7 +486,7 @@ void smp_send_stop(void) | |||
479 | /* Don't deadlock on the call lock in panic */ | 486 | /* Don't deadlock on the call lock in panic */ |
480 | nolock = !spin_trylock(&call_lock); | 487 | nolock = !spin_trylock(&call_lock); |
481 | local_irq_save(flags); | 488 | local_irq_save(flags); |
482 | __smp_call_function(stop_this_cpu, NULL, 0, 0); | 489 | __smp_call_function_mask(cpu_online_map, stop_this_cpu, NULL, 0); |
483 | if (!nolock) | 490 | if (!nolock) |
484 | spin_unlock(&call_lock); | 491 | spin_unlock(&call_lock); |
485 | disable_local_APIC(); | 492 | disable_local_APIC(); |