author     Glauber Costa <gcosta@redhat.com>    2008-03-03 12:12:47 -0500
committer  Ingo Molnar <mingo@elte.hu>          2008-04-17 11:40:55 -0400
commit     2513926c286ca1d0d189c206966011bdd4080354 (patch)
tree       f93f8788a4c972e40231aae88c15015c79d019d7 /arch
parent     3a36d1e435af79ec3bc5ead871e5b22d5558ebf3 (diff)
x86: change x86_64 smp_call_function_mask to look alike i386
The two versions of smp_call_function_mask (the inner version and the
outer version that takes the lock) are merged into one. With this
change, the i386 and x86_64 versions look exactly the same.
Signed-off-by: Glauber Costa <gcosta@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
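
For context, this is roughly how a caller drives the API being unified
here; flush_remote() and kick_other_cpus() are hypothetical names used
only for illustration, not part of this patch:

	#include <linux/smp.h>

	/* Runs on each target CPU from IPI context: must be fast and
	 * must not sleep. */
	static void flush_remote(void *info)
	{
	}

	static void kick_other_cpus(void)
	{
		cpumask_t mask = cpu_online_map;

		cpu_clear(smp_processor_id(), mask);

		/* wait == 1: spin until every target CPU has run
		 * flush_remote().  Interrupts must be enabled here,
		 * otherwise the WARN_ON() in the implementation fires
		 * and the call can deadlock on call_lock. */
		smp_call_function_mask(mask, flush_remote, NULL, 1);
	}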
Diffstat (limited to 'arch')
 -rw-r--r--  arch/x86/kernel/smp_32.c |  2
 -rw-r--r--  arch/x86/kernel/smp_64.c | 57
 2 files changed, 17 insertions, 42 deletions
diff --git a/arch/x86/kernel/smp_32.c b/arch/x86/kernel/smp_32.c
index dc0cde9d16fb..e4a6b669a0b8 100644
--- a/arch/x86/kernel/smp_32.c
+++ b/arch/x86/kernel/smp_32.c
@@ -583,7 +583,7 @@ native_smp_call_function_mask(cpumask_t mask,
 	atomic_set(&data.finished, 0);
 
 	call_data = &data;
-	mb();
+	wmb();
 
 	/* Send a message to other CPUs */
 	if (cpus_equal(mask, allbutself))
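
The mb() -> wmb() change above is the publish-then-notify pattern: all
that must be ordered before raising the IPI is that the stores
initializing data and the store publishing call_data become visible
before the store that triggers the interrupt, so a write barrier is
sufficient. A minimal sketch of the pattern, with the rest of the real
function elided:

	static struct call_data_struct *call_data;

	/* Sketch only: publish the descriptor, then kick the targets. */
	static void publish_and_notify(struct call_data_struct *data,
				       cpumask_t mask)
	{
		call_data = data;	/* publish shared descriptor */
		wmb();			/* order the stores above... */

		/* ...before the store that raises the interrupt */
		send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
	}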
diff --git a/arch/x86/kernel/smp_64.c b/arch/x86/kernel/smp_64.c
index aa2edb7f3a51..e4494e829dfa 100644
--- a/arch/x86/kernel/smp_64.c
+++ b/arch/x86/kernel/smp_64.c
@@ -354,26 +354,30 @@ static void __smp_call_function(void (*func) (void *info), void *info,
 }
 
 
-/*
- * this function sends a 'generic call function' IPI to all other CPU
- * of the system defined in the mask.
- */
-static int __smp_call_function_mask(cpumask_t mask,
-				    void (*func)(void *), void *info,
-				    int wait)
+int native_smp_call_function_mask(cpumask_t mask,
+				  void (*func)(void *), void *info,
+				  int wait)
 {
 	struct call_data_struct data;
 	cpumask_t allbutself;
 	int cpus;
 
+	/* Can deadlock when called with interrupts disabled */
+	WARN_ON(irqs_disabled());
+
+	/* Holding any lock stops cpus from going down. */
+	spin_lock(&call_lock);
+
 	allbutself = cpu_online_map;
 	cpu_clear(smp_processor_id(), allbutself);
 
 	cpus_and(mask, mask, allbutself);
 	cpus = cpus_weight(mask);
 
-	if (!cpus)
+	if (!cpus) {
+		spin_unlock(&call_lock);
 		return 0;
+	}
 
 	data.func = func;
 	data.info = info;
@@ -395,43 +399,14 @@ static int __smp_call_function_mask(cpumask_t mask,
 	while (atomic_read(&data.started) != cpus)
 		cpu_relax();
 
-	if (!wait)
-		return 0;
+	if (wait)
+		while (atomic_read(&data.finished) != cpus)
+			cpu_relax();
 
-	while (atomic_read(&data.finished) != cpus)
-		cpu_relax();
+	spin_unlock(&call_lock);
 
 	return 0;
 }
-/**
- * smp_call_function_mask(): Run a function on a set of other CPUs.
- * @mask: The set of cpus to run on.  Must not include the current cpu.
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @wait: If true, wait (atomically) until function has completed on other CPUs.
- *
- * Returns 0 on success, else a negative status code.
- *
- * If @wait is true, then returns once @func has returned; otherwise
- * it returns just before the target cpu calls @func.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
- */
-int native_smp_call_function_mask(cpumask_t mask,
-				  void (*func)(void *), void *info,
-				  int wait)
-{
-	int ret;
-
-	/* Can deadlock when called with interrupts disabled */
-	WARN_ON(irqs_disabled());
-
-	spin_lock(&call_lock);
-	ret = __smp_call_function_mask(mask, func, info, wait);
-	spin_unlock(&call_lock);
-	return ret;
-}
 
 static void stop_this_cpu(void *dummy)
 {
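
For completeness, the sender's spins on data.started and data.finished
pair with the IPI handler that runs on each target CPU. A sketch of
what that handler looks like in this era's smp_64.c, reconstructed for
illustration (see the actual tree for the authoritative version):

	asmlinkage void smp_call_function_interrupt(void)
	{
		void (*func)(void *info) = call_data->func;
		void *info = call_data->info;
		int wait = call_data->wait;

		ack_APIC_irq();

		/* Tell the sender the descriptor was grabbed; once
		 * started is bumped, the sender may let its on-stack
		 * data go out of scope unless it asked to wait. */
		mb();
		atomic_inc(&call_data->started);

		irq_enter();
		(*func)(info);
		irq_exit();

		if (wait) {
			mb();
			atomic_inc(&call_data->finished);
		}
	}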