aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorJan Beulich <jbeulich@novell.com>2007-05-02 13:27:05 -0400
committerAndi Kleen <andi@basil.nowhere.org>2007-05-02 13:27:05 -0400
commit9964cf7d776600724ef5f1b33303ceadc588b8ba (patch)
tree80cf8f027b251ed5243d4f8e2219782abfe18df6
parentb0354795c9c8fef2fadf8f867586c78efd9a1dc9 (diff)
[PATCH] x86: consolidate smp_send_stop()
Synchronize i386's smp_send_stop() with x86-64's in only try-locking the call lock to prevent deadlocks when called from panic(). In both versions, disable interrupts before clearing the CPU off the online map to eliminate races with IRQ handlers inspecting this map. Also in both versions, save/restore interrupts rather than disabling/enabling them. On x86-64, eliminate one function used here by folding it into its single caller, convert to static, and rename for consistency with i386 (lkcd may like this). Signed-off-by: Jan Beulich <jbeulich@novell.com> Signed-off-by: Andi Kleen <ak@suse.de>
-rw-r--r--arch/i386/kernel/smp.c68
-rw-r--r--arch/x86_64/kernel/smp.c28
-rw-r--r--include/asm-x86_64/smp.h1
3 files changed, 48 insertions, 49 deletions
diff --git a/arch/i386/kernel/smp.c b/arch/i386/kernel/smp.c
index 0e8977871b1f..0cd459baad68 100644
--- a/arch/i386/kernel/smp.c
+++ b/arch/i386/kernel/smp.c
@@ -515,35 +515,14 @@ void unlock_ipi_call_lock(void)
515 515
516static struct call_data_struct *call_data; 516static struct call_data_struct *call_data;
517 517
518/** 518static void __smp_call_function(void (*func) (void *info), void *info,
519 * smp_call_function(): Run a function on all other CPUs. 519 int nonatomic, int wait)
520 * @func: The function to run. This must be fast and non-blocking.
521 * @info: An arbitrary pointer to pass to the function.
522 * @nonatomic: currently unused.
523 * @wait: If true, wait (atomically) until function has completed on other CPUs.
524 *
525 * Returns 0 on success, else a negative status code. Does not return until
526 * remote CPUs are nearly ready to execute <<func>> or are or have executed.
527 *
528 * You must not call this function with disabled interrupts or from a
529 * hardware interrupt handler or from a bottom half handler.
530 */
531int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
532 int wait)
533{ 520{
534 struct call_data_struct data; 521 struct call_data_struct data;
535 int cpus; 522 int cpus = num_online_cpus() - 1;
536
537 /* Holding any lock stops cpus from going down. */
538 spin_lock(&call_lock);
539 cpus = num_online_cpus() - 1;
540 if (!cpus) {
541 spin_unlock(&call_lock);
542 return 0;
543 }
544 523
545 /* Can deadlock when called with interrupts disabled */ 524 if (!cpus)
546 WARN_ON(irqs_disabled()); 525 return;
547 526
548 data.func = func; 527 data.func = func;
549 data.info = info; 528 data.info = info;
@@ -565,6 +544,30 @@ int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
565 if (wait) 544 if (wait)
566 while (atomic_read(&data.finished) != cpus) 545 while (atomic_read(&data.finished) != cpus)
567 cpu_relax(); 546 cpu_relax();
547}
548
549/**
550 * smp_call_function(): Run a function on all other CPUs.
551 * @func: The function to run. This must be fast and non-blocking.
552 * @info: An arbitrary pointer to pass to the function.
553 * @nonatomic: currently unused.
554 * @wait: If true, wait (atomically) until function has completed on other CPUs.
555 *
556 * Returns 0 on success, else a negative status code. Does not return until
557 * remote CPUs are nearly ready to execute <<func>> or are or have executed.
558 *
559 * You must not call this function with disabled interrupts or from a
560 * hardware interrupt handler or from a bottom half handler.
561 */
562int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
563 int wait)
564{
565 /* Can deadlock when called with interrupts disabled */
566 WARN_ON(irqs_disabled());
567
568 /* Holding any lock stops cpus from going down. */
569 spin_lock(&call_lock);
570 __smp_call_function(func, info, nonatomic, wait);
568 spin_unlock(&call_lock); 571 spin_unlock(&call_lock);
569 572
570 return 0; 573 return 0;
@@ -573,11 +576,11 @@ EXPORT_SYMBOL(smp_call_function);
573 576
574static void stop_this_cpu (void * dummy) 577static void stop_this_cpu (void * dummy)
575{ 578{
579 local_irq_disable();
576 /* 580 /*
577 * Remove this CPU: 581 * Remove this CPU:
578 */ 582 */
579 cpu_clear(smp_processor_id(), cpu_online_map); 583 cpu_clear(smp_processor_id(), cpu_online_map);
580 local_irq_disable();
581 disable_local_APIC(); 584 disable_local_APIC();
582 if (cpu_data[smp_processor_id()].hlt_works_ok) 585 if (cpu_data[smp_processor_id()].hlt_works_ok)
583 for(;;) halt(); 586 for(;;) halt();
@@ -590,11 +593,16 @@ static void stop_this_cpu (void * dummy)
590 593
591void smp_send_stop(void) 594void smp_send_stop(void)
592{ 595{
593 smp_call_function(stop_this_cpu, NULL, 1, 0); 596 /* Don't deadlock on the call lock in panic */
597 int nolock = !spin_trylock(&call_lock);
598 unsigned long flags;
594 599
595 local_irq_disable(); 600 local_irq_save(flags);
601 __smp_call_function(stop_this_cpu, NULL, 0, 0);
602 if (!nolock)
603 spin_unlock(&call_lock);
596 disable_local_APIC(); 604 disable_local_APIC();
597 local_irq_enable(); 605 local_irq_restore(flags);
598} 606}
599 607
600/* 608/*
diff --git a/arch/x86_64/kernel/smp.c b/arch/x86_64/kernel/smp.c
index af1ec4d23cf8..bd1d123947ce 100644
--- a/arch/x86_64/kernel/smp.c
+++ b/arch/x86_64/kernel/smp.c
@@ -452,42 +452,34 @@ int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
452} 452}
453EXPORT_SYMBOL(smp_call_function); 453EXPORT_SYMBOL(smp_call_function);
454 454
455void smp_stop_cpu(void) 455static void stop_this_cpu(void *dummy)
456{ 456{
457 unsigned long flags; 457 local_irq_disable();
458 /* 458 /*
459 * Remove this CPU: 459 * Remove this CPU:
460 */ 460 */
461 cpu_clear(smp_processor_id(), cpu_online_map); 461 cpu_clear(smp_processor_id(), cpu_online_map);
462 local_irq_save(flags);
463 disable_local_APIC(); 462 disable_local_APIC();
464 local_irq_restore(flags);
465}
466
467static void smp_really_stop_cpu(void *dummy)
468{
469 smp_stop_cpu();
470 for (;;) 463 for (;;)
471 halt(); 464 halt();
472} 465}
473 466
474void smp_send_stop(void) 467void smp_send_stop(void)
475{ 468{
476 int nolock = 0; 469 int nolock;
470 unsigned long flags;
471
477 if (reboot_force) 472 if (reboot_force)
478 return; 473 return;
474
479 /* Don't deadlock on the call lock in panic */ 475 /* Don't deadlock on the call lock in panic */
480 if (!spin_trylock(&call_lock)) { 476 nolock = !spin_trylock(&call_lock);
481 /* ignore locking because we have panicked anyways */ 477 local_irq_save(flags);
482 nolock = 1; 478 __smp_call_function(stop_this_cpu, NULL, 0, 0);
483 }
484 __smp_call_function(smp_really_stop_cpu, NULL, 0, 0);
485 if (!nolock) 479 if (!nolock)
486 spin_unlock(&call_lock); 480 spin_unlock(&call_lock);
487
488 local_irq_disable();
489 disable_local_APIC(); 481 disable_local_APIC();
490 local_irq_enable(); 482 local_irq_restore(flags);
491} 483}
492 484
493/* 485/*
diff --git a/include/asm-x86_64/smp.h b/include/asm-x86_64/smp.h
index f4236d7789aa..d5704421456b 100644
--- a/include/asm-x86_64/smp.h
+++ b/include/asm-x86_64/smp.h
@@ -37,7 +37,6 @@ extern void lock_ipi_call_lock(void);
37extern void unlock_ipi_call_lock(void); 37extern void unlock_ipi_call_lock(void);
38extern int smp_num_siblings; 38extern int smp_num_siblings;
39extern void smp_send_reschedule(int cpu); 39extern void smp_send_reschedule(int cpu);
40void smp_stop_cpu(void);
41 40
42extern cpumask_t cpu_sibling_map[NR_CPUS]; 41extern cpumask_t cpu_sibling_map[NR_CPUS];
43extern cpumask_t cpu_core_map[NR_CPUS]; 42extern cpumask_t cpu_core_map[NR_CPUS];