diff options
author | Jeremy Fitzhardinge <jeremy@goop.org> | 2007-05-02 13:27:06 -0400 |
---|---|---|
committer | Andi Kleen <andi@basil.nowhere.org> | 2007-05-02 13:27:06 -0400 |
commit | 19d1743315099665db4ce02c9942507a5ee1deea (patch) | |
tree | 9d6b26114f10415c32bf1a50996aba6fbafd60fa /arch | |
parent | 6b37f5a20c0e5c334c010a587058354215433e92 (diff) |
[PATCH] i386: Simplify smp_call_function*() by using common implementation
smp_call_function and smp_call_function_single are almost complete
duplicates of the same logic. This patch combines them by
implementing them in terms of the more general
smp_call_function_mask().
Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Signed-off-by: Andi Kleen <ak@suse.de>
Cc: Stephane Eranian <eranian@hpl.hp.com>
Cc: Andrew Morton <akpm@osdl.org>
Cc: Andi Kleen <ak@suse.de>
Cc: "Randy.Dunlap" <rdunlap@xenotime.net>
Cc: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch')
-rw-r--r-- | arch/i386/kernel/smp.c | 173 |
1 file changed, 96 insertions, 77 deletions
diff --git a/arch/i386/kernel/smp.c b/arch/i386/kernel/smp.c index 0cd459baad68..b90bebeb1c79 100644 --- a/arch/i386/kernel/smp.c +++ b/arch/i386/kernel/smp.c | |||
@@ -546,34 +546,124 @@ static void __smp_call_function(void (*func) (void *info), void *info, | |||
546 | cpu_relax(); | 546 | cpu_relax(); |
547 | } | 547 | } |
548 | 548 | ||
549 | |||
549 | /** | 550 | /** |
550 | * smp_call_function(): Run a function on all other CPUs. | 551 | * smp_call_function_mask(): Run a function on a set of other CPUs. |
552 | * @mask: The set of cpus to run on. Must not include the current cpu. | ||
551 | * @func: The function to run. This must be fast and non-blocking. | 553 | * @func: The function to run. This must be fast and non-blocking. |
552 | * @info: An arbitrary pointer to pass to the function. | 554 | * @info: An arbitrary pointer to pass to the function. |
553 | * @nonatomic: currently unused. | ||
554 | * @wait: If true, wait (atomically) until function has completed on other CPUs. | 555 | * @wait: If true, wait (atomically) until function has completed on other CPUs. |
555 | * | 556 | * |
556 | * Returns 0 on success, else a negative status code. Does not return until | 557 | * Returns 0 on success, else a negative status code. Does not return until |
557 | * remote CPUs are nearly ready to execute <<func>> or are or have executed. | 558 | * remote CPUs are nearly ready to execute <<func>> or are or have finished. |
558 | * | 559 | * |
559 | * You must not call this function with disabled interrupts or from a | 560 | * You must not call this function with disabled interrupts or from a |
560 | * hardware interrupt handler or from a bottom half handler. | 561 | * hardware interrupt handler or from a bottom half handler. |
561 | */ | 562 | */ |
562 | int smp_call_function (void (*func) (void *info), void *info, int nonatomic, | 563 | int smp_call_function_mask(cpumask_t mask, |
563 | int wait) | 564 | void (*func)(void *), void *info, |
565 | int wait) | ||
564 | { | 566 | { |
567 | struct call_data_struct data; | ||
568 | cpumask_t allbutself; | ||
569 | int cpus; | ||
570 | |||
565 | /* Can deadlock when called with interrupts disabled */ | 571 | /* Can deadlock when called with interrupts disabled */ |
566 | WARN_ON(irqs_disabled()); | 572 | WARN_ON(irqs_disabled()); |
567 | 573 | ||
568 | /* Holding any lock stops cpus from going down. */ | 574 | /* Holding any lock stops cpus from going down. */ |
569 | spin_lock(&call_lock); | 575 | spin_lock(&call_lock); |
570 | __smp_call_function(func, info, nonatomic, wait); | 576 | |
577 | allbutself = cpu_online_map; | ||
578 | cpu_clear(smp_processor_id(), allbutself); | ||
579 | |||
580 | cpus_and(mask, mask, allbutself); | ||
581 | cpus = cpus_weight(mask); | ||
582 | |||
583 | if (!cpus) { | ||
584 | spin_unlock(&call_lock); | ||
585 | return 0; | ||
586 | } | ||
587 | |||
588 | data.func = func; | ||
589 | data.info = info; | ||
590 | atomic_set(&data.started, 0); | ||
591 | data.wait = wait; | ||
592 | if (wait) | ||
593 | atomic_set(&data.finished, 0); | ||
594 | |||
595 | call_data = &data; | ||
596 | mb(); | ||
597 | |||
598 | /* Send a message to other CPUs */ | ||
599 | if (cpus_equal(mask, allbutself)) | ||
600 | send_IPI_allbutself(CALL_FUNCTION_VECTOR); | ||
601 | else | ||
602 | send_IPI_mask(mask, CALL_FUNCTION_VECTOR); | ||
603 | |||
604 | /* Wait for response */ | ||
605 | while (atomic_read(&data.started) != cpus) | ||
606 | cpu_relax(); | ||
607 | |||
608 | if (wait) | ||
609 | while (atomic_read(&data.finished) != cpus) | ||
610 | cpu_relax(); | ||
571 | spin_unlock(&call_lock); | 611 | spin_unlock(&call_lock); |
572 | 612 | ||
573 | return 0; | 613 | return 0; |
574 | } | 614 | } |
615 | |||
616 | /** | ||
617 | * smp_call_function(): Run a function on all other CPUs. | ||
618 | * @func: The function to run. This must be fast and non-blocking. | ||
619 | * @info: An arbitrary pointer to pass to the function. | ||
620 | * @nonatomic: currently unused. | ||
621 | * @wait: If true, wait (atomically) until function has completed on other CPUs. | ||
622 | * | ||
623 | * Returns 0 on success, else a negative status code. Does not return until | ||
624 | * remote CPUs are nearly ready to execute <<func>> or are or have executed. | ||
625 | * | ||
626 | * You must not call this function with disabled interrupts or from a | ||
627 | * hardware interrupt handler or from a bottom half handler. | ||
628 | */ | ||
629 | int smp_call_function(void (*func) (void *info), void *info, int nonatomic, | ||
630 | int wait) | ||
631 | { | ||
632 | return smp_call_function_mask(cpu_online_map, func, info, wait); | ||
633 | } | ||
575 | EXPORT_SYMBOL(smp_call_function); | 634 | EXPORT_SYMBOL(smp_call_function); |
576 | 635 | ||
636 | /* | ||
637 | * smp_call_function_single - Run a function on another CPU | ||
638 | * @func: The function to run. This must be fast and non-blocking. | ||
639 | * @info: An arbitrary pointer to pass to the function. | ||
640 | * @nonatomic: Currently unused. | ||
641 | * @wait: If true, wait until function has completed on other CPUs. | ||
642 | * | ||
643 | * Returns 0 on success, else a negative status code. | ||
644 | * | ||
645 | * Does not return until the remote CPU is nearly ready to execute <func> | ||
646 | * or is or has executed. | ||
647 | */ | ||
648 | int smp_call_function_single(int cpu, void (*func) (void *info), void *info, | ||
649 | int nonatomic, int wait) | ||
650 | { | ||
651 | /* prevent preemption and reschedule on another processor */ | ||
652 | int ret; | ||
653 | int me = get_cpu(); | ||
654 | if (cpu == me) { | ||
655 | WARN_ON(1); | ||
656 | put_cpu(); | ||
657 | return -EBUSY; | ||
658 | } | ||
659 | |||
660 | ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, wait); | ||
661 | |||
662 | put_cpu(); | ||
663 | return ret; | ||
664 | } | ||
665 | EXPORT_SYMBOL(smp_call_function_single); | ||
666 | |||
577 | static void stop_this_cpu (void * dummy) | 667 | static void stop_this_cpu (void * dummy) |
578 | { | 668 | { |
579 | local_irq_disable(); | 669 | local_irq_disable(); |
@@ -641,77 +731,6 @@ fastcall void smp_call_function_interrupt(struct pt_regs *regs) | |||
641 | } | 731 | } |
642 | } | 732 | } |
643 | 733 | ||
644 | /* | ||
645 | * this function sends a 'generic call function' IPI to one other CPU | ||
646 | * in the system. | ||
647 | * | ||
648 | * cpu is a standard Linux logical CPU number. | ||
649 | */ | ||
650 | static void | ||
651 | __smp_call_function_single(int cpu, void (*func) (void *info), void *info, | ||
652 | int nonatomic, int wait) | ||
653 | { | ||
654 | struct call_data_struct data; | ||
655 | int cpus = 1; | ||
656 | |||
657 | data.func = func; | ||
658 | data.info = info; | ||
659 | atomic_set(&data.started, 0); | ||
660 | data.wait = wait; | ||
661 | if (wait) | ||
662 | atomic_set(&data.finished, 0); | ||
663 | |||
664 | call_data = &data; | ||
665 | wmb(); | ||
666 | /* Send a message to all other CPUs and wait for them to respond */ | ||
667 | send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNCTION_VECTOR); | ||
668 | |||
669 | /* Wait for response */ | ||
670 | while (atomic_read(&data.started) != cpus) | ||
671 | cpu_relax(); | ||
672 | |||
673 | if (!wait) | ||
674 | return; | ||
675 | |||
676 | while (atomic_read(&data.finished) != cpus) | ||
677 | cpu_relax(); | ||
678 | } | ||
679 | |||
680 | /* | ||
681 | * smp_call_function_single - Run a function on another CPU | ||
682 | * @func: The function to run. This must be fast and non-blocking. | ||
683 | * @info: An arbitrary pointer to pass to the function. | ||
684 | * @nonatomic: Currently unused. | ||
685 | * @wait: If true, wait until function has completed on other CPUs. | ||
686 | * | ||
687 | * Returns 0 on success, else a negative status code. | ||
688 | * | ||
689 | * Does not return until the remote CPU is nearly ready to execute <func> | ||
690 | * or is or has executed. | ||
691 | */ | ||
692 | |||
693 | int smp_call_function_single(int cpu, void (*func) (void *info), void *info, | ||
694 | int nonatomic, int wait) | ||
695 | { | ||
696 | /* prevent preemption and reschedule on another processor */ | ||
697 | int me = get_cpu(); | ||
698 | if (cpu == me) { | ||
699 | WARN_ON(1); | ||
700 | put_cpu(); | ||
701 | return -EBUSY; | ||
702 | } | ||
703 | |||
704 | /* Can deadlock when called with interrupts disabled */ | ||
705 | WARN_ON(irqs_disabled()); | ||
706 | |||
707 | spin_lock_bh(&call_lock); | ||
708 | __smp_call_function_single(cpu, func, info, nonatomic, wait); | ||
709 | spin_unlock_bh(&call_lock); | ||
710 | put_cpu(); | ||
711 | return 0; | ||
712 | } | ||
713 | EXPORT_SYMBOL(smp_call_function_single); | ||
714 | |||
715 | static int convert_apicid_to_cpu(int apic_id) | 734 | static int convert_apicid_to_cpu(int apic_id) |
716 | { | 735 | { |
717 | int i; | 736 | int i; |