Diffstat (limited to 'arch/alpha')

-rw-r--r--  arch/alpha/Kconfig               |   1
-rw-r--r--  arch/alpha/kernel/core_marvel.c  |   6
-rw-r--r--  arch/alpha/kernel/process.c      |   2
-rw-r--r--  arch/alpha/kernel/smp.c          | 180
-rw-r--r--  arch/alpha/oprofile/common.c     |   6

5 files changed, 23 insertions(+), 172 deletions(-)
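This patch is part of the tree-wide move to the generic smp_call_function
helpers in kernel/smp.c (gated by CONFIG_USE_GENERIC_SMP_HELPERS): the
hand-rolled Alpha cross-call machinery is deleted, the architecture supplies
only two IPI hooks, and every call site drops the old `retry' argument. For
orientation, a sketch of the interfaces involved -- the hook prototypes
appear in the smp.c hunk below; the caller-side prototypes reflect the
generic API of this era and should be checked against <linux/smp.h>:

    /* Arch-provided hooks consumed by the generic helpers (see smp.c). */
    void arch_send_call_function_ipi(cpumask_t mask);   /* IPI a set of CPUs */
    void arch_send_call_function_single_ipi(int cpu);   /* IPI one CPU */

    /* Generic entry points call sites now use; `retry' is gone. */
    int smp_call_function(void (*func)(void *info), void *info, int wait);
    int smp_call_function_single(int cpu, void (*func)(void *info),
                                 void *info, int wait);
    int on_each_cpu(void (*func)(void *info), void *info, int wait);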
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 729cdbdf8036..dbe8c280fea9 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -528,6 +528,7 @@ config ARCH_MAY_HAVE_PC_FDC
 config SMP
         bool "Symmetric multi-processing support"
         depends on ALPHA_SABLE || ALPHA_LYNX || ALPHA_RAWHIDE || ALPHA_DP264 || ALPHA_WILDFIRE || ALPHA_TITAN || ALPHA_GENERIC || ALPHA_SHARK || ALPHA_MARVEL
+        select USE_GENERIC_SMP_HELPERS
         ---help---
           This enables support for systems with more than one CPU. If you have
           a system with only one CPU, like most personal computers, say N. If
diff --git a/arch/alpha/kernel/core_marvel.c b/arch/alpha/kernel/core_marvel.c
index b04f1feb1dda..04dcc5e5d4c1 100644
--- a/arch/alpha/kernel/core_marvel.c
+++ b/arch/alpha/kernel/core_marvel.c
@@ -660,9 +660,9 @@ __marvel_rtc_io(u8 b, unsigned long addr, int write)
 
 #ifdef CONFIG_SMP
         if (smp_processor_id() != boot_cpuid)
-                smp_call_function_on_cpu(__marvel_access_rtc,
-                                         &rtc_access, 1, 1,
-                                         cpumask_of_cpu(boot_cpuid));
+                smp_call_function_single(boot_cpuid,
+                                         __marvel_access_rtc,
+                                         &rtc_access, 1);
         else
                 __marvel_access_rtc(&rtc_access);
 #else
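The Marvel RTC access illustrates the call-site conversion: the old Alpha
API targeted one CPU by building a single-bit mask, while the generic
smp_call_function_single() takes the CPU id directly. A condensed sketch of
the same pattern (do_work, arg, and target_cpu are illustrative names, not
from this patch):

    static void do_work(void *info) { /* runs on the target CPU */ }

    static void example(void *arg, int target_cpu)
    {
            /* Old Alpha-private form, for comparison:
               smp_call_function_on_cpu(do_work, arg, 1, 1,
                                        cpumask_of_cpu(target_cpu)); */

            /* Generic replacement: CPU id passed directly, no `retry'. */
            smp_call_function_single(target_cpu, do_work, arg, 1);
    }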
diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c
index 96ed82fd9eef..351407e07e71 100644
--- a/arch/alpha/kernel/process.c
+++ b/arch/alpha/kernel/process.c
@@ -160,7 +160,7 @@ common_shutdown(int mode, char *restart_cmd)
         struct halt_info args;
         args.mode = mode;
         args.restart_cmd = restart_cmd;
-        on_each_cpu(common_shutdown_1, &args, 1, 0);
+        on_each_cpu(common_shutdown_1, &args, 0);
 }
 
 void
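on_each_cpu() likewise loses the `retry' argument; only `wait' remains.
Roughly, the generic version behaves like the sketch below (illustrative,
not the kernel/smp.c source): it IPIs the other CPUs and then runs the
function locally, so common_shutdown_1() still executes on every CPU:

    /* Rough behavioural sketch of the generic on_each_cpu(). */
    static int on_each_cpu_sketch(void (*func)(void *), void *info, int wait)
    {
            int ret;

            preempt_disable();
            ret = smp_call_function(func, info, wait); /* all other CPUs */
            local_irq_disable();
            func(info);                                /* ...and this CPU */
            local_irq_enable();
            preempt_enable();
            return ret;
    }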
diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c
index 2525692db0ab..83df541650fc 100644
--- a/arch/alpha/kernel/smp.c
+++ b/arch/alpha/kernel/smp.c
@@ -62,6 +62,7 @@ static struct {
 enum ipi_message_type {
         IPI_RESCHEDULE,
         IPI_CALL_FUNC,
+        IPI_CALL_FUNC_SINGLE,
         IPI_CPU_STOP,
 };
 
@@ -558,51 +559,6 @@ send_ipi_message(cpumask_t to_whom, enum ipi_message_type operation)
                 wripir(i);
 }
 
-/* Structure and data for smp_call_function.  This is designed to
-   minimize static memory requirements.  Plus it looks cleaner.  */
-
-struct smp_call_struct {
-        void (*func) (void *info);
-        void *info;
-        long wait;
-        atomic_t unstarted_count;
-        atomic_t unfinished_count;
-};
-
-static struct smp_call_struct *smp_call_function_data;
-
-/* Atomicly drop data into a shared pointer.  The pointer is free if
-   it is initially locked.  If retry, spin until free.  */
-
-static int
-pointer_lock (void *lock, void *data, int retry)
-{
-        void *old, *tmp;
-
-        mb();
- again:
-        /* Compare and swap with zero.  */
-        asm volatile (
-        "1:     ldq_l   %0,%1\n"
-        "       mov     %3,%2\n"
-        "       bne     %0,2f\n"
-        "       stq_c   %2,%1\n"
-        "       beq     %2,1b\n"
-        "2:"
-        : "=&r"(old), "=m"(*(void **)lock), "=&r"(tmp)
-        : "r"(data)
-        : "memory");
-
-        if (old == 0)
-                return 0;
-        if (! retry)
-                return -EBUSY;
-
-        while (*(void **)lock)
-                barrier();
-        goto again;
-}
-
 void
 handle_ipi(struct pt_regs *regs)
 {
@@ -632,31 +588,12 @@ handle_ipi(struct pt_regs *regs)
                         break;
 
                 case IPI_CALL_FUNC:
-                    {
-                        struct smp_call_struct *data;
-                        void (*func)(void *info);
-                        void *info;
-                        int wait;
-
-                        data = smp_call_function_data;
-                        func = data->func;
-                        info = data->info;
-                        wait = data->wait;
-
-                        /* Notify the sending CPU that the data has been
-                           received, and execution is about to begin.  */
-                        mb();
-                        atomic_dec (&data->unstarted_count);
-
-                        /* At this point the structure may be gone unless
-                           wait is true.  */
-                        (*func)(info);
-
-                        /* Notify the sending CPU that the task is done.  */
-                        mb();
-                        if (wait) atomic_dec (&data->unfinished_count);
+                        generic_smp_call_function_interrupt();
+                        break;
+
+                case IPI_CALL_FUNC_SINGLE:
+                        generic_smp_call_function_single_interrupt();
                         break;
-                    }
 
                 case IPI_CPU_STOP:
                         halt();
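With the private smp_call_struct gone, handle_ipi() simply dispatches to the
generic helpers. Conceptually (a simplified sketch, not the kernel/smp.c
source; all names here are illustrative), the generic handler drains a
per-CPU queue of call descriptors that the sending CPU filled in before
raising the IPI:

    /* Hypothetical sketch of what generic_smp_call_function_interrupt()
       does: pop queued descriptors, run each function, and ack a waiting
       sender.  The real implementation lives in kernel/smp.c. */
    struct call_desc {                      /* simplified, illustrative */
            struct call_desc *next;
            void (*func)(void *info);
            void *info;
            int *done;                      /* set once func has run */
    };

    static void drain_call_queue(struct call_desc *q)
    {
            while (q) {
                    struct call_desc *d = q;

                    q = d->next;
                    d->func(d->info);       /* run the requested function */
                    if (d->done)
                            *d->done = 1;   /* release a waiting sender */
            }
    }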
@@ -700,102 +637,15 @@ smp_send_stop(void)
         send_ipi_message(to_whom, IPI_CPU_STOP);
 }
 
-/*
- * Run a function on all other CPUs.
- *  <func>      The function to run. This must be fast and non-blocking.
- *  <info>      An arbitrary pointer to pass to the function.
- *  <retry>     If true, keep retrying until ready.
- *  <wait>      If true, wait until function has completed on other CPUs.
- *  [RETURNS]   0 on success, else a negative status code.
- *
- * Does not return until remote CPUs are nearly ready to execute <func>
- * or are or have executed.
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
- */
-
-int
-smp_call_function_on_cpu (void (*func) (void *info), void *info, int retry,
-                          int wait, cpumask_t to_whom)
+void arch_send_call_function_ipi(cpumask_t mask)
 {
-        struct smp_call_struct data;
-        unsigned long timeout;
-        int num_cpus_to_call;
-
-        /* Can deadlock when called with interrupts disabled */
-        WARN_ON(irqs_disabled());
-
-        data.func = func;
-        data.info = info;
-        data.wait = wait;
-
-        cpu_clear(smp_processor_id(), to_whom);
-        num_cpus_to_call = cpus_weight(to_whom);
-
-        atomic_set(&data.unstarted_count, num_cpus_to_call);
-        atomic_set(&data.unfinished_count, num_cpus_to_call);
-
-        /* Acquire the smp_call_function_data mutex.  */
-        if (pointer_lock(&smp_call_function_data, &data, retry))
-                return -EBUSY;
-
-        /* Send a message to the requested CPUs.  */
-        send_ipi_message(to_whom, IPI_CALL_FUNC);
-
-        /* Wait for a minimal response.  */
-        timeout = jiffies + HZ;
-        while (atomic_read (&data.unstarted_count) > 0
-               && time_before (jiffies, timeout))
-                barrier();
-
-        /* If there's no response yet, log a message but allow a longer
-         * timeout period -- if we get a response this time, log
-         * a message saying when we got it..
-         */
-        if (atomic_read(&data.unstarted_count) > 0) {
-                long start_time = jiffies;
-                printk(KERN_ERR "%s: initial timeout -- trying long wait\n",
-                       __func__);
-                timeout = jiffies + 30 * HZ;
-                while (atomic_read(&data.unstarted_count) > 0
-                       && time_before(jiffies, timeout))
-                        barrier();
-                if (atomic_read(&data.unstarted_count) <= 0) {
-                        long delta = jiffies - start_time;
-                        printk(KERN_ERR
-                               "%s: response %ld.%ld seconds into long wait\n",
-                               __func__, delta / HZ,
-                               (100 * (delta - ((delta / HZ) * HZ))) / HZ);
-                }
-        }
-
-        /* We either got one or timed out -- clear the lock. */
-        mb();
-        smp_call_function_data = NULL;
-
-        /*
-         * If after both the initial and long timeout periods we still don't
-         * have a response, something is very wrong...
-         */
-        BUG_ON(atomic_read (&data.unstarted_count) > 0);
-
-        /* Wait for a complete response, if needed.  */
-        if (wait) {
-                while (atomic_read (&data.unfinished_count) > 0)
-                        barrier();
-        }
-
-        return 0;
+        send_ipi_message(mask, IPI_CALL_FUNC);
 }
-EXPORT_SYMBOL(smp_call_function_on_cpu);
 
-int
-smp_call_function (void (*func) (void *info), void *info, int retry, int wait)
+void arch_send_call_function_single_ipi(int cpu)
 {
-        return smp_call_function_on_cpu (func, info, retry, wait,
-                                         cpu_online_map);
+        send_ipi_message(cpumask_of_cpu(cpu), IPI_CALL_FUNC_SINGLE);
 }
-EXPORT_SYMBOL(smp_call_function);
 
 static void
 ipi_imb(void *ignored)
@@ -807,7 +657,7 @@ void
 smp_imb(void)
 {
         /* Must wait other processors to flush their icache before continue. */
-        if (on_each_cpu(ipi_imb, NULL, 1, 1))
+        if (on_each_cpu(ipi_imb, NULL, 1))
                 printk(KERN_CRIT "smp_imb: timed out\n");
 }
 EXPORT_SYMBOL(smp_imb);
@@ -823,7 +673,7 @@ flush_tlb_all(void)
 {
         /* Although we don't have any data to pass, we do want to
            synchronize with the other processors.  */
-        if (on_each_cpu(ipi_flush_tlb_all, NULL, 1, 1)) {
+        if (on_each_cpu(ipi_flush_tlb_all, NULL, 1)) {
                 printk(KERN_CRIT "flush_tlb_all: timed out\n");
         }
 }
@@ -860,7 +710,7 @@ flush_tlb_mm(struct mm_struct *mm)
                 }
         }
 
-        if (smp_call_function(ipi_flush_tlb_mm, mm, 1, 1)) {
+        if (smp_call_function(ipi_flush_tlb_mm, mm, 1)) {
                 printk(KERN_CRIT "flush_tlb_mm: timed out\n");
         }
 
@@ -913,7 +763,7 @@ flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
         data.mm = mm;
         data.addr = addr;
 
-        if (smp_call_function(ipi_flush_tlb_page, &data, 1, 1)) {
+        if (smp_call_function(ipi_flush_tlb_page, &data, 1)) {
                 printk(KERN_CRIT "flush_tlb_page: timed out\n");
         }
 
@@ -965,7 +815,7 @@ flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
                 }
         }
 
-        if (smp_call_function(ipi_flush_icache_page, mm, 1, 1)) {
+        if (smp_call_function(ipi_flush_icache_page, mm, 1)) {
                 printk(KERN_CRIT "flush_icache_page: timed out\n");
         }
 
diff --git a/arch/alpha/oprofile/common.c b/arch/alpha/oprofile/common.c
index 9fc0eeb4f0ab..7c3d5ec6ec67 100644
--- a/arch/alpha/oprofile/common.c
+++ b/arch/alpha/oprofile/common.c
@@ -65,7 +65,7 @@ op_axp_setup(void)
         model->reg_setup(&reg, ctr, &sys);
 
         /* Configure the registers on all cpus.  */
-        (void)smp_call_function(model->cpu_setup, &reg, 0, 1);
+        (void)smp_call_function(model->cpu_setup, &reg, 1);
         model->cpu_setup(&reg);
         return 0;
 }
@@ -86,7 +86,7 @@ op_axp_cpu_start(void *dummy)
 static int
 op_axp_start(void)
 {
-        (void)smp_call_function(op_axp_cpu_start, NULL, 0, 1);
+        (void)smp_call_function(op_axp_cpu_start, NULL, 1);
         op_axp_cpu_start(NULL);
         return 0;
 }
@@ -101,7 +101,7 @@ op_axp_cpu_stop(void *dummy)
 static void
 op_axp_stop(void)
 {
-        (void)smp_call_function(op_axp_cpu_stop, NULL, 0, 1);
+        (void)smp_call_function(op_axp_cpu_stop, NULL, 1);
         op_axp_cpu_stop(NULL);
 }
 
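The oprofile sites show the common "every CPU including this one" idiom under
the three-argument smp_call_function(): IPI the other CPUs with wait=1, then
call the function locally. A condensed sketch of the pattern (setup_cpu and
state are hypothetical names, not from this patch):

    /* Illustrative example of the post-conversion calling pattern. */
    static void setup_cpu(void *info)
    {
            /* program per-CPU state from *info; must not sleep */
    }

    static void setup_all_cpus(void *state)
    {
            (void)smp_call_function(setup_cpu, state, 1); /* other CPUs, wait */
            setup_cpu(state);                             /* ...and this CPU */
    }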