Diffstat (limited to 'arch')
105 files changed, 467 insertions(+), 1709 deletions(-)
diff --git a/arch/Kconfig b/arch/Kconfig
index 3ea332b009e5..ad89a33d8c6e 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -39,3 +39,6 @@ config HAVE_KRETPROBES
 
 config HAVE_DMA_ATTRS
 	def_bool n
+
+config USE_GENERIC_SMP_HELPERS
+	def_bool n
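The new symbol gates the common cross-call code in kernel/smp.c; every architecture converted below selects it from its SMP option. An architecture that does so must supply two IPI hooks for the generic code to call. A minimal sketch of such a pair, modeled on the alpha and ARM implementations later in this diff (send_ipi_message() and the IPI numbers are each port's own primitives, assumed here):

	/* Hooks required from an arch selecting USE_GENERIC_SMP_HELPERS.
	 * The generic code queues the call data first, then uses these
	 * to raise the interrupt on the target CPU(s). */
	void arch_send_call_function_ipi(cpumask_t mask)
	{
		send_ipi_message(mask, IPI_CALL_FUNC);	/* arch primitive */
	}

	void arch_send_call_function_single_ipi(int cpu)
	{
		send_ipi_message(cpumask_of_cpu(cpu), IPI_CALL_FUNC_SINGLE);
	}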
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 729cdbdf8036..dbe8c280fea9 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -528,6 +528,7 @@ config ARCH_MAY_HAVE_PC_FDC
 config SMP
 	bool "Symmetric multi-processing support"
 	depends on ALPHA_SABLE || ALPHA_LYNX || ALPHA_RAWHIDE || ALPHA_DP264 || ALPHA_WILDFIRE || ALPHA_TITAN || ALPHA_GENERIC || ALPHA_SHARK || ALPHA_MARVEL
+	select USE_GENERIC_SMP_HELPERS
 	---help---
 	  This enables support for systems with more than one CPU. If you have
 	  a system with only one CPU, like most personal computers, say N. If
diff --git a/arch/alpha/kernel/core_marvel.c b/arch/alpha/kernel/core_marvel.c
index b04f1feb1dda..04dcc5e5d4c1 100644
--- a/arch/alpha/kernel/core_marvel.c
+++ b/arch/alpha/kernel/core_marvel.c
@@ -660,9 +660,9 @@ __marvel_rtc_io(u8 b, unsigned long addr, int write)
 
 #ifdef CONFIG_SMP
 	if (smp_processor_id() != boot_cpuid)
-		smp_call_function_on_cpu(__marvel_access_rtc,
-					 &rtc_access, 1, 1,
-					 cpumask_of_cpu(boot_cpuid));
+		smp_call_function_single(boot_cpuid,
+					 __marvel_access_rtc,
+					 &rtc_access, 1);
 	else
 		__marvel_access_rtc(&rtc_access);
 #else
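This hunk shows the calling-convention change that repeats through the whole series: the per-arch smp_call_function_on_cpu(func, info, retry, wait, mask) becomes the generic smp_call_function_single(cpu, func, info, wait), with the never-implemented retry flag dropped. A hedged before/after sketch (do_work and arg are illustrative names):

	/* before: retry and wait flags, target selected by cpumask */
	smp_call_function_on_cpu(do_work, &arg, 1, 1, cpumask_of_cpu(cpu));

	/* after: target CPU first, single wait flag */
	smp_call_function_single(cpu, do_work, &arg, 1);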
diff --git a/arch/alpha/kernel/irq.c b/arch/alpha/kernel/irq.c
index facf82a5499a..c626a821cdcb 100644
--- a/arch/alpha/kernel/irq.c
+++ b/arch/alpha/kernel/irq.c
@@ -42,8 +42,7 @@ void ack_bad_irq(unsigned int irq)
 #ifdef CONFIG_SMP
 static char irq_user_affinity[NR_IRQS];
 
-int
-select_smp_affinity(unsigned int irq)
+int irq_select_affinity(unsigned int irq)
 {
 	static int last_cpu;
 	int cpu = last_cpu + 1;
@@ -51,7 +50,7 @@ select_smp_affinity(unsigned int irq)
 	if (!irq_desc[irq].chip->set_affinity || irq_user_affinity[irq])
 		return 1;
 
-	while (!cpu_possible(cpu))
+	while (!cpu_possible(cpu) || !cpu_isset(cpu, irq_default_affinity))
 		cpu = (cpu < (NR_CPUS-1) ? cpu + 1 : 0);
 	last_cpu = cpu;
 
diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c
index 96ed82fd9eef..351407e07e71 100644
--- a/arch/alpha/kernel/process.c
+++ b/arch/alpha/kernel/process.c
@@ -160,7 +160,7 @@ common_shutdown(int mode, char *restart_cmd)
 	struct halt_info args;
 	args.mode = mode;
 	args.restart_cmd = restart_cmd;
-	on_each_cpu(common_shutdown_1, &args, 1, 0);
+	on_each_cpu(common_shutdown_1, &args, 0);
 }
 
 void
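on_each_cpu() loses the same vestigial argument and is now (func, info, wait). A short usage sketch under the new signature (flush_state() is a hypothetical callback):

	static void flush_state(void *unused)
	{
		/* runs locally, and in IPI context on every other CPU */
	}

	on_each_cpu(flush_state, NULL, 1);	/* wait for all CPUs */
	on_each_cpu(flush_state, NULL, 0);	/* fire and forget */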
diff --git a/arch/alpha/kernel/smp.c b/arch/alpha/kernel/smp.c
index 2525692db0ab..83df541650fc 100644
--- a/arch/alpha/kernel/smp.c
+++ b/arch/alpha/kernel/smp.c
@@ -62,6 +62,7 @@ static struct {
 enum ipi_message_type {
 	IPI_RESCHEDULE,
 	IPI_CALL_FUNC,
+	IPI_CALL_FUNC_SINGLE,
 	IPI_CPU_STOP,
 };
 
@@ -558,51 +559,6 @@ send_ipi_message(cpumask_t to_whom, enum ipi_message_type operation)
 			wripir(i);
 }
 
-/* Structure and data for smp_call_function.  This is designed to
-   minimize static memory requirements.  Plus it looks cleaner.  */
-
-struct smp_call_struct {
-	void (*func) (void *info);
-	void *info;
-	long wait;
-	atomic_t unstarted_count;
-	atomic_t unfinished_count;
-};
-
-static struct smp_call_struct *smp_call_function_data;
-
-/* Atomicly drop data into a shared pointer.  The pointer is free if
-   it is initially locked.  If retry, spin until free.  */
-
-static int
-pointer_lock (void *lock, void *data, int retry)
-{
-	void *old, *tmp;
-
-	mb();
- again:
-	/* Compare and swap with zero.  */
-	asm volatile (
-	"1:	ldq_l	%0,%1\n"
-	"	mov	%3,%2\n"
-	"	bne	%0,2f\n"
-	"	stq_c	%2,%1\n"
-	"	beq	%2,1b\n"
-	"2:"
-	: "=&r"(old), "=m"(*(void **)lock), "=&r"(tmp)
-	: "r"(data)
-	: "memory");
-
-	if (old == 0)
-		return 0;
-	if (! retry)
-		return -EBUSY;
-
-	while (*(void **)lock)
-		barrier();
-	goto again;
-}
-
 void
 handle_ipi(struct pt_regs *regs)
 {
@@ -632,31 +588,12 @@ handle_ipi(struct pt_regs *regs)
 		break;
 
 	case IPI_CALL_FUNC:
-	    {
-		struct smp_call_struct *data;
-		void (*func)(void *info);
-		void *info;
-		int wait;
-
-		data = smp_call_function_data;
-		func = data->func;
-		info = data->info;
-		wait = data->wait;
-
-		/* Notify the sending CPU that the data has been
-		   received, and execution is about to begin.  */
-		mb();
-		atomic_dec (&data->unstarted_count);
-
-		/* At this point the structure may be gone unless
-		   wait is true.  */
-		(*func)(info);
-
-		/* Notify the sending CPU that the task is done.  */
-		mb();
-		if (wait) atomic_dec (&data->unfinished_count);
+		generic_smp_call_function_interrupt();
+		break;
+
+	case IPI_CALL_FUNC_SINGLE:
+		generic_smp_call_function_single_interrupt();
 		break;
-	    }
 
 	case IPI_CPU_STOP:
 		halt();
@@ -700,102 +637,15 @@ smp_send_stop(void)
 	send_ipi_message(to_whom, IPI_CPU_STOP);
 }
 
-/*
- * Run a function on all other CPUs.
- *  <func>	The function to run. This must be fast and non-blocking.
- *  <info>	An arbitrary pointer to pass to the function.
- *  <retry>	If true, keep retrying until ready.
- *  <wait>	If true, wait until function has completed on other CPUs.
- *  [RETURNS]   0 on success, else a negative status code.
- *
- * Does not return until remote CPUs are nearly ready to execute <func>
- * or are or have executed.
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
- */
-
-int
-smp_call_function_on_cpu (void (*func) (void *info), void *info, int retry,
-			  int wait, cpumask_t to_whom)
+void arch_send_call_function_ipi(cpumask_t mask)
 {
-	struct smp_call_struct data;
-	unsigned long timeout;
-	int num_cpus_to_call;
-
-	/* Can deadlock when called with interrupts disabled */
-	WARN_ON(irqs_disabled());
-
-	data.func = func;
-	data.info = info;
-	data.wait = wait;
-
-	cpu_clear(smp_processor_id(), to_whom);
-	num_cpus_to_call = cpus_weight(to_whom);
-
-	atomic_set(&data.unstarted_count, num_cpus_to_call);
-	atomic_set(&data.unfinished_count, num_cpus_to_call);
-
-	/* Acquire the smp_call_function_data mutex.  */
-	if (pointer_lock(&smp_call_function_data, &data, retry))
-		return -EBUSY;
-
-	/* Send a message to the requested CPUs.  */
-	send_ipi_message(to_whom, IPI_CALL_FUNC);
-
-	/* Wait for a minimal response.  */
-	timeout = jiffies + HZ;
-	while (atomic_read (&data.unstarted_count) > 0
-	       && time_before (jiffies, timeout))
-		barrier();
-
-	/* If there's no response yet, log a message but allow a longer
-	 * timeout period -- if we get a response this time, log
-	 * a message saying when we got it..
-	 */
-	if (atomic_read(&data.unstarted_count) > 0) {
-		long start_time = jiffies;
-		printk(KERN_ERR "%s: initial timeout -- trying long wait\n",
-		       __func__);
-		timeout = jiffies + 30 * HZ;
-		while (atomic_read(&data.unstarted_count) > 0
-		       && time_before(jiffies, timeout))
-			barrier();
-		if (atomic_read(&data.unstarted_count) <= 0) {
-			long delta = jiffies - start_time;
-			printk(KERN_ERR
-			       "%s: response %ld.%ld seconds into long wait\n",
-			       __func__, delta / HZ,
-			       (100 * (delta - ((delta / HZ) * HZ))) / HZ);
-		}
-	}
-
-	/* We either got one or timed out -- clear the lock. */
-	mb();
-	smp_call_function_data = NULL;
-
-	/*
-	 * If after both the initial and long timeout periods we still don't
-	 * have a response, something is very wrong...
-	 */
-	BUG_ON(atomic_read (&data.unstarted_count) > 0);
-
-	/* Wait for a complete response, if needed.  */
-	if (wait) {
-		while (atomic_read (&data.unfinished_count) > 0)
-			barrier();
-	}
-
-	return 0;
+	send_ipi_message(mask, IPI_CALL_FUNC);
 }
-EXPORT_SYMBOL(smp_call_function_on_cpu);
 
-int
-smp_call_function (void (*func) (void *info), void *info, int retry, int wait)
+void arch_send_call_function_single_ipi(int cpu)
 {
-	return smp_call_function_on_cpu (func, info, retry, wait,
-					 cpu_online_map);
+	send_ipi_message(cpumask_of_cpu(cpu), IPI_CALL_FUNC_SINGLE);
 }
-EXPORT_SYMBOL(smp_call_function);
 
 static void
 ipi_imb(void *ignored)
@@ -807,7 +657,7 @@ void
 smp_imb(void)
 {
 	/* Must wait other processors to flush their icache before continue. */
-	if (on_each_cpu(ipi_imb, NULL, 1, 1))
+	if (on_each_cpu(ipi_imb, NULL, 1))
 		printk(KERN_CRIT "smp_imb: timed out\n");
 }
 EXPORT_SYMBOL(smp_imb);
@@ -823,7 +673,7 @@ flush_tlb_all(void)
 {
 	/* Although we don't have any data to pass, we do want to
 	   synchronize with the other processors.  */
-	if (on_each_cpu(ipi_flush_tlb_all, NULL, 1, 1)) {
+	if (on_each_cpu(ipi_flush_tlb_all, NULL, 1)) {
 		printk(KERN_CRIT "flush_tlb_all: timed out\n");
 	}
 }
@@ -860,7 +710,7 @@ flush_tlb_mm(struct mm_struct *mm)
 		}
 	}
 
-	if (smp_call_function(ipi_flush_tlb_mm, mm, 1, 1)) {
+	if (smp_call_function(ipi_flush_tlb_mm, mm, 1)) {
 		printk(KERN_CRIT "flush_tlb_mm: timed out\n");
 	}
 
@@ -913,7 +763,7 @@ flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
 	data.mm = mm;
 	data.addr = addr;
 
-	if (smp_call_function(ipi_flush_tlb_page, &data, 1, 1)) {
+	if (smp_call_function(ipi_flush_tlb_page, &data, 1)) {
 		printk(KERN_CRIT "flush_tlb_page: timed out\n");
 	}
 
@@ -965,7 +815,7 @@ flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
 		}
 	}
 
-	if (smp_call_function(ipi_flush_icache_page, mm, 1, 1)) {
+	if (smp_call_function(ipi_flush_icache_page, mm, 1)) {
 		printk(KERN_CRIT "flush_icache_page: timed out\n");
 	}
 
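All of the deleted alpha machinery (the shared-pointer lock, the hand-rolled timeouts) is subsumed by the new kernel/smp.c. Roughly, and only as a sketch of the generic flow rather than the literal implementation: the caller queues a per-call descriptor on the target CPU's list, kicks it through the arch hook, and the IPI handler drains the list:

	/* sketch of the generic single-CPU path (details simplified) */
	int smp_call_function_single(int cpu, void (*func)(void *),
				     void *info, int wait)
	{
		struct call_single_data data = { .func = func, .info = info };

		/* enqueue data on cpu's call queue, then ... */
		arch_send_call_function_single_ipi(cpu);
		if (wait)
			;	/* spin until the target clears data's flags */
		return 0;
	}

The receive side is what the new IPI_CALL_FUNC and IPI_CALL_FUNC_SINGLE cases above delegate to via generic_smp_call_function_interrupt() and its _single variant.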
diff --git a/arch/alpha/oprofile/common.c b/arch/alpha/oprofile/common.c
index 9fc0eeb4f0ab..7c3d5ec6ec67 100644
--- a/arch/alpha/oprofile/common.c
+++ b/arch/alpha/oprofile/common.c
@@ -65,7 +65,7 @@ op_axp_setup(void)
 	model->reg_setup(&reg, ctr, &sys);
 
 	/* Configure the registers on all cpus.  */
-	(void)smp_call_function(model->cpu_setup, &reg, 0, 1);
+	(void)smp_call_function(model->cpu_setup, &reg, 1);
 	model->cpu_setup(&reg);
 	return 0;
 }
@@ -86,7 +86,7 @@ op_axp_cpu_start(void *dummy)
 static int
 op_axp_start(void)
 {
-	(void)smp_call_function(op_axp_cpu_start, NULL, 0, 1);
+	(void)smp_call_function(op_axp_cpu_start, NULL, 1);
 	op_axp_cpu_start(NULL);
 	return 0;
 }
@@ -101,7 +101,7 @@ op_axp_cpu_stop(void *dummy)
 static void
 op_axp_stop(void)
 {
-	(void)smp_call_function(op_axp_cpu_stop, NULL, 0, 1);
+	(void)smp_call_function(op_axp_cpu_stop, NULL, 1);
 	op_axp_cpu_stop(NULL);
 }
 
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 258f1369fb0c..c7ad324ddf2c 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -701,6 +701,7 @@ source "kernel/time/Kconfig"
 config SMP
 	bool "Symmetric Multi-Processing (EXPERIMENTAL)"
 	depends on EXPERIMENTAL && (REALVIEW_EB_ARM11MP || MACH_REALVIEW_PB11MP)
+	select USE_GENERIC_SMP_HELPERS
 	help
 	  This enables support for systems with more than one CPU. If you have
 	  a system with only one CPU, like most personal computers, say N. If
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index eefae1de334c..5a7c09564d13 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -68,20 +68,10 @@ enum ipi_msg_type {
 	IPI_TIMER,
 	IPI_RESCHEDULE,
 	IPI_CALL_FUNC,
+	IPI_CALL_FUNC_SINGLE,
 	IPI_CPU_STOP,
 };
 
-struct smp_call_struct {
-	void (*func)(void *info);
-	void *info;
-	int wait;
-	cpumask_t pending;
-	cpumask_t unfinished;
-};
-
-static struct smp_call_struct * volatile smp_call_function_data;
-static DEFINE_SPINLOCK(smp_call_function_lock);
-
 int __cpuinit __cpu_up(unsigned int cpu)
 {
 	struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu);
@@ -366,114 +356,15 @@ static void send_ipi_message(cpumask_t callmap, enum ipi_msg_type msg)
 	local_irq_restore(flags);
 }
 
-/*
- * You must not call this function with disabled interrupts, from a
- * hardware interrupt handler, nor from a bottom half handler.
- */
-static int smp_call_function_on_cpu(void (*func)(void *info), void *info,
-				    int retry, int wait, cpumask_t callmap)
-{
-	struct smp_call_struct data;
-	unsigned long timeout;
-	int ret = 0;
-
-	data.func = func;
-	data.info = info;
-	data.wait = wait;
-
-	cpu_clear(smp_processor_id(), callmap);
-	if (cpus_empty(callmap))
-		goto out;
-
-	data.pending = callmap;
-	if (wait)
-		data.unfinished = callmap;
-
-	/*
-	 * try to get the mutex on smp_call_function_data
-	 */
-	spin_lock(&smp_call_function_lock);
-	smp_call_function_data = &data;
-
-	send_ipi_message(callmap, IPI_CALL_FUNC);
-
-	timeout = jiffies + HZ;
-	while (!cpus_empty(data.pending) && time_before(jiffies, timeout))
-		barrier();
-
-	/*
-	 * did we time out?
-	 */
-	if (!cpus_empty(data.pending)) {
-		/*
-		 * this may be causing our panic - report it
-		 */
-		printk(KERN_CRIT
-		       "CPU%u: smp_call_function timeout for %p(%p)\n"
-		       "      callmap %lx pending %lx, %swait\n",
-		       smp_processor_id(), func, info, *cpus_addr(callmap),
-		       *cpus_addr(data.pending), wait ? "" : "no ");
-
-		/*
-		 * TRACE
-		 */
-		timeout = jiffies + (5 * HZ);
-		while (!cpus_empty(data.pending) && time_before(jiffies, timeout))
-			barrier();
-
-		if (cpus_empty(data.pending))
-			printk(KERN_CRIT "     RESOLVED\n");
-		else
-			printk(KERN_CRIT "     STILL STUCK\n");
-	}
-
-	/*
-	 * whatever happened, we're done with the data, so release it
-	 */
-	smp_call_function_data = NULL;
-	spin_unlock(&smp_call_function_lock);
-
-	if (!cpus_empty(data.pending)) {
-		ret = -ETIMEDOUT;
-		goto out;
-	}
-
-	if (wait)
-		while (!cpus_empty(data.unfinished))
-			barrier();
- out:
-
-	return 0;
-}
-
-int smp_call_function(void (*func)(void *info), void *info, int retry,
-		      int wait)
+void arch_send_call_function_ipi(cpumask_t mask)
 {
-	return smp_call_function_on_cpu(func, info, retry, wait,
-					cpu_online_map);
+	send_ipi_message(mask, IPI_CALL_FUNC);
 }
-EXPORT_SYMBOL_GPL(smp_call_function);
 
-int smp_call_function_single(int cpu, void (*func)(void *info), void *info,
-			     int retry, int wait)
+void arch_send_call_function_single_ipi(int cpu)
 {
-	/* prevent preemption and reschedule on another processor */
-	int current_cpu = get_cpu();
-	int ret = 0;
-
-	if (cpu == current_cpu) {
-		local_irq_disable();
-		func(info);
-		local_irq_enable();
-	} else
-		ret = smp_call_function_on_cpu(func, info, retry, wait,
-					       cpumask_of_cpu(cpu));
-
-	put_cpu();
-
-	return ret;
+	send_ipi_message(cpumask_of_cpu(cpu), IPI_CALL_FUNC_SINGLE);
 }
-EXPORT_SYMBOL_GPL(smp_call_function_single);
 
 void show_ipi_list(struct seq_file *p)
 {
@@ -521,27 +412,6 @@ asmlinkage void __exception do_local_timer(struct pt_regs *regs)
 }
 #endif
 
-/*
- * ipi_call_function - handle IPI from smp_call_function()
- *
- * Note that we copy data out of the cross-call structure and then
- * let the caller know that we're here and have done with their data
- */
-static void ipi_call_function(unsigned int cpu)
-{
-	struct smp_call_struct *data = smp_call_function_data;
-	void (*func)(void *info) = data->func;
-	void *info = data->info;
-	int wait = data->wait;
-
-	cpu_clear(cpu, data->pending);
-
-	func(info);
-
-	if (wait)
-		cpu_clear(cpu, data->unfinished);
-}
-
 static DEFINE_SPINLOCK(stop_lock);
 
 /*
@@ -611,7 +481,11 @@ asmlinkage void __exception do_IPI(struct pt_regs *regs)
 			break;
 
 		case IPI_CALL_FUNC:
-			ipi_call_function(cpu);
+			generic_smp_call_function_interrupt();
+			break;
+
+		case IPI_CALL_FUNC_SINGLE:
+			generic_smp_call_function_single_interrupt();
 			break;
 
 		case IPI_CPU_STOP:
@@ -662,14 +536,13 @@ int setup_profiling_timer(unsigned int multiplier)
 }
 
 static int
-on_each_cpu_mask(void (*func)(void *), void *info, int retry, int wait,
-		 cpumask_t mask)
+on_each_cpu_mask(void (*func)(void *), void *info, int wait, cpumask_t mask)
 {
 	int ret = 0;
 
 	preempt_disable();
 
-	ret = smp_call_function_on_cpu(func, info, retry, wait, mask);
+	ret = smp_call_function_mask(mask, func, info, wait);
 	if (cpu_isset(smp_processor_id(), mask))
 		func(info);
 
@@ -731,14 +604,14 @@ static inline void ipi_flush_tlb_kernel_range(void *arg)
 
 void flush_tlb_all(void)
 {
-	on_each_cpu(ipi_flush_tlb_all, NULL, 1, 1);
+	on_each_cpu(ipi_flush_tlb_all, NULL, 1);
 }
 
 void flush_tlb_mm(struct mm_struct *mm)
 {
 	cpumask_t mask = mm->cpu_vm_mask;
 
-	on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, 1, mask);
+	on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, mask);
 }
 
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
@@ -749,7 +622,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
 	ta.ta_vma = vma;
 	ta.ta_start = uaddr;
 
-	on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, 1, mask);
+	on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, mask);
 }
 
 void flush_tlb_kernel_page(unsigned long kaddr)
@@ -758,7 +631,7 @@ void flush_tlb_kernel_page(unsigned long kaddr)
 
 	ta.ta_start = kaddr;
 
-	on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1, 1);
+	on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);
 }
 
 void flush_tlb_range(struct vm_area_struct *vma,
@@ -771,7 +644,7 @@ void flush_tlb_range(struct vm_area_struct *vma,
 	ta.ta_start = start;
 	ta.ta_end = end;
 
-	on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, 1, mask);
+	on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, mask);
 }
 
 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
@@ -781,5 +654,5 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 	ta.ta_start = start;
 	ta.ta_end = end;
 
-	on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1, 1);
+	on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
 }
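The surviving ARM helper is now just the generic mask call plus a local invocation. Annotated (the code is from the hunk above; the comments are editorial):

	/* smp_call_function_mask() only reaches the *other* CPUs in mask,
	 * so the local CPU must run func() itself: */
	static int
	on_each_cpu_mask(void (*func)(void *), void *info, int wait, cpumask_t mask)
	{
		int ret = 0;

		preempt_disable();		/* pin ourselves to this CPU */
		ret = smp_call_function_mask(mask, func, info, wait);
		if (cpu_isset(smp_processor_id(), mask))
			func(info);		/* local part */
		preempt_enable();
		return ret;
	}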
diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c
index 90e0c35ae60d..fc650f64df43 100644
--- a/arch/arm/kernel/stacktrace.c
+++ b/arch/arm/kernel/stacktrace.c
@@ -92,4 +92,5 @@ void save_stack_trace(struct stack_trace *trace)
 {
 	save_stack_trace_tsk(current, trace);
 }
+EXPORT_SYMBOL_GPL(save_stack_trace);
 #endif
diff --git a/arch/arm/oprofile/op_model_mpcore.c b/arch/arm/oprofile/op_model_mpcore.c
index 74fae6045650..4458705021e0 100644
--- a/arch/arm/oprofile/op_model_mpcore.c
+++ b/arch/arm/oprofile/op_model_mpcore.c
@@ -201,7 +201,7 @@ static int em_call_function(int (*fn)(void))
 	data.ret = 0;
 
 	preempt_disable();
-	smp_call_function(em_func, &data, 1, 1);
+	smp_call_function(em_func, &data, 1);
 	em_func(&data);
 	preempt_enable();
 
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index 32455c633f1c..c0d2c9bb952b 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -352,7 +352,7 @@ static int __init vfp_init(void)
 	else if (vfpsid & FPSID_NODOUBLE) {
 		printk("no double precision support\n");
 	} else {
-		smp_call_function(vfp_enable, NULL, 1, 1);
+		smp_call_function(vfp_enable, NULL, 1);
 
 		VFP_arch = (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT;  /* Extract the architecture version */
 		printk("implementor %02x architecture %d part %02x variant %x rev %x\n",
diff --git a/arch/avr32/kernel/stacktrace.c b/arch/avr32/kernel/stacktrace.c
index 9a68190bbffd..f4bdb448049c 100644
--- a/arch/avr32/kernel/stacktrace.c
+++ b/arch/avr32/kernel/stacktrace.c
@@ -51,3 +51,4 @@ void save_stack_trace(struct stack_trace *trace)
 		fp = frame->fp;
 	}
 }
+EXPORT_SYMBOL_GPL(save_stack_trace);
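Both stacktrace hunks only add an export, which makes the saver callable from modular code. A hedged usage sketch against the struct stack_trace API of this kernel generation:

	#include <linux/stacktrace.h>

	static unsigned long entries[16];

	static void dump_here(void)
	{
		struct stack_trace trace = {
			.entries	= entries,
			.max_entries	= ARRAY_SIZE(entries),
			.skip		= 1,	/* skip dump_here() itself */
		};

		save_stack_trace(&trace);
		print_stack_trace(&trace, 0);
	}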
diff --git a/arch/cris/arch-v32/kernel/smp.c b/arch/cris/arch-v32/kernel/smp.c
index a9c3334e46c9..952a24b2f5a9 100644
--- a/arch/cris/arch-v32/kernel/smp.c
+++ b/arch/cris/arch-v32/kernel/smp.c
@@ -194,7 +194,7 @@ void stop_this_cpu(void* dummy)
 /* Other calls */
 void smp_send_stop(void)
 {
-	smp_call_function(stop_this_cpu, NULL, 1, 0);
+	smp_call_function(stop_this_cpu, NULL, 0);
 }
 
 int setup_profiling_timer(unsigned int multiplier)
@@ -316,8 +316,7 @@ int send_ipi(int vector, int wait, cpumask_t cpu_mask)
  * You must not call this function with disabled interrupts or from a
  * hardware interrupt handler or from a bottom half handler.
  */
-int smp_call_function(void (*func)(void *info), void *info,
-		      int nonatomic, int wait)
+int smp_call_function(void (*func)(void *info), void *info, int wait)
 {
 	cpumask_t cpu_mask = CPU_MASK_ALL;
 	struct call_data_struct data;
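CRIS is the exception in this series: it does not select USE_GENERIC_SMP_HELPERS and keeps its private implementation, changing only the signature. Either way, every port ends up behind the same prototypes; as they read after this series (modulo formatting, a sketch rather than a quote of linux/smp.h):

	int smp_call_function(void (*func)(void *info), void *info, int wait);
	int smp_call_function_single(int cpu, void (*func)(void *info),
				     void *info, int wait);
	int on_each_cpu(void (*func)(void *info), void *info, int wait);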
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 16be41446b5b..18bcc10903b4 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -303,6 +303,7 @@ config VIRT_CPU_ACCOUNTING
 
 config SMP
 	bool "Symmetric multi-processing support"
+	select USE_GENERIC_SMP_HELPERS
 	help
 	  This enables support for systems with more than one CPU. If you have
 	  a system with only one CPU, say N. If you have a system with more
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 705176b434b3..7dd96c127177 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -707,7 +707,7 @@ ia64_mca_cmc_vector_enable (void *dummy)
 static void
 ia64_mca_cmc_vector_disable_keventd(struct work_struct *unused)
 {
-	on_each_cpu(ia64_mca_cmc_vector_disable, NULL, 1, 0);
+	on_each_cpu(ia64_mca_cmc_vector_disable, NULL, 0);
 }
 
 /*
@@ -719,7 +719,7 @@ ia64_mca_cmc_vector_disable_keventd(struct work_struct *unused)
 static void
 ia64_mca_cmc_vector_enable_keventd(struct work_struct *unused)
 {
-	on_each_cpu(ia64_mca_cmc_vector_enable, NULL, 1, 0);
+	on_each_cpu(ia64_mca_cmc_vector_enable, NULL, 0);
 }
 
 /*
@@ -1881,7 +1881,7 @@ static int __cpuinit mca_cpu_callback(struct notifier_block *nfb,
 	case CPU_ONLINE:
 	case CPU_ONLINE_FROZEN:
 		smp_call_function_single(hotcpu, ia64_mca_cmc_vector_adjust,
-					 NULL, 1, 0);
+					 NULL, 0);
 		break;
 	}
 	return NOTIFY_OK;
diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
index 9dc00f7fe10e..e5c57f413ca2 100644
--- a/arch/ia64/kernel/palinfo.c
+++ b/arch/ia64/kernel/palinfo.c
@@ -921,7 +921,7 @@ int palinfo_handle_smp(pal_func_cpu_u_t *f, char *page)
 
 
 	/* will send IPI to other CPU and wait for completion of remote call */
-	if ((ret=smp_call_function_single(f->req_cpu, palinfo_smp_call, &ptr, 0, 1))) {
+	if ((ret=smp_call_function_single(f->req_cpu, palinfo_smp_call, &ptr, 1))) {
 		printk(KERN_ERR "palinfo: remote CPU call from %d to %d on function %d: "
 		       "error %d\n", smp_processor_id(), f->req_cpu, f->func_id, ret);
 		return 0;
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 7714a97b0104..19d4493c6193 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -1820,7 +1820,7 @@ pfm_syswide_cleanup_other_cpu(pfm_context_t *ctx)
 	int ret;
 
 	DPRINT(("calling CPU%d for cleanup\n", ctx->ctx_cpu));
-	ret = smp_call_function_single(ctx->ctx_cpu, pfm_syswide_force_stop, ctx, 0, 1);
+	ret = smp_call_function_single(ctx->ctx_cpu, pfm_syswide_force_stop, ctx, 1);
 	DPRINT(("called CPU%d for cleanup ret=%d\n", ctx->ctx_cpu, ret));
 }
 #endif /* CONFIG_SMP */
@@ -6508,7 +6508,7 @@ pfm_install_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
 	}
 
 	/* save the current system wide pmu states */
-	ret = on_each_cpu(pfm_alt_save_pmu_state, NULL, 0, 1);
+	ret = on_each_cpu(pfm_alt_save_pmu_state, NULL, 1);
 	if (ret) {
 		DPRINT(("on_each_cpu() failed: %d\n", ret));
 		goto cleanup_reserve;
@@ -6553,7 +6553,7 @@ pfm_remove_alt_pmu_interrupt(pfm_intr_handler_desc_t *hdl)
 
 	pfm_alt_intr_handler = NULL;
 
-	ret = on_each_cpu(pfm_alt_restore_pmu_state, NULL, 0, 1);
+	ret = on_each_cpu(pfm_alt_restore_pmu_state, NULL, 1);
 	if (ret) {
 		DPRINT(("on_each_cpu() failed: %d\n", ret));
 	}
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index a3a34b4eb038..fabaf08d9a69 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -286,7 +286,7 @@ void cpu_idle_wait(void)
 {
 	smp_mb();
 	/* kick all the CPUs so that they exit out of pm_idle */
-	smp_call_function(do_nothing, NULL, 0, 1);
+	smp_call_function(do_nothing, NULL, 1);
 }
 EXPORT_SYMBOL_GPL(cpu_idle_wait);
 
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
index 983296f1c813..3676468612b6 100644
--- a/arch/ia64/kernel/smp.c
+++ b/arch/ia64/kernel/smp.c
@@ -60,25 +60,9 @@ static struct local_tlb_flush_counts {
 
 static DEFINE_PER_CPU(unsigned int, shadow_flush_counts[NR_CPUS]) ____cacheline_aligned;
 
-
-/*
- * Structure and data for smp_call_function(). This is designed to minimise static memory
- * requirements. It also looks cleaner.
- */
-static __cacheline_aligned DEFINE_SPINLOCK(call_lock);
-
-struct call_data_struct {
-	void (*func) (void *info);
-	void *info;
-	long wait;
-	atomic_t started;
-	atomic_t finished;
-};
-
-static volatile struct call_data_struct *call_data;
-
 #define IPI_CALL_FUNC		0
 #define IPI_CPU_STOP		1
+#define IPI_CALL_FUNC_SINGLE	2
 #define IPI_KDUMP_CPU_STOP	3
 
 /* This needs to be cacheline aligned because it is written to by *other* CPUs.  */
@@ -86,43 +70,6 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(u64, ipi_operation);
 
 extern void cpu_halt (void);
 
-void
-lock_ipi_calllock(void)
-{
-	spin_lock_irq(&call_lock);
-}
-
-void
-unlock_ipi_calllock(void)
-{
-	spin_unlock_irq(&call_lock);
-}
-
-static inline void
-handle_call_data(void)
-{
-	struct call_data_struct *data;
-	void (*func)(void *info);
-	void *info;
-	int wait;
-
-	/* release the 'pointer lock' */
-	data = (struct call_data_struct *)call_data;
-	func = data->func;
-	info = data->info;
-	wait = data->wait;
-
-	mb();
-	atomic_inc(&data->started);
-	/* At this point the structure may be gone unless wait is true.  */
-	(*func)(info);
-
-	/* Notify the sending CPU that the task is done.  */
-	mb();
-	if (wait)
-		atomic_inc(&data->finished);
-}
-
 static void
 stop_this_cpu(void)
 {
@@ -163,13 +110,15 @@ handle_IPI (int irq, void *dev_id)
 			ops &= ~(1 << which);
 
 			switch (which) {
-			case IPI_CALL_FUNC:
-				handle_call_data();
-				break;
-
 			case IPI_CPU_STOP:
 				stop_this_cpu();
 				break;
+			case IPI_CALL_FUNC:
+				generic_smp_call_function_interrupt();
+				break;
+			case IPI_CALL_FUNC_SINGLE:
+				generic_smp_call_function_single_interrupt();
+				break;
 #ifdef CONFIG_KEXEC
 			case IPI_KDUMP_CPU_STOP:
 				unw_init_running(kdump_cpu_freeze, NULL);
@@ -187,6 +136,8 @@ handle_IPI (int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
+
+
 /*
  * Called with preemption disabled.
  */
@@ -334,7 +285,7 @@ smp_flush_tlb_cpumask(cpumask_t xcpumask)
 void
 smp_flush_tlb_all (void)
 {
-	on_each_cpu((void (*)(void *))local_flush_tlb_all, NULL, 1, 1);
+	on_each_cpu((void (*)(void *))local_flush_tlb_all, NULL, 1);
 }
 
 void
@@ -357,193 +308,18 @@ smp_flush_tlb_mm (struct mm_struct *mm)
 	 * anyhow, and once a CPU is interrupted, the cost of local_flush_tlb_all() is
 	 * rather trivial.
	 */
-	on_each_cpu((void (*)(void *))local_finish_flush_tlb_mm, mm, 1, 1);
+	on_each_cpu((void (*)(void *))local_finish_flush_tlb_mm, mm, 1);
 }
 
-/*
- * Run a function on a specific CPU
- *  <func>	The function to run. This must be fast and non-blocking.
- *  <info>	An arbitrary pointer to pass to the function.
- *  <nonatomic>	Currently unused.
- *  <wait>	If true, wait until function has completed on other CPUs.
- *  [RETURNS]   0 on success, else a negative status code.
- *
- * Does not return until the remote CPU is nearly ready to execute <func>
- * or is or has executed.
- */
-
-int
-smp_call_function_single (int cpuid, void (*func) (void *info), void *info, int nonatomic,
-			  int wait)
+void arch_send_call_function_single_ipi(int cpu)
 {
-	struct call_data_struct data;
-	int cpus = 1;
-	int me = get_cpu(); /* prevent preemption and reschedule on another processor */
-
-	if (cpuid == me) {
-		local_irq_disable();
-		func(info);
-		local_irq_enable();
-		put_cpu();
-		return 0;
-	}
-
-	data.func = func;
-	data.info = info;
-	atomic_set(&data.started, 0);
-	data.wait = wait;
-	if (wait)
-		atomic_set(&data.finished, 0);
-
-	spin_lock_bh(&call_lock);
-
-	call_data = &data;
-	mb();	/* ensure store to call_data precedes setting of IPI_CALL_FUNC */
-	send_IPI_single(cpuid, IPI_CALL_FUNC);
-
-	/* Wait for response */
-	while (atomic_read(&data.started) != cpus)
-		cpu_relax();
-
-	if (wait)
-		while (atomic_read(&data.finished) != cpus)
-			cpu_relax();
-	call_data = NULL;
-
-	spin_unlock_bh(&call_lock);
-	put_cpu();
-	return 0;
-}
-EXPORT_SYMBOL(smp_call_function_single);
-
-/**
- * smp_call_function_mask(): Run a function on a set of other CPUs.
- *  <mask>	The set of cpus to run on.  Must not include the current cpu.
- *  <func>	The function to run. This must be fast and non-blocking.
- *  <info>	An arbitrary pointer to pass to the function.
- *  <wait>	If true, wait (atomically) until function
- *		has completed on other CPUs.
- *
- * Returns 0 on success, else a negative status code.
- *
- * If @wait is true, then returns once @func has returned; otherwise
- * it returns just before the target cpu calls @func.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
- */
-int smp_call_function_mask(cpumask_t mask,
-			   void (*func)(void *), void *info,
-			   int wait)
 {
-	struct call_data_struct data;
-	cpumask_t allbutself;
-	int cpus;
-
-	spin_lock(&call_lock);
-	allbutself = cpu_online_map;
-	cpu_clear(smp_processor_id(), allbutself);
-
-	cpus_and(mask, mask, allbutself);
-	cpus = cpus_weight(mask);
-	if (!cpus) {
-		spin_unlock(&call_lock);
-		return 0;
-	}
-
-	/* Can deadlock when called with interrupts disabled */
-	WARN_ON(irqs_disabled());
-
-	data.func = func;
-	data.info = info;
-	atomic_set(&data.started, 0);
-	data.wait = wait;
-	if (wait)
-		atomic_set(&data.finished, 0);
-
-	call_data = &data;
-	mb(); /* ensure store to call_data precedes setting of IPI_CALL_FUNC*/
-
-	/* Send a message to other CPUs */
-	if (cpus_equal(mask, allbutself))
-		send_IPI_allbutself(IPI_CALL_FUNC);
-	else
-		send_IPI_mask(mask, IPI_CALL_FUNC);
-
-	/* Wait for response */
-	while (atomic_read(&data.started) != cpus)
-		cpu_relax();
-
-	if (wait)
-		while (atomic_read(&data.finished) != cpus)
-			cpu_relax();
-	call_data = NULL;
-
-	spin_unlock(&call_lock);
-	return 0;
-
+	send_IPI_single(cpu, IPI_CALL_FUNC_SINGLE);
 }
-EXPORT_SYMBOL(smp_call_function_mask);
 
-/*
- * this function sends a 'generic call function' IPI to all other CPUs
- * in the system.
- */
-
-/*
- * [SUMMARY]	Run a function on all other CPUs.
- *  <func>	The function to run. This must be fast and non-blocking.
- *  <info>	An arbitrary pointer to pass to the function.
- *  <nonatomic>	currently unused.
- *  <wait>	If true, wait (atomically) until function has completed on other CPUs.
- *  [RETURNS]   0 on success, else a negative status code.
- *
- * Does not return until remote CPUs are nearly ready to execute <func> or are or have
- * executed.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
- */
-int
-smp_call_function (void (*func) (void *info), void *info, int nonatomic, int wait)
+void arch_send_call_function_ipi(cpumask_t mask)
 {
-	struct call_data_struct data;
-	int cpus;
-
-	spin_lock(&call_lock);
-	cpus = num_online_cpus() - 1;
-	if (!cpus) {
-		spin_unlock(&call_lock);
-		return 0;
-	}
-
-	/* Can deadlock when called with interrupts disabled */
-	WARN_ON(irqs_disabled());
-
-	data.func = func;
-	data.info = info;
-	atomic_set(&data.started, 0);
-	data.wait = wait;
-	if (wait)
-		atomic_set(&data.finished, 0);
-
-	call_data = &data;
-	mb();	/* ensure store to call_data precedes setting of IPI_CALL_FUNC */
-	send_IPI_allbutself(IPI_CALL_FUNC);
-
-	/* Wait for response */
-	while (atomic_read(&data.started) != cpus)
-		cpu_relax();
-
-	if (wait)
-		while (atomic_read(&data.finished) != cpus)
-			cpu_relax();
-	call_data = NULL;
-
-	spin_unlock(&call_lock);
-	return 0;
+	send_IPI_mask(mask, IPI_CALL_FUNC);
 }
-EXPORT_SYMBOL(smp_call_function);
 
 /*
  * this function calls the 'stop' function on all other CPUs in the system.
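ia64 sheds the most code because its private smp_call_function(), smp_call_function_single() and smp_call_function_mask() all move to kernel/smp.c; only the two one-line arch hooks remain. A hedged usage sketch of the mask variant, whose caller-visible semantics stay the same (do_drain() is illustrative; the generic code masks out the calling CPU itself, as the removed version did):

	static void do_drain(void *unused)
	{
		/* per-CPU work; runs in interrupt context on remote CPUs */
	}

	cpumask_t mask = cpu_online_map;

	smp_call_function_mask(mask, do_drain, NULL, 1);	/* wait == 1 */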
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index d7ad42b77d41..9d1d429c6c59 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -317,7 +317,7 @@ ia64_sync_itc (unsigned int master)
 
 	go[MASTER] = 1;
 
-	if (smp_call_function_single(master, sync_master, NULL, 1, 0) < 0) {
+	if (smp_call_function_single(master, sync_master, NULL, 0) < 0) {
 		printk(KERN_ERR "sync_itc: failed to get attention of CPU %u!\n", master);
 		return;
 	}
@@ -395,14 +395,14 @@ smp_callin (void)
 
 	fix_b0_for_bsp();
 
-	lock_ipi_calllock();
+	ipi_call_lock_irq();
 	spin_lock(&vector_lock);
 	/* Setup the per cpu irq handling data structures */
 	__setup_vector_irq(cpuid);
 	cpu_set(cpuid, cpu_online_map);
 	per_cpu(cpu_state, cpuid) = CPU_ONLINE;
 	spin_unlock(&vector_lock);
-	unlock_ipi_calllock();
+	ipi_call_unlock_irq();
 
 	smp_setup_percpu_timer();
 
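lock_ipi_calllock() guarded ia64's private call_lock; with that lock gone, CPU bring-up serializes against in-flight cross-calls through the generic helpers instead. Annotated, the sequence in smp_callin() now reads (code from the hunk above, comments editorial):

	ipi_call_lock_irq();		/* hold off smp_call_function() senders */
	cpu_set(cpuid, cpu_online_map);	/* become a legal cross-call target */
	per_cpu(cpu_state, cpuid) = CPU_ONLINE;
	ipi_call_unlock_irq();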
diff --git a/arch/ia64/kernel/uncached.c b/arch/ia64/kernel/uncached.c
index e77995a6e3ed..8eff8c1d40a6 100644
--- a/arch/ia64/kernel/uncached.c
+++ b/arch/ia64/kernel/uncached.c
@@ -123,8 +123,7 @@ static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid)
 	status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
 	if (status == PAL_VISIBILITY_OK_REMOTE_NEEDED) {
 		atomic_set(&uc_pool->status, 0);
-		status = smp_call_function(uncached_ipi_visibility, uc_pool,
-					   0, 1);
+		status = smp_call_function(uncached_ipi_visibility, uc_pool, 1);
 		if (status || atomic_read(&uc_pool->status))
 			goto failed;
 	} else if (status != PAL_VISIBILITY_OK)
@@ -146,7 +145,7 @@ static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid)
 	if (status != PAL_STATUS_SUCCESS)
 		goto failed;
 	atomic_set(&uc_pool->status, 0);
-	status = smp_call_function(uncached_ipi_mc_drain, uc_pool, 0, 1);
+	status = smp_call_function(uncached_ipi_mc_drain, uc_pool, 1);
 	if (status || atomic_read(&uc_pool->status))
 		goto failed;
 
diff --git a/arch/ia64/sn/kernel/irq.c b/arch/ia64/sn/kernel/irq.c
index 53351c3cd7b1..96c31b4180c3 100644
--- a/arch/ia64/sn/kernel/irq.c
+++ b/arch/ia64/sn/kernel/irq.c
@@ -11,6 +11,7 @@
 #include <linux/irq.h>
 #include <linux/spinlock.h>
 #include <linux/init.h>
+#include <linux/rculist.h>
 #include <asm/sn/addrs.h>
 #include <asm/sn/arch.h>
 #include <asm/sn/intr.h>
diff --git a/arch/ia64/sn/kernel/sn2/sn_hwperf.c b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
index 8cc0c4753d89..636588e7e068 100644
--- a/arch/ia64/sn/kernel/sn2/sn_hwperf.c
+++ b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
@@ -629,7 +629,7 @@ static int sn_hwperf_op_cpu(struct sn_hwperf_op_info *op_info)
 		if (use_ipi) {
 			/* use an interprocessor interrupt to call SAL */
 			smp_call_function_single(cpu, sn_hwperf_call_sal,
-				op_info, 1, 1);
+				op_info, 1);
 		}
 		else {
 			/* migrate the task before calling SAL */
diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig
index de153de2ea9f..a5f864c445b2 100644
--- a/arch/m32r/Kconfig
+++ b/arch/m32r/Kconfig
@@ -296,6 +296,7 @@ config PREEMPT
 
 config SMP
 	bool "Symmetric multi-processing support"
+	select USE_GENERIC_SMP_HELPERS
 	---help---
 	  This enables support for systems with more than one CPU. If you have
 	  a system with only one CPU, like most personal computers, say N. If
diff --git a/arch/m32r/kernel/m32r_ksyms.c b/arch/m32r/kernel/m32r_ksyms.c
index e6709fe950ba..16bcb189a383 100644
--- a/arch/m32r/kernel/m32r_ksyms.c
+++ b/arch/m32r/kernel/m32r_ksyms.c
@@ -43,9 +43,6 @@ EXPORT_SYMBOL(dcache_dummy);
 #endif
 EXPORT_SYMBOL(cpu_data);
 
-/* Global SMP stuff */
-EXPORT_SYMBOL(smp_call_function);
-
 /* TLB flushing */
 EXPORT_SYMBOL(smp_flush_tlb_page);
 #endif
diff --git a/arch/m32r/kernel/smp.c b/arch/m32r/kernel/smp.c
index c837bc13b015..7577f971ea4e 100644
--- a/arch/m32r/kernel/smp.c
+++ b/arch/m32r/kernel/smp.c
@@ -35,22 +35,6 @@
 /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
 
 /*
- * Structure and data for smp_call_function(). This is designed to minimise
- * static memory requirements. It also looks cleaner.
- */
-static DEFINE_SPINLOCK(call_lock);
-
-struct call_data_struct {
-	void (*func) (void *info);
-	void *info;
-	atomic_t started;
-	atomic_t finished;
-	int wait;
-} __attribute__ ((__aligned__(SMP_CACHE_BYTES)));
-
-static struct call_data_struct *call_data;
-
-/*
  * For flush_cache_all()
  */
 static DEFINE_SPINLOCK(flushcache_lock);
@@ -96,9 +80,6 @@ void smp_invalidate_interrupt(void);
 void smp_send_stop(void);
 static void stop_this_cpu(void *);
 
-int smp_call_function(void (*) (void *), void *, int, int);
-void smp_call_function_interrupt(void);
-
 void smp_send_timer(void);
 void smp_ipi_timer_interrupt(struct pt_regs *);
 void smp_local_timer_interrupt(void);
@@ -231,7 +212,7 @@ void smp_flush_tlb_all(void)
 	local_irq_save(flags);
 	__flush_tlb_all();
 	local_irq_restore(flags);
-	smp_call_function(flush_tlb_all_ipi, NULL, 1, 1);
+	smp_call_function(flush_tlb_all_ipi, NULL, 1);
 	preempt_enable();
 }
 
@@ -524,7 +505,7 @@ void smp_invalidate_interrupt(void)
  *==========================================================================*/
 void smp_send_stop(void)
 {
-	smp_call_function(stop_this_cpu, NULL, 1, 0);
+	smp_call_function(stop_this_cpu, NULL, 0);
 }
 
 /*==========================================================================*
@@ -565,86 +546,14 @@ static void stop_this_cpu(void *dummy)
 	for ( ; ; );
 }
 
-/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
-/* Call function Routines                                                    */
-/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
-
-/*==========================================================================*
- * Name:         smp_call_function
- *
- * Description:  This routine sends a 'CALL_FUNCTION_IPI' to all other CPUs
- *               in the system.
- *
- * Born on Date: 2002.02.05
- *
- * Arguments:    *func - The function to run. This must be fast and
- *                       non-blocking.
- *               *info - An arbitrary pointer to pass to the function.
- *               nonatomic - currently unused.
- *               wait - If true, wait (atomically) until function has
- *                      completed on other CPUs.
- *
- * Returns:      0 on success, else a negative status code. Does not return
- *               until remote CPUs are nearly ready to execute <<func>> or
- *               are or have executed.
- *
- * Cautions:     You must not call this function with disabled interrupts or
- *               from a hardware interrupt handler, you may call it from a
- *               bottom half handler.
- *
- * Modification log:
- * Date       Who Description
- * ---------- --- --------------------------------------------------------
- *
- *==========================================================================*/
-int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
-	int wait)
+void arch_send_call_function_ipi(cpumask_t mask)
 {
-	struct call_data_struct data;
-	int cpus;
-
-#ifdef DEBUG_SMP
-	unsigned long flags;
-	__save_flags(flags);
-	if (!(flags & 0x0040))	/* Interrupt Disable NONONO */
-		BUG();
-#endif /* DEBUG_SMP */
-
-	/* Holding any lock stops cpus from going down. */
-	spin_lock(&call_lock);
-	cpus = num_online_cpus() - 1;
-
-	if (!cpus) {
-		spin_unlock(&call_lock);
-		return 0;
-	}
-
-	/* Can deadlock when called with interrupts disabled */
-	WARN_ON(irqs_disabled());
-
-	data.func = func;
-	data.info = info;
-	atomic_set(&data.started, 0);
-	data.wait = wait;
-	if (wait)
-		atomic_set(&data.finished, 0);
-
-	call_data = &data;
-	mb();
-
-	/* Send a message to all other CPUs and wait for them to respond */
-	send_IPI_allbutself(CALL_FUNCTION_IPI, 0);
-
-	/* Wait for response */
-	while (atomic_read(&data.started) != cpus)
-		barrier();
-
-	if (wait)
-		while (atomic_read(&data.finished) != cpus)
-			barrier();
-	spin_unlock(&call_lock);
+	send_IPI_mask(mask, CALL_FUNCTION_IPI, 0);
+}
 
-	return 0;
+void arch_send_call_function_single_ipi(int cpu)
+{
+	send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNC_SINGLE_IPI, 0);
 }
 
 /*==========================================================================*
@@ -666,27 +575,16 @@ int smp_call_function(void (*func) (void *info), void *info, int nonatomic,
  *==========================================================================*/
 void smp_call_function_interrupt(void)
 {
-	void (*func) (void *info) = call_data->func;
-	void *info = call_data->info;
-	int wait = call_data->wait;
-
-	/*
-	 * Notify initiating CPU that I've grabbed the data and am
-	 * about to execute the function
-	 */
-	mb();
-	atomic_inc(&call_data->started);
-	/*
-	 * At this point the info structure may be out of scope unless wait==1
681 | */ | ||
682 | irq_enter(); | 578 | irq_enter(); |
683 | (*func)(info); | 579 | generic_smp_call_function_interrupt(); |
684 | irq_exit(); | 580 | irq_exit(); |
581 | } | ||
685 | 582 | ||
686 | if (wait) { | 583 | void smp_call_function_single_interrupt(void) |
687 | mb(); | 584 | { |
688 | atomic_inc(&call_data->finished); | 585 | irq_enter(); |
689 | } | 586 | generic_smp_call_function_single_interrupt(); |
587 | irq_exit(); | ||
690 | } | 588 | } |
691 | 589 | ||
692 | /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ | 590 | /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ |
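[Example] The caller-visible change running through this whole series is that smp_call_function() loses its third argument (documented as "nonatomic" or "retry" depending on the arch, and essentially unused) once kernel/smp.c supplies the queue-based implementation. A before/after sketch, written as if inside arch/m32r/kernel/smp.c (example_callers is an illustrative wrapper, not part of the patch; flush_tlb_all_ipi and stop_this_cpu are the patch's own handlers):

    /* Old arch-private API (removed above):
     *     int smp_call_function(void (*func)(void *), void *info,
     *                           int nonatomic, int wait);
     * Generic replacement (kernel/smp.c):
     *     int smp_call_function(void (*func)(void *), void *info,
     *                           int wait);
     */
    static void example_callers(void)
    {
            smp_call_function(flush_tlb_all_ipi, NULL, 1);  /* wait for all CPUs */
            smp_call_function(stop_this_cpu, NULL, 0);      /* fire and forget */
    }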
diff --git a/arch/m32r/kernel/traps.c b/arch/m32r/kernel/traps.c index 89ba4a0b5d51..46159a4e644b 100644 --- a/arch/m32r/kernel/traps.c +++ b/arch/m32r/kernel/traps.c | |||
@@ -40,6 +40,7 @@ extern void smp_invalidate_interrupt(void); | |||
40 | extern void smp_call_function_interrupt(void); | 40 | extern void smp_call_function_interrupt(void); |
41 | extern void smp_ipi_timer_interrupt(void); | 41 | extern void smp_ipi_timer_interrupt(void); |
42 | extern void smp_flush_cache_all_interrupt(void); | 42 | extern void smp_flush_cache_all_interrupt(void); |
43 | extern void smp_call_function_single_interrupt(void); | ||
43 | 44 | ||
44 | /* | 45 | /* |
45 | * for Boot AP function | 46 | * for Boot AP function |
@@ -103,7 +104,7 @@ void set_eit_vector_entries(void) | |||
103 | eit_vector[186] = (unsigned long)smp_call_function_interrupt; | 104 | eit_vector[186] = (unsigned long)smp_call_function_interrupt; |
104 | eit_vector[187] = (unsigned long)smp_ipi_timer_interrupt; | 105 | eit_vector[187] = (unsigned long)smp_ipi_timer_interrupt; |
105 | eit_vector[188] = (unsigned long)smp_flush_cache_all_interrupt; | 106 | eit_vector[188] = (unsigned long)smp_flush_cache_all_interrupt; |
106 | eit_vector[189] = 0; | 107 | eit_vector[189] = (unsigned long)smp_call_function_single_interrupt; |
107 | eit_vector[190] = 0; | 108 | eit_vector[190] = 0; |
108 | eit_vector[191] = 0; | 109 | eit_vector[191] = 0; |
109 | #endif | 110 | #endif |
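[Example] On m32r the exception vector table is the IPI dispatch mechanism: the sender's IPI number indexes eit_vector directly, so the new single-call IPI only reaches its handler if the matching slot is filled. Slot 189 presumably corresponds to CALL_FUNC_SINGLE_IPI; the essential pairing from the two m32r files, collected as fragments:

    /* sender (smp.c) and vector slot (traps.c) must agree: */
    send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNC_SINGLE_IPI, 0);
    eit_vector[189] = (unsigned long)smp_call_function_single_interrupt;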
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index d23204e29e16..d21df5f1b1f3 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig | |||
@@ -1657,6 +1657,7 @@ config SMP | |||
1657 | bool "Multi-Processing support" | 1657 | bool "Multi-Processing support" |
1658 | depends on SYS_SUPPORTS_SMP | 1658 | depends on SYS_SUPPORTS_SMP |
1659 | select IRQ_PER_CPU | 1659 | select IRQ_PER_CPU |
1660 | select USE_GENERIC_SMP_HELPERS | ||
1660 | help | 1661 | help |
1661 | This enables support for systems with more than one CPU. If you have | 1662 | This enables support for systems with more than one CPU. If you have |
1662 | a system with only one CPU, like most personal computers, say N. If | 1663 | a system with only one CPU, like most personal computers, say N. If |
diff --git a/arch/mips/kernel/irq-rm9000.c b/arch/mips/kernel/irq-rm9000.c index ed9febe63d72..b47e4615ec12 100644 --- a/arch/mips/kernel/irq-rm9000.c +++ b/arch/mips/kernel/irq-rm9000.c | |||
@@ -49,7 +49,7 @@ static void local_rm9k_perfcounter_irq_startup(void *args) | |||
49 | 49 | ||
50 | static unsigned int rm9k_perfcounter_irq_startup(unsigned int irq) | 50 | static unsigned int rm9k_perfcounter_irq_startup(unsigned int irq) |
51 | { | 51 | { |
52 | on_each_cpu(local_rm9k_perfcounter_irq_startup, (void *) irq, 0, 1); | 52 | on_each_cpu(local_rm9k_perfcounter_irq_startup, (void *) irq, 1); |
53 | 53 | ||
54 | return 0; | 54 | return 0; |
55 | } | 55 | } |
@@ -66,7 +66,7 @@ static void local_rm9k_perfcounter_irq_shutdown(void *args) | |||
66 | 66 | ||
67 | static void rm9k_perfcounter_irq_shutdown(unsigned int irq) | 67 | static void rm9k_perfcounter_irq_shutdown(unsigned int irq) |
68 | { | 68 | { |
69 | on_each_cpu(local_rm9k_perfcounter_irq_shutdown, (void *) irq, 0, 1); | 69 | on_each_cpu(local_rm9k_perfcounter_irq_shutdown, (void *) irq, 1); |
70 | } | 70 | } |
71 | 71 | ||
72 | static struct irq_chip rm9k_irq_controller = { | 72 | static struct irq_chip rm9k_irq_controller = { |
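[Example] on_each_cpu() goes through the same signature diet here and in every hunk below. As a reminder of its semantics (it does more than smp_call_function(): the function also runs on the calling CPU, with interrupts disabled), a sketch that roughly mirrors the generic implementation of this period; example_on_each_cpu is an illustrative name:

    /* simplified: on_each_cpu(func, info, wait) */
    static int example_on_each_cpu(void (*func)(void *), void *info,
                                   int wait)
    {
            int ret;

            preempt_disable();
            ret = smp_call_function(func, info, wait);  /* all other CPUs */
            local_irq_disable();
            func(info);                                 /* this CPU too */
            local_irq_enable();
            preempt_enable();
            return ret;
    }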
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index cdf87a9dd4ba..4410f172b8ab 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c | |||
@@ -131,148 +131,29 @@ asmlinkage __cpuinit void start_secondary(void) | |||
131 | cpu_idle(); | 131 | cpu_idle(); |
132 | } | 132 | } |
133 | 133 | ||
134 | DEFINE_SPINLOCK(smp_call_lock); | 134 | void arch_send_call_function_ipi(cpumask_t mask) |
135 | |||
136 | struct call_data_struct *call_data; | ||
137 | |||
138 | /* | ||
139 | * Run a function on all other CPUs. | ||
140 | * | ||
141 | * <mask> cpuset_t of all processors to run the function on. | ||
142 | * <func> The function to run. This must be fast and non-blocking. | ||
143 | * <info> An arbitrary pointer to pass to the function. | ||
144 | * <retry> If true, keep retrying until ready. | ||
145 | * <wait> If true, wait until function has completed on other CPUs. | ||
146 | * [RETURNS] 0 on success, else a negative status code. | ||
147 | * | ||
148 | * Does not return until remote CPUs are nearly ready to execute <func> | ||
149 | * or are or have executed. | ||
150 | * | ||
151 | * You must not call this function with disabled interrupts or from a | ||
152 | * hardware interrupt handler or from a bottom half handler: | ||
153 | * | ||
154 | * CPU A CPU B | ||
155 | * Disable interrupts | ||
156 | * smp_call_function() | ||
157 | * Take call_lock | ||
158 | * Send IPIs | ||
159 | * Wait for all cpus to acknowledge IPI | ||
160 | * CPU A has not responded, spin waiting | ||
161 | * for cpu A to respond, holding call_lock | ||
162 | * smp_call_function() | ||
163 | * Spin waiting for call_lock | ||
164 | * Deadlock Deadlock | ||
165 | */ | ||
166 | int smp_call_function_mask(cpumask_t mask, void (*func) (void *info), | ||
167 | void *info, int retry, int wait) | ||
168 | { | 135 | { |
169 | struct call_data_struct data; | ||
170 | int cpu = smp_processor_id(); | ||
171 | int cpus; | ||
172 | |||
173 | /* | ||
174 | * Can die spectacularly if this CPU isn't yet marked online | ||
175 | */ | ||
176 | BUG_ON(!cpu_online(cpu)); | ||
177 | |||
178 | cpu_clear(cpu, mask); | ||
179 | cpus = cpus_weight(mask); | ||
180 | if (!cpus) | ||
181 | return 0; | ||
182 | |||
183 | /* Can deadlock when called with interrupts disabled */ | ||
184 | WARN_ON(irqs_disabled()); | ||
185 | |||
186 | data.func = func; | ||
187 | data.info = info; | ||
188 | atomic_set(&data.started, 0); | ||
189 | data.wait = wait; | ||
190 | if (wait) | ||
191 | atomic_set(&data.finished, 0); | ||
192 | |||
193 | spin_lock(&smp_call_lock); | ||
194 | call_data = &data; | ||
195 | smp_mb(); | ||
196 | |||
197 | /* Send a message to all other CPUs and wait for them to respond */ | ||
198 | mp_ops->send_ipi_mask(mask, SMP_CALL_FUNCTION); | 136 | mp_ops->send_ipi_mask(mask, SMP_CALL_FUNCTION); |
199 | |||
200 | /* Wait for response */ | ||
201 | /* FIXME: lock-up detection, backtrace on lock-up */ | ||
202 | while (atomic_read(&data.started) != cpus) | ||
203 | barrier(); | ||
204 | |||
205 | if (wait) | ||
206 | while (atomic_read(&data.finished) != cpus) | ||
207 | barrier(); | ||
208 | call_data = NULL; | ||
209 | spin_unlock(&smp_call_lock); | ||
210 | |||
211 | return 0; | ||
212 | } | 137 | } |
213 | 138 | ||
214 | int smp_call_function(void (*func) (void *info), void *info, int retry, | 139 | /* |
215 | int wait) | 140 | * We reuse the same vector for the single IPI |
141 | */ | ||
142 | void arch_send_call_function_single_ipi(int cpu) | ||
216 | { | 143 | { |
217 | return smp_call_function_mask(cpu_online_map, func, info, retry, wait); | 144 | mp_ops->send_ipi_mask(cpumask_of_cpu(cpu), SMP_CALL_FUNCTION); |
218 | } | 145 | } |
219 | EXPORT_SYMBOL(smp_call_function); | ||
220 | 146 | ||
147 | /* | ||
148 | * Call into both interrupt handlers, as we share the IPI for them | ||
149 | */ | ||
221 | void smp_call_function_interrupt(void) | 150 | void smp_call_function_interrupt(void) |
222 | { | 151 | { |
223 | void (*func) (void *info) = call_data->func; | ||
224 | void *info = call_data->info; | ||
225 | int wait = call_data->wait; | ||
226 | |||
227 | /* | ||
228 | * Notify initiating CPU that I've grabbed the data and am | ||
229 | * about to execute the function. | ||
230 | */ | ||
231 | smp_mb(); | ||
232 | atomic_inc(&call_data->started); | ||
233 | |||
234 | /* | ||
235 | * At this point the info structure may be out of scope unless wait==1. | ||
236 | */ | ||
237 | irq_enter(); | 152 | irq_enter(); |
238 | (*func)(info); | 153 | generic_smp_call_function_single_interrupt(); |
154 | generic_smp_call_function_interrupt(); | ||
239 | irq_exit(); | 155 | irq_exit(); |
240 | |||
241 | if (wait) { | ||
242 | smp_mb(); | ||
243 | atomic_inc(&call_data->finished); | ||
244 | } | ||
245 | } | ||
246 | |||
247 | int smp_call_function_single(int cpu, void (*func) (void *info), void *info, | ||
248 | int retry, int wait) | ||
249 | { | ||
250 | int ret, me; | ||
251 | |||
252 | /* | ||
253 | * Can die spectacularly if this CPU isn't yet marked online | ||
254 | */ | ||
255 | if (!cpu_online(cpu)) | ||
256 | return 0; | ||
257 | |||
258 | me = get_cpu(); | ||
259 | BUG_ON(!cpu_online(me)); | ||
260 | |||
261 | if (cpu == me) { | ||
262 | local_irq_disable(); | ||
263 | func(info); | ||
264 | local_irq_enable(); | ||
265 | put_cpu(); | ||
266 | return 0; | ||
267 | } | ||
268 | |||
269 | ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, retry, | ||
270 | wait); | ||
271 | |||
272 | put_cpu(); | ||
273 | return 0; | ||
274 | } | 156 | } |
275 | EXPORT_SYMBOL(smp_call_function_single); | ||
276 | 157 | ||
277 | static void stop_this_cpu(void *dummy) | 158 | static void stop_this_cpu(void *dummy) |
278 | { | 159 | { |
@@ -286,7 +167,7 @@ static void stop_this_cpu(void *dummy) | |||
286 | 167 | ||
287 | void smp_send_stop(void) | 168 | void smp_send_stop(void) |
288 | { | 169 | { |
289 | smp_call_function(stop_this_cpu, NULL, 1, 0); | 170 | smp_call_function(stop_this_cpu, NULL, 0); |
290 | } | 171 | } |
291 | 172 | ||
292 | void __init smp_cpus_done(unsigned int max_cpus) | 173 | void __init smp_cpus_done(unsigned int max_cpus) |
@@ -365,7 +246,7 @@ static void flush_tlb_all_ipi(void *info) | |||
365 | 246 | ||
366 | void flush_tlb_all(void) | 247 | void flush_tlb_all(void) |
367 | { | 248 | { |
368 | on_each_cpu(flush_tlb_all_ipi, NULL, 1, 1); | 249 | on_each_cpu(flush_tlb_all_ipi, NULL, 1); |
369 | } | 250 | } |
370 | 251 | ||
371 | static void flush_tlb_mm_ipi(void *mm) | 252 | static void flush_tlb_mm_ipi(void *mm) |
@@ -385,7 +266,7 @@ static void flush_tlb_mm_ipi(void *mm) | |||
385 | static inline void smp_on_other_tlbs(void (*func) (void *info), void *info) | 266 | static inline void smp_on_other_tlbs(void (*func) (void *info), void *info) |
386 | { | 267 | { |
387 | #ifndef CONFIG_MIPS_MT_SMTC | 268 | #ifndef CONFIG_MIPS_MT_SMTC |
388 | smp_call_function(func, info, 1, 1); | 269 | smp_call_function(func, info, 1); |
389 | #endif | 270 | #endif |
390 | } | 271 | } |
391 | 272 | ||
@@ -485,7 +366,7 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end) | |||
485 | .addr2 = end, | 366 | .addr2 = end, |
486 | }; | 367 | }; |
487 | 368 | ||
488 | on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1, 1); | 369 | on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1); |
489 | } | 370 | } |
490 | 371 | ||
491 | static void flush_tlb_page_ipi(void *info) | 372 | static void flush_tlb_page_ipi(void *info) |
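[Example] MIPS keeps a single cross-call vector, so arch_send_call_function_single_ipi() reuses SMP_CALL_FUNCTION and the receive path drains both generic queues; that sharing is safe because each generic_* dispatcher simply returns when nothing is pending for this CPU. The arch-private smp_call_function_single() above also disappears in favor of the generic one. A hedged usage sketch (read_count_ipi and remote_count are illustrative; read_c0_count() is the MIPS CP0 cycle counter):

    static void read_count_ipi(void *info)
    {
            *(unsigned int *)info = read_c0_count();
    }

    static unsigned int remote_count(int cpu)
    {
            unsigned int val = 0;

            /* generic helper: runs locally (irqs off) if cpu == self */
            smp_call_function_single(cpu, read_count_ipi, &val, 1);
            return val;
    }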
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c index 3e863186cd22..a516286532ab 100644 --- a/arch/mips/kernel/smtc.c +++ b/arch/mips/kernel/smtc.c | |||
@@ -877,7 +877,6 @@ static void ipi_resched_interrupt(void) | |||
877 | /* Return from interrupt should be enough to cause scheduler check */ | 877 | /* Return from interrupt should be enough to cause scheduler check */ |
878 | } | 878 | } |
879 | 879 | ||
880 | |||
881 | static void ipi_call_interrupt(void) | 880 | static void ipi_call_interrupt(void) |
882 | { | 881 | { |
883 | /* Invoke generic function invocation code in smp.c */ | 882 | /* Invoke generic function invocation code in smp.c */ |
diff --git a/arch/mips/kernel/stacktrace.c b/arch/mips/kernel/stacktrace.c index ebd9db8d1ece..5eb4681a73d2 100644 --- a/arch/mips/kernel/stacktrace.c +++ b/arch/mips/kernel/stacktrace.c | |||
@@ -73,3 +73,4 @@ void save_stack_trace(struct stack_trace *trace) | |||
73 | prepare_frametrace(regs); | 73 | prepare_frametrace(regs); |
74 | save_context_stack(trace, regs); | 74 | save_context_stack(trace, regs); |
75 | } | 75 | } |
76 | EXPORT_SYMBOL_GPL(save_stack_trace); | ||
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c index 27096751ddce..71df3390c07b 100644 --- a/arch/mips/mm/c-r4k.c +++ b/arch/mips/mm/c-r4k.c | |||
@@ -43,12 +43,12 @@ | |||
43 | * primary cache. | 43 | * primary cache. |
44 | */ | 44 | */ |
45 | static inline void r4k_on_each_cpu(void (*func) (void *info), void *info, | 45 | static inline void r4k_on_each_cpu(void (*func) (void *info), void *info, |
46 | int retry, int wait) | 46 | int wait) |
47 | { | 47 | { |
48 | preempt_disable(); | 48 | preempt_disable(); |
49 | 49 | ||
50 | #if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC) | 50 | #if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC) |
51 | smp_call_function(func, info, retry, wait); | 51 | smp_call_function(func, info, wait); |
52 | #endif | 52 | #endif |
53 | func(info); | 53 | func(info); |
54 | preempt_enable(); | 54 | preempt_enable(); |
@@ -350,7 +350,7 @@ static inline void local_r4k___flush_cache_all(void * args) | |||
350 | 350 | ||
351 | static void r4k___flush_cache_all(void) | 351 | static void r4k___flush_cache_all(void) |
352 | { | 352 | { |
353 | r4k_on_each_cpu(local_r4k___flush_cache_all, NULL, 1, 1); | 353 | r4k_on_each_cpu(local_r4k___flush_cache_all, NULL, 1); |
354 | } | 354 | } |
355 | 355 | ||
356 | static inline int has_valid_asid(const struct mm_struct *mm) | 356 | static inline int has_valid_asid(const struct mm_struct *mm) |
@@ -397,7 +397,7 @@ static void r4k_flush_cache_range(struct vm_area_struct *vma, | |||
397 | int exec = vma->vm_flags & VM_EXEC; | 397 | int exec = vma->vm_flags & VM_EXEC; |
398 | 398 | ||
399 | if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) | 399 | if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) |
400 | r4k_on_each_cpu(local_r4k_flush_cache_range, vma, 1, 1); | 400 | r4k_on_each_cpu(local_r4k_flush_cache_range, vma, 1); |
401 | } | 401 | } |
402 | 402 | ||
403 | static inline void local_r4k_flush_cache_mm(void * args) | 403 | static inline void local_r4k_flush_cache_mm(void * args) |
@@ -429,7 +429,7 @@ static void r4k_flush_cache_mm(struct mm_struct *mm) | |||
429 | if (!cpu_has_dc_aliases) | 429 | if (!cpu_has_dc_aliases) |
430 | return; | 430 | return; |
431 | 431 | ||
432 | r4k_on_each_cpu(local_r4k_flush_cache_mm, mm, 1, 1); | 432 | r4k_on_each_cpu(local_r4k_flush_cache_mm, mm, 1); |
433 | } | 433 | } |
434 | 434 | ||
435 | struct flush_cache_page_args { | 435 | struct flush_cache_page_args { |
@@ -521,7 +521,7 @@ static void r4k_flush_cache_page(struct vm_area_struct *vma, | |||
521 | args.addr = addr; | 521 | args.addr = addr; |
522 | args.pfn = pfn; | 522 | args.pfn = pfn; |
523 | 523 | ||
524 | r4k_on_each_cpu(local_r4k_flush_cache_page, &args, 1, 1); | 524 | r4k_on_each_cpu(local_r4k_flush_cache_page, &args, 1); |
525 | } | 525 | } |
526 | 526 | ||
527 | static inline void local_r4k_flush_data_cache_page(void * addr) | 527 | static inline void local_r4k_flush_data_cache_page(void * addr) |
@@ -535,7 +535,7 @@ static void r4k_flush_data_cache_page(unsigned long addr) | |||
535 | local_r4k_flush_data_cache_page((void *)addr); | 535 | local_r4k_flush_data_cache_page((void *)addr); |
536 | else | 536 | else |
537 | r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr, | 537 | r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr, |
538 | 1, 1); | 538 | 1); |
539 | } | 539 | } |
540 | 540 | ||
541 | struct flush_icache_range_args { | 541 | struct flush_icache_range_args { |
@@ -571,7 +571,7 @@ static void r4k_flush_icache_range(unsigned long start, unsigned long end) | |||
571 | args.start = start; | 571 | args.start = start; |
572 | args.end = end; | 572 | args.end = end; |
573 | 573 | ||
574 | r4k_on_each_cpu(local_r4k_flush_icache_range, &args, 1, 1); | 574 | r4k_on_each_cpu(local_r4k_flush_icache_range, &args, 1); |
575 | instruction_hazard(); | 575 | instruction_hazard(); |
576 | } | 576 | } |
577 | 577 | ||
@@ -672,7 +672,7 @@ static void local_r4k_flush_cache_sigtramp(void * arg) | |||
672 | 672 | ||
673 | static void r4k_flush_cache_sigtramp(unsigned long addr) | 673 | static void r4k_flush_cache_sigtramp(unsigned long addr) |
674 | { | 674 | { |
675 | r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr, 1, 1); | 675 | r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr, 1); |
676 | } | 676 | } |
677 | 677 | ||
678 | static void r4k_flush_icache_all(void) | 678 | static void r4k_flush_icache_all(void) |
diff --git a/arch/mips/oprofile/common.c b/arch/mips/oprofile/common.c index b5f6f71b27bc..dd2fbd6645c1 100644 --- a/arch/mips/oprofile/common.c +++ b/arch/mips/oprofile/common.c | |||
@@ -27,7 +27,7 @@ static int op_mips_setup(void) | |||
27 | model->reg_setup(ctr); | 27 | model->reg_setup(ctr); |
28 | 28 | ||
29 | /* Configure the registers on all cpus. */ | 29 | /* Configure the registers on all cpus. */ |
30 | on_each_cpu(model->cpu_setup, NULL, 0, 1); | 30 | on_each_cpu(model->cpu_setup, NULL, 1); |
31 | 31 | ||
32 | return 0; | 32 | return 0; |
33 | } | 33 | } |
@@ -58,7 +58,7 @@ static int op_mips_create_files(struct super_block * sb, struct dentry * root) | |||
58 | 58 | ||
59 | static int op_mips_start(void) | 59 | static int op_mips_start(void) |
60 | { | 60 | { |
61 | on_each_cpu(model->cpu_start, NULL, 0, 1); | 61 | on_each_cpu(model->cpu_start, NULL, 1); |
62 | 62 | ||
63 | return 0; | 63 | return 0; |
64 | } | 64 | } |
@@ -66,7 +66,7 @@ static int op_mips_start(void) | |||
66 | static void op_mips_stop(void) | 66 | static void op_mips_stop(void) |
67 | { | 67 | { |
68 | /* Disable performance monitoring for all counters. */ | 68 | /* Disable performance monitoring for all counters. */ |
69 | on_each_cpu(model->cpu_stop, NULL, 0, 1); | 69 | on_each_cpu(model->cpu_stop, NULL, 1); |
70 | } | 70 | } |
71 | 71 | ||
72 | int __init oprofile_arch_init(struct oprofile_operations *ops) | 72 | int __init oprofile_arch_init(struct oprofile_operations *ops) |
diff --git a/arch/mips/oprofile/op_model_mipsxx.c b/arch/mips/oprofile/op_model_mipsxx.c index b40df7d2cf44..54759f1669d3 100644 --- a/arch/mips/oprofile/op_model_mipsxx.c +++ b/arch/mips/oprofile/op_model_mipsxx.c | |||
@@ -313,7 +313,7 @@ static int __init mipsxx_init(void) | |||
313 | if (!cpu_has_mipsmt_pertccounters) | 313 | if (!cpu_has_mipsmt_pertccounters) |
314 | counters = counters_total_to_per_cpu(counters); | 314 | counters = counters_total_to_per_cpu(counters); |
315 | #endif | 315 | #endif |
316 | on_each_cpu(reset_counters, (void *)(long)counters, 0, 1); | 316 | on_each_cpu(reset_counters, (void *)(long)counters, 1); |
317 | 317 | ||
318 | op_model_mipsxx_ops.num_counters = counters; | 318 | op_model_mipsxx_ops.num_counters = counters; |
319 | switch (current_cpu_type()) { | 319 | switch (current_cpu_type()) { |
@@ -382,7 +382,7 @@ static void mipsxx_exit(void) | |||
382 | int counters = op_model_mipsxx_ops.num_counters; | 382 | int counters = op_model_mipsxx_ops.num_counters; |
383 | 383 | ||
384 | counters = counters_per_cpu_to_total(counters); | 384 | counters = counters_per_cpu_to_total(counters); |
385 | on_each_cpu(reset_counters, (void *)(long)counters, 0, 1); | 385 | on_each_cpu(reset_counters, (void *)(long)counters, 1); |
386 | 386 | ||
387 | perf_irq = save_perf_irq; | 387 | perf_irq = save_perf_irq; |
388 | } | 388 | } |
diff --git a/arch/mips/pmc-sierra/yosemite/prom.c b/arch/mips/pmc-sierra/yosemite/prom.c index 35dc435846a6..cf4c868715ac 100644 --- a/arch/mips/pmc-sierra/yosemite/prom.c +++ b/arch/mips/pmc-sierra/yosemite/prom.c | |||
@@ -64,7 +64,7 @@ static void prom_exit(void) | |||
64 | #ifdef CONFIG_SMP | 64 | #ifdef CONFIG_SMP |
65 | if (smp_processor_id()) | 65 | if (smp_processor_id()) |
66 | /* CPU 1 */ | 66 | /* CPU 1 */ |
67 | smp_call_function(prom_cpu0_exit, NULL, 1, 1); | 67 | smp_call_function(prom_cpu0_exit, NULL, 1); |
68 | #endif | 68 | #endif |
69 | prom_cpu0_exit(NULL); | 69 | prom_cpu0_exit(NULL); |
70 | } | 70 | } |
diff --git a/arch/mips/sibyte/cfe/setup.c b/arch/mips/sibyte/cfe/setup.c index 33fce826f8bf..fd9604d5555a 100644 --- a/arch/mips/sibyte/cfe/setup.c +++ b/arch/mips/sibyte/cfe/setup.c | |||
@@ -74,7 +74,7 @@ static void __noreturn cfe_linux_exit(void *arg) | |||
74 | if (!reboot_smp) { | 74 | if (!reboot_smp) { |
75 | /* Get CPU 0 to do the cfe_exit */ | 75 | /* Get CPU 0 to do the cfe_exit */ |
76 | reboot_smp = 1; | 76 | reboot_smp = 1; |
77 | smp_call_function(cfe_linux_exit, arg, 1, 0); | 77 | smp_call_function(cfe_linux_exit, arg, 0); |
78 | } | 78 | } |
79 | } else { | 79 | } else { |
80 | printk("Passing control back to CFE...\n"); | 80 | printk("Passing control back to CFE...\n"); |
diff --git a/arch/mips/sibyte/sb1250/prom.c b/arch/mips/sibyte/sb1250/prom.c index cf8f6b3de86c..65b1af66b674 100644 --- a/arch/mips/sibyte/sb1250/prom.c +++ b/arch/mips/sibyte/sb1250/prom.c | |||
@@ -66,7 +66,7 @@ static void prom_linux_exit(void) | |||
66 | { | 66 | { |
67 | #ifdef CONFIG_SMP | 67 | #ifdef CONFIG_SMP |
68 | if (smp_processor_id()) { | 68 | if (smp_processor_id()) { |
69 | smp_call_function(prom_cpu0_exit, NULL, 1, 1); | 69 | smp_call_function(prom_cpu0_exit, NULL, 1); |
70 | } | 70 | } |
71 | #endif | 71 | #endif |
72 | while(1); | 72 | while(1); |
diff --git a/arch/mips/sibyte/swarm/Makefile b/arch/mips/sibyte/swarm/Makefile index 1775755a2619..255d692bfa18 100644 --- a/arch/mips/sibyte/swarm/Makefile +++ b/arch/mips/sibyte/swarm/Makefile | |||
@@ -1,3 +1,4 @@ | |||
1 | obj-y := setup.o rtc_xicor1241.o rtc_m41t81.o | 1 | obj-y := setup.o rtc_xicor1241.o rtc_m41t81.o |
2 | 2 | ||
3 | obj-$(CONFIG_I2C_BOARDINFO) += swarm-i2c.o | ||
3 | obj-$(CONFIG_KGDB) += dbg_io.o | 4 | obj-$(CONFIG_KGDB) += dbg_io.o |
diff --git a/arch/mips/sibyte/swarm/swarm-i2c.c b/arch/mips/sibyte/swarm/swarm-i2c.c new file mode 100644 index 000000000000..4282ac9d01d2 --- /dev/null +++ b/arch/mips/sibyte/swarm/swarm-i2c.c | |||
@@ -0,0 +1,37 @@ | |||
1 | /* | ||
2 | * arch/mips/sibyte/swarm/swarm-i2c.c | ||
3 | * | ||
4 | * Broadcom BCM91250A (SWARM), etc. I2C platform setup. | ||
5 | * | ||
6 | * Copyright (c) 2008 Maciej W. Rozycki | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License | ||
10 | * as published by the Free Software Foundation; either version | ||
11 | * 2 of the License, or (at your option) any later version. | ||
12 | */ | ||
13 | |||
14 | #include <linux/i2c.h> | ||
15 | #include <linux/init.h> | ||
16 | #include <linux/kernel.h> | ||
17 | |||
18 | |||
19 | static struct i2c_board_info swarm_i2c_info1[] __initdata = { | ||
20 | { | ||
21 | I2C_BOARD_INFO("m41t81", 0x68), | ||
22 | }, | ||
23 | }; | ||
24 | |||
25 | static int __init swarm_i2c_init(void) | ||
26 | { | ||
27 | int err; | ||
28 | |||
29 | err = i2c_register_board_info(1, swarm_i2c_info1, | ||
30 | ARRAY_SIZE(swarm_i2c_info1)); | ||
31 | if (err < 0) | ||
32 | printk(KERN_ERR | ||
33 | "swarm-i2c: cannot register board I2C devices\n"); | ||
34 | return err; | ||
35 | } | ||
36 | |||
37 | arch_initcall(swarm_i2c_init); | ||
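[Example] The new swarm-i2c.c relies on i2c_register_board_info() only recording the table: the devices are instantiated later, when the adapter for bus 1 registers, which is why an arch_initcall (well before device and driver initcalls) is the right slot. Extending the board description is just another array entry; the second device below is purely hypothetical and not part of the patch:

    static struct i2c_board_info swarm_i2c_info1[] __initdata = {
            {
                    I2C_BOARD_INFO("m41t81", 0x68),
            },
            {
                    /* hypothetical extra sensor, not in the patch */
                    I2C_BOARD_INFO("lm75", 0x48),
            },
    };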
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index bc7a19da6245..a7d4fd353c2b 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig | |||
@@ -199,6 +199,7 @@ endchoice | |||
199 | 199 | ||
200 | config SMP | 200 | config SMP |
201 | bool "Symmetric multi-processing support" | 201 | bool "Symmetric multi-processing support" |
202 | select USE_GENERIC_SMP_HELPERS | ||
202 | ---help--- | 203 | ---help--- |
203 | This enables support for systems with more than one CPU. If you have | 204 | This enables support for systems with more than one CPU. If you have |
204 | a system with only one CPU, like most personal computers, say N. If | 205 | a system with only one CPU, like most personal computers, say N. If |
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c index e10d25d2d9c9..5259d8c20676 100644 --- a/arch/parisc/kernel/cache.c +++ b/arch/parisc/kernel/cache.c | |||
@@ -51,12 +51,12 @@ static struct pdc_btlb_info btlb_info __read_mostly; | |||
51 | void | 51 | void |
52 | flush_data_cache(void) | 52 | flush_data_cache(void) |
53 | { | 53 | { |
54 | on_each_cpu(flush_data_cache_local, NULL, 1, 1); | 54 | on_each_cpu(flush_data_cache_local, NULL, 1); |
55 | } | 55 | } |
56 | void | 56 | void |
57 | flush_instruction_cache(void) | 57 | flush_instruction_cache(void) |
58 | { | 58 | { |
59 | on_each_cpu(flush_instruction_cache_local, NULL, 1, 1); | 59 | on_each_cpu(flush_instruction_cache_local, NULL, 1); |
60 | } | 60 | } |
61 | #endif | 61 | #endif |
62 | 62 | ||
@@ -515,7 +515,7 @@ static void cacheflush_h_tmp_function(void *dummy) | |||
515 | 515 | ||
516 | void flush_cache_all(void) | 516 | void flush_cache_all(void) |
517 | { | 517 | { |
518 | on_each_cpu(cacheflush_h_tmp_function, NULL, 1, 1); | 518 | on_each_cpu(cacheflush_h_tmp_function, NULL, 1); |
519 | } | 519 | } |
520 | 520 | ||
521 | void flush_cache_mm(struct mm_struct *mm) | 521 | void flush_cache_mm(struct mm_struct *mm) |
diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c index 85fc7754ec25..d47f3975c9c6 100644 --- a/arch/parisc/kernel/smp.c +++ b/arch/parisc/kernel/smp.c | |||
@@ -84,19 +84,11 @@ EXPORT_SYMBOL(cpu_possible_map); | |||
84 | 84 | ||
85 | DEFINE_PER_CPU(spinlock_t, ipi_lock) = SPIN_LOCK_UNLOCKED; | 85 | DEFINE_PER_CPU(spinlock_t, ipi_lock) = SPIN_LOCK_UNLOCKED; |
86 | 86 | ||
87 | struct smp_call_struct { | ||
88 | void (*func) (void *info); | ||
89 | void *info; | ||
90 | long wait; | ||
91 | atomic_t unstarted_count; | ||
92 | atomic_t unfinished_count; | ||
93 | }; | ||
94 | static volatile struct smp_call_struct *smp_call_function_data; | ||
95 | |||
96 | enum ipi_message_type { | 87 | enum ipi_message_type { |
97 | IPI_NOP=0, | 88 | IPI_NOP=0, |
98 | IPI_RESCHEDULE=1, | 89 | IPI_RESCHEDULE=1, |
99 | IPI_CALL_FUNC, | 90 | IPI_CALL_FUNC, |
91 | IPI_CALL_FUNC_SINGLE, | ||
100 | IPI_CPU_START, | 92 | IPI_CPU_START, |
101 | IPI_CPU_STOP, | 93 | IPI_CPU_STOP, |
102 | IPI_CPU_TEST | 94 | IPI_CPU_TEST |
@@ -187,33 +179,12 @@ ipi_interrupt(int irq, void *dev_id) | |||
187 | 179 | ||
188 | case IPI_CALL_FUNC: | 180 | case IPI_CALL_FUNC: |
189 | smp_debug(100, KERN_DEBUG "CPU%d IPI_CALL_FUNC\n", this_cpu); | 181 | smp_debug(100, KERN_DEBUG "CPU%d IPI_CALL_FUNC\n", this_cpu); |
190 | { | 182 | generic_smp_call_function_interrupt(); |
191 | volatile struct smp_call_struct *data; | 183 | break; |
192 | void (*func)(void *info); | 184 | |
193 | void *info; | 185 | case IPI_CALL_FUNC_SINGLE: |
194 | int wait; | 186 | smp_debug(100, KERN_DEBUG "CPU%d IPI_CALL_FUNC_SINGLE\n", this_cpu); |
195 | 187 | generic_smp_call_function_single_interrupt(); | |
196 | data = smp_call_function_data; | ||
197 | func = data->func; | ||
198 | info = data->info; | ||
199 | wait = data->wait; | ||
200 | |||
201 | mb(); | ||
202 | atomic_dec ((atomic_t *)&data->unstarted_count); | ||
203 | |||
204 | /* At this point, *data can't | ||
205 | * be relied upon. | ||
206 | */ | ||
207 | |||
208 | (*func)(info); | ||
209 | |||
210 | /* Notify the sending CPU that the | ||
211 | * task is done. | ||
212 | */ | ||
213 | mb(); | ||
214 | if (wait) | ||
215 | atomic_dec ((atomic_t *)&data->unfinished_count); | ||
216 | } | ||
217 | break; | 188 | break; |
218 | 189 | ||
219 | case IPI_CPU_START: | 190 | case IPI_CPU_START: |
@@ -256,6 +227,14 @@ ipi_send(int cpu, enum ipi_message_type op) | |||
256 | spin_unlock_irqrestore(lock, flags); | 227 | spin_unlock_irqrestore(lock, flags); |
257 | } | 228 | } |
258 | 229 | ||
230 | static void | ||
231 | send_IPI_mask(cpumask_t mask, enum ipi_message_type op) | ||
232 | { | ||
233 | int cpu; | ||
234 | |||
235 | for_each_cpu_mask(cpu, mask) | ||
236 | ipi_send(cpu, op); | ||
237 | } | ||
259 | 238 | ||
260 | static inline void | 239 | static inline void |
261 | send_IPI_single(int dest_cpu, enum ipi_message_type op) | 240 | send_IPI_single(int dest_cpu, enum ipi_message_type op) |
@@ -295,86 +274,15 @@ smp_send_all_nop(void) | |||
295 | send_IPI_allbutself(IPI_NOP); | 274 | send_IPI_allbutself(IPI_NOP); |
296 | } | 275 | } |
297 | 276 | ||
298 | 277 | void arch_send_call_function_ipi(cpumask_t mask) | |
299 | /** | ||
300 | * Run a function on all other CPUs. | ||
301 | * <func> The function to run. This must be fast and non-blocking. | ||
302 | * <info> An arbitrary pointer to pass to the function. | ||
303 | * <retry> If true, keep retrying until ready. | ||
304 | * <wait> If true, wait until function has completed on other CPUs. | ||
305 | * [RETURNS] 0 on success, else a negative status code. | ||
306 | * | ||
307 | * Does not return until remote CPUs are nearly ready to execute <func> | ||
308 | * or have executed. | ||
309 | */ | ||
310 | |||
311 | int | ||
312 | smp_call_function (void (*func) (void *info), void *info, int retry, int wait) | ||
313 | { | 278 | { |
314 | struct smp_call_struct data; | 279 | send_IPI_mask(mask, IPI_CALL_FUNC); |
315 | unsigned long timeout; | ||
316 | static DEFINE_SPINLOCK(lock); | ||
317 | int retries = 0; | ||
318 | |||
319 | if (num_online_cpus() < 2) | ||
320 | return 0; | ||
321 | |||
322 | /* Can deadlock when called with interrupts disabled */ | ||
323 | WARN_ON(irqs_disabled()); | ||
324 | |||
325 | /* can also deadlock if IPIs are disabled */ | ||
326 | WARN_ON((get_eiem() & (1UL<<(CPU_IRQ_MAX - IPI_IRQ))) == 0); | ||
327 | |||
328 | |||
329 | data.func = func; | ||
330 | data.info = info; | ||
331 | data.wait = wait; | ||
332 | atomic_set(&data.unstarted_count, num_online_cpus() - 1); | ||
333 | atomic_set(&data.unfinished_count, num_online_cpus() - 1); | ||
334 | |||
335 | if (retry) { | ||
336 | spin_lock (&lock); | ||
337 | while (smp_call_function_data != 0) | ||
338 | barrier(); | ||
339 | } | ||
340 | else { | ||
341 | spin_lock (&lock); | ||
342 | if (smp_call_function_data) { | ||
343 | spin_unlock (&lock); | ||
344 | return -EBUSY; | ||
345 | } | ||
346 | } | ||
347 | |||
348 | smp_call_function_data = &data; | ||
349 | spin_unlock (&lock); | ||
350 | |||
351 | /* Send a message to all other CPUs and wait for them to respond */ | ||
352 | send_IPI_allbutself(IPI_CALL_FUNC); | ||
353 | |||
354 | retry: | ||
355 | /* Wait for response */ | ||
356 | timeout = jiffies + HZ; | ||
357 | while ( (atomic_read (&data.unstarted_count) > 0) && | ||
358 | time_before (jiffies, timeout) ) | ||
359 | barrier (); | ||
360 | |||
361 | if (atomic_read (&data.unstarted_count) > 0) { | ||
362 | printk(KERN_CRIT "SMP CALL FUNCTION TIMED OUT! (cpu=%d), try %d\n", | ||
363 | smp_processor_id(), ++retries); | ||
364 | goto retry; | ||
365 | } | ||
366 | /* We either got one or timed out. Release the lock */ | ||
367 | |||
368 | mb(); | ||
369 | smp_call_function_data = NULL; | ||
370 | |||
371 | while (wait && atomic_read (&data.unfinished_count) > 0) | ||
372 | barrier (); | ||
373 | |||
374 | return 0; | ||
375 | } | 280 | } |
376 | 281 | ||
377 | EXPORT_SYMBOL(smp_call_function); | 282 | void arch_send_call_function_single_ipi(int cpu) |
283 | { | ||
284 | send_IPI_single(cpu, IPI_CALL_FUNC_SINGLE); | ||
285 | } | ||
378 | 286 | ||
379 | /* | 287 | /* |
380 | * Flush all other CPU's tlb and then mine. Do this with on_each_cpu() | 288 | * Flush all other CPU's tlb and then mine. Do this with on_each_cpu() |
@@ -384,7 +292,7 @@ EXPORT_SYMBOL(smp_call_function); | |||
384 | void | 292 | void |
385 | smp_flush_tlb_all(void) | 293 | smp_flush_tlb_all(void) |
386 | { | 294 | { |
387 | on_each_cpu(flush_tlb_all_local, NULL, 1, 1); | 295 | on_each_cpu(flush_tlb_all_local, NULL, 1); |
388 | } | 296 | } |
389 | 297 | ||
390 | /* | 298 | /* |
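[Example] The parisc hunk doubles as a template for adding an IPI message type end to end: one enum value, one sender, one dispatcher case. Collected from the patch into a single sketch (the case fragment lives inside ipi_interrupt()'s message switch and is not compilable standalone):

    enum ipi_message_type {
            IPI_NOP = 0,
            IPI_RESCHEDULE = 1,
            IPI_CALL_FUNC,
            IPI_CALL_FUNC_SINGLE,   /* new: targeted cross-call */
            IPI_CPU_START,
            IPI_CPU_STOP,
            IPI_CPU_TEST
    };

    void arch_send_call_function_single_ipi(int cpu)
    {
            send_IPI_single(cpu, IPI_CALL_FUNC_SINGLE);
    }

    /* inside ipi_interrupt()'s message switch: */
    case IPI_CALL_FUNC_SINGLE:
            generic_smp_call_function_single_interrupt();
            break;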
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c index ce0da689a89d..b4d6c8777ed0 100644 --- a/arch/parisc/mm/init.c +++ b/arch/parisc/mm/init.c | |||
@@ -1053,7 +1053,7 @@ void flush_tlb_all(void) | |||
1053 | do_recycle++; | 1053 | do_recycle++; |
1054 | } | 1054 | } |
1055 | spin_unlock(&sid_lock); | 1055 | spin_unlock(&sid_lock); |
1056 | on_each_cpu(flush_tlb_all_local, NULL, 1, 1); | 1056 | on_each_cpu(flush_tlb_all_local, NULL, 1); |
1057 | if (do_recycle) { | 1057 | if (do_recycle) { |
1058 | spin_lock(&sid_lock); | 1058 | spin_lock(&sid_lock); |
1059 | recycle_sids(recycle_ndirty,recycle_dirty_array); | 1059 | recycle_sids(recycle_ndirty,recycle_dirty_array); |
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index a5e9912e2d37..20eacf2a8424 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig | |||
@@ -111,6 +111,7 @@ config PPC | |||
111 | select HAVE_KPROBES | 111 | select HAVE_KPROBES |
112 | select HAVE_KRETPROBES | 112 | select HAVE_KRETPROBES |
113 | select HAVE_LMB | 113 | select HAVE_LMB |
114 | select USE_GENERIC_SMP_HELPERS if SMP | ||
114 | select HAVE_OPROFILE | 115 | select HAVE_OPROFILE |
115 | 116 | ||
116 | config EARLY_PRINTK | 117 | config EARLY_PRINTK |
diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c index 704375bda73a..b732b5f8e356 100644 --- a/arch/powerpc/kernel/machine_kexec_64.c +++ b/arch/powerpc/kernel/machine_kexec_64.c | |||
@@ -172,7 +172,7 @@ static void kexec_prepare_cpus(void) | |||
172 | { | 172 | { |
173 | int my_cpu, i, notified=-1; | 173 | int my_cpu, i, notified=-1; |
174 | 174 | ||
175 | smp_call_function(kexec_smp_down, NULL, 0, /* wait */0); | 175 | smp_call_function(kexec_smp_down, NULL, /* wait */0); |
176 | my_cpu = get_cpu(); | 176 | my_cpu = get_cpu(); |
177 | 177 | ||
178 | /* check the others cpus are now down (via paca hw cpu id == -1) */ | 178 | /* check the others cpus are now down (via paca hw cpu id == -1) */ |
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c index 34843c318419..647f3e8677dc 100644 --- a/arch/powerpc/kernel/rtas.c +++ b/arch/powerpc/kernel/rtas.c | |||
@@ -747,7 +747,7 @@ static int rtas_ibm_suspend_me(struct rtas_args *args) | |||
747 | /* Call function on all CPUs. One of us will make the | 747 | /* Call function on all CPUs. One of us will make the |
748 | * rtas call | 748 | * rtas call |
749 | */ | 749 | */ |
750 | if (on_each_cpu(rtas_percpu_suspend_me, &data, 1, 0)) | 750 | if (on_each_cpu(rtas_percpu_suspend_me, &data, 0)) |
751 | data.error = -EINVAL; | 751 | data.error = -EINVAL; |
752 | 752 | ||
753 | wait_for_completion(&done); | 753 | wait_for_completion(&done); |
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index 1457aa0a08f1..5191b46a611e 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c | |||
@@ -72,12 +72,8 @@ struct smp_ops_t *smp_ops; | |||
72 | 72 | ||
73 | static volatile unsigned int cpu_callin_map[NR_CPUS]; | 73 | static volatile unsigned int cpu_callin_map[NR_CPUS]; |
74 | 74 | ||
75 | void smp_call_function_interrupt(void); | ||
76 | |||
77 | int smt_enabled_at_boot = 1; | 75 | int smt_enabled_at_boot = 1; |
78 | 76 | ||
79 | static int ipi_fail_ok; | ||
80 | |||
81 | static void (*crash_ipi_function_ptr)(struct pt_regs *) = NULL; | 77 | static void (*crash_ipi_function_ptr)(struct pt_regs *) = NULL; |
82 | 78 | ||
83 | #ifdef CONFIG_PPC64 | 79 | #ifdef CONFIG_PPC64 |
@@ -99,12 +95,15 @@ void smp_message_recv(int msg) | |||
99 | { | 95 | { |
100 | switch(msg) { | 96 | switch(msg) { |
101 | case PPC_MSG_CALL_FUNCTION: | 97 | case PPC_MSG_CALL_FUNCTION: |
102 | smp_call_function_interrupt(); | 98 | generic_smp_call_function_interrupt(); |
103 | break; | 99 | break; |
104 | case PPC_MSG_RESCHEDULE: | 100 | case PPC_MSG_RESCHEDULE: |
105 | /* XXX Do we have to do this? */ | 101 | /* XXX Do we have to do this? */ |
106 | set_need_resched(); | 102 | set_need_resched(); |
107 | break; | 103 | break; |
104 | case PPC_MSG_CALL_FUNC_SINGLE: | ||
105 | generic_smp_call_function_single_interrupt(); | ||
106 | break; | ||
108 | case PPC_MSG_DEBUGGER_BREAK: | 107 | case PPC_MSG_DEBUGGER_BREAK: |
109 | if (crash_ipi_function_ptr) { | 108 | if (crash_ipi_function_ptr) { |
110 | crash_ipi_function_ptr(get_irq_regs()); | 109 | crash_ipi_function_ptr(get_irq_regs()); |
@@ -128,6 +127,19 @@ void smp_send_reschedule(int cpu) | |||
128 | smp_ops->message_pass(cpu, PPC_MSG_RESCHEDULE); | 127 | smp_ops->message_pass(cpu, PPC_MSG_RESCHEDULE); |
129 | } | 128 | } |
130 | 129 | ||
130 | void arch_send_call_function_single_ipi(int cpu) | ||
131 | { | ||
132 | smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNC_SINGLE); | ||
133 | } | ||
134 | |||
135 | void arch_send_call_function_ipi(cpumask_t mask) | ||
136 | { | ||
137 | unsigned int cpu; | ||
138 | |||
139 | for_each_cpu_mask(cpu, mask) | ||
140 | smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNCTION); | ||
141 | } | ||
142 | |||
131 | #ifdef CONFIG_DEBUGGER | 143 | #ifdef CONFIG_DEBUGGER |
132 | void smp_send_debugger_break(int cpu) | 144 | void smp_send_debugger_break(int cpu) |
133 | { | 145 | { |
@@ -154,215 +166,9 @@ static void stop_this_cpu(void *dummy) | |||
154 | ; | 166 | ; |
155 | } | 167 | } |
156 | 168 | ||
157 | /* | ||
158 | * Structure and data for smp_call_function(). This is designed to minimise | ||
159 | * static memory requirements. It also looks cleaner. | ||
160 | * Stolen from the i386 version. | ||
161 | */ | ||
162 | static __cacheline_aligned_in_smp DEFINE_SPINLOCK(call_lock); | ||
163 | |||
164 | static struct call_data_struct { | ||
165 | void (*func) (void *info); | ||
166 | void *info; | ||
167 | atomic_t started; | ||
168 | atomic_t finished; | ||
169 | int wait; | ||
170 | } *call_data; | ||
171 | |||
172 | /* delay of at least 8 seconds */ | ||
173 | #define SMP_CALL_TIMEOUT 8 | ||
174 | |||
175 | /* | ||
176 | * These functions send a 'generic call function' IPI to other online | ||
177 | * CPUS in the system. | ||
178 | * | ||
179 | * [SUMMARY] Run a function on other CPUs. | ||
180 | * <func> The function to run. This must be fast and non-blocking. | ||
181 | * <info> An arbitrary pointer to pass to the function. | ||
182 | * <nonatomic> currently unused. | ||
183 | * <wait> If true, wait (atomically) until function has completed on other CPUs. | ||
184 | * [RETURNS] 0 on success, else a negative status code. Does not return until | ||
185 | * remote CPUs are nearly ready to execute <<func>> or are or have executed. | ||
186 | * <map> is a cpu map of the cpus to send IPI to. | ||
187 | * | ||
188 | * You must not call this function with disabled interrupts or from a | ||
189 | * hardware interrupt handler or from a bottom half handler. | ||
190 | */ | ||
191 | static int __smp_call_function_map(void (*func) (void *info), void *info, | ||
192 | int nonatomic, int wait, cpumask_t map) | ||
193 | { | ||
194 | struct call_data_struct data; | ||
195 | int ret = -1, num_cpus; | ||
196 | int cpu; | ||
197 | u64 timeout; | ||
198 | |||
199 | if (unlikely(smp_ops == NULL)) | ||
200 | return ret; | ||
201 | |||
202 | data.func = func; | ||
203 | data.info = info; | ||
204 | atomic_set(&data.started, 0); | ||
205 | data.wait = wait; | ||
206 | if (wait) | ||
207 | atomic_set(&data.finished, 0); | ||
208 | |||
209 | /* remove 'self' from the map */ | ||
210 | if (cpu_isset(smp_processor_id(), map)) | ||
211 | cpu_clear(smp_processor_id(), map); | ||
212 | |||
213 | /* sanity check the map, remove any non-online processors. */ | ||
214 | cpus_and(map, map, cpu_online_map); | ||
215 | |||
216 | num_cpus = cpus_weight(map); | ||
217 | if (!num_cpus) | ||
218 | goto done; | ||
219 | |||
220 | call_data = &data; | ||
221 | smp_wmb(); | ||
222 | /* Send a message to all CPUs in the map */ | ||
223 | for_each_cpu_mask(cpu, map) | ||
224 | smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNCTION); | ||
225 | |||
226 | timeout = get_tb() + (u64) SMP_CALL_TIMEOUT * tb_ticks_per_sec; | ||
227 | |||
228 | /* Wait for indication that they have received the message */ | ||
229 | while (atomic_read(&data.started) != num_cpus) { | ||
230 | HMT_low(); | ||
231 | if (get_tb() >= timeout) { | ||
232 | printk("smp_call_function on cpu %d: other cpus not " | ||
233 | "responding (%d)\n", smp_processor_id(), | ||
234 | atomic_read(&data.started)); | ||
235 | if (!ipi_fail_ok) | ||
236 | debugger(NULL); | ||
237 | goto out; | ||
238 | } | ||
239 | } | ||
240 | |||
241 | /* optionally wait for the CPUs to complete */ | ||
242 | if (wait) { | ||
243 | while (atomic_read(&data.finished) != num_cpus) { | ||
244 | HMT_low(); | ||
245 | if (get_tb() >= timeout) { | ||
246 | printk("smp_call_function on cpu %d: other " | ||
247 | "cpus not finishing (%d/%d)\n", | ||
248 | smp_processor_id(), | ||
249 | atomic_read(&data.finished), | ||
250 | atomic_read(&data.started)); | ||
251 | debugger(NULL); | ||
252 | goto out; | ||
253 | } | ||
254 | } | ||
255 | } | ||
256 | |||
257 | done: | ||
258 | ret = 0; | ||
259 | |||
260 | out: | ||
261 | call_data = NULL; | ||
262 | HMT_medium(); | ||
263 | return ret; | ||
264 | } | ||
265 | |||
266 | static int __smp_call_function(void (*func)(void *info), void *info, | ||
267 | int nonatomic, int wait) | ||
268 | { | ||
269 | int ret; | ||
270 | spin_lock(&call_lock); | ||
271 | ret =__smp_call_function_map(func, info, nonatomic, wait, | ||
272 | cpu_online_map); | ||
273 | spin_unlock(&call_lock); | ||
274 | return ret; | ||
275 | } | ||
276 | |||
277 | int smp_call_function(void (*func) (void *info), void *info, int nonatomic, | ||
278 | int wait) | ||
279 | { | ||
280 | /* Can deadlock when called with interrupts disabled */ | ||
281 | WARN_ON(irqs_disabled()); | ||
282 | |||
283 | return __smp_call_function(func, info, nonatomic, wait); | ||
284 | } | ||
285 | EXPORT_SYMBOL(smp_call_function); | ||
286 | |||
287 | int smp_call_function_single(int cpu, void (*func) (void *info), void *info, | ||
288 | int nonatomic, int wait) | ||
289 | { | ||
290 | cpumask_t map = CPU_MASK_NONE; | ||
291 | int ret = 0; | ||
292 | |||
293 | /* Can deadlock when called with interrupts disabled */ | ||
294 | WARN_ON(irqs_disabled()); | ||
295 | |||
296 | if (!cpu_online(cpu)) | ||
297 | return -EINVAL; | ||
298 | |||
299 | cpu_set(cpu, map); | ||
300 | if (cpu != get_cpu()) { | ||
301 | spin_lock(&call_lock); | ||
302 | ret = __smp_call_function_map(func, info, nonatomic, wait, map); | ||
303 | spin_unlock(&call_lock); | ||
304 | } else { | ||
305 | local_irq_disable(); | ||
306 | func(info); | ||
307 | local_irq_enable(); | ||
308 | } | ||
309 | put_cpu(); | ||
310 | return ret; | ||
311 | } | ||
312 | EXPORT_SYMBOL(smp_call_function_single); | ||
313 | |||
314 | void smp_send_stop(void) | 169 | void smp_send_stop(void) |
315 | { | 170 | { |
316 | int nolock; | 171 | smp_call_function(stop_this_cpu, NULL, 0); |
317 | |||
318 | /* It's OK to fail sending the IPI, since the alternative is to | ||
319 | * be stuck forever waiting on the other CPU to take the interrupt. | ||
320 | * | ||
321 | * It's better to at least continue and go through reboot, since this | ||
322 | * function is usually called at panic or reboot time in the first | ||
323 | * place. | ||
324 | */ | ||
325 | ipi_fail_ok = 1; | ||
326 | |||
327 | /* Don't deadlock in case we got called through panic */ | ||
328 | nolock = !spin_trylock(&call_lock); | ||
329 | __smp_call_function_map(stop_this_cpu, NULL, 1, 0, cpu_online_map); | ||
330 | if (!nolock) | ||
331 | spin_unlock(&call_lock); | ||
332 | } | ||
333 | |||
334 | void smp_call_function_interrupt(void) | ||
335 | { | ||
336 | void (*func) (void *info); | ||
337 | void *info; | ||
338 | int wait; | ||
339 | |||
340 | /* call_data will be NULL if the sender timed out while | ||
341 | * waiting on us to receive the call. | ||
342 | */ | ||
343 | if (!call_data) | ||
344 | return; | ||
345 | |||
346 | func = call_data->func; | ||
347 | info = call_data->info; | ||
348 | wait = call_data->wait; | ||
349 | |||
350 | if (!wait) | ||
351 | smp_mb__before_atomic_inc(); | ||
352 | |||
353 | /* | ||
354 | * Notify initiating CPU that I've grabbed the data and am | ||
355 | * about to execute the function | ||
356 | */ | ||
357 | atomic_inc(&call_data->started); | ||
358 | /* | ||
359 | * At this point the info structure may be out of scope unless wait==1 | ||
360 | */ | ||
361 | (*func)(info); | ||
362 | if (wait) { | ||
363 | smp_mb__before_atomic_inc(); | ||
364 | atomic_inc(&call_data->finished); | ||
365 | } | ||
366 | } | 172 | } |
367 | 173 | ||
368 | extern struct gettimeofday_struct do_gtod; | 174 | extern struct gettimeofday_struct do_gtod; |
@@ -596,9 +402,9 @@ int __devinit start_secondary(void *unused) | |||
596 | 402 | ||
597 | secondary_cpu_time_init(); | 403 | secondary_cpu_time_init(); |
598 | 404 | ||
599 | spin_lock(&call_lock); | 405 | ipi_call_lock(); |
600 | cpu_set(cpu, cpu_online_map); | 406 | cpu_set(cpu, cpu_online_map); |
601 | spin_unlock(&call_lock); | 407 | ipi_call_unlock(); |
602 | 408 | ||
603 | local_irq_enable(); | 409 | local_irq_enable(); |
604 | 410 | ||
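[Example] The one powerpc hunk that is not mechanical: start_secondary() used to take the arch-private call_lock while marking the CPU online, so an in-flight smp_call_function() could not observe a half-joined CPU. The generic code exports the equivalent guard, and the patch switches to it; a fragment showing the guarded window:

    /* in powerpc's start_secondary(), after per-CPU init: */
    ipi_call_lock();                /* hold off broadcast cross-calls */
    cpu_set(cpu, cpu_online_map);   /* now visible as an IPI target */
    ipi_call_unlock();
    local_irq_enable();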
diff --git a/arch/powerpc/kernel/stacktrace.c b/arch/powerpc/kernel/stacktrace.c index 962944038430..3cf0d94ba340 100644 --- a/arch/powerpc/kernel/stacktrace.c +++ b/arch/powerpc/kernel/stacktrace.c | |||
@@ -12,6 +12,7 @@ | |||
12 | 12 | ||
13 | #include <linux/sched.h> | 13 | #include <linux/sched.h> |
14 | #include <linux/stacktrace.h> | 14 | #include <linux/stacktrace.h> |
15 | #include <linux/module.h> | ||
15 | #include <asm/ptrace.h> | 16 | #include <asm/ptrace.h> |
16 | 17 | ||
17 | /* | 18 | /* |
@@ -44,3 +45,4 @@ void save_stack_trace(struct stack_trace *trace) | |||
44 | sp = newsp; | 45 | sp = newsp; |
45 | } | 46 | } |
46 | } | 47 | } |
48 | EXPORT_SYMBOL_GPL(save_stack_trace); | ||
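[Example] As with the mips stacktrace.c hunk earlier, save_stack_trace() gains an EXPORT_SYMBOL_GPL so modules can capture backtraces. A hedged usage sketch against the stack_trace API of this era (the buffer size of 16 entries is arbitrary):

    #include <linux/module.h>
    #include <linux/stacktrace.h>

    static unsigned long entries[16];

    static void dump_current_stack(void)
    {
            struct stack_trace trace = {
                    .entries     = entries,
                    .max_entries = ARRAY_SIZE(entries),
                    .skip        = 0,       /* keep innermost frames */
            };

            save_stack_trace(&trace);
            print_stack_trace(&trace, 0);   /* 0 = no extra indent */
    }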
diff --git a/arch/powerpc/kernel/tau_6xx.c b/arch/powerpc/kernel/tau_6xx.c index 368a4934f7ee..c3a56d65c5a9 100644 --- a/arch/powerpc/kernel/tau_6xx.c +++ b/arch/powerpc/kernel/tau_6xx.c | |||
@@ -192,7 +192,7 @@ static void tau_timeout_smp(unsigned long unused) | |||
192 | 192 | ||
193 | /* schedule ourselves to be run again */ | 193 | /* schedule ourselves to be run again */ |
194 | mod_timer(&tau_timer, jiffies + shrink_timer) ; | 194 | mod_timer(&tau_timer, jiffies + shrink_timer) ; |
195 | on_each_cpu(tau_timeout, NULL, 1, 0); | 195 | on_each_cpu(tau_timeout, NULL, 0); |
196 | } | 196 | } |
197 | 197 | ||
198 | /* | 198 | /* |
@@ -234,7 +234,7 @@ int __init TAU_init(void) | |||
234 | tau_timer.expires = jiffies + shrink_timer; | 234 | tau_timer.expires = jiffies + shrink_timer; |
235 | add_timer(&tau_timer); | 235 | add_timer(&tau_timer); |
236 | 236 | ||
237 | on_each_cpu(TAU_init_smp, NULL, 1, 0); | 237 | on_each_cpu(TAU_init_smp, NULL, 0); |
238 | 238 | ||
239 | printk("Thermal assist unit "); | 239 | printk("Thermal assist unit "); |
240 | #ifdef CONFIG_TAU_INT | 240 | #ifdef CONFIG_TAU_INT |
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 73401e83739a..f1a38a6c1e2d 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c | |||
@@ -322,7 +322,7 @@ void snapshot_timebases(void) | |||
322 | { | 322 | { |
323 | if (!cpu_has_feature(CPU_FTR_PURR)) | 323 | if (!cpu_has_feature(CPU_FTR_PURR)) |
324 | return; | 324 | return; |
325 | on_each_cpu(snapshot_tb_and_purr, NULL, 0, 1); | 325 | on_each_cpu(snapshot_tb_and_purr, NULL, 1); |
326 | } | 326 | } |
327 | 327 | ||
328 | /* | 328 | /* |
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c index ad928edafb0a..2bd12d965db1 100644 --- a/arch/powerpc/mm/slice.c +++ b/arch/powerpc/mm/slice.c | |||
@@ -218,7 +218,7 @@ static void slice_convert(struct mm_struct *mm, struct slice_mask mask, int psiz | |||
218 | mb(); | 218 | mb(); |
219 | 219 | ||
220 | /* XXX this is sub-optimal but will do for now */ | 220 | /* XXX this is sub-optimal but will do for now */ |
221 | on_each_cpu(slice_flush_segments, mm, 0, 1); | 221 | on_each_cpu(slice_flush_segments, mm, 1); |
222 | #ifdef CONFIG_SPU_BASE | 222 | #ifdef CONFIG_SPU_BASE |
223 | spu_flush_all_slbs(mm); | 223 | spu_flush_all_slbs(mm); |
224 | #endif | 224 | #endif |
diff --git a/arch/powerpc/mm/tlb_64.c b/arch/powerpc/mm/tlb_64.c index e2d867ce1c7e..69ad829a7fa3 100644 --- a/arch/powerpc/mm/tlb_64.c +++ b/arch/powerpc/mm/tlb_64.c | |||
@@ -66,7 +66,7 @@ static void pgtable_free_now(pgtable_free_t pgf) | |||
66 | { | 66 | { |
67 | pte_freelist_forced_free++; | 67 | pte_freelist_forced_free++; |
68 | 68 | ||
69 | smp_call_function(pte_free_smp_sync, NULL, 0, 1); | 69 | smp_call_function(pte_free_smp_sync, NULL, 1); |
70 | 70 | ||
71 | pgtable_free(pgf); | 71 | pgtable_free(pgf); |
72 | } | 72 | } |
diff --git a/arch/powerpc/oprofile/common.c b/arch/powerpc/oprofile/common.c index 4908dc98f9ca..17807acb05d9 100644 --- a/arch/powerpc/oprofile/common.c +++ b/arch/powerpc/oprofile/common.c | |||
@@ -65,7 +65,7 @@ static int op_powerpc_setup(void) | |||
65 | 65 | ||
66 | /* Configure the registers on all cpus. If an error occurs on one | 66 | /* Configure the registers on all cpus. If an error occurs on one |
67 | * of the cpus, op_per_cpu_rc will be set to the error */ | 67 | * of the cpus, op_per_cpu_rc will be set to the error */ |
68 | on_each_cpu(op_powerpc_cpu_setup, NULL, 0, 1); | 68 | on_each_cpu(op_powerpc_cpu_setup, NULL, 1); |
69 | 69 | ||
70 | out: if (op_per_cpu_rc) { | 70 | out: if (op_per_cpu_rc) { |
71 | /* error on setup release the performance counter hardware */ | 71 | /* error on setup release the performance counter hardware */ |
@@ -100,7 +100,7 @@ static int op_powerpc_start(void) | |||
100 | if (model->global_start) | 100 | if (model->global_start) |
101 | return model->global_start(ctr); | 101 | return model->global_start(ctr); |
102 | if (model->start) { | 102 | if (model->start) { |
103 | on_each_cpu(op_powerpc_cpu_start, NULL, 0, 1); | 103 | on_each_cpu(op_powerpc_cpu_start, NULL, 1); |
104 | return op_per_cpu_rc; | 104 | return op_per_cpu_rc; |
105 | } | 105 | } |
106 | return -EIO; /* No start function is defined for this | 106 | return -EIO; /* No start function is defined for this |
@@ -115,7 +115,7 @@ static inline void op_powerpc_cpu_stop(void *dummy) | |||
115 | static void op_powerpc_stop(void) | 115 | static void op_powerpc_stop(void) |
116 | { | 116 | { |
117 | if (model->stop) | 117 | if (model->stop) |
118 | on_each_cpu(op_powerpc_cpu_stop, NULL, 0, 1); | 118 | on_each_cpu(op_powerpc_cpu_stop, NULL, 1); |
119 | if (model->global_stop) | 119 | if (model->global_stop) |
120 | model->global_stop(); | 120 | model->global_stop(); |
121 | } | 121 | } |
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c index 5bf7df146022..2d5bb22d6c09 100644 --- a/arch/powerpc/platforms/cell/interrupt.c +++ b/arch/powerpc/platforms/cell/interrupt.c | |||
@@ -218,6 +218,7 @@ void iic_request_IPIs(void) | |||
218 | { | 218 | { |
219 | iic_request_ipi(PPC_MSG_CALL_FUNCTION, "IPI-call"); | 219 | iic_request_ipi(PPC_MSG_CALL_FUNCTION, "IPI-call"); |
220 | iic_request_ipi(PPC_MSG_RESCHEDULE, "IPI-resched"); | 220 | iic_request_ipi(PPC_MSG_RESCHEDULE, "IPI-resched"); |
221 | iic_request_ipi(PPC_MSG_CALL_FUNC_SINGLE, "IPI-call-single"); | ||
221 | #ifdef CONFIG_DEBUGGER | 222 | #ifdef CONFIG_DEBUGGER |
222 | iic_request_ipi(PPC_MSG_DEBUGGER_BREAK, "IPI-debug"); | 223 | iic_request_ipi(PPC_MSG_DEBUGGER_BREAK, "IPI-debug"); |
223 | #endif /* CONFIG_DEBUGGER */ | 224 | #endif /* CONFIG_DEBUGGER */ |
diff --git a/arch/powerpc/platforms/ps3/smp.c b/arch/powerpc/platforms/ps3/smp.c index f0b12f212363..a0927a3bacb7 100644 --- a/arch/powerpc/platforms/ps3/smp.c +++ b/arch/powerpc/platforms/ps3/smp.c | |||
@@ -105,9 +105,10 @@ static void __init ps3_smp_setup_cpu(int cpu) | |||
105 | * to index needs to be setup. | 105 | * to index needs to be setup. |
106 | */ | 106 | */ |
107 | 107 | ||
108 | BUILD_BUG_ON(PPC_MSG_CALL_FUNCTION != 0); | 108 | BUILD_BUG_ON(PPC_MSG_CALL_FUNCTION != 0); |
109 | BUILD_BUG_ON(PPC_MSG_RESCHEDULE != 1); | 109 | BUILD_BUG_ON(PPC_MSG_RESCHEDULE != 1); |
110 | BUILD_BUG_ON(PPC_MSG_DEBUGGER_BREAK != 3); | 110 | BUILD_BUG_ON(PPC_MSG_CALL_FUNC_SINGLE != 2); |
111 | BUILD_BUG_ON(PPC_MSG_DEBUGGER_BREAK != 3); | ||
111 | 112 | ||
112 | for (i = 0; i < MSG_COUNT; i++) { | 113 | for (i = 0; i < MSG_COUNT; i++) { |
113 | result = ps3_event_receive_port_setup(cpu, &virqs[i]); | 114 | result = ps3_event_receive_port_setup(cpu, &virqs[i]); |
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c index ebebc28fe895..0fc830f576f5 100644 --- a/arch/powerpc/platforms/pseries/xics.c +++ b/arch/powerpc/platforms/pseries/xics.c | |||
@@ -383,13 +383,11 @@ static irqreturn_t xics_ipi_dispatch(int cpu) | |||
383 | mb(); | 383 | mb(); |
384 | smp_message_recv(PPC_MSG_RESCHEDULE); | 384 | smp_message_recv(PPC_MSG_RESCHEDULE); |
385 | } | 385 | } |
386 | #if 0 | 386 | if (test_and_clear_bit(PPC_MSG_CALL_FUNC_SINGLE, |
387 | if (test_and_clear_bit(PPC_MSG_MIGRATE_TASK, | ||
388 | &xics_ipi_message[cpu].value)) { | 387 | &xics_ipi_message[cpu].value)) { |
389 | mb(); | 388 | mb(); |
390 | smp_message_recv(PPC_MSG_MIGRATE_TASK); | 389 | smp_message_recv(PPC_MSG_CALL_FUNC_SINGLE); |
391 | } | 390 | } |
392 | #endif | ||
393 | #if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC) | 391 | #if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC) |
394 | if (test_and_clear_bit(PPC_MSG_DEBUGGER_BREAK, | 392 | if (test_and_clear_bit(PPC_MSG_DEBUGGER_BREAK, |
395 | &xics_ipi_message[cpu].value)) { | 393 | &xics_ipi_message[cpu].value)) { |
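The dispatch pattern used above — one bit per message type in a per-CPU word, cleared atomically before the message is handled — looks like this in isolation (the mailbox name is illustrative, not the actual xics structure):

static unsigned long ipi_mailbox[NR_CPUS];

static void dispatch_one(int cpu, int msg)
{
        if (test_and_clear_bit(msg, &ipi_mailbox[cpu])) {
                mb();   /* order the bit clear before handling */
                smp_message_recv(msg);
        }
}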
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c index 7680001676a6..6c90c95b454e 100644 --- a/arch/powerpc/sysdev/mpic.c +++ b/arch/powerpc/sysdev/mpic.c | |||
@@ -1494,7 +1494,7 @@ void mpic_request_ipis(void) | |||
1494 | static char *ipi_names[] = { | 1494 | static char *ipi_names[] = { |
1495 | "IPI0 (call function)", | 1495 | "IPI0 (call function)", |
1496 | "IPI1 (reschedule)", | 1496 | "IPI1 (reschedule)", |
1497 | "IPI2 (unused)", | 1497 | "IPI2 (call function single)", |
1498 | "IPI3 (debugger break)", | 1498 | "IPI3 (debugger break)", |
1499 | }; | 1499 | }; |
1500 | BUG_ON(mpic == NULL); | 1500 | BUG_ON(mpic == NULL); |
diff --git a/arch/ppc/kernel/smp.c b/arch/ppc/kernel/smp.c index 055998575cb4..bcab0d210e30 100644 --- a/arch/ppc/kernel/smp.c +++ b/arch/ppc/kernel/smp.c | |||
@@ -152,7 +152,7 @@ static void stop_this_cpu(void *dummy) | |||
152 | 152 | ||
153 | void smp_send_stop(void) | 153 | void smp_send_stop(void) |
154 | { | 154 | { |
155 | smp_call_function(stop_this_cpu, NULL, 1, 0); | 155 | smp_call_function(stop_this_cpu, NULL, 0); |
156 | } | 156 | } |
157 | 157 | ||
158 | /* | 158 | /* |
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c index 9cb3d92447a3..a7f8979fb925 100644 --- a/arch/s390/appldata/appldata_base.c +++ b/arch/s390/appldata/appldata_base.c | |||
@@ -203,7 +203,7 @@ __appldata_vtimer_setup(int cmd) | |||
203 | per_cpu(appldata_timer, i).expires = per_cpu_interval; | 203 | per_cpu(appldata_timer, i).expires = per_cpu_interval; |
204 | smp_call_function_single(i, add_virt_timer_periodic, | 204 | smp_call_function_single(i, add_virt_timer_periodic, |
205 | &per_cpu(appldata_timer, i), | 205 | &per_cpu(appldata_timer, i), |
206 | 0, 1); | 206 | 1); |
207 | } | 207 | } |
208 | appldata_timer_active = 1; | 208 | appldata_timer_active = 1; |
209 | break; | 209 | break; |
@@ -228,7 +228,7 @@ __appldata_vtimer_setup(int cmd) | |||
228 | args.timer = &per_cpu(appldata_timer, i); | 228 | args.timer = &per_cpu(appldata_timer, i); |
229 | args.expires = per_cpu_interval; | 229 | args.expires = per_cpu_interval; |
230 | smp_call_function_single(i, __appldata_mod_vtimer_wrap, | 230 | smp_call_function_single(i, __appldata_mod_vtimer_wrap, |
231 | &args, 0, 1); | 231 | &args, 1); |
232 | } | 232 | } |
233 | } | 233 | } |
234 | } | 234 | } |
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index 5d4fa4b1c74c..b6781030cfbd 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c | |||
@@ -109,7 +109,7 @@ static void do_call_function(void) | |||
109 | } | 109 | } |
110 | 110 | ||
111 | static void __smp_call_function_map(void (*func) (void *info), void *info, | 111 | static void __smp_call_function_map(void (*func) (void *info), void *info, |
112 | int nonatomic, int wait, cpumask_t map) | 112 | int wait, cpumask_t map) |
113 | { | 113 | { |
114 | struct call_data_struct data; | 114 | struct call_data_struct data; |
115 | int cpu, local = 0; | 115 | int cpu, local = 0; |
@@ -162,7 +162,6 @@ out: | |||
162 | * smp_call_function: | 162 | * smp_call_function: |
163 | * @func: the function to run; this must be fast and non-blocking | 163 | * @func: the function to run; this must be fast and non-blocking |
164 | * @info: an arbitrary pointer to pass to the function | 164 | * @info: an arbitrary pointer to pass to the function |
165 | * @nonatomic: unused | ||
166 | * @wait: if true, wait (atomically) until function has completed on other CPUs | 165 | * @wait: if true, wait (atomically) until function has completed on other CPUs |
167 | * | 166 | * |
168 | * Run a function on all other CPUs. | 167 | * Run a function on all other CPUs. |
@@ -170,15 +169,14 @@ out: | |||
170 | * You must not call this function with disabled interrupts, from a | 169 | * You must not call this function with disabled interrupts, from a |
171 | * hardware interrupt handler or from a bottom half. | 170 | * hardware interrupt handler or from a bottom half. |
172 | */ | 171 | */ |
173 | int smp_call_function(void (*func) (void *info), void *info, int nonatomic, | 172 | int smp_call_function(void (*func) (void *info), void *info, int wait) |
174 | int wait) | ||
175 | { | 173 | { |
176 | cpumask_t map; | 174 | cpumask_t map; |
177 | 175 | ||
178 | spin_lock(&call_lock); | 176 | spin_lock(&call_lock); |
179 | map = cpu_online_map; | 177 | map = cpu_online_map; |
180 | cpu_clear(smp_processor_id(), map); | 178 | cpu_clear(smp_processor_id(), map); |
181 | __smp_call_function_map(func, info, nonatomic, wait, map); | 179 | __smp_call_function_map(func, info, wait, map); |
182 | spin_unlock(&call_lock); | 180 | spin_unlock(&call_lock); |
183 | return 0; | 181 | return 0; |
184 | } | 182 | } |
@@ -189,7 +187,6 @@ EXPORT_SYMBOL(smp_call_function); | |||
189 | * @cpu: the CPU where func should run | 187 | * @cpu: the CPU where func should run |
190 | * @func: the function to run; this must be fast and non-blocking | 188 | * @func: the function to run; this must be fast and non-blocking |
191 | * @info: an arbitrary pointer to pass to the function | 189 | * @info: an arbitrary pointer to pass to the function |
192 | * @nonatomic: unused | ||
193 | * @wait: if true, wait (atomically) until function has completed on other CPUs | 190 | * @wait: if true, wait (atomically) until function has completed on other CPUs |
194 | * | 191 | * |
195 | * Run a function on one processor. | 192 | * Run a function on one processor. |
@@ -198,11 +195,10 @@ EXPORT_SYMBOL(smp_call_function); | |||
198 | * hardware interrupt handler or from a bottom half. | 195 | * hardware interrupt handler or from a bottom half. |
199 | */ | 196 | */ |
200 | int smp_call_function_single(int cpu, void (*func) (void *info), void *info, | 197 | int smp_call_function_single(int cpu, void (*func) (void *info), void *info, |
201 | int nonatomic, int wait) | 198 | int wait) |
202 | { | 199 | { |
203 | spin_lock(&call_lock); | 200 | spin_lock(&call_lock); |
204 | __smp_call_function_map(func, info, nonatomic, wait, | 201 | __smp_call_function_map(func, info, wait, cpumask_of_cpu(cpu)); |
205 | cpumask_of_cpu(cpu)); | ||
206 | spin_unlock(&call_lock); | 202 | spin_unlock(&call_lock); |
207 | return 0; | 203 | return 0; |
208 | } | 204 | } |
@@ -228,7 +224,7 @@ int smp_call_function_mask(cpumask_t mask, void (*func)(void *), void *info, | |||
228 | { | 224 | { |
229 | spin_lock(&call_lock); | 225 | spin_lock(&call_lock); |
230 | cpu_clear(smp_processor_id(), mask); | 226 | cpu_clear(smp_processor_id(), mask); |
231 | __smp_call_function_map(func, info, 0, wait, mask); | 227 | __smp_call_function_map(func, info, wait, mask); |
232 | spin_unlock(&call_lock); | 228 | spin_unlock(&call_lock); |
233 | return 0; | 229 | return 0; |
234 | } | 230 | } |
@@ -303,7 +299,7 @@ static void smp_ptlb_callback(void *info) | |||
303 | 299 | ||
304 | void smp_ptlb_all(void) | 300 | void smp_ptlb_all(void) |
305 | { | 301 | { |
306 | on_each_cpu(smp_ptlb_callback, NULL, 0, 1); | 302 | on_each_cpu(smp_ptlb_callback, NULL, 1); |
307 | } | 303 | } |
308 | EXPORT_SYMBOL(smp_ptlb_all); | 304 | EXPORT_SYMBOL(smp_ptlb_all); |
309 | #endif /* ! CONFIG_64BIT */ | 305 | #endif /* ! CONFIG_64BIT */ |
@@ -351,7 +347,7 @@ void smp_ctl_set_bit(int cr, int bit) | |||
351 | memset(&parms.orvals, 0, sizeof(parms.orvals)); | 347 | memset(&parms.orvals, 0, sizeof(parms.orvals)); |
352 | memset(&parms.andvals, 0xff, sizeof(parms.andvals)); | 348 | memset(&parms.andvals, 0xff, sizeof(parms.andvals)); |
353 | parms.orvals[cr] = 1 << bit; | 349 | parms.orvals[cr] = 1 << bit; |
354 | on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1); | 350 | on_each_cpu(smp_ctl_bit_callback, &parms, 1); |
355 | } | 351 | } |
356 | EXPORT_SYMBOL(smp_ctl_set_bit); | 352 | EXPORT_SYMBOL(smp_ctl_set_bit); |
357 | 353 | ||
@@ -365,7 +361,7 @@ void smp_ctl_clear_bit(int cr, int bit) | |||
365 | memset(&parms.orvals, 0, sizeof(parms.orvals)); | 361 | memset(&parms.orvals, 0, sizeof(parms.orvals)); |
366 | memset(&parms.andvals, 0xff, sizeof(parms.andvals)); | 362 | memset(&parms.andvals, 0xff, sizeof(parms.andvals)); |
367 | parms.andvals[cr] = ~(1L << bit); | 363 | parms.andvals[cr] = ~(1L << bit); |
368 | on_each_cpu(smp_ctl_bit_callback, &parms, 0, 1); | 364 | on_each_cpu(smp_ctl_bit_callback, &parms, 1); |
369 | } | 365 | } |
370 | EXPORT_SYMBOL(smp_ctl_clear_bit); | 366 | EXPORT_SYMBOL(smp_ctl_clear_bit); |
371 | 367 | ||
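With the nonatomic argument dropped, an s390 caller now passes only the function, its argument, and the wait flag. A hypothetical user of the new signature:

static void invalidate_remote(void *unused)
{
        /* per-CPU work; must be fast and non-blocking */
}

static void invalidate_everywhere(void)
{
        preempt_disable();
        smp_call_function(invalidate_remote, NULL, 1);  /* other CPUs, wait */
        invalidate_remote(NULL);                        /* and locally */
        preempt_enable();
}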
diff --git a/arch/s390/kernel/stacktrace.c b/arch/s390/kernel/stacktrace.c index 85e46a5d0e08..57571f10270c 100644 --- a/arch/s390/kernel/stacktrace.c +++ b/arch/s390/kernel/stacktrace.c | |||
@@ -81,6 +81,7 @@ void save_stack_trace(struct stack_trace *trace) | |||
81 | S390_lowcore.thread_info, | 81 | S390_lowcore.thread_info, |
82 | S390_lowcore.thread_info + THREAD_SIZE, 1); | 82 | S390_lowcore.thread_info + THREAD_SIZE, 1); |
83 | } | 83 | } |
84 | EXPORT_SYMBOL_GPL(save_stack_trace); | ||
84 | 85 | ||
85 | void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) | 86 | void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) |
86 | { | 87 | { |
@@ -93,3 +94,4 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) | |||
93 | if (trace->nr_entries < trace->max_entries) | 94 | if (trace->nr_entries < trace->max_entries) |
94 | trace->entries[trace->nr_entries++] = ULONG_MAX; | 95 | trace->entries[trace->nr_entries++] = ULONG_MAX; |
95 | } | 96 | } |
97 | EXPORT_SYMBOL_GPL(save_stack_trace_tsk); | ||
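These exports let GPL modules capture traces as well. A minimal sketch of a caller, with a hypothetical depth:

#include <linux/stacktrace.h>

#define TRACE_DEPTH 16

static void dump_current_stack(void)
{
        unsigned long entries[TRACE_DEPTH];
        struct stack_trace trace = {
                .max_entries    = TRACE_DEPTH,
                .entries        = entries,
                .skip           = 1,    /* drop dump_current_stack() itself */
        };

        save_stack_trace(&trace);
        /* trace.nr_entries frames are now in entries[] */
}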
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index 7418bebb547f..f2cede3947b2 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c | |||
@@ -707,7 +707,7 @@ static int etr_sync_clock(struct etr_aib *aib, int port) | |||
707 | */ | 707 | */ |
708 | memset(&etr_sync, 0, sizeof(etr_sync)); | 708 | memset(&etr_sync, 0, sizeof(etr_sync)); |
709 | preempt_disable(); | 709 | preempt_disable(); |
710 | smp_call_function(clock_sync_cpu_start, &etr_sync, 0, 0); | 710 | smp_call_function(clock_sync_cpu_start, &etr_sync, 0); |
711 | local_irq_disable(); | 711 | local_irq_disable(); |
712 | enable_sync_clock(); | 712 | enable_sync_clock(); |
713 | 713 | ||
@@ -746,7 +746,7 @@ static int etr_sync_clock(struct etr_aib *aib, int port) | |||
746 | rc = -EAGAIN; | 746 | rc = -EAGAIN; |
747 | } | 747 | } |
748 | local_irq_enable(); | 748 | local_irq_enable(); |
749 | smp_call_function(clock_sync_cpu_end, NULL, 0, 0); | 749 | smp_call_function(clock_sync_cpu_end, NULL, 0); |
750 | preempt_enable(); | 750 | preempt_enable(); |
751 | return rc; | 751 | return rc; |
752 | } | 752 | } |
@@ -926,7 +926,7 @@ static void etr_work_fn(struct work_struct *work) | |||
926 | if (!eacr.ea) { | 926 | if (!eacr.ea) { |
927 | /* Both ports offline. Reset everything. */ | 927 | /* Both ports offline. Reset everything. */ |
928 | eacr.dp = eacr.es = eacr.sl = 0; | 928 | eacr.dp = eacr.es = eacr.sl = 0; |
929 | on_each_cpu(disable_sync_clock, NULL, 0, 1); | 929 | on_each_cpu(disable_sync_clock, NULL, 1); |
930 | del_timer_sync(&etr_timer); | 930 | del_timer_sync(&etr_timer); |
931 | etr_update_eacr(eacr); | 931 | etr_update_eacr(eacr); |
932 | clear_bit(CLOCK_SYNC_ETR, &clock_sync_flags); | 932 | clear_bit(CLOCK_SYNC_ETR, &clock_sync_flags); |
@@ -1432,7 +1432,7 @@ static void stp_work_fn(struct work_struct *work) | |||
1432 | */ | 1432 | */ |
1433 | memset(&stp_sync, 0, sizeof(stp_sync)); | 1433 | memset(&stp_sync, 0, sizeof(stp_sync)); |
1434 | preempt_disable(); | 1434 | preempt_disable(); |
1435 | smp_call_function(clock_sync_cpu_start, &stp_sync, 0, 0); | 1435 | smp_call_function(clock_sync_cpu_start, &stp_sync, 0); |
1436 | local_irq_disable(); | 1436 | local_irq_disable(); |
1437 | enable_sync_clock(); | 1437 | enable_sync_clock(); |
1438 | 1438 | ||
@@ -1465,7 +1465,7 @@ static void stp_work_fn(struct work_struct *work) | |||
1465 | stp_sync.in_sync = 1; | 1465 | stp_sync.in_sync = 1; |
1466 | 1466 | ||
1467 | local_irq_enable(); | 1467 | local_irq_enable(); |
1468 | smp_call_function(clock_sync_cpu_end, NULL, 0, 0); | 1468 | smp_call_function(clock_sync_cpu_end, NULL, 0); |
1469 | preempt_enable(); | 1469 | preempt_enable(); |
1470 | } | 1470 | } |
1471 | 1471 | ||
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 9a854c8e5274..3e7384f4619c 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig | |||
@@ -688,6 +688,7 @@ config CRASH_DUMP | |||
688 | config SMP | 688 | config SMP |
689 | bool "Symmetric multi-processing support" | 689 | bool "Symmetric multi-processing support" |
690 | depends on SYS_SUPPORTS_SMP | 690 | depends on SYS_SUPPORTS_SMP |
691 | select USE_GENERIC_SMP_HELPERS | ||
691 | ---help--- | 692 | ---help--- |
692 | This enables support for systems with more than one CPU. If you have | 693 | This enables support for systems with more than one CPU. If you have |
693 | a system with only one CPU, like most personal computers, say N. If | 694 | a system with only one CPU, like most personal computers, say N. If |
diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c index 5d039d168f57..60c50841143e 100644 --- a/arch/sh/kernel/smp.c +++ b/arch/sh/kernel/smp.c | |||
@@ -36,13 +36,6 @@ EXPORT_SYMBOL(cpu_possible_map); | |||
36 | cpumask_t cpu_online_map; | 36 | cpumask_t cpu_online_map; |
37 | EXPORT_SYMBOL(cpu_online_map); | 37 | EXPORT_SYMBOL(cpu_online_map); |
38 | 38 | ||
39 | static atomic_t cpus_booted = ATOMIC_INIT(0); | ||
40 | |||
41 | /* | ||
42 | * Run specified function on a particular processor. | ||
43 | */ | ||
44 | void __smp_call_function(unsigned int cpu); | ||
45 | |||
46 | static inline void __init smp_store_cpu_info(unsigned int cpu) | 39 | static inline void __init smp_store_cpu_info(unsigned int cpu) |
47 | { | 40 | { |
48 | struct sh_cpuinfo *c = cpu_data + cpu; | 41 | struct sh_cpuinfo *c = cpu_data + cpu; |
@@ -175,45 +168,20 @@ static void stop_this_cpu(void *unused) | |||
175 | 168 | ||
176 | void smp_send_stop(void) | 169 | void smp_send_stop(void) |
177 | { | 170 | { |
178 | smp_call_function(stop_this_cpu, 0, 1, 0); | 171 | smp_call_function(stop_this_cpu, 0, 0); |
179 | } | 172 | } |
180 | 173 | ||
181 | struct smp_fn_call_struct smp_fn_call = { | 174 | void arch_send_call_function_ipi(cpumask_t mask) |
182 | .lock = __SPIN_LOCK_UNLOCKED(smp_fn_call.lock), | ||
183 | .finished = ATOMIC_INIT(0), | ||
184 | }; | ||
185 | |||
186 | /* | ||
187 | * The caller of this wants the passed function to run on every cpu. If wait | ||
188 | * is set, wait until all cpus have finished the function before returning. | ||
189 | * The lock is here to protect the call structure. | ||
190 | * You must not call this function with disabled interrupts or from a | ||
191 | * hardware interrupt handler or from a bottom half handler. | ||
192 | */ | ||
193 | int smp_call_function(void (*func)(void *info), void *info, int retry, int wait) | ||
194 | { | 175 | { |
195 | unsigned int nr_cpus = atomic_read(&cpus_booted); | 176 | int cpu; |
196 | int i; | ||
197 | |||
198 | /* Can deadlock when called with interrupts disabled */ | ||
199 | WARN_ON(irqs_disabled()); | ||
200 | |||
201 | spin_lock(&smp_fn_call.lock); | ||
202 | |||
203 | atomic_set(&smp_fn_call.finished, 0); | ||
204 | smp_fn_call.fn = func; | ||
205 | smp_fn_call.data = info; | ||
206 | |||
207 | for (i = 0; i < nr_cpus; i++) | ||
208 | if (i != smp_processor_id()) | ||
209 | plat_send_ipi(i, SMP_MSG_FUNCTION); | ||
210 | |||
211 | if (wait) | ||
212 | while (atomic_read(&smp_fn_call.finished) != (nr_cpus - 1)); | ||
213 | 177 | ||
214 | spin_unlock(&smp_fn_call.lock); | 178 | for_each_cpu_mask(cpu, mask) |
179 | plat_send_ipi(cpu, SMP_MSG_FUNCTION); | ||
180 | } | ||
215 | 181 | ||
216 | return 0; | 182 | void arch_send_call_function_single_ipi(int cpu) |
183 | { | ||
184 | plat_send_ipi(cpu, SMP_MSG_FUNCTION_SINGLE); | ||
217 | } | 185 | } |
218 | 186 | ||
219 | /* Not really SMP stuff ... */ | 187 | /* Not really SMP stuff ... */ |
@@ -229,7 +197,7 @@ static void flush_tlb_all_ipi(void *info) | |||
229 | 197 | ||
230 | void flush_tlb_all(void) | 198 | void flush_tlb_all(void) |
231 | { | 199 | { |
232 | on_each_cpu(flush_tlb_all_ipi, 0, 1, 1); | 200 | on_each_cpu(flush_tlb_all_ipi, 0, 1); |
233 | } | 201 | } |
234 | 202 | ||
235 | static void flush_tlb_mm_ipi(void *mm) | 203 | static void flush_tlb_mm_ipi(void *mm) |
@@ -255,7 +223,7 @@ void flush_tlb_mm(struct mm_struct *mm) | |||
255 | preempt_disable(); | 223 | preempt_disable(); |
256 | 224 | ||
257 | if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) { | 225 | if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) { |
258 | smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1, 1); | 226 | smp_call_function(flush_tlb_mm_ipi, (void *)mm, 1); |
259 | } else { | 227 | } else { |
260 | int i; | 228 | int i; |
261 | for (i = 0; i < num_online_cpus(); i++) | 229 | for (i = 0; i < num_online_cpus(); i++) |
@@ -292,7 +260,7 @@ void flush_tlb_range(struct vm_area_struct *vma, | |||
292 | fd.vma = vma; | 260 | fd.vma = vma; |
293 | fd.addr1 = start; | 261 | fd.addr1 = start; |
294 | fd.addr2 = end; | 262 | fd.addr2 = end; |
295 | smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1, 1); | 263 | smp_call_function(flush_tlb_range_ipi, (void *)&fd, 1); |
296 | } else { | 264 | } else { |
297 | int i; | 265 | int i; |
298 | for (i = 0; i < num_online_cpus(); i++) | 266 | for (i = 0; i < num_online_cpus(); i++) |
@@ -316,7 +284,7 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end) | |||
316 | 284 | ||
317 | fd.addr1 = start; | 285 | fd.addr1 = start; |
318 | fd.addr2 = end; | 286 | fd.addr2 = end; |
319 | on_each_cpu(flush_tlb_kernel_range_ipi, (void *)&fd, 1, 1); | 287 | on_each_cpu(flush_tlb_kernel_range_ipi, (void *)&fd, 1); |
320 | } | 288 | } |
321 | 289 | ||
322 | static void flush_tlb_page_ipi(void *info) | 290 | static void flush_tlb_page_ipi(void *info) |
@@ -335,7 +303,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long page) | |||
335 | 303 | ||
336 | fd.vma = vma; | 304 | fd.vma = vma; |
337 | fd.addr1 = page; | 305 | fd.addr1 = page; |
338 | smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1, 1); | 306 | smp_call_function(flush_tlb_page_ipi, (void *)&fd, 1); |
339 | } else { | 307 | } else { |
340 | int i; | 308 | int i; |
341 | for (i = 0; i < num_online_cpus(); i++) | 309 | for (i = 0; i < num_online_cpus(); i++) |
@@ -359,6 +327,6 @@ void flush_tlb_one(unsigned long asid, unsigned long vaddr) | |||
359 | fd.addr1 = asid; | 327 | fd.addr1 = asid; |
360 | fd.addr2 = vaddr; | 328 | fd.addr2 = vaddr; |
361 | 329 | ||
362 | smp_call_function(flush_tlb_one_ipi, (void *)&fd, 1, 1); | 330 | smp_call_function(flush_tlb_one_ipi, (void *)&fd, 1); |
363 | local_flush_tlb_one(asid, vaddr); | 331 | local_flush_tlb_one(asid, vaddr); |
364 | } | 332 | } |
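With USE_GENERIC_SMP_HELPERS selected, the generic code in kernel/smp.c queues the call data and these two hooks only raise the IPI; the receiving side simply calls back into the generic helpers. A sketch of that receive path, assuming an sh-style smp_message_recv dispatcher:

void smp_message_recv(unsigned int msg)
{
        switch (msg) {
        case SMP_MSG_FUNCTION:
                generic_smp_call_function_interrupt();
                break;
        case SMP_MSG_FUNCTION_SINGLE:
                generic_smp_call_function_single_interrupt();
                break;
        }
}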
diff --git a/arch/sh/kernel/stacktrace.c b/arch/sh/kernel/stacktrace.c index d41e561be20e..1b2ae35c4a76 100644 --- a/arch/sh/kernel/stacktrace.c +++ b/arch/sh/kernel/stacktrace.c | |||
@@ -34,3 +34,4 @@ void save_stack_trace(struct stack_trace *trace) | |||
34 | } | 34 | } |
35 | } | 35 | } |
36 | } | 36 | } |
37 | EXPORT_SYMBOL_GPL(save_stack_trace); | ||
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c index fa63c68a1819..c099d96f1239 100644 --- a/arch/sparc64/kernel/smp.c +++ b/arch/sparc64/kernel/smp.c | |||
@@ -807,7 +807,6 @@ extern unsigned long xcall_call_function; | |||
807 | * smp_call_function(): Run a function on all other CPUs. | 807 | * smp_call_function(): Run a function on all other CPUs. |
808 | * @func: The function to run. This must be fast and non-blocking. | 808 | * @func: The function to run. This must be fast and non-blocking. |
809 | * @info: An arbitrary pointer to pass to the function. | 809 | * @info: An arbitrary pointer to pass to the function. |
810 | * @nonatomic: currently unused. | ||
811 | * @wait: If true, wait (atomically) until function has completed on other CPUs. | 810 | * @wait: If true, wait (atomically) until function has completed on other CPUs. |
812 | * | 811 | * |
813 | * Returns 0 on success, else a negative status code. Does not return until | 812 | * Returns 0 on success, else a negative status code. Does not return until |
@@ -816,8 +815,8 @@ extern unsigned long xcall_call_function; | |||
816 | * You must not call this function with disabled interrupts or from a | 815 | * You must not call this function with disabled interrupts or from a |
817 | * hardware interrupt handler or from a bottom half handler. | 816 | * hardware interrupt handler or from a bottom half handler. |
818 | */ | 817 | */ |
819 | static int smp_call_function_mask(void (*func)(void *info), void *info, | 818 | static int sparc64_smp_call_function_mask(void (*func)(void *info), void *info, |
820 | int nonatomic, int wait, cpumask_t mask) | 819 | int wait, cpumask_t mask) |
821 | { | 820 | { |
822 | struct call_data_struct data; | 821 | struct call_data_struct data; |
823 | int cpus; | 822 | int cpus; |
@@ -852,11 +851,9 @@ out_unlock: | |||
852 | return 0; | 851 | return 0; |
853 | } | 852 | } |
854 | 853 | ||
855 | int smp_call_function(void (*func)(void *info), void *info, | 854 | int smp_call_function(void (*func)(void *info), void *info, int wait) |
856 | int nonatomic, int wait) | ||
857 | { | 855 | { |
858 | return smp_call_function_mask(func, info, nonatomic, wait, | 856 | return sparc64_smp_call_function_mask(func, info, wait, cpu_online_map); |
859 | cpu_online_map); | ||
860 | } | 857 | } |
861 | 858 | ||
862 | void smp_call_function_client(int irq, struct pt_regs *regs) | 859 | void smp_call_function_client(int irq, struct pt_regs *regs) |
@@ -893,7 +890,7 @@ static void tsb_sync(void *info) | |||
893 | 890 | ||
894 | void smp_tsb_sync(struct mm_struct *mm) | 891 | void smp_tsb_sync(struct mm_struct *mm) |
895 | { | 892 | { |
896 | smp_call_function_mask(tsb_sync, mm, 0, 1, mm->cpu_vm_mask); | 893 | sparc64_smp_call_function_mask(tsb_sync, mm, 1, mm->cpu_vm_mask); |
897 | } | 894 | } |
898 | 895 | ||
899 | extern unsigned long xcall_flush_tlb_mm; | 896 | extern unsigned long xcall_flush_tlb_mm; |
diff --git a/arch/sparc64/kernel/stacktrace.c b/arch/sparc64/kernel/stacktrace.c index c73ce3f4197e..b3e3737750d8 100644 --- a/arch/sparc64/kernel/stacktrace.c +++ b/arch/sparc64/kernel/stacktrace.c | |||
@@ -1,6 +1,7 @@ | |||
1 | #include <linux/sched.h> | 1 | #include <linux/sched.h> |
2 | #include <linux/stacktrace.h> | 2 | #include <linux/stacktrace.h> |
3 | #include <linux/thread_info.h> | 3 | #include <linux/thread_info.h> |
4 | #include <linux/module.h> | ||
4 | #include <asm/ptrace.h> | 5 | #include <asm/ptrace.h> |
5 | #include <asm/stacktrace.h> | 6 | #include <asm/stacktrace.h> |
6 | 7 | ||
@@ -47,3 +48,4 @@ void save_stack_trace(struct stack_trace *trace) | |||
47 | trace->entries[trace->nr_entries++] = pc; | 48 | trace->entries[trace->nr_entries++] = pc; |
48 | } while (trace->nr_entries < trace->max_entries); | 49 | } while (trace->nr_entries < trace->max_entries); |
49 | } | 50 | } |
51 | EXPORT_SYMBOL_GPL(save_stack_trace); | ||
diff --git a/arch/sparc64/mm/hugetlbpage.c b/arch/sparc64/mm/hugetlbpage.c index 6cfab2e4d340..ebefd2a14375 100644 --- a/arch/sparc64/mm/hugetlbpage.c +++ b/arch/sparc64/mm/hugetlbpage.c | |||
@@ -344,7 +344,7 @@ void hugetlb_prefault_arch_hook(struct mm_struct *mm) | |||
344 | * also executing in this address space. | 344 | * also executing in this address space. |
345 | */ | 345 | */ |
346 | mm->context.sparc64_ctx_val = ctx; | 346 | mm->context.sparc64_ctx_val = ctx; |
347 | on_each_cpu(context_reload, mm, 0, 0); | 347 | on_each_cpu(context_reload, mm, 0); |
348 | } | 348 | } |
349 | spin_unlock(&ctx_alloc_lock); | 349 | spin_unlock(&ctx_alloc_lock); |
350 | } | 350 | } |
diff --git a/arch/um/kernel/smp.c b/arch/um/kernel/smp.c index e1062ec36d40..be2d50c3aa95 100644 --- a/arch/um/kernel/smp.c +++ b/arch/um/kernel/smp.c | |||
@@ -214,8 +214,7 @@ void smp_call_function_slave(int cpu) | |||
214 | atomic_inc(&scf_finished); | 214 | atomic_inc(&scf_finished); |
215 | } | 215 | } |
216 | 216 | ||
217 | int smp_call_function(void (*_func)(void *info), void *_info, int nonatomic, | 217 | int smp_call_function(void (*_func)(void *info), void *_info, int wait) |
218 | int wait) | ||
219 | { | 218 | { |
220 | int cpus = num_online_cpus() - 1; | 219 | int cpus = num_online_cpus() - 1; |
221 | int i; | 220 | int i; |
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 6958d6bcaf70..96e0c2ebc388 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
@@ -170,6 +170,7 @@ config GENERIC_PENDING_IRQ | |||
170 | config X86_SMP | 170 | config X86_SMP |
171 | bool | 171 | bool |
172 | depends on SMP && ((X86_32 && !X86_VOYAGER) || X86_64) | 172 | depends on SMP && ((X86_32 && !X86_VOYAGER) || X86_64) |
173 | select USE_GENERIC_SMP_HELPERS | ||
173 | default y | 174 | default y |
174 | 175 | ||
175 | config X86_32_SMP | 176 | config X86_32_SMP |
@@ -447,7 +448,6 @@ config PARAVIRT_DEBUG | |||
447 | config MEMTEST | 448 | config MEMTEST |
448 | bool "Memtest" | 449 | bool "Memtest" |
449 | depends on X86_64 | 450 | depends on X86_64 |
450 | default y | ||
451 | help | 451 | help |
452 | This option adds a kernel parameter 'memtest', which allows memtest | 452 | This option adds a kernel parameter 'memtest', which allows memtest |
453 | to be set. | 453 | to be set. |
@@ -455,7 +455,7 @@ config MEMTEST | |||
455 | memtest=1, means do 1 test pattern; | 455 | memtest=1, means do 1 test pattern; |
456 | ... | 456 | ... |
457 | memtest=4, means do 4 test patterns. | 457 | memtest=4, means do 4 test patterns. |
458 | If you are unsure how to answer this question, answer Y. | 458 | If you are unsure how to answer this question, answer N. |
459 | 459 | ||
460 | config X86_SUMMIT_NUMA | 460 | config X86_SUMMIT_NUMA |
461 | def_bool y | 461 | def_bool y |
@@ -1135,21 +1135,18 @@ config MTRR | |||
1135 | See <file:Documentation/mtrr.txt> for more information. | 1135 | See <file:Documentation/mtrr.txt> for more information. |
1136 | 1136 | ||
1137 | config MTRR_SANITIZER | 1137 | config MTRR_SANITIZER |
1138 | def_bool y | 1138 | bool |
1139 | prompt "MTRR cleanup support" | 1139 | prompt "MTRR cleanup support" |
1140 | depends on MTRR | 1140 | depends on MTRR |
1141 | help | 1141 | help |
1142 | Convert MTRR layout from continuous to discrete, so some X driver | 1142 | Convert MTRR layout from continuous to discrete, so X drivers can |
1143 | could add WB entries. | 1143 | add writeback entries. |
1144 | 1144 | ||
1145 | Say N here if you see bootup problems (boot crash, boot hang, | 1145 | Can be disabled with disable_mtrr_cleanup on the kernel command line. |
1146 | spontaneous reboots). | 1146 | The largest mtrr entry size for a continuous block can be set with |
1147 | mtrr_chunk_size. | ||
1147 | 1148 | ||
1148 | Could be disabled with disable_mtrr_cleanup. Also mtrr_chunk_size | 1149 | If unsure, say N. |
1149 | could be used to send largest mtrr entry size for continuous block | ||
1150 | to hold holes (aka. UC entries) | ||
1151 | |||
1152 | If unsure, say Y. | ||
1153 | 1150 | ||
1154 | config MTRR_SANITIZER_ENABLE_DEFAULT | 1151 | config MTRR_SANITIZER_ENABLE_DEFAULT |
1155 | int "MTRR cleanup enable value (0-1)" | 1152 | int "MTRR cleanup enable value (0-1)" |
@@ -1166,7 +1163,7 @@ config MTRR_SANITIZER_SPARE_REG_NR_DEFAULT | |||
1166 | depends on MTRR_SANITIZER | 1163 | depends on MTRR_SANITIZER |
1167 | help | 1164 | help |
1168 | mtrr cleanup spare entries default; it can be changed via | 1165 | mtrr cleanup spare entries default; it can be changed via |
1169 | mtrr_spare_reg_nr= | 1166 | mtrr_spare_reg_nr=N on the kernel command line. |
1170 | 1167 | ||
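With the sanitizer built in, its behaviour is tuned from the boot command line; the values below are illustrative:

        disable_mtrr_cleanup            # turn the cleanup off entirely
        mtrr_chunk_size=256M            # largest entry for a continuous block
        mtrr_spare_reg_nr=1             # spare MTRR entries to leave free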
1171 | config X86_PAT | 1168 | config X86_PAT |
1172 | bool | 1169 | bool |
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c index e6a4b564ccaa..793ad2045f58 100644 --- a/arch/x86/kernel/acpi/sleep.c +++ b/arch/x86/kernel/acpi/sleep.c | |||
@@ -23,6 +23,15 @@ static unsigned long acpi_realmode; | |||
23 | static char temp_stack[10240]; | 23 | static char temp_stack[10240]; |
24 | #endif | 24 | #endif |
25 | 25 | ||
26 | /* XXX: this macro should move to asm-x86/segment.h and be shared with the | ||
27 | boot code... */ | ||
28 | #define GDT_ENTRY(flags, base, limit) \ | ||
29 | (((u64)(base & 0xff000000) << 32) | \ | ||
30 | ((u64)flags << 40) | \ | ||
31 | ((u64)(limit & 0x00ff0000) << 32) | \ | ||
32 | ((u64)(base & 0x00ffffff) << 16) | \ | ||
33 | ((u64)(limit & 0x0000ffff))) | ||
34 | |||
26 | /** | 35 | /** |
27 | * acpi_save_state_mem - save kernel state | 36 | * acpi_save_state_mem - save kernel state |
28 | * | 37 | * |
@@ -51,18 +60,27 @@ int acpi_save_state_mem(void) | |||
51 | header->video_mode = saved_video_mode; | 60 | header->video_mode = saved_video_mode; |
52 | 61 | ||
53 | header->wakeup_jmp_seg = acpi_wakeup_address >> 4; | 62 | header->wakeup_jmp_seg = acpi_wakeup_address >> 4; |
63 | |||
64 | /* | ||
65 | * Set up the wakeup GDT. We set these up as Big Real Mode, | ||
66 | * that is, with limits set to 4 GB. At least the Lenovo | ||
67 | * Thinkpad X61 is known to need this for the video BIOS | ||
68 | * initialization quirk to work; this is likely to also | ||
69 | * be the case for other laptops or integrated video devices. | ||
70 | */ | ||
71 | |||
54 | /* GDT[0]: GDT self-pointer */ | 72 | /* GDT[0]: GDT self-pointer */ |
55 | header->wakeup_gdt[0] = | 73 | header->wakeup_gdt[0] = |
56 | (u64)(sizeof(header->wakeup_gdt) - 1) + | 74 | (u64)(sizeof(header->wakeup_gdt) - 1) + |
57 | ((u64)(acpi_wakeup_address + | 75 | ((u64)(acpi_wakeup_address + |
58 | ((char *)&header->wakeup_gdt - (char *)acpi_realmode)) | 76 | ((char *)&header->wakeup_gdt - (char *)acpi_realmode)) |
59 | << 16); | 77 | << 16); |
60 | /* GDT[1]: real-mode-like code segment */ | 78 | /* GDT[1]: big real mode-like code segment */ |
61 | header->wakeup_gdt[1] = (0x009bULL << 40) + | 79 | header->wakeup_gdt[1] = |
62 | ((u64)acpi_wakeup_address << 16) + 0xffff; | 80 | GDT_ENTRY(0x809b, acpi_wakeup_address, 0xfffff); |
63 | /* GDT[2]: real-mode-like data segment */ | 81 | /* GDT[2]: big real mode-like data segment */ |
64 | header->wakeup_gdt[2] = (0x0093ULL << 40) + | 82 | header->wakeup_gdt[2] = |
65 | ((u64)acpi_wakeup_address << 16) + 0xffff; | 83 | GDT_ENTRY(0x8093, acpi_wakeup_address, 0xfffff); |
66 | 84 | ||
67 | #ifndef CONFIG_64BIT | 85 | #ifndef CONFIG_64BIT |
68 | store_gdt((struct desc_ptr *)&header->pmode_gdt); | 86 | store_gdt((struct desc_ptr *)&header->pmode_gdt); |
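The GDT_ENTRY macro above scatters base, limit, and flags into the non-contiguous fields of an 8-byte segment descriptor. A quick userspace check of the packing, using illustrative operand values rather than the real wakeup address:

#include <stdio.h>
#include <stdint.h>

#define GDT_ENTRY(flags, base, limit)                   \
        ((((uint64_t)((base)  & 0xff000000)) << 32) |   \
         (((uint64_t)(flags)) << 40)                |   \
         (((uint64_t)((limit) & 0x00ff0000)) << 32) |   \
         (((uint64_t)((base)  & 0x00ffffff)) << 16) |   \
          ((uint64_t)((limit) & 0x0000ffff)))

int main(void)
{
        /* flags 0x809b: present, exec/read code, G=1 (page granular);
         * limit 0xfffff pages => 4 GB "big real mode" segment.
         * Prints 008f9b09a000ffff.
         */
        printf("%016llx\n",
               (unsigned long long)GDT_ENTRY(0x809b, 0x9a000, 0xfffff));
        return 0;
}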
diff --git a/arch/x86/kernel/apic_32.c b/arch/x86/kernel/apic_32.c index 3e58b676d23b..a437d027f20b 100644 --- a/arch/x86/kernel/apic_32.c +++ b/arch/x86/kernel/apic_32.c | |||
@@ -1340,6 +1340,10 @@ void __init smp_intr_init(void) | |||
1340 | 1340 | ||
1341 | /* IPI for generic function call */ | 1341 | /* IPI for generic function call */ |
1342 | alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt); | 1342 | alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt); |
1343 | |||
1344 | /* IPI for single call function */ | ||
1345 | set_intr_gate(CALL_FUNCTION_SINGLE_VECTOR, | ||
1346 | call_function_single_interrupt); | ||
1343 | } | 1347 | } |
1344 | #endif | 1348 | #endif |
1345 | 1349 | ||
diff --git a/arch/x86/kernel/cpu/mcheck/mce_64.c b/arch/x86/kernel/cpu/mcheck/mce_64.c index 987410745182..c4a7ec31394c 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_64.c +++ b/arch/x86/kernel/cpu/mcheck/mce_64.c | |||
@@ -364,7 +364,7 @@ static void mcheck_check_cpu(void *info) | |||
364 | 364 | ||
365 | static void mcheck_timer(struct work_struct *work) | 365 | static void mcheck_timer(struct work_struct *work) |
366 | { | 366 | { |
367 | on_each_cpu(mcheck_check_cpu, NULL, 1, 1); | 367 | on_each_cpu(mcheck_check_cpu, NULL, 1); |
368 | 368 | ||
369 | /* | 369 | /* |
370 | * Alert userspace if needed. If we logged an MCE, reduce the | 370 | * Alert userspace if needed. If we logged an MCE, reduce the |
@@ -621,7 +621,7 @@ static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize, | |||
621 | * Collect entries that were still getting written before the | 621 | * Collect entries that were still getting written before the |
622 | * synchronize. | 622 | * synchronize. |
623 | */ | 623 | */ |
624 | on_each_cpu(collect_tscs, cpu_tsc, 1, 1); | 624 | on_each_cpu(collect_tscs, cpu_tsc, 1); |
625 | for (i = next; i < MCE_LOG_LEN; i++) { | 625 | for (i = next; i < MCE_LOG_LEN; i++) { |
626 | if (mcelog.entry[i].finished && | 626 | if (mcelog.entry[i].finished && |
627 | mcelog.entry[i].tsc < cpu_tsc[mcelog.entry[i].cpu]) { | 627 | mcelog.entry[i].tsc < cpu_tsc[mcelog.entry[i].cpu]) { |
@@ -746,7 +746,7 @@ static void mce_restart(void) | |||
746 | if (next_interval) | 746 | if (next_interval) |
747 | cancel_delayed_work(&mcheck_work); | 747 | cancel_delayed_work(&mcheck_work); |
748 | /* Timer race is harmless here */ | 748 | /* Timer race is harmless here */ |
749 | on_each_cpu(mce_init, NULL, 1, 1); | 749 | on_each_cpu(mce_init, NULL, 1); |
750 | next_interval = check_interval * HZ; | 750 | next_interval = check_interval * HZ; |
751 | if (next_interval) | 751 | if (next_interval) |
752 | schedule_delayed_work(&mcheck_work, | 752 | schedule_delayed_work(&mcheck_work, |
diff --git a/arch/x86/kernel/cpu/mcheck/non-fatal.c b/arch/x86/kernel/cpu/mcheck/non-fatal.c index 00ccb6c14ec2..cc1fccdd31e0 100644 --- a/arch/x86/kernel/cpu/mcheck/non-fatal.c +++ b/arch/x86/kernel/cpu/mcheck/non-fatal.c | |||
@@ -59,7 +59,7 @@ static DECLARE_DELAYED_WORK(mce_work, mce_work_fn); | |||
59 | 59 | ||
60 | static void mce_work_fn(struct work_struct *work) | 60 | static void mce_work_fn(struct work_struct *work) |
61 | { | 61 | { |
62 | on_each_cpu(mce_checkregs, NULL, 1, 1); | 62 | on_each_cpu(mce_checkregs, NULL, 1); |
63 | schedule_delayed_work(&mce_work, round_jiffies_relative(MCE_RATE)); | 63 | schedule_delayed_work(&mce_work, round_jiffies_relative(MCE_RATE)); |
64 | } | 64 | } |
65 | 65 | ||
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c index 105afe12beb0..6f23969c8faf 100644 --- a/arch/x86/kernel/cpu/mtrr/main.c +++ b/arch/x86/kernel/cpu/mtrr/main.c | |||
@@ -223,7 +223,7 @@ static void set_mtrr(unsigned int reg, unsigned long base, | |||
223 | atomic_set(&data.gate,0); | 223 | atomic_set(&data.gate,0); |
224 | 224 | ||
225 | /* Start the ball rolling on other CPUs */ | 225 | /* Start the ball rolling on other CPUs */ |
226 | if (smp_call_function(ipi_handler, &data, 1, 0) != 0) | 226 | if (smp_call_function(ipi_handler, &data, 0) != 0) |
227 | panic("mtrr: timed out waiting for other CPUs\n"); | 227 | panic("mtrr: timed out waiting for other CPUs\n"); |
228 | 228 | ||
229 | local_irq_save(flags); | 229 | local_irq_save(flags); |
@@ -1682,7 +1682,7 @@ void mtrr_ap_init(void) | |||
1682 | */ | 1682 | */ |
1683 | void mtrr_save_state(void) | 1683 | void mtrr_save_state(void) |
1684 | { | 1684 | { |
1685 | smp_call_function_single(0, mtrr_save_fixed_ranges, NULL, 1, 1); | 1685 | smp_call_function_single(0, mtrr_save_fixed_ranges, NULL, 1); |
1686 | } | 1686 | } |
1687 | 1687 | ||
1688 | static int __init mtrr_init_finialize(void) | 1688 | static int __init mtrr_init_finialize(void) |
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c index 2e9bef6e3aa3..6d4bdc02388a 100644 --- a/arch/x86/kernel/cpu/perfctr-watchdog.c +++ b/arch/x86/kernel/cpu/perfctr-watchdog.c | |||
@@ -189,7 +189,7 @@ void disable_lapic_nmi_watchdog(void) | |||
189 | if (atomic_read(&nmi_active) <= 0) | 189 | if (atomic_read(&nmi_active) <= 0) |
190 | return; | 190 | return; |
191 | 191 | ||
192 | on_each_cpu(stop_apic_nmi_watchdog, NULL, 0, 1); | 192 | on_each_cpu(stop_apic_nmi_watchdog, NULL, 1); |
193 | 193 | ||
194 | if (wd_ops) | 194 | if (wd_ops) |
195 | wd_ops->unreserve(); | 195 | wd_ops->unreserve(); |
@@ -213,7 +213,7 @@ void enable_lapic_nmi_watchdog(void) | |||
213 | return; | 213 | return; |
214 | } | 214 | } |
215 | 215 | ||
216 | on_each_cpu(setup_apic_nmi_watchdog, NULL, 0, 1); | 216 | on_each_cpu(setup_apic_nmi_watchdog, NULL, 1); |
217 | touch_nmi_watchdog(); | 217 | touch_nmi_watchdog(); |
218 | } | 218 | } |
219 | 219 | ||
diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c index 71f1c2654bec..2de5fa2bbf77 100644 --- a/arch/x86/kernel/cpuid.c +++ b/arch/x86/kernel/cpuid.c | |||
@@ -96,7 +96,7 @@ static ssize_t cpuid_read(struct file *file, char __user *buf, | |||
96 | for (; count; count -= 16) { | 96 | for (; count; count -= 16) { |
97 | cmd.eax = pos; | 97 | cmd.eax = pos; |
98 | cmd.ecx = pos >> 32; | 98 | cmd.ecx = pos >> 32; |
99 | smp_call_function_single(cpu, cpuid_smp_cpuid, &cmd, 1, 1); | 99 | smp_call_function_single(cpu, cpuid_smp_cpuid, &cmd, 1); |
100 | if (copy_to_user(tmp, &cmd, 16)) | 100 | if (copy_to_user(tmp, &cmd, 16)) |
101 | return -EFAULT; | 101 | return -EFAULT; |
102 | tmp += 16; | 102 | tmp += 16; |
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index ba41bf42748d..ae63e584c340 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S | |||
@@ -816,6 +816,9 @@ END(invalidate_interrupt\num) | |||
816 | ENTRY(call_function_interrupt) | 816 | ENTRY(call_function_interrupt) |
817 | apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt | 817 | apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt |
818 | END(call_function_interrupt) | 818 | END(call_function_interrupt) |
819 | ENTRY(call_function_single_interrupt) | ||
820 | apicinterrupt CALL_FUNCTION_SINGLE_VECTOR,smp_call_function_single_interrupt | ||
821 | END(call_function_single_interrupt) | ||
819 | ENTRY(irq_move_cleanup_interrupt) | 822 | ENTRY(irq_move_cleanup_interrupt) |
820 | apicinterrupt IRQ_MOVE_CLEANUP_VECTOR,smp_irq_move_cleanup_interrupt | 823 | apicinterrupt IRQ_MOVE_CLEANUP_VECTOR,smp_irq_move_cleanup_interrupt |
821 | END(irq_move_cleanup_interrupt) | 824 | END(irq_move_cleanup_interrupt) |
diff --git a/arch/x86/kernel/io_apic_32.c b/arch/x86/kernel/io_apic_32.c index 603261a5885c..558abf4c796a 100644 --- a/arch/x86/kernel/io_apic_32.c +++ b/arch/x86/kernel/io_apic_32.c | |||
@@ -1569,7 +1569,7 @@ void /*__init*/ print_local_APIC(void *dummy) | |||
1569 | 1569 | ||
1570 | void print_all_local_APICs(void) | 1570 | void print_all_local_APICs(void) |
1571 | { | 1571 | { |
1572 | on_each_cpu(print_local_APIC, NULL, 1, 1); | 1572 | on_each_cpu(print_local_APIC, NULL, 1); |
1573 | } | 1573 | } |
1574 | 1574 | ||
1575 | void /*__init*/ print_PIC(void) | 1575 | void /*__init*/ print_PIC(void) |
diff --git a/arch/x86/kernel/io_apic_64.c b/arch/x86/kernel/io_apic_64.c index b16ef029cf88..6510cde36b35 100644 --- a/arch/x86/kernel/io_apic_64.c +++ b/arch/x86/kernel/io_apic_64.c | |||
@@ -1160,7 +1160,7 @@ void __apicdebuginit print_local_APIC(void * dummy) | |||
1160 | 1160 | ||
1161 | void print_all_local_APICs (void) | 1161 | void print_all_local_APICs (void) |
1162 | { | 1162 | { |
1163 | on_each_cpu(print_local_APIC, NULL, 1, 1); | 1163 | on_each_cpu(print_local_APIC, NULL, 1); |
1164 | } | 1164 | } |
1165 | 1165 | ||
1166 | void __apicdebuginit print_PIC(void) | 1166 | void __apicdebuginit print_PIC(void) |
diff --git a/arch/x86/kernel/irqinit_64.c b/arch/x86/kernel/irqinit_64.c index 31f49e8f46a7..0373e88de95a 100644 --- a/arch/x86/kernel/irqinit_64.c +++ b/arch/x86/kernel/irqinit_64.c | |||
@@ -199,6 +199,10 @@ void __init native_init_IRQ(void) | |||
199 | /* IPI for generic function call */ | 199 | /* IPI for generic function call */ |
200 | alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt); | 200 | alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt); |
201 | 201 | ||
202 | /* IPI for generic single function call */ | ||
203 | alloc_intr_gate(CALL_FUNCTION_SINGLE_VECTOR, | ||
204 | call_function_single_interrupt); | ||
205 | |||
202 | /* Low priority IPI to cleanup after moving an irq */ | 206 | /* Low priority IPI to cleanup after moving an irq */ |
203 | set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt); | 207 | set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt); |
204 | #endif | 208 | #endif |
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c index 21f2bae98c15..a8449571858a 100644 --- a/arch/x86/kernel/ldt.c +++ b/arch/x86/kernel/ldt.c | |||
@@ -68,7 +68,7 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload) | |||
68 | load_LDT(pc); | 68 | load_LDT(pc); |
69 | mask = cpumask_of_cpu(smp_processor_id()); | 69 | mask = cpumask_of_cpu(smp_processor_id()); |
70 | if (!cpus_equal(current->mm->cpu_vm_mask, mask)) | 70 | if (!cpus_equal(current->mm->cpu_vm_mask, mask)) |
71 | smp_call_function(flush_ldt, current->mm, 1, 1); | 71 | smp_call_function(flush_ldt, current->mm, 1); |
72 | preempt_enable(); | 72 | preempt_enable(); |
73 | #else | 73 | #else |
74 | load_LDT(pc); | 74 | load_LDT(pc); |
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c index 716b89284be0..ec024b3baad0 100644 --- a/arch/x86/kernel/nmi.c +++ b/arch/x86/kernel/nmi.c | |||
@@ -130,7 +130,7 @@ int __init check_nmi_watchdog(void) | |||
130 | 130 | ||
131 | #ifdef CONFIG_SMP | 131 | #ifdef CONFIG_SMP |
132 | if (nmi_watchdog == NMI_LOCAL_APIC) | 132 | if (nmi_watchdog == NMI_LOCAL_APIC) |
133 | smp_call_function(nmi_cpu_busy, (void *)&endflag, 0, 0); | 133 | smp_call_function(nmi_cpu_busy, (void *)&endflag, 0); |
134 | #endif | 134 | #endif |
135 | 135 | ||
136 | for_each_possible_cpu(cpu) | 136 | for_each_possible_cpu(cpu) |
@@ -272,7 +272,7 @@ static void __acpi_nmi_enable(void *__unused) | |||
272 | void acpi_nmi_enable(void) | 272 | void acpi_nmi_enable(void) |
273 | { | 273 | { |
274 | if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC) | 274 | if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC) |
275 | on_each_cpu(__acpi_nmi_enable, NULL, 0, 1); | 275 | on_each_cpu(__acpi_nmi_enable, NULL, 1); |
276 | } | 276 | } |
277 | 277 | ||
278 | static void __acpi_nmi_disable(void *__unused) | 278 | static void __acpi_nmi_disable(void *__unused) |
@@ -286,7 +286,7 @@ static void __acpi_nmi_disable(void *__unused) | |||
286 | void acpi_nmi_disable(void) | 286 | void acpi_nmi_disable(void) |
287 | { | 287 | { |
288 | if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC) | 288 | if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC) |
289 | on_each_cpu(__acpi_nmi_disable, NULL, 0, 1); | 289 | on_each_cpu(__acpi_nmi_disable, NULL, 1); |
290 | } | 290 | } |
291 | 291 | ||
292 | void setup_apic_nmi_watchdog(void *unused) | 292 | void setup_apic_nmi_watchdog(void *unused) |
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 4061d63aabe7..7dceea947232 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c | |||
@@ -132,7 +132,7 @@ void cpu_idle_wait(void) | |||
132 | { | 132 | { |
133 | smp_mb(); | 133 | smp_mb(); |
134 | /* kick all the CPUs so that they exit out of pm_idle */ | 134 | /* kick all the CPUs so that they exit out of pm_idle */ |
135 | smp_call_function(do_nothing, NULL, 0, 1); | 135 | smp_call_function(do_nothing, NULL, 1); |
136 | } | 136 | } |
137 | EXPORT_SYMBOL_GPL(cpu_idle_wait); | 137 | EXPORT_SYMBOL_GPL(cpu_idle_wait); |
138 | 138 | ||
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c index 79bdcd11c66e..d13858818100 100644 --- a/arch/x86/kernel/quirks.c +++ b/arch/x86/kernel/quirks.c | |||
@@ -266,6 +266,8 @@ static void old_ich_force_enable_hpet_user(struct pci_dev *dev) | |||
266 | hpet_print_force_info(); | 266 | hpet_print_force_info(); |
267 | } | 267 | } |
268 | 268 | ||
269 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_1, | ||
270 | old_ich_force_enable_hpet_user); | ||
269 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, | 271 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0, |
270 | old_ich_force_enable_hpet_user); | 272 | old_ich_force_enable_hpet_user); |
271 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, | 273 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12, |
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c index 0cb7aadc87cd..361b7a4c640c 100644 --- a/arch/x86/kernel/smp.c +++ b/arch/x86/kernel/smp.c | |||
@@ -121,132 +121,23 @@ static void native_smp_send_reschedule(int cpu) | |||
121 | send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR); | 121 | send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR); |
122 | } | 122 | } |
123 | 123 | ||
124 | /* | 124 | void native_send_call_func_single_ipi(int cpu) |
125 | * Structure and data for smp_call_function(). This is designed to minimise | ||
126 | * static memory requirements. It also looks cleaner. | ||
127 | */ | ||
128 | static DEFINE_SPINLOCK(call_lock); | ||
129 | |||
130 | struct call_data_struct { | ||
131 | void (*func) (void *info); | ||
132 | void *info; | ||
133 | atomic_t started; | ||
134 | atomic_t finished; | ||
135 | int wait; | ||
136 | }; | ||
137 | |||
138 | void lock_ipi_call_lock(void) | ||
139 | { | 125 | { |
140 | spin_lock_irq(&call_lock); | 126 | send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNCTION_SINGLE_VECTOR); |
141 | } | ||
142 | |||
143 | void unlock_ipi_call_lock(void) | ||
144 | { | ||
145 | spin_unlock_irq(&call_lock); | ||
146 | } | ||
147 | |||
148 | static struct call_data_struct *call_data; | ||
149 | |||
150 | static void __smp_call_function(void (*func) (void *info), void *info, | ||
151 | int nonatomic, int wait) | ||
152 | { | ||
153 | struct call_data_struct data; | ||
154 | int cpus = num_online_cpus() - 1; | ||
155 | |||
156 | if (!cpus) | ||
157 | return; | ||
158 | |||
159 | data.func = func; | ||
160 | data.info = info; | ||
161 | atomic_set(&data.started, 0); | ||
162 | data.wait = wait; | ||
163 | if (wait) | ||
164 | atomic_set(&data.finished, 0); | ||
165 | |||
166 | call_data = &data; | ||
167 | mb(); | ||
168 | |||
169 | /* Send a message to all other CPUs and wait for them to respond */ | ||
170 | send_IPI_allbutself(CALL_FUNCTION_VECTOR); | ||
171 | |||
172 | /* Wait for response */ | ||
173 | while (atomic_read(&data.started) != cpus) | ||
174 | cpu_relax(); | ||
175 | |||
176 | if (wait) | ||
177 | while (atomic_read(&data.finished) != cpus) | ||
178 | cpu_relax(); | ||
179 | } | 127 | } |
180 | 128 | ||
181 | 129 | void native_send_call_func_ipi(cpumask_t mask) | |
182 | /** | ||
183 | * smp_call_function_mask(): Run a function on a set of other CPUs. | ||
184 | * @mask: The set of cpus to run on. Must not include the current cpu. | ||
185 | * @func: The function to run. This must be fast and non-blocking. | ||
186 | * @info: An arbitrary pointer to pass to the function. | ||
187 | * @wait: If true, wait (atomically) until function has completed on other CPUs. | ||
188 | * | ||
189 | * Returns 0 on success, else a negative status code. | ||
190 | * | ||
191 | * If @wait is true, then returns once @func has returned; otherwise | ||
192 | * it returns just before the target cpu calls @func. | ||
193 | * | ||
194 | * You must not call this function with disabled interrupts or from a | ||
195 | * hardware interrupt handler or from a bottom half handler. | ||
196 | */ | ||
197 | static int | ||
198 | native_smp_call_function_mask(cpumask_t mask, | ||
199 | void (*func)(void *), void *info, | ||
200 | int wait) | ||
201 | { | 130 | { |
202 | struct call_data_struct data; | ||
203 | cpumask_t allbutself; | 131 | cpumask_t allbutself; |
204 | int cpus; | ||
205 | |||
206 | /* Can deadlock when called with interrupts disabled */ | ||
207 | WARN_ON(irqs_disabled()); | ||
208 | |||
209 | /* Holding any lock stops cpus from going down. */ | ||
210 | spin_lock(&call_lock); | ||
211 | 132 | ||
212 | allbutself = cpu_online_map; | 133 | allbutself = cpu_online_map; |
213 | cpu_clear(smp_processor_id(), allbutself); | 134 | cpu_clear(smp_processor_id(), allbutself); |
214 | 135 | ||
215 | cpus_and(mask, mask, allbutself); | ||
216 | cpus = cpus_weight(mask); | ||
217 | |||
218 | if (!cpus) { | ||
219 | spin_unlock(&call_lock); | ||
220 | return 0; | ||
221 | } | ||
222 | |||
223 | data.func = func; | ||
224 | data.info = info; | ||
225 | atomic_set(&data.started, 0); | ||
226 | data.wait = wait; | ||
227 | if (wait) | ||
228 | atomic_set(&data.finished, 0); | ||
229 | |||
230 | call_data = &data; | ||
231 | wmb(); | ||
232 | |||
233 | /* Send a message to other CPUs */ | ||
234 | if (cpus_equal(mask, allbutself) && | 136 | if (cpus_equal(mask, allbutself) && |
235 | cpus_equal(cpu_online_map, cpu_callout_map)) | 137 | cpus_equal(cpu_online_map, cpu_callout_map)) |
236 | send_IPI_allbutself(CALL_FUNCTION_VECTOR); | 138 | send_IPI_allbutself(CALL_FUNCTION_VECTOR); |
237 | else | 139 | else |
238 | send_IPI_mask(mask, CALL_FUNCTION_VECTOR); | 140 | send_IPI_mask(mask, CALL_FUNCTION_VECTOR); |
239 | |||
240 | /* Wait for response */ | ||
241 | while (atomic_read(&data.started) != cpus) | ||
242 | cpu_relax(); | ||
243 | |||
244 | if (wait) | ||
245 | while (atomic_read(&data.finished) != cpus) | ||
246 | cpu_relax(); | ||
247 | spin_unlock(&call_lock); | ||
248 | |||
249 | return 0; | ||
250 | } | 141 | } |
251 | 142 | ||
252 | static void stop_this_cpu(void *dummy) | 143 | static void stop_this_cpu(void *dummy) |
@@ -268,18 +159,13 @@ static void stop_this_cpu(void *dummy) | |||
268 | 159 | ||
269 | static void native_smp_send_stop(void) | 160 | static void native_smp_send_stop(void) |
270 | { | 161 | { |
271 | int nolock; | ||
272 | unsigned long flags; | 162 | unsigned long flags; |
273 | 163 | ||
274 | if (reboot_force) | 164 | if (reboot_force) |
275 | return; | 165 | return; |
276 | 166 | ||
277 | /* Don't deadlock on the call lock in panic */ | 167 | smp_call_function(stop_this_cpu, NULL, 0); |
278 | nolock = !spin_trylock(&call_lock); | ||
279 | local_irq_save(flags); | 168 | local_irq_save(flags); |
280 | __smp_call_function(stop_this_cpu, NULL, 0, 0); | ||
281 | if (!nolock) | ||
282 | spin_unlock(&call_lock); | ||
283 | disable_local_APIC(); | 169 | disable_local_APIC(); |
284 | local_irq_restore(flags); | 170 | local_irq_restore(flags); |
285 | } | 171 | } |
@@ -301,33 +187,28 @@ void smp_reschedule_interrupt(struct pt_regs *regs) | |||
301 | 187 | ||
302 | void smp_call_function_interrupt(struct pt_regs *regs) | 188 | void smp_call_function_interrupt(struct pt_regs *regs) |
303 | { | 189 | { |
304 | void (*func) (void *info) = call_data->func; | ||
305 | void *info = call_data->info; | ||
306 | int wait = call_data->wait; | ||
307 | |||
308 | ack_APIC_irq(); | 190 | ack_APIC_irq(); |
309 | /* | ||
310 | * Notify initiating CPU that I've grabbed the data and am | ||
311 | * about to execute the function | ||
312 | */ | ||
313 | mb(); | ||
314 | atomic_inc(&call_data->started); | ||
315 | /* | ||
316 | * At this point the info structure may be out of scope unless wait==1 | ||
317 | */ | ||
318 | irq_enter(); | 191 | irq_enter(); |
319 | (*func)(info); | 192 | generic_smp_call_function_interrupt(); |
320 | #ifdef CONFIG_X86_32 | 193 | #ifdef CONFIG_X86_32 |
321 | __get_cpu_var(irq_stat).irq_call_count++; | 194 | __get_cpu_var(irq_stat).irq_call_count++; |
322 | #else | 195 | #else |
323 | add_pda(irq_call_count, 1); | 196 | add_pda(irq_call_count, 1); |
324 | #endif | 197 | #endif |
325 | irq_exit(); | 198 | irq_exit(); |
199 | } | ||
326 | 200 | ||
327 | if (wait) { | 201 | void smp_call_function_single_interrupt(struct pt_regs *regs) |
328 | mb(); | 202 | { |
329 | atomic_inc(&call_data->finished); | 203 | ack_APIC_irq(); |
330 | } | 204 | irq_enter(); |
205 | generic_smp_call_function_single_interrupt(); | ||
206 | #ifdef CONFIG_X86_32 | ||
207 | __get_cpu_var(irq_stat).irq_call_count++; | ||
208 | #else | ||
209 | add_pda(irq_call_count, 1); | ||
210 | #endif | ||
211 | irq_exit(); | ||
331 | } | 212 | } |
332 | 213 | ||
333 | struct smp_ops smp_ops = { | 214 | struct smp_ops smp_ops = { |
@@ -338,7 +219,8 @@ struct smp_ops smp_ops = { | |||
338 | 219 | ||
339 | .smp_send_stop = native_smp_send_stop, | 220 | .smp_send_stop = native_smp_send_stop, |
340 | .smp_send_reschedule = native_smp_send_reschedule, | 221 | .smp_send_reschedule = native_smp_send_reschedule, |
341 | .smp_call_function_mask = native_smp_call_function_mask, | 222 | |
223 | .send_call_func_ipi = native_send_call_func_ipi, | ||
224 | .send_call_func_single_ipi = native_send_call_func_single_ipi, | ||
342 | }; | 225 | }; |
343 | EXPORT_SYMBOL_GPL(smp_ops); | 226 | EXPORT_SYMBOL_GPL(smp_ops); |
344 | |||
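The generic helpers reach these two hooks through smp_ops; roughly the glue that include/asm-x86/smp.h provides (sketched here, not verbatim):

static inline void arch_send_call_function_single_ipi(int cpu)
{
        smp_ops.send_call_func_single_ipi(cpu);
}

static inline void arch_send_call_function_ipi(cpumask_t mask)
{
        smp_ops.send_call_func_ipi(mask);
}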
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index f35c2d8016ac..687376ab07e8 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c | |||
@@ -327,12 +327,12 @@ static void __cpuinit start_secondary(void *unused) | |||
327 | * lock helps us to not include this cpu in a currently in progress | 327 | * lock helps us to not include this cpu in a currently in progress |
328 | * smp_call_function(). | 328 | * smp_call_function(). |
329 | */ | 329 | */ |
330 | lock_ipi_call_lock(); | 330 | ipi_call_lock_irq(); |
331 | #ifdef CONFIG_X86_IO_APIC | 331 | #ifdef CONFIG_X86_IO_APIC |
332 | setup_vector_irq(smp_processor_id()); | 332 | setup_vector_irq(smp_processor_id()); |
333 | #endif | 333 | #endif |
334 | cpu_set(smp_processor_id(), cpu_online_map); | 334 | cpu_set(smp_processor_id(), cpu_online_map); |
335 | unlock_ipi_call_lock(); | 335 | ipi_call_unlock_irq(); |
336 | per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE; | 336 | per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE; |
337 | 337 | ||
338 | setup_secondary_clock(); | 338 | setup_secondary_clock(); |
diff --git a/arch/x86/kernel/smpcommon.c b/arch/x86/kernel/smpcommon.c index 3449064d141a..99941b37eca0 100644 --- a/arch/x86/kernel/smpcommon.c +++ b/arch/x86/kernel/smpcommon.c | |||
@@ -25,59 +25,3 @@ __cpuinit void init_gdt(int cpu) | |||
25 | per_cpu(cpu_number, cpu) = cpu; | 25 | per_cpu(cpu_number, cpu) = cpu; |
26 | } | 26 | } |
27 | #endif | 27 | #endif |
28 | |||
29 | /** | ||
30 | * smp_call_function(): Run a function on all other CPUs. | ||
31 | * @func: The function to run. This must be fast and non-blocking. | ||
32 | * @info: An arbitrary pointer to pass to the function. | ||
33 | * @nonatomic: Unused. | ||
34 | * @wait: If true, wait (atomically) until function has completed on other CPUs. | ||
35 | * | ||
36 | * Returns 0 on success, else a negative status code. | ||
37 | * | ||
38 | * If @wait is true, then returns once @func has returned; otherwise | ||
39 | * it returns just before the target cpu calls @func. | ||
40 | * | ||
41 | * You must not call this function with disabled interrupts or from a | ||
42 | * hardware interrupt handler or from a bottom half handler. | ||
43 | */ | ||
44 | int smp_call_function(void (*func) (void *info), void *info, int nonatomic, | ||
45 | int wait) | ||
46 | { | ||
47 | return smp_call_function_mask(cpu_online_map, func, info, wait); | ||
48 | } | ||
49 | EXPORT_SYMBOL(smp_call_function); | ||
50 | |||
51 | /** | ||
52 | * smp_call_function_single - Run a function on a specific CPU | ||
53 | * @cpu: The target CPU. Cannot be the calling CPU. | ||
54 | * @func: The function to run. This must be fast and non-blocking. | ||
55 | * @info: An arbitrary pointer to pass to the function. | ||
56 | * @nonatomic: Unused. | ||
57 | * @wait: If true, wait until function has completed on other CPUs. | ||
58 | * | ||
59 | * Returns 0 on success, else a negative status code. | ||
60 | * | ||
61 | * If @wait is true, then returns once @func has returned; otherwise | ||
62 | * it returns just before the target cpu calls @func. | ||
63 | */ | ||
64 | int smp_call_function_single(int cpu, void (*func) (void *info), void *info, | ||
65 | int nonatomic, int wait) | ||
66 | { | ||
67 | /* prevent preemption and reschedule on another processor */ | ||
68 | int ret; | ||
69 | int me = get_cpu(); | ||
70 | if (cpu == me) { | ||
71 | local_irq_disable(); | ||
72 | func(info); | ||
73 | local_irq_enable(); | ||
74 | put_cpu(); | ||
75 | return 0; | ||
76 | } | ||
77 | |||
78 | ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, wait); | ||
79 | |||
80 | put_cpu(); | ||
81 | return ret; | ||
82 | } | ||
83 | EXPORT_SYMBOL(smp_call_function_single); | ||
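With the x86-private wrappers deleted, every caller in this patch moves to the generic entry points in kernel/smp.c, whose only visible change is the dropped, always-unused nonatomic flag. A hedged usage sketch of the new calling convention (bump_counter and demo_cross_call are made-up names):

	#include <linux/smp.h>
	#include <asm/atomic.h>

	static void bump_counter(void *info)
	{
		atomic_inc((atomic_t *)info);	/* runs in IPI context, IRQs off */
	}

	static int demo_cross_call(void)
	{
		static atomic_t hits = ATOMIC_INIT(0);
		int err;

		/* new 3-argument form: (func, info, wait) */
		err = smp_call_function(bump_counter, &hits, 1);
		if (err)
			return err;

		/* new 4-argument form: (cpu, func, info, wait); unlike the
		 * deleted x86 copy, the generic version also accepts the
		 * calling CPU and simply runs func locally in that case */
		return smp_call_function_single(0, bump_counter, &hits, 1);
	}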
diff --git a/arch/x86/kernel/stacktrace.c b/arch/x86/kernel/stacktrace.c index c28c342c162f..a03e7f6d90c3 100644 --- a/arch/x86/kernel/stacktrace.c +++ b/arch/x86/kernel/stacktrace.c | |||
@@ -74,6 +74,7 @@ void save_stack_trace(struct stack_trace *trace) | |||
74 | if (trace->nr_entries < trace->max_entries) | 74 | if (trace->nr_entries < trace->max_entries) |
75 | trace->entries[trace->nr_entries++] = ULONG_MAX; | 75 | trace->entries[trace->nr_entries++] = ULONG_MAX; |
76 | } | 76 | } |
77 | EXPORT_SYMBOL_GPL(save_stack_trace); | ||
77 | 78 | ||
78 | void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) | 79 | void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) |
79 | { | 80 | { |
@@ -81,3 +82,4 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) | |||
81 | if (trace->nr_entries < trace->max_entries) | 82 | if (trace->nr_entries < trace->max_entries) |
82 | trace->entries[trace->nr_entries++] = ULONG_MAX; | 83 | trace->entries[trace->nr_entries++] = ULONG_MAX; |
83 | } | 84 | } |
85 | EXPORT_SYMBOL_GPL(save_stack_trace_tsk); | ||
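The two added exports make the x86 stack walkers reachable from GPL modules. A hedged sketch of a module-side caller (dump_here is a made-up name; it assumes print_stack_trace() is visible to the caller):

	#include <linux/stacktrace.h>
	#include <linux/kernel.h>

	static void dump_here(void)
	{
		unsigned long entries[16];
		struct stack_trace trace = {
			.entries	= entries,
			.max_entries	= ARRAY_SIZE(entries),
			.skip		= 1,	/* skip dump_here() itself */
		};

		save_stack_trace(&trace);	/* now EXPORT_SYMBOL_GPL */
		print_stack_trace(&trace, 0);
	}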
diff --git a/arch/x86/kernel/tlb_32.c b/arch/x86/kernel/tlb_32.c index 9bb2363851af..fec1ecedc9b7 100644 --- a/arch/x86/kernel/tlb_32.c +++ b/arch/x86/kernel/tlb_32.c | |||
@@ -238,6 +238,6 @@ static void do_flush_tlb_all(void *info) | |||
238 | 238 | ||
239 | void flush_tlb_all(void) | 239 | void flush_tlb_all(void) |
240 | { | 240 | { |
241 | on_each_cpu(do_flush_tlb_all, NULL, 1, 1); | 241 | on_each_cpu(do_flush_tlb_all, NULL, 1); |
242 | } | 242 | } |
243 | 243 | ||
diff --git a/arch/x86/kernel/tlb_64.c b/arch/x86/kernel/tlb_64.c index 5039d0f097a2..dcbf7a1159ea 100644 --- a/arch/x86/kernel/tlb_64.c +++ b/arch/x86/kernel/tlb_64.c | |||
@@ -275,5 +275,5 @@ static void do_flush_tlb_all(void *info) | |||
275 | 275 | ||
276 | void flush_tlb_all(void) | 276 | void flush_tlb_all(void) |
277 | { | 277 | { |
278 | on_each_cpu(do_flush_tlb_all, NULL, 1, 1); | 278 | on_each_cpu(do_flush_tlb_all, NULL, 1); |
279 | } | 279 | } |
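Both TLB files show the purely mechanical half of the conversion: on_each_cpu() loses its unused middle flag and becomes (func, info, wait). A minimal sketch with made-up names:

	#include <linux/smp.h>

	static void poke(void *unused)
	{
		/* per-CPU work, e.g. a local TLB flush */
	}

	static void poke_all_cpus(void)
	{
		on_each_cpu(poke, NULL, 1);	/* wait == 1: returns after every CPU ran poke() */
	}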
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c index e50740d32314..0b8b6690a86d 100644 --- a/arch/x86/kernel/vsyscall_64.c +++ b/arch/x86/kernel/vsyscall_64.c | |||
@@ -279,7 +279,7 @@ cpu_vsyscall_notifier(struct notifier_block *n, unsigned long action, void *arg) | |||
279 | { | 279 | { |
280 | long cpu = (long)arg; | 280 | long cpu = (long)arg; |
281 | if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) | 281 | if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) |
282 | smp_call_function_single(cpu, cpu_vsyscall_init, NULL, 0, 1); | 282 | smp_call_function_single(cpu, cpu_vsyscall_init, NULL, 1); |
283 | return NOTIFY_DONE; | 283 | return NOTIFY_DONE; |
284 | } | 284 | } |
285 | 285 | ||
@@ -302,7 +302,7 @@ static int __init vsyscall_init(void) | |||
302 | #ifdef CONFIG_SYSCTL | 302 | #ifdef CONFIG_SYSCTL |
303 | register_sysctl_table(kernel_root_table2); | 303 | register_sysctl_table(kernel_root_table2); |
304 | #endif | 304 | #endif |
305 | on_each_cpu(cpu_vsyscall_init, NULL, 0, 1); | 305 | on_each_cpu(cpu_vsyscall_init, NULL, 1); |
306 | hotcpu_notifier(cpu_vsyscall_notifier, 0); | 306 | hotcpu_notifier(cpu_vsyscall_notifier, 0); |
307 | return 0; | 307 | return 0; |
308 | } | 308 | } |
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 540e95179074..10ce6ee4c491 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
@@ -335,7 +335,7 @@ static void vcpu_clear(struct vcpu_vmx *vmx) | |||
335 | { | 335 | { |
336 | if (vmx->vcpu.cpu == -1) | 336 | if (vmx->vcpu.cpu == -1) |
337 | return; | 337 | return; |
338 | smp_call_function_single(vmx->vcpu.cpu, __vcpu_clear, vmx, 0, 1); | 338 | smp_call_function_single(vmx->vcpu.cpu, __vcpu_clear, vmx, 1); |
339 | vmx->launched = 0; | 339 | vmx->launched = 0; |
340 | } | 340 | } |
341 | 341 | ||
@@ -2968,7 +2968,7 @@ static void vmx_free_vmcs(struct kvm_vcpu *vcpu) | |||
2968 | struct vcpu_vmx *vmx = to_vmx(vcpu); | 2968 | struct vcpu_vmx *vmx = to_vmx(vcpu); |
2969 | 2969 | ||
2970 | if (vmx->vmcs) { | 2970 | if (vmx->vmcs) { |
2971 | on_each_cpu(__vcpu_clear, vmx, 0, 1); | 2971 | on_each_cpu(__vcpu_clear, vmx, 1); |
2972 | free_vmcs(vmx->vmcs); | 2972 | free_vmcs(vmx->vmcs); |
2973 | vmx->vmcs = NULL; | 2973 | vmx->vmcs = NULL; |
2974 | } | 2974 | } |
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 63a77caa59f1..0faa2546b1cd 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
@@ -4044,6 +4044,6 @@ void kvm_vcpu_kick(struct kvm_vcpu *vcpu) | |||
4044 | * So there is no need to call smp_call_function_single() in that case. | 4044 | * So there is no need to call smp_call_function_single() in that case. |
4045 | */ | 4045 | */ |
4046 | if (vcpu->guest_mode && vcpu->cpu != cpu) | 4046 | if (vcpu->guest_mode && vcpu->cpu != cpu) |
4047 | smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0, 0); | 4047 | smp_call_function_single(ipi_pcpu, vcpu_kick_intr, vcpu, 0); |
4048 | put_cpu(); | 4048 | put_cpu(); |
4049 | } | 4049 | } |
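kvm_vcpu_kick() is the one asynchronous (wait == 0) call site in this series: the kick returns before vcpu_kick_intr() runs, which is safe only because the vcpu pointer outlives the call. A hedged restatement of that lifetime rule (all names made up):

	static struct kick_state {
		int flag;
	} kick_state;				/* static: outlives the async call */

	static void set_flag(void *info)
	{
		((struct kick_state *)info)->flag = 1;
	}

	static void kick_async(int cpu)
	{
		/* wait == 0: returns immediately, so info must stay valid
		 * until the target CPU has run set_flag() */
		smp_call_function_single(cpu, set_flag, &kick_state, 0);
	}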
diff --git a/arch/x86/lib/msr-on-cpu.c b/arch/x86/lib/msr-on-cpu.c index 57d043fa893e..d5a2b39f882b 100644 --- a/arch/x86/lib/msr-on-cpu.c +++ b/arch/x86/lib/msr-on-cpu.c | |||
@@ -30,10 +30,10 @@ static int _rdmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 *l, u32 *h, int safe) | |||
30 | 30 | ||
31 | rv.msr_no = msr_no; | 31 | rv.msr_no = msr_no; |
32 | if (safe) { | 32 | if (safe) { |
33 | smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 0, 1); | 33 | smp_call_function_single(cpu, __rdmsr_safe_on_cpu, &rv, 1); |
34 | err = rv.err; | 34 | err = rv.err; |
35 | } else { | 35 | } else { |
36 | smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 0, 1); | 36 | smp_call_function_single(cpu, __rdmsr_on_cpu, &rv, 1); |
37 | } | 37 | } |
38 | *l = rv.l; | 38 | *l = rv.l; |
39 | *h = rv.h; | 39 | *h = rv.h; |
@@ -64,10 +64,10 @@ static int _wrmsr_on_cpu(unsigned int cpu, u32 msr_no, u32 l, u32 h, int safe) | |||
64 | rv.l = l; | 64 | rv.l = l; |
65 | rv.h = h; | 65 | rv.h = h; |
66 | if (safe) { | 66 | if (safe) { |
67 | smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 0, 1); | 67 | smp_call_function_single(cpu, __wrmsr_safe_on_cpu, &rv, 1); |
68 | err = rv.err; | 68 | err = rv.err; |
69 | } else { | 69 | } else { |
70 | smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 0, 1); | 70 | smp_call_function_single(cpu, __wrmsr_on_cpu, &rv, 1); |
71 | } | 71 | } |
72 | 72 | ||
73 | return err; | 73 | return err; |
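The MSR helpers stay thin synchronous wrappers; only the dropped argument changes here. A hedged sketch of the exported wrapper in use (read_tsc_of is a made-up name; 0x10 is the architectural TSC MSR index):

	#include <asm/msr.h>

	static int read_tsc_of(unsigned int cpu, u32 *lo, u32 *hi)
	{
		/* executes the rdmsr on 'cpu' via smp_call_function_single()
		 * and waits for the result */
		return rdmsr_on_cpu(cpu, 0x10, lo, hi);
	}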
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c index 8dedd01e909f..ee0fba092157 100644 --- a/arch/x86/mach-voyager/voyager_smp.c +++ b/arch/x86/mach-voyager/voyager_smp.c | |||
@@ -950,94 +950,24 @@ static void smp_stop_cpu_function(void *dummy) | |||
950 | halt(); | 950 | halt(); |
951 | } | 951 | } |
952 | 952 | ||
953 | static DEFINE_SPINLOCK(call_lock); | ||
954 | |||
955 | struct call_data_struct { | ||
956 | void (*func) (void *info); | ||
957 | void *info; | ||
958 | volatile unsigned long started; | ||
959 | volatile unsigned long finished; | ||
960 | int wait; | ||
961 | }; | ||
962 | |||
963 | static struct call_data_struct *call_data; | ||
964 | |||
965 | /* execute a thread on a new CPU. The function to be called must be | 953 | /* execute a thread on a new CPU. The function to be called must be |
966 | * previously set up. This is used to schedule a function for | 954 | * previously set up. This is used to schedule a function for |
967 | * execution on all CPUs - set up the function then broadcast a | 955 | * execution on all CPUs - set up the function then broadcast a |
968 | * function_interrupt CPI to come here on each CPU */ | 956 | * function_interrupt CPI to come here on each CPU */ |
969 | static void smp_call_function_interrupt(void) | 957 | static void smp_call_function_interrupt(void) |
970 | { | 958 | { |
971 | void (*func) (void *info) = call_data->func; | ||
972 | void *info = call_data->info; | ||
973 | /* must take copy of wait because call_data may be replaced | ||
974 | * unless the function is waiting for us to finish */ | ||
975 | int wait = call_data->wait; | ||
976 | __u8 cpu = smp_processor_id(); | ||
977 | |||
978 | /* | ||
979 | * Notify initiating CPU that I've grabbed the data and am | ||
980 | * about to execute the function | ||
981 | */ | ||
982 | mb(); | ||
983 | if (!test_and_clear_bit(cpu, &call_data->started)) { | ||
984 | /* If the bit wasn't set, this could be a replay */ | ||
985 | printk(KERN_WARNING "VOYAGER SMP: CPU %d received call function" | ||
986 | " with no call pending\n", cpu); | ||
987 | return; | ||
988 | } | ||
989 | /* | ||
990 | * At this point the info structure may be out of scope unless wait==1 | ||
991 | */ | ||
992 | irq_enter(); | 959 | irq_enter(); |
993 | (*func) (info); | 960 | generic_smp_call_function_interrupt(); |
994 | __get_cpu_var(irq_stat).irq_call_count++; | 961 | __get_cpu_var(irq_stat).irq_call_count++; |
995 | irq_exit(); | 962 | irq_exit(); |
996 | if (wait) { | ||
997 | mb(); | ||
998 | clear_bit(cpu, &call_data->finished); | ||
999 | } | ||
1000 | } | 963 | } |
1001 | 964 | ||
1002 | static int | 965 | static void smp_call_function_single_interrupt(void) |
1003 | voyager_smp_call_function_mask(cpumask_t cpumask, | ||
1004 | void (*func) (void *info), void *info, int wait) | ||
1005 | { | 966 | { |
1006 | struct call_data_struct data; | 967 | irq_enter(); |
1007 | u32 mask = cpus_addr(cpumask)[0]; | 968 | generic_smp_call_function_single_interrupt(); |
1008 | 969 | __get_cpu_var(irq_stat).irq_call_count++; | |
1009 | mask &= ~(1 << smp_processor_id()); | 970 | irq_exit(); |
1010 | |||
1011 | if (!mask) | ||
1012 | return 0; | ||
1013 | |||
1014 | /* Can deadlock when called with interrupts disabled */ | ||
1015 | WARN_ON(irqs_disabled()); | ||
1016 | |||
1017 | data.func = func; | ||
1018 | data.info = info; | ||
1019 | data.started = mask; | ||
1020 | data.wait = wait; | ||
1021 | if (wait) | ||
1022 | data.finished = mask; | ||
1023 | |||
1024 | spin_lock(&call_lock); | ||
1025 | call_data = &data; | ||
1026 | wmb(); | ||
1027 | /* Send a message to all other CPUs and wait for them to respond */ | ||
1028 | send_CPI(mask, VIC_CALL_FUNCTION_CPI); | ||
1029 | |||
1030 | /* Wait for response */ | ||
1031 | while (data.started) | ||
1032 | barrier(); | ||
1033 | |||
1034 | if (wait) | ||
1035 | while (data.finished) | ||
1036 | barrier(); | ||
1037 | |||
1038 | spin_unlock(&call_lock); | ||
1039 | |||
1040 | return 0; | ||
1041 | } | 971 | } |
1042 | 972 | ||
1043 | /* Sorry about the name. In an APIC based system, the APICs | 973 | /* Sorry about the name. In an APIC based system, the APICs |
@@ -1094,6 +1024,12 @@ void smp_qic_call_function_interrupt(struct pt_regs *regs) | |||
1094 | smp_call_function_interrupt(); | 1024 | smp_call_function_interrupt(); |
1095 | } | 1025 | } |
1096 | 1026 | ||
1027 | void smp_qic_call_function_single_interrupt(struct pt_regs *regs) | ||
1028 | { | ||
1029 | ack_QIC_CPI(QIC_CALL_FUNCTION_SINGLE_CPI); | ||
1030 | smp_call_function_single_interrupt(); | ||
1031 | } | ||
1032 | |||
1097 | void smp_vic_cpi_interrupt(struct pt_regs *regs) | 1033 | void smp_vic_cpi_interrupt(struct pt_regs *regs) |
1098 | { | 1034 | { |
1099 | struct pt_regs *old_regs = set_irq_regs(regs); | 1035 | struct pt_regs *old_regs = set_irq_regs(regs); |
@@ -1114,6 +1050,8 @@ void smp_vic_cpi_interrupt(struct pt_regs *regs) | |||
1114 | smp_enable_irq_interrupt(); | 1050 | smp_enable_irq_interrupt(); |
1115 | if (test_and_clear_bit(VIC_CALL_FUNCTION_CPI, &vic_cpi_mailbox[cpu])) | 1051 | if (test_and_clear_bit(VIC_CALL_FUNCTION_CPI, &vic_cpi_mailbox[cpu])) |
1116 | smp_call_function_interrupt(); | 1052 | smp_call_function_interrupt(); |
1053 | if (test_and_clear_bit(VIC_CALL_FUNCTION_SINGLE_CPI, &vic_cpi_mailbox[cpu])) | ||
1054 | smp_call_function_single_interrupt(); | ||
1117 | set_irq_regs(old_regs); | 1055 | set_irq_regs(old_regs); |
1118 | } | 1056 | } |
1119 | 1057 | ||
@@ -1129,7 +1067,7 @@ static void do_flush_tlb_all(void *info) | |||
1129 | /* flush the TLB of every active CPU in the system */ | 1067 | /* flush the TLB of every active CPU in the system */ |
1130 | void flush_tlb_all(void) | 1068 | void flush_tlb_all(void) |
1131 | { | 1069 | { |
1132 | on_each_cpu(do_flush_tlb_all, 0, 1, 1); | 1070 | on_each_cpu(do_flush_tlb_all, 0, 1); |
1133 | } | 1071 | } |
1134 | 1072 | ||
1135 | /* send a reschedule CPI to one CPU by physical CPU number*/ | 1073 | /* send a reschedule CPI to one CPU by physical CPU number*/ |
@@ -1161,7 +1099,7 @@ int safe_smp_processor_id(void) | |||
1161 | /* broadcast a halt to all other CPUs */ | 1099 | /* broadcast a halt to all other CPUs */ |
1162 | static void voyager_smp_send_stop(void) | 1100 | static void voyager_smp_send_stop(void) |
1163 | { | 1101 | { |
1164 | smp_call_function(smp_stop_cpu_function, NULL, 1, 1); | 1102 | smp_call_function(smp_stop_cpu_function, NULL, 1); |
1165 | } | 1103 | } |
1166 | 1104 | ||
1167 | /* this function is triggered in time.c when a clock tick fires | 1105 | /* this function is triggered in time.c when a clock tick fires |
@@ -1848,5 +1786,7 @@ struct smp_ops smp_ops = { | |||
1848 | 1786 | ||
1849 | .smp_send_stop = voyager_smp_send_stop, | 1787 | .smp_send_stop = voyager_smp_send_stop, |
1850 | .smp_send_reschedule = voyager_smp_send_reschedule, | 1788 | .smp_send_reschedule = voyager_smp_send_reschedule, |
1851 | .smp_call_function_mask = voyager_smp_call_function_mask, | 1789 | |
1790 | .send_call_func_ipi = native_send_call_func_ipi, | ||
1791 | .send_call_func_single_ipi = native_send_call_func_single_ipi, | ||
1852 | }; | 1792 | }; |
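After the deletion, Voyager keeps only ack-and-dispatch glue; the queueing, argument copying and wait logic that call_data_struct used to implement now lives once in kernel/smp.c. The per-architecture pattern reduces to roughly this (hedged sketch; arch_dispatch_single is an illustrative name):

	static void arch_dispatch_single(void)
	{
		irq_enter();
		generic_smp_call_function_single_interrupt();
		__get_cpu_var(irq_stat).irq_call_count++;	/* /proc/interrupts accounting */
		irq_exit();
	}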
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index 47f4e2e4a096..65c6e46bf059 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c | |||
@@ -141,7 +141,7 @@ static void cpa_flush_all(unsigned long cache) | |||
141 | { | 141 | { |
142 | BUG_ON(irqs_disabled()); | 142 | BUG_ON(irqs_disabled()); |
143 | 143 | ||
144 | on_each_cpu(__cpa_flush_all, (void *) cache, 1, 1); | 144 | on_each_cpu(__cpa_flush_all, (void *) cache, 1); |
145 | } | 145 | } |
146 | 146 | ||
147 | static void __cpa_flush_range(void *arg) | 147 | static void __cpa_flush_range(void *arg) |
@@ -162,7 +162,7 @@ static void cpa_flush_range(unsigned long start, int numpages, int cache) | |||
162 | BUG_ON(irqs_disabled()); | 162 | BUG_ON(irqs_disabled()); |
163 | WARN_ON(PAGE_ALIGN(start) != start); | 163 | WARN_ON(PAGE_ALIGN(start) != start); |
164 | 164 | ||
165 | on_each_cpu(__cpa_flush_range, NULL, 1, 1); | 165 | on_each_cpu(__cpa_flush_range, NULL, 1); |
166 | 166 | ||
167 | if (!cache) | 167 | if (!cache) |
168 | return; | 168 | return; |
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c index 2b6ad5b9f9d5..7f3329b55d2e 100644 --- a/arch/x86/oprofile/nmi_int.c +++ b/arch/x86/oprofile/nmi_int.c | |||
@@ -218,8 +218,8 @@ static int nmi_setup(void) | |||
218 | } | 218 | } |
219 | 219 | ||
220 | } | 220 | } |
221 | on_each_cpu(nmi_save_registers, NULL, 0, 1); | 221 | on_each_cpu(nmi_save_registers, NULL, 1); |
222 | on_each_cpu(nmi_cpu_setup, NULL, 0, 1); | 222 | on_each_cpu(nmi_cpu_setup, NULL, 1); |
223 | nmi_enabled = 1; | 223 | nmi_enabled = 1; |
224 | return 0; | 224 | return 0; |
225 | } | 225 | } |
@@ -271,7 +271,7 @@ static void nmi_shutdown(void) | |||
271 | { | 271 | { |
272 | struct op_msrs *msrs = &get_cpu_var(cpu_msrs); | 272 | struct op_msrs *msrs = &get_cpu_var(cpu_msrs); |
273 | nmi_enabled = 0; | 273 | nmi_enabled = 0; |
274 | on_each_cpu(nmi_cpu_shutdown, NULL, 0, 1); | 274 | on_each_cpu(nmi_cpu_shutdown, NULL, 1); |
275 | unregister_die_notifier(&profile_exceptions_nb); | 275 | unregister_die_notifier(&profile_exceptions_nb); |
276 | model->shutdown(msrs); | 276 | model->shutdown(msrs); |
277 | free_msrs(); | 277 | free_msrs(); |
@@ -286,7 +286,7 @@ static void nmi_cpu_start(void *dummy) | |||
286 | 286 | ||
287 | static int nmi_start(void) | 287 | static int nmi_start(void) |
288 | { | 288 | { |
289 | on_each_cpu(nmi_cpu_start, NULL, 0, 1); | 289 | on_each_cpu(nmi_cpu_start, NULL, 1); |
290 | return 0; | 290 | return 0; |
291 | } | 291 | } |
292 | 292 | ||
@@ -298,7 +298,7 @@ static void nmi_cpu_stop(void *dummy) | |||
298 | 298 | ||
299 | static void nmi_stop(void) | 299 | static void nmi_stop(void) |
300 | { | 300 | { |
301 | on_each_cpu(nmi_cpu_stop, NULL, 0, 1); | 301 | on_each_cpu(nmi_cpu_stop, NULL, 1); |
302 | } | 302 | } |
303 | 303 | ||
304 | struct op_counter_config counter_config[OP_MAX_COUNTER]; | 304 | struct op_counter_config counter_config[OP_MAX_COUNTER]; |
diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c index a18141ae3f02..dbf532369711 100644 --- a/arch/x86/pci/amd_bus.c +++ b/arch/x86/pci/amd_bus.c | |||
@@ -578,7 +578,7 @@ static int __init enable_pci_io_ecs(void) | |||
578 | /* assume all cpus from fam10h have IO ECS */ | 578 | /* assume all cpus from fam10h have IO ECS */ |
579 | if (boot_cpu_data.x86 < 0x10) | 579 | if (boot_cpu_data.x86 < 0x10) |
580 | return 0; | 580 | return 0; |
581 | on_each_cpu(enable_pci_io_ecs_per_cpu, NULL, 1, 1); | 581 | on_each_cpu(enable_pci_io_ecs_per_cpu, NULL, 1); |
582 | pci_probe |= PCI_HAS_IO_ECS; | 582 | pci_probe |= PCI_HAS_IO_ECS; |
583 | return 0; | 583 | return 0; |
584 | } | 584 | } |
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index dcd4e51f2f16..bb508456ef52 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c | |||
@@ -1214,7 +1214,9 @@ static const struct smp_ops xen_smp_ops __initdata = { | |||
1214 | 1214 | ||
1215 | .smp_send_stop = xen_smp_send_stop, | 1215 | .smp_send_stop = xen_smp_send_stop, |
1216 | .smp_send_reschedule = xen_smp_send_reschedule, | 1216 | .smp_send_reschedule = xen_smp_send_reschedule, |
1217 | .smp_call_function_mask = xen_smp_call_function_mask, | 1217 | |
1218 | .send_call_func_ipi = xen_smp_send_call_function_ipi, | ||
1219 | .send_call_func_single_ipi = xen_smp_send_call_function_single_ipi, | ||
1218 | }; | 1220 | }; |
1219 | #endif /* CONFIG_SMP */ | 1221 | #endif /* CONFIG_SMP */ |
1220 | 1222 | ||
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index 42b3b9ed641d..ff0aa74afaa1 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c | |||
@@ -796,7 +796,7 @@ static void drop_mm_ref(struct mm_struct *mm) | |||
796 | } | 796 | } |
797 | 797 | ||
798 | if (!cpus_empty(mask)) | 798 | if (!cpus_empty(mask)) |
799 | xen_smp_call_function_mask(mask, drop_other_mm_ref, mm, 1); | 799 | smp_call_function_mask(mask, drop_other_mm_ref, mm, 1); |
800 | } | 800 | } |
801 | #else | 801 | #else |
802 | static void drop_mm_ref(struct mm_struct *mm) | 802 | static void drop_mm_ref(struct mm_struct *mm) |
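drop_mm_ref() retargets from the deleted Xen-private helper to the generic smp_call_function_mask(), which has the same shape. A hedged sketch of the call pattern (names made up; the callback receives the info pointer verbatim):

	#include <linux/smp.h>
	#include <linux/cpumask.h>

	static void drop_ref(void *mm)
	{
		/* per-CPU teardown for the mm passed as info */
	}

	static void drop_refs_on(cpumask_t mask, void *mm)
	{
		if (!cpus_empty(mask))	/* mask may be empty after filtering */
			smp_call_function_mask(mask, drop_ref, mm, 1);
	}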
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index d2e3c20127d7..233156f39b7f 100644 --- a/arch/x86/xen/smp.c +++ b/arch/x86/xen/smp.c | |||
@@ -36,27 +36,14 @@ | |||
36 | #include "mmu.h" | 36 | #include "mmu.h" |
37 | 37 | ||
38 | cpumask_t xen_cpu_initialized_map; | 38 | cpumask_t xen_cpu_initialized_map; |
39 | static DEFINE_PER_CPU(int, resched_irq) = -1; | ||
40 | static DEFINE_PER_CPU(int, callfunc_irq) = -1; | ||
41 | static DEFINE_PER_CPU(int, debug_irq) = -1; | ||
42 | |||
43 | /* | ||
44 | * Structure and data for smp_call_function(). This is designed to minimise | ||
45 | * static memory requirements. It also looks cleaner. | ||
46 | */ | ||
47 | static DEFINE_SPINLOCK(call_lock); | ||
48 | 39 | ||
49 | struct call_data_struct { | 40 | static DEFINE_PER_CPU(int, resched_irq); |
50 | void (*func) (void *info); | 41 | static DEFINE_PER_CPU(int, callfunc_irq); |
51 | void *info; | 42 | static DEFINE_PER_CPU(int, callfuncsingle_irq); |
52 | atomic_t started; | 43 | static DEFINE_PER_CPU(int, debug_irq) = -1; |
53 | atomic_t finished; | ||
54 | int wait; | ||
55 | }; | ||
56 | 44 | ||
57 | static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id); | 45 | static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id); |
58 | 46 | static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id); | |
59 | static struct call_data_struct *call_data; | ||
60 | 47 | ||
61 | /* | 48 | /* |
62 | * Reschedule call back. Nothing to do, | 49 | * Reschedule call back. Nothing to do, |
@@ -128,6 +115,17 @@ static int xen_smp_intr_init(unsigned int cpu) | |||
128 | goto fail; | 115 | goto fail; |
129 | per_cpu(debug_irq, cpu) = rc; | 116 | per_cpu(debug_irq, cpu) = rc; |
130 | 117 | ||
118 | callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu); | ||
119 | rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR, | ||
120 | cpu, | ||
121 | xen_call_function_single_interrupt, | ||
122 | IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING, | ||
123 | callfunc_name, | ||
124 | NULL); | ||
125 | if (rc < 0) | ||
126 | goto fail; | ||
127 | per_cpu(callfuncsingle_irq, cpu) = rc; | ||
128 | |||
131 | return 0; | 129 | return 0; |
132 | 130 | ||
133 | fail: | 131 | fail: |
@@ -137,6 +135,9 @@ static int xen_smp_intr_init(unsigned int cpu) | |||
137 | unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL); | 135 | unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL); |
138 | if (per_cpu(debug_irq, cpu) >= 0) | 136 | if (per_cpu(debug_irq, cpu) >= 0) |
139 | unbind_from_irqhandler(per_cpu(debug_irq, cpu), NULL); | 137 | unbind_from_irqhandler(per_cpu(debug_irq, cpu), NULL); |
138 | if (per_cpu(callfuncsingle_irq, cpu) >= 0) | ||
139 | unbind_from_irqhandler(per_cpu(callfuncsingle_irq, cpu), NULL); | ||
140 | |||
140 | return rc; | 141 | return rc; |
141 | } | 142 | } |
142 | 143 | ||
@@ -336,7 +337,7 @@ static void stop_self(void *v) | |||
336 | 337 | ||
337 | void xen_smp_send_stop(void) | 338 | void xen_smp_send_stop(void) |
338 | { | 339 | { |
339 | smp_call_function(stop_self, NULL, 0, 0); | 340 | smp_call_function(stop_self, NULL, 0); |
340 | } | 341 | } |
341 | 342 | ||
342 | void xen_smp_send_reschedule(int cpu) | 343 | void xen_smp_send_reschedule(int cpu) |
@@ -344,7 +345,6 @@ void xen_smp_send_reschedule(int cpu) | |||
344 | xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR); | 345 | xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR); |
345 | } | 346 | } |
346 | 347 | ||
347 | |||
348 | static void xen_send_IPI_mask(cpumask_t mask, enum ipi_vector vector) | 348 | static void xen_send_IPI_mask(cpumask_t mask, enum ipi_vector vector) |
349 | { | 349 | { |
350 | unsigned cpu; | 350 | unsigned cpu; |
@@ -355,83 +355,42 @@ static void xen_send_IPI_mask(cpumask_t mask, enum ipi_vector vector) | |||
355 | xen_send_IPI_one(cpu, vector); | 355 | xen_send_IPI_one(cpu, vector); |
356 | } | 356 | } |
357 | 357 | ||
358 | void xen_smp_send_call_function_ipi(cpumask_t mask) | ||
359 | { | ||
360 | int cpu; | ||
361 | |||
362 | xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR); | ||
363 | |||
364 | /* Make sure other vcpus get a chance to run if they need to. */ | ||
365 | for_each_cpu_mask(cpu, mask) { | ||
366 | if (xen_vcpu_stolen(cpu)) { | ||
367 | HYPERVISOR_sched_op(SCHEDOP_yield, 0); | ||
368 | break; | ||
369 | } | ||
370 | } | ||
371 | } | ||
372 | |||
373 | void xen_smp_send_call_function_single_ipi(int cpu) | ||
374 | { | ||
375 | xen_send_IPI_mask(cpumask_of_cpu(cpu), XEN_CALL_FUNCTION_SINGLE_VECTOR); | ||
376 | } | ||
377 | |||
358 | static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id) | 378 | static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id) |
359 | { | 379 | { |
360 | void (*func) (void *info) = call_data->func; | ||
361 | void *info = call_data->info; | ||
362 | int wait = call_data->wait; | ||
363 | |||
364 | /* | ||
365 | * Notify initiating CPU that I've grabbed the data and am | ||
366 | * about to execute the function | ||
367 | */ | ||
368 | mb(); | ||
369 | atomic_inc(&call_data->started); | ||
370 | /* | ||
371 | * At this point the info structure may be out of scope unless wait==1 | ||
372 | */ | ||
373 | irq_enter(); | 380 | irq_enter(); |
374 | (*func)(info); | 381 | generic_smp_call_function_interrupt(); |
375 | __get_cpu_var(irq_stat).irq_call_count++; | 382 | __get_cpu_var(irq_stat).irq_call_count++; |
376 | irq_exit(); | 383 | irq_exit(); |
377 | 384 | ||
378 | if (wait) { | ||
379 | mb(); /* commit everything before setting finished */ | ||
380 | atomic_inc(&call_data->finished); | ||
381 | } | ||
382 | |||
383 | return IRQ_HANDLED; | 385 | return IRQ_HANDLED; |
384 | } | 386 | } |
385 | 387 | ||
386 | int xen_smp_call_function_mask(cpumask_t mask, void (*func)(void *), | 388 | static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id) |
387 | void *info, int wait) | ||
388 | { | 389 | { |
389 | struct call_data_struct data; | 390 | irq_enter(); |
390 | int cpus, cpu; | 391 | generic_smp_call_function_single_interrupt(); |
391 | bool yield; | 392 | __get_cpu_var(irq_stat).irq_call_count++; |
392 | 393 | irq_exit(); | |
393 | /* Holding any lock stops cpus from going down. */ | ||
394 | spin_lock(&call_lock); | ||
395 | |||
396 | cpu_clear(smp_processor_id(), mask); | ||
397 | |||
398 | cpus = cpus_weight(mask); | ||
399 | if (!cpus) { | ||
400 | spin_unlock(&call_lock); | ||
401 | return 0; | ||
402 | } | ||
403 | |||
404 | /* Can deadlock when called with interrupts disabled */ | ||
405 | WARN_ON(irqs_disabled()); | ||
406 | |||
407 | data.func = func; | ||
408 | data.info = info; | ||
409 | atomic_set(&data.started, 0); | ||
410 | data.wait = wait; | ||
411 | if (wait) | ||
412 | atomic_set(&data.finished, 0); | ||
413 | |||
414 | call_data = &data; | ||
415 | mb(); /* write everything before IPI */ | ||
416 | |||
417 | /* Send a message to other CPUs and wait for them to respond */ | ||
418 | xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR); | ||
419 | |||
420 | /* Make sure other vcpus get a chance to run if they need to. */ | ||
421 | yield = false; | ||
422 | for_each_cpu_mask(cpu, mask) | ||
423 | if (xen_vcpu_stolen(cpu)) | ||
424 | yield = true; | ||
425 | |||
426 | if (yield) | ||
427 | HYPERVISOR_sched_op(SCHEDOP_yield, 0); | ||
428 | |||
429 | /* Wait for response */ | ||
430 | while (atomic_read(&data.started) != cpus || | ||
431 | (wait && atomic_read(&data.finished) != cpus)) | ||
432 | cpu_relax(); | ||
433 | |||
434 | spin_unlock(&call_lock); | ||
435 | 394 | ||
436 | return 0; | 395 | return IRQ_HANDLED; |
437 | } | 396 | } |
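One paravirt wrinkle survives the rewrite into xen_smp_send_call_function_ipi(): after raising the IPI, the sender yields if any target vCPU currently has its physical CPU stolen, so the callee actually gets a chance to run. A hedged restatement of that check (maybe_yield_for is a made-up name; the hypercall wrappers come from asm/xen/hypercall.h in this era):

	static void maybe_yield_for(cpumask_t mask)
	{
		int cpu;

		for_each_cpu_mask(cpu, mask) {
			if (xen_vcpu_stolen(cpu)) {
				/* let the hypervisor schedule the stolen vCPU */
				HYPERVISOR_sched_op(SCHEDOP_yield, 0);
				break;
			}
		}
	}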
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h index d852ddbb3448..6f4b1045c1c2 100644 --- a/arch/x86/xen/xen-ops.h +++ b/arch/x86/xen/xen-ops.h | |||
@@ -55,13 +55,8 @@ void xen_smp_cpus_done(unsigned int max_cpus); | |||
55 | 55 | ||
56 | void xen_smp_send_stop(void); | 56 | void xen_smp_send_stop(void); |
57 | void xen_smp_send_reschedule(int cpu); | 57 | void xen_smp_send_reschedule(int cpu); |
58 | int xen_smp_call_function (void (*func) (void *info), void *info, int nonatomic, | 58 | void xen_smp_send_call_function_ipi(cpumask_t mask); |
59 | int wait); | 59 | void xen_smp_send_call_function_single_ipi(int cpu); |
60 | int xen_smp_call_function_single(int cpu, void (*func) (void *info), void *info, | ||
61 | int nonatomic, int wait); | ||
62 | |||
63 | int xen_smp_call_function_mask(cpumask_t mask, void (*func)(void *), | ||
64 | void *info, int wait); | ||
65 | 60 | ||
66 | extern cpumask_t xen_cpu_initialized_map; | 61 | extern cpumask_t xen_cpu_initialized_map; |
67 | 62 | ||