diff options
Diffstat (limited to 'arch/x86/mach-voyager/voyager_smp.c')
-rw-r--r-- | arch/x86/mach-voyager/voyager_smp.c | 98 |
1 file changed, 19 insertions, 79 deletions
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c index 8dedd01e909f..ee0fba092157 100644 --- a/arch/x86/mach-voyager/voyager_smp.c +++ b/arch/x86/mach-voyager/voyager_smp.c | |||
@@ -950,94 +950,24 @@ static void smp_stop_cpu_function(void *dummy) | |||
950 | halt(); | 950 | halt(); |
951 | } | 951 | } |
952 | 952 | ||
953 | static DEFINE_SPINLOCK(call_lock); | ||
954 | |||
955 | struct call_data_struct { | ||
956 | void (*func) (void *info); | ||
957 | void *info; | ||
958 | volatile unsigned long started; | ||
959 | volatile unsigned long finished; | ||
960 | int wait; | ||
961 | }; | ||
962 | |||
963 | static struct call_data_struct *call_data; | ||
964 | |||
965 | /* execute a thread on a new CPU. The function to be called must be | 953 | /* execute a thread on a new CPU. The function to be called must be |
966 | * previously set up. This is used to schedule a function for | 954 | * previously set up. This is used to schedule a function for |
967 | * execution on all CPUs - set up the function then broadcast a | 955 | * execution on all CPUs - set up the function then broadcast a |
968 | * function_interrupt CPI to come here on each CPU */ | 956 | * function_interrupt CPI to come here on each CPU */ |
969 | static void smp_call_function_interrupt(void) | 957 | static void smp_call_function_interrupt(void) |
970 | { | 958 | { |
971 | void (*func) (void *info) = call_data->func; | ||
972 | void *info = call_data->info; | ||
973 | /* must take copy of wait because call_data may be replaced | ||
974 | * unless the function is waiting for us to finish */ | ||
975 | int wait = call_data->wait; | ||
976 | __u8 cpu = smp_processor_id(); | ||
977 | |||
978 | /* | ||
979 | * Notify initiating CPU that I've grabbed the data and am | ||
980 | * about to execute the function | ||
981 | */ | ||
982 | mb(); | ||
983 | if (!test_and_clear_bit(cpu, &call_data->started)) { | ||
984 | /* If the bit wasn't set, this could be a replay */ | ||
985 | printk(KERN_WARNING "VOYAGER SMP: CPU %d received call function" | ||
986 | " with no call pending\n", cpu); | ||
987 | return; | ||
988 | } | ||
989 | /* | ||
990 | * At this point the info structure may be out of scope unless wait==1 | ||
991 | */ | ||
992 | irq_enter(); | 959 | irq_enter(); |
993 | (*func) (info); | 960 | generic_smp_call_function_interrupt(); |
994 | __get_cpu_var(irq_stat).irq_call_count++; | 961 | __get_cpu_var(irq_stat).irq_call_count++; |
995 | irq_exit(); | 962 | irq_exit(); |
996 | if (wait) { | ||
997 | mb(); | ||
998 | clear_bit(cpu, &call_data->finished); | ||
999 | } | ||
1000 | } | 963 | } |
1001 | 964 | ||
1002 | static int | 965 | static void smp_call_function_single_interrupt(void) |
1003 | voyager_smp_call_function_mask(cpumask_t cpumask, | ||
1004 | void (*func) (void *info), void *info, int wait) | ||
1005 | { | 966 | { |
1006 | struct call_data_struct data; | 967 | irq_enter(); |
1007 | u32 mask = cpus_addr(cpumask)[0]; | 968 | generic_smp_call_function_single_interrupt(); |
1008 | 969 | __get_cpu_var(irq_stat).irq_call_count++; | |
1009 | mask &= ~(1 << smp_processor_id()); | 970 | irq_exit(); |
1010 | |||
1011 | if (!mask) | ||
1012 | return 0; | ||
1013 | |||
1014 | /* Can deadlock when called with interrupts disabled */ | ||
1015 | WARN_ON(irqs_disabled()); | ||
1016 | |||
1017 | data.func = func; | ||
1018 | data.info = info; | ||
1019 | data.started = mask; | ||
1020 | data.wait = wait; | ||
1021 | if (wait) | ||
1022 | data.finished = mask; | ||
1023 | |||
1024 | spin_lock(&call_lock); | ||
1025 | call_data = &data; | ||
1026 | wmb(); | ||
1027 | /* Send a message to all other CPUs and wait for them to respond */ | ||
1028 | send_CPI(mask, VIC_CALL_FUNCTION_CPI); | ||
1029 | |||
1030 | /* Wait for response */ | ||
1031 | while (data.started) | ||
1032 | barrier(); | ||
1033 | |||
1034 | if (wait) | ||
1035 | while (data.finished) | ||
1036 | barrier(); | ||
1037 | |||
1038 | spin_unlock(&call_lock); | ||
1039 | |||
1040 | return 0; | ||
1041 | } | 971 | } |
1042 | 972 | ||
1043 | /* Sorry about the name. In an APIC based system, the APICs | 973 | /* Sorry about the name. In an APIC based system, the APICs |
@@ -1094,6 +1024,12 @@ void smp_qic_call_function_interrupt(struct pt_regs *regs) | |||
1094 | smp_call_function_interrupt(); | 1024 | smp_call_function_interrupt(); |
1095 | } | 1025 | } |
1096 | 1026 | ||
1027 | void smp_qic_call_function_single_interrupt(struct pt_regs *regs) | ||
1028 | { | ||
1029 | ack_QIC_CPI(QIC_CALL_FUNCTION_SINGLE_CPI); | ||
1030 | smp_call_function_single_interrupt(); | ||
1031 | } | ||
1032 | |||
1097 | void smp_vic_cpi_interrupt(struct pt_regs *regs) | 1033 | void smp_vic_cpi_interrupt(struct pt_regs *regs) |
1098 | { | 1034 | { |
1099 | struct pt_regs *old_regs = set_irq_regs(regs); | 1035 | struct pt_regs *old_regs = set_irq_regs(regs); |
@@ -1114,6 +1050,8 @@ void smp_vic_cpi_interrupt(struct pt_regs *regs) | |||
1114 | smp_enable_irq_interrupt(); | 1050 | smp_enable_irq_interrupt(); |
1115 | if (test_and_clear_bit(VIC_CALL_FUNCTION_CPI, &vic_cpi_mailbox[cpu])) | 1051 | if (test_and_clear_bit(VIC_CALL_FUNCTION_CPI, &vic_cpi_mailbox[cpu])) |
1116 | smp_call_function_interrupt(); | 1052 | smp_call_function_interrupt(); |
1053 | if (test_and_clear_bit(VIC_CALL_FUNCTION_SINGLE_CPI, &vic_cpi_mailbox[cpu])) | ||
1054 | smp_call_function_single_interrupt(); | ||
1117 | set_irq_regs(old_regs); | 1055 | set_irq_regs(old_regs); |
1118 | } | 1056 | } |
1119 | 1057 | ||
@@ -1129,7 +1067,7 @@ static void do_flush_tlb_all(void *info) | |||
1129 | /* flush the TLB of every active CPU in the system */ | 1067 | /* flush the TLB of every active CPU in the system */ |
1130 | void flush_tlb_all(void) | 1068 | void flush_tlb_all(void) |
1131 | { | 1069 | { |
1132 | on_each_cpu(do_flush_tlb_all, 0, 1, 1); | 1070 | on_each_cpu(do_flush_tlb_all, 0, 1); |
1133 | } | 1071 | } |
1134 | 1072 | ||
1135 | /* send a reschedule CPI to one CPU by physical CPU number*/ | 1073 | /* send a reschedule CPI to one CPU by physical CPU number*/ |
@@ -1161,7 +1099,7 @@ int safe_smp_processor_id(void) | |||
1161 | /* broadcast a halt to all other CPUs */ | 1099 | /* broadcast a halt to all other CPUs */ |
1162 | static void voyager_smp_send_stop(void) | 1100 | static void voyager_smp_send_stop(void) |
1163 | { | 1101 | { |
1164 | smp_call_function(smp_stop_cpu_function, NULL, 1, 1); | 1102 | smp_call_function(smp_stop_cpu_function, NULL, 1); |
1165 | } | 1103 | } |
1166 | 1104 | ||
1167 | /* this function is triggered in time.c when a clock tick fires | 1105 | /* this function is triggered in time.c when a clock tick fires |
@@ -1848,5 +1786,7 @@ struct smp_ops smp_ops = { | |||
1848 | 1786 | ||
1849 | .smp_send_stop = voyager_smp_send_stop, | 1787 | .smp_send_stop = voyager_smp_send_stop, |
1850 | .smp_send_reschedule = voyager_smp_send_reschedule, | 1788 | .smp_send_reschedule = voyager_smp_send_reschedule, |
1851 | .smp_call_function_mask = voyager_smp_call_function_mask, | 1789 | |
1790 | .send_call_func_ipi = native_send_call_func_ipi, | ||
1791 | .send_call_func_single_ipi = native_send_call_func_single_ipi, | ||
1852 | }; | 1792 | }; |