author     Jens Axboe <jens.axboe@oracle.com>  2008-06-26 05:21:54 -0400
committer  Jens Axboe <jens.axboe@oracle.com>  2008-06-26 05:21:54 -0400
commit     3b16cf874861436725c43ba0b68bdd799297be7c (patch)
tree       8e48647e3dce5dde6917f260f93c4b9f19945c55 /arch/x86/mach-voyager/voyager_smp.c
parent     3d4422332711ef48ef0f132f1fcbfcbd56c7f3d1 (diff)
x86: convert to generic helpers for IPI function calls
This converts x86, x86-64, and xen to use the new helpers for
smp_call_function() and friends, and adds support for
smp_call_function_single().
Acked-by: Ingo Molnar <mingo@elte.hu>
Acked-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
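
For readers who have not seen the generic helpers this series switches to, the sketch below is a loose userspace model (not kernel code) of the hand-off they implement: the initiating CPU publishes a (func, info) pair together with a completion flag, raises an IPI, and the interrupted CPU runs the function and signals completion. Every name in the sketch (csd_sketch, call_remote, remote_cpu, and so on) is invented for illustration; the real implementation lives in kernel/smp.c.

/*
 * Loose userspace model of the cross-CPU "call function" hand-off.
 * All names here are invented for illustration; the real code is in
 * kernel/smp.c (generic_smp_call_function_single_interrupt() and friends).
 *
 * Build with: cc -std=c11 -pthread sketch.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct csd_sketch {
        void (*func)(void *info);       /* function to run on the remote "CPU" */
        void *info;                     /* its argument */
        atomic_int done;                /* completion flag the caller spins on */
};

static struct csd_sketch *pending;      /* single slot; the real code uses per-CPU queues */
static atomic_int ipi_raised;           /* stands in for the hardware IPI */

/* caller side: roughly the role of smp_call_function_single() with wait set */
static void call_remote(void (*func)(void *), void *info)
{
        struct csd_sketch csd = { .func = func, .info = info };

        atomic_init(&csd.done, 0);
        pending = &csd;
        atomic_store(&ipi_raised, 1);           /* "send" the IPI */
        while (!atomic_load(&csd.done))         /* wait for the remote side */
                ;
}

/* remote side: roughly the role of the arch IPI handler calling the generic helper */
static void *remote_cpu(void *unused)
{
        struct csd_sketch *csd;

        (void)unused;
        while (!atomic_load(&ipi_raised))       /* wait for the "IPI" */
                ;
        csd = pending;
        csd->func(csd->info);                   /* run the requested function */
        atomic_store(&csd->done, 1);            /* tell the caller we are finished */
        return NULL;
}

static void say_hello(void *info)
{
        printf("remote side ran with arg: %s\n", (const char *)info);
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, remote_cpu, NULL);
        call_remote(say_hello, "payload");
        pthread_join(t, NULL);
        return 0;
}

The conversion below keeps Voyager's interrupt entry points but replaces its private call_data_struct bookkeeping with calls into these generic helpers.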
Diffstat (limited to 'arch/x86/mach-voyager/voyager_smp.c')
 -rw-r--r--  arch/x86/mach-voyager/voyager_smp.c  |  94
 1 file changed, 17 insertions(+), 77 deletions(-)
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c
index 8acbf0cdf1a5..cb34407a9930 100644
--- a/arch/x86/mach-voyager/voyager_smp.c
+++ b/arch/x86/mach-voyager/voyager_smp.c
@@ -955,94 +955,24 @@ static void smp_stop_cpu_function(void *dummy)
         halt();
 }
 
-static DEFINE_SPINLOCK(call_lock);
-
-struct call_data_struct {
-        void (*func) (void *info);
-        void *info;
-        volatile unsigned long started;
-        volatile unsigned long finished;
-        int wait;
-};
-
-static struct call_data_struct *call_data;
-
 /* execute a thread on a new CPU. The function to be called must be
  * previously set up. This is used to schedule a function for
  * execution on all CPUs - set up the function then broadcast a
  * function_interrupt CPI to come here on each CPU */
 static void smp_call_function_interrupt(void)
 {
-        void (*func) (void *info) = call_data->func;
-        void *info = call_data->info;
-        /* must take copy of wait because call_data may be replaced
-         * unless the function is waiting for us to finish */
-        int wait = call_data->wait;
-        __u8 cpu = smp_processor_id();
-
-        /*
-         * Notify initiating CPU that I've grabbed the data and am
-         * about to execute the function
-         */
-        mb();
-        if (!test_and_clear_bit(cpu, &call_data->started)) {
-                /* If the bit wasn't set, this could be a replay */
-                printk(KERN_WARNING "VOYAGER SMP: CPU %d received call funtion"
-                       " with no call pending\n", cpu);
-                return;
-        }
-        /*
-         * At this point the info structure may be out of scope unless wait==1
-         */
         irq_enter();
-        (*func) (info);
+        generic_smp_call_function_interrupt();
         __get_cpu_var(irq_stat).irq_call_count++;
         irq_exit();
-        if (wait) {
-                mb();
-                clear_bit(cpu, &call_data->finished);
-        }
 }
 
-static int
-voyager_smp_call_function_mask(cpumask_t cpumask,
-                               void (*func) (void *info), void *info, int wait)
+static void smp_call_function_single_interrupt(void)
 {
-        struct call_data_struct data;
-        u32 mask = cpus_addr(cpumask)[0];
-
-        mask &= ~(1 << smp_processor_id());
-
-        if (!mask)
-                return 0;
-
-        /* Can deadlock when called with interrupts disabled */
-        WARN_ON(irqs_disabled());
-
-        data.func = func;
-        data.info = info;
-        data.started = mask;
-        data.wait = wait;
-        if (wait)
-                data.finished = mask;
-
-        spin_lock(&call_lock);
-        call_data = &data;
-        wmb();
-        /* Send a message to all other CPUs and wait for them to respond */
-        send_CPI(mask, VIC_CALL_FUNCTION_CPI);
-
-        /* Wait for response */
-        while (data.started)
-                barrier();
-
-        if (wait)
-                while (data.finished)
-                        barrier();
-
-        spin_unlock(&call_lock);
-
-        return 0;
+        irq_enter();
+        generic_smp_call_function_single_interrupt();
+        __get_cpu_var(irq_stat).irq_call_count++;
+        irq_exit();
 }
 
 /* Sorry about the name. In an APIC based system, the APICs
@@ -1099,6 +1029,12 @@ void smp_qic_call_function_interrupt(struct pt_regs *regs)
         smp_call_function_interrupt();
 }
 
+void smp_qic_call_function_single_interrupt(struct pt_regs *regs)
+{
+        ack_QIC_CPI(QIC_CALL_FUNCTION_SINGLE_CPI);
+        smp_call_function_single_interrupt();
+}
+
 void smp_vic_cpi_interrupt(struct pt_regs *regs)
 {
         struct pt_regs *old_regs = set_irq_regs(regs);
@@ -1119,6 +1055,8 @@ void smp_vic_cpi_interrupt(struct pt_regs *regs)
                 smp_enable_irq_interrupt();
         if (test_and_clear_bit(VIC_CALL_FUNCTION_CPI, &vic_cpi_mailbox[cpu]))
                 smp_call_function_interrupt();
+        if (test_and_clear_bit(VIC_CALL_FUNCTION_SINGLE_CPI, &vic_cpi_mailbox[cpu]))
+                smp_call_function_single_interrupt();
         set_irq_regs(old_regs);
 }
 
@@ -1862,5 +1800,7 @@ struct smp_ops smp_ops = {
 
         .smp_send_stop = voyager_smp_send_stop,
         .smp_send_reschedule = voyager_smp_send_reschedule,
-        .smp_call_function_mask = voyager_smp_call_function_mask,
+
+        .send_call_func_ipi = native_send_call_func_ipi,
+        .send_call_func_single_ipi = native_send_call_func_single_ipi,
 };
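
The last hunk is the behavioural heart of the conversion: Voyager no longer carries its own smp_call_function_mask() implementation and instead only supplies the hooks the generic code uses to raise the IPIs. As a rough orientation (the exact prototypes are recalled from the 2.6.27-era x86 smp_ops and should be treated as an assumption), the two members wired up here look roughly like this:

/* Approximate shape of the hooks wired up in the final hunk; the exact
 * prototypes are an assumption, not copied from this patch. */
struct smp_ops {
        /* ... other hooks elided ... */
        void (*send_call_func_ipi)(cpumask_t mask);     /* broadcast case */
        void (*send_call_func_single_ipi)(int cpu);     /* single-CPU case */
};

kernel/smp.c queues the call data and then invokes these hooks to raise the IPI; the Voyager handlers shown above then call back into generic_smp_call_function_interrupt() / generic_smp_call_function_single_interrupt() to run the queued functions.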