author | Glauber Costa <gcosta@redhat.com> | 2008-03-03 12:12:52 -0500
---|---|---
committer | Ingo Molnar <mingo@elte.hu> | 2008-04-17 11:40:56 -0400
commit | f9e47a126be2eaabf04a1a5c71ca7b23a473d0d8 (patch) |
tree | 5a16cf4ac06bb4031de1be5070281ef5f7847bf0 /arch/x86/kernel |
parent | 377d698426b8c685fb6d48fe89694fe4ce3aa1f8 (diff) |
x86: create smp.c
This patch moves all the functions and data structures that are exactly the same in smp_{32,64}.c into a common smp.c.
Signed-off-by: Glauber Costa <gcosta@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
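For context (not part of the patch itself): both the 32-bit and 64-bit kernels reach the shared native_* implementations below through the smp_ops function-pointer table that the new smp.c defines and exports. A minimal sketch of that indirection follows; the struct fields match the smp_ops instance at the end of the new file, while the inline wrapper name and header location are assumptions based on the asm-x86 headers of this era, not taken from this commit.

```c
/*
 * Sketch only: how generic code dispatches into the native_* helpers
 * consolidated in smp.c. The wrapper below is illustrative, assumed to
 * live in the arch smp header rather than shown by this patch.
 */
#include <linux/cpumask.h>	/* cpumask_t */

struct smp_ops {
	void (*smp_send_reschedule)(int cpu);
	int (*smp_call_function_mask)(cpumask_t mask,
				      void (*func)(void *info), void *info,
				      int wait);
	void (*smp_send_stop)(void);
	/* boot-time hooks (smp_prepare_cpus, cpu_up, ...) elided */
};

extern struct smp_ops smp_ops;

/* Callers use the generic wrapper, never the native_* functions directly: */
static inline void smp_send_reschedule(int cpu)
{
	smp_ops.smp_send_reschedule(cpu);
}
```

With that table in place, smp_32.c and smp_64.c only need to keep the code that genuinely differs between the two builds (TLB flushing, safe_smp_processor_id(), and so on), which is what the removals below leave behind.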
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r-- | arch/x86/kernel/Makefile | 3
-rw-r--r-- | arch/x86/kernel/smp.c | 253
-rw-r--r-- | arch/x86/kernel/smp_32.c | 223
-rw-r--r-- | arch/x86/kernel/smp_64.c | 205
4 files changed, 255 insertions, 429 deletions
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 018d04d880db..0a4b088bab5d 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -46,7 +46,8 @@ obj-$(CONFIG_MICROCODE) += microcode.o
46 | obj-$(CONFIG_PCI) += early-quirks.o | 46 | obj-$(CONFIG_PCI) += early-quirks.o |
47 | apm-y := apm_32.o | 47 | apm-y := apm_32.o |
48 | obj-$(CONFIG_APM) += apm.o | 48 | obj-$(CONFIG_APM) += apm.o |
49 | obj-$(CONFIG_X86_SMP) += smp_$(BITS).o smpboot_$(BITS).o smpboot.o tsc_sync.o | 49 | obj-$(CONFIG_X86_SMP) += smp_$(BITS).o smpboot_$(BITS).o smp.o |
50 | obj-$(CONFIG_X86_SMP) += smpboot.o tsc_sync.o | ||
50 | obj-$(CONFIG_X86_32_SMP) += smpcommon.o | 51 | obj-$(CONFIG_X86_32_SMP) += smpcommon.o |
51 | obj-$(CONFIG_X86_64_SMP) += smp_64.o smpboot_64.o tsc_sync.o smpcommon.o | 52 | obj-$(CONFIG_X86_64_SMP) += smp_64.o smpboot_64.o tsc_sync.o smpcommon.o |
52 | obj-$(CONFIG_X86_TRAMPOLINE) += trampoline_$(BITS).o | 53 | obj-$(CONFIG_X86_TRAMPOLINE) += trampoline_$(BITS).o |
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
new file mode 100644
index 000000000000..b662300a88f3
--- /dev/null
+++ b/arch/x86/kernel/smp.c
@@ -0,0 +1,253 @@
1 | #include <linux/init.h> | ||
2 | |||
3 | #include <linux/mm.h> | ||
4 | #include <linux/delay.h> | ||
5 | #include <linux/spinlock.h> | ||
6 | #include <linux/kernel_stat.h> | ||
7 | #include <linux/mc146818rtc.h> | ||
8 | #include <linux/cache.h> | ||
9 | #include <linux/interrupt.h> | ||
10 | #include <linux/cpu.h> | ||
11 | |||
12 | #include <asm/mtrr.h> | ||
13 | #include <asm/tlbflush.h> | ||
14 | #include <asm/mmu_context.h> | ||
15 | #include <asm/proto.h> | ||
16 | #ifdef CONFIG_X86_32 | ||
17 | #include <mach_apic.h> | ||
18 | #include <mach_ipi.h> | ||
19 | #else | ||
20 | #include <asm/mach_apic.h> | ||
21 | #endif | ||
22 | |||
23 | /* | ||
24 | * this function sends a 'reschedule' IPI to another CPU. | ||
25 | * it goes straight through and wastes no time serializing | ||
26 | * anything. Worst case is that we lose a reschedule ... | ||
27 | */ | ||
28 | static void native_smp_send_reschedule(int cpu) | ||
29 | { | ||
30 | WARN_ON(cpu_is_offline(cpu)); | ||
31 | send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR); | ||
32 | } | ||
33 | |||
34 | /* | ||
35 | * Structure and data for smp_call_function(). This is designed to minimise | ||
36 | * static memory requirements. It also looks cleaner. | ||
37 | */ | ||
38 | static DEFINE_SPINLOCK(call_lock); | ||
39 | |||
40 | struct call_data_struct { | ||
41 | void (*func) (void *info); | ||
42 | void *info; | ||
43 | atomic_t started; | ||
44 | atomic_t finished; | ||
45 | int wait; | ||
46 | }; | ||
47 | |||
48 | void lock_ipi_call_lock(void) | ||
49 | { | ||
50 | spin_lock_irq(&call_lock); | ||
51 | } | ||
52 | |||
53 | void unlock_ipi_call_lock(void) | ||
54 | { | ||
55 | spin_unlock_irq(&call_lock); | ||
56 | } | ||
57 | |||
58 | static struct call_data_struct *call_data; | ||
59 | |||
60 | static void __smp_call_function(void (*func) (void *info), void *info, | ||
61 | int nonatomic, int wait) | ||
62 | { | ||
63 | struct call_data_struct data; | ||
64 | int cpus = num_online_cpus() - 1; | ||
65 | |||
66 | if (!cpus) | ||
67 | return; | ||
68 | |||
69 | data.func = func; | ||
70 | data.info = info; | ||
71 | atomic_set(&data.started, 0); | ||
72 | data.wait = wait; | ||
73 | if (wait) | ||
74 | atomic_set(&data.finished, 0); | ||
75 | |||
76 | call_data = &data; | ||
77 | mb(); | ||
78 | |||
79 | /* Send a message to all other CPUs and wait for them to respond */ | ||
80 | send_IPI_allbutself(CALL_FUNCTION_VECTOR); | ||
81 | |||
82 | /* Wait for response */ | ||
83 | while (atomic_read(&data.started) != cpus) | ||
84 | cpu_relax(); | ||
85 | |||
86 | if (wait) | ||
87 | while (atomic_read(&data.finished) != cpus) | ||
88 | cpu_relax(); | ||
89 | } | ||
90 | |||
91 | |||
92 | /** | ||
93 | * smp_call_function_mask(): Run a function on a set of other CPUs. | ||
94 | * @mask: The set of cpus to run on. Must not include the current cpu. | ||
95 | * @func: The function to run. This must be fast and non-blocking. | ||
96 | * @info: An arbitrary pointer to pass to the function. | ||
97 | * @wait: If true, wait (atomically) until function has completed on other CPUs. | ||
98 | * | ||
99 | * Returns 0 on success, else a negative status code. | ||
100 | * | ||
101 | * If @wait is true, then returns once @func has returned; otherwise | ||
102 | * it returns just before the target cpu calls @func. | ||
103 | * | ||
104 | * You must not call this function with disabled interrupts or from a | ||
105 | * hardware interrupt handler or from a bottom half handler. | ||
106 | */ | ||
107 | static int | ||
108 | native_smp_call_function_mask(cpumask_t mask, | ||
109 | void (*func)(void *), void *info, | ||
110 | int wait) | ||
111 | { | ||
112 | struct call_data_struct data; | ||
113 | cpumask_t allbutself; | ||
114 | int cpus; | ||
115 | |||
116 | /* Can deadlock when called with interrupts disabled */ | ||
117 | WARN_ON(irqs_disabled()); | ||
118 | |||
119 | /* Holding any lock stops cpus from going down. */ | ||
120 | spin_lock(&call_lock); | ||
121 | |||
122 | allbutself = cpu_online_map; | ||
123 | cpu_clear(smp_processor_id(), allbutself); | ||
124 | |||
125 | cpus_and(mask, mask, allbutself); | ||
126 | cpus = cpus_weight(mask); | ||
127 | |||
128 | if (!cpus) { | ||
129 | spin_unlock(&call_lock); | ||
130 | return 0; | ||
131 | } | ||
132 | |||
133 | data.func = func; | ||
134 | data.info = info; | ||
135 | atomic_set(&data.started, 0); | ||
136 | data.wait = wait; | ||
137 | if (wait) | ||
138 | atomic_set(&data.finished, 0); | ||
139 | |||
140 | call_data = &data; | ||
141 | wmb(); | ||
142 | |||
143 | /* Send a message to other CPUs */ | ||
144 | if (cpus_equal(mask, allbutself)) | ||
145 | send_IPI_allbutself(CALL_FUNCTION_VECTOR); | ||
146 | else | ||
147 | send_IPI_mask(mask, CALL_FUNCTION_VECTOR); | ||
148 | |||
149 | /* Wait for response */ | ||
150 | while (atomic_read(&data.started) != cpus) | ||
151 | cpu_relax(); | ||
152 | |||
153 | if (wait) | ||
154 | while (atomic_read(&data.finished) != cpus) | ||
155 | cpu_relax(); | ||
156 | spin_unlock(&call_lock); | ||
157 | |||
158 | return 0; | ||
159 | } | ||
160 | |||
161 | static void stop_this_cpu(void *dummy) | ||
162 | { | ||
163 | local_irq_disable(); | ||
164 | /* | ||
165 | * Remove this CPU: | ||
166 | */ | ||
167 | cpu_clear(smp_processor_id(), cpu_online_map); | ||
168 | disable_local_APIC(); | ||
169 | if (hlt_works(smp_processor_id())) | ||
170 | for (;;) halt(); | ||
171 | for (;;); | ||
172 | } | ||
173 | |||
174 | /* | ||
175 | * this function calls the 'stop' function on all other CPUs in the system. | ||
176 | */ | ||
177 | |||
178 | static void native_smp_send_stop(void) | ||
179 | { | ||
180 | int nolock; | ||
181 | unsigned long flags; | ||
182 | |||
183 | if (reboot_force) | ||
184 | return; | ||
185 | |||
186 | /* Don't deadlock on the call lock in panic */ | ||
187 | nolock = !spin_trylock(&call_lock); | ||
188 | local_irq_save(flags); | ||
189 | __smp_call_function(stop_this_cpu, NULL, 0, 0); | ||
190 | if (!nolock) | ||
191 | spin_unlock(&call_lock); | ||
192 | disable_local_APIC(); | ||
193 | local_irq_restore(flags); | ||
194 | } | ||
195 | |||
196 | /* | ||
197 | * Reschedule call back. Nothing to do, | ||
198 | * all the work is done automatically when | ||
199 | * we return from the interrupt. | ||
200 | */ | ||
201 | void smp_reschedule_interrupt(struct pt_regs *regs) | ||
202 | { | ||
203 | ack_APIC_irq(); | ||
204 | #ifdef CONFIG_X86_32 | ||
205 | __get_cpu_var(irq_stat).irq_resched_count++; | ||
206 | #else | ||
207 | add_pda(irq_resched_count, 1); | ||
208 | #endif | ||
209 | } | ||
210 | |||
211 | void smp_call_function_interrupt(struct pt_regs *regs) | ||
212 | { | ||
213 | void (*func) (void *info) = call_data->func; | ||
214 | void *info = call_data->info; | ||
215 | int wait = call_data->wait; | ||
216 | |||
217 | ack_APIC_irq(); | ||
218 | /* | ||
219 | * Notify initiating CPU that I've grabbed the data and am | ||
220 | * about to execute the function | ||
221 | */ | ||
222 | mb(); | ||
223 | atomic_inc(&call_data->started); | ||
224 | /* | ||
225 | * At this point the info structure may be out of scope unless wait==1 | ||
226 | */ | ||
227 | irq_enter(); | ||
228 | (*func)(info); | ||
229 | #ifdef CONFIG_X86_32 | ||
230 | __get_cpu_var(irq_stat).irq_call_count++; | ||
231 | #else | ||
232 | add_pda(irq_call_count, 1); | ||
233 | #endif | ||
234 | irq_exit(); | ||
235 | |||
236 | if (wait) { | ||
237 | mb(); | ||
238 | atomic_inc(&call_data->finished); | ||
239 | } | ||
240 | } | ||
241 | |||
242 | struct smp_ops smp_ops = { | ||
243 | .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu, | ||
244 | .smp_prepare_cpus = native_smp_prepare_cpus, | ||
245 | .cpu_up = native_cpu_up, | ||
246 | .smp_cpus_done = native_smp_cpus_done, | ||
247 | |||
248 | .smp_send_stop = native_smp_send_stop, | ||
249 | .smp_send_reschedule = native_smp_send_reschedule, | ||
250 | .smp_call_function_mask = native_smp_call_function_mask, | ||
251 | }; | ||
252 | EXPORT_SYMBOL_GPL(smp_ops); | ||
253 | |||
diff --git a/arch/x86/kernel/smp_32.c b/arch/x86/kernel/smp_32.c
index 8be3e091dcd0..61e546e85733 100644
--- a/arch/x86/kernel/smp_32.c
+++ b/arch/x86/kernel/smp_32.c
@@ -466,217 +466,6 @@ void flush_tlb_all(void)
466 | on_each_cpu(do_flush_tlb_all, NULL, 1, 1); | 466 | on_each_cpu(do_flush_tlb_all, NULL, 1, 1); |
467 | } | 467 | } |
468 | 468 | ||
469 | /* | ||
470 | * this function sends a 'reschedule' IPI to another CPU. | ||
471 | * it goes straight through and wastes no time serializing | ||
472 | * anything. Worst case is that we lose a reschedule ... | ||
473 | */ | ||
474 | static void native_smp_send_reschedule(int cpu) | ||
475 | { | ||
476 | WARN_ON(cpu_is_offline(cpu)); | ||
477 | send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR); | ||
478 | } | ||
479 | |||
480 | /* | ||
481 | * Structure and data for smp_call_function(). This is designed to minimise | ||
482 | * static memory requirements. It also looks cleaner. | ||
483 | */ | ||
484 | static DEFINE_SPINLOCK(call_lock); | ||
485 | |||
486 | struct call_data_struct { | ||
487 | void (*func) (void *info); | ||
488 | void *info; | ||
489 | atomic_t started; | ||
490 | atomic_t finished; | ||
491 | int wait; | ||
492 | }; | ||
493 | |||
494 | void lock_ipi_call_lock(void) | ||
495 | { | ||
496 | spin_lock_irq(&call_lock); | ||
497 | } | ||
498 | |||
499 | void unlock_ipi_call_lock(void) | ||
500 | { | ||
501 | spin_unlock_irq(&call_lock); | ||
502 | } | ||
503 | |||
504 | static struct call_data_struct *call_data; | ||
505 | |||
506 | static void __smp_call_function(void (*func) (void *info), void *info, | ||
507 | int nonatomic, int wait) | ||
508 | { | ||
509 | struct call_data_struct data; | ||
510 | int cpus = num_online_cpus() - 1; | ||
511 | |||
512 | if (!cpus) | ||
513 | return; | ||
514 | |||
515 | data.func = func; | ||
516 | data.info = info; | ||
517 | atomic_set(&data.started, 0); | ||
518 | data.wait = wait; | ||
519 | if (wait) | ||
520 | atomic_set(&data.finished, 0); | ||
521 | |||
522 | call_data = &data; | ||
523 | mb(); | ||
524 | |||
525 | /* Send a message to all other CPUs and wait for them to respond */ | ||
526 | send_IPI_allbutself(CALL_FUNCTION_VECTOR); | ||
527 | |||
528 | /* Wait for response */ | ||
529 | while (atomic_read(&data.started) != cpus) | ||
530 | cpu_relax(); | ||
531 | |||
532 | if (wait) | ||
533 | while (atomic_read(&data.finished) != cpus) | ||
534 | cpu_relax(); | ||
535 | } | ||
536 | |||
537 | |||
538 | /** | ||
539 | * smp_call_function_mask(): Run a function on a set of other CPUs. | ||
540 | * @mask: The set of cpus to run on. Must not include the current cpu. | ||
541 | * @func: The function to run. This must be fast and non-blocking. | ||
542 | * @info: An arbitrary pointer to pass to the function. | ||
543 | * @wait: If true, wait (atomically) until function has completed on other CPUs. | ||
544 | * | ||
545 | * Returns 0 on success, else a negative status code. | ||
546 | * | ||
547 | * If @wait is true, then returns once @func has returned; otherwise | ||
548 | * it returns just before the target cpu calls @func. | ||
549 | * | ||
550 | * You must not call this function with disabled interrupts or from a | ||
551 | * hardware interrupt handler or from a bottom half handler. | ||
552 | */ | ||
553 | static int | ||
554 | native_smp_call_function_mask(cpumask_t mask, | ||
555 | void (*func)(void *), void *info, | ||
556 | int wait) | ||
557 | { | ||
558 | struct call_data_struct data; | ||
559 | cpumask_t allbutself; | ||
560 | int cpus; | ||
561 | |||
562 | /* Can deadlock when called with interrupts disabled */ | ||
563 | WARN_ON(irqs_disabled()); | ||
564 | |||
565 | /* Holding any lock stops cpus from going down. */ | ||
566 | spin_lock(&call_lock); | ||
567 | |||
568 | allbutself = cpu_online_map; | ||
569 | cpu_clear(smp_processor_id(), allbutself); | ||
570 | |||
571 | cpus_and(mask, mask, allbutself); | ||
572 | cpus = cpus_weight(mask); | ||
573 | |||
574 | if (!cpus) { | ||
575 | spin_unlock(&call_lock); | ||
576 | return 0; | ||
577 | } | ||
578 | |||
579 | data.func = func; | ||
580 | data.info = info; | ||
581 | atomic_set(&data.started, 0); | ||
582 | data.wait = wait; | ||
583 | if (wait) | ||
584 | atomic_set(&data.finished, 0); | ||
585 | |||
586 | call_data = &data; | ||
587 | wmb(); | ||
588 | |||
589 | /* Send a message to other CPUs */ | ||
590 | if (cpus_equal(mask, allbutself)) | ||
591 | send_IPI_allbutself(CALL_FUNCTION_VECTOR); | ||
592 | else | ||
593 | send_IPI_mask(mask, CALL_FUNCTION_VECTOR); | ||
594 | |||
595 | /* Wait for response */ | ||
596 | while (atomic_read(&data.started) != cpus) | ||
597 | cpu_relax(); | ||
598 | |||
599 | if (wait) | ||
600 | while (atomic_read(&data.finished) != cpus) | ||
601 | cpu_relax(); | ||
602 | spin_unlock(&call_lock); | ||
603 | |||
604 | return 0; | ||
605 | } | ||
606 | |||
607 | static void stop_this_cpu (void * dummy) | ||
608 | { | ||
609 | local_irq_disable(); | ||
610 | /* | ||
611 | * Remove this CPU: | ||
612 | */ | ||
613 | cpu_clear(smp_processor_id(), cpu_online_map); | ||
614 | disable_local_APIC(); | ||
615 | if (hlt_works(smp_processor_id())) | ||
616 | for(;;) halt(); | ||
617 | for (;;); | ||
618 | } | ||
619 | |||
620 | /* | ||
621 | * this function calls the 'stop' function on all other CPUs in the system. | ||
622 | */ | ||
623 | |||
624 | static void native_smp_send_stop(void) | ||
625 | { | ||
626 | int nolock; | ||
627 | unsigned long flags; | ||
628 | |||
629 | if (reboot_force) | ||
630 | return; | ||
631 | |||
632 | /* Don't deadlock on the call lock in panic */ | ||
633 | nolock = !spin_trylock(&call_lock); | ||
634 | local_irq_save(flags); | ||
635 | __smp_call_function(stop_this_cpu, NULL, 0, 0); | ||
636 | if (!nolock) | ||
637 | spin_unlock(&call_lock); | ||
638 | disable_local_APIC(); | ||
639 | local_irq_restore(flags); | ||
640 | } | ||
641 | |||
642 | /* | ||
643 | * Reschedule call back. Nothing to do, | ||
644 | * all the work is done automatically when | ||
645 | * we return from the interrupt. | ||
646 | */ | ||
647 | void smp_reschedule_interrupt(struct pt_regs *regs) | ||
648 | { | ||
649 | ack_APIC_irq(); | ||
650 | __get_cpu_var(irq_stat).irq_resched_count++; | ||
651 | } | ||
652 | |||
653 | void smp_call_function_interrupt(struct pt_regs *regs) | ||
654 | { | ||
655 | void (*func) (void *info) = call_data->func; | ||
656 | void *info = call_data->info; | ||
657 | int wait = call_data->wait; | ||
658 | |||
659 | ack_APIC_irq(); | ||
660 | /* | ||
661 | * Notify initiating CPU that I've grabbed the data and am | ||
662 | * about to execute the function | ||
663 | */ | ||
664 | mb(); | ||
665 | atomic_inc(&call_data->started); | ||
666 | /* | ||
667 | * At this point the info structure may be out of scope unless wait==1 | ||
668 | */ | ||
669 | irq_enter(); | ||
670 | (*func)(info); | ||
671 | __get_cpu_var(irq_stat).irq_call_count++; | ||
672 | irq_exit(); | ||
673 | |||
674 | if (wait) { | ||
675 | mb(); | ||
676 | atomic_inc(&call_data->finished); | ||
677 | } | ||
678 | } | ||
679 | |||
680 | static int convert_apicid_to_cpu(int apic_id) | 469 | static int convert_apicid_to_cpu(int apic_id) |
681 | { | 470 | { |
682 | int i; | 471 | int i; |
@@ -703,15 +492,3 @@ int safe_smp_processor_id(void)
703 | 492 | ||
704 | return cpuid >= 0 ? cpuid : 0; | 493 | return cpuid >= 0 ? cpuid : 0; |
705 | } | 494 | } |
706 | |||
707 | struct smp_ops smp_ops = { | ||
708 | .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu, | ||
709 | .smp_prepare_cpus = native_smp_prepare_cpus, | ||
710 | .cpu_up = native_cpu_up, | ||
711 | .smp_cpus_done = native_smp_cpus_done, | ||
712 | |||
713 | .smp_send_stop = native_smp_send_stop, | ||
714 | .smp_send_reschedule = native_smp_send_reschedule, | ||
715 | .smp_call_function_mask = native_smp_call_function_mask, | ||
716 | }; | ||
717 | EXPORT_SYMBOL_GPL(smp_ops); | ||
diff --git a/arch/x86/kernel/smp_64.c b/arch/x86/kernel/smp_64.c
index ad11ef0c3fae..d28e8685709d 100644
--- a/arch/x86/kernel/smp_64.c
+++ b/arch/x86/kernel/smp_64.c
@@ -283,208 +283,3 @@ void flush_tlb_all(void)
283 | { | 283 | { |
284 | on_each_cpu(do_flush_tlb_all, NULL, 1, 1); | 284 | on_each_cpu(do_flush_tlb_all, NULL, 1, 1); |
285 | } | 285 | } |
286 | |||
287 | /* | ||
288 | * this function sends a 'reschedule' IPI to another CPU. | ||
289 | * it goes straight through and wastes no time serializing | ||
290 | * anything. Worst case is that we lose a reschedule ... | ||
291 | */ | ||
292 | |||
293 | static void native_smp_send_reschedule(int cpu) | ||
294 | { | ||
295 | WARN_ON(cpu_is_offline(cpu)); | ||
296 | send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR); | ||
297 | } | ||
298 | |||
299 | /* | ||
300 | * Structure and data for smp_call_function(). This is designed to minimise | ||
301 | * static memory requirements. It also looks cleaner. | ||
302 | */ | ||
303 | static DEFINE_SPINLOCK(call_lock); | ||
304 | |||
305 | struct call_data_struct { | ||
306 | void (*func) (void *info); | ||
307 | void *info; | ||
308 | atomic_t started; | ||
309 | atomic_t finished; | ||
310 | int wait; | ||
311 | }; | ||
312 | |||
313 | static struct call_data_struct * call_data; | ||
314 | |||
315 | void lock_ipi_call_lock(void) | ||
316 | { | ||
317 | spin_lock_irq(&call_lock); | ||
318 | } | ||
319 | |||
320 | void unlock_ipi_call_lock(void) | ||
321 | { | ||
322 | spin_unlock_irq(&call_lock); | ||
323 | } | ||
324 | |||
325 | static void __smp_call_function(void (*func) (void *info), void *info, | ||
326 | int nonatomic, int wait) | ||
327 | { | ||
328 | struct call_data_struct data; | ||
329 | int cpus = num_online_cpus() - 1; | ||
330 | |||
331 | if (!cpus) | ||
332 | return; | ||
333 | |||
334 | data.func = func; | ||
335 | data.info = info; | ||
336 | atomic_set(&data.started, 0); | ||
337 | data.wait = wait; | ||
338 | if (wait) | ||
339 | atomic_set(&data.finished, 0); | ||
340 | |||
341 | call_data = &data; | ||
342 | mb(); | ||
343 | |||
344 | /* Send a message to all other CPUs and wait for them to respond */ | ||
345 | send_IPI_allbutself(CALL_FUNCTION_VECTOR); | ||
346 | |||
347 | /* Wait for response */ | ||
348 | while (atomic_read(&data.started) != cpus) | ||
349 | cpu_relax(); | ||
350 | |||
351 | if (wait) | ||
352 | while (atomic_read(&data.finished) != cpus) | ||
353 | cpu_relax(); | ||
354 | } | ||
355 | |||
356 | |||
357 | int native_smp_call_function_mask(cpumask_t mask, | ||
358 | void (*func)(void *), void *info, | ||
359 | int wait) | ||
360 | { | ||
361 | struct call_data_struct data; | ||
362 | cpumask_t allbutself; | ||
363 | int cpus; | ||
364 | |||
365 | /* Can deadlock when called with interrupts disabled */ | ||
366 | WARN_ON(irqs_disabled()); | ||
367 | |||
368 | /* Holding any lock stops cpus from going down. */ | ||
369 | spin_lock(&call_lock); | ||
370 | |||
371 | allbutself = cpu_online_map; | ||
372 | cpu_clear(smp_processor_id(), allbutself); | ||
373 | |||
374 | cpus_and(mask, mask, allbutself); | ||
375 | cpus = cpus_weight(mask); | ||
376 | |||
377 | if (!cpus) { | ||
378 | spin_unlock(&call_lock); | ||
379 | return 0; | ||
380 | } | ||
381 | |||
382 | data.func = func; | ||
383 | data.info = info; | ||
384 | atomic_set(&data.started, 0); | ||
385 | data.wait = wait; | ||
386 | if (wait) | ||
387 | atomic_set(&data.finished, 0); | ||
388 | |||
389 | call_data = &data; | ||
390 | wmb(); | ||
391 | |||
392 | /* Send a message to other CPUs */ | ||
393 | if (cpus_equal(mask, allbutself)) | ||
394 | send_IPI_allbutself(CALL_FUNCTION_VECTOR); | ||
395 | else | ||
396 | send_IPI_mask(mask, CALL_FUNCTION_VECTOR); | ||
397 | |||
398 | /* Wait for response */ | ||
399 | while (atomic_read(&data.started) != cpus) | ||
400 | cpu_relax(); | ||
401 | |||
402 | if (wait) | ||
403 | while (atomic_read(&data.finished) != cpus) | ||
404 | cpu_relax(); | ||
405 | |||
406 | spin_unlock(&call_lock); | ||
407 | |||
408 | return 0; | ||
409 | } | ||
410 | |||
411 | static void stop_this_cpu(void *dummy) | ||
412 | { | ||
413 | local_irq_disable(); | ||
414 | /* | ||
415 | * Remove this CPU: | ||
416 | */ | ||
417 | cpu_clear(smp_processor_id(), cpu_online_map); | ||
418 | disable_local_APIC(); | ||
419 | if (hlt_works(smp_processor_id())) | ||
420 | for (;;) halt(); | ||
421 | for (;;); | ||
422 | } | ||
423 | |||
424 | void native_smp_send_stop(void) | ||
425 | { | ||
426 | int nolock; | ||
427 | unsigned long flags; | ||
428 | |||
429 | if (reboot_force) | ||
430 | return; | ||
431 | |||
432 | /* Don't deadlock on the call lock in panic */ | ||
433 | nolock = !spin_trylock(&call_lock); | ||
434 | local_irq_save(flags); | ||
435 | __smp_call_function(stop_this_cpu, NULL, 0, 0); | ||
436 | if (!nolock) | ||
437 | spin_unlock(&call_lock); | ||
438 | disable_local_APIC(); | ||
439 | local_irq_restore(flags); | ||
440 | } | ||
441 | |||
442 | /* | ||
443 | * Reschedule call back. Nothing to do, | ||
444 | * all the work is done automatically when | ||
445 | * we return from the interrupt. | ||
446 | */ | ||
447 | asmlinkage void smp_reschedule_interrupt(void) | ||
448 | { | ||
449 | ack_APIC_irq(); | ||
450 | add_pda(irq_resched_count, 1); | ||
451 | } | ||
452 | |||
453 | asmlinkage void smp_call_function_interrupt(void) | ||
454 | { | ||
455 | void (*func) (void *info) = call_data->func; | ||
456 | void *info = call_data->info; | ||
457 | int wait = call_data->wait; | ||
458 | |||
459 | ack_APIC_irq(); | ||
460 | /* | ||
461 | * Notify initiating CPU that I've grabbed the data and am | ||
462 | * about to execute the function | ||
463 | */ | ||
464 | mb(); | ||
465 | atomic_inc(&call_data->started); | ||
466 | /* | ||
467 | * At this point the info structure may be out of scope unless wait==1 | ||
468 | */ | ||
469 | exit_idle(); | ||
470 | irq_enter(); | ||
471 | (*func)(info); | ||
472 | add_pda(irq_call_count, 1); | ||
473 | irq_exit(); | ||
474 | if (wait) { | ||
475 | mb(); | ||
476 | atomic_inc(&call_data->finished); | ||
477 | } | ||
478 | } | ||
479 | |||
480 | struct smp_ops smp_ops = { | ||
481 | .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu, | ||
482 | .smp_prepare_cpus = native_smp_prepare_cpus, | ||
483 | .smp_cpus_done = native_smp_cpus_done, | ||
484 | |||
485 | .smp_send_stop = native_smp_send_stop, | ||
486 | .smp_send_reschedule = native_smp_send_reschedule, | ||
487 | .smp_call_function_mask = native_smp_call_function_mask, | ||
488 | .cpu_up = native_cpu_up, | ||
489 | }; | ||
490 | EXPORT_SYMBOL_GPL(smp_ops); | ||