Diffstat (limited to 'arch/x86/kernel/smp.c')
-rw-r--r--   arch/x86/kernel/smp.c   253
1 file changed, 253 insertions, 0 deletions
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
new file mode 100644
index 000000000000..b662300a88f3
--- /dev/null
+++ b/arch/x86/kernel/smp.c
@@ -0,0 +1,253 @@
#include <linux/init.h>

#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>

#include <asm/mtrr.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#ifdef CONFIG_X86_32
#include <mach_apic.h>
#include <mach_ipi.h>
#else
#include <asm/mach_apic.h>
#endif

/*
 * This function sends a 'reschedule' IPI to another CPU.
 * It goes straight through and wastes no time serializing
 * anything.  Worst case: we lose a reschedule.
 */
static void native_smp_send_reschedule(int cpu)
{
	WARN_ON(cpu_is_offline(cpu));
	send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
}

/*
 * Structure and data for smp_call_function(). This is designed to minimise
 * static memory requirements. It also looks cleaner.
 */
static DEFINE_SPINLOCK(call_lock);

struct call_data_struct {
	void (*func) (void *info);
	void *info;
	atomic_t started;
	atomic_t finished;
	int wait;
};

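/*
 * Used by the CPU bringup path to serialize marking a CPU online
 * against an in-flight smp_call_function() IPI sequence.
 */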
void lock_ipi_call_lock(void)
{
	spin_lock_irq(&call_lock);
}

void unlock_ipi_call_lock(void)
{
	spin_unlock_irq(&call_lock);
}

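/* Data for the cross-CPU call currently in flight. */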
static struct call_data_struct *call_data;

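/*
 * Lock-free helper used by smp_send_stop(); the caller handles call_lock
 * itself (or deliberately skips it when panicking).
 */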
static void __smp_call_function(void (*func) (void *info), void *info,
				int nonatomic, int wait)
{
	struct call_data_struct data;
	int cpus = num_online_cpus() - 1;

	if (!cpus)
		return;

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	call_data = &data;
	mb();

	/* Send a message to all other CPUs and wait for them to respond */
	send_IPI_allbutself(CALL_FUNCTION_VECTOR);

	/* Wait for response */
	while (atomic_read(&data.started) != cpus)
		cpu_relax();

	if (wait)
		while (atomic_read(&data.finished) != cpus)
			cpu_relax();
}


/**
 * smp_call_function_mask(): Run a function on a set of other CPUs.
 * @mask: The set of cpus to run on. Must not include the current cpu.
 * @func: The function to run. This must be fast and non-blocking.
 * @info: An arbitrary pointer to pass to the function.
 * @wait: If true, wait (atomically) until function has completed on other CPUs.
 *
 * Returns 0 on success, else a negative status code.
 *
 * If @wait is true, then returns once @func has returned; otherwise
 * it returns just before the target cpu calls @func.
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
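 *
 * Illustrative usage (hypothetical caller, not part of this patch):
 *
 *	cpumask_t others = cpu_online_map;
 *	cpu_clear(smp_processor_id(), others);
 *	smp_call_function_mask(others, my_func, my_info, 1);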
 */
static int
native_smp_call_function_mask(cpumask_t mask,
			      void (*func)(void *), void *info,
			      int wait)
{
	struct call_data_struct data;
	cpumask_t allbutself;
	int cpus;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	/* Holding any lock stops cpus from going down. */
	spin_lock(&call_lock);

	allbutself = cpu_online_map;
	cpu_clear(smp_processor_id(), allbutself);

	cpus_and(mask, mask, allbutself);
	cpus = cpus_weight(mask);

	if (!cpus) {
		spin_unlock(&call_lock);
		return 0;
	}

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	call_data = &data;
	wmb();

	/* Send a message to other CPUs */
	if (cpus_equal(mask, allbutself))
		send_IPI_allbutself(CALL_FUNCTION_VECTOR);
	else
		send_IPI_mask(mask, CALL_FUNCTION_VECTOR);

	/* Wait for response */
	while (atomic_read(&data.started) != cpus)
		cpu_relax();

	if (wait)
		while (atomic_read(&data.finished) != cpus)
			cpu_relax();
	spin_unlock(&call_lock);

	return 0;
}

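/*
 * IPI callback for smp_send_stop(): take this CPU offline, shut down its
 * local APIC and halt (or spin) forever.
 */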
static void stop_this_cpu(void *dummy)
{
	local_irq_disable();
	/*
	 * Remove this CPU:
	 */
	cpu_clear(smp_processor_id(), cpu_online_map);
	disable_local_APIC();
	if (hlt_works(smp_processor_id()))
		for (;;) halt();
	for (;;);
}

/*
 * This function calls the 'stop' function on all other CPUs in the system.
 */

static void native_smp_send_stop(void)
{
	int nolock;
	unsigned long flags;

	if (reboot_force)
		return;

	/* Don't deadlock on the call lock in panic */
	nolock = !spin_trylock(&call_lock);
	local_irq_save(flags);
	__smp_call_function(stop_this_cpu, NULL, 0, 0);
	if (!nolock)
		spin_unlock(&call_lock);
	disable_local_APIC();
	local_irq_restore(flags);
}

/*
 * Reschedule callback.  Nothing to do; all the work is done
 * automatically when we return from the interrupt.
 */
void smp_reschedule_interrupt(struct pt_regs *regs)
{
	ack_APIC_irq();
#ifdef CONFIG_X86_32
	__get_cpu_var(irq_stat).irq_resched_count++;
#else
	add_pda(irq_resched_count, 1);
#endif
}

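/*
 * Handler for CALL_FUNCTION_VECTOR: run the function requested by the
 * initiating CPU and update the started/finished counters it is polling.
 */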
void smp_call_function_interrupt(struct pt_regs *regs)
{
	void (*func) (void *info) = call_data->func;
	void *info = call_data->info;
	int wait = call_data->wait;

	ack_APIC_irq();
	/*
	 * Notify initiating CPU that I've grabbed the data and am
	 * about to execute the function
	 */
	mb();
	atomic_inc(&call_data->started);
	/*
	 * At this point the info structure may be out of scope unless wait==1
	 */
	irq_enter();
	(*func)(info);
#ifdef CONFIG_X86_32
	__get_cpu_var(irq_stat).irq_call_count++;
#else
	add_pda(irq_call_count, 1);
#endif
	irq_exit();

	if (wait) {
		mb();
		atomic_inc(&call_data->finished);
	}
}

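/*
 * Native (bare-metal) SMP operations.  Paravirtualized guests can point
 * smp_ops at their own implementations instead.
 */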
struct smp_ops smp_ops = {
	.smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
	.smp_prepare_cpus = native_smp_prepare_cpus,
	.cpu_up = native_cpu_up,
	.smp_cpus_done = native_smp_cpus_done,

	.smp_send_stop = native_smp_send_stop,
	.smp_send_reschedule = native_smp_send_reschedule,
	.smp_call_function_mask = native_smp_call_function_mask,
};
EXPORT_SYMBOL_GPL(smp_ops);