author		Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 18:20:36 -0400
committer	Linus Torvalds <torvalds@ppc970.osdl.org>	2005-04-16 18:20:36 -0400
commit		1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree		0bba044c4ce775e45a88a51686b5d9f90697ea9d /arch/ia64/kernel/smp.c

Linux-2.6.12-rc2 (tag: v2.6.12-rc2)

Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'arch/ia64/kernel/smp.c')
-rw-r--r--	arch/ia64/kernel/smp.c	376
1 files changed, 376 insertions, 0 deletions
diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c
new file mode 100644
index 000000000000..953095e2ce15
--- /dev/null
+++ b/arch/ia64/kernel/smp.c
@@ -0,0 +1,376 @@
/*
 * SMP Support
 *
 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
 * Copyright (C) 1999, 2001, 2003 David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * Lots of stuff stolen from arch/alpha/kernel/smp.c
 *
 * 01/05/16 Rohit Seth <rohit.seth@intel.com>	IA64-SMP functions. Reorganized
 *	the existing code (on the lines of x86 port).
 * 00/09/11 David Mosberger <davidm@hpl.hp.com>	Do loops_per_jiffy
 *	calibration on each CPU.
 * 00/08/23 Asit Mallick <asit.k.mallick@intel.com>	fixed logical processor id
 * 00/03/31 Rohit Seth <rohit.seth@intel.com>	Fixes for Bootstrap Processor
 *	& cpu_online_map now gets done here (instead of setup.c)
 * 99/10/05 davidm	Update to bring it in sync with new command-line processing
 *	scheme.
 * 10/13/00 Goutham Rao <goutham.rao@intel.com> Updated smp_call_function and
 *	smp_call_function_single to resend IPI on timeouts
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/efi.h>
#include <linux/bitops.h>

#include <asm/atomic.h>
#include <asm/current.h>
#include <asm/delay.h>
#include <asm/machvec.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/sal.h>
#include <asm/system.h>
#include <asm/tlbflush.h>
#include <asm/unistd.h>
#include <asm/mca.h>

/*
 * Structure and data for smp_call_function(). This is designed to minimise static memory
 * requirements. It also looks cleaner.
 */
static __cacheline_aligned DEFINE_SPINLOCK(call_lock);

struct call_data_struct {
	void (*func) (void *info);
	void *info;
	long wait;
	atomic_t started;
	atomic_t finished;
};
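
/*
 * call_data points at the caller's on-stack call_data_struct for the duration
 * of a single smp_call_function*() request: the sender takes call_lock,
 * publishes the pointer, raises IPI_CALL_FUNC, and spins on 'started' (and on
 * 'finished' when waiting) before clearing the pointer again.  handle_IPI()
 * copies out func/info/wait and bumps 'started' before invoking func, so the
 * sender's stack frame must stay valid until every target has checked in.
 */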
static volatile struct call_data_struct *call_data;

#define IPI_CALL_FUNC		0
#define IPI_CPU_STOP		1

/* This needs to be cacheline aligned because it is written to by *other* CPUs. */
static DEFINE_PER_CPU(u64, ipi_operation) ____cacheline_aligned;
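
/*
 * Each bit of a CPU's ipi_operation word encodes one pending IPI_* operation:
 * send_IPI_single() sets the bit on the target CPU and then kicks it with a
 * platform IPI, while handle_IPI() atomically swaps the word back to zero and
 * services every bit that was set.
 */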

extern void cpu_halt (void);

void
lock_ipi_calllock(void)
{
	spin_lock_irq(&call_lock);
}

void
unlock_ipi_calllock(void)
{
	spin_unlock_irq(&call_lock);
}

static void
stop_this_cpu (void)
{
	/*
	 * Remove this CPU:
	 */
	cpu_clear(smp_processor_id(), cpu_online_map);
	max_xtp();
	local_irq_disable();
	cpu_halt();
}

void
cpu_die(void)
{
	max_xtp();
	local_irq_disable();
	cpu_halt();
	/* Should never be here */
	BUG();
	for (;;);
}

irqreturn_t
handle_IPI (int irq, void *dev_id, struct pt_regs *regs)
{
	int this_cpu = get_cpu();
	unsigned long *pending_ipis = &__ia64_per_cpu_var(ipi_operation);
	unsigned long ops;

	mb();	/* Order interrupt and bit testing. */
	while ((ops = xchg(pending_ipis, 0)) != 0) {
		mb();	/* Order bit clearing and data access. */
		do {
			unsigned long which;

			which = ffz(~ops);
			ops &= ~(1 << which);

			switch (which) {
			case IPI_CALL_FUNC:
			{
				struct call_data_struct *data;
				void (*func)(void *info);
				void *info;
				int wait;

				/* release the 'pointer lock' */
				data = (struct call_data_struct *) call_data;
				func = data->func;
				info = data->info;
				wait = data->wait;

				mb();
				atomic_inc(&data->started);
				/*
				 * At this point the structure may be gone unless
				 * wait is true.
				 */
				(*func)(info);

				/* Notify the sending CPU that the task is done. */
				mb();
				if (wait)
					atomic_inc(&data->finished);
			}
			break;

			case IPI_CPU_STOP:
				stop_this_cpu();
				break;

			default:
				printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n", this_cpu, which);
				break;
			}
		} while (ops);
		mb();	/* Order data access and bit testing. */
	}
	put_cpu();
	return IRQ_HANDLED;
}

/*
 * Called with preemption disabled.
 */
static inline void
send_IPI_single (int dest_cpu, int op)
{
	set_bit(op, &per_cpu(ipi_operation, dest_cpu));
	platform_send_ipi(dest_cpu, IA64_IPI_VECTOR, IA64_IPI_DM_INT, 0);
}

/*
 * Called with preemption disabled.
 */
static inline void
send_IPI_allbutself (int op)
{
	unsigned int i;

	for (i = 0; i < NR_CPUS; i++) {
		if (cpu_online(i) && i != smp_processor_id())
			send_IPI_single(i, op);
	}
}

/*
 * Called with preemption disabled.
 */
static inline void
send_IPI_all (int op)
{
	int i;

	for (i = 0; i < NR_CPUS; i++)
		if (cpu_online(i))
			send_IPI_single(i, op);
}

/*
 * Called with preemption disabled.
 */
static inline void
send_IPI_self (int op)
{
	send_IPI_single(smp_processor_id(), op);
}

/*
 * Called with preemption disabled.
 */
void
smp_send_reschedule (int cpu)
{
	platform_send_ipi(cpu, IA64_IPI_RESCHEDULE, IA64_IPI_DM_INT, 0);
}

void
smp_flush_tlb_all (void)
{
	on_each_cpu((void (*)(void *))local_flush_tlb_all, NULL, 1, 1);
}

void
smp_flush_tlb_mm (struct mm_struct *mm)
{
	/* this happens for the common case of a single-threaded fork(): */
	if (likely(mm == current->active_mm && atomic_read(&mm->mm_users) == 1))
	{
		local_finish_flush_tlb_mm(mm);
		return;
	}

	/*
	 * We could optimize this further by using mm->cpu_vm_mask to track which CPUs
	 * have been running in the address space.  It's not clear that this is worth the
	 * trouble though: to avoid races, we have to raise the IPI on the target CPU
	 * anyhow, and once a CPU is interrupted, the cost of local_flush_tlb_all() is
	 * rather trivial.
	 */
	on_each_cpu((void (*)(void *))local_finish_flush_tlb_mm, mm, 1, 1);
}

/*
 * Run a function on another CPU.
 *  <func>	The function to run. This must be fast and non-blocking.
 *  <info>	An arbitrary pointer to pass to the function.
 *  <nonatomic>	Currently unused.
 *  <wait>	If true, wait until the function has completed on the other CPU.
 *  [RETURNS]	0 on success, else a negative status code.
 *
 * Does not return until the remote CPU is nearly ready to execute <func>,
 * is executing it, or has already executed it.  (An illustrative usage
 * sketch follows the function below.)
 */

int
smp_call_function_single (int cpuid, void (*func) (void *info), void *info, int nonatomic,
			  int wait)
{
	struct call_data_struct data;
	int cpus = 1;
	int me = get_cpu(); /* prevent preemption and reschedule on another processor */

	if (cpuid == me) {
		printk("%s: trying to call self\n", __FUNCTION__);
		put_cpu();
		return -EBUSY;
	}

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	spin_lock_bh(&call_lock);

	call_data = &data;
	mb();	/* ensure store to call_data precedes setting of IPI_CALL_FUNC */
	send_IPI_single(cpuid, IPI_CALL_FUNC);

	/* Wait for response */
	while (atomic_read(&data.started) != cpus)
		cpu_relax();

	if (wait)
		while (atomic_read(&data.finished) != cpus)
			cpu_relax();
	call_data = NULL;

	spin_unlock_bh(&call_lock);
	put_cpu();
	return 0;
}
EXPORT_SYMBOL(smp_call_function_single);
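
/*
 * A minimal usage sketch (illustrative only, not part of the original file;
 * 'probe_count', bump_probe_count() and 'cpu' are hypothetical, with 'cpu'
 * assumed to be an online CPU other than the caller's):
 *
 *	static DEFINE_PER_CPU(int, probe_count);
 *
 *	static void bump_probe_count(void *info)
 *	{
 *		__get_cpu_var(probe_count)++;	// fast, non-blocking work
 *	}
 *
 *	// from a context with interrupts enabled; wait=1 spins until the
 *	// handler has finished on the target CPU
 *	if (smp_call_function_single(cpu, bump_probe_count, NULL, 0, 1) == 0)
 *		printk("probe ran on CPU %d\n", cpu);
 */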

/*
 * this function sends a 'generic call function' IPI to all other CPUs
 * in the system.
 */

/*
 * [SUMMARY]	Run a function on all other CPUs.
 *  <func>	The function to run. This must be fast and non-blocking.
 *  <info>	An arbitrary pointer to pass to the function.
 *  <nonatomic>	Currently unused.
 *  <wait>	If true, wait (atomically) until the function has completed on the other CPUs.
 *  [RETURNS]	0 on success, else a negative status code.
 *
 * Does not return until the remote CPUs are nearly ready to execute <func>,
 * are executing it, or have already executed it.  (An illustrative usage
 * sketch follows the function below.)
 *
 * You must not call this function with disabled interrupts or from a
 * hardware interrupt handler or from a bottom half handler.
 */
int
smp_call_function (void (*func) (void *info), void *info, int nonatomic, int wait)
{
	struct call_data_struct data;
	int cpus = num_online_cpus()-1;

	if (!cpus)
		return 0;

	/* Can deadlock when called with interrupts disabled */
	WARN_ON(irqs_disabled());

	data.func = func;
	data.info = info;
	atomic_set(&data.started, 0);
	data.wait = wait;
	if (wait)
		atomic_set(&data.finished, 0);

	spin_lock(&call_lock);

	call_data = &data;
	mb();	/* ensure store to call_data precedes setting of IPI_CALL_FUNC */
	send_IPI_allbutself(IPI_CALL_FUNC);

	/* Wait for response */
	while (atomic_read(&data.started) != cpus)
		cpu_relax();

	if (wait)
		while (atomic_read(&data.finished) != cpus)
			cpu_relax();
	call_data = NULL;

	spin_unlock(&call_lock);
	return 0;
}
EXPORT_SYMBOL(smp_call_function);
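
/*
 * A minimal usage sketch (illustrative only, not part of the original file;
 * 'pending' and drain_local_pending() are hypothetical):
 *
 *	static DEFINE_PER_CPU(struct list_head, pending);
 *
 *	static void drain_local_pending(void *info)
 *	{
 *		INIT_LIST_HEAD(&__get_cpu_var(pending));	// fast, non-blocking
 *	}
 *
 *	// interrupts must be enabled here; the calling CPU is not included,
 *	// so cover it directly (or use on_each_cpu() instead)
 *	smp_call_function(drain_local_pending, NULL, 0, 1);
 *	drain_local_pending(NULL);
 */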

/*
 * this function calls the 'stop' function on all other CPUs in the system.
 */
void
smp_send_stop (void)
{
	send_IPI_allbutself(IPI_CPU_STOP);
}

int __init
setup_profiling_timer (unsigned int multiplier)
{
	return -EINVAL;
}