author     Linus Torvalds <torvalds@ppc970.osdl.org>   2005-04-16 18:20:36 -0400
committer  Linus Torvalds <torvalds@ppc970.osdl.org>   2005-04-16 18:20:36 -0400
commit     1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree       0bba044c4ce775e45a88a51686b5d9f90697ea9d  /arch/m32r/kernel/smp.c
tags       Linux-2.6.12-rc2  v2.6.12-rc2
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'arch/m32r/kernel/smp.c')
-rw-r--r--  arch/m32r/kernel/smp.c  965
1 files changed, 965 insertions, 0 deletions
diff --git a/arch/m32r/kernel/smp.c b/arch/m32r/kernel/smp.c
new file mode 100644
index 000000000000..48b187f2d2b3
--- /dev/null
+++ b/arch/m32r/kernel/smp.c
@@ -0,0 +1,965 @@
1 | /* | ||
2 | * linux/arch/m32r/kernel/smp.c | ||
3 | * | ||
4 | * M32R SMP support routines. | ||
5 | * | ||
6 | * Copyright (c) 2001, 2002 Hitoshi Yamamoto | ||
7 | * | ||
8 | * Taken from i386 version. | ||
9 | * (c) 1995 Alan Cox, Building #3 <alan@redhat.com> | ||
10 | * (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com> | ||
11 | * | ||
12 | * This code is released under the GNU General Public License version 2 or | ||
13 | * later. | ||
14 | */ | ||
15 | |||
16 | #undef DEBUG_SMP | ||
17 | |||
18 | #include <linux/irq.h> | ||
19 | #include <linux/interrupt.h> | ||
20 | #include <linux/spinlock.h> | ||
21 | #include <linux/mm.h> | ||
22 | #include <linux/smp.h> | ||
23 | #include <linux/profile.h> | ||
24 | #include <linux/cpu.h> | ||
25 | |||
26 | #include <asm/cacheflush.h> | ||
27 | #include <asm/pgalloc.h> | ||
28 | #include <asm/atomic.h> | ||
29 | #include <asm/io.h> | ||
30 | #include <asm/mmu_context.h> | ||
31 | #include <asm/m32r.h> | ||
32 | |||
33 | /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ | ||
34 | /* Data structures and variables */ | ||
35 | /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ | ||
36 | |||
37 | /* | ||
38 | * Structure and data for smp_call_function(). This is designed to minimise | ||
39 | * static memory requirements. It also looks cleaner. | ||
40 | */ | ||
41 | static DEFINE_SPINLOCK(call_lock); | ||
42 | |||
43 | struct call_data_struct { | ||
44 | void (*func) (void *info); | ||
45 | void *info; | ||
46 | atomic_t started; | ||
47 | atomic_t finished; | ||
48 | int wait; | ||
49 | } __attribute__ ((__aligned__(SMP_CACHE_BYTES))); | ||
50 | |||
51 | static struct call_data_struct *call_data; | ||
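/*
 * (Descriptive note, inferred from smp_call_function() below: call_data
 *  points at the initiator's on-stack call_data_struct while a call is in
 *  flight; 'started' counts CPUs that have latched func/info, and
 *  'finished' counts CPUs that have completed func when 'wait' is set.)
 */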
52 | |||
53 | /* | ||
54 | * For flush_cache_all() | ||
55 | */ | ||
56 | static DEFINE_SPINLOCK(flushcache_lock); | ||
57 | static volatile unsigned long flushcache_cpumask = 0; | ||
58 | |||
59 | /* | ||
60 | * For flush_tlb_others() | ||
61 | */ | ||
62 | static volatile cpumask_t flush_cpumask; | ||
63 | static struct mm_struct *flush_mm; | ||
64 | static struct vm_area_struct *flush_vma; | ||
65 | static volatile unsigned long flush_va; | ||
66 | static DEFINE_SPINLOCK(tlbstate_lock); | ||
67 | #define FLUSH_ALL 0xffffffff | ||
68 | |||
69 | DECLARE_PER_CPU(int, prof_multiplier); | ||
70 | DECLARE_PER_CPU(int, prof_old_multiplier); | ||
71 | DECLARE_PER_CPU(int, prof_counter); | ||
72 | |||
73 | extern spinlock_t ipi_lock[]; | ||
74 | |||
75 | /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ | ||
76 | /* Function Prototypes */ | ||
77 | /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ | ||
78 | |||
79 | void smp_send_reschedule(int); | ||
80 | void smp_reschedule_interrupt(void); | ||
81 | |||
82 | void smp_flush_cache_all(void); | ||
83 | void smp_flush_cache_all_interrupt(void); | ||
84 | |||
85 | void smp_flush_tlb_all(void); | ||
86 | static void flush_tlb_all_ipi(void *); | ||
87 | |||
88 | void smp_flush_tlb_mm(struct mm_struct *); | ||
89 | void smp_flush_tlb_range(struct vm_area_struct *, unsigned long, \ | ||
90 | unsigned long); | ||
91 | void smp_flush_tlb_page(struct vm_area_struct *, unsigned long); | ||
92 | static void flush_tlb_others(cpumask_t, struct mm_struct *, | ||
93 | struct vm_area_struct *, unsigned long); | ||
94 | void smp_invalidate_interrupt(void); | ||
95 | |||
96 | void smp_send_stop(void); | ||
97 | static void stop_this_cpu(void *); | ||
98 | |||
99 | int smp_call_function(void (*) (void *), void *, int, int); | ||
100 | void smp_call_function_interrupt(void); | ||
101 | |||
102 | void smp_send_timer(void); | ||
103 | void smp_ipi_timer_interrupt(struct pt_regs *); | ||
104 | void smp_local_timer_interrupt(struct pt_regs *); | ||
105 | |||
106 | void send_IPI_allbutself(int, int); | ||
107 | static void send_IPI_mask(cpumask_t, int, int); | ||
108 | unsigned long send_IPI_mask_phys(cpumask_t, int, int); | ||
109 | |||
110 | /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ | ||
111 | /* Rescheduling request Routines */ | ||
112 | /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ | ||
113 | |||
114 | /*==========================================================================* | ||
115 | * Name: smp_send_reschedule | ||
116 | * | ||
117 | * Description: This routine requests another CPU to reschedule. | ||
118 | * 1.Send 'RESCHEDULE_IPI' to the target CPU. | ||
119 | * Request the target CPU to execute 'smp_reschedule_interrupt()'. | ||
120 | * | ||
121 | * Born on Date: 2002.02.05 | ||
122 | * | ||
123 | * Arguments: cpu_id - Target CPU ID | ||
124 | * | ||
125 | * Returns: void (cannot fail) | ||
126 | * | ||
127 | * Modification log: | ||
128 | * Date Who Description | ||
129 | * ---------- --- -------------------------------------------------------- | ||
130 | * | ||
131 | *==========================================================================*/ | ||
132 | void smp_send_reschedule(int cpu_id) | ||
133 | { | ||
134 | WARN_ON(cpu_is_offline(cpu_id)); | ||
135 | send_IPI_mask(cpumask_of_cpu(cpu_id), RESCHEDULE_IPI, 1); | ||
136 | } | ||
137 | |||
138 | /*==========================================================================* | ||
139 | * Name: smp_reschedule_interrupt | ||
140 | * | ||
141 | * Description: This routine executes on the CPU which received | ||
142 | * 'RESCHEDULE_IPI'. | ||
143 | * Rescheduling is handled on return from the | ||
144 | * interrupt. | ||
145 | * | ||
146 | * Born on Date: 2002.02.05 | ||
147 | * | ||
148 | * Arguments: NONE | ||
149 | * | ||
150 | * Returns: void (cannot fail) | ||
151 | * | ||
152 | * Modification log: | ||
153 | * Date Who Description | ||
154 | * ---------- --- -------------------------------------------------------- | ||
155 | * | ||
156 | *==========================================================================*/ | ||
157 | void smp_reschedule_interrupt(void) | ||
158 | { | ||
159 | /* nothing to do */ | ||
160 | } | ||
161 | |||
162 | /*==========================================================================* | ||
163 | * Name: smp_flush_cache_all | ||
164 | * | ||
165 | * Description: This routine sends an 'INVALIDATE_CACHE_IPI' to all other | ||
166 | * CPUs in the system. | ||
167 | * | ||
168 | * Born on Date: 2003-05-28 | ||
169 | * | ||
170 | * Arguments: NONE | ||
171 | * | ||
172 | * Returns: void (cannot fail) | ||
173 | * | ||
174 | * Modification log: | ||
175 | * Date Who Description | ||
176 | * ---------- --- -------------------------------------------------------- | ||
177 | * | ||
178 | *==========================================================================*/ | ||
179 | void smp_flush_cache_all(void) | ||
180 | { | ||
181 | cpumask_t cpumask; | ||
182 | unsigned long *mask; | ||
183 | |||
184 | preempt_disable(); | ||
185 | cpumask = cpu_online_map; | ||
186 | cpu_clear(smp_processor_id(), cpumask); | ||
187 | spin_lock(&flushcache_lock); | ||
188 | mask=cpus_addr(cpumask); | ||
189 | atomic_set_mask(*mask, (atomic_t *)&flushcache_cpumask); | ||
190 | send_IPI_mask(cpumask, INVALIDATE_CACHE_IPI, 0); | ||
191 | _flush_cache_copyback_all(); | ||
192 | while (flushcache_cpumask) | ||
193 | mb(); | ||
194 | spin_unlock(&flushcache_lock); | ||
195 | preempt_enable(); | ||
196 | } | ||
197 | |||
198 | void smp_flush_cache_all_interrupt(void) | ||
199 | { | ||
200 | _flush_cache_copyback_all(); | ||
201 | clear_bit(smp_processor_id(), &flushcache_cpumask); | ||
202 | } | ||
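/*
 * The pair of functions above uses a simple initiator/receiver handshake:
 * the initiator sets one bit per target CPU in flushcache_cpumask, sends the
 * IPI, and spins until every receiver has cleared its own bit.  A minimal
 * sketch of that pattern follows (hypothetical names, compiled out, not part
 * of the original file).
 */
#if 0
static volatile unsigned long pending_mask;

static void initiator_side(cpumask_t targets, int ipi_num)
{
	/* mark every target CPU as pending, then interrupt them */
	atomic_set_mask(*cpus_addr(targets), (atomic_t *)&pending_mask);
	send_IPI_mask(targets, ipi_num, 0);

	/* wait until every receiver has acknowledged */
	while (pending_mask)
		mb();
}

static void receiver_side(void)
{
	/* ... do the per-CPU work here ... */
	clear_bit(smp_processor_id(), &pending_mask);
}
#endif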
203 | |||
204 | /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ | ||
205 | /* TLB flush request Routines */ | ||
206 | /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ | ||
207 | |||
208 | /*==========================================================================* | ||
209 | * Name: smp_flush_tlb_all | ||
210 | * | ||
211 | * Description: This routine flushes all processes' TLBs. | ||
212 | * 1.Request the other CPUs to execute 'flush_tlb_all_ipi()'. | ||
213 | * 2.Execute 'do_flush_tlb_all_local()'. | ||
214 | * | ||
215 | * Born on Date: 2002.02.05 | ||
216 | * | ||
217 | * Arguments: NONE | ||
218 | * | ||
219 | * Returns: void (cannot fail) | ||
220 | * | ||
221 | * Modification log: | ||
222 | * Date Who Description | ||
223 | * ---------- --- -------------------------------------------------------- | ||
224 | * | ||
225 | *==========================================================================*/ | ||
226 | void smp_flush_tlb_all(void) | ||
227 | { | ||
228 | unsigned long flags; | ||
229 | |||
230 | preempt_disable(); | ||
231 | local_irq_save(flags); | ||
232 | __flush_tlb_all(); | ||
233 | local_irq_restore(flags); | ||
234 | smp_call_function(flush_tlb_all_ipi, 0, 1, 1); | ||
235 | preempt_enable(); | ||
236 | } | ||
237 | |||
238 | /*==========================================================================* | ||
239 | * Name: flush_tlb_all_ipi | ||
240 | * | ||
241 | * Description: This routine flushes all local TLBs. | ||
242 | * 1.Execute 'do_flush_tlb_all_local()'. | ||
243 | * | ||
244 | * Born on Date: 2002.02.05 | ||
245 | * | ||
246 | * Arguments: *info - not used | ||
247 | * | ||
248 | * Returns: void (cannot fail) | ||
249 | * | ||
250 | * Modification log: | ||
251 | * Date Who Description | ||
252 | * ---------- --- -------------------------------------------------------- | ||
253 | * | ||
254 | *==========================================================================*/ | ||
255 | static void flush_tlb_all_ipi(void *info) | ||
256 | { | ||
257 | __flush_tlb_all(); | ||
258 | } | ||
259 | |||
260 | /*==========================================================================* | ||
261 | * Name: smp_flush_tlb_mm | ||
262 | * | ||
263 | * Description: This routine flushes the specified mm context's TLB entries. | ||
264 | * | ||
265 | * Born on Date: 2002.02.05 | ||
266 | * | ||
267 | * Arguments: *mm - a pointer to the mm struct whose TLB is flushed | ||
268 | * | ||
269 | * Returns: void (cannot fail) | ||
270 | * | ||
271 | * Modification log: | ||
272 | * Date Who Description | ||
273 | * ---------- --- -------------------------------------------------------- | ||
274 | * | ||
275 | *==========================================================================*/ | ||
276 | void smp_flush_tlb_mm(struct mm_struct *mm) | ||
277 | { | ||
278 | int cpu_id = smp_processor_id(); | ||
279 | cpumask_t cpu_mask; | ||
280 | unsigned long *mmc = &mm->context[cpu_id]; | ||
281 | unsigned long flags; | ||
282 | |||
283 | preempt_disable(); | ||
284 | cpu_mask = mm->cpu_vm_mask; | ||
285 | cpu_clear(cpu_id, cpu_mask); | ||
286 | |||
287 | if (*mmc != NO_CONTEXT) { | ||
288 | local_irq_save(flags); | ||
289 | *mmc = NO_CONTEXT; | ||
290 | if (mm == current->mm) | ||
291 | activate_context(mm); | ||
292 | else | ||
293 | cpu_clear(cpu_id, mm->cpu_vm_mask); | ||
294 | local_irq_restore(flags); | ||
295 | } | ||
296 | if (!cpus_empty(cpu_mask)) | ||
297 | flush_tlb_others(cpu_mask, mm, NULL, FLUSH_ALL); | ||
298 | |||
299 | preempt_enable(); | ||
300 | } | ||
301 | |||
302 | /*==========================================================================* | ||
303 | * Name: smp_flush_tlb_range | ||
304 | * | ||
305 | * Description: This routine flushes a range of pages. | ||
306 | * | ||
307 | * Born on Date: 2002.02.05 | ||
308 | * | ||
309 | * Arguments: *vma - a pointer to the vma struct for the mm to flush | ||
310 | * start - not used | ||
311 | * end - not used | ||
312 | * | ||
313 | * Returns: void (cannot fail) | ||
314 | * | ||
315 | * Modification log: | ||
316 | * Date Who Description | ||
317 | * ---------- --- -------------------------------------------------------- | ||
318 | * | ||
319 | *==========================================================================*/ | ||
320 | void smp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, | ||
321 | unsigned long end) | ||
322 | { | ||
323 | smp_flush_tlb_mm(vma->vm_mm); | ||
324 | } | ||
325 | |||
326 | /*==========================================================================* | ||
327 | * Name: smp_flush_tlb_page | ||
328 | * | ||
329 | * Description: This routine flushes one page. | ||
330 | * | ||
331 | * Born on Date: 2002.02.05 | ||
332 | * | ||
333 | * Arguments: *vma - a pointer to the vma struct that includes va | ||
334 | * va - virtual address to flush from the TLB | ||
335 | * | ||
336 | * Returns: void (cannot fail) | ||
337 | * | ||
338 | * Modification log: | ||
339 | * Date Who Description | ||
340 | * ---------- --- -------------------------------------------------------- | ||
341 | * | ||
342 | *==========================================================================*/ | ||
343 | void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long va) | ||
344 | { | ||
345 | struct mm_struct *mm = vma->vm_mm; | ||
346 | int cpu_id = smp_processor_id(); | ||
347 | cpumask_t cpu_mask; | ||
348 | unsigned long *mmc = &mm->context[cpu_id]; | ||
349 | unsigned long flags; | ||
350 | |||
351 | preempt_disable(); | ||
352 | cpu_mask = mm->cpu_vm_mask; | ||
353 | cpu_clear(cpu_id, cpu_mask); | ||
354 | |||
355 | #ifdef DEBUG_SMP | ||
356 | if (!mm) | ||
357 | BUG(); | ||
358 | #endif | ||
359 | |||
360 | if (*mmc != NO_CONTEXT) { | ||
361 | local_irq_save(flags); | ||
362 | va &= PAGE_MASK; | ||
363 | va |= (*mmc & MMU_CONTEXT_ASID_MASK); | ||
364 | __flush_tlb_page(va); | ||
365 | local_irq_restore(flags); | ||
366 | } | ||
367 | if (!cpus_empty(cpu_mask)) | ||
368 | flush_tlb_others(cpu_mask, mm, vma, va); | ||
369 | |||
370 | preempt_enable(); | ||
371 | } | ||
372 | |||
373 | /*==========================================================================* | ||
374 | * Name: flush_tlb_others | ||
375 | * | ||
376 | * Description: This routine requests other CPUs to flush their TLBs. | ||
377 | * 1.Set up parameters. | ||
378 | * 2.Send 'INVALIDATE_TLB_IPI' to the other CPUs. | ||
379 | * Request the other CPUs to execute 'smp_invalidate_interrupt()'. | ||
380 | * 3.Wait until the other CPUs finish the flush. | ||
381 | * | ||
382 | * Born on Date: 2002.02.05 | ||
383 | * | ||
384 | * Arguments: cpumask - bitmap of target CPUs | ||
385 | * *mm - a pointer to the mm struct whose TLB is flushed | ||
386 | * *vma - a pointer to the vma struct that includes va | ||
387 | * va - virtual address to flush from the TLB | ||
388 | * | ||
389 | * Returns: void (cannot fail) | ||
390 | * | ||
391 | * Modification log: | ||
392 | * Date Who Description | ||
393 | * ---------- --- -------------------------------------------------------- | ||
394 | * | ||
395 | *==========================================================================*/ | ||
396 | static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm, | ||
397 | struct vm_area_struct *vma, unsigned long va) | ||
398 | { | ||
399 | unsigned long *mask; | ||
400 | #ifdef DEBUG_SMP | ||
401 | unsigned long flags; | ||
402 | __save_flags(flags); | ||
403 | if (!(flags & 0x0040)) /* Interrupt Disable NONONO */ | ||
404 | BUG(); | ||
405 | #endif /* DEBUG_SMP */ | ||
406 | |||
407 | /* | ||
408 | * A couple of (to be removed) sanity checks: | ||
409 | * | ||
410 | * - we do not send IPIs to not-yet booted CPUs. | ||
411 | * - current CPU must not be in mask | ||
412 | * - mask must exist :) | ||
413 | */ | ||
414 | BUG_ON(cpus_empty(cpumask)); | ||
415 | |||
416 | BUG_ON(cpu_isset(smp_processor_id(), cpumask)); | ||
417 | BUG_ON(!mm); | ||
418 | |||
419 | /* If a CPU which we ran on has gone down, OK. */ | ||
420 | cpus_and(cpumask, cpumask, cpu_online_map); | ||
421 | if (cpus_empty(cpumask)) | ||
422 | return; | ||
423 | |||
424 | /* | ||
425 | * i'm not happy about this global shared spinlock in the | ||
426 | * MM hot path, but we'll see how contended it is. | ||
427 | * Temporarily this turns IRQs off, so that lockups are | ||
428 | * detected by the NMI watchdog. | ||
429 | */ | ||
430 | spin_lock(&tlbstate_lock); | ||
431 | |||
432 | flush_mm = mm; | ||
433 | flush_vma = vma; | ||
434 | flush_va = va; | ||
435 | mask=cpus_addr(cpumask); | ||
436 | atomic_set_mask(*mask, (atomic_t *)&flush_cpumask); | ||
437 | |||
438 | /* | ||
439 | * We have to send the IPI only to | ||
440 | * CPUs affected. | ||
441 | */ | ||
442 | send_IPI_mask(cpumask, INVALIDATE_TLB_IPI, 0); | ||
443 | |||
444 | while (!cpus_empty(flush_cpumask)) { | ||
445 | /* nothing. lockup detection does not belong here */ | ||
446 | mb(); | ||
447 | } | ||
448 | |||
449 | flush_mm = NULL; | ||
450 | flush_vma = NULL; | ||
451 | flush_va = 0; | ||
452 | spin_unlock(&tlbstate_lock); | ||
453 | } | ||
454 | |||
455 | /*==========================================================================* | ||
456 | * Name: smp_invalidate_interrupt | ||
457 | * | ||
458 | * Description: This routine executes on the CPU which received | ||
459 | * 'INVALIDATE_TLB_IPI'. | ||
460 | * 1.Flush the local TLB. | ||
461 | * 2.Report that the TLB flush has finished. | ||
462 | * | ||
463 | * Born on Date: 2002.02.05 | ||
464 | * | ||
465 | * Arguments: NONE | ||
466 | * | ||
467 | * Returns: void (cannot fail) | ||
468 | * | ||
469 | * Modification log: | ||
470 | * Date Who Description | ||
471 | * ---------- --- -------------------------------------------------------- | ||
472 | * | ||
473 | *==========================================================================*/ | ||
474 | void smp_invalidate_interrupt(void) | ||
475 | { | ||
476 | int cpu_id = smp_processor_id(); | ||
477 | unsigned long *mmc = &flush_mm->context[cpu_id]; | ||
478 | |||
479 | if (!cpu_isset(cpu_id, flush_cpumask)) | ||
480 | return; | ||
481 | |||
482 | if (flush_va == FLUSH_ALL) { | ||
483 | *mmc = NO_CONTEXT; | ||
484 | if (flush_mm == current->active_mm) | ||
485 | activate_context(flush_mm); | ||
486 | else | ||
487 | cpu_clear(cpu_id, flush_mm->cpu_vm_mask); | ||
488 | } else { | ||
489 | unsigned long va = flush_va; | ||
490 | |||
491 | if (*mmc != NO_CONTEXT) { | ||
492 | va &= PAGE_MASK; | ||
493 | va |= (*mmc & MMU_CONTEXT_ASID_MASK); | ||
494 | __flush_tlb_page(va); | ||
495 | } | ||
496 | } | ||
497 | cpu_clear(cpu_id, flush_cpumask); | ||
498 | } | ||
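/*
 * (Descriptive note, inferred from the code above: each receiver re-masks
 *  flush_va to the page boundary and substitutes its own ASID taken from
 *  mm->context[cpu_id], since the same mm may be running under different
 *  ASIDs on different CPUs.)
 */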
499 | |||
500 | /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ | ||
501 | /* Stop CPU request Routines */ | ||
502 | /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ | ||
503 | |||
504 | /*==========================================================================* | ||
505 | * Name: smp_send_stop | ||
506 | * | ||
507 | * Description: This routine requests that all other CPUs stop. | ||
508 | * 1.Request the other CPUs to execute 'stop_this_cpu()'. | ||
509 | * | ||
510 | * Born on Date: 2002.02.05 | ||
511 | * | ||
512 | * Arguments: NONE | ||
513 | * | ||
514 | * Returns: void (cannot fail) | ||
515 | * | ||
516 | * Modification log: | ||
517 | * Date Who Description | ||
518 | * ---------- --- -------------------------------------------------------- | ||
519 | * | ||
520 | *==========================================================================*/ | ||
521 | void smp_send_stop(void) | ||
522 | { | ||
523 | smp_call_function(stop_this_cpu, NULL, 1, 0); | ||
524 | } | ||
525 | |||
526 | /*==========================================================================* | ||
527 | * Name: stop_this_cpu | ||
528 | * | ||
529 | * Description: This routine halts the CPU. | ||
530 | * | ||
531 | * Born on Date: 2002.02.05 | ||
532 | * | ||
533 | * Arguments: NONE | ||
534 | * | ||
535 | * Returns: void (cannot fail) | ||
536 | * | ||
537 | * Modification log: | ||
538 | * Date Who Description | ||
539 | * ---------- --- -------------------------------------------------------- | ||
540 | * | ||
541 | *==========================================================================*/ | ||
542 | static void stop_this_cpu(void *dummy) | ||
543 | { | ||
544 | int cpu_id = smp_processor_id(); | ||
545 | |||
546 | /* | ||
547 | * Remove this CPU: | ||
548 | */ | ||
549 | cpu_clear(cpu_id, cpu_online_map); | ||
550 | |||
551 | /* | ||
552 | * PSW IE = 1; | ||
553 | * IMASK = 0; | ||
554 | * goto SLEEP | ||
555 | */ | ||
556 | local_irq_disable(); | ||
557 | outl(0, M32R_ICU_IMASK_PORTL); | ||
558 | inl(M32R_ICU_IMASK_PORTL); /* dummy read */ | ||
559 | local_irq_enable(); | ||
560 | |||
561 | for ( ; ; ); | ||
562 | } | ||
563 | |||
564 | /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ | ||
565 | /* Call function Routines */ | ||
566 | /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ | ||
567 | |||
568 | /*==========================================================================* | ||
569 | * Name: smp_call_function | ||
570 | * | ||
571 | * Description: This routine sends a 'CALL_FUNCTION_IPI' to all other CPUs | ||
572 | * in the system. | ||
573 | * | ||
574 | * Born on Date: 2002.02.05 | ||
575 | * | ||
576 | * Arguments: *func - The function to run. This must be fast and | ||
577 | * non-blocking. | ||
578 | * *info - An arbitrary pointer to pass to the function. | ||
579 | * nonatomic - currently unused. | ||
580 | * wait - If true, wait (atomically) until function has | ||
581 | * completed on other CPUs. | ||
582 | * | ||
583 | * Returns: 0 on success, else a negative status code. Does not return | ||
584 | * until remote CPUs are nearly ready to execute <<func>> | ||
585 | * or have already executed it. | ||
586 | * | ||
587 | * Cautions: You must not call this function with interrupts disabled or | ||
588 | * from a hardware interrupt handler; you may call it from a | ||
589 | * bottom half handler. | ||
590 | * | ||
591 | * Modification log: | ||
592 | * Date Who Description | ||
593 | * ---------- --- -------------------------------------------------------- | ||
594 | * | ||
595 | *==========================================================================*/ | ||
596 | int smp_call_function(void (*func) (void *info), void *info, int nonatomic, | ||
597 | int wait) | ||
598 | { | ||
599 | struct call_data_struct data; | ||
600 | int cpus; | ||
601 | |||
602 | #ifdef DEBUG_SMP | ||
603 | unsigned long flags; | ||
604 | __save_flags(flags); | ||
605 | if (!(flags & 0x0040)) /* Interrupt Disable NONONO */ | ||
606 | BUG(); | ||
607 | #endif /* DEBUG_SMP */ | ||
608 | |||
609 | /* Holding any lock stops cpus from going down. */ | ||
610 | spin_lock(&call_lock); | ||
611 | cpus = num_online_cpus() - 1; | ||
612 | |||
613 | if (!cpus) { | ||
614 | spin_unlock(&call_lock); | ||
615 | return 0; | ||
616 | } | ||
617 | |||
618 | /* Can deadlock when called with interrupts disabled */ | ||
619 | WARN_ON(irqs_disabled()); | ||
620 | |||
621 | data.func = func; | ||
622 | data.info = info; | ||
623 | atomic_set(&data.started, 0); | ||
624 | data.wait = wait; | ||
625 | if (wait) | ||
626 | atomic_set(&data.finished, 0); | ||
627 | |||
628 | call_data = &data; | ||
629 | mb(); | ||
630 | |||
631 | /* Send a message to all other CPUs and wait for them to respond */ | ||
632 | send_IPI_allbutself(CALL_FUNCTION_IPI, 0); | ||
633 | |||
634 | /* Wait for response */ | ||
635 | while (atomic_read(&data.started) != cpus) | ||
636 | barrier(); | ||
637 | |||
638 | if (wait) | ||
639 | while (atomic_read(&data.finished) != cpus) | ||
640 | barrier(); | ||
641 | spin_unlock(&call_lock); | ||
642 | |||
643 | return 0; | ||
644 | } | ||
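/*
 * A minimal usage sketch of the four-argument smp_call_function() above
 * (hypothetical helper names, compiled out, not part of the original file):
 * run a fast, non-blocking callback on every online CPU and wait for it to
 * finish everywhere.  Interrupts must be enabled in the caller.
 */
#if 0
static void ipi_poke(void *info)
{
	int *hits = info;

	/* runs in IPI/interrupt context on each remote CPU: keep it short */
	(void)hits;
}

static void poke_all_cpus(void)
{
	int hits = 0;

	ipi_poke(&hits);				/* do the work locally ...   */
	smp_call_function(ipi_poke, &hits, 1, 1);	/* ... and on all other CPUs */
}
#endif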
645 | |||
646 | /*==========================================================================* | ||
647 | * Name: smp_call_function_interrupt | ||
648 | * | ||
649 | * Description: This routine executes on the CPU which received | ||
650 | * 'CALL_FUNCTION_IPI'. | ||
651 | * | ||
652 | * Born on Date: 2002.02.05 | ||
653 | * | ||
654 | * Arguments: NONE | ||
655 | * | ||
656 | * Returns: void (cannot fail) | ||
657 | * | ||
658 | * Modification log: | ||
659 | * Date Who Description | ||
660 | * ---------- --- -------------------------------------------------------- | ||
661 | * | ||
662 | *==========================================================================*/ | ||
663 | void smp_call_function_interrupt(void) | ||
664 | { | ||
665 | void (*func) (void *info) = call_data->func; | ||
666 | void *info = call_data->info; | ||
667 | int wait = call_data->wait; | ||
668 | |||
669 | /* | ||
670 | * Notify initiating CPU that I've grabbed the data and am | ||
671 | * about to execute the function | ||
672 | */ | ||
673 | mb(); | ||
674 | atomic_inc(&call_data->started); | ||
675 | /* | ||
676 | * At this point the info structure may be out of scope unless wait==1 | ||
677 | */ | ||
678 | irq_enter(); | ||
679 | (*func)(info); | ||
680 | irq_exit(); | ||
681 | |||
682 | if (wait) { | ||
683 | mb(); | ||
684 | atomic_inc(&call_data->finished); | ||
685 | } | ||
686 | } | ||
687 | |||
688 | /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ | ||
689 | /* Timer Routines */ | ||
690 | /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ | ||
691 | |||
692 | /*==========================================================================* | ||
693 | * Name: smp_send_timer | ||
694 | * | ||
695 | * Description: This routine sends a 'LOCAL_TIMER_IPI' to all other CPUs | ||
696 | * in the system. | ||
697 | * | ||
698 | * Born on Date: 2002.02.05 | ||
699 | * | ||
700 | * Arguments: NONE | ||
701 | * | ||
702 | * Returns: void (cannot fail) | ||
703 | * | ||
704 | * Modification log: | ||
705 | * Date Who Description | ||
706 | * ---------- --- -------------------------------------------------------- | ||
707 | * | ||
708 | *==========================================================================*/ | ||
709 | void smp_send_timer(void) | ||
710 | { | ||
711 | send_IPI_allbutself(LOCAL_TIMER_IPI, 1); | ||
712 | } | ||
713 | |||
714 | /*==========================================================================* | ||
715 | * Name: smp_ipi_timer_interrupt | ||
716 | * | ||
717 | * Description: This routine executes on the CPU which received | ||
718 | * 'LOCAL_TIMER_IPI'. | ||
719 | * | ||
720 | * Born on Date: 2002.02.05 | ||
721 | * | ||
722 | * Arguments: *regs - a pointer to the saved register info | ||
723 | * | ||
724 | * Returns: void (cannot fail) | ||
725 | * | ||
726 | * Modification log: | ||
727 | * Date Who Description | ||
728 | * ---------- --- -------------------------------------------------------- | ||
729 | * | ||
730 | *==========================================================================*/ | ||
731 | void smp_ipi_timer_interrupt(struct pt_regs *regs) | ||
732 | { | ||
733 | irq_enter(); | ||
734 | smp_local_timer_interrupt(regs); | ||
735 | irq_exit(); | ||
736 | } | ||
737 | |||
738 | /*==========================================================================* | ||
739 | * Name: smp_local_timer_interrupt | ||
740 | * | ||
741 | * Description: Local timer interrupt handler. It does both profiling and | ||
742 | * process statistics/rescheduling. | ||
743 | * We do profiling in every local tick, statistics/rescheduling | ||
744 | * happen only every 'profiling multiplier' ticks. The default | ||
745 | * multiplier is 1 and it can be changed by writing the new | ||
746 | * multiplier value into /proc/profile. | ||
747 | * | ||
748 | * Born on Date: 2002.02.05 | ||
749 | * | ||
750 | * Arguments: *regs - a pointer to the saved register info | ||
751 | * | ||
752 | * Returns: void (cannot fail) | ||
753 | * | ||
754 | * Original: arch/i386/kernel/apic.c | ||
755 | * | ||
756 | * Modification log: | ||
757 | * Date Who Description | ||
758 | * ---------- --- -------------------------------------------------------- | ||
759 | * 2003-06-24 hy use per_cpu structure. | ||
760 | *==========================================================================*/ | ||
761 | void smp_local_timer_interrupt(struct pt_regs *regs) | ||
762 | { | ||
763 | int user = user_mode(regs); | ||
764 | int cpu_id = smp_processor_id(); | ||
765 | |||
766 | /* | ||
767 | * The profiling function is SMP safe. (nothing can mess | ||
768 | * around with "current", and the profiling counters are | ||
769 | * updated with atomic operations). This is especially | ||
770 | * useful with a profiling multiplier != 1 | ||
771 | */ | ||
772 | |||
773 | profile_tick(CPU_PROFILING, regs); | ||
774 | |||
775 | if (--per_cpu(prof_counter, cpu_id) <= 0) { | ||
776 | /* | ||
777 | * The multiplier may have changed since the last time we got | ||
778 | * to this point as a result of the user writing to | ||
779 | * /proc/profile. In this case we need to adjust the APIC | ||
780 | * timer accordingly. | ||
781 | * | ||
782 | * Interrupts are already masked off at this point. | ||
783 | */ | ||
784 | per_cpu(prof_counter, cpu_id) | ||
785 | = per_cpu(prof_multiplier, cpu_id); | ||
786 | if (per_cpu(prof_counter, cpu_id) | ||
787 | != per_cpu(prof_old_multiplier, cpu_id)) | ||
788 | { | ||
789 | per_cpu(prof_old_multiplier, cpu_id) | ||
790 | = per_cpu(prof_counter, cpu_id); | ||
791 | } | ||
792 | |||
793 | update_process_times(user); | ||
794 | } | ||
795 | } | ||
796 | |||
797 | /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ | ||
798 | /* Send IPI Routines */ | ||
799 | /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ | ||
800 | |||
801 | /*==========================================================================* | ||
802 | * Name: send_IPI_allbutself | ||
803 | * | ||
804 | * Description: This routine sends an IPI to all other CPUs in the system. | ||
805 | * | ||
806 | * Born on Date: 2002.02.05 | ||
807 | * | ||
808 | * Arguments: ipi_num - IPI number | ||
809 | * try - 0 : Always send the IPI. | ||
810 | * !0 : Do not send the IPI if the target CPU has not | ||
811 | * yet accepted the previous one. | ||
812 | * | ||
813 | * Returns: void (cannot fail) | ||
814 | * | ||
815 | * Modification log: | ||
816 | * Date Who Description | ||
817 | * ---------- --- -------------------------------------------------------- | ||
818 | * | ||
819 | *==========================================================================*/ | ||
820 | void send_IPI_allbutself(int ipi_num, int try) | ||
821 | { | ||
822 | cpumask_t cpumask; | ||
823 | |||
824 | cpumask = cpu_online_map; | ||
825 | cpu_clear(smp_processor_id(), cpumask); | ||
826 | |||
827 | send_IPI_mask(cpumask, ipi_num, try); | ||
828 | } | ||
829 | |||
830 | /*==========================================================================* | ||
831 | * Name: send_IPI_mask | ||
832 | * | ||
833 | * Description: This routine sends an IPI to the specified CPUs in the system. | ||
834 | * | ||
835 | * Born on Date: 2002.02.05 | ||
836 | * | ||
837 | * Arguments: cpu_mask - Bitmap of the target CPUs' logical IDs | ||
838 | * ipi_num - IPI number | ||
839 | * try - 0 : Always send the IPI. | ||
840 | * !0 : Do not send the IPI if the target CPU has not | ||
841 | * yet accepted the previous one. | ||
842 | * | ||
843 | * Returns: void (cannot fail) | ||
844 | * | ||
845 | * Modification log: | ||
846 | * Date Who Description | ||
847 | * ---------- --- -------------------------------------------------------- | ||
848 | * | ||
849 | *==========================================================================*/ | ||
850 | static void send_IPI_mask(cpumask_t cpumask, int ipi_num, int try) | ||
851 | { | ||
852 | cpumask_t physid_mask, tmp; | ||
853 | int cpu_id, phys_id; | ||
854 | int num_cpus = num_online_cpus(); | ||
855 | |||
856 | if (num_cpus <= 1) /* NO MP */ | ||
857 | return; | ||
858 | |||
859 | cpus_and(tmp, cpumask, cpu_online_map); | ||
860 | BUG_ON(!cpus_equal(cpumask, tmp)); | ||
861 | |||
862 | physid_mask = CPU_MASK_NONE; | ||
863 | for_each_cpu_mask(cpu_id, cpumask){ | ||
864 | if ((phys_id = cpu_to_physid(cpu_id)) != -1) | ||
865 | cpu_set(phys_id, physid_mask); | ||
866 | } | ||
867 | |||
868 | send_IPI_mask_phys(physid_mask, ipi_num, try); | ||
869 | } | ||
870 | |||
871 | /*==========================================================================* | ||
872 | * Name: send_IPI_mask_phys | ||
873 | * | ||
874 | * Description: This routine sends an IPI to other CPUs in the system. | ||
875 | * | ||
876 | * Born on Date: 2002.02.05 | ||
877 | * | ||
878 | * Arguments: physid_mask - Bitmap of the target CPUs' physical IDs | ||
879 | * ipi_num - IPI number | ||
880 | * try - 0 : Always send the IPI. | ||
881 | * !0 : Do not send the IPI if the target CPU has not | ||
882 | * yet accepted the previous one. | ||
883 | * | ||
884 | * Returns: IPICRi register value. | ||
885 | * | ||
886 | * Modification log: | ||
887 | * Date Who Description | ||
888 | * ---------- --- -------------------------------------------------------- | ||
889 | * | ||
890 | *==========================================================================*/ | ||
891 | unsigned long send_IPI_mask_phys(cpumask_t physid_mask, int ipi_num, | ||
892 | int try) | ||
893 | { | ||
894 | spinlock_t *ipilock; | ||
895 | unsigned long flags = 0; | ||
896 | volatile unsigned long *ipicr_addr; | ||
897 | unsigned long ipicr_val; | ||
898 | unsigned long my_physid_mask; | ||
899 | unsigned long mask = cpus_addr(physid_mask)[0]; | ||
900 | |||
901 | |||
902 | if (mask & ~physids_coerce(phys_cpu_present_map)) | ||
903 | BUG(); | ||
904 | if (ipi_num >= NR_IPIS) | ||
905 | BUG(); | ||
906 | |||
907 | mask <<= IPI_SHIFT; | ||
908 | ipilock = &ipi_lock[ipi_num]; | ||
909 | ipicr_addr = (volatile unsigned long *)(M32R_ICU_IPICR_ADDR | ||
910 | + (ipi_num << 2)); | ||
911 | my_physid_mask = ~(1 << smp_processor_id()); | ||
912 | |||
913 | /* | ||
914 | * lock ipi_lock[i] | ||
915 | * check IPICRi == 0 | ||
916 | * write IPICRi (send IPIi) | ||
917 | * unlock ipi_lock[i] | ||
918 | */ | ||
919 | __asm__ __volatile__ ( | ||
920 | ";; LOCK ipi_lock[i] \n\t" | ||
921 | ".fillinsn \n" | ||
922 | "1: \n\t" | ||
923 | "mvfc %1, psw \n\t" | ||
924 | "clrpsw #0x40 -> nop \n\t" | ||
925 | DCACHE_CLEAR("r4", "r5", "%2") | ||
926 | "lock r4, @%2 \n\t" | ||
927 | "addi r4, #-1 \n\t" | ||
928 | "unlock r4, @%2 \n\t" | ||
929 | "mvtc %1, psw \n\t" | ||
930 | "bnez r4, 2f \n\t" | ||
931 | LOCK_SECTION_START(".balign 4 \n\t") | ||
932 | ".fillinsn \n" | ||
933 | "2: \n\t" | ||
934 | "ld r4, @%2 \n\t" | ||
935 | "blez r4, 2b \n\t" | ||
936 | "bra 1b \n\t" | ||
937 | LOCK_SECTION_END | ||
938 | ";; CHECK IPICRi == 0 \n\t" | ||
939 | ".fillinsn \n" | ||
940 | "3: \n\t" | ||
941 | "ld %0, @%3 \n\t" | ||
942 | "and %0, %6 \n\t" | ||
943 | "beqz %0, 4f \n\t" | ||
944 | "bnez %5, 5f \n\t" | ||
945 | "bra 3b \n\t" | ||
946 | ";; WRITE IPICRi (send IPIi) \n\t" | ||
947 | ".fillinsn \n" | ||
948 | "4: \n\t" | ||
949 | "st %4, @%3 \n\t" | ||
950 | ";; UNLOCK ipi_lock[i] \n\t" | ||
951 | ".fillinsn \n" | ||
952 | "5: \n\t" | ||
953 | "ldi r4, #1 \n\t" | ||
954 | "st r4, @%2 \n\t" | ||
955 | : "=&r"(ipicr_val) | ||
956 | : "r"(flags), "r"(&ipilock->slock), "r"(ipicr_addr), | ||
957 | "r"(mask), "r"(try), "r"(my_physid_mask) | ||
958 | : "memory", "r4" | ||
959 | #ifdef CONFIG_CHIP_M32700_TS1 | ||
960 | , "r5" | ||
961 | #endif /* CONFIG_CHIP_M32700_TS1 */ | ||
962 | ); | ||
963 | |||
964 | return ipicr_val; | ||
965 | } | ||
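
/*
 * (Descriptive note: at C level, the inline assembly above amounts to
 *
 *	acquire ipilock, spinning, with PSW interrupts masked around the
 *	LOCK/UNLOCK instructions;
 *	do {
 *		ipicr_val = *ipicr_addr & my_physid_mask;
 *		if (ipicr_val == 0)
 *			*ipicr_addr = mask;	-- previous IPIs accepted: send this one
 *	} while (ipicr_val != 0 && !try);	-- 'try' set: give up instead of spinning
 *	release ipilock;
 *	return ipicr_val;			-- zero means the IPI was sent
 *
 *  so a non-zero return with 'try' set means the target CPU(s) had not yet
 *  accepted a previous IPI and this one was not sent.)
 */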