author		Paul Mundt <lethal@linux-sh.org>	2011-01-06 20:29:26 -0500
committer	Paul Mundt <lethal@linux-sh.org>	2011-01-06 20:29:26 -0500
commit		5e93c6b4ecd78b1bab49bad1dc2f6ed7ec0115ee
tree		4f4e321a1ca0baf64d8af528080c71f93495a7d7 /arch/arm/kernel/smp.c
parent		98d27b8abf413a310df6676f7d2128ada1cccc08
parent		3c0cb7c31c206aaedb967e44b98442bbeb17a6c4

Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/torvalds/linux-2.6 into rmobile-latest

Conflicts:
	arch/arm/mach-shmobile/Kconfig

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Diffstat (limited to 'arch/arm/kernel/smp.c')
 -rw-r--r--	arch/arm/kernel/smp.c	449
 1 file changed, 159 insertions(+), 290 deletions(-)
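At a glance, this merge pulls two reworks into smp.c: the bitmask-based IPI mailbox is replaced by direct per-interrupt dispatch, and CPU hotplug teardown gains a completion-based handshake. As a rough userspace illustration of the first change (plain C; names and simplifications here are mine, not kernel APIs), the old scheme encoded pending messages as bits in a word that the handler drained in a loop (in the kernel, a per-CPU word under a spinlock), while the new scheme passes the IPI number straight into the handler's switch:

	#include <stdio.h>

	enum ipi_msg_type {
		IPI_TIMER = 2,
		IPI_RESCHEDULE,
		IPI_CALL_FUNC,
		IPI_CALL_FUNC_SINGLE,
		IPI_CPU_STOP,
	};

	/* Old style (simplified): sender sets a bit in a shared mailbox,
	 * the handler drains all pending bits in a loop. */
	static unsigned long mailbox;

	static void old_send(enum ipi_msg_type msg)
	{
		mailbox |= 1UL << msg;
	}

	static void old_handler(void)
	{
		while (mailbox) {
			unsigned long next = mailbox & -mailbox; /* lowest set bit */
			mailbox &= ~next;
			printf("old: handling IPI %d\n", __builtin_ctzl(next));
		}
	}

	/* New style (simplified): the interrupt number itself names the
	 * message, so the handler is one switch with no mailbox or lock. */
	static void new_handler(int ipinr)
	{
		switch (ipinr) {
		case IPI_TIMER:
			printf("new: timer broadcast\n");
			break;
		case IPI_RESCHEDULE:
			/* nothing to do; handled on the interrupt return path */
			break;
		case IPI_CPU_STOP:
			printf("new: stopping CPU\n");
			break;
		default:
			printf("new: unknown IPI %d\n", ipinr);
			break;
		}
	}

	int main(void)
	{
		old_send(IPI_TIMER);
		old_send(IPI_CPU_STOP);
		old_handler();

		new_handler(IPI_TIMER);
		new_handler(IPI_CPU_STOP);
		return 0;
	}

Because each IPI now arrives with its own number, the per-CPU spinlock and the drain loop disappear, which is what the do_IPI() hunk below shows.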
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 8c195959025..4539ebcb089 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -16,6 +16,7 @@
 #include <linux/cache.h>
 #include <linux/profile.h>
 #include <linux/errno.h>
+#include <linux/ftrace.h>
 #include <linux/mm.h>
 #include <linux/err.h>
 #include <linux/cpu.h>
@@ -24,6 +25,7 @@
 #include <linux/irq.h>
 #include <linux/percpu.h>
 #include <linux/clockchips.h>
+#include <linux/completion.h>
 
 #include <asm/atomic.h>
 #include <asm/cacheflush.h>
@@ -37,7 +39,6 @@
 #include <asm/tlbflush.h>
 #include <asm/ptrace.h>
 #include <asm/localtimer.h>
-#include <asm/smp_plat.h>
 
 /*
  * as from 2.5, kernels no longer have an init_tasks structure
@@ -46,64 +47,14 @@
  */
 struct secondary_data secondary_data;
 
-/*
- * structures for inter-processor calls
- * - A collection of single bit ipi messages.
- */
-struct ipi_data {
-	spinlock_t lock;
-	unsigned long ipi_count;
-	unsigned long bits;
-};
-
-static DEFINE_PER_CPU(struct ipi_data, ipi_data) = {
-	.lock = SPIN_LOCK_UNLOCKED,
-};
-
 enum ipi_msg_type {
-	IPI_TIMER,
+	IPI_TIMER = 2,
 	IPI_RESCHEDULE,
 	IPI_CALL_FUNC,
 	IPI_CALL_FUNC_SINGLE,
 	IPI_CPU_STOP,
 };
 
-static inline void identity_mapping_add(pgd_t *pgd, unsigned long start,
-	unsigned long end)
-{
-	unsigned long addr, prot;
-	pmd_t *pmd;
-
-	prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE;
-	if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
-		prot |= PMD_BIT4;
-
-	for (addr = start & PGDIR_MASK; addr < end;) {
-		pmd = pmd_offset(pgd + pgd_index(addr), addr);
-		pmd[0] = __pmd(addr | prot);
-		addr += SECTION_SIZE;
-		pmd[1] = __pmd(addr | prot);
-		addr += SECTION_SIZE;
-		flush_pmd_entry(pmd);
-		outer_clean_range(__pa(pmd), __pa(pmd + 1));
-	}
-}
-
-static inline void identity_mapping_del(pgd_t *pgd, unsigned long start,
-	unsigned long end)
-{
-	unsigned long addr;
-	pmd_t *pmd;
-
-	for (addr = start & PGDIR_MASK; addr < end; addr += PGDIR_SIZE) {
-		pmd = pmd_offset(pgd + pgd_index(addr), addr);
-		pmd[0] = __pmd(0);
-		pmd[1] = __pmd(0);
-		clean_pmd_entry(pmd);
-		outer_clean_range(__pa(pmd), __pa(pmd + 1));
-	}
-}
-
 int __cpuinit __cpu_up(unsigned int cpu)
 {
 	struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu);
@@ -177,8 +128,12 @@ int __cpuinit __cpu_up(unsigned int cpu)
 			barrier();
 		}
 
-		if (!cpu_online(cpu))
+		if (!cpu_online(cpu)) {
+			pr_crit("CPU%u: failed to come online\n", cpu);
 			ret = -EIO;
+		}
+	} else {
+		pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
 	}
 
 	secondary_data.stack = NULL;
@@ -194,18 +149,12 @@ int __cpuinit __cpu_up(unsigned int cpu)
 
 	pgd_free(&init_mm, pgd);
 
-	if (ret) {
-		printk(KERN_CRIT "CPU%u: processor failed to boot\n", cpu);
-
-		/*
-		 * FIXME: We need to clean up the new idle thread. --rmk
-		 */
-	}
-
 	return ret;
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
+static void percpu_timer_stop(void);
+
 /*
  * __cpu_disable runs on the processor to be shutdown.
  */
@@ -233,7 +182,7 @@ int __cpu_disable(void)
 	/*
 	 * Stop the local timer for this CPU.
 	 */
-	local_timer_stop();
+	percpu_timer_stop();
 
 	/*
 	 * Flush user cache and TLB mappings, and then remove this CPU
@@ -252,12 +201,20 @@ int __cpu_disable(void)
 	return 0;
 }
 
+static DECLARE_COMPLETION(cpu_died);
+
 /*
  * called on the thread which is asking for a CPU to be shutdown -
  * waits until shutdown has completed, or it is timed out.
  */
 void __cpu_die(unsigned int cpu)
 {
+	if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
+		pr_err("CPU%u: cpu didn't die\n", cpu);
+		return;
+	}
+	printk(KERN_NOTICE "CPU%u: shutdown\n", cpu);
+
 	if (!platform_cpu_kill(cpu))
 		printk("CPU%u: unable to kill\n", cpu);
 }
@@ -274,12 +231,17 @@ void __ref cpu_die(void)
 {
 	unsigned int cpu = smp_processor_id();
 
-	local_irq_disable();
 	idle_task_exit();
 
+	local_irq_disable();
+	mb();
+
+	/* Tell __cpu_die() that this CPU is now safe to dispose of */
+	complete(&cpu_died);
+
 	/*
 	 * actual CPU shutdown procedure is at least platform (if not
-	 * CPU) specific
+	 * CPU) specific.
 	 */
 	platform_cpu_die(cpu);
 
@@ -289,6 +251,7 @@ void __ref cpu_die(void)
 	 * to be repeated to undo the effects of taking the CPU offline.
 	 */
 	__asm__("mov	sp, %0\n"
+	"	mov	fp, #0\n"
 	"	b	secondary_start_kernel"
 		:
 		: "r" (task_stack_page(current) + THREAD_SIZE - 8));
@@ -296,6 +259,17 @@ void __ref cpu_die(void)
 #endif /* CONFIG_HOTPLUG_CPU */
 
 /*
+ * Called by both boot and secondaries to move global data into
+ * per-processor storage.
+ */
+static void __cpuinit smp_store_cpu_info(unsigned int cpuid)
+{
+	struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);
+
+	cpu_info->loops_per_jiffy = loops_per_jiffy;
+}
+
+/*
  * This is the secondary CPU boot entry. We're using this CPUs
  * idle thread stack, but a set of temporary page tables.
  */
@@ -310,7 +284,6 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
 	 * All kernel threads share the same mm context; grab a
 	 * reference and switch to it.
 	 */
-	atomic_inc(&mm->mm_users);
 	atomic_inc(&mm->mm_count);
 	current->active_mm = mm;
 	cpumask_set_cpu(cpu, mm_cpumask(mm));
@@ -320,6 +293,7 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
 
 	cpu_init();
 	preempt_disable();
+	trace_hardirqs_off();
 
 	/*
 	 * Give the platform a chance to do its own initialisation.
@@ -353,17 +327,6 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
 	cpu_idle();
 }
 
-/*
- * Called by both boot and secondaries to move global data into
- * per-processor storage.
- */
-void __cpuinit smp_store_cpu_info(unsigned int cpuid)
-{
-	struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);
-
-	cpu_info->loops_per_jiffy = loops_per_jiffy;
-}
-
 void __init smp_cpus_done(unsigned int max_cpus)
 {
 	int cpu;
@@ -386,61 +349,80 @@ void __init smp_prepare_boot_cpu(void)
 	per_cpu(cpu_data, cpu).idle = current;
 }
 
-static void send_ipi_message(const struct cpumask *mask, enum ipi_msg_type msg)
+void __init smp_prepare_cpus(unsigned int max_cpus)
 {
-	unsigned long flags;
-	unsigned int cpu;
+	unsigned int ncores = num_possible_cpus();
 
-	local_irq_save(flags);
-
-	for_each_cpu(cpu, mask) {
-		struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
-
-		spin_lock(&ipi->lock);
-		ipi->bits |= 1 << msg;
-		spin_unlock(&ipi->lock);
-	}
+	smp_store_cpu_info(smp_processor_id());
 
 	/*
-	 * Call the platform specific cross-CPU call function.
+	 * are we trying to boot more cores than exist?
 	 */
-	smp_cross_call(mask);
+	if (max_cpus > ncores)
+		max_cpus = ncores;
+
+	if (max_cpus > 1) {
+		/*
+		 * Enable the local timer or broadcast device for the
+		 * boot CPU, but only if we have more than one CPU.
+		 */
+		percpu_timer_setup();
 
-	local_irq_restore(flags);
+		/*
+		 * Initialise the SCU if there are more than one CPU
+		 * and let them know where to start.
+		 */
+		platform_smp_prepare_cpus(max_cpus);
+	}
 }
 
 void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 {
-	send_ipi_message(mask, IPI_CALL_FUNC);
+	smp_cross_call(mask, IPI_CALL_FUNC);
 }
 
 void arch_send_call_function_single_ipi(int cpu)
 {
-	send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
+	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
 }
 
-void show_ipi_list(struct seq_file *p)
+static const char *ipi_types[NR_IPI] = {
+#define S(x,s)	[x - IPI_TIMER] = s
+	S(IPI_TIMER, "Timer broadcast interrupts"),
+	S(IPI_RESCHEDULE, "Rescheduling interrupts"),
+	S(IPI_CALL_FUNC, "Function call interrupts"),
+	S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
+	S(IPI_CPU_STOP, "CPU stop interrupts"),
+};
+
+void show_ipi_list(struct seq_file *p, int prec)
 {
-	unsigned int cpu;
+	unsigned int cpu, i;
 
-	seq_puts(p, "IPI:");
+	for (i = 0; i < NR_IPI; i++) {
+		seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);
 
-	for_each_present_cpu(cpu)
-		seq_printf(p, " %10lu", per_cpu(ipi_data, cpu).ipi_count);
+		for_each_present_cpu(cpu)
+			seq_printf(p, "%10u ",
+				   __get_irq_stat(cpu, ipi_irqs[i]));
 
-	seq_putc(p, '\n');
+		seq_printf(p, " %s\n", ipi_types[i]);
+	}
 }
 
-void show_local_irqs(struct seq_file *p)
+u64 smp_irq_stat_cpu(unsigned int cpu)
 {
-	unsigned int cpu;
+	u64 sum = 0;
+	int i;
 
-	seq_printf(p, "LOC: ");
+	for (i = 0; i < NR_IPI; i++)
+		sum += __get_irq_stat(cpu, ipi_irqs[i]);
 
-	for_each_present_cpu(cpu)
-		seq_printf(p, "%10u ", irq_stat[cpu].local_timer_irqs);
+#ifdef CONFIG_LOCAL_TIMERS
+	sum += __get_irq_stat(cpu, local_timer_irqs);
+#endif
 
-	seq_putc(p, '\n');
+	return sum;
 }
 
 /*
@@ -457,24 +439,36 @@ static void ipi_timer(void)
 }
 
 #ifdef CONFIG_LOCAL_TIMERS
-asmlinkage void __exception do_local_timer(struct pt_regs *regs)
+asmlinkage void __exception_irq_entry do_local_timer(struct pt_regs *regs)
 {
 	struct pt_regs *old_regs = set_irq_regs(regs);
 	int cpu = smp_processor_id();
 
 	if (local_timer_ack()) {
-		irq_stat[cpu].local_timer_irqs++;
+		__inc_irq_stat(cpu, local_timer_irqs);
 		ipi_timer();
 	}
 
 	set_irq_regs(old_regs);
 }
+
+void show_local_irqs(struct seq_file *p, int prec)
+{
+	unsigned int cpu;
+
+	seq_printf(p, "%*s: ", prec, "LOC");
+
+	for_each_present_cpu(cpu)
+		seq_printf(p, "%10u ", __get_irq_stat(cpu, local_timer_irqs));
+
+	seq_printf(p, " Local timer interrupts\n");
+}
 #endif
 
 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 static void smp_timer_broadcast(const struct cpumask *mask)
 {
-	send_ipi_message(mask, IPI_TIMER);
+	smp_cross_call(mask, IPI_TIMER);
 }
 #else
 #define smp_timer_broadcast	NULL
@@ -511,6 +505,21 @@ void __cpuinit percpu_timer_setup(void)
 	local_timer_setup(evt);
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
+/*
+ * The generic clock events code purposely does not stop the local timer
+ * on CPU_DEAD/CPU_DEAD_FROZEN hotplug events, so we have to do it
+ * manually here.
+ */
+static void percpu_timer_stop(void)
+{
+	unsigned int cpu = smp_processor_id();
+	struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);
+
+	evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
+}
+#endif
+
 static DEFINE_SPINLOCK(stop_lock);
 
 /*
@@ -537,216 +546,76 @@ static void ipi_cpu_stop(unsigned int cpu)
 
 /*
  * Main handler for inter-processor interrupts
- *
- * For ARM, the ipimask now only identifies a single
- * category of IPI (Bit 1 IPIs have been replaced by a
- * different mechanism):
- *
- * Bit 0 - Inter-processor function call
  */
-asmlinkage void __exception do_IPI(struct pt_regs *regs)
+asmlinkage void __exception_irq_entry do_IPI(int ipinr, struct pt_regs *regs)
 {
 	unsigned int cpu = smp_processor_id();
-	struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
 	struct pt_regs *old_regs = set_irq_regs(regs);
 
-	ipi->ipi_count++;
-
-	for (;;) {
-		unsigned long msgs;
-
-		spin_lock(&ipi->lock);
-		msgs = ipi->bits;
-		ipi->bits = 0;
-		spin_unlock(&ipi->lock);
+	if (ipinr >= IPI_TIMER && ipinr < IPI_TIMER + NR_IPI)
+		__inc_irq_stat(cpu, ipi_irqs[ipinr - IPI_TIMER]);
 
-		if (!msgs)
-			break;
-
-		do {
-			unsigned nextmsg;
-
-			nextmsg = msgs & -msgs;
-			msgs &= ~nextmsg;
-			nextmsg = ffz(~nextmsg);
-
-			switch (nextmsg) {
-			case IPI_TIMER:
-				ipi_timer();
-				break;
+	switch (ipinr) {
+	case IPI_TIMER:
+		ipi_timer();
+		break;
 
-			case IPI_RESCHEDULE:
-				/*
-				 * nothing more to do - eveything is
-				 * done on the interrupt return path
-				 */
-				break;
+	case IPI_RESCHEDULE:
+		/*
+		 * nothing more to do - eveything is
+		 * done on the interrupt return path
+		 */
+		break;
 
-			case IPI_CALL_FUNC:
-				generic_smp_call_function_interrupt();
-				break;
+	case IPI_CALL_FUNC:
+		generic_smp_call_function_interrupt();
+		break;
 
-			case IPI_CALL_FUNC_SINGLE:
-				generic_smp_call_function_single_interrupt();
-				break;
+	case IPI_CALL_FUNC_SINGLE:
+		generic_smp_call_function_single_interrupt();
+		break;
 
-			case IPI_CPU_STOP:
-				ipi_cpu_stop(cpu);
-				break;
+	case IPI_CPU_STOP:
+		ipi_cpu_stop(cpu);
+		break;
 
-			default:
-				printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
-				       cpu, nextmsg);
-				break;
-			}
-		} while (msgs);
+	default:
+		printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
+		       cpu, ipinr);
+		break;
 	}
-
 	set_irq_regs(old_regs);
 }
 
 void smp_send_reschedule(int cpu)
 {
-	send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
+	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
 }
 
 void smp_send_stop(void)
 {
-	cpumask_t mask = cpu_online_map;
-	cpu_clear(smp_processor_id(), mask);
-	if (!cpus_empty(mask))
-		send_ipi_message(&mask, IPI_CPU_STOP);
-}
-
-/*
- * not supported here
- */
-int setup_profiling_timer(unsigned int multiplier)
-{
-	return -EINVAL;
-}
+	unsigned long timeout;
+
+	if (num_online_cpus() > 1) {
+		cpumask_t mask = cpu_online_map;
+		cpu_clear(smp_processor_id(), mask);
 
-static void
-on_each_cpu_mask(void (*func)(void *), void *info, int wait,
-		const struct cpumask *mask)
-{
-	preempt_disable();
+		smp_cross_call(&mask, IPI_CPU_STOP);
+	}
 
-	smp_call_function_many(mask, func, info, wait);
-	if (cpumask_test_cpu(smp_processor_id(), mask))
-		func(info);
+	/* Wait up to one second for other CPUs to stop */
+	timeout = USEC_PER_SEC;
+	while (num_online_cpus() > 1 && timeout--)
+		udelay(1);
 
-	preempt_enable();
+	if (num_online_cpus() > 1)
+		pr_warning("SMP: failed to stop secondary CPUs\n");
 }
 
-/**********************************************************************/
-
 /*
- * TLB operations
+ * not supported here
  */
-struct tlb_args {
-	struct vm_area_struct *ta_vma;
-	unsigned long ta_start;
-	unsigned long ta_end;
-};
-
-static inline void ipi_flush_tlb_all(void *ignored)
-{
-	local_flush_tlb_all();
-}
-
-static inline void ipi_flush_tlb_mm(void *arg)
-{
-	struct mm_struct *mm = (struct mm_struct *)arg;
-
-	local_flush_tlb_mm(mm);
-}
-
-static inline void ipi_flush_tlb_page(void *arg)
-{
-	struct tlb_args *ta = (struct tlb_args *)arg;
-
-	local_flush_tlb_page(ta->ta_vma, ta->ta_start);
-}
-
-static inline void ipi_flush_tlb_kernel_page(void *arg)
-{
-	struct tlb_args *ta = (struct tlb_args *)arg;
-
-	local_flush_tlb_kernel_page(ta->ta_start);
-}
-
-static inline void ipi_flush_tlb_range(void *arg)
-{
-	struct tlb_args *ta = (struct tlb_args *)arg;
-
-	local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
-}
-
-static inline void ipi_flush_tlb_kernel_range(void *arg)
-{
-	struct tlb_args *ta = (struct tlb_args *)arg;
-
-	local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
-}
-
-void flush_tlb_all(void)
-{
-	if (tlb_ops_need_broadcast())
-		on_each_cpu(ipi_flush_tlb_all, NULL, 1);
-	else
-		local_flush_tlb_all();
-}
-
-void flush_tlb_mm(struct mm_struct *mm)
-{
-	if (tlb_ops_need_broadcast())
-		on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, mm_cpumask(mm));
-	else
-		local_flush_tlb_mm(mm);
-}
-
-void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
-{
-	if (tlb_ops_need_broadcast()) {
-		struct tlb_args ta;
-		ta.ta_vma = vma;
-		ta.ta_start = uaddr;
-		on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, mm_cpumask(vma->vm_mm));
-	} else
-		local_flush_tlb_page(vma, uaddr);
-}
-
-void flush_tlb_kernel_page(unsigned long kaddr)
-{
-	if (tlb_ops_need_broadcast()) {
-		struct tlb_args ta;
-		ta.ta_start = kaddr;
-		on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);
-	} else
-		local_flush_tlb_kernel_page(kaddr);
-}
-
-void flush_tlb_range(struct vm_area_struct *vma,
-		unsigned long start, unsigned long end)
-{
-	if (tlb_ops_need_broadcast()) {
-		struct tlb_args ta;
-		ta.ta_vma = vma;
-		ta.ta_start = start;
-		ta.ta_end = end;
-		on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, mm_cpumask(vma->vm_mm));
-	} else
-		local_flush_tlb_range(vma, start, end);
-}
-
-void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+int setup_profiling_timer(unsigned int multiplier)
 {
-	if (tlb_ops_need_broadcast()) {
-		struct tlb_args ta;
-		ta.ta_start = start;
-		ta.ta_end = end;
-		on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
-	} else
-		local_flush_tlb_kernel_range(start, end);
+	return -EINVAL;
 }
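The hotplug hunks above replace an unsynchronised shutdown with a handshake: cpu_die() signals a completion once the CPU is safe to dispose of, and __cpu_die() waits on it with a five-second timeout before calling platform_cpu_kill(). Below is a minimal userspace analogue of that handshake built on POSIX threads; complete_cpu_died() and wait_cpu_died_timeout() are hypothetical stand-ins for the kernel's complete() and wait_for_completion_timeout(), not real kernel APIs.

	/* Build with: cc -pthread handshake.c */
	#include <pthread.h>
	#include <stdio.h>
	#include <time.h>

	/* Stand-in for the kernel's completion: a flag guarded by a mutex,
	 * plus a condition variable to wake the waiter. */
	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
	static int cpu_died;

	/* Analogue of complete(&cpu_died), called by the dying "CPU". */
	static void complete_cpu_died(void)
	{
		pthread_mutex_lock(&lock);
		cpu_died = 1;
		pthread_cond_signal(&cond);
		pthread_mutex_unlock(&lock);
	}

	/* Analogue of wait_for_completion_timeout(&cpu_died, ...): returns
	 * nonzero if the completion arrived within 'ms' milliseconds. */
	static int wait_cpu_died_timeout(long ms)
	{
		struct timespec ts;
		int done;

		clock_gettime(CLOCK_REALTIME, &ts);
		ts.tv_sec  += ms / 1000;
		ts.tv_nsec += (ms % 1000) * 1000000L;
		if (ts.tv_nsec >= 1000000000L) {
			ts.tv_sec++;
			ts.tv_nsec -= 1000000000L;
		}

		pthread_mutex_lock(&lock);
		while (!cpu_died) {
			if (pthread_cond_timedwait(&cond, &lock, &ts) != 0)
				break;	/* timed out: give up waiting */
		}
		done = cpu_died;
		pthread_mutex_unlock(&lock);
		return done;
	}

	/* Plays the role of cpu_die() on the CPU going offline. */
	static void *dying_cpu(void *unused)
	{
		(void)unused;
		complete_cpu_died();	/* now safe to dispose of this CPU */
		/* ... platform_cpu_die() would not return from here ... */
		return NULL;
	}

	int main(void)
	{
		pthread_t t;

		pthread_create(&t, NULL, dying_cpu, NULL);

		/* Mirrors __cpu_die(): wait up to 5 seconds, as in the patch. */
		if (!wait_cpu_died_timeout(5000))
			fprintf(stderr, "CPU1: cpu didn't die\n");
		else
			printf("CPU1: shutdown\n");

		pthread_join(t, NULL);
		return 0;
	}

As in the patch, the signal is sent only after the dying side has finished everything the waiter must not observe in progress (in the kernel, after local_irq_disable() and mb()).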