author		Russell King <rmk+kernel@arm.linux.org.uk>	2011-01-06 17:31:35 -0500
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2011-01-06 17:32:03 -0500
commit		4ec3eb13634529c0bc7466658d84d0bbe3244aea (patch)
tree		b491daac2ccfc7b8ca88e171a43f66888463568a /arch/arm/kernel/smp.c
parent		24056f525051a9e186af28904b396320e18bf9a0 (diff)
parent		15095bb0fe779c0403091bda7adce5fb3bb9ca35 (diff)
Merge branch 'smp' into misc
Conflicts:
	arch/arm/kernel/entry-armv.S
	arch/arm/mm/ioremap.c
Diffstat (limited to 'arch/arm/kernel/smp.c')
-rw-r--r--	arch/arm/kernel/smp.c | 409
1 file changed, 157 insertions(+), 252 deletions(-)
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 8c1959590252..5341b0b19701 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -24,6 +24,7 @@
 #include <linux/irq.h>
 #include <linux/percpu.h>
 #include <linux/clockchips.h>
+#include <linux/completion.h>
 
 #include <asm/atomic.h>
 #include <asm/cacheflush.h>
@@ -37,7 +38,6 @@
 #include <asm/tlbflush.h>
 #include <asm/ptrace.h>
 #include <asm/localtimer.h>
-#include <asm/smp_plat.h>
 
 /*
  * as from 2.5, kernels no longer have an init_tasks structure
@@ -46,22 +46,8 @@
  */
 struct secondary_data secondary_data;
 
-/*
- * structures for inter-processor calls
- * - A collection of single bit ipi messages.
- */
-struct ipi_data {
-	spinlock_t lock;
-	unsigned long ipi_count;
-	unsigned long bits;
-};
-
-static DEFINE_PER_CPU(struct ipi_data, ipi_data) = {
-	.lock = SPIN_LOCK_UNLOCKED,
-};
-
 enum ipi_msg_type {
-	IPI_TIMER,
+	IPI_TIMER = 2,
 	IPI_RESCHEDULE,
 	IPI_CALL_FUNC,
 	IPI_CALL_FUNC_SINGLE,
@@ -177,8 +163,12 @@ int __cpuinit __cpu_up(unsigned int cpu)
 			barrier();
 		}
 
-		if (!cpu_online(cpu))
+		if (!cpu_online(cpu)) {
+			pr_crit("CPU%u: failed to come online\n", cpu);
 			ret = -EIO;
+		}
+	} else {
+		pr_err("CPU%u: failed to boot: %d\n", cpu, ret);
 	}
 
 	secondary_data.stack = NULL;
@@ -194,18 +184,12 @@ int __cpuinit __cpu_up(unsigned int cpu)
 
 	pgd_free(&init_mm, pgd);
 
-	if (ret) {
-		printk(KERN_CRIT "CPU%u: processor failed to boot\n", cpu);
-
-		/*
-		 * FIXME: We need to clean up the new idle thread. --rmk
-		 */
-	}
-
 	return ret;
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
+static void percpu_timer_stop(void);
+
 /*
  * __cpu_disable runs on the processor to be shutdown.
  */
@@ -233,7 +217,7 @@ int __cpu_disable(void)
 	/*
 	 * Stop the local timer for this CPU.
 	 */
-	local_timer_stop();
+	percpu_timer_stop();
 
 	/*
 	 * Flush user cache and TLB mappings, and then remove this CPU
@@ -252,12 +236,20 @@ int __cpu_disable(void)
 	return 0;
 }
 
+static DECLARE_COMPLETION(cpu_died);
+
 /*
  * called on the thread which is asking for a CPU to be shutdown -
  * waits until shutdown has completed, or it is timed out.
  */
 void __cpu_die(unsigned int cpu)
 {
+	if (!wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000))) {
+		pr_err("CPU%u: cpu didn't die\n", cpu);
+		return;
+	}
+	printk(KERN_NOTICE "CPU%u: shutdown\n", cpu);
+
 	if (!platform_cpu_kill(cpu))
 		printk("CPU%u: unable to kill\n", cpu);
 }
@@ -274,12 +266,17 @@ void __ref cpu_die(void)
 {
 	unsigned int cpu = smp_processor_id();
 
-	local_irq_disable();
 	idle_task_exit();
 
+	local_irq_disable();
+	mb();
+
+	/* Tell __cpu_die() that this CPU is now safe to dispose of */
+	complete(&cpu_died);
+
 	/*
 	 * actual CPU shutdown procedure is at least platform (if not
-	 * CPU) specific
+	 * CPU) specific.
 	 */
 	platform_cpu_die(cpu);
 
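The hunks above replace the old poll-until-offline handshake with a completion: the dying CPU calls complete(&cpu_died) once interrupts are masked, and the requesting thread blocks in wait_for_completion_timeout() for up to five seconds before giving up. A minimal userspace sketch of the same handshake, with a POSIX semaphore standing in for the kernel completion API (all names hypothetical):

#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>
#include <time.h>

static sem_t cpu_died;                /* stands in for DECLARE_COMPLETION(cpu_died) */

static void *dying_cpu(void *arg)
{
	/* ... tear down per-CPU state with interrupts off ... */
	sem_post(&cpu_died);          /* complete(&cpu_died) */
	/* in the kernel the CPU now parks in platform_cpu_die() */
	return NULL;
}

int main(void)
{
	pthread_t t;
	struct timespec ts;

	sem_init(&cpu_died, 0, 0);
	pthread_create(&t, NULL, dying_cpu, NULL);

	/* wait_for_completion_timeout(&cpu_died, msecs_to_jiffies(5000)) */
	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec += 5;
	if (sem_timedwait(&cpu_died, &ts) != 0) {
		fprintf(stderr, "cpu didn't die\n");
		return 1;
	}
	printf("shutdown\n");
	pthread_join(t, NULL);
	return 0;
}

As in __cpu_die(), the waiter treats a timeout as something to report and bail out on, not something it can recover from.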
@@ -289,6 +286,7 @@ void __ref cpu_die(void)
 	 * to be repeated to undo the effects of taking the CPU offline.
 	 */
 	__asm__("mov	sp, %0\n"
+	"	mov	fp, #0\n"
 	"	b	secondary_start_kernel"
 		:
 		: "r" (task_stack_page(current) + THREAD_SIZE - 8));
@@ -296,6 +294,17 @@ void __ref cpu_die(void)
 #endif /* CONFIG_HOTPLUG_CPU */
 
 /*
+ * Called by both boot and secondaries to move global data into
+ * per-processor storage.
+ */
+static void __cpuinit smp_store_cpu_info(unsigned int cpuid)
+{
+	struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);
+
+	cpu_info->loops_per_jiffy = loops_per_jiffy;
+}
+
+/*
  * This is the secondary CPU boot entry. We're using this CPUs
  * idle thread stack, but a set of temporary page tables.
  */
@@ -320,6 +329,7 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
 
 	cpu_init();
 	preempt_disable();
+	trace_hardirqs_off();
 
 	/*
 	 * Give the platform a chance to do its own initialisation.
@@ -353,17 +363,6 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
 	cpu_idle();
 }
 
-/*
- * Called by both boot and secondaries to move global data into
- * per-processor storage.
- */
-void __cpuinit smp_store_cpu_info(unsigned int cpuid)
-{
-	struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);
-
-	cpu_info->loops_per_jiffy = loops_per_jiffy;
-}
-
 void __init smp_cpus_done(unsigned int max_cpus)
 {
 	int cpu;
@@ -386,61 +385,80 @@ void __init smp_prepare_boot_cpu(void)
 	per_cpu(cpu_data, cpu).idle = current;
 }
 
-static void send_ipi_message(const struct cpumask *mask, enum ipi_msg_type msg)
+void __init smp_prepare_cpus(unsigned int max_cpus)
 {
-	unsigned long flags;
-	unsigned int cpu;
-
-	local_irq_save(flags);
+	unsigned int ncores = num_possible_cpus();
 
-	for_each_cpu(cpu, mask) {
-		struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
-
-		spin_lock(&ipi->lock);
-		ipi->bits |= 1 << msg;
-		spin_unlock(&ipi->lock);
-	}
+	smp_store_cpu_info(smp_processor_id());
 
 	/*
-	 * Call the platform specific cross-CPU call function.
+	 * are we trying to boot more cores than exist?
 	 */
-	smp_cross_call(mask);
+	if (max_cpus > ncores)
+		max_cpus = ncores;
+
+	if (max_cpus > 1) {
+		/*
+		 * Enable the local timer or broadcast device for the
+		 * boot CPU, but only if we have more than one CPU.
+		 */
+		percpu_timer_setup();
 
-	local_irq_restore(flags);
+		/*
+		 * Initialise the SCU if there are more than one CPU
+		 * and let them know where to start.
+		 */
+		platform_smp_prepare_cpus(max_cpus);
+	}
 }
 
 void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 {
-	send_ipi_message(mask, IPI_CALL_FUNC);
+	smp_cross_call(mask, IPI_CALL_FUNC);
 }
 
 void arch_send_call_function_single_ipi(int cpu)
 {
-	send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
+	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
 }
 
-void show_ipi_list(struct seq_file *p)
+static const char *ipi_types[NR_IPI] = {
+#define S(x,s)	[x - IPI_TIMER] = s
+	S(IPI_TIMER, "Timer broadcast interrupts"),
+	S(IPI_RESCHEDULE, "Rescheduling interrupts"),
+	S(IPI_CALL_FUNC, "Function call interrupts"),
+	S(IPI_CALL_FUNC_SINGLE, "Single function call interrupts"),
+	S(IPI_CPU_STOP, "CPU stop interrupts"),
+};
+
+void show_ipi_list(struct seq_file *p, int prec)
 {
-	unsigned int cpu;
+	unsigned int cpu, i;
 
-	seq_puts(p, "IPI:");
+	for (i = 0; i < NR_IPI; i++) {
+		seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);
 
-	for_each_present_cpu(cpu)
-		seq_printf(p, " %10lu", per_cpu(ipi_data, cpu).ipi_count);
+		for_each_present_cpu(cpu)
+			seq_printf(p, "%10u ",
+				   __get_irq_stat(cpu, ipi_irqs[i]));
 
-	seq_putc(p, '\n');
+		seq_printf(p, " %s\n", ipi_types[i]);
+	}
 }
 
-void show_local_irqs(struct seq_file *p)
+u64 smp_irq_stat_cpu(unsigned int cpu)
 {
-	unsigned int cpu;
+	u64 sum = 0;
+	int i;
 
-	seq_printf(p, "LOC: ");
+	for (i = 0; i < NR_IPI; i++)
+		sum += __get_irq_stat(cpu, ipi_irqs[i]);
 
-	for_each_present_cpu(cpu)
-		seq_printf(p, "%10u ", irq_stat[cpu].local_timer_irqs);
+#ifdef CONFIG_LOCAL_TIMERS
+	sum += __get_irq_stat(cpu, local_timer_irqs);
+#endif
 
-	seq_putc(p, '\n');
+	return sum;
 }
 
 /*
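Because IPI_TIMER now starts at 2, the new ipi_types table uses designated initializers offset by the enum base, via the S() macro, so the array stays dense and exactly NR_IPI entries long. A standalone sketch of the same indexing trick (shortened to three entries for illustration; NR_IPI hard-coded here only for the demo):

#include <stdio.h>

enum ipi_msg_type {
	IPI_TIMER = 2,
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
};

#define NR_IPI 3

/* Each designated initializer lands at (x - IPI_TIMER), so index 0
 * holds the IPI_TIMER name even though the enum value is 2. */
static const char *ipi_types[NR_IPI] = {
#define S(x,s)	[x - IPI_TIMER] = s
	S(IPI_TIMER, "Timer broadcast interrupts"),
	S(IPI_RESCHEDULE, "Rescheduling interrupts"),
	S(IPI_CALL_FUNC, "Function call interrupts"),
};
#undef S

int main(void)
{
	for (int i = 0; i < NR_IPI; i++)
		printf("IPI%d: %s\n", i, ipi_types[i]);
	return 0;
}

The same (ipinr - IPI_TIMER) offset shows up again in do_IPI() below when bumping the per-CPU ipi_irqs[] counters.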
@@ -463,18 +481,30 @@ asmlinkage void __exception do_local_timer(struct pt_regs *regs)
 	int cpu = smp_processor_id();
 
 	if (local_timer_ack()) {
-		irq_stat[cpu].local_timer_irqs++;
+		__inc_irq_stat(cpu, local_timer_irqs);
 		ipi_timer();
 	}
 
 	set_irq_regs(old_regs);
 }
+
+void show_local_irqs(struct seq_file *p, int prec)
+{
+	unsigned int cpu;
+
+	seq_printf(p, "%*s: ", prec, "LOC");
+
+	for_each_present_cpu(cpu)
+		seq_printf(p, "%10u ", __get_irq_stat(cpu, local_timer_irqs));
+
+	seq_printf(p, " Local timer interrupts\n");
+}
 #endif
 
 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
 static void smp_timer_broadcast(const struct cpumask *mask)
 {
-	send_ipi_message(mask, IPI_TIMER);
+	smp_cross_call(mask, IPI_TIMER);
 }
 #else
 #define smp_timer_broadcast	NULL
@@ -511,6 +541,21 @@ void __cpuinit percpu_timer_setup(void)
 	local_timer_setup(evt);
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
+/*
+ * The generic clock events code purposely does not stop the local timer
+ * on CPU_DEAD/CPU_DEAD_FROZEN hotplug events, so we have to do it
+ * manually here.
+ */
+static void percpu_timer_stop(void)
+{
+	unsigned int cpu = smp_processor_id();
+	struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);
+
+	evt->set_mode(CLOCK_EVT_MODE_UNUSED, evt);
+}
+#endif
+
 static DEFINE_SPINLOCK(stop_lock);
 
 /*
@@ -537,216 +582,76 @@ static void ipi_cpu_stop(unsigned int cpu)
 
 /*
  * Main handler for inter-processor interrupts
- *
- * For ARM, the ipimask now only identifies a single
- * category of IPI (Bit 1 IPIs have been replaced by a
- * different mechanism):
- *
- *  Bit 0 - Inter-processor function call
  */
-asmlinkage void __exception do_IPI(struct pt_regs *regs)
+asmlinkage void __exception do_IPI(int ipinr, struct pt_regs *regs)
 {
 	unsigned int cpu = smp_processor_id();
-	struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
 	struct pt_regs *old_regs = set_irq_regs(regs);
 
-	ipi->ipi_count++;
-
-	for (;;) {
-		unsigned long msgs;
+	if (ipinr >= IPI_TIMER && ipinr < IPI_TIMER + NR_IPI)
+		__inc_irq_stat(cpu, ipi_irqs[ipinr - IPI_TIMER]);
 
-		spin_lock(&ipi->lock);
-		msgs = ipi->bits;
-		ipi->bits = 0;
-		spin_unlock(&ipi->lock);
-
-		if (!msgs)
-			break;
-
-		do {
-			unsigned nextmsg;
-
-			nextmsg = msgs & -msgs;
-			msgs &= ~nextmsg;
-			nextmsg = ffz(~nextmsg);
-
-			switch (nextmsg) {
-			case IPI_TIMER:
-				ipi_timer();
-				break;
+	switch (ipinr) {
+	case IPI_TIMER:
+		ipi_timer();
+		break;
 
-			case IPI_RESCHEDULE:
-				/*
-				 * nothing more to do - eveything is
-				 * done on the interrupt return path
-				 */
-				break;
+	case IPI_RESCHEDULE:
+		/*
+		 * nothing more to do - eveything is
+		 * done on the interrupt return path
+		 */
+		break;
 
-			case IPI_CALL_FUNC:
-				generic_smp_call_function_interrupt();
-				break;
+	case IPI_CALL_FUNC:
+		generic_smp_call_function_interrupt();
+		break;
 
-			case IPI_CALL_FUNC_SINGLE:
-				generic_smp_call_function_single_interrupt();
-				break;
+	case IPI_CALL_FUNC_SINGLE:
+		generic_smp_call_function_single_interrupt();
+		break;
 
-			case IPI_CPU_STOP:
-				ipi_cpu_stop(cpu);
-				break;
+	case IPI_CPU_STOP:
+		ipi_cpu_stop(cpu);
+		break;
 
-			default:
-				printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
-				       cpu, nextmsg);
-				break;
-			}
-		} while (msgs);
+	default:
+		printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
+		       cpu, ipinr);
+		break;
 	}
-
 	set_irq_regs(old_regs);
 }
 
 void smp_send_reschedule(int cpu)
 {
-	send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
+	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
 }
 
 void smp_send_stop(void)
 {
-	cpumask_t mask = cpu_online_map;
-	cpu_clear(smp_processor_id(), mask);
-	if (!cpus_empty(mask))
-		send_ipi_message(&mask, IPI_CPU_STOP);
-}
+	unsigned long timeout;
 
-/*
- * not supported here
- */
-int setup_profiling_timer(unsigned int multiplier)
-{
-	return -EINVAL;
-}
+	if (num_online_cpus() > 1) {
+		cpumask_t mask = cpu_online_map;
+		cpu_clear(smp_processor_id(), mask);
 
-static void
-on_each_cpu_mask(void (*func)(void *), void *info, int wait,
-		const struct cpumask *mask)
-{
-	preempt_disable();
+		smp_cross_call(&mask, IPI_CPU_STOP);
+	}
 
-	smp_call_function_many(mask, func, info, wait);
-	if (cpumask_test_cpu(smp_processor_id(), mask))
-		func(info);
+	/* Wait up to one second for other CPUs to stop */
+	timeout = USEC_PER_SEC;
+	while (num_online_cpus() > 1 && timeout--)
+		udelay(1);
 
-	preempt_enable();
+	if (num_online_cpus() > 1)
+		pr_warning("SMP: failed to stop secondary CPUs\n");
 }
 
-/**********************************************************************/
-
 /*
- * TLB operations
+ * not supported here
  */
-struct tlb_args {
-	struct vm_area_struct *ta_vma;
-	unsigned long ta_start;
-	unsigned long ta_end;
-};
-
-static inline void ipi_flush_tlb_all(void *ignored)
-{
-	local_flush_tlb_all();
-}
-
-static inline void ipi_flush_tlb_mm(void *arg)
-{
-	struct mm_struct *mm = (struct mm_struct *)arg;
-
-	local_flush_tlb_mm(mm);
-}
-
-static inline void ipi_flush_tlb_page(void *arg)
-{
-	struct tlb_args *ta = (struct tlb_args *)arg;
-
-	local_flush_tlb_page(ta->ta_vma, ta->ta_start);
-}
-
-static inline void ipi_flush_tlb_kernel_page(void *arg)
-{
-	struct tlb_args *ta = (struct tlb_args *)arg;
-
-	local_flush_tlb_kernel_page(ta->ta_start);
-}
-
-static inline void ipi_flush_tlb_range(void *arg)
-{
-	struct tlb_args *ta = (struct tlb_args *)arg;
-
-	local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
-}
-
-static inline void ipi_flush_tlb_kernel_range(void *arg)
-{
-	struct tlb_args *ta = (struct tlb_args *)arg;
-
-	local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
-}
-
-void flush_tlb_all(void)
-{
-	if (tlb_ops_need_broadcast())
-		on_each_cpu(ipi_flush_tlb_all, NULL, 1);
-	else
-		local_flush_tlb_all();
-}
-
-void flush_tlb_mm(struct mm_struct *mm)
-{
-	if (tlb_ops_need_broadcast())
-		on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, mm_cpumask(mm));
-	else
-		local_flush_tlb_mm(mm);
-}
-
-void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
-{
-	if (tlb_ops_need_broadcast()) {
-		struct tlb_args ta;
-		ta.ta_vma = vma;
-		ta.ta_start = uaddr;
-		on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, mm_cpumask(vma->vm_mm));
-	} else
-		local_flush_tlb_page(vma, uaddr);
-}
-
-void flush_tlb_kernel_page(unsigned long kaddr)
-{
-	if (tlb_ops_need_broadcast()) {
-		struct tlb_args ta;
-		ta.ta_start = kaddr;
-		on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);
-	} else
-		local_flush_tlb_kernel_page(kaddr);
-}
-
-void flush_tlb_range(struct vm_area_struct *vma,
-		unsigned long start, unsigned long end)
-{
-	if (tlb_ops_need_broadcast()) {
-		struct tlb_args ta;
-		ta.ta_vma = vma;
-		ta.ta_start = start;
-		ta.ta_end = end;
-		on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, mm_cpumask(vma->vm_mm));
-	} else
-		local_flush_tlb_range(vma, start, end);
-}
-
-void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+int setup_profiling_timer(unsigned int multiplier)
 {
-	if (tlb_ops_need_broadcast()) {
-		struct tlb_args ta;
-		ta.ta_start = start;
-		ta.ta_end = end;
-		on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
-	} else
-		local_flush_tlb_kernel_range(start, end);
+	return -EINVAL;
 }
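The rewritten smp_send_stop() above no longer assumes the cross-call lands instantly: it raises IPI_CPU_STOP, then polls num_online_cpus() for up to one second before warning about stragglers. A userspace sketch of that bounded busy-wait, with usleep() standing in for udelay() and an atomic counter standing in for num_online_cpus() (all names hypothetical):

#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

/* hypothetical stand-in for num_online_cpus() */
static atomic_int online_cpus;

/* Bounded busy-wait mirroring the new smp_send_stop(): poll for up to
 * one second in 1 us steps, then report whether anyone is still up. */
static int wait_for_secondaries(void)
{
	long timeout = 1000000;          /* USEC_PER_SEC */

	while (atomic_load(&online_cpus) > 1 && timeout--)
		usleep(1);               /* udelay(1) in the kernel */

	return atomic_load(&online_cpus) > 1 ? -1 : 0;
}

int main(void)
{
	atomic_store(&online_cpus, 1);   /* pretend the IPIs stopped everyone */
	if (wait_for_secondaries() < 0)
		fprintf(stderr, "SMP: failed to stop secondary CPUs\n");
	else
		printf("secondary CPUs stopped\n");
	return 0;
}

The deadline is advisory only: as in the kernel code, a timeout produces a warning rather than an abort, since smp_send_stop() runs on paths (panic, reboot) where there is nothing better to do.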