Diffstat (limited to 'arch/arm/kernel/smp.c')
 -rw-r--r--  arch/arm/kernel/smp.c | 169
 1 file changed, 106 insertions(+), 63 deletions(-)
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 7801aac3c043..de885fd256c5 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -22,16 +22,20 @@
 #include <linux/smp.h>
 #include <linux/seq_file.h>
 #include <linux/irq.h>
+#include <linux/percpu.h>
+#include <linux/clockchips.h>
 
 #include <asm/atomic.h>
 #include <asm/cacheflush.h>
 #include <asm/cpu.h>
+#include <asm/cputype.h>
 #include <asm/mmu_context.h>
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
 #include <asm/processor.h>
 #include <asm/tlbflush.h>
 #include <asm/ptrace.h>
+#include <asm/localtimer.h>
 
 /*
  * as from 2.5, kernels no longer have an init_tasks structure
@@ -163,7 +167,7 @@ int __cpuexit __cpu_disable(void)
	 * Take this CPU offline.  Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
-	cpu_clear(cpu, cpu_online_map);
+	set_cpu_online(cpu, false);
 
	/*
	 * OK - migrate IRQs away from this CPU
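The direct cpu_clear()/cpu_set() pokes at cpu_online_map are replaced by the generic set_cpu_online() accessor, which hides the underlying bitmap. For orientation only, the generic helper of this kernel generation is roughly the following (paraphrased from kernel/cpu.c; not part of this patch):

	void set_cpu_online(unsigned int cpu, bool online)
	{
		if (online)
			cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits));
		else
			cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits));
	}

Going through the accessor keeps all writers of the online mask in one place instead of letting each architecture manipulate cpu_online_map directly.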
@@ -274,9 +278,9 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
	local_fiq_enable();
 
	/*
-	 * Setup local timer for this CPU.
+	 * Setup the percpu timer for this CPU.
	 */
-	local_timer_setup();
+	percpu_timer_setup();
 
	calibrate_delay();
 
@@ -285,7 +289,7 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
	/*
	 * OK, now it's safe to let the boot CPU continue
	 */
-	cpu_set(cpu, cpu_online_map);
+	set_cpu_online(cpu, true);
 
	/*
	 * OK, it's off to the idle thread for us
@@ -326,14 +330,14 @@ void __init smp_prepare_boot_cpu(void)
	per_cpu(cpu_data, cpu).idle = current;
 }
 
-static void send_ipi_message(cpumask_t callmap, enum ipi_msg_type msg)
+static void send_ipi_message(const struct cpumask *mask, enum ipi_msg_type msg)
 {
	unsigned long flags;
	unsigned int cpu;
 
	local_irq_save(flags);
 
-	for_each_cpu_mask(cpu, callmap) {
+	for_each_cpu(cpu, mask) {
		struct ipi_data *ipi = &per_cpu(ipi_data, cpu);
 
		spin_lock(&ipi->lock);
@@ -344,19 +348,19 @@ static void send_ipi_message(cpumask_t callmap, enum ipi_msg_type msg)
	/*
	 * Call the platform specific cross-CPU call function.
	 */
-	smp_cross_call(callmap);
+	smp_cross_call(mask);
 
	local_irq_restore(flags);
 }
 
-void arch_send_call_function_ipi(cpumask_t mask)
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 {
	send_ipi_message(mask, IPI_CALL_FUNC);
 }
 
 void arch_send_call_function_single_ipi(int cpu)
 {
-	send_ipi_message(cpumask_of_cpu(cpu), IPI_CALL_FUNC_SINGLE);
+	send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
 }
 
 void show_ipi_list(struct seq_file *p)
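The whole IPI path now takes const struct cpumask * rather than a cpumask_t by value, so large-NR_CPUS builds stop copying entire bitmaps across call boundaries; for_each_cpu() and cpumask_of() are the pointer-based counterparts of for_each_cpu_mask() and cpumask_of_cpu(). Paraphrased from include/linux/cpumask.h of this era, the new iterator simply walks the set bits with cpumask_next():

	/* roughly what for_each_cpu(cpu, mask) expands to */
	for ((cpu) = -1;
	     (cpu) = cpumask_next((cpu), (mask)), (cpu) < nr_cpu_ids; )
		...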
@@ -383,10 +387,16 @@ void show_local_irqs(struct seq_file *p)
	seq_putc(p, '\n');
 }
 
+/*
+ * Timer (local or broadcast) support
+ */
+static DEFINE_PER_CPU(struct clock_event_device, percpu_clockevent);
+
 static void ipi_timer(void)
 {
+	struct clock_event_device *evt = &__get_cpu_var(percpu_clockevent);
	irq_enter();
-	local_timer_interrupt();
+	evt->event_handler(evt);
	irq_exit();
 }
 
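ipi_timer() no longer calls a hard-wired local_timer_interrupt(); it fires whatever handler the clockevents core installed on this CPU's device (tick_handle_periodic() in periodic mode, for instance). A minimal sketch of the same per-CPU access spelled out with per_cpu(), assuming the declarations above:

	unsigned int cpu = smp_processor_id();
	struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);

	evt->event_handler(evt);	/* e.g. tick_handle_periodic() */

__get_cpu_var() is just the shorthand for the calling CPU's instance.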
@@ -405,6 +415,42 @@ asmlinkage void __exception do_local_timer(struct pt_regs *regs)
 }
 #endif
 
+#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
+static void smp_timer_broadcast(const struct cpumask *mask)
+{
+	send_ipi_message(mask, IPI_TIMER);
+}
+
+static void broadcast_timer_set_mode(enum clock_event_mode mode,
+	struct clock_event_device *evt)
+{
+}
+
+static void local_timer_setup(struct clock_event_device *evt)
+{
+	evt->name	= "dummy_timer";
+	evt->features	= CLOCK_EVT_FEAT_ONESHOT |
+			  CLOCK_EVT_FEAT_PERIODIC |
+			  CLOCK_EVT_FEAT_DUMMY;
+	evt->rating	= 400;
+	evt->mult	= 1;
+	evt->set_mode	= broadcast_timer_set_mode;
+	evt->broadcast	= smp_timer_broadcast;
+
+	clockevents_register_device(evt);
+}
+#endif
+
+void __cpuinit percpu_timer_setup(void)
+{
+	unsigned int cpu = smp_processor_id();
+	struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu);
+
+	evt->cpumask = cpumask_of(cpu);
+
+	local_timer_setup(evt);
+}
+
 static DEFINE_SPINLOCK(stop_lock);
 
 /*
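This block is the heart of the conversion: each CPU without a usable local timer registers a dummy clock_event_device (CLOCK_EVT_FEAT_DUMMY, rating 400) whose broadcast hook raises IPI_TIMER, and the generic broadcast machinery keeps ticks flowing from one real timer. A hedged sketch of the round trip, with the generic-core entry point paraphrased rather than quoted from this patch:

	/*
	 * tick_do_broadcast(mask)        on the CPU that owns the real timer
	 *   -> evt->broadcast(mask)      here: smp_timer_broadcast()
	 *     -> send_ipi_message(mask, IPI_TIMER)
	 * ...IPI delivered on each target CPU...
	 * ipi_timer()
	 *   -> evt->event_handler(evt)   tick handler installed by the
	 *                                clockevents core on "dummy_timer"
	 */

percpu_timer_setup() is the new entry point called from secondary_start_kernel(); platforms with a real per-CPU timer presumably provide their own local_timer_setup() (hence the newly included asm/localtimer.h) instead of this broadcast-fed dummy.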
@@ -417,7 +463,7 @@ static void ipi_cpu_stop(unsigned int cpu)
	dump_stack();
	spin_unlock(&stop_lock);
 
-	cpu_clear(cpu, cpu_online_map);
+	set_cpu_online(cpu, false);
 
	local_fiq_disable();
	local_irq_disable();
@@ -498,26 +544,14 @@ asmlinkage void __exception do_IPI(struct pt_regs *regs)
 
 void smp_send_reschedule(int cpu)
 {
-	send_ipi_message(cpumask_of_cpu(cpu), IPI_RESCHEDULE);
-}
-
-void smp_send_timer(void)
-{
-	cpumask_t mask = cpu_online_map;
-	cpu_clear(smp_processor_id(), mask);
-	send_ipi_message(mask, IPI_TIMER);
-}
-
-void smp_timer_broadcast(cpumask_t mask)
-{
-	send_ipi_message(mask, IPI_TIMER);
+	send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE);
 }
 
 void smp_send_stop(void)
 {
	cpumask_t mask = cpu_online_map;
	cpu_clear(smp_processor_id(), mask);
-	send_ipi_message(mask, IPI_CPU_STOP);
+	send_ipi_message(&mask, IPI_CPU_STOP);
 }
 
 /*
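smp_send_timer() and the old global smp_timer_broadcast() disappear: timer broadcast is now driven through the clockevents hook added above instead of being called directly. smp_send_stop() still builds a cpumask_t on the stack, so only a & is needed to fit the pointer-based send_ipi_message(). For comparison, an equivalent written purely against the new cpumask API (illustrative only, not in the patch):

	void smp_send_stop(void)
	{
		struct cpumask mask;

		cpumask_copy(&mask, cpu_online_mask);
		cpumask_clear_cpu(smp_processor_id(), &mask);
		send_ipi_message(&mask, IPI_CPU_STOP);
	}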
@@ -528,20 +562,17 @@ int setup_profiling_timer(unsigned int multiplier)
	return -EINVAL;
 }
 
-static int
-on_each_cpu_mask(void (*func)(void *), void *info, int wait, cpumask_t mask)
+static void
+on_each_cpu_mask(void (*func)(void *), void *info, int wait,
+	const struct cpumask *mask)
 {
-	int ret = 0;
-
	preempt_disable();
 
-	ret = smp_call_function_mask(mask, func, info, wait);
-	if (cpu_isset(smp_processor_id(), mask))
+	smp_call_function_many(mask, func, info, wait);
+	if (cpumask_test_cpu(smp_processor_id(), mask))
		func(info);
 
	preempt_enable();
-
-	return ret;
 }
 
 /**********************************************************************/
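on_each_cpu_mask() switches from the removed smp_call_function_mask() to smp_call_function_many(), which returns void, so the helper's int return goes away too. preempt_disable() keeps the caller pinned so smp_processor_id() stays stable and func() can safely be run locally when the current CPU is in the mask. A hypothetical caller, just to show the helper's shape:

	/* illustrative only: run a callback on every CPU in mm's mask */
	static void example_flush(struct mm_struct *mm)
	{
		on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, &mm->cpu_vm_mask);
	}

which is exactly how the reworked flush_tlb_mm() below uses it.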
@@ -555,6 +586,12 @@ struct tlb_args {
	unsigned long ta_end;
 };
 
+/* all SMP configurations have the extended CPUID registers */
+static inline int tlb_ops_need_broadcast(void)
+{
+	return ((read_cpuid_ext(CPUID_EXT_MMFR3) >> 12) & 0xf) < 2;
+}
+
 static inline void ipi_flush_tlb_all(void *ignored)
 {
	local_flush_tlb_all();
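tlb_ops_need_broadcast() reads the maintenance-broadcast field of ID_MMFR3: bits [15:12] report 0 (maintenance operations are not broadcast), 1 (cache maintenance only is broadcast) or 2 (cache and TLB maintenance are broadcast in hardware, as on MP-capable ARMv7 cores). Spelled out:

	unsigned int mmfr3 = read_cpuid_ext(CPUID_EXT_MMFR3);
	unsigned int maint = (mmfr3 >> 12) & 0xf;	/* ID_MMFR3[15:12] */

	/* software IPI broadcast is needed only when maint < 2 */

so on hardware that broadcasts TLB operations itself, the cross-CPU calls below can be skipped entirely.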
@@ -597,55 +634,61 @@ static inline void ipi_flush_tlb_kernel_range(void *arg)
 
 void flush_tlb_all(void)
 {
-	on_each_cpu(ipi_flush_tlb_all, NULL, 1);
+	if (tlb_ops_need_broadcast())
+		on_each_cpu(ipi_flush_tlb_all, NULL, 1);
+	else
+		local_flush_tlb_all();
 }
 
 void flush_tlb_mm(struct mm_struct *mm)
 {
-	cpumask_t mask = mm->cpu_vm_mask;
-
-	on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, mask);
+	if (tlb_ops_need_broadcast())
+		on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, &mm->cpu_vm_mask);
+	else
+		local_flush_tlb_mm(mm);
 }
 
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
 {
-	cpumask_t mask = vma->vm_mm->cpu_vm_mask;
-	struct tlb_args ta;
-
-	ta.ta_vma = vma;
-	ta.ta_start = uaddr;
-
-	on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, mask);
+	if (tlb_ops_need_broadcast()) {
+		struct tlb_args ta;
+		ta.ta_vma = vma;
+		ta.ta_start = uaddr;
+		on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, &vma->vm_mm->cpu_vm_mask);
+	} else
+		local_flush_tlb_page(vma, uaddr);
 }
 
 void flush_tlb_kernel_page(unsigned long kaddr)
 {
-	struct tlb_args ta;
-
-	ta.ta_start = kaddr;
-
-	on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);
+	if (tlb_ops_need_broadcast()) {
+		struct tlb_args ta;
+		ta.ta_start = kaddr;
+		on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);
+	} else
+		local_flush_tlb_kernel_page(kaddr);
 }
 
 void flush_tlb_range(struct vm_area_struct *vma,
		     unsigned long start, unsigned long end)
 {
-	cpumask_t mask = vma->vm_mm->cpu_vm_mask;
-	struct tlb_args ta;
-
-	ta.ta_vma = vma;
-	ta.ta_start = start;
-	ta.ta_end = end;
-
-	on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, mask);
+	if (tlb_ops_need_broadcast()) {
+		struct tlb_args ta;
+		ta.ta_vma = vma;
+		ta.ta_start = start;
+		ta.ta_end = end;
+		on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, &vma->vm_mm->cpu_vm_mask);
+	} else
+		local_flush_tlb_range(vma, start, end);
 }
 
 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
-	struct tlb_args ta;
-
-	ta.ta_start = start;
-	ta.ta_end = end;
-
-	on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
+	if (tlb_ops_need_broadcast()) {
+		struct tlb_args ta;
+		ta.ta_start = start;
+		ta.ta_end = end;
+		on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
+	} else
+		local_flush_tlb_kernel_range(start, end);
 }
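Every flush entry point now follows the same shape: ask tlb_ops_need_broadcast() once, take the IPI path only on hardware that needs it, and otherwise fall through to the purely local operation; the tlb_args marshalling is built only on the slow path. For reference, the receiving side of one of these cross-calls, as it appears earlier in this file (paraphrased):

	static inline void ipi_flush_tlb_range(void *arg)
	{
		struct tlb_args *ta = (struct tlb_args *)arg;

		local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
	}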