diff options
Diffstat (limited to 'arch/arm/kernel/smp.c')
| -rw-r--r-- | arch/arm/kernel/smp.c | 46 |
1 file changed, 16 insertions, 30 deletions
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index 7801aac3c043..6014dfd22af4 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c | |||
| @@ -326,14 +326,14 @@ void __init smp_prepare_boot_cpu(void) | |||
| 326 | per_cpu(cpu_data, cpu).idle = current; | 326 | per_cpu(cpu_data, cpu).idle = current; |
| 327 | } | 327 | } |
| 328 | 328 | ||
| 329 | static void send_ipi_message(cpumask_t callmap, enum ipi_msg_type msg) | 329 | static void send_ipi_message(const struct cpumask *mask, enum ipi_msg_type msg) |
| 330 | { | 330 | { |
| 331 | unsigned long flags; | 331 | unsigned long flags; |
| 332 | unsigned int cpu; | 332 | unsigned int cpu; |
| 333 | 333 | ||
| 334 | local_irq_save(flags); | 334 | local_irq_save(flags); |
| 335 | 335 | ||
| 336 | for_each_cpu_mask(cpu, callmap) { | 336 | for_each_cpu(cpu, mask) { |
| 337 | struct ipi_data *ipi = &per_cpu(ipi_data, cpu); | 337 | struct ipi_data *ipi = &per_cpu(ipi_data, cpu); |
| 338 | 338 | ||
| 339 | spin_lock(&ipi->lock); | 339 | spin_lock(&ipi->lock); |
| @@ -344,19 +344,19 @@ static void send_ipi_message(cpumask_t callmap, enum ipi_msg_type msg) | |||
| 344 | /* | 344 | /* |
| 345 | * Call the platform specific cross-CPU call function. | 345 | * Call the platform specific cross-CPU call function. |
| 346 | */ | 346 | */ |
| 347 | smp_cross_call(callmap); | 347 | smp_cross_call(mask); |
| 348 | 348 | ||
| 349 | local_irq_restore(flags); | 349 | local_irq_restore(flags); |
| 350 | } | 350 | } |
| 351 | 351 | ||
| 352 | void arch_send_call_function_ipi(cpumask_t mask) | 352 | void arch_send_call_function_ipi_mask(const struct cpumask *mask) |
| 353 | { | 353 | { |
| 354 | send_ipi_message(mask, IPI_CALL_FUNC); | 354 | send_ipi_message(mask, IPI_CALL_FUNC); |
| 355 | } | 355 | } |
| 356 | 356 | ||
| 357 | void arch_send_call_function_single_ipi(int cpu) | 357 | void arch_send_call_function_single_ipi(int cpu) |
| 358 | { | 358 | { |
| 359 | send_ipi_message(cpumask_of_cpu(cpu), IPI_CALL_FUNC_SINGLE); | 359 | send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE); |
| 360 | } | 360 | } |
| 361 | 361 | ||
| 362 | void show_ipi_list(struct seq_file *p) | 362 | void show_ipi_list(struct seq_file *p) |
| @@ -498,17 +498,10 @@ asmlinkage void __exception do_IPI(struct pt_regs *regs) | |||
| 498 | 498 | ||
| 499 | void smp_send_reschedule(int cpu) | 499 | void smp_send_reschedule(int cpu) |
| 500 | { | 500 | { |
| 501 | send_ipi_message(cpumask_of_cpu(cpu), IPI_RESCHEDULE); | 501 | send_ipi_message(cpumask_of(cpu), IPI_RESCHEDULE); |
| 502 | } | 502 | } |
| 503 | 503 | ||
| 504 | void smp_send_timer(void) | 504 | void smp_timer_broadcast(const struct cpumask *mask) |
| 505 | { | ||
| 506 | cpumask_t mask = cpu_online_map; | ||
| 507 | cpu_clear(smp_processor_id(), mask); | ||
| 508 | send_ipi_message(mask, IPI_TIMER); | ||
| 509 | } | ||
| 510 | |||
| 511 | void smp_timer_broadcast(cpumask_t mask) | ||
| 512 | { | 505 | { |
| 513 | send_ipi_message(mask, IPI_TIMER); | 506 | send_ipi_message(mask, IPI_TIMER); |
| 514 | } | 507 | } |
| @@ -517,7 +510,7 @@ void smp_send_stop(void) | |||
| 517 | { | 510 | { |
| 518 | cpumask_t mask = cpu_online_map; | 511 | cpumask_t mask = cpu_online_map; |
| 519 | cpu_clear(smp_processor_id(), mask); | 512 | cpu_clear(smp_processor_id(), mask); |
| 520 | send_ipi_message(mask, IPI_CPU_STOP); | 513 | send_ipi_message(&mask, IPI_CPU_STOP); |
| 521 | } | 514 | } |
| 522 | 515 | ||
| 523 | /* | 516 | /* |
| @@ -528,20 +521,17 @@ int setup_profiling_timer(unsigned int multiplier) | |||
| 528 | return -EINVAL; | 521 | return -EINVAL; |
| 529 | } | 522 | } |
| 530 | 523 | ||
| 531 | static int | 524 | static void |
| 532 | on_each_cpu_mask(void (*func)(void *), void *info, int wait, cpumask_t mask) | 525 | on_each_cpu_mask(void (*func)(void *), void *info, int wait, |
| 526 | const struct cpumask *mask) | ||
| 533 | { | 527 | { |
| 534 | int ret = 0; | ||
| 535 | |||
| 536 | preempt_disable(); | 528 | preempt_disable(); |
| 537 | 529 | ||
| 538 | ret = smp_call_function_mask(mask, func, info, wait); | 530 | smp_call_function_many(mask, func, info, wait); |
| 539 | if (cpu_isset(smp_processor_id(), mask)) | 531 | if (cpumask_test_cpu(smp_processor_id(), mask)) |
| 540 | func(info); | 532 | func(info); |
| 541 | 533 | ||
| 542 | preempt_enable(); | 534 | preempt_enable(); |
| 543 | |||
| 544 | return ret; | ||
| 545 | } | 535 | } |
| 546 | 536 | ||
| 547 | /**********************************************************************/ | 537 | /**********************************************************************/ |
| @@ -602,20 +592,17 @@ void flush_tlb_all(void) | |||
| 602 | 592 | ||
| 603 | void flush_tlb_mm(struct mm_struct *mm) | 593 | void flush_tlb_mm(struct mm_struct *mm) |
| 604 | { | 594 | { |
| 605 | cpumask_t mask = mm->cpu_vm_mask; | 595 | on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, &mm->cpu_vm_mask); |
| 606 | |||
| 607 | on_each_cpu_mask(ipi_flush_tlb_mm, mm, 1, mask); | ||
| 608 | } | 596 | } |
| 609 | 597 | ||
| 610 | void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) | 598 | void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr) |
| 611 | { | 599 | { |
| 612 | cpumask_t mask = vma->vm_mm->cpu_vm_mask; | ||
| 613 | struct tlb_args ta; | 600 | struct tlb_args ta; |
| 614 | 601 | ||
| 615 | ta.ta_vma = vma; | 602 | ta.ta_vma = vma; |
| 616 | ta.ta_start = uaddr; | 603 | ta.ta_start = uaddr; |
| 617 | 604 | ||
| 618 | on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, mask); | 605 | on_each_cpu_mask(ipi_flush_tlb_page, &ta, 1, &vma->vm_mm->cpu_vm_mask); |
| 619 | } | 606 | } |
| 620 | 607 | ||
| 621 | void flush_tlb_kernel_page(unsigned long kaddr) | 608 | void flush_tlb_kernel_page(unsigned long kaddr) |
| @@ -630,14 +617,13 @@ void flush_tlb_kernel_page(unsigned long kaddr) | |||
| 630 | void flush_tlb_range(struct vm_area_struct *vma, | 617 | void flush_tlb_range(struct vm_area_struct *vma, |
| 631 | unsigned long start, unsigned long end) | 618 | unsigned long start, unsigned long end) |
| 632 | { | 619 | { |
| 633 | cpumask_t mask = vma->vm_mm->cpu_vm_mask; | ||
| 634 | struct tlb_args ta; | 620 | struct tlb_args ta; |
| 635 | 621 | ||
| 636 | ta.ta_vma = vma; | 622 | ta.ta_vma = vma; |
| 637 | ta.ta_start = start; | 623 | ta.ta_start = start; |
| 638 | ta.ta_end = end; | 624 | ta.ta_end = end; |
| 639 | 625 | ||
| 640 | on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, mask); | 626 | on_each_cpu_mask(ipi_flush_tlb_range, &ta, 1, &vma->vm_mm->cpu_vm_mask); |
| 641 | } | 627 | } |
| 642 | 628 | ||
| 643 | void flush_tlb_kernel_range(unsigned long start, unsigned long end) | 629 | void flush_tlb_kernel_range(unsigned long start, unsigned long end) |
