diff options
-rw-r--r-- arch/mips/kernel/gdb-stub.c      |  4 ++--
-rw-r--r-- arch/mips/kernel/smp.c           | 33 +++++++++++++++++++-------------
-rw-r--r-- arch/mips/kernel/smtc.c          |  4 ++--
-rw-r--r-- include/asm-mips/mmu_context.h   |  4 ++--
4 files changed, 27 insertions(+), 18 deletions(-)
diff --git a/arch/mips/kernel/gdb-stub.c b/arch/mips/kernel/gdb-stub.c index bd128fab4b36..3191afa29ad8 100644 --- a/arch/mips/kernel/gdb-stub.c +++ b/arch/mips/kernel/gdb-stub.c
@@ -769,7 +769,7 @@ void handle_exception(struct gdb_regs *regs)
769 | /* | 769 | /* |
770 | * acquire the CPU spinlocks | 770 | * acquire the CPU spinlocks |
771 | */ | 771 | */ |
772 | for (i = num_online_cpus()-1; i >= 0; i--) | 772 | for_each_online_cpu(i) |
773 | if (__raw_spin_trylock(&kgdb_cpulock[i]) == 0) | 773 | if (__raw_spin_trylock(&kgdb_cpulock[i]) == 0) |
774 | panic("kgdb: couldn't get cpulock %d\n", i); | 774 | panic("kgdb: couldn't get cpulock %d\n", i); |
775 | 775 | ||
@@ -1044,7 +1044,7 @@ finish_kgdb:
1044 | 1044 | ||
1045 | exit_kgdb_exception: | 1045 | exit_kgdb_exception: |
1046 | /* release locks so other CPUs can go */ | 1046 | /* release locks so other CPUs can go */ |
1047 | for (i = num_online_cpus()-1; i >= 0; i--) | 1047 | for_each_online_cpu(i) |
1048 | __raw_spin_unlock(&kgdb_cpulock[i]); | 1048 | __raw_spin_unlock(&kgdb_cpulock[i]); |
1049 | spin_unlock(&kgdb_lock); | 1049 | spin_unlock(&kgdb_lock); |
1050 | 1050 | ||
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index 481ba5355dcb..5ca3809a1b45 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c
@@ -375,10 +375,13 @@ void flush_tlb_mm(struct mm_struct *mm)
375 | if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) { | 375 | if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) { |
376 | smp_on_other_tlbs(flush_tlb_mm_ipi, (void *)mm); | 376 | smp_on_other_tlbs(flush_tlb_mm_ipi, (void *)mm); |
377 | } else { | 377 | } else { |
378 | int i; | 378 | cpumask_t mask = cpu_online_map; |
379 | for (i = 0; i < num_online_cpus(); i++) | 379 | unsigned int cpu; |
380 | if (smp_processor_id() != i) | 380 | |
381 | cpu_context(i, mm) = 0; | 381 | cpu_clear(smp_processor_id(), mask); |
382 | for_each_online_cpu(cpu) | ||
383 | if (cpu_context(cpu, mm)) | ||
384 | cpu_context(cpu, mm) = 0; | ||
382 | } | 385 | } |
383 | local_flush_tlb_mm(mm); | 386 | local_flush_tlb_mm(mm); |
384 | 387 | ||
@@ -411,10 +414,13 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned l
411 | fd.addr2 = end; | 414 | fd.addr2 = end; |
412 | smp_on_other_tlbs(flush_tlb_range_ipi, (void *)&fd); | 415 | smp_on_other_tlbs(flush_tlb_range_ipi, (void *)&fd); |
413 | } else { | 416 | } else { |
414 | int i; | 417 | cpumask_t mask = cpu_online_map; |
415 | for (i = 0; i < num_online_cpus(); i++) | 418 | unsigned int cpu; |
416 | if (smp_processor_id() != i) | 419 | |
417 | cpu_context(i, mm) = 0; | 420 | cpu_clear(smp_processor_id(), mask); |
421 | for_each_online_cpu(cpu) | ||
422 | if (cpu_context(cpu, mm)) | ||
423 | cpu_context(cpu, mm) = 0; | ||
418 | } | 424 | } |
419 | local_flush_tlb_range(vma, start, end); | 425 | local_flush_tlb_range(vma, start, end); |
420 | preempt_enable(); | 426 | preempt_enable(); |
@@ -453,10 +459,13 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
453 | fd.addr1 = page; | 459 | fd.addr1 = page; |
454 | smp_on_other_tlbs(flush_tlb_page_ipi, (void *)&fd); | 460 | smp_on_other_tlbs(flush_tlb_page_ipi, (void *)&fd); |
455 | } else { | 461 | } else { |
456 | int i; | 462 | cpumask_t mask = cpu_online_map; |
457 | for (i = 0; i < num_online_cpus(); i++) | 463 | unsigned int cpu; |
458 | if (smp_processor_id() != i) | 464 | |
459 | cpu_context(i, vma->vm_mm) = 0; | 465 | cpu_clear(smp_processor_id(), mask); |
466 | for_each_online_cpu(cpu) | ||
467 | if (cpu_context(cpu, vma->vm_mm)) | ||
468 | cpu_context(cpu, vma->vm_mm) = 0; | ||
460 | } | 469 | } |
461 | local_flush_tlb_page(vma, page); | 470 | local_flush_tlb_page(vma, page); |
462 | preempt_enable(); | 471 | preempt_enable(); |
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c index 4d91e2f455c0..a8c1a698d588 100644 --- a/arch/mips/kernel/smtc.c +++ b/arch/mips/kernel/smtc.c
@@ -1264,7 +1264,7 @@ void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
1264 | if (cpu_has_vtag_icache) | 1264 | if (cpu_has_vtag_icache) |
1265 | flush_icache_all(); | 1265 | flush_icache_all(); |
1266 | /* Traverse all online CPUs (hack requires contigous range) */ | 1266 | /* Traverse all online CPUs (hack requires contigous range) */ |
1267 | for (i = 0; i < num_online_cpus(); i++) { | 1267 | for_each_online_cpu(i) { |
1268 | /* | 1268 | /* |
1269 | * We don't need to worry about our own CPU, nor those of | 1269 | * We don't need to worry about our own CPU, nor those of |
1270 | * CPUs who don't share our TLB. | 1270 | * CPUs who don't share our TLB. |
@@ -1293,7 +1293,7 @@ void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
1293 | /* | 1293 | /* |
1294 | * SMTC shares the TLB within VPEs and possibly across all VPEs. | 1294 | * SMTC shares the TLB within VPEs and possibly across all VPEs. |
1295 | */ | 1295 | */ |
1296 | for (i = 0; i < num_online_cpus(); i++) { | 1296 | for_each_online_cpu(i) { |
1297 | if ((smtc_status & SMTC_TLB_SHARED) || | 1297 | if ((smtc_status & SMTC_TLB_SHARED) || |
1298 | (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id)) | 1298 | (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id)) |
1299 | cpu_context(i, mm) = asid_cache(i) = asid; | 1299 | cpu_context(i, mm) = asid_cache(i) = asid; |
diff --git a/include/asm-mips/mmu_context.h b/include/asm-mips/mmu_context.h index b3b7a689e7d3..0c4f245eaeb2 100644 --- a/include/asm-mips/mmu_context.h +++ b/include/asm-mips/mmu_context.h
@@ -120,7 +120,7 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
120 | { | 120 | { |
121 | int i; | 121 | int i; |
122 | 122 | ||
123 | for (i = 0; i < num_online_cpus(); i++) | 123 | for_each_online_cpu(i) |
124 | cpu_context(i, mm) = 0; | 124 | cpu_context(i, mm) = 0; |
125 | 125 | ||
126 | return 0; | 126 | return 0; |
@@ -284,7 +284,7 @@ drop_mmu_context(struct mm_struct *mm, unsigned cpu)
284 | int i; | 284 | int i; |
285 | 285 | ||
286 | /* SMTC shares the TLB (and ASIDs) across VPEs */ | 286 | /* SMTC shares the TLB (and ASIDs) across VPEs */ |
287 | for (i = 0; i < num_online_cpus(); i++) { | 287 | for_each_online_cpu(i) { |
288 | if((smtc_status & SMTC_TLB_SHARED) | 288 | if((smtc_status & SMTC_TLB_SHARED) |
289 | || (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id)) | 289 | || (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id)) |
290 | cpu_context(i, mm) = 0; | 290 | cpu_context(i, mm) = 0; |