diff options
author | KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com> | 2011-05-26 19:24:59 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2011-05-26 20:12:32 -0400 |
commit | 937e26c0d1843c92750dac9bca1c972d33e73306 (patch) | |
tree | aad9ae8c1f736a3acd56fbcc954cb1af7a50ea6e /arch/m32r/kernel/smp.c | |
parent | ba7328b2d83090c2440b8d0baa6ccfc2ddf1bda6 (diff) |
m32r: convert cpumask api
We plan to remove cpus_xx() old cpumask APIs later. Also, we plan to
change the mm_cpumask() implementation to allocate only nr_cpu_ids bits, thus
*mm_cpumask() is a dangerous operation.
This patch therefore converts them.
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Hirokazu Takata <takata@linux-m32r.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'arch/m32r/kernel/smp.c')
-rw-r--r-- | arch/m32r/kernel/smp.c | 51 |
1 files changed, 25 insertions, 26 deletions
diff --git a/arch/m32r/kernel/smp.c b/arch/m32r/kernel/smp.c index fc10b39893d4..f758100b8976 100644 --- a/arch/m32r/kernel/smp.c +++ b/arch/m32r/kernel/smp.c | |||
@@ -87,7 +87,6 @@ void smp_local_timer_interrupt(void); | |||
87 | 87 | ||
88 | static void send_IPI_allbutself(int, int); | 88 | static void send_IPI_allbutself(int, int); |
89 | static void send_IPI_mask(const struct cpumask *, int, int); | 89 | static void send_IPI_mask(const struct cpumask *, int, int); |
90 | unsigned long send_IPI_mask_phys(cpumask_t, int, int); | ||
91 | 90 | ||
92 | /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ | 91 | /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ |
93 | /* Rescheduling request Routines */ | 92 | /* Rescheduling request Routines */ |
@@ -162,10 +161,10 @@ void smp_flush_cache_all(void) | |||
162 | unsigned long *mask; | 161 | unsigned long *mask; |
163 | 162 | ||
164 | preempt_disable(); | 163 | preempt_disable(); |
165 | cpumask = cpu_online_map; | 164 | cpumask_copy(&cpumask, cpu_online_mask); |
166 | cpu_clear(smp_processor_id(), cpumask); | 165 | cpumask_clear_cpu(smp_processor_id(), &cpumask); |
167 | spin_lock(&flushcache_lock); | 166 | spin_lock(&flushcache_lock); |
168 | mask=cpus_addr(cpumask); | 167 | mask=cpumask_bits(&cpumask); |
169 | atomic_set_mask(*mask, (atomic_t *)&flushcache_cpumask); | 168 | atomic_set_mask(*mask, (atomic_t *)&flushcache_cpumask); |
170 | send_IPI_mask(&cpumask, INVALIDATE_CACHE_IPI, 0); | 169 | send_IPI_mask(&cpumask, INVALIDATE_CACHE_IPI, 0); |
171 | _flush_cache_copyback_all(); | 170 | _flush_cache_copyback_all(); |
@@ -263,8 +262,8 @@ void smp_flush_tlb_mm(struct mm_struct *mm) | |||
263 | preempt_disable(); | 262 | preempt_disable(); |
264 | cpu_id = smp_processor_id(); | 263 | cpu_id = smp_processor_id(); |
265 | mmc = &mm->context[cpu_id]; | 264 | mmc = &mm->context[cpu_id]; |
266 | cpu_mask = *mm_cpumask(mm); | 265 | cpumask_copy(&cpu_mask, mm_cpumask(mm)); |
267 | cpu_clear(cpu_id, cpu_mask); | 266 | cpumask_clear_cpu(cpu_id, &cpu_mask); |
268 | 267 | ||
269 | if (*mmc != NO_CONTEXT) { | 268 | if (*mmc != NO_CONTEXT) { |
270 | local_irq_save(flags); | 269 | local_irq_save(flags); |
@@ -275,7 +274,7 @@ void smp_flush_tlb_mm(struct mm_struct *mm) | |||
275 | cpumask_clear_cpu(cpu_id, mm_cpumask(mm)); | 274 | cpumask_clear_cpu(cpu_id, mm_cpumask(mm)); |
276 | local_irq_restore(flags); | 275 | local_irq_restore(flags); |
277 | } | 276 | } |
278 | if (!cpus_empty(cpu_mask)) | 277 | if (!cpumask_empty(&cpu_mask)) |
279 | flush_tlb_others(cpu_mask, mm, NULL, FLUSH_ALL); | 278 | flush_tlb_others(cpu_mask, mm, NULL, FLUSH_ALL); |
280 | 279 | ||
281 | preempt_enable(); | 280 | preempt_enable(); |
@@ -333,8 +332,8 @@ void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long va) | |||
333 | preempt_disable(); | 332 | preempt_disable(); |
334 | cpu_id = smp_processor_id(); | 333 | cpu_id = smp_processor_id(); |
335 | mmc = &mm->context[cpu_id]; | 334 | mmc = &mm->context[cpu_id]; |
336 | cpu_mask = *mm_cpumask(mm); | 335 | cpumask_copy(&cpu_mask, mm_cpumask(mm)); |
337 | cpu_clear(cpu_id, cpu_mask); | 336 | cpumask_clear_cpu(cpu_id, &cpu_mask); |
338 | 337 | ||
339 | #ifdef DEBUG_SMP | 338 | #ifdef DEBUG_SMP |
340 | if (!mm) | 339 | if (!mm) |
@@ -348,7 +347,7 @@ void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long va) | |||
348 | __flush_tlb_page(va); | 347 | __flush_tlb_page(va); |
349 | local_irq_restore(flags); | 348 | local_irq_restore(flags); |
350 | } | 349 | } |
351 | if (!cpus_empty(cpu_mask)) | 350 | if (!cpumask_empty(&cpu_mask)) |
352 | flush_tlb_others(cpu_mask, mm, vma, va); | 351 | flush_tlb_others(cpu_mask, mm, vma, va); |
353 | 352 | ||
354 | preempt_enable(); | 353 | preempt_enable(); |
@@ -395,14 +394,14 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm, | |||
395 | * - current CPU must not be in mask | 394 | * - current CPU must not be in mask |
396 | * - mask must exist :) | 395 | * - mask must exist :) |
397 | */ | 396 | */ |
398 | BUG_ON(cpus_empty(cpumask)); | 397 | BUG_ON(cpumask_empty(&cpumask)); |
399 | 398 | ||
400 | BUG_ON(cpu_isset(smp_processor_id(), cpumask)); | 399 | BUG_ON(cpumask_test_cpu(smp_processor_id(), &cpumask)); |
401 | BUG_ON(!mm); | 400 | BUG_ON(!mm); |
402 | 401 | ||
403 | /* If a CPU which we ran on has gone down, OK. */ | 402 | /* If a CPU which we ran on has gone down, OK. */ |
404 | cpus_and(cpumask, cpumask, cpu_online_map); | 403 | cpumask_and(&cpumask, &cpumask, cpu_online_mask); |
405 | if (cpus_empty(cpumask)) | 404 | if (cpumask_empty(&cpumask)) |
406 | return; | 405 | return; |
407 | 406 | ||
408 | /* | 407 | /* |
@@ -416,7 +415,7 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm, | |||
416 | flush_mm = mm; | 415 | flush_mm = mm; |
417 | flush_vma = vma; | 416 | flush_vma = vma; |
418 | flush_va = va; | 417 | flush_va = va; |
419 | mask=cpus_addr(cpumask); | 418 | mask=cpumask_bits(&cpumask); |
420 | atomic_set_mask(*mask, (atomic_t *)&flush_cpumask); | 419 | atomic_set_mask(*mask, (atomic_t *)&flush_cpumask); |
421 | 420 | ||
422 | /* | 421 | /* |
@@ -425,7 +424,7 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm, | |||
425 | */ | 424 | */ |
426 | send_IPI_mask(&cpumask, INVALIDATE_TLB_IPI, 0); | 425 | send_IPI_mask(&cpumask, INVALIDATE_TLB_IPI, 0); |
427 | 426 | ||
428 | while (!cpus_empty(flush_cpumask)) { | 427 | while (!cpumask_empty((cpumask_t*)&flush_cpumask)) { |
429 | /* nothing. lockup detection does not belong here */ | 428 | /* nothing. lockup detection does not belong here */ |
430 | mb(); | 429 | mb(); |
431 | } | 430 | } |
@@ -460,7 +459,7 @@ void smp_invalidate_interrupt(void) | |||
460 | int cpu_id = smp_processor_id(); | 459 | int cpu_id = smp_processor_id(); |
461 | unsigned long *mmc = &flush_mm->context[cpu_id]; | 460 | unsigned long *mmc = &flush_mm->context[cpu_id]; |
462 | 461 | ||
463 | if (!cpu_isset(cpu_id, flush_cpumask)) | 462 | if (!cpumask_test_cpu(cpu_id, &flush_cpumask)) |
464 | return; | 463 | return; |
465 | 464 | ||
466 | if (flush_va == FLUSH_ALL) { | 465 | if (flush_va == FLUSH_ALL) { |
@@ -478,7 +477,7 @@ void smp_invalidate_interrupt(void) | |||
478 | __flush_tlb_page(va); | 477 | __flush_tlb_page(va); |
479 | } | 478 | } |
480 | } | 479 | } |
481 | cpu_clear(cpu_id, flush_cpumask); | 480 | cpumask_clear_cpu(cpu_id, (cpumask_t*)&flush_cpumask); |
482 | } | 481 | } |
483 | 482 | ||
484 | /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ | 483 | /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ |
@@ -530,7 +529,7 @@ static void stop_this_cpu(void *dummy) | |||
530 | /* | 529 | /* |
531 | * Remove this CPU: | 530 | * Remove this CPU: |
532 | */ | 531 | */ |
533 | cpu_clear(cpu_id, cpu_online_map); | 532 | set_cpu_online(cpu_id, false); |
534 | 533 | ||
535 | /* | 534 | /* |
536 | * PSW IE = 1; | 535 | * PSW IE = 1; |
@@ -725,8 +724,8 @@ static void send_IPI_allbutself(int ipi_num, int try) | |||
725 | { | 724 | { |
726 | cpumask_t cpumask; | 725 | cpumask_t cpumask; |
727 | 726 | ||
728 | cpumask = cpu_online_map; | 727 | cpumask_copy(&cpumask, cpu_online_mask); |
729 | cpu_clear(smp_processor_id(), cpumask); | 728 | cpumask_clear_cpu(smp_processor_id(), &cpumask); |
730 | 729 | ||
731 | send_IPI_mask(&cpumask, ipi_num, try); | 730 | send_IPI_mask(&cpumask, ipi_num, try); |
732 | } | 731 | } |
@@ -763,13 +762,13 @@ static void send_IPI_mask(const struct cpumask *cpumask, int ipi_num, int try) | |||
763 | cpumask_and(&tmp, cpumask, cpu_online_mask); | 762 | cpumask_and(&tmp, cpumask, cpu_online_mask); |
764 | BUG_ON(!cpumask_equal(cpumask, &tmp)); | 763 | BUG_ON(!cpumask_equal(cpumask, &tmp)); |
765 | 764 | ||
766 | physid_mask = CPU_MASK_NONE; | 765 | cpumask_clear(&physid_mask); |
767 | for_each_cpu(cpu_id, cpumask) { | 766 | for_each_cpu(cpu_id, cpumask) { |
768 | if ((phys_id = cpu_to_physid(cpu_id)) != -1) | 767 | if ((phys_id = cpu_to_physid(cpu_id)) != -1) |
769 | cpu_set(phys_id, physid_mask); | 768 | cpumask_set_cpu(phys_id, &physid_mask); |
770 | } | 769 | } |
771 | 770 | ||
772 | send_IPI_mask_phys(physid_mask, ipi_num, try); | 771 | send_IPI_mask_phys(&physid_mask, ipi_num, try); |
773 | } | 772 | } |
774 | 773 | ||
775 | /*==========================================================================* | 774 | /*==========================================================================* |
@@ -792,14 +791,14 @@ static void send_IPI_mask(const struct cpumask *cpumask, int ipi_num, int try) | |||
792 | * ---------- --- -------------------------------------------------------- | 791 | * ---------- --- -------------------------------------------------------- |
793 | * | 792 | * |
794 | *==========================================================================*/ | 793 | *==========================================================================*/ |
795 | unsigned long send_IPI_mask_phys(cpumask_t physid_mask, int ipi_num, | 794 | unsigned long send_IPI_mask_phys(const cpumask_t *physid_mask, int ipi_num, |
796 | int try) | 795 | int try) |
797 | { | 796 | { |
798 | spinlock_t *ipilock; | 797 | spinlock_t *ipilock; |
799 | volatile unsigned long *ipicr_addr; | 798 | volatile unsigned long *ipicr_addr; |
800 | unsigned long ipicr_val; | 799 | unsigned long ipicr_val; |
801 | unsigned long my_physid_mask; | 800 | unsigned long my_physid_mask; |
802 | unsigned long mask = cpus_addr(physid_mask)[0]; | 801 | unsigned long mask = cpumask_bits(physid_mask)[0]; |
803 | 802 | ||
804 | 803 | ||
805 | if (mask & ~physids_coerce(phys_cpu_present_map)) | 804 | if (mask & ~physids_coerce(phys_cpu_present_map)) |