author		David S. Miller <davem@davemloft.net>	2008-08-04 02:24:26 -0400
committer	David S. Miller <davem@davemloft.net>	2008-08-04 16:51:38 -0400
commit		cd5bc89debb4045d55eeffe325b97f2dfba4ddea (patch)
tree		d3e876bc76f5ef99ff71a1c109e7b8db65042b99 /arch/sparc64
parent		622824dbb536f7bdc241eefc3e1ae31c463b4eb8 (diff)
sparc64: Use cpumask_t pointers and for_each_cpu_mask_nr() in xcall_deliver.
Signed-off-by: David S. Miller <davem@davemloft.net>
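The change follows a simple pattern: xcall_deliver() and its three backends now take the (potentially large) cpumask_t by const pointer instead of by value, so callers avoid copying the whole CPU bitmap for every cross call, and the iteration switches to for_each_cpu_mask_nr(), which walks only up to nr_cpu_ids rather than NR_CPUS. Below is a minimal, self-contained userspace sketch of the calling-convention half of that change; the mask_t type and the deliver_by_value()/deliver_by_pointer() helpers are illustrative stand-ins, not the kernel API.

#include <stdio.h>
#include <string.h>

/* Stand-in for cpumask_t: a fixed-size bitmap that is expensive to copy
 * by value when NR_CPUS is large (4096 bits = 512 bytes here).
 */
#define NR_CPUS 4096
#define BITS_PER_LONG (8 * sizeof(unsigned long))

typedef struct {
	unsigned long bits[NR_CPUS / BITS_PER_LONG];
} mask_t;

static void mask_set(mask_t *m, int cpu)
{
	m->bits[cpu / BITS_PER_LONG] |= 1UL << (cpu % BITS_PER_LONG);
}

static int mask_test(const mask_t *m, int cpu)
{
	return (m->bits[cpu / BITS_PER_LONG] >> (cpu % BITS_PER_LONG)) & 1UL;
}

/* Old style: every call copies sizeof(mask_t) bytes onto the stack. */
static void deliver_by_value(unsigned long data0, mask_t mask)
{
	int i;

	for (i = 0; i < NR_CPUS; i++)
		if (mask_test(&mask, i))
			printf("deliver %#lx to cpu %d (by value)\n", data0, i);
}

/* New style: only a pointer is passed; the callee reads the caller's mask. */
static void deliver_by_pointer(unsigned long data0, const mask_t *mask)
{
	int i;

	for (i = 0; i < NR_CPUS; i++)
		if (mask_test(mask, i))
			printf("deliver %#lx to cpu %d (by pointer)\n", data0, i);
}

int main(void)
{
	mask_t mask;

	memset(&mask, 0, sizeof(mask));
	mask_set(&mask, 3);
	mask_set(&mask, 17);

	deliver_by_value(0x1000UL, mask);    /* copies the whole bitmap */
	deliver_by_pointer(0x1000UL, &mask); /* copies one pointer */
	return 0;
}

Note that in the patch below cheetah_xcall_deliver() still takes a private copy (mask = *mask_p), because it clears bits out of the mask while retrying NACKed cpus, whereas the spitfire and hypervisor variants only read the mask through the pointer.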
Diffstat (limited to 'arch/sparc64')
-rw-r--r--	arch/sparc64/kernel/smp.c	39
1 file changed, 21 insertions(+), 18 deletions(-)
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index 063668feab1e..868625e3b661 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -459,13 +459,13 @@ again:
 	}
 }
 
-static inline void spitfire_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
+static inline void spitfire_xcall_deliver(u64 data0, u64 data1, u64 data2, const cpumask_t *mask)
 {
 	u64 pstate;
 	int i;
 
 	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
-	for_each_cpu_mask(i, mask)
+	for_each_cpu_mask_nr(i, *mask)
 		spitfire_xcall_helper(data0, data1, data2, pstate, i);
 }
 
@@ -473,14 +473,17 @@ static inline void spitfire_xcall_deliver(u64 data0, u64 data1, u64 data2, cpuma
  * packet, but we have no use for that. However we do take advantage of
  * the new pipelining feature (ie. dispatch to multiple cpus simultaneously).
  */
-static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
+static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, const cpumask_t *mask_p)
 {
 	u64 pstate, ver, busy_mask;
 	int nack_busy_id, is_jbus, need_more;
+	cpumask_t mask;
 
-	if (cpus_empty(mask))
+	if (cpus_empty(*mask_p))
 		return;
 
+	mask = *mask_p;
+
 	/* Unfortunately, someone at Sun had the brilliant idea to make the
 	 * busy/nack fields hard-coded by ITID number for this Ultra-III
 	 * derivative processor.
@@ -511,7 +514,7 @@ retry:
 	{
 		int i;
 
-		for_each_cpu_mask(i, mask) {
+		for_each_cpu_mask_nr(i, mask) {
 			u64 target = (i << 14) | 0x70;
 
 			if (is_jbus) {
@@ -550,7 +553,7 @@ retry:
 					     : : "r" (pstate));
 				if (unlikely(need_more)) {
 					int i, cnt = 0;
-					for_each_cpu_mask(i, mask) {
+					for_each_cpu_mask_nr(i, mask) {
 						cpu_clear(i, mask);
 						cnt++;
 						if (cnt == 32)
@@ -584,7 +587,7 @@ retry:
 			/* Clear out the mask bits for cpus which did not
 			 * NACK us.
 			 */
-			for_each_cpu_mask(i, mask) {
+			for_each_cpu_mask_nr(i, mask) {
 				u64 check_mask;
 
 				if (is_jbus)
@@ -605,16 +608,16 @@ retry:
 }
 
 /* Multi-cpu list version. */
-static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
+static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, const cpumask_t *mask)
 {
+	int cnt, retries, this_cpu, prev_sent, i;
+	unsigned long flags, status;
+	cpumask_t error_mask;
 	struct trap_per_cpu *tb;
 	u16 *cpu_list;
 	u64 *mondo;
-	cpumask_t error_mask;
-	unsigned long flags, status;
-	int cnt, retries, this_cpu, prev_sent, i;
 
-	if (cpus_empty(mask))
+	if (cpus_empty(*mask))
 		return;
 
 	/* We have to do this whole thing with interrupts fully disabled.
@@ -642,7 +645,7 @@ static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t
 
 	/* Setup the initial cpu list. */
 	cnt = 0;
-	for_each_cpu_mask(i, mask)
+	for_each_cpu_mask_nr(i, *mask)
 		cpu_list[cnt++] = i;
 
 	cpus_clear(error_mask);
@@ -729,7 +732,7 @@ fatal_mondo_cpu_error:
729 "were in error state\n", 732 "were in error state\n",
730 this_cpu); 733 this_cpu);
731 printk(KERN_CRIT "CPU[%d]: Error mask [ ", this_cpu); 734 printk(KERN_CRIT "CPU[%d]: Error mask [ ", this_cpu);
732 for_each_cpu_mask(i, error_mask) 735 for_each_cpu_mask_nr(i, error_mask)
733 printk("%d ", i); 736 printk("%d ", i);
734 printk("]\n"); 737 printk("]\n");
735 return; 738 return;
@@ -756,7 +759,7 @@ dump_cpu_list_and_out:
 	printk("]\n");
 }
 
-static void (*xcall_deliver)(u64, u64, u64, cpumask_t);
+static void (*xcall_deliver)(u64, u64, u64, const cpumask_t *);
 
 /* Send cross call to all processors mentioned in MASK
  * except self.
@@ -769,7 +772,7 @@ static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 d
 	cpus_and(mask, mask, cpu_online_map);
 	cpu_clear(this_cpu, mask);
 
-	xcall_deliver(data0, data1, data2, mask);
+	xcall_deliver(data0, data1, data2, &mask);
 	/* NOTE: Caller runs local copy on master. */
 
 	put_cpu();
@@ -903,7 +906,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu)
 		}
 		if (data0) {
 			xcall_deliver(data0, __pa(pg_addr),
-				      (u64) pg_addr, mask);
+				      (u64) pg_addr, &mask);
 #ifdef CONFIG_DEBUG_DCFLUSH
 			atomic_inc(&dcpage_flushes_xcall);
 #endif
@@ -945,7 +948,7 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page)
 	}
 	if (data0) {
 		xcall_deliver(data0, __pa(pg_addr),
-			      (u64) pg_addr, mask);
+			      (u64) pg_addr, &mask);
 #ifdef CONFIG_DEBUG_DCFLUSH
 		atomic_inc(&dcpage_flushes_xcall);
 #endif