author	David S. Miller <davem@davemloft.net>	2008-08-04 19:47:57 -0400
committer	David S. Miller <davem@davemloft.net>	2008-08-04 19:47:57 -0400
commit	ed4d9c66eb941a416c8cb9a0138c69d46d82fc4f (patch)
tree	0a69005357f3e595268766ee45877451d2b9f85b /arch
parent	90f7ae8a55190f5edfb9fda957e25c994ed39ec4 (diff)
sparc64: Kill error_mask from hypervisor_xcall_deliver().
It can eat up a lot of stack space when NR_CPUS is large.  We retain
some of its functionality by reporting at least one of the cpus which
are seen in error state.

Signed-off-by: David S. Miller <davem@davemloft.net>
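As a rough illustration of the stack-size argument (a sketch only, not the kernel code; the NR_CPUS value and the bitmap layout below are assumptions): a cpumask_t is a bitmap of NR_CPUS bits, so a local of that type grows with the configured CPU count, while the replacement saw_cpu_error is a single int that stores "cpu + 1" so that zero can still mean "no cpu seen in error state".

	#include <stdio.h>
	#include <limits.h>

	/* Assumed config value for the sketch; large sparc64 configs can use
	 * a big NR_CPUS, which is exactly when the on-stack mask hurts.
	 */
	#define NR_CPUS 4096

	/* Simplified stand-in for a cpumask: one bit per possible cpu. */
	typedef struct {
		unsigned long bits[(NR_CPUS + CHAR_BIT * sizeof(unsigned long) - 1) /
				   (CHAR_BIT * sizeof(unsigned long))];
	} fake_cpumask_t;

	int main(void)
	{
		/* Old scheme: a full mask on the stack, ~NR_CPUS/8 bytes. */
		printf("cpumask-style local: %zu bytes\n", sizeof(fake_cpumask_t));

		/* New scheme: one int, encoded as cpu + 1 so 0 == none seen. */
		int saw_cpu_error = 0;
		int failing_cpu = 37;	/* hypothetical cpu found in error state */

		saw_cpu_error = failing_cpu + 1;
		if (saw_cpu_error)
			printf("int local: %zu bytes, reports cpu %d\n",
			       sizeof(saw_cpu_error), saw_cpu_error - 1);
		return 0;
	}

With NR_CPUS at 4096 the mask-style local is 512 bytes on a 64-bit build versus a 4-byte int, at the cost of reporting only one of the failing cpus instead of the full set.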
Diffstat (limited to 'arch')
-rw-r--r--	arch/sparc64/kernel/smp.c	20
1 file changed, 7 insertions(+), 13 deletions(-)
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index 2387a9b81be7..ac8996ec97be 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -626,16 +626,15 @@ retry:
 /* Multi-cpu list version.  */
 static void hypervisor_xcall_deliver(struct trap_per_cpu *tb, int cnt)
 {
-	int retries, this_cpu, prev_sent, i;
+	int retries, this_cpu, prev_sent, i, saw_cpu_error;
 	unsigned long status;
-	cpumask_t error_mask;
 	u16 *cpu_list;
 
 	this_cpu = smp_processor_id();
 
 	cpu_list = __va(tb->cpu_list_pa);
 
-	cpus_clear(error_mask);
+	saw_cpu_error = 0;
 	retries = 0;
 	prev_sent = 0;
 	do {
@@ -680,10 +679,9 @@ static void hypervisor_xcall_deliver(struct trap_per_cpu *tb, int cnt)
 					continue;
 
 				err = sun4v_cpu_state(cpu);
-				if (err >= 0 &&
-				    err == HV_CPU_STATE_ERROR) {
+				if (err == HV_CPU_STATE_ERROR) {
+					saw_cpu_error = (cpu + 1);
 					cpu_list[i] = 0xffff;
-					cpu_set(cpu, error_mask);
 				}
 			}
 		} else if (unlikely(status != HV_EWOULDBLOCK))
@@ -707,19 +705,15 @@ static void hypervisor_xcall_deliver(struct trap_per_cpu *tb, int cnt)
 		}
 	} while (1);
 
-	if (unlikely(!cpus_empty(error_mask)))
+	if (unlikely(saw_cpu_error))
 		goto fatal_mondo_cpu_error;
 
 	return;
 
 fatal_mondo_cpu_error:
 	printk(KERN_CRIT "CPU[%d]: SUN4V mondo cpu error, some target cpus "
-	       "were in error state\n",
-	       this_cpu);
-	printk(KERN_CRIT "CPU[%d]: Error mask [ ", this_cpu);
-	for_each_cpu_mask_nr(i, error_mask)
-		printk("%d ", i);
-	printk("]\n");
+	       "(including %d) were in error state\n",
+	       this_cpu, saw_cpu_error - 1);
 	return;
 
 fatal_mondo_timeout: