Diffstat (limited to 'arch/sparc64/kernel/smp.c')
 arch/sparc64/kernel/smp.c | 32 +++++++++++++++-----------------
 1 file changed, 15 insertions(+), 17 deletions(-)
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index 20f4e291c74a..6d458b35643c 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -611,7 +611,7 @@ retry:
 static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, const cpumask_t *mask)
 {
 	int cnt, retries, this_cpu, prev_sent, i;
-	unsigned long flags, status;
+	unsigned long status;
 	cpumask_t error_mask;
 	struct trap_per_cpu *tb;
 	u16 *cpu_list;
@@ -620,18 +620,6 @@ static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, const cpum
 	if (cpus_empty(*mask))
 		return;
 
-	/* We have to do this whole thing with interrupts fully disabled.
-	 * Otherwise if we send an xcall from interrupt context it will
-	 * corrupt both our mondo block and cpu list state.
-	 *
-	 * One consequence of this is that we cannot use timeout mechanisms
-	 * that depend upon interrupts being delivered locally. So, for
-	 * example, we cannot sample jiffies and expect it to advance.
-	 *
-	 * Fortunately, udelay() uses %stick/%tick so we can use that.
-	 */
-	local_irq_save(flags);
-
 	this_cpu = smp_processor_id();
 	tb = &trap_block[this_cpu];
 
@@ -720,8 +708,6 @@ static void hypervisor_xcall_deliver(u64 data0, u64 data1, u64 data2, const cpum
 		}
 	} while (1);
 
-	local_irq_restore(flags);
-
 	if (unlikely(!cpus_empty(error_mask)))
 		goto fatal_mondo_cpu_error;
 
@@ -738,14 +724,12 @@ fatal_mondo_cpu_error:
 	return;
 
 fatal_mondo_timeout:
-	local_irq_restore(flags);
 	printk(KERN_CRIT "CPU[%d]: SUN4V mondo timeout, no forward "
 	       " progress after %d retries.\n",
 	       this_cpu, retries);
 	goto dump_cpu_list_and_out;
 
 fatal_mondo_error:
-	local_irq_restore(flags);
 	printk(KERN_CRIT "CPU[%d]: Unexpected SUN4V mondo error %lu\n",
 	       this_cpu, status);
 	printk(KERN_CRIT "CPU[%d]: Args were cnt(%d) cpulist_pa(%lx) "
@@ -763,7 +747,21 @@ static void (*xcall_deliver_impl)(u64, u64, u64, const cpumask_t *);
 
 static void xcall_deliver(u64 data0, u64 data1, u64 data2, const cpumask_t *mask)
 {
+	unsigned long flags;
+
+	/* We have to do this whole thing with interrupts fully disabled.
+	 * Otherwise if we send an xcall from interrupt context it will
+	 * corrupt both our mondo block and cpu list state.
+	 *
+	 * One consequence of this is that we cannot use timeout mechanisms
+	 * that depend upon interrupts being delivered locally. So, for
+	 * example, we cannot sample jiffies and expect it to advance.
+	 *
+	 * Fortunately, udelay() uses %stick/%tick so we can use that.
+	 */
+	local_irq_save(flags);
 	xcall_deliver_impl(data0, data1, data2, mask);
+	local_irq_restore(flags);
 }
 
 /* Send cross call to all processors mentioned in MASK_P
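
The net effect of this change: interrupt disabling moves out of the sun4v-specific hypervisor_xcall_deliver() and up into the generic xcall_deliver() wrapper, so every delivery backend reached through the xcall_deliver_impl function pointer now runs with local interrupts off, and the error paths in hypervisor_xcall_deliver() no longer need their own local_irq_restore() calls. A minimal sketch of the resulting wrapper, assembled from the '+' lines of the last hunk with the comment shortened:

static void xcall_deliver(u64 data0, u64 data1, u64 data2,
			  const cpumask_t *mask)
{
	unsigned long flags;

	/* Disable interrupts around the whole delivery so an xcall sent
	 * from interrupt context cannot corrupt the mondo block or the
	 * cpu list.  Any timeout logic inside the backend must therefore
	 * rely on udelay() (%stick/%tick) rather than jiffies.
	 */
	local_irq_save(flags);
	xcall_deliver_impl(data0, data1, data2, mask);
	local_irq_restore(flags);
}

xcall_deliver_impl is the function pointer declared in the context of the last hunk; how it gets assigned is outside this diff.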