author    Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2015-02-25 14:42:15 -0500
committer Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2015-03-11 16:22:35 -0400
commit    2a442c9c6453d3d043dfd89f2e03a1deff8a6f06 (patch)
tree      e1aaab3098d09603152104254acf709e71874d94 /arch/x86/xen
parent    8038dad7e888581266c76df15d70ca457a3c5910 (diff)
x86: Use common outgoing-CPU-notification code
This commit replaces the open-coded CPU-offline notification with new
common code. Among other things, this change avoids calling scheduler
code using RCU from an offline CPU that RCU is ignoring. It also allows
Xen to notice at online time that the CPU did not go offline correctly.
Note that Xen has the surviving CPU carry out some cleanup operations,
so if the surviving CPU times out, these cleanup operations might have
been carried out while the outgoing CPU was still running. It might
therefore be unwise to bring this CPU back online, and this commit
avoids doing so.

Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: <x86@kernel.org>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: David Vrabel <david.vrabel@citrix.com>
Cc: <xen-devel@lists.xenproject.org>
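The common code this patch switches to keeps a per-CPU hotplug state that
the outgoing and the surviving CPU update atomically: the dying CPU's last
act is to report its own death, the survivor waits for that report with a
timeout, and a CPU whose report never arrived is refused (or specially
handled) at the next online attempt. What follows is a minimal userspace
model of that handshake, compilable with cc -pthread; the names echo kernel
helpers such as cpu_report_state() and cpu_check_up_prepare(), but this is
an illustrative sketch, not the kernel implementation in kernel/smpboot.c.

/*
 * Illustrative model only: the real code uses per-CPU atomic state,
 * jiffies-based timeouts, and more states (e.g. CPU_DEAD_FROZEN).
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

enum cpu_state { STATE_UP_PREPARE, STATE_ONLINE, STATE_DEAD, STATE_BROKEN };

static _Atomic int cpu_hotplug_state = STATE_UP_PREPARE;

/* Outgoing CPU: its final act is to report its own death. */
static void *outgoing_cpu(void *unused)
{
	(void)unused;
	/* ...architecture-specific teardown would run here... */
	atomic_store(&cpu_hotplug_state, STATE_DEAD);
	return NULL;
}

/* Surviving CPU: wait for the report, marking the CPU broken on timeout.
 * (In this demo the report always arrives, so the timeout path is idle.) */
static int wait_for_cpu_death(int max_ticks)
{
	for (int i = 0; i < max_ticks; i++) {
		if (atomic_load(&cpu_hotplug_state) == STATE_DEAD)
			return 0;		/* clean offline */
		usleep(100 * 1000);		/* models schedule_timeout() */
	}
	atomic_store(&cpu_hotplug_state, STATE_BROKEN);
	return -1;				/* timed out */
}

/* Later online attempt: refuse a CPU whose offline never completed. */
static int check_up_prepare(void)
{
	if (atomic_load(&cpu_hotplug_state) == STATE_BROKEN)
		return -1;			/* the kernel returns -EBUSY */
	atomic_store(&cpu_hotplug_state, STATE_UP_PREPARE);
	return 0;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, outgoing_cpu, NULL);
	printf("offline: %s\n", wait_for_cpu_death(50) ? "timed out" : "ok");
	pthread_join(t, NULL);
	printf("re-online: %s\n", check_up_prepare() ? "refused" : "allowed");
	return 0;
}

In the diff below, xen_cpu_up() takes the check_up_prepare() role via
cpu_check_up_prepare(), xen_cpu_die() takes the waiting role via
common_cpu_die(), and the CPU_DEAD_FROZEN branch added to xen_hvm_cpu_up()
performs the teardown that had to be skipped when an earlier offline timed
out.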
Diffstat (limited to 'arch/x86/xen')
-rw-r--r--  arch/x86/xen/smp.c  |  46 +++++++++++++++++++++++++---------------------
 1 file changed, 25 insertions(+), 21 deletions(-)
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 08e8489c47f1..1c5e760f34ca 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -90,14 +90,10 @@ static void cpu_bringup(void)
 
 	set_cpu_online(cpu, true);
 
-	this_cpu_write(cpu_state, CPU_ONLINE);
-
-	wmb();
+	cpu_set_state_online(cpu);	/* Implies full memory barrier. */
 
 	/* We can take interrupts now: we're officially "up". */
 	local_irq_enable();
-
-	wmb();			/* make sure everything is out */
 }
 
 /*
@@ -459,7 +455,13 @@ static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)
 	xen_setup_timer(cpu);
 	xen_init_lock_cpu(cpu);
 
-	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
+	/*
+	 * PV VCPUs are always successfully taken down (see 'while' loop
+	 * in xen_cpu_die()), so -EBUSY is an error.
+	 */
+	rc = cpu_check_up_prepare(cpu);
+	if (rc)
+		return rc;
 
 	/* make sure interrupts start blocked */
 	per_cpu(xen_vcpu, cpu)->evtchn_upcall_mask = 1;
@@ -479,10 +481,8 @@ static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)
 	rc = HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
 	BUG_ON(rc);
 
-	while(per_cpu(cpu_state, cpu) != CPU_ONLINE) {
+	while (cpu_report_state(cpu) != CPU_ONLINE)
 		HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
-		barrier();
-	}
 
 	return 0;
 }
@@ -511,11 +511,11 @@ static void xen_cpu_die(unsigned int cpu)
 		schedule_timeout(HZ/10);
 	}
 
-	cpu_die_common(cpu);
-
-	xen_smp_intr_free(cpu);
-	xen_uninit_lock_cpu(cpu);
-	xen_teardown_timer(cpu);
+	if (common_cpu_die(cpu) == 0) {
+		xen_smp_intr_free(cpu);
+		xen_uninit_lock_cpu(cpu);
+		xen_teardown_timer(cpu);
+	}
 }
 
 static void xen_play_dead(void) /* used only with HOTPLUG_CPU */
@@ -747,6 +747,16 @@ static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
 static int xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle)
 {
 	int rc;
+
+	/*
+	 * This can happen if CPU was offlined earlier and
+	 * offlining timed out in common_cpu_die().
+	 */
+	if (cpu_report_state(cpu) == CPU_DEAD_FROZEN) {
+		xen_smp_intr_free(cpu);
+		xen_uninit_lock_cpu(cpu);
+	}
+
 	/*
 	 * xen_smp_intr_init() needs to run before native_cpu_up()
 	 * so that IPI vectors are set up on the booting CPU before
@@ -768,12 +778,6 @@ static int xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle)
 	return rc;
 }
 
-static void xen_hvm_cpu_die(unsigned int cpu)
-{
-	xen_cpu_die(cpu);
-	native_cpu_die(cpu);
-}
-
 void __init xen_hvm_smp_init(void)
 {
 	if (!xen_have_vector_callback)
@@ -781,7 +785,7 @@ void __init xen_hvm_smp_init(void)
 	smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus;
 	smp_ops.smp_send_reschedule = xen_smp_send_reschedule;
 	smp_ops.cpu_up = xen_hvm_cpu_up;
-	smp_ops.cpu_die = xen_hvm_cpu_die;
+	smp_ops.cpu_die = xen_cpu_die;
 	smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi;
 	smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi;
 	smp_ops.smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu;
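Two design points are worth noting in the hunks above. For PV guests, the
'while' loop in xen_cpu_die() guarantees the VCPU is eventually taken down,
so -EBUSY from cpu_check_up_prepare() is treated as a genuine error. For
HVM guests, an offline that times out in common_cpu_die() leaves
xen_smp_intr_free() and xen_uninit_lock_cpu() unrun, which is exactly the
cleanup the new CPU_DEAD_FROZEN branch in xen_hvm_cpu_up() performs before
re-onlining. The separate xen_hvm_cpu_die() wrapper also becomes
unnecessary: the wait it previously performed via native_cpu_die() is now
handled by common_cpu_die() inside xen_cpu_die(), so smp_ops.cpu_die can
point at xen_cpu_die() directly.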