author    Ingo Molnar <mingo@kernel.org>  2015-03-27 05:04:06 -0400
committer Ingo Molnar <mingo@kernel.org>  2015-03-27 05:04:06 -0400
commit    4bfe186dbe0a058680e4bfb0d673194f0ceaffd4
tree      5e374857dcd979d50f51c7091505784cb053d078
parent    3c435c1e472ba344ee25f795f4807d4457e61f6c
parent    42528795ac1c8d7ba021797ec004904168956d64
Merge branch 'for-mingo' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu into core/rcu
Pull RCU updates from Paul E. McKenney:

 - Documentation updates.

 - Changes permitting use of call_rcu() and friends very early in
   boot, for example, before rcu_init() is invoked.

 - Miscellaneous fixes.

 - Add in-kernel API to enable and disable expediting of normal RCU
   grace periods.

 - Improve RCU's handling of (hotplug-) outgoing CPUs.

   Note: ARM support is lagging a bit here, and these improved
   diagnostics might generate (harmless) splats.

 - NO_HZ_FULL_SYSIDLE fixes.

 - Tiny RCU updates to make it more tiny.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
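For context, the in-kernel expediting API from this series is the nesting
pair rcu_expedite_gp()/rcu_unexpedite_gp(): while the expedite count is
nonzero, synchronize_rcu() and friends complete via the expedited path.
A minimal sketch of a caller (the init function below is hypothetical,
for illustration only):

	#include <linux/init.h>
	#include <linux/rcupdate.h>

	/* Hypothetical boot-time user that wants fast grace periods
	 * during setup. */
	static int __init example_fast_boot_init(void)
	{
		rcu_expedite_gp();	/* expedite normal grace periods */

		synchronize_rcu();	/* completes via the expedited path */

		rcu_unexpedite_gp();	/* calls nest; restore normal behavior */
		return 0;
	}
	early_initcall(example_fast_boot_init);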
Diffstat (limited to 'arch/x86/xen')
-rw-r--r--  arch/x86/xen/smp.c | 46 +++++++++++++++++++++++++---------------------
1 file changed, 25 insertions(+), 21 deletions(-)
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 08e8489c47f1..1c5e760f34ca 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -90,14 +90,10 @@ static void cpu_bringup(void)
 
 	set_cpu_online(cpu, true);
 
-	this_cpu_write(cpu_state, CPU_ONLINE);
-
-	wmb();
+	cpu_set_state_online(cpu);	/* Implies full memory barrier. */
 
 	/* We can take interrupts now: we're officially "up". */
 	local_irq_enable();
-
-	wmb();			/* make sure everything is out */
 }
 
 /*
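The open-coded cpu_state write plus explicit wmb() pairs are replaced by
the generic hotplug-state helpers, which keep the state in a per-CPU
atomic_t. A simplified sketch of those helpers, along the lines of
kernel/smpboot.c (details may differ between kernel versions):

	static DEFINE_PER_CPU(atomic_t, cpu_hotplug_state) =
		ATOMIC_INIT(CPU_POST_DEAD);

	void cpu_set_state_online(int cpu)
	{
		/* atomic_xchg() implies a full memory barrier, which is
		 * why the explicit wmb() calls above can go away. */
		(void)atomic_xchg(&per_cpu(cpu_hotplug_state, cpu), CPU_ONLINE);
	}

	int cpu_report_state(int cpu)
	{
		return atomic_read(&per_cpu(cpu_hotplug_state, cpu));
	}

Because cpu_report_state() is a plain atomic_read() (a volatile access the
compiler cannot cache), the explicit barrier() in the bringup wait loop
also becomes unnecessary, as a later hunk shows.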
@@ -459,7 +455,13 @@ static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)
 	xen_setup_timer(cpu);
 	xen_init_lock_cpu(cpu);
 
-	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
+	/*
+	 * PV VCPUs are always successfully taken down (see 'while' loop
+	 * in xen_cpu_die()), so -EBUSY is an error.
+	 */
+	rc = cpu_check_up_prepare(cpu);
+	if (rc)
+		return rc;
 
 	/* make sure interrupts start blocked */
 	per_cpu(xen_vcpu, cpu)->evtchn_upcall_mask = 1;
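cpu_check_up_prepare() consults the same per-CPU hotplug state to decide
whether the CPU may be brought up again; for PV guests the comment above
notes that -EBUSY should never actually occur, because xen_cpu_die()
always waits until the vCPU is down. A simplified sketch of the logic
(not the exact upstream code):

	int cpu_check_up_prepare(int cpu)
	{
		switch (cpu_report_state(cpu)) {
		case CPU_POST_DEAD:
			/* Previous offline completed cleanly; re-arm. */
			atomic_set(&per_cpu(cpu_hotplug_state, cpu),
				   CPU_UP_PREPARE);
			return 0;
		case CPU_DEAD_FROZEN:
			/* Previous offline timed out; arch code must
			 * clean up before retrying the bringup. */
			return -EBUSY;
		default:
			return -EIO;
		}
	}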
@@ -479,10 +481,8 @@ static int xen_cpu_up(unsigned int cpu, struct task_struct *idle)
 	rc = HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
 	BUG_ON(rc);
 
-	while(per_cpu(cpu_state, cpu) != CPU_ONLINE) {
+	while (cpu_report_state(cpu) != CPU_ONLINE)
 		HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
-		barrier();
-	}
 
 	return 0;
 }
@@ -511,11 +511,11 @@ static void xen_cpu_die(unsigned int cpu)
 		schedule_timeout(HZ/10);
 	}
 
-	cpu_die_common(cpu);
-
-	xen_smp_intr_free(cpu);
-	xen_uninit_lock_cpu(cpu);
-	xen_teardown_timer(cpu);
+	if (common_cpu_die(cpu) == 0) {
+		xen_smp_intr_free(cpu);
+		xen_uninit_lock_cpu(cpu);
+		xen_teardown_timer(cpu);
+	}
 }
 
 static void xen_play_dead(void) /* used only with HOTPLUG_CPU */
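common_cpu_die() replaces the old cpu_die_common() and returns 0 only when
the outgoing CPU is confirmed dead, so Xen now defers its teardown calls
when the wait times out. Roughly, under the same assumptions as the
sketches above (the 5-second bound is illustrative):

	int common_cpu_die(unsigned int cpu)
	{
		/* The dying CPU reports CPU_DEAD from its play_dead()
		 * path; wait for that with a bounded timeout. */
		if (cpu_wait_death(cpu, 5))
			return 0;	/* confirmed dead; safe to tear down */

		pr_err("CPU %u didn't die...\n", cpu);
		return -1;		/* timed out; caller defers cleanup */
	}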
@@ -747,6 +747,16 @@ static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
 static int xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle)
 {
 	int rc;
+
+	/*
+	 * This can happen if CPU was offlined earlier and
+	 * offlining timed out in common_cpu_die().
+	 */
+	if (cpu_report_state(cpu) == CPU_DEAD_FROZEN) {
+		xen_smp_intr_free(cpu);
+		xen_uninit_lock_cpu(cpu);
+	}
+
 	/*
 	 * xen_smp_intr_init() needs to run before native_cpu_up()
 	 * so that IPI vectors are set up on the booting CPU before
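The CPU_DEAD_FROZEN check covers the case where the death wait gave up:
instead of losing track of the CPU, the generic code records the timeout
in the per-CPU state, and the next cpu_up() can finish the deferred Xen
cleanup. A sketch of how the waiting side might record that state
(simplified; the exact state names and backoff policy vary by version):

	bool cpu_wait_death(unsigned int cpu, int seconds)
	{
		int jiffies_left = seconds * HZ;

		/* Poll the dying CPU's state with a bounded wait. */
		while (cpu_report_state(cpu) != CPU_DEAD && jiffies_left > 0) {
			schedule_timeout_uninterruptible(1);
			jiffies_left--;
		}

		if (cpu_report_state(cpu) == CPU_DEAD) {
			/* Normal death: ready for a future bringup. */
			atomic_set(&per_cpu(cpu_hotplug_state, cpu),
				   CPU_POST_DEAD);
			return true;
		}

		/* Timed out: remember that, so cpu_check_up_prepare() and
		 * xen_hvm_cpu_up() can detect the stale state later. */
		atomic_set(&per_cpu(cpu_hotplug_state, cpu), CPU_DEAD_FROZEN);
		return false;
	}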
@@ -768,12 +778,6 @@ static int xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle)
 	return rc;
 }
 
-static void xen_hvm_cpu_die(unsigned int cpu)
-{
-	xen_cpu_die(cpu);
-	native_cpu_die(cpu);
-}
-
 void __init xen_hvm_smp_init(void)
 {
 	if (!xen_have_vector_callback)
@@ -781,7 +785,7 @@ void __init xen_hvm_smp_init(void)
 	smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus;
 	smp_ops.smp_send_reschedule = xen_smp_send_reschedule;
 	smp_ops.cpu_up = xen_hvm_cpu_up;
-	smp_ops.cpu_die = xen_hvm_cpu_die;
+	smp_ops.cpu_die = xen_cpu_die;
 	smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi;
 	smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi;
 	smp_ops.smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu;