author     Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>    2013-08-13 11:47:21 -0400
committer  Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>    2013-09-09 12:06:16 -0400
commit     1fb3a8b2cfb278f139d9ff7ca5fe06a65de64494 (patch)
tree       afa04e3edf9b99e621593464dd9d1f027b19e4ff /arch/x86
parent     65320fcedaa7affd1736cd7aa51f5e70b5c7e7f2 (diff)
xen/spinlock: Fix locking path engaging too soon under PVHVM.
xen_lock_spinning() has a check for the kicker interrupt and, if it is not initialized, will spin normally (not enter the slowpath).

However, in the PVHVM case we would initialize the kicker interrupt before the CPU came online. This meant that if the booting CPU used a spinlock and went into the slowpath, it would block forever: during bootup the spinlock is taken _before_ the CPU sets itself online (more on this below), so we end up polling on the event channel forever.

The booting CPU (see commit fc78d343fa74514f6fd117b5ef4cd27e4ac30236 "xen/smp: initialize IPI vectors before marking CPU online" for details) and the CPU that started the bootup consult the cpu_online_mask to determine whether the booting CPU should get an IPI. The booting CPU has to set itself in this mask via:

	set_cpu_online(smp_processor_id(), true);

However, if the spinlock is taken before this (and it is) and it polls on an event channel, it will never be woken up, as the kernel will never send an IPI to an offline CPU.

Note that the PVHVM logic for sending IPIs uses the HVM path, which has numerous checks against cpu_online_mask and cpu_active_mask. See the above-mentioned commit for details.

Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Reviewed-by: David Vrabel <david.vrabel@citrix.com>
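For context, the "check for the kicker interrupt" mentioned above is the early exit at the top of xen_lock_spinning() in arch/x86/xen/spinlock.c. The following is a heavily abridged sketch of that path, based on kernels of this era and for illustration only, not the verbatim source:

/*
 * Abridged sketch of the slowpath entry (illustrative, not verbatim).
 * If the per-CPU kicker IRQ is not yet initialized the lock simply
 * spins; otherwise the CPU blocks by polling its event channel and
 * relies on the unlocker's kick IPI to wake it up.
 */
static void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
{
	int irq = __this_cpu_read(lock_kicker_irq);

	/* If kicker interrupts not initialized yet, just spin. */
	if (irq == -1)
		return;

	/* ... record which lock/ticket this CPU is waiting for ... */

	/*
	 * Block until kicked. Before this patch a PVHVM booting CPU could
	 * get here with the kicker IRQ already set up but while still
	 * missing from cpu_online_mask, so the wake-up IPI was never sent.
	 */
	xen_poll_irq(irq);
}

Moving xen_init_lock_cpu() to after native_cpu_up() (see the smp.c hunk below) keeps lock_kicker_irq at -1 for the whole window in which the booting CPU is not yet in cpu_online_mask, so any spinlock taken during bringup should fall back to plain spinning instead of blocking.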
Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/xen/enlighten.c	1
-rw-r--r--	arch/x86/xen/smp.c	9
2 files changed, 9 insertions(+), 1 deletion(-)
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 15939e872db2..f091c80974c4 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1692,7 +1692,6 @@ static int xen_hvm_cpu_notify(struct notifier_block *self, unsigned long action,
 	case CPU_UP_PREPARE:
 		xen_vcpu_setup(cpu);
 		if (xen_have_vector_callback) {
-			xen_init_lock_cpu(cpu);
 			if (xen_feature(XENFEAT_hvm_safe_pvclock))
 				xen_setup_timer(cpu);
 		}
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 9235842cd76a..c21b825ed056 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -709,6 +709,15 @@ static int xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle)
 	WARN_ON(rc);
 	if (!rc)
 		rc = native_cpu_up(cpu, tidle);
+
+	/*
+	 * We must initialize the slowpath CPU kicker _after_ the native
+	 * path has executed. If we initialized it before none of the
+	 * unlocker IPI kicks would reach the booting CPU as the booting
+	 * CPU had not set itself 'online' in cpu_online_mask. That mask
+	 * is checked when IPIs are sent (on HVM at least).
+	 */
+	xen_init_lock_cpu(cpu);
 	return rc;
 }
 