author	David Vrabel <david.vrabel@citrix.com>	2015-04-24 14:56:40 -0400
committer	Ingo Molnar <mingo@kernel.org>	2015-05-08 06:37:18 -0400
commit	e95e6f176c61dd0e7bd9fdfb4956df1f9bfe99d4
tree	b02e18f56aece0639cec468ad9439944a9319d6b
parent	bf0c7c34adc286bec3a5a38c00c773ba1b2d0396
locking/pvqspinlock, x86: Enable PV qspinlock for Xen
This patch adds the necessary Xen-specific code to allow Xen to support
the CPU halting and kicking operations needed by the queued spinlock PV
code.

Signed-off-by: David Vrabel <david.vrabel@citrix.com>
Signed-off-by: Waiman Long <Waiman.Long@hp.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Daniel J Blueman <daniel@numascale.com>
Cc: Douglas Hatch <doug.hatch@hp.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Paolo Bonzini <paolo.bonzini@gmail.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Scott J Norton <scott.norton@hp.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: virtualization@lists.linux-foundation.org
Cc: xen-devel@lists.xenproject.org
Link: http://lkml.kernel.org/r/1429901803-29771-12-git-send-email-Waiman.Long@hp.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
 arch/x86/xen/spinlock.c | 64 ++++++++++++++++++++++++++++++++++++++-----
 kernel/Kconfig.locks    |  2 +-
 2 files changed, 61 insertions(+), 5 deletions(-)
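
The heart of the change is xen_qlock_wait(): clear any pending kicker
interrupt first, re-check the lock byte, and only then block in
xen_poll_irq(). Done in any other order, a kick that arrived before the
clear would be thrown away and the vCPU could sleep forever. Below is a
minimal userspace analogue of that ordering, for illustration only: a
POSIX semaphore stands in for Xen's per-CPU kicker event channel, and
the names here (example_wait, example_kick, kick_sem, unlocker) are
hypothetical, not part of this patch.

/* Build with: cc -pthread -o wait_kick wait_kick.c */
#include <pthread.h>
#include <semaphore.h>
#include <stdatomic.h>
#include <stdio.h>

static sem_t kick_sem;               /* stand-in for the per-CPU kicker IRQ */
static atomic_uchar lock_byte = 1;   /* 1 = "locked", 0 = "released" */

static void example_wait(atomic_uchar *byte, unsigned char val)
{
	/* Drain any stale kick, like xen_clear_irq_pending(). */
	while (sem_trywait(&kick_sem) == 0)
		;

	/*
	 * Re-check after draining: if the byte already changed, the
	 * kick we just drained may have been ours, and blocking now
	 * could sleep forever.
	 */
	if (atomic_load(byte) != val)
		return;

	/* Block until kicked (or a spurious wakeup), like xen_poll_irq(). */
	sem_wait(&kick_sem);
}

static void example_kick(void)
{
	sem_post(&kick_sem);         /* wake the parked waiter */
}

static void *unlocker(void *arg)
{
	(void)arg;
	atomic_store(&lock_byte, 0); /* release the "lock"... */
	example_kick();              /* ...then kick the waiter */
	return NULL;
}

int main(void)
{
	pthread_t t;

	sem_init(&kick_sem, 0, 0);
	pthread_create(&t, NULL, unlocker, NULL);
	example_wait(&lock_byte, 1); /* park until the byte changes */
	pthread_join(t, NULL);
	puts("waiter woken");
	return 0;
}

Either the waiter observes the byte change and returns without
blocking, or the unlocker's sem_post() necessarily happens after the
drain and is still pending when sem_wait() runs, so no wakeup is lost.
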
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 956374c1edbc..af907a90fb19 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -17,6 +17,56 @@
 #include "xen-ops.h"
 #include "debugfs.h"
 
+static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
+static DEFINE_PER_CPU(char *, irq_name);
+static bool xen_pvspin = true;
+
+#ifdef CONFIG_QUEUED_SPINLOCK
+
+#include <asm/qspinlock.h>
+
+static void xen_qlock_kick(int cpu)
+{
+	xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
+}
+
+/*
+ * Halt the current CPU & release it back to the host
+ */
+static void xen_qlock_wait(u8 *byte, u8 val)
+{
+	int irq = __this_cpu_read(lock_kicker_irq);
+
+	/* If kicker interrupts not initialized yet, just spin */
+	if (irq == -1)
+		return;
+
+	/* clear pending */
+	xen_clear_irq_pending(irq);
+	barrier();
+
+	/*
+	 * We check the byte value after clearing pending IRQ to make sure
+	 * that we won't miss a wakeup event because of the clearing.
+	 *
+	 * The sync_clear_bit() call in xen_clear_irq_pending() is atomic.
+	 * So it is effectively a memory barrier for x86.
+	 */
+	if (READ_ONCE(*byte) != val)
+		return;
+
+	/*
+	 * If an interrupt happens here, it will leave the wakeup irq
+	 * pending, which will cause xen_poll_irq() to return
+	 * immediately.
+	 */
+
+	/* Block until irq becomes pending (or perhaps a spurious wakeup) */
+	xen_poll_irq(irq);
+}
+
+#else /* CONFIG_QUEUED_SPINLOCK */
+
 enum xen_contention_stat {
 	TAKEN_SLOW,
 	TAKEN_SLOW_PICKUP,
@@ -100,12 +150,9 @@ struct xen_lock_waiting {
 	__ticket_t want;
 };
 
-static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
-static DEFINE_PER_CPU(char *, irq_name);
 static DEFINE_PER_CPU(struct xen_lock_waiting, lock_waiting);
 static cpumask_t waiting_cpus;
 
-static bool xen_pvspin = true;
 __visible void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
 {
 	int irq = __this_cpu_read(lock_kicker_irq);
@@ -217,6 +264,7 @@ static void xen_unlock_kick(struct arch_spinlock *lock, __ticket_t next)
 		}
 	}
 }
+#endif /* CONFIG_QUEUED_SPINLOCK */
 
 static irqreturn_t dummy_handler(int irq, void *dev_id)
 {
@@ -280,8 +328,16 @@ void __init xen_init_spinlocks(void)
 		return;
 	}
 	printk(KERN_DEBUG "xen: PV spinlocks enabled\n");
+#ifdef CONFIG_QUEUED_SPINLOCK
+	__pv_init_lock_hash();
+	pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
+	pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
+	pv_lock_ops.wait = xen_qlock_wait;
+	pv_lock_ops.kick = xen_qlock_kick;
+#else
 	pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(xen_lock_spinning);
 	pv_lock_ops.unlock_kick = xen_unlock_kick;
+#endif
 }
 
 /*
@@ -310,7 +366,7 @@ static __init int xen_parse_nopvspin(char *arg)
 }
 early_param("xen_nopvspin", xen_parse_nopvspin);
 
-#ifdef CONFIG_XEN_DEBUG_FS
+#if defined(CONFIG_XEN_DEBUG_FS) && !defined(CONFIG_QUEUED_SPINLOCK)
 
 static struct dentry *d_spin_debug;
 
diff --git a/kernel/Kconfig.locks b/kernel/Kconfig.locks
index 4379eef9334d..95dd7587ec34 100644
--- a/kernel/Kconfig.locks
+++ b/kernel/Kconfig.locks
@@ -240,7 +240,7 @@ config ARCH_USE_QUEUED_SPINLOCK
 
 config QUEUED_SPINLOCK
 	def_bool y if ARCH_USE_QUEUED_SPINLOCK
-	depends on SMP && (!PARAVIRT_SPINLOCKS || !XEN)
+	depends on SMP
 
 config ARCH_USE_QUEUE_RWLOCK
 	bool
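
With Xen now supplying wait/kick hooks, the Kconfig hunk above can drop
the (!PARAVIRT_SPINLOCKS || !XEN) restriction: queued spinlocks only
need SMP. For orientation, the generic PV qspinlock slowpath dispatches
through the pv_lock_ops table that xen_init_spinlocks() fills in; the
sketch below is a simplified, hypothetical picture of that dispatch
pattern (example_pv_lock_ops, example_lock_slowpath and example_unlock
are illustrative names, not the kernel's):

/*
 * Hypothetical sketch of the paravirt dispatch pattern: the generic
 * slowpath spins for a bounded time, then halts the vCPU through the
 * backend's wait() hook; the unlocker clears the lock byte and kicks
 * the waiter's CPU.
 */
struct example_pv_lock_ops {
	void (*wait)(unsigned char *byte, unsigned char val);	/* e.g. xen_qlock_wait() */
	void (*kick)(int cpu);					/* e.g. xen_qlock_kick() */
};

static struct example_pv_lock_ops example_ops;

static void example_lock_slowpath(unsigned char *lock_byte)
{
	for (;;) {
		/* Spin a bounded number of iterations before blocking. */
		for (int i = 0; i < 1024; i++)
			if (__atomic_load_n(lock_byte, __ATOMIC_ACQUIRE) == 0)
				return;	/* free: a real slowpath would now try to grab it */
		/* Still held: halt until the holder kicks this CPU. */
		example_ops.wait(lock_byte, 1);
	}
}

static void example_unlock(unsigned char *lock_byte, int waiter_cpu)
{
	__atomic_store_n(lock_byte, 0, __ATOMIC_RELEASE);	/* release the lock... */
	example_ops.kick(waiter_cpu);				/* ...then kick the waiter */
}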