aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorJuergen Gross <jgross@suse.com>2018-11-08 02:35:06 -0500
committerJuergen Gross <jgross@suse.com>2018-11-09 10:37:27 -0500
commitd3132b3860f6cf35ff7609a76bbcdbb814bd027c (patch)
tree85648eac48b0beb019ed8b31f6344f883b5677d7
parent1457d8cf7664f34c4ba534c1073821a559a2f6f9 (diff)
xen: fix xen_qlock_wait()
Commit a856531951dc80 ("xen: make xen_qlock_wait() nestable") introduced a regression for Xen guests running fully virtualized (HVM or PVH mode). The Xen hypervisor wouldn't return from the poll hypercall with interrupts disabled in case of an interrupt (for PV guests it does). So instead of disabling interrupts in xen_qlock_wait() use a nesting counter to avoid calling xen_clear_irq_pending() in case xen_qlock_wait() is nested. Fixes: a856531951dc80 ("xen: make xen_qlock_wait() nestable") Cc: stable@vger.kernel.org Reported-by: Sander Eikelenboom <linux@eikelenboom.it> Signed-off-by: Juergen Gross <jgross@suse.com> Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com> Tested-by: Sander Eikelenboom <linux@eikelenboom.it> Signed-off-by: Juergen Gross <jgross@suse.com>
-rw-r--r--arch/x86/xen/spinlock.c14
1 file changed, 8 insertions, 6 deletions
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 441c88262169..1c8a8816a402 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -9,6 +9,7 @@
9#include <linux/log2.h> 9#include <linux/log2.h>
10#include <linux/gfp.h> 10#include <linux/gfp.h>
11#include <linux/slab.h> 11#include <linux/slab.h>
12#include <linux/atomic.h>
12 13
13#include <asm/paravirt.h> 14#include <asm/paravirt.h>
14#include <asm/qspinlock.h> 15#include <asm/qspinlock.h>
@@ -21,6 +22,7 @@
21 22
22static DEFINE_PER_CPU(int, lock_kicker_irq) = -1; 23static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
23static DEFINE_PER_CPU(char *, irq_name); 24static DEFINE_PER_CPU(char *, irq_name);
25static DEFINE_PER_CPU(atomic_t, xen_qlock_wait_nest);
24static bool xen_pvspin = true; 26static bool xen_pvspin = true;
25 27
26static void xen_qlock_kick(int cpu) 28static void xen_qlock_kick(int cpu)
@@ -39,25 +41,25 @@ static void xen_qlock_kick(int cpu)
39 */ 41 */
40static void xen_qlock_wait(u8 *byte, u8 val) 42static void xen_qlock_wait(u8 *byte, u8 val)
41{ 43{
42 unsigned long flags;
43 int irq = __this_cpu_read(lock_kicker_irq); 44 int irq = __this_cpu_read(lock_kicker_irq);
45 atomic_t *nest_cnt = this_cpu_ptr(&xen_qlock_wait_nest);
44 46
45 /* If kicker interrupts not initialized yet, just spin */ 47 /* If kicker interrupts not initialized yet, just spin */
46 if (irq == -1 || in_nmi()) 48 if (irq == -1 || in_nmi())
47 return; 49 return;
48 50
49 /* Guard against reentry. */ 51 /* Detect reentry. */
50 local_irq_save(flags); 52 atomic_inc(nest_cnt);
51 53
52 /* If irq pending already clear it. */ 54 /* If irq pending already and no nested call clear it. */
53 if (xen_test_irq_pending(irq)) { 55 if (atomic_read(nest_cnt) == 1 && xen_test_irq_pending(irq)) {
54 xen_clear_irq_pending(irq); 56 xen_clear_irq_pending(irq);
55 } else if (READ_ONCE(*byte) == val) { 57 } else if (READ_ONCE(*byte) == val) {
56 /* Block until irq becomes pending (or a spurious wakeup) */ 58 /* Block until irq becomes pending (or a spurious wakeup) */
57 xen_poll_irq(irq); 59 xen_poll_irq(irq);
58 } 60 }
59 61
60 local_irq_restore(flags); 62 atomic_dec(nest_cnt);
61} 63}
62 64
63static irqreturn_t dummy_handler(int irq, void *dev_id) 65static irqreturn_t dummy_handler(int irq, void *dev_id)