Diffstat (limited to 'arch/x86')
 arch/x86/xen/spinlock.c | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 441c88262169..1c8a8816a402 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -9,6 +9,7 @@
 #include <linux/log2.h>
 #include <linux/gfp.h>
 #include <linux/slab.h>
+#include <linux/atomic.h>
 
 #include <asm/paravirt.h>
 #include <asm/qspinlock.h>
@@ -21,6 +22,7 @@
 
 static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
 static DEFINE_PER_CPU(char *, irq_name);
+static DEFINE_PER_CPU(atomic_t, xen_qlock_wait_nest);
 static bool xen_pvspin = true;
 
 static void xen_qlock_kick(int cpu)
@@ -39,25 +41,25 @@ static void xen_qlock_kick(int cpu)
  */
 static void xen_qlock_wait(u8 *byte, u8 val)
 {
-	unsigned long flags;
 	int irq = __this_cpu_read(lock_kicker_irq);
+	atomic_t *nest_cnt = this_cpu_ptr(&xen_qlock_wait_nest);
 
 	/* If kicker interrupts not initialized yet, just spin */
 	if (irq == -1 || in_nmi())
 		return;
 
-	/* Guard against reentry. */
-	local_irq_save(flags);
+	/* Detect reentry. */
+	atomic_inc(nest_cnt);
 
-	/* If irq pending already clear it. */
-	if (xen_test_irq_pending(irq)) {
+	/* If irq pending already and no nested call clear it. */
+	if (atomic_read(nest_cnt) == 1 && xen_test_irq_pending(irq)) {
 		xen_clear_irq_pending(irq);
 	} else if (READ_ONCE(*byte) == val) {
 		/* Block until irq becomes pending (or a spurious wakeup) */
 		xen_poll_irq(irq);
 	}
 
-	local_irq_restore(flags);
+	atomic_dec(nest_cnt);
 }
 
 static irqreturn_t dummy_handler(int irq, void *dev_id)
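
Below is an illustrative, self-contained user-space sketch (not the kernel code above) of the pattern this patch switches to: a nesting counter replaces local_irq_save()/local_irq_restore(), so a re-entrant invocation (for example from NMI context) can detect that an outer call is already in progress and leave the pending event for it to consume. The names nest_cnt, event_pending and wait_for_event are hypothetical stand-ins for the per-CPU xen_qlock_wait_nest counter and the kicker IRQ.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int  nest_cnt;       /* per-CPU in the kernel; one global here */
static atomic_bool event_pending;  /* stands in for the pending kicker IRQ */

static void wait_for_event(void)
{
	/* Detect reentry: every caller bumps the nesting counter. */
	atomic_fetch_add(&nest_cnt, 1);

	if (atomic_load(&nest_cnt) == 1 && atomic_load(&event_pending)) {
		/* Outermost call only: consume an already-pending event. */
		atomic_store(&event_pending, false);
	} else {
		/* Here the kernel code would poll/block until the event arrives. */
	}

	atomic_fetch_sub(&nest_cnt, 1);
}

int main(void)
{
	atomic_store(&event_pending, true);
	wait_for_event();	/* the outer call consumes the event */
	printf("pending after outer call: %d\n", (int)atomic_load(&event_pending));
	return 0;
}

The atomic_load(&nest_cnt) == 1 guard mirrors the atomic_read(nest_cnt) == 1 test in the patch: only the outermost waiter may clear the pending indication, so a nested waiter cannot consume the wakeup the outer one is about to block on.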
