author     Jeremy Fitzhardinge <jeremy@goop.org>    2008-08-20 20:02:18 -0400
committer  Ingo Molnar <mingo@elte.hu>              2008-08-21 07:52:57 -0400
commit     168d2f464ab9860f0d1e66cf1f9684973222f1c6 (patch)
tree       c3a4199cf2aa4c6322b7860c431fd5bfdc4220ef /arch/x86/xen
parent     7708ad64a24a674f7905aa7a5099a50f055debec (diff)
xen: save previous spinlock when blocking
A spinlock can be interrupted while spinning, so make sure we preserve
the previous lock of interest if we're taking a lock from within an
interrupt handler.

We also need to deal with the case where the blocking path gets
interrupted between testing to see if the lock is free and actually
blocking.  If we get interrupted there and end up in the state where
the lock is free but the irq isn't pending, then we'll block
indefinitely in the hypervisor.  The fix is to make sure that any
nested lock-takers will always leave the irq pending if there's any
chance the outer lock became free.

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Acked-by: Jan Beulich <jbeulich@novell.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
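In short, the locking slow path after this change behaves as follows (a condensed
sketch of the diff below; the irq-stats update and the per-CPU irq lookup are omitted):

    /* Condensed sketch of xen_spin_lock_slow() as modified by this patch. */
    prev = spinning_lock(xl);               /* remember any outer lock of interest */
    do {
            xen_clear_irq_pending(irq);
            ret = xen_spin_trylock(lock);   /* did the lock become free meanwhile? */
            if (ret) {
                    /* If we interrupted an outer spinner, leave the irq
                       pending so it rechecks instead of blocking forever. */
                    if (prev != NULL)
                            xen_set_irq_pending(irq);
                    goto out;
            }
            xen_poll_irq(irq);              /* returns at once if the irq is already pending */
    } while (!xen_test_irq_pending(irq));   /* ignore spurious wakeups */
    out:
    unspinning_lock(xl, prev);              /* restore the outer lock of interest */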
Diffstat (limited to 'arch/x86/xen')
-rw-r--r--  arch/x86/xen/spinlock.c  65
1 file changed, 50 insertions(+), 15 deletions(-)
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 8dc4d31da67f..4884bc603aa7 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -47,25 +47,41 @@ static int xen_spin_trylock(struct raw_spinlock *lock)
 static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
 static DEFINE_PER_CPU(struct xen_spinlock *, lock_spinners);
 
-static inline void spinning_lock(struct xen_spinlock *xl)
+/*
+ * Mark a cpu as interested in a lock.  Returns the CPU's previous
+ * lock of interest, in case we got preempted by an interrupt.
+ */
+static inline struct xen_spinlock *spinning_lock(struct xen_spinlock *xl)
 {
+        struct xen_spinlock *prev;
+
+        prev = __get_cpu_var(lock_spinners);
         __get_cpu_var(lock_spinners) = xl;
+
         wmb();                  /* set lock of interest before count */
+
         asm(LOCK_PREFIX " incw %0"
             : "+m" (xl->spinners) : : "memory");
+
+        return prev;
 }
 
-static inline void unspinning_lock(struct xen_spinlock *xl)
+/*
+ * Mark a cpu as no longer interested in a lock.  Restores previous
+ * lock of interest (NULL for none).
+ */
+static inline void unspinning_lock(struct xen_spinlock *xl, struct xen_spinlock *prev)
 {
         asm(LOCK_PREFIX " decw %0"
             : "+m" (xl->spinners) : : "memory");
-        wmb();                  /* decrement count before clearing lock */
-        __get_cpu_var(lock_spinners) = NULL;
+        wmb();                  /* decrement count before restoring lock */
+        __get_cpu_var(lock_spinners) = prev;
 }
 
 static noinline int xen_spin_lock_slow(struct raw_spinlock *lock)
 {
         struct xen_spinlock *xl = (struct xen_spinlock *)lock;
+        struct xen_spinlock *prev;
         int irq = __get_cpu_var(lock_kicker_irq);
         int ret;
 
@@ -74,23 +90,42 @@ static noinline int xen_spin_lock_slow(struct raw_spinlock *lock)
                 return 0;
 
         /* announce we're spinning */
-        spinning_lock(xl);
+        prev = spinning_lock(xl);
 
-        /* clear pending */
-        xen_clear_irq_pending(irq);
+        do {
+                /* clear pending */
+                xen_clear_irq_pending(irq);
+
+                /* check again make sure it didn't become free while
+                   we weren't looking  */
+                ret = xen_spin_trylock(lock);
+                if (ret) {
+                        /*
+                         * If we interrupted another spinlock while it
+                         * was blocking, make sure it doesn't block
+                         * without rechecking the lock.
+                         */
+                        if (prev != NULL)
+                                xen_set_irq_pending(irq);
+                        goto out;
+                }
 
-        /* check again make sure it didn't become free while
-           we weren't looking  */
-        ret = xen_spin_trylock(lock);
-        if (ret)
-                goto out;
+                /*
+                 * Block until irq becomes pending.  If we're
+                 * interrupted at this point (after the trylock but
+                 * before entering the block), then the nested lock
+                 * handler guarantees that the irq will be left
+                 * pending if there's any chance the lock became free;
+                 * xen_poll_irq() returns immediately if the irq is
+                 * pending.
+                 */
+                xen_poll_irq(irq);
+        } while (!xen_test_irq_pending(irq)); /* check for spurious wakeups */
 
-        /* block until irq becomes pending */
-        xen_poll_irq(irq);
         kstat_this_cpu.irqs[irq]++;
 
 out:
-        unspinning_lock(xl);
+        unspinning_lock(xl, prev);
         return ret;
 }
 
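For context (not part of this patch): the per-CPU lock_spinners value that
spinning_lock() now saves and unspinning_lock() restores is what the unlock
slow path consults when deciding which CPU to kick.  A rough sketch, assuming
the xen_spin_unlock_slow() helper and the XEN_SPIN_UNLOCK_VECTOR IPI used
elsewhere in this file; the exact kick mechanism is an assumption here, not
shown in the diff:

    /*
     * Sketch (assumed, not from this diff): the unlocker kicks one CPU
     * that has registered interest in this lock via lock_spinners.  If a
     * nested lock-taker cleared the outer lock's entry instead of
     * restoring it, the outer spinner would never be kicked and could
     * block in the hypervisor indefinitely; hence unspinning_lock() now
     * takes and restores prev.
     */
    static noinline void xen_spin_unlock_slow(struct xen_spinlock *xl)
    {
            int cpu;

            for_each_online_cpu(cpu) {
                    if (per_cpu(lock_spinners, cpu) == xl) {
                            xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
                            break;
                    }
            }
    }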