author | Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com> | 2009-09-09 15:33:51 -0400
committer | Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com> | 2009-09-09 19:38:11 -0400
commit | 4d576b57b50a92801e6493e76e5243d6cff193d2 (patch)
tree | d02bbb6713f47e3cd26643c36d2dcada0ff1dccc /arch
parent | 577eebeae34d340685d8985dfdb7dfe337c511e8 (diff)
xen: only enable interrupts while actually blocking for spinlock
Where possible we enable interrupts while waiting for a spinlock to
become free, in order to reduce big latency spikes in interrupt handling.
However, at present if we manage to pick up the spinlock just before
blocking, we'll end up holding the lock with interrupts enabled for a
while. This will cause a deadlock if we receive an interrupt in that
window, and the interrupt handler tries to take the lock too.
Solve this by shrinking the interrupt-enabled region to just around the
blocking call.
[ Impact: avoid race/deadlock when using Xen PV spinlocks ]
Reported-by: "Yang, Xiaowei" <xiaowei.yang@intel.com>
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
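For illustration only (this sketch is not part of the commit): the change boils down to moving the save/enable of the interrupt flags from before the wait loop to just around the blocking xen_poll_irq() call. The stub functions below are stand-ins modeled on the names in the diff, with empty bodies so the ordering can be compiled and read in isolation:

```c
#include <stdbool.h>

/* Stand-ins for the real kernel/Xen primitives (names taken from the
 * diff below); the bodies are stubs, only the call ordering matters. */
static unsigned long __raw_local_save_flags(void)  { return 0; /* save IF state */ }
static void raw_local_irq_enable(void)             { /* sti */ }
static void raw_local_irq_restore(unsigned long f) { (void)f; /* restore IF state */ }
static bool xen_trylock(void)                      { return true; /* hypothetical trylock */ }
static void xen_poll_irq(int irq)                  { (void)irq; /* block until irq pends */ }

/* Old ordering: interrupts are enabled for the whole slow path, so a
 * trylock that succeeds leaves the lock held while interrupts are
 * still on; an interrupt arriving in that window whose handler takes
 * the same lock deadlocks. */
static void slow_path_old(int irq, bool irq_enable)
{
	unsigned long flags = __raw_local_save_flags();

	if (irq_enable)
		raw_local_irq_enable();          /* window opens here */

	do {
		if (xen_trylock())
			break;                   /* lock taken, irqs still enabled! */
		xen_poll_irq(irq);
	} while (1);

	raw_local_irq_restore(flags);            /* window closes too late */
}

/* New ordering: the interrupt-enabled window is shrunk to just the
 * blocking call, so the lock is never acquired with interrupts on. */
static void slow_path_new(int irq, bool irq_enable)
{
	do {
		unsigned long flags;

		if (xen_trylock())
			break;                   /* irqs are still off here */

		flags = __raw_local_save_flags();
		if (irq_enable)
			raw_local_irq_enable();  /* window opens... */
		xen_poll_irq(irq);               /* ...only around the block... */
		raw_local_irq_restore(flags);    /* ...and closes right after */
	} while (1);
}

int main(void)
{
	slow_path_old(0, true);
	slow_path_new(0, true);
	return 0;
}
```

Because the flags are now saved and restored inside each loop iteration, every exit from the loop (including the goto out taken when the trylock succeeds) runs with interrupts back in their original state, so the lock can no longer be held with interrupts unexpectedly enabled.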
Diffstat (limited to 'arch')
-rw-r--r-- | arch/x86/xen/spinlock.c | 19
1 files changed, 11 insertions, 8 deletions
```diff
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 5601506f2dd9..2f91e5651926 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -187,7 +187,6 @@ static noinline int xen_spin_lock_slow(struct raw_spinlock *lock, bool irq_enable)
 	struct xen_spinlock *prev;
 	int irq = __get_cpu_var(lock_kicker_irq);
 	int ret;
-	unsigned long flags;
 	u64 start;
 
 	/* If kicker interrupts not initialized yet, just spin */
@@ -199,16 +198,12 @@ static noinline int xen_spin_lock_slow(struct raw_spinlock *lock, bool irq_enable)
 	/* announce we're spinning */
 	prev = spinning_lock(xl);
 
-	flags = __raw_local_save_flags();
-	if (irq_enable) {
-		ADD_STATS(taken_slow_irqenable, 1);
-		raw_local_irq_enable();
-	}
-
 	ADD_STATS(taken_slow, 1);
 	ADD_STATS(taken_slow_nested, prev != NULL);
 
 	do {
+		unsigned long flags;
+
 		/* clear pending */
 		xen_clear_irq_pending(irq);
 
@@ -228,6 +223,12 @@ static noinline int xen_spin_lock_slow(struct raw_spinlock *lock, bool irq_enable)
 			goto out;
 		}
 
+		flags = __raw_local_save_flags();
+		if (irq_enable) {
+			ADD_STATS(taken_slow_irqenable, 1);
+			raw_local_irq_enable();
+		}
+
 		/*
 		 * Block until irq becomes pending. If we're
 		 * interrupted at this point (after the trylock but
@@ -238,13 +239,15 @@ static noinline int xen_spin_lock_slow(struct raw_spinlock *lock, bool irq_enable)
 		 * pending.
 		 */
 		xen_poll_irq(irq);
+
+		raw_local_irq_restore(flags);
+
 		ADD_STATS(taken_slow_spurious, !xen_test_irq_pending(irq));
 	} while (!xen_test_irq_pending(irq)); /* check for spurious wakeups */
 
 	kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq));
 
 out:
-	raw_local_irq_restore(flags);
 	unspinning_lock(xl, prev);
 	spin_time_accum_blocked(start);
 
```