author	Jeremy Fitzhardinge <jeremy@goop.org>	2008-08-20 20:02:20 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-08-21 07:52:58 -0400
commit	1e696f638b523958ee3d95e655becdb4c4b6fcde
tree	fc633cd3d2d92c846463d2c7f476647de1dbc27a /arch/x86/xen
parent	994025caba3e6beade9bde84dd1b70d9d250f27b
xen: allow interrupts to be enabled while doing a blocking spin
If spin_lock is called in an interrupts-enabled context, we can safely
enable interrupts while spinning. We don't bother for the actual spin
loop, but if we time out and fall back to blocking, it's definitely
worthwhile enabling interrupts if possible.

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Acked-by: Jan Beulich <jbeulich@novell.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
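In outline, the patch brackets the blocking slow path with a save / conditional-enable / restore of the local interrupt flags. A minimal sketch of that pattern follows (blocking_wait() is a hypothetical stand-in for xen_spin_lock_slow(); the flag helpers are the ones the patch itself uses):

/* Sketch only: save the caller's IRQ state, enable interrupts while
 * blocking if (and only if) the caller had them enabled, then restore. */
static int blocking_wait(struct raw_spinlock *lock, bool irq_enable)
{
	unsigned long flags;
	int ret = 0;

	flags = __raw_local_save_flags();	/* remember caller's IRQ state */
	if (irq_enable)
		raw_local_irq_enable();		/* safe: caller had IRQs on */

	/* ... block until the lock holder sends the kicker interrupt ... */

	raw_local_irq_restore(flags);		/* put IRQ state back as found */
	return ret;
}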
Diffstat (limited to 'arch/x86/xen')
-rw-r--r--	arch/x86/xen/spinlock.c	30
1 file changed, 27 insertions(+), 3 deletions(-)
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 0d8f3b2d9bec..e6061f2e64f2 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -23,6 +23,7 @@ static struct xen_spinlock_stats
 	u32 taken_slow_nested;
 	u32 taken_slow_pickup;
 	u32 taken_slow_spurious;
+	u32 taken_slow_irqenable;
 
 	u64 released;
 	u32 released_slow;
@@ -167,12 +168,13 @@ static inline void unspinning_lock(struct xen_spinlock *xl, struct xen_spinlock
 	__get_cpu_var(lock_spinners) = prev;
 }
 
-static noinline int xen_spin_lock_slow(struct raw_spinlock *lock)
+static noinline int xen_spin_lock_slow(struct raw_spinlock *lock, bool irq_enable)
 {
 	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
 	struct xen_spinlock *prev;
 	int irq = __get_cpu_var(lock_kicker_irq);
 	int ret;
+	unsigned long flags;
 
 	/* If kicker interrupts not initialized yet, just spin */
 	if (irq == -1)
@@ -181,6 +183,12 @@ static noinline int xen_spin_lock_slow(struct raw_spinlock *lock)
 	/* announce we're spinning */
 	prev = spinning_lock(xl);
 
+	flags = __raw_local_save_flags();
+	if (irq_enable) {
+		ADD_STATS(taken_slow_irqenable, 1);
+		raw_local_irq_enable();
+	}
+
 	ADD_STATS(taken_slow, 1);
 	ADD_STATS(taken_slow_nested, prev != NULL);
 
@@ -220,11 +228,12 @@ static noinline int xen_spin_lock_slow(struct raw_spinlock *lock)
 	kstat_this_cpu.irqs[irq]++;
 
 out:
+	raw_local_irq_restore(flags);
 	unspinning_lock(xl, prev);
 	return ret;
 }
 
-static void xen_spin_lock(struct raw_spinlock *lock)
+static inline void __xen_spin_lock(struct raw_spinlock *lock, bool irq_enable)
 {
 	struct xen_spinlock *xl = (struct xen_spinlock *)lock;
 	unsigned timeout;
@@ -254,11 +263,23 @@ static void xen_spin_lock(struct raw_spinlock *lock)
 		: "memory");
 
 		spin_time_accum_fast(start_spin_fast);
-	} while (unlikely(oldval != 0 && (TIMEOUT == ~0 || !xen_spin_lock_slow(lock))));
+
+	} while (unlikely(oldval != 0 &&
+			  (TIMEOUT == ~0 || !xen_spin_lock_slow(lock, irq_enable))));
 
 	spin_time_accum(start_spin);
 }
 
+static void xen_spin_lock(struct raw_spinlock *lock)
+{
+	__xen_spin_lock(lock, false);
+}
+
+static void xen_spin_lock_flags(struct raw_spinlock *lock, unsigned long flags)
+{
+	__xen_spin_lock(lock, !raw_irqs_disabled_flags(flags));
+}
+
 static noinline void xen_spin_unlock_slow(struct xen_spinlock *xl)
 {
 	int cpu;
@@ -323,6 +344,7 @@ void __init xen_init_spinlocks(void)
 	pv_lock_ops.spin_is_locked = xen_spin_is_locked;
 	pv_lock_ops.spin_is_contended = xen_spin_is_contended;
 	pv_lock_ops.spin_lock = xen_spin_lock;
+	pv_lock_ops.spin_lock_flags = xen_spin_lock_flags;
 	pv_lock_ops.spin_trylock = xen_spin_trylock;
 	pv_lock_ops.spin_unlock = xen_spin_unlock;
 }
@@ -353,6 +375,8 @@ static int __init xen_spinlock_debugfs(void)
 					   &spinlock_stats.taken_slow_pickup);
 	debugfs_create_u32("taken_slow_spurious", 0444, d_spin_debug,
 			   &spinlock_stats.taken_slow_spurious);
+	debugfs_create_u32("taken_slow_irqenable", 0444, d_spin_debug,
+			   &spinlock_stats.taken_slow_irqenable);
 
 	debugfs_create_u64("released", 0444, d_spin_debug, &spinlock_stats.released);
 	debugfs_create_u32("released_slow", 0444, d_spin_debug,
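For context on the new spin_lock_flags hook: spin_lock_irqsave() disables interrupts before taking the lock, but the caller's pre-disable state is still recorded in flags, which is exactly what __xen_spin_lock() needs to decide whether re-enabling is safe. A rough sketch of that decision, assuming the generic irqsave path dispatches to the pv op (caller() is a hypothetical function; the helpers are real):

/* Hypothetical caller: after raw_local_irq_save(), IRQs are off, but
 * 'flags' still records whether they were on beforehand. */
static void caller(struct raw_spinlock *lock)
{
	unsigned long flags;

	raw_local_irq_save(flags);	/* IRQs off; old state kept in flags */
	pv_lock_ops.spin_lock_flags(lock, flags);
	/* -> __xen_spin_lock(lock, !raw_irqs_disabled_flags(flags)) */
}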