author     Jeremy Fitzhardinge <jeremy@goop.org>    2008-08-19 16:19:36 -0400
committer  Ingo Molnar <mingo@elte.hu>              2008-08-20 06:40:07 -0400
commit     63d3a75d6f1fcf2f33e6abbe84e1f428c3586152 (patch)
tree       3b7453c784b4fad0afe002942d48bfc876010999 /include
parent     6e833587e11ed0dbf12e647127f2650e2f80b26d (diff)
x86/paravirt: add spin_lock_flags lock op
It is useful for a pv_lock_ops backend to know whether interrupts are
enabled in the context in which a spin_lock is being called. This
allows the backend to enable interrupts while spinning, which can be
particularly helpful when spinning becomes blocking (for example, when
the backend blocks the virtual CPU instead of busy-waiting).
The default implementation just calls the normal spin_lock op,
ignoring the flags.
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
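To illustrate why the flags are useful (a sketch only, not code from this
series): a backend that receives the caller's saved eflags can briefly
re-enable interrupts while it waits for a contended lock. The helper names
my_backend_trylock() and my_backend_wait() below are hypothetical stand-ins
for whatever the backend actually does, such as a hypercall that blocks the
virtual CPU:

/* Hypothetical backend primitives, assumed to exist for this sketch. */
static int my_backend_trylock(struct raw_spinlock *lock);
static void my_backend_wait(struct raw_spinlock *lock);

/*
 * Hypothetical pv_lock_ops backend, for illustration only.  'flags' is
 * the eflags value saved before interrupts were disabled on the
 * spin_lock_irqsave() path, so X86_EFLAGS_IF tells us whether the
 * caller was running with interrupts enabled.
 */
static void my_backend_spin_lock_flags(struct raw_spinlock *lock,
				       unsigned long flags)
{
	int irqs_were_on = !!(flags & X86_EFLAGS_IF);

	while (!my_backend_trylock(lock)) {
		if (irqs_were_on)
			local_irq_enable();	/* allow interrupts while we wait */
		my_backend_wait(lock);		/* spin, or block the vCPU */
		if (irqs_were_on)
			local_irq_disable();	/* restore the caller's state */
	}
}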
Diffstat (limited to 'include')
-rw-r--r--  include/asm-x86/paravirt.h | 7
-rw-r--r--  include/asm-x86/spinlock.h | 9
2 files changed, 14 insertions, 2 deletions
diff --git a/include/asm-x86/paravirt.h b/include/asm-x86/paravirt.h
index db9b0647b346..8e9b1266898c 100644
--- a/include/asm-x86/paravirt.h
+++ b/include/asm-x86/paravirt.h
@@ -333,6 +333,7 @@ struct pv_lock_ops {
 	int (*spin_is_locked)(struct raw_spinlock *lock);
 	int (*spin_is_contended)(struct raw_spinlock *lock);
 	void (*spin_lock)(struct raw_spinlock *lock);
+	void (*spin_lock_flags)(struct raw_spinlock *lock, unsigned long flags);
 	int (*spin_trylock)(struct raw_spinlock *lock);
 	void (*spin_unlock)(struct raw_spinlock *lock);
 };
@@ -1414,6 +1415,12 @@ static __always_inline void __raw_spin_lock(struct raw_spinlock *lock)
 	PVOP_VCALL1(pv_lock_ops.spin_lock, lock);
 }
 
+static __always_inline void __raw_spin_lock_flags(struct raw_spinlock *lock,
+						  unsigned long flags)
+{
+	PVOP_VCALL2(pv_lock_ops.spin_lock_flags, lock, flags);
+}
+
 static __always_inline int __raw_spin_trylock(struct raw_spinlock *lock)
 {
 	return PVOP_CALL1(int, pv_lock_ops.spin_trylock, lock);
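The diffstat is limited to 'include', so the code that actually fills the new
pv_lock_ops.spin_lock_flags slot is not shown here. Per the commit message,
the default implementation ignores the flags and falls through to the normal
spin_lock op; a minimal sketch of such a default (the function name is
assumed, not taken from this diff) would be:

/* Default backend sketch: ignore 'flags' and take the lock normally. */
static void default_spin_lock_flags(struct raw_spinlock *lock,
				    unsigned long flags)
{
	__raw_spin_lock(lock);	/* dispatches to pv_lock_ops.spin_lock */
}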
diff --git a/include/asm-x86/spinlock.h b/include/asm-x86/spinlock.h
index e39c790dbfd2..b755ea86367e 100644
--- a/include/asm-x86/spinlock.h
+++ b/include/asm-x86/spinlock.h
@@ -182,8 +182,6 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
 }
 #endif
 
-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
-
 #ifdef CONFIG_PARAVIRT
 /*
  * Define virtualization-friendly old-style lock byte lock, for use in
@@ -272,6 +270,13 @@ static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
 	__ticket_spin_unlock(lock);
 }
+
+static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
+						  unsigned long flags)
+{
+	__raw_spin_lock(lock);
+}
+
 #endif /* CONFIG_PARAVIRT */
 
 static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
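For context on where the flags argument comes from (a simplified sketch; the
function name is illustrative and lockdep/preemption bookkeeping is omitted):
the spin_lock_irqsave() path records the interrupt state with local_irq_save()
before taking the raw lock, and it is that saved value which now reaches
__raw_spin_lock_flags():

/* Simplified sketch of the irqsave lock path (illustrative name). */
static inline unsigned long example_spin_lock_irqsave(raw_spinlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);			/* save eflags, then disable IRQs */
	__raw_spin_lock_flags(lock, flags);	/* backend may re-enable IRQs while waiting */
	return flags;				/* restored later on the unlock_irqrestore path */
}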