Diffstat (limited to 'include/linux/spinlock.h')
-rw-r--r--	include/linux/spinlock.h	31
1 file changed, 0 insertions, 31 deletions
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index d9510e8522d4..ef018a6e4985 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -130,12 +130,6 @@ do { \
 #define smp_mb__before_spinlock()	smp_wmb()
 #endif
 
-/**
- * raw_spin_unlock_wait - wait until the spinlock gets unlocked
- * @lock: the spinlock in question.
- */
-#define raw_spin_unlock_wait(lock)	arch_spin_unlock_wait(&(lock)->raw_lock)
-
 #ifdef CONFIG_DEBUG_SPINLOCK
  extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
 #define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
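
The raw_spin_unlock_wait() macro removed above simply forwarded to arch_spin_unlock_wait() on the underlying raw lock. A minimal sketch of the behaviour its kerneldoc describes ("wait until the spinlock gets unlocked"), written only against APIs that remain in the tree (raw_spin_is_locked(), cpu_relax()); the helper name and the trailing barrier are assumptions for illustration, not any architecture's implementation:

#include <linux/spinlock.h>

/* Illustrative only: busy-wait until the lock is observed unlocked. */
static inline void raw_spin_unlock_wait_sketch(raw_spinlock_t *lock)
{
	while (raw_spin_is_locked(lock))
		cpu_relax();	/* back off while spinning */
	smp_rmb();		/* assumed: order later reads after the observed unlock */
}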
@@ -369,31 +363,6 @@ static __always_inline int spin_trylock_irq(spinlock_t *lock)
 	raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
 })
 
-/**
- * spin_unlock_wait - Interpose between successive critical sections
- * @lock: the spinlock whose critical sections are to be interposed.
- *
- * Semantically this is equivalent to a spin_lock() immediately
- * followed by a spin_unlock(). However, most architectures have
- * more efficient implementations in which the spin_unlock_wait()
- * cannot block concurrent lock acquisition, and in some cases
- * where spin_unlock_wait() does not write to the lock variable.
- * Nevertheless, spin_unlock_wait() can have high overhead, so if
- * you feel the need to use it, please check to see if there is
- * a better way to get your job done.
- *
- * The ordering guarantees provided by spin_unlock_wait() are:
- *
- * 1. All accesses preceding the spin_unlock_wait() happen before
- *    any accesses in later critical sections for this same lock.
- * 2. All accesses following the spin_unlock_wait() happen after
- *    any accesses in earlier critical sections for this same lock.
- */
-static __always_inline void spin_unlock_wait(spinlock_t *lock)
-{
-	raw_spin_unlock_wait(&lock->rlock);
-}
-
 static __always_inline int spin_is_locked(spinlock_t *lock)
 {
 	return raw_spin_is_locked(&lock->rlock);
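
As the kerneldoc removed above states, spin_unlock_wait() was semantically equivalent to a spin_lock() immediately followed by a spin_unlock(). A minimal sketch of that documented equivalent, which callers can open-code once the primitive is gone (the helper name is made up for illustration):

#include <linux/spinlock.h>

/* Documented semantic equivalent of the removed spin_unlock_wait(). */
static inline void spin_unlock_wait_equivalent(spinlock_t *lock)
{
	spin_lock(lock);	/* waits for any current holder's critical section to end */
	spin_unlock(lock);	/* release immediately; normal lock/unlock ordering applies */
}

This preserves both ordering guarantees listed in the removed comment, at the cost of writing to the lock word and briefly excluding concurrent acquirers.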