path: root/include/linux/spinlock.h
author    Linus Torvalds <torvalds@linux-foundation.org>  2010-03-13 17:43:01 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2010-03-13 17:43:01 -0500
commit    4e3eaddd142e2142c048c5052a0a9d2604fccfc6 (patch)
tree      5bc45a286502e54e790c54948f22364c5afd9d89  /include/linux/spinlock.h
parent    8655e7e3ddec60603c4f6c14cdf642e2ba198df8 (diff)
parent    b97c4bc16734a2e597dac7f91ee9eb78f4aeef9a (diff)
Merge branch 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'core-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  locking: Make sparse work with inline spinlocks and rwlocks
  x86/mce: Fix RCU lockdep splats
  rcu: Increase RCU CPU stall timeouts if PROVE_RCU
  ftrace: Replace read_barrier_depends() with rcu_dereference_raw()
  rcu: Suppress RCU lockdep warnings during early boot
  rcu, ftrace: Fix RCU lockdep splat in ftrace_perf_buf_prepare()
  rcu: Suppress __mpol_dup() false positive from RCU lockdep
  rcu: Make rcu_read_lock_sched_held() handle !PREEMPT
  rcu: Add control variables to lockdep_rcu_dereference() diagnostics
  rcu, cgroup: Relax the check in task_subsys_state() as early boot is now handled by lockdep-RCU
  rcu: Use wrapper function instead of exporting tasklist_lock
  sched, rcu: Fix rcu_dereference() for RCU-lockdep
  rcu: Make task_subsys_state() RCU-lockdep checks handle boot-time use
  rcu: Fix holdoff for accelerated GPs for last non-dynticked CPU
  x86/gart: Unexport gart_iommu_aperture

Fix trivial conflicts in kernel/trace/ftrace.c
Diffstat (limited to 'include/linux/spinlock.h')
-rw-r--r--  include/linux/spinlock.h  13
1 file changed, 8 insertions(+), 5 deletions(-)
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 86088213334a..89fac6a3f78b 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -128,19 +128,21 @@ static inline void smp_mb__after_lock(void) { smp_mb(); }
 #define raw_spin_unlock_wait(lock)	arch_spin_unlock_wait(&(lock)->raw_lock)
 
 #ifdef CONFIG_DEBUG_SPINLOCK
- extern void do_raw_spin_lock(raw_spinlock_t *lock);
+ extern void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock);
 #define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
  extern int do_raw_spin_trylock(raw_spinlock_t *lock);
- extern void do_raw_spin_unlock(raw_spinlock_t *lock);
+ extern void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock);
 #else
-static inline void do_raw_spin_lock(raw_spinlock_t *lock)
+static inline void do_raw_spin_lock(raw_spinlock_t *lock) __acquires(lock)
 {
+	__acquire(lock);
 	arch_spin_lock(&lock->raw_lock);
 }
 
 static inline void
-do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags)
+do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags) __acquires(lock)
 {
+	__acquire(lock);
 	arch_spin_lock_flags(&lock->raw_lock, *flags);
 }
 
@@ -149,9 +151,10 @@ static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
 	return arch_spin_trylock(&(lock)->raw_lock);
 }
 
-static inline void do_raw_spin_unlock(raw_spinlock_t *lock)
+static inline void do_raw_spin_unlock(raw_spinlock_t *lock) __releases(lock)
 {
 	arch_spin_unlock(&lock->raw_lock);
+	__release(lock);
 }
 #endif
 
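The annotations added above are what let sparse's context tracking stay balanced once the spinlock fast path is inlined: __acquires()/__releases() on the declarations state the context transition a caller may rely on, while __acquire()/__release() inside the non-debug inline bodies keep the checker's count in step with the actual arch_spin_lock()/arch_spin_unlock() calls. Below is a minimal, self-contained sketch of the same mechanism; toy_lock_t, toy_lock(), toy_unlock() and toy_critical() are invented names for illustration, and the macro fallbacks only follow the usual __CHECKER__ pattern from include/linux/compiler.h rather than quoting it.

/*
 * Hedged sketch only -- toy_lock_t and the toy_*() helpers are made up
 * and are not kernel code.  The macros follow the usual __CHECKER__
 * pattern so the file builds with a plain C compiler and still carries
 * context information when run through sparse.
 */
#ifdef __CHECKER__
# define __acquires(x)	__attribute__((context(x, 0, 1)))
# define __releases(x)	__attribute__((context(x, 1, 0)))
# define __acquire(x)	__context__(x, 1)
# define __release(x)	__context__(x, -1)
#else
# define __acquires(x)
# define __releases(x)
# define __acquire(x)	(void)0
# define __release(x)	(void)0
#endif

typedef struct { int locked; } toy_lock_t;

/* The prototype promises "returns with lock held"; the body must balance it. */
static void toy_lock(toy_lock_t *lock) __acquires(lock)
{
	__acquire(lock);	/* context count 0 -> 1 for sparse */
	lock->locked = 1;	/* stand-in for arch_spin_lock() */
}

static void toy_unlock(toy_lock_t *lock) __releases(lock)
{
	lock->locked = 0;	/* stand-in for arch_spin_unlock() */
	__release(lock);	/* count back to 0 before returning */
}

/*
 * A caller with an unbalanced error path: the early return leaves the
 * context count at 1, which is the class of bug the checker reports.
 */
static int toy_critical(toy_lock_t *lock, int fail)
{
	toy_lock(lock);
	if (fail)
		return -1;	/* bug: missing toy_unlock() */
	toy_unlock(lock);
	return 0;
}

Checking such a file with sparse (in-tree, make C=2 runs sparse over every file) flags the early return in toy_critical() with a context-imbalance warning instead of letting it compile silently.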