path: root/kernel
author		Thomas Gleixner <tglx@linutronix.de>	2009-12-03 14:55:53 -0500
committer	Thomas Gleixner <tglx@linutronix.de>	2009-12-14 17:55:33 -0500
commit		9828ea9d75c38fe3dce05d00566eed61c85732e6 (patch)
tree		6cee5c8ffb07cdf45cc12d58f74a3053ffefcb5f /kernel
parent		5f6384c5fb6bfc9aac506e058974d3ba293951b3 (diff)
locking: Further name space cleanups
The name space hierarchy for the internal lock functions is now a bit
backwards. raw_spin* functions map to _spin* which use __spin*, while we
would like to have _raw_spin* and __raw_spin*.

_raw_spin* is already used by lock debugging, so rename those functions
to do_raw_spin* to free up the _raw_spin* name space.

No functional change.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: Ingo Molnar <mingo@elte.hu>
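For readers following the rename, the layering described above can be sketched roughly as below. This is an illustrative sketch only, assuming the state of the tree at this point in the series (before the later _raw_spin*/__raw_spin* renames); the helper name __spin_lock_sketch is invented for this example, and lockdep/UP/irq variants are omitted. Its body mirrors the shape of the _spin_lock_nested() code visible in the kernel/spinlock.c hunks below.

	/*
	 * Approximate call chain after this patch (illustration only):
	 *
	 *   raw_spin_lock(lock)
	 *     -> _spin_lock(lock)            out-of-line API in kernel/spinlock.c
	 *       -> __spin_lock(lock)         inline fast-path helper
	 *         -> do_raw_spin_lock(lock)  formerly _raw_spin_lock(), lock-debugging level
	 *           -> architecture lock implementation
	 */
	static inline void __spin_lock_sketch(raw_spinlock_t *lock)
	{
		preempt_disable();
		spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
		LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
	}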
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched.c		2
-rw-r--r--	kernel/spinlock.c	12
2 files changed, 7 insertions, 7 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index e6acf2d7b753..91c65dd91435 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -6684,7 +6684,7 @@ SYSCALL_DEFINE0(sched_yield)
 	 */
 	__release(rq->lock);
 	spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
-	_raw_spin_unlock(&rq->lock);
+	do_raw_spin_unlock(&rq->lock);
 	preempt_enable_no_resched();
 
 	schedule();
diff --git a/kernel/spinlock.c b/kernel/spinlock.c
index 54eb7dd3c608..795240b81224 100644
--- a/kernel/spinlock.c
+++ b/kernel/spinlock.c
@@ -48,7 +48,7 @@ void __lockfunc __##op##_lock(locktype##_t *lock) \
 { \
 	for (;;) { \
 		preempt_disable(); \
-		if (likely(_raw_##op##_trylock(lock))) \
+		if (likely(do_raw_##op##_trylock(lock))) \
 			break; \
 		preempt_enable(); \
 			\
@@ -67,7 +67,7 @@ unsigned long __lockfunc __##op##_lock_irqsave(locktype##_t *lock) \
 	for (;;) { \
 		preempt_disable(); \
 		local_irq_save(flags); \
-		if (likely(_raw_##op##_trylock(lock))) \
+		if (likely(do_raw_##op##_trylock(lock))) \
 			break; \
 		local_irq_restore(flags); \
 		preempt_enable(); \
@@ -345,7 +345,7 @@ void __lockfunc _spin_lock_nested(raw_spinlock_t *lock, int subclass)
 {
 	preempt_disable();
 	spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
-	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
+	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
 }
 EXPORT_SYMBOL(_spin_lock_nested);
 
@@ -357,8 +357,8 @@ unsigned long __lockfunc _spin_lock_irqsave_nested(raw_spinlock_t *lock,
 	local_irq_save(flags);
 	preempt_disable();
 	spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
-	LOCK_CONTENDED_FLAGS(lock, _raw_spin_trylock, _raw_spin_lock,
-				_raw_spin_lock_flags, &flags);
+	LOCK_CONTENDED_FLAGS(lock, do_raw_spin_trylock, do_raw_spin_lock,
+				do_raw_spin_lock_flags, &flags);
 	return flags;
 }
 EXPORT_SYMBOL(_spin_lock_irqsave_nested);
@@ -368,7 +368,7 @@ void __lockfunc _spin_lock_nest_lock(raw_spinlock_t *lock,
 {
 	preempt_disable();
 	spin_acquire_nest(&lock->dep_map, 0, 0, nest_lock, _RET_IP_);
-	LOCK_CONTENDED(lock, _raw_spin_trylock, _raw_spin_lock);
+	LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock);
 }
 EXPORT_SYMBOL(_spin_lock_nest_lock);
 
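As a usage illustration of the renamed helpers, the __##op##_lock template changed in the first kernel/spinlock.c hunk expands, for the spin case, to roughly the function below. This is an approximate, hand-written expansion under the assumption that the spin variant is instantiated with the raw_spinlock type (as the _spin_lock_nested() signature above suggests); the spin-wait tail of the real generated function is omitted.

	/* Approximate expansion of the __##op##_lock template for op = spin
	 * after the rename; illustration only, not the literal generated code. */
	void __lockfunc __spin_lock(raw_spinlock_t *lock)
	{
		for (;;) {
			preempt_disable();
			/* Fast path: try the lock via the renamed debug-level helper. */
			if (likely(do_raw_spin_trylock(lock)))
				break;
			preempt_enable();

			/* ... spin until the lock looks free, then retry (omitted) ... */
		}
	}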