author     Ingo Molnar <mingo@elte.hu>             2006-07-03 03:24:33 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>   2006-07-03 18:27:01 -0400
commit     9a11b49a805665e13a56aa067afaf81d43ec1514
tree       bf499956e3f67d1211d68ab1e2eb76645f453dfb /kernel/rtmutex.c
parent     fb7e42413a098cc45b3adf858da290033af62bae
[PATCH] lockdep: better lock debugging
Generic lock debugging:

 - generalized lock debugging framework. For example, a bug in one lock
   subsystem turns off debugging in all lock subsystems.

 - got rid of the caller address passing (__IP__/__IP_DECL__/etc.) from
   the mutex/rtmutex debugging code: it caused way too much prototype
   hackery, and lockdep will give the same information anyway.

 - ability to do silent tests

 - check lock freeing in vfree too.

 - more finegrained debugging options, to allow distributions to turn off
   more expensive debugging features.

There's no separate 'held mutexes' list anymore - but there's a 'held locks'
stack within lockdep, which unifies deadlock detection across all lock
classes. (this is independent of the lockdep validation stuff - lockdep
first checks whether we are holding a lock already)

Here are the current debugging options:

  CONFIG_DEBUG_MUTEXES=y
  CONFIG_DEBUG_LOCK_ALLOC=y

which do:

  config DEBUG_MUTEXES
          bool "Mutex debugging, basic checks"

  config DEBUG_LOCK_ALLOC
          bool "Detect incorrect freeing of live mutexes"

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'kernel/rtmutex.c')
-rw-r--r--   kernel/rtmutex.c   46
1 file changed, 22 insertions(+), 24 deletions(-)
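For context on the mechanical bulk of the change below: __IP_DECL__, __IP__ and __RET_IP__ were helper macros that threaded the caller's instruction pointer through the rt-mutex call chain as a hidden extra argument. Their pre-removal definitions were roughly of the following form (a sketch of the old mutex/rtmutex debug headers, not part of this diff; the exact spelling and config guard may differ):

#ifdef CONFIG_DEBUG_RT_MUTEXES
/* debug build: every annotated prototype and call site grows a caller-IP argument */
# define __IP_DECL__    , unsigned long ip
# define __IP__         , ip
# define __RET_IP__     , (unsigned long)__builtin_return_address(0)
#else
/* non-debug build: the macros expand to nothing, so the extra argument vanishes */
# define __IP_DECL__
# define __IP__
# define __RET_IP__
#endif

Since lockdep now records acquisition addresses in its own held-locks stack, the diff simply drops that extra parameter from every prototype and call site.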
diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
index 45d61016da57..91b699aa658b 100644
--- a/kernel/rtmutex.c
+++ b/kernel/rtmutex.c
@@ -161,8 +161,7 @@ static int rt_mutex_adjust_prio_chain(task_t *task,
 				      int deadlock_detect,
 				      struct rt_mutex *orig_lock,
 				      struct rt_mutex_waiter *orig_waiter,
-				      struct task_struct *top_task
-				      __IP_DECL__)
+				      struct task_struct *top_task)
 {
 	struct rt_mutex *lock;
 	struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
@@ -357,7 +356,7 @@ static inline int try_to_steal_lock(struct rt_mutex *lock)
  *
  * Must be called with lock->wait_lock held.
  */
-static int try_to_take_rt_mutex(struct rt_mutex *lock __IP_DECL__)
+static int try_to_take_rt_mutex(struct rt_mutex *lock)
 {
 	/*
 	 * We have to be careful here if the atomic speedups are
@@ -384,7 +383,7 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock __IP_DECL__)
 		return 0;
 
 	/* We got the lock. */
-	debug_rt_mutex_lock(lock __IP__);
+	debug_rt_mutex_lock(lock);
 
 	rt_mutex_set_owner(lock, current, 0);
 
@@ -402,8 +401,7 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock __IP_DECL__)
  */
 static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 				   struct rt_mutex_waiter *waiter,
-				   int detect_deadlock
-				   __IP_DECL__)
+				   int detect_deadlock)
 {
 	struct rt_mutex_waiter *top_waiter = waiter;
 	task_t *owner = rt_mutex_owner(lock);
@@ -454,7 +452,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 	spin_unlock(&lock->wait_lock);
 
 	res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter,
-					 current __IP__);
+					 current);
 
 	spin_lock(&lock->wait_lock);
 
@@ -526,7 +524,7 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
  * Must be called with lock->wait_lock held
  */
 static void remove_waiter(struct rt_mutex *lock,
-			  struct rt_mutex_waiter *waiter __IP_DECL__)
+			  struct rt_mutex_waiter *waiter)
 {
 	int first = (waiter == rt_mutex_top_waiter(lock));
 	int boost = 0;
@@ -568,7 +566,7 @@ static void remove_waiter(struct rt_mutex *lock,
 
 	spin_unlock(&lock->wait_lock);
 
-	rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current __IP__);
+	rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current);
 
 	spin_lock(&lock->wait_lock);
 }
@@ -595,7 +593,7 @@ void rt_mutex_adjust_pi(struct task_struct *task)
 	get_task_struct(task);
 	spin_unlock_irqrestore(&task->pi_lock, flags);
 
-	rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task __RET_IP__);
+	rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task);
 }
 
 /*
@@ -604,7 +602,7 @@ void rt_mutex_adjust_pi(struct task_struct *task)
 static int __sched
 rt_mutex_slowlock(struct rt_mutex *lock, int state,
 		  struct hrtimer_sleeper *timeout,
-		  int detect_deadlock __IP_DECL__)
+		  int detect_deadlock)
 {
 	struct rt_mutex_waiter waiter;
 	int ret = 0;
@@ -615,7 +613,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
 	spin_lock(&lock->wait_lock);
 
 	/* Try to acquire the lock again: */
-	if (try_to_take_rt_mutex(lock __IP__)) {
+	if (try_to_take_rt_mutex(lock)) {
 		spin_unlock(&lock->wait_lock);
 		return 0;
 	}
@@ -629,7 +627,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
 
 	for (;;) {
 		/* Try to acquire the lock: */
-		if (try_to_take_rt_mutex(lock __IP__))
+		if (try_to_take_rt_mutex(lock))
 			break;
 
 		/*
@@ -653,7 +651,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
 		 */
 		if (!waiter.task) {
 			ret = task_blocks_on_rt_mutex(lock, &waiter,
-						      detect_deadlock __IP__);
+						      detect_deadlock);
 			/*
 			 * If we got woken up by the owner then start loop
 			 * all over without going into schedule to try
@@ -680,7 +678,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
 	set_current_state(TASK_RUNNING);
 
 	if (unlikely(waiter.task))
-		remove_waiter(lock, &waiter __IP__);
+		remove_waiter(lock, &waiter);
 
 	/*
 	 * try_to_take_rt_mutex() sets the waiter bit
@@ -711,7 +709,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
  * Slow path try-lock function:
  */
 static inline int
-rt_mutex_slowtrylock(struct rt_mutex *lock __IP_DECL__)
+rt_mutex_slowtrylock(struct rt_mutex *lock)
 {
 	int ret = 0;
 
@@ -719,7 +717,7 @@ rt_mutex_slowtrylock(struct rt_mutex *lock __IP_DECL__)
 
 	if (likely(rt_mutex_owner(lock) != current)) {
 
-		ret = try_to_take_rt_mutex(lock __IP__);
+		ret = try_to_take_rt_mutex(lock);
 		/*
 		 * try_to_take_rt_mutex() sets the lock waiters
 		 * bit unconditionally. Clean this up.
@@ -769,13 +767,13 @@ rt_mutex_fastlock(struct rt_mutex *lock, int state,
 		  int detect_deadlock,
 		  int (*slowfn)(struct rt_mutex *lock, int state,
 				struct hrtimer_sleeper *timeout,
-				int detect_deadlock __IP_DECL__))
+				int detect_deadlock))
 {
 	if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
 		rt_mutex_deadlock_account_lock(lock, current);
 		return 0;
 	} else
-		return slowfn(lock, state, NULL, detect_deadlock __RET_IP__);
+		return slowfn(lock, state, NULL, detect_deadlock);
 }
 
 static inline int
@@ -783,24 +781,24 @@ rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
 			struct hrtimer_sleeper *timeout, int detect_deadlock,
 			int (*slowfn)(struct rt_mutex *lock, int state,
 				      struct hrtimer_sleeper *timeout,
-				      int detect_deadlock __IP_DECL__))
+				      int detect_deadlock))
 {
 	if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
 		rt_mutex_deadlock_account_lock(lock, current);
 		return 0;
 	} else
-		return slowfn(lock, state, timeout, detect_deadlock __RET_IP__);
+		return slowfn(lock, state, timeout, detect_deadlock);
 }
 
 static inline int
 rt_mutex_fasttrylock(struct rt_mutex *lock,
-		     int (*slowfn)(struct rt_mutex *lock __IP_DECL__))
+		     int (*slowfn)(struct rt_mutex *lock))
 {
 	if (likely(rt_mutex_cmpxchg(lock, NULL, current))) {
 		rt_mutex_deadlock_account_lock(lock, current);
 		return 1;
 	}
-	return slowfn(lock __RET_IP__);
+	return slowfn(lock);
 }
 
 static inline void
@@ -948,7 +946,7 @@ void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
 			       struct task_struct *proxy_owner)
 {
 	__rt_mutex_init(lock, NULL);
-	debug_rt_mutex_proxy_lock(lock, proxy_owner __RET_IP__);
+	debug_rt_mutex_proxy_lock(lock, proxy_owner);
 	rt_mutex_set_owner(lock, proxy_owner, 0);
 	rt_mutex_deadlock_account_lock(lock, proxy_owner);
 }