about summary refs log tree commit diff stats
path: root/kernel/sched.c
diff options
context:
space:
mode:
authorIngo Molnar <mingo@elte.hu>2006-07-03 03:24:33 -0400
committerLinus Torvalds <torvalds@g5.osdl.org>2006-07-03 18:27:01 -0400
commit9a11b49a805665e13a56aa067afaf81d43ec1514 (patch)
treebf499956e3f67d1211d68ab1e2eb76645f453dfb /kernel/sched.c
parentfb7e42413a098cc45b3adf858da290033af62bae (diff)
[PATCH] lockdep: better lock debugging
Generic lock debugging:

 - generalized lock debugging framework. For example, a bug in one lock
   subsystem turns off debugging in all lock subsystems.

 - got rid of the caller address passing (__IP__/__IP_DECL__/etc.) from
   the mutex/rtmutex debugging code: it caused way too much prototype
   hackery, and lockdep will give the same information anyway.

 - ability to do silent tests

 - check lock freeing in vfree too.

 - more finegrained debugging options, to allow distributions to
   turn off more expensive debugging features.

There's no separate 'held mutexes' list anymore - but there's a 'held locks'
stack within lockdep, which unifies deadlock detection across all lock
classes. (this is independent of the lockdep validation stuff - lockdep first
checks whether we are holding a lock already)

Here are the current debugging options:

CONFIG_DEBUG_MUTEXES=y
CONFIG_DEBUG_LOCK_ALLOC=y

which do:

 config DEBUG_MUTEXES
          bool "Mutex debugging, basic checks"

 config DEBUG_LOCK_ALLOC
         bool "Detect incorrect freeing of live mutexes"

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--kernel/sched.c16
1 files changed, 11 insertions, 5 deletions
diff --git a/kernel/sched.c b/kernel/sched.c
index d5e37072ea54..48c1faa60a67 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -30,6 +30,7 @@
30#include <linux/capability.h> 30#include <linux/capability.h>
31#include <linux/completion.h> 31#include <linux/completion.h>
32#include <linux/kernel_stat.h> 32#include <linux/kernel_stat.h>
33#include <linux/debug_locks.h>
33#include <linux/security.h> 34#include <linux/security.h>
34#include <linux/notifier.h> 35#include <linux/notifier.h>
35#include <linux/profile.h> 36#include <linux/profile.h>
@@ -3142,12 +3143,13 @@ void fastcall add_preempt_count(int val)
3142 /* 3143 /*
3143 * Underflow? 3144 * Underflow?
3144 */ 3145 */
3145 BUG_ON((preempt_count() < 0)); 3146 if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
3147 return;
3146 preempt_count() += val; 3148 preempt_count() += val;
3147 /* 3149 /*
3148 * Spinlock count overflowing soon? 3150 * Spinlock count overflowing soon?
3149 */ 3151 */
3150 BUG_ON((preempt_count() & PREEMPT_MASK) >= PREEMPT_MASK-10); 3152 DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >= PREEMPT_MASK-10);
3151} 3153}
3152EXPORT_SYMBOL(add_preempt_count); 3154EXPORT_SYMBOL(add_preempt_count);
3153 3155
@@ -3156,11 +3158,15 @@ void fastcall sub_preempt_count(int val)
3156 /* 3158 /*
3157 * Underflow? 3159 * Underflow?
3158 */ 3160 */
3159 BUG_ON(val > preempt_count()); 3161 if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
3162 return;
3160 /* 3163 /*
3161 * Is the spinlock portion underflowing? 3164 * Is the spinlock portion underflowing?
3162 */ 3165 */
3163 BUG_ON((val < PREEMPT_MASK) && !(preempt_count() & PREEMPT_MASK)); 3166 if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
3167 !(preempt_count() & PREEMPT_MASK)))
3168 return;
3169
3164 preempt_count() -= val; 3170 preempt_count() -= val;
3165} 3171}
3166EXPORT_SYMBOL(sub_preempt_count); 3172EXPORT_SYMBOL(sub_preempt_count);
@@ -4690,7 +4696,7 @@ void show_state(void)
4690 } while_each_thread(g, p); 4696 } while_each_thread(g, p);
4691 4697
4692 read_unlock(&tasklist_lock); 4698 read_unlock(&tasklist_lock);
4693 mutex_debug_show_all_locks(); 4699 debug_show_all_locks();
4694} 4700}
4695 4701
4696/** 4702/**