path: root/kernel/lockdep.c
author	Ingo Molnar <mingo@elte.hu>	2006-12-13 03:34:43 -0500
committer	Linus Torvalds <torvalds@woody.osdl.org>	2006-12-13 12:05:50 -0500
commit	74c383f1400f559562aa517d6d62f77245bddf52 (patch)
tree	031f6524f0fba31c980bd3d00cc1e6b433d3d2c5 /kernel/lockdep.c
parent	3117df0453828bd045c16244e6f50e5714667a8a (diff)
[PATCH] lockdep: fix possible races while disabling lock-debugging
Jarek Poplawski noticed that lockdep global state could be accessed in a racy way if one CPU did a lockdep assert (shutting lockdep down) while another CPU tried to do something that changes its global state.

This patch fixes those races and cleans up lockdep's internal locking by adding graph_lock()/graph_unlock()/debug_locks_off_graph_unlock() helpers.

(Also note that, as we all know, the Linux kernel is by definition bug-free and perfect, so this code never triggers and these fixes are highly theoretical. I wrote this patch for aesthetic reasons alone.)

[akpm@osdl.org: build fix]
[jarkao2@o2.pl: build fix's refix]

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Jarek Poplawski <jarkao2@o2.pl>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
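To make the pattern concrete, here is a minimal sketch of how a writer to lockdep's global graph state is expected to use the new helpers. example_modify_graph() and its limit_exceeded parameter are hypothetical names used only for illustration; the real call sites (alloc_list_entry(), mark_lock(), register_lock_class(), ...) are updated in the diff below:

static int example_modify_graph(int limit_exceeded)
{
	/*
	 * graph_lock() re-checks debug_locks under lockdep_lock, so if
	 * another CPU has already shut lockdep down (and may still be
	 * printing with the lock dropped), we refuse to touch the graph:
	 */
	if (!graph_lock())
		return 0;

	if (limit_exceeded) {
		/*
		 * Error path: turn debugging off and release the lock in
		 * one step; returns 0 if another CPU beat us to it, in
		 * which case we must not print on top of its report:
		 */
		if (!debug_locks_off_graph_unlock())
			return 0;

		printk("BUG: some lockdep table is too small!\n");
		return 0;
	}

	/* ... modify the dependency graph here ... */

	graph_unlock();
	return 1;
}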
Diffstat (limited to 'kernel/lockdep.c')
-rw-r--r--	kernel/lockdep.c	170
1 file changed, 105 insertions(+), 65 deletions(-)
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 07a3d74a84be..01e750559034 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -43,13 +43,49 @@
 #include "lockdep_internals.h"
 
 /*
- * hash_lock: protects the lockdep hashes and class/list/hash allocators.
+ * lockdep_lock: protects the lockdep graph, the hashes and the
+ * class/list/hash allocators.
  *
  * This is one of the rare exceptions where it's justified
  * to use a raw spinlock - we really dont want the spinlock
- * code to recurse back into the lockdep code.
+ * code to recurse back into the lockdep code...
  */
-static raw_spinlock_t hash_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+static raw_spinlock_t lockdep_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+
+static int graph_lock(void)
+{
+	__raw_spin_lock(&lockdep_lock);
+	/*
+	 * Make sure that if another CPU detected a bug while
+	 * walking the graph we dont change it (while the other
+	 * CPU is busy printing out stuff with the graph lock
+	 * dropped already)
+	 */
+	if (!debug_locks) {
+		__raw_spin_unlock(&lockdep_lock);
+		return 0;
+	}
+	return 1;
+}
+
+static inline int graph_unlock(void)
+{
+	__raw_spin_unlock(&lockdep_lock);
+	return 0;
+}
+
+/*
+ * Turn lock debugging off and return with 0 if it was off already,
+ * and also release the graph lock:
+ */
+static inline int debug_locks_off_graph_unlock(void)
+{
+	int ret = debug_locks_off();
+
+	__raw_spin_unlock(&lockdep_lock);
+
+	return ret;
+}
 
 static int lockdep_initialized;
 
@@ -57,14 +93,15 @@ unsigned long nr_list_entries;
 static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
 
 /*
- * Allocate a lockdep entry. (assumes hash_lock held, returns
+ * Allocate a lockdep entry. (assumes the graph_lock held, returns
  * with NULL on failure)
  */
 static struct lock_list *alloc_list_entry(void)
 {
 	if (nr_list_entries >= MAX_LOCKDEP_ENTRIES) {
-		__raw_spin_unlock(&hash_lock);
-		debug_locks_off();
+		if (!debug_locks_off_graph_unlock())
+			return NULL;
+
 		printk("BUG: MAX_LOCKDEP_ENTRIES too low!\n");
 		printk("turning off the locking correctness validator.\n");
 		return NULL;
@@ -205,7 +242,7 @@ static int softirq_verbose(struct lock_class *class)
 
 /*
  * Stack-trace: tightly packed array of stack backtrace
- * addresses. Protected by the hash_lock.
+ * addresses. Protected by the graph_lock.
  */
 unsigned long nr_stack_trace_entries;
 static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES];
@@ -224,18 +261,15 @@ static int save_trace(struct stack_trace *trace)
 	trace->max_entries = trace->nr_entries;
 
 	nr_stack_trace_entries += trace->nr_entries;
-	if (DEBUG_LOCKS_WARN_ON(nr_stack_trace_entries > MAX_STACK_TRACE_ENTRIES)) {
-		__raw_spin_unlock(&hash_lock);
-		return 0;
-	}
 
 	if (nr_stack_trace_entries == MAX_STACK_TRACE_ENTRIES) {
-		__raw_spin_unlock(&hash_lock);
-		if (debug_locks_off()) {
-			printk("BUG: MAX_STACK_TRACE_ENTRIES too low!\n");
-			printk("turning off the locking correctness validator.\n");
-			dump_stack();
-		}
+		if (!debug_locks_off_graph_unlock())
+			return 0;
+
+		printk("BUG: MAX_STACK_TRACE_ENTRIES too low!\n");
+		printk("turning off the locking correctness validator.\n");
+		dump_stack();
+
 		return 0;
 	}
 
@@ -524,9 +558,7 @@ print_circular_bug_header(struct lock_list *entry, unsigned int depth)
 {
 	struct task_struct *curr = current;
 
-	__raw_spin_unlock(&hash_lock);
-	debug_locks_off();
-	if (debug_locks_silent)
+	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
 		return 0;
 
 	printk("\n=======================================================\n");
@@ -554,12 +586,10 @@ static noinline int print_circular_bug_tail(void)
 	if (debug_locks_silent)
 		return 0;
 
-	/* hash_lock unlocked by the header */
-	__raw_spin_lock(&hash_lock);
 	this.class = check_source->class;
 	if (!save_trace(&this.trace))
 		return 0;
-	__raw_spin_unlock(&hash_lock);
+
 	print_circular_bug_entry(&this, 0);
 
 	printk("\nother info that might help us debug this:\n\n");
@@ -575,8 +605,10 @@ static noinline int print_circular_bug_tail(void)
 
 static int noinline print_infinite_recursion_bug(void)
 {
-	__raw_spin_unlock(&hash_lock);
-	DEBUG_LOCKS_WARN_ON(1);
+	if (!debug_locks_off_graph_unlock())
+		return 0;
+
+	WARN_ON(1);
 
 	return 0;
 }
@@ -711,9 +743,7 @@ print_bad_irq_dependency(struct task_struct *curr,
 			enum lock_usage_bit bit2,
 			const char *irqclass)
 {
-	__raw_spin_unlock(&hash_lock);
-	debug_locks_off();
-	if (debug_locks_silent)
+	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
 		return 0;
 
 	printk("\n======================================================\n");
@@ -794,9 +824,7 @@ static int
 print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
 		   struct held_lock *next)
 {
-	debug_locks_off();
-	__raw_spin_unlock(&hash_lock);
-	if (debug_locks_silent)
+	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
 		return 0;
 
 	printk("\n=============================================\n");
@@ -972,14 +1000,14 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
 	 * Debugging printouts:
 	 */
 	if (verbose(prev->class) || verbose(next->class)) {
-		__raw_spin_unlock(&hash_lock);
+		graph_unlock();
 		printk("\n new dependency: ");
 		print_lock_name(prev->class);
 		printk(" => ");
 		print_lock_name(next->class);
 		printk("\n");
 		dump_stack();
-		__raw_spin_lock(&hash_lock);
+		return graph_lock();
 	}
 	return 1;
 }
@@ -1044,8 +1072,10 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next)
 	}
 	return 1;
 out_bug:
-	__raw_spin_unlock(&hash_lock);
-	DEBUG_LOCKS_WARN_ON(1);
+	if (!debug_locks_off_graph_unlock())
+		return 0;
+
+	WARN_ON(1);
 
 	return 0;
 }
@@ -1199,7 +1229,10 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 	hash_head = classhashentry(key);
 
 	raw_local_irq_save(flags);
-	__raw_spin_lock(&hash_lock);
+	if (!graph_lock()) {
+		raw_local_irq_restore(flags);
+		return NULL;
+	}
 	/*
 	 * We have to do the hash-walk again, to avoid races
 	 * with another CPU:
@@ -1212,9 +1245,12 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 	 * the hash:
 	 */
 	if (nr_lock_classes >= MAX_LOCKDEP_KEYS) {
-		__raw_spin_unlock(&hash_lock);
+		if (!debug_locks_off_graph_unlock()) {
+			raw_local_irq_restore(flags);
+			return NULL;
+		}
 		raw_local_irq_restore(flags);
-		debug_locks_off();
+
 		printk("BUG: MAX_LOCKDEP_KEYS too low!\n");
 		printk("turning off the locking correctness validator.\n");
 		return NULL;
@@ -1235,18 +1271,23 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 	list_add_tail_rcu(&class->hash_entry, hash_head);
 
 	if (verbose(class)) {
-		__raw_spin_unlock(&hash_lock);
+		graph_unlock();
 		raw_local_irq_restore(flags);
+
 		printk("\nnew class %p: %s", class->key, class->name);
 		if (class->name_version > 1)
 			printk("#%d", class->name_version);
 		printk("\n");
 		dump_stack();
+
 		raw_local_irq_save(flags);
-		__raw_spin_lock(&hash_lock);
+		if (!graph_lock()) {
+			raw_local_irq_restore(flags);
+			return NULL;
+		}
 	}
 out_unlock_set:
-	__raw_spin_unlock(&hash_lock);
+	graph_unlock();
 	raw_local_irq_restore(flags);
 
 	if (!subclass || force)
@@ -1287,19 +1328,21 @@ cache_hit:
 	 * Allocate a new chain entry from the static array, and add
 	 * it to the hash:
 	 */
-	__raw_spin_lock(&hash_lock);
+	if (!graph_lock())
+		return 0;
 	/*
 	 * We have to walk the chain again locked - to avoid duplicates:
 	 */
 	list_for_each_entry(chain, hash_head, entry) {
 		if (chain->chain_key == chain_key) {
-			__raw_spin_unlock(&hash_lock);
+			graph_unlock();
 			goto cache_hit;
 		}
 	}
 	if (unlikely(nr_lock_chains >= MAX_LOCKDEP_CHAINS)) {
-		__raw_spin_unlock(&hash_lock);
-		debug_locks_off();
+		if (!debug_locks_off_graph_unlock())
+			return 0;
+
 		printk("BUG: MAX_LOCKDEP_CHAINS too low!\n");
 		printk("turning off the locking correctness validator.\n");
 		return 0;
@@ -1375,9 +1418,7 @@ print_irq_inversion_bug(struct task_struct *curr, struct lock_class *other,
 			struct held_lock *this, int forwards,
 			const char *irqclass)
 {
-	__raw_spin_unlock(&hash_lock);
-	debug_locks_off();
-	if (debug_locks_silent)
+	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
 		return 0;
 
 	printk("\n=========================================================\n");
@@ -1466,9 +1507,7 @@ static int
 print_usage_bug(struct task_struct *curr, struct held_lock *this,
 		enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit)
 {
-	__raw_spin_unlock(&hash_lock);
-	debug_locks_off();
-	if (debug_locks_silent)
+	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
 		return 0;
 
 	printk("\n=================================\n");
@@ -1529,12 +1568,13 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
 	if (likely(this->class->usage_mask & new_mask))
 		return 1;
 
-	__raw_spin_lock(&hash_lock);
+	if (!graph_lock())
+		return 0;
 	/*
 	 * Make sure we didnt race:
 	 */
 	if (unlikely(this->class->usage_mask & new_mask)) {
-		__raw_spin_unlock(&hash_lock);
+		graph_unlock();
 		return 1;
 	}
 
@@ -1720,16 +1760,16 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
 		debug_atomic_dec(&nr_unused_locks);
 		break;
 	default:
-		__raw_spin_unlock(&hash_lock);
-		debug_locks_off();
+		if (!debug_locks_off_graph_unlock())
+			return 0;
 		WARN_ON(1);
 		return 0;
 	}
 
-	__raw_spin_unlock(&hash_lock);
+	graph_unlock();
 
 	/*
-	 * We must printk outside of the hash_lock:
+	 * We must printk outside of the graph_lock:
 	 */
 	if (ret == 2) {
 		printk("\nmarked lock as {%s}:\n", usage_str[new_bit]);
@@ -2127,7 +2167,7 @@ out_calc_hash:
 	 * We look up the chain_key and do the O(N^2) check and update of
 	 * the dependencies only if this is a new dependency chain.
 	 * (If lookup_chain_cache() returns with 1 it acquires
-	 * hash_lock for us)
+	 * graph_lock for us)
 	 */
 	if (!trylock && (check == 2) && lookup_chain_cache(chain_key, class)) {
 		/*
@@ -2160,7 +2200,7 @@ out_calc_hash:
 		if (!chain_head && ret != 2)
 			if (!check_prevs_add(curr, hlock))
 				return 0;
-		__raw_spin_unlock(&hash_lock);
+		graph_unlock();
 	}
 	curr->lockdep_depth++;
 	check_chain_key(curr);
@@ -2472,7 +2512,7 @@ void lockdep_free_key_range(void *start, unsigned long size)
 	int i;
 
 	raw_local_irq_save(flags);
-	__raw_spin_lock(&hash_lock);
+	graph_lock();
 
 	/*
 	 * Unhash all classes that were created by this module:
@@ -2486,7 +2526,7 @@ void lockdep_free_key_range(void *start, unsigned long size)
 			zap_class(class);
 	}
 
-	__raw_spin_unlock(&hash_lock);
+	graph_unlock();
 	raw_local_irq_restore(flags);
 }
 
@@ -2514,20 +2554,20 @@ void lockdep_reset_lock(struct lockdep_map *lock)
 	 * Debug check: in the end all mapped classes should
 	 * be gone.
 	 */
-	__raw_spin_lock(&hash_lock);
+	graph_lock();
 	for (i = 0; i < CLASSHASH_SIZE; i++) {
 		head = classhash_table + i;
 		if (list_empty(head))
 			continue;
 		list_for_each_entry_safe(class, next, head, hash_entry) {
 			if (unlikely(class == lock->class_cache)) {
-				__raw_spin_unlock(&hash_lock);
-				DEBUG_LOCKS_WARN_ON(1);
+				if (debug_locks_off_graph_unlock())
+					WARN_ON(1);
 				goto out_restore;
 			}
 		}
 	}
-	__raw_spin_unlock(&hash_lock);
+	graph_unlock();
 
 out_restore:
 	raw_local_irq_restore(flags);