| field | value | date |
|---|---|---|
| author | Peter Zijlstra <a.p.zijlstra@chello.nl> | 2007-07-19 04:48:53 -0400 |
| committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-07-19 13:04:49 -0400 |
| commit | ca58abcb4a6d52ee2db1b1130cea3ca2a76677b9 | |
| tree | ad35a81d318a39cb33f28fe09c4374ec90b118ed | |
| parent | 21f8ca3bf6198bd21e3c4cc820af2ccf753a6ec8 | |
lockdep: sanitise CONFIG_PROVE_LOCKING
Ensure that all of the lock dependency tracking code is under
CONFIG_PROVE_LOCKING. This allows us to use the held lock tracking code for
other purposes.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Ingo Molnar <mingo@elte.hu>
Acked-by: Jason Baron <jbaron@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
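The split is easy to see in miniature. In the sketch below (hypothetical names and sizes, not the kernel code itself), the held-lock bookkeeping that other lockdep clients can reuse stays unconditionally compiled, while everything only the dependency prover needs is fenced behind CONFIG_PROVE_LOCKING:

```c
/* Illustrative sketch only; all names here are hypothetical stand-ins. */
#define MAX_HELD 48                      /* stand-in for MAX_LOCK_DEPTH */

struct held_lock_stub {
	const void *instance;            /* the lock object */
	unsigned long acquire_ip;        /* call site that took it */
};

/* Always compiled: plain held-lock tracking, reusable by other clients. */
static struct held_lock_stub held[MAX_HELD];
static unsigned int held_depth;

static void track_acquire(const void *lock, unsigned long ip)
{
	if (held_depth < MAX_HELD) {
		held[held_depth].instance = lock;
		held[held_depth].acquire_ip = ip;
		held_depth++;
	}
}

#ifdef CONFIG_PROVE_LOCKING
/* Only the dependency prover needs graph construction and cycle checks. */
static int validate_new_dependency(const void *prev, const void *next)
{
	/* ... record prev -> next and search the graph for a cycle ... */
	return prev != next;
}
#endif

int main(void)
{
	int a, b;

	track_acquire(&a, 0x1001);
	track_acquire(&b, 0x1002);
#ifdef CONFIG_PROVE_LOCKING
	return !validate_new_dependency(&a, &b);
#else
	return held_depth != 2;          /* tracking works without the prover */
#endif
}
```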
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | kernel/lockdep.c | 13 |
| -rw-r--r-- | kernel/spinlock.c | 4 |

2 files changed, 14 insertions, 3 deletions
```diff
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index edba2ffb43de..05c1261791f4 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -95,6 +95,7 @@ static int lockdep_initialized;
 unsigned long nr_list_entries;
 static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
 
+#ifdef CONFIG_PROVE_LOCKING
 /*
  * Allocate a lockdep entry. (assumes the graph_lock held, returns
  * with NULL on failure)
@@ -111,6 +112,7 @@ static struct lock_list *alloc_list_entry(void)
 	}
 	return list_entries + nr_list_entries++;
 }
+#endif
 
 /*
  * All data structures here are protected by the global debug_lock.
@@ -140,7 +142,9 @@ LIST_HEAD(all_lock_classes);
 static struct list_head classhash_table[CLASSHASH_SIZE];
 
 unsigned long nr_lock_chains;
+#ifdef CONFIG_PROVE_LOCKING
 static struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
+#endif
 
 /*
  * We put the lock dependency chains into a hash-table as well, to cache
@@ -482,6 +486,7 @@ static void print_lock_dependencies(struct lock_class *class, int depth)
 	}
 }
 
+#ifdef CONFIG_PROVE_LOCKING
 /*
  * Add a new dependency to the head of the list:
  */
@@ -541,6 +546,7 @@ print_circular_bug_entry(struct lock_list *target, unsigned int depth)
 
 	return 0;
 }
+#endif
 
 static void print_kernel_version(void)
 {
@@ -549,6 +555,7 @@ static void print_kernel_version(void)
 		init_utsname()->version);
 }
 
+#ifdef CONFIG_PROVE_LOCKING
 /*
  * When a circular dependency is detected, print the
  * header first:
@@ -639,6 +646,7 @@ check_noncircular(struct lock_class *source, unsigned int depth)
 	}
 	return 1;
 }
+#endif
 
 static int very_verbose(struct lock_class *class)
 {
@@ -823,6 +831,7 @@ check_usage(struct task_struct *curr, struct held_lock *prev,
 
 #endif
 
+#ifdef CONFIG_PROVE_LOCKING
 static int
 print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
 		   struct held_lock *next)
@@ -1087,7 +1096,7 @@ out_bug:
 
 	return 0;
 }
-
+#endif
 
 /*
  * Is this the address of a static object:
@@ -1307,6 +1316,7 @@ out_unlock_set:
 	return class;
 }
 
+#ifdef CONFIG_PROVE_LOCKING
 /*
  * Look up a dependency chain. If the key is not present yet then
  * add it and return 1 - in this case the new dependency chain is
@@ -1381,6 +1391,7 @@ cache_hit:
 
 	return 1;
 }
+#endif
 
 /*
  * We are building curr_chain_key incrementally, so double-check
```
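Each `#ifdef`/`#endif` pair above brackets prover-only machinery: list-entry allocation, dependency-list insertion, circular-dependency reporting, deadlock checking, and the chain-cache lookup. One concrete payoff is that the static graph storage vanishes when the prover is configured out. The following self-contained toy (not kernel code; the array size and names are hypothetical) shows the effect when built with and without `-DCONFIG_PROVE_LOCKING`:

```c
#include <stdio.h>

#define MAX_ENTRIES 16384                /* hypothetical; not MAX_LOCKDEP_ENTRIES */

struct list_entry_stub { void *from, *to; };

#ifdef CONFIG_PROVE_LOCKING
static struct list_entry_stub entries[MAX_ENTRIES];
static unsigned long nr_entries;

/* Mirrors alloc_list_entry()'s contract: NULL once the static pool runs out. */
static struct list_entry_stub *alloc_entry(void)
{
	if (nr_entries >= MAX_ENTRIES)
		return NULL;
	return entries + nr_entries++;
}
#endif

int main(void)
{
#ifdef CONFIG_PROVE_LOCKING
	printf("prover built in: %zu bytes of graph storage, first entry %p\n",
	       sizeof(entries), (void *)alloc_entry());
#else
	printf("prover configured out: no graph storage compiled in\n");
#endif
	return 0;
}
```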
```diff
diff --git a/kernel/spinlock.c b/kernel/spinlock.c
index 2c6c2bf85514..cd93bfe3f10d 100644
--- a/kernel/spinlock.c
+++ b/kernel/spinlock.c
@@ -88,7 +88,7 @@ unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
 	 * _raw_spin_lock_flags() code, because lockdep assumes
 	 * that interrupts are not re-enabled during lock-acquire:
 	 */
-#ifdef CONFIG_PROVE_LOCKING
+#ifdef CONFIG_LOCKDEP
 	_raw_spin_lock(lock);
 #else
 	_raw_spin_lock_flags(lock, &flags);
@@ -305,7 +305,7 @@ unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclass)
 	 * _raw_spin_lock_flags() code, because lockdep assumes
 	 * that interrupts are not re-enabled during lock-acquire:
 	 */
-#ifdef CONFIG_PROVE_SPIN_LOCKING
+#ifdef CONFIG_LOCKDEP
 	_raw_spin_lock(lock);
 #else
 	_raw_spin_lock_flags(lock, &flags);
```
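The spinlock.c hunks widen the guard from the prover to lockdep as a whole: the constraint the in-code comment describes (interrupts must stay disabled while spinning for the lock) comes from lockdep's IRQ-state tracking, which is active whenever CONFIG_LOCKDEP is set, not only when dependencies are being proved. The second hunk additionally replaces a stale `CONFIG_PROVE_SPIN_LOCKING` spelling in `_spin_lock_irqsave_nested()`. A simplified sketch of the resulting shape of `_spin_lock_irqsave()` follows; it is condensed from the era's kernel/spinlock.c and needs the kernel's headers, so it is not buildable stand-alone:

```c
unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
{
	unsigned long flags;

	local_irq_save(flags);
	preempt_disable();
	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
	/*
	 * Any lockdep build must avoid _raw_spin_lock_flags(), which may
	 * transiently re-enable interrupts while waiting for the lock:
	 */
#ifdef CONFIG_LOCKDEP
	_raw_spin_lock(lock);
#else
	_raw_spin_lock_flags(lock, &flags);
#endif
	return flags;
}
```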
