Diffstat (limited to 'kernel/lockdep.c')
 kernel/lockdep.c | 74 +++++++++++++++++++++++++++++++++++++++++++++++++-------------------------
 1 file changed, 49 insertions(+), 25 deletions(-)
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 298c9276dfd..447960603fb 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -44,6 +44,7 @@
 #include <linux/stringify.h>
 #include <linux/bitops.h>
 #include <linux/gfp.h>
+#include <linux/kmemcheck.h>
 
 #include <asm/sections.h>
 
@@ -2468,6 +2469,9 @@ mark_held_locks(struct task_struct *curr, enum mark_type mark)
 
 		BUG_ON(usage_bit >= LOCK_USAGE_STATES);
 
+		if (hlock_class(hlock)->key == __lockdep_no_validate__.subkeys)
+			continue;
+
 		if (!mark_lock(curr, hlock, usage_bit))
 			return 0;
 	}
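
The check added above skips any held lock whose class key is the shared __lockdep_no_validate__ key. For context, a minimal sketch of how a lock ends up in that class, assuming the lockdep_set_novalidate_class() helper from <linux/lockdep.h>; the dev_lock example itself is illustrative and not part of this patch:

	#include <linux/mutex.h>
	#include <linux/lockdep.h>

	static DEFINE_MUTEX(dev_lock);	/* hypothetical lock, for illustration only */

	static void example_setup(void)
	{
		/*
		 * Map dev_lock onto the __lockdep_no_validate__ class; held-lock
		 * marking (as in mark_held_locks() above) will now skip it.
		 */
		lockdep_set_novalidate_class(&dev_lock);
	}
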
@@ -2478,34 +2482,13 @@ mark_held_locks(struct task_struct *curr, enum mark_type mark)
 /*
  * Hardirqs will be enabled:
  */
-void trace_hardirqs_on_caller(unsigned long ip)
+static void __trace_hardirqs_on_caller(unsigned long ip)
 {
 	struct task_struct *curr = current;
 
-	time_hardirqs_on(CALLER_ADDR0, ip);
-
-	if (unlikely(!debug_locks || current->lockdep_recursion))
-		return;
-
-	if (DEBUG_LOCKS_WARN_ON(unlikely(early_boot_irqs_disabled)))
-		return;
-
-	if (unlikely(curr->hardirqs_enabled)) {
-		/*
-		 * Neither irq nor preemption are disabled here
-		 * so this is racy by nature but losing one hit
-		 * in a stat is not a big deal.
-		 */
-		__debug_atomic_inc(redundant_hardirqs_on);
-		return;
-	}
 	/* we'll do an OFF -> ON transition: */
 	curr->hardirqs_enabled = 1;
 
-	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
-		return;
-	if (DEBUG_LOCKS_WARN_ON(current->hardirq_context))
-		return;
 	/*
 	 * We are going to turn hardirqs on, so set the
 	 * usage bit for all held locks:
@@ -2525,6 +2508,37 @@ void trace_hardirqs_on_caller(unsigned long ip)
 	curr->hardirq_enable_event = ++curr->irq_events;
 	debug_atomic_inc(hardirqs_on_events);
 }
+
+void trace_hardirqs_on_caller(unsigned long ip)
+{
+	time_hardirqs_on(CALLER_ADDR0, ip);
+
+	if (unlikely(!debug_locks || current->lockdep_recursion))
+		return;
+
+	if (unlikely(current->hardirqs_enabled)) {
+		/*
+		 * Neither irq nor preemption are disabled here
+		 * so this is racy by nature but losing one hit
+		 * in a stat is not a big deal.
+		 */
+		__debug_atomic_inc(redundant_hardirqs_on);
+		return;
+	}
+
+	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
+		return;
+
+	if (DEBUG_LOCKS_WARN_ON(unlikely(early_boot_irqs_disabled)))
+		return;
+
+	if (DEBUG_LOCKS_WARN_ON(current->hardirq_context))
+		return;
+
+	current->lockdep_recursion = 1;
+	__trace_hardirqs_on_caller(ip);
+	current->lockdep_recursion = 0;
+}
 EXPORT_SYMBOL(trace_hardirqs_on_caller);
 
 void trace_hardirqs_on(void)
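
The hunk above splits the annotation into a bare worker and a guarded entry point: the sanity checks run first, then current->lockdep_recursion is set around the worker so any traced code reached from inside lockdep bails out at the !debug_locks || lockdep_recursion test instead of re-entering the IRQ-state tracking. For orientation, a simplified sketch of how calls land here, assuming the CONFIG_TRACE_IRQFLAGS definitions of this era (paraphrased, not part of the patch):

	/* include/linux/irqflags.h (simplified): every local_irq_enable() is traced */
	#define local_irq_enable() \
		do { trace_hardirqs_on(); raw_local_irq_enable(); } while (0)

	/* kernel/lockdep.c: the traced hook forwards to the guarded caller above */
	void trace_hardirqs_on(void)
	{
		trace_hardirqs_on_caller(CALLER_ADDR0);
	}

Because anything lockdep itself calls may toggle interrupts and hit these hooks again, the recursion flag is what keeps that from looping back into lockdep.
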
@@ -2574,7 +2588,7 @@ void trace_softirqs_on(unsigned long ip)
 {
 	struct task_struct *curr = current;
 
-	if (unlikely(!debug_locks))
+	if (unlikely(!debug_locks || current->lockdep_recursion))
 		return;
 
 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
@@ -2585,6 +2599,7 @@ void trace_softirqs_on(unsigned long ip)
 		return;
 	}
 
+	current->lockdep_recursion = 1;
 	/*
 	 * We'll do an OFF -> ON transition:
 	 */
@@ -2599,6 +2614,7 @@ void trace_softirqs_on(unsigned long ip)
 	 */
 	if (curr->hardirqs_enabled)
 		mark_held_locks(curr, SOFTIRQ);
+	current->lockdep_recursion = 0;
 }
 
 /*
@@ -2608,7 +2624,7 @@ void trace_softirqs_off(unsigned long ip)
 {
 	struct task_struct *curr = current;
 
-	if (unlikely(!debug_locks))
+	if (unlikely(!debug_locks || current->lockdep_recursion))
 		return;
 
 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
@@ -2861,6 +2877,8 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name,
 {
 	int i;
 
+	kmemcheck_mark_initialized(lock, sizeof(*lock));
+
 	for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
 		lock->class_cache[i] = NULL;
 
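
The annotation added above marks the whole lockdep_map as initialized for kmemcheck before the per-field setup runs, presumably to suppress false "uninitialized memory" reports when the map (or an object embedding it) is later read byte-for-byte. A hedged sketch of the same annotation pattern on an arbitrary object; the struct and function names are made up for illustration:

	#include <linux/kmemcheck.h>

	struct foo {			/* hypothetical object with padding holes */
		char tag;
		unsigned long value;
	};

	static void foo_init(struct foo *f)
	{
		/* claim the whole object, padding included, as initialized ... */
		kmemcheck_mark_initialized(f, sizeof(*f));
		f->tag = 0;
		f->value = 0;
		/* ... so a later whole-struct read or copy does not trip kmemcheck */
	}
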
@@ -3099,7 +3117,13 @@ static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock)
 	if (!class)
 		class = look_up_lock_class(lock, 0);
 
-	if (DEBUG_LOCKS_WARN_ON(!class))
+	/*
+	 * If look_up_lock_class() failed to find a class, we're trying
+	 * to test if we hold a lock that has never yet been acquired.
+	 * Clearly if the lock hasn't been acquired _ever_, we're not
+	 * holding it either, so report failure.
+	 */
+	if (!class)
 		return 0;
 
 	if (DEBUG_LOCKS_WARN_ON(!hlock->nest_lock))
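
match_held_lock() backs lock_is_held(), which callers usually reach through lockdep_assert_held(). A small sketch of such a caller, assuming the lockdep_assert_held() macro from <linux/lockdep.h>; the function and lock names are illustrative:

	#include <linux/spinlock.h>
	#include <linux/lockdep.h>

	static DEFINE_SPINLOCK(state_lock);	/* hypothetical lock */

	static void update_state(void)
	{
		/*
		 * With the change above, querying a lock that has never been
		 * acquired anywhere simply reports "not held" instead of
		 * tripping a lockdep-internal DEBUG_LOCKS_WARN_ON() splat.
		 */
		lockdep_assert_held(&state_lock);
		/* ... modify state that requires state_lock ... */
	}
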