Diffstat (limited to 'kernel/lockdep.c'):

 kernel/lockdep.c | 70 +++++++++++++++++++++++++++++++++++++++++-----------------------------
 1 file changed, 41 insertions(+), 29 deletions(-)
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 63437d065ac8..8c24294e477f 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -2468,6 +2468,9 @@ mark_held_locks(struct task_struct *curr, enum mark_type mark)
 
 		BUG_ON(usage_bit >= LOCK_USAGE_STATES);
 
+		if (hlock_class(hlock)->key == __lockdep_no_validate__.subkeys)
+			continue;
+
 		if (!mark_lock(curr, hlock, usage_bit))
 			return 0;
 	}
@@ -2478,34 +2481,13 @@ mark_held_locks(struct task_struct *curr, enum mark_type mark)
 /*
  * Hardirqs will be enabled:
  */
-void trace_hardirqs_on_caller(unsigned long ip)
+static void __trace_hardirqs_on_caller(unsigned long ip)
 {
 	struct task_struct *curr = current;
 
-	time_hardirqs_on(CALLER_ADDR0, ip);
-
-	if (unlikely(!debug_locks || current->lockdep_recursion))
-		return;
-
-	if (DEBUG_LOCKS_WARN_ON(unlikely(early_boot_irqs_disabled)))
-		return;
-
-	if (unlikely(curr->hardirqs_enabled)) {
-		/*
-		 * Neither irq nor preemption are disabled here
-		 * so this is racy by nature but losing one hit
-		 * in a stat is not a big deal.
-		 */
-		__debug_atomic_inc(redundant_hardirqs_on);
-		return;
-	}
 	/* we'll do an OFF -> ON transition: */
 	curr->hardirqs_enabled = 1;
 
-	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
-		return;
-	if (DEBUG_LOCKS_WARN_ON(current->hardirq_context))
-		return;
 	/*
 	 * We are going to turn hardirqs on, so set the
 	 * usage bit for all held locks:
@@ -2525,6 +2507,37 @@ void trace_hardirqs_on_caller(unsigned long ip)
 	curr->hardirq_enable_event = ++curr->irq_events;
 	debug_atomic_inc(hardirqs_on_events);
 }
+
+void trace_hardirqs_on_caller(unsigned long ip)
+{
+	time_hardirqs_on(CALLER_ADDR0, ip);
+
+	if (unlikely(!debug_locks || current->lockdep_recursion))
+		return;
+
+	if (unlikely(current->hardirqs_enabled)) {
+		/*
+		 * Neither irq nor preemption are disabled here
+		 * so this is racy by nature but losing one hit
+		 * in a stat is not a big deal.
+		 */
+		__debug_atomic_inc(redundant_hardirqs_on);
+		return;
+	}
+
+	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
+		return;
+
+	if (DEBUG_LOCKS_WARN_ON(unlikely(early_boot_irqs_disabled)))
+		return;
+
+	if (DEBUG_LOCKS_WARN_ON(current->hardirq_context))
+		return;
+
+	current->lockdep_recursion = 1;
+	__trace_hardirqs_on_caller(ip);
+	current->lockdep_recursion = 0;
+}
 EXPORT_SYMBOL(trace_hardirqs_on_caller);
 
 void trace_hardirqs_on(void)
@@ -2574,7 +2587,7 @@ void trace_softirqs_on(unsigned long ip)
 {
 	struct task_struct *curr = current;
 
-	if (unlikely(!debug_locks))
+	if (unlikely(!debug_locks || current->lockdep_recursion))
 		return;
 
 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
@@ -2585,6 +2598,7 @@ void trace_softirqs_on(unsigned long ip)
 		return;
 	}
 
+	current->lockdep_recursion = 1;
 	/*
 	 * We'll do an OFF -> ON transition:
 	 */
@@ -2599,6 +2613,7 @@ void trace_softirqs_on(unsigned long ip)
 	 */
 	if (curr->hardirqs_enabled)
 		mark_held_locks(curr, SOFTIRQ);
+	current->lockdep_recursion = 0;
 }
 
 /*
@@ -2608,7 +2623,7 @@ void trace_softirqs_off(unsigned long ip)
 {
 	struct task_struct *curr = current;
 
-	if (unlikely(!debug_locks))
+	if (unlikely(!debug_locks || current->lockdep_recursion))
 		return;
 
 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
@@ -2859,10 +2874,7 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
 void lockdep_init_map(struct lockdep_map *lock, const char *name,
 		      struct lock_class_key *key, int subclass)
 {
-	int i;
-
-	for (i = 0; i < NR_LOCKDEP_CACHING_CLASSES; i++)
-		lock->class_cache[i] = NULL;
+	memset(lock, 0, sizeof(*lock));
 
 #ifdef CONFIG_LOCK_STAT
 	lock->cpu = raw_smp_processor_id();
@@ -3426,7 +3438,7 @@ int lock_is_held(struct lockdep_map *lock)
 	int ret = 0;
 
 	if (unlikely(current->lockdep_recursion))
-		return ret;
+		return 1; /* avoid false negative lockdep_assert_held() */
 
 	raw_local_irq_save(flags);
 	check_flags(flags);
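
Note: the change above applies a common recursion-guard pattern. The exported tracing entry points bail out when current->lockdep_recursion is already set, and set it around the internal helpers (__trace_hardirqs_on_caller(), mark_held_locks()), so that any locking done while tracing cannot re-enter lockdep. A minimal userspace sketch of that pattern, using a thread-local flag and made-up names (trace_event / __do_trace_event) purely for illustration, not the kernel's API:

#include <stdio.h>

/* Stand-in for current->lockdep_recursion: one flag per thread. */
static _Thread_local int trace_recursion;

/* The real work; anything called from here might call trace_event() again. */
static void __do_trace_event(const char *what)
{
	printf("tracing: %s\n", what);
}

/* Guarded entry point: a nested call returns immediately. */
void trace_event(const char *what)
{
	if (trace_recursion)
		return;

	trace_recursion = 1;
	__do_trace_event(what);
	trace_recursion = 0;
}

int main(void)
{
	trace_event("hardirqs on");
	return 0;
}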