Diffstat (limited to 'kernel/lockdep.c')
-rw-r--r--  kernel/lockdep.c  47
1 files changed, 17 insertions, 30 deletions
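This change replaces lockdep's individual global atomic_t statistics counters with a single per-CPU struct lockdep_stats, so debug_atomic_inc()/debug_atomic_dec() now take the counter's field name rather than a pointer to an atomic_t. The structure and helpers themselves live outside this file and are not part of this diff; the sketch below is only a rough guess at their shape, inferred from the new call sites. The field list mirrors the removed atomic_t declarations, and DECLARE_PER_CPU/this_cpu_inc are the standard kernel per-CPU primitives; the actual definitions (presumably in kernel/lockdep_internals.h) may differ.

/*
 * Sketch (assumption, not shown in this diff): per-CPU stats block and
 * field-name-based increment/decrement helpers assumed by the new call sites.
 */
struct lockdep_stats {
	int	chain_lookup_hits;
	int	chain_lookup_misses;
	int	hardirqs_on_events;
	int	hardirqs_off_events;
	int	redundant_hardirqs_on;
	int	redundant_hardirqs_off;
	int	softirqs_on_events;
	int	softirqs_off_events;
	int	redundant_softirqs_on;
	int	redundant_softirqs_off;
	int	nr_unused_locks;
	int	nr_cyclic_checks;
	int	nr_find_usage_forwards_checks;
	int	nr_find_usage_backwards_checks;
};

DECLARE_PER_CPU(struct lockdep_stats, lockdep_stats);

/* The macros now take the field name, not a pointer to an atomic_t. */
#define debug_atomic_inc(field)	this_cpu_inc(lockdep_stats.field)
#define debug_atomic_dec(field)	this_cpu_dec(lockdep_stats.field)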
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 2594e1ce41cb..78325f8f1139 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -431,20 +431,7 @@ static struct stack_trace lockdep_init_trace = {
 /*
  * Various lockdep statistics:
  */
-atomic_t chain_lookup_hits;
-atomic_t chain_lookup_misses;
-atomic_t hardirqs_on_events;
-atomic_t hardirqs_off_events;
-atomic_t redundant_hardirqs_on;
-atomic_t redundant_hardirqs_off;
-atomic_t softirqs_on_events;
-atomic_t softirqs_off_events;
-atomic_t redundant_softirqs_on;
-atomic_t redundant_softirqs_off;
-atomic_t nr_unused_locks;
-atomic_t nr_cyclic_checks;
-atomic_t nr_find_usage_forwards_checks;
-atomic_t nr_find_usage_backwards_checks;
+DEFINE_PER_CPU(struct lockdep_stats, lockdep_stats);
 #endif
 
 /*
@@ -748,7 +735,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 		return NULL;
 	}
 	class = lock_classes + nr_lock_classes++;
-	debug_atomic_inc(&nr_unused_locks);
+	debug_atomic_inc(nr_unused_locks);
 	class->key = key;
 	class->name = lock->name;
 	class->subclass = subclass;
@@ -1205,7 +1192,7 @@ check_noncircular(struct lock_list *root, struct lock_class *target,
 {
 	int result;
 
-	debug_atomic_inc(&nr_cyclic_checks);
+	debug_atomic_inc(nr_cyclic_checks);
 
 	result = __bfs_forwards(root, target, class_equal, target_entry);
 
@@ -1242,7 +1229,7 @@ find_usage_forwards(struct lock_list *root, enum lock_usage_bit bit,
 {
 	int result;
 
-	debug_atomic_inc(&nr_find_usage_forwards_checks);
+	debug_atomic_inc(nr_find_usage_forwards_checks);
 
 	result = __bfs_forwards(root, (void *)bit, usage_match, target_entry);
 
@@ -1265,7 +1252,7 @@ find_usage_backwards(struct lock_list *root, enum lock_usage_bit bit,
 {
 	int result;
 
-	debug_atomic_inc(&nr_find_usage_backwards_checks);
+	debug_atomic_inc(nr_find_usage_backwards_checks);
 
 	result = __bfs_backwards(root, (void *)bit, usage_match, target_entry);
 
@@ -1825,7 +1812,7 @@ static inline int lookup_chain_cache(struct task_struct *curr,
 	list_for_each_entry(chain, hash_head, entry) {
 		if (chain->chain_key == chain_key) {
 cache_hit:
-			debug_atomic_inc(&chain_lookup_hits);
+			debug_atomic_inc(chain_lookup_hits);
 			if (very_verbose(class))
 				printk("\nhash chain already cached, key: "
 					"%016Lx tail class: [%p] %s\n",
@@ -1890,7 +1877,7 @@ cache_hit:
 		chain_hlocks[chain->base + j] = class - lock_classes;
 	}
 	list_add_tail_rcu(&chain->entry, hash_head);
-	debug_atomic_inc(&chain_lookup_misses);
+	debug_atomic_inc(chain_lookup_misses);
 	inc_chains();
 
 	return 1;
@@ -2311,7 +2298,7 @@ void trace_hardirqs_on_caller(unsigned long ip)
 		return;
 
 	if (unlikely(curr->hardirqs_enabled)) {
-		debug_atomic_inc(&redundant_hardirqs_on);
+		debug_atomic_inc(redundant_hardirqs_on);
 		return;
 	}
 	/* we'll do an OFF -> ON transition: */
@@ -2338,7 +2325,7 @@ void trace_hardirqs_on_caller(unsigned long ip)
 
 	curr->hardirq_enable_ip = ip;
 	curr->hardirq_enable_event = ++curr->irq_events;
-	debug_atomic_inc(&hardirqs_on_events);
+	debug_atomic_inc(hardirqs_on_events);
 }
 EXPORT_SYMBOL(trace_hardirqs_on_caller);
 
@@ -2370,9 +2357,9 @@ void trace_hardirqs_off_caller(unsigned long ip)
 		curr->hardirqs_enabled = 0;
 		curr->hardirq_disable_ip = ip;
 		curr->hardirq_disable_event = ++curr->irq_events;
-		debug_atomic_inc(&hardirqs_off_events);
+		debug_atomic_inc(hardirqs_off_events);
 	} else
-		debug_atomic_inc(&redundant_hardirqs_off);
+		debug_atomic_inc(redundant_hardirqs_off);
 }
 EXPORT_SYMBOL(trace_hardirqs_off_caller);
 
@@ -2396,7 +2383,7 @@ void trace_softirqs_on(unsigned long ip)
 		return;
 
 	if (curr->softirqs_enabled) {
-		debug_atomic_inc(&redundant_softirqs_on);
+		debug_atomic_inc(redundant_softirqs_on);
 		return;
 	}
 
@@ -2406,7 +2393,7 @@ void trace_softirqs_on(unsigned long ip)
 	curr->softirqs_enabled = 1;
 	curr->softirq_enable_ip = ip;
 	curr->softirq_enable_event = ++curr->irq_events;
-	debug_atomic_inc(&softirqs_on_events);
+	debug_atomic_inc(softirqs_on_events);
 	/*
 	 * We are going to turn softirqs on, so set the
 	 * usage bit for all held locks, if hardirqs are
@@ -2436,10 +2423,10 @@ void trace_softirqs_off(unsigned long ip)
 		curr->softirqs_enabled = 0;
 		curr->softirq_disable_ip = ip;
 		curr->softirq_disable_event = ++curr->irq_events;
-		debug_atomic_inc(&softirqs_off_events);
+		debug_atomic_inc(softirqs_off_events);
 		DEBUG_LOCKS_WARN_ON(!softirq_count());
 	} else
-		debug_atomic_inc(&redundant_softirqs_off);
+		debug_atomic_inc(redundant_softirqs_off);
 }
 
 static void __lockdep_trace_alloc(gfp_t gfp_mask, unsigned long flags)
@@ -2644,7 +2631,7 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
 			return 0;
 		break;
 	case LOCK_USED:
-		debug_atomic_dec(&nr_unused_locks);
+		debug_atomic_dec(nr_unused_locks);
 		break;
 	default:
 		if (!debug_locks_off_graph_unlock())
@@ -2750,7 +2737,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 		if (!class)
 			return 0;
 	}
-	debug_atomic_inc((atomic_t *)&class->ops);
+	atomic_inc((atomic_t *)&class->ops);
 	if (very_verbose(class)) {
 		printk("\nacquire class [%p] %s", class->key, class->name);
 		if (class->name_version > 1)
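Note that class->ops in the last hunk becomes a plain atomic_inc(): it is a per-lock-class counter rather than one of the global statistics, so it presumably has no slot in the per-CPU stats block. For the counters that did move, a reader (not part of this diff, presumably kernel/lockdep_proc.c) would now have to sum each field over all CPUs instead of reading a single atomic_t. A minimal sketch of such a helper, assuming the per-CPU variable sketched above:

/*
 * Sketch (assumption): aggregate a per-CPU statistics field for reporting.
 * Each CPU increments its own copy, so the read side sums all of them.
 */
#define debug_atomic_read(field)	({				\
	unsigned long long __total = 0;					\
	int __cpu;							\
	for_each_possible_cpu(__cpu)					\
		__total += per_cpu(lockdep_stats, __cpu).field;		\
	__total;							\
})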