author		Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2017-05-04 18:44:38 -0400
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2017-06-08 21:52:36 -0400
commit		681fbec881dea1848e9246d7d1ecb3b97f11026d
tree		20b5fcdc6739b3790c718f846be284c15e9315e6
parent		2464dd940e23bad227c387a40eec99f7aa02ed96
lockdep: Use consistent printing primitives
Commit a5dd63efda3d ("lockdep: Use "WARNING" tag on lockdep splats")
substituted pr_warn() for printk() in places called out by Dmitry Vyukov.
However, this resulted in an ugly mix of pr_warn() and printk(). This
commit therefore changes printk() to pr_warn() or pr_cont(), depending
on the absence or presence of KERN_CONT. This is done in all functions
that had printk() changed to pr_warn() by the aforementioned commit.
Reported-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
-rw-r--r--	kernel/locking/lockdep.c	172
1 file changed, 86 insertions, 86 deletions
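
[Editor's note, for illustration only: the conversion rule the commit message describes maps a printk() that starts a new message to pr_warn(), and a printk(KERN_CONT ...) that continues the previous line to pr_cont(). The sketch below is not part of the patch; the helper name and message text are hypothetical.

#include <linux/printk.h>

/* Hypothetical helper, shown only to illustrate the printk() -> pr_*() mapping. */
static void example_report_dependency(const char *prev, const char *next)
{
	/* was: printk("new dependency: ");        */
	pr_warn("new dependency: ");	/* starts a new warning-level message */
	/* was: printk(KERN_CONT "%s -> ", prev);  */
	pr_cont("%s -> ", prev);	/* continues the line just started */
	/* was: printk(KERN_CONT "%s\n", next);    */
	pr_cont("%s\n", next);		/* terminates the line */
}

End of editor's note.]
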
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index c0e31bfee25c..cceb9534338a 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1157,18 +1157,18 @@ print_circular_bug_header(struct lock_list *entry, unsigned int depth,
 	if (debug_locks_silent)
 		return 0;
 
-	printk("\n");
+	pr_warn("\n");
 	pr_warn("======================================================\n");
 	pr_warn("WARNING: possible circular locking dependency detected\n");
 	print_kernel_ident();
 	pr_warn("------------------------------------------------------\n");
-	printk("%s/%d is trying to acquire lock:\n",
+	pr_warn("%s/%d is trying to acquire lock:\n",
 		curr->comm, task_pid_nr(curr));
 	print_lock(check_src);
-	printk("\nbut task is already holding lock:\n");
+	pr_warn("\nbut task is already holding lock:\n");
 	print_lock(check_tgt);
-	printk("\nwhich lock already depends on the new lock.\n\n");
-	printk("\nthe existing dependency chain (in reverse order) is:\n");
+	pr_warn("\nwhich lock already depends on the new lock.\n\n");
+	pr_warn("\nthe existing dependency chain (in reverse order) is:\n");
 
 	print_circular_bug_entry(entry, depth);
 
@@ -1495,13 +1495,13 @@ print_bad_irq_dependency(struct task_struct *curr,
 	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
 		return 0;
 
-	printk("\n");
+	pr_warn("\n");
 	pr_warn("=====================================================\n");
 	pr_warn("WARNING: %s-safe -> %s-unsafe lock order detected\n",
 		irqclass, irqclass);
 	print_kernel_ident();
 	pr_warn("-----------------------------------------------------\n");
-	printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
+	pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
 		curr->comm, task_pid_nr(curr),
 		curr->hardirq_context, hardirq_count() >> HARDIRQ_SHIFT,
 		curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT,
@@ -1509,46 +1509,46 @@ print_bad_irq_dependency(struct task_struct *curr,
 		curr->softirqs_enabled);
 	print_lock(next);
 
-	printk("\nand this task is already holding:\n");
+	pr_warn("\nand this task is already holding:\n");
 	print_lock(prev);
-	printk("which would create a new lock dependency:\n");
+	pr_warn("which would create a new lock dependency:\n");
 	print_lock_name(hlock_class(prev));
-	printk(KERN_CONT " ->");
+	pr_cont(" ->");
 	print_lock_name(hlock_class(next));
-	printk(KERN_CONT "\n");
+	pr_cont("\n");
 
-	printk("\nbut this new dependency connects a %s-irq-safe lock:\n",
+	pr_warn("\nbut this new dependency connects a %s-irq-safe lock:\n",
 		irqclass);
 	print_lock_name(backwards_entry->class);
-	printk("\n... which became %s-irq-safe at:\n", irqclass);
+	pr_warn("\n... which became %s-irq-safe at:\n", irqclass);
 
 	print_stack_trace(backwards_entry->class->usage_traces + bit1, 1);
 
-	printk("\nto a %s-irq-unsafe lock:\n", irqclass);
+	pr_warn("\nto a %s-irq-unsafe lock:\n", irqclass);
 	print_lock_name(forwards_entry->class);
-	printk("\n... which became %s-irq-unsafe at:\n", irqclass);
-	printk("...");
+	pr_warn("\n... which became %s-irq-unsafe at:\n", irqclass);
+	pr_warn("...");
 
 	print_stack_trace(forwards_entry->class->usage_traces + bit2, 1);
 
-	printk("\nother info that might help us debug this:\n\n");
+	pr_warn("\nother info that might help us debug this:\n\n");
 	print_irq_lock_scenario(backwards_entry, forwards_entry,
 				hlock_class(prev), hlock_class(next));
 
 	lockdep_print_held_locks(curr);
 
-	printk("\nthe dependencies between %s-irq-safe lock and the holding lock:\n", irqclass);
+	pr_warn("\nthe dependencies between %s-irq-safe lock and the holding lock:\n", irqclass);
 	if (!save_trace(&prev_root->trace))
 		return 0;
 	print_shortest_lock_dependencies(backwards_entry, prev_root);
 
-	printk("\nthe dependencies between the lock to be acquired");
-	printk(" and %s-irq-unsafe lock:\n", irqclass);
+	pr_warn("\nthe dependencies between the lock to be acquired");
+	pr_warn(" and %s-irq-unsafe lock:\n", irqclass);
 	if (!save_trace(&next_root->trace))
 		return 0;
 	print_shortest_lock_dependencies(forwards_entry, next_root);
 
-	printk("\nstack backtrace:\n");
+	pr_warn("\nstack backtrace:\n");
 	dump_stack();
 
 	return 0;
@@ -1724,22 +1724,22 @@ print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
 	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
 		return 0;
 
-	printk("\n");
+	pr_warn("\n");
 	pr_warn("============================================\n");
 	pr_warn("WARNING: possible recursive locking detected\n");
 	print_kernel_ident();
 	pr_warn("--------------------------------------------\n");
-	printk("%s/%d is trying to acquire lock:\n",
+	pr_warn("%s/%d is trying to acquire lock:\n",
 		curr->comm, task_pid_nr(curr));
 	print_lock(next);
-	printk("\nbut task is already holding lock:\n");
+	pr_warn("\nbut task is already holding lock:\n");
 	print_lock(prev);
 
-	printk("\nother info that might help us debug this:\n");
+	pr_warn("\nother info that might help us debug this:\n");
 	print_deadlock_scenario(next, prev);
 	lockdep_print_held_locks(curr);
 
-	printk("\nstack backtrace:\n");
+	pr_warn("\nstack backtrace:\n");
 	dump_stack();
 
 	return 0;
@@ -2074,21 +2074,21 @@ static void print_collision(struct task_struct *curr,
 			struct held_lock *hlock_next,
 			struct lock_chain *chain)
 {
-	printk("\n");
+	pr_warn("\n");
 	pr_warn("============================\n");
 	pr_warn("WARNING: chain_key collision\n");
 	print_kernel_ident();
 	pr_warn("----------------------------\n");
-	printk("%s/%d: ", current->comm, task_pid_nr(current));
-	printk("Hash chain already cached but the contents don't match!\n");
+	pr_warn("%s/%d: ", current->comm, task_pid_nr(current));
+	pr_warn("Hash chain already cached but the contents don't match!\n");
 
-	printk("Held locks:");
+	pr_warn("Held locks:");
 	print_chain_keys_held_locks(curr, hlock_next);
 
-	printk("Locks in cached chain:");
+	pr_warn("Locks in cached chain:");
 	print_chain_keys_chain(chain);
 
-	printk("\nstack backtrace:\n");
+	pr_warn("\nstack backtrace:\n");
 	dump_stack();
 }
 #endif
@@ -2373,16 +2373,16 @@ print_usage_bug(struct task_struct *curr, struct held_lock *this,
 	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
 		return 0;
 
-	printk("\n");
+	pr_warn("\n");
 	pr_warn("================================\n");
 	pr_warn("WARNING: inconsistent lock state\n");
 	print_kernel_ident();
 	pr_warn("--------------------------------\n");
 
-	printk("inconsistent {%s} -> {%s} usage.\n",
+	pr_warn("inconsistent {%s} -> {%s} usage.\n",
 		usage_str[prev_bit], usage_str[new_bit]);
 
-	printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n",
+	pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n",
 		curr->comm, task_pid_nr(curr),
 		trace_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT,
 		trace_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT,
@@ -2390,16 +2390,16 @@ print_usage_bug(struct task_struct *curr, struct held_lock *this,
 		trace_softirqs_enabled(curr));
 	print_lock(this);
 
-	printk("{%s} state was registered at:\n", usage_str[prev_bit]);
+	pr_warn("{%s} state was registered at:\n", usage_str[prev_bit]);
 	print_stack_trace(hlock_class(this)->usage_traces + prev_bit, 1);
 
 	print_irqtrace_events(curr);
-	printk("\nother info that might help us debug this:\n");
+	pr_warn("\nother info that might help us debug this:\n");
 	print_usage_bug_scenario(this);
 
 	lockdep_print_held_locks(curr);
 
-	printk("\nstack backtrace:\n");
+	pr_warn("\nstack backtrace:\n");
 	dump_stack();
 
 	return 0;
@@ -2438,28 +2438,28 @@ print_irq_inversion_bug(struct task_struct *curr,
 	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
 		return 0;
 
-	printk("\n");
+	pr_warn("\n");
 	pr_warn("========================================================\n");
 	pr_warn("WARNING: possible irq lock inversion dependency detected\n");
 	print_kernel_ident();
 	pr_warn("--------------------------------------------------------\n");
-	printk("%s/%d just changed the state of lock:\n",
+	pr_warn("%s/%d just changed the state of lock:\n",
 		curr->comm, task_pid_nr(curr));
 	print_lock(this);
 	if (forwards)
-		printk("but this lock took another, %s-unsafe lock in the past:\n", irqclass);
+		pr_warn("but this lock took another, %s-unsafe lock in the past:\n", irqclass);
 	else
-		printk("but this lock was taken by another, %s-safe lock in the past:\n", irqclass);
+		pr_warn("but this lock was taken by another, %s-safe lock in the past:\n", irqclass);
 	print_lock_name(other->class);
-	printk("\n\nand interrupts could create inverse lock ordering between them.\n\n");
+	pr_warn("\n\nand interrupts could create inverse lock ordering between them.\n\n");
 
-	printk("\nother info that might help us debug this:\n");
+	pr_warn("\nother info that might help us debug this:\n");
 
 	/* Find a middle lock (if one exists) */
 	depth = get_lock_depth(other);
 	do {
 		if (depth == 0 && (entry != root)) {
-			printk("lockdep:%s bad path found in chain graph\n", __func__);
+			pr_warn("lockdep:%s bad path found in chain graph\n", __func__);
 			break;
 		}
 		middle = entry;
@@ -2475,12 +2475,12 @@ print_irq_inversion_bug(struct task_struct *curr,
 
 	lockdep_print_held_locks(curr);
 
-	printk("\nthe shortest dependencies between 2nd lock and 1st lock:\n");
+	pr_warn("\nthe shortest dependencies between 2nd lock and 1st lock:\n");
 	if (!save_trace(&root->trace))
 		return 0;
 	print_shortest_lock_dependencies(other, root);
 
-	printk("\nstack backtrace:\n");
+	pr_warn("\nstack backtrace:\n");
 	dump_stack();
 
 	return 0;
@@ -3189,25 +3189,25 @@ print_lock_nested_lock_not_held(struct task_struct *curr,
 	if (debug_locks_silent)
 		return 0;
 
-	printk("\n");
+	pr_warn("\n");
 	pr_warn("==================================\n");
 	pr_warn("WARNING: Nested lock was not taken\n");
 	print_kernel_ident();
 	pr_warn("----------------------------------\n");
 
-	printk("%s/%d is trying to lock:\n", curr->comm, task_pid_nr(curr));
+	pr_warn("%s/%d is trying to lock:\n", curr->comm, task_pid_nr(curr));
 	print_lock(hlock);
 
-	printk("\nbut this task is not holding:\n");
-	printk("%s\n", hlock->nest_lock->name);
+	pr_warn("\nbut this task is not holding:\n");
+	pr_warn("%s\n", hlock->nest_lock->name);
 
-	printk("\nstack backtrace:\n");
+	pr_warn("\nstack backtrace:\n");
 	dump_stack();
 
-	printk("\nother info that might help us debug this:\n");
+	pr_warn("\nother info that might help us debug this:\n");
 	lockdep_print_held_locks(curr);
 
-	printk("\nstack backtrace:\n");
+	pr_warn("\nstack backtrace:\n");
 	dump_stack();
 
 	return 0;
@@ -3402,21 +3402,21 @@ print_unlock_imbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
 	if (debug_locks_silent)
 		return 0;
 
-	printk("\n");
+	pr_warn("\n");
 	pr_warn("=====================================\n");
 	pr_warn("WARNING: bad unlock balance detected!\n");
 	print_kernel_ident();
 	pr_warn("-------------------------------------\n");
-	printk("%s/%d is trying to release lock (",
+	pr_warn("%s/%d is trying to release lock (",
 		curr->comm, task_pid_nr(curr));
 	print_lockdep_cache(lock);
-	printk(KERN_CONT ") at:\n");
+	pr_cont(") at:\n");
 	print_ip_sym(ip);
-	printk("but there are no more locks to release!\n");
-	printk("\nother info that might help us debug this:\n");
+	pr_warn("but there are no more locks to release!\n");
+	pr_warn("\nother info that might help us debug this:\n");
 	lockdep_print_held_locks(curr);
 
-	printk("\nstack backtrace:\n");
+	pr_warn("\nstack backtrace:\n");
 	dump_stack();
 
 	return 0;
@@ -3974,21 +3974,21 @@ print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock,
 	if (debug_locks_silent)
 		return 0;
 
-	printk("\n");
+	pr_warn("\n");
 	pr_warn("=================================\n");
 	pr_warn("WARNING: bad contention detected!\n");
 	print_kernel_ident();
 	pr_warn("---------------------------------\n");
-	printk("%s/%d is trying to contend lock (",
+	pr_warn("%s/%d is trying to contend lock (",
 		curr->comm, task_pid_nr(curr));
 	print_lockdep_cache(lock);
-	printk(KERN_CONT ") at:\n");
+	pr_cont(") at:\n");
 	print_ip_sym(ip);
-	printk("but there are no locks held!\n");
-	printk("\nother info that might help us debug this:\n");
+	pr_warn("but there are no locks held!\n");
+	pr_warn("\nother info that might help us debug this:\n");
 	lockdep_print_held_locks(curr);
 
-	printk("\nstack backtrace:\n");
+	pr_warn("\nstack backtrace:\n");
 	dump_stack();
 
 	return 0;
@@ -4318,17 +4318,17 @@ print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
 	if (debug_locks_silent)
 		return;
 
-	printk("\n");
+	pr_warn("\n");
 	pr_warn("=========================\n");
 	pr_warn("WARNING: held lock freed!\n");
 	print_kernel_ident();
 	pr_warn("-------------------------\n");
-	printk("%s/%d is freeing memory %p-%p, with a lock still held there!\n",
+	pr_warn("%s/%d is freeing memory %p-%p, with a lock still held there!\n",
 		curr->comm, task_pid_nr(curr), mem_from, mem_to-1);
 	print_lock(hlock);
 	lockdep_print_held_locks(curr);
 
-	printk("\nstack backtrace:\n");
+	pr_warn("\nstack backtrace:\n");
 	dump_stack();
 }
 
@@ -4376,14 +4376,14 @@ static void print_held_locks_bug(void)
 	if (debug_locks_silent)
 		return;
 
-	printk("\n");
+	pr_warn("\n");
 	pr_warn("====================================\n");
 	pr_warn("WARNING: %s/%d still has locks held!\n",
 		current->comm, task_pid_nr(current));
 	print_kernel_ident();
 	pr_warn("------------------------------------\n");
 	lockdep_print_held_locks(current);
-	printk("\nstack backtrace:\n");
+	pr_warn("\nstack backtrace:\n");
 	dump_stack();
 }
 
@@ -4402,10 +4402,10 @@ void debug_show_all_locks(void)
 	int unlock = 1;
 
 	if (unlikely(!debug_locks)) {
-		printk("INFO: lockdep is turned off.\n");
+		pr_warn("INFO: lockdep is turned off.\n");
 		return;
 	}
-	printk("\nShowing all locks held in the system:\n");
+	pr_warn("\nShowing all locks held in the system:\n");
 
 	/*
 	 * Here we try to get the tasklist_lock as hard as possible,
@@ -4416,18 +4416,18 @@ void debug_show_all_locks(void)
 retry:
 	if (!read_trylock(&tasklist_lock)) {
 		if (count == 10)
-			printk("hm, tasklist_lock locked, retrying... ");
+			pr_warn("hm, tasklist_lock locked, retrying... ");
 		if (count) {
 			count--;
-			printk(" #%d", 10-count);
+			pr_cont(" #%d", 10-count);
 			mdelay(200);
 			goto retry;
 		}
-		printk(" ignoring it.\n");
+		pr_cont(" ignoring it.\n");
 		unlock = 0;
 	} else {
 		if (count != 10)
-			printk(KERN_CONT " locked it.\n");
+			pr_cont(" locked it.\n");
 	}
 
 	do_each_thread(g, p) {
@@ -4445,7 +4445,7 @@ retry:
 				unlock = 1;
 	} while_each_thread(g, p);
 
-	printk("\n");
+	pr_warn("\n");
 	pr_warn("=============================================\n\n");
 
 	if (unlock)
@@ -4475,12 +4475,12 @@ asmlinkage __visible void lockdep_sys_exit(void)
 	if (unlikely(curr->lockdep_depth)) {
 		if (!debug_locks_off())
 			return;
-		printk("\n");
+		pr_warn("\n");
 		pr_warn("================================================\n");
 		pr_warn("WARNING: lock held when returning to user space!\n");
 		print_kernel_ident();
 		pr_warn("------------------------------------------------\n");
-		printk("%s/%d is leaving the kernel with locks still held!\n",
+		pr_warn("%s/%d is leaving the kernel with locks still held!\n",
 			curr->comm, curr->pid);
 		lockdep_print_held_locks(curr);
 	}
@@ -4495,14 +4495,14 @@ void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
 		return;
 #endif /* #ifdef CONFIG_PROVE_RCU_REPEATEDLY */
 	/* Note: the following can be executed concurrently, so be careful. */
-	printk("\n");
+	pr_warn("\n");
 	pr_warn("=============================\n");
 	pr_warn("WARNING: suspicious RCU usage\n");
 	print_kernel_ident();
 	pr_warn("-----------------------------\n");
-	printk("%s:%d %s!\n", file, line, s);
-	printk("\nother info that might help us debug this:\n\n");
-	printk("\n%srcu_scheduler_active = %d, debug_locks = %d\n",
+	pr_warn("%s:%d %s!\n", file, line, s);
+	pr_warn("\nother info that might help us debug this:\n\n");
+	pr_warn("\n%srcu_scheduler_active = %d, debug_locks = %d\n",
 		!rcu_lockdep_current_cpu_online()
 			? "RCU used illegally from offline CPU!\n"
 			: !rcu_is_watching()
@@ -4529,10 +4529,10 @@ void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
 	 * rcu_read_lock_bh() and so on from extended quiescent states.
 	 */
 	if (!rcu_is_watching())
-		printk("RCU used illegally from extended quiescent state!\n");
+		pr_warn("RCU used illegally from extended quiescent state!\n");
 
 	lockdep_print_held_locks(curr);
-	printk("\nstack backtrace:\n");
+	pr_warn("\nstack backtrace:\n");
 	dump_stack();
 }
 EXPORT_SYMBOL_GPL(lockdep_rcu_suspicious);