path: root/kernel/locking
author     James Morris <james.l.morris@oracle.com>	2017-07-24 20:44:18 -0400
committer  James Morris <james.l.morris@oracle.com>	2017-07-24 20:44:18 -0400
commit     53a2ebaaabc1eb8458796fec3bc1e0e80746b642 (patch)
tree       9d1f9227b49392cdd2edcc01057517da4f4b09c2 /kernel/locking
parent     3cf29931453215536916d0c4da953fce1911ced3 (diff)
parent     520eccdfe187591a51ea9ab4c1a024ae4d0f68d9 (diff)

    sync to Linus v4.13-rc2 for subsystem developers to work against
Diffstat (limited to 'kernel/locking')
-rw-r--r--  kernel/locking/lockdep.c              176
-rw-r--r--  kernel/locking/mutex.c                  6
-rw-r--r--  kernel/locking/qrwlock.c                1
-rw-r--r--  kernel/locking/qspinlock.c              1
-rw-r--r--  kernel/locking/qspinlock_paravirt.h     3
-rw-r--r--  kernel/locking/rtmutex-debug.c          6
-rw-r--r--  kernel/locking/rtmutex-debug.h          2
-rw-r--r--  kernel/locking/rtmutex.c               62
-rw-r--r--  kernel/locking/rtmutex.h                2
-rw-r--r--  kernel/locking/rwsem-spinlock.c         4
10 files changed, 150 insertions(+), 113 deletions(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index c0e31bfee25c..7d2499bec5fe 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1157,18 +1157,18 @@ print_circular_bug_header(struct lock_list *entry, unsigned int depth,
 	if (debug_locks_silent)
 		return 0;
 
-	printk("\n");
+	pr_warn("\n");
 	pr_warn("======================================================\n");
 	pr_warn("WARNING: possible circular locking dependency detected\n");
 	print_kernel_ident();
 	pr_warn("------------------------------------------------------\n");
-	printk("%s/%d is trying to acquire lock:\n",
+	pr_warn("%s/%d is trying to acquire lock:\n",
 		curr->comm, task_pid_nr(curr));
 	print_lock(check_src);
-	printk("\nbut task is already holding lock:\n");
+	pr_warn("\nbut task is already holding lock:\n");
 	print_lock(check_tgt);
-	printk("\nwhich lock already depends on the new lock.\n\n");
-	printk("\nthe existing dependency chain (in reverse order) is:\n");
+	pr_warn("\nwhich lock already depends on the new lock.\n\n");
+	pr_warn("\nthe existing dependency chain (in reverse order) is:\n");
 
 	print_circular_bug_entry(entry, depth);
 
@@ -1495,13 +1495,13 @@ print_bad_irq_dependency(struct task_struct *curr,
 	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
 		return 0;
 
-	printk("\n");
+	pr_warn("\n");
 	pr_warn("=====================================================\n");
 	pr_warn("WARNING: %s-safe -> %s-unsafe lock order detected\n",
 		irqclass, irqclass);
 	print_kernel_ident();
 	pr_warn("-----------------------------------------------------\n");
-	printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
+	pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
 		curr->comm, task_pid_nr(curr),
 		curr->hardirq_context, hardirq_count() >> HARDIRQ_SHIFT,
 		curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT,
@@ -1509,46 +1509,46 @@ print_bad_irq_dependency(struct task_struct *curr,
 		curr->softirqs_enabled);
 	print_lock(next);
 
-	printk("\nand this task is already holding:\n");
+	pr_warn("\nand this task is already holding:\n");
 	print_lock(prev);
-	printk("which would create a new lock dependency:\n");
+	pr_warn("which would create a new lock dependency:\n");
 	print_lock_name(hlock_class(prev));
-	printk(KERN_CONT " ->");
+	pr_cont(" ->");
 	print_lock_name(hlock_class(next));
-	printk(KERN_CONT "\n");
+	pr_cont("\n");
 
-	printk("\nbut this new dependency connects a %s-irq-safe lock:\n",
+	pr_warn("\nbut this new dependency connects a %s-irq-safe lock:\n",
 		irqclass);
 	print_lock_name(backwards_entry->class);
-	printk("\n... which became %s-irq-safe at:\n", irqclass);
+	pr_warn("\n... which became %s-irq-safe at:\n", irqclass);
 
 	print_stack_trace(backwards_entry->class->usage_traces + bit1, 1);
 
-	printk("\nto a %s-irq-unsafe lock:\n", irqclass);
+	pr_warn("\nto a %s-irq-unsafe lock:\n", irqclass);
 	print_lock_name(forwards_entry->class);
-	printk("\n... which became %s-irq-unsafe at:\n", irqclass);
-	printk("...");
+	pr_warn("\n... which became %s-irq-unsafe at:\n", irqclass);
+	pr_warn("...");
 
 	print_stack_trace(forwards_entry->class->usage_traces + bit2, 1);
 
-	printk("\nother info that might help us debug this:\n\n");
+	pr_warn("\nother info that might help us debug this:\n\n");
 	print_irq_lock_scenario(backwards_entry, forwards_entry,
 				hlock_class(prev), hlock_class(next));
 
 	lockdep_print_held_locks(curr);
 
-	printk("\nthe dependencies between %s-irq-safe lock and the holding lock:\n", irqclass);
+	pr_warn("\nthe dependencies between %s-irq-safe lock and the holding lock:\n", irqclass);
 	if (!save_trace(&prev_root->trace))
 		return 0;
 	print_shortest_lock_dependencies(backwards_entry, prev_root);
 
-	printk("\nthe dependencies between the lock to be acquired");
-	printk(" and %s-irq-unsafe lock:\n", irqclass);
+	pr_warn("\nthe dependencies between the lock to be acquired");
+	pr_warn(" and %s-irq-unsafe lock:\n", irqclass);
 	if (!save_trace(&next_root->trace))
 		return 0;
 	print_shortest_lock_dependencies(forwards_entry, next_root);
 
-	printk("\nstack backtrace:\n");
+	pr_warn("\nstack backtrace:\n");
 	dump_stack();
 
 	return 0;
@@ -1724,22 +1724,22 @@ print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
 	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
 		return 0;
 
-	printk("\n");
+	pr_warn("\n");
 	pr_warn("============================================\n");
 	pr_warn("WARNING: possible recursive locking detected\n");
 	print_kernel_ident();
 	pr_warn("--------------------------------------------\n");
-	printk("%s/%d is trying to acquire lock:\n",
+	pr_warn("%s/%d is trying to acquire lock:\n",
 		curr->comm, task_pid_nr(curr));
 	print_lock(next);
-	printk("\nbut task is already holding lock:\n");
+	pr_warn("\nbut task is already holding lock:\n");
 	print_lock(prev);
 
-	printk("\nother info that might help us debug this:\n");
+	pr_warn("\nother info that might help us debug this:\n");
 	print_deadlock_scenario(next, prev);
 	lockdep_print_held_locks(curr);
 
-	printk("\nstack backtrace:\n");
+	pr_warn("\nstack backtrace:\n");
 	dump_stack();
 
 	return 0;
@@ -2074,21 +2074,21 @@ static void print_collision(struct task_struct *curr,
 			struct held_lock *hlock_next,
 			struct lock_chain *chain)
 {
-	printk("\n");
+	pr_warn("\n");
 	pr_warn("============================\n");
 	pr_warn("WARNING: chain_key collision\n");
 	print_kernel_ident();
 	pr_warn("----------------------------\n");
-	printk("%s/%d: ", current->comm, task_pid_nr(current));
-	printk("Hash chain already cached but the contents don't match!\n");
+	pr_warn("%s/%d: ", current->comm, task_pid_nr(current));
+	pr_warn("Hash chain already cached but the contents don't match!\n");
 
-	printk("Held locks:");
+	pr_warn("Held locks:");
 	print_chain_keys_held_locks(curr, hlock_next);
 
-	printk("Locks in cached chain:");
+	pr_warn("Locks in cached chain:");
 	print_chain_keys_chain(chain);
 
-	printk("\nstack backtrace:\n");
+	pr_warn("\nstack backtrace:\n");
 	dump_stack();
 }
 #endif
@@ -2373,16 +2373,16 @@ print_usage_bug(struct task_struct *curr, struct held_lock *this,
 	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
 		return 0;
 
-	printk("\n");
+	pr_warn("\n");
 	pr_warn("================================\n");
 	pr_warn("WARNING: inconsistent lock state\n");
 	print_kernel_ident();
 	pr_warn("--------------------------------\n");
 
-	printk("inconsistent {%s} -> {%s} usage.\n",
+	pr_warn("inconsistent {%s} -> {%s} usage.\n",
 		usage_str[prev_bit], usage_str[new_bit]);
 
-	printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n",
+	pr_warn("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n",
 		curr->comm, task_pid_nr(curr),
 		trace_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT,
 		trace_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT,
@@ -2390,16 +2390,16 @@ print_usage_bug(struct task_struct *curr, struct held_lock *this,
 		trace_softirqs_enabled(curr));
 	print_lock(this);
 
-	printk("{%s} state was registered at:\n", usage_str[prev_bit]);
+	pr_warn("{%s} state was registered at:\n", usage_str[prev_bit]);
 	print_stack_trace(hlock_class(this)->usage_traces + prev_bit, 1);
 
 	print_irqtrace_events(curr);
-	printk("\nother info that might help us debug this:\n");
+	pr_warn("\nother info that might help us debug this:\n");
 	print_usage_bug_scenario(this);
 
 	lockdep_print_held_locks(curr);
 
-	printk("\nstack backtrace:\n");
+	pr_warn("\nstack backtrace:\n");
 	dump_stack();
 
 	return 0;
@@ -2438,28 +2438,28 @@ print_irq_inversion_bug(struct task_struct *curr,
 	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
 		return 0;
 
-	printk("\n");
+	pr_warn("\n");
 	pr_warn("========================================================\n");
 	pr_warn("WARNING: possible irq lock inversion dependency detected\n");
 	print_kernel_ident();
 	pr_warn("--------------------------------------------------------\n");
-	printk("%s/%d just changed the state of lock:\n",
+	pr_warn("%s/%d just changed the state of lock:\n",
 		curr->comm, task_pid_nr(curr));
 	print_lock(this);
 	if (forwards)
-		printk("but this lock took another, %s-unsafe lock in the past:\n", irqclass);
+		pr_warn("but this lock took another, %s-unsafe lock in the past:\n", irqclass);
 	else
-		printk("but this lock was taken by another, %s-safe lock in the past:\n", irqclass);
+		pr_warn("but this lock was taken by another, %s-safe lock in the past:\n", irqclass);
 	print_lock_name(other->class);
-	printk("\n\nand interrupts could create inverse lock ordering between them.\n\n");
+	pr_warn("\n\nand interrupts could create inverse lock ordering between them.\n\n");
 
-	printk("\nother info that might help us debug this:\n");
+	pr_warn("\nother info that might help us debug this:\n");
 
 	/* Find a middle lock (if one exists) */
 	depth = get_lock_depth(other);
 	do {
 		if (depth == 0 && (entry != root)) {
-			printk("lockdep:%s bad path found in chain graph\n", __func__);
+			pr_warn("lockdep:%s bad path found in chain graph\n", __func__);
 			break;
 		}
 		middle = entry;
@@ -2475,12 +2475,12 @@ print_irq_inversion_bug(struct task_struct *curr,
 
 	lockdep_print_held_locks(curr);
 
-	printk("\nthe shortest dependencies between 2nd lock and 1st lock:\n");
+	pr_warn("\nthe shortest dependencies between 2nd lock and 1st lock:\n");
 	if (!save_trace(&root->trace))
 		return 0;
 	print_shortest_lock_dependencies(other, root);
 
-	printk("\nstack backtrace:\n");
+	pr_warn("\nstack backtrace:\n");
 	dump_stack();
 
 	return 0;
@@ -3189,25 +3189,25 @@ print_lock_nested_lock_not_held(struct task_struct *curr,
 	if (debug_locks_silent)
 		return 0;
 
-	printk("\n");
+	pr_warn("\n");
 	pr_warn("==================================\n");
 	pr_warn("WARNING: Nested lock was not taken\n");
 	print_kernel_ident();
 	pr_warn("----------------------------------\n");
 
-	printk("%s/%d is trying to lock:\n", curr->comm, task_pid_nr(curr));
+	pr_warn("%s/%d is trying to lock:\n", curr->comm, task_pid_nr(curr));
 	print_lock(hlock);
 
-	printk("\nbut this task is not holding:\n");
-	printk("%s\n", hlock->nest_lock->name);
+	pr_warn("\nbut this task is not holding:\n");
+	pr_warn("%s\n", hlock->nest_lock->name);
 
-	printk("\nstack backtrace:\n");
+	pr_warn("\nstack backtrace:\n");
 	dump_stack();
 
-	printk("\nother info that might help us debug this:\n");
+	pr_warn("\nother info that might help us debug this:\n");
 	lockdep_print_held_locks(curr);
 
-	printk("\nstack backtrace:\n");
+	pr_warn("\nstack backtrace:\n");
 	dump_stack();
 
 	return 0;
@@ -3402,21 +3402,21 @@ print_unlock_imbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
 	if (debug_locks_silent)
 		return 0;
 
-	printk("\n");
+	pr_warn("\n");
 	pr_warn("=====================================\n");
 	pr_warn("WARNING: bad unlock balance detected!\n");
 	print_kernel_ident();
 	pr_warn("-------------------------------------\n");
-	printk("%s/%d is trying to release lock (",
+	pr_warn("%s/%d is trying to release lock (",
 		curr->comm, task_pid_nr(curr));
 	print_lockdep_cache(lock);
-	printk(KERN_CONT ") at:\n");
+	pr_cont(") at:\n");
 	print_ip_sym(ip);
-	printk("but there are no more locks to release!\n");
-	printk("\nother info that might help us debug this:\n");
+	pr_warn("but there are no more locks to release!\n");
+	pr_warn("\nother info that might help us debug this:\n");
 	lockdep_print_held_locks(curr);
 
-	printk("\nstack backtrace:\n");
+	pr_warn("\nstack backtrace:\n");
 	dump_stack();
 
 	return 0;
@@ -3974,21 +3974,21 @@ print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock,
 	if (debug_locks_silent)
 		return 0;
 
-	printk("\n");
+	pr_warn("\n");
 	pr_warn("=================================\n");
 	pr_warn("WARNING: bad contention detected!\n");
 	print_kernel_ident();
 	pr_warn("---------------------------------\n");
-	printk("%s/%d is trying to contend lock (",
+	pr_warn("%s/%d is trying to contend lock (",
 		curr->comm, task_pid_nr(curr));
 	print_lockdep_cache(lock);
-	printk(KERN_CONT ") at:\n");
+	pr_cont(") at:\n");
 	print_ip_sym(ip);
-	printk("but there are no locks held!\n");
-	printk("\nother info that might help us debug this:\n");
+	pr_warn("but there are no locks held!\n");
+	pr_warn("\nother info that might help us debug this:\n");
 	lockdep_print_held_locks(curr);
 
-	printk("\nstack backtrace:\n");
+	pr_warn("\nstack backtrace:\n");
 	dump_stack();
 
 	return 0;
@@ -4318,17 +4318,17 @@ print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
 	if (debug_locks_silent)
 		return;
 
-	printk("\n");
+	pr_warn("\n");
 	pr_warn("=========================\n");
 	pr_warn("WARNING: held lock freed!\n");
 	print_kernel_ident();
 	pr_warn("-------------------------\n");
-	printk("%s/%d is freeing memory %p-%p, with a lock still held there!\n",
+	pr_warn("%s/%d is freeing memory %p-%p, with a lock still held there!\n",
 		curr->comm, task_pid_nr(curr), mem_from, mem_to-1);
 	print_lock(hlock);
 	lockdep_print_held_locks(curr);
 
-	printk("\nstack backtrace:\n");
+	pr_warn("\nstack backtrace:\n");
 	dump_stack();
 }
 
@@ -4376,14 +4376,14 @@ static void print_held_locks_bug(void)
 	if (debug_locks_silent)
 		return;
 
-	printk("\n");
+	pr_warn("\n");
 	pr_warn("====================================\n");
 	pr_warn("WARNING: %s/%d still has locks held!\n",
 	       current->comm, task_pid_nr(current));
 	print_kernel_ident();
 	pr_warn("------------------------------------\n");
 	lockdep_print_held_locks(current);
-	printk("\nstack backtrace:\n");
+	pr_warn("\nstack backtrace:\n");
 	dump_stack();
 }
 
@@ -4402,10 +4402,10 @@ void debug_show_all_locks(void)
 	int unlock = 1;
 
 	if (unlikely(!debug_locks)) {
-		printk("INFO: lockdep is turned off.\n");
+		pr_warn("INFO: lockdep is turned off.\n");
 		return;
 	}
-	printk("\nShowing all locks held in the system:\n");
+	pr_warn("\nShowing all locks held in the system:\n");
 
 	/*
 	 * Here we try to get the tasklist_lock as hard as possible,
@@ -4416,18 +4416,18 @@ void debug_show_all_locks(void)
 retry:
 	if (!read_trylock(&tasklist_lock)) {
 		if (count == 10)
-			printk("hm, tasklist_lock locked, retrying... ");
+			pr_warn("hm, tasklist_lock locked, retrying... ");
 		if (count) {
 			count--;
-			printk(" #%d", 10-count);
+			pr_cont(" #%d", 10-count);
 			mdelay(200);
 			goto retry;
 		}
-		printk(" ignoring it.\n");
+		pr_cont(" ignoring it.\n");
 		unlock = 0;
 	} else {
 		if (count != 10)
-			printk(KERN_CONT " locked it.\n");
+			pr_cont(" locked it.\n");
 	}
 
 	do_each_thread(g, p) {
@@ -4445,7 +4445,7 @@ retry:
 		unlock = 1;
 	} while_each_thread(g, p);
 
-	printk("\n");
+	pr_warn("\n");
 	pr_warn("=============================================\n\n");
 
 	if (unlock)
@@ -4475,12 +4475,12 @@ asmlinkage __visible void lockdep_sys_exit(void)
 	if (unlikely(curr->lockdep_depth)) {
 		if (!debug_locks_off())
 			return;
-		printk("\n");
+		pr_warn("\n");
 		pr_warn("================================================\n");
 		pr_warn("WARNING: lock held when returning to user space!\n");
 		print_kernel_ident();
 		pr_warn("------------------------------------------------\n");
-		printk("%s/%d is leaving the kernel with locks still held!\n",
+		pr_warn("%s/%d is leaving the kernel with locks still held!\n",
 			curr->comm, curr->pid);
 		lockdep_print_held_locks(curr);
 	}
@@ -4490,19 +4490,15 @@ void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
 {
 	struct task_struct *curr = current;
 
-#ifndef CONFIG_PROVE_RCU_REPEATEDLY
-	if (!debug_locks_off())
-		return;
-#endif /* #ifdef CONFIG_PROVE_RCU_REPEATEDLY */
 	/* Note: the following can be executed concurrently, so be careful. */
-	printk("\n");
+	pr_warn("\n");
 	pr_warn("=============================\n");
 	pr_warn("WARNING: suspicious RCU usage\n");
 	print_kernel_ident();
 	pr_warn("-----------------------------\n");
-	printk("%s:%d %s!\n", file, line, s);
-	printk("\nother info that might help us debug this:\n\n");
-	printk("\n%srcu_scheduler_active = %d, debug_locks = %d\n",
+	pr_warn("%s:%d %s!\n", file, line, s);
+	pr_warn("\nother info that might help us debug this:\n\n");
+	pr_warn("\n%srcu_scheduler_active = %d, debug_locks = %d\n",
 		!rcu_lockdep_current_cpu_online()
 			? "RCU used illegally from offline CPU!\n"
 			: !rcu_is_watching()
@@ -4529,10 +4525,10 @@ void lockdep_rcu_suspicious(const char *file, const int line, const char *s)
 	 * rcu_read_lock_bh() and so on from extended quiescent states.
 	 */
 	if (!rcu_is_watching())
-		printk("RCU used illegally from extended quiescent state!\n");
+		pr_warn("RCU used illegally from extended quiescent state!\n");
 
 	lockdep_print_held_locks(curr);
-	printk("\nstack backtrace:\n");
+	pr_warn("\nstack backtrace:\n");
 	dump_stack();
 }
 EXPORT_SYMBOL_GPL(lockdep_rcu_suspicious);
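The lockdep.c changes above are mostly mechanical: bare printk() calls, which can land at a different loglevel than the surrounding lines and split a splat across records, become pr_warn(), and KERN_CONT continuations become pr_cont(), so each report is emitted at one consistent loglevel. As a minimal sketch for orientation (not part of this diff), the wrappers in include/linux/printk.h reduce to:

	/* pr_warn() tags the record with KERN_WARNING plus the pr_fmt() prefix. */
	#define pr_warn(fmt, ...) \
		printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)

	/* pr_cont() appends to the previous record instead of starting a new one. */
	#define pr_cont(fmt, ...) \
		printk(KERN_CONT fmt, ##__VA_ARGS__)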
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 198527a62149..858a07590e39 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -227,9 +227,9 @@ static void __sched __mutex_lock_slowpath(struct mutex *lock);
  * (or statically defined) before it can be locked. memset()-ing
  * the mutex to 0 is not allowed.
  *
- * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
+ * (The CONFIG_DEBUG_MUTEXES .config option turns on debugging
  * checks that will enforce the restrictions and will also do
- * deadlock debugging. )
+ * deadlock debugging)
  *
  * This function is similar to (but not equivalent to) down().
  */
diff --git a/kernel/locking/qrwlock.c b/kernel/locking/qrwlock.c
index cc3ed0ccdfa2..2655f26ec882 100644
--- a/kernel/locking/qrwlock.c
+++ b/kernel/locking/qrwlock.c
@@ -20,6 +20,7 @@
20#include <linux/cpumask.h> 20#include <linux/cpumask.h>
21#include <linux/percpu.h> 21#include <linux/percpu.h>
22#include <linux/hardirq.h> 22#include <linux/hardirq.h>
23#include <linux/spinlock.h>
23#include <asm/qrwlock.h> 24#include <asm/qrwlock.h>
24 25
25/* 26/*
diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index b2caec7315af..fd24153e8a48 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -28,6 +28,7 @@
28#include <linux/percpu.h> 28#include <linux/percpu.h>
29#include <linux/hardirq.h> 29#include <linux/hardirq.h>
30#include <linux/mutex.h> 30#include <linux/mutex.h>
31#include <linux/prefetch.h>
31#include <asm/byteorder.h> 32#include <asm/byteorder.h>
32#include <asm/qspinlock.h> 33#include <asm/qspinlock.h>
33 34
diff --git a/kernel/locking/qspinlock_paravirt.h b/kernel/locking/qspinlock_paravirt.h
index e6b2f7ad3e51..4ccfcaae5b89 100644
--- a/kernel/locking/qspinlock_paravirt.h
+++ b/kernel/locking/qspinlock_paravirt.h
@@ -193,7 +193,8 @@ void __init __pv_init_lock_hash(void)
 	 */
 	pv_lock_hash = alloc_large_system_hash("PV qspinlock",
 					       sizeof(struct pv_hash_entry),
-					       pv_hash_size, 0, HASH_EARLY,
+					       pv_hash_size, 0,
+					       HASH_EARLY | HASH_ZERO,
 					       &pv_lock_hash_bits, NULL,
 					       pv_hash_size, pv_hash_size);
 }
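HASH_ZERO is new in v4.13: it asks alloc_large_system_hash() to hand back an already-zeroed table, replacing open-coded zeroing in callers that needed it. A hedged sketch of the caller-side pattern, using a hypothetical "example" table (names are illustrative, not this file's code):

	/* Old pattern: allocate, then zero by hand. */
	table = alloc_large_system_hash("example", sizeof(*table), nentries,
					0, HASH_EARLY, &bits, NULL,
					nentries, nentries);
	memset(table, 0, nentries * sizeof(*table));

	/* v4.13+ pattern: the allocator zeroes the table for us. */
	table = alloc_large_system_hash("example", sizeof(*table), nentries,
					0, HASH_EARLY | HASH_ZERO, &bits, NULL,
					nentries, nentries);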
diff --git a/kernel/locking/rtmutex-debug.c b/kernel/locking/rtmutex-debug.c
index 58e366ad36f4..ac35e648b0e5 100644
--- a/kernel/locking/rtmutex-debug.c
+++ b/kernel/locking/rtmutex-debug.c
@@ -166,12 +166,16 @@ void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter)
 	memset(waiter, 0x22, sizeof(*waiter));
 }
 
-void debug_rt_mutex_init(struct rt_mutex *lock, const char *name)
+void debug_rt_mutex_init(struct rt_mutex *lock, const char *name, struct lock_class_key *key)
 {
 	/*
 	 * Make sure we are not reinitializing a held lock:
 	 */
 	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
 	lock->name = name;
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	lockdep_init_map(&lock->dep_map, name, key, 0);
+#endif
 }
 
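The new key parameter is forwarded to lockdep_init_map(), which registers the lock with a lockdep class derived from (name, key). For reference, its declaration in include/linux/lockdep.h is (shown here for orientation):

	/* Associate a lockdep map with the class identified by (name, key). */
	extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
				     struct lock_class_key *key, int subclass);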
diff --git a/kernel/locking/rtmutex-debug.h b/kernel/locking/rtmutex-debug.h
index b585af9a1b50..5078c6ddf4a5 100644
--- a/kernel/locking/rtmutex-debug.h
+++ b/kernel/locking/rtmutex-debug.h
@@ -11,7 +11,7 @@
11 11
12extern void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter); 12extern void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter);
13extern void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter); 13extern void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter);
14extern void debug_rt_mutex_init(struct rt_mutex *lock, const char *name); 14extern void debug_rt_mutex_init(struct rt_mutex *lock, const char *name, struct lock_class_key *key);
15extern void debug_rt_mutex_lock(struct rt_mutex *lock); 15extern void debug_rt_mutex_lock(struct rt_mutex *lock);
16extern void debug_rt_mutex_unlock(struct rt_mutex *lock); 16extern void debug_rt_mutex_unlock(struct rt_mutex *lock);
17extern void debug_rt_mutex_proxy_lock(struct rt_mutex *lock, 17extern void debug_rt_mutex_proxy_lock(struct rt_mutex *lock,
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index b95509416909..649dc9d3951a 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -963,7 +963,6 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 		return -EDEADLK;
 
 	raw_spin_lock(&task->pi_lock);
-	rt_mutex_adjust_prio(task);
 	waiter->task = task;
 	waiter->lock = lock;
 	waiter->prio = task->prio;
@@ -1481,6 +1480,7 @@ void __sched rt_mutex_lock(struct rt_mutex *lock)
 {
 	might_sleep();
 
+	mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
 	rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock);
 }
 EXPORT_SYMBOL_GPL(rt_mutex_lock);
@@ -1496,9 +1496,16 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock);
  */
 int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
 {
+	int ret;
+
 	might_sleep();
 
-	return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, rt_mutex_slowlock);
+	mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+	ret = rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, rt_mutex_slowlock);
+	if (ret)
+		mutex_release(&lock->dep_map, 1, _RET_IP_);
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
 
@@ -1526,11 +1533,18 @@ int __sched rt_mutex_futex_trylock(struct rt_mutex *lock)
 int
 rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout)
 {
+	int ret;
+
 	might_sleep();
 
-	return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
+	mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+	ret = rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
 				       RT_MUTEX_MIN_CHAINWALK,
 				       rt_mutex_slowlock);
+	if (ret)
+		mutex_release(&lock->dep_map, 1, _RET_IP_);
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
 
@@ -1547,10 +1561,16 @@ EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
  */
 int __sched rt_mutex_trylock(struct rt_mutex *lock)
 {
+	int ret;
+
 	if (WARN_ON_ONCE(in_irq() || in_nmi() || in_serving_softirq()))
 		return 0;
 
-	return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
+	ret = rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
+	if (ret)
+		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(rt_mutex_trylock);
 
@@ -1561,6 +1581,7 @@ EXPORT_SYMBOL_GPL(rt_mutex_trylock);
  */
 void __sched rt_mutex_unlock(struct rt_mutex *lock)
 {
+	mutex_release(&lock->dep_map, 1, _RET_IP_);
 	rt_mutex_fastunlock(lock, rt_mutex_slowunlock);
 }
 EXPORT_SYMBOL_GPL(rt_mutex_unlock);
@@ -1620,7 +1641,6 @@ void rt_mutex_destroy(struct rt_mutex *lock)
 	lock->magic = NULL;
 #endif
 }
-
 EXPORT_SYMBOL_GPL(rt_mutex_destroy);
 
 /**
@@ -1632,14 +1652,16 @@ EXPORT_SYMBOL_GPL(rt_mutex_destroy);
  *
  * Initializing of a locked rt lock is not allowed
  */
-void __rt_mutex_init(struct rt_mutex *lock, const char *name)
+void __rt_mutex_init(struct rt_mutex *lock, const char *name,
+		     struct lock_class_key *key)
 {
 	lock->owner = NULL;
 	raw_spin_lock_init(&lock->wait_lock);
 	lock->waiters = RB_ROOT;
 	lock->waiters_leftmost = NULL;
 
-	debug_rt_mutex_init(lock, name);
+	if (name && key)
+		debug_rt_mutex_init(lock, name, key);
 }
 EXPORT_SYMBOL_GPL(__rt_mutex_init);
 
@@ -1660,7 +1682,7 @@ EXPORT_SYMBOL_GPL(__rt_mutex_init);
 void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
 				struct task_struct *proxy_owner)
 {
-	__rt_mutex_init(lock, NULL);
+	__rt_mutex_init(lock, NULL, NULL);
 	debug_rt_mutex_proxy_lock(lock, proxy_owner);
 	rt_mutex_set_owner(lock, proxy_owner);
 }
@@ -1785,12 +1807,14 @@ int rt_mutex_wait_proxy_lock(struct rt_mutex *lock,
 	int ret;
 
 	raw_spin_lock_irq(&lock->wait_lock);
-
-	set_current_state(TASK_INTERRUPTIBLE);
-
 	/* sleep on the mutex */
+	set_current_state(TASK_INTERRUPTIBLE);
 	ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);
-
+	/*
+	 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
+	 * have to fix that up.
+	 */
+	fixup_rt_mutex_waiters(lock);
 	raw_spin_unlock_irq(&lock->wait_lock);
 
 	return ret;
@@ -1822,15 +1846,25 @@ bool rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock,
 
 	raw_spin_lock_irq(&lock->wait_lock);
 	/*
+	 * Do an unconditional try-lock, this deals with the lock stealing
+	 * state where __rt_mutex_futex_unlock() -> mark_wakeup_next_waiter()
+	 * sets a NULL owner.
+	 *
+	 * We're not interested in the return value, because the subsequent
+	 * test on rt_mutex_owner() will infer that. If the trylock succeeded,
+	 * we will own the lock and it will have removed the waiter. If we
+	 * failed the trylock, we're still not owner and we need to remove
+	 * ourselves.
+	 */
+	try_to_take_rt_mutex(lock, current, waiter);
+	/*
 	 * Unless we're the owner; we're still enqueued on the wait_list.
 	 * So check if we became owner, if not, take us off the wait_list.
 	 */
 	if (rt_mutex_owner(lock) != current) {
 		remove_waiter(lock, waiter);
-		fixup_rt_mutex_waiters(lock);
 		cleanup = true;
 	}
-
 	/*
 	 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
 	 * have to fix that up.
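Taken together, the rtmutex.c hunks wire rt-mutexes into lockdep: lock and unlock paths are annotated with mutex_acquire()/mutex_release(), and __rt_mutex_init() now threads a struct lock_class_key through to debug_rt_mutex_init(), giving each static init site its own lockdep class. A sketch of how a caller-side init macro can supply that key (the real macro lives in include/linux/rtmutex.h and may differ in detail):

	/* Each expansion declares a distinct static key, hence a distinct
	 * lockdep class per rt_mutex_init() call site. */
	#define rt_mutex_init(mutex)					\
	do {								\
		static struct lock_class_key __key;			\
		__rt_mutex_init(mutex, #mutex, &__key);			\
	} while (0)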
diff --git a/kernel/locking/rtmutex.h b/kernel/locking/rtmutex.h
index 6607802efa8b..5c253caffe91 100644
--- a/kernel/locking/rtmutex.h
+++ b/kernel/locking/rtmutex.h
@@ -17,7 +17,7 @@
17#define debug_rt_mutex_proxy_lock(l,p) do { } while (0) 17#define debug_rt_mutex_proxy_lock(l,p) do { } while (0)
18#define debug_rt_mutex_proxy_unlock(l) do { } while (0) 18#define debug_rt_mutex_proxy_unlock(l) do { } while (0)
19#define debug_rt_mutex_unlock(l) do { } while (0) 19#define debug_rt_mutex_unlock(l) do { } while (0)
20#define debug_rt_mutex_init(m, n) do { } while (0) 20#define debug_rt_mutex_init(m, n, k) do { } while (0)
21#define debug_rt_mutex_deadlock(d, a ,l) do { } while (0) 21#define debug_rt_mutex_deadlock(d, a ,l) do { } while (0)
22#define debug_rt_mutex_print_deadlock(w) do { } while (0) 22#define debug_rt_mutex_print_deadlock(w) do { } while (0)
23#define debug_rt_mutex_reset_waiter(w) do { } while (0) 23#define debug_rt_mutex_reset_waiter(w) do { } while (0)
diff --git a/kernel/locking/rwsem-spinlock.c b/kernel/locking/rwsem-spinlock.c
index c65f7989f850..20819df98125 100644
--- a/kernel/locking/rwsem-spinlock.c
+++ b/kernel/locking/rwsem-spinlock.c
@@ -231,8 +231,8 @@ int __sched __down_write_common(struct rw_semaphore *sem, int state)
 
 out_nolock:
 	list_del(&waiter.list);
-	if (!list_empty(&sem->wait_list))
-		__rwsem_do_wake(sem, 1);
+	if (!list_empty(&sem->wait_list) && sem->count >= 0)
+		__rwsem_do_wake(sem, 0);
 	raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
 
 	return -EINTR;
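The rwsem-spinlock.c change narrows the -EINTR unwind of an interrupted writer: remaining waiters are woken only when no writer holds the semaphore (sem->count >= 0), and __rwsem_do_wake() is told not to grant write access from this error path. A hypothetical snippet that can reach this unwind via the killable write-lock API (names are illustrative only):

	#include <linux/rwsem.h>

	static DECLARE_RWSEM(example_sem);	/* hypothetical semaphore */

	static int example_write_section(void)
	{
		/* Returns nonzero (-EINTR) if a fatal signal arrives while
		 * queued; that failure runs the out_nolock unwind above. */
		if (down_write_killable(&example_sem))
			return -EINTR;
		/* ... modify state protected by example_sem ... */
		up_write(&example_sem);
		return 0;
	}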