author     Linus Torvalds <torvalds@linux-foundation.org>    2016-12-07 14:27:33 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>    2016-12-07 14:27:33 -0500
commit     5b43f97f3f21c42ba738df2797930e32e05d5a25 (patch)
tree       8d2b757f710c5bc19e6ddcecfc99f6b6706e9976 /kernel
parent     407cf05d46fe59e2becfad3a55387d172f6fd0d0 (diff)
parent     f943fe0faf27991d256e10b5a85f175385c64cdc (diff)
Merge branch 'locking-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull locking fixes from Ingo Molnar:
"Two rtmutex race fixes (which miraculously never triggered, that we
know of), plus two lockdep printk formatting regression fixes"
* 'locking-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
lockdep: Fix report formatting
locking/rtmutex: Use READ_ONCE() in rt_mutex_owner()
locking/rtmutex: Prevent dequeue vs. unlock race
locking/selftest: Fix output since KERN_CONT changes
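
The lockdep and selftest entries above are fallout from the KERN_CONT rework: a printk() fragment no longer continues the previous line unless it is explicitly marked KERN_CONT, so multi-part report lines fell apart. A minimal sketch of the pattern the lockdep.c hunks below apply; the helper name is made up for illustration and is not part of the patch:

	#include <linux/printk.h>

	/* Illustrative only: assemble one report line from several printk() calls. */
	static void example_print_lock_name(const char *name, int subclass)
	{
		printk("lock: (");			/* starts a new line */
		printk(KERN_CONT "%s", name);		/* continues the same line */
		if (subclass)
			printk(KERN_CONT "/%d", subclass);
		printk(KERN_CONT ")\n");		/* the newline ends it */
	}

Without the KERN_CONT markers each fragment is flushed as a separate log line, which is exactly how the lockdep reports and the locking selftest output got scrambled.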
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/locking/lockdep.c        | 111
-rw-r--r--  kernel/locking/rtmutex.c        |  68
-rw-r--r--  kernel/locking/rtmutex_common.h |   5
3 files changed, 126 insertions, 58 deletions
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 589d763a49b3..4d7ffc0a0d00 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -506,13 +506,13 @@ static void __print_lock_name(struct lock_class *class)
 	name = class->name;
 	if (!name) {
 		name = __get_key_name(class->key, str);
-		printk("%s", name);
+		printk(KERN_CONT "%s", name);
 	} else {
-		printk("%s", name);
+		printk(KERN_CONT "%s", name);
 		if (class->name_version > 1)
-			printk("#%d", class->name_version);
+			printk(KERN_CONT "#%d", class->name_version);
 		if (class->subclass)
-			printk("/%d", class->subclass);
+			printk(KERN_CONT "/%d", class->subclass);
 	}
 }
 
@@ -522,9 +522,9 @@ static void print_lock_name(struct lock_class *class)
 
 	get_usage_chars(class, usage);
 
-	printk(" (");
+	printk(KERN_CONT " (");
 	__print_lock_name(class);
-	printk("){%s}", usage);
+	printk(KERN_CONT "){%s}", usage);
 }
 
 static void print_lockdep_cache(struct lockdep_map *lock)
@@ -536,7 +536,7 @@ static void print_lockdep_cache(struct lockdep_map *lock)
 	if (!name)
 		name = __get_key_name(lock->key->subkeys, str);
 
-	printk("%s", name);
+	printk(KERN_CONT "%s", name);
 }
 
 static void print_lock(struct held_lock *hlock)
@@ -551,13 +551,13 @@ static void print_lock(struct held_lock *hlock)
 	barrier();
 
 	if (!class_idx || (class_idx - 1) >= MAX_LOCKDEP_KEYS) {
-		printk("<RELEASED>\n");
+		printk(KERN_CONT "<RELEASED>\n");
 		return;
 	}
 
 	print_lock_name(lock_classes + class_idx - 1);
-	printk(", at: ");
-	print_ip_sym(hlock->acquire_ip);
+	printk(KERN_CONT ", at: [<%p>] %pS\n",
+		(void *)hlock->acquire_ip, (void *)hlock->acquire_ip);
 }
 
 static void lockdep_print_held_locks(struct task_struct *curr)
@@ -792,8 +792,8 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 
 		printk("\nnew class %p: %s", class->key, class->name);
 		if (class->name_version > 1)
-			printk("#%d", class->name_version);
-		printk("\n");
+			printk(KERN_CONT "#%d", class->name_version);
+		printk(KERN_CONT "\n");
 		dump_stack();
 
 		if (!graph_lock()) {
@@ -1071,7 +1071,7 @@ print_circular_bug_entry(struct lock_list *target, int depth)
 		return 0;
 	printk("\n-> #%u", depth);
 	print_lock_name(target->class);
-	printk(":\n");
+	printk(KERN_CONT ":\n");
 	print_stack_trace(&target->trace, 6);
 
 	return 0;
@@ -1102,11 +1102,11 @@ print_circular_lock_scenario(struct held_lock *src,
 	if (parent != source) {
 		printk("Chain exists of:\n ");
 		__print_lock_name(source);
-		printk(" --> ");
+		printk(KERN_CONT " --> ");
 		__print_lock_name(parent);
-		printk(" --> ");
+		printk(KERN_CONT " --> ");
 		__print_lock_name(target);
-		printk("\n\n");
+		printk(KERN_CONT "\n\n");
 	}
 
 	printk(" Possible unsafe locking scenario:\n\n");
@@ -1114,16 +1114,16 @@ print_circular_lock_scenario(struct held_lock *src,
 	printk(" ---- ----\n");
 	printk(" lock(");
 	__print_lock_name(target);
-	printk(");\n");
+	printk(KERN_CONT ");\n");
 	printk(" lock(");
 	__print_lock_name(parent);
-	printk(");\n");
+	printk(KERN_CONT ");\n");
 	printk(" lock(");
 	__print_lock_name(target);
-	printk(");\n");
+	printk(KERN_CONT ");\n");
 	printk(" lock(");
 	__print_lock_name(source);
-	printk(");\n");
+	printk(KERN_CONT ");\n");
 	printk("\n *** DEADLOCK ***\n\n");
 }
 
@@ -1359,22 +1359,22 @@ static void print_lock_class_header(struct lock_class *class, int depth)
 
 	printk("%*s->", depth, "");
 	print_lock_name(class);
-	printk(" ops: %lu", class->ops);
-	printk(" {\n");
+	printk(KERN_CONT " ops: %lu", class->ops);
+	printk(KERN_CONT " {\n");
 
 	for (bit = 0; bit < LOCK_USAGE_STATES; bit++) {
 		if (class->usage_mask & (1 << bit)) {
 			int len = depth;
 
 			len += printk("%*s %s", depth, "", usage_str[bit]);
-			len += printk(" at:\n");
+			len += printk(KERN_CONT " at:\n");
 			print_stack_trace(class->usage_traces + bit, len);
 		}
 	}
 	printk("%*s }\n", depth, "");
 
-	printk("%*s ... key at: ",depth,"");
-	print_ip_sym((unsigned long)class->key);
+	printk("%*s ... key at: [<%p>] %pS\n",
+		depth, "", class->key, class->key);
 }
 
 /*
@@ -1437,11 +1437,11 @@ print_irq_lock_scenario(struct lock_list *safe_entry,
 	if (middle_class != unsafe_class) {
 		printk("Chain exists of:\n ");
 		__print_lock_name(safe_class);
-		printk(" --> ");
+		printk(KERN_CONT " --> ");
 		__print_lock_name(middle_class);
-		printk(" --> ");
+		printk(KERN_CONT " --> ");
 		__print_lock_name(unsafe_class);
-		printk("\n\n");
+		printk(KERN_CONT "\n\n");
 	}
 
 	printk(" Possible interrupt unsafe locking scenario:\n\n");
@@ -1449,18 +1449,18 @@ print_irq_lock_scenario(struct lock_list *safe_entry,
 	printk(" ---- ----\n");
 	printk(" lock(");
 	__print_lock_name(unsafe_class);
-	printk(");\n");
+	printk(KERN_CONT ");\n");
 	printk(" local_irq_disable();\n");
 	printk(" lock(");
 	__print_lock_name(safe_class);
-	printk(");\n");
+	printk(KERN_CONT ");\n");
 	printk(" lock(");
 	__print_lock_name(middle_class);
-	printk(");\n");
+	printk(KERN_CONT ");\n");
 	printk(" <Interrupt>\n");
 	printk(" lock(");
 	__print_lock_name(safe_class);
-	printk(");\n");
+	printk(KERN_CONT ");\n");
 	printk("\n *** DEADLOCK ***\n\n");
 }
 
@@ -1497,9 +1497,9 @@ print_bad_irq_dependency(struct task_struct *curr,
 	print_lock(prev);
 	printk("which would create a new lock dependency:\n");
 	print_lock_name(hlock_class(prev));
-	printk(" ->");
+	printk(KERN_CONT " ->");
 	print_lock_name(hlock_class(next));
-	printk("\n");
+	printk(KERN_CONT "\n");
 
 	printk("\nbut this new dependency connects a %s-irq-safe lock:\n",
 		irqclass);
@@ -1521,8 +1521,7 @@ print_bad_irq_dependency(struct task_struct *curr,
 
 	lockdep_print_held_locks(curr);
 
-	printk("\nthe dependencies between %s-irq-safe lock", irqclass);
-	printk(" and the holding lock:\n");
+	printk("\nthe dependencies between %s-irq-safe lock and the holding lock:\n", irqclass);
 	if (!save_trace(&prev_root->trace))
 		return 0;
 	print_shortest_lock_dependencies(backwards_entry, prev_root);
@@ -1694,10 +1693,10 @@ print_deadlock_scenario(struct held_lock *nxt,
 	printk(" ----\n");
 	printk(" lock(");
 	__print_lock_name(prev);
-	printk(");\n");
+	printk(KERN_CONT ");\n");
 	printk(" lock(");
 	__print_lock_name(next);
-	printk(");\n");
+	printk(KERN_CONT ");\n");
 	printk("\n *** DEADLOCK ***\n\n");
 	printk(" May be due to missing lock nesting notation\n\n");
 }
@@ -1891,9 +1890,9 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
 		graph_unlock();
 		printk("\n new dependency: ");
 		print_lock_name(hlock_class(prev));
-		printk(" => ");
+		printk(KERN_CONT " => ");
 		print_lock_name(hlock_class(next));
-		printk("\n");
+		printk(KERN_CONT "\n");
 		dump_stack();
 		return graph_lock();
 	}
@@ -2343,11 +2342,11 @@ print_usage_bug_scenario(struct held_lock *lock)
 	printk(" ----\n");
 	printk(" lock(");
 	__print_lock_name(class);
-	printk(");\n");
+	printk(KERN_CONT ");\n");
 	printk(" <Interrupt>\n");
 	printk(" lock(");
 	__print_lock_name(class);
-	printk(");\n");
+	printk(KERN_CONT ");\n");
 	printk("\n *** DEADLOCK ***\n\n");
 }
 
@@ -2522,14 +2521,18 @@ check_usage_backwards(struct task_struct *curr, struct held_lock *this,
 void print_irqtrace_events(struct task_struct *curr)
 {
 	printk("irq event stamp: %u\n", curr->irq_events);
-	printk("hardirqs last enabled at (%u): ", curr->hardirq_enable_event);
-	print_ip_sym(curr->hardirq_enable_ip);
-	printk("hardirqs last disabled at (%u): ", curr->hardirq_disable_event);
-	print_ip_sym(curr->hardirq_disable_ip);
-	printk("softirqs last enabled at (%u): ", curr->softirq_enable_event);
-	print_ip_sym(curr->softirq_enable_ip);
-	printk("softirqs last disabled at (%u): ", curr->softirq_disable_event);
-	print_ip_sym(curr->softirq_disable_ip);
+	printk("hardirqs last enabled at (%u): [<%p>] %pS\n",
+		curr->hardirq_enable_event, (void *)curr->hardirq_enable_ip,
+		(void *)curr->hardirq_enable_ip);
+	printk("hardirqs last disabled at (%u): [<%p>] %pS\n",
+		curr->hardirq_disable_event, (void *)curr->hardirq_disable_ip,
+		(void *)curr->hardirq_disable_ip);
+	printk("softirqs last enabled at (%u): [<%p>] %pS\n",
+		curr->softirq_enable_event, (void *)curr->softirq_enable_ip,
+		(void *)curr->softirq_enable_ip);
+	printk("softirqs last disabled at (%u): [<%p>] %pS\n",
+		curr->softirq_disable_event, (void *)curr->softirq_disable_ip,
+		(void *)curr->softirq_disable_ip);
 }
 
 static int HARDIRQ_verbose(struct lock_class *class)
@@ -3235,8 +3238,8 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 	if (very_verbose(class)) {
 		printk("\nacquire class [%p] %s", class->key, class->name);
 		if (class->name_version > 1)
-			printk("#%d", class->name_version);
-		printk("\n");
+			printk(KERN_CONT "#%d", class->name_version);
+		printk(KERN_CONT "\n");
 		dump_stack();
 	}
 
@@ -3378,7 +3381,7 @@ print_unlock_imbalance_bug(struct task_struct *curr, struct lockdep_map *lock,
 	printk("%s/%d is trying to release lock (",
 		curr->comm, task_pid_nr(curr));
 	print_lockdep_cache(lock);
-	printk(") at:\n");
+	printk(KERN_CONT ") at:\n");
 	print_ip_sym(ip);
 	printk("but there are no more locks to release!\n");
 	printk("\nother info that might help us debug this:\n");
@@ -3871,7 +3874,7 @@ print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock,
 	printk("%s/%d is trying to contend lock (",
 		curr->comm, task_pid_nr(curr));
 	print_lockdep_cache(lock);
-	printk(") at:\n");
+	printk(KERN_CONT ") at:\n");
 	print_ip_sym(ip);
 	printk("but there are no locks held!\n");
 	printk("\nother info that might help us debug this:\n");
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 1ec0f48962b3..2c49d76f96c3 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -65,8 +65,72 @@ static inline void clear_rt_mutex_waiters(struct rt_mutex *lock)
 
 static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
 {
-	if (!rt_mutex_has_waiters(lock))
-		clear_rt_mutex_waiters(lock);
+	unsigned long owner, *p = (unsigned long *) &lock->owner;
+
+	if (rt_mutex_has_waiters(lock))
+		return;
+
+	/*
+	 * The rbtree has no waiters enqueued, now make sure that the
+	 * lock->owner still has the waiters bit set, otherwise the
+	 * following can happen:
+	 *
+	 * CPU 0	CPU 1		CPU2
+	 * l->owner=T1
+	 *		rt_mutex_lock(l)
+	 *		lock(l->lock)
+	 *		l->owner = T1 | HAS_WAITERS;
+	 *		enqueue(T2)
+	 *		boost()
+	 *		  unlock(l->lock)
+	 *		block()
+	 *
+	 *				rt_mutex_lock(l)
+	 *				lock(l->lock)
+	 *				l->owner = T1 | HAS_WAITERS;
+	 *				enqueue(T3)
+	 *				boost()
+	 *				  unlock(l->lock)
+	 *				block()
+	 *		signal(->T2)	signal(->T3)
+	 *		lock(l->lock)
+	 *		dequeue(T2)
+	 *		deboost()
+	 *		  unlock(l->lock)
+	 *				lock(l->lock)
+	 *				dequeue(T3)
+	 *				 ==> wait list is empty
+	 *				deboost()
+	 *				 unlock(l->lock)
+	 *		lock(l->lock)
+	 *		fixup_rt_mutex_waiters()
+	 *		  if (wait_list_empty(l) {
+	 *		    l->owner = owner
+	 *		    owner = l->owner & ~HAS_WAITERS;
+	 *		      ==> l->owner = T1
+	 *		  }
+	 *				lock(l->lock)
+	 * rt_mutex_unlock(l)		fixup_rt_mutex_waiters()
+	 *				  if (wait_list_empty(l) {
+	 *				    owner = l->owner & ~HAS_WAITERS;
+	 * cmpxchg(l->owner, T1, NULL)
+	 *  ===> Success (l->owner = NULL)
+	 *
+	 *				    l->owner = owner
+	 *				      ==> l->owner = T1
+	 *				  }
+	 *
+	 * With the check for the waiter bit in place T3 on CPU2 will not
+	 * overwrite. All tasks fiddling with the waiters bit are
+	 * serialized by l->lock, so nothing else can modify the waiters
+	 * bit. If the bit is set then nothing can change l->owner either
+	 * so the simple RMW is safe. The cmpxchg() will simply fail if it
+	 * happens in the middle of the RMW because the waiters bit is
+	 * still set.
+	 */
+	owner = READ_ONCE(*p);
+	if (owner & RT_MUTEX_HAS_WAITERS)
+		WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS);
 }
 
 /*
diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h
index 4f5f83c7d2d3..e317e1cbb3eb 100644
--- a/kernel/locking/rtmutex_common.h
+++ b/kernel/locking/rtmutex_common.h
@@ -75,8 +75,9 @@ task_top_pi_waiter(struct task_struct *p)
 
 static inline struct task_struct *rt_mutex_owner(struct rt_mutex *lock)
 {
-	return (struct task_struct *)
-		((unsigned long)lock->owner & ~RT_MUTEX_OWNER_MASKALL);
+	unsigned long owner = (unsigned long) READ_ONCE(lock->owner);
+
+	return (struct task_struct *) (owner & ~RT_MUTEX_OWNER_MASKALL);
 }
 
 /*
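
A closing note on the rtmutex side: the rtmutex.c hunk clears the waiters bit with an explicit READ_ONCE()/WRITE_ONCE() pair, and only when the bit is actually set, while rt_mutex_owner() now reads lock->owner exactly once. A minimal sketch of that access pattern on a made-up structure; struct example_lock, EXAMPLE_HAS_WAITERS and example_clear_waiters() are illustrative names, not kernel API:

	#include <linux/compiler.h>	/* READ_ONCE(), WRITE_ONCE() */

	#define EXAMPLE_HAS_WAITERS	1UL	/* illustrative flag bit in the owner word */

	struct example_lock {
		unsigned long owner;	/* owner pointer | flag bits, illustration only */
	};

	/*
	 * Same shape as the fixed fixup_rt_mutex_waiters(): one explicit
	 * load, one explicit store, and the store happens only if the bit
	 * is still set, so a concurrent cmpxchg()-based release either
	 * fails (bit still set) or is left untouched (bit already clear).
	 */
	static void example_clear_waiters(struct example_lock *lock)
	{
		unsigned long owner = READ_ONCE(lock->owner);

		if (owner & EXAMPLE_HAS_WAITERS)
			WRITE_ONCE(lock->owner, owner & ~EXAMPLE_HAS_WAITERS);
	}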