Diffstat (limited to 'kernel/locking')
-rw-r--r--  kernel/locking/lockdep.c     |  2 +-
-rw-r--r--  kernel/locking/locktorture.c | 12 +++++++-----
-rw-r--r--  kernel/locking/rtmutex.c     | 32 ++++++++++++++++++++++++++++----
3 files changed, 36 insertions(+), 10 deletions(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index b0e9467922e1..d24e4339b46d 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -4188,7 +4188,7 @@ void debug_show_held_locks(struct task_struct *task)
 }
 EXPORT_SYMBOL_GPL(debug_show_held_locks);
 
-asmlinkage void lockdep_sys_exit(void)
+asmlinkage __visible void lockdep_sys_exit(void)
 {
 	struct task_struct *curr = current;
 
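The lockdep change belongs to the LTO enablement work: lockdep_sys_exit() is called only from architecture entry assembly, so the compiler never sees a C caller. A minimal sketch of what the annotation buys, assuming the mainline gcc definition of __visible from this era (the asmlinkage stub below is illustration only):

/* Assumption: on gcc the kernel defines __visible roughly like this
 * (include/linux/compiler-gcc4.h at the time). */
#define __visible	__attribute__((externally_visible))
#define asmlinkage	/* arch-specific calling convention, elided */

/*
 * Referenced only from the syscall-exit assembly path; without
 * externally_visible, -flto/-fwhole-program builds may conclude the
 * function is unused and discard or localize it.
 */
asmlinkage __visible void lockdep_sys_exit(void);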
diff --git a/kernel/locking/locktorture.c b/kernel/locking/locktorture.c
index f26b1a18e34e..0955b885d0dc 100644
--- a/kernel/locking/locktorture.c
+++ b/kernel/locking/locktorture.c
@@ -82,14 +82,14 @@ struct lock_writer_stress_stats {
 };
 static struct lock_writer_stress_stats *lwsa;
 
-#if defined(MODULE) || defined(CONFIG_LOCK_TORTURE_TEST_RUNNABLE)
+#if defined(MODULE)
 #define LOCKTORTURE_RUNNABLE_INIT 1
 #else
 #define LOCKTORTURE_RUNNABLE_INIT 0
 #endif
 int locktorture_runnable = LOCKTORTURE_RUNNABLE_INIT;
 module_param(locktorture_runnable, int, 0444);
-MODULE_PARM_DESC(locktorture_runnable, "Start locktorture at boot");
+MODULE_PARM_DESC(locktorture_runnable, "Start locktorture at module init");
 
 /* Forward reference. */
 static void lock_torture_cleanup(void);
@@ -216,10 +216,11 @@ static int lock_torture_writer(void *arg)
 	static DEFINE_TORTURE_RANDOM(rand);
 
 	VERBOSE_TOROUT_STRING("lock_torture_writer task started");
-	set_user_nice(current, 19);
+	set_user_nice(current, MAX_NICE);
 
 	do {
-		schedule_timeout_uninterruptible(1);
+		if ((torture_random(&rand) & 0xfffff) == 0)
+			schedule_timeout_uninterruptible(1);
 		cur_ops->writelock();
 		if (WARN_ON_ONCE(lock_is_write_held))
 			lwsp->n_write_lock_fail++;
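This hunk replaces an unconditional one-jiffy sleep per iteration with an occasional one: 0xfffff masks the low 20 bits, so the sleep fires with probability 2^-20, roughly once per million lock/unlock passes, and the writers otherwise pound the lock back to back. A standalone sketch of the gating pattern, with a hypothetical xorshift PRNG standing in for torture_random(&rand):

#include <stdint.h>

/* Hypothetical stand-in for torture_random(); any decent PRNG works. */
static uint32_t prng_state = 2463534242u;

static uint32_t prng_next(void)
{
	prng_state ^= prng_state << 13;
	prng_state ^= prng_state >> 17;
	prng_state ^= prng_state << 5;
	return prng_state;
}

static int should_sleep(void)
{
	/* True with probability 1/2^20: one short sleep per ~10^6
	 * iterations instead of one per iteration. */
	return (prng_next() & 0xfffff) == 0;
}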
@@ -354,7 +355,8 @@ static int __init lock_torture_init(void)
 		&lock_busted_ops, &spin_lock_ops, &spin_lock_irq_ops,
 	};
 
-	torture_init_begin(torture_type, verbose, &locktorture_runnable);
+	if (!torture_init_begin(torture_type, verbose, &locktorture_runnable))
+		return -EBUSY;
 
 	/* Process args and tell the world that the torturer is on the job. */
 	for (i = 0; i < ARRAY_SIZE(torture_ops); i++) {
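Here torture_init_begin() now reports whether the caller actually acquired the torture framework, and lock_torture_init() turns a refusal into -EBUSY rather than starting a second test alongside a running one. A simplified sketch of that exclusion pattern (the real bookkeeping lives in kernel/torture.c and is more involved):

#include <errno.h>
#include <stdbool.h>

static bool torture_busy;	/* simplified: the real code serializes
				   this with a mutex and records the
				   active test's name */

static bool example_init_begin(void)
{
	if (torture_busy)
		return false;	/* another torture test is running */
	torture_busy = true;
	return true;
}

static int example_torture_init(void)
{
	if (!example_init_begin())
		return -EBUSY;	/* refuse to run two tests at once */
	/* ... parse parameters, spawn writer kthreads ... */
	return 0;
}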
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index aa4dff04b594..a620d4d08ca6 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -343,9 +343,16 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 	 * top_waiter can be NULL, when we are in the deboosting
 	 * mode!
 	 */
-	if (top_waiter && (!task_has_pi_waiters(task) ||
-			   top_waiter != task_top_pi_waiter(task)))
-		goto out_unlock_pi;
+	if (top_waiter) {
+		if (!task_has_pi_waiters(task))
+			goto out_unlock_pi;
+		/*
+		 * If deadlock detection is off, we stop here if we
+		 * are not the top pi waiter of the task.
+		 */
+		if (!detect_deadlock && top_waiter != task_top_pi_waiter(task))
+			goto out_unlock_pi;
+	}
 
 	/*
 	 * When deadlock detection is off then we check, if further
@@ -361,7 +368,12 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 		goto retry;
 	}
 
-	/* Deadlock detection */
+	/*
+	 * Deadlock detection. If the lock is the same as the original
+	 * lock which caused us to walk the lock chain or if the
+	 * current lock is owned by the task which initiated the chain
+	 * walk, we detected a deadlock.
+	 */
 	if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
 		debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock);
 		raw_spin_unlock(&lock->wait_lock);
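The expanded comment spells out the cycle test: the walk follows the blocked-on chain from the original waiter, and arriving back at orig_lock, or at a lock owned by the task that started the walk, means the chain loops. A condensed restatement of that predicate, with an ABBA scenario for concreteness (the helper declarations are for the sketch only):

struct rt_mutex;
struct task_struct;

/* Declaration for the sketch; the real one is rtmutex-internal. */
struct task_struct *rt_mutex_owner(struct rt_mutex *lock);

/*
 * ABBA illustration: T1 holds L1 and blocks on L2, while T2 holds L2
 * and blocks on L1. T1's walk starts with orig_lock = L2 and
 * top_task = T1; following T2's blocked-on lock reaches L1, whose
 * owner is T1 == top_task, so the chain is circular.
 */
static int chain_is_circular(struct rt_mutex *lock,
			     struct rt_mutex *orig_lock,
			     struct task_struct *top_task)
{
	return lock == orig_lock || rt_mutex_owner(lock) == top_task;
}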
@@ -527,6 +539,18 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 	unsigned long flags;
 	int chain_walk = 0, res;
 
+	/*
+	 * Early deadlock detection. We really don't want the task to
+	 * enqueue on itself just to untangle the mess later. It's not
+	 * only an optimization. We drop the locks, so another waiter
+	 * can come in before the chain walk detects the deadlock. So
+	 * the other will detect the deadlock and return -EDEADLOCK,
+	 * which is wrong, as the other waiter is not in a deadlock
+	 * situation.
+	 */
+	if (detect_deadlock && owner == task)
+		return -EDEADLK;
+
 	raw_spin_lock_irqsave(&task->pi_lock, flags);
 	__rt_mutex_adjust_prio(task);
 	waiter->task = task;
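This final hunk rejects a task trying to block on an rt_mutex it already owns before any waiter state is queued. As a userspace analogue (illustration only, not from the patch), a POSIX error-checking mutex reports the same class of self-deadlock with EDEADLK:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

int main(void)
{
	pthread_mutex_t m;
	pthread_mutexattr_t attr;

	/* PTHREAD_MUTEX_ERRORCHECK makes a relock attempt by the owner
	 * fail with EDEADLK instead of hanging, analogous to the early
	 * "owner == task" test added above. */
	pthread_mutexattr_init(&attr);
	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
	pthread_mutex_init(&m, &attr);

	pthread_mutex_lock(&m);
	if (pthread_mutex_lock(&m) == EDEADLK)
		printf("self-deadlock detected\n");

	pthread_mutex_unlock(&m);
	pthread_mutex_destroy(&m);
	return 0;
}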