Diffstat (limited to 'kernel/rtmutex.c'):

 kernel/rtmutex.c | 42 ++++++++++++++++++++++++++++++++++------
 1 file changed, 36 insertions(+), 6 deletions(-)
diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
index 3fc0f0680ca2..45d61016da57 100644
--- a/kernel/rtmutex.c
+++ b/kernel/rtmutex.c
@@ -160,7 +160,8 @@ int max_lock_depth = 1024;
 static int rt_mutex_adjust_prio_chain(task_t *task,
                                       int deadlock_detect,
                                       struct rt_mutex *orig_lock,
-                                      struct rt_mutex_waiter *orig_waiter
+                                      struct rt_mutex_waiter *orig_waiter,
+                                      struct task_struct *top_task
                                       __IP_DECL__)
 {
        struct rt_mutex *lock;
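
The new top_task argument names the task on whose behalf the chain walk was
started. The old code assumed this was always current, which only holds when
the walk begins because the caller itself is blocking on the lock; the
sched_setscheduler() path added further down walks the chain for an arbitrary
task. The two entry shapes after this patch, both taken from the hunks below:

        /* blocking path: the walk runs on behalf of the caller itself */
        res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter,
                                         current __IP__);

        /* priority-setting path: the walk runs on behalf of 'task' */
        rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task __RET_IP__);
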
@@ -189,7 +190,7 @@ static int rt_mutex_adjust_prio_chain(task_t *task,
                prev_max = max_lock_depth;
                printk(KERN_WARNING "Maximum lock depth %d reached "
                       "task: %s (%d)\n", max_lock_depth,
-                      current->comm, current->pid);
+                      top_task->comm, top_task->pid);
        }
        put_task_struct(task);
 
@@ -229,7 +230,7 @@ static int rt_mutex_adjust_prio_chain(task_t *task,
        }
 
        /* Deadlock detection */
-       if (lock == orig_lock || rt_mutex_owner(lock) == current) {
+       if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
                debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock);
                spin_unlock(&lock->wait_lock);
                ret = deadlock_detect ? -EDEADLK : 0;
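
The deadlock check fires when the walk loops back to the lock it started from,
or reaches a lock already held by the task at the top of the chain. Comparing
the owner against top_task rather than current keeps the check correct when
the walker runs in a context where current is unrelated to the chain (for
instance, one task adjusting another task's priority). A sketch of the cycle
the second test catches, with hypothetical tasks A, B and locks L1, L2:

        /*
         * top_task (A) owns L1 and blocks on L2;
         * B owns L2 and blocks on L1:
         *
         *      A -> L2 -> B -> L1 -> owner(L1) == A == top_task
         *
         * The walk reaches L1 with rt_mutex_owner(L1) == top_task, so
         * -EDEADLK is returned when deadlock_detect is set.
         */
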
@@ -433,6 +434,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
        __rt_mutex_adjust_prio(owner);
        if (owner->pi_blocked_on) {
                boost = 1;
+               /* gets dropped in rt_mutex_adjust_prio_chain()! */
                get_task_struct(owner);
        }
        spin_unlock_irqrestore(&owner->pi_lock, flags);
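
The comment added here (and at the two similar boost sites below) documents a
reference handoff: the task reference is taken while owner->pi_lock is held,
and ownership of it passes to the chain walker, whose exit paths drop it, as
the put_task_struct(task) in the first hunk above shows. Schematically:

        get_task_struct(owner); /* caller takes the reference ...         */
        /* ... rt_mutex_adjust_prio_chain() drops it on every exit path: */
        put_task_struct(task);
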
@@ -441,6 +443,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
        spin_lock_irqsave(&owner->pi_lock, flags);
        if (owner->pi_blocked_on) {
                boost = 1;
+               /* gets dropped in rt_mutex_adjust_prio_chain()! */
                get_task_struct(owner);
        }
        spin_unlock_irqrestore(&owner->pi_lock, flags);
@@ -450,8 +453,8 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 
        spin_unlock(&lock->wait_lock);
 
-       res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock,
-                                        waiter __IP__);
+       res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter,
+                                        current __IP__);
 
        spin_lock(&lock->wait_lock);
 
@@ -552,6 +555,7 @@ static void remove_waiter(struct rt_mutex *lock,
 
        if (owner->pi_blocked_on) {
                boost = 1;
+               /* gets dropped in rt_mutex_adjust_prio_chain()! */
                get_task_struct(owner);
        }
        spin_unlock_irqrestore(&owner->pi_lock, flags);
@@ -564,12 +568,37 @@ static void remove_waiter(struct rt_mutex *lock,
 
        spin_unlock(&lock->wait_lock);
 
-       rt_mutex_adjust_prio_chain(owner, 0, lock, NULL __IP__);
+       rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current __IP__);
 
        spin_lock(&lock->wait_lock);
 }
 
 /*
+ * Recheck the pi chain, in case we got a priority setting
+ *
+ * Called from sched_setscheduler
+ */
+void rt_mutex_adjust_pi(struct task_struct *task)
+{
+       struct rt_mutex_waiter *waiter;
+       unsigned long flags;
+
+       spin_lock_irqsave(&task->pi_lock, flags);
+
+       waiter = task->pi_blocked_on;
+       if (!waiter || waiter->list_entry.prio == task->prio) {
+               spin_unlock_irqrestore(&task->pi_lock, flags);
+               return;
+       }
+
+       /* gets dropped in rt_mutex_adjust_prio_chain()! */
+       get_task_struct(task);
+       spin_unlock_irqrestore(&task->pi_lock, flags);
+
+       rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task __RET_IP__);
+}
+
+/*
  * Slow path lock function:
  */
 static int __sched
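
rt_mutex_adjust_pi() returns early when the task is not blocked on an rt_mutex
at all, or when its priority in the waiter list already matches task->prio;
only a real mismatch takes a task reference and re-walks the chain. The
matching call site in sched_setscheduler() is not part of this diff; a minimal
sketch of the intended usage, where the exact placement (after the new
priority is in effect) is an assumption:

        /*
         * sketch only: in sched_setscheduler(), once the new priority is
         * visible and the runqueue lock has been dropped
         */
        rt_mutex_adjust_pi(p);
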
@@ -636,6 +665,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
                if (unlikely(ret))
                        break;
        }
+
        spin_unlock(&lock->wait_lock);
 
        debug_rt_mutex_print_deadlock(&waiter);