-rw-r--r--  include/linux/sched.h |  2
-rw-r--r--  kernel/rtmutex.c      | 42
-rw-r--r--  kernel/sched.c        |  2
3 files changed, 40 insertions, 6 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index b4e6be7de5ad..821f0481ebe1 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1044,11 +1044,13 @@ extern void sched_idle_next(void);
 #ifdef CONFIG_RT_MUTEXES
 extern int rt_mutex_getprio(task_t *p);
 extern void rt_mutex_setprio(task_t *p, int prio);
+extern void rt_mutex_adjust_pi(task_t *p);
 #else
 static inline int rt_mutex_getprio(task_t *p)
 {
         return p->normal_prio;
 }
+# define rt_mutex_adjust_pi(p)          do { } while (0)
 #endif
 
 extern void set_user_nice(task_t *p, long nice);
diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
index 3fc0f0680ca2..45d61016da57 100644
--- a/kernel/rtmutex.c
+++ b/kernel/rtmutex.c
@@ -160,7 +160,8 @@ int max_lock_depth = 1024;
 static int rt_mutex_adjust_prio_chain(task_t *task,
                                       int deadlock_detect,
                                       struct rt_mutex *orig_lock,
-                                      struct rt_mutex_waiter *orig_waiter
+                                      struct rt_mutex_waiter *orig_waiter,
+                                      struct task_struct *top_task
                                       __IP_DECL__)
 {
         struct rt_mutex *lock;
@@ -189,7 +190,7 @@ static int rt_mutex_adjust_prio_chain(task_t *task,
                         prev_max = max_lock_depth;
                         printk(KERN_WARNING "Maximum lock depth %d reached "
                                "task: %s (%d)\n", max_lock_depth,
-                               current->comm, current->pid);
+                               top_task->comm, top_task->pid);
                 }
                 put_task_struct(task);
 
@@ -229,7 +230,7 @@ static int rt_mutex_adjust_prio_chain(task_t *task,
         }
 
         /* Deadlock detection */
-        if (lock == orig_lock || rt_mutex_owner(lock) == current) {
+        if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
                 debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock);
                 spin_unlock(&lock->wait_lock);
                 ret = deadlock_detect ? -EDEADLK : 0;
@@ -433,6 +434,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
                 __rt_mutex_adjust_prio(owner);
                 if (owner->pi_blocked_on) {
                         boost = 1;
+                        /* gets dropped in rt_mutex_adjust_prio_chain()! */
                         get_task_struct(owner);
                 }
                 spin_unlock_irqrestore(&owner->pi_lock, flags);
@@ -441,6 +443,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
                 spin_lock_irqsave(&owner->pi_lock, flags);
                 if (owner->pi_blocked_on) {
                         boost = 1;
+                        /* gets dropped in rt_mutex_adjust_prio_chain()! */
                         get_task_struct(owner);
                 }
                 spin_unlock_irqrestore(&owner->pi_lock, flags);
@@ -450,8 +453,8 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 
         spin_unlock(&lock->wait_lock);
 
-        res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock,
-                                         waiter __IP__);
+        res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter,
+                                         current __IP__);
 
         spin_lock(&lock->wait_lock);
 
@@ -552,6 +555,7 @@ static void remove_waiter(struct rt_mutex *lock,
 
         if (owner->pi_blocked_on) {
                 boost = 1;
+                /* gets dropped in rt_mutex_adjust_prio_chain()! */
                 get_task_struct(owner);
         }
         spin_unlock_irqrestore(&owner->pi_lock, flags);
@@ -564,12 +568,37 @@ static void remove_waiter(struct rt_mutex *lock,
 
         spin_unlock(&lock->wait_lock);
 
-        rt_mutex_adjust_prio_chain(owner, 0, lock, NULL __IP__);
+        rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current __IP__);
 
         spin_lock(&lock->wait_lock);
 }
 
 /*
+ * Recheck the pi chain, in case we got a priority setting
+ *
+ * Called from sched_setscheduler
+ */
+void rt_mutex_adjust_pi(struct task_struct *task)
+{
+        struct rt_mutex_waiter *waiter;
+        unsigned long flags;
+
+        spin_lock_irqsave(&task->pi_lock, flags);
+
+        waiter = task->pi_blocked_on;
+        if (!waiter || waiter->list_entry.prio == task->prio) {
+                spin_unlock_irqrestore(&task->pi_lock, flags);
+                return;
+        }
+
+        /* gets dropped in rt_mutex_adjust_prio_chain()! */
+        get_task_struct(task);
+        spin_unlock_irqrestore(&task->pi_lock, flags);
+
+        rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task __RET_IP__);
+}
+
+/*
  * Slow path lock function:
  */
 static int __sched
@@ -636,6 +665,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
                 if (unlikely(ret))
                         break;
         }
+
         spin_unlock(&lock->wait_lock);
 
         debug_rt_mutex_print_deadlock(&waiter);
diff --git a/kernel/sched.c b/kernel/sched.c
index 7a30addfd235..2629c1711fd6 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -4070,6 +4070,8 @@ recheck:
         __task_rq_unlock(rq);
         spin_unlock_irqrestore(&p->pi_lock, flags);
 
+        rt_mutex_adjust_pi(p);
+
         return 0;
 }
 EXPORT_SYMBOL_GPL(sched_setscheduler);