path: root/kernel/locking
author     Dario Faggioli <raistlin@linux.it>    2013-11-07 08:43:44 -0500
committer  Ingo Molnar <mingo@kernel.org>        2014-01-13 07:42:56 -0500
commit     2d3d891d3344159d5b452a645e355bbe29591e8b (patch)
tree       ab7c4ef00b48d68efa2d57cabf8c3c86160f2406 /kernel/locking
parent     fb00aca474405f4fa8a8519c3179fed722eabd83 (diff)
sched/deadline: Add SCHED_DEADLINE inheritance logic
Some method to deal with rt-mutexes and make sched_dl interact with the current PI-code is needed, raising all but trivial issues that need (according to us) to be solved with some restructuring of the pi-code (i.e., going toward a proxy-execution-ish implementation).

This is under development. In the meanwhile, as a temporary solution, what this commit does is:

- ensure a pi-lock owner with waiters is never throttled down. Instead, when it runs out of runtime, it immediately gets replenished and its deadline is postponed;
- the scheduling parameters (relative deadline and default runtime) used for those replenishments -- during the whole period it holds the pi-lock -- are the ones of the waiting task with the earliest deadline.

Acting this way, we provide some kind of boosting to the lock owner, still by using the existing (actually, slightly modified by the previous commit) pi-architecture.

We would stress the fact that this is only a surely needed, all but clean solution to the problem. In the end it's only a way to re-start discussion within the community. So, as always, comments, ideas, rants, etc. are welcome! :-)

Signed-off-by: Dario Faggioli <raistlin@linux.it>
Signed-off-by: Juri Lelli <juri.lelli@gmail.com>
[ Added !RT_MUTEXES build fix. ]
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1383831828-15501-11-git-send-email-juri.lelli@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
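For context, a minimal sketch of the scheduler-side boosting behaviour described above, under stated assumptions: handle_runtime_exhaustion() and dl_runtime_exhausted() are hypothetical names and are not part of this patch (the real changes live in kernel/sched/deadline.c); only rt_mutex_get_top_task() (added by the diff below), dl_prio(), and the sched_dl_entity fields are real.

/*
 * Illustrative sketch only -- not taken from this patch.
 * handle_runtime_exhaustion(), dl_runtime_exhausted() and
 * throttle_dl_entity() are hypothetical; they mirror the behaviour the
 * commit message describes: a pi-lock owner with waiters is never
 * throttled, it is replenished with the earliest-deadline waiter's
 * parameters.
 */
static void handle_runtime_exhaustion(struct task_struct *owner)
{
        struct sched_dl_entity *dl_se = &owner->dl;
        struct task_struct *top = rt_mutex_get_top_task(owner);

        if (!dl_runtime_exhausted(dl_se))
                return;

        if (top && dl_prio(top->normal_prio)) {
                /*
                 * Boosted owner: replenish immediately and postpone the
                 * deadline, using the top waiter's relative deadline and
                 * default runtime instead of the owner's own parameters.
                 */
                dl_se->deadline += top->dl.dl_deadline;
                dl_se->runtime = top->dl.dl_runtime;
        } else {
                /* No waiters to boost for: normal CBS throttling. */
                throttle_dl_entity(dl_se);
        }
}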
Diffstat (limited to 'kernel/locking')
-rw-r--r--  kernel/locking/rtmutex.c         31
-rw-r--r--  kernel/locking/rtmutex_common.h   1
2 files changed, 23 insertions, 9 deletions
diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c
index 3bf0aa68dd3f..2e960a2bab81 100644
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -96,13 +96,16 @@ static inline int
 rt_mutex_waiter_less(struct rt_mutex_waiter *left,
                      struct rt_mutex_waiter *right)
 {
-        if (left->task->prio < right->task->prio)
+        if (left->prio < right->prio)
                 return 1;
 
         /*
-         * If both tasks are dl_task(), we check their deadlines.
+         * If both waiters have dl_prio(), we check the deadlines of the
+         * associated tasks.
+         * If left waiter has a dl_prio(), and we didn't return 1 above,
+         * then right waiter has a dl_prio() too.
          */
-        if (dl_prio(left->task->prio) && dl_prio(right->task->prio))
+        if (dl_prio(left->prio))
                 return (left->task->dl.deadline < right->task->dl.deadline);
 
         return 0;
@@ -197,10 +200,18 @@ int rt_mutex_getprio(struct task_struct *task)
         if (likely(!task_has_pi_waiters(task)))
                 return task->normal_prio;
 
-        return min(task_top_pi_waiter(task)->task->prio,
+        return min(task_top_pi_waiter(task)->prio,
                    task->normal_prio);
 }
 
+struct task_struct *rt_mutex_get_top_task(struct task_struct *task)
+{
+        if (likely(!task_has_pi_waiters(task)))
+                return NULL;
+
+        return task_top_pi_waiter(task)->task;
+}
+
 /*
  * Adjust the priority of a task, after its pi_waiters got modified.
  *
@@ -210,7 +221,7 @@ static void __rt_mutex_adjust_prio(struct task_struct *task)
 {
         int prio = rt_mutex_getprio(task);
 
-        if (task->prio != prio)
+        if (task->prio != prio || dl_prio(prio))
                 rt_mutex_setprio(task, prio);
 }
 
@@ -328,7 +339,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
          * When deadlock detection is off then we check, if further
          * priority adjustment is necessary.
          */
-        if (!detect_deadlock && waiter->task->prio == task->prio)
+        if (!detect_deadlock && waiter->prio == task->prio)
                 goto out_unlock_pi;
 
         lock = waiter->lock;
@@ -350,7 +361,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
 
         /* Requeue the waiter */
         rt_mutex_dequeue(lock, waiter);
-        waiter->task->prio = task->prio;
+        waiter->prio = task->prio;
         rt_mutex_enqueue(lock, waiter);
 
         /* Release the task */
@@ -448,7 +459,7 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
          * 3) it is top waiter
          */
         if (rt_mutex_has_waiters(lock)) {
-                if (task->prio >= rt_mutex_top_waiter(lock)->task->prio) {
+                if (task->prio >= rt_mutex_top_waiter(lock)->prio) {
                         if (!waiter || waiter != rt_mutex_top_waiter(lock))
                                 return 0;
                 }
@@ -508,6 +519,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
         __rt_mutex_adjust_prio(task);
         waiter->task = task;
         waiter->lock = lock;
+        waiter->prio = task->prio;
 
         /* Get the top priority waiter on the lock */
         if (rt_mutex_has_waiters(lock))
@@ -653,7 +665,8 @@ void rt_mutex_adjust_pi(struct task_struct *task)
         raw_spin_lock_irqsave(&task->pi_lock, flags);
 
         waiter = task->pi_blocked_on;
-        if (!waiter || waiter->task->prio == task->prio) {
+        if (!waiter || (waiter->prio == task->prio &&
+                        !dl_prio(task->prio))) {
                 raw_spin_unlock_irqrestore(&task->pi_lock, flags);
                 return;
         }
diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h
index b65442fe5ade..7431a9c86f35 100644
--- a/kernel/locking/rtmutex_common.h
+++ b/kernel/locking/rtmutex_common.h
@@ -54,6 +54,7 @@ struct rt_mutex_waiter {
         struct pid              *deadlock_task_pid;
         struct rt_mutex         *deadlock_lock;
 #endif
+        int prio;
 };
 
 /*
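As a usage note (a sketch, not part of this diff): the new rt_mutex_get_top_task() helper exists so the deadline scheduler can pick which sched_dl_entity's parameters should drive replenishment while a task is boosted. pick_boosting_entity() below is a hypothetical wrapper showing the intended pattern; only rt_mutex_get_top_task() and dl_prio() are real.

/*
 * Hypothetical caller-side sketch: choose the deadline entity whose
 * dl_runtime/dl_deadline should be used while @p holds a pi-lock with
 * waiters.
 */
static struct sched_dl_entity *pick_boosting_entity(struct task_struct *p)
{
        struct task_struct *pi_task = rt_mutex_get_top_task(p);

        /* Boosting only applies when the top waiter is a deadline task. */
        if (pi_task && dl_prio(pi_task->normal_prio))
                return &pi_task->dl;

        return &p->dl;  /* fall back to the task's own parameters */
}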