diff options
author | Kirill Tkhai <ktkhai@parallels.com> | 2014-03-04 10:25:46 -0500 |
---|---|---|
committer | Ingo Molnar <mingo@kernel.org> | 2014-03-11 07:05:35 -0400 |
commit | 734ff2a71f9e6aa6fedfa5a9a34818b8586516d5 (patch) | |
tree | 50ee9626f98a1849c98a6d7308e259e4ad93b889 /kernel | |
parent | a02ed5e3e05ec5e8af21e645cccc77f3a6480aaf (diff) |
sched/rt: Fix picking RT and DL tasks from empty queue
The problems:
1) We check for rt_nr_running before the call to put_prev_task().
If the previous task is RT, its rt_rq may become throttled
and dequeued after this call.
If p is from rt->rq, this just causes picking a task
from a throttled queue, but if its rt_rq is a child,
we are guaranteed to hit the BUG_ON.
2) The same problem exists with the deadline class. The only
difference is that we operate only on dl_rq.
This patch fixes all the above problems and adds a small skip in the
DL update like we've already done for the RT class:
if (unlikely((s64)delta_exec <= 0))
return;
This will optimize sequential update_curr_dl() calls a little.
Signed-off-by: Kirill Tkhai <ktkhai@parallels.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Juri Lelli <juri.lelli@gmail.com>
Link: http://lkml.kernel.org/r/1393946746.3643.3.camel@tkhai
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/sched/deadline.c | 10 | ||||
-rw-r--r-- | kernel/sched/rt.c | 7 |
2 files changed, 15 insertions, 2 deletions
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index e4f3ac3b8514..27ef40925525 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c | |||
@@ -609,8 +609,8 @@ static void update_curr_dl(struct rq *rq) | |||
609 | * approach need further study. | 609 | * approach need further study. |
610 | */ | 610 | */ |
611 | delta_exec = rq_clock_task(rq) - curr->se.exec_start; | 611 | delta_exec = rq_clock_task(rq) - curr->se.exec_start; |
612 | if (unlikely((s64)delta_exec < 0)) | 612 | if (unlikely((s64)delta_exec <= 0)) |
613 | delta_exec = 0; | 613 | return; |
614 | 614 | ||
615 | schedstat_set(curr->se.statistics.exec_max, | 615 | schedstat_set(curr->se.statistics.exec_max, |
616 | max(curr->se.statistics.exec_max, delta_exec)); | 616 | max(curr->se.statistics.exec_max, delta_exec)); |
@@ -1023,6 +1023,12 @@ struct task_struct *pick_next_task_dl(struct rq *rq, struct task_struct *prev) | |||
1023 | 1023 | ||
1024 | if (need_pull_dl_task(rq, prev)) | 1024 | if (need_pull_dl_task(rq, prev)) |
1025 | pull_dl_task(rq); | 1025 | pull_dl_task(rq); |
1026 | /* | ||
1027 | * When prev is DL, we may throttle it in put_prev_task(). | ||
1028 | * So, we update time before we check for dl_nr_running. | ||
1029 | */ | ||
1030 | if (prev->sched_class == &dl_sched_class) | ||
1031 | update_curr_dl(rq); | ||
1026 | 1032 | ||
1027 | if (unlikely(!dl_rq->dl_nr_running)) | 1033 | if (unlikely(!dl_rq->dl_nr_running)) |
1028 | return NULL; | 1034 | return NULL; |
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index facc824334fb..f3cee0a63b76 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c | |||
@@ -1379,6 +1379,13 @@ pick_next_task_rt(struct rq *rq, struct task_struct *prev) | |||
1379 | return RETRY_TASK; | 1379 | return RETRY_TASK; |
1380 | } | 1380 | } |
1381 | 1381 | ||
1382 | /* | ||
1383 | * We may dequeue prev's rt_rq in put_prev_task(). | ||
1384 | * So, we update time before rt_nr_running check. | ||
1385 | */ | ||
1386 | if (prev->sched_class == &rt_sched_class) | ||
1387 | update_curr_rt(rq); | ||
1388 | |||
1382 | if (!rt_rq->rt_nr_running) | 1389 | if (!rt_rq->rt_nr_running) |
1383 | return NULL; | 1390 | return NULL; |
1384 | 1391 | ||