aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorJuri Lelli <juri.lelli@gmail.com>2014-04-15 07:49:04 -0400
committerIngo Molnar <mingo@kernel.org>2014-05-07 05:51:31 -0400
commit5bfd126e80dca70431aef8fdbc1cf14535f3c338 (patch)
tree7f1988050d32bcab50468b0bf4afb2556b638d68
parent2d513868e2a33e1d5315490ef4c861ee65babd65 (diff)
sched/deadline: Fix sched_yield() behavior
yield_task_dl() is broken:

 o it forces current to be throttled setting its runtime to zero;

 o it sets current's dl_se->dl_new to one, expecting that dl_task_timer()
   will queue it back with proper parameters at replenish time.

Unfortunately, dl_task_timer() has this check at the very beginning:

	if (!dl_task(p) || dl_se->dl_new)
		goto unlock;

So, it just bails out and the task is never replenished. It actually
yielded forever.

To fix this, introduce a new flag indicating that the task properly yielded
the CPU before its current runtime expired. While this is a little overdoing
at the moment, the flag would be useful in the future to discriminate between
"good" jobs (of which remaining runtime could be reclaimed, i.e. recycled)
and "bad" jobs (for which dl_throttled task has been set) that needed to be
stopped.

Reported-by: yjay.kim <yjay.kim@lge.com>
Signed-off-by: Juri Lelli <juri.lelli@gmail.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/20140429103953.e68eba1b2ac3309214e3dc5a@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--include/linux/sched.h7
-rw-r--r--kernel/sched/core.c1
-rw-r--r--kernel/sched/deadline.c5
3 files changed, 9 insertions(+), 4 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 25f54c79f757..2a4298fb0d85 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1153,9 +1153,12 @@ struct sched_dl_entity {
 	 *
 	 * @dl_boosted tells if we are boosted due to DI. If so we are
 	 * outside bandwidth enforcement mechanism (but only until we
-	 * exit the critical section).
+	 * exit the critical section);
+	 *
+	 * @dl_yielded tells if task gave up the cpu before consuming
+	 * all its available runtime during the last job.
 	 */
-	int dl_throttled, dl_new, dl_boosted;
+	int dl_throttled, dl_new, dl_boosted, dl_yielded;
 
 	/*
 	 * Bandwidth enforcement timer. Each -deadline task has its
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 9fe2190005cb..e62c65a12d5b 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3124,6 +3124,7 @@ __setparam_dl(struct task_struct *p, const struct sched_attr *attr)
 	dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
 	dl_se->dl_throttled = 0;
 	dl_se->dl_new = 1;
+	dl_se->dl_yielded = 0;
 }
 
 static void __setscheduler_params(struct task_struct *p,
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index b08095786cb8..800e99b99075 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -528,6 +528,7 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer)
 		sched_clock_tick();
 		update_rq_clock(rq);
 		dl_se->dl_throttled = 0;
+		dl_se->dl_yielded = 0;
 		if (p->on_rq) {
 			enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
 			if (task_has_dl_policy(rq->curr))
@@ -893,10 +894,10 @@ static void yield_task_dl(struct rq *rq)
 	 * We make the task go to sleep until its current deadline by
 	 * forcing its runtime to zero. This way, update_curr_dl() stops
 	 * it and the bandwidth timer will wake it up and will give it
-	 * new scheduling parameters (thanks to dl_new=1).
+	 * new scheduling parameters (thanks to dl_yielded=1).
 	 */
 	if (p->dl.runtime > 0) {
-		rq->curr->dl.dl_new = 1;
+		rq->curr->dl.dl_yielded = 1;
 		p->dl.runtime = 0;
 	}
 	update_curr_dl(rq);