| author | Ingo Molnar <mingo@kernel.org> | 2015-02-04 01:44:00 -0500 |
|---|---|---|
| committer | Ingo Molnar <mingo@kernel.org> | 2015-02-04 01:44:00 -0500 |
| commit | 4c195c8a1967ff8bee13a811518a99db04618ab7 (patch) | |
| tree | 555682bd243ac56920ef7a2f3c1f3b666996c7b3 /kernel | |
| parent | 16b269436b7213ebc01dcfcc9dafa8535b676ccb (diff) | |
| parent | 40767b0dc768060266d261b4a330164b4be53f7c (diff) | |
Merge branch 'sched/urgent' into sched/core, to merge fixes before applying new patches
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/sched/core.c | 33 |
-rw-r--r-- | kernel/sched/deadline.c | 3 |
2 files changed, 30 insertions, 6 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 54dce019c0ce..50a5352f6205 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1816,6 +1816,10 @@ void __dl_clear_params(struct task_struct *p)
 	dl_se->dl_period = 0;
 	dl_se->flags = 0;
 	dl_se->dl_bw = 0;
+
+	dl_se->dl_throttled = 0;
+	dl_se->dl_new = 1;
+	dl_se->dl_yielded = 0;
 }
 
 /*
@@ -1844,7 +1848,7 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
 #endif
 
 	RB_CLEAR_NODE(&p->dl.rb_node);
-	hrtimer_init(&p->dl.dl_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	init_dl_task_timer(&p->dl);
 	__dl_clear_params(p);
 
 	INIT_LIST_HEAD(&p->rt.run_list);
@@ -2054,6 +2058,9 @@ static inline int dl_bw_cpus(int i)
  * allocated bandwidth to reflect the new situation.
  *
  * This function is called while holding p's rq->lock.
+ *
+ * XXX we should delay bw change until the task's 0-lag point, see
+ * __setparam_dl().
  */
 static int dl_overflow(struct task_struct *p, int policy,
 		       const struct sched_attr *attr)
@@ -3263,15 +3270,31 @@ __setparam_dl(struct task_struct *p, const struct sched_attr *attr)
 {
 	struct sched_dl_entity *dl_se = &p->dl;
 
-	init_dl_task_timer(dl_se);
 	dl_se->dl_runtime = attr->sched_runtime;
 	dl_se->dl_deadline = attr->sched_deadline;
 	dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;
 	dl_se->flags = attr->sched_flags;
 	dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
-	dl_se->dl_throttled = 0;
-	dl_se->dl_new = 1;
-	dl_se->dl_yielded = 0;
+
+	/*
+	 * Changing the parameters of a task is 'tricky' and we're not doing
+	 * the correct thing -- also see task_dead_dl() and switched_from_dl().
+	 *
+	 * What we SHOULD do is delay the bandwidth release until the 0-lag
+	 * point. This would include retaining the task_struct until that time
+	 * and change dl_overflow() to not immediately decrement the current
+	 * amount.
+	 *
+	 * Instead we retain the current runtime/deadline and let the new
+	 * parameters take effect after the current reservation period lapses.
+	 * This is safe (albeit pessimistic) because the 0-lag point is always
+	 * before the current scheduling deadline.
+	 *
+	 * We can still have temporary overloads because we do not delay the
+	 * change in bandwidth until that time; so admission control is
+	 * not on the safe side. It does however guarantee tasks will never
+	 * consume more than promised.
+	 */
 }
 
 /*
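The comments added above lean on the task's "0-lag point". For a constant-bandwidth reservation with remaining runtime q, absolute deadline d and utilization U = dl_runtime/dl_period, the 0-lag instant is d - q/U; since q >= 0 it can never lie after the current scheduling deadline, which is the property the __setparam_dl() comment relies on when it lets the old parameters run out until the deadline. The sketch below is a hypothetical user-space illustration of that arithmetic (made-up names, floating-free but not the kernel's fixed-point helpers):

```c
/*
 * Illustrative only -- not kernel code. Computes the 0-lag instant of a
 * constant-bandwidth server with remaining runtime 'runtime_left' (ns),
 * absolute deadline 'deadline' (ns) and utilization dl_runtime/dl_period.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t zero_lag_time(uint64_t deadline, uint64_t runtime_left,
                              uint64_t dl_runtime, uint64_t dl_period)
{
	/*
	 * 0-lag point: the instant at which a task served at exactly its
	 * reserved bandwidth U = dl_runtime / dl_period would have received
	 * the service it is still owed:
	 *
	 *	t_0lag = deadline - runtime_left / U
	 *
	 * Because runtime_left >= 0, t_0lag is never later than the current
	 * scheduling deadline -- waiting for the deadline is therefore a
	 * safe, if pessimistic, over-approximation.
	 */
	return deadline - (runtime_left * dl_period) / dl_runtime;
}

int main(void)
{
	/* 10ms runtime every 100ms, 4ms still owed, deadline at t = 100ms. */
	printf("0-lag at %llu ns\n",
	       (unsigned long long)zero_lag_time(100000000ULL, 4000000ULL,
	                                         10000000ULL, 100000000ULL));
	return 0;
}
```

With the numbers above the 0-lag point lands at t = 60ms, i.e. 40ms before the deadline: serving the remaining 4ms at bandwidth 0.1 takes 40ms.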
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index e7b272233c5c..e0e9c2986976 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -1094,6 +1094,7 @@ static void task_dead_dl(struct task_struct *p)
 	 * Since we are TASK_DEAD we won't slip out of the domain!
 	 */
 	raw_spin_lock_irq(&dl_b->lock);
+	/* XXX we should retain the bw until 0-lag */
 	dl_b->total_bw -= p->dl.dl_bw;
 	raw_spin_unlock_irq(&dl_b->lock);
 
@@ -1613,8 +1614,8 @@ static void cancel_dl_timer(struct rq *rq, struct task_struct *p)
 
 static void switched_from_dl(struct rq *rq, struct task_struct *p)
 {
+	/* XXX we should retain the bw until 0-lag */
 	cancel_dl_timer(rq, p);
-
 	__dl_clear_params(p);
 
 	/*
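The two XXX notes in task_dead_dl() and switched_from_dl() flag the same issue: the task's bandwidth is returned to dl_b->total_bw immediately, rather than being held until the 0-lag point. As a purely illustrative sketch of what "retain the bw until 0-lag" could mean (a hypothetical user-space model, not what this commit or the kernel code above does), the release could be queued and only applied once the 0-lag instant has passed:

```c
/*
 * Hypothetical model of deferred bandwidth release: instead of subtracting
 * a departing task's bandwidth from the admitted total right away, queue it
 * and release it once its 0-lag instant has passed.
 */
#include <stdint.h>
#include <stdio.h>

struct bw_pool {
	uint64_t total_bw;              /* currently admitted bandwidth      */
	uint64_t pending_bw;            /* bandwidth waiting for its 0-lag   */
	uint64_t pending_release_time;  /* latest 0-lag instant of the above */
};

/* Task dies or leaves SCHED_DEADLINE: defer the release to its 0-lag time. */
static void defer_bw_release(struct bw_pool *pool, uint64_t task_bw,
                             uint64_t zero_lag)
{
	pool->pending_bw += task_bw;
	if (zero_lag > pool->pending_release_time)
		pool->pending_release_time = zero_lag;
}

/* Called as time advances: apply releases whose 0-lag point has passed. */
static void apply_pending_releases(struct bw_pool *pool, uint64_t now)
{
	if (pool->pending_bw && now >= pool->pending_release_time) {
		pool->total_bw -= pool->pending_bw;
		pool->pending_bw = 0;
	}
}

int main(void)
{
	struct bw_pool pool = { .total_bw = 300 };

	defer_bw_release(&pool, 100, 60);       /* 0-lag at t = 60           */
	apply_pending_releases(&pool, 50);      /* too early: still admitted */
	printf("t=50: total_bw=%llu\n", (unsigned long long)pool.total_bw);
	apply_pending_releases(&pool, 70);      /* past 0-lag: released      */
	printf("t=70: total_bw=%llu\n", (unsigned long long)pool.total_bw);
	return 0;
}
```

Keeping the bandwidth admitted until the 0-lag point is what makes admission control safe against a task that leaves and immediately re-enters with new parameters; the commit above only documents the gap, it does not close it.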