author    Dmitry Torokhov <dmitry.torokhov@gmail.com>  2015-02-20 14:54:53 -0500
committer Dmitry Torokhov <dmitry.torokhov@gmail.com>  2015-02-20 14:54:53 -0500
commit    4c971aa78314253cce914ed29e3d90df3326d646
tree      a9dcf0b1fdc9e1aacff90afb5b3ab79983115dcc /kernel/sched/core.c
parent    4ba24fef3eb3b142197135223b90ced2f319cd53
parent    290b799c390d77d27effee3ce312203aaa32ee74

Merge branch 'next' into for-linus

Second round of updates for 3.20.
Diffstat (limited to 'kernel/sched/core.c')
 kernel/sched/core.c | 41 +++++++++++++++++++++++++++++++++--------
 1 file changed, 33 insertions(+), 8 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index c0accc00566e..5eab11d4b747 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1814,6 +1814,10 @@ void __dl_clear_params(struct task_struct *p)
         dl_se->dl_period = 0;
         dl_se->flags = 0;
         dl_se->dl_bw = 0;
+
+        dl_se->dl_throttled = 0;
+        dl_se->dl_new = 1;
+        dl_se->dl_yielded = 0;
 }
 
 /*
@@ -1839,7 +1843,7 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
 #endif
 
         RB_CLEAR_NODE(&p->dl.rb_node);
-        hrtimer_init(&p->dl.dl_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+        init_dl_task_timer(&p->dl);
         __dl_clear_params(p);
 
         INIT_LIST_HEAD(&p->rt.run_list);
@@ -2049,6 +2053,9 @@ static inline int dl_bw_cpus(int i)
  * allocated bandwidth to reflect the new situation.
  *
  * This function is called while holding p's rq->lock.
+ *
+ * XXX we should delay bw change until the task's 0-lag point, see
+ * __setparam_dl().
  */
 static int dl_overflow(struct task_struct *p, int policy,
                        const struct sched_attr *attr)
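The XXX note above refers to the task's 0-lag point: the instant at which a task, given its remaining runtime and its reserved bandwidth, would have received exactly its fair share. What follows is a minimal userspace sketch of that arithmetic; the names dl_snapshot and zero_lag_time are invented for illustration and only mirror fields of struct sched_dl_entity, they are not kernel code.

#include <stdint.h>
#include <stdio.h>

/*
 * Hypothetical snapshot of a deadline reservation; the field names
 * mirror struct sched_dl_entity but this is only an illustration.
 */
struct dl_snapshot {
        uint64_t deadline;      /* absolute scheduling deadline (ns) */
        uint64_t runtime;       /* runtime remaining in this period (ns) */
        uint64_t dl_runtime;    /* reserved runtime per period (ns) */
        uint64_t dl_period;     /* reservation period (ns) */
};

/*
 * 0-lag time: t0 = deadline - runtime / (dl_runtime / dl_period)
 *                = deadline - runtime * dl_period / dl_runtime.
 * The subtracted term is non-negative, so t0 never lies past the
 * current deadline.
 */
static uint64_t zero_lag_time(const struct dl_snapshot *dl)
{
        if (!dl->dl_runtime)    /* defensive check for this sketch only */
                return dl->deadline;

        return dl->deadline - (dl->runtime * dl->dl_period) / dl->dl_runtime;
}

int main(void)
{
        /* 10 ms runtime every 100 ms, 4 ms of budget left, deadline at t = 100 ms */
        struct dl_snapshot dl = {
                .deadline   = 100000000ULL,
                .runtime    =   4000000ULL,
                .dl_runtime =  10000000ULL,
                .dl_period  = 100000000ULL,
        };

        printf("0-lag point: %llu ns (deadline %llu ns)\n",
               (unsigned long long)zero_lag_time(&dl),
               (unsigned long long)dl.deadline);
        return 0;
}

With 4 ms of budget left out of a 10 ms / 100 ms reservation and the deadline at t = 100 ms, the 0-lag point falls at t = 60 ms, before the deadline, which is the property the comment added to __setparam_dl() below relies on.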
@@ -3251,15 +3258,31 @@ __setparam_dl(struct task_struct *p, const struct sched_attr *attr)
 {
         struct sched_dl_entity *dl_se = &p->dl;
 
-        init_dl_task_timer(dl_se);
         dl_se->dl_runtime = attr->sched_runtime;
         dl_se->dl_deadline = attr->sched_deadline;
         dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;
         dl_se->flags = attr->sched_flags;
         dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
-        dl_se->dl_throttled = 0;
-        dl_se->dl_new = 1;
-        dl_se->dl_yielded = 0;
+
+        /*
+         * Changing the parameters of a task is 'tricky' and we're not doing
+         * the correct thing -- also see task_dead_dl() and switched_from_dl().
+         *
+         * What we SHOULD do is delay the bandwidth release until the 0-lag
+         * point. This would include retaining the task_struct until that time
+         * and change dl_overflow() to not immediately decrement the current
+         * amount.
+         *
+         * Instead we retain the current runtime/deadline and let the new
+         * parameters take effect after the current reservation period lapses.
+         * This is safe (albeit pessimistic) because the 0-lag point is always
+         * before the current scheduling deadline.
+         *
+         * We can still have temporary overloads because we do not delay the
+         * change in bandwidth until that time; so admission control is
+         * not on the safe side. It does however guarantee tasks will never
+         * consume more than promised.
+         */
 }
 
 /*
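The bandwidth stored above via to_ratio() is a fixed-point ratio runtime/period, so that admission control (dl_overflow() above) can sum and compare utilizations with plain integer arithmetic. The body of to_ratio() is not part of this diff, so the snippet below is only a hedged sketch of that fixed-point idea: the 20-bit fractional shift and the helper name bw_from_params() are assumptions for the example, not the kernel's definition. It also mirrors the sched_period ?: deadline defaulting visible in the hunk above.

#include <stdint.h>
#include <stdio.h>

/* Illustrative fixed-point scale; the kernel uses its own shift internally. */
#define BW_SHIFT        20
#define BW_UNIT         (1ULL << BW_SHIFT)

/*
 * bw_from_params() is a hypothetical helper: bandwidth = runtime / period,
 * encoded with BW_SHIFT fractional bits so that utilizations can be added
 * and compared without floating point.
 */
static uint64_t bw_from_params(uint64_t runtime, uint64_t deadline,
                               uint64_t period)
{
        /* Like __setparam_dl(): a zero period defaults to the deadline. */
        if (!period)
                period = deadline;
        if (!period)
                return 0;       /* avoid dividing by zero */

        return (runtime << BW_SHIFT) / period;
}

int main(void)
{
        /* 10 ms of runtime every 100 ms: utilization 0.1 of one CPU */
        uint64_t bw = bw_from_params(10000000ULL, 100000000ULL, 0);

        printf("bw = %llu (%.3f of one CPU)\n",
               (unsigned long long)bw, (double)bw / BW_UNIT);
        return 0;
}

For a 10 ms runtime every 100 ms the helper yields roughly BW_UNIT / 10, one tenth of a CPU, which is the quantity dl_overflow() checks against the available deadline bandwidth.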
@@ -4642,6 +4665,9 @@ int cpuset_cpumask_can_shrink(const struct cpumask *cur,
         struct dl_bw *cur_dl_b;
         unsigned long flags;
 
+        if (!cpumask_weight(cur))
+                return ret;
+
         rcu_read_lock_sched();
         cur_dl_b = dl_bw_of(cpumask_any(cur));
         trial_cpus = cpumask_weight(trial);
@@ -7292,13 +7318,12 @@ void __might_sleep(const char *file, int line, int preempt_offset)
          * since we will exit with TASK_RUNNING make sure we enter with it,
          * otherwise we will destroy state.
          */
-        if (WARN_ONCE(current->state != TASK_RUNNING,
+        WARN_ONCE(current->state != TASK_RUNNING && current->task_state_change,
                         "do not call blocking ops when !TASK_RUNNING; "
                         "state=%lx set at [<%p>] %pS\n",
                         current->state,
                         (void *)current->task_state_change,
-                        (void *)current->task_state_change))
-                __set_current_state(TASK_RUNNING);
+                        (void *)current->task_state_change);
 
         ___might_sleep(file, line, preempt_offset);
 }
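For the last hunk, the pattern the warning catches looks like the schematic below: a task marks itself as about to sleep, then calls a primitive that can itself sleep before ever reaching schedule(). The driver structure and function (demo_dev, demo_wait_ready) are invented for this illustration and the fragment is not meant to build outside a kernel tree with CONFIG_DEBUG_ATOMIC_SLEEP enabled; it only shows where the WARN_ONCE() fires now that __might_sleep() no longer forces the state back to TASK_RUNNING.

#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/wait.h>

/* Hypothetical driver state, invented purely for this illustration. */
struct demo_dev {
        struct mutex            lock;
        wait_queue_head_t       wq;
        bool                    ready;
};

/*
 * Deliberately shows the problematic "nested sleep" pattern that
 * __might_sleep() guards against: the task declares it is about to
 * sleep, then calls a primitive that may itself block.
 */
static int demo_wait_ready(struct demo_dev *dev)
{
        DEFINE_WAIT(wait);

        prepare_to_wait(&dev->wq, &wait, TASK_UNINTERRUPTIBLE);

        /*
         * current->state is no longer TASK_RUNNING here, so the
         * might_sleep() check inside mutex_lock() hits the WARN_ONCE()
         * changed in the hunk above.  Before this change the check also
         * rewrote the state back to TASK_RUNNING; now it only warns.
         */
        mutex_lock(&dev->lock);
        if (!dev->ready)
                schedule();
        mutex_unlock(&dev->lock);

        finish_wait(&dev->wq, &wait);
        return 0;
}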