author		Linus Torvalds <torvalds@linux-foundation.org>	2015-02-06 16:34:26 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-02-06 16:34:26 -0500
commit		396e9099ea5b4bc442995c807a779025d06663e8 (patch)
tree		6fe77a91b60abcf0cc5f7153a068f2e0ea60d1a0
parent		29f12c48df4e6cba9df39dbe9d99649be27fb346 (diff)
parent		40767b0dc768060266d261b4a330164b4be53f7c (diff)
Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler fixes from Ingo Molnar:
 "Misc fixes"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/deadline: Fix deadline parameter modification handling
  sched/wait: Remove might_sleep() from wait_event_cmd()
  sched: Fix crash if cpuset_cpumask_can_shrink() is passed an empty cpumask
  sched/fair: Avoid using uninitialized variable in preferred_group_nid()
-rw-r--r--	include/linux/wait.h	1
-rw-r--r--	kernel/sched/core.c	36
-rw-r--r--	kernel/sched/deadline.c	3
-rw-r--r--	kernel/sched/fair.c	2
4 files changed, 34 insertions(+), 8 deletions(-)
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 2232ed16635a..37423e0e1379 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -363,7 +363,6 @@ do { \
  */
 #define wait_event_cmd(wq, condition, cmd1, cmd2)			\
 do {									\
-	might_sleep();							\
 	if (condition)							\
 		break;							\
 	__wait_event_cmd(wq, condition, cmd1, cmd2);			\
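wait_event_cmd()'s cmd1/cmd2 hooks exist so a caller can drop and retake a lock around the actual sleep; the macro's user in md/raid5 enters it with a spinlock held, so a might_sleep() at the top fires while the lock is still held even though the eventual schedule() is safe. An illustrative sketch of that call pattern (modeled on the raid5 usage; 'conf' and its fields are stand-ins, not verbatim kernel code):

	spin_lock_irq(&conf->device_lock);
	wait_event_cmd(conf->wait_for_stripe,
		       !list_empty(&conf->inactive_list),
		       /* cmd1: drop the lock right before sleeping */
		       spin_unlock_irq(&conf->device_lock),
		       /* cmd2: retake it after waking */
		       spin_lock_irq(&conf->device_lock));
	spin_unlock_irq(&conf->device_lock);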
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index e628cb11b560..5eab11d4b747 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1814,6 +1814,10 @@ void __dl_clear_params(struct task_struct *p)
 	dl_se->dl_period = 0;
 	dl_se->flags = 0;
 	dl_se->dl_bw = 0;
+
+	dl_se->dl_throttled = 0;
+	dl_se->dl_new = 1;
+	dl_se->dl_yielded = 0;
 }
 
 /*
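Moving the throttle state into __dl_clear_params() means every path that clears a task's deadline parameters (fork via __sched_fork(), and switched_from_dl() below) also resets dl_throttled/dl_new/dl_yielded, not just __setparam_dl(). For reference, the resulting function (reconstructed; the first two assignments come from the surrounding 3.19-era source rather than the context shown above):

void __dl_clear_params(struct task_struct *p)
{
	struct sched_dl_entity *dl_se = &p->dl;

	dl_se->dl_runtime = 0;
	dl_se->dl_deadline = 0;
	dl_se->dl_period = 0;
	dl_se->flags = 0;
	dl_se->dl_bw = 0;

	/* Reset throttle state whenever the parameters are cleared. */
	dl_se->dl_throttled = 0;
	dl_se->dl_new = 1;
	dl_se->dl_yielded = 0;
}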
@@ -1839,7 +1843,7 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
 #endif
 
 	RB_CLEAR_NODE(&p->dl.rb_node);
-	hrtimer_init(&p->dl.dl_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	init_dl_task_timer(&p->dl);
 	__dl_clear_params(p);
 
 	INIT_LIST_HEAD(&p->rt.run_list);
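init_dl_task_timer() differs from the bare hrtimer_init() it replaces in that it also installs the timer callback, which previously happened only in __setparam_dl(), i.e. the first time a task actually became SCHED_DEADLINE. For reference, the helper in kernel/sched/deadline.c of this era reads roughly:

void init_dl_task_timer(struct sched_dl_entity *dl_se)
{
	struct hrtimer *timer = &dl_se->dl_timer;

	/* Same hrtimer setup as before, plus the callback. */
	hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	timer->function = dl_task_timer;
}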
@@ -2049,6 +2053,9 @@ static inline int dl_bw_cpus(int i)
  * allocated bandwidth to reflect the new situation.
  *
  * This function is called while holding p's rq->lock.
+ *
+ * XXX we should delay bw change until the task's 0-lag point, see
+ * __setparam_dl().
  */
 static int dl_overflow(struct task_struct *p, int policy,
 		       const struct sched_attr *attr)
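dl_overflow() performs deadline admission control against the root domain's dl_bw. The XXX above is about *when* the accounting should change, not *how*; the underlying capacity test, as found in kernel/sched/sched.h of this era (shown for reference, signature from memory of the 3.19 source):

static inline
bool __dl_overflow(struct dl_bw *dl_b, int cpus, u64 old_bw, u64 new_bw)
{
	/*
	 * True if replacing old_bw with new_bw would push the summed
	 * utilization past the cpus * per-CPU bandwidth cap.
	 */
	return dl_b->bw != -1 &&
	       dl_b->bw * cpus < dl_b->total_bw - old_bw + new_bw;
}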
@@ -3251,15 +3258,31 @@ __setparam_dl(struct task_struct *p, const struct sched_attr *attr)
 {
 	struct sched_dl_entity *dl_se = &p->dl;
 
-	init_dl_task_timer(dl_se);
 	dl_se->dl_runtime = attr->sched_runtime;
 	dl_se->dl_deadline = attr->sched_deadline;
 	dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline;
 	dl_se->flags = attr->sched_flags;
 	dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
-	dl_se->dl_throttled = 0;
-	dl_se->dl_new = 1;
-	dl_se->dl_yielded = 0;
+
+	/*
+	 * Changing the parameters of a task is 'tricky' and we're not doing
+	 * the correct thing -- also see task_dead_dl() and switched_from_dl().
+	 *
+	 * What we SHOULD do is delay the bandwidth release until the 0-lag
+	 * point. This would include retaining the task_struct until that time
+	 * and change dl_overflow() to not immediately decrement the current
+	 * amount.
+	 *
+	 * Instead we retain the current runtime/deadline and let the new
+	 * parameters take effect after the current reservation period lapses.
+	 * This is safe (albeit pessimistic) because the 0-lag point is always
+	 * before the current scheduling deadline.
+	 *
+	 * We can still have temporary overloads because we do not delay the
+	 * change in bandwidth until that time; so admission control is
+	 * not on the safe side. It does however guarantee tasks will never
+	 * consume more than promised.
+	 */
 }
 
 /*
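The 0-lag point referenced throughout is the instant at which the task's lag relative to an ideal fluid schedule reaches zero; with remaining budget 'runtime' and utilization dl_runtime/dl_period it sits at deadline - runtime * dl_period / dl_runtime, which is never after the deadline (that is the "always before the current scheduling deadline" claim in the comment). A standalone userspace sketch of the arithmetic, with all names and numbers local to the example:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Reservation: 10 ms of runtime every 100 ms (utilization 0.1). */
	uint64_t dl_runtime = 10000000ULL;	/* ns */
	uint64_t dl_period  = 100000000ULL;	/* ns */

	/* Snapshot of the task's current state (made-up values). */
	uint64_t deadline = 500000000ULL;	/* absolute, ns */
	uint64_t runtime  = 4000000ULL;		/* budget left, ns */

	/*
	 * Zero-lag point: the remaining budget, consumed at the task's
	 * reserved rate, runs out here.  Always <= deadline, which is
	 * why deferring the bandwidth release until then would be safe.
	 */
	uint64_t t0 = deadline - runtime * dl_period / dl_runtime;

	printf("0-lag point is %llu ns before the deadline\n",
	       (unsigned long long)(deadline - t0));	/* 40000000 */
	return 0;
}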
@@ -4642,6 +4665,9 @@ int cpuset_cpumask_can_shrink(const struct cpumask *cur,
 	struct dl_bw *cur_dl_b;
 	unsigned long flags;
 
+	if (!cpumask_weight(cur))
+		return ret;
+
 	rcu_read_lock_sched();
 	cur_dl_b = dl_bw_of(cpumask_any(cur));
 	trial_cpus = cpumask_weight(trial);
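The early return guards the crash this pull fixes: cpumask_any() on an empty mask returns nr_cpu_ids, and dl_bw_of() then indexes per-CPU runqueue data with it. An empty cpuset can host no tasks, so shrinking it is trivially allowed, which the function's default return value (ret, initialized to 1 in the surrounding source) reports. Annotated sketch of the pre-fix failure path, not new code:

	/* cur is empty, e.g. a cpuset whose CPUs were all hot-removed: */
	cpu = cpumask_any(cur);		/* no bit set -> returns nr_cpu_ids */
	cur_dl_b = dl_bw_of(cpu);	/* dereferences cpu_rq(nr_cpu_ids)  */
					/* -> out-of-bounds access, oops    */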
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index b52092f2636d..726470d47f87 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -1094,6 +1094,7 @@ static void task_dead_dl(struct task_struct *p)
 	 * Since we are TASK_DEAD we won't slip out of the domain!
 	 */
 	raw_spin_lock_irq(&dl_b->lock);
+	/* XXX we should retain the bw until 0-lag */
 	dl_b->total_bw -= p->dl.dl_bw;
 	raw_spin_unlock_irq(&dl_b->lock);
 
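total_bw is the sum of the admitted tasks' dl_bw values, each a fixed-point utilization produced by to_ratio(). For reference, the helper in kernel/sched/core.c of this era looks roughly like the following, so a 10 ms / 100 ms task contributes (10 << 20) / 100, about 0.1 in Q20:

unsigned long to_ratio(u64 period, u64 runtime)
{
	if (runtime == RUNTIME_INF)
		return 1ULL << 20;

	/* Returning zero for a zero period is safe for all callers. */
	if (period == 0)
		return 0;

	return div64_u64(runtime << 20, period);	/* Q20 fixed point */
}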
@@ -1614,8 +1615,8 @@ static void cancel_dl_timer(struct rq *rq, struct task_struct *p)
 
 static void switched_from_dl(struct rq *rq, struct task_struct *p)
 {
+	/* XXX we should retain the bw until 0-lag */
 	cancel_dl_timer(rq, p);
-
 	__dl_clear_params(p);
 
 	/*
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 40667cbf371b..fe331fc391f5 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1730,7 +1730,7 @@ static int preferred_group_nid(struct task_struct *p, int nid)
 		nodes = node_online_map;
 		for (dist = sched_max_numa_distance; dist > LOCAL_DISTANCE; dist--) {
 			unsigned long max_faults = 0;
-			nodemask_t max_group;
+			nodemask_t max_group = NODE_MASK_NONE;
 			int a, b;
 
 			/* Are there nodes at this distance from each other? */
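Initializing max_group to NODE_MASK_NONE gives the "no group ever won" case a defined value instead of whatever happened to be on the stack, silencing legitimate compiler warnings and making the later 'nodes = max_group' assignment well-defined. A minimal userspace model of the pattern, with all names local to this sketch:

#include <stdio.h>

int main(void)
{
	/* All fault counts are zero, so no candidate ever wins. */
	unsigned long faults[4] = { 0, 0, 0, 0 };
	unsigned long max_faults = 0;
	unsigned long max_group = 0;	/* the fix: well-defined "empty" */

	for (int i = 0; i < 4; i++) {
		if (faults[i] > max_faults) {	/* never true here */
			max_faults = faults[i];
			max_group = 1UL << i;
		}
	}

	/* Without the initializer this would read indeterminate stack data. */
	printf("winning group mask: %#lx\n", max_group);
	return 0;
}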