author		Linus Torvalds <torvalds@linux-foundation.org>	2015-01-11 14:51:49 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-01-11 14:51:49 -0500
commit		5ab551d662396f8437ec5aba12210b7a67eb492b (patch)
tree		a4bbf500a6455afed4edf0bee6e1171f0e703ae4 /kernel
parent		ddb321a8dd158520d97ed1cbade1d4ac36b6af31 (diff)
parent		7f1a169b88f513e32a432ca0f85bfd282d117bd6 (diff)
Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler fixes from Ingo Molnar:
 "Misc fixes: group scheduling corner case fix, two deadline scheduler
  fixes, effective_load() overflow fix, nested sleep fix, 6144 CPUs
  system fix"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/fair: Fix RCU stall upon -ENOMEM in sched_create_group()
  sched/deadline: Avoid double-accounting in case of missed deadlines
  sched/deadline: Fix migration of SCHED_DEADLINE tasks
  sched: Fix odd values in effective_load() calculations
  sched, fanotify: Deal with nested sleeps
  sched: Fix KMALLOC_MAX_SIZE overflow during cpumask allocation
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched/core.c	13
-rw-r--r--	kernel/sched/deadline.c	25
-rw-r--r--	kernel/sched/fair.c	6
3 files changed, 14 insertions(+), 30 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index b5797b78add6..c0accc00566e 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7113,9 +7113,6 @@ void __init sched_init(void)
 #ifdef CONFIG_RT_GROUP_SCHED
 	alloc_size += 2 * nr_cpu_ids * sizeof(void **);
 #endif
-#ifdef CONFIG_CPUMASK_OFFSTACK
-	alloc_size += num_possible_cpus() * cpumask_size();
-#endif
 	if (alloc_size) {
 		ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
 
@@ -7135,13 +7132,13 @@ void __init sched_init(void)
 		ptr += nr_cpu_ids * sizeof(void **);
 
 #endif /* CONFIG_RT_GROUP_SCHED */
+	}
 #ifdef CONFIG_CPUMASK_OFFSTACK
 	for_each_possible_cpu(i) {
-		per_cpu(load_balance_mask, i) = (void *)ptr;
-		ptr += cpumask_size();
-	}
-#endif /* CONFIG_CPUMASK_OFFSTACK */
+		per_cpu(load_balance_mask, i) = (cpumask_var_t)kzalloc_node(
+			cpumask_size(), GFP_KERNEL, cpu_to_node(i));
 	}
+#endif /* CONFIG_CPUMASK_OFFSTACK */
 
 	init_rt_bandwidth(&def_rt_bandwidth,
 			  global_rt_period(), global_rt_runtime());
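
Why the removed sizing overflowed: with CONFIG_CPUMASK_OFFSTACK, the load_balance_mask storage for every possible CPU was folded into sched_init()'s single kzalloc(), and on very large machines that total exceeds the largest size kmalloc() can serve. A rough userspace sketch of the arithmetic, assuming the 6144-CPU machine from the commit list and a 4 MiB KMALLOC_MAX_SIZE (the real limit depends on page size and allocator configuration):

#include <stdio.h>

int main(void)
{
	unsigned long nr_cpu_ids = 6144;               /* CPUs on the failing machine */
	unsigned long mask_bytes = nr_cpu_ids / 8;     /* one bit per CPU: 768 bytes */
	unsigned long total = nr_cpu_ids * mask_bytes; /* all masks in one allocation */
	unsigned long kmalloc_max = 4UL << 20;         /* assumed 4 MiB KMALLOC_MAX_SIZE */

	printf("one cpumask: %lu bytes\n", mask_bytes);
	printf("all masks:   %lu bytes (limit %lu)\n", total, kmalloc_max);
	/* 4718592 > 4194304: the lumped allocation cannot succeed. */
	return 0;
}

The replacement allocates each mask separately with kzalloc_node(), which keeps every request small and, as a bonus, places each mask on its CPU's home NUMA node.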
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index e5db8c6feebd..b52092f2636d 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -570,24 +570,7 @@ void init_dl_task_timer(struct sched_dl_entity *dl_se)
570static 570static
571int dl_runtime_exceeded(struct rq *rq, struct sched_dl_entity *dl_se) 571int dl_runtime_exceeded(struct rq *rq, struct sched_dl_entity *dl_se)
572{ 572{
573 int dmiss = dl_time_before(dl_se->deadline, rq_clock(rq)); 573 return (dl_se->runtime <= 0);
574 int rorun = dl_se->runtime <= 0;
575
576 if (!rorun && !dmiss)
577 return 0;
578
579 /*
580 * If we are beyond our current deadline and we are still
581 * executing, then we have already used some of the runtime of
582 * the next instance. Thus, if we do not account that, we are
583 * stealing bandwidth from the system at each deadline miss!
584 */
585 if (dmiss) {
586 dl_se->runtime = rorun ? dl_se->runtime : 0;
587 dl_se->runtime -= rq_clock(rq) - dl_se->deadline;
588 }
589
590 return 1;
591} 574}
592 575
593extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq); 576extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
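
The deleted dmiss branch is what double-accounted missed deadlines: update_curr_dl() already decrements dl_se->runtime by all time actually executed, including time run past the deadline, so subtracting the overrun again here charged it twice. A toy sketch with made-up numbers (milliseconds, not the kernel's nanoseconds):

#include <stdio.h>

int main(void)
{
	long runtime = 10;   /* budget for this instance */
	long executed = 12;  /* actually ran 2 ms past the deadline */
	long overrun = 2;    /* rq_clock(rq) - dl_se->deadline */

	runtime -= executed; /* update_curr_dl(): runtime == -2, overrun charged once */
	runtime -= overrun;  /* old dmiss branch: runtime == -4, charged a second time */
	printf("runtime after double-accounting: %ld ms\n", runtime);
	return 0;
}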
@@ -826,10 +809,10 @@ enqueue_dl_entity(struct sched_dl_entity *dl_se,
 	 * parameters of the task might need updating. Otherwise,
 	 * we want a replenishment of its runtime.
 	 */
-	if (!dl_se->dl_new && flags & ENQUEUE_REPLENISH)
-		replenish_dl_entity(dl_se, pi_se);
-	else
+	if (dl_se->dl_new || flags & ENQUEUE_WAKEUP)
 		update_dl_entity(dl_se, pi_se);
+	else if (flags & ENQUEUE_REPLENISH)
+		replenish_dl_entity(dl_se, pi_se);
 
 	__enqueue_dl_entity(dl_se);
 }
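
The reordered condition is what fixes migration: a SCHED_DEADLINE task pushed or pulled to another runqueue is enqueued with neither ENQUEUE_WAKEUP nor ENQUEUE_REPLENISH, and the old else branch handed it a fresh deadline via update_dl_entity() on every move. A simplified userspace sketch of the new dispatch (flag values hypothetical):

#include <stdio.h>

enum { ENQUEUE_WAKEUP = 1, ENQUEUE_REPLENISH = 2 }; /* illustrative values */

/* Which helper the new code picks for a given enqueue. */
static const char *dl_enqueue_action(int dl_new, int flags)
{
	if (dl_new || (flags & ENQUEUE_WAKEUP))
		return "update_dl_entity";    /* may grant a whole new deadline */
	if (flags & ENQUEUE_REPLENISH)
		return "replenish_dl_entity"; /* refill runtime, keep accounting */
	return "none";                        /* plain migration: parameters kept */
}

int main(void)
{
	printf("wakeup    -> %s\n", dl_enqueue_action(0, ENQUEUE_WAKEUP));
	printf("replenish -> %s\n", dl_enqueue_action(0, ENQUEUE_REPLENISH));
	printf("migration -> %s\n", dl_enqueue_action(0, 0));
	return 0;
}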
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index df2cdf77f899..40667cbf371b 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4005,6 +4005,10 @@ void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b, bool force)
 
 static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
 {
+	/* init_cfs_bandwidth() was not called */
+	if (!cfs_b->throttled_cfs_rq.next)
+		return;
+
 	hrtimer_cancel(&cfs_b->period_timer);
 	hrtimer_cancel(&cfs_b->slack_timer);
 }
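
Context for the new guard: when alloc_fair_sched_group() fails with -ENOMEM before init_cfs_bandwidth() has run, the error path still reaches destroy_cfs_bandwidth(), and hrtimer_cancel() on a never-initialized timer can spin forever, which is the reported RCU stall. The check works because init_cfs_bandwidth() runs INIT_LIST_HEAD() on throttled_cfs_rq, making the head point at itself, so .next is NULL only in zeroed, never-initialized memory. A standalone sketch of that sentinel pattern (struct names abbreviated):

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h; h->prev = h; }

struct bw { struct list_head throttled_cfs_rq; /* timers, quota, ... */ };

static void destroy(struct bw *b)
{
	if (!b->throttled_cfs_rq.next) {  /* init never ran: nothing to tear down */
		printf("skipping teardown\n");
		return;
	}
	printf("cancelling timers\n");    /* safe: init ran first */
}

int main(void)
{
	struct bw never_inited = {{0}};   /* kzalloc()-style zeroed memory */
	struct bw inited = {{0}};

	INIT_LIST_HEAD(&inited.throttled_cfs_rq);
	destroy(&never_inited);           /* the -ENOMEM error path */
	destroy(&inited);                 /* normal teardown */
	return 0;
}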
@@ -4424,7 +4428,7 @@ static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
 	 * wl = S * s'_i; see (2)
 	 */
 	if (W > 0 && w < W)
-		wl = (w * tg->shares) / W;
+		wl = (w * (long)tg->shares) / W;
 	else
 		wl = tg->shares;
 
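
The added (long) cast fixes a C integer promotion bug rather than anything scheduler-specific: w can be negative, tg->shares is unsigned long, and w * tg->shares converts w to unsigned long, so a small negative weight becomes an enormous positive value before the divide. That is where the "odd values" came from. An illustration with made-up values, for a 64-bit long:

#include <stdio.h>

int main(void)
{
	long w = -90;                 /* effective_load() can see negative weights */
	unsigned long shares = 1024;  /* tg->shares is unsigned long */
	long W = 2048;

	long bad  = (w * shares) / W;       /* unsigned multiply: ~9.0e15, not -45 */
	long good = (w * (long)shares) / W; /* signed multiply: -45 as intended */

	printf("bad=%ld good=%ld\n", bad, good);
	return 0;
}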