path: root/kernel
author    Linus Torvalds <torvalds@linux-foundation.org>  2012-09-14 20:44:52 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2012-09-14 20:44:52 -0400
commit    889cb3b9a4da012f8f8a336b050d7871d1f7383f (patch)
tree      e6d9a04aa86840e5367f69b6227b7e195f9bd132 /kernel
parent    7ef6e97380a1cb0f38cab795fe696f43c71d3ae9 (diff)
parent    9450d57eab5cad36774c297da123062744472588 (diff)
Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler fixes from Ingo Molnar:
 "Smaller fixlets"

* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched: Fix kernel-doc warnings in kernel/sched/fair.c
  sched: Unthrottle rt runqueues in __disable_runtime()
  sched: Add missing call to calc_load_exit_idle()
  sched: Fix load avg vs cpu-hotplug
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched/core.c       34
-rw-r--r--  kernel/sched/fair.c        9
-rw-r--r--  kernel/sched/rt.c          1
-rw-r--r--  kernel/sched/sched.h       1
-rw-r--r--  kernel/time/tick-sched.c   1
5 files changed, 17 insertions, 29 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index fbf1fd098dc6..a4ea245f3d85 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5304,27 +5304,17 @@ void idle_task_exit(void)
 }
 
 /*
- * While a dead CPU has no uninterruptible tasks queued at this point,
- * it might still have a nonzero ->nr_uninterruptible counter, because
- * for performance reasons the counter is not stricly tracking tasks to
- * their home CPUs. So we just add the counter to another CPU's counter,
- * to keep the global sum constant after CPU-down:
- */
-static void migrate_nr_uninterruptible(struct rq *rq_src)
-{
-        struct rq *rq_dest = cpu_rq(cpumask_any(cpu_active_mask));
-
-        rq_dest->nr_uninterruptible += rq_src->nr_uninterruptible;
-        rq_src->nr_uninterruptible = 0;
-}
-
-/*
- * remove the tasks which were accounted by rq from calc_load_tasks.
+ * Since this CPU is going 'away' for a while, fold any nr_active delta
+ * we might have. Assumes we're called after migrate_tasks() so that the
+ * nr_active count is stable.
+ *
+ * Also see the comment "Global load-average calculations".
  */
-static void calc_global_load_remove(struct rq *rq)
+static void calc_load_migrate(struct rq *rq)
 {
-        atomic_long_sub(rq->calc_load_active, &calc_load_tasks);
-        rq->calc_load_active = 0;
+        long delta = calc_load_fold_active(rq);
+        if (delta)
+                atomic_long_add(delta, &calc_load_tasks);
 }
 
 /*
@@ -5352,9 +5342,6 @@ static void migrate_tasks(unsigned int dead_cpu)
          */
         rq->stop = NULL;
 
-        /* Ensure any throttled groups are reachable by pick_next_task */
-        unthrottle_offline_cfs_rqs(rq);
-
         for ( ; ; ) {
                 /*
                  * There's this thread running, bail when that's the only
@@ -5618,8 +5605,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
                 BUG_ON(rq->nr_running != 1); /* the migration thread */
                 raw_spin_unlock_irqrestore(&rq->lock, flags);
 
-                migrate_nr_uninterruptible(rq);
-                calc_global_load_remove(rq);
+                calc_load_migrate(rq);
                 break;
 #endif
         }
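
[Note: the following is not part of the patch. It is a minimal user-space sketch in plain C of the load-average fold that the new calc_load_migrate() above performs when a CPU is unplugged ("sched: Fix load avg vs cpu-hotplug"). calc_load_fold_active() is assumed to behave as in kernel/sched/core.c of this era, returning the runqueue's not-yet-folded (nr_running + nr_uninterruptible) delta; struct rq and the global counter are simplified stand-ins, not the kernel's types.]

#include <stdio.h>

static long calc_load_tasks;            /* global input to the load average */

struct rq {
        unsigned long nr_running;
        unsigned long nr_uninterruptible;
        long calc_load_active;          /* contribution already folded in */
};

/* Return the delta that has not yet reached calc_load_tasks (assumed semantics). */
static long calc_load_fold_active(struct rq *rq)
{
        long nr_active, delta;

        nr_active = rq->nr_running + (long)rq->nr_uninterruptible;
        delta = nr_active - rq->calc_load_active;
        rq->calc_load_active = nr_active;

        return delta;
}

/* Fold any remaining delta when the CPU goes away (CPU hotplug). */
static void calc_load_migrate(struct rq *rq)
{
        long delta = calc_load_fold_active(rq);

        if (delta)
                calc_load_tasks += delta;       /* atomic_long_add() in the kernel */
}

int main(void)
{
        /* A dying CPU whose last two active tasks were never folded. */
        struct rq rq = { .nr_running = 1, .nr_uninterruptible = 1 };

        calc_load_migrate(&rq);
        printf("calc_load_tasks = %ld\n", calc_load_tasks);    /* prints 2 */
        return 0;
}
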
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index c219bf8d704c..42d9df6a5ca4 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2052,7 +2052,7 @@ static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
         hrtimer_cancel(&cfs_b->slack_timer);
 }
 
-void unthrottle_offline_cfs_rqs(struct rq *rq)
+static void unthrottle_offline_cfs_rqs(struct rq *rq)
 {
         struct cfs_rq *cfs_rq;
 
@@ -2106,7 +2106,7 @@ static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
         return NULL;
 }
 static inline void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {}
-void unthrottle_offline_cfs_rqs(struct rq *rq) {}
+static inline void unthrottle_offline_cfs_rqs(struct rq *rq) {}
 
 #endif /* CONFIG_CFS_BANDWIDTH */
 
@@ -3658,7 +3658,6 @@ fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
  * @group: sched_group whose statistics are to be updated.
  * @load_idx: Load index of sched_domain of this_cpu for load calc.
  * @local_group: Does group contain this_cpu.
- * @cpus: Set of cpus considered for load balancing.
  * @balance: Should we balance.
  * @sgs: variable to hold the statistics for this group.
  */
@@ -3805,7 +3804,6 @@ static bool update_sd_pick_busiest(struct lb_env *env,
 /**
  * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
  * @env: The load balancing environment.
- * @cpus: Set of cpus considered for load balancing.
  * @balance: Should we balance.
  * @sds: variable to hold the statistics for this sched_domain.
  */
@@ -4956,6 +4954,9 @@ static void rq_online_fair(struct rq *rq)
 static void rq_offline_fair(struct rq *rq)
 {
         update_sysctl();
+
+        /* Ensure any throttled groups are reachable by pick_next_task */
+        unthrottle_offline_cfs_rqs(rq);
 }
 
 #endif /* CONFIG_SMP */
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 944cb68420e9..e0b7ba9c040f 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -691,6 +691,7 @@ balanced:
                  * runtime - in which case borrowing doesn't make sense.
                  */
                 rt_rq->rt_runtime = RUNTIME_INF;
+                rt_rq->rt_throttled = 0;
                 raw_spin_unlock(&rt_rq->rt_runtime_lock);
                 raw_spin_unlock(&rt_b->rt_runtime_lock);
         }
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index f6714d009e77..0848fa36c383 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1144,7 +1144,6 @@ extern void print_rt_stats(struct seq_file *m, int cpu);
 
 extern void init_cfs_rq(struct cfs_rq *cfs_rq);
 extern void init_rt_rq(struct rt_rq *rt_rq, struct rq *rq);
-extern void unthrottle_offline_cfs_rqs(struct rq *rq);
 
 extern void account_cfs_bandwidth_used(int enabled, int was_enabled);
 
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 024540f97f74..3a9e5d5c1091 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -573,6 +573,7 @@ static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now)
         tick_do_update_jiffies64(now);
         update_cpu_load_nohz();
 
+        calc_load_exit_idle();
         touch_softlockup_watchdog();
         /*
          * Cancel the scheduled timer and restore the tick