Diffstat (limited to 'kernel/sched/core.c')
-rw-r--r--  kernel/sched/core.c | 73
1 file changed, 11 insertions(+), 62 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index fbf1fd098dc6..649c9f876cb1 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5304,27 +5304,17 @@ void idle_task_exit(void)
 }
 
 /*
- * While a dead CPU has no uninterruptible tasks queued at this point,
- * it might still have a nonzero ->nr_uninterruptible counter, because
- * for performance reasons the counter is not stricly tracking tasks to
- * their home CPUs. So we just add the counter to another CPU's counter,
- * to keep the global sum constant after CPU-down:
- */
-static void migrate_nr_uninterruptible(struct rq *rq_src)
-{
-        struct rq *rq_dest = cpu_rq(cpumask_any(cpu_active_mask));
-
-        rq_dest->nr_uninterruptible += rq_src->nr_uninterruptible;
-        rq_src->nr_uninterruptible = 0;
-}
-
-/*
- * remove the tasks which were accounted by rq from calc_load_tasks.
+ * Since this CPU is going 'away' for a while, fold any nr_active delta
+ * we might have. Assumes we're called after migrate_tasks() so that the
+ * nr_active count is stable.
+ *
+ * Also see the comment "Global load-average calculations".
  */
-static void calc_global_load_remove(struct rq *rq)
+static void calc_load_migrate(struct rq *rq)
 {
-        atomic_long_sub(rq->calc_load_active, &calc_load_tasks);
-        rq->calc_load_active = 0;
+        long delta = calc_load_fold_active(rq);
+        if (delta)
+                atomic_long_add(delta, &calc_load_tasks);
 }
 
 /*
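For context on the hunk above: the two removed helpers are replaced by a single calc_load_migrate(), whose real work is done by calc_load_fold_active(). That function already exists elsewhere in kernel/sched/core.c and is not shown in this diff; a rough paraphrase of what it does (details may differ slightly from this tree):

/*
 * Rough paraphrase of calc_load_fold_active() (not part of this diff):
 * fold nr_running + nr_uninterruptible into a delta against the value
 * the load-average code last sampled for this runqueue.
 */
static long calc_load_fold_active(struct rq *this_rq)
{
        long nr_active, delta = 0;

        nr_active = this_rq->nr_running;
        nr_active += (long)this_rq->nr_uninterruptible;

        if (nr_active != this_rq->calc_load_active) {
                delta = nr_active - this_rq->calc_load_active;
                this_rq->calc_load_active = nr_active;
        }

        return delta;
}

Folding that delta into calc_load_tasks once, after migrate_tasks() has emptied the runqueue, keeps the global sum consistent without shuffling raw ->nr_uninterruptible counts between CPUs.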
@@ -5352,9 +5342,6 @@ static void migrate_tasks(unsigned int dead_cpu)
          */
         rq->stop = NULL;
 
-        /* Ensure any throttled groups are reachable by pick_next_task */
-        unthrottle_offline_cfs_rqs(rq);
-
         for ( ; ; ) {
                 /*
                  * There's this thread running, bail when that's the only
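The removed unthrottle_offline_cfs_rqs() call does not simply go away; in this series the unthrottling moves out of migrate_tasks() and into the fair class's CPU-offline path. A sketch of where it presumably lands (kernel/sched/fair.c, assumed here and not shown in this diff):

/* Sketch, assuming the unthrottle moves to the fair class rq_offline
 * hook in kernel/sched/fair.c; not part of this diff.
 */
static void rq_offline_fair(struct rq *rq)
{
        update_sysctl();

        /* Ensure any throttled groups are reachable by pick_next_task */
        unthrottle_offline_cfs_rqs(rq);
}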
@@ -5618,8 +5605,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
                 BUG_ON(rq->nr_running != 1); /* the migration thread */
                 raw_spin_unlock_irqrestore(&rq->lock, flags);
 
-                migrate_nr_uninterruptible(rq);
-                calc_global_load_remove(rq);
+                calc_load_migrate(rq);
                 break;
 #endif
         }
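For reference, calc_load_tasks is the counter that the global load-average code periodically samples, so the delta folded in by calc_load_migrate() shows up in the next avenrun update. A simplified illustration of the fixed-point averaging it feeds, in the spirit of the kernel's calc_load() but not the exact code from this tree:

#define FSHIFT   11                     /* bits of fractional precision */
#define FIXED_1  (1 << FSHIFT)          /* 1.0 in fixed point */

/* Simplified: decay the old average and blend in the sampled active count. */
static unsigned long calc_load(unsigned long load, unsigned long exp,
                               unsigned long active)
{
        load *= exp;
        load += active * (FIXED_1 - exp);
        return load >> FSHIFT;
}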
@@ -6028,11 +6014,6 @@ static void destroy_sched_domains(struct sched_domain *sd, int cpu)
  * SD_SHARE_PKG_RESOURCE set (Last Level Cache Domain) for this
  * allows us to avoid some pointer chasing select_idle_sibling().
  *
- * Iterate domains and sched_groups downward, assigning CPUs to be
- * select_idle_sibling() hw buddy. Cross-wiring hw makes bouncing
- * due to random perturbation self canceling, ie sw buddies pull
- * their counterpart to their CPU's hw counterpart.
- *
  * Also keep a unique ID per domain (we use the first cpu number in
  * the cpumask of the domain), this allows us to quickly tell if
  * two cpus are in the same cache domain, see cpus_share_cache().
@@ -6046,40 +6027,8 @@ static void update_top_cache_domain(int cpu)
         int id = cpu;
 
         sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
-        if (sd) {
-                struct sched_domain *tmp = sd;
-                struct sched_group *sg, *prev;
-                bool right;
-
-                /*
-                 * Traverse to first CPU in group, and count hops
-                 * to cpu from there, switching direction on each
-                 * hop, never ever pointing the last CPU rightward.
-                 */
-                do {
-                        id = cpumask_first(sched_domain_span(tmp));
-                        prev = sg = tmp->groups;
-                        right = 1;
-
-                        while (cpumask_first(sched_group_cpus(sg)) != id)
-                                sg = sg->next;
-
-                        while (!cpumask_test_cpu(cpu, sched_group_cpus(sg))) {
-                                prev = sg;
-                                sg = sg->next;
-                                right = !right;
-                        }
-
-                        /* A CPU went down, never point back to domain start. */
-                        if (right && cpumask_first(sched_group_cpus(sg->next)) == id)
-                                right = false;
-
-                        sg = right ? sg->next : prev;
-                        tmp->idle_buddy = cpumask_first(sched_group_cpus(sg));
-                } while ((tmp = tmp->child));
-
+        if (sd)
                 id = cpumask_first(sched_domain_span(sd));
-        }
 
         rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
         per_cpu(sd_llc_id, cpu) = id;
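With the buddy cross-wiring reverted, per_cpu(sd_llc_id, cpu) is again simply the first CPU in the last-level-cache domain span, which is all that cpus_share_cache() needs. For reference, that helper (elsewhere in kernel/sched/core.c, untouched by this diff) is essentially:

/* Essentially what cpus_share_cache() checks: two CPUs share a cache
 * domain iff they map to the same LLC id (for reference; not part of
 * this diff).
 */
bool cpus_share_cache(int this_cpu, int that_cpu)
{
        return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
}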