author	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2012-09-25 13:03:56 -0400
committer	Paul E. McKenney <paulmck@linux.vnet.ibm.com>	2012-09-25 13:03:56 -0400
commit	593d1006cdf710ab3469c0c37c184fea0bc3da97 (patch)
tree	e4db58440018a52089e8d6b39160f753ab10df99 /kernel/sched/core.c
parent	5217192b85480353aeeb395574e60d0db04f3676 (diff)
parent	9b20aa63b8fc9a6a3b6831f4eae3621755e51211 (diff)
Merge remote-tracking branch 'tip/core/rcu' into next.2012.09.25b
Resolved conflict in kernel/sched/core.c using Peter Zijlstra's approach from https://lkml.org/lkml/2012/9/5/585.
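The resolution keeps the simpler CPU_DEAD handling visible in the second hunk below: the open-coded cross-runqueue locking around calc_load_migrate() is dropped in favor of a bare call. As a sketch of the resulting handler, with its shape taken directly from that hunk's added line:

	case CPU_DEAD:
		calc_load_migrate(rq);
		break;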
Diffstat (limited to 'kernel/sched/core.c')
 kernel/sched/core.c | 52 ++--------------------------------------------------
 1 file changed, 2 insertions(+), 50 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 8c38b5e7ce47..1a48cdbc8631 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5342,9 +5342,6 @@ static void migrate_tasks(unsigned int dead_cpu)
 	 */
 	rq->stop = NULL;
 
-	/* Ensure any throttled groups are reachable by pick_next_task */
-	unthrottle_offline_cfs_rqs(rq);
-
 	for ( ; ; ) {
 		/*
 		 * There's this thread running, bail when that's the only
@@ -5610,15 +5607,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		break;
 
 	case CPU_DEAD:
-		{
-			struct rq *dest_rq;
-
-			local_irq_save(flags);
-			dest_rq = cpu_rq(smp_processor_id());
-			raw_spin_lock(&dest_rq->lock);
-			calc_load_migrate(rq);
-			raw_spin_unlock_irqrestore(&dest_rq->lock, flags);
-		}
+		calc_load_migrate(rq);
 		break;
 #endif
 	}
@@ -6027,11 +6016,6 @@ static void destroy_sched_domains(struct sched_domain *sd, int cpu)
  * SD_SHARE_PKG_RESOURCE set (Last Level Cache Domain) for this
  * allows us to avoid some pointer chasing select_idle_sibling().
  *
- * Iterate domains and sched_groups downward, assigning CPUs to be
- * select_idle_sibling() hw buddy.  Cross-wiring hw makes bouncing
- * due to random perturbation self canceling, ie sw buddies pull
- * their counterpart to their CPU's hw counterpart.
- *
  * Also keep a unique ID per domain (we use the first cpu number in
  * the cpumask of the domain), this allows us to quickly tell if
  * two cpus are in the same cache domain, see cpus_share_cache().
@@ -6045,40 +6029,8 @@ static void update_top_cache_domain(int cpu)
 	int id = cpu;
 
 	sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
-	if (sd) {
-		struct sched_domain *tmp = sd;
-		struct sched_group *sg, *prev;
-		bool right;
-
-		/*
-		 * Traverse to first CPU in group, and count hops
-		 * to cpu from there, switching direction on each
-		 * hop, never ever pointing the last CPU rightward.
-		 */
-		do {
-			id = cpumask_first(sched_domain_span(tmp));
-			prev = sg = tmp->groups;
-			right = 1;
-
-			while (cpumask_first(sched_group_cpus(sg)) != id)
-				sg = sg->next;
-
-			while (!cpumask_test_cpu(cpu, sched_group_cpus(sg))) {
-				prev = sg;
-				sg = sg->next;
-				right = !right;
-			}
-
-			/* A CPU went down, never point back to domain start. */
-			if (right && cpumask_first(sched_group_cpus(sg->next)) == id)
-				right = false;
-
-			sg = right ? sg->next : prev;
-			tmp->idle_buddy = cpumask_first(sched_group_cpus(sg));
-		} while ((tmp = tmp->child));
-
+	if (sd)
 		id = cpumask_first(sched_domain_span(sd));
-	}
 
 	rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
 	per_cpu(sd_llc_id, cpu) = id;
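For reference, a minimal sketch of update_top_cache_domain() as it reads after this merge, reconstructed from the final hunk's header, context, and added lines; the local declaration of sd is inferred from its use rather than shown in the hunk, and the comments are paraphrases, not the file's own:

	static void update_top_cache_domain(int cpu)
	{
		struct sched_domain *sd;	/* inferred: declaration not shown in the hunk */
		int id = cpu;

		/* Highest domain sharing a last-level cache with this CPU, if any. */
		sd = highest_flag_domain(cpu, SD_SHARE_PKG_RESOURCES);
		if (sd)
			id = cpumask_first(sched_domain_span(sd));

		/* Publish the LLC domain and its ID for cpus_share_cache(). */
		rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
		per_cpu(sd_llc_id, cpu) = id;
	}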