Diffstat (limited to 'kernel/sched.c')
 -rw-r--r--  kernel/sched.c | 37
 1 file changed, 4 insertions(+), 33 deletions(-)
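The hunks below convert every reader of cpu_sibling_map in this file from plain array indexing to the per_cpu() accessor. As orientation, a minimal sketch of the declaration change this assumes on the architecture side (the sketch is not part of this diff, and where exactly the definition lives is an assumption):

	/* old: flat array sized by NR_CPUS, indexed directly */
	cpumask_t cpu_sibling_map[NR_CPUS];

	/* new: per-CPU variable, read via per_cpu(cpu_sibling_map, cpu) */
	DEFINE_PER_CPU(cpumask_t, cpu_sibling_map);

Readers then switch from cpu_sibling_map[cpu] to per_cpu(cpu_sibling_map, cpu), which is exactly the substitution made in the first four hunks.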
diff --git a/kernel/sched.c b/kernel/sched.c
index bba57adb9504..0da2b2635c54 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5869,7 +5869,7 @@ static int cpu_to_core_group(int cpu, const cpumask_t *cpu_map,
 			     struct sched_group **sg)
 {
 	int group;
-	cpumask_t mask = cpu_sibling_map[cpu];
+	cpumask_t mask = per_cpu(cpu_sibling_map, cpu);
 	cpus_and(mask, mask, *cpu_map);
 	group = first_cpu(mask);
 	if (sg)
@@ -5898,7 +5898,7 @@ static int cpu_to_phys_group(int cpu, const cpumask_t *cpu_map,
 	cpus_and(mask, mask, *cpu_map);
 	group = first_cpu(mask);
 #elif defined(CONFIG_SCHED_SMT)
-	cpumask_t mask = cpu_sibling_map[cpu];
+	cpumask_t mask = per_cpu(cpu_sibling_map, cpu);
 	cpus_and(mask, mask, *cpu_map);
 	group = first_cpu(mask);
 #else
@@ -6132,7 +6132,7 @@ static int build_sched_domains(const cpumask_t *cpu_map)
 		p = sd;
 		sd = &per_cpu(cpu_domains, i);
 		*sd = SD_SIBLING_INIT;
-		sd->span = cpu_sibling_map[i];
+		sd->span = per_cpu(cpu_sibling_map, i);
 		cpus_and(sd->span, sd->span, *cpu_map);
 		sd->parent = p;
 		p->child = sd;
@@ -6143,7 +6143,7 @@ static int build_sched_domains(const cpumask_t *cpu_map)
 #ifdef CONFIG_SCHED_SMT
 	/* Set up CPU (sibling) groups */
 	for_each_cpu_mask(i, *cpu_map) {
-		cpumask_t this_sibling_map = cpu_sibling_map[i];
+		cpumask_t this_sibling_map = per_cpu(cpu_sibling_map, i);
 		cpus_and(this_sibling_map, this_sibling_map, *cpu_map);
 		if (i != first_cpu(this_sibling_map))
 			continue;
@@ -6348,35 +6348,6 @@ static void detach_destroy_domains(const cpumask_t *cpu_map)
 	arch_destroy_sched_domains(cpu_map);
 }
 
-/*
- * Partition sched domains as specified by the cpumasks below.
- * This attaches all cpus from the cpumasks to the NULL domain,
- * waits for a RCU quiescent period, recalculates sched
- * domain information and then attaches them back to the
- * correct sched domains
- * Call with hotplug lock held
- */
-int partition_sched_domains(cpumask_t *partition1, cpumask_t *partition2)
-{
-	cpumask_t change_map;
-	int err = 0;
-
-	cpus_and(*partition1, *partition1, cpu_online_map);
-	cpus_and(*partition2, *partition2, cpu_online_map);
-	cpus_or(change_map, *partition1, *partition2);
-
-	/* Detach sched domains from all of the affected cpus */
-	detach_destroy_domains(&change_map);
-	if (!cpus_empty(*partition1))
-		err = build_sched_domains(partition1);
-	if (!err && !cpus_empty(*partition2))
-		err = build_sched_domains(partition2);
-
-	register_sched_domain_sysctl();
-
-	return err;
-}
-
 #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
 static int arch_reinit_sched_domains(void)
 {
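The last hunk drops partition_sched_domains() entirely. For reference, a minimal sketch of how a caller would have used the removed helper; the function name and the particular CPU split are hypothetical, and the hotplug locking the removed comment requires is omitted here:

	/* illustration only: split CPU 0 away from the remaining online CPUs */
	static int example_repartition(void)
	{
		cpumask_t set_a = CPU_MASK_NONE;
		cpumask_t set_b = cpu_online_map;

		cpu_set(0, set_a);
		cpu_clear(0, set_b);

		/* detaches both sets, then rebuilds their sched domains */
		return partition_sched_domains(&set_a, &set_b);
	}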