author	Rusty Russell <rusty@rustcorp.com.au>	2008-11-24 11:05:04 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-11-24 11:50:45 -0500
commit	758b2cdc6f6a22c702bd8f2344382fb1270b2161 (patch)
tree	270aec3d0f6235c1519c16e8dc8148f195e133db
parent	1e5ce4f4a755ee498bd9217dae26143afa0d8f31 (diff)
sched: wrap sched_group and sched_domain cpumask accesses.
Impact: trivial wrap of member accesses

This eases the transition in the next patch. We also get rid of a
temporary cpumask in find_idlest_cpu() thanks to for_each_cpu_and, and
in sched_balance_self() due to getting the weight before setting sd to
NULL.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
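A note on the second point: for_each_cpu_and() iterates directly over the intersection of two masks, which is why the on-stack cpumask_t and the explicit cpus_and() step can be dropped. A condensed before/after sketch of that pattern (illustrative only, not the exact hunks below):

	/* before: build a temporary mask, then walk it */
	cpumask_t tmp;
	cpus_and(tmp, group->cpumask, p->cpus_allowed);
	for_each_cpu(i, &tmp)
		load = weighted_cpuload(i);

	/* after: walk the intersection directly, no temporary mask needed */
	for_each_cpu_and(i, sched_group_cpus(group), &p->cpus_allowed)
		load = weighted_cpuload(i);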
-rw-r--r--	include/linux/sched.h	10
-rw-r--r--	kernel/sched.c	114
-rw-r--r--	kernel/sched_fair.c	10
-rw-r--r--	kernel/sched_rt.c	3
-rw-r--r--	kernel/sched_stats.h	3
5 files changed, 73 insertions, 67 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4ce5c603c51a..2b95aa9f779b 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -786,6 +786,11 @@ struct sched_group {
 	u32 reciprocal_cpu_power;
 };
 
+static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
+{
+	return &sg->cpumask;
+}
+
 enum sched_domain_level {
 	SD_LV_NONE = 0,
 	SD_LV_SIBLING,
@@ -866,6 +871,11 @@ struct sched_domain {
 #endif
 };
 
+static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
+{
+	return &sd->span;
+}
+
 extern void partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
 				    struct sched_domain_attr *dattr_new);
 extern int arch_reinit_sched_domains(void);
diff --git a/kernel/sched.c b/kernel/sched.c
index a2de33d05340..575f38acf4da 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1501,7 +1501,7 @@ static int tg_shares_up(struct task_group *tg, void *data)
 	struct sched_domain *sd = data;
 	int i;
 
-	for_each_cpu_mask(i, sd->span) {
+	for_each_cpu(i, sched_domain_span(sd)) {
 		/*
 		 * If there are currently no tasks on the cpu pretend there
 		 * is one of average load so that when a new task gets to
@@ -1522,7 +1522,7 @@ static int tg_shares_up(struct task_group *tg, void *data)
 	if (!sd->parent || !(sd->parent->flags & SD_LOAD_BALANCE))
 		shares = tg->shares;
 
-	for_each_cpu_mask(i, sd->span)
+	for_each_cpu(i, sched_domain_span(sd))
 		update_group_shares_cpu(tg, i, shares, rq_weight);
 
 	return 0;
@@ -2053,15 +2053,17 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
 		int i;
 
 		/* Skip over this group if it has no CPUs allowed */
-		if (!cpus_intersects(group->cpumask, p->cpus_allowed))
+		if (!cpumask_intersects(sched_group_cpus(group),
+					&p->cpus_allowed))
 			continue;
 
-		local_group = cpu_isset(this_cpu, group->cpumask);
+		local_group = cpumask_test_cpu(this_cpu,
+					       sched_group_cpus(group));
 
 		/* Tally up the load of all CPUs in the group */
 		avg_load = 0;
 
-		for_each_cpu(i, &group->cpumask) {
+		for_each_cpu(i, sched_group_cpus(group)) {
 			/* Bias balancing toward cpus of our domain */
 			if (local_group)
 				load = source_load(i, load_idx);
@@ -2093,17 +2095,14 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, int this_cpu)
  * find_idlest_cpu - find the idlest cpu among the cpus in group.
  */
 static int
-find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu,
-		cpumask_t *tmp)
+find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
 {
 	unsigned long load, min_load = ULONG_MAX;
 	int idlest = -1;
 	int i;
 
 	/* Traverse only the allowed CPUs */
-	cpus_and(*tmp, group->cpumask, p->cpus_allowed);
-
-	for_each_cpu(i, tmp) {
+	for_each_cpu_and(i, sched_group_cpus(group), &p->cpus_allowed) {
 		load = weighted_cpuload(i);
 
 		if (load < min_load || (load == min_load && i == this_cpu)) {
@@ -2145,7 +2144,6 @@ static int sched_balance_self(int cpu, int flag)
 		update_shares(sd);
 
 	while (sd) {
-		cpumask_t span, tmpmask;
 		struct sched_group *group;
 		int new_cpu, weight;
 
@@ -2154,14 +2152,13 @@ static int sched_balance_self(int cpu, int flag)
 			continue;
 		}
 
-		span = sd->span;
 		group = find_idlest_group(sd, t, cpu);
 		if (!group) {
 			sd = sd->child;
 			continue;
 		}
 
-		new_cpu = find_idlest_cpu(group, t, cpu, &tmpmask);
+		new_cpu = find_idlest_cpu(group, t, cpu);
 		if (new_cpu == -1 || new_cpu == cpu) {
 			/* Now try balancing at a lower domain level of cpu */
 			sd = sd->child;
@@ -2170,10 +2167,10 @@ static int sched_balance_self(int cpu, int flag)
 
 		/* Now try balancing at a lower domain level of new_cpu */
 		cpu = new_cpu;
+		weight = cpumask_weight(sched_domain_span(sd));
 		sd = NULL;
-		weight = cpus_weight(span);
 		for_each_domain(cpu, tmp) {
-			if (weight <= cpus_weight(tmp->span))
+			if (weight <= cpumask_weight(sched_domain_span(tmp)))
 				break;
 			if (tmp->flags & flag)
 				sd = tmp;
@@ -2218,7 +2215,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
 		cpu = task_cpu(p);
 
 		for_each_domain(this_cpu, sd) {
-			if (cpu_isset(cpu, sd->span)) {
+			if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
 				update_shares(sd);
 				break;
 			}
@@ -2266,7 +2263,7 @@ static int try_to_wake_up(struct task_struct *p, unsigned int state, int sync)
 	else {
 		struct sched_domain *sd;
 		for_each_domain(this_cpu, sd) {
-			if (cpu_isset(cpu, sd->span)) {
+			if (cpumask_test_cpu(cpu, sched_domain_span(sd))) {
 				schedstat_inc(sd, ttwu_wake_remote);
 				break;
 			}
@@ -3109,10 +3106,11 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 		unsigned long sum_avg_load_per_task;
 		unsigned long avg_load_per_task;
 
-		local_group = cpu_isset(this_cpu, group->cpumask);
+		local_group = cpumask_test_cpu(this_cpu,
+					       sched_group_cpus(group));
 
 		if (local_group)
-			balance_cpu = first_cpu(group->cpumask);
+			balance_cpu = cpumask_first(sched_group_cpus(group));
 
 		/* Tally up the load of all CPUs in the group */
 		sum_weighted_load = sum_nr_running = avg_load = 0;
@@ -3121,13 +3119,8 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 		max_cpu_load = 0;
 		min_cpu_load = ~0UL;
 
-		for_each_cpu(i, &group->cpumask) {
-			struct rq *rq;
-
-			if (!cpu_isset(i, *cpus))
-				continue;
-
-			rq = cpu_rq(i);
+		for_each_cpu_and(i, sched_group_cpus(group), cpus) {
+			struct rq *rq = cpu_rq(i);
 
 			if (*sd_idle && rq->nr_running)
 				*sd_idle = 0;
@@ -3238,8 +3231,8 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 		 */
 		if ((sum_nr_running < min_nr_running) ||
 		    (sum_nr_running == min_nr_running &&
-		     first_cpu(group->cpumask) <
-		     first_cpu(group_min->cpumask))) {
+		     cpumask_first(sched_group_cpus(group)) <
+		     cpumask_first(sched_group_cpus(group_min)))) {
 			group_min = group;
 			min_nr_running = sum_nr_running;
 			min_load_per_task = sum_weighted_load /
@@ -3254,8 +3247,8 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 		if (sum_nr_running <= group_capacity - 1) {
 			if (sum_nr_running > leader_nr_running ||
 			    (sum_nr_running == leader_nr_running &&
-			     first_cpu(group->cpumask) >
-			     first_cpu(group_leader->cpumask))) {
+			     cpumask_first(sched_group_cpus(group)) >
+			     cpumask_first(sched_group_cpus(group_leader)))) {
 				group_leader = group;
 				leader_nr_running = sum_nr_running;
 			}
@@ -3400,7 +3393,7 @@ find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle,
 	unsigned long max_load = 0;
 	int i;
 
-	for_each_cpu(i, &group->cpumask) {
+	for_each_cpu(i, sched_group_cpus(group)) {
 		unsigned long wl;
 
 		if (!cpu_isset(i, *cpus))
@@ -3746,7 +3739,7 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
 	/* Search for an sd spanning us and the target CPU. */
 	for_each_domain(target_cpu, sd) {
 		if ((sd->flags & SD_LOAD_BALANCE) &&
-		    cpu_isset(busiest_cpu, sd->span))
+		    cpumask_test_cpu(busiest_cpu, sched_domain_span(sd)))
 			break;
 	}
 
@@ -6618,7 +6611,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
 	struct sched_group *group = sd->groups;
 	char str[256];
 
-	cpulist_scnprintf(str, sizeof(str), sd->span);
+	cpulist_scnprintf(str, sizeof(str), *sched_domain_span(sd));
 	cpus_clear(*groupmask);
 
 	printk(KERN_DEBUG "%*s domain %d: ", level, "", level);
@@ -6633,11 +6626,11 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
 
 	printk(KERN_CONT "span %s level %s\n", str, sd->name);
 
-	if (!cpu_isset(cpu, sd->span)) {
+	if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) {
 		printk(KERN_ERR "ERROR: domain->span does not contain "
 				"CPU%d\n", cpu);
 	}
-	if (!cpu_isset(cpu, group->cpumask)) {
+	if (!cpumask_test_cpu(cpu, sched_group_cpus(group))) {
 		printk(KERN_ERR "ERROR: domain->groups does not contain"
 				" CPU%d\n", cpu);
 	}
@@ -6657,31 +6650,32 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
 			break;
 		}
 
-		if (!cpus_weight(group->cpumask)) {
+		if (!cpumask_weight(sched_group_cpus(group))) {
 			printk(KERN_CONT "\n");
 			printk(KERN_ERR "ERROR: empty group\n");
 			break;
 		}
 
-		if (cpus_intersects(*groupmask, group->cpumask)) {
+		if (cpumask_intersects(groupmask, sched_group_cpus(group))) {
 			printk(KERN_CONT "\n");
 			printk(KERN_ERR "ERROR: repeated CPUs\n");
 			break;
 		}
 
-		cpus_or(*groupmask, *groupmask, group->cpumask);
+		cpumask_or(groupmask, groupmask, sched_group_cpus(group));
 
-		cpulist_scnprintf(str, sizeof(str), group->cpumask);
+		cpulist_scnprintf(str, sizeof(str), *sched_group_cpus(group));
 		printk(KERN_CONT " %s", str);
 
 		group = group->next;
 	} while (group != sd->groups);
 	printk(KERN_CONT "\n");
 
-	if (!cpus_equal(sd->span, *groupmask))
+	if (!cpumask_equal(sched_domain_span(sd), groupmask))
 		printk(KERN_ERR "ERROR: groups don't span domain->span\n");
 
-	if (sd->parent && !cpus_subset(*groupmask, sd->parent->span))
+	if (sd->parent &&
+	    !cpumask_subset(groupmask, sched_domain_span(sd->parent)))
 		printk(KERN_ERR "ERROR: parent span is not a superset "
 			"of domain->span\n");
 	return 0;
@@ -6721,7 +6715,7 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu)
 
 static int sd_degenerate(struct sched_domain *sd)
 {
-	if (cpus_weight(sd->span) == 1)
+	if (cpumask_weight(sched_domain_span(sd)) == 1)
 		return 1;
 
 	/* Following flags need at least 2 groups */
@@ -6752,7 +6746,7 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent)
 	if (sd_degenerate(parent))
 		return 1;
 
-	if (!cpus_equal(sd->span, parent->span))
+	if (!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent)))
 		return 0;
 
 	/* Does parent contain flags not in child? */
@@ -6913,10 +6907,10 @@ init_sched_build_groups(const cpumask_t *span, const cpumask_t *cpu_map,
 		int group = group_fn(i, cpu_map, &sg, tmpmask);
 		int j;
 
-		if (cpu_isset(i, *covered))
+		if (cpumask_test_cpu(i, covered))
 			continue;
 
-		cpus_clear(sg->cpumask);
+		cpumask_clear(sched_group_cpus(sg));
 		sg->__cpu_power = 0;
 
 		for_each_cpu(j, span) {
@@ -6924,7 +6918,7 @@ init_sched_build_groups(const cpumask_t *span, const cpumask_t *cpu_map,
 				continue;
 
 			cpu_set(j, *covered);
-			cpu_set(j, sg->cpumask);
+			cpumask_set_cpu(j, sched_group_cpus(sg));
 		}
 		if (!first)
 			first = sg;
@@ -7119,11 +7113,11 @@ static void init_numa_sched_groups_power(struct sched_group *group_head)
 	if (!sg)
 		return;
 	do {
-		for_each_cpu(j, &sg->cpumask) {
+		for_each_cpu(j, sched_group_cpus(sg)) {
 			struct sched_domain *sd;
 
 			sd = &per_cpu(phys_domains, j);
-			if (j != first_cpu(sd->groups->cpumask)) {
+			if (j != cpumask_first(sched_group_cpus(sd->groups))) {
 				/*
 				 * Only add "power" once for each
 				 * physical package.
@@ -7200,7 +7194,7 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd)
 
 	WARN_ON(!sd || !sd->groups);
 
-	if (cpu != first_cpu(sd->groups->cpumask))
+	if (cpu != cpumask_first(sched_group_cpus(sd->groups)))
 		return;
 
 	child = sd->child;
@@ -7372,7 +7366,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 		sd = &per_cpu(allnodes_domains, i);
 		SD_INIT(sd, ALLNODES);
 		set_domain_attribute(sd, attr);
-		sd->span = *cpu_map;
+		cpumask_copy(sched_domain_span(sd), cpu_map);
 		cpu_to_allnodes_group(i, cpu_map, &sd->groups, tmpmask);
 		p = sd;
 		sd_allnodes = 1;
@@ -7382,18 +7376,19 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 		sd = &per_cpu(node_domains, i);
 		SD_INIT(sd, NODE);
 		set_domain_attribute(sd, attr);
-		sched_domain_node_span(cpu_to_node(i), &sd->span);
+		sched_domain_node_span(cpu_to_node(i), sched_domain_span(sd));
 		sd->parent = p;
 		if (p)
 			p->child = sd;
-		cpus_and(sd->span, sd->span, *cpu_map);
+		cpumask_and(sched_domain_span(sd),
+			    sched_domain_span(sd), cpu_map);
 #endif
 
 		p = sd;
 		sd = &per_cpu(phys_domains, i);
 		SD_INIT(sd, CPU);
 		set_domain_attribute(sd, attr);
-		sd->span = *nodemask;
+		cpumask_copy(sched_domain_span(sd), nodemask);
 		sd->parent = p;
 		if (p)
 			p->child = sd;
@@ -7404,8 +7399,9 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 		sd = &per_cpu(core_domains, i);
 		SD_INIT(sd, MC);
 		set_domain_attribute(sd, attr);
-		sd->span = cpu_coregroup_map(i);
-		cpus_and(sd->span, sd->span, *cpu_map);
+		*sched_domain_span(sd) = cpu_coregroup_map(i);
+		cpumask_and(sched_domain_span(sd),
+			    sched_domain_span(sd), cpu_map);
 		sd->parent = p;
 		p->child = sd;
 		cpu_to_core_group(i, cpu_map, &sd->groups, tmpmask);
@@ -7416,8 +7412,8 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 		sd = &per_cpu(cpu_domains, i);
 		SD_INIT(sd, SIBLING);
 		set_domain_attribute(sd, attr);
-		sd->span = per_cpu(cpu_sibling_map, i);
-		cpus_and(sd->span, sd->span, *cpu_map);
+		cpumask_and(sched_domain_span(sd),
+			    &per_cpu(cpu_sibling_map, i), cpu_map);
 		sd->parent = p;
 		p->child = sd;
 		cpu_to_cpu_group(i, cpu_map, &sd->groups, tmpmask);
@@ -7503,7 +7499,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 			sd->groups = sg;
 		}
 		sg->__cpu_power = 0;
-		sg->cpumask = *nodemask;
+		cpumask_copy(sched_group_cpus(sg), nodemask);
 		sg->next = sg;
 		cpus_or(*covered, *covered, *nodemask);
 		prev = sg;
@@ -7530,7 +7526,7 @@ static int __build_sched_domains(const cpumask_t *cpu_map,
 			goto error;
 		}
 		sg->__cpu_power = 0;
-		sg->cpumask = *tmpmask;
+		cpumask_copy(sched_group_cpus(sg), tmpmask);
 		sg->next = prev->next;
 		cpus_or(*covered, *covered, *tmpmask);
 		prev->next = sg;
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 98345e45b059..bba00402ed90 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1024,7 +1024,6 @@ static void yield_task_fair(struct rq *rq)
 #if defined(ARCH_HAS_SCHED_WAKE_IDLE)
 static int wake_idle(int cpu, struct task_struct *p)
 {
-	cpumask_t tmp;
 	struct sched_domain *sd;
 	int i;
 
@@ -1044,10 +1043,9 @@ static int wake_idle(int cpu, struct task_struct *p)
 		if ((sd->flags & SD_WAKE_IDLE)
 		    || ((sd->flags & SD_WAKE_IDLE_FAR)
 			&& !task_hot(p, task_rq(p)->clock, sd))) {
-			cpus_and(tmp, sd->span, p->cpus_allowed);
-			cpus_and(tmp, tmp, cpu_active_map);
-			for_each_cpu_mask_nr(i, tmp) {
-				if (idle_cpu(i)) {
+			for_each_cpu_and(i, sched_domain_span(sd),
+					 &p->cpus_allowed) {
+				if (cpu_active(i) && idle_cpu(i)) {
 					if (i != task_cpu(p)) {
 						schedstat_inc(p,
 						       se.nr_wakeups_idle);
@@ -1240,7 +1238,7 @@ static int select_task_rq_fair(struct task_struct *p, int sync)
 	 * this_cpu and prev_cpu are present in:
 	 */
 	for_each_domain(this_cpu, sd) {
-		if (cpu_isset(prev_cpu, sd->span)) {
+		if (cpumask_test_cpu(prev_cpu, sched_domain_span(sd))) {
 			this_sd = sd;
 			break;
 		}
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 2bdd44423599..4cd813abc23a 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -1017,7 +1017,8 @@ static int find_lowest_rq(struct task_struct *task)
 			cpumask_t domain_mask;
 			int best_cpu;
 
-			cpus_and(domain_mask, sd->span, *lowest_mask);
+			cpumask_and(&domain_mask, sched_domain_span(sd),
+				    lowest_mask);
 
 			best_cpu = pick_optimal_cpu(this_cpu,
 						    &domain_mask);
diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h
index 7dbf72a2b02c..ce340835d055 100644
--- a/kernel/sched_stats.h
+++ b/kernel/sched_stats.h
@@ -42,7 +42,8 @@ static int show_schedstat(struct seq_file *seq, void *v)
 	for_each_domain(cpu, sd) {
 		enum cpu_idle_type itype;
 
-		cpumask_scnprintf(mask_str, mask_len, sd->span);
+		cpumask_scnprintf(mask_str, mask_len,
+				  *sched_domain_span(sd));
 		seq_printf(seq, "domain%d %s", dcount++, mask_str);
 		for (itype = CPU_IDLE; itype < CPU_MAX_IDLE_TYPES;
 		     itype++) {