author     Ingo Molnar <mingo@elte.hu>   2009-01-04 04:59:36 -0500
committer  Ingo Molnar <mingo@elte.hu>   2009-01-04 04:59:36 -0500
commit     4010b0192ddf6ec7ec1b9feb9b0953692aeb7329 (patch)
tree       188a36186f6ce580b479a9f90404fa7bfd8b22d7 /kernel/sched.c
parent     79ff56ebd3edfb16f8badc558cb439b203a3298f (diff)
parent     7d3b56ba37a95f1f370f50258ed3954c304c524b (diff)
Merge branch 'linus' into core/urgent
Diffstat (limited to 'kernel/sched.c')
-rw-r--r--   kernel/sched.c | 168
1 file changed, 91 insertions(+), 77 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 27ba1d642f0f..545c6fccd1dc 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3715,7 +3715,7 @@ redo:
                  * don't kick the migration_thread, if the curr
                  * task on busiest cpu can't be moved to this_cpu
                  */
-                if (!cpu_isset(this_cpu, busiest->curr->cpus_allowed)) {
+                if (!cpumask_test_cpu(this_cpu, &busiest->curr->cpus_allowed)) {
                         double_unlock_balance(this_rq, busiest);
                         all_pinned = 1;
                         return ld_moved;
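
The hunk above is part of the tree-wide cpumask rework: cpu_isset() tests a bit in a cpumask_t handled by value, while cpumask_test_cpu() works through a const struct cpumask pointer, so NR_CPUS-sized masks no longer have to be copied around. A minimal sketch of the pointer-based idiom; the helper name is illustrative and not part of this patch, and it assumes task_struct still embeds cpus_allowed as a cpumask_t, as it does in this tree.

#include <linux/cpumask.h>
#include <linux/sched.h>

/* Illustrative helper (not in this patch): may @p run on @cpu? */
static inline int task_allowed_on(struct task_struct *p, int cpu)
{
        /* old idiom: cpu_isset(cpu, p->cpus_allowed), operating on a value */
        /* new idiom: test the bit through a const pointer, no mask copy    */
        return cpumask_test_cpu(cpu, &p->cpus_allowed);
}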
@@ -4150,13 +4150,17 @@ unsigned long long task_delta_exec(struct task_struct *p)
  * Account user cpu time to a process.
  * @p: the process that the cpu time gets accounted to
  * @cputime: the cpu time spent in user space since the last update
+ * @cputime_scaled: cputime scaled by cpu frequency
  */
-void account_user_time(struct task_struct *p, cputime_t cputime)
+void account_user_time(struct task_struct *p, cputime_t cputime,
+                       cputime_t cputime_scaled)
 {
         struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
         cputime64_t tmp;
 
+        /* Add user time to process. */
         p->utime = cputime_add(p->utime, cputime);
+        p->utimescaled = cputime_add(p->utimescaled, cputime_scaled);
         account_group_user_time(p, cputime);
 
         /* Add user time to cpustat. */
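
account_user_time() now takes the frequency-scaled delta alongside the raw one, so a caller charges both in a single call instead of following up with the separate account_user_time_scaled() helper, which this patch removes further down. A sketch of the calling pattern, assuming one jiffy of user time; on most architectures cputime_to_scaled() is simply the identity mapping.

#include <linux/kernel_stat.h>
#include <linux/sched.h>

/* Sketch: charge one user-mode jiffy to @p, raw and scaled together. */
static void charge_one_user_jiffy(struct task_struct *p)
{
        cputime_t one_jiffy = jiffies_to_cputime(1);

        account_user_time(p, one_jiffy, cputime_to_scaled(one_jiffy));
}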
@@ -4173,51 +4177,48 @@ void account_user_time(struct task_struct *p, cputime_t cputime)
  * Account guest cpu time to a process.
  * @p: the process that the cpu time gets accounted to
  * @cputime: the cpu time spent in virtual machine since the last update
+ * @cputime_scaled: cputime scaled by cpu frequency
  */
-static void account_guest_time(struct task_struct *p, cputime_t cputime)
+static void account_guest_time(struct task_struct *p, cputime_t cputime,
+                               cputime_t cputime_scaled)
 {
         cputime64_t tmp;
         struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
 
         tmp = cputime_to_cputime64(cputime);
 
+        /* Add guest time to process. */
         p->utime = cputime_add(p->utime, cputime);
+        p->utimescaled = cputime_add(p->utimescaled, cputime_scaled);
         account_group_user_time(p, cputime);
         p->gtime = cputime_add(p->gtime, cputime);
 
+        /* Add guest time to cpustat. */
         cpustat->user = cputime64_add(cpustat->user, tmp);
         cpustat->guest = cputime64_add(cpustat->guest, tmp);
 }
 
 /*
- * Account scaled user cpu time to a process.
- * @p: the process that the cpu time gets accounted to
- * @cputime: the cpu time spent in user space since the last update
- */
-void account_user_time_scaled(struct task_struct *p, cputime_t cputime)
-{
-        p->utimescaled = cputime_add(p->utimescaled, cputime);
-}
-
-/*
  * Account system cpu time to a process.
  * @p: the process that the cpu time gets accounted to
  * @hardirq_offset: the offset to subtract from hardirq_count()
  * @cputime: the cpu time spent in kernel space since the last update
+ * @cputime_scaled: cputime scaled by cpu frequency
  */
 void account_system_time(struct task_struct *p, int hardirq_offset,
-                         cputime_t cputime)
+                         cputime_t cputime, cputime_t cputime_scaled)
 {
         struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
-        struct rq *rq = this_rq();
         cputime64_t tmp;
 
         if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) {
-                account_guest_time(p, cputime);
+                account_guest_time(p, cputime, cputime_scaled);
                 return;
         }
 
+        /* Add system time to process. */
         p->stime = cputime_add(p->stime, cputime);
+        p->stimescaled = cputime_add(p->stimescaled, cputime_scaled);
         account_group_system_time(p, cputime);
 
         /* Add system time to cpustat. */
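
account_system_time() keeps its PF_VCPU check: when the task is flagged as running a virtual CPU and the tick did not arrive from hard-IRQ context, the time is handed to account_guest_time(), which charges utime, gtime and the guest field of cpustat. A hedged sketch of how a virtualization entry/exit path feeds this, modelled on what kvm_guest_enter()/kvm_guest_exit() do; the wrapper names here are illustrative only.

#include <linux/sched.h>

/* Illustrative wrappers: while PF_VCPU is set, timer ticks hitting this
 * task are accounted as guest time rather than ordinary system time. */
static inline void example_guest_enter(void)
{
        current->flags |= PF_VCPU;
}

static inline void example_guest_exit(void)
{
        current->flags &= ~PF_VCPU;
}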
@@ -4226,49 +4227,85 @@ void account_system_time(struct task_struct *p, int hardirq_offset,
                 cpustat->irq = cputime64_add(cpustat->irq, tmp);
         else if (softirq_count())
                 cpustat->softirq = cputime64_add(cpustat->softirq, tmp);
-        else if (p != rq->idle)
-                cpustat->system = cputime64_add(cpustat->system, tmp);
-        else if (atomic_read(&rq->nr_iowait) > 0)
-                cpustat->iowait = cputime64_add(cpustat->iowait, tmp);
         else
-                cpustat->idle = cputime64_add(cpustat->idle, tmp);
+                cpustat->system = cputime64_add(cpustat->system, tmp);
+
         /* Account for system time used */
         acct_update_integrals(p);
 }
 
 /*
- * Account scaled system cpu time to a process.
- * @p: the process that the cpu time gets accounted to
- * @hardirq_offset: the offset to subtract from hardirq_count()
- * @cputime: the cpu time spent in kernel space since the last update
+ * Account for involuntary wait time.
+ * @steal: the cpu time spent in involuntary wait
  */
-void account_system_time_scaled(struct task_struct *p, cputime_t cputime)
+void account_steal_time(cputime_t cputime)
 {
-        p->stimescaled = cputime_add(p->stimescaled, cputime);
+        struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
+        cputime64_t cputime64 = cputime_to_cputime64(cputime);
+
+        cpustat->steal = cputime64_add(cpustat->steal, cputime64);
 }
 
 /*
- * Account for involuntary wait time.
- * @p: the process from which the cpu time has been stolen
- * @steal: the cpu time spent in involuntary wait
+ * Account for idle time.
+ * @cputime: the cpu time spent in idle wait
  */
-void account_steal_time(struct task_struct *p, cputime_t steal)
+void account_idle_time(cputime_t cputime)
 {
         struct cpu_usage_stat *cpustat = &kstat_this_cpu.cpustat;
-        cputime64_t tmp = cputime_to_cputime64(steal);
+        cputime64_t cputime64 = cputime_to_cputime64(cputime);
         struct rq *rq = this_rq();
 
-        if (p == rq->idle) {
-                p->stime = cputime_add(p->stime, steal);
-                if (atomic_read(&rq->nr_iowait) > 0)
-                        cpustat->iowait = cputime64_add(cpustat->iowait, tmp);
-                else
-                        cpustat->idle = cputime64_add(cpustat->idle, tmp);
-        } else
-                cpustat->steal = cputime64_add(cpustat->steal, tmp);
+        if (atomic_read(&rq->nr_iowait) > 0)
+                cpustat->iowait = cputime64_add(cpustat->iowait, cputime64);
+        else
+                cpustat->idle = cputime64_add(cpustat->idle, cputime64);
+}
+
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING
+
+/*
+ * Account a single tick of cpu time.
+ * @p: the process that the cpu time gets accounted to
+ * @user_tick: indicates if the tick is a user or a system tick
+ */
+void account_process_tick(struct task_struct *p, int user_tick)
+{
+        cputime_t one_jiffy = jiffies_to_cputime(1);
+        cputime_t one_jiffy_scaled = cputime_to_scaled(one_jiffy);
+        struct rq *rq = this_rq();
+
+        if (user_tick)
+                account_user_time(p, one_jiffy, one_jiffy_scaled);
+        else if (p != rq->idle)
+                account_system_time(p, HARDIRQ_OFFSET, one_jiffy,
+                                    one_jiffy_scaled);
+        else
+                account_idle_time(one_jiffy);
+}
+
+/*
+ * Account multiple ticks of steal time.
+ * @p: the process from which the cpu time has been stolen
+ * @ticks: number of stolen ticks
+ */
+void account_steal_ticks(unsigned long ticks)
+{
+        account_steal_time(jiffies_to_cputime(ticks));
 }
 
 /*
+ * Account multiple ticks of idle time.
+ * @ticks: number of stolen ticks
+ */
+void account_idle_ticks(unsigned long ticks)
+{
+        account_idle_time(jiffies_to_cputime(ticks));
+}
+
+#endif
+
+/*
  * Use precise platform statistics if available:
  */
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
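
The block above adds tick-based accounting entry points for the !CONFIG_VIRT_CPU_ACCOUNTING case: account_process_tick() classifies a single tick as user, system or idle time, while account_steal_ticks() and account_idle_ticks() let code that resynchronizes after a long gap (for example a NO_HZ idle period, or a virtual CPU the hypervisor had scheduled away) account many ticks in one go. A sketch of such a catch-up path; the function and variable names are illustrative, only the two accounting calls come from this patch.

#include <linux/kernel_stat.h>

/* Sketch: fold back ticks that were missed while this CPU was idle or
 * while the hypervisor was running someone else on it ("steal" time). */
static void catch_up_after_gap(unsigned long idle_ticks,
                               unsigned long stolen_ticks)
{
        account_idle_ticks(idle_ticks);
        account_steal_ticks(stolen_ticks);
}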
@@ -6220,9 +6257,7 @@ static int __migrate_task_irq(struct task_struct *p, int src_cpu, int dest_cpu)
 static void move_task_off_dead_cpu(int dead_cpu, struct task_struct *p)
 {
         int dest_cpu;
-        /* FIXME: Use cpumask_of_node here. */
-        cpumask_t _nodemask = node_to_cpumask(cpu_to_node(dead_cpu));
-        const struct cpumask *nodemask = &_nodemask;
+        const struct cpumask *nodemask = cpumask_of_node(cpu_to_node(dead_cpu));
 
 again:
         /* Look for allowed, online CPU in same node. */
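
The remaining hunks repeat one conversion: node_to_cpumask(), which returns a full cpumask_t by value and therefore needs a local copy, is replaced by cpumask_of_node(), which returns a const pointer to an existing per-node mask. A minimal sketch of the new lookup idiom, built only from calls that appear in this diff; the helper name and the caller-supplied scratch mask are illustrative.

#include <linux/cpumask.h>
#include <linux/topology.h>

/* Illustrative: first CPU of @cpu's NUMA node that is also in @cpu_map. */
static int first_node_cpu(int cpu, struct cpumask *scratch,
                          const struct cpumask *cpu_map)
{
        cpumask_and(scratch, cpumask_of_node(cpu_to_node(cpu)), cpu_map);
        return cpumask_first(scratch);
}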
@@ -7133,21 +7168,18 @@ static int find_next_best_node(int node, nodemask_t *used_nodes)
 static void sched_domain_node_span(int node, struct cpumask *span)
 {
         nodemask_t used_nodes;
-        /* FIXME: use cpumask_of_node() */
-        node_to_cpumask_ptr(nodemask, node);
         int i;
 
-        cpus_clear(*span);
+        cpumask_clear(span);
         nodes_clear(used_nodes);
 
-        cpus_or(*span, *span, *nodemask);
+        cpumask_or(span, span, cpumask_of_node(node));
         node_set(node, used_nodes);
 
         for (i = 1; i < SD_NODES_PER_DOMAIN; i++) {
                 int next_node = find_next_best_node(node, &used_nodes);
 
-                node_to_cpumask_ptr_next(nodemask, next_node);
-                cpus_or(*span, *span, *nodemask);
+                cpumask_or(span, span, cpumask_of_node(next_node));
         }
 }
 #endif /* CONFIG_NUMA */
@@ -7227,9 +7259,7 @@ cpu_to_phys_group(int cpu, const struct cpumask *cpu_map,
 {
         int group;
 #ifdef CONFIG_SCHED_MC
-        /* FIXME: Use cpu_coregroup_mask. */
-        *mask = cpu_coregroup_map(cpu);
-        cpus_and(*mask, *mask, *cpu_map);
+        cpumask_and(mask, cpu_coregroup_mask(cpu), cpu_map);
         group = cpumask_first(mask);
 #elif defined(CONFIG_SCHED_SMT)
         cpumask_and(mask, &per_cpu(cpu_sibling_map, cpu), cpu_map);
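
cpu_coregroup_map(), which likewise returned a cpumask_t by value, is replaced the same way by cpu_coregroup_mask(), a const-pointer lookup of the CPUs treated as one multi-core group on this architecture. Mirroring the cpu_to_phys_group() hunk above, the intended call shape looks roughly like this; the helper name is illustrative.

#include <linux/cpumask.h>
#include <linux/topology.h>

/* Illustrative: first CPU of @cpu's core group that is also in @cpu_map. */
static int first_coregroup_cpu(int cpu, struct cpumask *scratch,
                               const struct cpumask *cpu_map)
{
        cpumask_and(scratch, cpu_coregroup_mask(cpu), cpu_map);
        return cpumask_first(scratch);
}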
@@ -7259,10 +7289,8 @@ static int cpu_to_allnodes_group(int cpu, const struct cpumask *cpu_map,
                                  struct cpumask *nodemask)
 {
         int group;
-        /* FIXME: use cpumask_of_node */
-        node_to_cpumask_ptr(pnodemask, cpu_to_node(cpu));
 
-        cpumask_and(nodemask, pnodemask, cpu_map);
+        cpumask_and(nodemask, cpumask_of_node(cpu_to_node(cpu)), cpu_map);
         group = cpumask_first(nodemask);
 
         if (sg)
@@ -7313,10 +7341,8 @@ static void free_sched_groups(const struct cpumask *cpu_map,
 
         for (i = 0; i < nr_node_ids; i++) {
                 struct sched_group *oldsg, *sg = sched_group_nodes[i];
-                /* FIXME: Use cpumask_of_node */
-                node_to_cpumask_ptr(pnodemask, i);
 
-                cpus_and(*nodemask, *pnodemask, *cpu_map);
+                cpumask_and(nodemask, cpumask_of_node(i), cpu_map);
                 if (cpumask_empty(nodemask))
                         continue;
 
@@ -7525,9 +7551,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
         for_each_cpu(i, cpu_map) {
                 struct sched_domain *sd = NULL, *p;
 
-                /* FIXME: use cpumask_of_node */
-                *nodemask = node_to_cpumask(cpu_to_node(i));
-                cpus_and(*nodemask, *nodemask, *cpu_map);
+                cpumask_and(nodemask, cpumask_of_node(cpu_to_node(i)), cpu_map);
 
 #ifdef CONFIG_NUMA
                 if (cpumask_weight(cpu_map) >
@@ -7568,9 +7592,8 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
                 sd = &per_cpu(core_domains, i).sd;
                 SD_INIT(sd, MC);
                 set_domain_attribute(sd, attr);
-                *sched_domain_span(sd) = cpu_coregroup_map(i);
-                cpumask_and(sched_domain_span(sd),
-                            sched_domain_span(sd), cpu_map);
+                cpumask_and(sched_domain_span(sd), cpu_map,
+                            cpu_coregroup_mask(i));
                 sd->parent = p;
                 p->child = sd;
                 cpu_to_core_group(i, cpu_map, &sd->groups, tmpmask);
@@ -7606,9 +7629,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 #ifdef CONFIG_SCHED_MC
         /* Set up multi-core groups */
         for_each_cpu(i, cpu_map) {
-                /* FIXME: Use cpu_coregroup_mask */
-                *this_core_map = cpu_coregroup_map(i);
-                cpus_and(*this_core_map, *this_core_map, *cpu_map);
+                cpumask_and(this_core_map, cpu_coregroup_mask(i), cpu_map);
                 if (i != cpumask_first(this_core_map))
                         continue;
 
@@ -7620,9 +7641,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 
         /* Set up physical groups */
         for (i = 0; i < nr_node_ids; i++) {
-                /* FIXME: Use cpumask_of_node */
-                *nodemask = node_to_cpumask(i);
-                cpus_and(*nodemask, *nodemask, *cpu_map);
+                cpumask_and(nodemask, cpumask_of_node(i), cpu_map);
                 if (cpumask_empty(nodemask))
                         continue;
 
@@ -7644,11 +7663,8 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
                 struct sched_group *sg, *prev;
                 int j;
 
-                /* FIXME: Use cpumask_of_node */
-                *nodemask = node_to_cpumask(i);
                 cpumask_clear(covered);
-
-                cpus_and(*nodemask, *nodemask, *cpu_map);
+                cpumask_and(nodemask, cpumask_of_node(i), cpu_map);
                 if (cpumask_empty(nodemask)) {
                         sched_group_nodes[i] = NULL;
                         continue;
@@ -7679,8 +7695,6 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
 
                 for (j = 0; j < nr_node_ids; j++) {
                         int n = (i + j) % nr_node_ids;
-                        /* FIXME: Use cpumask_of_node */
-                        node_to_cpumask_ptr(pnodemask, n);
 
                         cpumask_complement(notcovered, covered);
                         cpumask_and(tmpmask, notcovered, cpu_map);
@@ -7688,7 +7702,7 @@ static int __build_sched_domains(const struct cpumask *cpu_map,
                         if (cpumask_empty(tmpmask))
                                 break;
 
-                        cpumask_and(tmpmask, tmpmask, pnodemask);
+                        cpumask_and(tmpmask, tmpmask, cpumask_of_node(n));
                         if (cpumask_empty(tmpmask))
                                 continue;
 