diff options
Diffstat (limited to 'kernel/cpu.c')
 kernel/cpu.c | 36 ++++++++-----------------
 1 file changed, 13 insertions(+), 23 deletions(-)

diff --git a/kernel/cpu.c b/kernel/cpu.c
index 7c4e2713df0a..677f25376a38 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -151,13 +151,13 @@ static inline void check_for_tasks(int cpu)
 
 	write_lock_irq(&tasklist_lock);
 	for_each_process(p) {
-		if (task_cpu(p) == cpu &&
+		if (task_cpu(p) == cpu && p->state == TASK_RUNNING &&
 		    (!cputime_eq(p->utime, cputime_zero) ||
 		     !cputime_eq(p->stime, cputime_zero)))
-			printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d\
-				(state = %ld, flags = %x) \n",
+			printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d "
+				"(state = %ld, flags = %x)\n",
 				p->comm, task_pid_nr(p), cpu,
 				p->state, p->flags);
 	}
 	write_unlock_irq(&tasklist_lock);
 }
@@ -209,9 +209,12 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
 		return -ENOMEM;
 
 	cpu_hotplug_begin();
+	set_cpu_active(cpu, false);
 	err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE | mod,
 					hcpu, -1, &nr_calls);
 	if (err == NOTIFY_BAD) {
+		set_cpu_active(cpu, true);
+
 		nr_calls--;
 		__raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
 					  hcpu, nr_calls, NULL);
@@ -223,11 +226,11 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
 
 	/* Ensure that we are not runnable on dying cpu */
 	cpumask_copy(old_allowed, &current->cpus_allowed);
-	set_cpus_allowed_ptr(current,
-			     cpumask_of(cpumask_any_but(cpu_online_mask, cpu)));
+	set_cpus_allowed_ptr(current, cpu_active_mask);
 
 	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
 	if (err) {
+		set_cpu_active(cpu, true);
 		/* CPU didn't die: tell everyone.  Can't complain. */
 		if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
 					    hcpu) == NOTIFY_BAD)
@@ -278,23 +281,8 @@ int __ref cpu_down(unsigned int cpu)
 		goto out;
 	}
 
-	set_cpu_active(cpu, false);
-
-	/*
-	 * Make sure the all cpus did the reschedule and are not
-	 * using stale version of the cpu_active_mask.
-	 * This is not strictly necessary becuase stop_machine()
-	 * that we run down the line already provides the required
-	 * synchronization. But it's really a side effect and we do not
-	 * want to depend on the innards of the stop_machine here.
-	 */
-	synchronize_sched();
-
 	err = _cpu_down(cpu, 0);
 
-	if (cpu_online(cpu))
-		set_cpu_active(cpu, true);
-
 out:
 	cpu_maps_update_done();
 	stop_machine_destroy();
@@ -383,10 +371,12 @@ int disable_nonboot_cpus(void)
 		return error;
 	cpu_maps_update_begin();
 	first_cpu = cpumask_first(cpu_online_mask);
-	/* We take down all of the non-boot CPUs in one shot to avoid races
+	/*
+	 * We take down all of the non-boot CPUs in one shot to avoid races
 	 * with the userspace trying to use the CPU hotplug at the same time
 	 */
 	cpumask_clear(frozen_cpus);
+
 	printk("Disabling non-boot CPUs ...\n");
 	for_each_online_cpu(cpu) {
 		if (cpu == first_cpu)
