Diffstat (limited to 'kernel/cpu.c')
 -rw-r--r--   kernel/cpu.c   44
 1 file changed, 17 insertions(+), 27 deletions(-)
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 6ba0f1ecb212..25bba73b1be3 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -14,6 +14,7 @@
 #include <linux/kthread.h>
 #include <linux/stop_machine.h>
 #include <linux/mutex.h>
+#include <linux/gfp.h>
 
 #ifdef CONFIG_SMP
 /* Serializes the updates to cpu_online_mask, cpu_present_mask */
@@ -151,13 +152,13 @@ static inline void check_for_tasks(int cpu)
 
         write_lock_irq(&tasklist_lock);
         for_each_process(p) {
-                if (task_cpu(p) == cpu &&
+                if (task_cpu(p) == cpu && p->state == TASK_RUNNING &&
                     (!cputime_eq(p->utime, cputime_zero) ||
                      !cputime_eq(p->stime, cputime_zero)))
-                        printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d\
-                                (state = %ld, flags = %x) \n",
+                        printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d "
+                                "(state = %ld, flags = %x)\n",
                                 p->comm, task_pid_nr(p), cpu,
                                 p->state, p->flags);
         }
         write_unlock_irq(&tasklist_lock);
 }
@@ -209,9 +210,12 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
                 return -ENOMEM;
 
         cpu_hotplug_begin();
+        set_cpu_active(cpu, false);
         err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE | mod,
                                         hcpu, -1, &nr_calls);
         if (err == NOTIFY_BAD) {
+                set_cpu_active(cpu, true);
+
                 nr_calls--;
                 __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
                                           hcpu, nr_calls, NULL);
@@ -223,11 +227,11 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
 
         /* Ensure that we are not runnable on dying cpu */
         cpumask_copy(old_allowed, &current->cpus_allowed);
-        set_cpus_allowed_ptr(current,
-                             cpumask_of(cpumask_any_but(cpu_online_mask, cpu)));
+        set_cpus_allowed_ptr(current, cpu_active_mask);
 
         err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
         if (err) {
+                set_cpu_active(cpu, true);
                 /* CPU didn't die: tell everyone. Can't complain. */
                 if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
                                             hcpu) == NOTIFY_BAD)
@@ -278,23 +282,8 @@ int __ref cpu_down(unsigned int cpu)
                 goto out;
         }
 
-        set_cpu_active(cpu, false);
-
-        /*
-         * Make sure the all cpus did the reschedule and are not
-         * using stale version of the cpu_active_mask.
-         * This is not strictly necessary becuase stop_machine()
-         * that we run down the line already provides the required
-         * synchronization. But it's really a side effect and we do not
-         * want to depend on the innards of the stop_machine here.
-         */
-        synchronize_sched();
-
         err = _cpu_down(cpu, 0);
 
-        if (cpu_online(cpu))
-                set_cpu_active(cpu, true);
-
 out:
         cpu_maps_update_done();
         stop_machine_destroy();
@@ -350,7 +339,7 @@ int __cpuinit cpu_up(unsigned int cpu)
         if (!cpu_possible(cpu)) {
                 printk(KERN_ERR "can't online cpu %d because it is not "
                         "configured as may-hotadd at boot time\n", cpu);
-#if defined(CONFIG_IA64) || defined(CONFIG_X86_64)
+#if defined(CONFIG_IA64)
                 printk(KERN_ERR "please check additional_cpus= boot "
                                 "parameter\n");
 #endif
@@ -383,19 +372,20 @@ int disable_nonboot_cpus(void)
                 return error;
         cpu_maps_update_begin();
         first_cpu = cpumask_first(cpu_online_mask);
-        /* We take down all of the non-boot CPUs in one shot to avoid races
+        /*
+         * We take down all of the non-boot CPUs in one shot to avoid races
          * with the userspace trying to use the CPU hotplug at the same time
          */
         cpumask_clear(frozen_cpus);
+
         printk("Disabling non-boot CPUs ...\n");
         for_each_online_cpu(cpu) {
                 if (cpu == first_cpu)
                         continue;
                 error = _cpu_down(cpu, 1);
-                if (!error) {
+                if (!error)
                         cpumask_set_cpu(cpu, frozen_cpus);
-                        printk("CPU%d is down\n", cpu);
-                } else {
+                else {
                         printk(KERN_ERR "Error taking CPU%d down: %d\n",
                                 cpu, error);
                         break;
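
As a reading aid for the _cpu_down() hunks above: the change clears the CPU's bit in cpu_active_mask before the CPU_DOWN_PREPARE notifiers run, and restores the bit if either the notifier chain returns NOTIFY_BAD or __stop_machine() fails, instead of toggling it from cpu_down() around the whole operation as the removed lines did. The standalone sketch below only models that ordering; set_cpu_active(), notify_down_prepare() and take_cpu_down() here are simplified stand-ins for illustration, not the kernel's actual signatures.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for cpu_active_mask: all CPUs start active. */
static bool cpu_active[8] = { true, true, true, true, true, true, true, true };

static void set_cpu_active(int cpu, bool active)
{
        cpu_active[cpu] = active;
}

/* Pretend the DOWN_PREPARE notifier chain vetoes the unplug of CPU 2. */
static int notify_down_prepare(int cpu)
{
        return cpu == 2 ? -1 : 0;
}

/* Pretend stop_machine()/take_cpu_down() always succeeds. */
static int take_cpu_down(int cpu)
{
        (void)cpu;
        return 0;
}

/* Models the post-patch ordering in _cpu_down(). */
static int model_cpu_down(int cpu)
{
        int err;

        set_cpu_active(cpu, false);             /* mark inactive before the notifiers */

        err = notify_down_prepare(cpu);
        if (err) {
                set_cpu_active(cpu, true);      /* notifier vetoed: undo */
                return err;
        }

        err = take_cpu_down(cpu);
        if (err) {
                set_cpu_active(cpu, true);      /* CPU didn't die: undo */
                return err;
        }
        return 0;                               /* CPU is down, bit stays clear */
}

int main(void)
{
        printf("cpu1 down -> %d, active=%d\n", model_cpu_down(1), cpu_active[1]);
        printf("cpu2 down -> %d, active=%d\n", model_cpu_down(2), cpu_active[2]);
        return 0;
}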
