Diffstat (limited to 'kernel/cpu.c')
-rw-r--r--  kernel/cpu.c  88
1 file changed, 70 insertions(+), 18 deletions(-)
diff --git a/kernel/cpu.c b/kernel/cpu.c
index cfb1d43ab801..e202a68d1cc1 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -64,6 +64,8 @@ void __init cpu_hotplug_init(void)
 	cpu_hotplug.refcount = 0;
 }
 
+cpumask_t cpu_active_map;
+
 #ifdef CONFIG_HOTPLUG_CPU
 
 void get_online_cpus(void)
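cpu_active_map is a new cpumask alongside cpu_online_map: it names the CPUs the scheduler may still pick as migration targets, and on the down path it is cleared before the CPU is actually taken offline. A minimal sketch of how scheduler-side code could test it; the cpu_active() helper name is an assumption about the companion cpumask.h change, not something this hunk adds:

/* Hypothetical accessor, for illustration only. */
static inline int cpu_active(unsigned int cpu)
{
	return cpu_isset(cpu, cpu_active_map);
}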
@@ -214,7 +216,6 @@ static int __ref take_cpu_down(void *_param)
 static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
 {
 	int err, nr_calls = 0;
-	struct task_struct *p;
 	cpumask_t old_allowed, tmp;
 	void *hcpu = (void *)(long)cpu;
 	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
@@ -247,21 +248,18 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
 	cpus_setall(tmp);
 	cpu_clear(cpu, tmp);
 	set_cpus_allowed_ptr(current, &tmp);
+	tmp = cpumask_of_cpu(cpu);
 
-	p = __stop_machine_run(take_cpu_down, &tcd_param, cpu);
-
-	if (IS_ERR(p) || cpu_online(cpu)) {
+	err = __stop_machine(take_cpu_down, &tcd_param, &tmp);
+	if (err) {
 		/* CPU didn't die: tell everyone. Can't complain. */
 		if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
 					    hcpu) == NOTIFY_BAD)
 			BUG();
 
-		if (IS_ERR(p)) {
-			err = PTR_ERR(p);
-			goto out_allowed;
-		}
-		goto out_thread;
+		goto out_allowed;
 	}
+	BUG_ON(cpu_online(cpu));
 
 	/* Wait for it to sleep (leaving idle task). */
 	while (!idle_cpu(cpu))
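take_cpu_down() is now invoked through __stop_machine(), which returns the callback's result directly and takes a cpumask naming the CPU(s) the callback must run on (here only the dying CPU), so the kthread returned by the old __stop_machine_run() and the error-pointer handling around it disappear. Roughly, the two interfaces compare as below; the prototypes are paraphrased from the stop_machine() rework this patch sits on top of and may not match the headers verbatim:

/* Old: caller got the stop_machine kthread back and had to
 * kthread_stop() it to collect fn's return value. */
struct task_struct *__stop_machine_run(int (*fn)(void *), void *data,
				       unsigned int cpu);

/* New: runs fn with the machine stopped, on the CPUs named in *cpus,
 * and returns fn's result directly. */
int __stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus);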
@@ -277,12 +275,15 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
 
 	check_for_tasks(cpu);
 
-out_thread:
-	err = kthread_stop(p);
 out_allowed:
 	set_cpus_allowed_ptr(current, &old_allowed);
 out_release:
 	cpu_hotplug_done();
+	if (!err) {
+		if (raw_notifier_call_chain(&cpu_chain, CPU_POST_DEAD | mod,
+					    hcpu) == NOTIFY_BAD)
+			BUG();
+	}
 	return err;
 }
 
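With kthread_stop() gone, err carries __stop_machine()'s result all the way out, and a CPU_POST_DEAD notification is now raised after cpu_hotplug_done(), i.e. without the hotplug lock held, which gives subsystems a place for teardown that has to sleep or take that lock itself. A hypothetical subscriber, only to show where the new event lands in a notifier (the foo_ name is made up):

/* Hypothetical subsystem notifier -- illustration only. */
static int foo_cpu_callback(struct notifier_block *nb,
			    unsigned long action, void *hcpu)
{
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_DOWN_PREPARE:
		/* hotplug lock held: stop feeding work to the dying CPU */
		break;
	case CPU_POST_DEAD:
		/* hotplug lock dropped: safe to sleep or flush here */
		break;
	}
	return NOTIFY_OK;
}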
@@ -291,11 +292,30 @@ int __ref cpu_down(unsigned int cpu)
 	int err = 0;
 
 	cpu_maps_update_begin();
-	if (cpu_hotplug_disabled)
+
+	if (cpu_hotplug_disabled) {
 		err = -EBUSY;
-	else
-		err = _cpu_down(cpu, 0);
+		goto out;
+	}
+
+	cpu_clear(cpu, cpu_active_map);
+
+	/*
+	 * Make sure all CPUs did the reschedule and are not
+	 * using a stale version of the cpu_active_map.
+	 * This is not strictly necessary because the stop_machine()
+	 * we run down the line already provides the required
+	 * synchronization. But it's really a side effect and we do not
+	 * want to depend on the innards of stop_machine() here.
+	 */
+	synchronize_sched();
+
+	err = _cpu_down(cpu, 0);
+
+	if (cpu_online(cpu))
+		cpu_set(cpu, cpu_active_map);
 
+out:
 	cpu_maps_update_done();
 	return err;
 }
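The ordering in cpu_down() matters: the CPU is dropped from cpu_active_map first, synchronize_sched() then waits out every preempt-disabled scheduler section that may still be reading the old mask, and only then does _cpu_down() run; if the takedown fails and the CPU is still online, its active bit is put back. A hypothetical reader, just to show what the grace period is waiting for (pick_target_cpu() and its "none found" convention are made up):

/* Illustration only: a preempt-disabled scan is an RCU-sched read-side
 * section, so the synchronize_sched() above does not return until every
 * such scan that could have seen the old cpu_active_map has finished. */
static int pick_target_cpu(const cpumask_t *candidates)
{
	int cpu = nr_cpu_ids;	/* "none found", made-up convention */
	int i;

	preempt_disable();
	for_each_cpu_mask_nr(i, *candidates) {
		if (cpu_isset(i, cpu_active_map)) {
			cpu = i;
			break;
		}
	}
	preempt_enable();

	return cpu;
}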
@@ -355,11 +375,18 @@ int __cpuinit cpu_up(unsigned int cpu)
 	}
 
 	cpu_maps_update_begin();
-	if (cpu_hotplug_disabled)
+
+	if (cpu_hotplug_disabled) {
 		err = -EBUSY;
-	else
-		err = _cpu_up(cpu, 0);
+		goto out;
+	}
 
+	err = _cpu_up(cpu, 0);
+
+	if (cpu_online(cpu))
+		cpu_set(cpu, cpu_active_map);
+
+out:
 	cpu_maps_update_done();
 	return err;
 }
@@ -413,7 +440,7 @@ void __ref enable_nonboot_cpus(void)
 		goto out;
 
 	printk("Enabling non-boot CPUs ...\n");
-	for_each_cpu_mask(cpu, frozen_cpus) {
+	for_each_cpu_mask_nr(cpu, frozen_cpus) {
 		error = _cpu_up(cpu, 1);
 		if (!error) {
 			printk("CPU%d is up\n", cpu);
@@ -428,3 +455,28 @@ out:
 #endif /* CONFIG_PM_SLEEP_SMP */
 
 #endif /* CONFIG_SMP */
+
+/*
+ * cpu_bit_bitmap[] is a special, "compressed" data structure that
+ * represents all NR_CPUS bits binary values of 1<<nr.
+ *
+ * It is used by cpumask_of_cpu() to get a constant address to a CPU
+ * mask value that has a single bit set only.
+ */
+
+/* cpu_bit_bitmap[0] is empty - so we can back into it */
+#define MASK_DECLARE_1(x)	[x+1][0] = 1UL << (x)
+#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
+#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
+#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)
+
+const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {
+
+	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
+	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
+#if BITS_PER_LONG > 32
+	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
+	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
+#endif
+};
+EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
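The initializer relies on a layout trick: row x+1 has only bit x of its word 0 set, and row 0 is all zeroes, so cpumask_of_cpu() can return a pointer into this table that reads back as a mask with exactly one bit set, without building a cpumask on the stack. A stand-alone sketch of the same indexing that compiles in userspace; NR_CPUS and single_cpu_mask() are made up for the demo, and only the table and the pointer arithmetic mirror the patch:

#include <stdio.h>
#include <limits.h>

#if ULONG_MAX == 0xffffffffUL
#define BITS_PER_LONG	32
#else
#define BITS_PER_LONG	64
#endif

#define NR_CPUS		128	/* made up for the demo */
#define BITS_TO_LONGS(n)	(((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

#define MASK_DECLARE_1(x)	[x+1][0] = 1UL << (x)
#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

static const unsigned long
cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {
	MASK_DECLARE_8(0),  MASK_DECLARE_8(8),
	MASK_DECLARE_8(16), MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
	MASK_DECLARE_8(32), MASK_DECLARE_8(40),
	MASK_DECLARE_8(48), MASK_DECLARE_8(56),
#endif
};

/* The arithmetic cpumask_of_cpu() uses: start at the row whose word 0
 * has bit (cpu % BITS_PER_LONG) set, then step back cpu / BITS_PER_LONG
 * words so that bit lands in the right word of the returned mask.  The
 * step back never reaches another row's word 0; for bit 0 of a word it
 * backs into the all-zero row 0 -- hence "we can back into it". */
static const unsigned long *single_cpu_mask(unsigned int cpu)
{
	const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];

	return p - cpu / BITS_PER_LONG;
}

int main(void)
{
	const unsigned long *mask = single_cpu_mask(70);
	int i;

	for (i = 0; i < BITS_TO_LONGS(NR_CPUS); i++)
		printf("word %d: %#lx\n", i, mask[i]);	/* only bit 70 is set */
	return 0;
}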