author		Linus Torvalds <torvalds@linux-foundation.org>	2009-01-03 15:04:39 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-01-03 15:04:39 -0500
commit		7d3b56ba37a95f1f370f50258ed3954c304c524b (patch)
tree		86102527b92f02450aa245f084ffb491c18d2e0a /kernel/cpu.c
parent		269b012321f2f1f8e4648c43a93bf432b42c6668 (diff)
parent		ab14398abd195af91a744c320a52a1bce814dd1e (diff)
Merge branch 'cpus4096-for-linus-3' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'cpus4096-for-linus-3' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (77 commits)
x86: setup_per_cpu_areas() cleanup
cpumask: fix compile error when CONFIG_NR_CPUS is not defined
cpumask: use alloc_cpumask_var_node where appropriate
cpumask: convert shared_cpu_map in acpi_processor* structs to cpumask_var_t
x86: use cpumask_var_t in acpi/boot.c
x86: cleanup some remaining usages of NR_CPUS where s/b nr_cpu_ids
sched: put back some stack hog changes that were undone in kernel/sched.c
x86: enable cpus display of kernel_max and offlined cpus
ia64: cpumask fix for is_affinity_mask_valid()
cpumask: convert RCU implementations, fix
xtensa: define __fls
mn10300: define __fls
m32r: define __fls
h8300: define __fls
frv: define __fls
cris: define __fls
cpumask: CONFIG_DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
cpumask: zero extra bits in alloc_cpumask_var_node
cpumask: replace for_each_cpu_mask_nr with for_each_cpu in kernel/time/
cpumask: convert mm/
...
Diffstat (limited to 'kernel/cpu.c')
-rw-r--r--	kernel/cpu.c	144
1 file changed, 99 insertions(+), 45 deletions(-)
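The recurring pattern in the hunks below is the replacement of fixed-size cpumask_t values (always NR_CPUS bits, often held on the stack or copied by value) with cpumask_var_t, which only becomes a real heap allocation when CONFIG_CPUMASK_OFFSTACK=y. A simplified sketch of the two shapes of cpumask_var_t, paraphrased from <linux/cpumask.h> (details elided):

/* Paraphrased from <linux/cpumask.h>: cpumask_var_t has two shapes. */
#ifdef CONFIG_CPUMASK_OFFSTACK
/* A real pointer: alloc_cpumask_var() allocates it, it may be NULL,
 * and free_cpumask_var() must run on every exit path. */
typedef struct cpumask *cpumask_var_t;
#else
/* A one-element array: decays to a pointer in expressions, lives in
 * the enclosing storage, and alloc/free are no-ops that succeed. */
typedef struct cpumask cpumask_var_t[1];
#endif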
diff --git a/kernel/cpu.c b/kernel/cpu.c
index bae131a1211b..47fff3b63cbf 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -15,30 +15,8 @@
 #include <linux/stop_machine.h>
 #include <linux/mutex.h>
 
-/*
- * Represents all cpu's present in the system
- * In systems capable of hotplug, this map could dynamically grow
- * as new cpu's are detected in the system via any platform specific
- * method, such as ACPI for e.g.
- */
-cpumask_t cpu_present_map __read_mostly;
-EXPORT_SYMBOL(cpu_present_map);
-
-/*
- * Represents all cpu's that are currently online.
- */
-cpumask_t cpu_online_map __read_mostly;
-EXPORT_SYMBOL(cpu_online_map);
-
-#ifdef CONFIG_INIT_ALL_POSSIBLE
-cpumask_t cpu_possible_map __read_mostly = CPU_MASK_ALL;
-#else
-cpumask_t cpu_possible_map __read_mostly;
-#endif
-EXPORT_SYMBOL(cpu_possible_map);
-
 #ifdef CONFIG_SMP
-/* Serializes the updates to cpu_online_map, cpu_present_map */
+/* Serializes the updates to cpu_online_mask, cpu_present_mask */
 static DEFINE_MUTEX(cpu_add_remove_lock);
 
 static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);
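An aside on this hunk: the exported cpu_*_map globals are not simply deleted, they reappear at the bottom of this diff as private bitmaps behind read-only pointers. Schematically, using the online mask as the example (both declarations are taken from this diff, old side and new side):

/* Old: a writable NR_CPUS-bit struct, exported to the world. */
cpumask_t cpu_online_map __read_mostly;
EXPORT_SYMBOL(cpu_online_map);

/* New: the storage is private to kernel/cpu.c; only a const pointer
 * is exported, so writers must go through set_cpu_online() below. */
static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits);
EXPORT_SYMBOL(cpu_online_mask);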
@@ -65,8 +43,6 @@ void __init cpu_hotplug_init(void)
        cpu_hotplug.refcount = 0;
 }
 
-cpumask_t cpu_active_map;
-
 #ifdef CONFIG_HOTPLUG_CPU
 
 void get_online_cpus(void)
@@ -97,7 +73,7 @@ EXPORT_SYMBOL_GPL(put_online_cpus);
 
 /*
  * The following two API's must be used when attempting
- * to serialize the updates to cpu_online_map, cpu_present_map.
+ * to serialize the updates to cpu_online_mask, cpu_present_mask.
  */
 void cpu_maps_update_begin(void)
 {
@@ -218,7 +194,7 @@ static int __ref take_cpu_down(void *_param)
 static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
 {
        int err, nr_calls = 0;
-       cpumask_t old_allowed, tmp;
+       cpumask_var_t old_allowed;
        void *hcpu = (void *)(long)cpu;
        unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
        struct take_cpu_down_param tcd_param = {
@@ -232,6 +208,9 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
        if (!cpu_online(cpu))
                return -EINVAL;
 
+       if (!alloc_cpumask_var(&old_allowed, GFP_KERNEL))
+               return -ENOMEM;
+
        cpu_hotplug_begin();
        err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE | mod,
                                        hcpu, -1, &nr_calls);
@@ -246,13 +225,11 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
        }
 
        /* Ensure that we are not runnable on dying cpu */
-       old_allowed = current->cpus_allowed;
-       cpus_setall(tmp);
-       cpu_clear(cpu, tmp);
-       set_cpus_allowed_ptr(current, &tmp);
-       tmp = cpumask_of_cpu(cpu);
+       cpumask_copy(old_allowed, &current->cpus_allowed);
+       set_cpus_allowed_ptr(current,
+                      cpumask_of(cpumask_any_but(cpu_online_mask, cpu)));
 
-       err = __stop_machine(take_cpu_down, &tcd_param, &tmp);
+       err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
        if (err) {
                /* CPU didn't die: tell everyone.  Can't complain. */
                if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
@@ -278,7 +255,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
        check_for_tasks(cpu);
 
 out_allowed:
-       set_cpus_allowed_ptr(current, &old_allowed);
+       set_cpus_allowed_ptr(current, old_allowed);
 out_release:
        cpu_hotplug_done();
        if (!err) {
@@ -286,6 +263,7 @@ out_release:
                                            hcpu) == NOTIFY_BAD)
                        BUG();
        }
+       free_cpumask_var(old_allowed);
        return err;
 }
 
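Taken together, the four _cpu_down() hunks above implement one allocate-early/free-late lifetime for old_allowed. A condensed sketch of that flow (example_down is a made-up name; locking, notifiers and error paths are trimmed):

static int example_down(unsigned int cpu)
{
        cpumask_var_t old_allowed;
        int err;

        /* Allocate before doing any work, so -ENOMEM is a clean exit. */
        if (!alloc_cpumask_var(&old_allowed, GFP_KERNEL))
                return -ENOMEM;

        /* Save our affinity, then migrate off the dying CPU: any
         * online CPU except the victim will do. */
        cpumask_copy(old_allowed, &current->cpus_allowed);
        set_cpus_allowed_ptr(current,
                             cpumask_of(cpumask_any_but(cpu_online_mask, cpu)));

        err = 0;        /* ... real teardown via __stop_machine() ... */

        /* Restore affinity, then free on the single exit path. */
        set_cpus_allowed_ptr(current, old_allowed);
        free_cpumask_var(old_allowed);
        return err;
}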
@@ -304,7 +282,7 @@ int __ref cpu_down(unsigned int cpu)
 
        /*
         * Make sure the all cpus did the reschedule and are not
-        * using stale version of the cpu_active_map.
+        * using stale version of the cpu_active_mask.
         * This is not strictly necessary becuase stop_machine()
         * that we run down the line already provides the required
         * synchronization. But it's really a side effect and we do not
@@ -368,7 +346,7 @@ out_notify:
 int __cpuinit cpu_up(unsigned int cpu)
 {
        int err = 0;
-       if (!cpu_isset(cpu, cpu_possible_map)) {
+       if (!cpu_possible(cpu)) {
                printk(KERN_ERR "can't online cpu %d because it is not "
                        "configured as may-hotadd at boot time\n", cpu);
 #if defined(CONFIG_IA64) || defined(CONFIG_X86_64)
@@ -393,25 +371,25 @@ out:
 }
 
 #ifdef CONFIG_PM_SLEEP_SMP
-static cpumask_t frozen_cpus;
+static cpumask_var_t frozen_cpus;
 
 int disable_nonboot_cpus(void)
 {
        int cpu, first_cpu, error = 0;
 
        cpu_maps_update_begin();
-       first_cpu = first_cpu(cpu_online_map);
+       first_cpu = cpumask_first(cpu_online_mask);
        /* We take down all of the non-boot CPUs in one shot to avoid races
         * with the userspace trying to use the CPU hotplug at the same time
         */
-       cpus_clear(frozen_cpus);
+       cpumask_clear(frozen_cpus);
        printk("Disabling non-boot CPUs ...\n");
        for_each_online_cpu(cpu) {
                if (cpu == first_cpu)
                        continue;
                error = _cpu_down(cpu, 1);
                if (!error) {
-                       cpu_set(cpu, frozen_cpus);
+                       cpumask_set_cpu(cpu, frozen_cpus);
                        printk("CPU%d is down\n", cpu);
                } else {
                        printk(KERN_ERR "Error taking CPU%d down: %d\n",
@@ -437,11 +415,11 @@ void __ref enable_nonboot_cpus(void)
        /* Allow everyone to use the CPU hotplug again */
        cpu_maps_update_begin();
        cpu_hotplug_disabled = 0;
-       if (cpus_empty(frozen_cpus))
+       if (cpumask_empty(frozen_cpus))
                goto out;
 
        printk("Enabling non-boot CPUs ...\n");
-       for_each_cpu_mask_nr(cpu, frozen_cpus) {
+       for_each_cpu(cpu, frozen_cpus) {
                error = _cpu_up(cpu, 1);
                if (!error) {
                        printk("CPU%d is up\n", cpu);
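The iterator swap above is more than a rename: for_each_cpu_mask_nr() took a cpumask_t operand, while for_each_cpu() takes a const struct cpumask *, which is exactly what a cpumask_var_t decays to. Schematically:

int cpu;

/* Old: the macro expanded against a full cpumask_t operand. */
for_each_cpu_mask_nr(cpu, frozen_cpus)
        printk("CPU%d was frozen\n", cpu);

/* New: pointer-based, so it works unchanged whether frozen_cpus is a
 * heap mask (CONFIG_CPUMASK_OFFSTACK=y) or a one-element array. */
for_each_cpu(cpu, frozen_cpus)
        printk("CPU%d was frozen\n", cpu);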
@@ -449,10 +427,18 @@ void __ref enable_nonboot_cpus(void)
                }
                printk(KERN_WARNING "Error taking CPU%d up: %d\n", cpu, error);
        }
-       cpus_clear(frozen_cpus);
+       cpumask_clear(frozen_cpus);
 out:
        cpu_maps_update_done();
 }
+
+static int alloc_frozen_cpus(void)
+{
+       if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
+               return -ENOMEM;
+       return 0;
+}
+core_initcall(alloc_frozen_cpus);
 #endif /* CONFIG_PM_SLEEP_SMP */
 
 /**
@@ -468,7 +454,7 @@ void __cpuinit notify_cpu_starting(unsigned int cpu)
        unsigned long val = CPU_STARTING;
 
 #ifdef CONFIG_PM_SLEEP_SMP
-       if (cpu_isset(cpu, frozen_cpus))
+       if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
                val = CPU_STARTING_FROZEN;
 #endif /* CONFIG_PM_SLEEP_SMP */
        raw_notifier_call_chain(&cpu_chain, val, (void *)(long)cpu);
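Why the NULL test: with CONFIG_CPUMASK_OFFSTACK=y, frozen_cpus is a real pointer that stays NULL until alloc_frozen_cpus() runs as a core initcall, and notify_cpu_starting() fires for secondary CPUs, which (if I read the 2.6.28-era boot ordering right) are brought online from smp_init() before the initcalls run. A minimal sketch of the resulting idiom, with hypothetical names:

/* Hypothetical example: any cpumask_var_t that is only filled in by
 * an initcall must be NULL-checked on paths that can run earlier. */
static cpumask_var_t late_mask;         /* NULL until its initcall runs */

static bool cpu_is_marked(unsigned int cpu)
{
        return late_mask != NULL && cpumask_test_cpu(cpu, late_mask);
}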
@@ -480,7 +466,7 @@ void __cpuinit notify_cpu_starting(unsigned int cpu)
  * cpu_bit_bitmap[] is a special, "compressed" data structure that
  * represents all NR_CPUS bits binary values of 1<<nr.
  *
- * It is used by cpumask_of_cpu() to get a constant address to a CPU
+ * It is used by cpumask_of() to get a constant address to a CPU
  * mask value that has a single bit set only.
  */
 
@@ -503,3 +489,71 @@ EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
 
 const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
 EXPORT_SYMBOL(cpu_all_bits);
+
+#ifdef CONFIG_INIT_ALL_POSSIBLE
+static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly
+       = CPU_BITS_ALL;
+#else
+static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly;
+#endif
+const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits);
+EXPORT_SYMBOL(cpu_possible_mask);
+
+static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly;
+const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits);
+EXPORT_SYMBOL(cpu_online_mask);
+
+static DECLARE_BITMAP(cpu_present_bits, CONFIG_NR_CPUS) __read_mostly;
+const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits);
+EXPORT_SYMBOL(cpu_present_mask);
+
+static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly;
+const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits);
+EXPORT_SYMBOL(cpu_active_mask);
+
+void set_cpu_possible(unsigned int cpu, bool possible)
+{
+       if (possible)
+               cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits));
+       else
+               cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits));
+}
+
+void set_cpu_present(unsigned int cpu, bool present)
+{
+       if (present)
+               cpumask_set_cpu(cpu, to_cpumask(cpu_present_bits));
+       else
+               cpumask_clear_cpu(cpu, to_cpumask(cpu_present_bits));
+}
+
+void set_cpu_online(unsigned int cpu, bool online)
+{
+       if (online)
+               cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits));
+       else
+               cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits));
+}
+
+void set_cpu_active(unsigned int cpu, bool active)
+{
+       if (active)
+               cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
+       else
+               cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits));
+}
+
+void init_cpu_present(const struct cpumask *src)
+{
+       cpumask_copy(to_cpumask(cpu_present_bits), src);
+}
+
+void init_cpu_possible(const struct cpumask *src)
+{
+       cpumask_copy(to_cpumask(cpu_possible_bits), src);
+}
+
+void init_cpu_online(const struct cpumask *src)
+{
+       cpumask_copy(to_cpumask(cpu_online_bits), src);
+}
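A usage note on the accessors this final hunk introduces, sketched from patterns visible elsewhere in the diff (the cpu_online()/cpu_possible() read-side wrappers are assumed from their use in the cpu_up() hunk above):

/* Before this series: writers poked the exported maps directly. */
cpu_set(cpu, cpu_online_map);
cpu_clear(cpu, cpu_online_map);

/* After: the bitmaps are static to kernel/cpu.c, so architecture code
 * must update them through the helpers defined above... */
set_cpu_online(cpu, true);
set_cpu_online(cpu, false);

/* ...while readers go through the const pointers or their wrappers. */
if (cpu_online(cpu) && cpumask_test_cpu(cpu, cpu_active_mask))
        /* CPU is up and accepting scheduled work */;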