Diffstat (limited to 'kernel/cpu.c')
-rw-r--r--	kernel/cpu.c	153
1 file changed, 106 insertions(+), 47 deletions(-)

diff --git a/kernel/cpu.c b/kernel/cpu.c
index 5a732c5ef08b..30e74dd6d01b 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -15,29 +15,8 @@
 #include <linux/stop_machine.h>
 #include <linux/mutex.h>
 
-/*
- * Represents all cpu's present in the system
- * In systems capable of hotplug, this map could dynamically grow
- * as new cpu's are detected in the system via any platform specific
- * method, such as ACPI for e.g.
- */
-cpumask_t cpu_present_map __read_mostly;
-EXPORT_SYMBOL(cpu_present_map);
-
-#ifndef CONFIG_SMP
-
-/*
- * Represents all cpu's that are currently online.
- */
-cpumask_t cpu_online_map __read_mostly = CPU_MASK_ALL;
-EXPORT_SYMBOL(cpu_online_map);
-
-cpumask_t cpu_possible_map __read_mostly = CPU_MASK_ALL;
-EXPORT_SYMBOL(cpu_possible_map);
-
-#else /* CONFIG_SMP */
-
-/* Serializes the updates to cpu_online_map, cpu_present_map */
+#ifdef CONFIG_SMP
+/* Serializes the updates to cpu_online_mask, cpu_present_mask */
 static DEFINE_MUTEX(cpu_add_remove_lock);
 
 static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);
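The bare cpumask_t globals deleted above are not simply gone: the end of this patch re-introduces them as read-only `const struct cpumask *` pointers (cpu_possible_mask, cpu_online_mask, cpu_present_mask, cpu_active_mask) backed by bitmaps private to kernel/cpu.c. A minimal sketch of what the change looks like from a caller's side, with do_work() as a hypothetical stand-in:

	/* Old style: the map was an exported, writable global value. */
	if (cpu_isset(cpu, cpu_online_map))
		do_work(cpu);		/* do_work() is illustrative */

	/* New style: only a const pointer is exported; callers can
	 * test the mask but can no longer modify it directly. */
	if (cpumask_test_cpu(cpu, cpu_online_mask))
		do_work(cpu);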
@@ -64,8 +43,6 @@ void __init cpu_hotplug_init(void)
 	cpu_hotplug.refcount = 0;
 }
 
-cpumask_t cpu_active_map;
-
 #ifdef CONFIG_HOTPLUG_CPU
 
 void get_online_cpus(void)
@@ -96,7 +73,7 @@ EXPORT_SYMBOL_GPL(put_online_cpus);
 
 /*
  * The following two API's must be used when attempting
- * to serialize the updates to cpu_online_map, cpu_present_map.
+ * to serialize the updates to cpu_online_mask, cpu_present_mask.
  */
 void cpu_maps_update_begin(void)
 {
@@ -217,7 +194,7 @@ static int __ref take_cpu_down(void *_param)
 static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
 {
 	int err, nr_calls = 0;
-	cpumask_t old_allowed, tmp;
+	cpumask_var_t old_allowed;
 	void *hcpu = (void *)(long)cpu;
 	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
 	struct take_cpu_down_param tcd_param = {
@@ -231,6 +208,9 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
 	if (!cpu_online(cpu))
 		return -EINVAL;
 
+	if (!alloc_cpumask_var(&old_allowed, GFP_KERNEL))
+		return -ENOMEM;
+
 	cpu_hotplug_begin();
 	err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE | mod,
 					hcpu, -1, &nr_calls);
@@ -245,13 +225,11 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
 	}
 
 	/* Ensure that we are not runnable on dying cpu */
-	old_allowed = current->cpus_allowed;
-	cpus_setall(tmp);
-	cpu_clear(cpu, tmp);
-	set_cpus_allowed_ptr(current, &tmp);
-	tmp = cpumask_of_cpu(cpu);
+	cpumask_copy(old_allowed, &current->cpus_allowed);
+	set_cpus_allowed_ptr(current,
+			     cpumask_of(cpumask_any_but(cpu_online_mask, cpu)));
 
-	err = __stop_machine(take_cpu_down, &tcd_param, &tmp);
+	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
 	if (err) {
 		/* CPU didn't die: tell everyone. Can't complain. */
 		if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
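Two things change in the hunk above besides spelling: the on-stack `cpumask_t tmp` (NR_CPUS bits, a stack hazard on large configs) disappears entirely, and the rebinding of current is narrowed from "any CPU but the dying one" to a single other online CPU. A rough equivalent of the new affinity step, assuming at least one other CPU is online (true here, since cpu itself is online and about to go down):

	/* cpumask_any_but() picks some CPU in the mask other than 'cpu';
	 * cpumask_of() yields a constant single-bit mask for it, so no
	 * temporary mask needs to live on the stack. */
	unsigned int dest = cpumask_any_but(cpu_online_mask, cpu);

	set_cpus_allowed_ptr(current, cpumask_of(dest));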
@@ -277,7 +255,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
 	check_for_tasks(cpu);
 
 out_allowed:
-	set_cpus_allowed_ptr(current, &old_allowed);
+	set_cpus_allowed_ptr(current, old_allowed);
 out_release:
 	cpu_hotplug_done();
 	if (!err) {
@@ -285,13 +263,17 @@ out_release:
 				hcpu) == NOTIFY_BAD)
 			BUG();
 	}
+	free_cpumask_var(old_allowed);
 	return err;
 }
 
 int __ref cpu_down(unsigned int cpu)
 {
-	int err = 0;
+	int err;
 
+	err = stop_machine_create();
+	if (err)
+		return err;
 	cpu_maps_update_begin();
 
 	if (cpu_hotplug_disabled) {
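Taken together, the _cpu_down() hunks above are a textbook cpumask_var_t conversion: allocate before use, treat the variable as a `struct cpumask *`, and free on every exit path. A condensed sketch of the pattern (the function name is illustrative, not from this file); the point is that with CONFIG_CPUMASK_OFFSTACK=y the mask lives on the heap rather than the stack:

	#include <linux/cpumask.h>
	#include <linux/sched.h>

	static int saved_affinity_example(unsigned int cpu)	/* hypothetical */
	{
		cpumask_var_t saved;
		int err = 0;

		/* Heap allocation under CONFIG_CPUMASK_OFFSTACK=y, a plain
		 * on-stack array otherwise; the API is identical either way. */
		if (!alloc_cpumask_var(&saved, GFP_KERNEL))
			return -ENOMEM;

		cpumask_copy(saved, &current->cpus_allowed);	/* save */
		/* ... migrate current, run __stop_machine(), ... */
		set_cpus_allowed_ptr(current, saved);		/* restore */

		free_cpumask_var(saved);	/* must pair with the alloc */
		return err;
	}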
@@ -303,7 +285,7 @@ int __ref cpu_down(unsigned int cpu)
 
 	/*
 	 * Make sure the all cpus did the reschedule and are not
-	 * using stale version of the cpu_active_map.
+	 * using stale version of the cpu_active_mask.
 	 * This is not strictly necessary becuase stop_machine()
 	 * that we run down the line already provides the required
 	 * synchronization. But it's really a side effect and we do not
@@ -318,6 +300,7 @@ int __ref cpu_down(unsigned int cpu)
 
 out:
 	cpu_maps_update_done();
+	stop_machine_destroy();
 	return err;
 }
 EXPORT_SYMBOL(cpu_down);
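As this hunk suggests, stop_machine_create() sets up the stop_machine worker infrastructure up front so that a later __stop_machine() call inside _cpu_down() does not have to allocate at an awkward point, and stop_machine_destroy() releases it again. A sketch of the pairing as used here, hedged to what this patch shows:

	int err;

	err = stop_machine_create();	/* may sleep; reserves the workers */
	if (err)
		return err;		/* nothing to undo yet */

	/* ... anything that may end up calling __stop_machine() ... */

	stop_machine_destroy();		/* balance the create on all paths */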
@@ -367,7 +350,7 @@ out_notify:
 int __cpuinit cpu_up(unsigned int cpu)
 {
 	int err = 0;
-	if (!cpu_isset(cpu, cpu_possible_map)) {
+	if (!cpu_possible(cpu)) {
 		printk(KERN_ERR "can't online cpu %d because it is not "
 			"configured as may-hotadd at boot time\n", cpu);
 #if defined(CONFIG_IA64) || defined(CONFIG_X86_64)
@@ -392,25 +375,25 @@ out:
 }
 
 #ifdef CONFIG_PM_SLEEP_SMP
-static cpumask_t frozen_cpus;
+static cpumask_var_t frozen_cpus;
 
 int disable_nonboot_cpus(void)
 {
 	int cpu, first_cpu, error = 0;
 
 	cpu_maps_update_begin();
-	first_cpu = first_cpu(cpu_online_map);
+	first_cpu = cpumask_first(cpu_online_mask);
 	/* We take down all of the non-boot CPUs in one shot to avoid races
 	 * with the userspace trying to use the CPU hotplug at the same time
 	 */
-	cpus_clear(frozen_cpus);
+	cpumask_clear(frozen_cpus);
 	printk("Disabling non-boot CPUs ...\n");
 	for_each_online_cpu(cpu) {
 		if (cpu == first_cpu)
 			continue;
 		error = _cpu_down(cpu, 1);
 		if (!error) {
-			cpu_set(cpu, frozen_cpus);
+			cpumask_set_cpu(cpu, frozen_cpus);
 			printk("CPU%d is down\n", cpu);
 		} else {
 			printk(KERN_ERR "Error taking CPU%d down: %d\n",
@@ -436,11 +419,11 @@ void __ref enable_nonboot_cpus(void)
 	/* Allow everyone to use the CPU hotplug again */
 	cpu_maps_update_begin();
 	cpu_hotplug_disabled = 0;
-	if (cpus_empty(frozen_cpus))
+	if (cpumask_empty(frozen_cpus))
 		goto out;
 
 	printk("Enabling non-boot CPUs ...\n");
-	for_each_cpu_mask_nr(cpu, frozen_cpus) {
+	for_each_cpu(cpu, frozen_cpus) {
 		error = _cpu_up(cpu, 1);
 		if (!error) {
 			printk("CPU%d is up\n", cpu);
@@ -448,10 +431,18 @@ void __ref enable_nonboot_cpus(void)
 		}
 		printk(KERN_WARNING "Error taking CPU%d up: %d\n", cpu, error);
 	}
-	cpus_clear(frozen_cpus);
+	cpumask_clear(frozen_cpus);
 out:
 	cpu_maps_update_done();
 }
+
+static int alloc_frozen_cpus(void)
+{
+	if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
+		return -ENOMEM;
+	return 0;
+}
+core_initcall(alloc_frozen_cpus);
 #endif /* CONFIG_PM_SLEEP_SMP */
 
 /**
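frozen_cpus used to be static storage and therefore implicitly zeroed in .bss; now that it is allocated at core_initcall time, __GFP_ZERO restores that zero-initialization, and anything that can run before initcalls must cope with the mask not existing yet. That is exactly why notify_cpu_starting() in the next hunk grows a NULL check. A comment-only sketch of the ordering concern (timeline simplified, and assuming the usual init/main.c sequence of this era, where smp_init() precedes the initcalls):

	/*
	 * smp_init()         -> secondary CPUs boot; notify_cpu_starting()
	 *                       can run while frozen_cpus is still NULL
	 * core_initcall()s   -> alloc_frozen_cpus() allocates frozen_cpus
	 * suspend/resume     -> disable_nonboot_cpus()/enable_nonboot_cpus()
	 *                       can rely on the mask existing
	 *
	 * With CONFIG_CPUMASK_OFFSTACK=y, frozen_cpus is a real pointer,
	 * hence the "frozen_cpus != NULL" guard below.
	 */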
@@ -462,12 +453,12 @@ out:
  * It must be called by the arch code on the new cpu, before the new cpu
  * enables interrupts and before the "boot" cpu returns from __cpu_up().
  */
-void notify_cpu_starting(unsigned int cpu)
+void __cpuinit notify_cpu_starting(unsigned int cpu)
 {
 	unsigned long val = CPU_STARTING;
 
 #ifdef CONFIG_PM_SLEEP_SMP
-	if (cpu_isset(cpu, frozen_cpus))
+	if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
 		val = CPU_STARTING_FROZEN;
 #endif /* CONFIG_PM_SLEEP_SMP */
 	raw_notifier_call_chain(&cpu_chain, val, (void *)(long)cpu);
@@ -479,7 +470,7 @@ void notify_cpu_starting(unsigned int cpu)
  * cpu_bit_bitmap[] is a special, "compressed" data structure that
  * represents all NR_CPUS bits binary values of 1<<nr.
  *
- * It is used by cpumask_of_cpu() to get a constant address to a CPU
+ * It is used by cpumask_of() to get a constant address to a CPU
  * mask value that has a single bit set only.
  */
 
@@ -502,3 +493,71 @@ EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
 
 const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
 EXPORT_SYMBOL(cpu_all_bits);
+
+#ifdef CONFIG_INIT_ALL_POSSIBLE
+static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly
+	= CPU_BITS_ALL;
+#else
+static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly;
+#endif
+const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits);
+EXPORT_SYMBOL(cpu_possible_mask);
+
+static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly;
+const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits);
+EXPORT_SYMBOL(cpu_online_mask);
+
+static DECLARE_BITMAP(cpu_present_bits, CONFIG_NR_CPUS) __read_mostly;
+const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits);
+EXPORT_SYMBOL(cpu_present_mask);
+
+static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly;
+const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits);
+EXPORT_SYMBOL(cpu_active_mask);
+
+void set_cpu_possible(unsigned int cpu, bool possible)
+{
+	if (possible)
+		cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits));
+	else
+		cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits));
+}
+
+void set_cpu_present(unsigned int cpu, bool present)
+{
+	if (present)
+		cpumask_set_cpu(cpu, to_cpumask(cpu_present_bits));
+	else
+		cpumask_clear_cpu(cpu, to_cpumask(cpu_present_bits));
+}
+
+void set_cpu_online(unsigned int cpu, bool online)
+{
+	if (online)
+		cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits));
+	else
+		cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits));
+}
+
+void set_cpu_active(unsigned int cpu, bool active)
+{
+	if (active)
+		cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
+	else
+		cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits));
+}
+
+void init_cpu_present(const struct cpumask *src)
+{
+	cpumask_copy(to_cpumask(cpu_present_bits), src);
+}
+
+void init_cpu_possible(const struct cpumask *src)
+{
+	cpumask_copy(to_cpumask(cpu_possible_bits), src);
+}
+
+void init_cpu_online(const struct cpumask *src)
+{
+	cpumask_copy(to_cpumask(cpu_online_bits), src);
+}
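With the bitmaps now private to this file, architecture code is expected to go through the new accessors instead of assigning whole cpumask_t values. A hypothetical bring-up path showing the intended call sites; only the init_cpu_*()/set_cpu_*() calls are from this patch, the surrounding functions are illustrative:

	#include <linux/cpumask.h>
	#include <linux/smp.h>

	void __init my_arch_smp_prepare(void)		/* hypothetical */
	{
		/* Start from just the boot CPU, then grow as CPUs are found. */
		init_cpu_possible(cpumask_of(0));
		set_cpu_present(0, true);
		set_cpu_online(0, true);
	}

	void __cpuinit my_arch_secondary_start(void)	/* hypothetical */
	{
		/* Each secondary marks itself online once it is alive. */
		set_cpu_online(smp_processor_id(), true);
	}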
