path: root/kernel/cpu.c
Diffstat (limited to 'kernel/cpu.c')
-rw-r--r--	kernel/cpu.c	150
1 file changed, 104 insertions(+), 46 deletions(-)
diff --git a/kernel/cpu.c b/kernel/cpu.c
index bae131a1211b..30e74dd6d01b 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -15,30 +15,8 @@
 #include <linux/stop_machine.h>
 #include <linux/mutex.h>
 
-/*
- * Represents all cpu's present in the system
- * In systems capable of hotplug, this map could dynamically grow
- * as new cpu's are detected in the system via any platform specific
- * method, such as ACPI for e.g.
- */
-cpumask_t cpu_present_map __read_mostly;
-EXPORT_SYMBOL(cpu_present_map);
-
-/*
- * Represents all cpu's that are currently online.
- */
-cpumask_t cpu_online_map __read_mostly;
-EXPORT_SYMBOL(cpu_online_map);
-
-#ifdef CONFIG_INIT_ALL_POSSIBLE
-cpumask_t cpu_possible_map __read_mostly = CPU_MASK_ALL;
-#else
-cpumask_t cpu_possible_map __read_mostly;
-#endif
-EXPORT_SYMBOL(cpu_possible_map);
-
 #ifdef CONFIG_SMP
-/* Serializes the updates to cpu_online_map, cpu_present_map */
+/* Serializes the updates to cpu_online_mask, cpu_present_mask */
 static DEFINE_MUTEX(cpu_add_remove_lock);
 
 static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);
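
The globals removed above do not disappear: the final hunk of this patch reintroduces them as private bitmaps (cpu_possible_bits and friends) exported only through const struct cpumask pointers and small accessor functions. A minimal sketch of the before/after access pattern, assuming the set_cpu_*() helpers shown later in this patch:

    /* Before: writable global; any code could modify the map directly. */
    cpu_set(cpu, cpu_online_map);

    /* After: the bitmap is static to kernel/cpu.c; writers must go through
     * the helpers, and readers only ever see a const pointer. */
    set_cpu_online(cpu, true);
    if (cpumask_test_cpu(cpu, cpu_online_mask))
            ;       /* read-only access */
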
@@ -65,8 +43,6 @@ void __init cpu_hotplug_init(void)
 	cpu_hotplug.refcount = 0;
 }
 
-cpumask_t cpu_active_map;
-
 #ifdef CONFIG_HOTPLUG_CPU
 
 void get_online_cpus(void)
@@ -97,7 +73,7 @@ EXPORT_SYMBOL_GPL(put_online_cpus);
 
 /*
  * The following two API's must be used when attempting
- * to serialize the updates to cpu_online_map, cpu_present_map.
+ * to serialize the updates to cpu_online_mask, cpu_present_mask.
  */
 void cpu_maps_update_begin(void)
 {
@@ -218,7 +194,7 @@ static int __ref take_cpu_down(void *_param)
 static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
 {
 	int err, nr_calls = 0;
-	cpumask_t old_allowed, tmp;
+	cpumask_var_t old_allowed;
 	void *hcpu = (void *)(long)cpu;
 	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
 	struct take_cpu_down_param tcd_param = {
@@ -232,6 +208,9 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
 	if (!cpu_online(cpu))
 		return -EINVAL;
 
+	if (!alloc_cpumask_var(&old_allowed, GFP_KERNEL))
+		return -ENOMEM;
+
 	cpu_hotplug_begin();
 	err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE | mod,
 					hcpu, -1, &nr_calls);
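
alloc_cpumask_var() is what makes cpumask_var_t usable in both configurations: with CONFIG_CPUMASK_OFFSTACK=y it kmallocs the mask and can fail (hence the -ENOMEM path above), while without it the variable is an ordinary on-stack cpumask and "allocation" is a no-op that always succeeds. A simplified sketch of the two typedefs, paraphrased from the cpumask API of this era (not part of this patch):

    #ifdef CONFIG_CPUMASK_OFFSTACK
    typedef struct cpumask *cpumask_var_t;   /* heap-allocated, NULL until alloc */
    #else
    typedef struct cpumask cpumask_var_t[1]; /* lives in the caller's frame */
    #endif
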
@@ -246,13 +225,11 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
 	}
 
 	/* Ensure that we are not runnable on dying cpu */
-	old_allowed = current->cpus_allowed;
-	cpus_setall(tmp);
-	cpu_clear(cpu, tmp);
-	set_cpus_allowed_ptr(current, &tmp);
-	tmp = cpumask_of_cpu(cpu);
+	cpumask_copy(old_allowed, &current->cpus_allowed);
+	set_cpus_allowed_ptr(current,
+			     cpumask_of(cpumask_any_but(cpu_online_mask, cpu)));
 
-	err = __stop_machine(take_cpu_down, &tcd_param, &tmp);
+	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
 	if (err) {
 		/* CPU didn't die: tell everyone.  Can't complain. */
 		if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
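
Two things change in this hunk beyond the API names. First, cpumask_of(cpu) returns the address of a constant single-bit mask (built from cpu_bit_bitmap[], described further down), so the on-stack tmp is no longer needed at all. Second, the semantics narrow slightly: the old code let current run on every CPU except the dying one, while the new code pins it to one arbitrary online CPU other than cpu. Both satisfy the comment's requirement of staying off the dying CPU. A hedged illustration of the new call:

    unsigned int other = cpumask_any_but(cpu_online_mask, cpu); /* any online CPU != cpu */
    set_cpus_allowed_ptr(current, cpumask_of(other));           /* const 1-bit mask */
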
@@ -278,7 +255,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
 	check_for_tasks(cpu);
 
 out_allowed:
-	set_cpus_allowed_ptr(current, &old_allowed);
+	set_cpus_allowed_ptr(current, old_allowed);
 out_release:
 	cpu_hotplug_done();
 	if (!err) {
@@ -286,13 +263,17 @@ out_release:
 					    hcpu) == NOTIFY_BAD)
 			BUG();
 	}
+	free_cpumask_var(old_allowed);
 	return err;
 }
 
 int __ref cpu_down(unsigned int cpu)
 {
-	int err = 0;
+	int err;
 
+	err = stop_machine_create();
+	if (err)
+		return err;
 	cpu_maps_update_begin();
 
 	if (cpu_hotplug_disabled) {
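
Note the placement of free_cpumask_var(old_allowed): it sits just before the common return, so the mask is released on both the success and error paths of _cpu_down(). The stop_machine_create()/stop_machine_destroy() pair, added to the kernel around the same time as this patch, pre-allocates the stop_machine worker threads while the caller can still sleep and fail gracefully, so the later __stop_machine() call inside _cpu_down() cannot fail on memory while the hotplug locks are held. The calling discipline this hunk and the one below establish:

    err = stop_machine_create();        /* may sleep, may return -ENOMEM */
    if (err)
            return err;
    cpu_maps_update_begin();
    /* ... _cpu_down() eventually runs __stop_machine() ... */
    cpu_maps_update_done();
    stop_machine_destroy();             /* release the pre-allocated resources */
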
@@ -304,7 +285,7 @@ int __ref cpu_down(unsigned int cpu)
 
 	/*
 	 * Make sure the all cpus did the reschedule and are not
-	 * using stale version of the cpu_active_map.
+	 * using stale version of the cpu_active_mask.
 	 * This is not strictly necessary becuase stop_machine()
 	 * that we run down the line already provides the required
 	 * synchronization. But it's really a side effect and we do not
@@ -319,6 +300,7 @@ int __ref cpu_down(unsigned int cpu)
 
 out:
 	cpu_maps_update_done();
+	stop_machine_destroy();
 	return err;
 }
 EXPORT_SYMBOL(cpu_down);
@@ -368,7 +350,7 @@ out_notify:
 int __cpuinit cpu_up(unsigned int cpu)
 {
 	int err = 0;
-	if (!cpu_isset(cpu, cpu_possible_map)) {
+	if (!cpu_possible(cpu)) {
 		printk(KERN_ERR "can't online cpu %d because it is not "
 			"configured as may-hotadd at boot time\n", cpu);
 #if defined(CONFIG_IA64) || defined(CONFIG_X86_64)
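
cpu_possible() is a thin predicate over the new const mask; as defined in <linux/cpumask.h> of this era (quoted from memory, not part of this patch):

    #define cpu_possible(cpu)	cpumask_test_cpu((cpu), cpu_possible_mask)
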
@@ -393,25 +375,25 @@ out:
 }
 
 #ifdef CONFIG_PM_SLEEP_SMP
-static cpumask_t frozen_cpus;
+static cpumask_var_t frozen_cpus;
 
 int disable_nonboot_cpus(void)
 {
 	int cpu, first_cpu, error = 0;
 
 	cpu_maps_update_begin();
-	first_cpu = first_cpu(cpu_online_map);
+	first_cpu = cpumask_first(cpu_online_mask);
 	/* We take down all of the non-boot CPUs in one shot to avoid races
 	 * with the userspace trying to use the CPU hotplug at the same time
 	 */
-	cpus_clear(frozen_cpus);
+	cpumask_clear(frozen_cpus);
 	printk("Disabling non-boot CPUs ...\n");
 	for_each_online_cpu(cpu) {
 		if (cpu == first_cpu)
 			continue;
 		error = _cpu_down(cpu, 1);
 		if (!error) {
-			cpu_set(cpu, frozen_cpus);
+			cpumask_set_cpu(cpu, frozen_cpus);
 			printk("CPU%d is down\n", cpu);
 		} else {
 			printk(KERN_ERR "Error taking CPU%d down: %d\n",
@@ -437,11 +419,11 @@ void __ref enable_nonboot_cpus(void)
 	/* Allow everyone to use the CPU hotplug again */
 	cpu_maps_update_begin();
 	cpu_hotplug_disabled = 0;
-	if (cpus_empty(frozen_cpus))
+	if (cpumask_empty(frozen_cpus))
 		goto out;
 
 	printk("Enabling non-boot CPUs ...\n");
-	for_each_cpu_mask_nr(cpu, frozen_cpus) {
+	for_each_cpu(cpu, frozen_cpus) {
 		error = _cpu_up(cpu, 1);
 		if (!error) {
 			printk("CPU%d is up\n", cpu);
@@ -449,10 +431,18 @@ void __ref enable_nonboot_cpus(void)
449 } 431 }
450 printk(KERN_WARNING "Error taking CPU%d up: %d\n", cpu, error); 432 printk(KERN_WARNING "Error taking CPU%d up: %d\n", cpu, error);
451 } 433 }
452 cpus_clear(frozen_cpus); 434 cpumask_clear(frozen_cpus);
453out: 435out:
454 cpu_maps_update_done(); 436 cpu_maps_update_done();
455} 437}
438
439static int alloc_frozen_cpus(void)
440{
441 if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
442 return -ENOMEM;
443 return 0;
444}
445core_initcall(alloc_frozen_cpus);
456#endif /* CONFIG_PM_SLEEP_SMP */ 446#endif /* CONFIG_PM_SLEEP_SMP */
457 447
458/** 448/**
@@ -468,7 +458,7 @@ void __cpuinit notify_cpu_starting(unsigned int cpu)
 	unsigned long val = CPU_STARTING;
 
 #ifdef CONFIG_PM_SLEEP_SMP
-	if (cpu_isset(cpu, frozen_cpus))
+	if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
 		val = CPU_STARTING_FROZEN;
 #endif /* CONFIG_PM_SLEEP_SMP */
 	raw_notifier_call_chain(&cpu_chain, val, (void *)(long)cpu);
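
The NULL check added here pairs with the alloc_frozen_cpus() initcall above: with CONFIG_CPUMASK_OFFSTACK=y, frozen_cpus is a bare pointer that stays NULL until the core_initcall runs, and notify_cpu_starting() can fire earlier than that during boot-time secondary-CPU bring-up (smp_init() runs before do_initcalls() in kernel_init(); an ordering assumption stated here for context, not part of the patch). The general idiom, with hypothetical names:

    /* Idiom: a statically declared cpumask_var_t that is allocated by an
     * initcall must be NULL-checked on any path that can run earlier. */
    static cpumask_var_t lazily_allocated;     /* hypothetical name */

    static bool is_marked(unsigned int cpu)
    {
            return lazily_allocated != NULL &&
                   cpumask_test_cpu(cpu, lazily_allocated);
    }
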
@@ -480,7 +470,7 @@ void __cpuinit notify_cpu_starting(unsigned int cpu)
  * cpu_bit_bitmap[] is a special, "compressed" data structure that
  * represents all NR_CPUS bits binary values of 1<<nr.
  *
- * It is used by cpumask_of_cpu() to get a constant address to a CPU
+ * It is used by cpumask_of() to get a constant address to a CPU
  * mask value that has a single bit set only.
  */
 
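
The trick behind cpu_bit_bitmap[]: row 1 + b (for b = 0..BITS_PER_LONG-1) is a full-width bitmap whose word 0 has only bit b set and whose remaining words are zero; row 0 is all zeroes so that row 1 can safely back into it. cpumask_of(cpu) picks row 1 + cpu % BITS_PER_LONG and steps the base pointer back by cpu / BITS_PER_LONG words, so the nonzero word lands exactly at word index cpu / BITS_PER_LONG of the resulting mask, and the words borrowed from the preceding row are zero by construction. The net effect is a constant mask with one bit set at position cpu, with no per-CPU storage. The corresponding helper from <linux/cpumask.h> of this era (quoted from memory, not part of this patch):

    static inline const struct cpumask *get_cpu_mask(unsigned int cpu)
    {
            const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
            p -= cpu / BITS_PER_LONG;
            return to_cpumask(p);
    }
    #define cpumask_of(cpu) (get_cpu_mask(cpu))
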
@@ -503,3 +493,71 @@ EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
 
 const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
 EXPORT_SYMBOL(cpu_all_bits);
+
+#ifdef CONFIG_INIT_ALL_POSSIBLE
+static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly
+	= CPU_BITS_ALL;
+#else
+static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly;
+#endif
+const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits);
+EXPORT_SYMBOL(cpu_possible_mask);
+
+static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly;
+const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits);
+EXPORT_SYMBOL(cpu_online_mask);
+
+static DECLARE_BITMAP(cpu_present_bits, CONFIG_NR_CPUS) __read_mostly;
+const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits);
+EXPORT_SYMBOL(cpu_present_mask);
+
+static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly;
+const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits);
+EXPORT_SYMBOL(cpu_active_mask);
+
+void set_cpu_possible(unsigned int cpu, bool possible)
+{
+	if (possible)
+		cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits));
+	else
+		cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits));
+}
+
+void set_cpu_present(unsigned int cpu, bool present)
+{
+	if (present)
+		cpumask_set_cpu(cpu, to_cpumask(cpu_present_bits));
+	else
+		cpumask_clear_cpu(cpu, to_cpumask(cpu_present_bits));
+}
+
+void set_cpu_online(unsigned int cpu, bool online)
+{
+	if (online)
+		cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits));
+	else
+		cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits));
+}
+
+void set_cpu_active(unsigned int cpu, bool active)
+{
+	if (active)
+		cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
+	else
+		cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits));
+}
+
+void init_cpu_present(const struct cpumask *src)
+{
+	cpumask_copy(to_cpumask(cpu_present_bits), src);
+}
+
+void init_cpu_possible(const struct cpumask *src)
+{
+	cpumask_copy(to_cpumask(cpu_possible_bits), src);
+}
+
+void init_cpu_online(const struct cpumask *src)
+{
+	cpumask_copy(to_cpumask(cpu_online_bits), src);
+}
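
With the bitmaps now static to this file, architecture and early-boot code writes through the accessors rather than assigning the maps directly, and init_cpu_possible()/init_cpu_present()/init_cpu_online() replace whole-map assignments such as cpu_possible_map = CPU_MASK_ALL. Typical usage from platform SMP setup might look like this (illustrative sketch; the helper name is hypothetical, not taken from this patch):

    /* Hypothetical arch helper: mark a discovered CPU as usable. */
    static void __init my_arch_register_cpu(unsigned int cpu)
    {
            set_cpu_possible(cpu, true);
            set_cpu_present(cpu, true);
    }
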