commit     e0b582ec56f1a1d8b30ebf340a7b91fb09f26c8c
tree       d96b9b657eda13b902a712dfb4f95321133caf1c
parent     c309b917cab55799ea489d7b5f1b77025d9f8462
author     Rusty Russell <rusty@rustcorp.com.au>  2008-12-31 18:42:28 -0500
committer  Rusty Russell <rusty@rustcorp.com.au>  2008-12-31 18:42:28 -0500
cpumask: convert kernel/cpu.c
Impact: Reduce kernel stack and memory usage, use new cpumask API.
Use cpumask_var_t for the _cpu_down() stack var, and for frozen_cpus.
Note that notify_cpu_starting() can be called before core_initcall
allocates frozen_cpus, but the NULL check is optimized out by gcc for
the CONFIG_CPUMASK_OFFSTACK=n case.
Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
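
A note on the mechanism: cpumask_var_t is defined so the same code
serves both configurations. A simplified sketch of the two cases (see
include/linux/cpumask.h for the real definitions; this is illustrative,
not the exact kernel source):

    #ifdef CONFIG_CPUMASK_OFFSTACK
    /* Off-stack: a real pointer that alloc_cpumask_var() must fill in,
     * so it can still be NULL before the allocation has run. */
    typedef struct cpumask *cpumask_var_t;
    #else
    /* On-stack/bss: a one-element array, so alloc_cpumask_var() is a
     * no-op that always succeeds and the variable is never NULL. */
    typedef struct cpumask cpumask_var_t[1];
    #endif

With CONFIG_CPUMASK_OFFSTACK=n, "frozen_cpus != NULL" tests the address
of a static array, which is never NULL, so gcc drops the check exactly
as the message says.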
 kernel/cpu.c | 48 +++++++++++++++++++++++++++++-------------------
 1 file changed, 29 insertions(+), 19 deletions(-)
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 2c9f78f3a2f..47fff3b63cb 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -194,7 +194,7 @@ static int __ref take_cpu_down(void *_param)
 static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
 {
         int err, nr_calls = 0;
-        cpumask_t old_allowed, tmp;
+        cpumask_var_t old_allowed;
         void *hcpu = (void *)(long)cpu;
         unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
         struct take_cpu_down_param tcd_param = {
@@ -208,6 +208,9 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
         if (!cpu_online(cpu))
                 return -EINVAL;
 
+        if (!alloc_cpumask_var(&old_allowed, GFP_KERNEL))
+                return -ENOMEM;
+
         cpu_hotplug_begin();
         err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE | mod,
                                         hcpu, -1, &nr_calls);
@@ -222,13 +225,11 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
         }
 
         /* Ensure that we are not runnable on dying cpu */
-        old_allowed = current->cpus_allowed;
-        cpus_setall(tmp);
-        cpu_clear(cpu, tmp);
-        set_cpus_allowed_ptr(current, &tmp);
-        tmp = cpumask_of_cpu(cpu);
+        cpumask_copy(old_allowed, &current->cpus_allowed);
+        set_cpus_allowed_ptr(current,
+                             cpumask_of(cpumask_any_but(cpu_online_mask, cpu)));
 
-        err = __stop_machine(take_cpu_down, &tcd_param, &tmp);
+        err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
         if (err) {
                 /* CPU didn't die: tell everyone.  Can't complain. */
                 if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
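
The five removed lines built an on-stack "every CPU except the dying
one" mask; the replacement needs no temporary at all. cpumask_any_but()
returns some CPU in the given mask other than the named one, and
cpumask_of() yields a pointer to a preallocated, constant single-bit
mask. A hedged illustration with made-up CPU numbers:

    /* Illustration only: assume CPUs 0-3 are online and cpu == 2. */
    unsigned int other = cpumask_any_but(cpu_online_mask, 2); /* e.g. 0 */

    /* Pin current to that one CPU; it can no longer run on CPU 2. */
    set_cpus_allowed_ptr(current, cpumask_of(other));

Pinning to a single online CPU is stricter than the old "anywhere but
the dying CPU" mask, but it satisfies the comment's requirement: current
must not be runnable on the CPU being taken down.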
@@ -254,7 +255,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
         check_for_tasks(cpu);
 
 out_allowed:
-        set_cpus_allowed_ptr(current, &old_allowed);
+        set_cpus_allowed_ptr(current, old_allowed);
 out_release:
         cpu_hotplug_done();
         if (!err) {
@@ -262,6 +263,7 @@ out_release:
                                 hcpu) == NOTIFY_BAD)
                         BUG();
         }
+        free_cpumask_var(old_allowed);
         return err;
 }
 
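
This hunk completes the cpumask_var_t lifecycle in _cpu_down(): allocate
up front, restore at out_allowed, free on every exit path. The shape of
the pattern, reduced to a sketch (not the kernel code):

    static int sketch_down(unsigned int cpu)
    {
            cpumask_var_t saved;

            if (!alloc_cpumask_var(&saved, GFP_KERNEL))
                    return -ENOMEM;   /* can only fail when OFFSTACK=y */

            cpumask_copy(saved, &current->cpus_allowed);
            /* ... work that rewrites current's allowed mask ... */
            set_cpus_allowed_ptr(current, saved);   /* restore */

            free_cpumask_var(saved);  /* no-op when OFFSTACK=n */
            return 0;
    }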
@@ -280,7 +282,7 @@ int __ref cpu_down(unsigned int cpu)
 
         /*
          * Make sure the all cpus did the reschedule and are not
-         * using stale version of the cpu_active_map.
+         * using stale version of the cpu_active_mask.
          * This is not strictly necessary becuase stop_machine()
          * that we run down the line already provides the required
          * synchronization. But it's really a side effect and we do not
@@ -344,7 +346,7 @@ out_notify:
 int __cpuinit cpu_up(unsigned int cpu)
 {
         int err = 0;
-        if (!cpu_isset(cpu, cpu_possible_map)) {
+        if (!cpu_possible(cpu)) {
                 printk(KERN_ERR "can't online cpu %d because it is not "
                         "configured as may-hotadd at boot time\n", cpu);
 #if defined(CONFIG_IA64) || defined(CONFIG_X86_64)
@@ -369,25 +371,25 @@ out:
 }
 
 #ifdef CONFIG_PM_SLEEP_SMP
-static cpumask_t frozen_cpus;
+static cpumask_var_t frozen_cpus;
 
 int disable_nonboot_cpus(void)
 {
         int cpu, first_cpu, error = 0;
 
         cpu_maps_update_begin();
-        first_cpu = first_cpu(cpu_online_map);
+        first_cpu = cpumask_first(cpu_online_mask);
         /* We take down all of the non-boot CPUs in one shot to avoid races
          * with the userspace trying to use the CPU hotplug at the same time
          */
-        cpus_clear(frozen_cpus);
+        cpumask_clear(frozen_cpus);
         printk("Disabling non-boot CPUs ...\n");
         for_each_online_cpu(cpu) {
                 if (cpu == first_cpu)
                         continue;
                 error = _cpu_down(cpu, 1);
                 if (!error) {
-                        cpu_set(cpu, frozen_cpus);
+                        cpumask_set_cpu(cpu, frozen_cpus);
                         printk("CPU%d is down\n", cpu);
                 } else {
                         printk(KERN_ERR "Error taking CPU%d down: %d\n",
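
Note that frozen_cpus is handed to cpumask_clear() and cpumask_set_cpu()
without an ampersand, where the old cpus_clear()/cpu_set() macros
operated on the mask lvalue directly. Both configurations still compile,
as this sketch (using the simplified typedefs above) suggests:

    static cpumask_var_t frozen_cpus;

    /* OFFSTACK=y: frozen_cpus is already a struct cpumask *.
     * OFFSTACK=n: it is struct cpumask[1], which decays to a
     * struct cpumask * in an expression.  Either way it is a valid
     * pointer argument: */
    cpumask_clear(frozen_cpus);
    cpumask_set_cpu(cpu, frozen_cpus);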
@@ -413,11 +415,11 @@ void __ref enable_nonboot_cpus(void)
         /* Allow everyone to use the CPU hotplug again */
         cpu_maps_update_begin();
         cpu_hotplug_disabled = 0;
-        if (cpus_empty(frozen_cpus))
+        if (cpumask_empty(frozen_cpus))
                 goto out;
 
         printk("Enabling non-boot CPUs ...\n");
-        for_each_cpu_mask_nr(cpu, frozen_cpus) {
+        for_each_cpu(cpu, frozen_cpus) {
                 error = _cpu_up(cpu, 1);
                 if (!error) {
                         printk("CPU%d is up\n", cpu);
@@ -425,10 +427,18 @@ void __ref enable_nonboot_cpus(void)
                 }
                 printk(KERN_WARNING "Error taking CPU%d up: %d\n", cpu, error);
         }
-        cpus_clear(frozen_cpus);
+        cpumask_clear(frozen_cpus);
 out:
         cpu_maps_update_done();
 }
+
+static int alloc_frozen_cpus(void)
+{
+        if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
+                return -ENOMEM;
+        return 0;
+}
+core_initcall(alloc_frozen_cpus);
 #endif /* CONFIG_PM_SLEEP_SMP */
 
 /**
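
alloc_frozen_cpus() is the core_initcall allocation the commit message
mentions, and __GFP_ZERO leaves the mask empty, matching the zeroed-bss
state the old static cpumask_t started with. The same pattern suits any
static mask; a sketch with a hypothetical name (my_mask is not from
this patch):

    static cpumask_var_t my_mask;

    static int __init alloc_my_mask(void)
    {
            /* __GFP_ZERO: start empty, like a zeroed static cpumask_t */
            if (!alloc_cpumask_var(&my_mask, GFP_KERNEL | __GFP_ZERO))
                    return -ENOMEM;
            return 0;
    }
    core_initcall(alloc_my_mask);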
@@ -444,7 +454,7 @@ void __cpuinit notify_cpu_starting(unsigned int cpu)
         unsigned long val = CPU_STARTING;
 
 #ifdef CONFIG_PM_SLEEP_SMP
-        if (cpu_isset(cpu, frozen_cpus))
+        if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
                 val = CPU_STARTING_FROZEN;
 #endif /* CONFIG_PM_SLEEP_SMP */
         raw_notifier_call_chain(&cpu_chain, val, (void *)(long)cpu);
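
This is the NULL check the commit message calls out. What each build
effectively compiles, following the simplified typedefs above (sketch):

    #ifdef CONFIG_CPUMASK_OFFSTACK
            /* Pointer: may still be NULL if this CPU starts up before
             * the core_initcall above has allocated frozen_cpus. */
            if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
                    val = CPU_STARTING_FROZEN;
    #else
            /* Array: its address is never NULL, so gcc folds the first
             * test away and only the bit test remains. */
            if (cpumask_test_cpu(cpu, frozen_cpus))
                    val = CPU_STARTING_FROZEN;
    #endif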
@@ -456,7 +466,7 @@ void __cpuinit notify_cpu_starting(unsigned int cpu)
  * cpu_bit_bitmap[] is a special, "compressed" data structure that
  * represents all NR_CPUS bits binary values of 1<<nr.
  *
- * It is used by cpumask_of_cpu() to get a constant address to a CPU
+ * It is used by cpumask_of() to get a constant address to a CPU
  * mask value that has a single bit set only.
  */
 
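
cpu_bit_bitmap[] is what makes the cpumask_of() calls in the hunks above
cheap: the returned pointer aliases into this shared constant table, so
no single-bit mask is ever built at run time. A brief usage sketch:

    /* A constant one-bit mask, with no local cpumask variable. */
    const struct cpumask *m = cpumask_of(3);   /* only CPU 3 set */

    /* cpumask_test_cpu(3, m) is true and cpumask_weight(m) == 1.
     * The storage is shared and read-only: never write through m. */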