author		Gautham R Shenoy <ego@in.ibm.com>	2008-01-25 15:08:02 -0500
committer	Ingo Molnar <mingo@elte.hu>		2008-01-25 15:08:02 -0500
commit		86ef5c9a8edd78e6bf92879f32329d89b2d55b5a (patch)
tree		7bf46885326a6fdbb0c3596855408e9a5634dd3a
parent		d221938c049f4845da13c8593132595a6b9222a8 (diff)
cpu-hotplug: replace lock_cpu_hotplug() with get_online_cpus()
Replace all uses of lock_cpu_hotplug/unlock_cpu_hotplug in the kernel
with get_online_cpus/put_online_cpus, as the new names highlight the
refcount semantics of these operations.

The new API guarantees protection against the cpu-hotplug operation,
but it does not guarantee serialized access to any of the local data
structures. Hence the changes need to be reviewed.
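
Most of the conversions below follow one reader-side pattern; as a
sketch (modeled on the mtrr hunks in this patch, with my_lock and
update_per_cpu_state() as illustrative placeholders, not code from the
patch itself):

	#include <linux/cpu.h>
	#include <linux/mutex.h>

	static DEFINE_MUTEX(my_lock);	/* placeholder for a subsystem lock */

	static void update_per_cpu_state(void)
	{
		get_online_cpus();	/* pin cpu_online_map: no CPU may go up/down */
		mutex_lock(&my_lock);	/* still needed: serializes our own data */
		/* ... walk cpu_online_map, touch per-cpu state ... */
		mutex_unlock(&my_lock);
		put_online_cpus();	/* drop the hotplug refcount */
	}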
In the case of pseries_add_processor/pseries_remove_processor, use
cpu_maps_update_begin()/cpu_maps_update_done(), as we're modifying
the cpu_present_map there.
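
A rough sketch of that second, map-writer pattern, modeled on the
pseries hunks below (mark_threads_present() and its argument are
hypothetical):

	#include <linux/cpu.h>

	/* Writers that modify cpu_present_map cannot use the reader
	 * refcount; they serialize against hotplug and against each
	 * other via the cpu maps lock. */
	static void mark_threads_present(int nthreads)
	{
		int i;

		cpu_maps_update_begin();
		for (i = 0; i < nthreads; i++)
			cpu_set(i, cpu_present_map);	/* era-appropriate cpumask op */
		cpu_maps_update_done();
	}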
Signed-off-by: Gautham R Shenoy <ego@in.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--  Documentation/cpu-hotplug.txt                 |  11
-rw-r--r--  arch/mips/kernel/mips-mt-fpaff.c              |  10
-rw-r--r--  arch/powerpc/platforms/pseries/hotplug-cpu.c  |   8
-rw-r--r--  arch/powerpc/platforms/pseries/rtasd.c        |   8
-rw-r--r--  arch/x86/kernel/cpu/mtrr/main.c               |   8
-rw-r--r--  arch/x86/kernel/microcode.c                   |  16
-rw-r--r--  drivers/lguest/x86/core.c                     |   8
-rw-r--r--  drivers/s390/char/sclp_config.c               |   4
-rw-r--r--  include/linux/cpu.h                           |   8
-rw-r--r--  kernel/cpu.c                                  |  10
-rw-r--r--  kernel/cpuset.c                               |  14
-rw-r--r--  kernel/rcutorture.c                           |   6
-rw-r--r--  kernel/sched.c                                |   4
-rw-r--r--  kernel/stop_machine.c                         |   4
-rw-r--r--  net/core/flow.c                               |   4
15 files changed, 62 insertions(+), 61 deletions(-)
diff --git a/Documentation/cpu-hotplug.txt b/Documentation/cpu-hotplug.txt
index a741f658a3c9..fb94f5a71b68 100644
--- a/Documentation/cpu-hotplug.txt
+++ b/Documentation/cpu-hotplug.txt
@@ -109,12 +109,13 @@ Never use anything other than cpumask_t to represent bitmap of CPUs.
 for_each_cpu_mask(x,mask) - Iterate over some random collection of cpu mask.
 
 #include <linux/cpu.h>
-lock_cpu_hotplug() and unlock_cpu_hotplug():
+get_online_cpus() and put_online_cpus():
 
-The above calls are used to inhibit cpu hotplug operations. While holding the
-cpucontrol mutex, cpu_online_map will not change. If you merely need to avoid
-cpus going away, you could also use preempt_disable() and preempt_enable()
-for those sections. Just remember the critical section cannot call any
+The above calls are used to inhibit cpu hotplug operations. While the
+cpu_hotplug.refcount is non zero, the cpu_online_map will not change.
+If you merely need to avoid cpus going away, you could also use
+preempt_disable() and preempt_enable() for those sections.
+Just remember the critical section cannot call any
 function that can sleep or schedule this process away. The preempt_disable()
 will work as long as stop_machine_run() is used to take a cpu down.
 
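For the preempt_disable()-based alternative described above, a minimal
sketch (the per-cpu counter and its accessor are invented for
illustration; nothing inside the critical section may sleep):

	#include <linux/percpu.h>
	#include <linux/preempt.h>

	static DEFINE_PER_CPU(unsigned long, my_counter);	/* hypothetical */

	static unsigned long read_this_cpu_counter(void)
	{
		unsigned long val;

		preempt_disable();	/* stop_machine cannot take this cpu down now */
		val = __get_cpu_var(my_counter);
		preempt_enable();
		return val;
	}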
diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c
index 892665bb12b1..bb4f00c0cbe9 100644
--- a/arch/mips/kernel/mips-mt-fpaff.c
+++ b/arch/mips/kernel/mips-mt-fpaff.c
@@ -58,13 +58,13 @@ asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,
 	if (copy_from_user(&new_mask, user_mask_ptr, sizeof(new_mask)))
 		return -EFAULT;
 
-	lock_cpu_hotplug();
+	get_online_cpus();
 	read_lock(&tasklist_lock);
 
 	p = find_process_by_pid(pid);
 	if (!p) {
 		read_unlock(&tasklist_lock);
-		unlock_cpu_hotplug();
+		put_online_cpus();
 		return -ESRCH;
 	}
 
@@ -106,7 +106,7 @@ asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,
 
 out_unlock:
 	put_task_struct(p);
-	unlock_cpu_hotplug();
+	put_online_cpus();
 	return retval;
 }
 
@@ -125,7 +125,7 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
 	if (len < real_len)
 		return -EINVAL;
 
-	lock_cpu_hotplug();
+	get_online_cpus();
 	read_lock(&tasklist_lock);
 
 	retval = -ESRCH;
@@ -140,7 +140,7 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
 
 out_unlock:
 	read_unlock(&tasklist_lock);
-	unlock_cpu_hotplug();
+	put_online_cpus();
 	if (retval)
 		return retval;
 	if (copy_to_user(user_mask_ptr, &mask, real_len))
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
index 412e6b42986f..c4ad54e0f288 100644
--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
+++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
@@ -153,7 +153,7 @@ static int pseries_add_processor(struct device_node *np)
 	for (i = 0; i < nthreads; i++)
 		cpu_set(i, tmp);
 
-	lock_cpu_hotplug();
+	cpu_maps_update_begin();
 
 	BUG_ON(!cpus_subset(cpu_present_map, cpu_possible_map));
 
@@ -190,7 +190,7 @@ static int pseries_add_processor(struct device_node *np)
 	}
 	err = 0;
 out_unlock:
-	unlock_cpu_hotplug();
+	cpu_maps_update_done();
 	return err;
 }
 
@@ -211,7 +211,7 @@ static void pseries_remove_processor(struct device_node *np)
 
 	nthreads = len / sizeof(u32);
 
-	lock_cpu_hotplug();
+	cpu_maps_update_begin();
 	for (i = 0; i < nthreads; i++) {
 		for_each_present_cpu(cpu) {
 			if (get_hard_smp_processor_id(cpu) != intserv[i])
@@ -225,7 +225,7 @@ static void pseries_remove_processor(struct device_node *np)
 			printk(KERN_WARNING "Could not find cpu to remove "
 				"with physical id 0x%x\n", intserv[i]);
 	}
-	unlock_cpu_hotplug();
+	cpu_maps_update_done();
 }
 
 static int pseries_smp_notifier(struct notifier_block *nb,
diff --git a/arch/powerpc/platforms/pseries/rtasd.c b/arch/powerpc/platforms/pseries/rtasd.c
index 73401c820110..e3078ce41518 100644
--- a/arch/powerpc/platforms/pseries/rtasd.c
+++ b/arch/powerpc/platforms/pseries/rtasd.c
@@ -382,7 +382,7 @@ static void do_event_scan_all_cpus(long delay)
 {
 	int cpu;
 
-	lock_cpu_hotplug();
+	get_online_cpus();
 	cpu = first_cpu(cpu_online_map);
 	for (;;) {
 		set_cpus_allowed(current, cpumask_of_cpu(cpu));
@@ -390,15 +390,15 @@ static void do_event_scan_all_cpus(long delay)
 		set_cpus_allowed(current, CPU_MASK_ALL);
 
 		/* Drop hotplug lock, and sleep for the specified delay */
-		unlock_cpu_hotplug();
+		put_online_cpus();
 		msleep_interruptible(delay);
-		lock_cpu_hotplug();
+		get_online_cpus();
 
 		cpu = next_cpu(cpu, cpu_online_map);
 		if (cpu == NR_CPUS)
 			break;
 	}
-	unlock_cpu_hotplug();
+	put_online_cpus();
 }
 
 static int rtasd(void *unused)
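Note how the hunk above drops the refcount before sleeping and retakes
it afterwards, so a pending hotplug operation is not stalled for the
whole delay. Schematically (do_scan() and the loop shape are
illustrative, not from this patch):

	#include <linux/cpu.h>
	#include <linux/delay.h>

	static void do_scan(int cpu);		/* hypothetical per-cpu work */

	static void scan_all_cpus_then_sleep(long delay)
	{
		int cpu;

		get_online_cpus();
		for_each_online_cpu(cpu)
			do_scan(cpu);
		put_online_cpus();		/* never sleep holding the refcount */
		msleep_interruptible(delay);
	}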
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
index 3b20613325dc..beb45c9c0835 100644
--- a/arch/x86/kernel/cpu/mtrr/main.c
+++ b/arch/x86/kernel/cpu/mtrr/main.c
@@ -349,7 +349,7 @@ int mtrr_add_page(unsigned long base, unsigned long size,
 	replace = -1;
 
 	/* No CPU hotplug when we change MTRR entries */
-	lock_cpu_hotplug();
+	get_online_cpus();
 	/* Search for existing MTRR */
 	mutex_lock(&mtrr_mutex);
 	for (i = 0; i < num_var_ranges; ++i) {
@@ -405,7 +405,7 @@ int mtrr_add_page(unsigned long base, unsigned long size,
 		error = i;
  out:
 	mutex_unlock(&mtrr_mutex);
-	unlock_cpu_hotplug();
+	put_online_cpus();
 	return error;
 }
 
@@ -495,7 +495,7 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size)
 
 	max = num_var_ranges;
 	/* No CPU hotplug when we change MTRR entries */
-	lock_cpu_hotplug();
+	get_online_cpus();
 	mutex_lock(&mtrr_mutex);
 	if (reg < 0) {
 		/* Search for existing MTRR */
@@ -536,7 +536,7 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size)
 		error = reg;
  out:
 	mutex_unlock(&mtrr_mutex);
-	unlock_cpu_hotplug();
+	put_online_cpus();
 	return error;
 }
 /**
diff --git a/arch/x86/kernel/microcode.c b/arch/x86/kernel/microcode.c
index 09c315214a5e..40cfd5488719 100644
--- a/arch/x86/kernel/microcode.c
+++ b/arch/x86/kernel/microcode.c
@@ -436,7 +436,7 @@ static ssize_t microcode_write (struct file *file, const char __user *buf, size_
 		return -EINVAL;
 	}
 
-	lock_cpu_hotplug();
+	get_online_cpus();
 	mutex_lock(&microcode_mutex);
 
 	user_buffer = (void __user *) buf;
@@ -447,7 +447,7 @@ static ssize_t microcode_write (struct file *file, const char __user *buf, size_
 	ret = (ssize_t)len;
 
 	mutex_unlock(&microcode_mutex);
-	unlock_cpu_hotplug();
+	put_online_cpus();
 
 	return ret;
 }
@@ -658,14 +658,14 @@ static ssize_t reload_store(struct sys_device *dev, const char *buf, size_t sz)
 
 		old = current->cpus_allowed;
 
-		lock_cpu_hotplug();
+		get_online_cpus();
 		set_cpus_allowed(current, cpumask_of_cpu(cpu));
 
 		mutex_lock(&microcode_mutex);
 		if (uci->valid)
 			err = cpu_request_microcode(cpu);
 		mutex_unlock(&microcode_mutex);
-		unlock_cpu_hotplug();
+		put_online_cpus();
 		set_cpus_allowed(current, old);
 	}
 	if (err)
@@ -817,9 +817,9 @@ static int __init microcode_init (void)
 		return PTR_ERR(microcode_pdev);
 	}
 
-	lock_cpu_hotplug();
+	get_online_cpus();
 	error = sysdev_driver_register(&cpu_sysdev_class, &mc_sysdev_driver);
-	unlock_cpu_hotplug();
+	put_online_cpus();
 	if (error) {
 		microcode_dev_exit();
 		platform_device_unregister(microcode_pdev);
@@ -839,9 +839,9 @@ static void __exit microcode_exit (void)
 
 	unregister_hotcpu_notifier(&mc_cpu_notifier);
 
-	lock_cpu_hotplug();
+	get_online_cpus();
 	sysdev_driver_unregister(&cpu_sysdev_class, &mc_sysdev_driver);
-	unlock_cpu_hotplug();
+	put_online_cpus();
 
 	platform_device_unregister(microcode_pdev);
 }
diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
index 482aec2a9631..96d0fd07c57d 100644
--- a/drivers/lguest/x86/core.c
+++ b/drivers/lguest/x86/core.c
@@ -459,7 +459,7 @@ void __init lguest_arch_host_init(void)
 
 	/* We don't need the complexity of CPUs coming and going while we're
 	 * doing this. */
-	lock_cpu_hotplug();
+	get_online_cpus();
 	if (cpu_has_pge) { /* We have a broader idea of "global". */
 		/* Remember that this was originally set (for cleanup). */
 		cpu_had_pge = 1;
@@ -469,20 +469,20 @@ void __init lguest_arch_host_init(void)
 		/* Turn off the feature in the global feature set. */
 		clear_bit(X86_FEATURE_PGE, boot_cpu_data.x86_capability);
 	}
-	unlock_cpu_hotplug();
+	put_online_cpus();
 };
 /*:*/
 
 void __exit lguest_arch_host_fini(void)
 {
 	/* If we had PGE before we started, turn it back on now. */
-	lock_cpu_hotplug();
+	get_online_cpus();
 	if (cpu_had_pge) {
 		set_bit(X86_FEATURE_PGE, boot_cpu_data.x86_capability);
 		/* adjust_pge's argument "1" means set PGE. */
 		on_each_cpu(adjust_pge, (void *)1, 0, 1);
 	}
-	unlock_cpu_hotplug();
+	put_online_cpus();
 }
 
 
diff --git a/drivers/s390/char/sclp_config.c b/drivers/s390/char/sclp_config.c
index 5322e5e54a98..9dc77f14fa52 100644
--- a/drivers/s390/char/sclp_config.c
+++ b/drivers/s390/char/sclp_config.c
@@ -29,12 +29,12 @@ static void sclp_cpu_capability_notify(struct work_struct *work)
 	struct sys_device *sysdev;
 
 	printk(KERN_WARNING TAG "cpu capability changed.\n");
-	lock_cpu_hotplug();
+	get_online_cpus();
 	for_each_online_cpu(cpu) {
 		sysdev = get_cpu_sysdev(cpu);
 		kobject_uevent(&sysdev->kobj, KOBJ_CHANGE);
 	}
-	unlock_cpu_hotplug();
+	put_online_cpus();
 }
 
 static void sclp_conf_receiver_fn(struct evbuf_header *evbuf)
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index a40247e4d462..3a3ff1c5cbef 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -100,8 +100,8 @@ static inline void cpuhotplug_mutex_unlock(struct mutex *cpu_hp_mutex)
 	mutex_unlock(cpu_hp_mutex);
 }
 
-extern void lock_cpu_hotplug(void);
-extern void unlock_cpu_hotplug(void);
+extern void get_online_cpus(void);
+extern void put_online_cpus(void);
 #define hotcpu_notifier(fn, pri) {				\
 	static struct notifier_block fn##_nb =			\
 		{ .notifier_call = fn, .priority = pri };	\
@@ -118,8 +118,8 @@ static inline void cpuhotplug_mutex_lock(struct mutex *cpu_hp_mutex)
 static inline void cpuhotplug_mutex_unlock(struct mutex *cpu_hp_mutex)
 { }
 
-#define lock_cpu_hotplug()	do { } while (0)
-#define unlock_cpu_hotplug()	do { } while (0)
+#define get_online_cpus()	do { } while (0)
+#define put_online_cpus()	do { } while (0)
 #define hotcpu_notifier(fn, pri)	do { (void)(fn); } while (0)
 /* These aren't inline functions due to a GCC bug. */
 #define register_hotcpu_notifier(nb)	({ (void)(nb); 0; })
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 656dc3fcbbae..b0c4152995f8 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -48,7 +48,7 @@ void __init cpu_hotplug_init(void)
 
 #ifdef CONFIG_HOTPLUG_CPU
 
-void lock_cpu_hotplug(void)
+void get_online_cpus(void)
 {
 	might_sleep();
 	if (cpu_hotplug.active_writer == current)
@@ -58,9 +58,9 @@ void lock_cpu_hotplug(void)
 	mutex_unlock(&cpu_hotplug.lock);
 
 }
-EXPORT_SYMBOL_GPL(lock_cpu_hotplug);
+EXPORT_SYMBOL_GPL(get_online_cpus);
 
-void unlock_cpu_hotplug(void)
+void put_online_cpus(void)
 {
 	if (cpu_hotplug.active_writer == current)
 		return;
@@ -73,7 +73,7 @@ void unlock_cpu_hotplug(void)
 	mutex_unlock(&cpu_hotplug.lock);
 
 }
-EXPORT_SYMBOL_GPL(unlock_cpu_hotplug);
+EXPORT_SYMBOL_GPL(put_online_cpus);
 
 #endif /* CONFIG_HOTPLUG_CPU */
 
@@ -110,7 +110,7 @@ void cpu_maps_update_done(void)
  * non zero and goes to sleep again.
  *
  * However, this is very difficult to achieve in practice since
- * lock_cpu_hotplug() not an api which is called all that often.
+ * get_online_cpus() not an api which is called all that often.
  *
 */
 static void cpu_hotplug_begin(void)
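Assembled from the fragments visible in these hunks, the renamed
reader-side pair plausibly reads as below; the refcount increment and
decrement fall outside the hunk context, so treat this as a
reconstruction rather than verbatim file contents:

	void get_online_cpus(void)
	{
		might_sleep();
		if (cpu_hotplug.active_writer == current)
			return;		/* the hotplug writer may recurse freely */
		mutex_lock(&cpu_hotplug.lock);
		cpu_hotplug.refcount++;	/* reconstructed: not shown in the hunks */
		mutex_unlock(&cpu_hotplug.lock);
	}

	void put_online_cpus(void)
	{
		if (cpu_hotplug.active_writer == current)
			return;
		mutex_lock(&cpu_hotplug.lock);
		/* reconstructed: the last reader wakes a waiting writer */
		if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
			wake_up_process(cpu_hotplug.active_writer);
		mutex_unlock(&cpu_hotplug.lock);
	}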
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 50f5dc463688..cfaf6419d817 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -537,10 +537,10 @@ static int cpusets_overlap(struct cpuset *a, struct cpuset *b)
  *
  * Call with cgroup_mutex held. May take callback_mutex during
  * call due to the kfifo_alloc() and kmalloc() calls. May nest
- * a call to the lock_cpu_hotplug()/unlock_cpu_hotplug() pair.
+ * a call to the get_online_cpus()/put_online_cpus() pair.
  * Must not be called holding callback_mutex, because we must not
- * call lock_cpu_hotplug() while holding callback_mutex. Elsewhere
- * the kernel nests callback_mutex inside lock_cpu_hotplug() calls.
+ * call get_online_cpus() while holding callback_mutex. Elsewhere
+ * the kernel nests callback_mutex inside get_online_cpus() calls.
  * So the reverse nesting would risk an ABBA deadlock.
  *
  * The three key local variables below are:
@@ -691,9 +691,9 @@ restart:
 
 rebuild:
 	/* Have scheduler rebuild sched domains */
-	lock_cpu_hotplug();
+	get_online_cpus();
 	partition_sched_domains(ndoms, doms);
-	unlock_cpu_hotplug();
+	put_online_cpus();
 
 done:
 	if (q && !IS_ERR(q))
@@ -1617,10 +1617,10 @@ static struct cgroup_subsys_state *cpuset_create(
  *
  * If the cpuset being removed has its flag 'sched_load_balance'
  * enabled, then simulate turning sched_load_balance off, which
- * will call rebuild_sched_domains(). The lock_cpu_hotplug()
+ * will call rebuild_sched_domains(). The get_online_cpus()
  * call in rebuild_sched_domains() must not be made while holding
  * callback_mutex. Elsewhere the kernel nests callback_mutex inside
- * lock_cpu_hotplug() calls. So the reverse nesting would risk an
+ * get_online_cpus() calls. So the reverse nesting would risk an
  * ABBA deadlock.
  */
 
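The lock-ordering rule those comments keep restating, as a schematic
(callback_mutex here is a local stand-in for cpuset's lock, and the
function is illustrative):

	#include <linux/cpu.h>
	#include <linux/mutex.h>

	static DEFINE_MUTEX(callback_mutex);	/* stand-in for cpuset's lock */

	static void touch_cpuset_state(void)
	{
		/* Everywhere else the kernel nests callback_mutex *inside*
		 * get_online_cpus(); taking them in the opposite order in
		 * any one path is what would open the ABBA deadlock window. */
		get_online_cpus();
		mutex_lock(&callback_mutex);
		/* ... state that cpu hotplug must not race with ... */
		mutex_unlock(&callback_mutex);
		put_online_cpus();
	}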
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index c3e165c2318f..fd599829e72a 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -726,11 +726,11 @@ static void rcu_torture_shuffle_tasks(void)
 	cpumask_t tmp_mask = CPU_MASK_ALL;
 	int i;
 
-	lock_cpu_hotplug();
+	get_online_cpus();
 
 	/* No point in shuffling if there is only one online CPU (ex: UP) */
 	if (num_online_cpus() == 1) {
-		unlock_cpu_hotplug();
+		put_online_cpus();
 		return;
 	}
 
@@ -762,7 +762,7 @@ static void rcu_torture_shuffle_tasks(void)
 	else
 		rcu_idle_cpu--;
 
-	unlock_cpu_hotplug();
+	put_online_cpus();
 }
 
 /* Shuffle tasks across CPUs, with the intent of allowing each CPU in the
diff --git a/kernel/sched.c b/kernel/sched.c
index 86e55a9c2de6..672aa68bfeac 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -7152,7 +7152,7 @@ static int load_balance_monitor(void *unused)
 		int i, cpu, balanced = 1;
 
 		/* Prevent cpus going down or coming up */
-		lock_cpu_hotplug();
+		get_online_cpus();
 		/* lockout changes to doms_cur[] array */
 		lock_doms_cur();
 		/*
@@ -7186,7 +7186,7 @@ static int load_balance_monitor(void *unused)
 		rcu_read_unlock();
 
 		unlock_doms_cur();
-		unlock_cpu_hotplug();
+		put_online_cpus();
 
 		if (!balanced)
 			timeout = sysctl_sched_min_bal_int_shares;
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 319821ef78af..51b5ee53571a 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -203,13 +203,13 @@ int stop_machine_run(int (*fn)(void *), void *data, unsigned int cpu)
 	int ret;
 
 	/* No CPUs can come up or down during this. */
-	lock_cpu_hotplug();
+	get_online_cpus();
 	p = __stop_machine_run(fn, data, cpu);
 	if (!IS_ERR(p))
 		ret = kthread_stop(p);
 	else
 		ret = PTR_ERR(p);
-	unlock_cpu_hotplug();
+	put_online_cpus();
 
 	return ret;
 }
diff --git a/net/core/flow.c b/net/core/flow.c
index 3ed2b4b1d6d4..6489f4e24ecf 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -293,7 +293,7 @@ void flow_cache_flush(void)
 	static DEFINE_MUTEX(flow_flush_sem);
 
 	/* Don't want cpus going down or up during this. */
-	lock_cpu_hotplug();
+	get_online_cpus();
 	mutex_lock(&flow_flush_sem);
 	atomic_set(&info.cpuleft, num_online_cpus());
 	init_completion(&info.completion);
@@ -305,7 +305,7 @@ void flow_cache_flush(void)
 
 	wait_for_completion(&info.completion);
 	mutex_unlock(&flow_flush_sem);
-	unlock_cpu_hotplug();
+	put_online_cpus();
 }
 
 static void __devinit flow_cache_cpu_prepare(int cpu)