Diffstat (limited to 'kernel/cpu.c')

 -rw-r--r--  kernel/cpu.c | 83
 1 files changed, 49 insertions, 34 deletions
diff --git a/kernel/cpu.c b/kernel/cpu.c
index d61ba88f34e5..e882c6babf41 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -16,47 +16,76 @@
 #include <asm/semaphore.h>

 /* This protects CPUs going up and down... */
-DECLARE_MUTEX(cpucontrol);
-EXPORT_SYMBOL_GPL(cpucontrol);
+static DECLARE_MUTEX(cpucontrol);

 static struct notifier_block *cpu_chain;

-/*
- * Used to check by callers if they need to acquire the cpucontrol
- * or not to protect a cpu from being removed. Its sometimes required to
- * call these functions both for normal operations, and in response to
- * a cpu being added/removed. If the context of the call is in the same
- * thread context as a CPU hotplug thread, we dont need to take the lock
- * since its already protected
- * check drivers/cpufreq/cpufreq.c for its usage - Ashok Raj
- */
+#ifdef CONFIG_HOTPLUG_CPU
+static struct task_struct *lock_cpu_hotplug_owner;
+static int lock_cpu_hotplug_depth;

-int current_in_cpu_hotplug(void)
+static int __lock_cpu_hotplug(int interruptible)
 {
-	return (current->flags & PF_HOTPLUG_CPU);
+	int ret = 0;
+
+	if (lock_cpu_hotplug_owner != current) {
+		if (interruptible)
+			ret = down_interruptible(&cpucontrol);
+		else
+			down(&cpucontrol);
+	}
+
+	/*
+	 * Set only if we succeed in locking
+	 */
+	if (!ret) {
+		lock_cpu_hotplug_depth++;
+		lock_cpu_hotplug_owner = current;
+	}
+
+	return ret;
 }

-EXPORT_SYMBOL_GPL(current_in_cpu_hotplug);
+void lock_cpu_hotplug(void)
+{
+	__lock_cpu_hotplug(0);
+}
+EXPORT_SYMBOL_GPL(lock_cpu_hotplug);

+void unlock_cpu_hotplug(void)
+{
+	if (--lock_cpu_hotplug_depth == 0) {
+		lock_cpu_hotplug_owner = NULL;
+		up(&cpucontrol);
+	}
+}
+EXPORT_SYMBOL_GPL(unlock_cpu_hotplug);
+
+int lock_cpu_hotplug_interruptible(void)
+{
+	return __lock_cpu_hotplug(1);
+}
+EXPORT_SYMBOL_GPL(lock_cpu_hotplug_interruptible);
+#endif /* CONFIG_HOTPLUG_CPU */

 /* Need to know about CPUs going up/down? */
 int register_cpu_notifier(struct notifier_block *nb)
 {
 	int ret;

-	if ((ret = down_interruptible(&cpucontrol)) != 0)
+	if ((ret = lock_cpu_hotplug_interruptible()) != 0)
 		return ret;
 	ret = notifier_chain_register(&cpu_chain, nb);
-	up(&cpucontrol);
+	unlock_cpu_hotplug();
 	return ret;
 }
 EXPORT_SYMBOL(register_cpu_notifier);

 void unregister_cpu_notifier(struct notifier_block *nb)
 {
-	down(&cpucontrol);
+	lock_cpu_hotplug();
 	notifier_chain_unregister(&cpu_chain, nb);
-	up(&cpucontrol);
+	unlock_cpu_hotplug();
 }
 EXPORT_SYMBOL(unregister_cpu_notifier);

@@ -112,13 +141,6 @@ int cpu_down(unsigned int cpu)
 		goto out;
 	}

-	/*
-	 * Leave a trace in current->flags indicating we are already in
-	 * process of performing CPU hotplug. Callers can check if cpucontrol
-	 * is already acquired by current thread, and if so not cause
-	 * a dead lock by not acquiring the lock
-	 */
-	current->flags |= PF_HOTPLUG_CPU;
 	err = notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE,
 						(void *)(long)cpu);
 	if (err == NOTIFY_BAD) {
@@ -171,7 +193,6 @@ out_thread:
 out_allowed:
 	set_cpus_allowed(current, old_allowed);
 out:
-	current->flags &= ~PF_HOTPLUG_CPU;
 	unlock_cpu_hotplug();
 	return err;
 }
@@ -182,7 +203,7 @@ int __devinit cpu_up(unsigned int cpu)
 	int ret;
 	void *hcpu = (void *)(long)cpu;

-	if ((ret = down_interruptible(&cpucontrol)) != 0)
+	if ((ret = lock_cpu_hotplug_interruptible()) != 0)
 		return ret;

 	if (cpu_online(cpu) || !cpu_present(cpu)) {
@@ -190,11 +211,6 @@ int __devinit cpu_up(unsigned int cpu)
 		goto out;
 	}

-	/*
-	 * Leave a trace in current->flags indicating we are already in
-	 * process of performing CPU hotplug.
-	 */
-	current->flags |= PF_HOTPLUG_CPU;
 	ret = notifier_call_chain(&cpu_chain, CPU_UP_PREPARE, hcpu);
 	if (ret == NOTIFY_BAD) {
 		printk("%s: attempt to bring up CPU %u failed\n",
@@ -217,7 +233,6 @@ out_notify:
 	if (ret != 0)
 		notifier_call_chain(&cpu_chain, CPU_UP_CANCELED, hcpu);
 out:
-	current->flags &= ~PF_HOTPLUG_CPU;
-	up(&cpucontrol);
+	unlock_cpu_hotplug();
 	return ret;
 }
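
Note: the patch above replaces the PF_HOTPLUG_CPU flag trick with an owner/depth-counting lock, so a task that already holds cpucontrol (for example cpu_down() invoking a notifier callback that itself calls lock_cpu_hotplug()) simply nests instead of deadlocking on the semaphore. Below is a minimal userspace sketch of that owner/depth pattern, with a pthread mutex standing in for the cpucontrol semaphore; the helper names (notifier_callback, lock_owner_valid) and the demo scenario are illustrative only and are not part of the patch.

/* Illustrative userspace model of the owner/depth recursion above,
 * not kernel code.  Build with: cc -pthread demo.c */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t cpucontrol = PTHREAD_MUTEX_INITIALIZER;
static pthread_t lock_owner;
static int lock_owner_valid;	/* pthread_t has no "NULL" value */
static int lock_depth;

static void lock_cpu_hotplug(void)
{
	/* Only take the mutex if we are not already the owner. */
	if (!lock_owner_valid || !pthread_equal(lock_owner, pthread_self()))
		pthread_mutex_lock(&cpucontrol);
	lock_owner = pthread_self();
	lock_owner_valid = 1;
	lock_depth++;
}

static void unlock_cpu_hotplug(void)
{
	/* Release the mutex only when the outermost caller unlocks. */
	if (--lock_depth == 0) {
		lock_owner_valid = 0;
		pthread_mutex_unlock(&cpucontrol);
	}
}

/* Stand-in for a notifier callback that also wants the hotplug lock. */
static void notifier_callback(void)
{
	lock_cpu_hotplug();	/* nested call: only bumps the depth */
	printf("callback ran at depth %d\n", lock_depth);
	unlock_cpu_hotplug();
}

int main(void)
{
	lock_cpu_hotplug();	/* outer lock, as cpu_down() would take it */
	notifier_callback();	/* would deadlock with a plain down() */
	unlock_cpu_hotplug();
	return 0;
}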
