Diffstat (limited to 'kernel/cpu.c')

 -rw-r--r--  kernel/cpu.c | 219
 1 file changed, 156 insertions(+), 63 deletions(-)
diff --git a/kernel/cpu.c b/kernel/cpu.c
index fe2b8d0bfe4c..32c96628463e 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -13,66 +13,66 @@
 #include <linux/module.h>
 #include <linux/kthread.h>
 #include <linux/stop_machine.h>
-#include <asm/semaphore.h>
+#include <linux/mutex.h>
 
 /* This protects CPUs going up and down... */
-static DECLARE_MUTEX(cpucontrol);
+static DEFINE_MUTEX(cpu_add_remove_lock);
+static DEFINE_MUTEX(cpu_bitmask_lock);
 
-static BLOCKING_NOTIFIER_HEAD(cpu_chain);
+static __cpuinitdata BLOCKING_NOTIFIER_HEAD(cpu_chain);
 
-#ifdef CONFIG_HOTPLUG_CPU
-static struct task_struct *lock_cpu_hotplug_owner;
-static int lock_cpu_hotplug_depth;
-
-static int __lock_cpu_hotplug(int interruptible)
-{
-        int ret = 0;
-
-        if (lock_cpu_hotplug_owner != current) {
-                if (interruptible)
-                        ret = down_interruptible(&cpucontrol);
-                else
-                        down(&cpucontrol);
-        }
-
-        /*
-         * Set only if we succeed in locking
-         */
-        if (!ret) {
-                lock_cpu_hotplug_depth++;
-                lock_cpu_hotplug_owner = current;
-        }
-
-        return ret;
-}
+/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
+ * Should always be manipulated under cpu_add_remove_lock
+ */
+static int cpu_hotplug_disabled;
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+/* Crappy recursive lock-takers in cpufreq! Complain loudly about idiots */
+static struct task_struct *recursive;
+static int recursive_depth;
 
 void lock_cpu_hotplug(void)
 {
-        __lock_cpu_hotplug(0);
+        struct task_struct *tsk = current;
+
+        if (tsk == recursive) {
+                static int warnings = 10;
+                if (warnings) {
+                        printk(KERN_ERR "Lukewarm IQ detected in hotplug locking\n");
+                        WARN_ON(1);
+                        warnings--;
+                }
+                recursive_depth++;
+                return;
+        }
+        mutex_lock(&cpu_bitmask_lock);
+        recursive = tsk;
 }
 EXPORT_SYMBOL_GPL(lock_cpu_hotplug);
 
 void unlock_cpu_hotplug(void)
 {
-        if (--lock_cpu_hotplug_depth == 0) {
-                lock_cpu_hotplug_owner = NULL;
-                up(&cpucontrol);
+        WARN_ON(recursive != current);
+        if (recursive_depth) {
+                recursive_depth--;
+                return;
         }
+        mutex_unlock(&cpu_bitmask_lock);
+        recursive = NULL;
 }
 EXPORT_SYMBOL_GPL(unlock_cpu_hotplug);
 
-int lock_cpu_hotplug_interruptible(void)
-{
-        return __lock_cpu_hotplug(1);
-}
-EXPORT_SYMBOL_GPL(lock_cpu_hotplug_interruptible);
 #endif  /* CONFIG_HOTPLUG_CPU */
 
 /* Need to know about CPUs going up/down? */
-int register_cpu_notifier(struct notifier_block *nb)
+int __cpuinit register_cpu_notifier(struct notifier_block *nb)
 {
         return blocking_notifier_chain_register(&cpu_chain, nb);
 }
+
+#ifdef CONFIG_HOTPLUG_CPU
+
 EXPORT_SYMBOL(register_cpu_notifier);
 
 void unregister_cpu_notifier(struct notifier_block *nb)
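The recursion guard above is safe to read without the lock because the only task that can ever observe recursive == current is the task that set it while holding cpu_bitmask_lock. A minimal user-space sketch of the same owner-tracking pattern, using POSIX threads (hotplug_lock()/hotplug_unlock() and all other names here are illustrative, not kernel API):

/*
 * Owner-tracking lock sketch: tolerate, but warn about, recursive
 * acquisition by the current holder, as lock_cpu_hotplug() does above.
 * Illustrative user-space code, not part of the kernel patch.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_t holder;        /* meaningful only while held */
static int holder_set;
static int depth;

void hotplug_lock(void)
{
        /* Unlocked read: only the current holder can match its own
         * id here, mirroring the benign race on 'recursive' above. */
        if (holder_set && pthread_equal(holder, pthread_self())) {
                fprintf(stderr, "warning: recursive hotplug lock\n");
                depth++;
                return;
        }
        pthread_mutex_lock(&lock);
        holder = pthread_self();
        holder_set = 1;
}

void hotplug_unlock(void)
{
        if (depth) {            /* unwind nested takes first */
                depth--;
                return;
        }
        holder_set = 0;
        pthread_mutex_unlock(&lock);
}

As in the kernel version, recursive takes are tolerated rather than deadlocking, but reported loudly; the kernel rate-limits the complaint to ten occurrences.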
@@ -81,7 +81,6 @@ void unregister_cpu_notifier(struct notifier_block *nb)
 }
 EXPORT_SYMBOL(unregister_cpu_notifier);
 
-#ifdef CONFIG_HOTPLUG_CPU
 static inline void check_for_tasks(int cpu)
 {
         struct task_struct *p;
@@ -114,32 +113,25 @@ static int take_cpu_down(void *unused)
         return 0;
 }
 
-int cpu_down(unsigned int cpu)
+/* Requires cpu_add_remove_lock to be held */
+static int _cpu_down(unsigned int cpu)
 {
         int err;
         struct task_struct *p;
         cpumask_t old_allowed, tmp;
 
-        if ((err = lock_cpu_hotplug_interruptible()) != 0)
-                return err;
-
-        if (num_online_cpus() == 1) {
-                err = -EBUSY;
-                goto out;
-        }
+        if (num_online_cpus() == 1)
+                return -EBUSY;
 
-        if (!cpu_online(cpu)) {
-                err = -EINVAL;
-                goto out;
-        }
+        if (!cpu_online(cpu))
+                return -EINVAL;
 
         err = blocking_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE,
                                                 (void *)(long)cpu);
         if (err == NOTIFY_BAD) {
                 printk("%s: attempt to take down CPU %u failed\n",
                                 __FUNCTION__, cpu);
-                err = -EINVAL;
-                goto out;
+                return -EINVAL;
         }
 
         /* Ensure that we are not runnable on dying cpu */
@@ -148,7 +140,10 @@ int cpu_down(unsigned int cpu)
         cpu_clear(cpu, tmp);
         set_cpus_allowed(current, tmp);
 
+        mutex_lock(&cpu_bitmask_lock);
         p = __stop_machine_run(take_cpu_down, NULL, cpu);
+        mutex_unlock(&cpu_bitmask_lock);
+
         if (IS_ERR(p)) {
                 /* CPU didn't die: tell everyone. Can't complain. */
                 if (blocking_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED,
@@ -184,24 +179,32 @@ out_thread:
         err = kthread_stop(p);
 out_allowed:
         set_cpus_allowed(current, old_allowed);
-out:
-        unlock_cpu_hotplug();
+        return err;
+}
+
+int cpu_down(unsigned int cpu)
+{
+        int err = 0;
+
+        mutex_lock(&cpu_add_remove_lock);
+        if (cpu_hotplug_disabled)
+                err = -EBUSY;
+        else
+                err = _cpu_down(cpu);
+
+        mutex_unlock(&cpu_add_remove_lock);
         return err;
 }
 #endif /*CONFIG_HOTPLUG_CPU*/
 
-int __devinit cpu_up(unsigned int cpu)
+/* Requires cpu_add_remove_lock to be held */
+static int __devinit _cpu_up(unsigned int cpu)
 {
         int ret;
         void *hcpu = (void *)(long)cpu;
 
-        if ((ret = lock_cpu_hotplug_interruptible()) != 0)
-                return ret;
-
-        if (cpu_online(cpu) || !cpu_present(cpu)) {
-                ret = -EINVAL;
-                goto out;
-        }
+        if (cpu_online(cpu) || !cpu_present(cpu))
+                return -EINVAL;
 
         ret = blocking_notifier_call_chain(&cpu_chain, CPU_UP_PREPARE, hcpu);
         if (ret == NOTIFY_BAD) {
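The _cpu_down()/cpu_down() split above, mirrored by _cpu_up()/cpu_up() below, follows a common kernel convention: the underscore-prefixed helper is documented as "Requires cpu_add_remove_lock to be held", so an internal caller such as disable_nonboot_cpus() can batch several operations inside one critical section, while the public entry points take the lock themselves and honour cpu_hotplug_disabled. A generic sketch of the convention, with user-space locking and illustrative names:

/* "Locked helper plus locking wrapper" sketch; not kernel code. */
#include <errno.h>
#include <pthread.h>

static pthread_mutex_t add_remove_lock = PTHREAD_MUTEX_INITIALIZER;
static int hotplug_disabled;    /* guarded by add_remove_lock */

/* Requires add_remove_lock to be held. */
static int _change_state(unsigned int cpu)
{
        /* ... the actual state change, free to assume the lock ... */
        (void)cpu;
        return 0;
}

/* Public entry point: takes the lock and honours the disable flag. */
int change_state(unsigned int cpu)
{
        int err;

        pthread_mutex_lock(&add_remove_lock);
        err = hotplug_disabled ? -EBUSY : _change_state(cpu);
        pthread_mutex_unlock(&add_remove_lock);
        return err;
}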
@@ -212,7 +215,9 @@ int __devinit cpu_up(unsigned int cpu)
         }
 
         /* Arch-specific enabling code. */
+        mutex_lock(&cpu_bitmask_lock);
         ret = __cpu_up(cpu);
+        mutex_unlock(&cpu_bitmask_lock);
         if (ret != 0)
                 goto out_notify;
         BUG_ON(!cpu_online(cpu));
@@ -224,7 +229,95 @@ out_notify:
         if (ret != 0)
                 blocking_notifier_call_chain(&cpu_chain,
                                 CPU_UP_CANCELED, hcpu);
-out:
-        unlock_cpu_hotplug();
+
         return ret;
 }
+
+int __devinit cpu_up(unsigned int cpu)
+{
+        int err = 0;
+
+        mutex_lock(&cpu_add_remove_lock);
+        if (cpu_hotplug_disabled)
+                err = -EBUSY;
+        else
+                err = _cpu_up(cpu);
+
+        mutex_unlock(&cpu_add_remove_lock);
+        return err;
+}
+
+#ifdef CONFIG_SUSPEND_SMP
+static cpumask_t frozen_cpus;
+
+int disable_nonboot_cpus(void)
+{
+        int cpu, first_cpu, error;
+
+        mutex_lock(&cpu_add_remove_lock);
+        first_cpu = first_cpu(cpu_present_map);
+        if (!cpu_online(first_cpu)) {
+                error = _cpu_up(first_cpu);
+                if (error) {
+                        printk(KERN_ERR "Could not bring CPU%d up.\n",
+                                first_cpu);
+                        goto out;
+                }
+        }
+        error = set_cpus_allowed(current, cpumask_of_cpu(first_cpu));
+        if (error) {
+                printk(KERN_ERR "Could not run on CPU%d\n", first_cpu);
+                goto out;
+        }
+        /* We take down all of the non-boot CPUs in one shot to avoid races
+         * with the userspace trying to use the CPU hotplug at the same time
+         */
+        cpus_clear(frozen_cpus);
+        printk("Disabling non-boot CPUs ...\n");
+        for_each_online_cpu(cpu) {
+                if (cpu == first_cpu)
+                        continue;
+                error = _cpu_down(cpu);
+                if (!error) {
+                        cpu_set(cpu, frozen_cpus);
+                        printk("CPU%d is down\n", cpu);
+                } else {
+                        printk(KERN_ERR "Error taking CPU%d down: %d\n",
+                                cpu, error);
+                        break;
+                }
+        }
+        if (!error) {
+                BUG_ON(num_online_cpus() > 1);
+                /* Make sure the CPUs won't be enabled by someone else */
+                cpu_hotplug_disabled = 1;
+        } else {
+                printk(KERN_ERR "Non-boot CPUs are not disabled\n");
+        }
+out:
+        mutex_unlock(&cpu_add_remove_lock);
+        return error;
+}
+
+void enable_nonboot_cpus(void)
+{
+        int cpu, error;
+
+        /* Allow everyone to use the CPU hotplug again */
+        mutex_lock(&cpu_add_remove_lock);
+        cpu_hotplug_disabled = 0;
+        mutex_unlock(&cpu_add_remove_lock);
+
+        printk("Enabling non-boot CPUs ...\n");
+        for_each_cpu_mask(cpu, frozen_cpus) {
+                error = cpu_up(cpu);
+                if (!error) {
+                        printk("CPU%d is up\n", cpu);
+                        continue;
+                }
+                printk(KERN_WARNING "Error taking CPU%d up: %d\n",
+                        cpu, error);
+        }
+        cpus_clear(frozen_cpus);
+}
+#endif
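disable_nonboot_cpus() and enable_nonboot_cpus() are meant to bracket a sleep transition. A hypothetical suspend-side caller, for illustration only (the real callers live in the suspend code, not in kernel/cpu.c; the declarations are assumed to come from <linux/cpu.h>):

#include <linux/cpu.h>

/* Hypothetical caller sketch, not part of this patch. */
static int suspend_with_cpu_hotplug(void)
{
        int error;

        /* Takes every CPU but the boot CPU offline and sets
         * cpu_hotplug_disabled, so cpu_up()/cpu_down() fail with
         * -EBUSY until hotplug is re-enabled below. */
        error = disable_nonboot_cpus();
        if (error)
                return error;

        /* ... save device state and enter the sleep state ... */

        /* Clears cpu_hotplug_disabled and brings the CPUs recorded
         * in frozen_cpus back online. */
        enable_nonboot_cpus();
        return 0;
}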
