Diffstat (limited to 'kernel/cpu.c')
 -rw-r--r--  kernel/cpu.c  103
 1 file changed, 57 insertions, 46 deletions
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 124ad9d6be16..8b92539b4754 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -20,6 +20,20 @@
 /* Serializes the updates to cpu_online_mask, cpu_present_mask */
 static DEFINE_MUTEX(cpu_add_remove_lock);
 
+/*
+ * The following two API's must be used when attempting
+ * to serialize the updates to cpu_online_mask, cpu_present_mask.
+ */
+void cpu_maps_update_begin(void)
+{
+	mutex_lock(&cpu_add_remove_lock);
+}
+
+void cpu_maps_update_done(void)
+{
+	mutex_unlock(&cpu_add_remove_lock);
+}
+
 static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);
 
 /* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
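cpu_maps_update_begin()/cpu_maps_update_done() just take and release cpu_add_remove_lock, so any path that changes cpu_present_mask or cpu_online_mask can serialize against concurrent hotplug. A minimal sketch of the calling pattern, with an illustrative helper name that is not part of this patch:

	/* Illustrative only: serialize an update to cpu_present_mask. */
	static int example_mark_cpu_present(unsigned int cpu)
	{
		cpu_maps_update_begin();	/* takes cpu_add_remove_lock */
		set_cpu_present(cpu, true);	/* update is now serialized */
		cpu_maps_update_done();		/* releases the lock */
		return 0;
	}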
@@ -27,6 +41,8 @@ static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);
  */
 static int cpu_hotplug_disabled;
 
+#ifdef CONFIG_HOTPLUG_CPU
+
 static struct {
 	struct task_struct *active_writer;
 	struct mutex lock; /* Synchronizes accesses to refcount, */
@@ -41,8 +57,6 @@ static struct {
 	.refcount = 0,
 };
 
-#ifdef CONFIG_HOTPLUG_CPU
-
 void get_online_cpus(void)
 {
 	might_sleep();
@@ -67,22 +81,6 @@ void put_online_cpus(void)
 }
 EXPORT_SYMBOL_GPL(put_online_cpus);
 
-#endif /* CONFIG_HOTPLUG_CPU */
-
-/*
- * The following two API's must be used when attempting
- * to serialize the updates to cpu_online_mask, cpu_present_mask.
- */
-void cpu_maps_update_begin(void)
-{
-	mutex_lock(&cpu_add_remove_lock);
-}
-
-void cpu_maps_update_done(void)
-{
-	mutex_unlock(&cpu_add_remove_lock);
-}
-
 /*
  * This ensures that the hotplug operation can begin only when the
  * refcount goes to zero.
@@ -124,6 +122,12 @@ static void cpu_hotplug_done(void)
 	cpu_hotplug.active_writer = NULL;
 	mutex_unlock(&cpu_hotplug.lock);
 }
+
+#else /* #if CONFIG_HOTPLUG_CPU */
+static void cpu_hotplug_begin(void) {}
+static void cpu_hotplug_done(void) {}
+#endif /* #else #if CONFIG_HOTPLUG_CPU */
+
 /* Need to know about CPUs going up/down? */
 int __ref register_cpu_notifier(struct notifier_block *nb)
 {
@@ -134,8 +138,29 @@ int __ref register_cpu_notifier(struct notifier_block *nb)
 	return ret;
 }
 
+static int __cpu_notify(unsigned long val, void *v, int nr_to_call,
+			int *nr_calls)
+{
+	int ret;
+
+	ret = __raw_notifier_call_chain(&cpu_chain, val, v, nr_to_call,
+					nr_calls);
+
+	return notifier_to_errno(ret);
+}
+
+static int cpu_notify(unsigned long val, void *v)
+{
+	return __cpu_notify(val, v, -1, NULL);
+}
+
 #ifdef CONFIG_HOTPLUG_CPU
 
+static void cpu_notify_nofail(unsigned long val, void *v)
+{
+	BUG_ON(cpu_notify(val, v));
+}
+
 EXPORT_SYMBOL(register_cpu_notifier);
 
 void __ref unregister_cpu_notifier(struct notifier_block *nb)
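The wrappers added above funnel every cpu_chain invocation through notifier_to_errno(), which is why the call sites in the following hunks can test a plain if (err) and drop the hand-rolled NOTIFY_BAD/-EINVAL handling: whatever errno a callback encodes with notifier_from_errno() now reaches the caller of _cpu_up()/_cpu_down() unchanged. A sketch of a callback that vetoes an unplug this way, with the callback name and policy invented purely for illustration:

	/* Illustrative CPU_DOWN_PREPARE handler that returns a real errno. */
	static int example_cpu_callback(struct notifier_block *nb,
					unsigned long action, void *hcpu)
	{
		unsigned int cpu = (unsigned long)hcpu;

		switch (action & ~CPU_TASKS_FROZEN) {
		case CPU_DOWN_PREPARE:
			if (cpu == 0)	/* pretend CPU 0 must stay online */
				return notifier_from_errno(-EBUSY);
			break;
		}
		return NOTIFY_OK;
	}

With this patch the -EBUSY above is what cpu_down() ultimately returns, rather than the hard-coded -EINVAL that the old NOTIFY_BAD check produced.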
@@ -181,8 +206,7 @@ static int __ref take_cpu_down(void *_param)
 	if (err < 0)
 		return err;
 
-	raw_notifier_call_chain(&cpu_chain, CPU_DYING | param->mod,
-				param->hcpu);
+	cpu_notify(CPU_DYING | param->mod, param->hcpu);
 
 	if (task_cpu(param->caller) == cpu)
 		move_task_off_dead_cpu(cpu, param->caller);
@@ -212,17 +236,14 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
 
 	cpu_hotplug_begin();
 	set_cpu_active(cpu, false);
-	err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE | mod,
-					hcpu, -1, &nr_calls);
-	if (err == NOTIFY_BAD) {
+	err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
+	if (err) {
 		set_cpu_active(cpu, true);
 
 		nr_calls--;
-		__raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
-					  hcpu, nr_calls, NULL);
+		__cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
 		printk("%s: attempt to take down CPU %u failed\n",
 				__func__, cpu);
-		err = -EINVAL;
 		goto out_release;
 	}
 
@@ -230,9 +251,7 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
 	if (err) {
 		set_cpu_active(cpu, true);
 		/* CPU didn't die: tell everyone. Can't complain. */
-		if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
-					    hcpu) == NOTIFY_BAD)
-			BUG();
+		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
 
 		goto out_release;
 	}
@@ -246,19 +265,14 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
 	__cpu_die(cpu);
 
 	/* CPU is completely dead: tell everyone. Too late to complain. */
-	if (raw_notifier_call_chain(&cpu_chain, CPU_DEAD | mod,
-				    hcpu) == NOTIFY_BAD)
-		BUG();
+	cpu_notify_nofail(CPU_DEAD | mod, hcpu);
 
 	check_for_tasks(cpu);
 
 out_release:
 	cpu_hotplug_done();
-	if (!err) {
-		if (raw_notifier_call_chain(&cpu_chain, CPU_POST_DEAD | mod,
-					    hcpu) == NOTIFY_BAD)
-			BUG();
-	}
+	if (!err)
+		cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
 	return err;
 }
 
@@ -293,13 +307,11 @@ static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
 		return -EINVAL;
 
 	cpu_hotplug_begin();
-	ret = __raw_notifier_call_chain(&cpu_chain, CPU_UP_PREPARE | mod, hcpu,
-					-1, &nr_calls);
-	if (ret == NOTIFY_BAD) {
+	ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
+	if (ret) {
 		nr_calls--;
 		printk("%s: attempt to bring up CPU %u failed\n",
 				__func__, cpu);
-		ret = -EINVAL;
 		goto out_notify;
 	}
 
@@ -312,12 +324,11 @@ static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
 	set_cpu_active(cpu, true);
 
 	/* Now call notifier in preparation. */
-	raw_notifier_call_chain(&cpu_chain, CPU_ONLINE | mod, hcpu);
+	cpu_notify(CPU_ONLINE | mod, hcpu);
 
 out_notify:
 	if (ret != 0)
-		__raw_notifier_call_chain(&cpu_chain,
-				CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
+		__cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
 	cpu_hotplug_done();
 
 	return ret;
@@ -383,7 +394,7 @@ static cpumask_var_t frozen_cpus;
 
 int disable_nonboot_cpus(void)
 {
-	int cpu, first_cpu, error;
+	int cpu, first_cpu, error = 0;
 
 	cpu_maps_update_begin();
 	first_cpu = cpumask_first(cpu_online_mask);
@@ -481,7 +492,7 @@ void __cpuinit notify_cpu_starting(unsigned int cpu)
 	if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
 		val = CPU_STARTING_FROZEN;
 #endif /* CONFIG_PM_SLEEP_SMP */
-	raw_notifier_call_chain(&cpu_chain, val, (void *)(long)cpu);
+	cpu_notify(val, (void *)(long)cpu);
 }
 
 #endif /* CONFIG_SMP */
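All of the cpu_notify()/cpu_notify_nofail() calls above land in callbacks hung off cpu_chain, which subsystems attach with register_cpu_notifier(). A minimal consumer sketch using the illustrative callback from the earlier note; a real user would live in its own file and include <linux/cpu.h> and <linux/notifier.h>:

	static struct notifier_block example_cpu_nb = {
		.notifier_call	= example_cpu_callback,
	};

	static int __init example_hotplug_init(void)
	{
		return register_cpu_notifier(&example_cpu_nb);
	}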