Diffstat (limited to 'kernel/cpu.c')

 -rw-r--r--   kernel/cpu.c | 138
 1 file changed, 117 insertions(+), 21 deletions(-)
diff --git a/kernel/cpu.c b/kernel/cpu.c
index c77bc3a1c722..86d49045daed 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -15,6 +15,28 @@
 #include <linux/stop_machine.h>
 #include <linux/mutex.h>
 
+/*
+ * Represents all CPUs present in the system.
+ * In systems capable of hotplug, this map could dynamically grow
+ * as new CPUs are detected in the system via any platform-specific
+ * method, such as ACPI.
+ */
+cpumask_t cpu_present_map __read_mostly;
+EXPORT_SYMBOL(cpu_present_map);
+
+#ifndef CONFIG_SMP
+
+/*
+ * Represents all CPUs that are currently online.
+ */
+cpumask_t cpu_online_map __read_mostly = CPU_MASK_ALL;
+EXPORT_SYMBOL(cpu_online_map);
+
+cpumask_t cpu_possible_map __read_mostly = CPU_MASK_ALL;
+EXPORT_SYMBOL(cpu_possible_map);
+
+#else /* CONFIG_SMP */
+
 /* Serializes the updates to cpu_online_map, cpu_present_map */
 static DEFINE_MUTEX(cpu_add_remove_lock);
 
@@ -42,6 +64,8 @@ void __init cpu_hotplug_init(void)
 	cpu_hotplug.refcount = 0;
 }
 
+cpumask_t cpu_active_map;
+
 #ifdef CONFIG_HOTPLUG_CPU
 
 void get_online_cpus(void)
@@ -175,13 +199,14 @@ static int __ref take_cpu_down(void *_param)
 	struct take_cpu_down_param *param = _param;
 	int err;
 
-	raw_notifier_call_chain(&cpu_chain, CPU_DYING | param->mod,
-				param->hcpu);
 	/* Ensure this CPU doesn't handle any more interrupts. */
 	err = __cpu_disable();
 	if (err < 0)
 		return err;
 
+	raw_notifier_call_chain(&cpu_chain, CPU_DYING | param->mod,
+				param->hcpu);
+
 	/* Force idle task to run as soon as we yield: it should
 	   immediately notice cpu is offline and die quickly. */
 	sched_idle_next();
@@ -192,7 +217,6 @@ static int __ref take_cpu_down(void *_param)
 static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
 {
 	int err, nr_calls = 0;
-	struct task_struct *p;
 	cpumask_t old_allowed, tmp;
 	void *hcpu = (void *)(long)cpu;
 	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
@@ -225,21 +249,18 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
 	cpus_setall(tmp);
 	cpu_clear(cpu, tmp);
 	set_cpus_allowed_ptr(current, &tmp);
+	tmp = cpumask_of_cpu(cpu);
 
-	p = __stop_machine_run(take_cpu_down, &tcd_param, cpu);
-
-	if (IS_ERR(p) || cpu_online(cpu)) {
+	err = __stop_machine(take_cpu_down, &tcd_param, &tmp);
+	if (err) {
 		/* CPU didn't die: tell everyone.  Can't complain. */
 		if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
 					    hcpu) == NOTIFY_BAD)
 			BUG();
 
-		if (IS_ERR(p)) {
-			err = PTR_ERR(p);
-			goto out_allowed;
-		}
-		goto out_thread;
+		goto out_allowed;
 	}
+	BUG_ON(cpu_online(cpu));
 
 	/* Wait for it to sleep (leaving idle task). */
 	while (!idle_cpu(cpu))
@@ -255,12 +276,15 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
 
 	check_for_tasks(cpu);
 
-out_thread:
-	err = kthread_stop(p);
 out_allowed:
 	set_cpus_allowed_ptr(current, &old_allowed);
 out_release:
 	cpu_hotplug_done();
+	if (!err) {
+		if (raw_notifier_call_chain(&cpu_chain, CPU_POST_DEAD | mod,
+					    hcpu) == NOTIFY_BAD)
+			BUG();
+	}
 	return err;
 }
 
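Note on the two hunks above: CPU_DYING now fires only after __cpu_disable() has succeeded (on the dying CPU, inside stop_machine, with interrupts off), and the new CPU_POST_DEAD event fires on a surviving CPU after cpu_hotplug_done() has dropped the hotplug lock. A minimal sketch of a subsystem notifier of this era showing where each callback would run -- the callback and its handling are hypothetical, only the notifier API and the two events come from the patch:

	#include <linux/cpu.h>
	#include <linux/kernel.h>
	#include <linux/notifier.h>

	/* Hypothetical subsystem callback; illustrative handling only. */
	static int example_cpu_callback(struct notifier_block *nb,
					unsigned long action, void *hcpu)
	{
		unsigned int cpu = (unsigned long)hcpu;

		switch (action & ~CPU_TASKS_FROZEN) {
		case CPU_DYING:
			/* After this patch: runs on the dying CPU inside
			 * stop_machine(), after __cpu_disable() succeeded,
			 * with interrupts off -- it can no longer fail. */
			break;
		case CPU_POST_DEAD:
			/* New in this patch: runs on a surviving CPU after
			 * cpu_hotplug_done(), i.e. with the hotplug lock
			 * dropped, so sleeping or flushing work is safe. */
			printk(KERN_INFO "cpu %u torn down\n", cpu);
			break;
		}
		return NOTIFY_OK;
	}

	static struct notifier_block example_cpu_notifier = {
		.notifier_call = example_cpu_callback,
	};

Such a block would be registered from init code with register_cpu_notifier(&example_cpu_notifier).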
@@ -269,14 +293,34 @@ int __ref cpu_down(unsigned int cpu)
 	int err = 0;
 
 	cpu_maps_update_begin();
-	if (cpu_hotplug_disabled)
+
+	if (cpu_hotplug_disabled) {
 		err = -EBUSY;
-	else
-		err = _cpu_down(cpu, 0);
+		goto out;
+	}
+
+	cpu_clear(cpu, cpu_active_map);
+
+	/*
+	 * Make sure all CPUs did the reschedule and are not
+	 * using a stale version of the cpu_active_map.
+	 * This is not strictly necessary because the stop_machine()
+	 * that we run down the line already provides the required
+	 * synchronization. But it's really a side effect and we do not
+	 * want to depend on the innards of stop_machine here.
+	 */
+	synchronize_sched();
 
+	err = _cpu_down(cpu, 0);
+
+	if (cpu_online(cpu))
+		cpu_set(cpu, cpu_active_map);
+
+out:
 	cpu_maps_update_done();
 	return err;
 }
+EXPORT_SYMBOL(cpu_down);
 #endif /*CONFIG_HOTPLUG_CPU*/
 
 /* Requires cpu_add_remove_lock to be held */
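The synchronize_sched() added above pairs with readers that sample cpu_active_map from preemption-disabled (RCU-sched read-side) sections, as scheduler load-balancing paths do. A sketch of that reader pattern -- the function below is hypothetical, standing in for real scheduler code:

	#include <linux/cpumask.h>
	#include <linux/preempt.h>

	/* Hypothetical reader: must not pick a CPU that cpu_down()
	 * is in the middle of tearing down. */
	static int pick_target_cpu(const cpumask_t *candidates)
	{
		int cpu;

		preempt_disable();	/* RCU-sched read-side section */
		cpu = first_cpu(*candidates);
		if (cpu >= NR_CPUS || !cpu_isset(cpu, cpu_active_map))
			cpu = -1;	/* empty mask or CPU going away */
		preempt_enable();

		return cpu;
	}

Because every such reader runs with preemption disabled, synchronize_sched() returning guarantees they have all observed the cleared bit before _cpu_down() proceeds.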
@@ -306,6 +350,8 @@ static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
 		goto out_notify;
 	BUG_ON(!cpu_online(cpu));
 
+	cpu_set(cpu, cpu_active_map);
+
 	/* Now call notifier in preparation. */
 	raw_notifier_call_chain(&cpu_chain, CPU_ONLINE | mod, hcpu);
 
@@ -324,7 +370,7 @@ int __cpuinit cpu_up(unsigned int cpu)
 	if (!cpu_isset(cpu, cpu_possible_map)) {
 		printk(KERN_ERR "can't online cpu %d because it is not "
 			"configured as may-hotadd at boot time\n", cpu);
-#if defined(CONFIG_IA64) || defined(CONFIG_X86_64) || defined(CONFIG_S390)
+#if defined(CONFIG_IA64) || defined(CONFIG_X86_64)
 		printk(KERN_ERR "please check additional_cpus= boot "
 			"parameter\n");
 #endif
@@ -332,11 +378,15 @@ int __cpuinit cpu_up(unsigned int cpu)
 	}
 
 	cpu_maps_update_begin();
-	if (cpu_hotplug_disabled)
+
+	if (cpu_hotplug_disabled) {
 		err = -EBUSY;
-	else
-		err = _cpu_up(cpu, 0);
+		goto out;
+	}
 
+	err = _cpu_up(cpu, 0);
+
+out:
 	cpu_maps_update_done();
 	return err;
 }
@@ -390,7 +440,7 @@ void __ref enable_nonboot_cpus(void)
 		goto out;
 
 	printk("Enabling non-boot CPUs ...\n");
-	for_each_cpu_mask(cpu, frozen_cpus) {
+	for_each_cpu_mask_nr(cpu, frozen_cpus) {
 		error = _cpu_up(cpu, 1);
 		if (!error) {
 			printk("CPU%d is up\n", cpu);
@@ -403,3 +453,49 @@ out:
 	cpu_maps_update_done();
 }
 #endif /* CONFIG_PM_SLEEP_SMP */
+
+/**
+ * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
+ * @cpu: cpu that just started
+ *
+ * This function calls the cpu_chain notifiers with CPU_STARTING.
+ * It must be called by the arch code on the new cpu, before the new cpu
+ * enables interrupts and before the "boot" cpu returns from __cpu_up().
+ */
+void notify_cpu_starting(unsigned int cpu)
+{
+	unsigned long val = CPU_STARTING;
+
+#ifdef CONFIG_PM_SLEEP_SMP
+	if (cpu_isset(cpu, frozen_cpus))
+		val = CPU_STARTING_FROZEN;
+#endif /* CONFIG_PM_SLEEP_SMP */
+	raw_notifier_call_chain(&cpu_chain, val, (void *)(long)cpu);
+}
+
+#endif /* CONFIG_SMP */
+
+/*
+ * cpu_bit_bitmap[] is a special, "compressed" data structure that
+ * represents the single-bit values 1<<nr for all NR_CPUS bit
+ * positions nr.
+ *
+ * It is used by cpumask_of_cpu() to get the constant address of a CPU
+ * mask value that has only a single bit set.
+ */
+
+/* cpu_bit_bitmap[0] is empty - so we can back into it */
+#define MASK_DECLARE_1(x)	[x+1][0] = 1UL << (x)
+#define MASK_DECLARE_2(x)	MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
+#define MASK_DECLARE_4(x)	MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
+#define MASK_DECLARE_8(x)	MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)
+
+const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {
+
+	MASK_DECLARE_8(0),	MASK_DECLARE_8(8),
+	MASK_DECLARE_8(16),	MASK_DECLARE_8(24),
+#if BITS_PER_LONG > 32
+	MASK_DECLARE_8(32),	MASK_DECLARE_8(40),
+	MASK_DECLARE_8(48),	MASK_DECLARE_8(56),
+#endif
+};
+EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
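Two closing notes on the code added by the final hunk. First, notify_cpu_starting(): per its kerneldoc it must run on the incoming CPU before that CPU enables interrupts and before the boot CPU returns from __cpu_up(). A purely schematic bringup path showing that ordering -- this is no particular architecture's real code; everything except the notify_cpu_starting() call and its placement is an assumption:

	/* Schematic secondary-CPU entry point -- illustrative only. */
	void __cpuinit example_secondary_start(void)
	{
		unsigned int cpu = smp_processor_id();

		/* ... arch-specific setup: MMU, per-cpu areas, local timer ... */

		notify_cpu_starting(cpu);	/* CPU_STARTING, irqs still off */

		cpu_set(cpu, cpu_online_map);	/* lets __cpu_up() on the boot CPU return */
		local_irq_enable();		/* only after the notifiers have run */

		cpu_idle();			/* enter the idle loop; never returns */
	}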
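Second, the cpu_bit_bitmap[] trick: row 1 + (cpu % BITS_PER_LONG) has exactly one set bit, in its word 0, and every other word of every row is zero, so stepping a pointer back by cpu / BITS_PER_LONG words yields a constant, read-only mask whose only set bit is bit cpu. A sketch of the lookup, modeled on the kernel's get_cpu_mask() helper of the same era (the function name here is hypothetical):

	/* How cpumask_of_cpu() can resolve to a constant single-bit mask. */
	static inline const unsigned long *example_single_bit_mask(unsigned int cpu)
	{
		/* Word 0 of this row is 1UL << (cpu % BITS_PER_LONG);
		 * all of its other words are zero. */
		const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];

		/* Shift the window back so the set bit lands at overall bit
		 * position cpu; the words backed into belong to earlier rows
		 * and are all zero (the all-zero row 0 exists precisely so
		 * that row 1 has something safe to back into). */
		p -= cpu / BITS_PER_LONG;

		return p;	/* BITS_TO_LONGS(NR_CPUS) words, only bit cpu set */
	}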