Diffstat (limited to 'kernel/cpu.c')
-rw-r--r--	kernel/cpu.c	129
1 file changed, 97 insertions(+), 32 deletions(-)
diff --git a/kernel/cpu.c b/kernel/cpu.c
index a343bde710b1..94bbe4695232 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -20,6 +20,7 @@
 #include <linux/gfp.h>
 #include <linux/suspend.h>
 #include <linux/lockdep.h>
+#include <linux/tick.h>
 #include <trace/events/power.h>
 
 #include "smpboot.h"
@@ -58,20 +59,23 @@ static int cpu_hotplug_disabled;
 
 static struct {
 	struct task_struct *active_writer;
-	struct mutex lock; /* Synchronizes accesses to refcount, */
+	/* wait queue to wake up the active_writer */
+	wait_queue_head_t wq;
+	/* verifies that no writer will get active while readers are active */
+	struct mutex lock;
 	/*
 	 * Also blocks the new readers during
 	 * an ongoing cpu hotplug operation.
 	 */
-	int refcount;
+	atomic_t refcount;
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 	struct lockdep_map dep_map;
 #endif
 } cpu_hotplug = {
 	.active_writer = NULL,
+	.wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
 	.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
-	.refcount = 0,
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 	.dep_map = {.name = "cpu_hotplug.lock" },
 #endif
@@ -79,9 +83,12 @@ static struct {
 
 /* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
 #define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
+#define cpuhp_lock_acquire_tryread() \
+				  lock_map_acquire_tryread(&cpu_hotplug.dep_map)
 #define cpuhp_lock_acquire()      lock_map_acquire(&cpu_hotplug.dep_map)
 #define cpuhp_lock_release()      lock_map_release(&cpu_hotplug.dep_map)
 
+
 void get_online_cpus(void)
 {
 	might_sleep();
@@ -89,24 +96,38 @@ void get_online_cpus(void)
 		return;
 	cpuhp_lock_acquire_read();
 	mutex_lock(&cpu_hotplug.lock);
-	cpu_hotplug.refcount++;
+	atomic_inc(&cpu_hotplug.refcount);
 	mutex_unlock(&cpu_hotplug.lock);
-
 }
 EXPORT_SYMBOL_GPL(get_online_cpus);
 
+bool try_get_online_cpus(void)
+{
+	if (cpu_hotplug.active_writer == current)
+		return true;
+	if (!mutex_trylock(&cpu_hotplug.lock))
+		return false;
+	cpuhp_lock_acquire_tryread();
+	atomic_inc(&cpu_hotplug.refcount);
+	mutex_unlock(&cpu_hotplug.lock);
+	return true;
+}
+EXPORT_SYMBOL_GPL(try_get_online_cpus);
+
 void put_online_cpus(void)
 {
+	int refcount;
+
 	if (cpu_hotplug.active_writer == current)
 		return;
-	mutex_lock(&cpu_hotplug.lock);
 
-	if (WARN_ON(!cpu_hotplug.refcount))
-		cpu_hotplug.refcount++; /* try to fix things up */
+	refcount = atomic_dec_return(&cpu_hotplug.refcount);
+	if (WARN_ON(refcount < 0)) /* try to fix things up */
+		atomic_inc(&cpu_hotplug.refcount);
+
+	if (refcount <= 0 && waitqueue_active(&cpu_hotplug.wq))
+		wake_up(&cpu_hotplug.wq);
 
-	if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
-		wake_up_process(cpu_hotplug.active_writer);
-	mutex_unlock(&cpu_hotplug.lock);
 	cpuhp_lock_release();
 
 }
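The try_get_online_cpus() added above is the non-blocking counterpart of get_online_cpus(): instead of sleeping on cpu_hotplug.lock while a writer is active, it fails and lets the caller retry later. A minimal usage sketch follows; the caller and its per-CPU work are hypothetical, only try_get_online_cpus() and put_online_cpus() come from this file.

	/* Hypothetical caller that must not block on CPU hotplug. */
	static void poke_online_cpus(void)
	{
		int cpu;

		if (!try_get_online_cpus())
			return;		/* hotplug writer active; retry later */

		for_each_online_cpu(cpu)	/* stand-in per-CPU work */
			pr_info("cpu %d is online\n", cpu);

		put_online_cpus();
	}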
@@ -136,17 +157,20 @@ EXPORT_SYMBOL_GPL(put_online_cpus);
  */
 void cpu_hotplug_begin(void)
 {
-	cpu_hotplug.active_writer = current;
+	DEFINE_WAIT(wait);
 
+	cpu_hotplug.active_writer = current;
 	cpuhp_lock_acquire();
+
 	for (;;) {
 		mutex_lock(&cpu_hotplug.lock);
-		if (likely(!cpu_hotplug.refcount))
-			break;
-		__set_current_state(TASK_UNINTERRUPTIBLE);
+		prepare_to_wait(&cpu_hotplug.wq, &wait, TASK_UNINTERRUPTIBLE);
+		if (likely(!atomic_read(&cpu_hotplug.refcount)))
+				break;
 		mutex_unlock(&cpu_hotplug.lock);
 		schedule();
 	}
+	finish_wait(&cpu_hotplug.wq, &wait);
 }
 
 void cpu_hotplug_done(void)
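In cpu_hotplug_begin() the writer now sleeps on cpu_hotplug.wq rather than being woken directly through wake_up_process(cpu_hotplug.active_writer). The ordering is what makes this safe: prepare_to_wait() queues the writer and sets TASK_UNINTERRUPTIBLE before the refcount is re-checked, so a wake_up() issued by put_online_cpus() between the check and schedule() simply makes the writer runnable again and schedule() returns at once; no wakeup can be lost. Distilled to its generic shape (wq and condition stand in for cpu_hotplug.wq and the refcount test):

	DEFINE_WAIT(wait);

	for (;;) {
		prepare_to_wait(&wq, &wait, TASK_UNINTERRUPTIBLE);
		if (condition)	/* checked only after queueing ourselves */
			break;
		schedule();
	}
	finish_wait(&wq, &wait);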
@@ -274,21 +298,28 @@ void clear_tasks_mm_cpumask(int cpu)
 	rcu_read_unlock();
 }
 
-static inline void check_for_tasks(int cpu)
+static inline void check_for_tasks(int dead_cpu)
 {
-	struct task_struct *p;
-	cputime_t utime, stime;
+	struct task_struct *g, *p;
 
-	write_lock_irq(&tasklist_lock);
-	for_each_process(p) {
-		task_cputime(p, &utime, &stime);
-		if (task_cpu(p) == cpu && p->state == TASK_RUNNING &&
-		    (utime || stime))
-			pr_warn("Task %s (pid = %d) is on cpu %d (state = %ld, flags = %x)\n",
-				p->comm, task_pid_nr(p), cpu,
-				p->state, p->flags);
-	}
-	write_unlock_irq(&tasklist_lock);
+	read_lock_irq(&tasklist_lock);
+	do_each_thread(g, p) {
+		if (!p->on_rq)
+			continue;
+		/*
+		 * We do the check with unlocked task_rq(p)->lock.
+		 * Order the reading so that we do not warn about a
+		 * task that ran on this cpu in the past and has just
+		 * been woken on another cpu.
+		 */
+		rmb();
+		if (task_cpu(p) != dead_cpu)
+			continue;
+
+		pr_warn("Task %s (pid=%d) is on cpu %d (state=%ld, flags=%x)\n",
+			p->comm, task_pid_nr(p), dead_cpu, p->state, p->flags);
+	} while_each_thread(g, p);
+	read_unlock_irq(&tasklist_lock);
 }
 
 struct take_cpu_down_param {
@@ -308,6 +339,8 @@ static int __ref take_cpu_down(void *_param)
 		return err;
 
 	cpu_notify(CPU_DYING | param->mod, param->hcpu);
+	/* Give up timekeeping duties */
+	tick_handover_do_timer();
 	/* Park the stopper thread */
 	kthread_park(current);
 	return 0;
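tick_handover_do_timer() lets the dying CPU pass the tick_do_timer_cpu duty to a surviving CPU before it parks its stopper thread. For reference, the helper (which lives in kernel/time/tick-common.c, not in this diff) is roughly:

	/* Sketch of the helper from kernel/time/tick-common.c (this era). */
	void tick_handover_do_timer(void)
	{
		if (tick_do_timer_cpu == smp_processor_id()) {
			int cpu = cpumask_first(cpu_online_mask);

			tick_do_timer_cpu = (cpu < nr_cpu_ids) ? cpu :
				TICK_DO_TIMER_NONE;
		}
	}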
@@ -378,13 +411,17 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
 	 *
 	 * Wait for the stop thread to go away.
 	 */
-	while (!idle_cpu(cpu))
+	while (!per_cpu(cpu_dead_idle, cpu))
 		cpu_relax();
+	smp_mb(); /* Read from cpu_dead_idle before __cpu_die(). */
+	per_cpu(cpu_dead_idle, cpu) = false;
 
+	hotplug_cpu__broadcast_tick_pull(cpu);
 	/* This actually kills the CPU. */
 	__cpu_die(cpu);
 
 	/* CPU is completely dead: tell everyone.  Too late to complain. */
+	tick_cleanup_dead_cpu(cpu);
 	cpu_notify_nofail(CPU_DEAD | mod, hcpu);
 
 	check_for_tasks(cpu);
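_cpu_down() now waits for the per-CPU cpu_dead_idle flag instead of polling idle_cpu(). The flag is set on the other side of the handshake, in the idle loop of the outgoing CPU; a simplified sketch of that side, assuming it matches the kernel/sched/idle.c code of this era (surrounding detail elided):

	/* Dying-CPU side of the handshake (simplified; not in this diff). */
	if (cpu_is_offline(smp_processor_id())) {
		smp_mb();	/* All idle-loop activity before the flag. */
		this_cpu_write(cpu_dead_idle, true);
		arch_cpu_idle_dead();	/* Does not return. */
	}

The smp_mb() in _cpu_down() above pairs with the barrier here, so everything the dying CPU did before setting cpu_dead_idle is visible before __cpu_die() runs.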
@@ -416,6 +453,37 @@ out:
 EXPORT_SYMBOL(cpu_down);
 #endif /*CONFIG_HOTPLUG_CPU*/
 
+/*
+ * Unpark per-CPU smpboot kthreads at CPU-online time.
+ */
+static int smpboot_thread_call(struct notifier_block *nfb,
+			       unsigned long action, void *hcpu)
+{
+	int cpu = (long)hcpu;
+
+	switch (action & ~CPU_TASKS_FROZEN) {
+
+	case CPU_ONLINE:
+		smpboot_unpark_threads(cpu);
+		break;
+
+	default:
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block smpboot_thread_notifier = {
+	.notifier_call = smpboot_thread_call,
+	.priority = CPU_PRI_SMPBOOT,
+};
+
+void smpboot_thread_init(void)
+{
+	register_cpu_notifier(&smpboot_thread_notifier);
+}
+
 /* Requires cpu_add_remove_lock to be held */
 static int _cpu_up(unsigned int cpu, int tasks_frozen)
 {
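CPU_PRI_SMPBOOT fixes where this notifier runs in the CPU_ONLINE chain, and smpboot_thread_init() must be called once during boot, before any secondary CPU can come online. The real call site is outside this diff; the snippet below only illustrates the required ordering.

	/* Hypothetical early-boot sequence; only the ordering matters. */
	void __init hypothetical_smp_bringup(void)
	{
		smpboot_thread_init();	/* register the CPU_ONLINE notifier */
		smp_init();		/* onlining CPUs now unparks their kthreads */
	}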
@@ -455,9 +523,6 @@ static int _cpu_up(unsigned int cpu, int tasks_frozen)
 		goto out_notify;
 	BUG_ON(!cpu_online(cpu));
 
-	/* Wake the per cpu threads */
-	smpboot_unpark_threads(cpu);
-
 	/* Now call notifier in preparation. */
 	cpu_notify(CPU_ONLINE | mod, hcpu);
 