Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--	kernel/workqueue.c	| 63
1 file changed, 29 insertions(+), 34 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 1f0c509b40d3..b6b966ce1451 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -33,6 +33,7 @@
 #include <linux/kallsyms.h>
 #include <linux/debug_locks.h>
 #include <linux/lockdep.h>
+#include <trace/workqueue.h>
 
 /*
  * The per-CPU workqueue (if single thread, we always use the first
@@ -48,8 +49,6 @@ struct cpu_workqueue_struct {
 
 	struct workqueue_struct *wq;
 	struct task_struct *thread;
-
-	int run_depth;		/* Detect run_workqueue() recursion depth */
 } ____cacheline_aligned;
 
 /*
@@ -125,9 +124,13 @@ struct cpu_workqueue_struct *get_wq_data(struct work_struct *work)
 	return (void *) (atomic_long_read(&work->data) & WORK_STRUCT_WQ_DATA_MASK);
 }
 
+DEFINE_TRACE(workqueue_insertion);
+
 static void insert_work(struct cpu_workqueue_struct *cwq,
 			struct work_struct *work, struct list_head *head)
 {
+	trace_workqueue_insertion(cwq->thread, work);
+
 	set_wq_data(work, cwq);
 	/*
 	 * Ensure that we get the right work->data if we see the
@@ -259,16 +262,11 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
 }
 EXPORT_SYMBOL_GPL(queue_delayed_work_on);
 
+DEFINE_TRACE(workqueue_execution);
+
 static void run_workqueue(struct cpu_workqueue_struct *cwq)
 {
 	spin_lock_irq(&cwq->lock);
-	cwq->run_depth++;
-	if (cwq->run_depth > 3) {
-		/* morton gets to eat his hat */
-		printk("%s: recursion depth exceeded: %d\n",
-			__func__, cwq->run_depth);
-		dump_stack();
-	}
 	while (!list_empty(&cwq->worklist)) {
 		struct work_struct *work = list_entry(cwq->worklist.next,
 						struct work_struct, entry);
@@ -284,7 +282,7 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
 		 */
 		struct lockdep_map lockdep_map = work->lockdep_map;
 #endif
-
+		trace_workqueue_execution(cwq->thread, work);
 		cwq->current_work = work;
 		list_del_init(cwq->worklist.next);
 		spin_unlock_irq(&cwq->lock);
@@ -311,7 +309,6 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
 		spin_lock_irq(&cwq->lock);
 		cwq->current_work = NULL;
 	}
-	cwq->run_depth--;
 	spin_unlock_irq(&cwq->lock);
 }
 
@@ -368,29 +365,20 @@ static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
 
 static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
 {
-	int active;
-
-	if (cwq->thread == current) {
-		/*
-		 * Probably keventd trying to flush its own queue. So simply run
-		 * it by hand rather than deadlocking.
-		 */
-		run_workqueue(cwq);
-		active = 1;
-	} else {
-		struct wq_barrier barr;
+	int active = 0;
+	struct wq_barrier barr;
 
-		active = 0;
-		spin_lock_irq(&cwq->lock);
-		if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
-			insert_wq_barrier(cwq, &barr, &cwq->worklist);
-			active = 1;
-		}
-		spin_unlock_irq(&cwq->lock);
+	WARN_ON(cwq->thread == current);
 
-		if (active)
-			wait_for_completion(&barr.done);
+	spin_lock_irq(&cwq->lock);
+	if (!list_empty(&cwq->worklist) || cwq->current_work != NULL) {
+		insert_wq_barrier(cwq, &barr, &cwq->worklist);
+		active = 1;
 	}
+	spin_unlock_irq(&cwq->lock);
+
+	if (active)
+		wait_for_completion(&barr.done);
 
 	return active;
 }
@@ -416,7 +404,7 @@ void flush_workqueue(struct workqueue_struct *wq)
 	might_sleep();
 	lock_map_acquire(&wq->lockdep_map);
 	lock_map_release(&wq->lockdep_map);
-	for_each_cpu_mask_nr(cpu, *cpu_map)
+	for_each_cpu(cpu, cpu_map)
 		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
 }
 EXPORT_SYMBOL_GPL(flush_workqueue);
@@ -547,7 +535,7 @@ static void wait_on_work(struct work_struct *work)
 	wq = cwq->wq;
 	cpu_map = wq_cpu_map(wq);
 
-	for_each_cpu_mask_nr(cpu, *cpu_map)
+	for_each_cpu(cpu, cpu_map)
 		wait_on_cpu_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
 }
 
@@ -765,6 +753,8 @@ init_cpu_workqueue(struct workqueue_struct *wq, int cpu)
 	return cwq;
 }
 
+DEFINE_TRACE(workqueue_creation);
+
 static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
 {
 	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
@@ -787,6 +777,8 @@ static int create_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
 	sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
 	cwq->thread = p;
 
+	trace_workqueue_creation(cwq->thread, cpu);
+
 	return 0;
 }
 
@@ -868,6 +860,8 @@ struct workqueue_struct *__create_workqueue_key(const char *name,
 }
 EXPORT_SYMBOL_GPL(__create_workqueue_key);
 
+DEFINE_TRACE(workqueue_destruction);
+
 static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
 {
 	/*
@@ -891,6 +885,7 @@ static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq)
 	 * checks list_empty(), and a "normal" queue_work() can't use
 	 * a dead CPU.
 	 */
+	trace_workqueue_destruction(cwq->thread);
 	kthread_stop(cwq->thread);
 	cwq->thread = NULL;
 }
@@ -911,7 +906,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
 	list_del(&wq->list);
 	spin_unlock(&workqueue_lock);
 
-	for_each_cpu_mask_nr(cpu, *cpu_map)
+	for_each_cpu(cpu, cpu_map)
 		cleanup_workqueue_thread(per_cpu_ptr(wq->cpu_wq, cpu));
 	cpu_maps_update_done();
 
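Note: the DEFINE_TRACE() statements and trace_workqueue_*() calls added above depend on tracepoint declarations in <trace/workqueue.h>, which is not part of this diff. The sketch below shows how that header would typically declare the four events with DECLARE_TRACE(); the exact prototypes are inferred from the call sites in this patch, not taken from the file itself.

/* include/trace/workqueue.h -- sketch only; prototypes inferred from the call sites above */
#ifndef __TRACE_WORKQUEUE_H
#define __TRACE_WORKQUEUE_H

#include <linux/tracepoint.h>
#include <linux/workqueue.h>
#include <linux/sched.h>

/* A work item was inserted into a workqueue thread's list */
DECLARE_TRACE(workqueue_insertion,
	TP_PROTO(struct task_struct *wq_thread, struct work_struct *work),
	TP_ARGS(wq_thread, work));

/* A work item is about to be executed by the workqueue thread */
DECLARE_TRACE(workqueue_execution,
	TP_PROTO(struct task_struct *wq_thread, struct work_struct *work),
	TP_ARGS(wq_thread, work));

/* A per-cpu workqueue thread was created */
DECLARE_TRACE(workqueue_creation,
	TP_PROTO(struct task_struct *wq_thread, int cpu),
	TP_ARGS(wq_thread, cpu));

/* A workqueue thread is about to be stopped */
DECLARE_TRACE(workqueue_destruction,
	TP_PROTO(struct task_struct *wq_thread),
	TP_ARGS(wq_thread));

#endif /* __TRACE_WORKQUEUE_H */

A tracer could then attach probe functions matching these prototypes through the register_trace_workqueue_*() helpers that DECLARE_TRACE() generates.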