| author | Johannes Berg <johannes@sipsolutions.net> | 2007-10-19 02:39:55 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-10-19 14:53:38 -0400 |
| commit | 4e6045f134784f4b158b3c0f7a282b04bd816887 (patch) | |
| tree | 3304628f666c8524accd10f40da48cfba8b08608 /kernel/workqueue.c | |
| parent | cf7b708c8d1d7a27736771bcf4c457b332b0f818 (diff) | |
workqueue: debug flushing deadlocks with lockdep
In the following scenario:

    code path 1:
      my_function() -> lock(L1); ...; flush_workqueue(); ...

    code path 2:
      run_workqueue() -> my_work() -> ...; lock(L1); ...

you can get a deadlock when my_work() is queued or running but
my_function() has already acquired L1: flush_workqueue() waits for
my_work() to complete, while my_work() waits for L1, which
my_function() will not release until the flush returns.
This patch adds a pseudo-lock to each workqueue to make lockdep
warn about this scenario.
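A minimal sketch of the deadlocking pattern, using the names from the scenario above (my_function(), my_work(), L1) plus a hypothetical workqueue pointer wq on which my_work is assumed to already be queued:

```c
#include <linux/mutex.h>
#include <linux/workqueue.h>

static DEFINE_MUTEX(L1);		/* the lock from the scenario */
static struct workqueue_struct *wq;	/* hypothetical; my_work queued on it */

static void my_work(struct work_struct *work)
{
	mutex_lock(&L1);	/* blocks forever: my_function() holds L1 */
	/* ... */
	mutex_unlock(&L1);
}

static void my_function(void)
{
	mutex_lock(&L1);
	/* ... */
	flush_workqueue(wq);	/* waits for my_work(), which waits for L1 */
	/* ... */
	mutex_unlock(&L1);
}
```

Before this patch, lockdep had no way to see the dependency between L1 and the workqueue, so nothing was reported until the deadlock actually happened.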
[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Johannes Berg <johannes@sipsolutions.net>
Acked-by: Oleg Nesterov <oleg@tv-sign.ru>
Acked-by: Ingo Molnar <mingo@elte.hu>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'kernel/workqueue.c')

    -rw-r--r--  kernel/workqueue.c | 36

1 file changed, 33 insertions(+), 3 deletions(-)
```diff
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index e080d1d744cc..d1916fea7108 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -32,6 +32,7 @@
 #include <linux/freezer.h>
 #include <linux/kallsyms.h>
 #include <linux/debug_locks.h>
+#include <linux/lockdep.h>
 
 /*
  * The per-CPU workqueue (if single thread, we always use the first
```
```diff
@@ -61,6 +62,9 @@ struct workqueue_struct {
 	const char *name;
 	int singlethread;
 	int freezeable;		/* Freeze threads during suspend */
+#ifdef CONFIG_LOCKDEP
+	struct lockdep_map lockdep_map;
+#endif
 };
 
 /* All the per-cpu workqueues on the system, for hotplug cpu to add/remove
```
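The struct lockdep_map added here is a pseudo-lock: it has no locking semantics of its own and exists only so lockdep can record dependencies against the workqueue as if it were a lock. The same pattern can instrument any object that callers may block on; a hedged sketch with hypothetical names (my_flusher, dep_map), mirroring the lockdep_init_map() call that appears later in this patch:

```c
#include <linux/lockdep.h>

struct my_flusher {
#ifdef CONFIG_LOCKDEP
	struct lockdep_map dep_map;	/* pseudo-lock for dependency tracking only */
#endif
	int state;			/* stand-in for the object's real state */
};

static struct lock_class_key my_flusher_key;

static void my_flusher_init(struct my_flusher *f, const char *name)
{
	/* Compiles to a no-op when !CONFIG_LOCKDEP, so no #ifdef is needed here. */
	lockdep_init_map(&f->dep_map, name, &my_flusher_key, 0);
}
```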
```diff
@@ -250,6 +254,17 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
 		struct work_struct *work = list_entry(cwq->worklist.next,
 						struct work_struct, entry);
 		work_func_t f = work->func;
+#ifdef CONFIG_LOCKDEP
+		/*
+		 * It is permissible to free the struct work_struct
+		 * from inside the function that is called from it,
+		 * this we need to take into account for lockdep too.
+		 * To avoid bogus "held lock freed" warnings as well
+		 * as problems when looking into work->lockdep_map,
+		 * make a copy and use that here.
+		 */
+		struct lockdep_map lockdep_map = work->lockdep_map;
+#endif
 
 		cwq->current_work = work;
 		list_del_init(cwq->worklist.next);
```
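The stack copy matters because, as the comment in the hunk says, a work function may free its own work item. A hypothetical example of such a function (my_ctx and self_freeing_work are made-up names):

```c
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct my_ctx {
	struct work_struct work;
	/* ... */
};

static void self_freeing_work(struct work_struct *work)
{
	struct my_ctx *ctx = container_of(work, struct my_ctx, work);

	/* ... do the actual work ... */

	kfree(ctx);	/* frees the embedded work_struct and its lockdep_map;
			 * run_workqueue() therefore must not touch
			 * work->lockdep_map after f(work) returns */
}
```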
```diff
@@ -257,7 +272,11 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
 
 		BUG_ON(get_wq_data(work) != cwq);
 		work_clear_pending(work);
+		lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
+		lock_acquire(&lockdep_map, 0, 0, 0, 2, _THIS_IP_);
 		f(work);
+		lock_release(&lockdep_map, 1, _THIS_IP_);
+		lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_);
 
 		if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
 			printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
```
```diff
@@ -376,6 +395,8 @@ void fastcall flush_workqueue(struct workqueue_struct *wq)
 	int cpu;
 
 	might_sleep();
+	lock_acquire(&wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
+	lock_release(&wq->lockdep_map, 1, _THIS_IP_);
 	for_each_cpu_mask(cpu, *cpu_map)
 		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
 }
```
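Together with the previous hunk this closes the loop: run_workqueue() holds the workqueue's pseudo-lock around f(work), so a work function that takes L1 records the ordering "wq before L1", while a flush under L1 records "L1 before wq", and lockdep reports the cycle. Since lockdep records dependencies at acquire time, the immediate lock_release() here is sufficient; nothing ever really blocks on the pseudo-lock. A sketch of a corrected caller for the changelog scenario, reusing the hypothetical names from the earlier sketch:

```c
static void my_function_fixed(void)
{
	mutex_lock(&L1);
	/* ... */
	mutex_unlock(&L1);	/* drop L1 before waiting on the workqueue */

	flush_workqueue(wq);	/* ordering now consistent with my_work() */
}
```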
```diff
@@ -446,6 +467,9 @@ static void wait_on_work(struct work_struct *work)
 
 	might_sleep();
 
+	lock_acquire(&work->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
+	lock_release(&work->lockdep_map, 1, _THIS_IP_);
+
 	cwq = get_wq_data(work);
 	if (!cwq)
 		return;
```
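wait_on_work() uses a second map, work->lockdep_map, which lives in struct work_struct and is set up by this patch's companion change to include/linux/workqueue.h (not shown here, since the diffstat is limited to kernel/workqueue.c). Presumably the initializer macro mints one lock class per INIT_WORK() call site via a static key; a hedged sketch of that pattern, stripped to the lockdep-relevant steps and under a made-up name (MY_INIT_WORK):

```c
#ifdef CONFIG_LOCKDEP
#define MY_INIT_WORK(_work, _func)					\
	do {								\
		static struct lock_class_key __key;			\
									\
		lockdep_init_map(&(_work)->lockdep_map, #_work,		\
				 &__key, 0);				\
		INIT_LIST_HEAD(&(_work)->entry);			\
		(_work)->func = (_func);				\
	} while (0)
#endif
```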
```diff
@@ -695,8 +719,10 @@ static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
 	}
 }
 
-struct workqueue_struct *__create_workqueue(const char *name,
-					    int singlethread, int freezeable)
+struct workqueue_struct *__create_workqueue_key(const char *name,
+						int singlethread,
+						int freezeable,
+						struct lock_class_key *key)
 {
 	struct workqueue_struct *wq;
 	struct cpu_workqueue_struct *cwq;
@@ -713,6 +739,7 @@ struct workqueue_struct *__create_workqueue(const char *name,
 	}
 
 	wq->name = name;
+	lockdep_init_map(&wq->lockdep_map, name, key, 0);
 	wq->singlethread = singlethread;
 	wq->freezeable = freezeable;
 	INIT_LIST_HEAD(&wq->list);
@@ -741,7 +768,7 @@ struct workqueue_struct *__create_workqueue(const char *name,
 	}
 	return wq;
 }
-EXPORT_SYMBOL_GPL(__create_workqueue);
+EXPORT_SYMBOL_GPL(__create_workqueue_key);
 
 static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
 {
```
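Renaming the function to __create_workqueue_key() would break every existing caller unless __create_workqueue() survives as a wrapper that supplies a key. Presumably the companion header change provides such a macro; a hedged sketch (the real macro is in include/linux/workqueue.h, outside this diffstat, and may differ):

```c
#ifdef CONFIG_LOCKDEP
#define __create_workqueue(name, singlethread, freezeable)	\
({								\
	static struct lock_class_key __key;			\
								\
	__create_workqueue_key((name), (singlethread),		\
			       (freezeable), &__key);		\
})
#else
#define __create_workqueue(name, singlethread, freezeable)	\
	__create_workqueue_key((name), (singlethread),		\
			       (freezeable), NULL)
#endif
```

Because __key is static and local to each macro expansion, every create_workqueue() call site gets its own lockdep class, so flushes of unrelated workqueues are not conflated in dependency reports. (NULL is fine in the !CONFIG_LOCKDEP case, where lockdep_init_map() compiles away.)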
```diff
@@ -752,6 +779,9 @@ static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
 	if (cwq->thread == NULL)
 		return;
 
+	lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
+	lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_);
+
 	flush_cpu_workqueue(cwq);
 	/*
 	 * If the caller is CPU_DEAD and cwq->worklist was not empty,
```