-rw-r--r--   include/linux/lockdep.h   |  8
-rw-r--r--   include/linux/workqueue.h | 49
-rw-r--r--   kernel/lockdep.c          |  2
-rw-r--r--   kernel/workqueue.c        | 36
4 files changed, 88 insertions, 7 deletions
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index f6279f68a827..4c4d236ded18 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -276,6 +276,14 @@ extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
 			(lock)->dep_map.key, sub)
 
 /*
+ * To initialize a lockdep_map statically use this macro.
+ * Note that _name must not be NULL.
+ */
+#define STATIC_LOCKDEP_MAP_INIT(_name, _key) \
+	{ .name = (_name), .key = (void *)(_key), }
+
+
+/*
  * Acquire a lock.
  *
  * Values for "read":
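STATIC_LOCKDEP_MAP_INIT() fills in only the class name and key; the class itself is registered lazily on first use, which is what makes the map usable from a static initializer where lockdep_init_map() cannot run. A minimal sketch of such a consumer, assuming a CONFIG_LOCKDEP build (the struct and names are illustrative, not part of the patch):

#include <linux/lockdep.h>

struct my_obj {
	struct lockdep_map dep_map;	/* annotation only, no real lock */
	int payload;
};

/*
 * The key only has to be a unique address that outlives the map;
 * for a static object its own address will do, which is exactly
 * the trick __WORK_INITIALIZER() uses below. The name must not
 * be NULL.
 */
static struct my_obj obj = {
	.dep_map = STATIC_LOCKDEP_MAP_INIT("obj", &obj),
	.payload = 0,
};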
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index ce6badc98f6d..7daafdc2514b 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -8,6 +8,7 @@
 #include <linux/timer.h>
 #include <linux/linkage.h>
 #include <linux/bitops.h>
+#include <linux/lockdep.h>
 #include <asm/atomic.h>
 
 struct workqueue_struct;
@@ -28,6 +29,9 @@ struct work_struct {
 #define WORK_STRUCT_WQ_DATA_MASK (~WORK_STRUCT_FLAG_MASK)
 	struct list_head entry;
 	work_func_t func;
+#ifdef CONFIG_LOCKDEP
+	struct lockdep_map lockdep_map;
+#endif
 };
 
 #define WORK_DATA_INIT()	ATOMIC_LONG_INIT(0)
@@ -41,10 +45,23 @@ struct execute_work {
 	struct work_struct work;
 };
 
+#ifdef CONFIG_LOCKDEP
+/*
+ * NB: because we have to copy the lockdep_map, setting _key
+ * here is required, otherwise it could get initialised to the
+ * copy of the lockdep_map!
+ */
+#define __WORK_INIT_LOCKDEP_MAP(n, k) \
+	.lockdep_map = STATIC_LOCKDEP_MAP_INIT(n, k),
+#else
+#define __WORK_INIT_LOCKDEP_MAP(n, k)
+#endif
+
 #define __WORK_INITIALIZER(n, f) {				\
 	.data = WORK_DATA_INIT(),				\
 	.entry	= { &(n).entry, &(n).entry },			\
 	.func = (f),						\
+	__WORK_INIT_LOCKDEP_MAP(#n, &(n))			\
 	}
 
 #define __DELAYED_WORK_INITIALIZER(n, f) {			\
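For reference, DECLARE_WORK(n, f) defines `struct work_struct n = __WORK_INITIALIZER(n, f)`, so with this hunk a static work item carries a map named after the variable and keyed by the variable's address. A rough sketch of the expansion under CONFIG_LOCKDEP (my_work and my_func are placeholders):

static void my_func(struct work_struct *work);

/* DECLARE_WORK(my_work, my_func) expands to approximately: */
struct work_struct my_work = {
	.data = WORK_DATA_INIT(),
	.entry = { &my_work.entry, &my_work.entry },
	.func = my_func,
	/*
	 * Per the NB above: the key must be the original object's
	 * address, because run_workqueue() works on a copy of the
	 * map and must not register the copy as the class.
	 */
	.lockdep_map = STATIC_LOCKDEP_MAP_INIT("my_work", &my_work),
};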
@@ -76,12 +93,24 @@ struct execute_work {
  * assignment of the work data initializer allows the compiler
  * to generate better code.
  */
+#ifdef CONFIG_LOCKDEP
+#define INIT_WORK(_work, _func)						\
+	do {								\
+		static struct lock_class_key __key;			\
+									\
+		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
+		lockdep_init_map(&(_work)->lockdep_map, #_work, &__key, 0);\
+		INIT_LIST_HEAD(&(_work)->entry);			\
+		PREPARE_WORK((_work), (_func));				\
+	} while (0)
+#else
 #define INIT_WORK(_work, _func)						\
 	do {								\
 		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
 		INIT_LIST_HEAD(&(_work)->entry);			\
 		PREPARE_WORK((_work), (_func));				\
 	} while (0)
+#endif
 
 #define INIT_DELAYED_WORK(_work, _func)				\
 	do {							\
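The `static struct lock_class_key __key` in the lockdep flavour of INIT_WORK() is instantiated once per expansion, so every call site yields its own lock class: work items initialized in different places never share dependency state even though they pass through identical code. Illustrative only (all names invented):

static struct work_struct wa, wb;

static void fa(struct work_struct *w) { /* ... */ }
static void fb(struct work_struct *w) { /* ... */ }

static int __init demo_init(void)
{
	INIT_WORK(&wa, fa);	/* class named "&wa", keyed by this site's __key */
	INIT_WORK(&wb, fb);	/* a second, independent class "&wb" */
	return 0;
}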
@@ -118,9 +147,23 @@ struct execute_work {
 	clear_bit(WORK_STRUCT_PENDING, work_data_bits(work))
 
 
-extern struct workqueue_struct *__create_workqueue(const char *name,
-						    int singlethread,
-						    int freezeable);
+extern struct workqueue_struct *
+__create_workqueue_key(const char *name, int singlethread,
+		       int freezeable, struct lock_class_key *key);
+
+#ifdef CONFIG_LOCKDEP
+#define __create_workqueue(name, singlethread, freezeable)	\
+({								\
+	static struct lock_class_key __key;			\
+								\
+	__create_workqueue_key((name), (singlethread),		\
+			       (freezeable), &__key);		\
+})
+#else
+#define __create_workqueue(name, singlethread, freezeable)	\
+	__create_workqueue_key((name), (singlethread), (freezeable), NULL)
+#endif
+
 #define create_workqueue(name) __create_workqueue((name), 0, 0)
 #define create_freezeable_workqueue(name) __create_workqueue((name), 1, 1)
 #define create_singlethread_workqueue(name) __create_workqueue((name), 1, 0)
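Callers of the create_workqueue() wrappers need no changes: under CONFIG_LOCKDEP the statement expression plants a static key at each call site and hands it down, so every workqueue created at a distinct site gets its own class. Roughly, `create_workqueue("myq")` becomes:

struct workqueue_struct *wq = ({
	static struct lock_class_key __key;	/* unique to this call site */
	__create_workqueue_key("myq", 0, 0, &__key);
});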
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index a6f1ee9c92d9..b5392ff7e6a6 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -1521,7 +1521,7 @@ cache_hit:
 }
 
 static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
-	       	struct held_lock *hlock, int chain_head, u64 chain_key)
+		struct held_lock *hlock, int chain_head, u64 chain_key)
 {
 	/*
 	 * Trylock needs to maintain the stack of held locks, but it
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index e080d1d744cc..d1916fea7108 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -32,6 +32,7 @@
 #include <linux/freezer.h>
 #include <linux/kallsyms.h>
 #include <linux/debug_locks.h>
+#include <linux/lockdep.h>
 
 /*
  * The per-CPU workqueue (if single thread, we always use the first
@@ -61,6 +62,9 @@ struct workqueue_struct {
 	const char *name;
 	int singlethread;
 	int freezeable;		/* Freeze threads during suspend */
+#ifdef CONFIG_LOCKDEP
+	struct lockdep_map lockdep_map;
+#endif
 };
 
 /* All the per-cpu workqueues on the system, for hotplug cpu to add/remove
@@ -250,6 +254,17 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
 		struct work_struct *work = list_entry(cwq->worklist.next,
 						struct work_struct, entry);
 		work_func_t f = work->func;
+#ifdef CONFIG_LOCKDEP
+		/*
+		 * It is permissible to free the struct work_struct
+		 * from inside the function that is called from it,
+		 * this we need to take into account for lockdep too.
+		 * To avoid bogus "held lock freed" warnings as well
+		 * as problems when looking into work->lockdep_map,
+		 * make a copy and use that here.
+		 */
+		struct lockdep_map lockdep_map = work->lockdep_map;
+#endif
 
 		cwq->current_work = work;
 		list_del_init(cwq->worklist.next);
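The copy guards against a pattern the workqueue API explicitly permits: a handler freeing the object that embeds its own work_struct. Without the copy, the lock_release() after f(work) would dereference freed memory and lockdep could emit bogus "held lock freed" warnings. The (legal) pattern in question, sketched with invented names:

struct whatsit {
	struct work_struct work;
	/* ... */
};

static void whatsit_fn(struct work_struct *w)
{
	struct whatsit *x = container_of(w, struct whatsit, work);

	/* ... use x ... */
	kfree(x);	/* allowed: the work_struct dies inside its own handler */
}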
@@ -257,7 +272,11 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
 
 		BUG_ON(get_wq_data(work) != cwq);
 		work_clear_pending(work);
+		lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
+		lock_acquire(&lockdep_map, 0, 0, 0, 2, _THIS_IP_);
 		f(work);
+		lock_release(&lockdep_map, 1, _THIS_IP_);
+		lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_);
 
 		if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
 			printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
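These acquire/release pairs bracket every handler invocation, so lockdep sees both the workqueue's map and the work item's map "held" across f(work), and any real lock taken inside the handler is recorded as nesting inside them. For example (mutex and handler invented):

static DEFINE_MUTEX(m);

static void handler(struct work_struct *w)
{
	mutex_lock(&m);		/* lockdep records: wq map -> m, work map -> m */
	/* ... */
	mutex_unlock(&m);
}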
@@ -376,6 +395,8 @@ void fastcall flush_workqueue(struct workqueue_struct *wq)
 	int cpu;
 
 	might_sleep();
+	lock_acquire(&wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
+	lock_release(&wq->lockdep_map, 1, _THIS_IP_);
 	for_each_cpu_mask(cpu, *cpu_map)
 		flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
 }
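flush_workqueue() takes no real lock; the empty acquire/release pair just tells lockdep that a flush nests inside whatever the caller already holds. Together with the ordering recorded in run_workqueue(), the classic flush deadlock is now reported before it ever hangs. Continuing the sketch above:

static void teardown(struct workqueue_struct *wq)
{
	mutex_lock(&m);
	flush_workqueue(wq);	/* m -> wq, but handler() gave wq -> m: cycle */
	mutex_unlock(&m);
}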
@@ -446,6 +467,9 @@ static void wait_on_work(struct work_struct *work)
 
 	might_sleep();
 
+	lock_acquire(&work->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
+	lock_release(&work->lockdep_map, 1, _THIS_IP_);
+
 	cwq = get_wq_data(work);
 	if (!cwq)
 		return;
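wait_on_work() underlies cancel_work_sync(), so the same trick also covers waiting for one specific work item while holding a lock its handler takes. Sketch, reusing the handler() above:

static struct work_struct my_work;	/* INIT_WORK(&my_work, handler) elsewhere */

static void stop_it(void)
{
	mutex_lock(&m);
	cancel_work_sync(&my_work);	/* m -> my_work vs. my_work -> m
					 * inside handler(): flagged */
	mutex_unlock(&m);
}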
@@ -695,8 +719,10 @@ static void start_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
 	}
 }
 
-struct workqueue_struct *__create_workqueue(const char *name,
-					    int singlethread, int freezeable)
+struct workqueue_struct *__create_workqueue_key(const char *name,
+						int singlethread,
+						int freezeable,
+						struct lock_class_key *key)
 {
 	struct workqueue_struct *wq;
 	struct cpu_workqueue_struct *cwq;
@@ -713,6 +739,7 @@ struct workqueue_struct *__create_workqueue(const char *name,
 	}
 
 	wq->name = name;
+	lockdep_init_map(&wq->lockdep_map, name, key, 0);
 	wq->singlethread = singlethread;
 	wq->freezeable = freezeable;
 	INIT_LIST_HEAD(&wq->list);
@@ -741,7 +768,7 @@ struct workqueue_struct *__create_workqueue(const char *name,
 	}
 	return wq;
 }
-EXPORT_SYMBOL_GPL(__create_workqueue);
+EXPORT_SYMBOL_GPL(__create_workqueue_key);
 
 static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
 {
@@ -752,6 +779,9 @@ static void cleanup_workqueue_thread(struct cpu_workqueue_struct *cwq, int cpu)
 	if (cwq->thread == NULL)
 		return;
 
+	lock_acquire(&cwq->wq->lockdep_map, 0, 0, 0, 2, _THIS_IP_);
+	lock_release(&cwq->wq->lockdep_map, 1, _THIS_IP_);
+
 	flush_cpu_workqueue(cwq);
 	/*
 	 * If the caller is CPU_DEAD and cwq->worklist was not empty,
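cleanup_workqueue_thread() is reached from destroy_workqueue() and from CPU-hotplug teardown, both of which implicitly flush outstanding work, hence the same acquire/release pair as in flush_workqueue(). Assuming destroy_workqueue() reaches this path, the earlier sketch is caught identically with the explicit flush replaced by destruction:

	mutex_lock(&m);
	destroy_workqueue(wq);	/* flushes internally: same m -> wq cycle */
	mutex_unlock(&m);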