author     Linus Torvalds <torvalds@linux-foundation.org>   2009-12-10 12:35:44 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>   2009-12-10 12:35:44 -0500
commit     d71cb81af3817193bc605de061da0499934263a6 (patch)
tree       f7ff95e0cf0cdf00234be29ba4050135314ab859
parent     ab1831b0b87851c874a75e4b3a8538e3d76b37d7 (diff)
parent     dc186ad741c12ae9ecac8b89e317ef706fdaf8f6 (diff)
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq:
  workqueue: Add debugobjects support
-rw-r--r--  arch/x86/kernel/smpboot.c  |   4
-rw-r--r--  include/linux/workqueue.h  |  38
-rw-r--r--  kernel/workqueue.c         | 131
-rw-r--r--  lib/Kconfig.debug          |   8
4 files changed, 166 insertions(+), 15 deletions(-)
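For orientation: the smpboot.c and workqueue.c hunks below all follow the same pattern for work items that live on the stack — initialize with INIT_WORK_ON_STACK() and call destroy_work_on_stack() once the work is guaranteed to be finished, so debugobjects can track the object's lifetime. A minimal sketch of that pattern, for illustration only (not part of this commit; stack_ctx, my_func and run_on_stack_work are made-up names):

#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/kernel.h>

struct stack_ctx {
	struct work_struct work;
	struct completion  done;
};

static void my_func(struct work_struct *work)
{
	struct stack_ctx *ctx = container_of(work, struct stack_ctx, work);

	/* ... deferred processing ... */
	complete(&ctx->done);
}

static void run_on_stack_work(void)
{
	struct stack_ctx ctx = {
		.done = COMPLETION_INITIALIZER_ONSTACK(ctx.done),
	};

	/* Registers the object via debug_object_init_on_stack() when
	 * CONFIG_DEBUG_OBJECTS_WORK=y; plain INIT_WORK() is meant for
	 * objects that are not on the stack. */
	INIT_WORK_ON_STACK(&ctx.work, my_func);
	schedule_work(&ctx.work);
	wait_for_completion(&ctx.done);

	/* Must run before the stack frame disappears so debugobjects
	 * forgets about &ctx.work (a no-op without the config option). */
	destroy_work_on_stack(&ctx.work);
}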
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 324f2a44c221..29e6744f51e3 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -687,7 +687,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
 		.done	= COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
 	};
 
-	INIT_WORK(&c_idle.work, do_fork_idle);
+	INIT_WORK_ON_STACK(&c_idle.work, do_fork_idle);
 
 	alternatives_smp_switch(1);
 
@@ -713,6 +713,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
 
 	if (IS_ERR(c_idle.idle)) {
 		printk("failed fork for CPU %d\n", cpu);
+		destroy_work_on_stack(&c_idle.work);
 		return PTR_ERR(c_idle.idle);
 	}
 
@@ -831,6 +832,7 @@ do_rest:
 		smpboot_restore_warm_reset_vector();
 	}
 
+	destroy_work_on_stack(&c_idle.work);
 	return boot_error;
 }
 
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index cf24c20de9e4..9466e860d8c2 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -25,6 +25,7 @@ typedef void (*work_func_t)(struct work_struct *work);
 struct work_struct {
 	atomic_long_t data;
 #define WORK_STRUCT_PENDING 0		/* T if work item pending execution */
+#define WORK_STRUCT_STATIC  1		/* static initializer (debugobjects) */
 #define WORK_STRUCT_FLAG_MASK (3UL)
 #define WORK_STRUCT_WQ_DATA_MASK (~WORK_STRUCT_FLAG_MASK)
 	struct list_head entry;
@@ -35,6 +36,7 @@ struct work_struct {
 };
 
 #define WORK_DATA_INIT()	ATOMIC_LONG_INIT(0)
+#define WORK_DATA_STATIC_INIT()	ATOMIC_LONG_INIT(2)
 
 struct delayed_work {
 	struct work_struct work;
@@ -63,7 +65,7 @@ struct execute_work {
 #endif
 
 #define __WORK_INITIALIZER(n, f) {				\
-	.data = WORK_DATA_INIT(),				\
+	.data = WORK_DATA_STATIC_INIT(),			\
 	.entry	= { &(n).entry, &(n).entry },			\
 	.func = (f),						\
 	__WORK_INIT_LOCKDEP_MAP(#n, &(n))			\
@@ -91,6 +93,14 @@ struct execute_work {
 #define PREPARE_DELAYED_WORK(_work, _func)			\
 	PREPARE_WORK(&(_work)->work, (_func))
 
+#ifdef CONFIG_DEBUG_OBJECTS_WORK
+extern void __init_work(struct work_struct *work, int onstack);
+extern void destroy_work_on_stack(struct work_struct *work);
+#else
+static inline void __init_work(struct work_struct *work, int onstack) { }
+static inline void destroy_work_on_stack(struct work_struct *work) { }
+#endif
+
 /*
  * initialize all of a work item in one go
  *
@@ -99,24 +109,36 @@ struct execute_work {
  * to generate better code.
  */
 #ifdef CONFIG_LOCKDEP
-#define INIT_WORK(_work, _func)						\
+#define __INIT_WORK(_work, _func, _onstack)				\
 	do {								\
 		static struct lock_class_key __key;			\
 									\
+		__init_work((_work), _onstack);				\
 		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
 		lockdep_init_map(&(_work)->lockdep_map, #_work, &__key, 0);\
 		INIT_LIST_HEAD(&(_work)->entry);			\
 		PREPARE_WORK((_work), (_func));				\
 	} while (0)
 #else
-#define INIT_WORK(_work, _func)						\
+#define __INIT_WORK(_work, _func, _onstack)				\
 	do {								\
+		__init_work((_work), _onstack);				\
 		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
 		INIT_LIST_HEAD(&(_work)->entry);			\
 		PREPARE_WORK((_work), (_func));				\
 	} while (0)
 #endif
 
+#define INIT_WORK(_work, _func)					\
+	do {							\
+		__INIT_WORK((_work), (_func), 0);		\
+	} while (0)
+
+#define INIT_WORK_ON_STACK(_work, _func)			\
+	do {							\
+		__INIT_WORK((_work), (_func), 1);		\
+	} while (0)
+
 #define INIT_DELAYED_WORK(_work, _func)				\
 	do {							\
 		INIT_WORK(&(_work)->work, (_func));		\
@@ -125,22 +147,16 @@ struct execute_work {
 
 #define INIT_DELAYED_WORK_ON_STACK(_work, _func)		\
 	do {							\
-		INIT_WORK(&(_work)->work, (_func));		\
+		INIT_WORK_ON_STACK(&(_work)->work, (_func));	\
 		init_timer_on_stack(&(_work)->timer);		\
 	} while (0)
 
 #define INIT_DELAYED_WORK_DEFERRABLE(_work, _func)		\
 	do {							\
 		INIT_WORK(&(_work)->work, (_func));		\
 		init_timer_deferrable(&(_work)->timer);		\
 	} while (0)
 
-#define INIT_DELAYED_WORK_ON_STACK(_work, _func)		\
-	do {							\
-		INIT_WORK(&(_work)->work, (_func));		\
-		init_timer_on_stack(&(_work)->timer);		\
-	} while (0)
-
 /**
  * work_pending - Find out whether a work item is currently pending
  * @work: The work item in question
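The WORK_DATA_STATIC_INIT()/WORK_STRUCT_STATIC changes above mean a statically defined work item now starts life with bit 1 set in its data word, which is what work_fixup_activate() in kernel/workqueue.c tests to recognize objects that debugobjects has never been told about. A rough illustration, not part of the patch (my_static_work and my_handler are made-up names):

static void my_handler(struct work_struct *work);

/* DECLARE_WORK() uses __WORK_INITIALIZER(), so after this patch the
 * object's data word starts as ATOMIC_LONG_INIT(2), i.e. with the
 * WORK_STRUCT_STATIC bit set: */
static DECLARE_WORK(my_static_work, my_handler);

/*
 * Roughly what the initializer now produces (lockdep field omitted):
 *
 *	static struct work_struct my_static_work = {
 *		.data  = WORK_DATA_STATIC_INIT(),	// ATOMIC_LONG_INIT(2)
 *		.entry = { &my_static_work.entry, &my_static_work.entry },
 *		.func  = my_handler,
 *	};
 *
 * When this work is first queued, debugobjects sees an unknown object;
 * work_fixup_activate() finds WORK_STRUCT_STATIC set and silently
 * registers it instead of warning.
 */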
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 67e526b6ae81..dee48658805c 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -68,6 +68,116 @@ struct workqueue_struct {
 #endif
 };
 
+#ifdef CONFIG_DEBUG_OBJECTS_WORK
+
+static struct debug_obj_descr work_debug_descr;
+
+/*
+ * fixup_init is called when:
+ * - an active object is initialized
+ */
+static int work_fixup_init(void *addr, enum debug_obj_state state)
+{
+	struct work_struct *work = addr;
+
+	switch (state) {
+	case ODEBUG_STATE_ACTIVE:
+		cancel_work_sync(work);
+		debug_object_init(work, &work_debug_descr);
+		return 1;
+	default:
+		return 0;
+	}
+}
+
+/*
+ * fixup_activate is called when:
+ * - an active object is activated
+ * - an unknown object is activated (might be a statically initialized object)
+ */
+static int work_fixup_activate(void *addr, enum debug_obj_state state)
+{
+	struct work_struct *work = addr;
+
+	switch (state) {
+
+	case ODEBUG_STATE_NOTAVAILABLE:
+		/*
+		 * This is not really a fixup. The work struct was
+		 * statically initialized. We just make sure that it
+		 * is tracked in the object tracker.
+		 */
+		if (test_bit(WORK_STRUCT_STATIC, work_data_bits(work))) {
+			debug_object_init(work, &work_debug_descr);
+			debug_object_activate(work, &work_debug_descr);
+			return 0;
+		}
+		WARN_ON_ONCE(1);
+		return 0;
+
+	case ODEBUG_STATE_ACTIVE:
+		WARN_ON(1);
+
+	default:
+		return 0;
+	}
+}
+
+/*
+ * fixup_free is called when:
+ * - an active object is freed
+ */
+static int work_fixup_free(void *addr, enum debug_obj_state state)
+{
+	struct work_struct *work = addr;
+
+	switch (state) {
+	case ODEBUG_STATE_ACTIVE:
+		cancel_work_sync(work);
+		debug_object_free(work, &work_debug_descr);
+		return 1;
+	default:
+		return 0;
+	}
+}
+
+static struct debug_obj_descr work_debug_descr = {
+	.name		= "work_struct",
+	.fixup_init	= work_fixup_init,
+	.fixup_activate	= work_fixup_activate,
+	.fixup_free	= work_fixup_free,
+};
+
+static inline void debug_work_activate(struct work_struct *work)
+{
+	debug_object_activate(work, &work_debug_descr);
+}
+
+static inline void debug_work_deactivate(struct work_struct *work)
+{
+	debug_object_deactivate(work, &work_debug_descr);
+}
+
+void __init_work(struct work_struct *work, int onstack)
+{
+	if (onstack)
+		debug_object_init_on_stack(work, &work_debug_descr);
+	else
+		debug_object_init(work, &work_debug_descr);
+}
+EXPORT_SYMBOL_GPL(__init_work);
+
+void destroy_work_on_stack(struct work_struct *work)
+{
+	debug_object_free(work, &work_debug_descr);
+}
+EXPORT_SYMBOL_GPL(destroy_work_on_stack);
+
+#else
+static inline void debug_work_activate(struct work_struct *work) { }
+static inline void debug_work_deactivate(struct work_struct *work) { }
+#endif
+
 /* Serializes the accesses to the list of workqueues. */
 static DEFINE_SPINLOCK(workqueue_lock);
 static LIST_HEAD(workqueues);
@@ -145,6 +255,7 @@ static void __queue_work(struct cpu_workqueue_struct *cwq,
 {
 	unsigned long flags;
 
+	debug_work_activate(work);
 	spin_lock_irqsave(&cwq->lock, flags);
 	insert_work(cwq, work, &cwq->worklist);
 	spin_unlock_irqrestore(&cwq->lock, flags);
@@ -280,6 +391,7 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
 		struct lockdep_map lockdep_map = work->lockdep_map;
 #endif
 		trace_workqueue_execution(cwq->thread, work);
+		debug_work_deactivate(work);
 		cwq->current_work = work;
 		list_del_init(cwq->worklist.next);
 		spin_unlock_irq(&cwq->lock);
@@ -350,11 +462,18 @@ static void wq_barrier_func(struct work_struct *work)
 static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
 			struct wq_barrier *barr, struct list_head *head)
 {
-	INIT_WORK(&barr->work, wq_barrier_func);
+	/*
+	 * debugobject calls are safe here even with cwq->lock locked
+	 * as we know for sure that this will not trigger any of the
+	 * checks and call back into the fixup functions where we
+	 * might deadlock.
+	 */
+	INIT_WORK_ON_STACK(&barr->work, wq_barrier_func);
 	__set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));
 
 	init_completion(&barr->done);
 
+	debug_work_activate(&barr->work);
 	insert_work(cwq, &barr->work, head);
 }
 
@@ -372,8 +491,10 @@ static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
 	}
 	spin_unlock_irq(&cwq->lock);
 
-	if (active)
+	if (active) {
 		wait_for_completion(&barr.done);
+		destroy_work_on_stack(&barr.work);
+	}
 
 	return active;
 }
@@ -451,6 +572,7 @@ out:
 	return 0;
 
 	wait_for_completion(&barr.done);
+	destroy_work_on_stack(&barr.work);
 	return 1;
 }
 EXPORT_SYMBOL_GPL(flush_work);
@@ -485,6 +607,7 @@ static int try_to_grab_pending(struct work_struct *work)
 	 */
 	smp_rmb();
 	if (cwq == get_wq_data(work)) {
+		debug_work_deactivate(work);
 		list_del_init(&work->entry);
 		ret = 1;
 	}
@@ -507,8 +630,10 @@ static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq,
 	}
 	spin_unlock_irq(&cwq->lock);
 
-	if (unlikely(running))
+	if (unlikely(running)) {
 		wait_for_completion(&barr.done);
+		destroy_work_on_stack(&barr.work);
+	}
 }
 
 static void wait_on_work(struct work_struct *work)
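The fixup hooks added above exist to repair misuse at runtime rather than just report it. One case they cover, sketched purely for illustration (racy_work, racy_handler and misuse_example are made-up names): re-initializing a work item whose previous instance may still be queued.

static void racy_handler(struct work_struct *work)
{
	/* ... */
}

static struct work_struct racy_work;

static void misuse_example(void)
{
	INIT_WORK(&racy_work, racy_handler);
	schedule_work(&racy_work);

	/*
	 * Bug: re-initializing while the first instance may still be
	 * queued. If it is, the object is in ODEBUG_STATE_ACTIVE here,
	 * so with CONFIG_DEBUG_OBJECTS_WORK the __init_work() call ends
	 * up in work_fixup_init(), which cancel_work_sync()s the pending
	 * work and re-registers the object instead of leaving a
	 * corrupted list entry behind.
	 */
	INIT_WORK(&racy_work, racy_handler);
}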
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 96c620a0c59c..2f22cf4576db 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -298,6 +298,14 @@ config DEBUG_OBJECTS_TIMERS
 	  timer routines to track the life time of timer objects and
 	  validate the timer operations.
 
+config DEBUG_OBJECTS_WORK
+	bool "Debug work objects"
+	depends on DEBUG_OBJECTS
+	help
+	  If you say Y here, additional code will be inserted into the
+	  work queue routines to track the life time of work objects and
+	  validate the work operations.
+
 config DEBUG_OBJECTS_ENABLE_DEFAULT
 	int "debug_objects bootup default value (0-1)"
 	range 0 1