Diffstat (limited to 'kernel/workqueue.c')
 -rw-r--r--  kernel/workqueue.c | 131
 1 file changed, 128 insertions(+), 3 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 67e526b6ae81..dee48658805c 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -68,6 +68,116 @@ struct workqueue_struct {
 #endif
 };
 
+#ifdef CONFIG_DEBUG_OBJECTS_WORK
+
+static struct debug_obj_descr work_debug_descr;
+
+/*
+ * fixup_init is called when:
+ * - an active object is initialized
+ */
+static int work_fixup_init(void *addr, enum debug_obj_state state)
+{
+        struct work_struct *work = addr;
+
+        switch (state) {
+        case ODEBUG_STATE_ACTIVE:
+                cancel_work_sync(work);
+                debug_object_init(work, &work_debug_descr);
+                return 1;
+        default:
+                return 0;
+        }
+}
+
+/*
+ * fixup_activate is called when:
+ * - an active object is activated
+ * - an unknown object is activated (might be a statically initialized object)
+ */
+static int work_fixup_activate(void *addr, enum debug_obj_state state)
+{
+        struct work_struct *work = addr;
+
+        switch (state) {
+
+        case ODEBUG_STATE_NOTAVAILABLE:
+                /*
+                 * This is not really a fixup. The work struct was
+                 * statically initialized. We just make sure that it
+                 * is tracked in the object tracker.
+                 */
+                if (test_bit(WORK_STRUCT_STATIC, work_data_bits(work))) {
+                        debug_object_init(work, &work_debug_descr);
+                        debug_object_activate(work, &work_debug_descr);
+                        return 0;
+                }
+                WARN_ON_ONCE(1);
+                return 0;
+
+        case ODEBUG_STATE_ACTIVE:
+                WARN_ON(1);
+
+        default:
+                return 0;
+        }
+}
+
+/*
+ * fixup_free is called when:
+ * - an active object is freed
+ */
+static int work_fixup_free(void *addr, enum debug_obj_state state)
+{
+        struct work_struct *work = addr;
+
+        switch (state) {
+        case ODEBUG_STATE_ACTIVE:
+                cancel_work_sync(work);
+                debug_object_free(work, &work_debug_descr);
+                return 1;
+        default:
+                return 0;
+        }
+}
+
+static struct debug_obj_descr work_debug_descr = {
+        .name           = "work_struct",
+        .fixup_init     = work_fixup_init,
+        .fixup_activate = work_fixup_activate,
+        .fixup_free     = work_fixup_free,
+};
+
+static inline void debug_work_activate(struct work_struct *work)
+{
+        debug_object_activate(work, &work_debug_descr);
+}
+
+static inline void debug_work_deactivate(struct work_struct *work)
+{
+        debug_object_deactivate(work, &work_debug_descr);
+}
+
+void __init_work(struct work_struct *work, int onstack)
+{
+        if (onstack)
+                debug_object_init_on_stack(work, &work_debug_descr);
+        else
+                debug_object_init(work, &work_debug_descr);
+}
+EXPORT_SYMBOL_GPL(__init_work);
+
+void destroy_work_on_stack(struct work_struct *work)
+{
+        debug_object_free(work, &work_debug_descr);
+}
+EXPORT_SYMBOL_GPL(destroy_work_on_stack);
+
+#else
+static inline void debug_work_activate(struct work_struct *work) { }
+static inline void debug_work_deactivate(struct work_struct *work) { }
+#endif
+
 /* Serializes the accesses to the list of workqueues. */
 static DEFINE_SPINLOCK(workqueue_lock);
 static LIST_HEAD(workqueues);
@@ -145,6 +255,7 @@ static void __queue_work(struct cpu_workqueue_struct *cwq,
 {
        unsigned long flags;
 
+       debug_work_activate(work);
        spin_lock_irqsave(&cwq->lock, flags);
        insert_work(cwq, work, &cwq->worklist);
        spin_unlock_irqrestore(&cwq->lock, flags);
@@ -280,6 +391,7 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
                struct lockdep_map lockdep_map = work->lockdep_map;
 #endif
                trace_workqueue_execution(cwq->thread, work);
+               debug_work_deactivate(work);
                cwq->current_work = work;
                list_del_init(cwq->worklist.next);
                spin_unlock_irq(&cwq->lock);
@@ -350,11 +462,18 @@ static void wq_barrier_func(struct work_struct *work)
 static void insert_wq_barrier(struct cpu_workqueue_struct *cwq,
                               struct wq_barrier *barr, struct list_head *head)
 {
-       INIT_WORK(&barr->work, wq_barrier_func);
+       /*
+        * debugobject calls are safe here even with cwq->lock locked
+        * as we know for sure that this will not trigger any of the
+        * checks and call back into the fixup functions where we
+        * might deadlock.
+        */
+       INIT_WORK_ON_STACK(&barr->work, wq_barrier_func);
        __set_bit(WORK_STRUCT_PENDING, work_data_bits(&barr->work));
 
        init_completion(&barr->done);
 
+       debug_work_activate(&barr->work);
        insert_work(cwq, &barr->work, head);
 }
 
@@ -372,8 +491,10 @@ static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
        }
        spin_unlock_irq(&cwq->lock);
 
-       if (active)
+       if (active) {
                wait_for_completion(&barr.done);
+               destroy_work_on_stack(&barr.work);
+       }
 
        return active;
 }
@@ -451,6 +572,7 @@ out:
                return 0;
 
        wait_for_completion(&barr.done);
+       destroy_work_on_stack(&barr.work);
        return 1;
 }
 EXPORT_SYMBOL_GPL(flush_work);
@@ -485,6 +607,7 @@ static int try_to_grab_pending(struct work_struct *work)
         */
        smp_rmb();
        if (cwq == get_wq_data(work)) {
+               debug_work_deactivate(work);
                list_del_init(&work->entry);
                ret = 1;
        }
@@ -507,8 +630,10 @@ static void wait_on_cpu_work(struct cpu_workqueue_struct *cwq,
        }
        spin_unlock_irq(&cwq->lock);
 
-       if (unlikely(running))
+       if (unlikely(running)) {
                wait_for_completion(&barr.done);
+               destroy_work_on_stack(&barr.work);
+       }
 }
 
 static void wait_on_work(struct work_struct *work)
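
Usage note (not part of the patch): the new __init_work()/destroy_work_on_stack() pair exists for work items that live on the caller's stack, which debugobjects would otherwise report as unknown or leaked objects. Below is a minimal, illustrative sketch of the intended calling pattern. It assumes the companion include/linux/workqueue.h change from the same series, where INIT_WORK_ON_STACK() initializes the item via __init_work(work, 1); the names example_fn() and run_example() are made up for the example, while the workqueue calls are the stock kernel APIs this patch hooks into.

/*
 * Illustrative only -- not from the patch. example_fn/run_example are
 * hypothetical names; the pattern is: init on stack, queue, wait, then
 * tell debugobjects the stack object is going away.
 */
#include <linux/workqueue.h>

static void example_fn(struct work_struct *work)
{
        /* runs in workqueue context; 'work' lives on the submitter's stack */
}

static void run_example(void)
{
        struct work_struct work;

        /*
         * With CONFIG_DEBUG_OBJECTS_WORK this registers the object via
         * debug_object_init_on_stack() instead of tripping a warning.
         */
        INIT_WORK_ON_STACK(&work, example_fn);
        schedule_work(&work);

        /* make sure the handler has finished before the stack frame dies */
        flush_work(&work);

        /*
         * Unregister the on-stack object; skipping this leaves a stale
         * tracked object behind for debugobjects to complain about later.
         */
        destroy_work_on_stack(&work);
}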
