 kernel/workqueue.c | 100 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++---------------------------------------
 1 file changed, 61 insertions(+), 39 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 5c49d762293b..8e3082b76c7f 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -402,51 +402,73 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
 }
 EXPORT_SYMBOL_GPL(queue_delayed_work_on);
 
+/**
+ * process_one_work - process single work
+ * @cwq: cwq to process work for
+ * @work: work to process
+ *
+ * Process @work.  This function contains all the logic necessary to
+ * process a single work including synchronization against and
+ * interaction with other workers on the same cpu, queueing and
+ * flushing.  As long as the context requirement is met, any worker
+ * can call this function to process a work.
+ *
+ * CONTEXT:
+ * spin_lock_irq(cwq->lock) which is released and regrabbed.
+ */
+static void process_one_work(struct cpu_workqueue_struct *cwq,
+			     struct work_struct *work)
+{
+	work_func_t f = work->func;
+#ifdef CONFIG_LOCKDEP
+	/*
+	 * It is permissible to free the struct work_struct from
+	 * inside the function that is called from it; we need to
+	 * take that into account for lockdep too.  To avoid bogus
+	 * "held lock freed" warnings as well as problems when looking
+	 * into work->lockdep_map, make a copy and use that here.
+	 */
+	struct lockdep_map lockdep_map = work->lockdep_map;
+#endif
+	/* claim and process */
+	trace_workqueue_execution(cwq->thread, work);
+	debug_work_deactivate(work);
+	cwq->current_work = work;
+	list_del_init(&work->entry);
+
+	spin_unlock_irq(&cwq->lock);
+
+	BUG_ON(get_wq_data(work) != cwq);
+	work_clear_pending(work);
+	lock_map_acquire(&cwq->wq->lockdep_map);
+	lock_map_acquire(&lockdep_map);
+	f(work);
+	lock_map_release(&lockdep_map);
+	lock_map_release(&cwq->wq->lockdep_map);
+
+	if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
+		printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
+		       "%s/0x%08x/%d\n",
+		       current->comm, preempt_count(), task_pid_nr(current));
+		printk(KERN_ERR "    last function: ");
+		print_symbol("%s\n", (unsigned long)f);
+		debug_show_held_locks(current);
+		dump_stack();
+	}
+
+	spin_lock_irq(&cwq->lock);
+
+	/* we're done with it, release */
+	cwq->current_work = NULL;
+}
+
 static void run_workqueue(struct cpu_workqueue_struct *cwq)
 {
 	spin_lock_irq(&cwq->lock);
 	while (!list_empty(&cwq->worklist)) {
 		struct work_struct *work = list_entry(cwq->worklist.next,
 						struct work_struct, entry);
-		work_func_t f = work->func;
-#ifdef CONFIG_LOCKDEP
-		/*
-		 * It is permissible to free the struct work_struct
-		 * from inside the function that is called from it,
-		 * this we need to take into account for lockdep too.
-		 * To avoid bogus "held lock freed" warnings as well
-		 * as problems when looking into work->lockdep_map,
-		 * make a copy and use that here.
-		 */
-		struct lockdep_map lockdep_map = work->lockdep_map;
-#endif
-		trace_workqueue_execution(cwq->thread, work);
-		debug_work_deactivate(work);
-		cwq->current_work = work;
-		list_del_init(cwq->worklist.next);
-		spin_unlock_irq(&cwq->lock);
-
-		BUG_ON(get_wq_data(work) != cwq);
-		work_clear_pending(work);
-		lock_map_acquire(&cwq->wq->lockdep_map);
-		lock_map_acquire(&lockdep_map);
-		f(work);
-		lock_map_release(&lockdep_map);
-		lock_map_release(&cwq->wq->lockdep_map);
-
-		if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
-			printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
-					"%s/0x%08x/%d\n",
-					current->comm, preempt_count(),
-					task_pid_nr(current));
-			printk(KERN_ERR "    last function: ");
-			print_symbol("%s\n", (unsigned long)f);
-			debug_show_held_locks(current);
-			dump_stack();
-		}
-
-		spin_lock_irq(&cwq->lock);
-		cwq->current_work = NULL;
+		process_one_work(cwq, work);
 	}
 	spin_unlock_irq(&cwq->lock);
 }
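
The CONTEXT note in the new kernel-doc ("spin_lock_irq(cwq->lock) which is released and regrabbed") captures the invariant this refactoring preserves: the queue lock is held while the worklist is manipulated, dropped for the duration of the (possibly sleeping) work function, and retaken before the caller's loop inspects the list again. The sketch below is a minimal user-space pthreads analogue of that drop-and-regrab pattern; it is not the kernel API, and all names (struct wq, struct item, process_one, run_queue) are invented for illustration.

#include <pthread.h>
#include <stdio.h>

struct item {
	struct item *next;
	void (*fn)(void *);
	void *arg;
};

struct wq {
	pthread_mutex_t lock;
	struct item *head;	/* singly linked worklist */
};

/* Analogue of process_one_work(): entered with q->lock held and
 * returns with it held, but drops it around the callback so the
 * callback may block or queue more work. */
static void process_one(struct wq *q, struct item *it)
{
	void (*fn)(void *) = it->fn;
	void *arg = it->arg;

	q->head = it->next;		/* dequeue under the lock */

	pthread_mutex_unlock(&q->lock);
	fn(arg);			/* runs without the lock held */
	pthread_mutex_lock(&q->lock);	/* regrab before the caller
					 * looks at q->head again */
}

/* Analogue of run_workqueue(): the lock nominally covers the whole
 * loop, but each item is executed with it released. */
static void run_queue(struct wq *q)
{
	pthread_mutex_lock(&q->lock);
	while (q->head)
		process_one(q, q->head);
	pthread_mutex_unlock(&q->lock);
}

static void hello(void *arg) { printf("%s\n", (const char *)arg); }

int main(void)
{
	struct item it = { NULL, hello, "one work item" };
	struct wq q = { PTHREAD_MUTEX_INITIALIZER, &it };
	run_queue(&q);
	return 0;
}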
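
The on-stack copy "struct lockdep_map lockdep_map = work->lockdep_map;" exists because, as the comment says, a work function may free its own work_struct, so nothing in *work can be trusted after f(work) returns; that is also why the leaked-lock check afterwards prints the copied function pointer f rather than dereferencing work again. The self-contained C sketch below shows the same copy-before-call idiom in user space; the names (struct my_work, run_one) are hypothetical, and the tag field merely stands in for work->lockdep_map.

#include <stdio.h>
#include <stdlib.h>

struct my_work {
	void (*func)(struct my_work *w);
	const char *tag;	/* stands in for work->lockdep_map */
};

/* A work function that frees its own work item.  This is legal for
 * kernel work functions, which is exactly why the caller must copy
 * anything it still needs out of *w before invoking it. */
static void self_freeing_fn(struct my_work *w)
{
	printf("running %s\n", w->tag);
	free(w);
}

static void run_one(struct my_work *w)
{
	/* Copy what we need while *w is still guaranteed valid ... */
	void (*f)(struct my_work *) = w->func;
	const char *tag = w->tag;

	f(w);			/* ... *w may be gone from here on */

	/* Safe: uses the copies, never the (possibly freed) *w. */
	printf("finished %s\n", tag);
}

int main(void)
{
	struct my_work *w = malloc(sizeof(*w));
	w->func = self_freeing_fn;
	w->tag = "example work";
	run_one(w);
	return 0;
}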
