Diffstat (limited to 'kernel/workqueue.c')
 -rw-r--r--  kernel/workqueue.c | 89
 1 file changed, 64 insertions(+), 25 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 2994a0e3a61c..f77afd939229 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1,19 +1,26 @@
 /*
- * linux/kernel/workqueue.c
- *
- * Generic mechanism for defining kernel helper threads for running
- * arbitrary tasks in process context.
- *
- * Started by Ingo Molnar, Copyright (C) 2002
+ * kernel/workqueue.c - generic async execution with shared worker pool
+ *
+ * Copyright (C) 2002  Ingo Molnar
  *
  * Derived from the taskqueue/keventd code by:
- *
  *   David Woodhouse <dwmw2@infradead.org>
  *   Andrew Morton
  *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
  *   Theodore Ts'o <tytso@mit.edu>
  *
  * Made to use alloc_percpu by Christoph Lameter.
+ *
+ * Copyright (C) 2010  SUSE Linux Products GmbH
+ * Copyright (C) 2010  Tejun Heo <tj@kernel.org>
+ *
+ * This is the generic async execution mechanism.  Work items are
+ * executed in process context.  The worker pool is shared and
+ * automatically managed.  There is one worker pool for each CPU and
+ * one extra for works which are better served by workers which are
+ * not bound to any specific CPU.
+ *
+ * Please read Documentation/workqueue.txt for details.
 */
 
 #include <linux/module.h>
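
The rewritten header documents the new design: work items still run in process context, but workers now come from shared, automatically managed pools, one per CPU plus one for unbound work. For orientation, here is a minimal hedged sketch of the consumer-facing API this file implements; the module name and message are invented, but DECLARE_WORK(), schedule_work() and cancel_work_sync() are the real interfaces.

#include <linux/module.h>
#include <linux/workqueue.h>

static void hello_fn(struct work_struct *work)
{
        pr_info("ran in process context on a pooled worker\n");
}

static DECLARE_WORK(hello_work, hello_fn);

static int __init hello_init(void)
{
        schedule_work(&hello_work);     /* default per-cpu workqueue */
        return 0;
}

static void __exit hello_exit(void)
{
        cancel_work_sync(&hello_work);  /* don't unload with work in flight */
}

module_init(hello_init);
module_exit(hello_exit);
MODULE_LICENSE("GPL");

schedule_work() only marks the item pending; the callback then runs on whichever pooled worker picks it up.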
@@ -35,6 +42,9 @@
 #include <linux/lockdep.h>
 #include <linux/idr.h>
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/workqueue.h>
+
 #include "workqueue_sched.h"
 
 enum {
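
The CREATE_TRACE_POINTS define follows the standard tracepoint convention: it must precede the include of the trace header in exactly one translation unit, which makes that file emit the tracepoint definitions while every other includer sees declarations only. A sketch of the pattern:

/* In exactly one .c file (here, kernel/workqueue.c): */
#define CREATE_TRACE_POINTS
#include <trace/events/workqueue.h>     /* emits the definitions */

/* In any other user of the events: */
#include <trace/events/workqueue.h>     /* declarations only */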
@@ -87,7 +97,8 @@ enum {
 /*
  * Structure fields follow one of the following exclusion rules.
  *
- * I: Set during initialization and read-only afterwards.
+ * I: Modifiable by initialization/destruction paths and read-only for
+ *    everyone else.
  *
  * P: Preemption protected.  Disabling preemption is enough and should
  *    only be modified and accessed from the local cpu.
@@ -195,7 +206,7 @@ typedef cpumask_var_t mayday_mask_t;
        cpumask_test_and_set_cpu((cpu), (mask))
 #define mayday_clear_cpu(cpu, mask)    cpumask_clear_cpu((cpu), (mask))
 #define for_each_mayday_cpu(cpu, mask) for_each_cpu((cpu), (mask))
-#define alloc_mayday_mask(maskp, gfp)  alloc_cpumask_var((maskp), (gfp))
+#define alloc_mayday_mask(maskp, gfp)  zalloc_cpumask_var((maskp), (gfp))
 #define free_mayday_mask(mask)         free_cpumask_var((mask))
 #else
 typedef unsigned long mayday_mask_t;
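
Switching to zalloc_cpumask_var() matters when CONFIG_CPUMASK_OFFSTACK=y: the mask is then heap-allocated, and the non-zeroing variant hands back indeterminate bits where the mayday path needs an empty mask. A hedged userspace analogy (the two kernel helpers are real; the functions below are invented for illustration):

#include <stdlib.h>

unsigned long *alloc_mask(size_t nbits)   /* like alloc_cpumask_var() */
{
        return malloc((nbits + 7) / 8);   /* contents indeterminate */
}

unsigned long *zalloc_mask(size_t nbits)  /* like zalloc_cpumask_var() */
{
        return calloc(1, (nbits + 7) / 8);/* all bits clear: empty mask */
}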
@@ -940,10 +951,14 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
        struct global_cwq *gcwq;
        struct cpu_workqueue_struct *cwq;
        struct list_head *worklist;
+       unsigned int work_flags;
        unsigned long flags;
 
        debug_work_activate(work);
 
+       if (WARN_ON_ONCE(wq->flags & WQ_DYING))
+               return;
+
        /* determine gcwq to use */
        if (!(wq->flags & WQ_UNBOUND)) {
                struct global_cwq *last_gcwq;
@@ -986,14 +1001,17 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
        BUG_ON(!list_empty(&work->entry));
 
        cwq->nr_in_flight[cwq->work_color]++;
+       work_flags = work_color_to_flags(cwq->work_color);
 
        if (likely(cwq->nr_active < cwq->max_active)) {
                cwq->nr_active++;
                worklist = gcwq_determine_ins_pos(gcwq, cwq);
-       } else
+       } else {
+               work_flags |= WORK_STRUCT_DELAYED;
                worklist = &cwq->delayed_works;
+       }
 
-       insert_work(cwq, work, worklist, work_color_to_flags(cwq->work_color));
+       insert_work(cwq, work, worklist, work_flags);
 
        spin_unlock_irqrestore(&gcwq->lock, flags);
 }
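
The two __queue_work() hunks make each work item record, via WORK_STRUCT_DELAYED, whether it was admitted under max_active or parked on delayed_works. A hedged userspace model of the admission logic (all types and names below are invented for illustration):

#include <stdbool.h>
#include <stdio.h>

#define F_DELAYED (1u << 0)             /* stands in for WORK_STRUCT_DELAYED */

struct cwq_model {
        int nr_active, max_active;
        int nr_delayed;                 /* stands in for delayed_works */
};

static unsigned int enqueue(struct cwq_model *c)
{
        unsigned int flags = 0;         /* work_color_to_flags() equivalent */

        if (c->nr_active < c->max_active) {
                c->nr_active++;         /* admitted to the live worklist */
        } else {
                flags |= F_DELAYED;     /* parked on delayed_works */
                c->nr_delayed++;
        }
        return flags;                   /* stored in the work item itself */
}

int main(void)
{
        struct cwq_model c = { .max_active = 1 };

        printf("first:  %#x\n", enqueue(&c));   /* 0 - admitted */
        printf("second: %#x\n", enqueue(&c));   /* 0x1 - delayed */
        return 0;
}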
@@ -1212,6 +1230,7 @@ static void worker_leave_idle(struct worker *worker)
  * bound), %false if offline.
  */
 static bool worker_maybe_bind_and_lock(struct worker *worker)
+__acquires(&gcwq->lock)
 {
        struct global_cwq *gcwq = worker->gcwq;
        struct task_struct *task = worker->task;
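
__acquires() and __releases() are annotations for the sparse static checker, not runtime code: they declare that the function exits having taken (or dropped) gcwq->lock, which silences context-imbalance warnings, and they compile away under gcc. Roughly how the kernel defines them (see include/linux/compiler.h; the exact form can vary between versions):

#ifdef __CHECKER__
# define __acquires(x)  __attribute__((context(x, 0, 1)))
# define __releases(x)  __attribute__((context(x, 1, 0)))
#else
# define __acquires(x)
# define __releases(x)
#endif

The same annotations appear below on maybe_create_worker(), process_one_work() and wait_trustee_state(), which all drop and retake the lock internally.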
@@ -1485,6 +1504,8 @@ static void gcwq_mayday_timeout(unsigned long __gcwq)
  * otherwise.
  */
 static bool maybe_create_worker(struct global_cwq *gcwq)
+__releases(&gcwq->lock)
+__acquires(&gcwq->lock)
 {
        if (!need_to_create_worker(gcwq))
                return false;
@@ -1659,6 +1680,7 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
        struct list_head *pos = gcwq_determine_ins_pos(cwq->gcwq, cwq);
 
        move_linked_works(work, pos, NULL);
+       __clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
        cwq->nr_active++;
 }
 
@@ -1666,6 +1688,7 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
  * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight
  * @cwq: cwq of interest
  * @color: color of work which left the queue
+ * @delayed: for a delayed work
  *
  * A work either has completed or is removed from pending queue,
  * decrement nr_in_flight of its cwq and handle workqueue flushing.
@@ -1673,19 +1696,22 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
  * CONTEXT:
  * spin_lock_irq(gcwq->lock).
  */
-static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color)
+static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color,
+                                bool delayed)
 {
        /* ignore uncolored works */
        if (color == WORK_NO_COLOR)
                return;
 
        cwq->nr_in_flight[color]--;
-       cwq->nr_active--;
 
-       if (!list_empty(&cwq->delayed_works)) {
-               /* one down, submit a delayed one */
-               if (cwq->nr_active < cwq->max_active)
-                       cwq_activate_first_delayed(cwq);
+       if (!delayed) {
+               cwq->nr_active--;
+               if (!list_empty(&cwq->delayed_works)) {
+                       /* one down, submit a delayed one */
+                       if (cwq->nr_active < cwq->max_active)
+                               cwq_activate_first_delayed(cwq);
+               }
        }
 
        /* is flush in progress and are we at the flushing tip? */
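
This is the nr_active underflow fix that WORK_STRUCT_DELAYED exists for: only a work that was counted into nr_active may decrement it, while a work removed straight off delayed_works (bit still set) must leave the counter alone. Continuing the hedged userspace model from the __queue_work() note above:

static void dec_nr_in_flight(struct cwq_model *c, bool was_delayed)
{
        if (was_delayed)
                return;                 /* never counted in nr_active */

        c->nr_active--;                 /* without the check above, a
                                           cancelled delayed work would
                                           underflow this counter */
        if (c->nr_delayed > 0 && c->nr_active < c->max_active) {
                c->nr_delayed--;        /* cwq_activate_first_delayed(), */
                c->nr_active++;         /* which also clears the flag    */
        }
}

cwq_activate_first_delayed() clears the bit when promoting a work (the __clear_bit() hunk above), so a promoted work later completes as an ordinary active one.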
@@ -1722,6 +1748,8 @@ static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color)
  * spin_lock_irq(gcwq->lock) which is released and regrabbed.
  */
 static void process_one_work(struct worker *worker, struct work_struct *work)
+__releases(&gcwq->lock)
+__acquires(&gcwq->lock)
 {
        struct cpu_workqueue_struct *cwq = get_work_cwq(work);
        struct global_cwq *gcwq = cwq->gcwq;
@@ -1790,7 +1818,13 @@ static void process_one_work(struct worker *worker, struct work_struct *work)
        work_clear_pending(work);
        lock_map_acquire(&cwq->wq->lockdep_map);
        lock_map_acquire(&lockdep_map);
+       trace_workqueue_execute_start(work);
        f(work);
+       /*
+        * While we must be careful to not use "work" after this, the trace
+        * point will only record its address.
+        */
+       trace_workqueue_execute_end(work);
        lock_map_release(&lockdep_map);
        lock_map_release(&cwq->wq->lockdep_map);
 
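
The new comment is about lifetime: once f(work) returns, the callback may have freed or requeued its own work item, so the end tracepoint must not dereference the pointer; recording only the address is safe. A heavily hedged sketch of what such an event can look like; the field and event layout here are illustrative only, the real definitions live in include/trace/events/workqueue.h:

TRACE_EVENT(workqueue_execute_end,
        TP_PROTO(struct work_struct *work),
        TP_ARGS(work),
        TP_STRUCT__entry(
                __field(void *, work)   /* just the address, never deref'd */
        ),
        TP_fast_assign(
                __entry->work = work;
        ),
        TP_printk("work struct %p", __entry->work)
);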
@@ -1814,7 +1848,7 @@ static void process_one_work(struct worker *worker, struct work_struct *work)
        hlist_del_init(&worker->hentry);
        worker->current_work = NULL;
        worker->current_cwq = NULL;
-       cwq_dec_nr_in_flight(cwq, work_color);
+       cwq_dec_nr_in_flight(cwq, work_color, false);
 }
 
 /**
@@ -2379,7 +2413,8 @@ static int try_to_grab_pending(struct work_struct *work)
                        debug_work_deactivate(work);
                        list_del_init(&work->entry);
                        cwq_dec_nr_in_flight(get_work_cwq(work),
-                               get_work_color(work));
+                               get_work_color(work),
+                               *work_data_bits(work) & WORK_STRUCT_DELAYED);
                        ret = 1;
                }
        }
@@ -2782,7 +2817,6 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name,
                if (IS_ERR(rescuer->task))
                        goto err;
 
-               wq->rescuer = rescuer;
                rescuer->task->flags |= PF_THREAD_BOUND;
                wake_up_process(rescuer->task);
        }
@@ -2824,6 +2858,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
 {
        unsigned int cpu;
 
+       wq->flags |= WQ_DYING;
        flush_workqueue(wq);
 
        /*
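
Setting WQ_DYING before the final flush pairs with the new WARN_ON_ONCE() in __queue_work() above: work queued after destruction has begun is loudly rejected instead of racing with teardown, while everything queued earlier is still drained by flush_workqueue(). A hedged userspace model of the handshake (all names invented):

#include <stdbool.h>
#include <stdio.h>

struct wq_model {
        bool dying;                     /* stands in for WQ_DYING */
};

static bool queue_model(struct wq_model *wq)
{
        if (wq->dying) {                /* the WARN_ON_ONCE() in __queue_work() */
                fprintf(stderr, "work queued on a dying workqueue\n");
                return false;
        }
        /* ... insert the work item ... */
        return true;
}

static void destroy_model(struct wq_model *wq)
{
        wq->dying = true;               /* reject new work from here on */
        /* flush_workqueue(): drain work queued before the flag flipped, */
        /* then tear down cwqs, rescuer, mayday mask, ... */
}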
@@ -2848,6 +2883,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
        if (wq->flags & WQ_RESCUER) {
                kthread_stop(wq->rescuer->task);
                free_mayday_mask(wq->mayday_mask);
+               kfree(wq->rescuer);
        }
 
        free_cwqs(wq);
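
The kfree() closes a leak: __alloc_workqueue_key() allocates the rescuer with alloc_worker() (a kzalloc()-backed helper in this file), but destroy_workqueue() only ever stopped the task. An abbreviated sketch of the pairing this hunk restores:

/*
 * __alloc_workqueue_key():
 *     rescuer = alloc_worker();                  // kzalloc() inside
 *     rescuer->task = kthread_create(rescuer_thread, wq, "%s", name);
 *
 * destroy_workqueue():
 *     kthread_stop(wq->rescuer->task);
 *     free_mayday_mask(wq->mayday_mask);
 *     kfree(wq->rescuer);                        // previously leaked
 */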
@@ -3230,6 +3266,8 @@ static int __cpuinit trustee_thread(void *__gcwq)
  * multiple times.  To be used by cpu_callback.
  */
 static void __cpuinit wait_trustee_state(struct global_cwq *gcwq, int state)
+__releases(&gcwq->lock)
+__acquires(&gcwq->lock)
 {
        if (!(gcwq->trustee_state == state ||
              gcwq->trustee_state == TRUSTEE_DONE)) {
@@ -3536,8 +3574,7 @@ static int __init init_workqueues(void)
                spin_lock_init(&gcwq->lock);
                INIT_LIST_HEAD(&gcwq->worklist);
                gcwq->cpu = cpu;
-               if (cpu == WORK_CPU_UNBOUND)
-                       gcwq->flags |= GCWQ_DISASSOCIATED;
+               gcwq->flags |= GCWQ_DISASSOCIATED;
 
                INIT_LIST_HEAD(&gcwq->idle_list);
                for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)
@@ -3561,6 +3598,8 @@ static int __init init_workqueues(void)
                struct global_cwq *gcwq = get_gcwq(cpu);
                struct worker *worker;
 
+               if (cpu != WORK_CPU_UNBOUND)
+                       gcwq->flags &= ~GCWQ_DISASSOCIATED;
                worker = create_worker(gcwq, true);
                BUG_ON(!worker);
                spin_lock_irq(&gcwq->lock);
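
Together with the earlier init_workqueues() hunk, this inverts the GCWQ_DISASSOCIATED bootstrap: every pool now starts disassociated, and the flag is cleared only for CPUs that are actually online when their initial worker is created; the unbound pseudo-CPU pool keeps it permanently. A hedged userspace model (sizes and names invented):

#include <stdbool.h>

enum { NR_CPUS_MODEL = 4, CPU_UNBOUND = NR_CPUS_MODEL };

struct gcwq_model {
        bool disassociated;
};

static struct gcwq_model pools[NR_CPUS_MODEL + 1];

static void init_pools(const bool cpu_online[NR_CPUS_MODEL])
{
        int cpu;

        /* first pass: every pool, including unbound, starts detached */
        for (cpu = 0; cpu <= CPU_UNBOUND; cpu++)
                pools[cpu].disassociated = true;

        /* second pass: only online CPUs become associated when their
         * initial worker is created; unbound stays disassociated */
        for (cpu = 0; cpu <= CPU_UNBOUND; cpu++) {
                if (cpu != CPU_UNBOUND && cpu_online[cpu])
                        pools[cpu].disassociated = false;
                /* create_worker(gcwq, true) would happen here */
        }
}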
