diff options
Diffstat (limited to 'kernel/workqueue.c')
| -rw-r--r-- | kernel/workqueue.c | 80 |
1 file changed, 55 insertions(+), 25 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 8bd600c020e5..f77afd939229 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
| @@ -1,19 +1,26 @@ | |||
| 1 | /* | 1 | /* |
| 2 | * linux/kernel/workqueue.c | 2 | * kernel/workqueue.c - generic async execution with shared worker pool |
| 3 | * | 3 | * |
| 4 | * Generic mechanism for defining kernel helper threads for running | 4 | * Copyright (C) 2002 Ingo Molnar |
| 5 | * arbitrary tasks in process context. | ||
| 6 | * | 5 | * |
| 7 | * Started by Ingo Molnar, Copyright (C) 2002 | 6 | * Derived from the taskqueue/keventd code by: |
| 7 | * David Woodhouse <dwmw2@infradead.org> | ||
| 8 | * Andrew Morton | ||
| 9 | * Kai Petzke <wpp@marie.physik.tu-berlin.de> | ||
| 10 | * Theodore Ts'o <tytso@mit.edu> | ||
| 8 | * | 11 | * |
| 9 | * Derived from the taskqueue/keventd code by: | 12 | * Made to use alloc_percpu by Christoph Lameter. |
| 10 | * | 13 | * |
| 11 | * David Woodhouse <dwmw2@infradead.org> | 14 | * Copyright (C) 2010 SUSE Linux Products GmbH |
| 12 | * Andrew Morton | 15 | * Copyright (C) 2010 Tejun Heo <tj@kernel.org> |
| 13 | * Kai Petzke <wpp@marie.physik.tu-berlin.de> | ||
| 14 | * Theodore Ts'o <tytso@mit.edu> | ||
| 15 | * | 16 | * |
| 16 | * Made to use alloc_percpu by Christoph Lameter. | 17 | * This is the generic async execution mechanism. Work items are |
| 18 | * executed in process context. The worker pool is shared and | ||
| 19 | * automatically managed. There is one worker pool for each CPU and | ||
| 20 | * one extra for works which are better served by workers which are | ||
| 21 | * not bound to any specific CPU. | ||
| 22 | * | ||
| 23 | * Please read Documentation/workqueue.txt for details. | ||
| 17 | */ | 24 | */ |
| 18 | 25 | ||
| 19 | #include <linux/module.h> | 26 | #include <linux/module.h> |
| @@ -90,7 +97,8 @@ enum { | |||
| 90 | /* | 97 | /* |
| 91 | * Structure fields follow one of the following exclusion rules. | 98 | * Structure fields follow one of the following exclusion rules. |
| 92 | * | 99 | * |
| 93 | * I: Set during initialization and read-only afterwards. | 100 | * I: Modifiable by initialization/destruction paths and read-only for |
| 101 | * everyone else. | ||
| 94 | * | 102 | * |
| 95 | * P: Preemption protected. Disabling preemption is enough and should | 103 | * P: Preemption protected. Disabling preemption is enough and should |
| 96 | * only be modified and accessed from the local cpu. | 104 | * only be modified and accessed from the local cpu. |
| @@ -198,7 +206,7 @@ typedef cpumask_var_t mayday_mask_t; | |||
| 198 | cpumask_test_and_set_cpu((cpu), (mask)) | 206 | cpumask_test_and_set_cpu((cpu), (mask)) |
| 199 | #define mayday_clear_cpu(cpu, mask) cpumask_clear_cpu((cpu), (mask)) | 207 | #define mayday_clear_cpu(cpu, mask) cpumask_clear_cpu((cpu), (mask)) |
| 200 | #define for_each_mayday_cpu(cpu, mask) for_each_cpu((cpu), (mask)) | 208 | #define for_each_mayday_cpu(cpu, mask) for_each_cpu((cpu), (mask)) |
| 201 | #define alloc_mayday_mask(maskp, gfp) alloc_cpumask_var((maskp), (gfp)) | 209 | #define alloc_mayday_mask(maskp, gfp) zalloc_cpumask_var((maskp), (gfp)) |
| 202 | #define free_mayday_mask(mask) free_cpumask_var((mask)) | 210 | #define free_mayday_mask(mask) free_cpumask_var((mask)) |
| 203 | #else | 211 | #else |
| 204 | typedef unsigned long mayday_mask_t; | 212 | typedef unsigned long mayday_mask_t; |
| @@ -943,10 +951,14 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq, | |||
| 943 | struct global_cwq *gcwq; | 951 | struct global_cwq *gcwq; |
| 944 | struct cpu_workqueue_struct *cwq; | 952 | struct cpu_workqueue_struct *cwq; |
| 945 | struct list_head *worklist; | 953 | struct list_head *worklist; |
| 954 | unsigned int work_flags; | ||
| 946 | unsigned long flags; | 955 | unsigned long flags; |
| 947 | 956 | ||
| 948 | debug_work_activate(work); | 957 | debug_work_activate(work); |
| 949 | 958 | ||
| 959 | if (WARN_ON_ONCE(wq->flags & WQ_DYING)) | ||
| 960 | return; | ||
| 961 | |||
| 950 | /* determine gcwq to use */ | 962 | /* determine gcwq to use */ |
| 951 | if (!(wq->flags & WQ_UNBOUND)) { | 963 | if (!(wq->flags & WQ_UNBOUND)) { |
| 952 | struct global_cwq *last_gcwq; | 964 | struct global_cwq *last_gcwq; |
| @@ -989,14 +1001,17 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq, | |||
| 989 | BUG_ON(!list_empty(&work->entry)); | 1001 | BUG_ON(!list_empty(&work->entry)); |
| 990 | 1002 | ||
| 991 | cwq->nr_in_flight[cwq->work_color]++; | 1003 | cwq->nr_in_flight[cwq->work_color]++; |
| 1004 | work_flags = work_color_to_flags(cwq->work_color); | ||
| 992 | 1005 | ||
| 993 | if (likely(cwq->nr_active < cwq->max_active)) { | 1006 | if (likely(cwq->nr_active < cwq->max_active)) { |
| 994 | cwq->nr_active++; | 1007 | cwq->nr_active++; |
| 995 | worklist = gcwq_determine_ins_pos(gcwq, cwq); | 1008 | worklist = gcwq_determine_ins_pos(gcwq, cwq); |
| 996 | } else | 1009 | } else { |
| 1010 | work_flags |= WORK_STRUCT_DELAYED; | ||
| 997 | worklist = &cwq->delayed_works; | 1011 | worklist = &cwq->delayed_works; |
| 1012 | } | ||
| 998 | 1013 | ||
| 999 | insert_work(cwq, work, worklist, work_color_to_flags(cwq->work_color)); | 1014 | insert_work(cwq, work, worklist, work_flags); |
| 1000 | 1015 | ||
| 1001 | spin_unlock_irqrestore(&gcwq->lock, flags); | 1016 | spin_unlock_irqrestore(&gcwq->lock, flags); |
| 1002 | } | 1017 | } |
| @@ -1215,6 +1230,7 @@ static void worker_leave_idle(struct worker *worker) | |||
| 1215 | * bound), %false if offline. | 1230 | * bound), %false if offline. |
| 1216 | */ | 1231 | */ |
| 1217 | static bool worker_maybe_bind_and_lock(struct worker *worker) | 1232 | static bool worker_maybe_bind_and_lock(struct worker *worker) |
| 1233 | __acquires(&gcwq->lock) | ||
| 1218 | { | 1234 | { |
| 1219 | struct global_cwq *gcwq = worker->gcwq; | 1235 | struct global_cwq *gcwq = worker->gcwq; |
| 1220 | struct task_struct *task = worker->task; | 1236 | struct task_struct *task = worker->task; |
| @@ -1488,6 +1504,8 @@ static void gcwq_mayday_timeout(unsigned long __gcwq) | |||
| 1488 | * otherwise. | 1504 | * otherwise. |
| 1489 | */ | 1505 | */ |
| 1490 | static bool maybe_create_worker(struct global_cwq *gcwq) | 1506 | static bool maybe_create_worker(struct global_cwq *gcwq) |
| 1507 | __releases(&gcwq->lock) | ||
| 1508 | __acquires(&gcwq->lock) | ||
| 1491 | { | 1509 | { |
| 1492 | if (!need_to_create_worker(gcwq)) | 1510 | if (!need_to_create_worker(gcwq)) |
| 1493 | return false; | 1511 | return false; |
| @@ -1662,6 +1680,7 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq) | |||
| 1662 | struct list_head *pos = gcwq_determine_ins_pos(cwq->gcwq, cwq); | 1680 | struct list_head *pos = gcwq_determine_ins_pos(cwq->gcwq, cwq); |
| 1663 | 1681 | ||
| 1664 | move_linked_works(work, pos, NULL); | 1682 | move_linked_works(work, pos, NULL); |
| 1683 | __clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work)); | ||
| 1665 | cwq->nr_active++; | 1684 | cwq->nr_active++; |
| 1666 | } | 1685 | } |
| 1667 | 1686 | ||
| @@ -1669,6 +1688,7 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq) | |||
| 1669 | * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight | 1688 | * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight |
| 1670 | * @cwq: cwq of interest | 1689 | * @cwq: cwq of interest |
| 1671 | * @color: color of work which left the queue | 1690 | * @color: color of work which left the queue |
| 1691 | * @delayed: for a delayed work | ||
| 1672 | * | 1692 | * |
| 1673 | * A work either has completed or is removed from pending queue, | 1693 | * A work either has completed or is removed from pending queue, |
| 1674 | * decrement nr_in_flight of its cwq and handle workqueue flushing. | 1694 | * decrement nr_in_flight of its cwq and handle workqueue flushing. |
| @@ -1676,19 +1696,22 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq) | |||
| 1676 | * CONTEXT: | 1696 | * CONTEXT: |
| 1677 | * spin_lock_irq(gcwq->lock). | 1697 | * spin_lock_irq(gcwq->lock). |
| 1678 | */ | 1698 | */ |
| 1679 | static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color) | 1699 | static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color, |
| 1700 | bool delayed) | ||
| 1680 | { | 1701 | { |
| 1681 | /* ignore uncolored works */ | 1702 | /* ignore uncolored works */ |
| 1682 | if (color == WORK_NO_COLOR) | 1703 | if (color == WORK_NO_COLOR) |
| 1683 | return; | 1704 | return; |
| 1684 | 1705 | ||
| 1685 | cwq->nr_in_flight[color]--; | 1706 | cwq->nr_in_flight[color]--; |
| 1686 | cwq->nr_active--; | ||
| 1687 | 1707 | ||
| 1688 | if (!list_empty(&cwq->delayed_works)) { | 1708 | if (!delayed) { |
| 1689 | /* one down, submit a delayed one */ | 1709 | cwq->nr_active--; |
| 1690 | if (cwq->nr_active < cwq->max_active) | 1710 | if (!list_empty(&cwq->delayed_works)) { |
| 1691 | cwq_activate_first_delayed(cwq); | 1711 | /* one down, submit a delayed one */ |
| 1712 | if (cwq->nr_active < cwq->max_active) | ||
| 1713 | cwq_activate_first_delayed(cwq); | ||
| 1714 | } | ||
| 1692 | } | 1715 | } |
| 1693 | 1716 | ||
| 1694 | /* is flush in progress and are we at the flushing tip? */ | 1717 | /* is flush in progress and are we at the flushing tip? */ |
| @@ -1725,6 +1748,8 @@ static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color) | |||
| 1725 | * spin_lock_irq(gcwq->lock) which is released and regrabbed. | 1748 | * spin_lock_irq(gcwq->lock) which is released and regrabbed. |
| 1726 | */ | 1749 | */ |
| 1727 | static void process_one_work(struct worker *worker, struct work_struct *work) | 1750 | static void process_one_work(struct worker *worker, struct work_struct *work) |
| 1751 | __releases(&gcwq->lock) | ||
| 1752 | __acquires(&gcwq->lock) | ||
| 1728 | { | 1753 | { |
| 1729 | struct cpu_workqueue_struct *cwq = get_work_cwq(work); | 1754 | struct cpu_workqueue_struct *cwq = get_work_cwq(work); |
| 1730 | struct global_cwq *gcwq = cwq->gcwq; | 1755 | struct global_cwq *gcwq = cwq->gcwq; |
| @@ -1823,7 +1848,7 @@ static void process_one_work(struct worker *worker, struct work_struct *work) | |||
| 1823 | hlist_del_init(&worker->hentry); | 1848 | hlist_del_init(&worker->hentry); |
| 1824 | worker->current_work = NULL; | 1849 | worker->current_work = NULL; |
| 1825 | worker->current_cwq = NULL; | 1850 | worker->current_cwq = NULL; |
| 1826 | cwq_dec_nr_in_flight(cwq, work_color); | 1851 | cwq_dec_nr_in_flight(cwq, work_color, false); |
| 1827 | } | 1852 | } |
| 1828 | 1853 | ||
| 1829 | /** | 1854 | /** |
| @@ -2388,7 +2413,8 @@ static int try_to_grab_pending(struct work_struct *work) | |||
| 2388 | debug_work_deactivate(work); | 2413 | debug_work_deactivate(work); |
| 2389 | list_del_init(&work->entry); | 2414 | list_del_init(&work->entry); |
| 2390 | cwq_dec_nr_in_flight(get_work_cwq(work), | 2415 | cwq_dec_nr_in_flight(get_work_cwq(work), |
| 2391 | get_work_color(work)); | 2416 | get_work_color(work), |
| 2417 | *work_data_bits(work) & WORK_STRUCT_DELAYED); | ||
| 2392 | ret = 1; | 2418 | ret = 1; |
| 2393 | } | 2419 | } |
| 2394 | } | 2420 | } |
| @@ -2791,7 +2817,6 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name, | |||
| 2791 | if (IS_ERR(rescuer->task)) | 2817 | if (IS_ERR(rescuer->task)) |
| 2792 | goto err; | 2818 | goto err; |
| 2793 | 2819 | ||
| 2794 | wq->rescuer = rescuer; | ||
| 2795 | rescuer->task->flags |= PF_THREAD_BOUND; | 2820 | rescuer->task->flags |= PF_THREAD_BOUND; |
| 2796 | wake_up_process(rescuer->task); | 2821 | wake_up_process(rescuer->task); |
| 2797 | } | 2822 | } |
| @@ -2833,6 +2858,7 @@ void destroy_workqueue(struct workqueue_struct *wq) | |||
| 2833 | { | 2858 | { |
| 2834 | unsigned int cpu; | 2859 | unsigned int cpu; |
| 2835 | 2860 | ||
| 2861 | wq->flags |= WQ_DYING; | ||
| 2836 | flush_workqueue(wq); | 2862 | flush_workqueue(wq); |
| 2837 | 2863 | ||
| 2838 | /* | 2864 | /* |
| @@ -2857,6 +2883,7 @@ void destroy_workqueue(struct workqueue_struct *wq) | |||
| 2857 | if (wq->flags & WQ_RESCUER) { | 2883 | if (wq->flags & WQ_RESCUER) { |
| 2858 | kthread_stop(wq->rescuer->task); | 2884 | kthread_stop(wq->rescuer->task); |
| 2859 | free_mayday_mask(wq->mayday_mask); | 2885 | free_mayday_mask(wq->mayday_mask); |
| 2886 | kfree(wq->rescuer); | ||
| 2860 | } | 2887 | } |
| 2861 | 2888 | ||
| 2862 | free_cwqs(wq); | 2889 | free_cwqs(wq); |
| @@ -3239,6 +3266,8 @@ static int __cpuinit trustee_thread(void *__gcwq) | |||
| 3239 | * multiple times. To be used by cpu_callback. | 3266 | * multiple times. To be used by cpu_callback. |
| 3240 | */ | 3267 | */ |
| 3241 | static void __cpuinit wait_trustee_state(struct global_cwq *gcwq, int state) | 3268 | static void __cpuinit wait_trustee_state(struct global_cwq *gcwq, int state) |
| 3269 | __releases(&gcwq->lock) | ||
| 3270 | __acquires(&gcwq->lock) | ||
| 3242 | { | 3271 | { |
| 3243 | if (!(gcwq->trustee_state == state || | 3272 | if (!(gcwq->trustee_state == state || |
| 3244 | gcwq->trustee_state == TRUSTEE_DONE)) { | 3273 | gcwq->trustee_state == TRUSTEE_DONE)) { |
| @@ -3545,8 +3574,7 @@ static int __init init_workqueues(void) | |||
| 3545 | spin_lock_init(&gcwq->lock); | 3574 | spin_lock_init(&gcwq->lock); |
| 3546 | INIT_LIST_HEAD(&gcwq->worklist); | 3575 | INIT_LIST_HEAD(&gcwq->worklist); |
| 3547 | gcwq->cpu = cpu; | 3576 | gcwq->cpu = cpu; |
| 3548 | if (cpu == WORK_CPU_UNBOUND) | 3577 | gcwq->flags |= GCWQ_DISASSOCIATED; |
| 3549 | gcwq->flags |= GCWQ_DISASSOCIATED; | ||
| 3550 | 3578 | ||
| 3551 | INIT_LIST_HEAD(&gcwq->idle_list); | 3579 | INIT_LIST_HEAD(&gcwq->idle_list); |
| 3552 | for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++) | 3580 | for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++) |
| @@ -3570,6 +3598,8 @@ static int __init init_workqueues(void) | |||
| 3570 | struct global_cwq *gcwq = get_gcwq(cpu); | 3598 | struct global_cwq *gcwq = get_gcwq(cpu); |
| 3571 | struct worker *worker; | 3599 | struct worker *worker; |
| 3572 | 3600 | ||
| 3601 | if (cpu != WORK_CPU_UNBOUND) | ||
| 3602 | gcwq->flags &= ~GCWQ_DISASSOCIATED; | ||
| 3573 | worker = create_worker(gcwq, true); | 3603 | worker = create_worker(gcwq, true); |
| 3574 | BUG_ON(!worker); | 3604 | BUG_ON(!worker); |
| 3575 | spin_lock_irq(&gcwq->lock); | 3605 | spin_lock_irq(&gcwq->lock); |
