Diffstat (limited to 'kernel')
 kernel/workqueue.c | 53 ++++++++++++++++++++++++++++++++++++++---------------
 1 file changed, 38 insertions(+), 15 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 8bd600c020e5..727f24e563ae 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -90,7 +90,8 @@ enum {
 /*
  * Structure fields follow one of the following exclusion rules.
  *
- * I: Set during initialization and read-only afterwards.
+ * I: Modifiable by initialization/destruction paths and read-only for
+ *    everyone else.
  *
  * P: Preemption protected. Disabling preemption is enough and should
  *    only be modified and accessed from the local cpu.
@@ -198,7 +199,7 @@ typedef cpumask_var_t mayday_mask_t;
 	cpumask_test_and_set_cpu((cpu), (mask))
 #define mayday_clear_cpu(cpu, mask)	cpumask_clear_cpu((cpu), (mask))
 #define for_each_mayday_cpu(cpu, mask)	for_each_cpu((cpu), (mask))
-#define alloc_mayday_mask(maskp, gfp)	alloc_cpumask_var((maskp), (gfp))
+#define alloc_mayday_mask(maskp, gfp)	zalloc_cpumask_var((maskp), (gfp))
#define free_mayday_mask(mask)		free_cpumask_var((mask))
 #else
 typedef unsigned long mayday_mask_t;
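
Note: the alloc_cpumask_var() to zalloc_cpumask_var() change matters because the mayday mask is tested (mayday_test_and_set_cpu()) and scanned (for_each_mayday_cpu()) before any bit has necessarily been set, so it must start out all-zero. A minimal userspace sketch of the same idea, with calloc() standing in for the zeroing allocator (illustrative names, not kernel API):

#include <stdlib.h>

/* Allocate an nbits-wide bitmask guaranteed to be all-zero.  A plain
 * malloc() would hand back stale bits that a later for_each-style scan
 * could misread as CPUs already crying mayday. */
static unsigned long *alloc_mask_zeroed(size_t nbits)
{
	size_t nwords = (nbits + 8 * sizeof(unsigned long) - 1) /
			(8 * sizeof(unsigned long));
	return calloc(nwords, sizeof(unsigned long));
}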
@@ -943,10 +944,14 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
 	struct global_cwq *gcwq;
 	struct cpu_workqueue_struct *cwq;
 	struct list_head *worklist;
+	unsigned int work_flags;
 	unsigned long flags;
 
 	debug_work_activate(work);
 
+	if (WARN_ON_ONCE(wq->flags & WQ_DYING))
+		return;
+
 	/* determine gcwq to use */
 	if (!(wq->flags & WQ_UNBOUND)) {
 		struct global_cwq *last_gcwq;
@@ -989,14 +994,17 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
 	BUG_ON(!list_empty(&work->entry));
 
 	cwq->nr_in_flight[cwq->work_color]++;
+	work_flags = work_color_to_flags(cwq->work_color);
 
 	if (likely(cwq->nr_active < cwq->max_active)) {
 		cwq->nr_active++;
 		worklist = gcwq_determine_ins_pos(gcwq, cwq);
-	} else
+	} else {
+		work_flags |= WORK_STRUCT_DELAYED;
 		worklist = &cwq->delayed_works;
+	}
 
-	insert_work(cwq, work, worklist, work_color_to_flags(cwq->work_color));
+	insert_work(cwq, work, worklist, work_flags);
 
 	spin_unlock_irqrestore(&gcwq->lock, flags);
 }
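
Note: the net effect of this hunk is that a work queued past max_active is parked on cwq->delayed_works and, crucially, now carries WORK_STRUCT_DELAYED in its flags so later accounting can tell it never became active. A compilable userspace model of that decision (all names here are illustrative, not the kernel's):

#include <stdio.h>

#define MODEL_DELAYED	0x1	/* stands in for WORK_STRUCT_DELAYED */

struct model_cwq {
	int nr_active;	/* works on the live worklist */
	int max_active;	/* cap enforced at queueing time */
	int nr_delayed;	/* works parked on delayed_works */
};

/* Return the flags a newly queued work should carry. */
static unsigned int model_queue(struct model_cwq *cwq)
{
	unsigned int flags = 0;	/* plays the role of work_flags */

	if (cwq->nr_active < cwq->max_active) {
		cwq->nr_active++;	/* straight onto the worklist */
	} else {
		flags |= MODEL_DELAYED;	/* parked until room opens up */
		cwq->nr_delayed++;
	}
	return flags;
}

int main(void)
{
	struct model_cwq cwq = { .nr_active = 0, .max_active = 2 };

	for (int i = 0; i < 4; i++)
		printf("work %d flags: %#x\n", i, model_queue(&cwq));
	/* works 0 and 1 go live; 2 and 3 come back tagged delayed */
	return 0;
}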
@@ -1215,6 +1223,7 @@ static void worker_leave_idle(struct worker *worker)
  * bound), %false if offline.
  */
 static bool worker_maybe_bind_and_lock(struct worker *worker)
+__acquires(&gcwq->lock)
 {
 	struct global_cwq *gcwq = worker->gcwq;
 	struct task_struct *task = worker->task;
@@ -1488,6 +1497,8 @@ static void gcwq_mayday_timeout(unsigned long __gcwq)
  * otherwise.
  */
 static bool maybe_create_worker(struct global_cwq *gcwq)
+__releases(&gcwq->lock)
+__acquires(&gcwq->lock)
 {
 	if (!need_to_create_worker(gcwq))
 		return false;
@@ -1662,6 +1673,7 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
 	struct list_head *pos = gcwq_determine_ins_pos(cwq->gcwq, cwq);
 
 	move_linked_works(work, pos, NULL);
+	__clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
 	cwq->nr_active++;
 }
 
@@ -1669,6 +1681,7 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
  * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight
  * @cwq: cwq of interest
  * @color: color of work which left the queue
+ * @delayed: for a delayed work
  *
  * A work either has completed or is removed from pending queue,
  * decrement nr_in_flight of its cwq and handle workqueue flushing.
@@ -1676,19 +1689,22 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
  * CONTEXT:
  * spin_lock_irq(gcwq->lock).
  */
-static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color)
+static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color,
+				 bool delayed)
 {
 	/* ignore uncolored works */
 	if (color == WORK_NO_COLOR)
 		return;
 
 	cwq->nr_in_flight[color]--;
-	cwq->nr_active--;
 
-	if (!list_empty(&cwq->delayed_works)) {
-		/* one down, submit a delayed one */
-		if (cwq->nr_active < cwq->max_active)
-			cwq_activate_first_delayed(cwq);
+	if (!delayed) {
+		cwq->nr_active--;
+		if (!list_empty(&cwq->delayed_works)) {
+			/* one down, submit a delayed one */
+			if (cwq->nr_active < cwq->max_active)
+				cwq_activate_first_delayed(cwq);
+		}
 	}
 
 	/* is flush in progress and are we at the flushing tip? */
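
Note: this restructuring is what the new @delayed argument exists for. A work cancelled straight off the delayed list was never counted in nr_active, so the old unconditional nr_active-- could underflow the counter and permanently confuse max_active throttling. Continuing the userspace model from the sketch after __queue_work() (illustrative only; add #include <stdbool.h>):

/* Completion/cancellation side: only a work that was actually active
 * decrements nr_active, and freed capacity promotes one parked work,
 * mirroring cwq_activate_first_delayed() clearing the delayed bit. */
static void model_dec_in_flight(struct model_cwq *cwq, bool delayed)
{
	if (delayed) {
		cwq->nr_delayed--;	/* never became active, no nr_active-- */
		return;
	}
	cwq->nr_active--;
	if (cwq->nr_delayed > 0 && cwq->nr_active < cwq->max_active) {
		cwq->nr_delayed--;	/* one down, activate a delayed one */
		cwq->nr_active++;
	}
}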
@@ -1725,6 +1741,8 @@ static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color)
  * spin_lock_irq(gcwq->lock) which is released and regrabbed.
  */
 static void process_one_work(struct worker *worker, struct work_struct *work)
+__releases(&gcwq->lock)
+__acquires(&gcwq->lock)
 {
 	struct cpu_workqueue_struct *cwq = get_work_cwq(work);
 	struct global_cwq *gcwq = cwq->gcwq;
@@ -1823,7 +1841,7 @@ static void process_one_work(struct worker *worker, struct work_struct *work)
 	hlist_del_init(&worker->hentry);
 	worker->current_work = NULL;
 	worker->current_cwq = NULL;
-	cwq_dec_nr_in_flight(cwq, work_color);
+	cwq_dec_nr_in_flight(cwq, work_color, false);
 }
 
 /**
@@ -2388,7 +2406,8 @@ static int try_to_grab_pending(struct work_struct *work)
 			debug_work_deactivate(work);
 			list_del_init(&work->entry);
 			cwq_dec_nr_in_flight(get_work_cwq(work),
-				get_work_color(work));
+				get_work_color(work),
+				*work_data_bits(work) & WORK_STRUCT_DELAYED);
 			ret = 1;
 		}
 	}
@@ -2791,7 +2810,6 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name,
 		if (IS_ERR(rescuer->task))
 			goto err;
 
-		wq->rescuer = rescuer;
 		rescuer->task->flags |= PF_THREAD_BOUND;
 		wake_up_process(rescuer->task);
 	}
@@ -2833,6 +2851,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
 {
 	unsigned int cpu;
 
+	wq->flags |= WQ_DYING;
 	flush_workqueue(wq);
 
 	/*
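
Note: marking WQ_DYING before flush_workqueue() pairs with the WARN_ON_ONCE() added to __queue_work() above: once teardown begins, a late queue_work() caller triggers a one-time warning and the work is rejected rather than racing against destruction. In outline (kernel identifiers as in the diff, details elided):

void destroy_workqueue(struct workqueue_struct *wq)
{
	wq->flags |= WQ_DYING;	/* late __queue_work() now warns and bails */
	flush_workqueue(wq);	/* drain everything queued before the mark */
	/* ... sanity-check cwqs, stop the rescuer, free resources ... */
}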
@@ -2857,6 +2876,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
 	if (wq->flags & WQ_RESCUER) {
 		kthread_stop(wq->rescuer->task);
 		free_mayday_mask(wq->mayday_mask);
+		kfree(wq->rescuer);
 	}
 
 	free_cwqs(wq);
@@ -3239,6 +3259,8 @@ static int __cpuinit trustee_thread(void *__gcwq)
  * multiple times. To be used by cpu_callback.
  */
 static void __cpuinit wait_trustee_state(struct global_cwq *gcwq, int state)
+__releases(&gcwq->lock)
+__acquires(&gcwq->lock)
 {
 	if (!(gcwq->trustee_state == state ||
 	      gcwq->trustee_state == TRUSTEE_DONE)) {
@@ -3545,8 +3567,7 @@ static int __init init_workqueues(void)
 		spin_lock_init(&gcwq->lock);
 		INIT_LIST_HEAD(&gcwq->worklist);
 		gcwq->cpu = cpu;
-		if (cpu == WORK_CPU_UNBOUND)
-			gcwq->flags |= GCWQ_DISASSOCIATED;
+		gcwq->flags |= GCWQ_DISASSOCIATED;
 
 		INIT_LIST_HEAD(&gcwq->idle_list);
 		for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)
@@ -3570,6 +3591,8 @@ static int __init init_workqueues(void)
 		struct global_cwq *gcwq = get_gcwq(cpu);
 		struct worker *worker;
 
+		if (cpu != WORK_CPU_UNBOUND)
+			gcwq->flags &= ~GCWQ_DISASSOCIATED;
 		worker = create_worker(gcwq, true);
 		BUG_ON(!worker);
 		spin_lock_irq(&gcwq->lock);
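
Note: taken together, the last two hunks flip the GCWQ_DISASSOCIATED policy: every gcwq now starts disassociated, and only CPUs that get their initial worker clear the flag. Previously just WORK_CPU_UNBOUND was marked, so gcwqs for possible-but-offline CPUs looked bound before any worker existed. A condensed userspace model of the new ordering (illustrative names, not kernel code):

#include <stdbool.h>

enum { MODEL_DISASSOCIATED = 0x1 };	/* stands in for GCWQ_DISASSOCIATED */

struct model_gcwq {
	unsigned int flags;
	int cpu;
};

static void model_init_gcwq(struct model_gcwq *gcwq, int cpu)
{
	gcwq->cpu = cpu;
	gcwq->flags |= MODEL_DISASSOCIATED;	/* pessimistic default */
}

/* Runs only for gcwqs that actually get their initial worker. */
static void model_attach_initial_worker(struct model_gcwq *gcwq, bool unbound)
{
	if (!unbound)
		gcwq->flags &= ~MODEL_DISASSOCIATED;	/* CPU online and bound */
	/* create_worker(gcwq, true) follows in the kernel */
}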