Diffstat (limited to 'kernel/workqueue.c')
 kernel/workqueue.c | 62 +++++++++++++++++++++++++++++++++++++++++++++++---------------
 1 file changed, 47 insertions(+), 15 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 2994a0e3a61c..727f24e563ae 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -35,6 +35,9 @@
 #include <linux/lockdep.h>
 #include <linux/idr.h>
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/workqueue.h>
+
 #include "workqueue_sched.h"
 
 enum {
@@ -87,7 +90,8 @@ enum {
 /*
  * Structure fields follow one of the following exclusion rules.
  *
- * I: Set during initialization and read-only afterwards.
+ * I: Modifiable by initialization/destruction paths and read-only for
+ *    everyone else.
  *
  * P: Preemption protected.  Disabling preemption is enough and should
  *    only be modified and accessed from the local cpu.
@@ -195,7 +199,7 @@ typedef cpumask_var_t mayday_mask_t;
 	cpumask_test_and_set_cpu((cpu), (mask))
 #define mayday_clear_cpu(cpu, mask)	cpumask_clear_cpu((cpu), (mask))
 #define for_each_mayday_cpu(cpu, mask)	for_each_cpu((cpu), (mask))
-#define alloc_mayday_mask(maskp, gfp)	alloc_cpumask_var((maskp), (gfp))
+#define alloc_mayday_mask(maskp, gfp)	zalloc_cpumask_var((maskp), (gfp))
 #define free_mayday_mask(mask)		free_cpumask_var((mask))
 #else
 typedef unsigned long mayday_mask_t;
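
The alloc_mayday_mask() change swaps alloc_cpumask_var() for zalloc_cpumask_var(): with CONFIG_CPUMASK_OFFSTACK=y the former returns a mask with undefined contents, so the mayday mask could start out with stray CPUs already set. The zalloc variant hands back an all-clear mask. A minimal userspace analogue of the distinction (toy bitmask, calloc standing in for the kernel allocator; not the kernel API):

    #include <stdio.h>
    #include <stdlib.h>

    #define NR_CPUS 64

    int main(void)
    {
        /* zalloc_cpumask_var() ~ calloc: the mask starts all-clear;
         * the malloc-like alloc_cpumask_var() leaves the bits undefined */
        unsigned char *mayday = calloc(NR_CPUS / 8, 1);
        int cpu, stray = 0;

        if (!mayday)
            return 1;
        for (cpu = 0; cpu < NR_CPUS; cpu++)
            if (mayday[cpu / 8] & (1u << (cpu % 8)))
                stray++;
        printf("stray mayday cpus: %d\n", stray);  /* always 0 */
        free(mayday);
        return 0;
    }
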
@@ -940,10 +944,14 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
 	struct global_cwq *gcwq;
 	struct cpu_workqueue_struct *cwq;
 	struct list_head *worklist;
+	unsigned int work_flags;
 	unsigned long flags;
 
 	debug_work_activate(work);
 
+	if (WARN_ON_ONCE(wq->flags & WQ_DYING))
+		return;
+
 	/* determine gcwq to use */
 	if (!(wq->flags & WQ_UNBOUND)) {
 		struct global_cwq *last_gcwq;
@@ -986,14 +994,17 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
 	BUG_ON(!list_empty(&work->entry));
 
 	cwq->nr_in_flight[cwq->work_color]++;
+	work_flags = work_color_to_flags(cwq->work_color);
 
 	if (likely(cwq->nr_active < cwq->max_active)) {
 		cwq->nr_active++;
 		worklist = gcwq_determine_ins_pos(gcwq, cwq);
-	} else
+	} else {
+		work_flags |= WORK_STRUCT_DELAYED;
 		worklist = &cwq->delayed_works;
+	}
 
-	insert_work(cwq, work, worklist, work_color_to_flags(cwq->work_color));
+	insert_work(cwq, work, worklist, work_flags);
 
 	spin_unlock_irqrestore(&gcwq->lock, flags);
 }
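
Taken together, the two hunks above restructure how __queue_work() computes the flag word passed to insert_work(): works that land on cwq->delayed_works because nr_active has hit max_active are now tagged WORK_STRUCT_DELAYED, so the accounting in cwq_dec_nr_in_flight() further down can tell whether a work ever counted against nr_active. A toy model of the queue-side decision, with made-up flag values standing in for the real bits in include/linux/workqueue.h:

    #include <stdio.h>

    /* toy flag bits; the real ones live in include/linux/workqueue.h */
    #define TOY_WORK_COLOR(c)  ((unsigned)(c) << 8)
    #define TOY_WORK_DELAYED   (1u << 0)

    struct toy_cwq { int nr_active, max_active; };

    /* mirrors the new __queue_work() logic: works queued past
     * max_active are tagged DELAYED instead of counted as active */
    static unsigned queue_flags(struct toy_cwq *cwq, int color)
    {
        unsigned work_flags = TOY_WORK_COLOR(color);

        if (cwq->nr_active < cwq->max_active)
            cwq->nr_active++;
        else
            work_flags |= TOY_WORK_DELAYED;
        return work_flags;
    }

    int main(void)
    {
        struct toy_cwq cwq = { .nr_active = 0, .max_active = 1 };

        printf("first:  %#x\n", queue_flags(&cwq, 0)); /* active, no DELAYED bit */
        printf("second: %#x\n", queue_flags(&cwq, 0)); /* over the limit -> DELAYED */
        return 0;
    }
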
@@ -1212,6 +1223,7 @@ static void worker_leave_idle(struct worker *worker)
  * bound), %false if offline.
  */
 static bool worker_maybe_bind_and_lock(struct worker *worker)
+__acquires(&gcwq->lock)
 {
 	struct global_cwq *gcwq = worker->gcwq;
 	struct task_struct *task = worker->task;
@@ -1485,6 +1497,8 @@ static void gcwq_mayday_timeout(unsigned long __gcwq)
  * otherwise.
  */
 static bool maybe_create_worker(struct global_cwq *gcwq)
+__releases(&gcwq->lock)
+__acquires(&gcwq->lock)
 {
 	if (!need_to_create_worker(gcwq))
 		return false;
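
The bare __releases()/__acquires() lines added here (and on worker_maybe_bind_and_lock() above and process_one_work() and wait_trustee_state() below) are context annotations for sparse, the kernel's static checker; plain gcc compiles them away. Roughly how include/linux/compiler.h of this era wires them up, plus a toy function with the same drop-then-retake shape, assuming a pthread mutex as a stand-in for gcwq->lock:

    #include <pthread.h>
    #include <stdio.h>

    #ifdef __CHECKER__  /* sparse defines __CHECKER__; gcc does not */
    # define __acquires(x)  __attribute__((context(x, 0, 1)))
    # define __releases(x)  __attribute__((context(x, 1, 0)))
    #else
    # define __acquires(x)
    # define __releases(x)
    #endif

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    /* annotated like maybe_create_worker(): entered with the lock held,
     * drops it to do slow work, re-takes it before returning */
    static void drop_and_retake(void)
    __releases(&lock)
    __acquires(&lock)
    {
        pthread_mutex_unlock(&lock);
        /* ... slow path: sleep, allocate, create a worker ... */
        pthread_mutex_lock(&lock);
    }

    int main(void)  /* build with -pthread */
    {
        pthread_mutex_lock(&lock);
        drop_and_retake();
        pthread_mutex_unlock(&lock);
        puts("lock balance preserved");
        return 0;
    }
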
@@ -1659,6 +1673,7 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
 	struct list_head *pos = gcwq_determine_ins_pos(cwq->gcwq, cwq);
 
 	move_linked_works(work, pos, NULL);
+	__clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
 	cwq->nr_active++;
 }
 
@@ -1666,6 +1681,7 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
  * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight
  * @cwq: cwq of interest
  * @color: color of work which left the queue
+ * @delayed: for a delayed work
  *
  * A work either has completed or is removed from pending queue,
  * decrement nr_in_flight of its cwq and handle workqueue flushing.
@@ -1673,19 +1689,22 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
 * CONTEXT:
 * spin_lock_irq(gcwq->lock).
 */
-static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color)
+static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color,
+				 bool delayed)
{
	/* ignore uncolored works */
	if (color == WORK_NO_COLOR)
		return;

	cwq->nr_in_flight[color]--;
-	cwq->nr_active--;

-	if (!list_empty(&cwq->delayed_works)) {
-		/* one down, submit a delayed one */
-		if (cwq->nr_active < cwq->max_active)
-			cwq_activate_first_delayed(cwq);
+	if (!delayed) {
+		cwq->nr_active--;
+		if (!list_empty(&cwq->delayed_works)) {
+			/* one down, submit a delayed one */
+			if (cwq->nr_active < cwq->max_active)
+				cwq_activate_first_delayed(cwq);
+		}
	}

	/* is flush in progress and are we at the flushing tip? */
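
This hunk is the substance of the fix: previously cwq_dec_nr_in_flight() decremented nr_active unconditionally, so a work cancelled straight off cwq->delayed_works, one that had never incremented nr_active, drove the counter down anyway, eventually underflowing it and breaking max_active enforcement. The new bool, fed from the WORK_STRUCT_DELAYED bit set at queue time, tells the two cases apart. A toy reproduction of the before/after arithmetic (simplified struct, not the kernel's):

    #include <stdbool.h>
    #include <stdio.h>

    struct toy_cwq { int nr_active, max_active; };

    /* old behavior: every departing work decrements nr_active */
    static void dec_old(struct toy_cwq *c) { c->nr_active--; }

    /* new behavior: works parked on delayed_works never counted as
     * active, so they must not decrement the counter either */
    static void dec_new(struct toy_cwq *c, bool delayed)
    {
        if (!delayed)
            c->nr_active--;
    }

    int main(void)
    {
        /* max_active == 1: one work running, a second parked as delayed */
        struct toy_cwq c = { .nr_active = 1, .max_active = 1 };

        dec_old(&c);  /* cancel the *delayed* work the old way */
        printf("old: nr_active = %d (a work is still running!)\n", c.nr_active);

        c.nr_active = 1;
        dec_new(&c, true);  /* the fixed path leaves the count alone */
        printf("new: nr_active = %d\n", c.nr_active);
        return 0;
    }
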
@@ -1722,6 +1741,8 @@ static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color)
 * spin_lock_irq(gcwq->lock) which is released and regrabbed.
 */
static void process_one_work(struct worker *worker, struct work_struct *work)
+__releases(&gcwq->lock)
+__acquires(&gcwq->lock)
{
	struct cpu_workqueue_struct *cwq = get_work_cwq(work);
	struct global_cwq *gcwq = cwq->gcwq;
@@ -1790,7 +1811,13 @@ static void process_one_work(struct worker *worker, struct work_struct *work)
 	work_clear_pending(work);
 	lock_map_acquire(&cwq->wq->lockdep_map);
 	lock_map_acquire(&lockdep_map);
+	trace_workqueue_execute_start(work);
 	f(work);
+	/*
+	 * While we must be careful to not use "work" after this, the trace
+	 * point will only record its address.
+	 */
+	trace_workqueue_execute_end(work);
 	lock_map_release(&lockdep_map);
 	lock_map_release(&cwq->wq->lockdep_map);
 
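
The in-diff comment is worth unpacking: f(work) may free, requeue, or otherwise recycle the work item, so trace_workqueue_execute_end() is only safe because it records the pointer value without dereferencing it. A userspace analogue of the same hazard (hypothetical struct and trace stand-ins, not the real tracepoints from include/trace/events/workqueue.h):

    #include <stdio.h>
    #include <stdlib.h>

    struct work { void (*func)(struct work *); };

    /* the "start" hook may still look inside the work; the "end" hook
     * must log the address only, since *w may already be freed */
    static void trace_start(struct work *w) { printf("execute start: %p\n", (void *)w); }
    static void trace_end(void *addr)       { printf("execute end:   %p\n", addr); }

    static void self_freeing_fn(struct work *w)
    {
        free(w);  /* work items are allowed to free themselves */
    }

    static void run_one(struct work *w)
    {
        trace_start(w);
        w->func(w);
        trace_end(w);  /* pointer value only: never touch *w here */
    }

    int main(void)
    {
        struct work *w = malloc(sizeof(*w));

        if (!w)
            return 1;
        w->func = self_freeing_fn;
        run_one(w);
        return 0;
    }
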
@@ -1814,7 +1841,7 @@ static void process_one_work(struct worker *worker, struct work_struct *work)
 	hlist_del_init(&worker->hentry);
 	worker->current_work = NULL;
 	worker->current_cwq = NULL;
-	cwq_dec_nr_in_flight(cwq, work_color);
+	cwq_dec_nr_in_flight(cwq, work_color, false);
 }
 
 /**
@@ -2379,7 +2406,8 @@ static int try_to_grab_pending(struct work_struct *work)
 			debug_work_deactivate(work);
 			list_del_init(&work->entry);
 			cwq_dec_nr_in_flight(get_work_cwq(work),
-				get_work_color(work));
+				get_work_color(work),
+				*work_data_bits(work) & WORK_STRUCT_DELAYED);
 			ret = 1;
 		}
 	}
@@ -2782,7 +2810,6 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name,
 		if (IS_ERR(rescuer->task))
 			goto err;
 
-		wq->rescuer = rescuer;
 		rescuer->task->flags |= PF_THREAD_BOUND;
 		wake_up_process(rescuer->task);
 	}
@@ -2824,6 +2851,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
 {
 	unsigned int cpu;
 
+	wq->flags |= WQ_DYING;
 	flush_workqueue(wq);
 
 	/*
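
WQ_DYING is set before flush_workqueue() so that the WARN_ON_ONCE() guard added to __queue_work() earlier in this diff catches any straggler that tries to queue work while the workqueue is being torn down, rather than letting it race with destruction. The ordering in miniature (toy types; the real flag lives in wq->flags):

    #include <stdbool.h>
    #include <stdio.h>

    struct toy_wq { bool dying; int pending; };

    /* mirrors the WARN_ON_ONCE(wq->flags & WQ_DYING) check in __queue_work() */
    static bool toy_queue(struct toy_wq *wq)
    {
        if (wq->dying) {
            fprintf(stderr, "WARN: queueing on dying workqueue\n");
            return false;
        }
        wq->pending++;
        return true;
    }

    static void toy_destroy(struct toy_wq *wq)
    {
        wq->dying = true;  /* set first, so stragglers are rejected... */
        wq->pending = 0;   /* ...then "flush" and tear down */
    }

    int main(void)
    {
        struct toy_wq wq = { 0 };

        toy_queue(&wq);    /* ok */
        toy_destroy(&wq);
        toy_queue(&wq);    /* rejected with a warning */
        return 0;
    }
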
@@ -2848,6 +2876,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
 	if (wq->flags & WQ_RESCUER) {
 		kthread_stop(wq->rescuer->task);
 		free_mayday_mask(wq->mayday_mask);
+		kfree(wq->rescuer);
 	}
 
 	free_cwqs(wq);
@@ -3230,6 +3259,8 @@ static int __cpuinit trustee_thread(void *__gcwq)
 * multiple times. To be used by cpu_callback.
 */
static void __cpuinit wait_trustee_state(struct global_cwq *gcwq, int state)
+__releases(&gcwq->lock)
+__acquires(&gcwq->lock)
{
	if (!(gcwq->trustee_state == state ||
	      gcwq->trustee_state == TRUSTEE_DONE)) {
@@ -3536,8 +3567,7 @@ static int __init init_workqueues(void)
 		spin_lock_init(&gcwq->lock);
 		INIT_LIST_HEAD(&gcwq->worklist);
 		gcwq->cpu = cpu;
-		if (cpu == WORK_CPU_UNBOUND)
-			gcwq->flags |= GCWQ_DISASSOCIATED;
+		gcwq->flags |= GCWQ_DISASSOCIATED;
 
 		INIT_LIST_HEAD(&gcwq->idle_list);
 		for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)
@@ -3561,6 +3591,8 @@ static int __init init_workqueues(void)
 		struct global_cwq *gcwq = get_gcwq(cpu);
 		struct worker *worker;
 
+		if (cpu != WORK_CPU_UNBOUND)
+			gcwq->flags &= ~GCWQ_DISASSOCIATED;
 		worker = create_worker(gcwq, true);
 		BUG_ON(!worker);
 		spin_lock_irq(&gcwq->lock);