author:    Tejun Heo <tj@kernel.org>    2013-01-24 14:01:33 -0500
committer: Tejun Heo <tj@kernel.org>    2013-01-24 14:01:33 -0500
commit:    d565ed6309300304de4a865a04adef07a85edc45
tree:      b79e83064232d5bbf47550b090d6b1e288e123fb /kernel
parent:    ec22ca5eab0bd225588c69ccd06b16504cb05adf
workqueue: move global_cwq->lock to worker_pool
Move gcwq->lock to pool->lock. The conversion is mostly
straight-forward. Things worth noting are
* In many places, this removes the need to use gcwq completely. pool
is used directly instead. get_std_worker_pool() is added to help
some of these conversions. This also leaves get_work_gcwq() without
any user. Removed.
* In hotplug and freezer paths, the pools belonging to a CPU are often
processed together. This patch makes those paths hold locks of all
pools, with highpri lock nested inside, to keep the conversion
straight-forward. These nested lockings will be removed by
following patches.
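In sketch form, the nested locking described in the second point looks roughly like the following (claim_cpu_pool_locks()/release_cpu_pool_locks() are illustrative names only, not helpers added by this patch; the real code is in gcwq_claim_assoc_and_lock(), freeze_workqueues_begin() and thaw_workqueues() in the diff below):

/* take the locks of every pool on a CPU; highpri nested inside normal */
static void claim_cpu_pool_locks(struct global_cwq *gcwq)
{
	struct worker_pool *pool;

	local_irq_disable();
	for_each_worker_pool(pool, gcwq)
		/* lockdep subclass: 0 for the normal pool, 1 for highpri */
		spin_lock_nested(&pool->lock, pool - gcwq->pools);
}

static void release_cpu_pool_locks(struct global_cwq *gcwq)
{
	struct worker_pool *pool;

	for_each_worker_pool(pool, gcwq)
		spin_unlock(&pool->lock);
	local_irq_enable();
}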
This is part of an effort to remove global_cwq and make worker_pool
the top level abstraction, which in turn will help implementing worker
pools with user-specified attributes.
Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Lai Jiangshan <laijs@cn.fujitsu.com>
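To make the first point above concrete, the caller-side pattern changes roughly as follows (example_before()/example_after() are hypothetical stand-ins for illustration; the actual conversions are in __queue_work(), create_worker() and friends in the diff below):

static void example_before(int cpu)
{
	struct global_cwq *gcwq = get_gcwq(cpu);	/* old: the lock lives in the gcwq */

	spin_lock_irq(&gcwq->lock);
	/* ... operate on one of gcwq->pools[] ... */
	spin_unlock_irq(&gcwq->lock);
}

static void example_after(int cpu, bool highpri)
{
	struct worker_pool *pool = get_std_worker_pool(cpu, highpri);

	spin_lock_irq(&pool->lock);	/* new: each worker_pool carries its own lock */
	/* ... operate on *pool ... */
	spin_unlock_irq(&pool->lock);
}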
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/workqueue.c | 316 |
1 file changed, 154 insertions, 162 deletions
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 366132bd226f..c93651208760 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -108,12 +108,12 @@ enum { | |||
108 | * P: Preemption protected. Disabling preemption is enough and should | 108 | * P: Preemption protected. Disabling preemption is enough and should |
109 | * only be modified and accessed from the local cpu. | 109 | * only be modified and accessed from the local cpu. |
110 | * | 110 | * |
111 | * L: gcwq->lock protected. Access with gcwq->lock held. | 111 | * L: pool->lock protected. Access with pool->lock held. |
112 | * | 112 | * |
113 | * X: During normal operation, modification requires gcwq->lock and | 113 | * X: During normal operation, modification requires pool->lock and should |
114 | * should be done only from local cpu. Either disabling preemption | 114 | * be done only from local cpu. Either disabling preemption on local |
115 | * on local cpu or grabbing gcwq->lock is enough for read access. | 115 | * cpu or grabbing pool->lock is enough for read access. If |
116 | * If POOL_DISASSOCIATED is set, it's identical to L. | 116 | * POOL_DISASSOCIATED is set, it's identical to L. |
117 | * | 117 | * |
118 | * F: wq->flush_mutex protected. | 118 | * F: wq->flush_mutex protected. |
119 | * | 119 | * |
@@ -124,6 +124,7 @@ enum { | |||
124 | 124 | ||
125 | struct worker_pool { | 125 | struct worker_pool { |
126 | struct global_cwq *gcwq; /* I: the owning gcwq */ | 126 | struct global_cwq *gcwq; /* I: the owning gcwq */ |
127 | spinlock_t lock; /* the pool lock */ | ||
127 | unsigned int cpu; /* I: the associated cpu */ | 128 | unsigned int cpu; /* I: the associated cpu */ |
128 | int id; /* I: pool ID */ | 129 | int id; /* I: pool ID */ |
129 | unsigned int flags; /* X: flags */ | 130 | unsigned int flags; /* X: flags */ |
@@ -152,8 +153,6 @@ struct worker_pool { | |||
152 | * target workqueues. | 153 | * target workqueues. |
153 | */ | 154 | */ |
154 | struct global_cwq { | 155 | struct global_cwq { |
155 | spinlock_t lock; /* the gcwq lock */ | ||
156 | |||
157 | struct worker_pool pools[NR_STD_WORKER_POOLS]; | 156 | struct worker_pool pools[NR_STD_WORKER_POOLS]; |
158 | /* normal and highpri pools */ | 157 | /* normal and highpri pools */ |
159 | } ____cacheline_aligned_in_smp; | 158 | } ____cacheline_aligned_in_smp; |
@@ -487,6 +486,13 @@ static struct worker_pool *worker_pool_by_id(int pool_id) | |||
487 | return idr_find(&worker_pool_idr, pool_id); | 486 | return idr_find(&worker_pool_idr, pool_id); |
488 | } | 487 | } |
489 | 488 | ||
489 | static struct worker_pool *get_std_worker_pool(int cpu, bool highpri) | ||
490 | { | ||
491 | struct global_cwq *gcwq = get_gcwq(cpu); | ||
492 | |||
493 | return &gcwq->pools[highpri]; | ||
494 | } | ||
495 | |||
490 | static atomic_t *get_pool_nr_running(struct worker_pool *pool) | 496 | static atomic_t *get_pool_nr_running(struct worker_pool *pool) |
491 | { | 497 | { |
492 | int cpu = pool->cpu; | 498 | int cpu = pool->cpu; |
@@ -628,13 +634,6 @@ static int get_work_pool_id(struct work_struct *work) | |||
628 | return pool ? pool->id : WORK_OFFQ_POOL_NONE; | 634 | return pool ? pool->id : WORK_OFFQ_POOL_NONE; |
629 | } | 635 | } |
630 | 636 | ||
631 | static struct global_cwq *get_work_gcwq(struct work_struct *work) | ||
632 | { | ||
633 | struct worker_pool *pool = get_work_pool(work); | ||
634 | |||
635 | return pool ? pool->gcwq : NULL; | ||
636 | } | ||
637 | |||
638 | static void mark_work_canceling(struct work_struct *work) | 637 | static void mark_work_canceling(struct work_struct *work) |
639 | { | 638 | { |
640 | unsigned long pool_id = get_work_pool_id(work); | 639 | unsigned long pool_id = get_work_pool_id(work); |
@@ -653,7 +652,7 @@ static bool work_is_canceling(struct work_struct *work) | |||
653 | /* | 652 | /* |
654 | * Policy functions. These define the policies on how the global worker | 653 | * Policy functions. These define the policies on how the global worker |
655 | * pools are managed. Unless noted otherwise, these functions assume that | 654 | * pools are managed. Unless noted otherwise, these functions assume that |
656 | * they're being called with gcwq->lock held. | 655 | * they're being called with pool->lock held. |
657 | */ | 656 | */ |
658 | 657 | ||
659 | static bool __need_more_worker(struct worker_pool *pool) | 658 | static bool __need_more_worker(struct worker_pool *pool) |
@@ -738,7 +737,7 @@ static struct worker *first_worker(struct worker_pool *pool) | |||
738 | * Wake up the first idle worker of @pool. | 737 | * Wake up the first idle worker of @pool. |
739 | * | 738 | * |
740 | * CONTEXT: | 739 | * CONTEXT: |
741 | * spin_lock_irq(gcwq->lock). | 740 | * spin_lock_irq(pool->lock). |
742 | */ | 741 | */ |
743 | static void wake_up_worker(struct worker_pool *pool) | 742 | static void wake_up_worker(struct worker_pool *pool) |
744 | { | 743 | { |
@@ -813,7 +812,7 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task, | |||
813 | * NOT_RUNNING is clear. This means that we're bound to and | 812 | * NOT_RUNNING is clear. This means that we're bound to and |
814 | * running on the local cpu w/ rq lock held and preemption | 813 | * running on the local cpu w/ rq lock held and preemption |
815 | * disabled, which in turn means that none else could be | 814 | * disabled, which in turn means that none else could be |
816 | * manipulating idle_list, so dereferencing idle_list without gcwq | 815 | * manipulating idle_list, so dereferencing idle_list without pool |
817 | * lock is safe. | 816 | * lock is safe. |
818 | */ | 817 | */ |
819 | if (atomic_dec_and_test(nr_running) && !list_empty(&pool->worklist)) | 818 | if (atomic_dec_and_test(nr_running) && !list_empty(&pool->worklist)) |
@@ -832,7 +831,7 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task, | |||
832 | * woken up. | 831 | * woken up. |
833 | * | 832 | * |
834 | * CONTEXT: | 833 | * CONTEXT: |
835 | * spin_lock_irq(gcwq->lock) | 834 | * spin_lock_irq(pool->lock) |
836 | */ | 835 | */ |
837 | static inline void worker_set_flags(struct worker *worker, unsigned int flags, | 836 | static inline void worker_set_flags(struct worker *worker, unsigned int flags, |
838 | bool wakeup) | 837 | bool wakeup) |
@@ -869,7 +868,7 @@ static inline void worker_set_flags(struct worker *worker, unsigned int flags, | |||
869 | * Clear @flags in @worker->flags and adjust nr_running accordingly. | 868 | * Clear @flags in @worker->flags and adjust nr_running accordingly. |
870 | * | 869 | * |
871 | * CONTEXT: | 870 | * CONTEXT: |
872 | * spin_lock_irq(gcwq->lock) | 871 | * spin_lock_irq(pool->lock) |
873 | */ | 872 | */ |
874 | static inline void worker_clr_flags(struct worker *worker, unsigned int flags) | 873 | static inline void worker_clr_flags(struct worker *worker, unsigned int flags) |
875 | { | 874 | { |
@@ -918,7 +917,7 @@ static inline void worker_clr_flags(struct worker *worker, unsigned int flags) | |||
918 | * function. | 917 | * function. |
919 | * | 918 | * |
920 | * CONTEXT: | 919 | * CONTEXT: |
921 | * spin_lock_irq(gcwq->lock). | 920 | * spin_lock_irq(pool->lock). |
922 | * | 921 | * |
923 | * RETURNS: | 922 | * RETURNS: |
924 | * Pointer to worker which is executing @work if found, NULL | 923 | * Pointer to worker which is executing @work if found, NULL |
@@ -954,7 +953,7 @@ static struct worker *find_worker_executing_work(struct worker_pool *pool, | |||
954 | * nested inside outer list_for_each_entry_safe(). | 953 | * nested inside outer list_for_each_entry_safe(). |
955 | * | 954 | * |
956 | * CONTEXT: | 955 | * CONTEXT: |
957 | * spin_lock_irq(gcwq->lock). | 956 | * spin_lock_irq(pool->lock). |
958 | */ | 957 | */ |
959 | static void move_linked_works(struct work_struct *work, struct list_head *head, | 958 | static void move_linked_works(struct work_struct *work, struct list_head *head, |
960 | struct work_struct **nextp) | 959 | struct work_struct **nextp) |
@@ -1007,7 +1006,7 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq) | |||
1007 | * decrement nr_in_flight of its cwq and handle workqueue flushing. | 1006 | * decrement nr_in_flight of its cwq and handle workqueue flushing. |
1008 | * | 1007 | * |
1009 | * CONTEXT: | 1008 | * CONTEXT: |
1010 | * spin_lock_irq(gcwq->lock). | 1009 | * spin_lock_irq(pool->lock). |
1011 | */ | 1010 | */ |
1012 | static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color) | 1011 | static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color) |
1013 | { | 1012 | { |
@@ -1071,7 +1070,7 @@ static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color) | |||
1071 | static int try_to_grab_pending(struct work_struct *work, bool is_dwork, | 1070 | static int try_to_grab_pending(struct work_struct *work, bool is_dwork, |
1072 | unsigned long *flags) | 1071 | unsigned long *flags) |
1073 | { | 1072 | { |
1074 | struct global_cwq *gcwq; | 1073 | struct worker_pool *pool; |
1075 | 1074 | ||
1076 | local_irq_save(*flags); | 1075 | local_irq_save(*flags); |
1077 | 1076 | ||
@@ -1096,19 +1095,19 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork, | |||
1096 | * The queueing is in progress, or it is already queued. Try to | 1095 | * The queueing is in progress, or it is already queued. Try to |
1097 | * steal it from ->worklist without clearing WORK_STRUCT_PENDING. | 1096 | * steal it from ->worklist without clearing WORK_STRUCT_PENDING. |
1098 | */ | 1097 | */ |
1099 | gcwq = get_work_gcwq(work); | 1098 | pool = get_work_pool(work); |
1100 | if (!gcwq) | 1099 | if (!pool) |
1101 | goto fail; | 1100 | goto fail; |
1102 | 1101 | ||
1103 | spin_lock(&gcwq->lock); | 1102 | spin_lock(&pool->lock); |
1104 | if (!list_empty(&work->entry)) { | 1103 | if (!list_empty(&work->entry)) { |
1105 | /* | 1104 | /* |
1106 | * This work is queued, but perhaps we locked the wrong gcwq. | 1105 | * This work is queued, but perhaps we locked the wrong |
1107 | * In that case we must see the new value after rmb(), see | 1106 | * pool. In that case we must see the new value after |
1108 | * insert_work()->wmb(). | 1107 | * rmb(), see insert_work()->wmb(). |
1109 | */ | 1108 | */ |
1110 | smp_rmb(); | 1109 | smp_rmb(); |
1111 | if (gcwq == get_work_gcwq(work)) { | 1110 | if (pool == get_work_pool(work)) { |
1112 | debug_work_deactivate(work); | 1111 | debug_work_deactivate(work); |
1113 | 1112 | ||
1114 | /* | 1113 | /* |
@@ -1126,11 +1125,11 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork, | |||
1126 | cwq_dec_nr_in_flight(get_work_cwq(work), | 1125 | cwq_dec_nr_in_flight(get_work_cwq(work), |
1127 | get_work_color(work)); | 1126 | get_work_color(work)); |
1128 | 1127 | ||
1129 | spin_unlock(&gcwq->lock); | 1128 | spin_unlock(&pool->lock); |
1130 | return 1; | 1129 | return 1; |
1131 | } | 1130 | } |
1132 | } | 1131 | } |
1133 | spin_unlock(&gcwq->lock); | 1132 | spin_unlock(&pool->lock); |
1134 | fail: | 1133 | fail: |
1135 | local_irq_restore(*flags); | 1134 | local_irq_restore(*flags); |
1136 | if (work_is_canceling(work)) | 1135 | if (work_is_canceling(work)) |
@@ -1150,7 +1149,7 @@ fail: | |||
1150 | * @extra_flags is or'd to work_struct flags. | 1149 | * @extra_flags is or'd to work_struct flags. |
1151 | * | 1150 | * |
1152 | * CONTEXT: | 1151 | * CONTEXT: |
1153 | * spin_lock_irq(gcwq->lock). | 1152 | * spin_lock_irq(pool->lock). |
1154 | */ | 1153 | */ |
1155 | static void insert_work(struct cpu_workqueue_struct *cwq, | 1154 | static void insert_work(struct cpu_workqueue_struct *cwq, |
1156 | struct work_struct *work, struct list_head *head, | 1155 | struct work_struct *work, struct list_head *head, |
@@ -1193,23 +1192,22 @@ static bool is_chained_work(struct workqueue_struct *wq) | |||
1193 | for_each_gcwq_cpu(cpu) { | 1192 | for_each_gcwq_cpu(cpu) { |
1194 | struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); | 1193 | struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); |
1195 | struct worker_pool *pool = cwq->pool; | 1194 | struct worker_pool *pool = cwq->pool; |
1196 | struct global_cwq *gcwq = pool->gcwq; | ||
1197 | struct worker *worker; | 1195 | struct worker *worker; |
1198 | struct hlist_node *pos; | 1196 | struct hlist_node *pos; |
1199 | int i; | 1197 | int i; |
1200 | 1198 | ||
1201 | spin_lock_irqsave(&gcwq->lock, flags); | 1199 | spin_lock_irqsave(&pool->lock, flags); |
1202 | for_each_busy_worker(worker, i, pos, pool) { | 1200 | for_each_busy_worker(worker, i, pos, pool) { |
1203 | if (worker->task != current) | 1201 | if (worker->task != current) |
1204 | continue; | 1202 | continue; |
1205 | spin_unlock_irqrestore(&gcwq->lock, flags); | 1203 | spin_unlock_irqrestore(&pool->lock, flags); |
1206 | /* | 1204 | /* |
1207 | * I'm @worker, no locking necessary. See if @work | 1205 | * I'm @worker, no locking necessary. See if @work |
1208 | * is headed to the same workqueue. | 1206 | * is headed to the same workqueue. |
1209 | */ | 1207 | */ |
1210 | return worker->current_cwq->wq == wq; | 1208 | return worker->current_cwq->wq == wq; |
1211 | } | 1209 | } |
1212 | spin_unlock_irqrestore(&gcwq->lock, flags); | 1210 | spin_unlock_irqrestore(&pool->lock, flags); |
1213 | } | 1211 | } |
1214 | return false; | 1212 | return false; |
1215 | } | 1213 | } |
@@ -1217,7 +1215,8 @@ static bool is_chained_work(struct workqueue_struct *wq) | |||
1217 | static void __queue_work(unsigned int cpu, struct workqueue_struct *wq, | 1215 | static void __queue_work(unsigned int cpu, struct workqueue_struct *wq, |
1218 | struct work_struct *work) | 1216 | struct work_struct *work) |
1219 | { | 1217 | { |
1220 | struct global_cwq *gcwq; | 1218 | bool highpri = wq->flags & WQ_HIGHPRI; |
1219 | struct worker_pool *pool; | ||
1221 | struct cpu_workqueue_struct *cwq; | 1220 | struct cpu_workqueue_struct *cwq; |
1222 | struct list_head *worklist; | 1221 | struct list_head *worklist; |
1223 | unsigned int work_flags; | 1222 | unsigned int work_flags; |
@@ -1238,7 +1237,7 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq, | |||
1238 | WARN_ON_ONCE(!is_chained_work(wq))) | 1237 | WARN_ON_ONCE(!is_chained_work(wq))) |
1239 | return; | 1238 | return; |
1240 | 1239 | ||
1241 | /* determine gcwq to use */ | 1240 | /* determine pool to use */ |
1242 | if (!(wq->flags & WQ_UNBOUND)) { | 1241 | if (!(wq->flags & WQ_UNBOUND)) { |
1243 | struct worker_pool *last_pool; | 1242 | struct worker_pool *last_pool; |
1244 | 1243 | ||
@@ -1251,38 +1250,37 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq, | |||
1251 | * work needs to be queued on that cpu to guarantee | 1250 | * work needs to be queued on that cpu to guarantee |
1252 | * non-reentrancy. | 1251 | * non-reentrancy. |
1253 | */ | 1252 | */ |
1254 | gcwq = get_gcwq(cpu); | 1253 | pool = get_std_worker_pool(cpu, highpri); |
1255 | last_pool = get_work_pool(work); | 1254 | last_pool = get_work_pool(work); |
1256 | 1255 | ||
1257 | if (last_pool && last_pool->gcwq != gcwq) { | 1256 | if (last_pool && last_pool != pool) { |
1258 | struct global_cwq *last_gcwq = last_pool->gcwq; | ||
1259 | struct worker *worker; | 1257 | struct worker *worker; |
1260 | 1258 | ||
1261 | spin_lock(&last_gcwq->lock); | 1259 | spin_lock(&last_pool->lock); |
1262 | 1260 | ||
1263 | worker = find_worker_executing_work(last_pool, work); | 1261 | worker = find_worker_executing_work(last_pool, work); |
1264 | 1262 | ||
1265 | if (worker && worker->current_cwq->wq == wq) | 1263 | if (worker && worker->current_cwq->wq == wq) |
1266 | gcwq = last_gcwq; | 1264 | pool = last_pool; |
1267 | else { | 1265 | else { |
1268 | /* meh... not running there, queue here */ | 1266 | /* meh... not running there, queue here */ |
1269 | spin_unlock(&last_gcwq->lock); | 1267 | spin_unlock(&last_pool->lock); |
1270 | spin_lock(&gcwq->lock); | 1268 | spin_lock(&pool->lock); |
1271 | } | 1269 | } |
1272 | } else { | 1270 | } else { |
1273 | spin_lock(&gcwq->lock); | 1271 | spin_lock(&pool->lock); |
1274 | } | 1272 | } |
1275 | } else { | 1273 | } else { |
1276 | gcwq = get_gcwq(WORK_CPU_UNBOUND); | 1274 | pool = get_std_worker_pool(WORK_CPU_UNBOUND, highpri); |
1277 | spin_lock(&gcwq->lock); | 1275 | spin_lock(&pool->lock); |
1278 | } | 1276 | } |
1279 | 1277 | ||
1280 | /* gcwq determined, get cwq and queue */ | 1278 | /* pool determined, get cwq and queue */ |
1281 | cwq = get_cwq(gcwq->pools[0].cpu, wq); | 1279 | cwq = get_cwq(pool->cpu, wq); |
1282 | trace_workqueue_queue_work(req_cpu, cwq, work); | 1280 | trace_workqueue_queue_work(req_cpu, cwq, work); |
1283 | 1281 | ||
1284 | if (WARN_ON(!list_empty(&work->entry))) { | 1282 | if (WARN_ON(!list_empty(&work->entry))) { |
1285 | spin_unlock(&gcwq->lock); | 1283 | spin_unlock(&pool->lock); |
1286 | return; | 1284 | return; |
1287 | } | 1285 | } |
1288 | 1286 | ||
@@ -1300,7 +1298,7 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq, | |||
1300 | 1298 | ||
1301 | insert_work(cwq, work, worklist, work_flags); | 1299 | insert_work(cwq, work, worklist, work_flags); |
1302 | 1300 | ||
1303 | spin_unlock(&gcwq->lock); | 1301 | spin_unlock(&pool->lock); |
1304 | } | 1302 | } |
1305 | 1303 | ||
1306 | /** | 1304 | /** |
@@ -1523,7 +1521,7 @@ EXPORT_SYMBOL_GPL(mod_delayed_work); | |||
1523 | * necessary. | 1521 | * necessary. |
1524 | * | 1522 | * |
1525 | * LOCKING: | 1523 | * LOCKING: |
1526 | * spin_lock_irq(gcwq->lock). | 1524 | * spin_lock_irq(pool->lock). |
1527 | */ | 1525 | */ |
1528 | static void worker_enter_idle(struct worker *worker) | 1526 | static void worker_enter_idle(struct worker *worker) |
1529 | { | 1527 | { |
@@ -1546,7 +1544,7 @@ static void worker_enter_idle(struct worker *worker) | |||
1546 | 1544 | ||
1547 | /* | 1545 | /* |
1548 | * Sanity check nr_running. Because gcwq_unbind_fn() releases | 1546 | * Sanity check nr_running. Because gcwq_unbind_fn() releases |
1549 | * gcwq->lock between setting %WORKER_UNBOUND and zapping | 1547 | * pool->lock between setting %WORKER_UNBOUND and zapping |
1550 | * nr_running, the warning may trigger spuriously. Check iff | 1548 | * nr_running, the warning may trigger spuriously. Check iff |
1551 | * unbind is not in progress. | 1549 | * unbind is not in progress. |
1552 | */ | 1550 | */ |
@@ -1562,7 +1560,7 @@ static void worker_enter_idle(struct worker *worker) | |||
1562 | * @worker is leaving idle state. Update stats. | 1560 | * @worker is leaving idle state. Update stats. |
1563 | * | 1561 | * |
1564 | * LOCKING: | 1562 | * LOCKING: |
1565 | * spin_lock_irq(gcwq->lock). | 1563 | * spin_lock_irq(pool->lock). |
1566 | */ | 1564 | */ |
1567 | static void worker_leave_idle(struct worker *worker) | 1565 | static void worker_leave_idle(struct worker *worker) |
1568 | { | 1566 | { |
@@ -1597,7 +1595,7 @@ static void worker_leave_idle(struct worker *worker) | |||
1597 | * guarantee the scheduling requirement described in the first paragraph. | 1595 | * guarantee the scheduling requirement described in the first paragraph. |
1598 | * | 1596 | * |
1599 | * CONTEXT: | 1597 | * CONTEXT: |
1600 | * Might sleep. Called without any lock but returns with gcwq->lock | 1598 | * Might sleep. Called without any lock but returns with pool->lock |
1601 | * held. | 1599 | * held. |
1602 | * | 1600 | * |
1603 | * RETURNS: | 1601 | * RETURNS: |
@@ -1605,10 +1603,9 @@ static void worker_leave_idle(struct worker *worker) | |||
1605 | * bound), %false if offline. | 1603 | * bound), %false if offline. |
1606 | */ | 1604 | */ |
1607 | static bool worker_maybe_bind_and_lock(struct worker *worker) | 1605 | static bool worker_maybe_bind_and_lock(struct worker *worker) |
1608 | __acquires(&gcwq->lock) | 1606 | __acquires(&pool->lock) |
1609 | { | 1607 | { |
1610 | struct worker_pool *pool = worker->pool; | 1608 | struct worker_pool *pool = worker->pool; |
1611 | struct global_cwq *gcwq = pool->gcwq; | ||
1612 | struct task_struct *task = worker->task; | 1609 | struct task_struct *task = worker->task; |
1613 | 1610 | ||
1614 | while (true) { | 1611 | while (true) { |
@@ -1621,14 +1618,14 @@ __acquires(&gcwq->lock) | |||
1621 | if (!(pool->flags & POOL_DISASSOCIATED)) | 1618 | if (!(pool->flags & POOL_DISASSOCIATED)) |
1622 | set_cpus_allowed_ptr(task, get_cpu_mask(pool->cpu)); | 1619 | set_cpus_allowed_ptr(task, get_cpu_mask(pool->cpu)); |
1623 | 1620 | ||
1624 | spin_lock_irq(&gcwq->lock); | 1621 | spin_lock_irq(&pool->lock); |
1625 | if (pool->flags & POOL_DISASSOCIATED) | 1622 | if (pool->flags & POOL_DISASSOCIATED) |
1626 | return false; | 1623 | return false; |
1627 | if (task_cpu(task) == pool->cpu && | 1624 | if (task_cpu(task) == pool->cpu && |
1628 | cpumask_equal(¤t->cpus_allowed, | 1625 | cpumask_equal(¤t->cpus_allowed, |
1629 | get_cpu_mask(pool->cpu))) | 1626 | get_cpu_mask(pool->cpu))) |
1630 | return true; | 1627 | return true; |
1631 | spin_unlock_irq(&gcwq->lock); | 1628 | spin_unlock_irq(&pool->lock); |
1632 | 1629 | ||
1633 | /* | 1630 | /* |
1634 | * We've raced with CPU hot[un]plug. Give it a breather | 1631 | * We've raced with CPU hot[un]plug. Give it a breather |
@@ -1647,15 +1644,13 @@ __acquires(&gcwq->lock) | |||
1647 | */ | 1644 | */ |
1648 | static void idle_worker_rebind(struct worker *worker) | 1645 | static void idle_worker_rebind(struct worker *worker) |
1649 | { | 1646 | { |
1650 | struct global_cwq *gcwq = worker->pool->gcwq; | ||
1651 | |||
1652 | /* CPU may go down again inbetween, clear UNBOUND only on success */ | 1647 | /* CPU may go down again inbetween, clear UNBOUND only on success */ |
1653 | if (worker_maybe_bind_and_lock(worker)) | 1648 | if (worker_maybe_bind_and_lock(worker)) |
1654 | worker_clr_flags(worker, WORKER_UNBOUND); | 1649 | worker_clr_flags(worker, WORKER_UNBOUND); |
1655 | 1650 | ||
1656 | /* rebind complete, become available again */ | 1651 | /* rebind complete, become available again */ |
1657 | list_add(&worker->entry, &worker->pool->idle_list); | 1652 | list_add(&worker->entry, &worker->pool->idle_list); |
1658 | spin_unlock_irq(&gcwq->lock); | 1653 | spin_unlock_irq(&worker->pool->lock); |
1659 | } | 1654 | } |
1660 | 1655 | ||
1661 | /* | 1656 | /* |
@@ -1667,12 +1662,11 @@ static void idle_worker_rebind(struct worker *worker) | |||
1667 | static void busy_worker_rebind_fn(struct work_struct *work) | 1662 | static void busy_worker_rebind_fn(struct work_struct *work) |
1668 | { | 1663 | { |
1669 | struct worker *worker = container_of(work, struct worker, rebind_work); | 1664 | struct worker *worker = container_of(work, struct worker, rebind_work); |
1670 | struct global_cwq *gcwq = worker->pool->gcwq; | ||
1671 | 1665 | ||
1672 | if (worker_maybe_bind_and_lock(worker)) | 1666 | if (worker_maybe_bind_and_lock(worker)) |
1673 | worker_clr_flags(worker, WORKER_UNBOUND); | 1667 | worker_clr_flags(worker, WORKER_UNBOUND); |
1674 | 1668 | ||
1675 | spin_unlock_irq(&gcwq->lock); | 1669 | spin_unlock_irq(&worker->pool->lock); |
1676 | } | 1670 | } |
1677 | 1671 | ||
1678 | /** | 1672 | /** |
@@ -1704,10 +1698,10 @@ static void rebind_workers(struct global_cwq *gcwq) | |||
1704 | struct hlist_node *pos; | 1698 | struct hlist_node *pos; |
1705 | int i; | 1699 | int i; |
1706 | 1700 | ||
1707 | lockdep_assert_held(&gcwq->lock); | 1701 | for_each_worker_pool(pool, gcwq) { |
1708 | |||
1709 | for_each_worker_pool(pool, gcwq) | ||
1710 | lockdep_assert_held(&pool->assoc_mutex); | 1702 | lockdep_assert_held(&pool->assoc_mutex); |
1703 | lockdep_assert_held(&pool->lock); | ||
1704 | } | ||
1711 | 1705 | ||
1712 | /* dequeue and kick idle ones */ | 1706 | /* dequeue and kick idle ones */ |
1713 | for_each_worker_pool(pool, gcwq) { | 1707 | for_each_worker_pool(pool, gcwq) { |
@@ -1785,19 +1779,18 @@ static struct worker *alloc_worker(void) | |||
1785 | */ | 1779 | */ |
1786 | static struct worker *create_worker(struct worker_pool *pool) | 1780 | static struct worker *create_worker(struct worker_pool *pool) |
1787 | { | 1781 | { |
1788 | struct global_cwq *gcwq = pool->gcwq; | ||
1789 | const char *pri = std_worker_pool_pri(pool) ? "H" : ""; | 1782 | const char *pri = std_worker_pool_pri(pool) ? "H" : ""; |
1790 | struct worker *worker = NULL; | 1783 | struct worker *worker = NULL; |
1791 | int id = -1; | 1784 | int id = -1; |
1792 | 1785 | ||
1793 | spin_lock_irq(&gcwq->lock); | 1786 | spin_lock_irq(&pool->lock); |
1794 | while (ida_get_new(&pool->worker_ida, &id)) { | 1787 | while (ida_get_new(&pool->worker_ida, &id)) { |
1795 | spin_unlock_irq(&gcwq->lock); | 1788 | spin_unlock_irq(&pool->lock); |
1796 | if (!ida_pre_get(&pool->worker_ida, GFP_KERNEL)) | 1789 | if (!ida_pre_get(&pool->worker_ida, GFP_KERNEL)) |
1797 | goto fail; | 1790 | goto fail; |
1798 | spin_lock_irq(&gcwq->lock); | 1791 | spin_lock_irq(&pool->lock); |
1799 | } | 1792 | } |
1800 | spin_unlock_irq(&gcwq->lock); | 1793 | spin_unlock_irq(&pool->lock); |
1801 | 1794 | ||
1802 | worker = alloc_worker(); | 1795 | worker = alloc_worker(); |
1803 | if (!worker) | 1796 | if (!worker) |
@@ -1838,9 +1831,9 @@ static struct worker *create_worker(struct worker_pool *pool) | |||
1838 | return worker; | 1831 | return worker; |
1839 | fail: | 1832 | fail: |
1840 | if (id >= 0) { | 1833 | if (id >= 0) { |
1841 | spin_lock_irq(&gcwq->lock); | 1834 | spin_lock_irq(&pool->lock); |
1842 | ida_remove(&pool->worker_ida, id); | 1835 | ida_remove(&pool->worker_ida, id); |
1843 | spin_unlock_irq(&gcwq->lock); | 1836 | spin_unlock_irq(&pool->lock); |
1844 | } | 1837 | } |
1845 | kfree(worker); | 1838 | kfree(worker); |
1846 | return NULL; | 1839 | return NULL; |
@@ -1853,7 +1846,7 @@ fail: | |||
1853 | * Make the gcwq aware of @worker and start it. | 1846 | * Make the gcwq aware of @worker and start it. |
1854 | * | 1847 | * |
1855 | * CONTEXT: | 1848 | * CONTEXT: |
1856 | * spin_lock_irq(gcwq->lock). | 1849 | * spin_lock_irq(pool->lock). |
1857 | */ | 1850 | */ |
1858 | static void start_worker(struct worker *worker) | 1851 | static void start_worker(struct worker *worker) |
1859 | { | 1852 | { |
@@ -1870,12 +1863,11 @@ static void start_worker(struct worker *worker) | |||
1870 | * Destroy @worker and adjust @gcwq stats accordingly. | 1863 | * Destroy @worker and adjust @gcwq stats accordingly. |
1871 | * | 1864 | * |
1872 | * CONTEXT: | 1865 | * CONTEXT: |
1873 | * spin_lock_irq(gcwq->lock) which is released and regrabbed. | 1866 | * spin_lock_irq(pool->lock) which is released and regrabbed. |
1874 | */ | 1867 | */ |
1875 | static void destroy_worker(struct worker *worker) | 1868 | static void destroy_worker(struct worker *worker) |
1876 | { | 1869 | { |
1877 | struct worker_pool *pool = worker->pool; | 1870 | struct worker_pool *pool = worker->pool; |
1878 | struct global_cwq *gcwq = pool->gcwq; | ||
1879 | int id = worker->id; | 1871 | int id = worker->id; |
1880 | 1872 | ||
1881 | /* sanity check frenzy */ | 1873 | /* sanity check frenzy */ |
@@ -1890,21 +1882,20 @@ static void destroy_worker(struct worker *worker) | |||
1890 | list_del_init(&worker->entry); | 1882 | list_del_init(&worker->entry); |
1891 | worker->flags |= WORKER_DIE; | 1883 | worker->flags |= WORKER_DIE; |
1892 | 1884 | ||
1893 | spin_unlock_irq(&gcwq->lock); | 1885 | spin_unlock_irq(&pool->lock); |
1894 | 1886 | ||
1895 | kthread_stop(worker->task); | 1887 | kthread_stop(worker->task); |
1896 | kfree(worker); | 1888 | kfree(worker); |
1897 | 1889 | ||
1898 | spin_lock_irq(&gcwq->lock); | 1890 | spin_lock_irq(&pool->lock); |
1899 | ida_remove(&pool->worker_ida, id); | 1891 | ida_remove(&pool->worker_ida, id); |
1900 | } | 1892 | } |
1901 | 1893 | ||
1902 | static void idle_worker_timeout(unsigned long __pool) | 1894 | static void idle_worker_timeout(unsigned long __pool) |
1903 | { | 1895 | { |
1904 | struct worker_pool *pool = (void *)__pool; | 1896 | struct worker_pool *pool = (void *)__pool; |
1905 | struct global_cwq *gcwq = pool->gcwq; | ||
1906 | 1897 | ||
1907 | spin_lock_irq(&gcwq->lock); | 1898 | spin_lock_irq(&pool->lock); |
1908 | 1899 | ||
1909 | if (too_many_workers(pool)) { | 1900 | if (too_many_workers(pool)) { |
1910 | struct worker *worker; | 1901 | struct worker *worker; |
@@ -1923,7 +1914,7 @@ static void idle_worker_timeout(unsigned long __pool) | |||
1923 | } | 1914 | } |
1924 | } | 1915 | } |
1925 | 1916 | ||
1926 | spin_unlock_irq(&gcwq->lock); | 1917 | spin_unlock_irq(&pool->lock); |
1927 | } | 1918 | } |
1928 | 1919 | ||
1929 | static bool send_mayday(struct work_struct *work) | 1920 | static bool send_mayday(struct work_struct *work) |
@@ -1948,10 +1939,9 @@ static bool send_mayday(struct work_struct *work) | |||
1948 | static void gcwq_mayday_timeout(unsigned long __pool) | 1939 | static void gcwq_mayday_timeout(unsigned long __pool) |
1949 | { | 1940 | { |
1950 | struct worker_pool *pool = (void *)__pool; | 1941 | struct worker_pool *pool = (void *)__pool; |
1951 | struct global_cwq *gcwq = pool->gcwq; | ||
1952 | struct work_struct *work; | 1942 | struct work_struct *work; |
1953 | 1943 | ||
1954 | spin_lock_irq(&gcwq->lock); | 1944 | spin_lock_irq(&pool->lock); |
1955 | 1945 | ||
1956 | if (need_to_create_worker(pool)) { | 1946 | if (need_to_create_worker(pool)) { |
1957 | /* | 1947 | /* |
@@ -1964,7 +1954,7 @@ static void gcwq_mayday_timeout(unsigned long __pool) | |||
1964 | send_mayday(work); | 1954 | send_mayday(work); |
1965 | } | 1955 | } |
1966 | 1956 | ||
1967 | spin_unlock_irq(&gcwq->lock); | 1957 | spin_unlock_irq(&pool->lock); |
1968 | 1958 | ||
1969 | mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL); | 1959 | mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL); |
1970 | } | 1960 | } |
@@ -1983,24 +1973,22 @@ static void gcwq_mayday_timeout(unsigned long __pool) | |||
1983 | * may_start_working() true. | 1973 | * may_start_working() true. |
1984 | * | 1974 | * |
1985 | * LOCKING: | 1975 | * LOCKING: |
1986 | * spin_lock_irq(gcwq->lock) which may be released and regrabbed | 1976 | * spin_lock_irq(pool->lock) which may be released and regrabbed |
1987 | * multiple times. Does GFP_KERNEL allocations. Called only from | 1977 | * multiple times. Does GFP_KERNEL allocations. Called only from |
1988 | * manager. | 1978 | * manager. |
1989 | * | 1979 | * |
1990 | * RETURNS: | 1980 | * RETURNS: |
1991 | * false if no action was taken and gcwq->lock stayed locked, true | 1981 | * false if no action was taken and pool->lock stayed locked, true |
1992 | * otherwise. | 1982 | * otherwise. |
1993 | */ | 1983 | */ |
1994 | static bool maybe_create_worker(struct worker_pool *pool) | 1984 | static bool maybe_create_worker(struct worker_pool *pool) |
1995 | __releases(&gcwq->lock) | 1985 | __releases(&pool->lock) |
1996 | __acquires(&gcwq->lock) | 1986 | __acquires(&pool->lock) |
1997 | { | 1987 | { |
1998 | struct global_cwq *gcwq = pool->gcwq; | ||
1999 | |||
2000 | if (!need_to_create_worker(pool)) | 1988 | if (!need_to_create_worker(pool)) |
2001 | return false; | 1989 | return false; |
2002 | restart: | 1990 | restart: |
2003 | spin_unlock_irq(&gcwq->lock); | 1991 | spin_unlock_irq(&pool->lock); |
2004 | 1992 | ||
2005 | /* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */ | 1993 | /* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */ |
2006 | mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT); | 1994 | mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT); |
@@ -2011,7 +1999,7 @@ restart: | |||
2011 | worker = create_worker(pool); | 1999 | worker = create_worker(pool); |
2012 | if (worker) { | 2000 | if (worker) { |
2013 | del_timer_sync(&pool->mayday_timer); | 2001 | del_timer_sync(&pool->mayday_timer); |
2014 | spin_lock_irq(&gcwq->lock); | 2002 | spin_lock_irq(&pool->lock); |
2015 | start_worker(worker); | 2003 | start_worker(worker); |
2016 | BUG_ON(need_to_create_worker(pool)); | 2004 | BUG_ON(need_to_create_worker(pool)); |
2017 | return true; | 2005 | return true; |
@@ -2028,7 +2016,7 @@ restart: | |||
2028 | } | 2016 | } |
2029 | 2017 | ||
2030 | del_timer_sync(&pool->mayday_timer); | 2018 | del_timer_sync(&pool->mayday_timer); |
2031 | spin_lock_irq(&gcwq->lock); | 2019 | spin_lock_irq(&pool->lock); |
2032 | if (need_to_create_worker(pool)) | 2020 | if (need_to_create_worker(pool)) |
2033 | goto restart; | 2021 | goto restart; |
2034 | return true; | 2022 | return true; |
@@ -2042,11 +2030,11 @@ restart: | |||
2042 | * IDLE_WORKER_TIMEOUT. | 2030 | * IDLE_WORKER_TIMEOUT. |
2043 | * | 2031 | * |
2044 | * LOCKING: | 2032 | * LOCKING: |
2045 | * spin_lock_irq(gcwq->lock) which may be released and regrabbed | 2033 | * spin_lock_irq(pool->lock) which may be released and regrabbed |
2046 | * multiple times. Called only from manager. | 2034 | * multiple times. Called only from manager. |
2047 | * | 2035 | * |
2048 | * RETURNS: | 2036 | * RETURNS: |
2049 | * false if no action was taken and gcwq->lock stayed locked, true | 2037 | * false if no action was taken and pool->lock stayed locked, true |
2050 | * otherwise. | 2038 | * otherwise. |
2051 | */ | 2039 | */ |
2052 | static bool maybe_destroy_workers(struct worker_pool *pool) | 2040 | static bool maybe_destroy_workers(struct worker_pool *pool) |
@@ -2085,12 +2073,12 @@ static bool maybe_destroy_workers(struct worker_pool *pool) | |||
2085 | * and may_start_working() is true. | 2073 | * and may_start_working() is true. |
2086 | * | 2074 | * |
2087 | * CONTEXT: | 2075 | * CONTEXT: |
2088 | * spin_lock_irq(gcwq->lock) which may be released and regrabbed | 2076 | * spin_lock_irq(pool->lock) which may be released and regrabbed |
2089 | * multiple times. Does GFP_KERNEL allocations. | 2077 | * multiple times. Does GFP_KERNEL allocations. |
2090 | * | 2078 | * |
2091 | * RETURNS: | 2079 | * RETURNS: |
2092 | * false if no action was taken and gcwq->lock stayed locked, true if | 2080 | * spin_lock_irq(pool->lock) which may be released and regrabbed |
2093 | * some action was taken. | 2081 | * multiple times. Does GFP_KERNEL allocations. |
2094 | */ | 2082 | */ |
2095 | static bool manage_workers(struct worker *worker) | 2083 | static bool manage_workers(struct worker *worker) |
2096 | { | 2084 | { |
@@ -2112,10 +2100,10 @@ static bool manage_workers(struct worker *worker) | |||
2112 | * manager against CPU hotplug. | 2100 | * manager against CPU hotplug. |
2113 | * | 2101 | * |
2114 | * assoc_mutex would always be free unless CPU hotplug is in | 2102 | * assoc_mutex would always be free unless CPU hotplug is in |
2115 | * progress. trylock first without dropping @gcwq->lock. | 2103 | * progress. trylock first without dropping @pool->lock. |
2116 | */ | 2104 | */ |
2117 | if (unlikely(!mutex_trylock(&pool->assoc_mutex))) { | 2105 | if (unlikely(!mutex_trylock(&pool->assoc_mutex))) { |
2118 | spin_unlock_irq(&pool->gcwq->lock); | 2106 | spin_unlock_irq(&pool->lock); |
2119 | mutex_lock(&pool->assoc_mutex); | 2107 | mutex_lock(&pool->assoc_mutex); |
2120 | /* | 2108 | /* |
2121 | * CPU hotplug could have happened while we were waiting | 2109 | * CPU hotplug could have happened while we were waiting |
@@ -2162,15 +2150,14 @@ static bool manage_workers(struct worker *worker) | |||
2162 | * call this function to process a work. | 2150 | * call this function to process a work. |
2163 | * | 2151 | * |
2164 | * CONTEXT: | 2152 | * CONTEXT: |
2165 | * spin_lock_irq(gcwq->lock) which is released and regrabbed. | 2153 | * spin_lock_irq(pool->lock) which is released and regrabbed. |
2166 | */ | 2154 | */ |
2167 | static void process_one_work(struct worker *worker, struct work_struct *work) | 2155 | static void process_one_work(struct worker *worker, struct work_struct *work) |
2168 | __releases(&gcwq->lock) | 2156 | __releases(&pool->lock) |
2169 | __acquires(&gcwq->lock) | 2157 | __acquires(&pool->lock) |
2170 | { | 2158 | { |
2171 | struct cpu_workqueue_struct *cwq = get_work_cwq(work); | 2159 | struct cpu_workqueue_struct *cwq = get_work_cwq(work); |
2172 | struct worker_pool *pool = worker->pool; | 2160 | struct worker_pool *pool = worker->pool; |
2173 | struct global_cwq *gcwq = pool->gcwq; | ||
2174 | bool cpu_intensive = cwq->wq->flags & WQ_CPU_INTENSIVE; | 2161 | bool cpu_intensive = cwq->wq->flags & WQ_CPU_INTENSIVE; |
2175 | int work_color; | 2162 | int work_color; |
2176 | struct worker *collision; | 2163 | struct worker *collision; |
@@ -2225,7 +2212,7 @@ __acquires(&gcwq->lock) | |||
2225 | worker_set_flags(worker, WORKER_CPU_INTENSIVE, true); | 2212 | worker_set_flags(worker, WORKER_CPU_INTENSIVE, true); |
2226 | 2213 | ||
2227 | /* | 2214 | /* |
2228 | * Unbound gcwq isn't concurrency managed and work items should be | 2215 | * Unbound pool isn't concurrency managed and work items should be |
2229 | * executed ASAP. Wake up another worker if necessary. | 2216 | * executed ASAP. Wake up another worker if necessary. |
2230 | */ | 2217 | */ |
2231 | if ((worker->flags & WORKER_UNBOUND) && need_more_worker(pool)) | 2218 | if ((worker->flags & WORKER_UNBOUND) && need_more_worker(pool)) |
@@ -2233,13 +2220,13 @@ __acquires(&gcwq->lock) | |||
2233 | 2220 | ||
2234 | /* | 2221 | /* |
2235 | * Record the last pool and clear PENDING which should be the last | 2222 | * Record the last pool and clear PENDING which should be the last |
2236 | * update to @work. Also, do this inside @gcwq->lock so that | 2223 | * update to @work. Also, do this inside @pool->lock so that |
2237 | * PENDING and queued state changes happen together while IRQ is | 2224 | * PENDING and queued state changes happen together while IRQ is |
2238 | * disabled. | 2225 | * disabled. |
2239 | */ | 2226 | */ |
2240 | set_work_pool_and_clear_pending(work, pool->id); | 2227 | set_work_pool_and_clear_pending(work, pool->id); |
2241 | 2228 | ||
2242 | spin_unlock_irq(&gcwq->lock); | 2229 | spin_unlock_irq(&pool->lock); |
2243 | 2230 | ||
2244 | lock_map_acquire_read(&cwq->wq->lockdep_map); | 2231 | lock_map_acquire_read(&cwq->wq->lockdep_map); |
2245 | lock_map_acquire(&lockdep_map); | 2232 | lock_map_acquire(&lockdep_map); |
@@ -2262,7 +2249,7 @@ __acquires(&gcwq->lock) | |||
2262 | dump_stack(); | 2249 | dump_stack(); |
2263 | } | 2250 | } |
2264 | 2251 | ||
2265 | spin_lock_irq(&gcwq->lock); | 2252 | spin_lock_irq(&pool->lock); |
2266 | 2253 | ||
2267 | /* clear cpu intensive status */ | 2254 | /* clear cpu intensive status */ |
2268 | if (unlikely(cpu_intensive)) | 2255 | if (unlikely(cpu_intensive)) |
@@ -2285,7 +2272,7 @@ __acquires(&gcwq->lock) | |||
2285 | * fetches a work from the top and executes it. | 2272 | * fetches a work from the top and executes it. |
2286 | * | 2273 | * |
2287 | * CONTEXT: | 2274 | * CONTEXT: |
2288 | * spin_lock_irq(gcwq->lock) which may be released and regrabbed | 2275 | * spin_lock_irq(pool->lock) which may be released and regrabbed |
2289 | * multiple times. | 2276 | * multiple times. |
2290 | */ | 2277 | */ |
2291 | static void process_scheduled_works(struct worker *worker) | 2278 | static void process_scheduled_works(struct worker *worker) |
@@ -2311,16 +2298,15 @@ static int worker_thread(void *__worker) | |||
2311 | { | 2298 | { |
2312 | struct worker *worker = __worker; | 2299 | struct worker *worker = __worker; |
2313 | struct worker_pool *pool = worker->pool; | 2300 | struct worker_pool *pool = worker->pool; |
2314 | struct global_cwq *gcwq = pool->gcwq; | ||
2315 | 2301 | ||
2316 | /* tell the scheduler that this is a workqueue worker */ | 2302 | /* tell the scheduler that this is a workqueue worker */ |
2317 | worker->task->flags |= PF_WQ_WORKER; | 2303 | worker->task->flags |= PF_WQ_WORKER; |
2318 | woke_up: | 2304 | woke_up: |
2319 | spin_lock_irq(&gcwq->lock); | 2305 | spin_lock_irq(&pool->lock); |
2320 | 2306 | ||
2321 | /* we are off idle list if destruction or rebind is requested */ | 2307 | /* we are off idle list if destruction or rebind is requested */ |
2322 | if (unlikely(list_empty(&worker->entry))) { | 2308 | if (unlikely(list_empty(&worker->entry))) { |
2323 | spin_unlock_irq(&gcwq->lock); | 2309 | spin_unlock_irq(&pool->lock); |
2324 | 2310 | ||
2325 | /* if DIE is set, destruction is requested */ | 2311 | /* if DIE is set, destruction is requested */ |
2326 | if (worker->flags & WORKER_DIE) { | 2312 | if (worker->flags & WORKER_DIE) { |
@@ -2379,15 +2365,15 @@ sleep: | |||
2379 | goto recheck; | 2365 | goto recheck; |
2380 | 2366 | ||
2381 | /* | 2367 | /* |
2382 | * gcwq->lock is held and there's no work to process and no | 2368 | * pool->lock is held and there's no work to process and no need to |
2383 | * need to manage, sleep. Workers are woken up only while | 2369 | * manage, sleep. Workers are woken up only while holding |
2384 | * holding gcwq->lock or from local cpu, so setting the | 2370 | * pool->lock or from local cpu, so setting the current state |
2385 | * current state before releasing gcwq->lock is enough to | 2371 | * before releasing pool->lock is enough to prevent losing any |
2386 | * prevent losing any event. | 2372 | * event. |
2387 | */ | 2373 | */ |
2388 | worker_enter_idle(worker); | 2374 | worker_enter_idle(worker); |
2389 | __set_current_state(TASK_INTERRUPTIBLE); | 2375 | __set_current_state(TASK_INTERRUPTIBLE); |
2390 | spin_unlock_irq(&gcwq->lock); | 2376 | spin_unlock_irq(&pool->lock); |
2391 | schedule(); | 2377 | schedule(); |
2392 | goto woke_up; | 2378 | goto woke_up; |
2393 | } | 2379 | } |
@@ -2443,7 +2429,6 @@ repeat: | |||
2443 | unsigned int tcpu = is_unbound ? WORK_CPU_UNBOUND : cpu; | 2429 | unsigned int tcpu = is_unbound ? WORK_CPU_UNBOUND : cpu; |
2444 | struct cpu_workqueue_struct *cwq = get_cwq(tcpu, wq); | 2430 | struct cpu_workqueue_struct *cwq = get_cwq(tcpu, wq); |
2445 | struct worker_pool *pool = cwq->pool; | 2431 | struct worker_pool *pool = cwq->pool; |
2446 | struct global_cwq *gcwq = pool->gcwq; | ||
2447 | struct work_struct *work, *n; | 2432 | struct work_struct *work, *n; |
2448 | 2433 | ||
2449 | __set_current_state(TASK_RUNNING); | 2434 | __set_current_state(TASK_RUNNING); |
@@ -2465,14 +2450,14 @@ repeat: | |||
2465 | process_scheduled_works(rescuer); | 2450 | process_scheduled_works(rescuer); |
2466 | 2451 | ||
2467 | /* | 2452 | /* |
2468 | * Leave this gcwq. If keep_working() is %true, notify a | 2453 | * Leave this pool. If keep_working() is %true, notify a |
2469 | * regular worker; otherwise, we end up with 0 concurrency | 2454 | * regular worker; otherwise, we end up with 0 concurrency |
2470 | * and stalling the execution. | 2455 | * and stalling the execution. |
2471 | */ | 2456 | */ |
2472 | if (keep_working(pool)) | 2457 | if (keep_working(pool)) |
2473 | wake_up_worker(pool); | 2458 | wake_up_worker(pool); |
2474 | 2459 | ||
2475 | spin_unlock_irq(&gcwq->lock); | 2460 | spin_unlock_irq(&pool->lock); |
2476 | } | 2461 | } |
2477 | 2462 | ||
2478 | /* rescuers should never participate in concurrency management */ | 2463 | /* rescuers should never participate in concurrency management */ |
@@ -2514,7 +2499,7 @@ static void wq_barrier_func(struct work_struct *work) | |||
2514 | * underneath us, so we can't reliably determine cwq from @target. | 2499 | * underneath us, so we can't reliably determine cwq from @target. |
2515 | * | 2500 | * |
2516 | * CONTEXT: | 2501 | * CONTEXT: |
2517 | * spin_lock_irq(gcwq->lock). | 2502 | * spin_lock_irq(pool->lock). |
2518 | */ | 2503 | */ |
2519 | static void insert_wq_barrier(struct cpu_workqueue_struct *cwq, | 2504 | static void insert_wq_barrier(struct cpu_workqueue_struct *cwq, |
2520 | struct wq_barrier *barr, | 2505 | struct wq_barrier *barr, |
@@ -2524,7 +2509,7 @@ static void insert_wq_barrier(struct cpu_workqueue_struct *cwq, | |||
2524 | unsigned int linked = 0; | 2509 | unsigned int linked = 0; |
2525 | 2510 | ||
2526 | /* | 2511 | /* |
2527 | * debugobject calls are safe here even with gcwq->lock locked | 2512 | * debugobject calls are safe here even with pool->lock locked |
2528 | * as we know for sure that this will not trigger any of the | 2513 | * as we know for sure that this will not trigger any of the |
2529 | * checks and call back into the fixup functions where we | 2514 | * checks and call back into the fixup functions where we |
2530 | * might deadlock. | 2515 | * might deadlock. |
@@ -2597,9 +2582,9 @@ static bool flush_workqueue_prep_cwqs(struct workqueue_struct *wq, | |||
2597 | 2582 | ||
2598 | for_each_cwq_cpu(cpu, wq) { | 2583 | for_each_cwq_cpu(cpu, wq) { |
2599 | struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); | 2584 | struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); |
2600 | struct global_cwq *gcwq = cwq->pool->gcwq; | 2585 | struct worker_pool *pool = cwq->pool; |
2601 | 2586 | ||
2602 | spin_lock_irq(&gcwq->lock); | 2587 | spin_lock_irq(&pool->lock); |
2603 | 2588 | ||
2604 | if (flush_color >= 0) { | 2589 | if (flush_color >= 0) { |
2605 | BUG_ON(cwq->flush_color != -1); | 2590 | BUG_ON(cwq->flush_color != -1); |
@@ -2616,7 +2601,7 @@ static bool flush_workqueue_prep_cwqs(struct workqueue_struct *wq, | |||
2616 | cwq->work_color = work_color; | 2601 | cwq->work_color = work_color; |
2617 | } | 2602 | } |
2618 | 2603 | ||
2619 | spin_unlock_irq(&gcwq->lock); | 2604 | spin_unlock_irq(&pool->lock); |
2620 | } | 2605 | } |
2621 | 2606 | ||
2622 | if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_cwqs_to_flush)) | 2607 | if (flush_color >= 0 && atomic_dec_and_test(&wq->nr_cwqs_to_flush)) |
@@ -2813,9 +2798,9 @@ reflush: | |||
2813 | struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); | 2798 | struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); |
2814 | bool drained; | 2799 | bool drained; |
2815 | 2800 | ||
2816 | spin_lock_irq(&cwq->pool->gcwq->lock); | 2801 | spin_lock_irq(&cwq->pool->lock); |
2817 | drained = !cwq->nr_active && list_empty(&cwq->delayed_works); | 2802 | drained = !cwq->nr_active && list_empty(&cwq->delayed_works); |
2818 | spin_unlock_irq(&cwq->pool->gcwq->lock); | 2803 | spin_unlock_irq(&cwq->pool->lock); |
2819 | 2804 | ||
2820 | if (drained) | 2805 | if (drained) |
2821 | continue; | 2806 | continue; |
@@ -2838,25 +2823,23 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr) | |||
2838 | { | 2823 | { |
2839 | struct worker *worker = NULL; | 2824 | struct worker *worker = NULL; |
2840 | struct worker_pool *pool; | 2825 | struct worker_pool *pool; |
2841 | struct global_cwq *gcwq; | ||
2842 | struct cpu_workqueue_struct *cwq; | 2826 | struct cpu_workqueue_struct *cwq; |
2843 | 2827 | ||
2844 | might_sleep(); | 2828 | might_sleep(); |
2845 | pool = get_work_pool(work); | 2829 | pool = get_work_pool(work); |
2846 | if (!pool) | 2830 | if (!pool) |
2847 | return false; | 2831 | return false; |
2848 | gcwq = pool->gcwq; | ||
2849 | 2832 | ||
2850 | spin_lock_irq(&gcwq->lock); | 2833 | spin_lock_irq(&pool->lock); |
2851 | if (!list_empty(&work->entry)) { | 2834 | if (!list_empty(&work->entry)) { |
2852 | /* | 2835 | /* |
2853 | * See the comment near try_to_grab_pending()->smp_rmb(). | 2836 | * See the comment near try_to_grab_pending()->smp_rmb(). |
2854 | * If it was re-queued to a different gcwq under us, we | 2837 | * If it was re-queued to a different pool under us, we |
2855 | * are not going to wait. | 2838 | * are not going to wait. |
2856 | */ | 2839 | */ |
2857 | smp_rmb(); | 2840 | smp_rmb(); |
2858 | cwq = get_work_cwq(work); | 2841 | cwq = get_work_cwq(work); |
2859 | if (unlikely(!cwq || gcwq != cwq->pool->gcwq)) | 2842 | if (unlikely(!cwq || pool != cwq->pool)) |
2860 | goto already_gone; | 2843 | goto already_gone; |
2861 | } else { | 2844 | } else { |
2862 | worker = find_worker_executing_work(pool, work); | 2845 | worker = find_worker_executing_work(pool, work); |
@@ -2866,7 +2849,7 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr) | |||
2866 | } | 2849 | } |
2867 | 2850 | ||
2868 | insert_wq_barrier(cwq, barr, work, worker); | 2851 | insert_wq_barrier(cwq, barr, work, worker); |
2869 | spin_unlock_irq(&gcwq->lock); | 2852 | spin_unlock_irq(&pool->lock); |
2870 | 2853 | ||
2871 | /* | 2854 | /* |
2872 | * If @max_active is 1 or rescuer is in use, flushing another work | 2855 | * If @max_active is 1 or rescuer is in use, flushing another work |
@@ -2882,7 +2865,7 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr) | |||
2882 | 2865 | ||
2883 | return true; | 2866 | return true; |
2884 | already_gone: | 2867 | already_gone: |
2885 | spin_unlock_irq(&gcwq->lock); | 2868 | spin_unlock_irq(&pool->lock); |
2886 | return false; | 2869 | return false; |
2887 | } | 2870 | } |
2888 | 2871 | ||
@@ -3404,7 +3387,7 @@ EXPORT_SYMBOL_GPL(destroy_workqueue); | |||
3404 | * increased. | 3387 | * increased. |
3405 | * | 3388 | * |
3406 | * CONTEXT: | 3389 | * CONTEXT: |
3407 | * spin_lock_irq(gcwq->lock). | 3390 | * spin_lock_irq(pool->lock). |
3408 | */ | 3391 | */ |
3409 | static void cwq_set_max_active(struct cpu_workqueue_struct *cwq, int max_active) | 3392 | static void cwq_set_max_active(struct cpu_workqueue_struct *cwq, int max_active) |
3410 | { | 3393 | { |
@@ -3438,15 +3421,14 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active) | |||
3438 | for_each_cwq_cpu(cpu, wq) { | 3421 | for_each_cwq_cpu(cpu, wq) { |
3439 | struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); | 3422 | struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); |
3440 | struct worker_pool *pool = cwq->pool; | 3423 | struct worker_pool *pool = cwq->pool; |
3441 | struct global_cwq *gcwq = pool->gcwq; | ||
3442 | 3424 | ||
3443 | spin_lock_irq(&gcwq->lock); | 3425 | spin_lock_irq(&pool->lock); |
3444 | 3426 | ||
3445 | if (!(wq->flags & WQ_FREEZABLE) || | 3427 | if (!(wq->flags & WQ_FREEZABLE) || |
3446 | !(pool->flags & POOL_FREEZING)) | 3428 | !(pool->flags & POOL_FREEZING)) |
3447 | cwq_set_max_active(cwq, max_active); | 3429 | cwq_set_max_active(cwq, max_active); |
3448 | 3430 | ||
3449 | spin_unlock_irq(&gcwq->lock); | 3431 | spin_unlock_irq(&pool->lock); |
3450 | } | 3432 | } |
3451 | 3433 | ||
3452 | spin_unlock(&workqueue_lock); | 3434 | spin_unlock(&workqueue_lock); |
@@ -3489,22 +3471,20 @@ EXPORT_SYMBOL_GPL(workqueue_congested); | |||
3489 | unsigned int work_busy(struct work_struct *work) | 3471 | unsigned int work_busy(struct work_struct *work) |
3490 | { | 3472 | { |
3491 | struct worker_pool *pool = get_work_pool(work); | 3473 | struct worker_pool *pool = get_work_pool(work); |
3492 | struct global_cwq *gcwq; | ||
3493 | unsigned long flags; | 3474 | unsigned long flags; |
3494 | unsigned int ret = 0; | 3475 | unsigned int ret = 0; |
3495 | 3476 | ||
3496 | if (!pool) | 3477 | if (!pool) |
3497 | return 0; | 3478 | return 0; |
3498 | gcwq = pool->gcwq; | ||
3499 | 3479 | ||
3500 | spin_lock_irqsave(&gcwq->lock, flags); | 3480 | spin_lock_irqsave(&pool->lock, flags); |
3501 | 3481 | ||
3502 | if (work_pending(work)) | 3482 | if (work_pending(work)) |
3503 | ret |= WORK_BUSY_PENDING; | 3483 | ret |= WORK_BUSY_PENDING; |
3504 | if (find_worker_executing_work(pool, work)) | 3484 | if (find_worker_executing_work(pool, work)) |
3505 | ret |= WORK_BUSY_RUNNING; | 3485 | ret |= WORK_BUSY_RUNNING; |
3506 | 3486 | ||
3507 | spin_unlock_irqrestore(&gcwq->lock, flags); | 3487 | spin_unlock_irqrestore(&pool->lock, flags); |
3508 | 3488 | ||
3509 | return ret; | 3489 | return ret; |
3510 | } | 3490 | } |
@@ -3532,7 +3512,10 @@ static void gcwq_claim_assoc_and_lock(struct global_cwq *gcwq) | |||
3532 | 3512 | ||
3533 | for_each_worker_pool(pool, gcwq) | 3513 | for_each_worker_pool(pool, gcwq) |
3534 | mutex_lock_nested(&pool->assoc_mutex, pool - gcwq->pools); | 3514 | mutex_lock_nested(&pool->assoc_mutex, pool - gcwq->pools); |
3535 | spin_lock_irq(&gcwq->lock); | 3515 | |
3516 | local_irq_disable(); | ||
3517 | for_each_worker_pool(pool, gcwq) | ||
3518 | spin_lock_nested(&pool->lock, pool - gcwq->pools); | ||
3536 | } | 3519 | } |
3537 | 3520 | ||
3538 | /* release manager positions */ | 3521 | /* release manager positions */ |
@@ -3540,7 +3523,10 @@ static void gcwq_release_assoc_and_unlock(struct global_cwq *gcwq) | |||
3540 | { | 3523 | { |
3541 | struct worker_pool *pool; | 3524 | struct worker_pool *pool; |
3542 | 3525 | ||
3543 | spin_unlock_irq(&gcwq->lock); | 3526 | for_each_worker_pool(pool, gcwq) |
3527 | spin_unlock(&pool->lock); | ||
3528 | local_irq_enable(); | ||
3529 | |||
3544 | for_each_worker_pool(pool, gcwq) | 3530 | for_each_worker_pool(pool, gcwq) |
3545 | mutex_unlock(&pool->assoc_mutex); | 3531 | mutex_unlock(&pool->assoc_mutex); |
3546 | } | 3532 | } |
@@ -3621,9 +3607,9 @@ static int __cpuinit workqueue_cpu_up_callback(struct notifier_block *nfb, | |||
3621 | if (!worker) | 3607 | if (!worker) |
3622 | return NOTIFY_BAD; | 3608 | return NOTIFY_BAD; |
3623 | 3609 | ||
3624 | spin_lock_irq(&gcwq->lock); | 3610 | spin_lock_irq(&pool->lock); |
3625 | start_worker(worker); | 3611 | start_worker(worker); |
3626 | spin_unlock_irq(&gcwq->lock); | 3612 | spin_unlock_irq(&pool->lock); |
3627 | } | 3613 | } |
3628 | break; | 3614 | break; |
3629 | 3615 | ||
@@ -3709,7 +3695,7 @@ EXPORT_SYMBOL_GPL(work_on_cpu); | |||
3709 | * gcwq->worklist. | 3695 | * gcwq->worklist. |
3710 | * | 3696 | * |
3711 | * CONTEXT: | 3697 | * CONTEXT: |
3712 | * Grabs and releases workqueue_lock and gcwq->lock's. | 3698 | * Grabs and releases workqueue_lock and pool->lock's. |
3713 | */ | 3699 | */ |
3714 | void freeze_workqueues_begin(void) | 3700 | void freeze_workqueues_begin(void) |
3715 | { | 3701 | { |
@@ -3725,9 +3711,11 @@ void freeze_workqueues_begin(void) | |||
3725 | struct worker_pool *pool; | 3711 | struct worker_pool *pool; |
3726 | struct workqueue_struct *wq; | 3712 | struct workqueue_struct *wq; |
3727 | 3713 | ||
3728 | spin_lock_irq(&gcwq->lock); | 3714 | local_irq_disable(); |
3729 | 3715 | ||
3730 | for_each_worker_pool(pool, gcwq) { | 3716 | for_each_worker_pool(pool, gcwq) { |
3717 | spin_lock_nested(&pool->lock, pool - gcwq->pools); | ||
3718 | |||
3731 | WARN_ON_ONCE(pool->flags & POOL_FREEZING); | 3719 | WARN_ON_ONCE(pool->flags & POOL_FREEZING); |
3732 | pool->flags |= POOL_FREEZING; | 3720 | pool->flags |= POOL_FREEZING; |
3733 | } | 3721 | } |
@@ -3739,7 +3727,9 @@ void freeze_workqueues_begin(void) | |||
3739 | cwq->max_active = 0; | 3727 | cwq->max_active = 0; |
3740 | } | 3728 | } |
3741 | 3729 | ||
3742 | spin_unlock_irq(&gcwq->lock); | 3730 | for_each_worker_pool(pool, gcwq) |
3731 | spin_unlock(&pool->lock); | ||
3732 | local_irq_enable(); | ||
3743 | } | 3733 | } |
3744 | 3734 | ||
3745 | spin_unlock(&workqueue_lock); | 3735 | spin_unlock(&workqueue_lock); |
@@ -3798,7 +3788,7 @@ out_unlock: | |||
3798 | * frozen works are transferred to their respective gcwq worklists. | 3788 | * frozen works are transferred to their respective gcwq worklists. |
3799 | * | 3789 | * |
3800 | * CONTEXT: | 3790 | * CONTEXT: |
3801 | * Grabs and releases workqueue_lock and gcwq->lock's. | 3791 | * Grabs and releases workqueue_lock and pool->lock's. |
3802 | */ | 3792 | */ |
3803 | void thaw_workqueues(void) | 3793 | void thaw_workqueues(void) |
3804 | { | 3794 | { |
@@ -3814,9 +3804,11 @@ void thaw_workqueues(void) | |||
3814 | struct worker_pool *pool; | 3804 | struct worker_pool *pool; |
3815 | struct workqueue_struct *wq; | 3805 | struct workqueue_struct *wq; |
3816 | 3806 | ||
3817 | spin_lock_irq(&gcwq->lock); | 3807 | local_irq_disable(); |
3818 | 3808 | ||
3819 | for_each_worker_pool(pool, gcwq) { | 3809 | for_each_worker_pool(pool, gcwq) { |
3810 | spin_lock_nested(&pool->lock, pool - gcwq->pools); | ||
3811 | |||
3820 | WARN_ON_ONCE(!(pool->flags & POOL_FREEZING)); | 3812 | WARN_ON_ONCE(!(pool->flags & POOL_FREEZING)); |
3821 | pool->flags &= ~POOL_FREEZING; | 3813 | pool->flags &= ~POOL_FREEZING; |
3822 | } | 3814 | } |
@@ -3831,10 +3823,11 @@ void thaw_workqueues(void) | |||
3831 | cwq_set_max_active(cwq, wq->saved_max_active); | 3823 | cwq_set_max_active(cwq, wq->saved_max_active); |
3832 | } | 3824 | } |
3833 | 3825 | ||
3834 | for_each_worker_pool(pool, gcwq) | 3826 | for_each_worker_pool(pool, gcwq) { |
3835 | wake_up_worker(pool); | 3827 | wake_up_worker(pool); |
3836 | 3828 | spin_unlock(&pool->lock); | |
3837 | spin_unlock_irq(&gcwq->lock); | 3829 | } |
3830 | local_irq_enable(); | ||
3838 | } | 3831 | } |
3839 | 3832 | ||
3840 | workqueue_freezing = false; | 3833 | workqueue_freezing = false; |
@@ -3859,10 +3852,9 @@ static int __init init_workqueues(void) | |||
3859 | struct global_cwq *gcwq = get_gcwq(cpu); | 3852 | struct global_cwq *gcwq = get_gcwq(cpu); |
3860 | struct worker_pool *pool; | 3853 | struct worker_pool *pool; |
3861 | 3854 | ||
3862 | spin_lock_init(&gcwq->lock); | ||
3863 | |||
3864 | for_each_worker_pool(pool, gcwq) { | 3855 | for_each_worker_pool(pool, gcwq) { |
3865 | pool->gcwq = gcwq; | 3856 | pool->gcwq = gcwq; |
3857 | spin_lock_init(&pool->lock); | ||
3866 | pool->cpu = cpu; | 3858 | pool->cpu = cpu; |
3867 | pool->flags |= POOL_DISASSOCIATED; | 3859 | pool->flags |= POOL_DISASSOCIATED; |
3868 | INIT_LIST_HEAD(&pool->worklist); | 3860 | INIT_LIST_HEAD(&pool->worklist); |
@@ -3897,9 +3889,9 @@ static int __init init_workqueues(void) | |||
3897 | 3889 | ||
3898 | worker = create_worker(pool); | 3890 | worker = create_worker(pool); |
3899 | BUG_ON(!worker); | 3891 | BUG_ON(!worker); |
3900 | spin_lock_irq(&gcwq->lock); | 3892 | spin_lock_irq(&pool->lock); |
3901 | start_worker(worker); | 3893 | start_worker(worker); |
3902 | spin_unlock_irq(&gcwq->lock); | 3894 | spin_unlock_irq(&pool->lock); |
3903 | } | 3895 | } |
3904 | } | 3896 | } |
3905 | 3897 | ||