author		Tejun Heo <tj@kernel.org>	2013-03-12 14:30:00 -0400
committer	Tejun Heo <tj@kernel.org>	2013-03-12 14:30:00 -0400
commit		7fb98ea79cecb14fc1735544146be06fdb1944c3 (patch)
tree		1110c0288884b507b1fcf4dddb823ea626415be2 /kernel/workqueue.c
parent		420c0ddb1f205a3511b766d0dfee2cc87ed9dae0 (diff)
workqueue: replace get_pwq() with explicit per_cpu_ptr() accesses and first_pwq()
get_pwq() takes @cpu, which can also be WORK_CPU_UNBOUND, and @wq, and returns the matching pwq (pool_workqueue). We want to move away from using @cpu to identify pools and pwqs for unbound pools with custom attributes, and there is only one user - workqueue_congested() - which makes use of the WQ_UNBOUND conditional in get_pwq(). All other users already know whether they're dealing with a per-cpu or unbound workqueue.

Replace get_pwq() with explicit per_cpu_ptr(wq->cpu_pwqs, cpu) accesses for per-cpu workqueues and first_pwq() for unbound ones, and open-code the WQ_UNBOUND conditional in workqueue_congested().

Note that this makes workqueue_congested() behave slightly differently when a @cpu other than WORK_CPU_UNBOUND is specified: it ignores @cpu for unbound workqueues and always uses the first pwq instead of oopsing.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Lai Jiangshan <laijs@cn.fujitsu.com>
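For illustration only (not part of this patch), the lookup pattern the converted call sites follow can be sketched as below; example_pick_pwq() is a hypothetical helper, while per_cpu_ptr(), first_pwq() and the wq->cpu_pwqs percpu array are the post-patch interfaces used in the diff:

	/*
	 * Illustrative sketch: choose the pool_workqueue the way the
	 * converted call sites now do.  example_pick_pwq() is hypothetical
	 * and not part of the patch.
	 */
	static struct pool_workqueue *example_pick_pwq(int cpu,
						       struct workqueue_struct *wq)
	{
		if (!(wq->flags & WQ_UNBOUND))
			/* per-cpu workqueue: index the percpu pwq array */
			return per_cpu_ptr(wq->cpu_pwqs, cpu);
		/* unbound workqueue: @cpu is ignored, use the first pwq */
		return first_pwq(wq);
	}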
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--	kernel/workqueue.c	29
1 file changed, 14 insertions(+), 15 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index acee7b525d51..577ac719eaec 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -463,16 +463,9 @@ static struct worker_pool *get_std_worker_pool(int cpu, bool highpri)
 	return &pools[highpri];
 }
 
-static struct pool_workqueue *get_pwq(int cpu, struct workqueue_struct *wq)
+static struct pool_workqueue *first_pwq(struct workqueue_struct *wq)
 {
-	if (!(wq->flags & WQ_UNBOUND)) {
-		if (likely(cpu < nr_cpu_ids))
-			return per_cpu_ptr(wq->cpu_pwqs, cpu);
-	} else if (likely(cpu == WORK_CPU_UNBOUND)) {
-		return list_first_entry(&wq->pwqs, struct pool_workqueue,
-					pwqs_node);
-	}
-	return NULL;
+	return list_first_entry(&wq->pwqs, struct pool_workqueue, pwqs_node);
 }
 
 static unsigned int work_color_to_flags(int color)
@@ -1191,7 +1184,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
 		 * work needs to be queued on that cpu to guarantee
 		 * non-reentrancy.
 		 */
-		pwq = get_pwq(cpu, wq);
+		pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
 		last_pool = get_work_pool(work);
 
 		if (last_pool && last_pool != pwq->pool) {
@@ -1202,7 +1195,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
 			worker = find_worker_executing_work(last_pool, work);
 
 			if (worker && worker->current_pwq->wq == wq) {
-				pwq = get_pwq(last_pool->cpu, wq);
+				pwq = per_cpu_ptr(wq->cpu_pwqs, last_pool->cpu);
 			} else {
 				/* meh... not running there, queue here */
 				spin_unlock(&last_pool->lock);
@@ -1212,7 +1205,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
 			spin_lock(&pwq->pool->lock);
 		}
 	} else {
-		pwq = get_pwq(WORK_CPU_UNBOUND, wq);
+		pwq = first_pwq(wq);
 		spin_lock(&pwq->pool->lock);
 	}
 
@@ -1650,7 +1643,7 @@ static void rebind_workers(struct worker_pool *pool)
 		else
 			wq = system_wq;
 
-		insert_work(get_pwq(pool->cpu, wq), rebind_work,
+		insert_work(per_cpu_ptr(wq->cpu_pwqs, pool->cpu), rebind_work,
 			    worker->scheduled.next,
 			    work_color_to_flags(WORK_NO_COLOR));
 	}
@@ -3088,7 +3081,8 @@ static int alloc_and_link_pwqs(struct workqueue_struct *wq)
 			return -ENOMEM;
 
 		for_each_possible_cpu(cpu) {
-			struct pool_workqueue *pwq = get_pwq(cpu, wq);
+			struct pool_workqueue *pwq =
+				per_cpu_ptr(wq->cpu_pwqs, cpu);
 
 			pwq->pool = get_std_worker_pool(cpu, highpri);
 			list_add_tail(&pwq->pwqs_node, &wq->pwqs);
@@ -3343,7 +3337,12 @@ EXPORT_SYMBOL_GPL(workqueue_set_max_active);
  */
 bool workqueue_congested(int cpu, struct workqueue_struct *wq)
 {
-	struct pool_workqueue *pwq = get_pwq(cpu, wq);
+	struct pool_workqueue *pwq;
+
+	if (!(wq->flags & WQ_UNBOUND))
+		pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
+	else
+		pwq = first_pwq(wq);
 
 	return !list_empty(&pwq->delayed_works);
 }