author     Lai Jiangshan <laijs@cn.fujitsu.com>    2013-02-07 16:14:20 -0500
committer  Tejun Heo <tj@kernel.org>               2013-02-07 16:17:51 -0500
commit     8594fade39d3ad02ef856b8c53b5d7cc538a55f5 (patch)
tree       7f14598186e3fbc5feb91b1c25905b51d106a104 /kernel/workqueue.c
parent     54d5b7d079dffa74597715a892473b474babd5b5 (diff)
workqueue: pick cwq instead of pool in __queue_work()
Currently, __queue_work() chooses the pool to queue a work item to and
then determines cwq from the target wq and the chosen pool. This is a
bit backwards in that we can determine cwq first and simply use
cwq->pool. This way, we can skip get_std_worker_pool() in the queueing
path, which would otherwise be a hurdle when implementing custom worker
pools.
Update __queue_work() such that it chooses the target cwq first and then
uses cwq->pool, instead of the other way around. While at it, add the
missing {} in an if statement.
This patch doesn't introduce any functional changes.
tj: The original patch had two get_cwq() calls - the first to
determine the pool by doing get_cwq(cpu, wq)->pool and the second
to determine the matching cwq from get_cwq(pool->cpu, wq).
Updated the function such that it chooses cwq instead of pool and
removed the second call. Rewrote the description.
Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
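The gist of the change below is the reversed lookup order: resolve the cwq first and reach the pool through cwq->pool, rather than picking a pool and then looking up the matching cwq. The standalone C sketch below only illustrates that pattern; the struct layouts and the one-argument get_cwq() helper are simplified stand-ins for cpu_workqueue_struct, worker_pool and the kernel's get_cwq(cpu, wq), not the real definitions.

/*
 * Illustration only: NOT kernel code.  Each cwq carries a pointer to its
 * pool, so once the cwq is chosen the pool needs no separate lookup.
 */
#include <stdio.h>

struct pool { int cpu; };
struct cwq  { struct pool *pool; };	/* per-cpu queue knows its pool */

static struct pool pools[2] = { { .cpu = 0 }, { .cpu = 1 } };
static struct cwq  cwqs[2]  = { { .pool = &pools[0] }, { .pool = &pools[1] } };

static struct cwq *get_cwq(int cpu)	/* stand-in for get_cwq(cpu, wq) */
{
	return &cwqs[cpu];
}

int main(void)
{
	int cpu = 1;
	struct cwq *cwq = get_cwq(cpu);		/* pick the cwq first ...       */
	struct pool *pool = cwq->pool;		/* ... the pool falls out of it */

	printf("queueing on pool for cpu %d\n", pool->cpu);
	return 0;
}

In the real __queue_work() this is what lets the patch drop both the get_std_worker_pool() call and the trailing get_cwq(pool->cpu, wq) lookup, as the diff below shows.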
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--	kernel/workqueue.c	29
1 file changed, 13 insertions(+), 16 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 1801c37b28c4..d6fdce12ca7e 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1193,8 +1193,6 @@ static bool is_chained_work(struct workqueue_struct *wq)
 static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
 			 struct work_struct *work)
 {
-	bool highpri = wq->flags & WQ_HIGHPRI;
-	struct worker_pool *pool;
 	struct cpu_workqueue_struct *cwq;
 	struct list_head *worklist;
 	unsigned int work_flags;
@@ -1215,7 +1213,7 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
 	    WARN_ON_ONCE(!is_chained_work(wq)))
 		return;
 
-	/* determine pool to use */
+	/* determine the cwq to use */
 	if (!(wq->flags & WQ_UNBOUND)) {
 		struct worker_pool *last_pool;
 
@@ -1228,37 +1226,36 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
 		 * work needs to be queued on that cpu to guarantee
 		 * non-reentrancy.
 		 */
-		pool = get_std_worker_pool(cpu, highpri);
+		cwq = get_cwq(cpu, wq);
 		last_pool = get_work_pool(work);
 
-		if (last_pool && last_pool != pool) {
+		if (last_pool && last_pool != cwq->pool) {
 			struct worker *worker;
 
 			spin_lock(&last_pool->lock);
 
 			worker = find_worker_executing_work(last_pool, work);
 
-			if (worker && worker->current_cwq->wq == wq)
-				pool = last_pool;
-			else {
+			if (worker && worker->current_cwq->wq == wq) {
+				cwq = get_cwq(last_pool->cpu, wq);
+			} else {
 				/* meh... not running there, queue here */
 				spin_unlock(&last_pool->lock);
-				spin_lock(&pool->lock);
+				spin_lock(&cwq->pool->lock);
 			}
 		} else {
-			spin_lock(&pool->lock);
+			spin_lock(&cwq->pool->lock);
 		}
 	} else {
-		pool = get_std_worker_pool(WORK_CPU_UNBOUND, highpri);
-		spin_lock(&pool->lock);
+		cwq = get_cwq(WORK_CPU_UNBOUND, wq);
+		spin_lock(&cwq->pool->lock);
 	}
 
-	/* pool determined, get cwq and queue */
-	cwq = get_cwq(pool->cpu, wq);
+	/* cwq determined, queue */
 	trace_workqueue_queue_work(req_cpu, cwq, work);
 
 	if (WARN_ON(!list_empty(&work->entry))) {
-		spin_unlock(&pool->lock);
+		spin_unlock(&cwq->pool->lock);
 		return;
 	}
 
@@ -1276,7 +1273,7 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
 
 	insert_work(cwq, work, worklist, work_flags);
 
-	spin_unlock(&pool->lock);
+	spin_unlock(&cwq->pool->lock);
 }
 
 /**