author		Tejun Heo <tj@kernel.org>	2013-03-12 14:30:04 -0400
committer	Tejun Heo <tj@kernel.org>	2013-03-12 14:30:04 -0400
commit		c9178087acd71b4ea010ea48e147cf66952d2da9 (patch)
tree		0b226a810036ee110d0f894c821df50df64db29b
parent		75ccf5950f828d53aebfd3a852283a00abf2c5bf (diff)
workqueue: perform non-reentrancy test when queueing to unbound workqueues too
Because per-cpu workqueues have multiple pwqs (pool_workqueues) to
serve the CPUs, to guarantee that a single work item isn't queued on
one pwq while still executing on another, __queue_work() takes a look
at the previous pool the target work item was on and, if it's still
executing there, queues the work item on that pool.
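To make that test concrete, here is a minimal userspace sketch of the same logic; toy_pool, toy_worker and toy_work are hypothetical stand-ins for the kernel's worker_pool, worker and work_struct, and all locking is omitted:

/*
 * Minimal sketch of the non-reentrancy test described above.
 * toy_* types are illustrative stand-ins, not kernel structures.
 */
#include <stdio.h>
#include <stddef.h>

struct toy_work;

struct toy_pool { int id; };

struct toy_worker {
	struct toy_pool *pool;		/* pool this worker belongs to */
	struct toy_work *current_work;	/* item it is executing, if any */
};

struct toy_work {
	struct toy_pool *last_pool;	/* pool the item last ran on */
};

/* is some worker on @pool still executing @work? */
static struct toy_worker *find_executing(struct toy_pool *pool,
					 struct toy_work *work,
					 struct toy_worker *workers, int nr)
{
	for (int i = 0; i < nr; i++)
		if (workers[i].pool == pool && workers[i].current_work == work)
			return &workers[i];
	return NULL;
}

/* prefer the pool that is still executing @work over the default one */
static struct toy_pool *pick_pool(struct toy_work *work,
				  struct toy_pool *default_pool,
				  struct toy_worker *workers, int nr)
{
	struct toy_pool *last = work->last_pool;

	if (last && last != default_pool &&
	    find_executing(last, work, workers, nr))
		return last;		/* still running there: queue there */
	return default_pool;		/* not running elsewhere: queue here */
}

int main(void)
{
	struct toy_pool p0 = { .id = 0 }, p1 = { .id = 1 };
	struct toy_work w = { .last_pool = &p1 };
	struct toy_worker workers[] = { { .pool = &p1, .current_work = &w } };

	/* w is still being executed on p1, so it must be queued there */
	printf("queue on pool %d\n", pick_pool(&w, &p0, workers, 1)->id);
	return 0;
}

In the kernel the same decision is made under last_pool->lock via
find_worker_executing_work(), as the diff below shows.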
To support changing workqueue_attrs on the fly, unbound workqueues
too will have multiple pwqs and thus need the non-reentrancy test when
queueing. This patch modifies __queue_work() such that the reentrancy
test is performed regardless of the workqueue type.
Previously, per_cpu_ptr(wq->cpu_pwqs, cpu) was used to determine the
matching pwq for the last pool. This can't work for unbound
workqueues, so it is replaced with worker->current_pwq, which also
happens to be simpler.
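As a rough illustration of why worker->current_pwq is the more general handle (again with hypothetical toy_* types, not the kernel structures): a per-CPU lookup can only recover a pwq when the last pool is tied to a CPU, while the executing worker already records exactly which pwq its current item came from.

/* Toy illustration; toy_* types are hypothetical, not kernel structures. */
#include <stddef.h>

#define TOY_NR_CPUS 4

struct toy_pwq { int cpu; };

struct toy_wq {
	struct toy_pwq cpu_pwqs[TOY_NR_CPUS];	/* per-CPU pwqs (bound wq only) */
};

struct toy_pool { int cpu; };			/* -1 for an unbound pool */

struct toy_worker {
	struct toy_pwq *current_pwq;		/* pwq of the item being executed */
};

/* old approach: only works when the last pool is bound to a CPU */
static struct toy_pwq *pwq_via_cpu(struct toy_wq *wq, struct toy_pool *last)
{
	if (last->cpu < 0)
		return NULL;			/* unbound pool: no per-CPU slot */
	return &wq->cpu_pwqs[last->cpu];
}

/* new approach: valid for bound and unbound pools alike */
static struct toy_pwq *pwq_via_worker(struct toy_worker *worker)
{
	return worker->current_pwq;
}

int main(void)
{
	struct toy_wq wq = { .cpu_pwqs = { { 0 }, { 1 }, { 2 }, { 3 } } };
	struct toy_pool unbound = { .cpu = -1 };
	struct toy_worker worker = { .current_pwq = &wq.cpu_pwqs[2] };

	/* the per-CPU lookup fails for the unbound pool; the worker's pwq works */
	return (pwq_via_cpu(&wq, &unbound) == NULL &&
		pwq_via_worker(&worker) == &wq.cpu_pwqs[2]) ? 0 : 1;
}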
Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Lai Jiangshan <laijs@cn.fujitsu.com>
-rw-r--r--	kernel/workqueue.c	42
1 file changed, 19 insertions(+), 23 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index e933979678e5..16fb6747276a 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1209,6 +1209,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
 			 struct work_struct *work)
 {
 	struct pool_workqueue *pwq;
+	struct worker_pool *last_pool;
 	struct list_head *worklist;
 	unsigned int work_flags;
 	unsigned int req_cpu = cpu;
@@ -1228,41 +1229,36 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
 	    WARN_ON_ONCE(!is_chained_work(wq)))
 		return;
 
-	/* determine the pwq to use */
+	/* pwq which will be used unless @work is executing elsewhere */
 	if (!(wq->flags & WQ_UNBOUND)) {
-		struct worker_pool *last_pool;
-
 		if (cpu == WORK_CPU_UNBOUND)
 			cpu = raw_smp_processor_id();
-
-		/*
-		 * It's multi cpu. If @work was previously on a different
-		 * cpu, it might still be running there, in which case the
-		 * work needs to be queued on that cpu to guarantee
-		 * non-reentrancy.
-		 */
 		pwq = per_cpu_ptr(wq->cpu_pwqs, cpu);
-		last_pool = get_work_pool(work);
+	} else {
+		pwq = first_pwq(wq);
+	}
 
-		if (last_pool && last_pool != pwq->pool) {
-			struct worker *worker;
+	/*
+	 * If @work was previously on a different pool, it might still be
+	 * running there, in which case the work needs to be queued on that
+	 * pool to guarantee non-reentrancy.
+	 */
+	last_pool = get_work_pool(work);
+	if (last_pool && last_pool != pwq->pool) {
+		struct worker *worker;
 
-			spin_lock(&last_pool->lock);
+		spin_lock(&last_pool->lock);
 
-			worker = find_worker_executing_work(last_pool, work);
+		worker = find_worker_executing_work(last_pool, work);
 
-			if (worker && worker->current_pwq->wq == wq) {
-				pwq = per_cpu_ptr(wq->cpu_pwqs, last_pool->cpu);
-			} else {
-				/* meh... not running there, queue here */
-				spin_unlock(&last_pool->lock);
-				spin_lock(&pwq->pool->lock);
-			}
+		if (worker && worker->current_pwq->wq == wq) {
+			pwq = worker->current_pwq;
 		} else {
+			/* meh... not running there, queue here */
+			spin_unlock(&last_pool->lock);
 			spin_lock(&pwq->pool->lock);
 		}
 	} else {
-		pwq = first_pwq(wq);
 		spin_lock(&pwq->pool->lock);
 	}
 