author     Tejun Heo <tj@kernel.org>    2013-03-12 14:30:04 -0400
committer  Tejun Heo <tj@kernel.org>    2013-03-12 14:30:04 -0400
commit     9e8cd2f5898ab6710ad81f4583fada08bf8049a4 (patch)
tree       bb3950d07a5e23be7817eaaa0517066b7d5f82fb /kernel/workqueue.c
parent     c9178087acd71b4ea010ea48e147cf66952d2da9 (diff)
workqueue: implement apply_workqueue_attrs()
Implement apply_workqueue_attrs(), which applies workqueue_attrs to the
specified unbound workqueue by creating a new pwq (pool_workqueue)
linked to a worker_pool with the specified attributes.
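For illustration only (not part of this patch), a caller would look roughly
like the sketch below.  The helper name and the nice value are made up;
alloc_workqueue_attrs()/free_workqueue_attrs() and the ->nice field come from
the earlier workqueue_attrs patch in this series, and the attrs are copied by
apply_workqueue_attrs(), so the caller may free them afterwards:

	/* hypothetical caller sketch, not included in this patch */
	static int make_wq_niced(struct workqueue_struct *unbound_wq)
	{
		struct workqueue_attrs *attrs;
		int ret;

		attrs = alloc_workqueue_attrs(GFP_KERNEL);
		if (!attrs)
			return -ENOMEM;

		attrs->nice = 10;	/* new unbound workers run at nice 10 */
		ret = apply_workqueue_attrs(unbound_wq, attrs);
		free_workqueue_attrs(attrs);	/* attrs were copied, safe to free */
		return ret;
	}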
A new pwq is linked at the head of wq->pwqs instead of the tail, and
__queue_work() verifies that the first unbound pwq has a positive refcnt
before choosing it for the actual queueing.  This covers the case
where creation of a new pwq races with queueing.  As the base ref on a pwq
won't be dropped without making another pwq the first one,
__queue_work() is guaranteed to make progress and not add a work item to
a dead pwq.
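Condensed from the __queue_work() hunk below, the check looks like this:

retry:
	/* ... pwq selected and pwq->pool->lock taken ... */
	if (unlikely(!pwq->refcnt)) {
		if (wq->flags & WQ_UNBOUND) {
			/* lost the race against pwq release, pick again */
			spin_unlock(&pwq->pool->lock);
			cpu_relax();
			goto retry;
		}
		/* per-cpu pwqs are never released, so this is a bug */
		WARN_ONCE(true, "workqueue: per-cpu pwq for %s on cpu%d has 0 refcnt",
			  wq->name, cpu);
	}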
init_and_link_pwq() is updated to return the previous first pwq that the
new pwq replaced, which apply_workqueue_attrs() then puts.
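In apply_workqueue_attrs() below, that return value is consumed as follows
(taken from the new code in this patch):

	init_and_link_pwq(pwq, wq, pool, &last_pwq);	/* new pwq becomes the first pwq */
	if (last_pwq) {
		spin_lock_irq(&last_pwq->pool->lock);
		put_pwq(last_pwq);	/* drop the base ref of the replaced pwq */
		spin_unlock_irq(&last_pwq->pool->lock);
	}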
Note that apply_workqueue_attrs() is almost identical to the unbound pwq
part of alloc_and_link_pwqs().  The only difference is that during
workqueue creation there is no previous first pwq; apply_workqueue_attrs()
handles that case too, so it replaces the unbound pwq handling in
alloc_and_link_pwqs().
Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--	kernel/workqueue.c	91
1 file changed, 71 insertions(+), 20 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 16fb6747276a..2a67fbbd192c 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1228,7 +1228,7 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
 	if (unlikely(wq->flags & WQ_DRAINING) &&
 	    WARN_ON_ONCE(!is_chained_work(wq)))
 		return;
-
+retry:
 	/* pwq which will be used unless @work is executing elsewhere */
 	if (!(wq->flags & WQ_UNBOUND)) {
 		if (cpu == WORK_CPU_UNBOUND)
@@ -1262,6 +1262,25 @@ static void __queue_work(int cpu, struct workqueue_struct *wq,
 		spin_lock(&pwq->pool->lock);
 	}
 
+	/*
+	 * pwq is determined and locked.  For unbound pools, we could have
+	 * raced with pwq release and it could already be dead.  If its
+	 * refcnt is zero, repeat pwq selection.  Note that pwqs never die
+	 * without another pwq replacing it as the first pwq or while a
+	 * work item is executing on it, so the retrying is guaranteed to
+	 * make forward-progress.
+	 */
+	if (unlikely(!pwq->refcnt)) {
+		if (wq->flags & WQ_UNBOUND) {
+			spin_unlock(&pwq->pool->lock);
+			cpu_relax();
+			goto retry;
+		}
+		/* oops */
+		WARN_ONCE(true, "workqueue: per-cpu pwq for %s on cpu%d has 0 refcnt",
+			  wq->name, cpu);
+	}
+
 	/* pwq determined, queue */
 	trace_workqueue_queue_work(req_cpu, pwq, work);
 
@@ -3425,7 +3444,8 @@ static void pwq_unbound_release_workfn(struct work_struct *work)
 
 static void init_and_link_pwq(struct pool_workqueue *pwq,
 			      struct workqueue_struct *wq,
-			      struct worker_pool *pool)
+			      struct worker_pool *pool,
+			      struct pool_workqueue **p_last_pwq)
 {
 	BUG_ON((unsigned long)pwq & WORK_STRUCT_FLAG_MASK);
 
@@ -3445,13 +3465,58 @@ static void init_and_link_pwq(struct pool_workqueue *pwq,
 	mutex_lock(&wq->flush_mutex);
 	spin_lock_irq(&workqueue_lock);
 
+	if (p_last_pwq)
+		*p_last_pwq = first_pwq(wq);
 	pwq->work_color = wq->work_color;
-	list_add_tail_rcu(&pwq->pwqs_node, &wq->pwqs);
+	list_add_rcu(&pwq->pwqs_node, &wq->pwqs);
 
 	spin_unlock_irq(&workqueue_lock);
 	mutex_unlock(&wq->flush_mutex);
 }
 
+/**
+ * apply_workqueue_attrs - apply new workqueue_attrs to an unbound workqueue
+ * @wq: the target workqueue
+ * @attrs: the workqueue_attrs to apply, allocated with alloc_workqueue_attrs()
+ *
+ * Apply @attrs to an unbound workqueue @wq.  If @attrs doesn't match the
+ * current attributes, a new pwq is created and made the first pwq which
+ * will serve all new work items.  Older pwqs are released as in-flight
+ * work items finish.  Note that a work item which repeatedly requeues
+ * itself back-to-back will stay on its current pwq.
+ *
+ * Performs GFP_KERNEL allocations.  Returns 0 on success and -errno on
+ * failure.
+ */
+int apply_workqueue_attrs(struct workqueue_struct *wq,
+			  const struct workqueue_attrs *attrs)
+{
+	struct pool_workqueue *pwq, *last_pwq;
+	struct worker_pool *pool;
+
+	if (WARN_ON(!(wq->flags & WQ_UNBOUND)))
+		return -EINVAL;
+
+	pwq = kmem_cache_zalloc(pwq_cache, GFP_KERNEL);
+	if (!pwq)
+		return -ENOMEM;
+
+	pool = get_unbound_pool(attrs);
+	if (!pool) {
+		kmem_cache_free(pwq_cache, pwq);
+		return -ENOMEM;
+	}
+
+	init_and_link_pwq(pwq, wq, pool, &last_pwq);
+	if (last_pwq) {
+		spin_lock_irq(&last_pwq->pool->lock);
+		put_pwq(last_pwq);
+		spin_unlock_irq(&last_pwq->pool->lock);
+	}
+
+	return 0;
+}
+
 static int alloc_and_link_pwqs(struct workqueue_struct *wq)
 {
 	bool highpri = wq->flags & WQ_HIGHPRI;
@@ -3468,26 +3533,12 @@ static int alloc_and_link_pwqs(struct workqueue_struct *wq)
 			struct worker_pool *cpu_pools =
 				per_cpu(cpu_worker_pools, cpu);
 
-			init_and_link_pwq(pwq, wq, &cpu_pools[highpri]);
+			init_and_link_pwq(pwq, wq, &cpu_pools[highpri], NULL);
 		}
+		return 0;
 	} else {
-		struct pool_workqueue *pwq;
-		struct worker_pool *pool;
-
-		pwq = kmem_cache_zalloc(pwq_cache, GFP_KERNEL);
-		if (!pwq)
-			return -ENOMEM;
-
-		pool = get_unbound_pool(unbound_std_wq_attrs[highpri]);
-		if (!pool) {
-			kmem_cache_free(pwq_cache, pwq);
-			return -ENOMEM;
-		}
-
-		init_and_link_pwq(pwq, wq, pool);
+		return apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]);
 	}
-
-	return 0;
 }
 
 static int wq_clamp_max_active(int max_active, unsigned int flags,