author     Tejun Heo <tj@kernel.org>  2013-03-12 14:30:04 -0400
committer  Tejun Heo <tj@kernel.org>  2013-03-12 14:30:04 -0400
commit     d2c1d40487bb1884be085c187233084f80df052d (patch)
tree       11ea6d2f331ed75935d0e65643d657f74278d881 /kernel/workqueue.c
parent     493008a8e475771a2126e0ce95a73e35b371d277 (diff)
workqueue: restructure __alloc_workqueue_key()
* Move initialization and linking of pool_workqueues into
  init_and_link_pwq().

* Make the failure path use destroy_workqueue() once pool_workqueue
  initialization succeeds.

These changes are to prepare for dynamic management of pool_workqueues
and don't introduce any functional changes.

While at it, convert list_del(&wq->list) to list_del_init() as a
precaution as scheduled changes will make destruction more complex.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Lai Jiangshan <laijs@cn.fujitsu.com>
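A side note on the list_del_init() conversion: in the kernel's list API,
list_del() poisons the removed node's next/prev pointers, so the node must
not be looked at again, while list_del_init() re-initializes the node into
a valid empty list that later code may safely test with list_empty() or
unlink a second time. Below is a minimal userspace sketch of that
distinction, mimicking include/linux/list.h; it is illustrative, not the
kernel code itself.

#include <assert.h>

struct list_head {
        struct list_head *next, *prev;
};

static void INIT_LIST_HEAD(struct list_head *list)
{
        list->next = list;
        list->prev = list;
}

static void list_add_tail(struct list_head *new, struct list_head *head)
{
        new->prev = head->prev;
        new->next = head;
        head->prev->next = new;
        head->prev = new;
}

static int list_empty(const struct list_head *head)
{
        return head->next == head;
}

/* The kernel's plain list_del() would poison next/prev here instead. */
static void list_del_init(struct list_head *entry)
{
        entry->prev->next = entry->next;
        entry->next->prev = entry->prev;
        INIT_LIST_HEAD(entry);  /* node becomes a valid empty list again */
}

int main(void)
{
        struct list_head wq_list, wq_node;

        INIT_LIST_HEAD(&wq_list);
        INIT_LIST_HEAD(&wq_node);
        list_add_tail(&wq_node, &wq_list);

        list_del_init(&wq_node);

        /* Well-defined after list_del_init(); after a bare list_del()
         * the poisoned pointers would make this an invalid access. */
        assert(list_empty(&wq_node));
        return 0;
}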
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--  kernel/workqueue.c | 67 ++++++++++++++++++++++++++++++++++++++-----------------------------
1 file changed, 38 insertions(+), 29 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 7ff2b9c5cc3a..5ac846e0085e 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -3329,6 +3329,23 @@ fail:
 	return NULL;
 }
 
+/* initialize @pwq which interfaces with @pool for @wq and link it in */
+static void init_and_link_pwq(struct pool_workqueue *pwq,
+			      struct workqueue_struct *wq,
+			      struct worker_pool *pool)
+{
+	BUG_ON((unsigned long)pwq & WORK_STRUCT_FLAG_MASK);
+
+	pwq->pool = pool;
+	pwq->wq = wq;
+	pwq->flush_color = -1;
+	pwq->max_active = wq->saved_max_active;
+	INIT_LIST_HEAD(&pwq->delayed_works);
+	INIT_LIST_HEAD(&pwq->mayday_node);
+
+	list_add_tail_rcu(&pwq->pwqs_node, &wq->pwqs);
+}
+
 static int alloc_and_link_pwqs(struct workqueue_struct *wq)
 {
 	bool highpri = wq->flags & WQ_HIGHPRI;
@@ -3345,23 +3362,23 @@ static int alloc_and_link_pwqs(struct workqueue_struct *wq)
 			struct worker_pool *cpu_pools =
 				per_cpu(cpu_worker_pools, cpu);
 
-			pwq->pool = &cpu_pools[highpri];
-			list_add_tail_rcu(&pwq->pwqs_node, &wq->pwqs);
+			init_and_link_pwq(pwq, wq, &cpu_pools[highpri]);
 		}
 	} else {
 		struct pool_workqueue *pwq;
+		struct worker_pool *pool;
 
 		pwq = kmem_cache_zalloc(pwq_cache, GFP_KERNEL);
 		if (!pwq)
 			return -ENOMEM;
 
-		pwq->pool = get_unbound_pool(unbound_std_wq_attrs[highpri]);
-		if (!pwq->pool) {
+		pool = get_unbound_pool(unbound_std_wq_attrs[highpri]);
+		if (!pool) {
 			kmem_cache_free(pwq_cache, pwq);
 			return -ENOMEM;
 		}
 
-		list_add_tail_rcu(&pwq->pwqs_node, &wq->pwqs);
+		init_and_link_pwq(pwq, wq, pool);
 	}
 
 	return 0;
@@ -3406,7 +3423,7 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
 
 	wq = kzalloc(sizeof(*wq) + namelen, GFP_KERNEL);
 	if (!wq)
-		goto err;
+		return NULL;
 
 	vsnprintf(wq->name, namelen, fmt, args1);
 	va_end(args);
@@ -3429,18 +3446,7 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
 	INIT_LIST_HEAD(&wq->list);
 
 	if (alloc_and_link_pwqs(wq) < 0)
-		goto err;
-
-	local_irq_disable();
-	for_each_pwq(pwq, wq) {
-		BUG_ON((unsigned long)pwq & WORK_STRUCT_FLAG_MASK);
-		pwq->wq = wq;
-		pwq->flush_color = -1;
-		pwq->max_active = max_active;
-		INIT_LIST_HEAD(&pwq->delayed_works);
-		INIT_LIST_HEAD(&pwq->mayday_node);
-	}
-	local_irq_enable();
+		goto err_free_wq;
 
 	/*
 	 * Workqueues which may be used during memory reclaim should
@@ -3449,16 +3455,19 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
 	if (flags & WQ_MEM_RECLAIM) {
 		struct worker *rescuer;
 
-		wq->rescuer = rescuer = alloc_worker();
+		rescuer = alloc_worker();
 		if (!rescuer)
-			goto err;
+			goto err_destroy;
 
 		rescuer->rescue_wq = wq;
 		rescuer->task = kthread_create(rescuer_thread, rescuer, "%s",
 					       wq->name);
-		if (IS_ERR(rescuer->task))
-			goto err;
+		if (IS_ERR(rescuer->task)) {
+			kfree(rescuer);
+			goto err_destroy;
+		}
 
+		wq->rescuer = rescuer;
 		rescuer->task->flags |= PF_THREAD_BOUND;
 		wake_up_process(rescuer->task);
 	}
@@ -3479,12 +3488,12 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
 	spin_unlock_irq(&workqueue_lock);
 
 	return wq;
-err:
-	if (wq) {
-		free_pwqs(wq);
-		kfree(wq->rescuer);
-		kfree(wq);
-	}
+
+err_free_wq:
+	kfree(wq);
+	return NULL;
+err_destroy:
+	destroy_workqueue(wq);
 	return NULL;
 }
 EXPORT_SYMBOL_GPL(__alloc_workqueue_key);
@@ -3526,7 +3535,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
 	 * wq list is used to freeze wq, remove from list after
 	 * flushing is complete in case freeze races us.
 	 */
-	list_del(&wq->list);
+	list_del_init(&wq->list);
 
 	spin_unlock_irq(&workqueue_lock);
 
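For readers tracing the error paths above: the patch replaces the old
catch-all err: label, which had to guess how much had been allocated, with
two labels chosen by how far setup progressed. Once pwqs are initialized
and linked, destroy_workqueue() can tear the whole structure down, so only
the earliest failure still needs a bare kfree(). Below is a self-contained
sketch of that idiom under hypothetical names; thing_create() and friends
are illustrative and not kernel APIs.

#include <stdlib.h>

struct thing {
        int registered;         /* stands in for "pwqs linked" state */
};

static void thing_destroy(struct thing *t)
{
        /* Undoes everything init_complex_parts() did, then frees. */
        free(t);
}

static int init_cheap_parts(struct thing *t)
{
        (void)t;                /* nothing externally visible yet */
        return 0;
}

static int init_complex_parts(struct thing *t)
{
        t->registered = 1;      /* now other code can see this object */
        return 0;
}

static struct thing *thing_create(void)
{
        struct thing *t = calloc(1, sizeof(*t));

        if (!t)
                return NULL;

        if (init_cheap_parts(t) < 0)
                goto err_free;          /* only the bare struct exists */

        if (init_complex_parts(t) < 0)
                goto err_destroy;       /* teardown needs the destructor */

        return t;

err_free:
        free(t);
        return NULL;
err_destroy:
        thing_destroy(t);
        return NULL;
}

int main(void)
{
        struct thing *t = thing_create();

        if (t)
                thing_destroy(t);
        return 0;
}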