author    Tejun Heo <tj@kernel.org>    2013-04-01 14:23:35 -0400
committer Tejun Heo <tj@kernel.org>    2013-04-01 14:23:35 -0400
commit    f147f29eb7c4959e5f8be604ce2d23979c86378c (patch)
tree      ed650b77760d1989668342b1d4fd52febaf76c20 /kernel/workqueue.c
parent    df2d5ae4995b3fb9392b6089b9623d20b6c3a542 (diff)
workqueue: break init_and_link_pwq() into two functions and introduce alloc_unbound_pwq()

Break init_and_link_pwq() into init_pwq() and link_pwq() and move
unbound-workqueue specific handling into apply_workqueue_attrs().
Also, factor out unbound pool and pool_workqueue allocation into
alloc_unbound_pwq().

This reorganization is to prepare for NUMA affinity and doesn't
introduce any functional changes.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Lai Jiangshan <laijs@cn.fujitsu.com>
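Stripped of kernel specifics, the shape of the refactoring is a two-phase construct/publish split: init_pwq() does pure field setup with no locking, link_pwq() publishes the object under the workqueue mutex, and alloc_unbound_pwq() composes allocation with initialization so error unwinding (put_unbound_pool() on a failed zalloc) stays local. The userspace C model below is an illustrative sketch of that pattern, not kernel code — the toy_* names, the pool_id field, and the singly linked list are invented stand-ins for the real pool_workqueue/workqueue_struct machinery:

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-ins for pool_workqueue / workqueue_struct; illustration only. */
struct toy_wq;

struct toy_pwq {
	struct toy_wq *wq;	/* owning workqueue, like pwq->wq */
	int pool_id;		/* stand-in for pwq->pool */
	struct toy_pwq *next;	/* stand-in for the pwqs_node list linkage */
};

struct toy_wq {
	struct toy_pwq *pwqs;	/* head of the linked pwq list */
};

/* phase 1: pure field initialization; nothing is visible to others yet */
static void toy_init_pwq(struct toy_pwq *pwq, struct toy_wq *wq, int pool_id)
{
	pwq->wq = wq;
	pwq->pool_id = pool_id;
	pwq->next = NULL;
}

/* phase 2: publication; the real link_pwq() asserts wq->mutex is held */
static void toy_link_pwq(struct toy_pwq *pwq)
{
	struct toy_wq *wq = pwq->wq;

	pwq->next = wq->pwqs;
	wq->pwqs = pwq;
}

/* allocation composed with phase 1, mirroring alloc_unbound_pwq()'s shape */
static struct toy_pwq *toy_alloc_pwq(struct toy_wq *wq, int pool_id)
{
	struct toy_pwq *pwq = calloc(1, sizeof(*pwq));

	if (!pwq)
		return NULL;	/* a real pool reference would be dropped here */
	toy_init_pwq(pwq, wq, pool_id);
	return pwq;
}

int main(void)
{
	struct toy_wq wq = { .pwqs = NULL };
	struct toy_pwq *pwq = toy_alloc_pwq(&wq, 0);

	if (!pwq)
		return 1;
	/* in the kernel, this step runs under mutex_lock(&wq->mutex) */
	toy_link_pwq(pwq);
	printf("pwq for pool %d linked\n", wq.pwqs->pool_id);
	free(pwq);
	return 0;
}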
Diffstat (limited to 'kernel/workqueue.c')
 kernel/workqueue.c | 78 ++++++++++++++++++++++++++++++++-----------------
 1 file changed, 49 insertions(+), 29 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 170226a24da8..c8d047b6c895 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -3626,13 +3626,10 @@ static void pwq_adjust_max_active(struct pool_workqueue *pwq)
 	spin_unlock_irq(&pwq->pool->lock);
 }
 
-static void init_and_link_pwq(struct pool_workqueue *pwq,
-			      struct workqueue_struct *wq,
-			      struct worker_pool *pool,
-			      struct pool_workqueue **p_last_pwq)
+/* initialize newly zalloced @pwq which is associated with @wq and @pool */
+static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq,
+		     struct worker_pool *pool)
 {
-	int node;
-
 	BUG_ON((unsigned long)pwq & WORK_STRUCT_FLAG_MASK);
 
 	pwq->pool = pool;
@@ -3642,8 +3639,15 @@ static void init_and_link_pwq(struct pool_workqueue *pwq,
 	INIT_LIST_HEAD(&pwq->delayed_works);
 	INIT_LIST_HEAD(&pwq->mayday_node);
 	INIT_WORK(&pwq->unbound_release_work, pwq_unbound_release_workfn);
+}
 
-	mutex_lock(&wq->mutex);
+/* sync @pwq with the current state of its associated wq and link it */
+static void link_pwq(struct pool_workqueue *pwq,
+		     struct pool_workqueue **p_last_pwq)
+{
+	struct workqueue_struct *wq = pwq->wq;
+
+	lockdep_assert_held(&wq->mutex);
 
 	/*
 	 * Set the matching work_color. This is synchronized with
@@ -3658,14 +3662,29 @@ static void init_and_link_pwq(struct pool_workqueue *pwq,
 
 	/* link in @pwq */
 	list_add_rcu(&pwq->pwqs_node, &wq->pwqs);
+}
 
-	if (wq->flags & WQ_UNBOUND) {
-		copy_workqueue_attrs(wq->unbound_attrs, pool->attrs);
-		for_each_node(node)
-			rcu_assign_pointer(wq->numa_pwq_tbl[node], pwq);
+/* obtain a pool matching @attr and create a pwq associating the pool and @wq */
+static struct pool_workqueue *alloc_unbound_pwq(struct workqueue_struct *wq,
+					const struct workqueue_attrs *attrs)
+{
+	struct worker_pool *pool;
+	struct pool_workqueue *pwq;
+
+	lockdep_assert_held(&wq_pool_mutex);
+
+	pool = get_unbound_pool(attrs);
+	if (!pool)
+		return NULL;
+
+	pwq = kmem_cache_zalloc(pwq_cache, GFP_KERNEL);
+	if (!pwq) {
+		put_unbound_pool(pool);
+		return NULL;
 	}
 
-	mutex_unlock(&wq->mutex);
+	init_pwq(pwq, wq, pool);
+	return pwq;
 }
 
 /**
@@ -3686,9 +3705,8 @@ int apply_workqueue_attrs(struct workqueue_struct *wq,
 			  const struct workqueue_attrs *attrs)
 {
 	struct workqueue_attrs *new_attrs;
-	struct pool_workqueue *pwq = NULL, *last_pwq;
-	struct worker_pool *pool;
-	int ret;
+	struct pool_workqueue *pwq, *last_pwq;
+	int node, ret;
 
 	/* only unbound workqueues can change attributes */
 	if (WARN_ON(!(wq->flags & WQ_UNBOUND)))
@@ -3707,22 +3725,21 @@ int apply_workqueue_attrs(struct workqueue_struct *wq,
 	cpumask_and(new_attrs->cpumask, new_attrs->cpumask, cpu_possible_mask);
 
 	mutex_lock(&wq_pool_mutex);
-
-	pwq = kmem_cache_zalloc(pwq_cache, GFP_KERNEL);
-	if (!pwq) {
-		mutex_unlock(&wq_pool_mutex);
+	pwq = alloc_unbound_pwq(wq, new_attrs);
+	mutex_unlock(&wq_pool_mutex);
+	if (!pwq)
 		goto enomem;
-	}
 
-	pool = get_unbound_pool(new_attrs);
-	if (!pool) {
-		mutex_unlock(&wq_pool_mutex);
-		goto enomem;
-	}
+	mutex_lock(&wq->mutex);
 
-	mutex_unlock(&wq_pool_mutex);
+	link_pwq(pwq, &last_pwq);
+
+	copy_workqueue_attrs(wq->unbound_attrs, new_attrs);
+	for_each_node(node)
+		rcu_assign_pointer(wq->numa_pwq_tbl[node], pwq);
+
+	mutex_unlock(&wq->mutex);
 
-	init_and_link_pwq(pwq, wq, pool, &last_pwq);
 	if (last_pwq) {
 		spin_lock_irq(&last_pwq->pool->lock);
 		put_pwq(last_pwq);
@@ -3736,7 +3753,6 @@ out_free:
 	return ret;
 
 enomem:
-	kmem_cache_free(pwq_cache, pwq);
 	ret = -ENOMEM;
 	goto out_free;
 }
@@ -3757,7 +3773,11 @@ static int alloc_and_link_pwqs(struct workqueue_struct *wq)
 		struct worker_pool *cpu_pools =
 			per_cpu(cpu_worker_pools, cpu);
 
-		init_and_link_pwq(pwq, wq, &cpu_pools[highpri], NULL);
+		init_pwq(pwq, wq, &cpu_pools[highpri]);
+
+		mutex_lock(&wq->mutex);
+		link_pwq(pwq, NULL);
+		mutex_unlock(&wq->mutex);
 	}
 	return 0;
 	} else {
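A design note that falls out of the hunks above: locking moves from callee to callers. init_and_link_pwq() used to take wq->mutex internally; after the split, link_pwq() only asserts the lock via lockdep_assert_held(&wq->mutex), alloc_unbound_pwq() likewise asserts wq_pool_mutex, and the two call sites — apply_workqueue_attrs() and alloc_and_link_pwqs() — acquire the appropriate mutex explicitly. That is also what allows apply_workqueue_attrs() to update wq->unbound_attrs and the numa_pwq_tbl[] pointers in the same wq->mutex critical section as the link, work that previously happened inside init_and_link_pwq() itself.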