author    Tejun Heo <tj@kernel.org>    2013-04-01 14:23:35 -0400
committer Tejun Heo <tj@kernel.org>    2013-04-01 14:23:35 -0400
commit    1befcf3073fa083e7dc48c384ce06f3bd900f514 (patch)
tree      d5ca2582430566d6f5577abbe19360afebba8ecc /kernel
parent    e50aba9aea63b7617887b4d9694184f478731c82 (diff)
workqueue: introduce numa_pwq_tbl_install()
Factor out pool_workqueue linking and installation into numa_pwq_tbl[]
from apply_workqueue_attrs() into numa_pwq_tbl_install().  link_pwq()
is made safe to call multiple times.  numa_pwq_tbl_install() links the
pwq, installs it into numa_pwq_tbl[] at the specified node and returns
the old entry.

@last_pwq is removed from link_pwq() as the return value of the new
function can be used instead.

This is to prepare for NUMA affinity support for unbound workqueues.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Lai Jiangshan <laijs@cn.fujitsu.com>
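The helper's core move, publishing the new entry for a node and handing back
whatever it displaced, is easy to see outside the kernel. Below is a minimal
userspace C sketch of that install-and-return-old pattern; it is an
illustration under stated assumptions, not kernel code: tbl[], install_entry(),
and the pthread mutex are hypothetical stand-ins for wq->numa_pwq_tbl[],
numa_pwq_tbl_install(), and wq->mutex, and plain pointer loads/stores stand in
for rcu_access_pointer()/rcu_assign_pointer().

#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

#define NR_NODES 4

struct entry { int id; };

static struct entry *tbl[NR_NODES];           /* models wq->numa_pwq_tbl[] */
static pthread_mutex_t tbl_lock = PTHREAD_MUTEX_INITIALIZER; /* models wq->mutex */

/* install @e for @node and return the old entry (may be NULL) */
static struct entry *install_entry(int node, struct entry *e)
{
	struct entry *old;

	pthread_mutex_lock(&tbl_lock);
	old = tbl[node];        /* the kernel uses rcu_access_pointer() here */
	tbl[node] = e;          /* ... and rcu_assign_pointer() here */
	pthread_mutex_unlock(&tbl_lock);
	return old;
}

int main(void)
{
	struct entry a = { 1 }, b = { 2 };
	struct entry *last = NULL;
	int node;

	/* first install: every node now points at @a */
	for (node = 0; node < NR_NODES; node++)
		install_entry(node, &a);

	/* reinstall, keeping the last displaced entry, as
	 * apply_workqueue_attrs() now does across for_each_node() */
	for (node = 0; node < NR_NODES; node++)
		last = install_entry(node, &b);

	printf("displaced entry %d\n", last->id); /* prints 1 */
	return 0;
}

Returning the displaced entry is what lets apply_workqueue_attrs() drop its
@last_pwq plumbing: the caller simply keeps the last return value and deals
with the old pwq after the table has been updated.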
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/workqueue.c  35
1 file changed, 26 insertions(+), 9 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 07ec57459457..3825c14304e1 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -3639,24 +3639,26 @@ static void init_pwq(struct pool_workqueue *pwq, struct workqueue_struct *wq,
 	pwq->flush_color = -1;
 	pwq->refcnt = 1;
 	INIT_LIST_HEAD(&pwq->delayed_works);
+	INIT_LIST_HEAD(&pwq->pwqs_node);
 	INIT_LIST_HEAD(&pwq->mayday_node);
 	INIT_WORK(&pwq->unbound_release_work, pwq_unbound_release_workfn);
 }
 
 /* sync @pwq with the current state of its associated wq and link it */
-static void link_pwq(struct pool_workqueue *pwq,
-		     struct pool_workqueue **p_last_pwq)
+static void link_pwq(struct pool_workqueue *pwq)
 {
 	struct workqueue_struct *wq = pwq->wq;
 
 	lockdep_assert_held(&wq->mutex);
 
+	/* may be called multiple times, ignore if already linked */
+	if (!list_empty(&pwq->pwqs_node))
+		return;
+
 	/*
 	 * Set the matching work_color.  This is synchronized with
 	 * wq->mutex to avoid confusing flush_workqueue().
 	 */
-	if (p_last_pwq)
-		*p_last_pwq = first_pwq(wq);
 	pwq->work_color = wq->work_color;
 
 	/* sync max_active to the current setting */
@@ -3689,6 +3691,23 @@ static struct pool_workqueue *alloc_unbound_pwq(struct workqueue_struct *wq,
 	return pwq;
 }
 
+/* install @pwq into @wq's numa_pwq_tbl[] for @node and return the old pwq */
+static struct pool_workqueue *numa_pwq_tbl_install(struct workqueue_struct *wq,
+						   int node,
+						   struct pool_workqueue *pwq)
+{
+	struct pool_workqueue *old_pwq;
+
+	lockdep_assert_held(&wq->mutex);
+
+	/* link_pwq() can handle duplicate calls */
+	link_pwq(pwq);
+
+	old_pwq = rcu_access_pointer(wq->numa_pwq_tbl[node]);
+	rcu_assign_pointer(wq->numa_pwq_tbl[node], pwq);
+	return old_pwq;
+}
+
 /**
  * apply_workqueue_attrs - apply new workqueue_attrs to an unbound workqueue
  * @wq: the target workqueue
@@ -3707,7 +3726,7 @@ int apply_workqueue_attrs(struct workqueue_struct *wq,
 			  const struct workqueue_attrs *attrs)
 {
 	struct workqueue_attrs *new_attrs;
-	struct pool_workqueue *pwq, *last_pwq;
+	struct pool_workqueue *pwq, *last_pwq = NULL;
 	int node, ret;
 
 	/* only unbound workqueues can change attributes */
@@ -3734,11 +3753,9 @@ int apply_workqueue_attrs(struct workqueue_struct *wq,
 
 	mutex_lock(&wq->mutex);
 
-	link_pwq(pwq, &last_pwq);
-
 	copy_workqueue_attrs(wq->unbound_attrs, new_attrs);
 	for_each_node(node)
-		rcu_assign_pointer(wq->numa_pwq_tbl[node], pwq);
+		last_pwq = numa_pwq_tbl_install(wq, node, pwq);
 
 	mutex_unlock(&wq->mutex);
 
@@ -3778,7 +3795,7 @@ static int alloc_and_link_pwqs(struct workqueue_struct *wq)
 		init_pwq(pwq, wq, &cpu_pools[highpri]);
 
 		mutex_lock(&wq->mutex);
-		link_pwq(pwq, NULL);
+		link_pwq(pwq);
 		mutex_unlock(&wq->mutex);
 	}
 	return 0;
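The other half of the change, making link_pwq() safe to call more than once,
rests on a common kernel list idiom: a node initialized with INIT_LIST_HEAD()
points at itself, so list_empty() on the node itself distinguishes linked from
unlinked, which is why the patch adds INIT_LIST_HEAD(&pwq->pwqs_node) to
init_pwq(). A self-contained sketch of the idiom, with the relevant kernel
list helpers re-implemented in plain C purely for illustration:

#include <stdbool.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

/* minimal reimplementations of the kernel helpers the patch relies on */
static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }
static bool list_empty(const struct list_head *h) { return h->next == h; }
static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

struct pwq { struct list_head pwqs_node; };

static void link_pwq(struct pwq *pwq, struct list_head *wq_list)
{
	/* may be called multiple times, ignore if already linked */
	if (!list_empty(&pwq->pwqs_node))
		return;
	list_add_tail(&pwq->pwqs_node, wq_list);
}

int main(void)
{
	struct list_head wq_list, *p;
	struct pwq pwq;
	int n = 0;

	INIT_LIST_HEAD(&wq_list);
	INIT_LIST_HEAD(&pwq.pwqs_node);   /* unlinked state: node points at itself */

	link_pwq(&pwq, &wq_list);
	link_pwq(&pwq, &wq_list);         /* duplicate call is a no-op */

	for (p = wq_list.next; p != &wq_list; p = p->next)
		n++;
	printf("linked %d time(s)\n", n); /* prints 1 */
	return 0;
}

Without the early return, a second list_add_tail() on an already-linked node
would corrupt the list, which is why the old link_pwq() could only be called
once per pwq.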