author		Tejun Heo <tj@kernel.org>	2013-04-01 14:23:35 -0400
committer	Tejun Heo <tj@kernel.org>	2013-04-01 14:23:35 -0400
commit		dce90d47c4288c7d3c1988bebb059ea7451d5fd5 (patch)
tree		636f36bd20d7aba953b28346f927be5cf33ea055	/kernel/workqueue.c
parent		1befcf3073fa083e7dc48c384ce06f3bd900f514 (diff)
workqueue: introduce put_pwq_unlocked()
Factor out the "lock pool, put_pwq(), unlock" sequence into
put_pwq_unlocked().  The two existing places are converted and there
will be more with NUMA affinity support.

This is to prepare for NUMA affinity support for unbound workqueues
and doesn't introduce any functional difference.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Lai Jiangshan <laijs@cn.fujitsu.com>
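As a rough illustration of the pattern outside the kernel, the sketch below mimics it in ordinary userspace C with a pthread mutex. It is a minimal sketch only: struct pool and struct pwq are simplified stand-ins for the kernel types, and the printf stands in for scheduling the real release work.

/*
 * Illustrative only: a put operation that must run under the owner's
 * lock, plus a *_unlocked() wrapper that takes the lock itself and
 * tolerates a NULL argument (mirroring put_pwq()/put_pwq_unlocked()).
 */
#include <pthread.h>
#include <stdio.h>

struct pool {
        pthread_mutex_t lock;
};

struct pwq {
        struct pool *pool;
        int refcnt;
};

/* Caller must already hold pwq->pool->lock. */
static void put_pwq(struct pwq *pwq)
{
        if (--pwq->refcnt == 0)
                printf("last ref dropped, release would be scheduled here\n");
}

/* Takes the pool lock around put_pwq(); a NULL @pwq is a no-op. */
static void put_pwq_unlocked(struct pwq *pwq)
{
        if (pwq) {
                pthread_mutex_lock(&pwq->pool->lock);
                put_pwq(pwq);
                pthread_mutex_unlock(&pwq->pool->lock);
        }
}

int main(void)
{
        struct pool p = { .lock = PTHREAD_MUTEX_INITIALIZER };
        struct pwq q = { .pool = &p, .refcnt = 1 };

        put_pwq_unlocked(NULL);  /* tolerated: nothing to do */
        put_pwq_unlocked(&q);    /* drops the base ref under the lock */
        return 0;
}

Compile with cc -pthread to try it. The kernel version additionally relies on sched-RCU to keep the pool alive while its lock is being acquired, which this sketch does not model.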
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--	kernel/workqueue.c	36
1 file changed, 23 insertions(+), 13 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 3825c14304e1..d9a4aeb844d5 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1057,6 +1057,25 @@ static void put_pwq(struct pool_workqueue *pwq)
 	schedule_work(&pwq->unbound_release_work);
 }
 
+/**
+ * put_pwq_unlocked - put_pwq() with surrounding pool lock/unlock
+ * @pwq: pool_workqueue to put (can be %NULL)
+ *
+ * put_pwq() with locking. This function also allows %NULL @pwq.
+ */
+static void put_pwq_unlocked(struct pool_workqueue *pwq)
+{
+	if (pwq) {
+		/*
+		 * As both pwqs and pools are sched-RCU protected, the
+		 * following lock operations are safe.
+		 */
+		spin_lock_irq(&pwq->pool->lock);
+		put_pwq(pwq);
+		spin_unlock_irq(&pwq->pool->lock);
+	}
+}
+
 static void pwq_activate_delayed_work(struct work_struct *work)
 {
 	struct pool_workqueue *pwq = get_work_pwq(work);
@@ -3759,12 +3778,7 @@ int apply_workqueue_attrs(struct workqueue_struct *wq,
 
 	mutex_unlock(&wq->mutex);
 
-	if (last_pwq) {
-		spin_lock_irq(&last_pwq->pool->lock);
-		put_pwq(last_pwq);
-		spin_unlock_irq(&last_pwq->pool->lock);
-	}
-
+	put_pwq_unlocked(last_pwq);
 	ret = 0;
 	/* fall through */
 out_free:
@@ -3979,16 +3993,12 @@ void destroy_workqueue(struct workqueue_struct *wq)
 	} else {
 		/*
 		 * We're the sole accessor of @wq at this point. Directly
-		 * access the first pwq and put the base ref. As both pwqs
-		 * and pools are sched-RCU protected, the lock operations
-		 * are safe. @wq will be freed when the last pwq is
-		 * released.
+		 * access the first pwq and put the base ref. @wq will be
+		 * freed when the last pwq is released.
 		 */
 		pwq = list_first_entry(&wq->pwqs, struct pool_workqueue,
 				       pwqs_node);
-		spin_lock_irq(&pwq->pool->lock);
-		put_pwq(pwq);
-		spin_unlock_irq(&pwq->pool->lock);
+		put_pwq_unlocked(pwq);
 	}
 }
 EXPORT_SYMBOL_GPL(destroy_workqueue);