diff options
author | Lai Jiangshan <laijs@cn.fujitsu.com> | 2015-05-19 06:03:47 -0400 |
---|---|---|
committer | Tejun Heo <tj@kernel.org> | 2015-05-19 17:37:00 -0400 |
commit | a0111cf6710bd1b4145ef313d3f4772602af051b (patch) | |
tree | 21acf858096755c322754314519dd04d2c8ed335 /kernel/workqueue.c | |
parent | f7142ed483f49f9108bea1be0c1afcd5d9098e05 (diff) |
workqueue: separate out and refactor the locking of applying attrs
Applying attrs requires two locks: get_online_cpus() and wq_pool_mutex,
and this code is duplicated in two places (apply_workqueue_attrs() and
workqueue_set_unbound_cpumask()). So we separate out this locking
code into apply_wqattrs_[un]lock() and do a minor refactor on
apply_workqueue_attrs().
The apply_wqattrs_[un]lock() helpers will also be used in a later patch
to ensure attrs changes are properly synchronized.
tj: minor updates to comments
Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r-- | kernel/workqueue.c | 78 |
1 files changed, 45 insertions, 33 deletions
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 4a9f65b54ee5..72c1adbf7632 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
@@ -3621,24 +3621,21 @@ static void apply_wqattrs_commit(struct apply_wqattrs_ctx *ctx) | |||
3621 | mutex_unlock(&ctx->wq->mutex); | 3621 | mutex_unlock(&ctx->wq->mutex); |
3622 | } | 3622 | } |
3623 | 3623 | ||
3624 | /** | 3624 | static void apply_wqattrs_lock(void) |
3625 | * apply_workqueue_attrs - apply new workqueue_attrs to an unbound workqueue | 3625 | { |
3626 | * @wq: the target workqueue | 3626 | /* CPUs should stay stable across pwq creations and installations */ |
3627 | * @attrs: the workqueue_attrs to apply, allocated with alloc_workqueue_attrs() | 3627 | get_online_cpus(); |
3628 | * | 3628 | mutex_lock(&wq_pool_mutex); |
3629 | * Apply @attrs to an unbound workqueue @wq. Unless disabled, on NUMA | 3629 | } |
3630 | * machines, this function maps a separate pwq to each NUMA node with | 3630 | |
3631 | * possibles CPUs in @attrs->cpumask so that work items are affine to the | 3631 | static void apply_wqattrs_unlock(void) |
3632 | * NUMA node it was issued on. Older pwqs are released as in-flight work | 3632 | { |
3633 | * items finish. Note that a work item which repeatedly requeues itself | 3633 | mutex_unlock(&wq_pool_mutex); |
3634 | * back-to-back will stay on its current pwq. | 3634 | put_online_cpus(); |
3635 | * | 3635 | } |
3636 | * Performs GFP_KERNEL allocations. | 3636 | |
3637 | * | 3637 | static int apply_workqueue_attrs_locked(struct workqueue_struct *wq, |
3638 | * Return: 0 on success and -errno on failure. | 3638 | const struct workqueue_attrs *attrs) |
3639 | */ | ||
3640 | int apply_workqueue_attrs(struct workqueue_struct *wq, | ||
3641 | const struct workqueue_attrs *attrs) | ||
3642 | { | 3639 | { |
3643 | struct apply_wqattrs_ctx *ctx; | 3640 | struct apply_wqattrs_ctx *ctx; |
3644 | int ret = -ENOMEM; | 3641 | int ret = -ENOMEM; |
@@ -3651,14 +3648,6 @@ int apply_workqueue_attrs(struct workqueue_struct *wq, | |||
3651 | if (WARN_ON((wq->flags & __WQ_ORDERED) && !list_empty(&wq->pwqs))) | 3648 | if (WARN_ON((wq->flags & __WQ_ORDERED) && !list_empty(&wq->pwqs))) |
3652 | return -EINVAL; | 3649 | return -EINVAL; |
3653 | 3650 | ||
3654 | /* | ||
3655 | * CPUs should stay stable across pwq creations and installations. | ||
3656 | * Pin CPUs, determine the target cpumask for each node and create | ||
3657 | * pwqs accordingly. | ||
3658 | */ | ||
3659 | get_online_cpus(); | ||
3660 | mutex_lock(&wq_pool_mutex); | ||
3661 | |||
3662 | ctx = apply_wqattrs_prepare(wq, attrs); | 3651 | ctx = apply_wqattrs_prepare(wq, attrs); |
3663 | 3652 | ||
3664 | /* the ctx has been prepared successfully, let's commit it */ | 3653 | /* the ctx has been prepared successfully, let's commit it */ |
@@ -3667,15 +3656,40 @@ int apply_workqueue_attrs(struct workqueue_struct *wq, | |||
3667 | ret = 0; | 3656 | ret = 0; |
3668 | } | 3657 | } |
3669 | 3658 | ||
3670 | mutex_unlock(&wq_pool_mutex); | ||
3671 | put_online_cpus(); | ||
3672 | |||
3673 | apply_wqattrs_cleanup(ctx); | 3659 | apply_wqattrs_cleanup(ctx); |
3674 | 3660 | ||
3675 | return ret; | 3661 | return ret; |
3676 | } | 3662 | } |
3677 | 3663 | ||
3678 | /** | 3664 | /** |
3665 | * apply_workqueue_attrs - apply new workqueue_attrs to an unbound workqueue | ||
3666 | * @wq: the target workqueue | ||
3667 | * @attrs: the workqueue_attrs to apply, allocated with alloc_workqueue_attrs() | ||
3668 | * | ||
3669 | * Apply @attrs to an unbound workqueue @wq. Unless disabled, on NUMA | ||
3670 | * machines, this function maps a separate pwq to each NUMA node with | ||
3671 | * possibles CPUs in @attrs->cpumask so that work items are affine to the | ||
3672 | * NUMA node it was issued on. Older pwqs are released as in-flight work | ||
3673 | * items finish. Note that a work item which repeatedly requeues itself | ||
3674 | * back-to-back will stay on its current pwq. | ||
3675 | * | ||
3676 | * Performs GFP_KERNEL allocations. | ||
3677 | * | ||
3678 | * Return: 0 on success and -errno on failure. | ||
3679 | */ | ||
3680 | int apply_workqueue_attrs(struct workqueue_struct *wq, | ||
3681 | const struct workqueue_attrs *attrs) | ||
3682 | { | ||
3683 | int ret; | ||
3684 | |||
3685 | apply_wqattrs_lock(); | ||
3686 | ret = apply_workqueue_attrs_locked(wq, attrs); | ||
3687 | apply_wqattrs_unlock(); | ||
3688 | |||
3689 | return ret; | ||
3690 | } | ||
3691 | |||
3692 | /** | ||
3679 | * wq_update_unbound_numa - update NUMA affinity of a wq for CPU hot[un]plug | 3693 | * wq_update_unbound_numa - update NUMA affinity of a wq for CPU hot[un]plug |
3680 | * @wq: the target workqueue | 3694 | * @wq: the target workqueue |
3681 | * @cpu: the CPU coming up or going down | 3695 | * @cpu: the CPU coming up or going down |
@@ -4799,10 +4813,9 @@ int workqueue_set_unbound_cpumask(cpumask_var_t cpumask) | |||
4799 | if (!zalloc_cpumask_var(&saved_cpumask, GFP_KERNEL)) | 4813 | if (!zalloc_cpumask_var(&saved_cpumask, GFP_KERNEL)) |
4800 | return -ENOMEM; | 4814 | return -ENOMEM; |
4801 | 4815 | ||
4802 | get_online_cpus(); | ||
4803 | cpumask_and(cpumask, cpumask, cpu_possible_mask); | 4816 | cpumask_and(cpumask, cpumask, cpu_possible_mask); |
4804 | if (!cpumask_empty(cpumask)) { | 4817 | if (!cpumask_empty(cpumask)) { |
4805 | mutex_lock(&wq_pool_mutex); | 4818 | apply_wqattrs_lock(); |
4806 | 4819 | ||
4807 | /* save the old wq_unbound_cpumask. */ | 4820 | /* save the old wq_unbound_cpumask. */ |
4808 | cpumask_copy(saved_cpumask, wq_unbound_cpumask); | 4821 | cpumask_copy(saved_cpumask, wq_unbound_cpumask); |
@@ -4815,9 +4828,8 @@ int workqueue_set_unbound_cpumask(cpumask_var_t cpumask) | |||
4815 | if (ret < 0) | 4828 | if (ret < 0) |
4816 | cpumask_copy(wq_unbound_cpumask, saved_cpumask); | 4829 | cpumask_copy(wq_unbound_cpumask, saved_cpumask); |
4817 | 4830 | ||
4818 | mutex_unlock(&wq_pool_mutex); | 4831 | apply_wqattrs_unlock(); |
4819 | } | 4832 | } |
4820 | put_online_cpus(); | ||
4821 | 4833 | ||
4822 | free_cpumask_var(saved_cpumask); | 4834 | free_cpumask_var(saved_cpumask); |
4823 | return ret; | 4835 | return ret; |