path: root/kernel/workqueue.c
author	Tejun Heo <tj@kernel.org>	2017-09-05 09:33:41 -0400
committer	Tejun Heo <tj@kernel.org>	2017-09-05 09:33:41 -0400
commit	058fc47ee22c60c817c362ef6844d40d8ebb8140 (patch)
tree	da0bf3dec31d4c30a5420930cd11a5c2cdc5e46b /kernel/workqueue.c
parent	c5a94a618e7ac86b20f53d947f68d7cee6a4c6bc (diff)
parent	1ad0f0a7aa1bf3bd42dcd108a96713d255eacd9f (diff)
Merge branch 'for-4.13-fixes' into for-4.14
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--	kernel/workqueue.c	30
1 file changed, 26 insertions(+), 4 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 2d278b9a5469..a44ef675fd3a 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -3577,6 +3577,13 @@ static bool wq_calc_node_cpumask(const struct workqueue_attrs *attrs, int node,
 
 	/* yeap, return possible CPUs in @node that @attrs wants */
 	cpumask_and(cpumask, attrs->cpumask, wq_numa_possible_cpumask[node]);
+
+	if (cpumask_empty(cpumask)) {
+		pr_warn_once("WARNING: workqueue cpumask: online intersect > "
+				"possible intersect\n");
+		return false;
+	}
+
 	return !cpumask_equal(cpumask, attrs->cpumask);
 
 use_dfl:
@@ -3744,8 +3751,12 @@ static int apply_workqueue_attrs_locked(struct workqueue_struct *wq,
 		return -EINVAL;
 
 	/* creating multiple pwqs breaks ordering guarantee */
-	if (WARN_ON((wq->flags & __WQ_ORDERED) && !list_empty(&wq->pwqs)))
-		return -EINVAL;
+	if (!list_empty(&wq->pwqs)) {
+		if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
+			return -EINVAL;
+
+		wq->flags &= ~__WQ_ORDERED;
+	}
 
 	ctx = apply_wqattrs_prepare(wq, attrs);
 	if (!ctx)
@@ -3929,6 +3940,16 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
 	struct workqueue_struct *wq;
 	struct pool_workqueue *pwq;
 
+	/*
+	 * Unbound && max_active == 1 used to imply ordered, which is no
+	 * longer the case on NUMA machines due to per-node pools.  While
+	 * alloc_ordered_workqueue() is the right way to create an ordered
+	 * workqueue, keep the previous behavior to avoid subtle breakages
+	 * on NUMA.
+	 */
+	if ((flags & WQ_UNBOUND) && max_active == 1)
+		flags |= __WQ_ORDERED;
+
 	/* see the comment above the definition of WQ_POWER_EFFICIENT */
 	if ((flags & WQ_POWER_EFFICIENT) && wq_power_efficient)
 		flags |= WQ_UNBOUND;
@@ -4119,13 +4140,14 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
 	struct pool_workqueue *pwq;
 
 	/* disallow meddling with max_active for ordered workqueues */
-	if (WARN_ON(wq->flags & __WQ_ORDERED))
+	if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
 		return;
 
 	max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);
 
 	mutex_lock(&wq->mutex);
 
+	wq->flags &= ~__WQ_ORDERED;
 	wq->saved_max_active = max_active;
 
 	for_each_pwq(pwq, wq)
@@ -5253,7 +5275,7 @@ int workqueue_sysfs_register(struct workqueue_struct *wq)
 	 * attributes breaks ordering guarantee.  Disallow exposing ordered
 	 * workqueues.
 	 */
-	if (WARN_ON(wq->flags & __WQ_ORDERED))
+	if (WARN_ON(wq->flags & __WQ_ORDERED_EXPLICIT))
 		return -EINVAL;
 
 	wq->wq_dev = wq_dev = kzalloc(sizeof(*wq_dev), GFP_KERNEL);
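
For context on the __WQ_ORDERED vs. __WQ_ORDERED_EXPLICIT split these hunks rely on, below is a minimal illustrative sketch, not part of this diff. It assumes the companion include/linux/workqueue.h change in which alloc_ordered_workqueue() sets __WQ_ORDERED_EXPLICIT; the workqueue names and the example_init() function are hypothetical.

#include <linux/workqueue.h>

static struct workqueue_struct *explicit_wq;
static struct workqueue_struct *legacy_wq;

static int example_init(void)
{
	/*
	 * Explicitly ordered queue (assumed helper behavior): per the
	 * hunks above, workqueue_set_max_active() and sysfs attribute
	 * changes are rejected with a WARN when __WQ_ORDERED_EXPLICIT
	 * is set.
	 */
	explicit_wq = alloc_ordered_workqueue("example_ordered", 0);

	/*
	 * Old-style implicitly ordered queue: WQ_UNBOUND with
	 * max_active == 1 still gets __WQ_ORDERED in
	 * __alloc_workqueue_key(), but that flag may later be cleared
	 * by apply_workqueue_attrs_locked() or
	 * workqueue_set_max_active(), so ordering is only best-effort.
	 */
	legacy_wq = alloc_workqueue("example_legacy", WQ_UNBOUND, 1);

	if (!explicit_wq || !legacy_wq)
		return -ENOMEM;
	return 0;
}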