diff options
author		Tejun Heo <tj@kernel.org>	2017-07-18 18:41:52 -0400
committer	Tejun Heo <tj@kernel.org>	2017-07-19 11:24:19 -0400
commit		5c0338c68706be53b3dc472e4308961c36e4ece1 (patch)
tree		306525a446f682b5479f020c7976729bdae52139 /kernel/workqueue.c
parent		74cbd96bc2e00f5daa805e2ebf49e998f7045062 (diff)
workqueue: restore WQ_UNBOUND/max_active==1 to be ordered
The combination of WQ_UNBOUND and max_active == 1 used to imply
ordered execution. After NUMA affinity 4c16bd327c74 ("workqueue:
implement NUMA affinity for unbound workqueues"), this is no longer
true due to per-node worker pools.
While the right way to create an ordered workqueue is
alloc_ordered_workqueue(), the documentation has been misleading for a
long time and people do use WQ_UNBOUND and max_active == 1 for ordered
workqueues which can lead to subtle bugs which are very difficult to
trigger.
It's unlikely that we'd see noticeable performance impact by enforcing
ordering on WQ_UNBOUND / max_active == 1 workqueues. Let's
automatically set __WQ_ORDERED for those workqueues.
Signed-off-by: Tejun Heo <tj@kernel.org>
Reported-by: Christoph Hellwig <hch@infradead.org>
Reported-by: Alexei Potashnik <alexei@purestorage.com>
Fixes: 4c16bd327c74 ("workqueue: implement NUMA affinity for unbound workqueues")
Cc: stable@vger.kernel.org # v3.10+
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--	kernel/workqueue.c	10
1 file changed, 10 insertions(+), 0 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index a86688fabc55..abe4a4971c24 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -3929,6 +3929,16 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
 	struct workqueue_struct *wq;
 	struct pool_workqueue *pwq;
 
+	/*
+	 * Unbound && max_active == 1 used to imply ordered, which is no
+	 * longer the case on NUMA machines due to per-node pools.  While
+	 * alloc_ordered_workqueue() is the right way to create an ordered
+	 * workqueue, keep the previous behavior to avoid subtle breakages
+	 * on NUMA.
+	 */
+	if ((flags & WQ_UNBOUND) && max_active == 1)
+		flags |= __WQ_ORDERED;
+
 	/* see the comment above the definition of WQ_POWER_EFFICIENT */
 	if ((flags & WQ_POWER_EFFICIENT) && wq_power_efficient)
 		flags |= WQ_UNBOUND;