author     Tejun Heo <tj@kernel.org>  2012-07-12 17:46:37 -0400
committer  Tejun Heo <tj@kernel.org>  2012-07-12 17:46:37 -0400
commit     974271c485a4d8bb801decc616748f90aafb07ec
tree       ed3e41b86293b207127a03bde638e4dd6acf6d58 /kernel
parent     918227bb1b59444a2c467711fd50cc22bb4a897b
workqueue: don't use WQ_HIGHPRI for unbound workqueues
Unbound wqs aren't concurrency-managed and try to execute work items
as soon as possible.  This is currently achieved by implicitly setting
%WQ_HIGHPRI on all unbound workqueues; however, the WQ_HIGHPRI
implementation is about to be restructured and this usage won't be
valid anymore.

Add an explicit chain-wakeup path for unbound workqueues in
process_one_work() instead of piggybacking on %WQ_HIGHPRI.
Signed-off-by: Tejun Heo <tj@kernel.org>
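
For context, a minimal sketch of how a caller might use an unbound
workqueue (hypothetical module code, not part of this patch): after this
change, passing WQ_UNBOUND alone still gets ASAP execution, because the
wakeup now comes from the explicit check in process_one_work() rather
than an implied WQ_HIGHPRI.

#include <linux/module.h>
#include <linux/workqueue.h>

static void my_work_fn(struct work_struct *work)
{
        /* runs as soon as a worker picks it up; unbound wqs bypass
         * concurrency management, with or without WQ_HIGHPRI */
}

static DECLARE_WORK(my_work, my_work_fn);
static struct workqueue_struct *my_wq;

static int __init my_init(void)
{
        /* WQ_UNBOUND alone suffices; WQ_HIGHPRI is no longer implied */
        my_wq = alloc_workqueue("my_wq", WQ_UNBOUND, 0);
        if (!my_wq)
                return -ENOMEM;
        queue_work(my_wq, &my_work);
        return 0;
}

static void __exit my_exit(void)
{
        destroy_workqueue(my_wq);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");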
Diffstat (limited to 'kernel')
 kernel/workqueue.c | 18 +++++++++++-------
 1 file changed, 11 insertions(+), 7 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 9a3128dc67df..27637c284cb9 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -580,6 +580,10 @@ static bool __need_more_worker(struct global_cwq *gcwq)
 /*
  * Need to wake up a worker?  Called from anything but currently
  * running workers.
+ *
+ * Note that, because unbound workers never contribute to nr_running, this
+ * function will always return %true for unbound gcwq as long as the
+ * worklist isn't empty.
  */
 static bool need_more_worker(struct global_cwq *gcwq)
 {
@@ -1867,6 +1871,13 @@ __acquires(&gcwq->lock)
 	if (unlikely(cpu_intensive))
 		worker_set_flags(worker, WORKER_CPU_INTENSIVE, true);
 
+	/*
+	 * Unbound gcwq isn't concurrency managed and work items should be
+	 * executed ASAP.  Wake up another worker if necessary.
+	 */
+	if ((worker->flags & WORKER_UNBOUND) && need_more_worker(gcwq))
+		wake_up_worker(gcwq);
+
 	spin_unlock_irq(&gcwq->lock);
 
 	work_clear_pending(work);
@@ -2984,13 +2995,6 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
 	if (flags & WQ_MEM_RECLAIM)
 		flags |= WQ_RESCUER;
 
-	/*
-	 * Unbound workqueues aren't concurrency managed and should be
-	 * dispatched to workers immediately.
-	 */
-	if (flags & WQ_UNBOUND)
-		flags |= WQ_HIGHPRI;
-
 	max_active = max_active ?: WQ_DFL_ACTIVE;
 	max_active = wq_clamp_max_active(max_active, flags, wq->name);
 
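
As an illustration of the chain-wakeup pattern the patch adds to
process_one_work(), here is a toy userspace analogy using pthreads (an
assumption-laden sketch, not kernel code): each worker, right before
executing an item, wakes one more worker if the worklist isn't empty, so
a single initial wakeup fans out until every pending item has a worker.

/* toy_chain_wakeup.c - build with: cc -pthread toy_chain_wakeup.c */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

#define NWORKERS 4
#define NITEMS   8

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  more = PTHREAD_COND_INITIALIZER;
static int queue[NITEMS], head, tail;   /* head == tail means empty */

static void *worker(void *arg)
{
        long id = (long)arg;

        for (;;) {
                pthread_mutex_lock(&lock);
                while (head == tail)
                        pthread_cond_wait(&more, &lock);
                int item = queue[head++];

                /* chain wakeup: before running the item, wake another
                 * worker if work remains, mirroring the new check in
                 * process_one_work() */
                if (head != tail)
                        pthread_cond_signal(&more);
                pthread_mutex_unlock(&lock);

                printf("worker %ld runs item %d\n", id, item);
        }
        return NULL;
}

int main(void)
{
        pthread_t tid[NWORKERS];

        for (long i = 0; i < NWORKERS; i++)
                pthread_create(&tid[i], NULL, worker, (void *)i);

        pthread_mutex_lock(&lock);
        for (int i = 0; i < NITEMS; i++)
                queue[tail++] = i;
        pthread_cond_signal(&more);  /* wake one; chaining does the rest */
        pthread_mutex_unlock(&lock);

        sleep(1);                    /* let the workers drain the queue */
        return 0;
}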