author		Tejun Heo <tj@kernel.org>	2013-03-12 14:30:03 -0400
committer	Tejun Heo <tj@kernel.org>	2013-03-12 14:30:03 -0400
commit		493008a8e475771a2126e0ce95a73e35b371d277
tree		0e67b51ade42bb623456aa186cec7a5722a8420c
parent		ac6104cdf87cc162b0a0d78280d1dcb9752e25bb
workqueue: drop WQ_RESCUER and test workqueue->rescuer for NULL instead
WQ_RESCUER is superfluous. WQ_MEM_RECLAIM already indicates that the user
wants a rescuer, and testing wq->rescuer for NULL answers whether a given
workqueue has one. Drop WQ_RESCUER and test wq->rescuer directly.

This will help simplify the __alloc_workqueue_key() failure path by
allowing it to use destroy_workqueue() on a partially constructed
workqueue, which in turn will help implement dynamic management of
pool_workqueues.

While at it, clear wq->rescuer after freeing it in destroy_workqueue().
This is a precaution, as scheduled changes will make destruction more
complex.

This patch doesn't introduce any functional changes.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Lai Jiangshan <laijs@cn.fujitsu.com>
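[Editor's note: for readers outside the kernel tree, the following is a
minimal standalone C sketch of the pattern this patch applies, not kernel
code. The struct is a simplified stand-in for the kernel's
workqueue_struct, the printf stands in for waking the rescuer thread, and
the flag value mirrors the kernel's enum of this era.]

#include <stdio.h>
#include <stdlib.h>

struct worker { int id; };

struct workqueue_struct {
	unsigned int flags;	/* WQ_* flags, e.g. WQ_MEM_RECLAIM */
	struct worker *rescuer;	/* NULL if no rescuer was created */
};

#define WQ_MEM_RECLAIM (1 << 3)	/* stand-in; 1 << 3 in the kernel enum */

static void send_mayday(struct workqueue_struct *wq)
{
	/* after the patch: the pointer is the single source of truth */
	if (!wq->rescuer)
		return;
	printf("mayday: waking rescuer %d\n", wq->rescuer->id);
}

int main(void)
{
	struct workqueue_struct wq = { .flags = WQ_MEM_RECLAIM, .rescuer = NULL };

	/* mirrors __alloc_workqueue_key(): create a rescuer only when
	 * the caller asked for WQ_MEM_RECLAIM */
	if (wq.flags & WQ_MEM_RECLAIM) {
		wq.rescuer = malloc(sizeof(*wq.rescuer));
		if (!wq.rescuer)
			return 1;
		wq.rescuer->id = 1;
	}

	send_mayday(&wq);

	/* mirrors destroy_workqueue(): free and clear the pointer so a
	 * later destruction step cannot trip over a dangling pointer */
	free(wq.rescuer);
	wq.rescuer = NULL;
	return 0;
}

The NULL test is reliable because the kernel allocates the workqueue
zeroed, so wq->rescuer is non-NULL only after a rescuer has actually been
created.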
 include/linux/workqueue.h |  1 -
 kernel/workqueue.c        | 22 ++++++++++------------
 2 files changed, 10 insertions(+), 13 deletions(-)
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 00c1b9ba8252..c270b4eedf16 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -295,7 +295,6 @@ enum {
 	WQ_CPU_INTENSIVE	= 1 << 5, /* cpu instensive workqueue */
 
 	WQ_DRAINING		= 1 << 6, /* internal: workqueue is draining */
-	WQ_RESCUER		= 1 << 7, /* internal: workqueue has rescuer */
 
 	WQ_MAX_ACTIVE		= 512,	  /* I like 512, better ideas? */
 	WQ_MAX_UNBOUND_PER_CPU	= 4,	  /* 4 * #cpus for unbound wq */
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index a8b86f7b6e34..7ff2b9c5cc3a 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1827,7 +1827,7 @@ static void send_mayday(struct work_struct *work)
 
 	lockdep_assert_held(&workqueue_lock);
 
-	if (!(wq->flags & WQ_RESCUER))
+	if (!wq->rescuer)
 		return;
 
 	/* mayday mayday mayday */
@@ -2285,7 +2285,7 @@ sleep:
  * @__rescuer: self
  *
  * Workqueue rescuer thread function.  There's one rescuer for each
- * workqueue which has WQ_RESCUER set.
+ * workqueue which has WQ_MEM_RECLAIM set.
  *
  * Regular work processing on a pool may block trying to create a new
  * worker which uses GFP_KERNEL allocation which has slight chance of
@@ -2769,7 +2769,7 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
 	 * flusher is not running on the same workqueue by verifying write
 	 * access.
 	 */
-	if (pwq->wq->saved_max_active == 1 || pwq->wq->flags & WQ_RESCUER)
+	if (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer)
 		lock_map_acquire(&pwq->wq->lockdep_map);
 	else
 		lock_map_acquire_read(&pwq->wq->lockdep_map);
@@ -3412,13 +3412,6 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
 	va_end(args);
 	va_end(args1);
 
-	/*
-	 * Workqueues which may be used during memory reclaim should
-	 * have a rescuer to guarantee forward progress.
-	 */
-	if (flags & WQ_MEM_RECLAIM)
-		flags |= WQ_RESCUER;
-
 	max_active = max_active ?: WQ_DFL_ACTIVE;
 	max_active = wq_clamp_max_active(max_active, flags, wq->name);
 
@@ -3449,7 +3442,11 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
 	}
 	local_irq_enable();
 
-	if (flags & WQ_RESCUER) {
+	/*
+	 * Workqueues which may be used during memory reclaim should
+	 * have a rescuer to guarantee forward progress.
+	 */
+	if (flags & WQ_MEM_RECLAIM) {
 		struct worker *rescuer;
 
 		wq->rescuer = rescuer = alloc_worker();
@@ -3533,9 +3530,10 @@ void destroy_workqueue(struct workqueue_struct *wq)
 
 	spin_unlock_irq(&workqueue_lock);
 
-	if (wq->flags & WQ_RESCUER) {
+	if (wq->rescuer) {
 		kthread_stop(wq->rescuer->task);
 		kfree(wq->rescuer);
+		wq->rescuer = NULL;
 	}
 
 	/*