author	Tejun Heo <tj@kernel.org>	2013-01-17 20:16:24 -0500
committer	Tejun Heo <tj@kernel.org>	2013-01-17 20:19:58 -0500
commit	111c225a5f8d872bc9327ada18d13b75edaa34be (patch)
tree	8bb9e31b8345f67c50f5370e6ba03f613afd5b65 /kernel
parent	023f27d3d6fcc9048754d879fe5e7d63402a5b16 (diff)
workqueue: set PF_WQ_WORKER on rescuers
PF_WQ_WORKER is used to tell the scheduler that the task is a workqueue worker and needs wq_worker_sleeping/waking_up() invoked on it for concurrency management. As rescuers never participate in concurrency management, PF_WQ_WORKER wasn't set on them.

There's a need for an interface which can query whether %current is executing a work item and, if so, which one. Such an interface requires a way to identify all tasks which may execute work items, and PF_WQ_WORKER will be used for that. As all normal workers always have PF_WQ_WORKER set, we only need to add it to rescuers.

As rescuers start with WORKER_PREP and never clear it, they are always NOT_RUNNING, so there's no need to worry about them interfering with concurrency management even if PF_WQ_WORKER is set. However, unlike normal workers, rescuers currently don't have their worker struct as kthread_data(); they use the associated workqueue_struct instead. This is problematic as wq_worker_sleeping/waking_up() expect a struct worker at kthread_data().

This patch adds worker->rescue_wq, starts rescuer kthreads with the worker struct as kthread_data(), and sets PF_WQ_WORKER on rescuers.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
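To make the motivation concrete, here is a minimal sketch of the kind of query interface the changelog describes, relying on the invariant this patch completes: every task that may execute work items has PF_WQ_WORKER set and a struct worker as its kthread_data(). The helper name current_wq_work() is an illustrative assumption, not an interface added by this patch; it also assumes worker->current_work, the field workqueue.c uses to track the work item being processed.

	/* illustrative sketch only -- not part of this patch */
	static struct work_struct *current_wq_work(void)
	{
		struct worker *worker;

		/* PF_WQ_WORKER now covers normal workers *and* rescuers */
		if (!(current->flags & PF_WQ_WORKER))
			return NULL;

		/* safe: kthread_data() is guaranteed to be a struct worker */
		worker = kthread_data(current);
		return worker->current_work;	/* NULL between work items */
	}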
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/workqueue.c	35
1 file changed, 28 insertions(+), 7 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 7967f3476393..6b99ac7b19f6 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -149,6 +149,9 @@ struct worker {
 
 	/* for rebinding worker to CPU */
 	struct work_struct	rebind_work;	/* L: for busy worker */
+
+	/* used only by rescuers to point to the target workqueue */
+	struct workqueue_struct	*rescue_wq;	/* I: the workqueue to rescue */
 };
 
 struct worker_pool {
@@ -763,12 +766,20 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task,
 				       unsigned int cpu)
 {
 	struct worker *worker = kthread_data(task), *to_wakeup = NULL;
-	struct worker_pool *pool = worker->pool;
-	atomic_t *nr_running = get_pool_nr_running(pool);
+	struct worker_pool *pool;
+	atomic_t *nr_running;
 
+	/*
+	 * Rescuers, which may not have all the fields set up like normal
+	 * workers, also reach here, let's not access anything before
+	 * checking NOT_RUNNING.
+	 */
 	if (worker->flags & WORKER_NOT_RUNNING)
 		return NULL;
 
+	pool = worker->pool;
+	nr_running = get_pool_nr_running(pool);
+
 	/* this can only happen on the local cpu */
 	BUG_ON(cpu != raw_smp_processor_id());
 
@@ -2357,7 +2368,7 @@ sleep:
 
 /**
  * rescuer_thread - the rescuer thread function
- * @__wq: the associated workqueue
+ * @__rescuer: self
  *
  * Workqueue rescuer thread function.  There's one rescuer for each
  * workqueue which has WQ_RESCUER set.
@@ -2374,20 +2385,27 @@ sleep:
  *
  * This should happen rarely.
  */
-static int rescuer_thread(void *__wq)
+static int rescuer_thread(void *__rescuer)
 {
-	struct workqueue_struct *wq = __wq;
-	struct worker *rescuer = wq->rescuer;
+	struct worker *rescuer = __rescuer;
+	struct workqueue_struct *wq = rescuer->rescue_wq;
 	struct list_head *scheduled = &rescuer->scheduled;
 	bool is_unbound = wq->flags & WQ_UNBOUND;
 	unsigned int cpu;
 
 	set_user_nice(current, RESCUER_NICE_LEVEL);
+
+	/*
+	 * Mark rescuer as worker too.  As WORKER_PREP is never cleared, it
+	 * doesn't participate in concurrency management.
+	 */
+	rescuer->task->flags |= PF_WQ_WORKER;
 repeat:
 	set_current_state(TASK_INTERRUPTIBLE);
 
 	if (kthread_should_stop()) {
 		__set_current_state(TASK_RUNNING);
+		rescuer->task->flags &= ~PF_WQ_WORKER;
 		return 0;
 	}
 
@@ -2431,6 +2449,8 @@ repeat:
 		spin_unlock_irq(&gcwq->lock);
 	}
 
+	/* rescuers should never participate in concurrency management */
+	WARN_ON_ONCE(!(rescuer->flags & WORKER_NOT_RUNNING));
 	schedule();
 	goto repeat;
 }
@@ -3266,7 +3286,8 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
 	if (!rescuer)
 		goto err;
 
-	rescuer->task = kthread_create(rescuer_thread, wq, "%s",
+	rescuer->rescue_wq = wq;
+	rescuer->task = kthread_create(rescuer_thread, rescuer, "%s",
 				       wq->name);
 	if (IS_ERR(rescuer->task))
 		goto err;
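The final hunk is what makes the kthread_data() casts above sound: kthread_create() records its data argument for the new task, and kthread_data() later returns that same pointer. A condensed, illustrative sketch of the before/after wiring (simplified; error handling omitted):

	/* before: rescuer_thread() received the workqueue itself */
	rescuer->task = kthread_create(rescuer_thread, wq, "%s", wq->name);
	/*
	 * ... so kthread_data(rescuer->task) was a workqueue_struct, and the
	 * struct worker * cast in wq_worker_sleeping() would have been bogus
	 * had PF_WQ_WORKER been set on rescuers.
	 */

	/*
	 * after: the worker itself is the kthread data, and the new
	 * rescue_wq field lets the rescuer still find its workqueue
	 */
	rescuer->rescue_wq = wq;
	rescuer->task = kthread_create(rescuer_thread, rescuer, "%s", wq->name);
	/* now kthread_data(rescuer->task) == rescuer */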