author		Tejun Heo <tj@kernel.org>	2010-07-02 04:03:50 -0400
committer	Tejun Heo <tj@kernel.org>	2010-07-02 04:03:50 -0400
commit		cb444766996395d4370bcc17ec895dd4e13ceb72 (patch)
tree		e682bd69dafadae8b27861888ec0802107d5a93a /kernel/workqueue.c
parent		fb0e7beb5c1b6fb4da786ba709d7138373d5fb22 (diff)
workqueue: use worker_set/clr_flags() only from worker itself
worker_set/clr_flags() assume that if none of the NOT_RUNNING flags is set,
the worker must be contributing to nr_running, which is only true if the
worker is actually running.

When called from the worker itself, the worker is guaranteed to be running,
so these functions can be used safely from there, and they aren't necessary
from other places anyway.  Make the following changes to fix the bug.

* Make worker_set/clr_flags() whine if not called from self.

* Convert all places which called those functions from other tasks to
  manipulate the flags directly.

* Make trustee_thread() directly clear nr_running after setting
  WORKER_ROGUE on all workers.  This is the only place where nr_running
  manipulation is necessary outside of the workers themselves.

* While at it, add a sanity check for nr_running in worker_enter_idle().

Signed-off-by: Tejun Heo <tj@kernel.org>
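To make the rule concrete, the following is a minimal userspace sketch of the
same idea; every name in it (toy_pool, toy_worker, TOY_NOT_RUNNING, the
pthread-based self check) is a hypothetical stand-in for the kernel code in
the diff below, not the workqueue API.  The helpers assert that they run on
the worker's own thread, mirroring the WARN_ON_ONCE(worker->task != current)
added by this patch, and only then move nr_running across the NOT_RUNNING
transition.

	/* toy model only -- not kernel code */
	#include <assert.h>
	#include <pthread.h>
	#include <stdatomic.h>

	#define TOY_NOT_RUNNING	0x1	/* stand-in for the WORKER_NOT_RUNNING mask */

	struct toy_pool {
		atomic_int nr_running;		/* like the per-gcwq nr_running counter */
	};

	struct toy_worker {
		pthread_t task;			/* thread backing this worker */
		unsigned int flags;		/* protected by the pool lock (not shown) */
		struct toy_pool *pool;
	};

	/* mirrors worker_set_flags(): legal only from the worker's own thread */
	void toy_worker_set_flags(struct toy_worker *w, unsigned int flags)
	{
		assert(pthread_equal(w->task, pthread_self()));	/* like the WARN_ON_ONCE */

		/* entering NOT_RUNNING: a running worker stops counting */
		if ((flags & TOY_NOT_RUNNING) && !(w->flags & TOY_NOT_RUNNING))
			atomic_fetch_sub(&w->pool->nr_running, 1);

		w->flags |= flags;
	}

	/* mirrors worker_clr_flags(): legal only from the worker's own thread */
	void toy_worker_clr_flags(struct toy_worker *w, unsigned int flags)
	{
		unsigned int oflags = w->flags;

		assert(pthread_equal(w->task, pthread_self()));

		w->flags &= ~flags;

		/* leaving NOT_RUNNING: the worker counts toward nr_running again */
		if ((oflags & TOY_NOT_RUNNING) && !(w->flags & TOY_NOT_RUNNING))
			atomic_fetch_add(&w->pool->nr_running, 1);
	}

The self check is meaningful because a worker executing these functions is by
definition running; any other task cannot know whether the worker is on a CPU,
which is exactly why the patch forbids calling them from elsewhere.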
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--  kernel/workqueue.c | 47
1 files changed, 28 insertions(+), 19 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 6fa847c5c5e9..558733801ac0 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -601,7 +601,7 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task,
 
 /**
  * worker_set_flags - set worker flags and adjust nr_running accordingly
- * @worker: worker to set flags for
+ * @worker: self
  * @flags: flags to set
  * @wakeup: wakeup an idle worker if necessary
  *
@@ -609,14 +609,16 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task,
  * nr_running becomes zero and @wakeup is %true, an idle worker is
  * woken up.
  *
- * LOCKING:
- * spin_lock_irq(gcwq->lock).
+ * CONTEXT:
+ * spin_lock_irq(gcwq->lock)
  */
 static inline void worker_set_flags(struct worker *worker, unsigned int flags,
 				    bool wakeup)
 {
 	struct global_cwq *gcwq = worker->gcwq;
 
+	WARN_ON_ONCE(worker->task != current);
+
 	/*
 	 * If transitioning into NOT_RUNNING, adjust nr_running and
 	 * wake up an idle worker as necessary if requested by
@@ -639,19 +641,21 @@ static inline void worker_set_flags(struct worker *worker, unsigned int flags,
 
 /**
  * worker_clr_flags - clear worker flags and adjust nr_running accordingly
- * @worker: worker to set flags for
+ * @worker: self
  * @flags: flags to clear
  *
  * Clear @flags in @worker->flags and adjust nr_running accordingly.
  *
- * LOCKING:
- * spin_lock_irq(gcwq->lock).
+ * CONTEXT:
+ * spin_lock_irq(gcwq->lock)
  */
 static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
 {
 	struct global_cwq *gcwq = worker->gcwq;
 	unsigned int oflags = worker->flags;
 
+	WARN_ON_ONCE(worker->task != current);
+
 	worker->flags &= ~flags;
 
 	/* if transitioning out of NOT_RUNNING, increment nr_running */
@@ -1073,7 +1077,8 @@ static void worker_enter_idle(struct worker *worker)
 	BUG_ON(!list_empty(&worker->entry) &&
 	       (worker->hentry.next || worker->hentry.pprev));
 
-	worker_set_flags(worker, WORKER_IDLE, false);
+	/* can't use worker_set_flags(), also called from start_worker() */
+	worker->flags |= WORKER_IDLE;
 	gcwq->nr_idle++;
 	worker->last_active = jiffies;
 
@@ -1086,6 +1091,10 @@ static void worker_enter_idle(struct worker *worker)
 			  jiffies + IDLE_WORKER_TIMEOUT);
 	} else
 		wake_up_all(&gcwq->trustee_wait);
+
+	/* sanity check nr_running */
+	WARN_ON_ONCE(gcwq->nr_workers == gcwq->nr_idle &&
+		     atomic_read(get_gcwq_nr_running(gcwq->cpu)));
 }
 
 /**
@@ -1270,7 +1279,7 @@ fail:
  */
 static void start_worker(struct worker *worker)
 {
-	worker_set_flags(worker, WORKER_STARTED, false);
+	worker->flags |= WORKER_STARTED;
 	worker->gcwq->nr_workers++;
 	worker_enter_idle(worker);
 	wake_up_process(worker->task);
@@ -1300,7 +1309,7 @@ static void destroy_worker(struct worker *worker)
 	gcwq->nr_idle--;
 
 	list_del_init(&worker->entry);
-	worker_set_flags(worker, WORKER_DIE, false);
+	worker->flags |= WORKER_DIE;
 
 	spin_unlock_irq(&gcwq->lock);
 
@@ -2979,10 +2988,10 @@ static int __cpuinit trustee_thread(void *__gcwq)
 	gcwq->flags |= GCWQ_MANAGING_WORKERS;
 
 	list_for_each_entry(worker, &gcwq->idle_list, entry)
-		worker_set_flags(worker, WORKER_ROGUE, false);
+		worker->flags |= WORKER_ROGUE;
 
 	for_each_busy_worker(worker, i, pos, gcwq)
-		worker_set_flags(worker, WORKER_ROGUE, false);
+		worker->flags |= WORKER_ROGUE;
 
 	/*
 	 * Call schedule() so that we cross rq->lock and thus can
@@ -2995,12 +3004,12 @@ static int __cpuinit trustee_thread(void *__gcwq)
 	spin_lock_irq(&gcwq->lock);
 
 	/*
-	 * Sched callbacks are disabled now.  gcwq->nr_running should
-	 * be zero and will stay that way, making need_more_worker()
-	 * and keep_working() always return true as long as the
-	 * worklist is not empty.
+	 * Sched callbacks are disabled now.  Zap nr_running.  After
+	 * this, nr_running stays zero and need_more_worker() and
+	 * keep_working() are always true as long as the worklist is
+	 * not empty.
 	 */
-	WARN_ON_ONCE(atomic_read(get_gcwq_nr_running(gcwq->cpu)) != 0);
+	atomic_set(get_gcwq_nr_running(gcwq->cpu), 0);
 
 	spin_unlock_irq(&gcwq->lock);
 	del_timer_sync(&gcwq->idle_timer);
@@ -3046,7 +3055,7 @@ static int __cpuinit trustee_thread(void *__gcwq)
 			worker = create_worker(gcwq, false);
 			spin_lock_irq(&gcwq->lock);
 			if (worker) {
-				worker_set_flags(worker, WORKER_ROGUE, false);
+				worker->flags |= WORKER_ROGUE;
 				start_worker(worker);
 			}
 		}
@@ -3085,8 +3094,8 @@ static int __cpuinit trustee_thread(void *__gcwq)
 		 * operations.  Use a separate flag to mark that
 		 * rebinding is scheduled.
 		 */
-		worker_set_flags(worker, WORKER_REBIND, false);
-		worker_clr_flags(worker, WORKER_ROGUE);
+		worker->flags |= WORKER_REBIND;
+		worker->flags &= ~WORKER_ROGUE;
 
 		/* queue rebind_work, wq doesn't matter, use the default one */
 		if (test_and_set_bit(WORK_STRUCT_PENDING_BIT,
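
For the other direction, here is a minimal sketch of what the trustee path
does after this patch instead of calling the helpers: flip the flag bits
directly under the lock and zero nr_running once for the whole pool.  As
above, all names (toy_trustee_take_over, TOY_ROGUE, the pthread mutex standing
in for gcwq->lock) are illustrative assumptions, not the kernel interfaces.

	/* toy model only -- not kernel code */
	#include <pthread.h>
	#include <stdatomic.h>

	#define TOY_ROGUE	0x2		/* part of the NOT_RUNNING mask in this model */

	struct toy_pool {
		pthread_mutex_t lock;		/* stands in for gcwq->lock */
		atomic_int nr_running;
	};

	struct toy_worker {
		unsigned int flags;		/* protected by pool->lock */
		struct toy_pool *pool;
	};

	/*
	 * Runs in the trustee's task, not the workers': manipulate the flag
	 * bits directly and zap nr_running once, rather than calling the
	 * worker-only set/clr helpers per worker.
	 */
	void toy_trustee_take_over(struct toy_pool *pool,
				   struct toy_worker **workers, int nr)
	{
		pthread_mutex_lock(&pool->lock);

		for (int i = 0; i < nr; i++)
			workers[i]->flags |= TOY_ROGUE;

		/*
		 * After this, "nr_running is zero and work is pending" style
		 * checks stay true while the worklist is non-empty, so the
		 * frozen pool keeps draining its queued work.
		 */
		atomic_store(&pool->nr_running, 0);

		pthread_mutex_unlock(&pool->lock);
	}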