aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorLai Jiangshan <laijs@cn.fujitsu.com>2013-02-19 15:17:02 -0500
committerTejun Heo <tj@kernel.org>2013-03-04 12:44:58 -0500
commitb31041042a8cdece67f925e4bae55b5f5fd754ca (patch)
tree6392271c537c05d59e0d72ac72463343d2a29249
parentf36dc67b27a689eeb3631b11ebef17bbff257fbb (diff)
workqueue: better define synchronization rule around rescuer->pool updates
Rescuers visit different worker_pools to process work items from pools under pressure. Currently, rescuer->pool is updated outside any locking and when an outsider looks at a rescuer, there's no way to tell when and whether rescuer->pool is gonna change. While this doesn't currently cause any problem, it is nasty.

With recent worker_maybe_bind_and_lock() changes, we can move rescuer->pool updates inside pool locks such that if rescuer->pool equals a locked pool, it's guaranteed to stay that way until the pool is unlocked.

Move rescuer->pool updates inside pool->lock.

This patch doesn't introduce any visible behavior difference.

tj: Updated the description.

Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
-rw-r--r--kernel/workqueue.c3
-rw-r--r--kernel/workqueue_internal.h1
2 files changed, 3 insertions(+), 1 deletion(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 09545d445a55..fd9a28a13afd 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2357,8 +2357,8 @@ repeat:
 		mayday_clear_cpu(cpu, wq->mayday_mask);
 
 		/* migrate to the target cpu if possible */
-		rescuer->pool = pool;
 		worker_maybe_bind_and_lock(pool);
+		rescuer->pool = pool;
 
 		/*
 		 * Slurp in all works issued via this workqueue and
@@ -2379,6 +2379,7 @@ repeat:
 		if (keep_working(pool))
 			wake_up_worker(pool);
 
+		rescuer->pool = NULL;
 		spin_unlock_irq(&pool->lock);
 	}
 
diff --git a/kernel/workqueue_internal.h b/kernel/workqueue_internal.h
index 07650264ec15..f9c887731e2b 100644
--- a/kernel/workqueue_internal.h
+++ b/kernel/workqueue_internal.h
@@ -32,6 +32,7 @@ struct worker {
 	struct list_head	scheduled;	/* L: scheduled works */
 	struct task_struct	*task;		/* I: worker task */
 	struct worker_pool	*pool;		/* I: the associated pool */
+						/* L: for rescuers */
 	/* 64 bytes boundary on 64bit, 32 on 32bit */
 	unsigned long		last_active;	/* L: last active timestamp */
 	unsigned int		flags;		/* X: flags */