path: root/kernel/workqueue.c
author	Lai Jiangshan <laijs@cn.fujitsu.com>	2013-03-25 19:57:19 -0400
committer	Tejun Heo <tj@kernel.org>	2013-03-25 19:57:19 -0400
commit	b5927605478b740d73192f587e458de1632106e8 (patch)
tree	368ce931dea72fadb8436385e423df76fb76d0d0 /kernel/workqueue.c
parent	a357fc03262988f2aa6c4a668b89be22b11ff1e7 (diff)
workqueue: remove pwq_lock which is no longer used
To simplify locking, the previous patches expanded wq->mutex to protect
all fields of each workqueue instance, including the pwqs list, leaving
pwq_lock without any user.

Remove the unused pwq_lock.

tj: Rebased on top of the current dev branch.  Updated description.

Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
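For context, the post-patch scheme is a classic writer-mutex / sched-RCU-reader
pattern over wq->pwqs: writers hold wq->mutex alone, readers hold sched-RCU
alone. Below is a minimal kernel-style sketch of that pattern; demo_wq,
demo_pwq, and the demo_* helpers are hypothetical names for illustration only,
not the actual workqueue structures:

	#include <linux/mutex.h>
	#include <linux/rculist.h>
	#include <linux/rcupdate.h>

	struct demo_pwq {
		struct list_head pwqs_node;	/* linked under demo_wq->mutex */
	};

	struct demo_wq {
		struct mutex mutex;		/* protects this instance, incl. pwqs */
		struct list_head pwqs;		/* WR: see the comment legend below */
	};

	/* Writer side: wq->mutex alone now suffices to modify the pwqs list. */
	static void demo_add_pwq(struct demo_wq *wq, struct demo_pwq *pwq)
	{
		mutex_lock(&wq->mutex);
		list_add_rcu(&pwq->pwqs_node, &wq->pwqs);
		mutex_unlock(&wq->mutex);
	}

	/* Reader side: sched-RCU alone suffices; no pwq_lock anywhere. */
	static void demo_walk_pwqs(struct demo_wq *wq)
	{
		struct demo_pwq *pwq;

		rcu_read_lock_sched();
		list_for_each_entry_rcu(pwq, &wq->pwqs, pwqs_node)
			;	/* inspect @pwq */
		rcu_read_unlock_sched();
	}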
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--	kernel/workqueue.c	13 ++-----------
1 file changed, 2 insertions(+), 11 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index af6087a5a10a..04a8b98d30ce 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -125,12 +125,9 @@ enum {
  *
  * PR: wq_pool_mutex protected for writes.  Sched-RCU protected for reads.
  *
- * PW: pwq_lock protected.
- *
  * WQ: wq->mutex protected.
  *
- * WR: wq->mutex and pwq_lock protected for writes.  Sched-RCU protected
- *     for reads.
+ * WR: wq->mutex protected for writes.  Sched-RCU protected for reads.
  *
  * MD: wq_mayday_lock protected.
  */
@@ -257,7 +254,6 @@ struct workqueue_struct {
 static struct kmem_cache *pwq_cache;
 
 static DEFINE_MUTEX(wq_pool_mutex);	/* protects pools and workqueues list */
-static DEFINE_SPINLOCK(pwq_lock);	/* protects pool_workqueues */
 static DEFINE_SPINLOCK(wq_mayday_lock);	/* protects wq->maydays list */
 
 static LIST_HEAD(workqueues);		/* PL: list of all workqueues */
@@ -300,8 +296,7 @@ static void copy_workqueue_attrs(struct workqueue_attrs *to,
 
 #define assert_rcu_or_wq_mutex(wq)					\
 	rcu_lockdep_assert(rcu_read_lock_sched_held() ||		\
-			   lockdep_is_held(&wq->mutex) ||		\
-			   lockdep_is_held(&pwq_lock),			\
+			   lockdep_is_held(&wq->mutex),			\
 			   "sched RCU or wq->mutex should be held")
 
 #ifdef CONFIG_LOCKDEP
@@ -3549,9 +3544,7 @@ static void pwq_unbound_release_workfn(struct work_struct *work)
 	 * and consistent with the linking path.
 	 */
 	mutex_lock(&wq->mutex);
-	spin_lock_irq(&pwq_lock);
 	list_del_rcu(&pwq->pwqs_node);
-	spin_unlock_irq(&pwq_lock);
 	mutex_unlock(&wq->mutex);
 
 	put_unbound_pool(pool);
@@ -3635,9 +3628,7 @@ static void init_and_link_pwq(struct pool_workqueue *pwq,
 	pwq_adjust_max_active(pwq);
 
 	/* link in @pwq */
-	spin_lock_irq(&pwq_lock);
 	list_add_rcu(&pwq->pwqs_node, &wq->pwqs);
-	spin_unlock_irq(&pwq_lock);
 
 	mutex_unlock(&wq->mutex);
 }
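
The assert_rcu_or_wq_mutex() hunk above is the lockdep teeth behind the new
WR rule: a pwqs traversal must run under sched-RCU or wq->mutex, and nothing
else passes. A minimal sketch of how a traversal helper might wire in such an
assertion, under the same assumptions as the earlier sketch (the demo_* names
are hypothetical, not the actual workqueue macros):

	/* Complain via lockdep unless the caller holds sched-RCU or wq->mutex. */
	#define demo_assert_rcu_or_wq_mutex(wq)					\
		rcu_lockdep_assert(rcu_read_lock_sched_held() ||		\
				   lockdep_is_held(&(wq)->mutex),		\
				   "sched RCU or wq->mutex should be held")

	/* Iterate wq->pwqs, asserting the locking rule before each walk. */
	#define demo_for_each_pwq(pwq, wq)					\
		list_for_each_entry_rcu((pwq), &(wq)->pwqs, pwqs_node)		\
			if (({ demo_assert_rcu_or_wq_mutex(wq); false; })) { }	\
			else

With pwq_lock gone from the assertion, lockdep now flags any path that still
relied on it, which is exactly the point of tightening the condition.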