author    Tejun Heo <tj@kernel.org>    2013-03-12 14:30:00 -0400
committer Tejun Heo <tj@kernel.org>    2013-03-12 14:30:00 -0400
commit    fa1b54e69bc6c04674c9bb96a6cfa8b2c9f44771 (patch)
tree      d04342a5015b1b88fdefeceabdb1f26479dcff65 /kernel/workqueue.c
parent    76af4d936153afec176c53378e6ba8671e7e237d (diff)
workqueue: update synchronization rules on worker_pool_idr
Make worker_pool_idr protected by workqueue_lock for writes and sched-RCU
protected for reads.  Lockdep assertions are added to for_each_pool() and
get_work_pool(), and all their users are converted to either hold
workqueue_lock or disable preemption/irq.

worker_pool_assign_id() is updated to hold workqueue_lock when allocating a
pool ID.  As idr_get_new() always performs RCU-safe assignment, this is
enough on the writer side.

As standard pools are never destroyed, there's nothing to do on that side.

The locking is superfluous at this point.  This is to help implementation of
unbound pools/pwqs with custom attributes.

This patch doesn't introduce any behavior changes.

v2: Updated for_each_pwq() to use if/else for the hidden assertion statement
    instead of just if, as suggested by Lai.  This avoids confusing a
    following else clause.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Lai Jiangshan <laijs@cn.fujitsu.com>
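For reference, the lockdep assertion the hunks below rely on comes from the
parent commit (76af4d936153), not this patch; at this point in the tree its
definition looks roughly like the following sketch:

/*
 * Sketch of the assertion helper referenced below; added by the parent
 * commit, so the exact wording may differ slightly.
 */
#define assert_rcu_or_wq_lock()                                         \
        rcu_lockdep_assert(rcu_read_lock_sched_held() ||                \
                           lockdep_is_held(&workqueue_lock),            \
                           "sched RCU or workqueue lock should be held")

It warns (with lockdep enabled) whenever a protected iterator or lookup is
used without either workqueue_lock held or a sched-RCU read-side critical
section in effect.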
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--  kernel/workqueue.c | 71
1 file changed, 46 insertions, 25 deletions
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index e060ff2bc20c..46381490f496 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -282,9 +282,18 @@ static inline int __next_wq_cpu(int cpu, const struct cpumask *mask,
  * for_each_pool - iterate through all worker_pools in the system
  * @pool: iteration cursor
  * @id: integer used for iteration
+ *
+ * This must be called either with workqueue_lock held or sched RCU read
+ * locked.  If the pool needs to be used beyond the locking in effect, the
+ * caller is responsible for guaranteeing that the pool stays online.
+ *
+ * The if/else clause exists only for the lockdep assertion and can be
+ * ignored.
  */
 #define for_each_pool(pool, id)                                         \
-        idr_for_each_entry(&worker_pool_idr, pool, id)
+        idr_for_each_entry(&worker_pool_idr, pool, id)                  \
+                if (({ assert_rcu_or_wq_lock(); false; })) { }          \
+                else
 
 /**
  * for_each_pwq - iterate through all pool_workqueues of the specified workqueue
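The "if (...) { } else" shuffle above can look odd at first sight.  A minimal
userspace sketch (hypothetical names, using the same GCC statement-expression
extension the kernel uses) shows the idea: the statement expression runs the
assertion and always evaluates to false, so the statement the caller writes
after the macro becomes the else branch and still executes on every
iteration, and a dangling else written by the caller cannot pair with the
hidden if.

#include <assert.h>
#include <stdio.h>

static int locked = 1;   /* stand-in for "the right lock is held" */

/* Same shape as for_each_pool(): hidden assertion, caller's body as else. */
#define for_each_index(i, n)                                            \
        for ((i) = 0; (i) < (n); (i)++)                                 \
                if (({ assert(locked); 0; })) { }                       \
                else

int main(void)
{
        int i;

        for_each_index(i, 3)
                printf("iteration %d\n", i);    /* attaches to the hidden else */
        return 0;
}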
@@ -432,8 +441,10 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS],
                                      cpu_std_worker_pools);
 static struct worker_pool unbound_std_worker_pools[NR_STD_WORKER_POOLS];
 
-/* idr of all pools */
-static DEFINE_MUTEX(worker_pool_idr_mutex);
+/*
+ * idr of all pools.  Modifications are protected by workqueue_lock.  Read
+ * accesses are protected by sched-RCU.
+ */
 static DEFINE_IDR(worker_pool_idr);
 
 static int worker_thread(void *__worker);
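A minimal sketch (hypothetical helper, not part of the patch) of the
read-side rule the new comment states: idr_find() on worker_pool_idr is safe
while preemption is disabled, because disabling preemption enters a sched-RCU
read-side critical section.

/* Hypothetical helper illustrating the reader-side rule. */
static struct worker_pool *lookup_pool_sketch(int pool_id)
{
        struct worker_pool *pool;

        preempt_disable();              /* sched-RCU read-side critical section */
        pool = idr_find(&worker_pool_idr, pool_id);
        preempt_enable();

        /*
         * Once preemption is re-enabled the pool could in principle go
         * away; standard pools never do, and callers that need a pool
         * beyond the critical section must pin it themselves.
         */
        return pool;
}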
@@ -456,21 +467,16 @@ static int worker_pool_assign_id(struct worker_pool *pool)
 {
         int ret;
 
-        mutex_lock(&worker_pool_idr_mutex);
-        idr_pre_get(&worker_pool_idr, GFP_KERNEL);
-        ret = idr_get_new(&worker_pool_idr, pool, &pool->id);
-        mutex_unlock(&worker_pool_idr_mutex);
+        do {
+                if (!idr_pre_get(&worker_pool_idr, GFP_KERNEL))
+                        return -ENOMEM;
 
-        return ret;
-}
+                spin_lock_irq(&workqueue_lock);
+                ret = idr_get_new(&worker_pool_idr, pool, &pool->id);
+                spin_unlock_irq(&workqueue_lock);
+        } while (ret == -EAGAIN);
 
-/*
- * Lookup worker_pool by id.  The idr currently is built during boot and
- * never modified.  Don't worry about locking for now.
- */
-static struct worker_pool *worker_pool_by_id(int pool_id)
-{
-        return idr_find(&worker_pool_idr, pool_id);
+        return ret;
 }
 
 static struct worker_pool *get_std_worker_pool(int cpu, bool highpri)
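The do/while above follows the old two-step idr idiom: idr_pre_get()
preallocates memory outside the spinlock and returns 0 on allocation failure,
while idr_get_new() consumes the preallocation under the lock and returns
-EAGAIN if a concurrent allocator raced it away, in which case the whole
sequence is retried.  A generic sketch of the same pattern (illustrative
names only):

/* Generic sketch of the two-step idr allocation used above. */
static int assign_id_sketch(struct idr *idr, spinlock_t *lock,
                            void *ptr, int *id)
{
        int ret;

        do {
                if (!idr_pre_get(idr, GFP_KERNEL))
                        return -ENOMEM;         /* preallocation failed */

                spin_lock_irq(lock);
                ret = idr_get_new(idr, ptr, id);        /* RCU-safe publish */
                spin_unlock_irq(lock);
        } while (ret == -EAGAIN);               /* lost the preallocation race */

        return ret;
}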
@@ -586,13 +592,23 @@ static struct pool_workqueue *get_work_pwq(struct work_struct *work)
  * @work: the work item of interest
  *
  * Return the worker_pool @work was last associated with.  %NULL if none.
+ *
+ * Pools are created and destroyed under workqueue_lock, and allow read
+ * access under sched-RCU read lock.  As such, this function should be
+ * called under workqueue_lock or with preemption disabled.
+ *
+ * All fields of the returned pool are accessible as long as the above
+ * mentioned locking is in effect.  If the returned pool needs to be used
+ * beyond the critical section, the caller is responsible for ensuring the
+ * returned pool is and stays online.
  */
 static struct worker_pool *get_work_pool(struct work_struct *work)
 {
         unsigned long data = atomic_long_read(&work->data);
-        struct worker_pool *pool;
         int pool_id;
 
+        assert_rcu_or_wq_lock();
+
         if (data & WORK_STRUCT_PWQ)
                 return ((struct pool_workqueue *)
                         (data & WORK_STRUCT_WQ_DATA_MASK))->pool;
@@ -601,9 +617,7 @@ static struct worker_pool *get_work_pool(struct work_struct *work)
         if (pool_id == WORK_OFFQ_POOL_NONE)
                 return NULL;
 
-        pool = worker_pool_by_id(pool_id);
-        WARN_ON_ONCE(!pool);
-        return pool;
+        return idr_find(&worker_pool_idr, pool_id);
 }
 
 /**
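The two hunks that follow convert the remaining get_work_pool() callers to
this discipline: disable irqs (which also disables preemption, satisfying the
assertion), look the pool up, then take pool->lock with the plain, non-irq
variant.  A hedged sketch with a hypothetical caller:

/* Hypothetical caller showing the new calling convention for get_work_pool(). */
static bool work_is_running_sketch(struct work_struct *work)
{
        struct worker_pool *pool;
        bool running = false;

        local_irq_disable();            /* satisfies assert_rcu_or_wq_lock() */
        pool = get_work_pool(work);
        if (pool) {
                spin_lock(&pool->lock); /* irqs already off, plain lock */
                running = find_worker_executing_work(pool, work) != NULL;
                spin_unlock(&pool->lock);
        }
        local_irq_enable();

        return running;
}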
@@ -2767,11 +2781,15 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
         struct pool_workqueue *pwq;
 
         might_sleep();
+
+        local_irq_disable();
         pool = get_work_pool(work);
-        if (!pool)
+        if (!pool) {
+                local_irq_enable();
                 return false;
+        }
 
-        spin_lock_irq(&pool->lock);
+        spin_lock(&pool->lock);
         /* see the comment in try_to_grab_pending() with the same code */
         pwq = get_work_pwq(work);
         if (pwq) {
@@ -3414,19 +3432,22 @@ EXPORT_SYMBOL_GPL(workqueue_congested);
  */
 unsigned int work_busy(struct work_struct *work)
 {
-        struct worker_pool *pool = get_work_pool(work);
+        struct worker_pool *pool;
         unsigned long flags;
         unsigned int ret = 0;
 
         if (work_pending(work))
                 ret |= WORK_BUSY_PENDING;
 
+        local_irq_save(flags);
+        pool = get_work_pool(work);
         if (pool) {
-                spin_lock_irqsave(&pool->lock, flags);
+                spin_lock(&pool->lock);
                 if (find_worker_executing_work(pool, work))
                         ret |= WORK_BUSY_RUNNING;
-                spin_unlock_irqrestore(&pool->lock, flags);
+                spin_unlock(&pool->lock);
         }
+        local_irq_restore(flags);
 
         return ret;
 }