about summary refs log tree commit diff stats
diff options
context:
space:
mode:
author: Tejun Heo <tj@kernel.org> 2013-03-12 14:29:57 -0400
committer: Tejun Heo <tj@kernel.org> 2013-03-12 14:29:57 -0400
commite98d5b16cf4df992c40a7c83f1eae61db5bb03da (patch)
tree57bb6a301fde2b7e5a8bf8f2a8198e3b344acdd2
parent6183c009f6cd94b42e5812adcfd4ba6220a196e1 (diff)
workqueue: make workqueue_lock irq-safe
workqueue_lock will be used to synchronize areas which require
irq-safety and there isn't much benefit in keeping it not irq-safe.
Make it irq-safe.

This patch doesn't introduce any visible behavior changes.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Lai Jiangshan <laijs@cn.fujitsu.com>
-rw-r--r-- kernel/workqueue.c 44
1 file changed, 22 insertions, 22 deletions
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index c6e1bdb469ee..c585d0ebd353 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2715,10 +2715,10 @@ void drain_workqueue(struct workqueue_struct *wq)
2715 * hotter than drain_workqueue() and already looks at @wq->flags. 2715 * hotter than drain_workqueue() and already looks at @wq->flags.
2716 * Use WQ_DRAINING so that queue doesn't have to check nr_drainers. 2716 * Use WQ_DRAINING so that queue doesn't have to check nr_drainers.
2717 */ 2717 */
2718 spin_lock(&workqueue_lock); 2718 spin_lock_irq(&workqueue_lock);
2719 if (!wq->nr_drainers++) 2719 if (!wq->nr_drainers++)
2720 wq->flags |= WQ_DRAINING; 2720 wq->flags |= WQ_DRAINING;
2721 spin_unlock(&workqueue_lock); 2721 spin_unlock_irq(&workqueue_lock);
2722reflush: 2722reflush:
2723 flush_workqueue(wq); 2723 flush_workqueue(wq);
2724 2724
@@ -2740,10 +2740,10 @@ reflush:
2740 goto reflush; 2740 goto reflush;
2741 } 2741 }
2742 2742
2743 spin_lock(&workqueue_lock); 2743 spin_lock_irq(&workqueue_lock);
2744 if (!--wq->nr_drainers) 2744 if (!--wq->nr_drainers)
2745 wq->flags &= ~WQ_DRAINING; 2745 wq->flags &= ~WQ_DRAINING;
2746 spin_unlock(&workqueue_lock); 2746 spin_unlock_irq(&workqueue_lock);
2747} 2747}
2748EXPORT_SYMBOL_GPL(drain_workqueue); 2748EXPORT_SYMBOL_GPL(drain_workqueue);
2749 2749
@@ -3233,7 +3233,7 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
3233 * list. Grab it, set max_active accordingly and add the new 3233 * list. Grab it, set max_active accordingly and add the new
3234 * workqueue to workqueues list. 3234 * workqueue to workqueues list.
3235 */ 3235 */
3236 spin_lock(&workqueue_lock); 3236 spin_lock_irq(&workqueue_lock);
3237 3237
3238 if (workqueue_freezing && wq->flags & WQ_FREEZABLE) 3238 if (workqueue_freezing && wq->flags & WQ_FREEZABLE)
3239 for_each_pwq_cpu(cpu, wq) 3239 for_each_pwq_cpu(cpu, wq)
@@ -3241,7 +3241,7 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
3241 3241
3242 list_add(&wq->list, &workqueues); 3242 list_add(&wq->list, &workqueues);
3243 3243
3244 spin_unlock(&workqueue_lock); 3244 spin_unlock_irq(&workqueue_lock);
3245 3245
3246 return wq; 3246 return wq;
3247err: 3247err:
@@ -3285,9 +3285,9 @@ void destroy_workqueue(struct workqueue_struct *wq)
3285 * wq list is used to freeze wq, remove from list after 3285 * wq list is used to freeze wq, remove from list after
3286 * flushing is complete in case freeze races us. 3286 * flushing is complete in case freeze races us.
3287 */ 3287 */
3288 spin_lock(&workqueue_lock); 3288 spin_lock_irq(&workqueue_lock);
3289 list_del(&wq->list); 3289 list_del(&wq->list);
3290 spin_unlock(&workqueue_lock); 3290 spin_unlock_irq(&workqueue_lock);
3291 3291
3292 if (wq->flags & WQ_RESCUER) { 3292 if (wq->flags & WQ_RESCUER) {
3293 kthread_stop(wq->rescuer->task); 3293 kthread_stop(wq->rescuer->task);
@@ -3336,7 +3336,7 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
3336 3336
3337 max_active = wq_clamp_max_active(max_active, wq->flags, wq->name); 3337 max_active = wq_clamp_max_active(max_active, wq->flags, wq->name);
3338 3338
3339 spin_lock(&workqueue_lock); 3339 spin_lock_irq(&workqueue_lock);
3340 3340
3341 wq->saved_max_active = max_active; 3341 wq->saved_max_active = max_active;
3342 3342
@@ -3344,16 +3344,16 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
3344 struct pool_workqueue *pwq = get_pwq(cpu, wq); 3344 struct pool_workqueue *pwq = get_pwq(cpu, wq);
3345 struct worker_pool *pool = pwq->pool; 3345 struct worker_pool *pool = pwq->pool;
3346 3346
3347 spin_lock_irq(&pool->lock); 3347 spin_lock(&pool->lock);
3348 3348
3349 if (!(wq->flags & WQ_FREEZABLE) || 3349 if (!(wq->flags & WQ_FREEZABLE) ||
3350 !(pool->flags & POOL_FREEZING)) 3350 !(pool->flags & POOL_FREEZING))
3351 pwq_set_max_active(pwq, max_active); 3351 pwq_set_max_active(pwq, max_active);
3352 3352
3353 spin_unlock_irq(&pool->lock); 3353 spin_unlock(&pool->lock);
3354 } 3354 }
3355 3355
3356 spin_unlock(&workqueue_lock); 3356 spin_unlock_irq(&workqueue_lock);
3357} 3357}
3358EXPORT_SYMBOL_GPL(workqueue_set_max_active); 3358EXPORT_SYMBOL_GPL(workqueue_set_max_active);
3359 3359
@@ -3599,7 +3599,7 @@ void freeze_workqueues_begin(void)
3599{ 3599{
3600 unsigned int cpu; 3600 unsigned int cpu;
3601 3601
3602 spin_lock(&workqueue_lock); 3602 spin_lock_irq(&workqueue_lock);
3603 3603
3604 WARN_ON_ONCE(workqueue_freezing); 3604 WARN_ON_ONCE(workqueue_freezing);
3605 workqueue_freezing = true; 3605 workqueue_freezing = true;
@@ -3609,7 +3609,7 @@ void freeze_workqueues_begin(void)
3609 struct workqueue_struct *wq; 3609 struct workqueue_struct *wq;
3610 3610
3611 for_each_std_worker_pool(pool, cpu) { 3611 for_each_std_worker_pool(pool, cpu) {
3612 spin_lock_irq(&pool->lock); 3612 spin_lock(&pool->lock);
3613 3613
3614 WARN_ON_ONCE(pool->flags & POOL_FREEZING); 3614 WARN_ON_ONCE(pool->flags & POOL_FREEZING);
3615 pool->flags |= POOL_FREEZING; 3615 pool->flags |= POOL_FREEZING;
@@ -3622,11 +3622,11 @@ void freeze_workqueues_begin(void)
3622 pwq->max_active = 0; 3622 pwq->max_active = 0;
3623 } 3623 }
3624 3624
3625 spin_unlock_irq(&pool->lock); 3625 spin_unlock(&pool->lock);
3626 } 3626 }
3627 } 3627 }
3628 3628
3629 spin_unlock(&workqueue_lock); 3629 spin_unlock_irq(&workqueue_lock);
3630} 3630}
3631 3631
3632/** 3632/**
@@ -3647,7 +3647,7 @@ bool freeze_workqueues_busy(void)
3647 unsigned int cpu; 3647 unsigned int cpu;
3648 bool busy = false; 3648 bool busy = false;
3649 3649
3650 spin_lock(&workqueue_lock); 3650 spin_lock_irq(&workqueue_lock);
3651 3651
3652 WARN_ON_ONCE(!workqueue_freezing); 3652 WARN_ON_ONCE(!workqueue_freezing);
3653 3653
@@ -3671,7 +3671,7 @@ bool freeze_workqueues_busy(void)
3671 } 3671 }
3672 } 3672 }
3673out_unlock: 3673out_unlock:
3674 spin_unlock(&workqueue_lock); 3674 spin_unlock_irq(&workqueue_lock);
3675 return busy; 3675 return busy;
3676} 3676}
3677 3677
@@ -3688,7 +3688,7 @@ void thaw_workqueues(void)
3688{ 3688{
3689 unsigned int cpu; 3689 unsigned int cpu;
3690 3690
3691 spin_lock(&workqueue_lock); 3691 spin_lock_irq(&workqueue_lock);
3692 3692
3693 if (!workqueue_freezing) 3693 if (!workqueue_freezing)
3694 goto out_unlock; 3694 goto out_unlock;
@@ -3698,7 +3698,7 @@ void thaw_workqueues(void)
3698 struct workqueue_struct *wq; 3698 struct workqueue_struct *wq;
3699 3699
3700 for_each_std_worker_pool(pool, cpu) { 3700 for_each_std_worker_pool(pool, cpu) {
3701 spin_lock_irq(&pool->lock); 3701 spin_lock(&pool->lock);
3702 3702
3703 WARN_ON_ONCE(!(pool->flags & POOL_FREEZING)); 3703 WARN_ON_ONCE(!(pool->flags & POOL_FREEZING));
3704 pool->flags &= ~POOL_FREEZING; 3704 pool->flags &= ~POOL_FREEZING;
@@ -3716,13 +3716,13 @@ void thaw_workqueues(void)
3716 3716
3717 wake_up_worker(pool); 3717 wake_up_worker(pool);
3718 3718
3719 spin_unlock_irq(&pool->lock); 3719 spin_unlock(&pool->lock);
3720 } 3720 }
3721 } 3721 }
3722 3722
3723 workqueue_freezing = false; 3723 workqueue_freezing = false;
3724out_unlock: 3724out_unlock:
3725 spin_unlock(&workqueue_lock); 3725 spin_unlock_irq(&workqueue_lock);
3726} 3726}
3727#endif /* CONFIG_FREEZER */ 3727#endif /* CONFIG_FREEZER */
3728 3728