author		Tejun Heo <tj@kernel.org>	2013-01-24 14:01:33 -0500
committer	Tejun Heo <tj@kernel.org>	2013-01-24 14:01:33 -0500
commit		35b6bb63b8a288f90e07948867941a553b3d97bc
tree		275528f970a80c9bf403a66450808a006db65ba8	/kernel/workqueue.c
parent		2464757086b4de0591738d5e30f069d068d70ec0
workqueue: make GCWQ_FREEZING a pool flag
Make GCWQ_FREEZING a pool flag, POOL_FREEZING. This patch doesn't
change locking - FREEZING is set or cleared on both pools of a CPU
together while holding gcwq->lock. It shouldn't cause any functional
difference.
This leaves gcwq->flags without any flags, so the field is removed.
While at it, convert the BUG_ON()s in freeze_workqueues_begin() and
thaw_workqueues() to WARN_ON_ONCE().
This is part of an effort to remove global_cwq and make worker_pool
the top-level abstraction, which in turn will help implement worker
pools with user-specified attributes.
Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Lai Jiangshan <laijs@cn.fujitsu.com>
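As a rough illustration of the invariant the message describes - POOL_FREEZING is set or cleared on both pools of a CPU together while the single gcwq lock is held - here is a minimal userspace sketch. It is not kernel code: gcwq_model, pool_model, NR_STD_POOLS, and the pthread mutex are stand-ins invented for this sketch; the real helpers (for_each_worker_pool(), WARN_ON_ONCE()) appear in the kernel/workqueue.c hunks below.

/* freezing_model.c - illustrative only, not kernel code.
 * Models the invariant above: each "gcwq" owns two worker pools,
 * and POOL_FREEZING is set/cleared on both of them together while
 * the single per-gcwq lock is held. */
#include <assert.h>
#include <pthread.h>
#include <stdio.h>

enum {
	POOL_FREEZING = 1 << 3,		/* freeze in progress */
};

#define NR_STD_POOLS	2		/* e.g. normal and highpri pool per CPU */

struct pool_model {
	unsigned int flags;		/* POOL_* flags */
};

struct gcwq_model {
	pthread_mutex_t lock;		/* stands in for gcwq->lock */
	struct pool_model pools[NR_STD_POOLS];
};

/* Set POOL_FREEZING on every pool of the gcwq, the way
 * freeze_workqueues_begin() now flips a per-pool flag instead of a
 * single GCWQ_FREEZING bit. */
static void freeze_begin(struct gcwq_model *gcwq)
{
	pthread_mutex_lock(&gcwq->lock);
	for (int i = 0; i < NR_STD_POOLS; i++) {
		assert(!(gcwq->pools[i].flags & POOL_FREEZING));
		gcwq->pools[i].flags |= POOL_FREEZING;
	}
	pthread_mutex_unlock(&gcwq->lock);
}

/* Clear POOL_FREEZING on every pool, mirroring thaw_workqueues(). */
static void thaw(struct gcwq_model *gcwq)
{
	pthread_mutex_lock(&gcwq->lock);
	for (int i = 0; i < NR_STD_POOLS; i++) {
		assert(gcwq->pools[i].flags & POOL_FREEZING);
		gcwq->pools[i].flags &= ~POOL_FREEZING;
	}
	pthread_mutex_unlock(&gcwq->lock);
}

int main(void)
{
	static struct gcwq_model gcwq = { .lock = PTHREAD_MUTEX_INITIALIZER };

	freeze_begin(&gcwq);
	thaw(&gcwq);
	printf("both pools froze and thawed together\n");
	return 0;
}

Compile with e.g. cc -pthread freezing_model.c; the asserts play the role of the WARN_ON_ONCE() checks that replace the old BUG_ON()s in the patch.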
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--	kernel/workqueue.c	28
1 file changed, 15 insertions(+), 13 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 1b8af92cc2c9..1a686e481132 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -47,11 +47,6 @@
 
 enum {
 	/*
-	 * global_cwq flags
-	 */
-	GCWQ_FREEZING		= 1 << 1,	/* freeze in progress */
-
-	/*
 	 * worker_pool flags
 	 *
 	 * A bound pool is either associated or disassociated with its CPU.
@@ -70,6 +65,7 @@ enum {
 	POOL_MANAGE_WORKERS	= 1 << 0,	/* need to manage workers */
 	POOL_MANAGING_WORKERS	= 1 << 1,	/* managing workers */
 	POOL_DISASSOCIATED	= 1 << 2,	/* cpu can't serve workers */
+	POOL_FREEZING		= 1 << 3,	/* freeze in progress */
 
 	/* worker flags */
 	WORKER_STARTED		= 1 << 0,	/* started */
@@ -152,7 +148,6 @@ struct worker_pool {
 struct global_cwq {
 	spinlock_t		lock;		/* the gcwq lock */
 	unsigned int		cpu;		/* I: the associated cpu */
-	unsigned int		flags;		/* L: GCWQ_* flags */
 
 	/* workers are chained either in busy_hash or pool idle_list */
 	DECLARE_HASHTABLE(busy_hash, BUSY_WORKER_HASH_ORDER);
@@ -3380,13 +3375,15 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
 	wq->saved_max_active = max_active;
 
 	for_each_cwq_cpu(cpu, wq) {
-		struct global_cwq *gcwq = get_gcwq(cpu);
+		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
+		struct worker_pool *pool = cwq->pool;
+		struct global_cwq *gcwq = pool->gcwq;
 
 		spin_lock_irq(&gcwq->lock);
 
 		if (!(wq->flags & WQ_FREEZABLE) ||
-		    !(gcwq->flags & GCWQ_FREEZING))
-			cwq_set_max_active(get_cwq(gcwq->cpu, wq), max_active);
+		    !(pool->flags & POOL_FREEZING))
+			cwq_set_max_active(cwq, max_active);
 
 		spin_unlock_irq(&gcwq->lock);
 	}
@@ -3676,12 +3673,15 @@ void freeze_workqueues_begin(void)
 
 	for_each_gcwq_cpu(cpu) {
 		struct global_cwq *gcwq = get_gcwq(cpu);
+		struct worker_pool *pool;
 		struct workqueue_struct *wq;
 
 		spin_lock_irq(&gcwq->lock);
 
-		BUG_ON(gcwq->flags & GCWQ_FREEZING);
-		gcwq->flags |= GCWQ_FREEZING;
+		for_each_worker_pool(pool, gcwq) {
+			WARN_ON_ONCE(pool->flags & POOL_FREEZING);
+			pool->flags |= POOL_FREEZING;
+		}
 
 		list_for_each_entry(wq, &workqueues, list) {
 			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
@@ -3767,8 +3767,10 @@ void thaw_workqueues(void)
 
 		spin_lock_irq(&gcwq->lock);
 
-		BUG_ON(!(gcwq->flags & GCWQ_FREEZING));
-		gcwq->flags &= ~GCWQ_FREEZING;
+		for_each_worker_pool(pool, gcwq) {
+			WARN_ON_ONCE(!(pool->flags & POOL_FREEZING));
+			pool->flags &= ~POOL_FREEZING;
+		}
 
 		list_for_each_entry(wq, &workqueues, list) {
 			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);