author:    Tejun Heo <tj@kernel.org>  2015-03-09 09:22:28 -0400
committer: Tejun Heo <tj@kernel.org>  2015-03-09 09:22:28 -0400
commit:    e2dca7adff8f3fae0ab250a6362798550b3c79ee
tree:      3b86a4dfbc600872645c856d3d45b8b3af4d3cff /kernel
parent:    8603e1b30027f943cc9c1eef2b291d42c3347af1
workqueue: make the workqueues list RCU walkable
The workqueues list is protected by wq_pool_mutex, and a workqueue and
its subordinate data structures are freed directly on destruction. We
want to add the ability to dump workqueues from a sysrq callback, which
requires walking all workqueues without grabbing wq_pool_mutex. This
patch makes freeing of workqueues RCU protected and makes the
workqueues list walkable while holding the RCU read lock.
Note that pool_workqueues and pools are already sched-RCU protected.
For consistency, workqueues are also protected with sched-RCU.
While at it, reverse the workqueues list so that a workqueue created
earlier comes earlier in the list. The order of the list isn't
functionally significant, but it makes the planned sysrq dump list
system workqueues first.
Signed-off-by: Tejun Heo <tj@kernel.org>
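For context (an editorial sketch, not part of the commit): once destruction is deferred through call_rcu_sched(), a reader can walk the workqueues list under the sched-RCU read lock instead of taking wq_pool_mutex. The helper below is a minimal sketch of such a walker, assuming it lives in kernel/workqueue.c where the workqueues list is in scope; the function name and the fields it prints are illustrative assumptions, not the actual sysrq dump.

/*
 * Editorial sketch only -- not part of this patch.  Walks the
 * workqueues list under the sched-RCU read lock; destruction is
 * deferred via call_rcu_sched(), so each entry stays valid for the
 * duration of the walk.
 */
static void dump_workqueue_names(void)
{
	struct workqueue_struct *wq;

	rcu_read_lock_sched();
	list_for_each_entry_rcu(wq, &workqueues, list)
		pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags);
	rcu_read_unlock_sched();
}

Because such a reader only holds the sched-RCU read lock, destroy_workqueue() can no longer free the workqueue_struct directly; the hunks below therefore defer the final free to call_rcu_sched(&wq->rcu, rcu_free_wq).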
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/workqueue.c  47
1 file changed, 31 insertions(+), 16 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 41ff75b478c6..6b9b0dc3dea5 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -230,7 +230,7 @@ struct wq_device;
  */
 struct workqueue_struct {
 	struct list_head	pwqs;		/* WR: all pwqs of this wq */
-	struct list_head	list;		/* PL: list of all workqueues */
+	struct list_head	list;		/* PR: list of all workqueues */
 
 	struct mutex		mutex;		/* protects this wq */
 	int			work_color;	/* WQ: current work color */
@@ -257,6 +257,13 @@ struct workqueue_struct {
 #endif
 	char			name[WQ_NAME_LEN]; /* I: workqueue name */
 
+	/*
+	 * Destruction of workqueue_struct is sched-RCU protected to allow
+	 * walking the workqueues list without grabbing wq_pool_mutex.
+	 * This is used to dump all workqueues from sysrq.
+	 */
+	struct rcu_head		rcu;
+
 	/* hot fields used during command issue, aligned to cacheline */
 	unsigned int		flags ____cacheline_aligned; /* WQ: WQ_* flags */
 	struct pool_workqueue __percpu *cpu_pwqs; /* I: per-cpu pwqs */
@@ -288,7 +295,7 @@ static struct workqueue_attrs *wq_update_unbound_numa_attrs_buf;
 static DEFINE_MUTEX(wq_pool_mutex);	/* protects pools and workqueues list */
 static DEFINE_SPINLOCK(wq_mayday_lock);	/* protects wq->maydays list */
 
-static LIST_HEAD(workqueues);		/* PL: list of all workqueues */
+static LIST_HEAD(workqueues);		/* PR: list of all workqueues */
 static bool workqueue_freezing;		/* PL: have wqs started freezing? */
 
 /* the per-cpu worker pools */
@@ -3424,6 +3431,20 @@ static int init_worker_pool(struct worker_pool *pool)
 	return 0;
 }
 
+static void rcu_free_wq(struct rcu_head *rcu)
+{
+	struct workqueue_struct *wq =
+		container_of(rcu, struct workqueue_struct, rcu);
+
+	if (!(wq->flags & WQ_UNBOUND))
+		free_percpu(wq->cpu_pwqs);
+	else
+		free_workqueue_attrs(wq->unbound_attrs);
+
+	kfree(wq->rescuer);
+	kfree(wq);
+}
+
 static void rcu_free_pool(struct rcu_head *rcu)
 {
 	struct worker_pool *pool = container_of(rcu, struct worker_pool, rcu);
@@ -3601,12 +3622,10 @@ static void pwq_unbound_release_workfn(struct work_struct *work)
 
 	/*
 	 * If we're the last pwq going away, @wq is already dead and no one
-	 * is gonna access it anymore.  Free it.
+	 * is gonna access it anymore.  Schedule RCU free.
 	 */
-	if (is_last) {
-		free_workqueue_attrs(wq->unbound_attrs);
-		kfree(wq);
-	}
+	if (is_last)
+		call_rcu_sched(&wq->rcu, rcu_free_wq);
 }
 
 /**
@@ -4143,7 +4162,7 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
 		pwq_adjust_max_active(pwq);
 	mutex_unlock(&wq->mutex);
 
-	list_add(&wq->list, &workqueues);
+	list_add_tail_rcu(&wq->list, &workqueues);
 
 	mutex_unlock(&wq_pool_mutex);
 
@@ -4199,24 +4218,20 @@ void destroy_workqueue(struct workqueue_struct *wq)
 	 * flushing is complete in case freeze races us.
 	 */
 	mutex_lock(&wq_pool_mutex);
-	list_del_init(&wq->list);
+	list_del_rcu(&wq->list);
 	mutex_unlock(&wq_pool_mutex);
 
 	workqueue_sysfs_unregister(wq);
 
-	if (wq->rescuer) {
+	if (wq->rescuer)
 		kthread_stop(wq->rescuer->task);
-		kfree(wq->rescuer);
-		wq->rescuer = NULL;
-	}
 
 	if (!(wq->flags & WQ_UNBOUND)) {
 		/*
 		 * The base ref is never dropped on per-cpu pwqs.  Directly
-		 * free the pwqs and wq.
+		 * schedule RCU free.
 		 */
-		free_percpu(wq->cpu_pwqs);
-		kfree(wq);
+		call_rcu_sched(&wq->rcu, rcu_free_wq);
 	} else {
 		/*
 		 * We're the sole accessor of @wq at this point.  Directly