author		Tejun Heo <tj@kernel.org>	2013-03-12 14:30:03 -0400
committer	Tejun Heo <tj@kernel.org>	2013-03-12 14:30:03 -0400
commit		7a62c2c87e3bc174fe4b9e9720e148427510fcfb (patch)
tree		9da41de39c7d55d0b54250eadb2980d12e1e825d /kernel/workqueue.c
parent		29c91e9912bed7060df6116af90286500f5a700d (diff)
workqueue: remove unbound_std_worker_pools[] and related helpers
Workqueue no longer makes use of unbound_std_worker_pools[]. All
unbound worker_pools are created dynamically and there's nothing
special about the standard ones. With unbound_std_worker_pools[]
unused, workqueue no longer has places where it needs to treat the
per-cpu and unbound pools together.
Remove unbound_std_worker_pools[] and the helpers that wrapped it to
present the per-cpu and unbound standard worker_pools as a unified set.
* for_each_std_worker_pool() now walks through per-cpu pools only (a
  standalone model of the new iteration follows the diff).
* for_each[_online]_wq_cpu(), which no longer have any users, are
  removed.
* std_worker_pools() and std_worker_pool_pri() are unused and removed.
* get_std_worker_pool() is removed. Its only user,
  alloc_and_link_pwqs(), used it for per-cpu pools only, so the
  per_cpu access is now open-coded there instead (see the sketch
  below).
This patch doesn't introduce any functional changes.
Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Lai Jiangshan <laijs@cn.fujitsu.com>
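
For illustration, the open-coded lookup that replaces
get_std_worker_pool() is plain array indexing: each CPU owns an array
of NR_STD_WORKER_POOLS standard pools, and highpri (0 or 1) selects
the entry. Below is a minimal userspace model of that pattern; the 2-D
array stands in for the kernel's per_cpu() machinery, and all sizes
and names here are illustrative, not kernel code:

	#include <stdbool.h>
	#include <stdio.h>

	#define NR_STD_WORKER_POOLS	2	/* [0]: normal, [1]: highpri */
	#define NR_CPUS			4	/* illustrative CPU count */

	struct worker_pool {
		int cpu;
		int pri;
	};

	/* Stand-in for the kernel's per-cpu cpu_std_worker_pools: a fixed
	 * array of standard pools per CPU; nothing here for unbound pools. */
	static struct worker_pool cpu_std_worker_pools[NR_CPUS][NR_STD_WORKER_POOLS];

	int main(void)
	{
		int cpu = 2;
		bool highpri = true;

		/* Models the new open-coded access in alloc_and_link_pwqs():
		 *	cpu_pools = per_cpu(cpu_std_worker_pools, cpu);
		 *	pwq->pool = &cpu_pools[highpri];
		 */
		struct worker_pool *pool = &cpu_std_worker_pools[cpu][highpri];

		pool->cpu = cpu;
		pool->pri = highpri;
		printf("pool for cpu %d, pri %d\n", pool->cpu, pool->pri);
		return 0;
	}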
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--	kernel/workqueue.c	66
1 file changed, 6 insertions(+), 60 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 3fe2c79bf166..7642bb7b70ee 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -253,48 +253,13 @@ EXPORT_SYMBOL_GPL(system_freezable_wq);
 			   "sched RCU or workqueue lock should be held")
 
 #define for_each_std_worker_pool(pool, cpu)				\
-	for ((pool) = &std_worker_pools(cpu)[0];			\
-	     (pool) < &std_worker_pools(cpu)[NR_STD_WORKER_POOLS]; (pool)++)
+	for ((pool) = &per_cpu(cpu_std_worker_pools, cpu)[0];		\
+	     (pool) < &per_cpu(cpu_std_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
+	     (pool)++)
 
 #define for_each_busy_worker(worker, i, pool)				\
 	hash_for_each(pool->busy_hash, i, worker, hentry)
 
-static inline int __next_wq_cpu(int cpu, const struct cpumask *mask,
-				unsigned int sw)
-{
-	if (cpu < nr_cpu_ids) {
-		if (sw & 1) {
-			cpu = cpumask_next(cpu, mask);
-			if (cpu < nr_cpu_ids)
-				return cpu;
-		}
-		if (sw & 2)
-			return WORK_CPU_UNBOUND;
-	}
-	return WORK_CPU_END;
-}
-
-/*
- * CPU iterators
- *
- * An extra cpu number is defined using an invalid cpu number
- * (WORK_CPU_UNBOUND) to host workqueues which are not bound to any
- * specific CPU.  The following iterators are similar to for_each_*_cpu()
- * iterators but also considers the unbound CPU.
- *
- * for_each_wq_cpu()		: possible CPUs + WORK_CPU_UNBOUND
- * for_each_online_wq_cpu()	: online CPUs + WORK_CPU_UNBOUND
- */
-#define for_each_wq_cpu(cpu)						\
-	for ((cpu) = __next_wq_cpu(-1, cpu_possible_mask, 3);		\
-	     (cpu) < WORK_CPU_END;					\
-	     (cpu) = __next_wq_cpu((cpu), cpu_possible_mask, 3))
-
-#define for_each_online_wq_cpu(cpu)					\
-	for ((cpu) = __next_wq_cpu(-1, cpu_online_mask, 3);		\
-	     (cpu) < WORK_CPU_END;					\
-	     (cpu) = __next_wq_cpu((cpu), cpu_online_mask, 3))
-
 /**
  * for_each_pool - iterate through all worker_pools in the system
  * @pool: iteration cursor
@@ -456,7 +421,6 @@ static bool workqueue_freezing;	/* W: have wqs started freezing? */
  */
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS],
 				     cpu_std_worker_pools);
-static struct worker_pool unbound_std_worker_pools[NR_STD_WORKER_POOLS];
 
 /*
  * idr of all pools.  Modifications are protected by workqueue_lock.  Read
@@ -466,19 +430,6 @@ static DEFINE_IDR(worker_pool_idr);
 
 static int worker_thread(void *__worker);
 
-static struct worker_pool *std_worker_pools(int cpu)
-{
-	if (cpu != WORK_CPU_UNBOUND)
-		return per_cpu(cpu_std_worker_pools, cpu);
-	else
-		return unbound_std_worker_pools;
-}
-
-static int std_worker_pool_pri(struct worker_pool *pool)
-{
-	return pool - std_worker_pools(pool->cpu);
-}
-
 /* allocate ID and assign it to @pool */
 static int worker_pool_assign_id(struct worker_pool *pool)
 {
@@ -496,13 +447,6 @@ static int worker_pool_assign_id(struct worker_pool *pool)
 	return ret;
 }
 
-static struct worker_pool *get_std_worker_pool(int cpu, bool highpri)
-{
-	struct worker_pool *pools = std_worker_pools(cpu);
-
-	return &pools[highpri];
-}
-
 /**
  * first_pwq - return the first pool_workqueue of the specified workqueue
  * @wq: the target workqueue
@@ -3397,8 +3341,10 @@ static int alloc_and_link_pwqs(struct workqueue_struct *wq)
 		for_each_possible_cpu(cpu) {
 			struct pool_workqueue *pwq =
 				per_cpu_ptr(wq->cpu_pwqs, cpu);
+			struct worker_pool *cpu_pools =
+				per_cpu(cpu_std_worker_pools, cpu);
 
-			pwq->pool = get_std_worker_pool(cpu, highpri);
+			pwq->pool = &cpu_pools[highpri];
 			list_add_tail_rcu(&pwq->pwqs_node, &wq->pwqs);
 		}
 	} else {
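
The reworked for_each_std_worker_pool() above is now a plain bounded
walk over one CPU's standard pool array, with unbound pools no longer
part of the iteration. Continuing the userspace model from the earlier
sketch (same illustrative names and types, not the kernel macro
itself), the iteration pattern is:

	/* Walk only the per-cpu standard pools of @cpu. */
	#define for_each_std_worker_pool(pool, cpu)				\
		for ((pool) = &cpu_std_worker_pools[cpu][0];			\
		     (pool) < &cpu_std_worker_pools[cpu][NR_STD_WORKER_POOLS];	\
		     (pool)++)

	/* Usage:
	 *	struct worker_pool *pool;
	 *
	 *	for_each_std_worker_pool(pool, cpu)
	 *		printf("cpu %d pri %d\n", pool->cpu, pool->pri);
	 */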