| author    | Tejun Heo <tj@kernel.org> | 2010-06-29 04:07:14 -0400 |
|-----------|---------------------------|---------------------------|
| committer | Tejun Heo <tj@kernel.org> | 2010-06-29 04:07:14 -0400 |
| commit    | b71ab8c2025caef8db719aa41af0ed735dc543cd (patch) | |
| tree      | 8cc2c6164acf5fe82e8d8d05924590cb80fe088d /kernel/workqueue.c | |
| parent    | e22bee782b3b00bd4534ae9b1c5fb2e8e6573c5c (diff) | |
workqueue: increase max_active of keventd and kill current_is_keventd()
Define WQ_MAX_ACTIVE and create keventd with max_active set to half of
it, which means keventd can now process up to WQ_MAX_ACTIVE / 2 - 1
work items concurrently. Unless some combination of work items results
in a dependency chain longer than max_active, deadlock cannot happen,
so it is no longer necessary to check current_is_keventd() before
trying to schedule a work item. Kill current_is_keventd().
(Lockdep annotations are broken. We need lock_map_acquire_read_norecurse())
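For context on why the check could be removed: with the old single-threaded keventd, a work item that flushed another work item on the same queue would deadlock, because the flusher occupied the only execution slot. Below is a minimal illustrative sketch of such a dependency chain (not part of this patch; inner_fn/outer_fn are hypothetical names):

```c
#include <linux/workqueue.h>

static void inner_fn(struct work_struct *unused) { /* ... */ }
static DECLARE_WORK(inner_work, inner_fn);

static void outer_fn(struct work_struct *unused)
{
	/*
	 * A two-link dependency chain on keventd: outer waits on inner.
	 * With max_active == 1 this deadlocks, since inner can never
	 * start while outer holds the only slot.  With max_active set
	 * to WQ_DFL_ACTIVE it completes, as long as such chains stay
	 * shorter than max_active.
	 */
	schedule_work(&inner_work);
	flush_work(&inner_work);
}
static DECLARE_WORK(outer_work, outer_fn);
```

The parent commit (e22bee78) is what makes this possible: each gcwq now manages a pool of workers, so more than one work item per CPU can be active at a time.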
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Christoph Lameter <cl@linux-foundation.org>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r-- | kernel/workqueue.c | 63 |
1 file changed, 13 insertions(+), 50 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 0ad46523b423..4190e84cf995 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2398,7 +2398,6 @@ EXPORT_SYMBOL(schedule_delayed_work_on);
 int schedule_on_each_cpu(work_func_t func)
 {
 	int cpu;
-	int orig = -1;
 	struct work_struct *works;
 
 	works = alloc_percpu(struct work_struct);
@@ -2407,23 +2406,12 @@ int schedule_on_each_cpu(work_func_t func)
 
 	get_online_cpus();
 
-	/*
-	 * When running in keventd don't schedule a work item on
-	 * itself.  Can just call directly because the work queue is
-	 * already bound.  This also is faster.
-	 */
-	if (current_is_keventd())
-		orig = raw_smp_processor_id();
-
 	for_each_online_cpu(cpu) {
 		struct work_struct *work = per_cpu_ptr(works, cpu);
 
 		INIT_WORK(work, func);
-		if (cpu != orig)
-			schedule_work_on(cpu, work);
+		schedule_work_on(cpu, work);
 	}
-	if (orig >= 0)
-		func(per_cpu_ptr(works, orig));
 
 	for_each_online_cpu(cpu)
 		flush_work(per_cpu_ptr(works, cpu));
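The simplified schedule_on_each_cpu() now unconditionally queues a work item on every online CPU and flushes them all, relying on keventd's raised max_active to keep this safe even when the caller is itself a keventd worker. A hedged usage sketch (drain_fn and drain_all_cpus are made-up names):

```c
#include <linux/workqueue.h>

/* Hypothetical callback: runs once on each online CPU, in keventd context. */
static void drain_fn(struct work_struct *unused)
{
	/* e.g. flush this CPU's private caches */
}

static int drain_all_cpus(void)
{
	/* Blocks until drain_fn has finished on every online CPU. */
	return schedule_on_each_cpu(drain_fn);
}
```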
@@ -2494,41 +2482,6 @@ int keventd_up(void)
 	return keventd_wq != NULL;
 }
 
-int current_is_keventd(void)
-{
-	bool found = false;
-	unsigned int cpu;
-
-	/*
-	 * There no longer is one-to-one relation between worker and
-	 * work queue and a worker task might be unbound from its cpu
-	 * if the cpu was offlined.  Match all busy workers.  This
-	 * function will go away once dynamic pool is implemented.
-	 */
-	for_each_possible_cpu(cpu) {
-		struct global_cwq *gcwq = get_gcwq(cpu);
-		struct worker *worker;
-		struct hlist_node *pos;
-		unsigned long flags;
-		int i;
-
-		spin_lock_irqsave(&gcwq->lock, flags);
-
-		for_each_busy_worker(worker, i, pos, gcwq) {
-			if (worker->task == current) {
-				found = true;
-				break;
-			}
-		}
-
-		spin_unlock_irqrestore(&gcwq->lock, flags);
-		if (found)
-			break;
-	}
-
-	return found;
-}
-
 static struct cpu_workqueue_struct *alloc_cwqs(void)
 {
 	/*
@@ -2576,6 +2529,16 @@ static void free_cwqs(struct cpu_workqueue_struct *cwqs)
 #endif
 }
 
+static int wq_clamp_max_active(int max_active, const char *name)
+{
+	if (max_active < 1 || max_active > WQ_MAX_ACTIVE)
+		printk(KERN_WARNING "workqueue: max_active %d requested for %s "
+		       "is out of range, clamping between %d and %d\n",
+		       max_active, name, 1, WQ_MAX_ACTIVE);
+
+	return clamp_val(max_active, 1, WQ_MAX_ACTIVE);
+}
+
 struct workqueue_struct *__create_workqueue_key(const char *name,
 						unsigned int flags,
 						int max_active,
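wq_clamp_max_active() replaces the old silent clamp_val(max_active, 1, INT_MAX) with a warning plus a clamp into [1, WQ_MAX_ACTIVE]. A hedged sketch of the effect on a caller (the names below are invented):

```c
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;

static int __init my_wq_init(void)
{
	/*
	 * max_active = 0 is out of range, so this now logs something like
	 * "workqueue: max_active 0 requested for my_wq is out of range,
	 * clamping between 1 and WQ_MAX_ACTIVE" and proceeds with
	 * max_active = 1 instead of silently accepting the value.
	 */
	my_wq = __create_workqueue("my_wq", 0, 0);
	return my_wq ? 0 : -ENOMEM;
}
```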
@@ -2585,7 +2548,7 @@ struct workqueue_struct *__create_workqueue_key(const char *name,
 	struct workqueue_struct *wq;
 	unsigned int cpu;
 
-	max_active = clamp_val(max_active, 1, INT_MAX);
+	max_active = wq_clamp_max_active(max_active, name);
 
 	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
 	if (!wq)
@@ -3324,6 +3287,6 @@ void __init init_workqueues(void)
 		spin_unlock_irq(&gcwq->lock);
 	}
 
-	keventd_wq = create_workqueue("events");
+	keventd_wq = __create_workqueue("events", 0, WQ_DFL_ACTIVE);
 	BUG_ON(!keventd_wq);
 }
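For reference, the constants used above live in include/linux/workqueue.h, which is outside this diffstat (limited to kernel/workqueue.c). As I read the series, this commit defines them roughly as:

```c
/* include/linux/workqueue.h (added elsewhere in this commit; values as assumed) */
enum {
	WQ_MAX_ACTIVE	= 512,		     /* upper bound any workqueue may request */
	WQ_DFL_ACTIVE	= WQ_MAX_ACTIVE / 2, /* what keventd is now created with */
};
```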