-rw-r--r-- | arch/ia64/kernel/smpboot.c |  2
-rw-r--r-- | arch/x86/kernel/smpboot.c  |  2
-rw-r--r-- | include/linux/workqueue.h  |  4
-rw-r--r-- | kernel/workqueue.c         | 63
4 files changed, 18 insertions, 53 deletions
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 6a1380e90f87..99dcc85193c9 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -519,7 +519,7 @@ do_boot_cpu (int sapicid, int cpu)
 	/*
 	 * We can't use kernel_thread since we must avoid to reschedule the child.
 	 */
-	if (!keventd_up() || current_is_keventd())
+	if (!keventd_up())
 		c_idle.work.func(&c_idle.work);
 	else {
 		schedule_work(&c_idle.work);
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index c4f33b2e77d6..4d90f376e985 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -735,7 +735,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
 		goto do_rest;
 	}
 
-	if (!keventd_up() || current_is_keventd())
+	if (!keventd_up())
 		c_idle.work.func(&c_idle.work);
 	else {
 		schedule_work(&c_idle.work);
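
For reference, both smpboot call sites keep the same shape after this change: if the "events" workqueue is not up yet (very early boot), the work function is called directly in the caller's context; otherwise the item is always queued, since the current_is_keventd() escape hatch is gone. Below is a minimal sketch of that pattern, not kernel code; the *_example names are made up and the setup is simplified (the real callers initialize their work item statically).

#include <linux/workqueue.h>
#include <linux/completion.h>

/* Hypothetical example type; the real callers use their own structs. */
struct boot_work_example {
	struct work_struct work;
	struct completion done;
};

static void boot_work_fn_example(struct work_struct *work)
{
	struct boot_work_example *bw =
		container_of(work, struct boot_work_example, work);

	/* ... whatever must run in process context during cpu bringup ... */

	complete(&bw->done);
}

static void run_boot_work_example(struct boot_work_example *bw)
{
	INIT_WORK(&bw->work, boot_work_fn_example);
	init_completion(&bw->done);

	if (!keventd_up())
		bw->work.func(&bw->work);	/* too early: run inline */
	else {
		schedule_work(&bw->work);	/* normal case: queue it */
		wait_for_completion(&bw->done);
	}
}
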
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index b8f4ec45c40a..33e24e734d50 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -227,6 +227,9 @@ enum {
 	WQ_SINGLE_CPU		= 1 << 1, /* only single cpu at a time */
 	WQ_NON_REENTRANT	= 1 << 2, /* guarantee non-reentrance */
 	WQ_RESCUER		= 1 << 3, /* has an rescue worker */
+
+	WQ_MAX_ACTIVE		= 512,	  /* I like 512, better ideas? */
+	WQ_DFL_ACTIVE		= WQ_MAX_ACTIVE / 2,
 };
 
 extern struct workqueue_struct *
@@ -280,7 +283,6 @@ extern int schedule_delayed_work(struct delayed_work *work, unsigned long delay)
 extern int schedule_delayed_work_on(int cpu, struct delayed_work *work,
 					unsigned long delay);
 extern int schedule_on_each_cpu(work_func_t func);
-extern int current_is_keventd(void);
 extern int keventd_up(void);
 
 extern void init_workqueues(void);
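
The two new enum members bound the concurrency a workqueue may ask for: WQ_MAX_ACTIVE (512) is the hard cap enforced by the wq_clamp_max_active() helper added below, and WQ_DFL_ACTIVE (256) becomes the value used for keventd at the end of this patch. A hedged sketch of a caller passing an explicit max_active through the three-argument __create_workqueue() that the patch itself uses for "events"; the queue name, callback, and the value 16 are made-up examples.

#include <linux/workqueue.h>

static void example_work_fn(struct work_struct *work)
{
	/* this queue may have up to max_active work items executing per cpu */
}

static DECLARE_WORK(example_work, example_work_fn);
static struct workqueue_struct *example_wq;

static int __init example_init(void)
{
	/* 16 is within 1..WQ_MAX_ACTIVE, so it is used as requested */
	example_wq = __create_workqueue("example", 0, 16);
	if (!example_wq)
		return -ENOMEM;

	queue_work(example_wq, &example_work);
	return 0;
}
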
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 0ad46523b423..4190e84cf995 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2398,7 +2398,6 @@ EXPORT_SYMBOL(schedule_delayed_work_on);
 int schedule_on_each_cpu(work_func_t func)
 {
 	int cpu;
-	int orig = -1;
 	struct work_struct *works;
 
 	works = alloc_percpu(struct work_struct);
@@ -2407,23 +2406,12 @@ int schedule_on_each_cpu(work_func_t func)
 
 	get_online_cpus();
 
-	/*
-	 * When running in keventd don't schedule a work item on
-	 * itself.  Can just call directly because the work queue is
-	 * already bound.  This also is faster.
-	 */
-	if (current_is_keventd())
-		orig = raw_smp_processor_id();
-
 	for_each_online_cpu(cpu) {
 		struct work_struct *work = per_cpu_ptr(works, cpu);
 
 		INIT_WORK(work, func);
-		if (cpu != orig)
-			schedule_work_on(cpu, work);
+		schedule_work_on(cpu, work);
 	}
-	if (orig >= 0)
-		func(per_cpu_ptr(works, orig));
 
 	for_each_online_cpu(cpu)
 		flush_work(per_cpu_ptr(works, cpu));
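
The apparent rationale for dropping the self-execution shortcut: with keventd's max_active raised to WQ_DFL_ACTIVE, a work item queued from within keventd no longer risks waiting behind the very worker that queued it, so schedule_on_each_cpu() can simply queue on every online cpu and flush. A small usage sketch follows; the callback and function names are made up.

#include <linux/workqueue.h>

/* Made-up callback; runs once on each online cpu, in keventd context. */
static void refresh_percpu_state_example(struct work_struct *work)
{
	/* ... update this cpu's private data ... */
}

static int refresh_all_cpus_example(void)
{
	/* returns 0 on success, -ENOMEM if the per-cpu works can't be allocated */
	return schedule_on_each_cpu(refresh_percpu_state_example);
}
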
@@ -2494,41 +2482,6 @@ int keventd_up(void)
 	return keventd_wq != NULL;
 }
 
-int current_is_keventd(void)
-{
-	bool found = false;
-	unsigned int cpu;
-
-	/*
-	 * There no longer is one-to-one relation between worker and
-	 * work queue and a worker task might be unbound from its cpu
-	 * if the cpu was offlined.  Match all busy workers.  This
-	 * function will go away once dynamic pool is implemented.
-	 */
-	for_each_possible_cpu(cpu) {
-		struct global_cwq *gcwq = get_gcwq(cpu);
-		struct worker *worker;
-		struct hlist_node *pos;
-		unsigned long flags;
-		int i;
-
-		spin_lock_irqsave(&gcwq->lock, flags);
-
-		for_each_busy_worker(worker, i, pos, gcwq) {
-			if (worker->task == current) {
-				found = true;
-				break;
-			}
-		}
-
-		spin_unlock_irqrestore(&gcwq->lock, flags);
-		if (found)
-			break;
-	}
-
-	return found;
-}
-
 static struct cpu_workqueue_struct *alloc_cwqs(void)
 {
 	/*
@@ -2576,6 +2529,16 @@ static void free_cwqs(struct cpu_workqueue_struct *cwqs)
 #endif
 }
 
+static int wq_clamp_max_active(int max_active, const char *name)
+{
+	if (max_active < 1 || max_active > WQ_MAX_ACTIVE)
+		printk(KERN_WARNING "workqueue: max_active %d requested for %s "
+		       "is out of range, clamping between %d and %d\n",
+		       max_active, name, 1, WQ_MAX_ACTIVE);
+
+	return clamp_val(max_active, 1, WQ_MAX_ACTIVE);
+}
+
 struct workqueue_struct *__create_workqueue_key(const char *name,
 						unsigned int flags,
 						int max_active,
@@ -2585,7 +2548,7 @@ struct workqueue_struct *__create_workqueue_key(const char *name,
 	struct workqueue_struct *wq;
 	unsigned int cpu;
 
-	max_active = clamp_val(max_active, 1, INT_MAX);
+	max_active = wq_clamp_max_active(max_active, name);
 
 	wq = kzalloc(sizeof(*wq), GFP_KERNEL);
 	if (!wq)
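
Previously only the lower bound of 1 was enforced (the upper bound was INT_MAX); the new helper also enforces the WQ_MAX_ACTIVE ceiling and warns when a request is out of range. For reference, the effect on a few requested values (illustration only, not part of the patch):

/*
 * Assuming WQ_MAX_ACTIVE == 512 as defined above:
 *
 *   wq_clamp_max_active(0,    "foo")  -> warns, returns 1
 *   wq_clamp_max_active(16,   "foo")  -> returns 16, no warning
 *   wq_clamp_max_active(4096, "foo")  -> warns, returns 512
 *
 * clamp_val(val, lo, hi) behaves like min(max(val, lo), hi), evaluated
 * in val's own type, so in-range requests pass through unchanged.
 */
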
@@ -3324,6 +3287,6 @@ void __init init_workqueues(void)
 		spin_unlock_irq(&gcwq->lock);
 	}
 
-	keventd_wq = create_workqueue("events");
+	keventd_wq = __create_workqueue("events", 0, WQ_DFL_ACTIVE);
 	BUG_ON(!keventd_wq);
 }
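
Net effect on keventd, derived from the values added in include/linux/workqueue.h; the statement about the old limit is an assumption about what create_workqueue() expanded to in this tree:

/*
 * WQ_MAX_ACTIVE = 512
 * WQ_DFL_ACTIVE = WQ_MAX_ACTIVE / 2 = 256
 *
 * The "events" workqueue is now created with max_active = 256 per cpu
 * instead of the much smaller limit the old create_workqueue() call
 * implied (assumed to be 1 in this tree). That headroom is presumably
 * what makes it safe to remove the current_is_keventd() checks in the
 * smpboot callers and in schedule_on_each_cpu() above.
 */
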