author	Tejun Heo <tj@kernel.org>	2012-07-14 01:16:44 -0400
committer	Tejun Heo <tj@kernel.org>	2012-07-14 01:16:44 -0400
commit	4ce62e9e30cacc26885cab133ad1de358dd79f21 (patch)
tree	6b08a377a236f0e44f335f7813d3da58a7ee1c70 /kernel
parent	11ebea50dbc1ade5994b2c838a096078d4c02399 (diff)
workqueue: introduce NR_WORKER_POOLS and for_each_worker_pool()
Introduce NR_WORKER_POOLS and for_each_worker_pool() and convert code
paths which need to manipulate all pools in a gcwq to use them.
NR_WORKER_POOLS is currently one and for_each_worker_pool() iterates
over only @gcwq->pool.

Note that nr_running is a per-pool property and is converted to an
array with NR_WORKER_POOLS elements, renamed to pool_nr_running.  Note
that get_pool_nr_running() currently assumes 0 index.  The next patch
will make use of non-zero index.

The changes in this patch are mechanical and don't cause any
functional difference.  This is to prepare for multiple pools per
gcwq.

v2: nr_running indexing bug in get_pool_nr_running() fixed.

v3: Pointer to array is stupid.  Don't use it in get_pool_nr_running()
    as suggested by Linus.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Fengguang Wu <fengguang.wu@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
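The conversion leans on a small macro trick: with a single pool per gcwq,
for_each_worker_pool() visits @gcwq->pool exactly once by taking its address
and then stepping to NULL.  A minimal user-space sketch of the same
single-element iterator follows; the struct names here are stand-ins, not the
kernel definitions.

#include <stdio.h>

struct pool { int id; };
struct gcwq { struct pool pool; };	/* one pool today; an array later */

/* visit every pool in @g: the body runs once, then (p) becomes NULL */
#define for_each_pool(p, g) \
	for ((p) = &(g)->pool; (p); (p) = NULL)

int main(void)
{
	struct gcwq g = { .pool = { .id = 0 } };
	struct pool *p;

	for_each_pool(p, &g)
		printf("visiting pool %d\n", p->id);
	return 0;
}

When gcwq later grows more pools, only the macro body has to change; every
call site written against the iterator keeps working unmodified.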
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/workqueue.c	223
1 file changed, 153 insertions(+), 70 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 7a98bae635f..b0daaea44ea 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -74,6 +74,8 @@ enum {
 	TRUSTEE_RELEASE		= 3,		/* release workers */
 	TRUSTEE_DONE		= 4,		/* trustee is done */
 
+	NR_WORKER_POOLS		= 1,		/* # worker pools per gcwq */
+
 	BUSY_WORKER_HASH_ORDER	= 6,		/* 64 pointers */
 	BUSY_WORKER_HASH_SIZE	= 1 << BUSY_WORKER_HASH_ORDER,
 	BUSY_WORKER_HASH_MASK	= BUSY_WORKER_HASH_SIZE - 1,
@@ -274,6 +276,9 @@ EXPORT_SYMBOL_GPL(system_nrt_freezable_wq);
 #define CREATE_TRACE_POINTS
 #include <trace/events/workqueue.h>
 
+#define for_each_worker_pool(pool, gcwq)				\
+	for ((pool) = &(gcwq)->pool; (pool); (pool) = NULL)
+
 #define for_each_busy_worker(worker, i, pos, gcwq)			\
 	for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)			\
 		hlist_for_each_entry(worker, pos, &gcwq->busy_hash[i], hentry)
@@ -454,7 +459,7 @@ static bool workqueue_freezing;	/* W: have wqs started freezing? */
  * try_to_wake_up().  Put it in a separate cacheline.
  */
 static DEFINE_PER_CPU(struct global_cwq, global_cwq);
-static DEFINE_PER_CPU_SHARED_ALIGNED(atomic_t, gcwq_nr_running);
+static DEFINE_PER_CPU_SHARED_ALIGNED(atomic_t, pool_nr_running[NR_WORKER_POOLS]);
 
 /*
  * Global cpu workqueue and nr_running counter for unbound gcwq.  The
@@ -462,7 +467,9 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(atomic_t, gcwq_nr_running);
  * workers have WORKER_UNBOUND set.
  */
 static struct global_cwq unbound_global_cwq;
-static atomic_t unbound_gcwq_nr_running = ATOMIC_INIT(0);	/* always 0 */
+static atomic_t unbound_pool_nr_running[NR_WORKER_POOLS] = {
+	[0 ... NR_WORKER_POOLS - 1]	= ATOMIC_INIT(0),	/* always 0 */
+};
 
 static int worker_thread(void *__worker);
 
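The unbound counter array above uses GCC's range-designator extension so that
every element is explicitly zero-initialized, mirroring the old single
counter's ATOMIC_INIT(0).  A standalone sketch of the construct, with plain
ints standing in for atomic_t:

#include <stdio.h>

#define NR_POOLS 4

/* GCC extension: one designator initializes a whole range of elements */
static int nr_running[NR_POOLS] = {
	[0 ... NR_POOLS - 1] = 0,
};

int main(void)
{
	for (int i = 0; i < NR_POOLS; i++)
		printf("pool %d: %d\n", i, nr_running[i]);
	return 0;
}

The [first ... last] = value form is not ISO C, but the kernel builds with
gcc and uses it freely.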
@@ -477,11 +484,12 @@ static struct global_cwq *get_gcwq(unsigned int cpu)
 static atomic_t *get_pool_nr_running(struct worker_pool *pool)
 {
 	int cpu = pool->gcwq->cpu;
+	int idx = 0;
 
 	if (cpu != WORK_CPU_UNBOUND)
-		return &per_cpu(gcwq_nr_running, cpu);
+		return &per_cpu(pool_nr_running, cpu)[idx];
 	else
-		return &unbound_gcwq_nr_running;
+		return &unbound_pool_nr_running[idx];
 }
 
 static struct cpu_workqueue_struct *get_cwq(unsigned int cpu,
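Per the v3 note in the changelog, the helper hands back a pointer to one
array element rather than a pointer to the whole array, so callers that
atomically read or update the counter stay unchanged; idx is pinned to 0
until the next patch introduces a second pool.  A rough user-space analogue,
with ordinary 2-D arrays standing in for the per-cpu machinery (all names
here are illustrative, not kernel API):

#define NR_POOLS	1
#define NR_CPUS		4
#define CPU_UNBOUND	(-1)

static int cpu_nr_running[NR_CPUS][NR_POOLS];	/* stand-in for per-cpu data */
static int unbound_nr_running[NR_POOLS];

/* return the counter element for one pool of one cpu */
static int *get_nr_running(int cpu)
{
	int idx = 0;	/* single pool for now; next patch uses non-zero */

	if (cpu != CPU_UNBOUND)
		return &cpu_nr_running[cpu][idx];
	else
		return &unbound_nr_running[idx];
}

int main(void)
{
	*get_nr_running(0) += 1;		/* bump CPU 0's pool counter */
	return *get_nr_running(0) == 1 ? 0 : 1;
}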
@@ -3345,9 +3353,30 @@ EXPORT_SYMBOL_GPL(work_busy);
 	__ret1 < 0 ? -1 : 0;						\
 })
 
+static bool gcwq_is_managing_workers(struct global_cwq *gcwq)
+{
+	struct worker_pool *pool;
+
+	for_each_worker_pool(pool, gcwq)
+		if (pool->flags & POOL_MANAGING_WORKERS)
+			return true;
+	return false;
+}
+
+static bool gcwq_has_idle_workers(struct global_cwq *gcwq)
+{
+	struct worker_pool *pool;
+
+	for_each_worker_pool(pool, gcwq)
+		if (!list_empty(&pool->idle_list))
+			return true;
+	return false;
+}
+
 static int __cpuinit trustee_thread(void *__gcwq)
 {
 	struct global_cwq *gcwq = __gcwq;
+	struct worker_pool *pool;
 	struct worker *worker;
 	struct work_struct *work;
 	struct hlist_node *pos;
@@ -3363,13 +3392,15 @@ static int __cpuinit trustee_thread(void *__gcwq)
 	 * cancelled.
 	 */
 	BUG_ON(gcwq->cpu != smp_processor_id());
-	rc = trustee_wait_event(!(gcwq->pool.flags & POOL_MANAGING_WORKERS));
+	rc = trustee_wait_event(!gcwq_is_managing_workers(gcwq));
 	BUG_ON(rc < 0);
 
-	gcwq->pool.flags |= POOL_MANAGING_WORKERS;
+	for_each_worker_pool(pool, gcwq) {
+		pool->flags |= POOL_MANAGING_WORKERS;
 
-	list_for_each_entry(worker, &gcwq->pool.idle_list, entry)
-		worker->flags |= WORKER_ROGUE;
+		list_for_each_entry(worker, &pool->idle_list, entry)
+			worker->flags |= WORKER_ROGUE;
+	}
 
 	for_each_busy_worker(worker, i, pos, gcwq)
 		worker->flags |= WORKER_ROGUE;
@@ -3390,10 +3421,12 @@ static int __cpuinit trustee_thread(void *__gcwq)
 	 * keep_working() are always true as long as the worklist is
 	 * not empty.
 	 */
-	atomic_set(get_pool_nr_running(&gcwq->pool), 0);
+	for_each_worker_pool(pool, gcwq)
+		atomic_set(get_pool_nr_running(pool), 0);
 
 	spin_unlock_irq(&gcwq->lock);
-	del_timer_sync(&gcwq->pool.idle_timer);
+	for_each_worker_pool(pool, gcwq)
+		del_timer_sync(&pool->idle_timer);
 	spin_lock_irq(&gcwq->lock);
 
 	/*
@@ -3415,29 +3448,38 @@ static int __cpuinit trustee_thread(void *__gcwq)
 	 * may be frozen works in freezable cwqs.  Don't declare
 	 * completion while frozen.
 	 */
-	while (gcwq->pool.nr_workers != gcwq->pool.nr_idle ||
-	       gcwq->flags & GCWQ_FREEZING ||
-	       gcwq->trustee_state == TRUSTEE_IN_CHARGE) {
-		int nr_works = 0;
+	while (true) {
+		bool busy = false;
 
-		list_for_each_entry(work, &gcwq->pool.worklist, entry) {
-			send_mayday(work);
-			nr_works++;
-		}
+		for_each_worker_pool(pool, gcwq)
+			busy |= pool->nr_workers != pool->nr_idle;
 
-		list_for_each_entry(worker, &gcwq->pool.idle_list, entry) {
-			if (!nr_works--)
-				break;
-			wake_up_process(worker->task);
-		}
+		if (!busy && !(gcwq->flags & GCWQ_FREEZING) &&
+		    gcwq->trustee_state != TRUSTEE_IN_CHARGE)
+			break;
 
-		if (need_to_create_worker(&gcwq->pool)) {
-			spin_unlock_irq(&gcwq->lock);
-			worker = create_worker(&gcwq->pool, false);
-			spin_lock_irq(&gcwq->lock);
-			if (worker) {
-				worker->flags |= WORKER_ROGUE;
-				start_worker(worker);
+		for_each_worker_pool(pool, gcwq) {
+			int nr_works = 0;
+
+			list_for_each_entry(work, &pool->worklist, entry) {
+				send_mayday(work);
+				nr_works++;
+			}
+
+			list_for_each_entry(worker, &pool->idle_list, entry) {
+				if (!nr_works--)
+					break;
+				wake_up_process(worker->task);
+			}
+
+			if (need_to_create_worker(pool)) {
+				spin_unlock_irq(&gcwq->lock);
+				worker = create_worker(pool, false);
+				spin_lock_irq(&gcwq->lock);
+				if (worker) {
+					worker->flags |= WORKER_ROGUE;
+					start_worker(worker);
+				}
 			}
 		}
 
@@ -3452,11 +3494,18 @@ static int __cpuinit trustee_thread(void *__gcwq)
 	 * all workers till we're canceled.
 	 */
 	do {
-		rc = trustee_wait_event(!list_empty(&gcwq->pool.idle_list));
-		while (!list_empty(&gcwq->pool.idle_list))
-			destroy_worker(list_first_entry(&gcwq->pool.idle_list,
-							struct worker, entry));
-	} while (gcwq->pool.nr_workers && rc >= 0);
+		rc = trustee_wait_event(gcwq_has_idle_workers(gcwq));
+
+		i = 0;
+		for_each_worker_pool(pool, gcwq) {
+			while (!list_empty(&pool->idle_list)) {
+				worker = list_first_entry(&pool->idle_list,
+							  struct worker, entry);
+				destroy_worker(worker);
+			}
+			i |= pool->nr_workers;
+		}
+	} while (i && rc >= 0);
 
 	/*
 	 * At this point, either draining has completed and no worker
@@ -3465,7 +3514,8 @@ static int __cpuinit trustee_thread(void *__gcwq)
 	 * Tell the remaining busy ones to rebind once it finishes the
 	 * currently scheduled works by scheduling the rebind_work.
 	 */
-	WARN_ON(!list_empty(&gcwq->pool.idle_list));
+	for_each_worker_pool(pool, gcwq)
+		WARN_ON(!list_empty(&pool->idle_list));
 
 	for_each_busy_worker(worker, i, pos, gcwq) {
 		struct work_struct *rebind_work = &worker->rebind_work;
@@ -3490,7 +3540,8 @@ static int __cpuinit trustee_thread(void *__gcwq)
 	}
 
 	/* relinquish manager role */
-	gcwq->pool.flags &= ~POOL_MANAGING_WORKERS;
+	for_each_worker_pool(pool, gcwq)
+		pool->flags &= ~POOL_MANAGING_WORKERS;
 
 	/* notify completion */
 	gcwq->trustee = NULL;
@@ -3532,8 +3583,10 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
 	unsigned int cpu = (unsigned long)hcpu;
 	struct global_cwq *gcwq = get_gcwq(cpu);
 	struct task_struct *new_trustee = NULL;
-	struct worker *uninitialized_var(new_worker);
+	struct worker *new_workers[NR_WORKER_POOLS] = { };
+	struct worker_pool *pool;
 	unsigned long flags;
+	int i;
 
 	action &= ~CPU_TASKS_FROZEN;
 
@@ -3546,12 +3599,12 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
 		kthread_bind(new_trustee, cpu);
 		/* fall through */
 	case CPU_UP_PREPARE:
-		BUG_ON(gcwq->pool.first_idle);
-		new_worker = create_worker(&gcwq->pool, false);
-		if (!new_worker) {
-			if (new_trustee)
-				kthread_stop(new_trustee);
-			return NOTIFY_BAD;
+		i = 0;
+		for_each_worker_pool(pool, gcwq) {
+			BUG_ON(pool->first_idle);
+			new_workers[i] = create_worker(pool, false);
+			if (!new_workers[i++])
+				goto err_destroy;
 		}
 	}
 
@@ -3568,8 +3621,11 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
 		wait_trustee_state(gcwq, TRUSTEE_IN_CHARGE);
 		/* fall through */
 	case CPU_UP_PREPARE:
-		BUG_ON(gcwq->pool.first_idle);
-		gcwq->pool.first_idle = new_worker;
+		i = 0;
+		for_each_worker_pool(pool, gcwq) {
+			BUG_ON(pool->first_idle);
+			pool->first_idle = new_workers[i++];
+		}
 		break;
 
 	case CPU_DYING:
@@ -3586,8 +3642,10 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
 		gcwq->trustee_state = TRUSTEE_BUTCHER;
 		/* fall through */
 	case CPU_UP_CANCELED:
-		destroy_worker(gcwq->pool.first_idle);
-		gcwq->pool.first_idle = NULL;
+		for_each_worker_pool(pool, gcwq) {
+			destroy_worker(pool->first_idle);
+			pool->first_idle = NULL;
+		}
 		break;
 
 	case CPU_DOWN_FAILED:
@@ -3604,18 +3662,32 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
 		 * Put the first_idle in and request a real manager to
 		 * take a look.
 		 */
-		spin_unlock_irq(&gcwq->lock);
-		kthread_bind(gcwq->pool.first_idle->task, cpu);
-		spin_lock_irq(&gcwq->lock);
-		gcwq->pool.flags |= POOL_MANAGE_WORKERS;
-		start_worker(gcwq->pool.first_idle);
-		gcwq->pool.first_idle = NULL;
+		for_each_worker_pool(pool, gcwq) {
+			spin_unlock_irq(&gcwq->lock);
+			kthread_bind(pool->first_idle->task, cpu);
+			spin_lock_irq(&gcwq->lock);
+			pool->flags |= POOL_MANAGE_WORKERS;
+			start_worker(pool->first_idle);
+			pool->first_idle = NULL;
+		}
 		break;
 	}
 
 	spin_unlock_irqrestore(&gcwq->lock, flags);
 
 	return notifier_from_errno(0);
+
+err_destroy:
+	if (new_trustee)
+		kthread_stop(new_trustee);
+
+	spin_lock_irqsave(&gcwq->lock, flags);
+	for (i = 0; i < NR_WORKER_POOLS; i++)
+		if (new_workers[i])
+			destroy_worker(new_workers[i]);
+	spin_unlock_irqrestore(&gcwq->lock, flags);
+
+	return NOTIFY_BAD;
 }
 
 #ifdef CONFIG_SMP
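The new err_destroy label above replaces the inline failure handling with the
kernel's usual goto-based unwind: allocate in a loop, jump to a single
cleanup block on the first failure, and tear down whatever was created so
far.  A condensed user-space sketch of the pattern (create_one and bring_up
are hypothetical names, not kernel functions):

#include <stdlib.h>

#define NR 2

static int *create_one(int i)
{
	int *w = malloc(sizeof(*w));
	if (w)
		*w = i;
	return w;
}

static int bring_up(int **out)
{
	int *workers[NR] = { NULL };	/* zeroed so cleanup can scan all slots */

	for (int i = 0; i < NR; i++) {
		workers[i] = create_one(i);
		if (!workers[i])
			goto err_destroy;	/* unwind partial progress */
	}
	for (int i = 0; i < NR; i++)
		out[i] = workers[i];		/* hand off on success */
	return 0;

err_destroy:
	for (int i = 0; i < NR; i++)
		free(workers[i]);		/* free(NULL) is a no-op */
	return -1;
}

int main(void)
{
	int *out[NR];

	if (bring_up(out))
		return EXIT_FAILURE;
	for (int i = 0; i < NR; i++)
		free(out[i]);
	return EXIT_SUCCESS;
}

Because the array starts zeroed, the cleanup loop can visit every slot; the
kernel version guards each destroy_worker() call with a NULL check for the
same reason.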
@@ -3774,6 +3846,7 @@ void thaw_workqueues(void)
 
 	for_each_gcwq_cpu(cpu) {
 		struct global_cwq *gcwq = get_gcwq(cpu);
+		struct worker_pool *pool;
 		struct workqueue_struct *wq;
 
 		spin_lock_irq(&gcwq->lock);
@@ -3795,7 +3868,8 @@ void thaw_workqueues(void)
 			cwq_activate_first_delayed(cwq);
 		}
 
-		wake_up_worker(&gcwq->pool);
+		for_each_worker_pool(pool, gcwq)
+			wake_up_worker(pool);
 
 		spin_unlock_irq(&gcwq->lock);
 	}
@@ -3816,25 +3890,29 @@ static int __init init_workqueues(void)
 	/* initialize gcwqs */
 	for_each_gcwq_cpu(cpu) {
 		struct global_cwq *gcwq = get_gcwq(cpu);
+		struct worker_pool *pool;
 
 		spin_lock_init(&gcwq->lock);
-		gcwq->pool.gcwq = gcwq;
-		INIT_LIST_HEAD(&gcwq->pool.worklist);
 		gcwq->cpu = cpu;
 		gcwq->flags |= GCWQ_DISASSOCIATED;
 
-		INIT_LIST_HEAD(&gcwq->pool.idle_list);
 		for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)
 			INIT_HLIST_HEAD(&gcwq->busy_hash[i]);
 
-		init_timer_deferrable(&gcwq->pool.idle_timer);
-		gcwq->pool.idle_timer.function = idle_worker_timeout;
-		gcwq->pool.idle_timer.data = (unsigned long)&gcwq->pool;
+		for_each_worker_pool(pool, gcwq) {
+			pool->gcwq = gcwq;
+			INIT_LIST_HEAD(&pool->worklist);
+			INIT_LIST_HEAD(&pool->idle_list);
 
-		setup_timer(&gcwq->pool.mayday_timer, gcwq_mayday_timeout,
-			    (unsigned long)&gcwq->pool);
+			init_timer_deferrable(&pool->idle_timer);
+			pool->idle_timer.function = idle_worker_timeout;
+			pool->idle_timer.data = (unsigned long)pool;
 
-		ida_init(&gcwq->pool.worker_ida);
+			setup_timer(&pool->mayday_timer, gcwq_mayday_timeout,
+				    (unsigned long)pool);
+
+			ida_init(&pool->worker_ida);
+		}
 
 		gcwq->trustee_state = TRUSTEE_DONE;
 		init_waitqueue_head(&gcwq->trustee_wait);
@@ -3843,15 +3921,20 @@ static int __init init_workqueues(void)
 	/* create the initial worker */
 	for_each_online_gcwq_cpu(cpu) {
 		struct global_cwq *gcwq = get_gcwq(cpu);
-		struct worker *worker;
+		struct worker_pool *pool;
 
 		if (cpu != WORK_CPU_UNBOUND)
 			gcwq->flags &= ~GCWQ_DISASSOCIATED;
-		worker = create_worker(&gcwq->pool, true);
-		BUG_ON(!worker);
-		spin_lock_irq(&gcwq->lock);
-		start_worker(worker);
-		spin_unlock_irq(&gcwq->lock);
+
+		for_each_worker_pool(pool, gcwq) {
+			struct worker *worker;
+
+			worker = create_worker(pool, true);
+			BUG_ON(!worker);
+			spin_lock_irq(&gcwq->lock);
+			start_worker(worker);
+			spin_unlock_irq(&gcwq->lock);
+		}
 	}
 
 	system_wq = alloc_workqueue("events", 0, 0);