| author | Tejun Heo <tj@kernel.org> | 2013-01-24 14:01:33 -0500 |
| --- | --- | --- |
| committer | Tejun Heo <tj@kernel.org> | 2013-01-24 14:01:33 -0500 |
| commit | 94cf58bb2907bd2702fce2266955e29ab5261f53 (patch) | |
| tree | 32b7998f475bf41754c74e9e55c45213263c89df /kernel/workqueue.c | |
| parent | d565ed6309300304de4a865a04adef07a85edc45 (diff) | |
workqueue: make hotplug processing per-pool
Instead of holding locks from both pools and then processing the pools
together, make hotplug processing per-pool - grab locks of one pool,
process it, release it and then proceed to the next pool.
rebind_workers() is updated to take and process @pool instead of @gcwq
which results in a lot of de-indentation. gcwq_claim_assoc_and_lock()
and its counterpart are replaced with in-line per-pool locking.
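
To make the new shape concrete, here is a minimal, hypothetical userspace sketch of the locking-order change (plain pthreads; the pool struct and function names are invented for illustration and are not kernel code — the real change is in the diff below). The old path claimed every pool's assoc_mutex and lock before processing any pool; the new path claims, processes and releases one pool at a time.

```c
/* Illustration only: hypothetical userspace analogue of the locking change. */
#include <pthread.h>
#include <stdio.h>

#define NR_POOLS 2

struct pool {
	pthread_mutex_t assoc_mutex;	/* stands in for pool->assoc_mutex */
	pthread_mutex_t lock;		/* stands in for pool->lock */
	int disassociated;		/* stands in for POOL_DISASSOCIATED */
};

static struct pool pools[NR_POOLS] = {
	{ PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, 1 },
	{ PTHREAD_MUTEX_INITIALIZER, PTHREAD_MUTEX_INITIALIZER, 1 },
};

/* Old style: claim every pool, process them all, then release every pool. */
static void hotplug_all_pools_at_once(void)
{
	int i;

	for (i = 0; i < NR_POOLS; i++)
		pthread_mutex_lock(&pools[i].assoc_mutex);
	for (i = 0; i < NR_POOLS; i++)
		pthread_mutex_lock(&pools[i].lock);

	for (i = 0; i < NR_POOLS; i++)
		pools[i].disassociated = 0;	/* "rebind" both pools together */

	for (i = NR_POOLS - 1; i >= 0; i--)
		pthread_mutex_unlock(&pools[i].lock);
	for (i = NR_POOLS - 1; i >= 0; i--)
		pthread_mutex_unlock(&pools[i].assoc_mutex);
}

/* New style: each pool is claimed, processed and released independently. */
static void hotplug_per_pool(void)
{
	int i;

	for (i = 0; i < NR_POOLS; i++) {
		pthread_mutex_lock(&pools[i].assoc_mutex);
		pthread_mutex_lock(&pools[i].lock);

		pools[i].disassociated = 0;	/* "rebind" just this pool */

		pthread_mutex_unlock(&pools[i].lock);
		pthread_mutex_unlock(&pools[i].assoc_mutex);
	}
}

int main(void)
{
	hotplug_all_pools_at_once();
	hotplug_per_pool();
	printf("pool 0 disassociated=%d, pool 1 disassociated=%d\n",
	       pools[0].disassociated, pools[1].disassociated);
	return 0;
}
```

Both orderings are deadlock-free because pools are always claimed in the same order; the per-pool form simply keeps each critical section independent of the others, which is what allows worker_pool to stand on its own later.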
While this patch changes processing order across pools, order within
each pool remains the same. As each pool is independent, this
shouldn't break anything.
This is part of an effort to remove global_cwq and make worker_pool
the top-level abstraction, which in turn will help implement worker
pools with user-specified attributes.
Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Diffstat (limited to 'kernel/workqueue.c')

 -rw-r--r--  kernel/workqueue.c | 149
 1 file changed, 62 insertions(+), 87 deletions(-)
```diff
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index c93651208760..fd400f8c9514 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1670,10 +1670,10 @@ static void busy_worker_rebind_fn(struct work_struct *work)
 }
 
 /**
- * rebind_workers - rebind all workers of a gcwq to the associated CPU
- * @gcwq: gcwq of interest
+ * rebind_workers - rebind all workers of a pool to the associated CPU
+ * @pool: pool of interest
  *
- * @gcwq->cpu is coming online. Rebind all workers to the CPU. Rebinding
+ * @pool->cpu is coming online. Rebind all workers to the CPU. Rebinding
  * is different for idle and busy ones.
  *
  * Idle ones will be removed from the idle_list and woken up. They will
@@ -1691,60 +1691,53 @@ static void busy_worker_rebind_fn(struct work_struct *work)
  * including the manager will not appear on @idle_list until rebind is
  * complete, making local wake-ups safe.
  */
-static void rebind_workers(struct global_cwq *gcwq)
+static void rebind_workers(struct worker_pool *pool)
 {
-        struct worker_pool *pool;
         struct worker *worker, *n;
         struct hlist_node *pos;
         int i;
 
-        for_each_worker_pool(pool, gcwq) {
-                lockdep_assert_held(&pool->assoc_mutex);
-                lockdep_assert_held(&pool->lock);
-        }
+        lockdep_assert_held(&pool->assoc_mutex);
+        lockdep_assert_held(&pool->lock);
 
         /* dequeue and kick idle ones */
-        for_each_worker_pool(pool, gcwq) {
-                list_for_each_entry_safe(worker, n, &pool->idle_list, entry) {
-                        /*
-                         * idle workers should be off @pool->idle_list
-                         * until rebind is complete to avoid receiving
-                         * premature local wake-ups.
-                         */
-                        list_del_init(&worker->entry);
+        list_for_each_entry_safe(worker, n, &pool->idle_list, entry) {
+                /*
+                 * idle workers should be off @pool->idle_list until rebind
+                 * is complete to avoid receiving premature local wake-ups.
+                 */
+                list_del_init(&worker->entry);
 
-                        /*
-                         * worker_thread() will see the above dequeuing
-                         * and call idle_worker_rebind().
-                         */
-                        wake_up_process(worker->task);
-                }
+                /*
+                 * worker_thread() will see the above dequeuing and call
+                 * idle_worker_rebind().
+                 */
+                wake_up_process(worker->task);
+        }
 
-                /* rebind busy workers */
-                for_each_busy_worker(worker, i, pos, pool) {
-                        struct work_struct *rebind_work = &worker->rebind_work;
-                        struct workqueue_struct *wq;
+        /* rebind busy workers */
+        for_each_busy_worker(worker, i, pos, pool) {
+                struct work_struct *rebind_work = &worker->rebind_work;
+                struct workqueue_struct *wq;
 
-                        if (test_and_set_bit(WORK_STRUCT_PENDING_BIT,
-                                             work_data_bits(rebind_work)))
-                                continue;
+                if (test_and_set_bit(WORK_STRUCT_PENDING_BIT,
+                                     work_data_bits(rebind_work)))
+                        continue;
 
-                        debug_work_activate(rebind_work);
+                debug_work_activate(rebind_work);
 
-                        /*
-                         * wq doesn't really matter but let's keep
-                         * @worker->pool and @cwq->pool consistent for
-                         * sanity.
-                         */
-                        if (std_worker_pool_pri(worker->pool))
-                                wq = system_highpri_wq;
-                        else
-                                wq = system_wq;
+                /*
+                 * wq doesn't really matter but let's keep @worker->pool
+                 * and @cwq->pool consistent for sanity.
+                 */
+                if (std_worker_pool_pri(worker->pool))
+                        wq = system_highpri_wq;
+                else
+                        wq = system_wq;
 
-                        insert_work(get_cwq(pool->cpu, wq), rebind_work,
-                                    worker->scheduled.next,
-                                    work_color_to_flags(WORK_NO_COLOR));
-                }
+                insert_work(get_cwq(pool->cpu, wq), rebind_work,
+                            worker->scheduled.next,
+                            work_color_to_flags(WORK_NO_COLOR));
         }
 }
 
@@ -3497,7 +3490,7 @@ EXPORT_SYMBOL_GPL(work_busy);
  * are a lot of assumptions on strong associations among work, cwq and
  * gcwq which make migrating pending and scheduled works very
  * difficult to implement without impacting hot paths. Secondly,
- * gcwqs serve mix of short, long and very long running works making
+ * worker pools serve mix of short, long and very long running works making
  * blocked draining impractical.
  *
  * This is solved by allowing the pools to be disassociated from the CPU
@@ -3505,32 +3498,6 @@ EXPORT_SYMBOL_GPL(work_busy);
  * cpu comes back online.
  */
 
-/* claim manager positions of all pools */
-static void gcwq_claim_assoc_and_lock(struct global_cwq *gcwq)
-{
-        struct worker_pool *pool;
-
-        for_each_worker_pool(pool, gcwq)
-                mutex_lock_nested(&pool->assoc_mutex, pool - gcwq->pools);
-
-        local_irq_disable();
-        for_each_worker_pool(pool, gcwq)
-                spin_lock_nested(&pool->lock, pool - gcwq->pools);
-}
-
-/* release manager positions */
-static void gcwq_release_assoc_and_unlock(struct global_cwq *gcwq)
-{
-        struct worker_pool *pool;
-
-        for_each_worker_pool(pool, gcwq)
-                spin_unlock(&pool->lock);
-        local_irq_enable();
-
-        for_each_worker_pool(pool, gcwq)
-                mutex_unlock(&pool->assoc_mutex);
-}
-
 static void gcwq_unbind_fn(struct work_struct *work)
 {
         struct global_cwq *gcwq = get_gcwq(smp_processor_id());
@@ -3539,17 +3506,19 @@ static void gcwq_unbind_fn(struct work_struct *work)
         struct hlist_node *pos;
         int i;
 
-        BUG_ON(gcwq->pools[0].cpu != smp_processor_id());
+        for_each_worker_pool(pool, gcwq) {
+                BUG_ON(pool->cpu != smp_processor_id());
 
-        gcwq_claim_assoc_and_lock(gcwq);
+                mutex_lock(&pool->assoc_mutex);
+                spin_lock_irq(&pool->lock);
 
-        /*
-         * We've claimed all manager positions. Make all workers unbound
-         * and set DISASSOCIATED. Before this, all workers except for the
-         * ones which are still executing works from before the last CPU
-         * down must be on the cpu. After this, they may become diasporas.
-         */
-        for_each_worker_pool(pool, gcwq) {
+                /*
+                 * We've claimed all manager positions. Make all workers
+                 * unbound and set DISASSOCIATED. Before this, all workers
+                 * except for the ones which are still executing works from
+                 * before the last CPU down must be on the cpu. After
+                 * this, they may become diasporas.
+                 */
                 list_for_each_entry(worker, &pool->idle_list, entry)
                         worker->flags |= WORKER_UNBOUND;
 
@@ -3557,9 +3526,10 @@ static void gcwq_unbind_fn(struct work_struct *work)
                         worker->flags |= WORKER_UNBOUND;
 
                 pool->flags |= POOL_DISASSOCIATED;
-        }
 
-        gcwq_release_assoc_and_unlock(gcwq);
+                spin_unlock_irq(&pool->lock);
+                mutex_unlock(&pool->assoc_mutex);
+        }
 
         /*
          * Call schedule() so that we cross rq->lock and thus can guarantee
@@ -3615,11 +3585,16 @@ static int __cpuinit workqueue_cpu_up_callback(struct notifier_block *nfb,
 
         case CPU_DOWN_FAILED:
         case CPU_ONLINE:
-                gcwq_claim_assoc_and_lock(gcwq);
-                for_each_worker_pool(pool, gcwq)
+                for_each_worker_pool(pool, gcwq) {
+                        mutex_lock(&pool->assoc_mutex);
+                        spin_lock_irq(&pool->lock);
+
                         pool->flags &= ~POOL_DISASSOCIATED;
-                rebind_workers(gcwq);
-                gcwq_release_assoc_and_unlock(gcwq);
+                        rebind_workers(pool);
+
+                        spin_unlock_irq(&pool->lock);
+                        mutex_unlock(&pool->assoc_mutex);
+                }
                 break;
         }
         return NOTIFY_OK;
```