author	Lai Jiangshan <laijs@cn.fujitsu.com>	2014-07-22 01:03:02 -0400
committer	Tejun Heo <tj@kernel.org>	2014-07-22 12:10:39 -0400
commit	051e1850106687896d4c4eeaf6ae4d61c4862e85 (patch)
tree	4c0777d0dd4cb9179b31944a40b7fb843335ef84
parent	228f1d0018ba6b24c9f718a97a5bc35b24f1e1e3 (diff)
workqueue: unfold start_worker() into create_worker()
Simply unfold the code of start_worker() into create_worker() and remove the original start_worker() and create_and_start_worker().

The only trade-off is the introduced overhead that pool->lock is released and regrabbed after the newly created worker is started. The overhead is acceptable since the manager is a slow path.

Because of this new locking behavior, the newly created worker may grab the lock earlier than the manager and go on to process work items. In that case, the rechecked need_to_create_worker() may be true, and the manager goes to restart, which is the correct behavior.

tj: Minor updates to description and comments.

Signed-off-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
-rw-r--r--	kernel/workqueue.c	75
1 file changed, 18 insertions(+), 57 deletions(-)
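For orientation before reading the diff: a condensed sketch of create_worker() as it looks after this patch. The elided allocation/kthread-setup and cleanup steps are assumptions about the surrounding code, not part of this commit; the locked start sequence is taken verbatim from the hunks below.

static struct worker *create_worker(struct worker_pool *pool)
{
	struct worker *worker;

	/* ... allocate @worker and create its kthread (elided) ... */

	/* successful, attach the worker to the pool */
	worker_attach_to_pool(worker, pool);

	/*
	 * Start the newly created worker.  pool->lock is taken and released
	 * here, inside create_worker(), which is what allows the worker to
	 * run (and possibly grab work) before the manager retakes the lock.
	 */
	spin_lock_irq(&pool->lock);
	worker->pool->nr_workers++;
	worker_enter_idle(worker);
	wake_up_process(worker->task);
	spin_unlock_irq(&pool->lock);

	return worker;

fail:
	/* ... error cleanup (elided) ... */
	return NULL;
}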
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 54efc68f656e..4cb8527a5783 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1540,7 +1540,7 @@ static void worker_enter_idle(struct worker *worker)
 		    (worker->hentry.next || worker->hentry.pprev)))
 		return;
 
-	/* can't use worker_set_flags(), also called from start_worker() */
+	/* can't use worker_set_flags(), also called from create_worker() */
 	worker->flags |= WORKER_IDLE;
 	pool->nr_idle++;
 	worker->last_active = jiffies;
@@ -1661,8 +1661,7 @@ static void worker_detach_from_pool(struct worker *worker,
  * create_worker - create a new workqueue worker
  * @pool: pool the new worker will belong to
  *
- * Create a new worker which is attached to @pool. The new worker must be
- * started by start_worker().
+ * Create and start a new worker which is attached to @pool.
  *
  * CONTEXT:
  * Might sleep. Does GFP_KERNEL allocations.
@@ -1707,6 +1706,13 @@ static struct worker *create_worker(struct worker_pool *pool)
 	/* successful, attach the worker to the pool */
 	worker_attach_to_pool(worker, pool);
 
+	/* start the newly created worker */
+	spin_lock_irq(&pool->lock);
+	worker->pool->nr_workers++;
+	worker_enter_idle(worker);
+	wake_up_process(worker->task);
+	spin_unlock_irq(&pool->lock);
+
 	return worker;
 
 fail:
@@ -1717,44 +1723,6 @@ fail:
 }
 
 /**
- * start_worker - start a newly created worker
- * @worker: worker to start
- *
- * Make the pool aware of @worker and start it.
- *
- * CONTEXT:
- * spin_lock_irq(pool->lock).
- */
-static void start_worker(struct worker *worker)
-{
-	worker->pool->nr_workers++;
-	worker_enter_idle(worker);
-	wake_up_process(worker->task);
-}
-
-/**
- * create_and_start_worker - create and start a worker for a pool
- * @pool: the target pool
- *
- * Grab the managership of @pool and create and start a new worker for it.
- *
- * Return: 0 on success. A negative error code otherwise.
- */
-static int create_and_start_worker(struct worker_pool *pool)
-{
-	struct worker *worker;
-
-	worker = create_worker(pool);
-	if (worker) {
-		spin_lock_irq(&pool->lock);
-		start_worker(worker);
-		spin_unlock_irq(&pool->lock);
-	}
-
-	return worker ? 0 : -ENOMEM;
-}
-
-/**
  * destroy_worker - destroy a workqueue worker
  * @worker: worker to be destroyed
  *
@@ -1892,19 +1860,7 @@ restart:
 	mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);
 
 	while (true) {
-		struct worker *worker;
-
-		worker = create_worker(pool);
-		if (worker) {
-			del_timer_sync(&pool->mayday_timer);
-			spin_lock_irq(&pool->lock);
-			start_worker(worker);
-			if (WARN_ON_ONCE(need_to_create_worker(pool)))
-				goto restart;
-			return true;
-		}
-
-		if (!need_to_create_worker(pool))
+		if (create_worker(pool) || !need_to_create_worker(pool))
 			break;
 
 		schedule_timeout_interruptible(CREATE_COOLDOWN);
@@ -1915,6 +1871,11 @@ restart:
 
 	del_timer_sync(&pool->mayday_timer);
 	spin_lock_irq(&pool->lock);
+	/*
+	 * This is necessary even after a new worker was just successfully
+	 * created as @pool->lock was dropped and the new worker might have
+	 * already become busy.
+	 */
 	if (need_to_create_worker(pool))
 		goto restart;
 	return true;
@@ -3537,7 +3498,7 @@ static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
 		goto fail;
 
 	/* create and start the initial worker */
-	if (create_and_start_worker(pool) < 0)
+	if (!create_worker(pool))
 		goto fail;
 
 	/* install */
@@ -4611,7 +4572,7 @@ static int workqueue_cpu_up_callback(struct notifier_block *nfb,
 	for_each_cpu_worker_pool(pool, cpu) {
 		if (pool->nr_workers)
 			continue;
-		if (create_and_start_worker(pool) < 0)
+		if (!create_worker(pool))
 			return NOTIFY_BAD;
 	}
 	break;
@@ -4911,7 +4872,7 @@ static int __init init_workqueues(void)
 
 	for_each_cpu_worker_pool(pool, cpu) {
 		pool->flags &= ~POOL_DISASSOCIATED;
-		BUG_ON(create_and_start_worker(pool) < 0);
+		BUG_ON(!create_worker(pool));
 	}
 }
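Putting the manager path together: a reconstruction of maybe_create_worker() after this patch, assembled from the hunks above. Lines not visible in the diff (the function signature, the lock annotations, and the mayday-timer comment) are assumptions based on kernel/workqueue.c of this era, not text from this commit.

static bool maybe_create_worker(struct worker_pool *pool)
__releases(&pool->lock)
__acquires(&pool->lock)
{
restart:
	spin_unlock_irq(&pool->lock);

	/* if we don't make progress in MAYDAY_INITIAL_TIMEOUT, call for help */
	mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);

	while (true) {
		if (create_worker(pool) || !need_to_create_worker(pool))
			break;

		schedule_timeout_interruptible(CREATE_COOLDOWN);

		if (!need_to_create_worker(pool))
			break;
	}

	del_timer_sync(&pool->mayday_timer);
	spin_lock_irq(&pool->lock);
	/*
	 * This is necessary even after a new worker was just successfully
	 * created as @pool->lock was dropped and the new worker might have
	 * already become busy.
	 */
	if (need_to_create_worker(pool))
		goto restart;
	return true;
}

Note that a successful create_worker() no longer short-circuits the recheck: because the lock was dropped inside create_worker(), need_to_create_worker() can legitimately be true again immediately afterwards, and restarting is the intended behavior.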