path: root/kernel/workqueue.c
author	Linus Torvalds <torvalds@linux-foundation.org>	2014-08-04 13:04:44 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-08-04 13:04:44 -0400
commit	c4c3f5fba01e189fb3618f09545abdb4cf8ec8ee (patch)
tree	2939a5d998104ddd5d68b44337956f668f2d701a /kernel/workqueue.c
parent	1bff598860f535e525c2a549f7f584ae7cc3fc1c (diff)
parent	95847e1bd34c0de86039408b24a05f07e788061d (diff)
Merge branch 'for-3.17' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq
Pull workqueue updates from Tejun Heo:
 "Lai has been doing a lot of cleanups of workqueue and kthread_work.
  No significant behavior change. Just a lot of cleanups all over the
  place. Some are a bit invasive but overall nothing too dangerous"

* 'for-3.17' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq:
  kthread_work: remove the unused wait_queue_head
  kthread_work: wake up worker only when the worker is idle
  workqueue: use nr_node_ids instead of wq_numa_tbl_len
  workqueue: remove the misnamed out_unlock label in get_unbound_pool()
  workqueue: remove the stale comment in pwq_unbound_release_workfn()
  workqueue: move rescuer pool detachment to the end
  workqueue: unfold start_worker() into create_worker()
  workqueue: remove @wakeup from worker_set_flags()
  workqueue: remove an unneeded UNBOUND test before waking up the next worker
  workqueue: wake regular worker if need_more_worker() when rescuer leave the pool
  workqueue: alloc struct worker on its local node
  workqueue: reuse the already calculated pwq in try_to_grab_pending()
  workqueue: stronger test in process_one_work()
  workqueue: clear POOL_DISASSOCIATED in rebind_workers()
  workqueue: sanity check pool->cpu in wq_worker_sleeping()
  workqueue: clear leftover flags when detached
  workqueue: remove useless WARN_ON_ONCE()
  workqueue: use schedule_timeout_interruptible() instead of open code
  workqueue: remove the empty check in too_many_workers()
  workqueue: use "pool->cpu < 0" to stand for an unbound pool
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--	kernel/workqueue.c	200
1 file changed, 61 insertions, 139 deletions
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 35974ac69600..7a2e449a96b1 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -265,7 +265,6 @@ struct workqueue_struct {
 
 static struct kmem_cache *pwq_cache;
 
-static int wq_numa_tbl_len;		/* highest possible NUMA node id + 1 */
 static cpumask_var_t *wq_numa_possible_cpumask;
 					/* possible CPUs of each node */
 
@@ -758,13 +757,6 @@ static bool too_many_workers(struct worker_pool *pool)
 	int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
 	int nr_busy = pool->nr_workers - nr_idle;
 
-	/*
-	 * nr_idle and idle_list may disagree if idle rebinding is in
-	 * progress. Never return %true if idle_list is empty.
-	 */
-	if (list_empty(&pool->idle_list))
-		return false;
-
 	return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
 }
 
@@ -850,7 +842,7 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task, int cpu)
 	pool = worker->pool;
 
 	/* this can only happen on the local cpu */
-	if (WARN_ON_ONCE(cpu != raw_smp_processor_id()))
+	if (WARN_ON_ONCE(cpu != raw_smp_processor_id() || pool->cpu != cpu))
 		return NULL;
 
 	/*
@@ -874,35 +866,22 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task, int cpu)
  * worker_set_flags - set worker flags and adjust nr_running accordingly
  * @worker: self
  * @flags: flags to set
- * @wakeup: wakeup an idle worker if necessary
  *
- * Set @flags in @worker->flags and adjust nr_running accordingly. If
- * nr_running becomes zero and @wakeup is %true, an idle worker is
- * woken up.
+ * Set @flags in @worker->flags and adjust nr_running accordingly.
  *
  * CONTEXT:
  * spin_lock_irq(pool->lock)
  */
-static inline void worker_set_flags(struct worker *worker, unsigned int flags,
-				    bool wakeup)
+static inline void worker_set_flags(struct worker *worker, unsigned int flags)
 {
 	struct worker_pool *pool = worker->pool;
 
 	WARN_ON_ONCE(worker->task != current);
 
-	/*
-	 * If transitioning into NOT_RUNNING, adjust nr_running and
-	 * wake up an idle worker as necessary if requested by
-	 * @wakeup.
-	 */
+	/* If transitioning into NOT_RUNNING, adjust nr_running. */
 	if ((flags & WORKER_NOT_RUNNING) &&
 	    !(worker->flags & WORKER_NOT_RUNNING)) {
-		if (wakeup) {
-			if (atomic_dec_and_test(&pool->nr_running) &&
-			    !list_empty(&pool->worklist))
-				wake_up_worker(pool);
-		} else
-			atomic_dec(&pool->nr_running);
+		atomic_dec(&pool->nr_running);
 	}
 
 	worker->flags |= flags;
@@ -1232,7 +1211,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
 			pwq_activate_delayed_work(work);
 
 		list_del_init(&work->entry);
-		pwq_dec_nr_in_flight(get_work_pwq(work), get_work_color(work));
+		pwq_dec_nr_in_flight(pwq, get_work_color(work));
 
 		/* work->data points to pwq iff queued, point to pool */
 		set_work_pool_and_keep_pending(work, pool->id);
@@ -1560,7 +1539,7 @@ static void worker_enter_idle(struct worker *worker)
 			 (worker->hentry.next || worker->hentry.pprev)))
 		return;
 
-	/* can't use worker_set_flags(), also called from start_worker() */
+	/* can't use worker_set_flags(), also called from create_worker() */
 	worker->flags |= WORKER_IDLE;
 	pool->nr_idle++;
 	worker->last_active = jiffies;
@@ -1602,11 +1581,11 @@ static void worker_leave_idle(struct worker *worker)
 	list_del_init(&worker->entry);
 }
 
-static struct worker *alloc_worker(void)
+static struct worker *alloc_worker(int node)
 {
 	struct worker *worker;
 
-	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
+	worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, node);
 	if (worker) {
 		INIT_LIST_HEAD(&worker->entry);
 		INIT_LIST_HEAD(&worker->scheduled);
@@ -1670,6 +1649,9 @@ static void worker_detach_from_pool(struct worker *worker,
 		detach_completion = pool->detach_completion;
 	mutex_unlock(&pool->attach_mutex);
 
+	/* clear leftover flags without pool->lock after it is detached */
+	worker->flags &= ~(WORKER_UNBOUND | WORKER_REBOUND);
+
 	if (detach_completion)
 		complete(detach_completion);
 }
@@ -1678,8 +1660,7 @@ static void worker_detach_from_pool(struct worker *worker,
  * create_worker - create a new workqueue worker
  * @pool: pool the new worker will belong to
  *
- * Create a new worker which is attached to @pool. The new worker must be
- * started by start_worker().
+ * Create and start a new worker which is attached to @pool.
  *
  * CONTEXT:
  * Might sleep. Does GFP_KERNEL allocations.
@@ -1698,7 +1679,7 @@ static struct worker *create_worker(struct worker_pool *pool)
 	if (id < 0)
 		goto fail;
 
-	worker = alloc_worker();
+	worker = alloc_worker(pool->node);
 	if (!worker)
 		goto fail;
 
@@ -1724,6 +1705,13 @@ static struct worker *create_worker(struct worker_pool *pool)
 	/* successful, attach the worker to the pool */
 	worker_attach_to_pool(worker, pool);
 
+	/* start the newly created worker */
+	spin_lock_irq(&pool->lock);
+	worker->pool->nr_workers++;
+	worker_enter_idle(worker);
+	wake_up_process(worker->task);
+	spin_unlock_irq(&pool->lock);
+
 	return worker;
 
 fail:
@@ -1734,44 +1722,6 @@ fail:
 }
 
 /**
- * start_worker - start a newly created worker
- * @worker: worker to start
- *
- * Make the pool aware of @worker and start it.
- *
- * CONTEXT:
- * spin_lock_irq(pool->lock).
- */
-static void start_worker(struct worker *worker)
-{
-	worker->pool->nr_workers++;
-	worker_enter_idle(worker);
-	wake_up_process(worker->task);
-}
-
-/**
- * create_and_start_worker - create and start a worker for a pool
- * @pool: the target pool
- *
- * Grab the managership of @pool and create and start a new worker for it.
- *
- * Return: 0 on success. A negative error code otherwise.
- */
-static int create_and_start_worker(struct worker_pool *pool)
-{
-	struct worker *worker;
-
-	worker = create_worker(pool);
-	if (worker) {
-		spin_lock_irq(&pool->lock);
-		start_worker(worker);
-		spin_unlock_irq(&pool->lock);
-	}
-
-	return worker ? 0 : -ENOMEM;
-}
-
-/**
  * destroy_worker - destroy a workqueue worker
  * @worker: worker to be destroyed
  *
@@ -1909,23 +1859,10 @@ restart:
 	mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);
 
 	while (true) {
-		struct worker *worker;
-
-		worker = create_worker(pool);
-		if (worker) {
-			del_timer_sync(&pool->mayday_timer);
-			spin_lock_irq(&pool->lock);
-			start_worker(worker);
-			if (WARN_ON_ONCE(need_to_create_worker(pool)))
-				goto restart;
-			return true;
-		}
-
-		if (!need_to_create_worker(pool))
+		if (create_worker(pool) || !need_to_create_worker(pool))
 			break;
 
-		__set_current_state(TASK_INTERRUPTIBLE);
-		schedule_timeout(CREATE_COOLDOWN);
+		schedule_timeout_interruptible(CREATE_COOLDOWN);
 
 		if (!need_to_create_worker(pool))
 			break;
@@ -1933,6 +1870,11 @@ restart:
 
 	del_timer_sync(&pool->mayday_timer);
 	spin_lock_irq(&pool->lock);
+	/*
+	 * This is necessary even after a new worker was just successfully
+	 * created as @pool->lock was dropped and the new worker might have
+	 * already become busy.
+	 */
 	if (need_to_create_worker(pool))
 		goto restart;
 	return true;
@@ -2020,13 +1962,7 @@ __acquires(&pool->lock)
 
 	lockdep_copy_map(&lockdep_map, &work->lockdep_map);
 #endif
-	/*
-	 * Ensure we're on the correct CPU. DISASSOCIATED test is
-	 * necessary to avoid spurious warnings from rescuers servicing the
-	 * unbound or a disassociated pool.
-	 */
-	WARN_ON_ONCE(!(worker->flags & WORKER_UNBOUND) &&
-		     !(pool->flags & POOL_DISASSOCIATED) &&
+	WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) &&
 		     raw_smp_processor_id() != pool->cpu);
 
 	/*
@@ -2052,17 +1988,22 @@ __acquires(&pool->lock)
 	list_del_init(&work->entry);
 
 	/*
-	 * CPU intensive works don't participate in concurrency
-	 * management. They're the scheduler's responsibility.
+	 * CPU intensive works don't participate in concurrency management.
+	 * They're the scheduler's responsibility. This takes @worker out
+	 * of concurrency management and the next code block will chain
+	 * execution of the pending work items.
 	 */
 	if (unlikely(cpu_intensive))
-		worker_set_flags(worker, WORKER_CPU_INTENSIVE, true);
+		worker_set_flags(worker, WORKER_CPU_INTENSIVE);
 
 	/*
-	 * Unbound pool isn't concurrency managed and work items should be
-	 * executed ASAP. Wake up another worker if necessary.
+	 * Wake up another worker if necessary. The condition is always
+	 * false for normal per-cpu workers since nr_running would always
+	 * be >= 1 at this point. This is used to chain execution of the
+	 * pending work items for WORKER_NOT_RUNNING workers such as the
+	 * UNBOUND and CPU_INTENSIVE ones.
 	 */
-	if ((worker->flags & WORKER_UNBOUND) && need_more_worker(pool))
+	if (need_more_worker(pool))
 		wake_up_worker(pool);
 
 	/*
@@ -2218,7 +2159,7 @@ recheck:
 		}
 	} while (keep_working(pool));
 
-	worker_set_flags(worker, WORKER_PREP, false);
+	worker_set_flags(worker, WORKER_PREP);
 sleep:
 	/*
 	 * pool->lock is held and there's no work to process and no need to
@@ -2311,29 +2252,27 @@ repeat:
 				move_linked_works(work, scheduled, &n);
 
 		process_scheduled_works(rescuer);
-		spin_unlock_irq(&pool->lock);
-
-		worker_detach_from_pool(rescuer, pool);
-
-		spin_lock_irq(&pool->lock);
 
 		/*
 		 * Put the reference grabbed by send_mayday(). @pool won't
-		 * go away while we're holding its lock.
+		 * go away while we're still attached to it.
 		 */
 		put_pwq(pwq);
 
 		/*
-		 * Leave this pool. If keep_working() is %true, notify a
+		 * Leave this pool. If need_more_worker() is %true, notify a
 		 * regular worker; otherwise, we end up with 0 concurrency
 		 * and stalling the execution.
 		 */
-		if (keep_working(pool))
+		if (need_more_worker(pool))
 			wake_up_worker(pool);
 
 		rescuer->pool = NULL;
-		spin_unlock(&pool->lock);
-		spin_lock(&wq_mayday_lock);
+		spin_unlock_irq(&pool->lock);
+
+		worker_detach_from_pool(rescuer, pool);
+
+		spin_lock_irq(&wq_mayday_lock);
 	}
 
 	spin_unlock_irq(&wq_mayday_lock);
@@ -3458,7 +3397,7 @@ static void put_unbound_pool(struct worker_pool *pool)
 		return;
 
 	/* sanity checks */
-	if (WARN_ON(!(pool->flags & POOL_DISASSOCIATED)) ||
+	if (WARN_ON(!(pool->cpu < 0)) ||
 	    WARN_ON(!list_empty(&pool->worklist)))
 		return;
 
@@ -3524,7 +3463,7 @@ static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
 	hash_for_each_possible(unbound_pool_hash, pool, hash_node, hash) {
 		if (wqattrs_equal(pool->attrs, attrs)) {
 			pool->refcnt++;
-			goto out_unlock;
+			return pool;
 		}
 	}
 
@@ -3557,12 +3496,12 @@ static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
 		goto fail;
 
 	/* create and start the initial worker */
-	if (create_and_start_worker(pool) < 0)
+	if (!create_worker(pool))
 		goto fail;
 
 	/* install */
 	hash_add(unbound_pool_hash, &pool->hash_node, hash);
-out_unlock:
+
 	return pool;
 fail:
 	if (pool)
@@ -3591,11 +3530,6 @@ static void pwq_unbound_release_workfn(struct work_struct *work)
 	if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)))
 		return;
 
-	/*
-	 * Unlink @pwq. Synchronization against wq->mutex isn't strictly
-	 * necessary on release but do it anyway. It's easier to verify
-	 * and consistent with the linking path.
-	 */
 	mutex_lock(&wq->mutex);
 	list_del_rcu(&pwq->pwqs_node);
 	is_last = list_empty(&wq->pwqs);
@@ -3692,10 +3626,7 @@ static void link_pwq(struct pool_workqueue *pwq)
 	if (!list_empty(&pwq->pwqs_node))
 		return;
 
-	/*
-	 * Set the matching work_color. This is synchronized with
-	 * wq->mutex to avoid confusing flush_workqueue().
-	 */
+	/* set the matching work_color */
 	pwq->work_color = wq->work_color;
 
 	/* sync max_active to the current setting */
@@ -3832,7 +3763,7 @@ int apply_workqueue_attrs(struct workqueue_struct *wq,
 	if (WARN_ON((wq->flags & __WQ_ORDERED) && !list_empty(&wq->pwqs)))
 		return -EINVAL;
 
-	pwq_tbl = kzalloc(wq_numa_tbl_len * sizeof(pwq_tbl[0]), GFP_KERNEL);
+	pwq_tbl = kzalloc(nr_node_ids * sizeof(pwq_tbl[0]), GFP_KERNEL);
 	new_attrs = alloc_workqueue_attrs(GFP_KERNEL);
 	tmp_attrs = alloc_workqueue_attrs(GFP_KERNEL);
 	if (!pwq_tbl || !new_attrs || !tmp_attrs)
@@ -4080,7 +4011,7 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
 
 	/* allocate wq and format name */
 	if (flags & WQ_UNBOUND)
-		tbl_size = wq_numa_tbl_len * sizeof(wq->numa_pwq_tbl[0]);
+		tbl_size = nr_node_ids * sizeof(wq->numa_pwq_tbl[0]);
 
 	wq = kzalloc(sizeof(*wq) + tbl_size, GFP_KERNEL);
 	if (!wq)
@@ -4122,7 +4053,7 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
 	if (flags & WQ_MEM_RECLAIM) {
 		struct worker *rescuer;
 
-		rescuer = alloc_worker();
+		rescuer = alloc_worker(NUMA_NO_NODE);
 		if (!rescuer)
 			goto err_destroy;
 
@@ -4470,8 +4401,6 @@ static void wq_unbind_fn(struct work_struct *work)
 	struct worker *worker;
 
 	for_each_cpu_worker_pool(pool, cpu) {
-		WARN_ON_ONCE(cpu != smp_processor_id());
-
 		mutex_lock(&pool->attach_mutex);
 		spin_lock_irq(&pool->lock);
 
@@ -4543,6 +4472,7 @@ static void rebind_workers(struct worker_pool *pool)
 						  pool->attrs->cpumask) < 0);
 
 	spin_lock_irq(&pool->lock);
+	pool->flags &= ~POOL_DISASSOCIATED;
 
 	for_each_pool_worker(worker, pool) {
 		unsigned int worker_flags = worker->flags;
@@ -4632,7 +4562,7 @@ static int workqueue_cpu_up_callback(struct notifier_block *nfb,
 		for_each_cpu_worker_pool(pool, cpu) {
 			if (pool->nr_workers)
 				continue;
-			if (create_and_start_worker(pool) < 0)
+			if (!create_worker(pool))
 				return NOTIFY_BAD;
 		}
 		break;
@@ -4645,10 +4575,6 @@ static int workqueue_cpu_up_callback(struct notifier_block *nfb,
 			mutex_lock(&pool->attach_mutex);
 
 			if (pool->cpu == cpu) {
-				spin_lock_irq(&pool->lock);
-				pool->flags &= ~POOL_DISASSOCIATED;
-				spin_unlock_irq(&pool->lock);
-
 				rebind_workers(pool);
 			} else if (pool->cpu < 0) {
 				restore_unbound_workers_cpumask(pool, cpu);
@@ -4856,10 +4782,6 @@ static void __init wq_numa_init(void)
 	cpumask_var_t *tbl;
 	int node, cpu;
 
-	/* determine NUMA pwq table len - highest node id + 1 */
-	for_each_node(node)
-		wq_numa_tbl_len = max(wq_numa_tbl_len, node + 1);
-
 	if (num_possible_nodes() <= 1)
 		return;
 
@@ -4876,7 +4798,7 @@ static void __init wq_numa_init(void)
 	 * available. Build one from cpu_to_node() which should have been
 	 * fully initialized by now.
 	 */
-	tbl = kzalloc(wq_numa_tbl_len * sizeof(tbl[0]), GFP_KERNEL);
+	tbl = kzalloc(nr_node_ids * sizeof(tbl[0]), GFP_KERNEL);
 	BUG_ON(!tbl);
 
 	for_each_node(node)
@@ -4936,7 +4858,7 @@ static int __init init_workqueues(void)
 
 		for_each_cpu_worker_pool(pool, cpu) {
 			pool->flags &= ~POOL_DISASSOCIATED;
-			BUG_ON(create_and_start_worker(pool) < 0);
+			BUG_ON(!create_worker(pool));
 		}
 	}
 