Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--  kernel/workqueue.c  |  206
1 file changed, 64 insertions(+), 142 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 35974ac69600..5dbe22aa3efd 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -265,7 +265,6 @@ struct workqueue_struct {
 
 static struct kmem_cache *pwq_cache;
 
-static int wq_numa_tbl_len;		/* highest possible NUMA node id + 1 */
 static cpumask_var_t *wq_numa_possible_cpumask;
 					/* possible CPUs of each node */
 
@@ -758,13 +757,6 @@ static bool too_many_workers(struct worker_pool *pool)
 	int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
 	int nr_busy = pool->nr_workers - nr_idle;
 
-	/*
-	 * nr_idle and idle_list may disagree if idle rebinding is in
-	 * progress.  Never return %true if idle_list is empty.
-	 */
-	if (list_empty(&pool->idle_list))
-		return false;
-
 	return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
 }
 
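A minimal userspace sketch (an illustration, not part of the commit) of the idle-ratio check that too_many_workers() keeps after the removed idle_list test, assuming MAX_IDLE_WORKERS_RATIO is 4 as defined in kernel/workqueue.c:

#include <stdbool.h>
#include <stdio.h>

#define MAX_IDLE_WORKERS_RATIO	4	/* assumed value from kernel/workqueue.c */

/* model of too_many_workers() with the idle_list check removed */
static bool too_many_workers_model(int nr_workers, int nr_idle)
{
	int nr_busy = nr_workers - nr_idle;

	/* too many if, beyond the 2 reserved idle workers, the excess idle
	 * reaches a quarter of the busy count */
	return nr_idle > 2 && (nr_idle - 2) * MAX_IDLE_WORKERS_RATIO >= nr_busy;
}

int main(void)
{
	printf("%d\n", too_many_workers_model(8, 3));	/* 0: (3-2)*4 < 5 busy */
	printf("%d\n", too_many_workers_model(8, 6));	/* 1: (6-2)*4 >= 2 busy */
	return 0;
}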
@@ -850,7 +842,7 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task, int cpu)
 	pool = worker->pool;
 
 	/* this can only happen on the local cpu */
-	if (WARN_ON_ONCE(cpu != raw_smp_processor_id()))
+	if (WARN_ON_ONCE(cpu != raw_smp_processor_id() || pool->cpu != cpu))
 		return NULL;
 
 	/*
@@ -874,35 +866,22 @@ struct task_struct *wq_worker_sleeping(struct task_struct *task, int cpu)
  * worker_set_flags - set worker flags and adjust nr_running accordingly
  * @worker: self
  * @flags: flags to set
- * @wakeup: wakeup an idle worker if necessary
  *
- * Set @flags in @worker->flags and adjust nr_running accordingly.  If
- * nr_running becomes zero and @wakeup is %true, an idle worker is
- * woken up.
+ * Set @flags in @worker->flags and adjust nr_running accordingly.
  *
  * CONTEXT:
  * spin_lock_irq(pool->lock)
  */
-static inline void worker_set_flags(struct worker *worker, unsigned int flags,
-				    bool wakeup)
+static inline void worker_set_flags(struct worker *worker, unsigned int flags)
 {
 	struct worker_pool *pool = worker->pool;
 
 	WARN_ON_ONCE(worker->task != current);
 
-	/*
-	 * If transitioning into NOT_RUNNING, adjust nr_running and
-	 * wake up an idle worker as necessary if requested by
-	 * @wakeup.
-	 */
+	/* If transitioning into NOT_RUNNING, adjust nr_running. */
 	if ((flags & WORKER_NOT_RUNNING) &&
 	    !(worker->flags & WORKER_NOT_RUNNING)) {
-		if (wakeup) {
-			if (atomic_dec_and_test(&pool->nr_running) &&
-			    !list_empty(&pool->worklist))
-				wake_up_worker(pool);
-		} else
-			atomic_dec(&pool->nr_running);
+		atomic_dec(&pool->nr_running);
 	}
 
 	worker->flags |= flags;
@@ -1232,7 +1211,7 @@ static int try_to_grab_pending(struct work_struct *work, bool is_dwork,
 		pwq_activate_delayed_work(work);
 
 		list_del_init(&work->entry);
-		pwq_dec_nr_in_flight(get_work_pwq(work), get_work_color(work));
+		pwq_dec_nr_in_flight(pwq, get_work_color(work));
 
 		/* work->data points to pwq iff queued, point to pool */
 		set_work_pool_and_keep_pending(work, pool->id);
@@ -1560,7 +1539,7 @@ static void worker_enter_idle(struct worker *worker)
 			 (worker->hentry.next || worker->hentry.pprev)))
 		return;
 
-	/* can't use worker_set_flags(), also called from start_worker() */
+	/* can't use worker_set_flags(), also called from create_worker() */
 	worker->flags |= WORKER_IDLE;
 	pool->nr_idle++;
 	worker->last_active = jiffies;
@@ -1602,11 +1581,11 @@ static void worker_leave_idle(struct worker *worker)
 	list_del_init(&worker->entry);
 }
 
-static struct worker *alloc_worker(void)
+static struct worker *alloc_worker(int node)
 {
 	struct worker *worker;
 
-	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
+	worker = kzalloc_node(sizeof(*worker), GFP_KERNEL, node);
 	if (worker) {
 		INIT_LIST_HEAD(&worker->entry);
 		INIT_LIST_HEAD(&worker->scheduled);
@@ -1670,6 +1649,9 @@ static void worker_detach_from_pool(struct worker *worker,
 	detach_completion = pool->detach_completion;
 	mutex_unlock(&pool->attach_mutex);
 
+	/* clear leftover flags without pool->lock after it is detached */
+	worker->flags &= ~(WORKER_UNBOUND | WORKER_REBOUND);
+
 	if (detach_completion)
 		complete(detach_completion);
 }
@@ -1678,8 +1660,7 @@ static void worker_detach_from_pool(struct worker *worker,
  * create_worker - create a new workqueue worker
  * @pool: pool the new worker will belong to
  *
- * Create a new worker which is attached to @pool.  The new worker must be
- * started by start_worker().
+ * Create and start a new worker which is attached to @pool.
  *
  * CONTEXT:
  * Might sleep.  Does GFP_KERNEL allocations.
@@ -1698,7 +1679,7 @@ static struct worker *create_worker(struct worker_pool *pool)
 	if (id < 0)
 		goto fail;
 
-	worker = alloc_worker();
+	worker = alloc_worker(pool->node);
 	if (!worker)
 		goto fail;
 
@@ -1724,6 +1705,13 @@ static struct worker *create_worker(struct worker_pool *pool)
 	/* successful, attach the worker to the pool */
 	worker_attach_to_pool(worker, pool);
 
+	/* start the newly created worker */
+	spin_lock_irq(&pool->lock);
+	worker->pool->nr_workers++;
+	worker_enter_idle(worker);
+	wake_up_process(worker->task);
+	spin_unlock_irq(&pool->lock);
+
 	return worker;
 
 fail:
@@ -1734,44 +1722,6 @@ fail:
 }
 
 /**
- * start_worker - start a newly created worker
- * @worker: worker to start
- *
- * Make the pool aware of @worker and start it.
- *
- * CONTEXT:
- * spin_lock_irq(pool->lock).
- */
-static void start_worker(struct worker *worker)
-{
-	worker->pool->nr_workers++;
-	worker_enter_idle(worker);
-	wake_up_process(worker->task);
-}
-
-/**
- * create_and_start_worker - create and start a worker for a pool
- * @pool: the target pool
- *
- * Grab the managership of @pool and create and start a new worker for it.
- *
- * Return: 0 on success.  A negative error code otherwise.
- */
-static int create_and_start_worker(struct worker_pool *pool)
-{
-	struct worker *worker;
-
-	worker = create_worker(pool);
-	if (worker) {
-		spin_lock_irq(&pool->lock);
-		start_worker(worker);
-		spin_unlock_irq(&pool->lock);
-	}
-
-	return worker ? 0 : -ENOMEM;
-}
-
-/**
  * destroy_worker - destroy a workqueue worker
  * @worker: worker to be destroyed
  *
@@ -1909,23 +1859,10 @@ restart:
 	mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INITIAL_TIMEOUT);
 
 	while (true) {
-		struct worker *worker;
-
-		worker = create_worker(pool);
-		if (worker) {
-			del_timer_sync(&pool->mayday_timer);
-			spin_lock_irq(&pool->lock);
-			start_worker(worker);
-			if (WARN_ON_ONCE(need_to_create_worker(pool)))
-				goto restart;
-			return true;
-		}
-
-		if (!need_to_create_worker(pool))
+		if (create_worker(pool) || !need_to_create_worker(pool))
 			break;
 
-		__set_current_state(TASK_INTERRUPTIBLE);
-		schedule_timeout(CREATE_COOLDOWN);
+		schedule_timeout_interruptible(CREATE_COOLDOWN);
 
 		if (!need_to_create_worker(pool))
 			break;
@@ -1933,6 +1870,11 @@ restart:
 
 	del_timer_sync(&pool->mayday_timer);
 	spin_lock_irq(&pool->lock);
+	/*
+	 * This is necessary even after a new worker was just successfully
+	 * created as @pool->lock was dropped and the new worker might have
+	 * already become busy.
+	 */
 	if (need_to_create_worker(pool))
 		goto restart;
 	return true;
@@ -2020,13 +1962,8 @@ __acquires(&pool->lock)
 
 	lockdep_copy_map(&lockdep_map, &work->lockdep_map);
 #endif
-	/*
-	 * Ensure we're on the correct CPU.  DISASSOCIATED test is
-	 * necessary to avoid spurious warnings from rescuers servicing the
-	 * unbound or a disassociated pool.
-	 */
-	WARN_ON_ONCE(!(worker->flags & WORKER_UNBOUND) &&
-		     !(pool->flags & POOL_DISASSOCIATED) &&
+	/* ensure we're on the correct CPU */
+	WARN_ON_ONCE(!(pool->flags & POOL_DISASSOCIATED) &&
 		     raw_smp_processor_id() != pool->cpu);
 
 	/*
@@ -2052,17 +1989,22 @@ __acquires(&pool->lock)
 	list_del_init(&work->entry);
 
 	/*
-	 * CPU intensive works don't participate in concurrency
-	 * management.  They're the scheduler's responsibility.
+	 * CPU intensive works don't participate in concurrency management.
+	 * They're the scheduler's responsibility.  This takes @worker out
+	 * of concurrency management and the next code block will chain
+	 * execution of the pending work items.
 	 */
 	if (unlikely(cpu_intensive))
-		worker_set_flags(worker, WORKER_CPU_INTENSIVE, true);
+		worker_set_flags(worker, WORKER_CPU_INTENSIVE);
 
 	/*
-	 * Unbound pool isn't concurrency managed and work items should be
-	 * executed ASAP.  Wake up another worker if necessary.
+	 * Wake up another worker if necessary.  The condition is always
+	 * false for normal per-cpu workers since nr_running would always
+	 * be >= 1 at this point.  This is used to chain execution of the
+	 * pending work items for WORKER_NOT_RUNNING workers such as the
+	 * UNBOUND and CPU_INTENSIVE ones.
 	 */
-	if ((worker->flags & WORKER_UNBOUND) && need_more_worker(pool))
+	if (need_more_worker(pool))
 		wake_up_worker(pool);
 
 	/*
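A hedged userspace sketch (an illustration, not part of the commit) of the wake-up condition in the hunk above: need_more_worker() is modelled as "work pending and no concurrency-managed worker running", which is always false while a normal per-cpu worker executes (it keeps nr_running >= 1) but can be true for WORKER_NOT_RUNNING executors such as the UNBOUND and CPU_INTENSIVE ones:

#include <stdbool.h>
#include <stdio.h>

struct pool_model {
	int nr_running;		/* concurrency-managed workers currently running */
	bool worklist_empty;	/* no pending work items? */
};

/* model of need_more_worker(): pending work and nobody concurrency-managed running */
static bool need_more_worker_model(const struct pool_model *pool)
{
	return !pool->worklist_empty && pool->nr_running == 0;
}

int main(void)
{
	/* normal per-cpu worker executing a work item: it counts in nr_running */
	struct pool_model percpu = { .nr_running = 1, .worklist_empty = false };
	/* CPU_INTENSIVE or unbound worker: it has dropped out of nr_running */
	struct pool_model not_running = { .nr_running = 0, .worklist_empty = false };

	printf("per-cpu worker: wake another? %d\n", need_more_worker_model(&percpu));
	printf("NOT_RUNNING worker: wake another? %d\n", need_more_worker_model(&not_running));
	return 0;
}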
@@ -2218,7 +2160,7 @@ recheck:
 		}
 	} while (keep_working(pool));
 
-	worker_set_flags(worker, WORKER_PREP, false);
+	worker_set_flags(worker, WORKER_PREP);
 sleep:
 	/*
 	 * pool->lock is held and there's no work to process and no need to
@@ -2311,29 +2253,27 @@ repeat:
 		move_linked_works(work, scheduled, &n);
 
 		process_scheduled_works(rescuer);
-		spin_unlock_irq(&pool->lock);
-
-		worker_detach_from_pool(rescuer, pool);
-
-		spin_lock_irq(&pool->lock);
 
 		/*
 		 * Put the reference grabbed by send_mayday().  @pool won't
-		 * go away while we're holding its lock.
+		 * go away while we're still attached to it.
 		 */
 		put_pwq(pwq);
 
 		/*
-		 * Leave this pool.  If keep_working() is %true, notify a
+		 * Leave this pool.  If need_more_worker() is %true, notify a
 		 * regular worker; otherwise, we end up with 0 concurrency
 		 * and stalling the execution.
 		 */
-		if (keep_working(pool))
+		if (need_more_worker(pool))
 			wake_up_worker(pool);
 
 		rescuer->pool = NULL;
-		spin_unlock(&pool->lock);
-		spin_lock(&wq_mayday_lock);
+		spin_unlock_irq(&pool->lock);
+
+		worker_detach_from_pool(rescuer, pool);
+
+		spin_lock_irq(&wq_mayday_lock);
 	}
 
 	spin_unlock_irq(&wq_mayday_lock);
@@ -3458,7 +3398,7 @@ static void put_unbound_pool(struct worker_pool *pool)
 		return;
 
 	/* sanity checks */
-	if (WARN_ON(!(pool->flags & POOL_DISASSOCIATED)) ||
+	if (WARN_ON(!(pool->cpu < 0)) ||
 	    WARN_ON(!list_empty(&pool->worklist)))
 		return;
 
@@ -3524,7 +3464,7 @@ static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
 	hash_for_each_possible(unbound_pool_hash, pool, hash_node, hash) {
 		if (wqattrs_equal(pool->attrs, attrs)) {
 			pool->refcnt++;
-			goto out_unlock;
+			return pool;
 		}
 	}
 
@@ -3557,12 +3497,12 @@ static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
 		goto fail;
 
 	/* create and start the initial worker */
-	if (create_and_start_worker(pool) < 0)
+	if (!create_worker(pool))
 		goto fail;
 
 	/* install */
 	hash_add(unbound_pool_hash, &pool->hash_node, hash);
-out_unlock:
+
 	return pool;
 fail:
 	if (pool)
@@ -3591,11 +3531,6 @@ static void pwq_unbound_release_workfn(struct work_struct *work)
 	if (WARN_ON_ONCE(!(wq->flags & WQ_UNBOUND)))
 		return;
 
-	/*
-	 * Unlink @pwq.  Synchronization against wq->mutex isn't strictly
-	 * necessary on release but do it anyway.  It's easier to verify
-	 * and consistent with the linking path.
-	 */
 	mutex_lock(&wq->mutex);
 	list_del_rcu(&pwq->pwqs_node);
 	is_last = list_empty(&wq->pwqs);
@@ -3692,10 +3627,7 @@ static void link_pwq(struct pool_workqueue *pwq)
 	if (!list_empty(&pwq->pwqs_node))
 		return;
 
-	/*
-	 * Set the matching work_color.  This is synchronized with
-	 * wq->mutex to avoid confusing flush_workqueue().
-	 */
+	/* set the matching work_color */
 	pwq->work_color = wq->work_color;
 
 	/* sync max_active to the current setting */
@@ -3832,7 +3764,7 @@ int apply_workqueue_attrs(struct workqueue_struct *wq,
 	if (WARN_ON((wq->flags & __WQ_ORDERED) && !list_empty(&wq->pwqs)))
 		return -EINVAL;
 
-	pwq_tbl = kzalloc(wq_numa_tbl_len * sizeof(pwq_tbl[0]), GFP_KERNEL);
+	pwq_tbl = kzalloc(nr_node_ids * sizeof(pwq_tbl[0]), GFP_KERNEL);
 	new_attrs = alloc_workqueue_attrs(GFP_KERNEL);
 	tmp_attrs = alloc_workqueue_attrs(GFP_KERNEL);
 	if (!pwq_tbl || !new_attrs || !tmp_attrs)
@@ -4080,7 +4012,7 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
 
 	/* allocate wq and format name */
 	if (flags & WQ_UNBOUND)
-		tbl_size = wq_numa_tbl_len * sizeof(wq->numa_pwq_tbl[0]);
+		tbl_size = nr_node_ids * sizeof(wq->numa_pwq_tbl[0]);
 
 	wq = kzalloc(sizeof(*wq) + tbl_size, GFP_KERNEL);
 	if (!wq)
@@ -4122,7 +4054,7 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
 	if (flags & WQ_MEM_RECLAIM) {
 		struct worker *rescuer;
 
-		rescuer = alloc_worker();
+		rescuer = alloc_worker(NUMA_NO_NODE);
 		if (!rescuer)
 			goto err_destroy;
 
@@ -4470,8 +4402,6 @@ static void wq_unbind_fn(struct work_struct *work)
 	struct worker *worker;
 
 	for_each_cpu_worker_pool(pool, cpu) {
-		WARN_ON_ONCE(cpu != smp_processor_id());
-
 		mutex_lock(&pool->attach_mutex);
 		spin_lock_irq(&pool->lock);
 
@@ -4543,6 +4473,7 @@ static void rebind_workers(struct worker_pool *pool)
 						  pool->attrs->cpumask) < 0);
 
 	spin_lock_irq(&pool->lock);
+	pool->flags &= ~POOL_DISASSOCIATED;
 
 	for_each_pool_worker(worker, pool) {
 		unsigned int worker_flags = worker->flags;
@@ -4632,7 +4563,7 @@ static int workqueue_cpu_up_callback(struct notifier_block *nfb,
 		for_each_cpu_worker_pool(pool, cpu) {
 			if (pool->nr_workers)
 				continue;
-			if (create_and_start_worker(pool) < 0)
+			if (!create_worker(pool))
 				return NOTIFY_BAD;
 		}
 		break;
@@ -4644,15 +4575,10 @@ static int workqueue_cpu_up_callback(struct notifier_block *nfb,
 		for_each_pool(pool, pi) {
 			mutex_lock(&pool->attach_mutex);
 
-			if (pool->cpu == cpu) {
-				spin_lock_irq(&pool->lock);
-				pool->flags &= ~POOL_DISASSOCIATED;
-				spin_unlock_irq(&pool->lock);
-
+			if (pool->cpu == cpu)
 				rebind_workers(pool);
-			} else if (pool->cpu < 0) {
+			else if (pool->cpu < 0)
 				restore_unbound_workers_cpumask(pool, cpu);
-			}
 
 			mutex_unlock(&pool->attach_mutex);
 		}
@@ -4856,10 +4782,6 @@ static void __init wq_numa_init(void)
 	cpumask_var_t *tbl;
 	int node, cpu;
 
-	/* determine NUMA pwq table len - highest node id + 1 */
-	for_each_node(node)
-		wq_numa_tbl_len = max(wq_numa_tbl_len, node + 1);
-
 	if (num_possible_nodes() <= 1)
 		return;
 
@@ -4876,7 +4798,7 @@ static void __init wq_numa_init(void)
 	 * available.  Build one from cpu_to_node() which should have been
 	 * fully initialized by now.
 	 */
-	tbl = kzalloc(wq_numa_tbl_len * sizeof(tbl[0]), GFP_KERNEL);
+	tbl = kzalloc(nr_node_ids * sizeof(tbl[0]), GFP_KERNEL);
 	BUG_ON(!tbl);
 
 	for_each_node(node)
@@ -4936,7 +4858,7 @@ static int __init init_workqueues(void)
 
 		for_each_cpu_worker_pool(pool, cpu) {
 			pool->flags &= ~POOL_DISASSOCIATED;
-			BUG_ON(create_and_start_worker(pool) < 0);
+			BUG_ON(!create_worker(pool));
 		}
 	}
 