Diffstat (limited to 'kernel/workqueue.c')
 kernel/workqueue.c | 33 ++++++++++++---------------------
 1 file changed, 12 insertions(+), 21 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 8fdb710bfdd7..43d18cb46308 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -38,7 +38,6 @@
 #include <linux/hardirq.h>
 #include <linux/mempolicy.h>
 #include <linux/freezer.h>
-#include <linux/kallsyms.h>
 #include <linux/debug_locks.h>
 #include <linux/lockdep.h>
 #include <linux/idr.h>
@@ -48,6 +47,7 @@
 #include <linux/nodemask.h>
 #include <linux/moduleparam.h>
 #include <linux/uaccess.h>
+#include <linux/sched/isolation.h>
 
 #include "workqueue_internal.h"
 
@@ -1634,7 +1634,7 @@ static void worker_enter_idle(struct worker *worker)
 	mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
 
 	/*
-	 * Sanity check nr_running. Because wq_unbind_fn() releases
+	 * Sanity check nr_running. Because unbind_workers() releases
 	 * pool->lock between setting %WORKER_UNBOUND and zapping
 	 * nr_running, the warning may trigger spuriously. Check iff
 	 * unbind is not in progress.
@@ -4510,9 +4510,8 @@ void show_workqueue_state(void)
  * cpu comes back online.
  */
 
-static void wq_unbind_fn(struct work_struct *work)
+static void unbind_workers(int cpu)
 {
-	int cpu = smp_processor_id();
 	struct worker_pool *pool;
 	struct worker *worker;
 
@@ -4589,16 +4588,6 @@ static void rebind_workers(struct worker_pool *pool)
 
 	spin_lock_irq(&pool->lock);
 
-	/*
-	 * XXX: CPU hotplug notifiers are weird and can call DOWN_FAILED
-	 * w/o preceding DOWN_PREPARE. Work around it. CPU hotplug is
-	 * being reworked and this can go away in time.
-	 */
-	if (!(pool->flags & POOL_DISASSOCIATED)) {
-		spin_unlock_irq(&pool->lock);
-		return;
-	}
-
 	pool->flags &= ~POOL_DISASSOCIATED;
 
 	for_each_pool_worker(worker, pool) {
@@ -4709,12 +4698,13 @@ int workqueue_online_cpu(unsigned int cpu)
 
 int workqueue_offline_cpu(unsigned int cpu)
 {
-	struct work_struct unbind_work;
 	struct workqueue_struct *wq;
 
 	/* unbinding per-cpu workers should happen on the local CPU */
-	INIT_WORK_ONSTACK(&unbind_work, wq_unbind_fn);
-	queue_work_on(cpu, system_highpri_wq, &unbind_work);
+	if (WARN_ON(cpu != smp_processor_id()))
+		return -1;
+
+	unbind_workers(cpu);
 
 	/* update NUMA affinity of unbound workqueues */
 	mutex_lock(&wq_pool_mutex);
@@ -4722,9 +4712,6 @@ int workqueue_offline_cpu(unsigned int cpu)
 		wq_update_unbound_numa(wq, cpu, false);
 	mutex_unlock(&wq_pool_mutex);
 
-	/* wait for per-cpu unbinding to finish */
-	flush_work(&unbind_work);
-	destroy_work_on_stack(&unbind_work);
 	return 0;
 }
 
@@ -4957,6 +4944,10 @@ int workqueue_set_unbound_cpumask(cpumask_var_t cpumask)
 	if (!zalloc_cpumask_var(&saved_cpumask, GFP_KERNEL))
 		return -ENOMEM;
 
+	/*
+	 * Not excluding isolated cpus on purpose.
+	 * If the user wishes to include them, we allow that.
+	 */
 	cpumask_and(cpumask, cpumask, cpu_possible_mask);
 	if (!cpumask_empty(cpumask)) {
 		apply_wqattrs_lock();
@@ -5555,7 +5546,7 @@ int __init workqueue_init_early(void)
 	WARN_ON(__alignof__(struct pool_workqueue) < __alignof__(long long));
 
 	BUG_ON(!alloc_cpumask_var(&wq_unbound_cpumask, GFP_KERNEL));
-	cpumask_copy(wq_unbound_cpumask, cpu_possible_mask);
+	cpumask_copy(wq_unbound_cpumask, housekeeping_cpumask(HK_FLAG_DOMAIN));
 
 	pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
 