Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--	kernel/workqueue.c	46
1 file changed, 25 insertions(+), 21 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 8fdb710bfdd7..f699122dab32 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -38,7 +38,6 @@
 #include <linux/hardirq.h>
 #include <linux/mempolicy.h>
 #include <linux/freezer.h>
-#include <linux/kallsyms.h>
 #include <linux/debug_locks.h>
 #include <linux/lockdep.h>
 #include <linux/idr.h>
@@ -48,6 +47,8 @@
 #include <linux/nodemask.h>
 #include <linux/moduleparam.h>
 #include <linux/uaccess.h>
+#include <linux/sched/isolation.h>
+#include <linux/nmi.h>
 
 #include "workqueue_internal.h"
 
@@ -1634,7 +1635,7 @@ static void worker_enter_idle(struct worker *worker)
 		mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT);
 
 	/*
-	 * Sanity check nr_running. Because wq_unbind_fn() releases
+	 * Sanity check nr_running. Because unbind_workers() releases
 	 * pool->lock between setting %WORKER_UNBOUND and zapping
 	 * nr_running, the warning may trigger spuriously. Check iff
 	 * unbind is not in progress.
@@ -4463,6 +4464,12 @@ void show_workqueue_state(void)
 			if (pwq->nr_active || !list_empty(&pwq->delayed_works))
 				show_pwq(pwq);
 			spin_unlock_irqrestore(&pwq->pool->lock, flags);
+			/*
+			 * We could be printing a lot from atomic context, e.g.
+			 * sysrq-t -> show_workqueue_state(). Avoid triggering
+			 * hard lockup.
+			 */
+			touch_nmi_watchdog();
 		}
 	}
 
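The hunk above adds a touch_nmi_watchdog() call after each pool_workqueue is dumped. The pattern is general: a loop that can emit a large amount of console output while IRQs are disabled may keep a CPU busy long enough for the hard-lockup detector to fire, and touching the NMI watchdog on each iteration tells the detector the CPU is still making progress. A minimal sketch of the same pattern in an unrelated, hypothetical dump loop (my_dump_table() and struct my_entry are illustrative only, not kernel APIs):

#include <linux/nmi.h>		/* touch_nmi_watchdog() */
#include <linux/printk.h>
#include <linux/spinlock.h>

struct my_entry {		/* hypothetical */
	int id;
	int state;
};

/*
 * Dump a large table while holding a lock with IRQs off.  Console
 * output (especially over a slow serial line) can take long enough
 * that the hard-lockup detector fires, so reset it each iteration.
 */
static void my_dump_table(struct my_entry *tbl, int nr, spinlock_t *lock)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(lock, flags);
	for (i = 0; i < nr; i++) {
		pr_info("entry %d: state %d\n", tbl[i].id, tbl[i].state);
		touch_nmi_watchdog();
	}
	spin_unlock_irqrestore(lock, flags);
}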
@@ -4490,6 +4497,12 @@ void show_workqueue_state(void)
 		pr_cont("\n");
 	next_pool:
 		spin_unlock_irqrestore(&pool->lock, flags);
+		/*
+		 * We could be printing a lot from atomic context, e.g.
+		 * sysrq-t -> show_workqueue_state(). Avoid triggering
+		 * hard lockup.
+		 */
+		touch_nmi_watchdog();
 	}
 
 	rcu_read_unlock_sched();
@@ -4510,9 +4523,8 @@ void show_workqueue_state(void)
  * cpu comes back online.
  */
 
-static void wq_unbind_fn(struct work_struct *work)
+static void unbind_workers(int cpu)
 {
-	int cpu = smp_processor_id();
 	struct worker_pool *pool;
 	struct worker *worker;
 
@@ -4589,16 +4601,6 @@ static void rebind_workers(struct worker_pool *pool)
 
 	spin_lock_irq(&pool->lock);
 
-	/*
-	 * XXX: CPU hotplug notifiers are weird and can call DOWN_FAILED
-	 * w/o preceding DOWN_PREPARE. Work around it. CPU hotplug is
-	 * being reworked and this can go away in time.
-	 */
-	if (!(pool->flags & POOL_DISASSOCIATED)) {
-		spin_unlock_irq(&pool->lock);
-		return;
-	}
-
 	pool->flags &= ~POOL_DISASSOCIATED;
 
 	for_each_pool_worker(worker, pool) {
@@ -4709,12 +4711,13 @@ int workqueue_online_cpu(unsigned int cpu)
 
 int workqueue_offline_cpu(unsigned int cpu)
 {
-	struct work_struct unbind_work;
 	struct workqueue_struct *wq;
 
 	/* unbinding per-cpu workers should happen on the local CPU */
-	INIT_WORK_ONSTACK(&unbind_work, wq_unbind_fn);
-	queue_work_on(cpu, system_highpri_wq, &unbind_work);
+	if (WARN_ON(cpu != smp_processor_id()))
+		return -1;
+
+	unbind_workers(cpu);
 
 	/* update NUMA affinity of unbound workqueues */
 	mutex_lock(&wq_pool_mutex);
@@ -4722,9 +4725,6 @@ int workqueue_offline_cpu(unsigned int cpu)
 		wq_update_unbound_numa(wq, cpu, false);
 	mutex_unlock(&wq_pool_mutex);
 
-	/* wait for per-cpu unbinding to finish */
-	flush_work(&unbind_work);
-	destroy_work_on_stack(&unbind_work);
 	return 0;
 }
 
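Taken together, the two hunks above mean workqueue_offline_cpu() no longer bounces the unbind through system_highpri_wq: it is expected to run on the CPU going down (hence the WARN_ON), so it can call unbind_workers() synchronously and drop the on-stack work item entirely. Reconstructed from these hunks and their unchanged context, the function now reads roughly as follows (the workqueues-list walk is assumed from the surrounding kernel source, it is not shown in the diff):

int workqueue_offline_cpu(unsigned int cpu)
{
	struct workqueue_struct *wq;

	/* unbinding per-cpu workers should happen on the local CPU */
	if (WARN_ON(cpu != smp_processor_id()))
		return -1;

	unbind_workers(cpu);

	/* update NUMA affinity of unbound workqueues */
	mutex_lock(&wq_pool_mutex);
	list_for_each_entry(wq, &workqueues, list)
		wq_update_unbound_numa(wq, cpu, false);
	mutex_unlock(&wq_pool_mutex);

	return 0;
}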
@@ -4957,6 +4957,10 @@ int workqueue_set_unbound_cpumask(cpumask_var_t cpumask)
 	if (!zalloc_cpumask_var(&saved_cpumask, GFP_KERNEL))
 		return -ENOMEM;
 
+	/*
+	 * Not excluding isolated cpus on purpose.
+	 * If the user wishes to include them, we allow that.
+	 */
 	cpumask_and(cpumask, cpumask, cpu_possible_mask);
 	if (!cpumask_empty(cpumask)) {
 		apply_wqattrs_lock();
@@ -5555,7 +5559,7 @@ int __init workqueue_init_early(void)
 	WARN_ON(__alignof__(struct pool_workqueue) < __alignof__(long long));
 
 	BUG_ON(!alloc_cpumask_var(&wq_unbound_cpumask, GFP_KERNEL));
-	cpumask_copy(wq_unbound_cpumask, cpu_possible_mask);
+	cpumask_copy(wq_unbound_cpumask, housekeeping_cpumask(HK_FLAG_DOMAIN));
 
 	pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
 
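The last two hunks tie unbound workqueues into CPU isolation: at boot, wq_unbound_cpumask defaults to the housekeeping CPUs (those not isolated for scheduler domains, i.e. HK_FLAG_DOMAIN), while workqueue_set_unbound_cpumask() deliberately only masks against cpu_possible_mask, so an administrator writing to /sys/devices/virtual/workqueue/cpumask can still opt isolated CPUs back in. A minimal sketch of the same default-to-housekeeping pattern in a hypothetical subsystem (my_mask and my_mask_init() are illustrative, not kernel APIs):

#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/sched/isolation.h>	/* housekeeping_cpumask(), HK_FLAG_DOMAIN */

static cpumask_var_t my_mask;		/* hypothetical per-subsystem mask */

static int __init my_mask_init(void)
{
	if (!alloc_cpumask_var(&my_mask, GFP_KERNEL))
		return -ENOMEM;

	/*
	 * Default to the non-isolated (housekeeping) CPUs; explicit
	 * configuration may later widen the mask to include isolated
	 * CPUs, mirroring workqueue_set_unbound_cpumask().
	 */
	cpumask_copy(my_mask, housekeeping_cpumask(HK_FLAG_DOMAIN));
	return 0;
}
early_initcall(my_mask_init);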