Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--	kernel/workqueue.c | 118 ++++++++++++++++++----------------------------
1 file changed, 46 insertions(+), 72 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index e1c0e996b5ae..ef071ca73fc3 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -4369,8 +4369,8 @@ static void show_pwq(struct pool_workqueue *pwq)
 /**
  * show_workqueue_state - dump workqueue state
  *
- * Called from a sysrq handler and prints out all busy workqueues and
- * pools.
+ * Called from a sysrq handler or try_to_freeze_tasks() and prints out
+ * all busy workqueues and pools.
  */
 void show_workqueue_state(void)
 {
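[Annotation] The updated comment records a second caller besides the sysrq handler: the freezer dumps workqueue state when tasks fail to freeze because workqueue workers are still busy. A minimal sketch of that call site follows, assuming the general shape of try_to_freeze_tasks() in kernel/power/process.c; count_unfrozen_tasks() is a hypothetical helper invented here to keep the sketch self-contained, and this is not the verbatim upstream code.

/* Hedged sketch: how the freezer can reach show_workqueue_state()
 * when a freeze attempt times out with busy workqueue workers.
 */
static int try_to_freeze_tasks_sketch(bool user_only)
{
	unsigned int todo = count_unfrozen_tasks();	/* hypothetical helper */
	unsigned int wq_busy = freeze_workqueues_busy();

	if (todo) {
		pr_err("Freezing of tasks failed (%u refusing to freeze, wq_busy=%u):\n",
		       todo - wq_busy, wq_busy);
		if (wq_busy)
			show_workqueue_state();	/* the caller named in the comment above */
		return -EBUSY;
	}
	return 0;
}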
@@ -4600,95 +4600,72 @@ static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu)
 	if (!cpumask_test_cpu(cpu, pool->attrs->cpumask))
 		return;
 
-	/* is @cpu the only online CPU? */
 	cpumask_and(&cpumask, pool->attrs->cpumask, cpu_online_mask);
-	if (cpumask_weight(&cpumask) != 1)
-		return;
 
 	/* as we're called from CPU_ONLINE, the following shouldn't fail */
 	for_each_pool_worker(worker, pool)
-		WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task,
-						  pool->attrs->cpumask) < 0);
+		WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, &cpumask) < 0);
 }
 
-/*
- * Workqueues should be brought up before normal priority CPU notifiers.
- * This will be registered high priority CPU notifier.
- */
-static int workqueue_cpu_up_callback(struct notifier_block *nfb,
-					       unsigned long action,
-					       void *hcpu)
+int workqueue_prepare_cpu(unsigned int cpu)
+{
+	struct worker_pool *pool;
+
+	for_each_cpu_worker_pool(pool, cpu) {
+		if (pool->nr_workers)
+			continue;
+		if (!create_worker(pool))
+			return -ENOMEM;
+	}
+	return 0;
+}
+
+int workqueue_online_cpu(unsigned int cpu)
 {
-	int cpu = (unsigned long)hcpu;
 	struct worker_pool *pool;
 	struct workqueue_struct *wq;
 	int pi;
 
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_UP_PREPARE:
-		for_each_cpu_worker_pool(pool, cpu) {
-			if (pool->nr_workers)
-				continue;
-			if (!create_worker(pool))
-				return NOTIFY_BAD;
-		}
-		break;
-
-	case CPU_DOWN_FAILED:
-	case CPU_ONLINE:
-		mutex_lock(&wq_pool_mutex);
+	mutex_lock(&wq_pool_mutex);
 
-		for_each_pool(pool, pi) {
-			mutex_lock(&pool->attach_mutex);
+	for_each_pool(pool, pi) {
+		mutex_lock(&pool->attach_mutex);
 
-			if (pool->cpu == cpu)
-				rebind_workers(pool);
-			else if (pool->cpu < 0)
-				restore_unbound_workers_cpumask(pool, cpu);
+		if (pool->cpu == cpu)
+			rebind_workers(pool);
+		else if (pool->cpu < 0)
+			restore_unbound_workers_cpumask(pool, cpu);
 
-			mutex_unlock(&pool->attach_mutex);
-		}
+		mutex_unlock(&pool->attach_mutex);
+	}
 
-		/* update NUMA affinity of unbound workqueues */
-		list_for_each_entry(wq, &workqueues, list)
-			wq_update_unbound_numa(wq, cpu, true);
+	/* update NUMA affinity of unbound workqueues */
+	list_for_each_entry(wq, &workqueues, list)
+		wq_update_unbound_numa(wq, cpu, true);
 
-		mutex_unlock(&wq_pool_mutex);
-		break;
-	}
-	return NOTIFY_OK;
+	mutex_unlock(&wq_pool_mutex);
+	return 0;
 }
 
-/*
- * Workqueues should be brought down after normal priority CPU notifiers.
- * This will be registered as low priority CPU notifier.
- */
-static int workqueue_cpu_down_callback(struct notifier_block *nfb,
-					       unsigned long action,
-					       void *hcpu)
+int workqueue_offline_cpu(unsigned int cpu)
 {
-	int cpu = (unsigned long)hcpu;
 	struct work_struct unbind_work;
 	struct workqueue_struct *wq;
 
-	switch (action & ~CPU_TASKS_FROZEN) {
-	case CPU_DOWN_PREPARE:
-		/* unbinding per-cpu workers should happen on the local CPU */
-		INIT_WORK_ONSTACK(&unbind_work, wq_unbind_fn);
-		queue_work_on(cpu, system_highpri_wq, &unbind_work);
+	/* unbinding per-cpu workers should happen on the local CPU */
+	INIT_WORK_ONSTACK(&unbind_work, wq_unbind_fn);
+	queue_work_on(cpu, system_highpri_wq, &unbind_work);
 
-		/* update NUMA affinity of unbound workqueues */
-		mutex_lock(&wq_pool_mutex);
-		list_for_each_entry(wq, &workqueues, list)
-			wq_update_unbound_numa(wq, cpu, false);
-		mutex_unlock(&wq_pool_mutex);
+	/* update NUMA affinity of unbound workqueues */
+	mutex_lock(&wq_pool_mutex);
+	list_for_each_entry(wq, &workqueues, list)
+		wq_update_unbound_numa(wq, cpu, false);
+	mutex_unlock(&wq_pool_mutex);
 
-		/* wait for per-cpu unbinding to finish */
-		flush_work(&unbind_work);
-		destroy_work_on_stack(&unbind_work);
-		break;
-	}
-	return NOTIFY_OK;
+	/* wait for per-cpu unbinding to finish */
+	flush_work(&unbind_work);
+	destroy_work_on_stack(&unbind_work);
+	return 0;
 }
 
 #ifdef CONFIG_SMP
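[Annotation] The two multiplexed notifiers above become three single-purpose hotplug callbacks: workqueue_prepare_cpu() takes over CPU_UP_PREPARE (NOTIFY_BAD turns into a plain -ENOMEM, which the hotplug state machine can propagate and roll back), workqueue_online_cpu() takes over CPU_ONLINE and CPU_DOWN_FAILED, and workqueue_offline_cpu() takes over CPU_DOWN_PREPARE. For the core to invoke them, the hotplug state tables need matching entries; below is a sketch of that companion wiring, assuming the kernel/cpu.c table layout used elsewhere in the state machine conversion series. The field names are approximate and this change is not part of the diff shown here.

/* Assumed companion change in kernel/cpu.c: static state-table entries
 * replace the notifier registrations, so ordering is fixed by the
 * position of the CPUHP_* enum value rather than by notifier priority.
 */
static struct cpuhp_step cpuhp_bp_states[] = {
	[CPUHP_WORKQUEUE_PREP] = {
		.name		= "workqueue prepare",
		.startup	= workqueue_prepare_cpu,
		.teardown	= NULL,		/* nothing to undo on failed bringup */
	},
};

static struct cpuhp_step cpuhp_ap_states[] = {
	[CPUHP_AP_WORKQUEUE_ONLINE] = {
		.name		= "workqueue online",
		.startup	= workqueue_online_cpu,
		.teardown	= workqueue_offline_cpu,
	},
};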
@@ -5490,9 +5467,6 @@ static int __init init_workqueues(void)
 
 	pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
 
-	cpu_notifier(workqueue_cpu_up_callback, CPU_PRI_WORKQUEUE_UP);
-	hotcpu_notifier(workqueue_cpu_down_callback, CPU_PRI_WORKQUEUE_DOWN);
-
 	wq_numa_init();
 
 	/* initialize CPU pools */
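[Annotation] With the callbacks invoked straight from the hotplug core, the cpu_notifier()/hotcpu_notifier() registrations in init_workqueues() have no remaining purpose and are dropped, along with the CPU_PRI_WORKQUEUE_UP/CPU_PRI_WORKQUEUE_DOWN priorities that encoded ordering. For comparison only, a subsystem registering the same callbacks dynamically would do roughly the following; this is an illustrative alternative, not what this patch does.

/* Illustrative alternative (assumption): dynamic registration of the
 * same callbacks via the cpuhp API.  The patch itself relies on static
 * table entries, so no call like this appears in workqueue.c.
 */
static int __init workqueue_hotplug_init_sketch(void)
{
	int ret;

	/* prepare step runs on a control CPU before the new CPU boots */
	ret = cpuhp_setup_state_nocalls(CPUHP_WORKQUEUE_PREP,
					"workqueue:prepare",
					workqueue_prepare_cpu, NULL);
	if (ret)
		return ret;

	/* online/offline pair brackets the CPU's active lifetime */
	return cpuhp_setup_state_nocalls(CPUHP_AP_WORKQUEUE_ONLINE,
					 "workqueue:online",
					 workqueue_online_cpu,
					 workqueue_offline_cpu);
}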