author     Tejun Heo <tj@kernel.org>    2016-10-19 12:12:40 -0400
committer  Tejun Heo <tj@kernel.org>    2016-10-19 12:12:40 -0400
commit     8bc4a04455969c36bf54a942ad9d28d80969ed51
tree       9fb87b458122c05f77b1fba28405761bedbcac1d /kernel/workqueue.c
parent     1001354ca34179f3db924eb66672442a173147dc
parent     2186d9f940b6a04f263a3bacd48f2a7ba96df4cf
Merge branch 'for-4.9' into for-4.10
Diffstat (limited to 'kernel/workqueue.c')
 -rw-r--r--  kernel/workqueue.c | 103
 1 file changed, 85 insertions(+), 18 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 479d840db286..1d9fb6543a66 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -290,6 +290,8 @@ module_param_named(disable_numa, wq_disable_numa, bool, 0444);
 static bool wq_power_efficient = IS_ENABLED(CONFIG_WQ_POWER_EFFICIENT_DEFAULT);
 module_param_named(power_efficient, wq_power_efficient, bool, 0444);
 
+static bool wq_online;                  /* can kworkers be created yet? */
+
 static bool wq_numa_enabled;            /* unbound NUMA affinity enabled */
 
 /* buf for wq_update_unbound_numa_attrs(), protected by CPU hotplug exclusion */
@@ -2583,6 +2585,9 @@ void flush_workqueue(struct workqueue_struct *wq)
         };
         int next_color;
 
+        if (WARN_ON(!wq_online))
+                return;
+
         lock_map_acquire(&wq->lockdep_map);
         lock_map_release(&wq->lockdep_map);
 
@@ -2843,6 +2848,9 @@ bool flush_work(struct work_struct *work)
 {
         struct wq_barrier barr;
 
+        if (WARN_ON(!wq_online))
+                return false;
+
         lock_map_acquire(&work->lockdep_map);
         lock_map_release(&work->lockdep_map);
 
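
The two hunks above make flush_workqueue() and flush_work() bail out with a warning until kworkers exist: before workqueue_init() nothing can execute the work being waited for, so waiting would simply hang. A minimal sketch of what that means for an early-boot caller, assuming hypothetical names not taken from this patch:

#include <linux/init.h>
#include <linux/printk.h>
#include <linux/workqueue.h>

static void early_setup_fn(struct work_struct *work)
{
        /* runs only once kworkers exist, i.e. after workqueue_init() */
}
static DECLARE_WORK(early_setup_work, early_setup_fn);

void __init early_setup(void)
{
        /* Queueing is fine before workqueue_init(); the item just sits queued. */
        schedule_work(&early_setup_work);

        /*
         * Flushing is not: with wq_online still false this WARNs and returns
         * false immediately instead of waiting forever.
         */
        if (!flush_work(&early_setup_work))
                pr_warn("flush before workqueue_init() does nothing\n");
}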
@@ -2913,7 +2921,13 @@ static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
         mark_work_canceling(work);
         local_irq_restore(flags);
 
-        flush_work(work);
+        /*
+         * This allows canceling during early boot.  We know that @work
+         * isn't executing.
+         */
+        if (wq_online)
+                flush_work(work);
+
         clear_work_data(work);
 
         /*
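
By contrast, canceling stays fully usable before workqueue_init(): since no kworker has run yet, the canceled item cannot be executing, so skipping flush_work() is safe. A hedged sketch of the pattern this enables (the function and work item names are illustrative only):

#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

static void probe_timeout_fn(struct work_struct *work) { /* ... */ }
static DECLARE_DELAYED_WORK(probe_timeout, probe_timeout_fn);

void __init early_probe(void)
{
        /* Arm a timeout; nothing will execute it until workqueue_init(). */
        schedule_delayed_work(&probe_timeout, 10 * HZ);

        /* ... probe succeeds, so the timeout is no longer needed ... */

        /*
         * cancel_delayed_work_sync() ends up in __cancel_work_timer(), which
         * now skips flush_work() while wq_online is false.  That is safe:
         * no kworker exists, so the item cannot be running.
         */
        cancel_delayed_work_sync(&probe_timeout);
}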
@@ -3364,7 +3378,7 @@ static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
                 goto fail;
 
         /* create and start the initial worker */
-        if (!create_worker(pool))
+        if (wq_online && !create_worker(pool))
                 goto fail;
 
         /* install */
@@ -3429,6 +3443,7 @@ static void pwq_adjust_max_active(struct pool_workqueue *pwq)
 {
         struct workqueue_struct *wq = pwq->wq;
         bool freezable = wq->flags & WQ_FREEZABLE;
+        unsigned long flags;
 
         /* for @wq->saved_max_active */
         lockdep_assert_held(&wq->mutex);
@@ -3437,7 +3452,8 @@ static void pwq_adjust_max_active(struct pool_workqueue *pwq)
         if (!freezable && pwq->max_active == wq->saved_max_active)
                 return;
 
-        spin_lock_irq(&pwq->pool->lock);
+        /* this function can be called during early boot w/ irq disabled */
+        spin_lock_irqsave(&pwq->pool->lock, flags);
 
         /*
          * During [un]freezing, the caller is responsible for ensuring that
@@ -3460,7 +3476,7 @@ static void pwq_adjust_max_active(struct pool_workqueue *pwq)
                 pwq->max_active = 0;
         }
 
-        spin_unlock_irq(&pwq->pool->lock);
+        spin_unlock_irqrestore(&pwq->pool->lock, flags);
 }
 
 /* initialize newly alloced @pwq which is associated with @wq and @pool */
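
The switch from spin_lock_irq() to spin_lock_irqsave() in pwq_adjust_max_active() is needed because the function can now run during early boot, where interrupts may already be disabled; spin_unlock_irq() would turn them back on unconditionally. The general pattern, sketched on a stand-alone lock purely for illustration:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);

static void adjust_state(void)
{
        unsigned long flags;

        /*
         * irqsave/irqrestore record whether interrupts were enabled on entry
         * and restore exactly that state, so the function is safe to call
         * both from normal context and with IRQs already off.
         */
        spin_lock_irqsave(&example_lock, flags);
        /* ... update state that must not race with interrupt context ... */
        spin_unlock_irqrestore(&example_lock, flags);
}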
@@ -4033,6 +4049,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
                 for (i = 0; i < WORK_NR_COLORS; i++) {
                         if (WARN_ON(pwq->nr_in_flight[i])) {
                                 mutex_unlock(&wq->mutex);
+                                show_workqueue_state();
                                 return;
                         }
                 }
@@ -4041,6 +4058,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
                     WARN_ON(pwq->nr_active) ||
                     WARN_ON(!list_empty(&pwq->delayed_works))) {
                         mutex_unlock(&wq->mutex);
+                        show_workqueue_state();
                         return;
                 }
         }
@@ -5467,7 +5485,17 @@ static void __init wq_numa_init(void)
         wq_numa_enabled = true;
 }
 
-static int __init init_workqueues(void)
+/**
+ * workqueue_init_early - early init for workqueue subsystem
+ *
+ * This is the first half of two-staged workqueue subsystem initialization
+ * and invoked as soon as the bare basics - memory allocation, cpumasks and
+ * idr are up.  It sets up all the data structures and system workqueues
+ * and allows early boot code to create workqueues and queue/cancel work
+ * items.  Actual work item execution starts only after kthreads can be
+ * created and scheduled right before early initcalls.
+ */
+int __init workqueue_init_early(void)
 {
         int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL };
         int i, cpu;
@@ -5479,8 +5507,6 @@ static int __init init_workqueues(void)
 
         pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
 
-        wq_numa_init();
-
         /* initialize CPU pools */
         for_each_possible_cpu(cpu) {
                 struct worker_pool *pool;
@@ -5500,16 +5526,6 @@ static int __init init_workqueues(void)
                 }
         }
 
-        /* create the initial worker */
-        for_each_online_cpu(cpu) {
-                struct worker_pool *pool;
-
-                for_each_cpu_worker_pool(pool, cpu) {
-                        pool->flags &= ~POOL_DISASSOCIATED;
-                        BUG_ON(!create_worker(pool));
-                }
-        }
-
         /* create default unbound and ordered wq attrs */
         for (i = 0; i < NR_STD_WORKER_POOLS; i++) {
                 struct workqueue_attrs *attrs;
@@ -5546,8 +5562,59 @@ static int __init init_workqueues(void)
                !system_power_efficient_wq ||
                !system_freezable_power_efficient_wq);
 
+        return 0;
+}
+
+/**
+ * workqueue_init - bring workqueue subsystem fully online
+ *
+ * This is the latter half of two-staged workqueue subsystem initialization
+ * and invoked as soon as kthreads can be created and scheduled.
+ * Workqueues have been created and work items queued on them, but there
+ * are no kworkers executing the work items yet.  Populate the worker pools
+ * with the initial workers and enable future kworker creations.
+ */
+int __init workqueue_init(void)
+{
+        struct workqueue_struct *wq;
+        struct worker_pool *pool;
+        int cpu, bkt;
+
+        /*
+         * It'd be simpler to initialize NUMA in workqueue_init_early() but
+         * CPU to node mapping may not be available that early on some
+         * archs such as power and arm64.  As per-cpu pools created
+         * previously could be missing node hint and unbound pools NUMA
+         * affinity, fix them up.
+         */
+        wq_numa_init();
+
+        mutex_lock(&wq_pool_mutex);
+
+        for_each_possible_cpu(cpu) {
+                for_each_cpu_worker_pool(pool, cpu) {
+                        pool->node = cpu_to_node(cpu);
+                }
+        }
+
+        list_for_each_entry(wq, &workqueues, list)
+                wq_update_unbound_numa(wq, smp_processor_id(), true);
+
+        mutex_unlock(&wq_pool_mutex);
+
+        /* create the initial workers */
+        for_each_online_cpu(cpu) {
+                for_each_cpu_worker_pool(pool, cpu) {
+                        pool->flags &= ~POOL_DISASSOCIATED;
+                        BUG_ON(!create_worker(pool));
+                }
+        }
+
+        hash_for_each(unbound_pool_hash, bkt, pool, hash_node)
+                BUG_ON(!create_worker(pool));
+
+        wq_online = true;
         wq_watchdog_init();
 
         return 0;
 }
-early_initcall(init_workqueues);
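
Taken together, the old single-stage init_workqueues() early_initcall becomes the two exported stages documented above. A rough sketch of the resulting boot ordering follows; it is simplified and hypothetical in shape, since the actual call sites are wired up in init/main.c by a companion patch rather than in this diff:

#include <linux/init.h>
#include <linux/workqueue.h>

/*
 * Illustrative only: condenses the boot sequence into one function purely
 * to show the ordering.  In a real kernel these calls are made from the
 * early boot path, not from a helper like this.
 */
static void __init boot_order_sketch(void)
{
        /*
         * Stage 1: as soon as allocators, cpumasks and idr work.  Pools and
         * the system workqueues exist; work can be queued and canceled, but
         * nothing executes because no kworkers have been created.
         */
        workqueue_init_early();

        /* ... remaining early setup; kthreads become available ... */

        /*
         * Stage 2: right before early initcalls.  NUMA hints are fixed up,
         * the initial per-cpu and unbound kworkers are spawned via
         * create_worker(), and wq_online flips to true so flush_work() and
         * flush_workqueue() are allowed from here on.
         */
        workqueue_init();
}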