 arch/x86/kernel/cpu/mcheck/mce.c |   2
 drivers/tty/vt/vt.c              |   4
 include/linux/workqueue.h        |  11
 init/main.c                      |  10
 kernel/power/qos.c               |  11
 kernel/workqueue.c               | 103
 lib/debugobjects.c               |   2
 mm/slab.c                        |   7
 8 files changed, 102 insertions(+), 48 deletions(-)
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 132e1ec67da0..00ef43233e03 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -516,7 +516,7 @@ int mce_available(struct cpuinfo_x86 *c)
 
 static void mce_schedule_work(void)
 {
-	if (!mce_gen_pool_empty() && keventd_up())
+	if (!mce_gen_pool_empty())
 		schedule_work(&mce_work);
 }
 
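The keventd_up() guard removed above was the standard defense for code that might run before the workqueue subsystem existed. A minimal before/after sketch (hypothetical caller; example_work and example_fn are illustrative names, not from this patch):

#include <linux/workqueue.h>

static void example_fn(struct work_struct *work) { /* ... */ }
static DECLARE_WORK(example_work, example_fn);

/* Before: system_wq could still be NULL, so early callers checked
 * keventd_up() and dropped or deferred the work themselves. */
static void example_schedule_old(void)
{
	if (!keventd_up())
		return;
	schedule_work(&example_work);
}

/* After: workqueue_init_early() runs long before such callers, so
 * queueing is unconditionally safe; the item simply sits queued
 * until workqueue_init() spawns kworkers. */
static void example_schedule_new(void)
{
	schedule_work(&example_work);
}

The vt.c, debugobjects.c and slab.c hunks below delete the same guard pattern.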
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 623264445100..4c10a9df3b91 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -3923,10 +3923,6 @@ void unblank_screen(void)
  */
 static void blank_screen_t(unsigned long dummy)
 {
-	if (unlikely(!keventd_up())) {
-		mod_timer(&console_timer, jiffies + (blankinterval * HZ));
-		return;
-	}
 	blank_timer_expired = 1;
 	schedule_work(&console_work);
 }
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index d4f16cf6281c..a26cc437293c 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -603,14 +603,6 @@ static inline bool schedule_delayed_work(struct delayed_work *dwork,
 	return queue_delayed_work(system_wq, dwork, delay);
 }
 
-/**
- * keventd_up - is workqueue initialized yet?
- */
-static inline bool keventd_up(void)
-{
-	return system_wq != NULL;
-}
-
 #ifndef CONFIG_SMP
 static inline long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
 {
@@ -645,4 +637,7 @@ int workqueue_online_cpu(unsigned int cpu);
 int workqueue_offline_cpu(unsigned int cpu);
 #endif
 
+int __init workqueue_init_early(void);
+int __init workqueue_init(void);
+
 #endif
diff --git a/init/main.c b/init/main.c
index fa201166cba7..23c275cca73a 100644
--- a/init/main.c
+++ b/init/main.c
@@ -553,6 +553,14 @@ asmlinkage __visible void __init start_kernel(void)
 		"Interrupts were enabled *very* early, fixing it\n"))
 		local_irq_disable();
 	idr_init_cache();
+
+	/*
+	 * Allow workqueue creation and work item queueing/cancelling
+	 * early.  Work item execution depends on kthreads and starts after
+	 * workqueue_init().
+	 */
+	workqueue_init_early();
+
 	rcu_init();
 
 	/* trace_printk() and trace points may be used after this */
@@ -1009,6 +1017,8 @@ static noinline void __init kernel_init_freeable(void)
 
 	smp_prepare_cpus(setup_max_cpus);
 
+	workqueue_init();
+
 	do_pre_smp_initcalls();
 	lockup_detector_init();
 
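Condensed, the two-stage bring-up this hunk pair establishes looks as follows (a sketch with surrounding init work elided; not the literal code):

void __init start_kernel_sketch(void)
{
	/* ... memory allocation, cpumasks and idr are up ... */
	workqueue_init_early();	/* wqs exist; queue/cancel become legal */
	rcu_init();
	/* ... */
}

void __init kernel_init_freeable_sketch(void)
{
	/* ... */
	smp_prepare_cpus(setup_max_cpus);
	workqueue_init();	/* spawn initial kworkers, set wq_online */
	do_pre_smp_initcalls();	/* early initcalls run with live wqs */
	/* ... */
}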
diff --git a/kernel/power/qos.c b/kernel/power/qos.c
index 168ff442ebde..97b0df71303e 100644
--- a/kernel/power/qos.c
+++ b/kernel/power/qos.c
@@ -482,16 +482,7 @@ void pm_qos_update_request(struct pm_qos_request *req,
 		return;
 	}
 
-	/*
-	 * This function may be called very early during boot, for example,
-	 * from of_clk_init(), where irq needs to stay disabled.
-	 * cancel_delayed_work_sync() assumes that irq is enabled on
-	 * invocation and re-enables it on return.  Avoid calling it until
-	 * workqueue is initialized.
-	 */
-	if (keventd_up())
-		cancel_delayed_work_sync(&req->work);
-
+	cancel_delayed_work_sync(&req->work);
 	__pm_qos_update_request(req, new_value);
 }
 EXPORT_SYMBOL_GPL(pm_qos_update_request);
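The deleted comment explained why pm_qos had to dodge cancel_delayed_work_sync() with IRQs off during early boot. That burden moves into workqueue itself: the __cancel_work_timer() and pwq_adjust_max_active() hunks below keep the cancel path IRQ-state-preserving and skip the sleeping flush until kworkers exist. Roughly (a simplified sketch of the resulting cancel logic, not the verbatim function):

static bool cancel_sync_sketch(struct work_struct *work)
{
	/* pending state is grabbed under IRQ-state-saving/restoring
	 * locking, so a caller that entered with IRQs disabled leaves
	 * with IRQs still disabled */
	mark_work_canceling(work);

	/* before wq_online nothing can possibly be executing @work,
	 * so the sleeping flush is safely skipped */
	if (wq_online)
		flush_work(work);

	clear_work_data(work);
	return true;
}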
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 479d840db286..1d9fb6543a66 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -290,6 +290,8 @@ module_param_named(disable_numa, wq_disable_numa, bool, 0444);
 static bool wq_power_efficient = IS_ENABLED(CONFIG_WQ_POWER_EFFICIENT_DEFAULT);
 module_param_named(power_efficient, wq_power_efficient, bool, 0444);
 
+static bool wq_online;			/* can kworkers be created yet? */
+
 static bool wq_numa_enabled;		/* unbound NUMA affinity enabled */
 
 /* buf for wq_update_unbound_numa_attrs(), protected by CPU hotplug exclusion */
@@ -2583,6 +2585,9 @@ void flush_workqueue(struct workqueue_struct *wq)
 	};
 	int next_color;
 
+	if (WARN_ON(!wq_online))
+		return;
+
 	lock_map_acquire(&wq->lockdep_map);
 	lock_map_release(&wq->lockdep_map);
 
@@ -2843,6 +2848,9 @@ bool flush_work(struct work_struct *work)
 {
 	struct wq_barrier barr;
 
+	if (WARN_ON(!wq_online))
+		return false;
+
 	lock_map_acquire(&work->lockdep_map);
 	lock_map_release(&work->lockdep_map);
 
@@ -2913,7 +2921,13 @@ static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
 	mark_work_canceling(work);
 	local_irq_restore(flags);
 
-	flush_work(work);
+	/*
+	 * This allows canceling during early boot.  We know that @work
+	 * isn't executing.
+	 */
+	if (wq_online)
+		flush_work(work);
+
 	clear_work_data(work);
 
 	/*
@@ -3364,7 +3378,7 @@ static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
 		goto fail;
 
 	/* create and start the initial worker */
-	if (!create_worker(pool))
+	if (wq_online && !create_worker(pool))
 		goto fail;
 
 	/* install */
@@ -3429,6 +3443,7 @@ static void pwq_adjust_max_active(struct pool_workqueue *pwq)
 {
 	struct workqueue_struct *wq = pwq->wq;
 	bool freezable = wq->flags & WQ_FREEZABLE;
+	unsigned long flags;
 
 	/* for @wq->saved_max_active */
 	lockdep_assert_held(&wq->mutex);
@@ -3437,7 +3452,8 @@ static void pwq_adjust_max_active(struct pool_workqueue *pwq)
 	if (!freezable && pwq->max_active == wq->saved_max_active)
 		return;
 
-	spin_lock_irq(&pwq->pool->lock);
+	/* this function can be called during early boot w/ irq disabled */
+	spin_lock_irqsave(&pwq->pool->lock, flags);
 
 	/*
 	 * During [un]freezing, the caller is responsible for ensuring that
@@ -3460,7 +3476,7 @@ static void pwq_adjust_max_active(struct pool_workqueue *pwq)
 		pwq->max_active = 0;
 	}
 
-	spin_unlock_irq(&pwq->pool->lock);
+	spin_unlock_irqrestore(&pwq->pool->lock, flags);
 }
 
 /* initialize newly alloced @pwq which is associated with @wq and @pool */
@@ -4033,6 +4049,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
 	for (i = 0; i < WORK_NR_COLORS; i++) {
 		if (WARN_ON(pwq->nr_in_flight[i])) {
 			mutex_unlock(&wq->mutex);
+			show_workqueue_state();
 			return;
 		}
 	}
@@ -4041,6 +4058,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
 		    WARN_ON(pwq->nr_active) ||
 		    WARN_ON(!list_empty(&pwq->delayed_works))) {
 			mutex_unlock(&wq->mutex);
+			show_workqueue_state();
 			return;
 		}
 	}
@@ -5467,7 +5485,17 @@ static void __init wq_numa_init(void)
 	wq_numa_enabled = true;
 }
 
-static int __init init_workqueues(void)
+/**
+ * workqueue_init_early - early init for workqueue subsystem
+ *
+ * This is the first half of two-staged workqueue subsystem initialization
+ * and invoked as soon as the bare basics - memory allocation, cpumasks and
+ * idr are up.  It sets up all the data structures and system workqueues
+ * and allows early boot code to create workqueues and queue/cancel work
+ * items.  Actual work item execution starts only after kthreads can be
+ * created and scheduled right before early initcalls.
+ */
+int __init workqueue_init_early(void)
 {
 	int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL };
 	int i, cpu;
@@ -5479,8 +5507,6 @@ static int __init init_workqueues(void)
 
 	pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
 
-	wq_numa_init();
-
 	/* initialize CPU pools */
 	for_each_possible_cpu(cpu) {
 		struct worker_pool *pool;
@@ -5500,16 +5526,6 @@ static int __init init_workqueues(void)
 		}
 	}
 
-	/* create the initial worker */
-	for_each_online_cpu(cpu) {
-		struct worker_pool *pool;
-
-		for_each_cpu_worker_pool(pool, cpu) {
-			pool->flags &= ~POOL_DISASSOCIATED;
-			BUG_ON(!create_worker(pool));
-		}
-	}
-
 	/* create default unbound and ordered wq attrs */
 	for (i = 0; i < NR_STD_WORKER_POOLS; i++) {
 		struct workqueue_attrs *attrs;
@@ -5546,8 +5562,59 @@ static int __init init_workqueues(void)
 	       !system_power_efficient_wq ||
 	       !system_freezable_power_efficient_wq);
 
+	return 0;
+}
+
+/**
+ * workqueue_init - bring workqueue subsystem fully online
+ *
+ * This is the latter half of two-staged workqueue subsystem initialization
+ * and invoked as soon as kthreads can be created and scheduled.
+ * Workqueues have been created and work items queued on them, but there
+ * are no kworkers executing the work items yet.  Populate the worker pools
+ * with the initial workers and enable future kworker creations.
+ */
+int __init workqueue_init(void)
+{
+	struct workqueue_struct *wq;
+	struct worker_pool *pool;
+	int cpu, bkt;
+
+	/*
+	 * It'd be simpler to initialize NUMA in workqueue_init_early() but
+	 * CPU to node mapping may not be available that early on some
+	 * archs such as power and arm64.  As per-cpu pools created
+	 * previously could be missing node hint and unbound pools NUMA
+	 * affinity, fix them up.
+	 */
+	wq_numa_init();
+
+	mutex_lock(&wq_pool_mutex);
+
+	for_each_possible_cpu(cpu) {
+		for_each_cpu_worker_pool(pool, cpu) {
+			pool->node = cpu_to_node(cpu);
+		}
+	}
+
+	list_for_each_entry(wq, &workqueues, list)
+		wq_update_unbound_numa(wq, smp_processor_id(), true);
+
+	mutex_unlock(&wq_pool_mutex);
+
+	/* create the initial workers */
+	for_each_online_cpu(cpu) {
+		for_each_cpu_worker_pool(pool, cpu) {
+			pool->flags &= ~POOL_DISASSOCIATED;
+			BUG_ON(!create_worker(pool));
+		}
+	}
+
+	hash_for_each(unbound_pool_hash, bkt, pool, hash_node)
+		BUG_ON(!create_worker(pool));
+
+	wq_online = true;
 	wq_watchdog_init();
 
 	return 0;
 }
-early_initcall(init_workqueues);
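Two details in the workqueue.c changes above are worth unpacking. First, pwq_adjust_max_active() switches to irqsave locking because spin_unlock_irq() unconditionally re-enables interrupts, which would be wrong when called from workqueue_init_early() with IRQs still off. A minimal sketch of the two idioms:

static void lock_idioms_sketch(spinlock_t *lock)
{
	unsigned long flags;

	spin_lock_irq(lock);		/* disables IRQs on lock ... */
	spin_unlock_irq(lock);		/* ... and force-enables them */

	spin_lock_irqsave(lock, flags);	/* records caller's IRQ state */
	spin_unlock_irqrestore(lock, flags); /* restores exactly that */
}

Second, the contract the patch as a whole gives early boot code, shown for a hypothetical user (illustrative names, not code from this patch):

#include <linux/workqueue.h>

static void early_fn(struct work_struct *work)
{
	/* runs only once workqueue_init() has created kworkers */
}
static DECLARE_WORK(early_work, early_fn);

void __init hypothetical_early_setup(void)
{
	/* legal any time after workqueue_init_early(); the item waits
	 * on system_wq until wq_online */
	schedule_work(&early_work);

	/* also legal: __cancel_work_timer() skips the sleeping flush
	 * while !wq_online */
	cancel_work_sync(&early_work);

	/* flush_work()/flush_workqueue() before wq_online WARN and
	 * bail -- no kworker exists to complete the flush barrier */
}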
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index 056052dc8e91..04c1ef717fe0 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -199,7 +199,7 @@ static void free_object(struct debug_obj *obj)
 	 * initialized:
 	 */
 	if (obj_pool_free > ODEBUG_POOL_SIZE && obj_cache)
-		sched = keventd_up();
+		sched = 1;
 	hlist_add_head(&obj->node, &obj_pool);
 	obj_pool_free++;
 	obj_pool_used--;
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -552,12 +552,7 @@ static void start_cpu_timer(int cpu)
 {
 	struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu);
 
-	/*
-	 * When this gets called from do_initcalls via cpucache_init(),
-	 * init_workqueues() has already run, so keventd will be setup
-	 * at that time.
-	 */
-	if (keventd_up() && reap_work->work.func == NULL) {
+	if (reap_work->work.func == NULL) {
 		init_reap_node(cpu);
 		INIT_DEFERRABLE_WORK(reap_work, cache_reap);
 		schedule_delayed_work_on(cpu, reap_work,
