author		Tejun Heo <tj@kernel.org>	2016-10-19 12:12:40 -0400
committer	Tejun Heo <tj@kernel.org>	2016-10-19 12:12:40 -0400
commit		8bc4a04455969c36bf54a942ad9d28d80969ed51 (patch)
tree		9fb87b458122c05f77b1fba28405761bedbcac1d
parent		1001354ca34179f3db924eb66672442a173147dc (diff)
parent		2186d9f940b6a04f263a3bacd48f2a7ba96df4cf (diff)

Merge branch 'for-4.9' into for-4.10
-rw-r--r--	arch/x86/kernel/cpu/mcheck/mce.c	2
-rw-r--r--	drivers/tty/vt/vt.c	4
-rw-r--r--	include/linux/workqueue.h	11
-rw-r--r--	init/main.c	10
-rw-r--r--	kernel/power/qos.c	11
-rw-r--r--	kernel/workqueue.c	103
-rw-r--r--	lib/debugobjects.c	2
-rw-r--r--	mm/slab.c	7
8 files changed, 102 insertions, 48 deletions
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index a7fdf453d895..ac27acfeaf7f 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -499,7 +499,7 @@ int mce_available(struct cpuinfo_x86 *c)
 
 static void mce_schedule_work(void)
 {
-	if (!mce_gen_pool_empty() && keventd_up())
+	if (!mce_gen_pool_empty())
 		schedule_work(&mce_work);
 }
 
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 06fb39c1d6dd..95528461a021 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -3929,10 +3929,6 @@ void unblank_screen(void)
  */
 static void blank_screen_t(unsigned long dummy)
 {
-	if (unlikely(!keventd_up())) {
-		mod_timer(&console_timer, jiffies + (blankinterval * HZ));
-		return;
-	}
 	blank_timer_expired = 1;
 	schedule_work(&console_work);
 }
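
Both call sites above drop their keventd_up() guards: once workqueue_init_early() (introduced further down in this merge) has run, schedule_work() is always legal and merely defers execution until kworkers exist. As a rough, hedged sketch of the resulting pattern (the function and work item names are hypothetical, not part of this commit):

#include <linux/workqueue.h>

static void example_fn(struct work_struct *work)
{
	/* Executes once kworkers are available. */
}
static DECLARE_WORK(example_work, example_fn);

/* Hypothetical event handler that may fire at any point after
 * workqueue_init_early(), even before any kworker exists. */
static void example_event(void)
{
	/*
	 * No keventd_up() guard required any more: the work item is
	 * accepted immediately and executed once workqueue_init() has
	 * populated the worker pools.
	 */
	schedule_work(&example_work);
}
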
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index fc6e22186405..2cddd38794b2 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -590,14 +590,6 @@ static inline bool schedule_delayed_work(struct delayed_work *dwork,
 	return queue_delayed_work(system_wq, dwork, delay);
 }
 
-/**
- * keventd_up - is workqueue initialized yet?
- */
-static inline bool keventd_up(void)
-{
-	return system_wq != NULL;
-}
-
 #ifndef CONFIG_SMP
 static inline long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
 {
@@ -632,4 +624,7 @@ int workqueue_online_cpu(unsigned int cpu);
 int workqueue_offline_cpu(unsigned int cpu);
 #endif
 
+int __init workqueue_init_early(void);
+int __init workqueue_init(void);
+
 #endif
diff --git a/init/main.c b/init/main.c
index 2858be732f6d..9af9274525b5 100644
--- a/init/main.c
+++ b/init/main.c
@@ -551,6 +551,14 @@ asmlinkage __visible void __init start_kernel(void)
551 "Interrupts were enabled *very* early, fixing it\n")) 551 "Interrupts were enabled *very* early, fixing it\n"))
552 local_irq_disable(); 552 local_irq_disable();
553 idr_init_cache(); 553 idr_init_cache();
554
555 /*
556 * Allow workqueue creation and work item queueing/cancelling
557 * early. Work item execution depends on kthreads and starts after
558 * workqueue_init().
559 */
560 workqueue_init_early();
561
554 rcu_init(); 562 rcu_init();
555 563
556 /* trace_printk() and trace points may be used after this */ 564 /* trace_printk() and trace points may be used after this */
@@ -1006,6 +1014,8 @@ static noinline void __init kernel_init_freeable(void)
 
 	smp_prepare_cpus(setup_max_cpus);
 
+	workqueue_init();
+
 	do_pre_smp_initcalls();
 	lockup_detector_init();
 
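
The two calls above bracket the early window: between workqueue_init_early() in start_kernel() and workqueue_init() in kernel_init_freeable(), workqueues may be created and work items queued or cancelled, but nothing executes yet and flushing is not allowed. A hedged sketch of what code in that window can rely on; the workqueue and item names are hypothetical:

#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;

static void example_fn(struct work_struct *work)
{
	/* Executes only after workqueue_init() has spawned kworkers. */
}
static DECLARE_WORK(example_work, example_fn);

/* Hypothetically called from start_kernel() after workqueue_init_early()
 * but before workqueue_init(). */
static void __init example_between_stages(void)
{
	example_wq = alloc_workqueue("example", 0, 0);	/* allowed early */
	if (example_wq)
		queue_work(example_wq, &example_work);	/* queued; runs later */

	/*
	 * flush_work()/flush_workqueue() must not be used here: with
	 * wq_online still false they WARN and return without waiting.
	 */
}
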
diff --git a/kernel/power/qos.c b/kernel/power/qos.c
index 168ff442ebde..97b0df71303e 100644
--- a/kernel/power/qos.c
+++ b/kernel/power/qos.c
@@ -482,16 +482,7 @@ void pm_qos_update_request(struct pm_qos_request *req,
 		return;
 	}
 
-	/*
-	 * This function may be called very early during boot, for example,
-	 * from of_clk_init(), where irq needs to stay disabled.
-	 * cancel_delayed_work_sync() assumes that irq is enabled on
-	 * invocation and re-enables it on return.  Avoid calling it until
-	 * workqueue is initialized.
-	 */
-	if (keventd_up())
-		cancel_delayed_work_sync(&req->work);
-
+	cancel_delayed_work_sync(&req->work);
 	__pm_qos_update_request(req, new_value);
 }
 EXPORT_SYMBOL_GPL(pm_qos_update_request);
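
pm_qos can now call cancel_delayed_work_sync() unconditionally because, as the __cancel_work_timer() hunk below shows, the flush step is skipped while wq_online is false and the item cannot be executing that early anyway. A small sketch of the same cancel-then-requeue pattern, with hypothetical names:

#include <linux/jiffies.h>
#include <linux/workqueue.h>

static void example_dwork_fn(struct work_struct *work)
{
	/* Only ever runs once kworkers exist. */
}
static DECLARE_DELAYED_WORK(example_dwork, example_dwork_fn);

static void example_update(void)
{
	/*
	 * Safe even before workqueue_init(): __cancel_work_timer() skips
	 * flush_work() while wq_online is false, because the item cannot
	 * be executing before any kworker has been created.
	 */
	cancel_delayed_work_sync(&example_dwork);
	schedule_delayed_work(&example_dwork, HZ);
}
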
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 479d840db286..1d9fb6543a66 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -290,6 +290,8 @@ module_param_named(disable_numa, wq_disable_numa, bool, 0444);
 static bool wq_power_efficient = IS_ENABLED(CONFIG_WQ_POWER_EFFICIENT_DEFAULT);
 module_param_named(power_efficient, wq_power_efficient, bool, 0444);
 
+static bool wq_online;			/* can kworkers be created yet? */
+
 static bool wq_numa_enabled;		/* unbound NUMA affinity enabled */
 
 /* buf for wq_update_unbound_numa_attrs(), protected by CPU hotplug exclusion */
@@ -2583,6 +2585,9 @@ void flush_workqueue(struct workqueue_struct *wq)
 	};
 	int next_color;
 
+	if (WARN_ON(!wq_online))
+		return;
+
 	lock_map_acquire(&wq->lockdep_map);
 	lock_map_release(&wq->lockdep_map);
 
@@ -2843,6 +2848,9 @@ bool flush_work(struct work_struct *work)
 {
 	struct wq_barrier barr;
 
+	if (WARN_ON(!wq_online))
+		return false;
+
 	lock_map_acquire(&work->lockdep_map);
 	lock_map_release(&work->lockdep_map);
 
@@ -2913,7 +2921,13 @@ static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
 	mark_work_canceling(work);
 	local_irq_restore(flags);
 
-	flush_work(work);
+	/*
+	 * This allows canceling during early boot.  We know that @work
+	 * isn't executing.
+	 */
+	if (wq_online)
+		flush_work(work);
+
 	clear_work_data(work);
 
 	/*
@@ -3364,7 +3378,7 @@ static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs)
 		goto fail;
 
 	/* create and start the initial worker */
-	if (!create_worker(pool))
+	if (wq_online && !create_worker(pool))
 		goto fail;
 
 	/* install */
@@ -3429,6 +3443,7 @@ static void pwq_adjust_max_active(struct pool_workqueue *pwq)
 {
 	struct workqueue_struct *wq = pwq->wq;
 	bool freezable = wq->flags & WQ_FREEZABLE;
+	unsigned long flags;
 
 	/* for @wq->saved_max_active */
 	lockdep_assert_held(&wq->mutex);
@@ -3437,7 +3452,8 @@ static void pwq_adjust_max_active(struct pool_workqueue *pwq)
 	if (!freezable && pwq->max_active == wq->saved_max_active)
 		return;
 
-	spin_lock_irq(&pwq->pool->lock);
+	/* this function can be called during early boot w/ irq disabled */
+	spin_lock_irqsave(&pwq->pool->lock, flags);
 
 	/*
 	 * During [un]freezing, the caller is responsible for ensuring that
@@ -3460,7 +3476,7 @@ static void pwq_adjust_max_active(struct pool_workqueue *pwq)
 		pwq->max_active = 0;
 	}
 
-	spin_unlock_irq(&pwq->pool->lock);
+	spin_unlock_irqrestore(&pwq->pool->lock, flags);
 }
 
 /* initialize newly alloced @pwq which is associated with @wq and @pool */
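
The irqsave conversion is needed because pwq_adjust_max_active() can now run from workqueue_init_early() with interrupts still disabled; spin_unlock_irq() would unconditionally re-enable them, while the irqsave/irqrestore pair preserves the caller's IRQ state. A self-contained sketch of the difference (hypothetical lock and counter, not part of this commit):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);
static int example_counter;

/* May be reached both from normal process context and, via early boot
 * paths, from contexts where local IRQs are still disabled. */
static void example_adjust(void)
{
	unsigned long flags;

	/*
	 * spin_unlock_irq() would unconditionally enable local IRQs,
	 * which is wrong if the caller had them disabled.  irqsave /
	 * irqrestore put the IRQ flags back exactly as they were found.
	 */
	spin_lock_irqsave(&example_lock, flags);
	example_counter++;
	spin_unlock_irqrestore(&example_lock, flags);
}
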
@@ -4033,6 +4049,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
 	for (i = 0; i < WORK_NR_COLORS; i++) {
 		if (WARN_ON(pwq->nr_in_flight[i])) {
 			mutex_unlock(&wq->mutex);
+			show_workqueue_state();
 			return;
 		}
 	}
@@ -4041,6 +4058,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
 		    WARN_ON(pwq->nr_active) ||
 		    WARN_ON(!list_empty(&pwq->delayed_works))) {
 			mutex_unlock(&wq->mutex);
+			show_workqueue_state();
 			return;
 		}
 	}
@@ -5467,7 +5485,17 @@ static void __init wq_numa_init(void)
 	wq_numa_enabled = true;
 }
 
-static int __init init_workqueues(void)
+/**
+ * workqueue_init_early - early init for workqueue subsystem
+ *
+ * This is the first half of two-staged workqueue subsystem initialization
+ * and invoked as soon as the bare basics - memory allocation, cpumasks and
+ * idr are up.  It sets up all the data structures and system workqueues
+ * and allows early boot code to create workqueues and queue/cancel work
+ * items.  Actual work item execution starts only after kthreads can be
+ * created and scheduled right before early initcalls.
+ */
+int __init workqueue_init_early(void)
 {
 	int std_nice[NR_STD_WORKER_POOLS] = { 0, HIGHPRI_NICE_LEVEL };
 	int i, cpu;
@@ -5479,8 +5507,6 @@ static int __init init_workqueues(void)
 
 	pwq_cache = KMEM_CACHE(pool_workqueue, SLAB_PANIC);
 
-	wq_numa_init();
-
 	/* initialize CPU pools */
 	for_each_possible_cpu(cpu) {
 		struct worker_pool *pool;
@@ -5500,16 +5526,6 @@ static int __init init_workqueues(void)
 		}
 	}
 
-	/* create the initial worker */
-	for_each_online_cpu(cpu) {
-		struct worker_pool *pool;
-
-		for_each_cpu_worker_pool(pool, cpu) {
-			pool->flags &= ~POOL_DISASSOCIATED;
-			BUG_ON(!create_worker(pool));
-		}
-	}
-
 	/* create default unbound and ordered wq attrs */
 	for (i = 0; i < NR_STD_WORKER_POOLS; i++) {
 		struct workqueue_attrs *attrs;
@@ -5546,8 +5562,59 @@ static int __init init_workqueues(void)
 	       !system_power_efficient_wq ||
 	       !system_freezable_power_efficient_wq);
 
+	return 0;
+}
+
+/**
+ * workqueue_init - bring workqueue subsystem fully online
+ *
+ * This is the latter half of two-staged workqueue subsystem initialization
+ * and invoked as soon as kthreads can be created and scheduled.
+ * Workqueues have been created and work items queued on them, but there
+ * are no kworkers executing the work items yet.  Populate the worker pools
+ * with the initial workers and enable future kworker creations.
+ */
+int __init workqueue_init(void)
+{
+	struct workqueue_struct *wq;
+	struct worker_pool *pool;
+	int cpu, bkt;
+
+	/*
+	 * It'd be simpler to initialize NUMA in workqueue_init_early() but
+	 * CPU to node mapping may not be available that early on some
+	 * archs such as power and arm64.  As per-cpu pools created
+	 * previously could be missing node hint and unbound pools NUMA
+	 * affinity, fix them up.
+	 */
+	wq_numa_init();
+
+	mutex_lock(&wq_pool_mutex);
+
+	for_each_possible_cpu(cpu) {
+		for_each_cpu_worker_pool(pool, cpu) {
+			pool->node = cpu_to_node(cpu);
+		}
+	}
+
+	list_for_each_entry(wq, &workqueues, list)
+		wq_update_unbound_numa(wq, smp_processor_id(), true);
+
+	mutex_unlock(&wq_pool_mutex);
+
+	/* create the initial workers */
+	for_each_online_cpu(cpu) {
+		for_each_cpu_worker_pool(pool, cpu) {
+			pool->flags &= ~POOL_DISASSOCIATED;
+			BUG_ON(!create_worker(pool));
+		}
+	}
+
+	hash_for_each(unbound_pool_hash, bkt, pool, hash_node)
+		BUG_ON(!create_worker(pool));
+
+	wq_online = true;
 	wq_watchdog_init();
 
 	return 0;
 }
-early_initcall(init_workqueues);
diff --git a/lib/debugobjects.c b/lib/debugobjects.c
index a8e12601eb37..8458ec9d3d9f 100644
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -199,7 +199,7 @@ static void free_object(struct debug_obj *obj)
 	 * initialized:
 	 */
 	if (obj_pool_free > ODEBUG_POOL_SIZE && obj_cache)
-		sched = keventd_up();
+		sched = 1;
 	hlist_add_head(&obj->node, &obj_pool);
 	obj_pool_free++;
 	obj_pool_used--;
diff --git a/mm/slab.c b/mm/slab.c
index 090fb26b3a39..6508b4dab99d 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -550,12 +550,7 @@ static void start_cpu_timer(int cpu)
 {
 	struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu);
 
-	/*
-	 * When this gets called from do_initcalls via cpucache_init(),
-	 * init_workqueues() has already run, so keventd will be setup
-	 * at that time.
-	 */
-	if (keventd_up() && reap_work->work.func == NULL) {
+	if (reap_work->work.func == NULL) {
 		init_reap_node(cpu);
 		INIT_DEFERRABLE_WORK(reap_work, cache_reap);
 		schedule_delayed_work_on(cpu, reap_work,
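
With workqueues fully online before slab's initcalls and CPU-hotplug callbacks run, start_cpu_timer() only needs its lazy one-time-init check. A rough sketch of that per-CPU deferrable-work pattern, using hypothetical names in place of the slab internals:

#include <linux/jiffies.h>
#include <linux/percpu.h>
#include <linux/workqueue.h>

static DEFINE_PER_CPU(struct delayed_work, example_reap_work);

static void example_reap(struct work_struct *w)
{
	/* Periodic per-CPU housekeeping; re-arms itself. */
	schedule_delayed_work(to_delayed_work(w), round_jiffies_relative(2 * HZ));
}

/* Mirrors the start_cpu_timer() shape: initialize and arm once per CPU. */
static void example_start_cpu_timer(int cpu)
{
	struct delayed_work *dw = &per_cpu(example_reap_work, cpu);

	if (dw->work.func == NULL) {
		INIT_DEFERRABLE_WORK(dw, example_reap);
		schedule_delayed_work_on(cpu, dw,
					 round_jiffies_relative(HZ + cpu));
	}
}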