Diffstat (limited to 'kernel/workqueue.c')
 kernel/workqueue.c | 67 +++++++++++++++++++++++++++++++++++++++++++--------------------
 1 file changed, 47 insertions(+), 20 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 11869faa6819..0400553f0d04 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -79,7 +79,9 @@ enum {
 	MAX_IDLE_WORKERS_RATIO	= 4,		/* 1/4 of busy can be idle */
 	IDLE_WORKER_TIMEOUT	= 300 * HZ,	/* keep idle ones for 5 mins */
 
-	MAYDAY_INITIAL_TIMEOUT	= HZ / 100,	/* call for help after 10ms */
+	MAYDAY_INITIAL_TIMEOUT  = HZ / 100 >= 2 ? HZ / 100 : 2,
+						/* call for help after 10ms
+						   (min two ticks) */
 	MAYDAY_INTERVAL		= HZ / 10,	/* and then every 100ms */
 	CREATE_COOLDOWN		= HZ,		/* time to breath after fail */
 	TRUSTEE_COOLDOWN	= HZ / 10,	/* for trustee draining */
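The new definition clamps the mayday delay to a minimum of two ticks: with integer division, HZ=100 gives HZ / 100 = 1 jiffy, and a 1-jiffy timer can expire almost immediately because the first tick may arrive at any point within the current jiffy. A standalone userspace sketch of the arithmetic (the HZ values are illustrative):

/*
 * Standalone sketch, not kernel code: shows why HZ / 100 needs a
 * floor.  HZ=100 truncates to 1 tick, anything below 100 to 0; the
 * clamp guarantees at least one full tick elapses before mayday.
 */
#include <stdio.h>

int main(void)
{
	const int hz_values[] = { 24, 100, 250, 300, 1000 };
	unsigned int i;

	for (i = 0; i < sizeof(hz_values) / sizeof(hz_values[0]); i++) {
		int hz = hz_values[i];
		int raw = hz / 100;			/* old definition */
		int clamped = raw >= 2 ? raw : 2;	/* new definition */

		printf("HZ=%4d: HZ/100=%d ticks, clamped=%d ticks\n",
		       hz, raw, clamped);
	}
	return 0;
}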
@@ -249,10 +251,12 @@ struct workqueue_struct *system_wq __read_mostly;
 struct workqueue_struct *system_long_wq __read_mostly;
 struct workqueue_struct *system_nrt_wq __read_mostly;
 struct workqueue_struct *system_unbound_wq __read_mostly;
+struct workqueue_struct *system_freezable_wq __read_mostly;
 EXPORT_SYMBOL_GPL(system_wq);
 EXPORT_SYMBOL_GPL(system_long_wq);
 EXPORT_SYMBOL_GPL(system_nrt_wq);
 EXPORT_SYMBOL_GPL(system_unbound_wq);
+EXPORT_SYMBOL_GPL(system_freezable_wq);
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/workqueue.h>
@@ -314,6 +318,11 @@ static inline int __next_wq_cpu(int cpu, const struct cpumask *mask,
 
 static struct debug_obj_descr work_debug_descr;
 
+static void *work_debug_hint(void *addr)
+{
+	return ((struct work_struct *) addr)->func;
+}
+
 /*
  * fixup_init is called when:
  * - an active object is initialized
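work_debug_hint() gives debugobjects something identifiable to print when a work item misbehaves: the callback pointer, which symbol-resolving printk formats turn into a function name. A simplified sketch of how a reporting path can use the new ->debug_hint member (illustrative only; not the actual lib/debugobjects.c internals):

/* Illustrative sketch: prefer the descriptor's hint (here, the work
 * callback) over a raw object pointer when warning about an object. */
static void debug_report(struct debug_obj_descr *descr, void *addr)
{
	void *hint = descr->debug_hint ? descr->debug_hint(addr) : addr;

	pr_warn("ODEBUG: %s object, hint: %pS\n", descr->name, hint);
}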
@@ -385,6 +394,7 @@ static int work_fixup_free(void *addr, enum debug_obj_state state)
 
 static struct debug_obj_descr work_debug_descr = {
 	.name		= "work_struct",
+	.debug_hint	= work_debug_hint,
 	.fixup_init	= work_fixup_init,
 	.fixup_activate	= work_fixup_activate,
 	.fixup_free	= work_fixup_free,
@@ -1281,8 +1291,14 @@ __acquires(&gcwq->lock)
 			return true;
 		spin_unlock_irq(&gcwq->lock);
 
-		/* CPU has come up inbetween, retry migration */
+		/*
+		 * We've raced with CPU hot[un]plug.  Give it a breather
+		 * and retry migration.  cond_resched() is required here;
+		 * otherwise, we might deadlock against cpu_stop trying to
+		 * bring down the CPU on non-preemptive kernel.
+		 */
 		cpu_relax();
+		cond_resched();
 	}
 }
 
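The added cond_resched() is the substantive fix here: cpu_relax() only hints the CPU pipeline and never yields, so on a non-preemptive (!CONFIG_PREEMPT) kernel a worker spinning in this loop could keep the cpu_stop task, which must run on this same CPU to finish the unplug, off the CPU forever. A hypothetical sketch of the general pattern (spin_until() is not a real kernel helper):

/* Hypothetical helper, not from this patch: a kernel busy-wait that
 * can race with same-CPU housekeeping tasks needs an explicit
 * scheduling point on non-preemptive kernels. */
static void spin_until(bool (*done)(void *data), void *data)
{
	while (!done(data)) {
		cpu_relax();	/* pipeline/SMT hint only; does not yield */
		cond_resched();	/* lets e.g. the cpu_stop thread run */
	}
}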
@@ -1356,8 +1372,10 @@ static struct worker *create_worker(struct global_cwq *gcwq, bool bind)
 	worker->id = id;
 
 	if (!on_unbound_cpu)
-		worker->task = kthread_create(worker_thread, worker,
-					      "kworker/%u:%d", gcwq->cpu, id);
+		worker->task = kthread_create_on_node(worker_thread,
+						      worker,
+						      cpu_to_node(gcwq->cpu),
+						      "kworker/%u:%d", gcwq->cpu, id);
 	else
 		worker->task = kthread_create(worker_thread, worker,
 					      "kworker/u:%d", id);
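kthread_create_on_node() lets the worker's task_struct and stack be allocated on the NUMA node that backs its CPU, rather than on whichever node the creating thread happened to run on. A hedged usage sketch for a driver-style per-CPU thread (my_thread_fn, mydata, and the thread name are hypothetical; the kthread and topology calls are real):

	struct task_struct *t;

	t = kthread_create_on_node(my_thread_fn, mydata, cpu_to_node(cpu),
				   "myhelper/%u", cpu);
	if (!IS_ERR(t)) {
		kthread_bind(t, cpu);	/* pin before first wakeup */
		wake_up_process(t);
	}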
@@ -2047,6 +2065,15 @@ repeat:
 		move_linked_works(work, scheduled, &n);
 
 		process_scheduled_works(rescuer);
+
+		/*
+		 * Leave this gcwq.  If keep_working() is %true, notify a
+		 * regular worker; otherwise, we end up with 0 concurrency
+		 * and stalling the execution.
+		 */
+		if (keep_working(gcwq))
+			wake_up_worker(gcwq);
+
 		spin_unlock_irq(&gcwq->lock);
 	}
 
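The wakeup closes a concurrency hole: the rescuer may have made more work runnable while draining, and once it moves on to the next gcwq nothing on this CPU is left running to pick that work up. keep_working() roughly asks "is there still pending work with too few running workers?"; a simplified restatement (illustrative only; the in-tree predicate has an extra high-priority case and the helper names follow this file's conventions):

/* Simplified restatement of keep_working(); not the exact predicate. */
static bool gcwq_still_needs_worker(struct global_cwq *gcwq)
{
	return !list_empty(&gcwq->worklist) &&
	       atomic_read(get_gcwq_nr_running(gcwq->cpu)) <= 1;
}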
@@ -2839,9 +2866,7 @@ static int alloc_cwqs(struct workqueue_struct *wq)
 		}
 	}
 
-	/* just in case, make sure it's actually aligned
-	 * - this is affected by PERCPU() alignment in vmlinux.lds.S
-	 */
+	/* just in case, make sure it's actually aligned */
 	BUG_ON(!IS_ALIGNED(wq->cpu_wq.v, align));
 	return wq->cpu_wq.v ? 0 : -ENOMEM;
 }
@@ -2956,7 +2981,7 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name,
 	 */
 	spin_lock(&workqueue_lock);
 
-	if (workqueue_freezing && wq->flags & WQ_FREEZEABLE)
+	if (workqueue_freezing && wq->flags & WQ_FREEZABLE)
 		for_each_cwq_cpu(cpu, wq)
 			get_cwq(cpu, wq)->max_active = 0;
 
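Apart from the spelling fix (FREEZEABLE becomes FREEZABLE throughout), this is the flag that makes a workqueue participate in system suspend: its work items are held while the system is frozen and released on thaw. A hedged allocation sketch (the queue name is hypothetical; alloc_workqueue() and the renamed flag are real):

	struct workqueue_struct *mydrv_wq;

	mydrv_wq = alloc_workqueue("mydrv", WQ_FREEZABLE, 0);
	if (!mydrv_wq)
		return -ENOMEM;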
@@ -3068,7 +3093,7 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
 
 		spin_lock_irq(&gcwq->lock);
 
-		if (!(wq->flags & WQ_FREEZEABLE) ||
+		if (!(wq->flags & WQ_FREEZABLE) ||
 		    !(gcwq->flags & GCWQ_FREEZING))
 			get_cwq(gcwq->cpu, wq)->max_active = max_active;
 
@@ -3318,7 +3343,7 @@ static int __cpuinit trustee_thread(void *__gcwq)
 	 * want to get it over with ASAP - spam rescuers, wake up as
 	 * many idlers as necessary and create new ones till the
 	 * worklist is empty.  Note that if the gcwq is frozen, there
-	 * may be frozen works in freezeable cwqs.  Don't declare
+	 * may be frozen works in freezable cwqs.  Don't declare
 	 * completion while frozen.
 	 */
 	while (gcwq->nr_workers != gcwq->nr_idle ||
@@ -3576,9 +3601,9 @@ EXPORT_SYMBOL_GPL(work_on_cpu);
 /**
  * freeze_workqueues_begin - begin freezing workqueues
  *
- * Start freezing workqueues.  After this function returns, all
- * freezeable workqueues will queue new works to their frozen_works
- * list instead of gcwq->worklist.
+ * Start freezing workqueues.  After this function returns, all freezable
+ * workqueues will queue new works to their frozen_works list instead of
+ * gcwq->worklist.
  *
  * CONTEXT:
  * Grabs and releases workqueue_lock and gcwq->lock's.
@@ -3604,7 +3629,7 @@ void freeze_workqueues_begin(void)
 		list_for_each_entry(wq, &workqueues, list) {
 			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
 
-			if (cwq && wq->flags & WQ_FREEZEABLE)
+			if (cwq && wq->flags & WQ_FREEZABLE)
 				cwq->max_active = 0;
 		}
 
@@ -3615,7 +3640,7 @@ void freeze_workqueues_begin(void)
 }
 
 /**
- * freeze_workqueues_busy - are freezeable workqueues still busy?
+ * freeze_workqueues_busy - are freezable workqueues still busy?
  *
  * Check whether freezing is complete.  This function must be called
  * between freeze_workqueues_begin() and thaw_workqueues().
@@ -3624,8 +3649,8 @@ void freeze_workqueues_begin(void)
  * Grabs and releases workqueue_lock.
  *
  * RETURNS:
- * %true if some freezeable workqueues are still busy.  %false if
- * freezing is complete.
+ * %true if some freezable workqueues are still busy.  %false if freezing
+ * is complete.
  */
 bool freeze_workqueues_busy(void)
 {
@@ -3645,7 +3670,7 @@ bool freeze_workqueues_busy(void)
 		list_for_each_entry(wq, &workqueues, list) {
 			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
 
-			if (!cwq || !(wq->flags & WQ_FREEZEABLE))
+			if (!cwq || !(wq->flags & WQ_FREEZABLE))
 				continue;
 
 			BUG_ON(cwq->nr_active < 0);
@@ -3690,7 +3715,7 @@ void thaw_workqueues(void)
 		list_for_each_entry(wq, &workqueues, list) {
 			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
 
-			if (!cwq || !(wq->flags & WQ_FREEZEABLE))
+			if (!cwq || !(wq->flags & WQ_FREEZABLE))
 				continue;
 
 			/* restore max_active and repopulate worklist */
@@ -3764,8 +3789,10 @@ static int __init init_workqueues(void)
 	system_nrt_wq = alloc_workqueue("events_nrt", WQ_NON_REENTRANT, 0);
 	system_unbound_wq = alloc_workqueue("events_unbound", WQ_UNBOUND,
 					    WQ_UNBOUND_MAX_ACTIVE);
+	system_freezable_wq = alloc_workqueue("events_freezable",
+					      WQ_FREEZABLE, 0);
 	BUG_ON(!system_wq || !system_long_wq || !system_nrt_wq ||
-	       !system_unbound_wq);
+	       !system_unbound_wq || !system_freezable_wq);
 	return 0;
 }
 early_initcall(init_workqueues);
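system_freezable_wq gives drivers a shared freezable queue so each of them does not need a private workqueue just to get suspend-safe execution. A hedged usage sketch (my_fn and my_work are hypothetical; the queue and queue_work() are what this patch adds and exports):

static void my_fn(struct work_struct *work)
{
	/* work that must not execute while the system is suspended */
}
static DECLARE_WORK(my_work, my_fn);

/* in driver code: */
queue_work(system_freezable_wq, &my_work);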