Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--	kernel/workqueue.c	57
1 file changed, 41 insertions(+), 16 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 8ee6ec82f88..ee6578b578a 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -79,7 +79,9 @@ enum {
 	MAX_IDLE_WORKERS_RATIO	= 4,		/* 1/4 of busy can be idle */
 	IDLE_WORKER_TIMEOUT	= 300 * HZ,	/* keep idle ones for 5 mins */
 
-	MAYDAY_INITIAL_TIMEOUT	= HZ / 100,	/* call for help after 10ms */
+	MAYDAY_INITIAL_TIMEOUT	= HZ / 100 >= 2 ? HZ / 100 : 2,
+						/* call for help after 10ms
+						   (min two ticks) */
 	MAYDAY_INTERVAL		= HZ / 10,	/* and then every 100ms */
 	CREATE_COOLDOWN		= HZ,		/* time to breath after fail */
 	TRUSTEE_COOLDOWN	= HZ / 10,	/* for trustee draining */
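
The ternary matters on low-HZ configs: with HZ=100 the old value, HZ / 100, was a single jiffy, and a one-jiffy timer can expire almost immediately after being armed, triggering the mayday path too early. A stand-alone sketch of how the clamp plays out for common CONFIG_HZ values (the macro below is a hypothetical copy of the expression, for illustration only):

    /* Hypothetical stand-alone demo of the clamp above. */
    #include <stdio.h>

    #define MAYDAY_TIMEOUT(hz)	((hz) / 100 >= 2 ? (hz) / 100 : 2)

    int main(void)
    {
    	static const int hz_values[] = { 100, 250, 300, 1000 };

    	for (int i = 0; i < 4; i++)
    		/* HZ=100 used to yield 1 jiffy; the clamp now
    		 * guarantees at least two ticks of grace. */
    		printf("HZ=%4d -> %d jiffies\n",
    		       hz_values[i], MAYDAY_TIMEOUT(hz_values[i]));
    	return 0;
    }
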
@@ -768,7 +770,11 @@ static inline void worker_clr_flags(struct worker *worker, unsigned int flags)
 
 	worker->flags &= ~flags;
 
-	/* if transitioning out of NOT_RUNNING, increment nr_running */
+	/*
+	 * If transitioning out of NOT_RUNNING, increment nr_running.  Note
+	 * that the nested NOT_RUNNING is not a noop.  NOT_RUNNING is mask
+	 * of multiple flags, not a single flag.
+	 */
 	if ((flags & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING))
 		if (!(worker->flags & WORKER_NOT_RUNNING))
 			atomic_inc(get_gcwq_nr_running(gcwq->cpu));
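
The point of the expanded comment is easy to miss: clearing one member of the NOT_RUNNING mask while another member stays set must not bump nr_running, hence the nested check. A stand-alone model (the flag values are hypothetical; the real definitions live at the top of kernel/workqueue.c):

    #include <stdio.h>

    /* Hypothetical flag values; NOT_RUNNING is a mask of several flags. */
    enum {
    	WORKER_PREP		= 1 << 0,
    	WORKER_CPU_INTENSIVE	= 1 << 1,
    	WORKER_NOT_RUNNING	= WORKER_PREP | WORKER_CPU_INTENSIVE,
    };

    int main(void)
    {
    	unsigned int worker_flags = WORKER_PREP | WORKER_CPU_INTENSIVE;
    	unsigned int clear = WORKER_PREP;	/* clear only one member */
    	unsigned int oflags = worker_flags;

    	worker_flags &= ~clear;

    	/* CPU_INTENSIVE is still set, so the worker is still not
    	 * running: the nested check suppresses the increment. */
    	if ((clear & WORKER_NOT_RUNNING) && (oflags & WORKER_NOT_RUNNING) &&
    	    !(worker_flags & WORKER_NOT_RUNNING))
    		puts("increment nr_running");
    	else
    		puts("no increment (mask not fully cleared)");
    	return 0;
    }
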
@@ -1840,7 +1846,7 @@ __acquires(&gcwq->lock)
 	spin_unlock_irq(&gcwq->lock);
 
 	work_clear_pending(work);
-	lock_map_acquire(&cwq->wq->lockdep_map);
+	lock_map_acquire_read(&cwq->wq->lockdep_map);
 	lock_map_acquire(&lockdep_map);
 	trace_workqueue_execute_start(work);
 	f(work);
@@ -2043,6 +2049,15 @@ repeat:
 			move_linked_works(work, scheduled, &n);
 
 		process_scheduled_works(rescuer);
+
+		/*
+		 * Leave this gcwq.  If keep_working() is %true, notify a
+		 * regular worker; otherwise, we end up with 0 concurrency
+		 * and stalling the execution.
+		 */
+		if (keep_working(gcwq))
+			wake_up_worker(gcwq);
+
 		spin_unlock_irq(&gcwq->lock);
 	}
 
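
Why the wake-up: the rescuer is not accounted as a running worker, so once it finishes its batch and leaves, a gcwq with work still queued can be left with zero concurrency and stall. A stand-alone model of that situation (all names and fields below are hypothetical simplifications of the real gcwq state):

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical simplification of what the rescuer checks on exit. */
    struct gcwq_model {
    	int pending;	/* work items still on the worklist */
    	int nr_running;	/* workers currently executing; rescuer excluded */
    };

    static bool keep_working(const struct gcwq_model *g)
    {
    	/* work remains but no regular worker is there to run it */
    	return g->pending > 0 && g->nr_running == 0;
    }

    int main(void)
    {
    	/* the rescuer has drained its batch and is about to leave */
    	struct gcwq_model g = { .pending = 3, .nr_running = 0 };

    	if (keep_working(&g))
    		puts("wake a regular worker before leaving");
    	else
    		puts("safe to leave");
    	return 0;
    }
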
@@ -2384,8 +2399,18 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
 	insert_wq_barrier(cwq, barr, work, worker);
 	spin_unlock_irq(&gcwq->lock);
 
-	lock_map_acquire(&cwq->wq->lockdep_map);
+	/*
+	 * If @max_active is 1 or rescuer is in use, flushing another work
+	 * item on the same workqueue may lead to deadlock.  Make sure the
+	 * flusher is not running on the same workqueue by verifying write
+	 * access.
+	 */
+	if (cwq->wq->saved_max_active == 1 || cwq->wq->flags & WQ_RESCUER)
+		lock_map_acquire(&cwq->wq->lockdep_map);
+	else
+		lock_map_acquire_read(&cwq->wq->lockdep_map);
 	lock_map_release(&cwq->wq->lockdep_map);
+
 	return true;
 already_gone:
 	spin_unlock_irq(&gcwq->lock);
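
Taken together with the lock_map_acquire_read() switch in process_one_work() above, the scheme is: executing a work item holds the workqueue's lockdep map for read, and a flusher holds it for write only when a deadlock is actually possible (max_active of 1, or a rescuer as the sole execution context). A toy model of the resulting reports (this is not the lockdep API, just the read/write intuition):

    #include <stdbool.h>
    #include <stdio.h>

    enum acq { ACQ_READ, ACQ_WRITE };	/* hypothetical model only */

    /* read/read nests safely; a write on either side conflicts */
    static bool lockdep_reports(enum acq held, enum acq wanted)
    {
    	return held == ACQ_WRITE || wanted == ACQ_WRITE;
    }

    int main(void)
    {
    	/* work running (read) vs relaxed flush (read): no report */
    	printf("relaxed flush: %s\n",
    	       lockdep_reports(ACQ_READ, ACQ_READ) ? "warn" : "ok");

    	/* work running (read) vs strict flush (write): report, which
    	 * is exactly the max_active==1 / WQ_RESCUER deadlock case */
    	printf("strict flush:  %s\n",
    	       lockdep_reports(ACQ_READ, ACQ_WRITE) ? "warn" : "ok");
    	return 0;
    }
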
@@ -2942,7 +2967,7 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name,
 	 */
 	spin_lock(&workqueue_lock);
 
-	if (workqueue_freezing && wq->flags & WQ_FREEZEABLE)
+	if (workqueue_freezing && wq->flags & WQ_FREEZABLE)
 		for_each_cwq_cpu(cpu, wq)
 			get_cwq(cpu, wq)->max_active = 0;
 
@@ -3054,7 +3079,7 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
 
 		spin_lock_irq(&gcwq->lock);
 
-		if (!(wq->flags & WQ_FREEZEABLE) ||
+		if (!(wq->flags & WQ_FREEZABLE) ||
 		    !(gcwq->flags & GCWQ_FREEZING))
 			get_cwq(gcwq->cpu, wq)->max_active = max_active;
 
@@ -3304,7 +3329,7 @@ static int __cpuinit trustee_thread(void *__gcwq)
 	 * want to get it over with ASAP - spam rescuers, wake up as
 	 * many idlers as necessary and create new ones till the
 	 * worklist is empty.  Note that if the gcwq is frozen, there
-	 * may be frozen works in freezeable cwqs.  Don't declare
+	 * may be frozen works in freezable cwqs.  Don't declare
 	 * completion while frozen.
 	 */
 	while (gcwq->nr_workers != gcwq->nr_idle ||
@@ -3562,9 +3587,9 @@ EXPORT_SYMBOL_GPL(work_on_cpu);
 /**
  * freeze_workqueues_begin - begin freezing workqueues
  *
- * Start freezing workqueues.  After this function returns, all
- * freezeable workqueues will queue new works to their frozen_works
- * list instead of gcwq->worklist.
+ * Start freezing workqueues.  After this function returns, all freezable
+ * workqueues will queue new works to their frozen_works list instead of
+ * gcwq->worklist.
  *
  * CONTEXT:
  * Grabs and releases workqueue_lock and gcwq->lock's.
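
For orientation, this function and freeze_workqueues_busy()/thaw_workqueues() below form the freezer-facing sequence. A minimal sketch of the documented calling order (the caller and its retry policy here are illustrative; the real driver is the PM freezer, which adds timeout and abort handling):

    /* Illustrative calling sequence only, not the real PM freezer. */
    static void freeze_sketch(void)
    {
    	freeze_workqueues_begin();	/* new works -> frozen_works */

    	while (freeze_workqueues_busy())
    		msleep(10);		/* in-flight works draining */

    	/* ... snapshot/suspend happens here ... */

    	thaw_workqueues();		/* replay frozen works */
    }
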
@@ -3590,7 +3615,7 @@ void freeze_workqueues_begin(void)
 		list_for_each_entry(wq, &workqueues, list) {
 			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
 
-			if (cwq && wq->flags & WQ_FREEZEABLE)
+			if (cwq && wq->flags & WQ_FREEZABLE)
 				cwq->max_active = 0;
 		}
 
@@ -3601,7 +3626,7 @@ void freeze_workqueues_begin(void)
 }
 
 /**
- * freeze_workqueues_busy - are freezeable workqueues still busy?
+ * freeze_workqueues_busy - are freezable workqueues still busy?
  *
  * Check whether freezing is complete.  This function must be called
  * between freeze_workqueues_begin() and thaw_workqueues().
@@ -3610,8 +3635,8 @@ void freeze_workqueues_begin(void)
  * Grabs and releases workqueue_lock.
  *
  * RETURNS:
- * %true if some freezeable workqueues are still busy.  %false if
- * freezing is complete.
+ * %true if some freezable workqueues are still busy.  %false if freezing
+ * is complete.
  */
 bool freeze_workqueues_busy(void)
 {
@@ -3631,7 +3656,7 @@ bool freeze_workqueues_busy(void)
 		list_for_each_entry(wq, &workqueues, list) {
 			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
 
-			if (!cwq || !(wq->flags & WQ_FREEZEABLE))
+			if (!cwq || !(wq->flags & WQ_FREEZABLE))
 				continue;
 
 			BUG_ON(cwq->nr_active < 0);
@@ -3676,7 +3701,7 @@ void thaw_workqueues(void)
 		list_for_each_entry(wq, &workqueues, list) {
 			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
 
-			if (!cwq || !(wq->flags & WQ_FREEZEABLE))
+			if (!cwq || !(wq->flags & WQ_FREEZABLE))
 				continue;
 
 			/* restore max_active and repopulate worklist */