diff options
Diffstat (limited to 'kernel/workqueue.c')
| -rw-r--r-- | kernel/workqueue.c | 37 |
1 file changed, 24 insertions(+), 13 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 11869faa6819..ee6578b578ad 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
| @@ -79,7 +79,9 @@ enum { | |||
| 79 | MAX_IDLE_WORKERS_RATIO = 4, /* 1/4 of busy can be idle */ | 79 | MAX_IDLE_WORKERS_RATIO = 4, /* 1/4 of busy can be idle */ |
| 80 | IDLE_WORKER_TIMEOUT = 300 * HZ, /* keep idle ones for 5 mins */ | 80 | IDLE_WORKER_TIMEOUT = 300 * HZ, /* keep idle ones for 5 mins */ |
| 81 | 81 | ||
| 82 | MAYDAY_INITIAL_TIMEOUT = HZ / 100, /* call for help after 10ms */ | 82 | MAYDAY_INITIAL_TIMEOUT = HZ / 100 >= 2 ? HZ / 100 : 2, |
| 83 | /* call for help after 10ms | ||
| 84 | (min two ticks) */ | ||
| 83 | MAYDAY_INTERVAL = HZ / 10, /* and then every 100ms */ | 85 | MAYDAY_INTERVAL = HZ / 10, /* and then every 100ms */ |
| 84 | CREATE_COOLDOWN = HZ, /* time to breath after fail */ | 86 | CREATE_COOLDOWN = HZ, /* time to breath after fail */ |
| 85 | TRUSTEE_COOLDOWN = HZ / 10, /* for trustee draining */ | 87 | TRUSTEE_COOLDOWN = HZ / 10, /* for trustee draining */ |
| @@ -2047,6 +2049,15 @@ repeat: | |||
| 2047 | move_linked_works(work, scheduled, &n); | 2049 | move_linked_works(work, scheduled, &n); |
| 2048 | 2050 | ||
| 2049 | process_scheduled_works(rescuer); | 2051 | process_scheduled_works(rescuer); |
| 2052 | |||
| 2053 | /* | ||
| 2054 | * Leave this gcwq. If keep_working() is %true, notify a | ||
| 2055 | * regular worker; otherwise, we end up with 0 concurrency | ||
| 2056 | * and stalling the execution. | ||
| 2057 | */ | ||
| 2058 | if (keep_working(gcwq)) | ||
| 2059 | wake_up_worker(gcwq); | ||
| 2060 | |||
| 2050 | spin_unlock_irq(&gcwq->lock); | 2061 | spin_unlock_irq(&gcwq->lock); |
| 2051 | } | 2062 | } |
| 2052 | 2063 | ||
| @@ -2956,7 +2967,7 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name, | |||
| 2956 | */ | 2967 | */ |
| 2957 | spin_lock(&workqueue_lock); | 2968 | spin_lock(&workqueue_lock); |
| 2958 | 2969 | ||
| 2959 | if (workqueue_freezing && wq->flags & WQ_FREEZEABLE) | 2970 | if (workqueue_freezing && wq->flags & WQ_FREEZABLE) |
| 2960 | for_each_cwq_cpu(cpu, wq) | 2971 | for_each_cwq_cpu(cpu, wq) |
| 2961 | get_cwq(cpu, wq)->max_active = 0; | 2972 | get_cwq(cpu, wq)->max_active = 0; |
| 2962 | 2973 | ||
| @@ -3068,7 +3079,7 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active) | |||
| 3068 | 3079 | ||
| 3069 | spin_lock_irq(&gcwq->lock); | 3080 | spin_lock_irq(&gcwq->lock); |
| 3070 | 3081 | ||
| 3071 | if (!(wq->flags & WQ_FREEZEABLE) || | 3082 | if (!(wq->flags & WQ_FREEZABLE) || |
| 3072 | !(gcwq->flags & GCWQ_FREEZING)) | 3083 | !(gcwq->flags & GCWQ_FREEZING)) |
| 3073 | get_cwq(gcwq->cpu, wq)->max_active = max_active; | 3084 | get_cwq(gcwq->cpu, wq)->max_active = max_active; |
| 3074 | 3085 | ||
| @@ -3318,7 +3329,7 @@ static int __cpuinit trustee_thread(void *__gcwq) | |||
| 3318 | * want to get it over with ASAP - spam rescuers, wake up as | 3329 | * want to get it over with ASAP - spam rescuers, wake up as |
| 3319 | * many idlers as necessary and create new ones till the | 3330 | * many idlers as necessary and create new ones till the |
| 3320 | * worklist is empty. Note that if the gcwq is frozen, there | 3331 | * worklist is empty. Note that if the gcwq is frozen, there |
| 3321 | * may be frozen works in freezeable cwqs. Don't declare | 3332 | * may be frozen works in freezable cwqs. Don't declare |
| 3322 | * completion while frozen. | 3333 | * completion while frozen. |
| 3323 | */ | 3334 | */ |
| 3324 | while (gcwq->nr_workers != gcwq->nr_idle || | 3335 | while (gcwq->nr_workers != gcwq->nr_idle || |
| @@ -3576,9 +3587,9 @@ EXPORT_SYMBOL_GPL(work_on_cpu); | |||
| 3576 | /** | 3587 | /** |
| 3577 | * freeze_workqueues_begin - begin freezing workqueues | 3588 | * freeze_workqueues_begin - begin freezing workqueues |
| 3578 | * | 3589 | * |
| 3579 | * Start freezing workqueues. After this function returns, all | 3590 | * Start freezing workqueues. After this function returns, all freezable |
| 3580 | * freezeable workqueues will queue new works to their frozen_works | 3591 | * workqueues will queue new works to their frozen_works list instead of |
| 3581 | * list instead of gcwq->worklist. | 3592 | * gcwq->worklist. |
| 3582 | * | 3593 | * |
| 3583 | * CONTEXT: | 3594 | * CONTEXT: |
| 3584 | * Grabs and releases workqueue_lock and gcwq->lock's. | 3595 | * Grabs and releases workqueue_lock and gcwq->lock's. |
| @@ -3604,7 +3615,7 @@ void freeze_workqueues_begin(void) | |||
| 3604 | list_for_each_entry(wq, &workqueues, list) { | 3615 | list_for_each_entry(wq, &workqueues, list) { |
| 3605 | struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); | 3616 | struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); |
| 3606 | 3617 | ||
| 3607 | if (cwq && wq->flags & WQ_FREEZEABLE) | 3618 | if (cwq && wq->flags & WQ_FREEZABLE) |
| 3608 | cwq->max_active = 0; | 3619 | cwq->max_active = 0; |
| 3609 | } | 3620 | } |
| 3610 | 3621 | ||
| @@ -3615,7 +3626,7 @@ void freeze_workqueues_begin(void) | |||
| 3615 | } | 3626 | } |
| 3616 | 3627 | ||
| 3617 | /** | 3628 | /** |
| 3618 | * freeze_workqueues_busy - are freezeable workqueues still busy? | 3629 | * freeze_workqueues_busy - are freezable workqueues still busy? |
| 3619 | * | 3630 | * |
| 3620 | * Check whether freezing is complete. This function must be called | 3631 | * Check whether freezing is complete. This function must be called |
| 3621 | * between freeze_workqueues_begin() and thaw_workqueues(). | 3632 | * between freeze_workqueues_begin() and thaw_workqueues(). |
| @@ -3624,8 +3635,8 @@ void freeze_workqueues_begin(void) | |||
| 3624 | * Grabs and releases workqueue_lock. | 3635 | * Grabs and releases workqueue_lock. |
| 3625 | * | 3636 | * |
| 3626 | * RETURNS: | 3637 | * RETURNS: |
| 3627 | * %true if some freezeable workqueues are still busy. %false if | 3638 | * %true if some freezable workqueues are still busy. %false if freezing |
| 3628 | * freezing is complete. | 3639 | * is complete. |
| 3629 | */ | 3640 | */ |
| 3630 | bool freeze_workqueues_busy(void) | 3641 | bool freeze_workqueues_busy(void) |
| 3631 | { | 3642 | { |
| @@ -3645,7 +3656,7 @@ bool freeze_workqueues_busy(void) | |||
| 3645 | list_for_each_entry(wq, &workqueues, list) { | 3656 | list_for_each_entry(wq, &workqueues, list) { |
| 3646 | struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); | 3657 | struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); |
| 3647 | 3658 | ||
| 3648 | if (!cwq || !(wq->flags & WQ_FREEZEABLE)) | 3659 | if (!cwq || !(wq->flags & WQ_FREEZABLE)) |
| 3649 | continue; | 3660 | continue; |
| 3650 | 3661 | ||
| 3651 | BUG_ON(cwq->nr_active < 0); | 3662 | BUG_ON(cwq->nr_active < 0); |
| @@ -3690,7 +3701,7 @@ void thaw_workqueues(void) | |||
| 3690 | list_for_each_entry(wq, &workqueues, list) { | 3701 | list_for_each_entry(wq, &workqueues, list) { |
| 3691 | struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); | 3702 | struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); |
| 3692 | 3703 | ||
| 3693 | if (!cwq || !(wq->flags & WQ_FREEZEABLE)) | 3704 | if (!cwq || !(wq->flags & WQ_FREEZABLE)) |
| 3694 | continue; | 3705 | continue; |
| 3695 | 3706 | ||
| 3696 | /* restore max_active and repopulate worklist */ | 3707 | /* restore max_active and repopulate worklist */ |
