author    | Tejun Heo <tj@kernel.org> | 2011-02-21 03:43:56 -0500
committer | Tejun Heo <tj@kernel.org> | 2011-02-21 03:43:56 -0500
commit    | 43d133c18b44e7d82d82ef0dcc2bddd55d5dfe81 (patch)
tree      | 8de75c837b55874cc8a81a29bdedbc62668d4481 /kernel/workqueue.c
parent    | 4149efb22da66e326fc48baf80d628834509f7f0 (diff)
parent    | 6f576d57f1fa0d6026b495d8746d56d949989161 (diff)
Merge branch 'master' into for-2.6.39
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r-- | kernel/workqueue.c | 37
1 file changed, 24 insertions, 13 deletions
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 28f8bd08f0e7..572f559f6cb9 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -79,7 +79,9 @@ enum {
 	MAX_IDLE_WORKERS_RATIO	= 4,		/* 1/4 of busy can be idle */
 	IDLE_WORKER_TIMEOUT	= 300 * HZ,	/* keep idle ones for 5 mins */
 
-	MAYDAY_INITIAL_TIMEOUT	= HZ / 100,	/* call for help after 10ms */
+	MAYDAY_INITIAL_TIMEOUT  = HZ / 100 >= 2 ? HZ / 100 : 2,
+						/* call for help after 10ms
+						   (min two ticks) */
 	MAYDAY_INTERVAL		= HZ / 10,	/* and then every 100ms */
 	CREATE_COOLDOWN		= HZ,		/* time to breath after fail */
 	TRUSTEE_COOLDOWN	= HZ / 10,	/* for trustee draining */
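
The first hunk exists because `HZ / 100` only works out to 10ms on HZ=1000 kernels; on HZ=100 it rounds down to a single jiffy, and on lower-HZ configurations to zero, so the clamped expression guarantees at least two ticks before the rescuer is called for help. A small userspace sketch (illustrative only, not part of the patch) of how the expression behaves across a few HZ values:

```c
#include <stdio.h>

/* Same expression as the new MAYDAY_INITIAL_TIMEOUT enum value above. */
static long mayday_initial_timeout(long hz)
{
	return hz / 100 >= 2 ? hz / 100 : 2;
}

int main(void)
{
	const long hz_values[] = { 1000, 300, 250, 100 };

	for (size_t i = 0; i < sizeof(hz_values) / sizeof(hz_values[0]); i++) {
		long hz = hz_values[i];

		/* old: plain HZ / 100 could round down to a single tick */
		printf("HZ=%4ld: old = %ld tick(s), clamped = %ld tick(s)\n",
		       hz, hz / 100, mayday_initial_timeout(hz));
	}
	return 0;
}
```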
@@ -2049,6 +2051,15 @@ repeat:
 			move_linked_works(work, scheduled, &n);
 
 		process_scheduled_works(rescuer);
+
+		/*
+		 * Leave this gcwq.  If keep_working() is %true, notify a
+		 * regular worker; otherwise, we end up with 0 concurrency
+		 * and stalling the execution.
+		 */
+		if (keep_working(gcwq))
+			wake_up_worker(gcwq);
+
 		spin_unlock_irq(&gcwq->lock);
 	}
 
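
The comment added in this hunk carries the rationale: the rescuer processes works on behalf of a gcwq whose regular workers may all be asleep, and once it moves on, any remaining queued work would sit with zero concurrency until something else happened to wake a worker. A hypothetical restatement of the added step (names mirror the hunk; keep_working()'s actual body is not part of this diff), offered as a sketch rather than kernel source:

```c
/*
 * Hypothetical restatement of the added lines, not the kernel source.
 * Assumed semantics: keep_working(gcwq) reports that the gcwq still has
 * queued work that would go unserved once the rescuer leaves.
 */
static void rescuer_leave_gcwq(struct global_cwq *gcwq)
{
	/* caller is assumed to hold gcwq->lock, as in the hunk above */
	if (keep_working(gcwq))
		wake_up_worker(gcwq);	/* hand remaining work to a regular worker */

	spin_unlock_irq(&gcwq->lock);
}
```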
@@ -2958,7 +2969,7 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name,
 	 */
 	spin_lock(&workqueue_lock);
 
-	if (workqueue_freezing && wq->flags & WQ_FREEZEABLE)
+	if (workqueue_freezing && wq->flags & WQ_FREEZABLE)
 		for_each_cwq_cpu(cpu, wq)
 			get_cwq(cpu, wq)->max_active = 0;
 
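
The remaining hunks are the mechanical side of the same merge: mainline renamed the flag from WQ_FREEZEABLE to WQ_FREEZABLE (and "freezeable" to "freezable" in comments), so every test of the flag on this branch is updated to the new spelling. For context, a minimal, hypothetical driver-side sketch of creating such a workqueue with alloc_workqueue() (the workqueue name and variable are made up for illustration):

```c
#include <linux/errno.h>
#include <linux/workqueue.h>

/* hypothetical driver-side state */
static struct workqueue_struct *my_wq;

static int my_driver_init(void)
{
	/*
	 * A WQ_FREEZABLE workqueue takes part in the freeze/thaw cycle
	 * handled by the functions touched below: while frozen, newly
	 * queued works are held back instead of being executed.
	 */
	my_wq = alloc_workqueue("my_driver_wq", WQ_FREEZABLE, 0);
	if (!my_wq)
		return -ENOMEM;

	return 0;
}
```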
@@ -3070,7 +3081,7 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active)
 
 		spin_lock_irq(&gcwq->lock);
 
-		if (!(wq->flags & WQ_FREEZEABLE) ||
+		if (!(wq->flags & WQ_FREEZABLE) ||
 		    !(gcwq->flags & GCWQ_FREEZING))
 			get_cwq(gcwq->cpu, wq)->max_active = max_active;
 
@@ -3320,7 +3331,7 @@ static int __cpuinit trustee_thread(void *__gcwq)
 	 * want to get it over with ASAP - spam rescuers, wake up as
 	 * many idlers as necessary and create new ones till the
 	 * worklist is empty.  Note that if the gcwq is frozen, there
-	 * may be frozen works in freezeable cwqs.  Don't declare
+	 * may be frozen works in freezable cwqs.  Don't declare
 	 * completion while frozen.
 	 */
 	while (gcwq->nr_workers != gcwq->nr_idle ||
@@ -3578,9 +3589,9 @@ EXPORT_SYMBOL_GPL(work_on_cpu);
 /**
  * freeze_workqueues_begin - begin freezing workqueues
  *
- * Start freezing workqueues.  After this function returns, all
- * freezeable workqueues will queue new works to their frozen_works
- * list instead of gcwq->worklist.
+ * Start freezing workqueues.  After this function returns, all freezable
+ * workqueues will queue new works to their frozen_works list instead of
+ * gcwq->worklist.
  *
  * CONTEXT:
  * Grabs and releases workqueue_lock and gcwq->lock's.
@@ -3606,7 +3617,7 @@ void freeze_workqueues_begin(void)
 		list_for_each_entry(wq, &workqueues, list) {
 			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
 
-			if (cwq && wq->flags & WQ_FREEZEABLE)
+			if (cwq && wq->flags & WQ_FREEZABLE)
 				cwq->max_active = 0;
 		}
 
@@ -3617,7 +3628,7 @@ void freeze_workqueues_begin(void)
 }
 
 /**
- * freeze_workqueues_busy - are freezeable workqueues still busy?
+ * freeze_workqueues_busy - are freezable workqueues still busy?
  *
  * Check whether freezing is complete.  This function must be called
  * between freeze_workqueues_begin() and thaw_workqueues().
@@ -3626,8 +3637,8 @@ void freeze_workqueues_begin(void)
 * Grabs and releases workqueue_lock.
 *
 * RETURNS:
- * %true if some freezeable workqueues are still busy.  %false if
- * freezing is complete.
+ * %true if some freezable workqueues are still busy.  %false if freezing
+ * is complete.
 */
bool freeze_workqueues_busy(void)
{
@@ -3647,7 +3658,7 @@ bool freeze_workqueues_busy(void)
 		list_for_each_entry(wq, &workqueues, list) {
 			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
 
-			if (!cwq || !(wq->flags & WQ_FREEZEABLE))
+			if (!cwq || !(wq->flags & WQ_FREEZABLE))
 				continue;
 
 			BUG_ON(cwq->nr_active < 0);
@@ -3692,7 +3703,7 @@ void thaw_workqueues(void)
 		list_for_each_entry(wq, &workqueues, list) {
 			struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
 
-			if (!cwq || !(wq->flags & WQ_FREEZEABLE))
+			if (!cwq || !(wq->flags & WQ_FREEZABLE))
 				continue;
 
 			/* restore max_active and repopulate worklist */
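
The last few hunks only touch the freezer entry points, whose contract is spelled out in the comments above: freeze_workqueues_begin() starts holding back work on freezable workqueues, freeze_workqueues_busy() reports whether any of them still have work in flight and is only valid between begin and thaw, and thaw_workqueues() restores max_active and repopulates the worklists. A hedged sketch of a caller following that contract (the retry bound is invented for illustration; the real user is the suspend/hibernation freezer, not this code):

```c
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/workqueue.h>

/* illustrative sketch of the begin -> busy? -> thaw contract */
static int freeze_kernel_workqueues_sketch(void)
{
	int tries = 100;	/* invented bound, roughly 1s of polling */

	freeze_workqueues_begin();

	/* freeze_workqueues_busy() must only be called between begin and thaw */
	while (freeze_workqueues_busy()) {
		if (!--tries) {
			thaw_workqueues();
			return -EBUSY;
		}
		msleep(10);
	}

	return 0;
}
```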