author     Tejun Heo <tj@kernel.org>    2013-04-01 20:08:13 -0400
committer  Tejun Heo <tj@kernel.org>    2013-04-01 21:45:36 -0400
commit     229641a6f1f09e27a1f12fba38980f33f4c92975
tree       234a6f8aea0910de3242af0bbe6d7494fcf81847 /kernel/workqueue.c
parent     d55262c4d164759a8debe772da6c9b16059dec47
parent     07961ac7c0ee8b546658717034fe692fd12eefa9
Merge tag 'v3.9-rc5' into wq/for-3.10
Writeback conversion to workqueue will be based on top of the
wq/for-3.10 branch to take advantage of custom attrs and NUMA support
for unbound workqueues.  Mainline currently contains two commits which
result in non-trivial merge conflicts with wq/for-3.10, and because
block/for-3.10/core is based on v3.9-rc3, which contains one of the
conflicting commits, we need a pre-merge-window merge anyway.  Let's
pull v3.9-rc5 into wq/for-3.10 so that the block tree doesn't suffer
from workqueue merge conflicts.
The two conflicts and their resolutions:
* e68035fb65 ("workqueue: convert to idr_alloc()") in mainline changes
worker_pool_assign_id() to use idr_alloc() instead of the old idr
interface. worker_pool_assign_id() goes through multiple locking
changes in wq/for-3.10 causing the following conflict.
  static int worker_pool_assign_id(struct worker_pool *pool)
  {
          int ret;

  <<<<<<< HEAD
          lockdep_assert_held(&wq_pool_mutex);

          do {
                  if (!idr_pre_get(&worker_pool_idr, GFP_KERNEL))
                          return -ENOMEM;
                  ret = idr_get_new(&worker_pool_idr, pool, &pool->id);
          } while (ret == -EAGAIN);
  =======
          mutex_lock(&worker_pool_idr_mutex);
          ret = idr_alloc(&worker_pool_idr, pool, 0, 0, GFP_KERNEL);
          if (ret >= 0)
                  pool->id = ret;
          mutex_unlock(&worker_pool_idr_mutex);
  >>>>>>> c67bf5361e7e66a0ff1f4caf95f89347d55dfb89

          return ret < 0 ? ret : 0;
  }
We want the locking from the former and the idr_alloc() usage from the
latter, which can be combined into the following.
  static int worker_pool_assign_id(struct worker_pool *pool)
  {
          int ret;

          lockdep_assert_held(&wq_pool_mutex);

          ret = idr_alloc(&worker_pool_idr, pool, 0, 0, GFP_KERNEL);
          if (ret >= 0) {
                  pool->id = ret;
                  return 0;
          }
          return ret;
  }
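For reference, the conversion above follows the general new-style idr
pattern.  Below is a minimal sketch of that pattern; example_idr,
example_register() and example_unregister() are made-up names for
illustration, not anything in workqueue.c, while idr_alloc() and
idr_remove() are the real interfaces used by the resolution.

  #include <linux/idr.h>
  #include <linux/gfp.h>

  static DEFINE_IDR(example_idr);

  /*
   * Allocate an ID for @object.  idr_alloc() returns the new ID (>= 0)
   * or a negative errno in one call, replacing the old
   * idr_pre_get()/idr_get_new() retry loop.
   */
  static int example_register(void *object)
  {
          return idr_alloc(&example_idr, object, 0, 0, GFP_KERNEL);
  }

  /* Release the ID when the object goes away. */
  static void example_unregister(int id)
  {
          idr_remove(&example_idr, id);
  }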
* eb2834285c ("workqueue: fix possible pool stall bug in
wq_unbind_fn()") updated wq_unbind_fn() such that it has single
larger for_each_std_worker_pool() loop instead of two separate loops
with a schedule() call inbetween. wq/for-3.10 renamed
pool->assoc_mutex to pool->manager_mutex causing the following
conflict (earlier function body and comments omitted for brevity).
  static void wq_unbind_fn(struct work_struct *work)
  {
  ...
                  spin_unlock_irq(&pool->lock);
  <<<<<<< HEAD
                  mutex_unlock(&pool->manager_mutex);
          }
  =======
                  mutex_unlock(&pool->assoc_mutex);
  >>>>>>> c67bf5361e7e66a0ff1f4caf95f89347d55dfb89

                  schedule();

  <<<<<<< HEAD
          for_each_cpu_worker_pool(pool, cpu)
  =======
  >>>>>>> c67bf5361e7e66a0ff1f4caf95f89347d55dfb89
                  atomic_set(&pool->nr_running, 0);

                  spin_lock_irq(&pool->lock);
                  wake_up_worker(pool);
                  spin_unlock_irq(&pool->lock);
          }
  }
The resolution is mostly trivial. We want the control flow of the
latter with the rename of the former.
  static void wq_unbind_fn(struct work_struct *work)
  {
  ...
                  spin_unlock_irq(&pool->lock);
                  mutex_unlock(&pool->manager_mutex);

                  schedule();

                  atomic_set(&pool->nr_running, 0);

                  spin_lock_irq(&pool->lock);
                  wake_up_worker(pool);
                  spin_unlock_irq(&pool->lock);
          }
  }
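As a side note, the control-flow change that eb2834285c made, and that
the resolution keeps, is easier to see without the conflict markers.
The sketch below is illustrative only: struct pool and the helper
functions are stand-ins invented for the example; only the
two-loops-versus-one-loop shape mirrors wq_unbind_fn().

  /* Stand-in types and helpers, not kernel code. */
  #define NR_POOLS 2

  struct pool {
          int unbound;
          int nr_running;
  };

  static void unbind(struct pool *p)         { p->unbound = 1; }
  static void zap_nr_running(struct pool *p) { p->nr_running = 0; }
  static void kick(struct pool *p)           { (void)p; /* wake a worker */ }

  /* Old shape: two separate loops with one schedule()-like step in
   * between. */
  static void unbind_all_old(struct pool *pools)
  {
          int i;

          for (i = 0; i < NR_POOLS; i++)
                  unbind(&pools[i]);

          /* single schedule()-like step for all pools */

          for (i = 0; i < NR_POOLS; i++)
                  zap_nr_running(&pools[i]);
  }

  /* New shape, as in the resolved code above: every per-pool step,
   * including the wake-up that kicks off pending work, happens inside
   * one loop before the next pool is touched. */
  static void unbind_all_new(struct pool *pools)
  {
          int i;

          for (i = 0; i < NR_POOLS; i++) {
                  unbind(&pools[i]);
                  /* schedule()-like step, per pool */
                  zap_nr_running(&pools[i]);
                  kick(&pools[i]);
          }
  }

  int main(void)
  {
          struct pool pools[NR_POOLS] = { { 0, 1 }, { 0, 1 } };

          unbind_all_old(pools);
          unbind_all_new(pools);
          return 0;
  }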
Signed-off-by: Tejun Heo <tj@kernel.org>
Diffstat (limited to 'kernel/workqueue.c')
 kernel/workqueue.c | 55
 1 file changed, 30 insertions(+), 25 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 729ac6a44860..dd2a4c49a39a 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -510,12 +510,11 @@ static int worker_pool_assign_id(struct worker_pool *pool)
 
         lockdep_assert_held(&wq_pool_mutex);
 
-        do {
-                if (!idr_pre_get(&worker_pool_idr, GFP_KERNEL))
-                        return -ENOMEM;
-                ret = idr_get_new(&worker_pool_idr, pool, &pool->id);
-        } while (ret == -EAGAIN);
-
+        ret = idr_alloc(&worker_pool_idr, pool, 0, 0, GFP_KERNEL);
+        if (ret >= 0) {
+                pool->id = ret;
+                return 0;
+        }
         return ret;
 }
 
@@ -4408,28 +4407,34 @@ static void wq_unbind_fn(struct work_struct *work)
 
                 spin_unlock_irq(&pool->lock);
                 mutex_unlock(&pool->manager_mutex);
-        }
 
-        /*
-         * Call schedule() so that we cross rq->lock and thus can guarantee
-         * sched callbacks see the %WORKER_UNBOUND flag.  This is necessary
-         * as scheduler callbacks may be invoked from other cpus.
-         */
-        schedule();
+                /*
+                 * Call schedule() so that we cross rq->lock and thus can
+                 * guarantee sched callbacks see the %WORKER_UNBOUND flag.
+                 * This is necessary as scheduler callbacks may be invoked
+                 * from other cpus.
+                 */
+                schedule();
 
-        /*
-         * Sched callbacks are disabled now.  Zap nr_running.  After this,
-         * nr_running stays zero and need_more_worker() and keep_working()
-         * are always true as long as the worklist is not empty.  Pools on
-         * @cpu now behave as unbound (in terms of concurrency management)
-         * pools which are served by workers tied to the CPU.
-         *
-         * On return from this function, the current worker would trigger
-         * unbound chain execution of pending work items if other workers
-         * didn't already.
-         */
-        for_each_cpu_worker_pool(pool, cpu)
+                /*
+                 * Sched callbacks are disabled now.  Zap nr_running.
+                 * After this, nr_running stays zero and need_more_worker()
+                 * and keep_working() are always true as long as the
+                 * worklist is not empty.  This pool now behaves as an
+                 * unbound (in terms of concurrency management) pool which
+                 * are served by workers tied to the pool.
+                 */
                 atomic_set(&pool->nr_running, 0);
+
+                /*
+                 * With concurrency management just turned off, a busy
+                 * worker blocking could lead to lengthy stalls.  Kick off
+                 * unbound chain execution of currently pending work items.
+                 */
+                spin_lock_irq(&pool->lock);
+                wake_up_worker(pool);
+                spin_unlock_irq(&pool->lock);
+        }
 }
 
 /**