path: root/kernel/workqueue.c
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--  kernel/workqueue.c  51
1 file changed, 29 insertions, 22 deletions
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 81f2457811eb..b48cd597145d 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -457,11 +457,12 @@ static int worker_pool_assign_id(struct worker_pool *pool)
 	int ret;
 
 	mutex_lock(&worker_pool_idr_mutex);
-	idr_pre_get(&worker_pool_idr, GFP_KERNEL);
-	ret = idr_get_new(&worker_pool_idr, pool, &pool->id);
+	ret = idr_alloc(&worker_pool_idr, pool, 0, 0, GFP_KERNEL);
+	if (ret >= 0)
+		pool->id = ret;
 	mutex_unlock(&worker_pool_idr_mutex);
 
-	return ret;
+	return ret < 0 ? ret : 0;
 }
 
 /*
@@ -3446,28 +3447,34 @@ static void wq_unbind_fn(struct work_struct *work)
 
 		spin_unlock_irq(&pool->lock);
 		mutex_unlock(&pool->assoc_mutex);
-	}
 
-	/*
-	 * Call schedule() so that we cross rq->lock and thus can guarantee
-	 * sched callbacks see the %WORKER_UNBOUND flag. This is necessary
-	 * as scheduler callbacks may be invoked from other cpus.
-	 */
-	schedule();
+		/*
+		 * Call schedule() so that we cross rq->lock and thus can
+		 * guarantee sched callbacks see the %WORKER_UNBOUND flag.
+		 * This is necessary as scheduler callbacks may be invoked
+		 * from other cpus.
+		 */
+		schedule();
 
-	/*
-	 * Sched callbacks are disabled now. Zap nr_running. After this,
-	 * nr_running stays zero and need_more_worker() and keep_working()
-	 * are always true as long as the worklist is not empty. Pools on
-	 * @cpu now behave as unbound (in terms of concurrency management)
-	 * pools which are served by workers tied to the CPU.
-	 *
-	 * On return from this function, the current worker would trigger
-	 * unbound chain execution of pending work items if other workers
-	 * didn't already.
-	 */
-	for_each_std_worker_pool(pool, cpu)
+		/*
+		 * Sched callbacks are disabled now. Zap nr_running.
+		 * After this, nr_running stays zero and need_more_worker()
+		 * and keep_working() are always true as long as the
+		 * worklist is not empty. This pool now behaves as an
+		 * unbound (in terms of concurrency management) pool which
+		 * are served by workers tied to the pool.
+		 */
 		atomic_set(&pool->nr_running, 0);
+
+		/*
+		 * With concurrency management just turned off, a busy
+		 * worker blocking could lead to lengthy stalls. Kick off
+		 * unbound chain execution of currently pending work items.
+		 */
+		spin_lock_irq(&pool->lock);
+		wake_up_worker(pool);
+		spin_unlock_irq(&pool->lock);
+	}
 }
 
 /*
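
Note: the first hunk is part of the kernel-wide conversion from the two-step idr_pre_get()/idr_get_new() interface to idr_alloc(), which allocates an ID and installs the pointer in a single call and returns either the new ID or a negative errno, leaving the caller to record the ID and normalize the return value. The sketch below is a rough, hypothetical illustration of that pattern only; the names are illustrative and are not part of this commit or of kernel/workqueue.c.

#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/gfp.h>

/* Illustrative only: a hypothetical object that needs an ID assigned. */
struct example_obj {
	int id;
};

static DEFINE_IDR(example_idr);
static DEFINE_MUTEX(example_idr_mutex);

static int example_assign_id(struct example_obj *obj)
{
	int ret;

	mutex_lock(&example_idr_mutex);
	/* start = 0, end = 0: allocate the lowest free ID with no upper bound */
	ret = idr_alloc(&example_idr, obj, 0, 0, GFP_KERNEL);
	if (ret >= 0)
		obj->id = ret;	/* idr_alloc() returns the new ID itself */
	mutex_unlock(&example_idr_mutex);

	/* callers of the old interface expected 0 on success, so normalize */
	return ret < 0 ? ret : 0;
}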