path: root/kernel/workqueue.c
author	Daniel Vetter <daniel.vetter@ffwll.ch>	2013-04-03 05:25:32 -0400
committer	Daniel Vetter <daniel.vetter@ffwll.ch>	2013-04-03 05:28:48 -0400
commit	ecb135a1a1953d2895d149e78926be479fdc6f2c (patch)
tree	e40aa30eefa5cc42ed1169b9798810a710a1bd9a /kernel/workqueue.c
parent	bd6946e87a98fea11907b2a47368e13044458a35 (diff)
parent	07961ac7c0ee8b546658717034fe692fd12eefa9 (diff)
Merge tag 'v3.9-rc5' into drm-intel-next-queued
Backmerge Linux 3.9-rc5 since I want to merge a few dp clock cleanups
for -next, but they will conflict all over the place with

commit 9d1a455b0ca1c2c956b4d9ab212864a8695270f1
Author: Takashi Iwai <tiwai@suse.de>
Date:   Mon Mar 18 11:25:36 2013 +0100

    drm/i915: Use the fixed pixel clock for eDP in intel_dp_set_m_n()

from -fixes.

Conflicts:
	drivers/gpu/drm/i915/intel_dp.c: Simply adjacent lines changed.
	drivers/gpu/drm/i915/intel_panel.c: A field rename in -next conflicts
	with a bugfix in -fixes. Take the version from -fixes and apply the
	rename.

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--	kernel/workqueue.c	44
1 file changed, 25 insertions, 19 deletions
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 55fac5b991b7..b48cd597145d 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -3447,28 +3447,34 @@ static void wq_unbind_fn(struct work_struct *work)
 
 		spin_unlock_irq(&pool->lock);
 		mutex_unlock(&pool->assoc_mutex);
-	}
 
-	/*
-	 * Call schedule() so that we cross rq->lock and thus can guarantee
-	 * sched callbacks see the %WORKER_UNBOUND flag.  This is necessary
-	 * as scheduler callbacks may be invoked from other cpus.
-	 */
-	schedule();
+		/*
+		 * Call schedule() so that we cross rq->lock and thus can
+		 * guarantee sched callbacks see the %WORKER_UNBOUND flag.
+		 * This is necessary as scheduler callbacks may be invoked
+		 * from other cpus.
+		 */
+		schedule();
 
-	/*
-	 * Sched callbacks are disabled now.  Zap nr_running.  After this,
-	 * nr_running stays zero and need_more_worker() and keep_working()
-	 * are always true as long as the worklist is not empty.  Pools on
-	 * @cpu now behave as unbound (in terms of concurrency management)
-	 * pools which are served by workers tied to the CPU.
-	 *
-	 * On return from this function, the current worker would trigger
-	 * unbound chain execution of pending work items if other workers
-	 * didn't already.
-	 */
-	for_each_std_worker_pool(pool, cpu)
+		/*
+		 * Sched callbacks are disabled now.  Zap nr_running.
+		 * After this, nr_running stays zero and need_more_worker()
+		 * and keep_working() are always true as long as the
+		 * worklist is not empty.  This pool now behaves as an
+		 * unbound (in terms of concurrency management) pool which
+		 * are served by workers tied to the pool.
+		 */
 		atomic_set(&pool->nr_running, 0);
+
+		/*
+		 * With concurrency management just turned off, a busy
+		 * worker blocking could lead to lengthy stalls.  Kick off
+		 * unbound chain execution of currently pending work items.
+		 */
+		spin_lock_irq(&pool->lock);
+		wake_up_worker(pool);
+		spin_unlock_irq(&pool->lock);
+	}
 }
 
 /*
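
As a reading aid, the per-pool tail of wq_unbind_fn() after this merge looks roughly like the sketch below. It is condensed from the lines visible in the hunk above; the enclosing for_each_std_worker_pool(pool, cpu) loop opens before the hunk, and its earlier body (flagging the pool's workers unbound under pool->lock) is only summarised as a comment, so treat this as an approximation rather than the verbatim file contents.

	for_each_std_worker_pool(pool, cpu) {
		/* ... workers were flagged WORKER_UNBOUND under pool->lock ... */

		spin_unlock_irq(&pool->lock);
		mutex_unlock(&pool->assoc_mutex);

		/* let sched callbacks on other CPUs observe %WORKER_UNBOUND */
		schedule();

		/* concurrency management is now off for this pool */
		atomic_set(&pool->nr_running, 0);

		/* kick a worker so pending items keep draining without stalls */
		spin_lock_irq(&pool->lock);
		wake_up_worker(pool);
		spin_unlock_irq(&pool->lock);
	}

The net effect of the change is that schedule(), the nr_running reset and the new wake_up_worker() kick now run once per pool inside the loop, instead of zapping nr_running for all pools in a separate pass after the loop.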