author    Bart Van Assche <bvanassche@acm.org>  2012-11-28 07:45:56 -0500
committer Jens Axboe <axboe@kernel.dk>          2012-12-06 08:32:30 -0500
commit    704605711ef048a7c6ad2ec599f15d2e0baf86b2 (patch)
tree      16620b55d00b289a9a343d1d2c150684fbb1661c /block/blk-core.c
parent    c246e80d86736312933646896c4157daf511dadc (diff)
block: Avoid scheduling delayed work on a dead queue
Running a queue must continue after it has been marked dying until it
has been marked dead, so blk_run_queue_async() must not schedule
delayed work after blk_cleanup_queue() has marked a queue dead. Hence
add a test for that queue state in blk_run_queue_async() and make sure
that queue_unplugged() invokes that function with the queue lock held.
This prevents the queue state from changing after it has been tested
and before mod_delayed_work() is invoked. Drop the queue dying test in
queue_unplugged() since it is now superfluous: __blk_run_queue()
already tests whether or not the queue is dead.

Signed-off-by: Bart Van Assche <bvanassche@acm.org>
Cc: Mike Christie <michaelc@cs.wisc.edu>
Acked-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
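The race the patch closes is the classic test-then-schedule window: the
dead-queue test and mod_delayed_work() must form one critical section
under the queue lock, or blk_cleanup_queue() can mark the queue dead in
between. Below is a minimal userspace sketch of that invariant; the
struct, helpers, and printf stand-ins are illustrative, and only the
locking choreography mirrors the kernel code:

	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct queue {
		pthread_mutex_t lock;   /* stands in for q->queue_lock */
		bool dead;              /* stands in for the "dead" queue flag */
	};

	/*
	 * Caller must hold q->lock, mirroring the new blk_run_queue_async()
	 * rule: the dead test and the scheduling step are one critical section.
	 */
	static void run_queue_async(struct queue *q)
	{
		if (!q->dead)
			printf("work scheduled\n");  /* mod_delayed_work() stand-in */
	}

	static void cleanup_queue(struct queue *q)
	{
		pthread_mutex_lock(&q->lock);
		q->dead = true;         /* from here on, no new work may be queued */
		pthread_mutex_unlock(&q->lock);
	}

	int main(void)
	{
		struct queue q = { PTHREAD_MUTEX_INITIALIZER, false };

		/*
		 * The unplug path: test and "schedule" under a single lock hold.
		 * Dropping the lock between the test and the scheduling call
		 * would reopen the window that cleanup_queue() could slip through.
		 */
		pthread_mutex_lock(&q.lock);
		run_queue_async(&q);
		pthread_mutex_unlock(&q.lock);

		cleanup_queue(&q);
		return 0;
	}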
Diffstat (limited to 'block/blk-core.c')
-rw-r--r--  block/blk-core.c  33
1 file changed, 9 insertions(+), 24 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index f52d05ff5d24..9fb23537c7ad 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -219,12 +219,13 @@ static void blk_delay_work(struct work_struct *work)
  * Description:
  *   Sometimes queueing needs to be postponed for a little while, to allow
  *   resources to come back. This function will make sure that queueing is
- *   restarted around the specified time.
+ *   restarted around the specified time. Queue lock must be held.
  */
 void blk_delay_queue(struct request_queue *q, unsigned long msecs)
 {
-	queue_delayed_work(kblockd_workqueue, &q->delay_work,
-				msecs_to_jiffies(msecs));
+	if (likely(!blk_queue_dead(q)))
+		queue_delayed_work(kblockd_workqueue, &q->delay_work,
+				   msecs_to_jiffies(msecs));
 }
 EXPORT_SYMBOL(blk_delay_queue);
 
@@ -334,11 +335,11 @@ EXPORT_SYMBOL(__blk_run_queue);
  *
  * Description:
  *    Tells kblockd to perform the equivalent of @blk_run_queue on behalf
- *    of us.
+ *    of us. The caller must hold the queue lock.
  */
 void blk_run_queue_async(struct request_queue *q)
 {
-	if (likely(!blk_queue_stopped(q)))
+	if (likely(!blk_queue_stopped(q) && !blk_queue_dead(q)))
 		mod_delayed_work(kblockd_workqueue, &q->delay_work, 0);
 }
 EXPORT_SYMBOL(blk_run_queue_async);
@@ -2913,27 +2914,11 @@ static void queue_unplugged(struct request_queue *q, unsigned int depth,
 {
 	trace_block_unplug(q, depth, !from_schedule);
 
-	/*
-	 * Don't mess with a dying queue.
-	 */
-	if (unlikely(blk_queue_dying(q))) {
-		spin_unlock(q->queue_lock);
-		return;
-	}
-
-	/*
-	 * If we are punting this to kblockd, then we can safely drop
-	 * the queue_lock before waking kblockd (which needs to take
-	 * this lock).
-	 */
-	if (from_schedule) {
-		spin_unlock(q->queue_lock);
+	if (from_schedule)
 		blk_run_queue_async(q);
-	} else {
+	else
 		__blk_run_queue(q);
-		spin_unlock(q->queue_lock);
-	}
-
+	spin_unlock(q->queue_lock);
 }
 
 static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
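Read together, the '+' and context lines of the last hunk leave
queue_unplugged() looking like this (reconstructed here for reference,
not an additional change): the queue lock is now held across both run
variants, so the dead-queue test inside blk_run_queue_async() cannot
race with blk_cleanup_queue().

	static void queue_unplugged(struct request_queue *q, unsigned int depth,
				    bool from_schedule)
	{
		trace_block_unplug(q, depth, !from_schedule);

		if (from_schedule)
			blk_run_queue_async(q);
		else
			__blk_run_queue(q);
		spin_unlock(q->queue_lock);
	}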