author		Tejun Heo <tj@kernel.org>	2012-03-05 16:14:58 -0500
committer	Jens Axboe <axboe@kernel.dk>	2012-03-06 15:27:21 -0500
commit		d732580b4eb31553c63744a47d590f770cafb8f0 (patch)
tree		ea4e8e21df1b639603693e6f5fdfc5a620cd8737 /block/blk-core.c
parent		b2fab5acd28ead6f0dd6c3996ba23f0ef1772f15 (diff)
block: implement blk_queue_bypass_start/end()
Rename and extend elv_quiesce_start/end() to blk_queue_bypass_start/end(),
which are exported and support nesting via @q->bypass_depth.  Also add
blk_queue_bypass() to test bypass state.

This will be further extended and used for blkio_group management.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
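The nesting described above can be pictured with a short, hypothetical
caller (an illustration, not part of this patch): each
blk_queue_bypass_start() bumps @q->bypass_depth, and only the
blk_queue_bypass_end() that brings the depth back to zero clears
QUEUE_FLAG_BYPASS.

/* Hypothetical sketch of the nesting semantics; not from this patch. */
static void example_nested_bypass(struct request_queue *q)
{
	blk_queue_bypass_start(q);	/* depth 0 -> 1: flag set, queue drained */
	blk_queue_bypass_start(q);	/* depth 1 -> 2: already bypassing */

	/* ... management work, e.g. future blkio_group updates ... */

	blk_queue_bypass_end(q);	/* depth 2 -> 1: still bypassing */
	blk_queue_bypass_end(q);	/* depth 1 -> 0: flag cleared */
}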
Diffstat (limited to 'block/blk-core.c')
-rw-r--r--	block/blk-core.c | 39 +++++++++++++++++++++++++++++++++++++++++++--
1 file changed, 37 insertions(+), 2 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index fccb25021121..98ddef430093 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -410,6 +410,42 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
 }
 
 /**
+ * blk_queue_bypass_start - enter queue bypass mode
+ * @q: queue of interest
+ *
+ * In bypass mode, only the dispatch FIFO queue of @q is used.  This
+ * function makes @q enter bypass mode and drains all requests which were
+ * issued before.  On return, it's guaranteed that no request has ELVPRIV
+ * set.
+ */
+void blk_queue_bypass_start(struct request_queue *q)
+{
+	spin_lock_irq(q->queue_lock);
+	q->bypass_depth++;
+	queue_flag_set(QUEUE_FLAG_BYPASS, q);
+	spin_unlock_irq(q->queue_lock);
+
+	blk_drain_queue(q, false);
+}
+EXPORT_SYMBOL_GPL(blk_queue_bypass_start);
+
+/**
+ * blk_queue_bypass_end - leave queue bypass mode
+ * @q: queue of interest
+ *
+ * Leave bypass mode and restore the normal queueing behavior.
+ */
+void blk_queue_bypass_end(struct request_queue *q)
+{
+	spin_lock_irq(q->queue_lock);
+	if (!--q->bypass_depth)
+		queue_flag_clear(QUEUE_FLAG_BYPASS, q);
+	WARN_ON_ONCE(q->bypass_depth < 0);
+	spin_unlock_irq(q->queue_lock);
+}
+EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
+
+/**
  * blk_cleanup_queue - shutdown a request queue
  * @q: request queue to shutdown
  *
@@ -862,8 +898,7 @@ retry:
 	 * Also, lookup icq while holding queue_lock.  If it doesn't exist,
 	 * it will be created after releasing queue_lock.
 	 */
-	if (blk_rq_should_init_elevator(bio) &&
-	    !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags)) {
+	if (blk_rq_should_init_elevator(bio) && !blk_queue_bypass(q)) {
 		rw_flags |= REQ_ELVPRIV;
 		rl->elvpriv++;
 		if (et->icq_cache && ioc)
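The second hunk replaces the open-coded QUEUE_FLAG_ELVSWITCH test with the
new blk_queue_bypass() helper.  The helper itself is added outside this
file; judging from the existing blk_queue_*() flag tests in
include/linux/blkdev.h, it presumably amounts to something like:

/* Assumed shape of the helper, modeled on the neighboring
 * blk_queue_*() macros in include/linux/blkdev.h. */
#define blk_queue_bypass(q)	test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags)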