 block/blk-core.c       | 39 +++++++++++++++++++++++++++++++++++++--
 block/blk.h            |  6 ++----
 block/elevator.c       | 25 +++----------------------
 include/linux/blkdev.h |  5 ++++-
 4 files changed, 46 insertions(+), 29 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index fccb25021121..98ddef430093 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -410,6 +410,42 @@ void blk_drain_queue(struct request_queue *q, bool drain_all)
 }
 
 /**
+ * blk_queue_bypass_start - enter queue bypass mode
+ * @q: queue of interest
+ *
+ * In bypass mode, only the dispatch FIFO queue of @q is used.  This
+ * function makes @q enter bypass mode and drains all requests which were
+ * issued before.  On return, it's guaranteed that no request has ELVPRIV
+ * set.
+ */
+void blk_queue_bypass_start(struct request_queue *q)
+{
+	spin_lock_irq(q->queue_lock);
+	q->bypass_depth++;
+	queue_flag_set(QUEUE_FLAG_BYPASS, q);
+	spin_unlock_irq(q->queue_lock);
+
+	blk_drain_queue(q, false);
+}
+EXPORT_SYMBOL_GPL(blk_queue_bypass_start);
+
+/**
+ * blk_queue_bypass_end - leave queue bypass mode
+ * @q: queue of interest
+ *
+ * Leave bypass mode and restore the normal queueing behavior.
+ */
+void blk_queue_bypass_end(struct request_queue *q)
+{
+	spin_lock_irq(q->queue_lock);
+	if (!--q->bypass_depth)
+		queue_flag_clear(QUEUE_FLAG_BYPASS, q);
+	WARN_ON_ONCE(q->bypass_depth < 0);
+	spin_unlock_irq(q->queue_lock);
+}
+EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
+
+/**
  * blk_cleanup_queue - shutdown a request queue
  * @q: request queue to shutdown
  *
@@ -862,8 +898,7 @@ retry:
 	 * Also, lookup icq while holding queue_lock.  If it doesn't exist,
 	 * it will be created after releasing queue_lock.
 	 */
-	if (blk_rq_should_init_elevator(bio) &&
-	    !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags)) {
+	if (blk_rq_should_init_elevator(bio) && !blk_queue_bypass(q)) {
 		rw_flags |= REQ_ELVPRIV;
 		rl->elvpriv++;
 		if (et->icq_cache && ioc)
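The kernel-doc above spells out the contract; note that bypass_depth makes the two calls nest, so overlapping users keep the queue bypassed until the outermost blk_queue_bypass_end(). A minimal usage sketch under that reading (example_update_queue_data() is a hypothetical caller, not part of this patch):

/* Hypothetical caller: bypass windows nest via q->bypass_depth. */
static void example_update_queue_data(struct request_queue *q)
{
	blk_queue_bypass_start(q);	/* depth 0 -> 1: flag set, queue drained */

	/*
	 * No ELVPRIV request is in flight here, and new requests are
	 * dispatched FIFO until the matching blk_queue_bypass_end().
	 */

	blk_queue_bypass_end(q);	/* depth 1 -> 0: flag cleared */
}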
diff --git a/block/blk.h b/block/blk.h
index 9c12f80882b0..7422f3133c5d 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -23,7 +23,8 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
 		      struct bio *bio);
 int blk_rq_append_bio(struct request_queue *q, struct request *rq,
 		      struct bio *bio);
-void blk_drain_queue(struct request_queue *q, bool drain_all);
+void blk_queue_bypass_start(struct request_queue *q);
+void blk_queue_bypass_end(struct request_queue *q);
 void blk_dequeue_request(struct request *rq);
 void __blk_queue_free_tags(struct request_queue *q);
 bool __blk_end_bidi_request(struct request *rq, int error,
@@ -144,9 +145,6 @@ void blk_queue_congestion_threshold(struct request_queue *q);
 
 int blk_dev_init(void);
 
-void elv_quiesce_start(struct request_queue *q);
-void elv_quiesce_end(struct request_queue *q);
-
 
 /*
  * Return the threshold (number of used requests) at which the queue is
diff --git a/block/elevator.c b/block/elevator.c
index f81c061dad15..0bdea0ed03a3 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -553,25 +553,6 @@ void elv_drain_elevator(struct request_queue *q)
 	}
 }
 
-void elv_quiesce_start(struct request_queue *q)
-{
-	if (!q->elevator)
-		return;
-
-	spin_lock_irq(q->queue_lock);
-	queue_flag_set(QUEUE_FLAG_ELVSWITCH, q);
-	spin_unlock_irq(q->queue_lock);
-
-	blk_drain_queue(q, false);
-}
-
-void elv_quiesce_end(struct request_queue *q)
-{
-	spin_lock_irq(q->queue_lock);
-	queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
-	spin_unlock_irq(q->queue_lock);
-}
-
 void __elv_add_request(struct request_queue *q, struct request *rq, int where)
 {
 	trace_block_rq_insert(q, rq);
@@ -903,7 +884,7 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
 	 * using INSERT_BACK.  All requests have SOFTBARRIER set and no
 	 * merge happens either.
 	 */
-	elv_quiesce_start(q);
+	blk_queue_bypass_start(q);
 
 	/* unregister and clear all auxiliary data of the old elevator */
 	if (registered)
@@ -933,7 +914,7 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
 
 	/* done, kill the old one and finish */
 	elevator_exit(old);
-	elv_quiesce_end(q);
+	blk_queue_bypass_end(q);
 
 	blk_add_trace_msg(q, "elv switch: %s", new_e->elevator_name);
 
@@ -945,7 +926,7 @@ fail_init:
 	/* switch failed, restore and re-register old elevator */
 	q->elevator = old;
 	elv_register_queue(q);
-	elv_quiesce_end(q);
+	blk_queue_bypass_end(q);
 
 	return err;
 }
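The three call sites above bracket the elevator switch with the new helpers on both the success and failure paths. A condensed, illustrative shape of that pattern (do_switch_work() is a hypothetical stand-in for the unregister/exit/init/register steps, not a real function):

/* Illustrative only: how elevator_switch() uses the bypass pair. */
static int example_elevator_switch(struct request_queue *q,
				   struct elevator_type *new_e)
{
	int err;

	blk_queue_bypass_start(q);	/* drain old elevator, force FIFO dispatch */
	err = do_switch_work(q, new_e);	/* hypothetical stand-in for the real work */
	blk_queue_bypass_end(q);	/* restore normal queueing either way */
	return err;
}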
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 606cf339bb56..315db1d91bc4 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -389,6 +389,8 @@ struct request_queue {
 
 	struct mutex		sysfs_lock;
 
+	int			bypass_depth;
+
 #if defined(CONFIG_BLK_DEV_BSG)
 	bsg_job_fn		*bsg_job_fn;
 	int			bsg_job_size;
@@ -406,7 +408,7 @@ struct request_queue {
 #define QUEUE_FLAG_SYNCFULL	3	/* read queue has been filled */
 #define QUEUE_FLAG_ASYNCFULL	4	/* write queue has been filled */
 #define QUEUE_FLAG_DEAD		5	/* queue being torn down */
-#define QUEUE_FLAG_ELVSWITCH	6	/* don't use elevator, just do FIFO */
+#define QUEUE_FLAG_BYPASS	6	/* act as dumb FIFO queue */
 #define QUEUE_FLAG_BIDI		7	/* queue supports bidi requests */
 #define QUEUE_FLAG_NOMERGES	8	/* disable merge attempts */
 #define QUEUE_FLAG_SAME_COMP	9	/* complete on same CPU-group */
@@ -494,6 +496,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
 #define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
 #define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
 #define blk_queue_dead(q)	test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
+#define blk_queue_bypass(q)	test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags)
 #define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
 #define blk_queue_noxmerges(q)	\
 	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
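blk_queue_bypass() is a plain test_bit() wrapper, so hot paths such as get_request() can sample it without taking q->queue_lock; the drain inside blk_queue_bypass_start() is what makes the flag transition safe to observe locklessly. An illustrative predicate in that spirit (example_may_use_elevator() is hypothetical):

/* Hypothetical fast-path check mirroring the get_request() change. */
static bool example_may_use_elevator(struct request_queue *q)
{
	return q->elevator && !blk_queue_bypass(q);
}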