author     Christoph Hellwig <hch@lst.de>    2017-01-27 10:30:47 -0500
committer  Jens Axboe <axboe@fb.com>         2017-01-27 11:01:45 -0500
commit     f73f44eb00cb136990cfb7d40e436c13d7669ec8 (patch)
tree       e0a5cc16d9373eec0346538fe073f0c0b08133bf
parent     c13660a08c8b3bb49def4374bfd414aaaa564662 (diff)
block: add a op_is_flush helper
This centralizes the checks for bios that need to go into the flush
state machine.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Martin K. Petersen <martin.petersen@oracle.com>
Reviewed-by: Bart Van Assche <bart.vanassche@sandisk.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
-rw-r--r--  block/blk-core.c              8
-rw-r--r--  block/blk-mq-sched.c          5
-rw-r--r--  block/blk-mq.c                4
-rw-r--r--  drivers/md/bcache/request.c   2
-rw-r--r--  drivers/md/dm-cache-target.c  13
-rw-r--r--  drivers/md/dm-thin.c          13
-rw-r--r--  include/linux/blk_types.h     9
7 files changed, 26 insertions, 28 deletions
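For quick reference, here is a minimal, userspace-compilable sketch of what the new helper does. The REQ_* values below are placeholder bits chosen for illustration, not the real flag definitions from include/linux/blk_types.h, and the example callers are hypothetical.

/*
 * Minimal sketch of the op_is_flush() helper added by this patch.
 * The REQ_* bit positions here are placeholders for illustration only;
 * the real definitions live in include/linux/blk_types.h.
 */
#include <stdbool.h>
#include <stdio.h>

#define REQ_FUA      (1u << 0)  /* placeholder: forced unit access write */
#define REQ_PREFLUSH (1u << 1)  /* placeholder: flush cache before the data */

/* Same shape as the helper introduced in blk_types.h below. */
static inline bool op_is_flush(unsigned int op)
{
	return op & (REQ_FUA | REQ_PREFLUSH);
}

int main(void)
{
	unsigned int plain_write = 0;
	unsigned int fua_write = REQ_FUA;
	unsigned int flush_write = REQ_PREFLUSH;

	/*
	 * Callers previously open-coded the same test, e.g.
	 *     if (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA)) ...
	 * and now ask the helper instead.
	 */
	printf("plain write enters flush machinery:    %d\n", op_is_flush(plain_write));
	printf("FUA write enters flush machinery:      %d\n", op_is_flush(fua_write));
	printf("preflush write enters flush machinery: %d\n", op_is_flush(flush_write));
	return 0;
}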
diff --git a/block/blk-core.c b/block/blk-core.c
index a61f1407f4f6..b830e14117dd 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1035,7 +1035,7 @@ static bool blk_rq_should_init_elevator(struct bio *bio)
 	 * Flush requests do not use the elevator so skip initialization.
 	 * This allows a request to share the flush and elevator data.
 	 */
-	if (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA))
+	if (op_is_flush(bio->bi_opf))
 		return false;
 
 	return true;
@@ -1641,7 +1641,7 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
 		return BLK_QC_T_NONE;
 	}
 
-	if (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA)) {
+	if (op_is_flush(bio->bi_opf)) {
 		spin_lock_irq(q->queue_lock);
 		where = ELEVATOR_INSERT_FLUSH;
 		goto get_rq;
@@ -2145,7 +2145,7 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
 	 */
 	BUG_ON(blk_queued_rq(rq));
 
-	if (rq->cmd_flags & (REQ_PREFLUSH | REQ_FUA))
+	if (op_is_flush(rq->cmd_flags))
 		where = ELEVATOR_INSERT_FLUSH;
 
 	add_acct_request(q, rq, where);
@@ -3256,7 +3256,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 		/*
 		 * rq is already accounted, so use raw insert
 		 */
-		if (rq->cmd_flags & (REQ_PREFLUSH | REQ_FUA))
+		if (op_is_flush(rq->cmd_flags))
 			__elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH);
 		else
 			__elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE);
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index c27613de80c5..4139b07ab33b 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -111,7 +111,6 @@ struct request *blk_mq_sched_get_request(struct request_queue *q,
 	struct blk_mq_hw_ctx *hctx;
 	struct blk_mq_ctx *ctx;
 	struct request *rq;
-	const bool is_flush = op & (REQ_PREFLUSH | REQ_FUA);
 
 	blk_queue_enter_live(q);
 	ctx = blk_mq_get_ctx(q);
@@ -126,7 +125,7 @@ struct request *blk_mq_sched_get_request(struct request_queue *q,
 		 * Flush requests are special and go directly to the
 		 * dispatch list.
 		 */
-		if (!is_flush && e->type->ops.mq.get_request) {
+		if (!op_is_flush(op) && e->type->ops.mq.get_request) {
 			rq = e->type->ops.mq.get_request(q, op, data);
 			if (rq)
 				rq->rq_flags |= RQF_QUEUED;
@@ -139,7 +138,7 @@ struct request *blk_mq_sched_get_request(struct request_queue *q,
 	}
 
 	if (rq) {
-		if (!is_flush) {
+		if (!op_is_flush(op)) {
 			rq->elv.icq = NULL;
 			if (e && e->type->icq_cache)
 				blk_mq_sched_assign_ioc(q, rq, bio);
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 301ae29fd229..da2123dd681e 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1406,7 +1406,7 @@ insert:
 static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 {
 	const int is_sync = op_is_sync(bio->bi_opf);
-	const int is_flush_fua = bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
+	const int is_flush_fua = op_is_flush(bio->bi_opf);
 	struct blk_mq_alloc_data data = { .flags = 0 };
 	struct request *rq;
 	unsigned int request_count = 0, srcu_idx;
@@ -1527,7 +1527,7 @@ done:
 static blk_qc_t blk_sq_make_request(struct request_queue *q, struct bio *bio)
 {
 	const int is_sync = op_is_sync(bio->bi_opf);
-	const int is_flush_fua = bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
+	const int is_flush_fua = op_is_flush(bio->bi_opf);
 	struct blk_plug *plug;
 	unsigned int request_count = 0;
 	struct blk_mq_alloc_data data = { .flags = 0 };
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 76d20875503c..01035e718c1c 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -666,7 +666,7 @@ static inline struct search *search_alloc(struct bio *bio,
 	s->iop.write_prio = 0;
 	s->iop.error = 0;
 	s->iop.flags = 0;
-	s->iop.flush_journal = (bio->bi_opf & (REQ_PREFLUSH|REQ_FUA)) != 0;
+	s->iop.flush_journal = op_is_flush(bio->bi_opf);
 	s->iop.wq = bcache_wq;
 
 	return s;
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index e04c61e0839e..5b9cf56de8ef 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -787,8 +787,7 @@ static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
 	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
 
 	spin_lock_irqsave(&cache->lock, flags);
-	if (cache->need_tick_bio &&
-	    !(bio->bi_opf & (REQ_FUA | REQ_PREFLUSH)) &&
+	if (cache->need_tick_bio && !op_is_flush(bio->bi_opf) &&
 	    bio_op(bio) != REQ_OP_DISCARD) {
 		pb->tick = true;
 		cache->need_tick_bio = false;
@@ -828,11 +827,6 @@ static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio)
 	return to_oblock(block_nr);
 }
 
-static int bio_triggers_commit(struct cache *cache, struct bio *bio)
-{
-	return bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
-}
-
 /*
  * You must increment the deferred set whilst the prison cell is held. To
  * encourage this, we ask for 'cell' to be passed in.
@@ -884,7 +878,7 @@ static void issue(struct cache *cache, struct bio *bio)
 {
 	unsigned long flags;
 
-	if (!bio_triggers_commit(cache, bio)) {
+	if (!op_is_flush(bio->bi_opf)) {
 		accounted_request(cache, bio);
 		return;
 	}
@@ -1069,8 +1063,7 @@ static void dec_io_migrations(struct cache *cache)
 
 static bool discard_or_flush(struct bio *bio)
 {
-	return bio_op(bio) == REQ_OP_DISCARD ||
-		bio->bi_opf & (REQ_PREFLUSH | REQ_FUA);
+	return bio_op(bio) == REQ_OP_DISCARD || op_is_flush(bio->bi_opf);
 }
 
 static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell)
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index d1c05c12a9db..110982db4b48 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -699,7 +699,7 @@ static void remap_to_origin(struct thin_c *tc, struct bio *bio)
 
 static int bio_triggers_commit(struct thin_c *tc, struct bio *bio)
 {
-	return (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA)) &&
+	return op_is_flush(bio->bi_opf) &&
 		dm_thin_changed_this_transaction(tc->td);
 }
 
@@ -870,8 +870,7 @@ static void __inc_remap_and_issue_cell(void *context,
 	struct bio *bio;
 
 	while ((bio = bio_list_pop(&cell->bios))) {
-		if (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA) ||
-		    bio_op(bio) == REQ_OP_DISCARD)
+		if (op_is_flush(bio->bi_opf) || bio_op(bio) == REQ_OP_DISCARD)
 			bio_list_add(&info->defer_bios, bio);
 		else {
 			inc_all_io_entry(info->tc->pool, bio);
@@ -1716,9 +1715,8 @@ static void __remap_and_issue_shared_cell(void *context,
 	struct bio *bio;
 
 	while ((bio = bio_list_pop(&cell->bios))) {
-		if ((bio_data_dir(bio) == WRITE) ||
-		    (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA) ||
-		     bio_op(bio) == REQ_OP_DISCARD))
+		if (bio_data_dir(bio) == WRITE || op_is_flush(bio->bi_opf) ||
+		    bio_op(bio) == REQ_OP_DISCARD)
 			bio_list_add(&info->defer_bios, bio);
 		else {
 			struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));;
@@ -2635,8 +2633,7 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
 		return DM_MAPIO_SUBMITTED;
 	}
 
-	if (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA) ||
-	    bio_op(bio) == REQ_OP_DISCARD) {
+	if (op_is_flush(bio->bi_opf) || bio_op(bio) == REQ_OP_DISCARD) {
 		thin_defer_bio_with_throttle(tc, bio);
 		return DM_MAPIO_SUBMITTED;
 	}
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 0e5b1cd5113c..37c9a43c5e78 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -221,6 +221,15 @@ static inline bool op_is_write(unsigned int op)
 }
 
 /*
+ * Check if the bio or request is one that needs special treatment in the
+ * flush state machine.
+ */
+static inline bool op_is_flush(unsigned int op)
+{
+	return op & (REQ_FUA | REQ_PREFLUSH);
+}
+
+/*
  * Reads are always treated as synchronous, as are requests with the FUA or
  * PREFLUSH flag. Other operations may be marked as synchronous using the
  * REQ_SYNC flag.