author     Christoph Hellwig <hch@lst.de>    2014-09-13 19:40:08 -0400
committer  Jens Axboe <axboe@fb.com>         2014-09-22 14:00:07 -0400
commit     bf57229745f849e500ba69ff91e35bc8160a7373
tree       9b90e6c5b32e96b082ad5c135c22de2c9cc82b77
parent     6d11fb454b161a4565c57be6f1c5527235741003
blk-mq: remove REQ_END
Pass an explicit parameter for the last request in a batch to ->queue_rq
instead of using a request flag. Besides being a cleaner and non-stateful
interface, this is also required for the next patch, which fixes the blk-mq
I/O submission code to not start a timer too early.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
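
In driver terms the conversion is mechanical: the submission hook grows a
third argument, and drivers that batch doorbell writes test it directly
instead of peeking at rq->cmd_flags. A minimal sketch of a converted hook,
assuming hypothetical helpers my_submit() and my_kick_hw() (not from this
patch):

	/* Before: the hint was latched into rq->cmd_flags as REQ_END by
	 * blk_mq_start_request() and had to be cleared again on requeue. */
	static int my_queue_rq_old(struct blk_mq_hw_ctx *hctx, struct request *rq)
	{
		my_submit(rq);
		if (rq->cmd_flags & REQ_END)	/* stateful request flag */
			my_kick_hw(hctx);
		return BLK_MQ_RQ_QUEUE_OK;
	}

	/* After: the hint arrives as an explicit per-call argument. */
	static int my_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq,
			       bool last)
	{
		my_submit(rq);
		if (last)			/* no state to clean up later */
			my_kick_hw(hctx);
		return BLK_MQ_RQ_QUEUE_OK;
	}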
 block/blk-mq.c                    | 22 +++++-----------------
 drivers/block/mtip32xx/mtip32xx.c |  3 ++-
 drivers/block/null_blk.c          |  3 ++-
 drivers/block/virtio_blk.c        |  4 ++--
 drivers/scsi/scsi_lib.c           |  3 ++-
 include/linux/blk-mq.h            |  2 +-
 include/linux/blk_types.h         |  2 --
 7 files changed, 14 insertions(+), 25 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index e743d28620b2..32b4797f4186 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -384,7 +384,7 @@ void blk_mq_complete_request(struct request *rq)
 }
 EXPORT_SYMBOL(blk_mq_complete_request);
 
-static void blk_mq_start_request(struct request *rq, bool last)
+static void blk_mq_start_request(struct request *rq)
 {
 	struct request_queue *q = rq->q;
 
@@ -421,16 +421,6 @@ static void blk_mq_start_request(struct request *rq, bool last)
 		 */
 		rq->nr_phys_segments++;
 	}
-
-	/*
-	 * Flag the last request in the series so that drivers know when IO
-	 * should be kicked off, if they don't do it on a per-request basis.
-	 *
-	 * Note: the flag isn't the only condition drivers should do kick off.
-	 * If drive is busy, the last request might not have the bit set.
-	 */
-	if (last)
-		rq->cmd_flags |= REQ_END;
 }
 
 static void __blk_mq_requeue_request(struct request *rq)
@@ -440,8 +430,6 @@ static void __blk_mq_requeue_request(struct request *rq)
 	trace_block_rq_requeue(q, rq);
 	clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
 
-	rq->cmd_flags &= ~REQ_END;
-
 	if (q->dma_drain_size && blk_rq_bytes(rq))
 		rq->nr_phys_segments--;
 }
@@ -755,9 +743,9 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
 		rq = list_first_entry(&rq_list, struct request, queuelist);
 		list_del_init(&rq->queuelist);
 
-		blk_mq_start_request(rq, list_empty(&rq_list));
+		blk_mq_start_request(rq);
 
-		ret = q->mq_ops->queue_rq(hctx, rq);
+		ret = q->mq_ops->queue_rq(hctx, rq, list_empty(&rq_list));
 		switch (ret) {
 		case BLK_MQ_RQ_QUEUE_OK:
 			queued++;
@@ -1198,14 +1186,14 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
 		int ret;
 
 		blk_mq_bio_to_request(rq, bio);
-		blk_mq_start_request(rq, true);
+		blk_mq_start_request(rq);
 
 		/*
 		 * For OK queue, we are done.  For error, kill it.  Any other
 		 * error (busy), just add it to our list as we previously
 		 * would have done
 		 */
-		ret = q->mq_ops->queue_rq(data.hctx, rq);
+		ret = q->mq_ops->queue_rq(data.hctx, rq, true);
 		if (ret == BLK_MQ_RQ_QUEUE_OK)
 			goto done;
 		else {
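
The requeue hunk above shows why the flag was a problem: because REQ_END
lived in rq->cmd_flags, __blk_mq_requeue_request() had to remember to clear
it, or a requeued request could be redispatched carrying a stale "last"
hint. With the hint computed as list_empty(&rq_list) at each ->queue_rq()
call, there is no per-request state to get out of sync.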
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 5c8e7fe07745..0e2084f37c67 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -3775,7 +3775,8 @@ static bool mtip_check_unal_depth(struct blk_mq_hw_ctx *hctx,
 	return false;
 }
 
-static int mtip_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)
+static int mtip_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq,
+		bool last)
 {
 	int ret;
 
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index 00d469c7f9f7..c5b7315c2c13 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -313,7 +313,8 @@ static void null_request_fn(struct request_queue *q)
 	}
 }
 
-static int null_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)
+static int null_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq,
+		bool last)
 {
 	struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq);
 
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 0a581400de0f..13756e016797 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -164,14 +164,14 @@ static void virtblk_done(struct virtqueue *vq)
 	spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
 }
 
-static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req)
+static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req,
+		bool last)
 {
 	struct virtio_blk *vblk = hctx->queue->queuedata;
 	struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
 	unsigned long flags;
 	unsigned int num;
 	int qid = hctx->queue_num;
-	const bool last = (req->cmd_flags & REQ_END) != 0;
 	int err;
 	bool notify = false;
 
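
virtio-blk is the in-tree driver that actually consumes the hint: it kicks
the virtqueue only once per batch. With the flag gone, the tail of
virtio_queue_rq() keys the kick off the new argument, roughly like this (a
sketch of the driver logic of this era, error paths omitted):

	if (last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
		notify = true;
	spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);

	if (notify)
		virtqueue_notify(vblk->vqs[qid].vq);
	return BLK_MQ_RQ_QUEUE_OK;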
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 1f2bae475cb7..f1df41168391 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1855,7 +1855,8 @@ static void scsi_mq_done(struct scsi_cmnd *cmd)
 	blk_mq_complete_request(cmd->request);
 }
 
-static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req)
+static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req,
+		bool last)
 {
 	struct request_queue *q = req->q;
 	struct scsi_device *sdev = q->queuedata;
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index a1e31f274fcd..9c4e306a9217 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -77,7 +77,7 @@ struct blk_mq_tag_set {
 	struct list_head tag_list;
 };
 
-typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, struct request *);
+typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, struct request *, bool);
 typedef struct blk_mq_hw_ctx *(map_queue_fn)(struct request_queue *, const int);
 typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
 typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
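
This typedef is what forces every driver update in the series, since each
mq driver wires its handler into a blk_mq_ops table. A minimal table of
this era might look like the following sketch (my_queue_rq is the
hypothetical handler from the example above; blk_mq_map_queue is the stock
mapping helper):

	static struct blk_mq_ops my_mq_ops = {
		.queue_rq	= my_queue_rq,		/* now takes bool last */
		.map_queue	= blk_mq_map_queue,	/* default ctx->hctx mapping */
	};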
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 66c2167f04a9..bb7d66460e7a 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -188,7 +188,6 @@ enum rq_flag_bits {
 	__REQ_MIXED_MERGE,	/* merge of different types, fail separately */
 	__REQ_KERNEL, 		/* direct IO to kernel pages */
 	__REQ_PM,		/* runtime pm request */
-	__REQ_END,		/* last of chain of requests */
 	__REQ_HASHED,		/* on IO scheduler merge hash */
 	__REQ_MQ_INFLIGHT,	/* track inflight for MQ */
 	__REQ_NR_BITS,		/* stops here */
@@ -242,7 +241,6 @@ enum rq_flag_bits {
 #define REQ_SECURE		(1ULL << __REQ_SECURE)
 #define REQ_KERNEL		(1ULL << __REQ_KERNEL)
 #define REQ_PM			(1ULL << __REQ_PM)
-#define REQ_END			(1ULL << __REQ_END)
 #define REQ_HASHED		(1ULL << __REQ_HASHED)
 #define REQ_MQ_INFLIGHT		(1ULL << __REQ_MQ_INFLIGHT)
 