author	Tejun Heo <tj@kernel.org>	2019-08-28 18:05:57 -0400
committer	Jens Axboe <axboe@kernel.dk>	2019-08-28 23:17:10 -0400
commit	6f816b4b746c2241540e537682d30d8e9997d674 (patch)
tree	367f6fc02a7f8605cb19fd71f683d69bd96fab98 /block
parent	beab17fc2a507e85dd18b3cef83820c5770c5f34 (diff)
blk-mq: add optional request->alloc_time_ns
There are currently two start time timestamps - start_time_ns and io_start_time_ns. The former marks the request allocation and the latter the issue-to-device time. The planned io.weight controller needs to measure the total time bios take to execute after they leave rq_qos, including the time spent waiting for a request to become available, which can easily dominate on saturated devices.

This patch adds request->alloc_time_ns, which records when the request allocation attempt started. As it isn't used for the usual stats, make it optional behind CONFIG_BLK_RQ_ALLOC_TIME and QUEUE_FLAG_RQ_ALLOC_TIME, so that it can be compiled out when there are no users and, even when compiled in, is active only on queues which need it.

v2: s/pre_start_time/alloc_time/ and add CONFIG_BLK_RQ_ALLOC_TIME gating as suggested by Jens.

Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
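Note that this block/-limited view omits the companion hunks outside block/: the new request field and the queue-flag test used below live in include/linux/blkdev.h. Roughly (a paraphrase of the full patch, not the verbatim hunks), they look like this:

	/* in struct request */
#ifdef CONFIG_BLK_RQ_ALLOC_TIME
	/* time when the first bio started allocating this request */
	u64 alloc_time_ns;
#endif

	/* queue-flag test called from blk_mq_get_request() below */
#ifdef CONFIG_BLK_RQ_ALLOC_TIME
#define blk_queue_rq_alloc_time(q)	\
	test_bit(QUEUE_FLAG_RQ_ALLOC_TIME, &(q)->queue_flags)
#else
#define blk_queue_rq_alloc_time(q)	false
#endif

With CONFIG_BLK_RQ_ALLOC_TIME disabled, blk_queue_rq_alloc_time() is constant-false, so the ktime_get_ns() call in the allocation path compiles away and struct request carries no extra field.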
Diffstat (limited to 'block')
-rw-r--r--	block/Kconfig	|  3 +++
-rw-r--r--	block/blk-mq.c	| 13 ++++++++++---
2 files changed, 14 insertions(+), 2 deletions(-)
diff --git a/block/Kconfig b/block/Kconfig
index 8b5f8e560eb4..1b62ad6d0e12 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -26,6 +26,9 @@ menuconfig BLOCK
 
 if BLOCK
 
+config BLK_RQ_ALLOC_TIME
+	bool
+
 config BLK_SCSI_REQUEST
 	bool
 
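BLK_RQ_ALLOC_TIME is a hidden (promptless) bool, so it cannot be enabled from menuconfig; it is only turned on when another option selects it. A future consumer such as the planned io.weight controller would pull it in along these lines (illustrative Kconfig fragment, not part of this patch):

config BLK_CGROUP_IOCOST
	bool "Enable support for cost model based cgroup IO controller"
	select BLK_RQ_ALLOC_TIME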
diff --git a/block/blk-mq.c b/block/blk-mq.c
index cf768d0c2950..004411236034 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -291,7 +291,7 @@ static inline bool blk_mq_need_time_stamp(struct request *rq)
 }
 
 static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
-		unsigned int tag, unsigned int op)
+		unsigned int tag, unsigned int op, u64 alloc_time_ns)
 {
 	struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
 	struct request *rq = tags->static_rqs[tag];
@@ -325,6 +325,9 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
 	RB_CLEAR_NODE(&rq->rb_node);
 	rq->rq_disk = NULL;
 	rq->part = NULL;
+#ifdef CONFIG_BLK_RQ_ALLOC_TIME
+	rq->alloc_time_ns = alloc_time_ns;
+#endif
 	if (blk_mq_need_time_stamp(rq))
 		rq->start_time_ns = ktime_get_ns();
 	else
@@ -356,8 +359,14 @@ static struct request *blk_mq_get_request(struct request_queue *q,
 	struct request *rq;
 	unsigned int tag;
 	bool clear_ctx_on_error = false;
+	u64 alloc_time_ns = 0;
 
 	blk_queue_enter_live(q);
+
+	/* alloc_time includes depth and tag waits */
+	if (blk_queue_rq_alloc_time(q))
+		alloc_time_ns = ktime_get_ns();
+
 	data->q = q;
 	if (likely(!data->ctx)) {
 		data->ctx = blk_mq_get_ctx(q);
@@ -393,7 +402,7 @@ static struct request *blk_mq_get_request(struct request_queue *q,
 		return NULL;
 	}
 
-	rq = blk_mq_rq_ctx_init(data, tag, data->cmd_flags);
+	rq = blk_mq_rq_ctx_init(data, tag, data->cmd_flags, alloc_time_ns);
 	if (!op_is_flush(data->cmd_flags)) {
 		rq->elv.icq = NULL;
 		if (e && e->type->ops.prepare_request) {
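Taken together: a queue owner opts in at runtime via the queue flag, and a consumer can derive the tag-wait component at completion time as the gap between alloc_time_ns and start_time_ns. A minimal sketch of such a consumer (illustrative only; example_done() and its bookkeeping are not part of this patch):

	/* enable stamping on this queue; without the flag alloc_time_ns stays 0 */
	blk_queue_flag_set(QUEUE_FLAG_RQ_ALLOC_TIME, q);

	/* at completion, with CONFIG_BLK_RQ_ALLOC_TIME enabled: */
	static void example_done(struct request *rq)
	{
		u64 now = ktime_get_ns();

		if (rq->alloc_time_ns && rq->start_time_ns) {
			/* total time on the queue, allocation attempt to completion */
			u64 on_q_ns = now - rq->alloc_time_ns;
			/* time spent waiting for a request/tag to become available */
			u64 rq_wait_ns = rq->start_time_ns - rq->alloc_time_ns;

			/* feed on_q_ns / rq_wait_ns into the controller's accounting */
		}
	}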