commit:    6af54051a07041d8d4e36b1b01136a0db4eb7e23 (patch)
author:    Christoph Hellwig <hch@lst.de>   2017-06-16 12:15:22 -0400
committer: Jens Axboe <axboe@kernel.dk>     2017-06-18 12:08:55 -0400
tree:      283576b0be587fc2b088d75cd2195525eb9e8460 /block/blk-mq.c
parent:    7b9e93616399638521aafd1f01dfcf474c736393 (diff)
blk-mq: simplify blk_mq_free_request
Merge three functions only tail-called by blk_mq_free_request into
blk_mq_free_request.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block/blk-mq.c'):
 block/blk-mq.c | 50 +++++++++++++-----------------------------------
 1 file changed, 15 insertions(+), 35 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 9df7e0394a48..0b17351fccfc 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -395,12 +395,24 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int rw,
 }
 EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);
 
-void __blk_mq_finish_request(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
-		struct request *rq)
+void blk_mq_free_request(struct request *rq)
 {
-	const int sched_tag = rq->internal_tag;
 	struct request_queue *q = rq->q;
+	struct elevator_queue *e = q->elevator;
+	struct blk_mq_ctx *ctx = rq->mq_ctx;
+	struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, ctx->cpu);
+	const int sched_tag = rq->internal_tag;
 
+	if (rq->rq_flags & (RQF_ELVPRIV | RQF_QUEUED)) {
+		if (e && e->type->ops.mq.finish_request)
+			e->type->ops.mq.finish_request(rq);
+		if (rq->elv.icq) {
+			put_io_context(rq->elv.icq->ioc);
+			rq->elv.icq = NULL;
+		}
+	}
+
+	ctx->rq_completed[rq_is_sync(rq)]++;
 	if (rq->rq_flags & RQF_MQ_INFLIGHT)
 		atomic_dec(&hctx->nr_active);
 
@@ -416,38 +428,6 @@ void __blk_mq_finish_request(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
 	blk_mq_sched_restart(hctx);
 	blk_queue_exit(q);
 }
-
-static void blk_mq_finish_hctx_request(struct blk_mq_hw_ctx *hctx,
-		struct request *rq)
-{
-	struct blk_mq_ctx *ctx = rq->mq_ctx;
-
-	ctx->rq_completed[rq_is_sync(rq)]++;
-	__blk_mq_finish_request(hctx, ctx, rq);
-}
-
-void blk_mq_finish_request(struct request *rq)
-{
-	blk_mq_finish_hctx_request(blk_mq_map_queue(rq->q, rq->mq_ctx->cpu), rq);
-}
-EXPORT_SYMBOL_GPL(blk_mq_finish_request);
-
-void blk_mq_free_request(struct request *rq)
-{
-	struct request_queue *q = rq->q;
-	struct elevator_queue *e = q->elevator;
-
-	if (rq->rq_flags & (RQF_ELVPRIV | RQF_QUEUED)) {
-		if (e && e->type->ops.mq.finish_request)
-			e->type->ops.mq.finish_request(rq);
-		if (rq->elv.icq) {
-			put_io_context(rq->elv.icq->ioc);
-			rq->elv.icq = NULL;
-		}
-	}
-
-	blk_mq_finish_request(rq);
-}
 EXPORT_SYMBOL_GPL(blk_mq_free_request);
 
 inline void __blk_mq_end_request(struct request *rq, blk_status_t error)