author    Christoph Hellwig <hch@infradead.org>  2014-02-10 06:24:38 -0500
committer Jens Axboe <axboe@fb.com>  2014-02-10 11:27:31 -0500
commit    30a91cb4ef385fe1b260df204ef314d86fff2850 (patch)
tree      97ecc925cb7868f456d5dd3219980be5dc01d900
parent    c4540a7d8c1e595560e53acedf88901daf15a2b5 (diff)
blk-mq: rework I/O completions
Rework I/O completions to work more like the old code path. blk_mq_end_io now stays out of the business of deferring completions to other CPUs and of calling blk_mark_rq_complete. The latter is very important to allow completing requests that have timed out and thus are already marked complete; the former allows using the IPI callout even for driver-specific completions, instead of having to reimplement them.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
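The driver-facing contract after the rework is easiest to see from the consumer side. Below is a minimal sketch, not part of this patch and with hypothetical "exdrv" names, of how a blk-mq driver is expected to use the split interface: the completion (IRQ) path calls the new blk_mq_complete_request(), and the driver's ->complete hook, run on the submitting CPU, finishes the request with blk_mq_end_io().

	/*
	 * Hypothetical driver fragment (illustration only, not from this patch).
	 * The IRQ path hands the request to the block layer, which marks it
	 * complete and, if ipi_redirect is set, bounces the remaining work to
	 * the submitting CPU; the ->complete hook then runs there.
	 */
	#include <linux/blkdev.h>
	#include <linux/blk-mq.h>

	/* Invoked through q->softirq_done_fn on the submitting CPU. */
	static void exdrv_complete_rq(struct request *rq)
	{
		blk_mq_end_io(rq, rq->errors);
	}

	static struct blk_mq_ops exdrv_mq_ops = {
		/* .queue_rq, .map_queue, etc. omitted; only the new hook shown */
		.complete	= exdrv_complete_rq,
	};

	/* Called from the driver's hard-IRQ completion path. */
	static void exdrv_handle_completion(struct request *rq, int error)
	{
		rq->errors = error;		/* record status for ->complete */
		blk_mq_complete_request(rq);	/* defers to the right CPU if needed */
	}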
-rw-r--r--  block/blk-mq.c          52
-rw-r--r--  block/blk-mq.h           3
-rw-r--r--  block/blk-timeout.c      2
-rw-r--r--  include/linux/blk-mq.h   4
4 files changed, 37 insertions, 24 deletions
diff --git a/block/blk-mq.c b/block/blk-mq.c
index cee96234bf58..14c8f35946e1 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -326,7 +326,7 @@ static void blk_mq_bio_endio(struct request *rq, struct bio *bio, int error)
 	bio_endio(bio, error);
 }
 
-void blk_mq_complete_request(struct request *rq, int error)
+void blk_mq_end_io(struct request *rq, int error)
 {
 	struct bio *bio = rq->bio;
 	unsigned int bytes = 0;
@@ -351,46 +351,53 @@ void blk_mq_complete_request(struct request *rq, int error)
 	else
 		blk_mq_free_request(rq);
 }
+EXPORT_SYMBOL(blk_mq_end_io);
 
-void __blk_mq_end_io(struct request *rq, int error)
-{
-	if (!blk_mark_rq_complete(rq))
-		blk_mq_complete_request(rq, error);
-}
-
-static void blk_mq_end_io_remote(void *data)
+static void __blk_mq_complete_request_remote(void *data)
 {
 	struct request *rq = data;
 
-	__blk_mq_end_io(rq, rq->errors);
+	rq->q->softirq_done_fn(rq);
 }
 
-/*
- * End IO on this request on a multiqueue enabled driver. We'll either do
- * it directly inline, or punt to a local IPI handler on the matching
- * remote CPU.
- */
-void blk_mq_end_io(struct request *rq, int error)
+void __blk_mq_complete_request(struct request *rq)
 {
 	struct blk_mq_ctx *ctx = rq->mq_ctx;
 	int cpu;
 
-	if (!ctx->ipi_redirect)
-		return __blk_mq_end_io(rq, error);
+	if (!ctx->ipi_redirect) {
+		rq->q->softirq_done_fn(rq);
+		return;
+	}
 
 	cpu = get_cpu();
 	if (cpu != ctx->cpu && cpu_online(ctx->cpu)) {
-		rq->errors = error;
-		rq->csd.func = blk_mq_end_io_remote;
+		rq->csd.func = __blk_mq_complete_request_remote;
 		rq->csd.info = rq;
 		rq->csd.flags = 0;
 		__smp_call_function_single(ctx->cpu, &rq->csd, 0);
 	} else {
-		__blk_mq_end_io(rq, error);
+		rq->q->softirq_done_fn(rq);
 	}
 	put_cpu();
 }
-EXPORT_SYMBOL(blk_mq_end_io);
+
+/**
+ * blk_mq_complete_request - end I/O on a request
+ * @rq:		the request being processed
+ *
+ * Description:
+ *	Ends all I/O on a request. It does not handle partial completions.
+ *	The actual completion happens out-of-order, through an IPI handler.
+ **/
+void blk_mq_complete_request(struct request *rq)
+{
+	if (unlikely(blk_should_fake_timeout(rq->q)))
+		return;
+	if (!blk_mark_rq_complete(rq))
+		__blk_mq_complete_request(rq);
+}
+EXPORT_SYMBOL(blk_mq_complete_request);
 
 static void blk_mq_start_request(struct request *rq)
 {
@@ -1399,6 +1406,9 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_reg *reg,
 	if (reg->timeout)
 		blk_queue_rq_timeout(q, reg->timeout);
 
+	if (reg->ops->complete)
+		blk_queue_softirq_done(q, reg->ops->complete);
+
 	blk_mq_init_flush(q);
 	blk_mq_init_cpu_queues(q, reg->nr_hw_queues);
 
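For context, blk_queue_softirq_done() called by the new blk_mq_init_queue() hunk is an existing helper and is not touched by this patch; in block/blk-settings.c of this era it simply records the callback that __blk_mq_complete_request() above invokes:

	/* Existing helper (shown for reference, unmodified by this patch). */
	void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
	{
		q->softirq_done_fn = fn;
	}
	EXPORT_SYMBOL(blk_queue_softirq_done);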
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 5c3917984b00..f29b645f0e1c 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -22,8 +22,7 @@ struct blk_mq_ctx {
 	struct kobject		kobj;
 };
 
-void __blk_mq_end_io(struct request *rq, int error);
-void blk_mq_complete_request(struct request *rq, int error);
+void __blk_mq_complete_request(struct request *rq);
 void blk_mq_run_request(struct request *rq, bool run_queue, bool async);
 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
 void blk_mq_init_flush(struct request_queue *q);
diff --git a/block/blk-timeout.c b/block/blk-timeout.c
index bba81c9348e1..d96f7061c6fd 100644
--- a/block/blk-timeout.c
+++ b/block/blk-timeout.c
@@ -91,7 +91,7 @@ static void blk_rq_timed_out(struct request *req)
 	case BLK_EH_HANDLED:
 		/* Can we use req->errors here? */
 		if (q->mq_ops)
-			blk_mq_complete_request(req, req->errors);
+			__blk_mq_complete_request(req);
 		else
 			__blk_complete_request(req);
 		break;
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index b7638be58599..468be242db90 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -86,6 +86,8 @@ struct blk_mq_ops {
 	 */
 	rq_timed_out_fn	*timeout;
 
+	softirq_done_fn	*complete;
+
 	/*
 	 * Override for hctx allocations (should probably go)
 	 */
@@ -137,6 +139,8 @@ void blk_mq_free_single_hw_queue(struct blk_mq_hw_ctx *, unsigned int);
 
 void blk_mq_end_io(struct request *rq, int error);
 
+void blk_mq_complete_request(struct request *rq);
+
 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
 void blk_mq_stop_hw_queues(struct request_queue *q);