about summary refs log tree commit diff stats
path: root/block/blk-mq.c
diff options
context:
space:
mode:
Diffstat (limited to 'block/blk-mq.c')
-rw-r--r-- block/blk-mq.c | 52
1 file changed, 31 insertions, 21 deletions
diff --git a/block/blk-mq.c b/block/blk-mq.c
index cee96234bf58..14c8f35946e1 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -326,7 +326,7 @@ static void blk_mq_bio_endio(struct request *rq, struct bio *bio, int error)
326 bio_endio(bio, error); 326 bio_endio(bio, error);
327} 327}
328 328
329void blk_mq_complete_request(struct request *rq, int error) 329void blk_mq_end_io(struct request *rq, int error)
330{ 330{
331 struct bio *bio = rq->bio; 331 struct bio *bio = rq->bio;
332 unsigned int bytes = 0; 332 unsigned int bytes = 0;
@@ -351,46 +351,53 @@ void blk_mq_complete_request(struct request *rq, int error)
351 else 351 else
352 blk_mq_free_request(rq); 352 blk_mq_free_request(rq);
353} 353}
354EXPORT_SYMBOL(blk_mq_end_io);
354 355
355void __blk_mq_end_io(struct request *rq, int error) 356static void __blk_mq_complete_request_remote(void *data)
356{
357 if (!blk_mark_rq_complete(rq))
358 blk_mq_complete_request(rq, error);
359}
360
361static void blk_mq_end_io_remote(void *data)
362{ 357{
363 struct request *rq = data; 358 struct request *rq = data;
364 359
365 __blk_mq_end_io(rq, rq->errors); 360 rq->q->softirq_done_fn(rq);
366} 361}
367 362
368/* 363void __blk_mq_complete_request(struct request *rq)
369 * End IO on this request on a multiqueue enabled driver. We'll either do
370 * it directly inline, or punt to a local IPI handler on the matching
371 * remote CPU.
372 */
373void blk_mq_end_io(struct request *rq, int error)
374{ 364{
375 struct blk_mq_ctx *ctx = rq->mq_ctx; 365 struct blk_mq_ctx *ctx = rq->mq_ctx;
376 int cpu; 366 int cpu;
377 367
378 if (!ctx->ipi_redirect) 368 if (!ctx->ipi_redirect) {
379 return __blk_mq_end_io(rq, error); 369 rq->q->softirq_done_fn(rq);
370 return;
371 }
380 372
381 cpu = get_cpu(); 373 cpu = get_cpu();
382 if (cpu != ctx->cpu && cpu_online(ctx->cpu)) { 374 if (cpu != ctx->cpu && cpu_online(ctx->cpu)) {
383 rq->errors = error; 375 rq->csd.func = __blk_mq_complete_request_remote;
384 rq->csd.func = blk_mq_end_io_remote;
385 rq->csd.info = rq; 376 rq->csd.info = rq;
386 rq->csd.flags = 0; 377 rq->csd.flags = 0;
387 __smp_call_function_single(ctx->cpu, &rq->csd, 0); 378 __smp_call_function_single(ctx->cpu, &rq->csd, 0);
388 } else { 379 } else {
389 __blk_mq_end_io(rq, error); 380 rq->q->softirq_done_fn(rq);
390 } 381 }
391 put_cpu(); 382 put_cpu();
392} 383}
393EXPORT_SYMBOL(blk_mq_end_io); 384
385/**
386 * blk_mq_complete_request - end I/O on a request
387 * @rq: the request being processed
388 *
389 * Description:
390 * Ends all I/O on a request. It does not handle partial completions.
391 * The actual completion happens out-of-order, through a IPI handler.
392 **/
393void blk_mq_complete_request(struct request *rq)
394{
395 if (unlikely(blk_should_fake_timeout(rq->q)))
396 return;
397 if (!blk_mark_rq_complete(rq))
398 __blk_mq_complete_request(rq);
399}
400EXPORT_SYMBOL(blk_mq_complete_request);
394 401
395static void blk_mq_start_request(struct request *rq) 402static void blk_mq_start_request(struct request *rq)
396{ 403{
@@ -1399,6 +1406,9 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_reg *reg,
1399 if (reg->timeout) 1406 if (reg->timeout)
1400 blk_queue_rq_timeout(q, reg->timeout); 1407 blk_queue_rq_timeout(q, reg->timeout);
1401 1408
1409 if (reg->ops->complete)
1410 blk_queue_softirq_done(q, reg->ops->complete);
1411
1402 blk_mq_init_flush(q); 1412 blk_mq_init_flush(q);
1403 blk_mq_init_cpu_queues(q, reg->nr_hw_queues); 1413 blk_mq_init_cpu_queues(q, reg->nr_hw_queues);
1404 1414