author		Christoph Hellwig <hch@lst.de>	2014-05-28 10:08:02 -0400
committer	Jens Axboe <axboe@fb.com>	2014-05-28 10:08:02 -0400
commit		6fca6a611c27f1f0d90fbe1cc3c229dbf8c09e48 (patch)
tree		d3348f3ab1169db9b5a1fca67a8fd2164152530c /block/blk-mq.c
parent		7738dac4f697ffbd0ed4c4aeb69a714ef9d876da (diff)
blk-mq: add helper to insert requests from irq context
Both the cache flush state machine and the SCSI midlayer want to submit
requests from irq context, and the current per-request requeue_work
unfortunately causes corruption due to sharing with the csd field for
flushes.  Replace them with a per-request_queue list of requests to
be requeued.

Based on an earlier test by Ming Lei.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reported-by: Ming Lei <tom.leiming@gmail.com>
Tested-by: Ming Lei <tom.leiming@gmail.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'block/blk-mq.c')
-rw-r--r--	block/blk-mq.c	64
1 file changed, 63 insertions(+), 1 deletion(-)
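The hunks below use three queue members (q->requeue_work, q->requeue_list,
q->requeue_lock) whose declarations live in struct request_queue and are
therefore outside this file-limited diffstat. A minimal sketch of those
members, inferred only from how the code below accesses them (the member
names come from the hunks; their placement in the struct is assumed):

struct request_queue {
	/* ... existing members elided ... */

	struct list_head	requeue_list;	/* requests parked for later requeue */
	spinlock_t		requeue_lock;	/* protects requeue_list; taken with irqs saved */
	struct work_struct	requeue_work;	/* executes blk_mq_requeue_work() */

	/* ... */
};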
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 010b878d53b3..67066ecc79c0 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -516,10 +516,68 @@ void blk_mq_requeue_request(struct request *rq)
 	blk_clear_rq_complete(rq);
 
 	BUG_ON(blk_queued_rq(rq));
-	blk_mq_insert_request(rq, true, true, false);
+	blk_mq_add_to_requeue_list(rq, true);
 }
 EXPORT_SYMBOL(blk_mq_requeue_request);
 
+static void blk_mq_requeue_work(struct work_struct *work)
+{
+	struct request_queue *q =
+		container_of(work, struct request_queue, requeue_work);
+	LIST_HEAD(rq_list);
+	struct request *rq, *next;
+	unsigned long flags;
+
+	spin_lock_irqsave(&q->requeue_lock, flags);
+	list_splice_init(&q->requeue_list, &rq_list);
+	spin_unlock_irqrestore(&q->requeue_lock, flags);
+
+	list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
+		if (!(rq->cmd_flags & REQ_SOFTBARRIER))
+			continue;
+
+		rq->cmd_flags &= ~REQ_SOFTBARRIER;
+		list_del_init(&rq->queuelist);
+		blk_mq_insert_request(rq, true, false, false);
+	}
+
+	while (!list_empty(&rq_list)) {
+		rq = list_entry(rq_list.next, struct request, queuelist);
+		list_del_init(&rq->queuelist);
+		blk_mq_insert_request(rq, false, false, false);
+	}
+
+	blk_mq_run_queues(q, false);
+}
+
+void blk_mq_add_to_requeue_list(struct request *rq, bool at_head)
+{
+	struct request_queue *q = rq->q;
+	unsigned long flags;
+
+	/*
+	 * We abuse this flag that is otherwise used by the I/O scheduler to
+	 * request head insertation from the workqueue.
+	 */
+	BUG_ON(rq->cmd_flags & REQ_SOFTBARRIER);
+
+	spin_lock_irqsave(&q->requeue_lock, flags);
+	if (at_head) {
+		rq->cmd_flags |= REQ_SOFTBARRIER;
+		list_add(&rq->queuelist, &q->requeue_list);
+	} else {
+		list_add_tail(&rq->queuelist, &q->requeue_list);
+	}
+	spin_unlock_irqrestore(&q->requeue_lock, flags);
+}
+EXPORT_SYMBOL(blk_mq_add_to_requeue_list);
+
+void blk_mq_kick_requeue_list(struct request_queue *q)
+{
+	kblockd_schedule_work(&q->requeue_work);
+}
+EXPORT_SYMBOL(blk_mq_kick_requeue_list);
+
 struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
 {
 	return tags->rqs[tag];
@@ -1812,6 +1870,10 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 
 	q->sg_reserved_size = INT_MAX;
 
+	INIT_WORK(&q->requeue_work, blk_mq_requeue_work);
+	INIT_LIST_HEAD(&q->requeue_list);
+	spin_lock_init(&q->requeue_lock);
+
 	if (q->nr_hw_queues > 1)
 		blk_queue_make_request(q, blk_mq_make_request);
 	else
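A usage sketch of the two new exports from a driver's irq-context
completion path. my_driver_complete_irq() and its retry decision are
hypothetical illustration only; blk_mq_add_to_requeue_list() and
blk_mq_kick_requeue_list() are the helpers added above, and their
prototypes are assumed to be visible via linux/blk-mq.h:

#include <linux/blkdev.h>
#include <linux/blk-mq.h>

/* Hypothetical completion handler running in hard-irq context. */
static void my_driver_complete_irq(struct request *rq, bool retry)
{
	if (!retry) {
		/* ... driver's normal completion path ... */
		return;
	}

	/*
	 * Safe from irq context: the helper only takes q->requeue_lock
	 * with interrupts saved and parks the request on the per-queue
	 * list; the actual blk_mq_insert_request() happens later from
	 * the requeue_work item (blk_mq_requeue_work).
	 */
	blk_mq_add_to_requeue_list(rq, false);
	blk_mq_kick_requeue_list(rq->q);
}

Passing at_head=true instead would set REQ_SOFTBARRIER so the work item
inserts the request at the head of the queue, which is what
blk_mq_requeue_request() itself now does in the first hunk.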