-rw-r--r--  block/blk-flush.c       16
-rw-r--r--  block/blk-mq.c          64
-rw-r--r--  include/linux/blk-mq.h   2
-rw-r--r--  include/linux/blkdev.h   5
4 files changed, 73 insertions, 14 deletions
diff --git a/block/blk-flush.c b/block/blk-flush.c
index ec7a224d6733..ef608b35d9be 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -130,21 +130,13 @@ static void blk_flush_restore_request(struct request *rq)
 	blk_clear_rq_complete(rq);
 }
 
-static void mq_flush_run(struct work_struct *work)
-{
-	struct request *rq;
-
-	rq = container_of(work, struct request, requeue_work);
-
-	memset(&rq->csd, 0, sizeof(rq->csd));
-	blk_mq_insert_request(rq, false, true, false);
-}
-
 static bool blk_flush_queue_rq(struct request *rq, bool add_front)
 {
 	if (rq->q->mq_ops) {
-		INIT_WORK(&rq->requeue_work, mq_flush_run);
-		kblockd_schedule_work(&rq->requeue_work);
+		struct request_queue *q = rq->q;
+
+		blk_mq_add_to_requeue_list(rq, add_front);
+		blk_mq_kick_requeue_list(q);
 		return false;
 	} else {
 		if (add_front)
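The hunk above replaces a per-request work item with the shared per-queue requeue list added in blk-mq.c below: the flush code now parks the request and kicks kblockd instead of embedding its own work_struct. A minimal sketch of the resulting pattern for any irq-context caller, assuming nothing beyond the two new helpers (example_requeue_from_irq is a hypothetical name, not part of the patch):

	/*
	 * Illustration only: park the request on the per-queue list (a
	 * list_add under requeue_lock with irqs disabled), then kick
	 * kblockd, which runs blk_mq_requeue_work() and performs the
	 * actual blk_mq_insert_request() from process context.
	 */
	static void example_requeue_from_irq(struct request *rq)
	{
		blk_mq_add_to_requeue_list(rq, false);
		blk_mq_kick_requeue_list(rq->q);
	}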
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 010b878d53b3..67066ecc79c0 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -516,10 +516,68 @@ void blk_mq_requeue_request(struct request *rq)
 	blk_clear_rq_complete(rq);
 
 	BUG_ON(blk_queued_rq(rq));
-	blk_mq_insert_request(rq, true, true, false);
+	blk_mq_add_to_requeue_list(rq, true);
 }
 EXPORT_SYMBOL(blk_mq_requeue_request);
 
+static void blk_mq_requeue_work(struct work_struct *work)
+{
+	struct request_queue *q =
+		container_of(work, struct request_queue, requeue_work);
+	LIST_HEAD(rq_list);
+	struct request *rq, *next;
+	unsigned long flags;
+
+	spin_lock_irqsave(&q->requeue_lock, flags);
+	list_splice_init(&q->requeue_list, &rq_list);
+	spin_unlock_irqrestore(&q->requeue_lock, flags);
+
+	list_for_each_entry_safe(rq, next, &rq_list, queuelist) {
+		if (!(rq->cmd_flags & REQ_SOFTBARRIER))
+			continue;
+
+		rq->cmd_flags &= ~REQ_SOFTBARRIER;
+		list_del_init(&rq->queuelist);
+		blk_mq_insert_request(rq, true, false, false);
+	}
+
+	while (!list_empty(&rq_list)) {
+		rq = list_entry(rq_list.next, struct request, queuelist);
+		list_del_init(&rq->queuelist);
+		blk_mq_insert_request(rq, false, false, false);
+	}
+
+	blk_mq_run_queues(q, false);
+}
+
+void blk_mq_add_to_requeue_list(struct request *rq, bool at_head)
+{
+	struct request_queue *q = rq->q;
+	unsigned long flags;
+
+	/*
+	 * We abuse this flag that is otherwise used by the I/O scheduler to
+	 * request head insertion from the workqueue.
+	 */
+	BUG_ON(rq->cmd_flags & REQ_SOFTBARRIER);
+
+	spin_lock_irqsave(&q->requeue_lock, flags);
+	if (at_head) {
+		rq->cmd_flags |= REQ_SOFTBARRIER;
+		list_add(&rq->queuelist, &q->requeue_list);
+	} else {
+		list_add_tail(&rq->queuelist, &q->requeue_list);
+	}
+	spin_unlock_irqrestore(&q->requeue_lock, flags);
+}
+EXPORT_SYMBOL(blk_mq_add_to_requeue_list);
+
+void blk_mq_kick_requeue_list(struct request_queue *q)
+{
+	kblockd_schedule_work(&q->requeue_work);
+}
+EXPORT_SYMBOL(blk_mq_kick_requeue_list);
+
 struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag)
 {
 	return tags->rqs[tag];
@@ -1812,6 +1870,10 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *set)
 
 	q->sg_reserved_size = INT_MAX;
 
+	INIT_WORK(&q->requeue_work, blk_mq_requeue_work);
+	INIT_LIST_HEAD(&q->requeue_list);
+	spin_lock_init(&q->requeue_lock);
+
 	if (q->nr_hw_queues > 1)
 		blk_queue_make_request(q, blk_mq_make_request);
 	else
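Note that blk_mq_requeue_request() above only parks the request via blk_mq_add_to_requeue_list(rq, true); it does not kick the list, so the caller owns that step. A hedged sketch of how a driver completion handler might use the pair (my_driver_complete and the -EAGAIN retry policy are assumptions for illustration, not from this patch):

	/*
	 * Illustration only: retry a transiently failed command by
	 * requeueing it at the head of the requeue list, then kick
	 * kblockd to reinsert it and rerun the queue.
	 */
	static void my_driver_complete(struct request *rq, int error)
	{
		if (error == -EAGAIN) {
			blk_mq_requeue_request(rq);
			blk_mq_kick_requeue_list(rq->q);
			return;
		}
		blk_mq_end_io(rq, error);
	}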
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 5b171fbe95c5..b9a74a386dbc 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -172,6 +172,8 @@ void blk_mq_end_io(struct request *rq, int error);
 void __blk_mq_end_io(struct request *rq, int error);
 
 void blk_mq_requeue_request(struct request *rq);
+void blk_mq_add_to_requeue_list(struct request *rq, bool at_head);
+void blk_mq_kick_requeue_list(struct request_queue *q);
 void blk_mq_complete_request(struct request *rq);
 
 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 6bc011a09e82..913f1c2d3be0 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -99,7 +99,6 @@ struct request {
 	struct list_head queuelist;
 	union {
 		struct call_single_data csd;
-		struct work_struct requeue_work;
 		unsigned long fifo_time;
 	};
 
@@ -463,6 +462,10 @@ struct request_queue {
 	struct request *flush_rq;
 	spinlock_t mq_flush_lock;
 
+	struct list_head requeue_list;
+	spinlock_t requeue_lock;
+	struct work_struct requeue_work;
+
 	struct mutex sysfs_lock;
 
 	int bypass_depth;
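The struct request change is what the rest of the patch buys: requeue_work shared a union with csd, which is why the deleted mq_flush_run() had to clear csd before reinserting the request. Reconstructed from the deleted lines in blk-flush.c above:

	/*
	 * The old flow: requeue_work and csd occupy the same storage, so
	 * the work item's leftover bits had to be wiped before the
	 * completion path could reuse csd for the IPI completion again.
	 */
	memset(&rq->csd, 0, sizeof(rq->csd));
	blk_mq_insert_request(rq, false, true, false);

With the requeue state moved to struct request_queue, no per-request storage is clobbered and the union member can be dropped.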