path: root/drivers/md/dm.c
author      Mike Snitzer <snitzer@redhat.com>   2015-05-27 15:25:27 -0400
committer   Mike Snitzer <snitzer@redhat.com>   2015-05-27 17:37:23 -0400
commit      45714fbed4556149d7f1730f5bae74f81d5e2cd5 (patch)
tree        7bebdecc13511fc03de8b4d08e98d967c45d5d3b /drivers/md/dm.c
parent      4c6dd53dd3674c310d7379c6b3273daa9fd95c79 (diff)
dm: requeue from blk-mq dm_mq_queue_rq() using BLK_MQ_RQ_QUEUE_BUSY

Use BLK_MQ_RQ_QUEUE_BUSY to requeue a blk-mq request directly from the
DM blk-mq device's .queue_rq.  This cleans up the previous convoluted
handling of request requeueing, which returned BLK_MQ_RQ_QUEUE_OK (even
though the request was not OK) and then ran blk_mq_requeue_request()
followed by blk_mq_kick_requeue_list().

Also, document that DM blk-mq on top of old request_fn devices cannot
fail in clone_rq(), since the clone request is preallocated as part of
the pdu.

Reported-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
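For context, a minimal sketch (not taken from dm.c) of the two requeue styles
available to a blk-mq .queue_rq handler on kernels of this era; my_queue_rq()
and my_map_rq() are hypothetical placeholder names, not DM functions:

#include <linux/blkdev.h>
#include <linux/blk-mq.h>

static int my_map_rq(struct request *rq);	/* placeholder mapping step */

static int my_queue_rq(struct blk_mq_hw_ctx *hctx,
		       const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;

	blk_mq_start_request(rq);

	if (my_map_rq(rq) < 0) {
		/*
		 * Old, convoluted style: claim the dispatch succeeded, then
		 * push the request back onto the requeue list by hand:
		 *
		 *	blk_mq_requeue_request(rq);
		 *	blk_mq_kick_requeue_list(rq->q);
		 *	return BLK_MQ_RQ_QUEUE_OK;
		 */

		/*
		 * Style used by this commit: undo any per-request setup and
		 * let the block layer retry by returning BUSY.
		 */
		return BLK_MQ_RQ_QUEUE_BUSY;
	}

	return BLK_MQ_RQ_QUEUE_OK;
}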
Diffstat (limited to 'drivers/md/dm.c')
-rw-r--r--   drivers/md/dm.c   10
1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 1c62ed8d09f4..1badfb250a18 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -2754,13 +2754,15 @@ static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
 	if (dm_table_get_type(map) == DM_TYPE_REQUEST_BASED) {
 		/* clone request is allocated at the end of the pdu */
 		tio->clone = (void *)blk_mq_rq_to_pdu(rq) + sizeof(struct dm_rq_target_io);
-		if (!clone_rq(rq, md, tio, GFP_ATOMIC))
-			return BLK_MQ_RQ_QUEUE_BUSY;
+		(void) clone_rq(rq, md, tio, GFP_ATOMIC);
 		queue_kthread_work(&md->kworker, &tio->work);
 	} else {
 		/* Direct call is fine since .queue_rq allows allocations */
-		if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE)
-			dm_requeue_unmapped_original_request(md, rq);
+		if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE) {
+			/* Undo dm_start_request() before requeuing */
+			rq_completed(md, rq_data_dir(rq), false);
+			return BLK_MQ_RQ_QUEUE_BUSY;
+		}
 	}
 
 	return BLK_MQ_RQ_QUEUE_OK;