author:    Christoph Hellwig <hch@lst.de>    2017-06-03 03:38:05 -0400
committer: Jens Axboe <axboe@fb.com>         2017-06-09 11:27:32 -0400
commit:    fc17b6534eb8395f0b3133eb31d87deec32c642b
tree:      18686a2326ebd60d68f144c70f83f4441c6b4e2f
parent:    2a842acab109f40f0d7d10b38e9ca88390628996

blk-mq: switch ->queue_rq return value to blk_status_t

Use the same values for request completion errors as for the return value
from ->queue_rq.  BLK_STS_RESOURCE is special-cased to cause a requeue, and
all the others are completed as-is.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
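For reference, a ->queue_rq implementation after this conversion looks roughly
as follows.  This sketch is not part of the patch; the mydev_* names are
hypothetical and only the return values reflect the convention the patch
establishes:

/*
 * Minimal sketch of a ->queue_rq handler under the new convention.
 * mydev, mydev_can_queue() and mydev_submit() are made up for
 * illustration only.
 */
static blk_status_t mydev_queue_rq(struct blk_mq_hw_ctx *hctx,
                                   const struct blk_mq_queue_data *bd)
{
        struct request *rq = bd->rq;
        struct mydev *dev = hctx->queue->queuedata;

        if (!mydev_can_queue(dev))
                return BLK_STS_RESOURCE;        /* blk-mq requeues and retries later */

        blk_mq_start_request(rq);

        if (mydev_submit(dev, rq))
                return BLK_STS_IOERR;           /* request is completed with this status */

        return BLK_STS_OK;
}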
-rw-r--r--  block/blk-mq.c                     |  37
-rw-r--r--  drivers/block/loop.c               |   6
-rw-r--r--  drivers/block/mtip32xx/mtip32xx.c  |  17
-rw-r--r--  drivers/block/nbd.c                |  12
-rw-r--r--  drivers/block/null_blk.c           |   4
-rw-r--r--  drivers/block/rbd.c                |   4
-rw-r--r--  drivers/block/virtio_blk.c         |  10
-rw-r--r--  drivers/block/xen-blkfront.c       |   8
-rw-r--r--  drivers/md/dm-rq.c                 |   8
-rw-r--r--  drivers/mtd/ubi/block.c            |   6
-rw-r--r--  drivers/nvme/host/core.c           |  14
-rw-r--r--  drivers/nvme/host/fc.c             |  23
-rw-r--r--  drivers/nvme/host/nvme.h           |   2
-rw-r--r--  drivers/nvme/host/pci.c            |  42
-rw-r--r--  drivers/nvme/host/rdma.c           |  26
-rw-r--r--  drivers/nvme/target/loop.c         |  17
-rw-r--r--  drivers/scsi/scsi_lib.c            |  30
-rw-r--r--  include/linux/blk-mq.h             |   7
18 files changed, 131 insertions(+), 142 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index adcc1c0dce6e..7af78b1e9db9 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -924,7 +924,7 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list)
 {
         struct blk_mq_hw_ctx *hctx;
         struct request *rq;
-        int errors, queued, ret = BLK_MQ_RQ_QUEUE_OK;
+        int errors, queued;

         if (list_empty(list))
                 return false;
@@ -935,6 +935,7 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list)
         errors = queued = 0;
         do {
                 struct blk_mq_queue_data bd;
+                blk_status_t ret;

                 rq = list_first_entry(list, struct request, queuelist);
                 if (!blk_mq_get_driver_tag(rq, &hctx, false)) {
@@ -975,25 +976,20 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list)
                 }

                 ret = q->mq_ops->queue_rq(hctx, &bd);
-                switch (ret) {
-                case BLK_MQ_RQ_QUEUE_OK:
-                        queued++;
-                        break;
-                case BLK_MQ_RQ_QUEUE_BUSY:
+                if (ret == BLK_STS_RESOURCE) {
                         blk_mq_put_driver_tag_hctx(hctx, rq);
                         list_add(&rq->queuelist, list);
                         __blk_mq_requeue_request(rq);
                         break;
-                default:
-                        pr_err("blk-mq: bad return on queue: %d\n", ret);
-                case BLK_MQ_RQ_QUEUE_ERROR:
+                }
+
+                if (unlikely(ret != BLK_STS_OK)) {
                         errors++;
                         blk_mq_end_request(rq, BLK_STS_IOERR);
-                        break;
+                        continue;
                 }

-                if (ret == BLK_MQ_RQ_QUEUE_BUSY)
-                        break;
+                queued++;
         } while (!list_empty(list));

         hctx->dispatched[queued_to_index(queued)]++;
@@ -1031,7 +1027,7 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list)
          * - blk_mq_run_hw_queue() checks whether or not a queue has
          *   been stopped before rerunning a queue.
          * - Some but not all block drivers stop a queue before
-         *   returning BLK_MQ_RQ_QUEUE_BUSY. Two exceptions are scsi-mq
+         *   returning BLK_STS_RESOURCE. Two exceptions are scsi-mq
          *   and dm-rq.
          */
         if (!blk_mq_sched_needs_restart(hctx) &&
@@ -1410,7 +1406,7 @@ static void __blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie,
         };
         struct blk_mq_hw_ctx *hctx;
         blk_qc_t new_cookie;
-        int ret;
+        blk_status_t ret;

         if (q->elevator)
                 goto insert;
@@ -1426,18 +1422,19 @@ static void __blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie,
          * would have done
          */
         ret = q->mq_ops->queue_rq(hctx, &bd);
-        if (ret == BLK_MQ_RQ_QUEUE_OK) {
+        switch (ret) {
+        case BLK_STS_OK:
                 *cookie = new_cookie;
                 return;
-        }
-
-        if (ret == BLK_MQ_RQ_QUEUE_ERROR) {
+        case BLK_STS_RESOURCE:
+                __blk_mq_requeue_request(rq);
+                goto insert;
+        default:
                 *cookie = BLK_QC_T_NONE;
-                blk_mq_end_request(rq, BLK_STS_IOERR);
+                blk_mq_end_request(rq, ret);
                 return;
         }

-        __blk_mq_requeue_request(rq);
 insert:
         blk_mq_sched_insert_request(rq, false, true, false, may_sleep);
 }
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 4caf6338c012..70fd7e0de0fa 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1674,7 +1674,7 @@ int loop_unregister_transfer(int number)
 EXPORT_SYMBOL(loop_register_transfer);
 EXPORT_SYMBOL(loop_unregister_transfer);

-static int loop_queue_rq(struct blk_mq_hw_ctx *hctx,
+static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx,
                 const struct blk_mq_queue_data *bd)
 {
         struct loop_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
@@ -1683,7 +1683,7 @@ static int loop_queue_rq(struct blk_mq_hw_ctx *hctx,
         blk_mq_start_request(bd->rq);

         if (lo->lo_state != Lo_bound)
-                return BLK_MQ_RQ_QUEUE_ERROR;
+                return BLK_STS_IOERR;

         switch (req_op(cmd->rq)) {
         case REQ_OP_FLUSH:
@@ -1698,7 +1698,7 @@ static int loop_queue_rq(struct blk_mq_hw_ctx *hctx,

         kthread_queue_work(&lo->worker, &cmd->work);

-        return BLK_MQ_RQ_QUEUE_OK;
+        return BLK_STS_OK;
 }

 static void loop_handle_cmd(struct loop_cmd *cmd)
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index ee6f66bb50c7..d8618a71da74 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -3633,8 +3633,8 @@ static bool mtip_check_unal_depth(struct blk_mq_hw_ctx *hctx,
         return false;
 }

-static int mtip_issue_reserved_cmd(struct blk_mq_hw_ctx *hctx,
+static blk_status_t mtip_issue_reserved_cmd(struct blk_mq_hw_ctx *hctx,
                 struct request *rq)
 {
         struct driver_data *dd = hctx->queue->queuedata;
         struct mtip_int_cmd *icmd = rq->special;
@@ -3642,7 +3642,7 @@ static int mtip_issue_reserved_cmd(struct blk_mq_hw_ctx *hctx,
         struct mtip_cmd_sg *command_sg;

         if (mtip_commands_active(dd->port))
-                return BLK_MQ_RQ_QUEUE_BUSY;
+                return BLK_STS_RESOURCE;

         /* Populate the SG list */
         cmd->command_header->opts =
@@ -3666,10 +3666,10 @@ static int mtip_issue_reserved_cmd(struct blk_mq_hw_ctx *hctx,

         blk_mq_start_request(rq);
         mtip_issue_non_ncq_command(dd->port, rq->tag);
-        return BLK_MQ_RQ_QUEUE_OK;
+        return 0;
 }

-static int mtip_queue_rq(struct blk_mq_hw_ctx *hctx,
+static blk_status_t mtip_queue_rq(struct blk_mq_hw_ctx *hctx,
                          const struct blk_mq_queue_data *bd)
 {
         struct request *rq = bd->rq;
@@ -3681,15 +3681,14 @@ static int mtip_queue_rq(struct blk_mq_hw_ctx *hctx,
                 return mtip_issue_reserved_cmd(hctx, rq);

         if (unlikely(mtip_check_unal_depth(hctx, rq)))
-                return BLK_MQ_RQ_QUEUE_BUSY;
+                return BLK_STS_RESOURCE;

         blk_mq_start_request(rq);

         ret = mtip_submit_request(hctx, rq);
         if (likely(!ret))
-                return BLK_MQ_RQ_QUEUE_OK;
-
-        return BLK_MQ_RQ_QUEUE_ERROR;
+                return BLK_STS_OK;
+        return BLK_STS_IOERR;
 }

 static void mtip_free_cmd(struct blk_mq_tag_set *set, struct request *rq,
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 978d2d2d08d6..36839dc45472 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -469,7 +469,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
                         nsock->pending = req;
                         nsock->sent = sent;
                 }
-                return BLK_MQ_RQ_QUEUE_BUSY;
+                return BLK_STS_RESOURCE;
         }
         dev_err_ratelimited(disk_to_dev(nbd->disk),
                 "Send control failed (result %d)\n", result);
@@ -510,7 +510,7 @@ send_pages:
                                  */
                                 nsock->pending = req;
                                 nsock->sent = sent;
-                                return BLK_MQ_RQ_QUEUE_BUSY;
+                                return BLK_STS_RESOURCE;
                         }
                         dev_err(disk_to_dev(nbd->disk),
                                 "Send data failed (result %d)\n",
@@ -798,7 +798,7 @@ out:
         return ret;
 }

-static int nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
+static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
                         const struct blk_mq_queue_data *bd)
 {
         struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
@@ -822,13 +822,9 @@ static int nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
          * appropriate.
          */
         ret = nbd_handle_cmd(cmd, hctx->queue_num);
-        if (ret < 0)
-                ret = BLK_MQ_RQ_QUEUE_ERROR;
-        if (!ret)
-                ret = BLK_MQ_RQ_QUEUE_OK;
         complete(&cmd->send_complete);

-        return ret;
+        return ret < 0 ? BLK_STS_IOERR : BLK_STS_OK;
 }

 static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg,
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index e6b81d370882..586dfff5d53f 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -356,7 +356,7 @@ static void null_request_fn(struct request_queue *q)
         }
 }

-static int null_queue_rq(struct blk_mq_hw_ctx *hctx,
+static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx,
                          const struct blk_mq_queue_data *bd)
 {
         struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);
@@ -373,7 +373,7 @@ static int null_queue_rq(struct blk_mq_hw_ctx *hctx,
         blk_mq_start_request(bd->rq);

         null_handle_cmd(cmd);
-        return BLK_MQ_RQ_QUEUE_OK;
+        return BLK_STS_OK;
 }

 static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 3e8b43d792c2..74a6791b15c8 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -4154,14 +4154,14 @@ err:
         blk_mq_end_request(rq, errno_to_blk_status(result));
 }

-static int rbd_queue_rq(struct blk_mq_hw_ctx *hctx,
+static blk_status_t rbd_queue_rq(struct blk_mq_hw_ctx *hctx,
                 const struct blk_mq_queue_data *bd)
 {
         struct request *rq = bd->rq;
         struct work_struct *work = blk_mq_rq_to_pdu(rq);

         queue_work(rbd_wq, work);
-        return BLK_MQ_RQ_QUEUE_OK;
+        return BLK_STS_OK;
 }

 static void rbd_free_disk(struct rbd_device *rbd_dev)
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 205b74d70efc..e59bd4549a8a 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -214,7 +214,7 @@ static void virtblk_done(struct virtqueue *vq)
         spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
 }

-static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
+static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
                            const struct blk_mq_queue_data *bd)
 {
         struct virtio_blk *vblk = hctx->queue->queuedata;
@@ -246,7 +246,7 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
                 break;
         default:
                 WARN_ON_ONCE(1);
-                return BLK_MQ_RQ_QUEUE_ERROR;
+                return BLK_STS_IOERR;
         }

         vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, type);
@@ -276,8 +276,8 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
                 /* Out of mem doesn't actually happen, since we fall back
                  * to direct descriptors */
                 if (err == -ENOMEM || err == -ENOSPC)
-                        return BLK_MQ_RQ_QUEUE_BUSY;
-                return BLK_MQ_RQ_QUEUE_ERROR;
+                        return BLK_STS_RESOURCE;
+                return BLK_STS_IOERR;
         }

         if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
@@ -286,7 +286,7 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx,

         if (notify)
                 virtqueue_notify(vblk->vqs[qid].vq);
-        return BLK_MQ_RQ_QUEUE_OK;
+        return BLK_STS_OK;
 }

 /* return id (s/n) string for *disk to *id_str
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index aedc3c759273..2f468cf86dcf 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -881,7 +881,7 @@ static inline bool blkif_request_flush_invalid(struct request *req,
                                                !info->feature_fua));
 }

-static int blkif_queue_rq(struct blk_mq_hw_ctx *hctx,
+static blk_status_t blkif_queue_rq(struct blk_mq_hw_ctx *hctx,
                           const struct blk_mq_queue_data *qd)
 {
         unsigned long flags;
@@ -904,16 +904,16 @@ static int blkif_queue_rq(struct blk_mq_hw_ctx *hctx,

         flush_requests(rinfo);
         spin_unlock_irqrestore(&rinfo->ring_lock, flags);
-        return BLK_MQ_RQ_QUEUE_OK;
+        return BLK_STS_OK;

 out_err:
         spin_unlock_irqrestore(&rinfo->ring_lock, flags);
-        return BLK_MQ_RQ_QUEUE_ERROR;
+        return BLK_STS_IOERR;

 out_busy:
         spin_unlock_irqrestore(&rinfo->ring_lock, flags);
         blk_mq_stop_hw_queue(hctx);
-        return BLK_MQ_RQ_QUEUE_BUSY;
+        return BLK_STS_RESOURCE;
 }

 static void blkif_complete_rq(struct request *rq)
diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c
index bee334389173..63402f8a38de 100644
--- a/drivers/md/dm-rq.c
+++ b/drivers/md/dm-rq.c
@@ -727,7 +727,7 @@ static int dm_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
         return __dm_rq_init_rq(set->driver_data, rq);
 }

-static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
+static blk_status_t dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
                           const struct blk_mq_queue_data *bd)
 {
         struct request *rq = bd->rq;
@@ -744,7 +744,7 @@ static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
         }

         if (ti->type->busy && ti->type->busy(ti))
-                return BLK_MQ_RQ_QUEUE_BUSY;
+                return BLK_STS_RESOURCE;

         dm_start_request(md, rq);

@@ -762,10 +762,10 @@ static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
                 rq_end_stats(md, rq);
                 rq_completed(md, rq_data_dir(rq), false);
                 blk_mq_delay_run_hw_queue(hctx, 100/*ms*/);
-                return BLK_MQ_RQ_QUEUE_BUSY;
+                return BLK_STS_RESOURCE;
         }

-        return BLK_MQ_RQ_QUEUE_OK;
+        return BLK_STS_OK;
 }

 static const struct blk_mq_ops dm_mq_ops = {
diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c
index 3ecdb39d1985..c3963f880448 100644
--- a/drivers/mtd/ubi/block.c
+++ b/drivers/mtd/ubi/block.c
@@ -316,7 +316,7 @@ static void ubiblock_do_work(struct work_struct *work)
         blk_mq_end_request(req, errno_to_blk_status(ret));
 }

-static int ubiblock_queue_rq(struct blk_mq_hw_ctx *hctx,
+static blk_status_t ubiblock_queue_rq(struct blk_mq_hw_ctx *hctx,
                              const struct blk_mq_queue_data *bd)
 {
         struct request *req = bd->rq;
@@ -327,9 +327,9 @@ static int ubiblock_queue_rq(struct blk_mq_hw_ctx *hctx,
         case REQ_OP_READ:
                 ubi_sgl_init(&pdu->usgl);
                 queue_work(dev->wq, &pdu->work);
-                return BLK_MQ_RQ_QUEUE_OK;
+                return BLK_STS_OK;
         default:
-                return BLK_MQ_RQ_QUEUE_ERROR;
+                return BLK_STS_IOERR;
         }

 }
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 07e95c7d837a..4e193b93d1d9 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -283,7 +283,7 @@ static inline void nvme_setup_flush(struct nvme_ns *ns,
         cmnd->common.nsid = cpu_to_le32(ns->ns_id);
 }

-static inline int nvme_setup_discard(struct nvme_ns *ns, struct request *req,
+static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
                 struct nvme_command *cmnd)
 {
         unsigned short segments = blk_rq_nr_discard_segments(req), n = 0;
@@ -292,7 +292,7 @@ static inline int nvme_setup_discard(struct nvme_ns *ns, struct request *req,

         range = kmalloc_array(segments, sizeof(*range), GFP_ATOMIC);
         if (!range)
-                return BLK_MQ_RQ_QUEUE_BUSY;
+                return BLK_STS_RESOURCE;

         __rq_for_each_bio(bio, req) {
                 u64 slba = nvme_block_nr(ns, bio->bi_iter.bi_sector);
@@ -306,7 +306,7 @@ static inline int nvme_setup_discard(struct nvme_ns *ns, struct request *req,

         if (WARN_ON_ONCE(n != segments)) {
                 kfree(range);
-                return BLK_MQ_RQ_QUEUE_ERROR;
+                return BLK_STS_IOERR;
         }

         memset(cmnd, 0, sizeof(*cmnd));
@@ -320,7 +320,7 @@ static inline int nvme_setup_discard(struct nvme_ns *ns, struct request *req,
         req->special_vec.bv_len = sizeof(*range) * segments;
         req->rq_flags |= RQF_SPECIAL_PAYLOAD;

-        return BLK_MQ_RQ_QUEUE_OK;
+        return BLK_STS_OK;
 }

 static inline void nvme_setup_rw(struct nvme_ns *ns, struct request *req,
@@ -364,10 +364,10 @@ static inline void nvme_setup_rw(struct nvme_ns *ns, struct request *req,
         cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
 }

-int nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
+blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
                 struct nvme_command *cmd)
 {
-        int ret = BLK_MQ_RQ_QUEUE_OK;
+        blk_status_t ret = BLK_STS_OK;

         if (!(req->rq_flags & RQF_DONTPREP)) {
                 nvme_req(req)->retries = 0;
@@ -394,7 +394,7 @@ int nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
                 break;
         default:
                 WARN_ON_ONCE(1);
-                return BLK_MQ_RQ_QUEUE_ERROR;
+                return BLK_STS_IOERR;
         }

         cmd->common.command_id = req->tag;
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 5b14cbefb724..eb0973ac9e17 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -1873,7 +1873,7 @@ nvme_fc_unmap_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
  * level FC exchange resource that is also outstanding. This must be
  * considered in all cleanup operations.
  */
-static int
+static blk_status_t
 nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
         struct nvme_fc_fcp_op *op, u32 data_len,
         enum nvmefc_fcp_datadir io_dir)
@@ -1888,10 +1888,10 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
          * the target device is present
          */
         if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE)
-                return BLK_MQ_RQ_QUEUE_ERROR;
+                return BLK_STS_IOERR;

         if (!nvme_fc_ctrl_get(ctrl))
-                return BLK_MQ_RQ_QUEUE_ERROR;
+                return BLK_STS_IOERR;

         /* format the FC-NVME CMD IU and fcp_req */
         cmdiu->connection_id = cpu_to_be64(queue->connection_id);
@@ -1939,8 +1939,9 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
                 if (ret < 0) {
                         nvme_cleanup_cmd(op->rq);
                         nvme_fc_ctrl_put(ctrl);
-                        return (ret == -ENOMEM || ret == -EAGAIN) ?
-                                BLK_MQ_RQ_QUEUE_BUSY : BLK_MQ_RQ_QUEUE_ERROR;
+                        if (ret == -ENOMEM || ret == -EAGAIN)
+                                return BLK_STS_RESOURCE;
+                        return BLK_STS_IOERR;
                 }
         }

@@ -1966,19 +1967,19 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue,
                 nvme_fc_ctrl_put(ctrl);

                 if (ret != -EBUSY)
-                        return BLK_MQ_RQ_QUEUE_ERROR;
+                        return BLK_STS_IOERR;

                 if (op->rq) {
                         blk_mq_stop_hw_queues(op->rq->q);
                         blk_mq_delay_queue(queue->hctx, NVMEFC_QUEUE_DELAY);
                 }
-                return BLK_MQ_RQ_QUEUE_BUSY;
+                return BLK_STS_RESOURCE;
         }

-        return BLK_MQ_RQ_QUEUE_OK;
+        return BLK_STS_OK;
 }

-static int
+static blk_status_t
 nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
                 const struct blk_mq_queue_data *bd)
 {
@@ -1991,7 +1992,7 @@ nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
         struct nvme_command *sqe = &cmdiu->sqe;
         enum nvmefc_fcp_datadir io_dir;
         u32 data_len;
-        int ret;
+        blk_status_t ret;

         ret = nvme_setup_cmd(ns, rq, sqe);
         if (ret)
@@ -2046,7 +2047,7 @@ nvme_fc_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
         struct nvme_fc_fcp_op *aen_op;
         unsigned long flags;
         bool terminating = false;
-        int ret;
+        blk_status_t ret;

         if (aer_idx > NVME_FC_NR_AEN_COMMANDS)
                 return;
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 9d6a070d4391..22ee60b2a3e8 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -296,7 +296,7 @@ void nvme_start_freeze(struct nvme_ctrl *ctrl);
 #define NVME_QID_ANY -1
 struct request *nvme_alloc_request(struct request_queue *q,
                 struct nvme_command *cmd, unsigned int flags, int qid);
-int nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
+blk_status_t nvme_setup_cmd(struct nvme_ns *ns, struct request *req,
                 struct nvme_command *cmd);
 int nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd,
                 void *buf, unsigned bufflen);
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 819898428763..430d085af31c 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -427,7 +427,7 @@ static __le64 **iod_list(struct request *req)
         return (__le64 **)(iod->sg + blk_rq_nr_phys_segments(req));
 }

-static int nvme_init_iod(struct request *rq, struct nvme_dev *dev)
+static blk_status_t nvme_init_iod(struct request *rq, struct nvme_dev *dev)
 {
         struct nvme_iod *iod = blk_mq_rq_to_pdu(rq);
         int nseg = blk_rq_nr_phys_segments(rq);
@@ -436,7 +436,7 @@ static int nvme_init_iod(struct request *rq, struct nvme_dev *dev)
         if (nseg > NVME_INT_PAGES || size > NVME_INT_BYTES(dev)) {
                 iod->sg = kmalloc(nvme_iod_alloc_size(dev, size, nseg), GFP_ATOMIC);
                 if (!iod->sg)
-                        return BLK_MQ_RQ_QUEUE_BUSY;
+                        return BLK_STS_RESOURCE;
         } else {
                 iod->sg = iod->inline_sg;
         }
@@ -446,7 +446,7 @@ static int nvme_init_iod(struct request *rq, struct nvme_dev *dev)
         iod->nents = 0;
         iod->length = size;

-        return BLK_MQ_RQ_QUEUE_OK;
+        return BLK_STS_OK;
 }

 static void nvme_free_iod(struct nvme_dev *dev, struct request *req)
@@ -616,21 +616,21 @@ static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req)
         return true;
 }

-static int nvme_map_data(struct nvme_dev *dev, struct request *req,
+static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req,
                 struct nvme_command *cmnd)
 {
         struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
         struct request_queue *q = req->q;
         enum dma_data_direction dma_dir = rq_data_dir(req) ?
                         DMA_TO_DEVICE : DMA_FROM_DEVICE;
-        int ret = BLK_MQ_RQ_QUEUE_ERROR;
+        blk_status_t ret = BLK_STS_IOERR;

         sg_init_table(iod->sg, blk_rq_nr_phys_segments(req));
         iod->nents = blk_rq_map_sg(q, req, iod->sg);
         if (!iod->nents)
                 goto out;

-        ret = BLK_MQ_RQ_QUEUE_BUSY;
+        ret = BLK_STS_RESOURCE;
         if (!dma_map_sg_attrs(dev->dev, iod->sg, iod->nents, dma_dir,
                                 DMA_ATTR_NO_WARN))
                 goto out;
@@ -638,7 +638,7 @@ static int nvme_map_data(struct nvme_dev *dev, struct request *req,
         if (!nvme_setup_prps(dev, req))
                 goto out_unmap;

-        ret = BLK_MQ_RQ_QUEUE_ERROR;
+        ret = BLK_STS_IOERR;
         if (blk_integrity_rq(req)) {
                 if (blk_rq_count_integrity_sg(q, req->bio) != 1)
                         goto out_unmap;
@@ -658,7 +658,7 @@ static int nvme_map_data(struct nvme_dev *dev, struct request *req,
         cmnd->rw.dptr.prp2 = cpu_to_le64(iod->first_dma);
         if (blk_integrity_rq(req))
                 cmnd->rw.metadata = cpu_to_le64(sg_dma_address(&iod->meta_sg));
-        return BLK_MQ_RQ_QUEUE_OK;
+        return BLK_STS_OK;

 out_unmap:
         dma_unmap_sg(dev->dev, iod->sg, iod->nents, dma_dir);
@@ -688,7 +688,7 @@ static void nvme_unmap_data(struct nvme_dev *dev, struct request *req)
 /*
  * NOTE: ns is NULL when called on the admin queue.
  */
-static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
+static blk_status_t nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
                          const struct blk_mq_queue_data *bd)
 {
         struct nvme_ns *ns = hctx->queue->queuedata;
@@ -696,7 +696,7 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
         struct nvme_dev *dev = nvmeq->dev;
         struct request *req = bd->rq;
         struct nvme_command cmnd;
-        int ret = BLK_MQ_RQ_QUEUE_OK;
+        blk_status_t ret = BLK_STS_OK;

         /*
          * If formated with metadata, require the block layer provide a buffer
@@ -705,38 +705,36 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
          */
         if (ns && ns->ms && !blk_integrity_rq(req)) {
                 if (!(ns->pi_type && ns->ms == 8) &&
-                    !blk_rq_is_passthrough(req)) {
-                        blk_mq_end_request(req, BLK_STS_NOTSUPP);
-                        return BLK_MQ_RQ_QUEUE_OK;
-                }
+                    !blk_rq_is_passthrough(req))
+                        return BLK_STS_NOTSUPP;
         }

         ret = nvme_setup_cmd(ns, req, &cmnd);
-        if (ret != BLK_MQ_RQ_QUEUE_OK)
+        if (ret)
                 return ret;

         ret = nvme_init_iod(req, dev);
-        if (ret != BLK_MQ_RQ_QUEUE_OK)
+        if (ret)
                 goto out_free_cmd;

-        if (blk_rq_nr_phys_segments(req))
+        if (blk_rq_nr_phys_segments(req)) {
                 ret = nvme_map_data(dev, req, &cmnd);
-
-        if (ret != BLK_MQ_RQ_QUEUE_OK)
-                goto out_cleanup_iod;
+                if (ret)
+                        goto out_cleanup_iod;
+        }

         blk_mq_start_request(req);

         spin_lock_irq(&nvmeq->q_lock);
         if (unlikely(nvmeq->cq_vector < 0)) {
-                ret = BLK_MQ_RQ_QUEUE_ERROR;
+                ret = BLK_STS_IOERR;
                 spin_unlock_irq(&nvmeq->q_lock);
                 goto out_cleanup_iod;
         }
         __nvme_submit_cmd(nvmeq, &cmnd);
         nvme_process_cq(nvmeq);
         spin_unlock_irq(&nvmeq->q_lock);
-        return BLK_MQ_RQ_QUEUE_OK;
+        return BLK_STS_OK;
 out_cleanup_iod:
         nvme_free_iod(dev, req);
 out_free_cmd:
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 28bd255c144d..58d311e704e5 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1448,7 +1448,7 @@ static inline bool nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue,
         return true;
 }

-static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
+static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
                 const struct blk_mq_queue_data *bd)
 {
         struct nvme_ns *ns = hctx->queue->queuedata;
@@ -1459,27 +1459,28 @@ static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
         struct nvme_command *c = sqe->data;
         bool flush = false;
         struct ib_device *dev;
-        int ret;
+        blk_status_t ret;
+        int err;

         WARN_ON_ONCE(rq->tag < 0);

         if (!nvme_rdma_queue_is_ready(queue, rq))
-                return BLK_MQ_RQ_QUEUE_BUSY;
+                return BLK_STS_RESOURCE;

         dev = queue->device->dev;
         ib_dma_sync_single_for_cpu(dev, sqe->dma,
                         sizeof(struct nvme_command), DMA_TO_DEVICE);

         ret = nvme_setup_cmd(ns, rq, c);
-        if (ret != BLK_MQ_RQ_QUEUE_OK)
+        if (ret)
                 return ret;

         blk_mq_start_request(rq);

-        ret = nvme_rdma_map_data(queue, rq, c);
-        if (ret < 0) {
+        err = nvme_rdma_map_data(queue, rq, c);
+        if (err < 0) {
                 dev_err(queue->ctrl->ctrl.device,
-                        "Failed to map data (%d)\n", ret);
+                        "Failed to map data (%d)\n", err);
                 nvme_cleanup_cmd(rq);
                 goto err;
         }
@@ -1489,17 +1490,18 @@ static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,

         if (req_op(rq) == REQ_OP_FLUSH)
                 flush = true;
-        ret = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge,
+        err = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge,
                         req->mr->need_inval ? &req->reg_wr.wr : NULL, flush);
-        if (ret) {
+        if (err) {
                 nvme_rdma_unmap_data(queue, rq);
                 goto err;
         }

-        return BLK_MQ_RQ_QUEUE_OK;
+        return BLK_STS_OK;
 err:
-        return (ret == -ENOMEM || ret == -EAGAIN) ?
-                BLK_MQ_RQ_QUEUE_BUSY : BLK_MQ_RQ_QUEUE_ERROR;
+        if (err == -ENOMEM || err == -EAGAIN)
+                return BLK_STS_RESOURCE;
+        return BLK_STS_IOERR;
 }

 static int nvme_rdma_poll(struct blk_mq_hw_ctx *hctx, unsigned int tag)
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index e503cfff0337..db8ebadf885b 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -159,17 +159,17 @@ nvme_loop_timeout(struct request *rq, bool reserved)
         return BLK_EH_HANDLED;
 }

-static int nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
+static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
                 const struct blk_mq_queue_data *bd)
 {
         struct nvme_ns *ns = hctx->queue->queuedata;
         struct nvme_loop_queue *queue = hctx->driver_data;
         struct request *req = bd->rq;
         struct nvme_loop_iod *iod = blk_mq_rq_to_pdu(req);
-        int ret;
+        blk_status_t ret;

         ret = nvme_setup_cmd(ns, req, &iod->cmd);
-        if (ret != BLK_MQ_RQ_QUEUE_OK)
+        if (ret)
                 return ret;

         iod->cmd.common.flags |= NVME_CMD_SGL_METABUF;
@@ -179,16 +179,15 @@ static int nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
                 nvme_cleanup_cmd(req);
                 blk_mq_start_request(req);
                 nvme_loop_queue_response(&iod->req);
-                return BLK_MQ_RQ_QUEUE_OK;
+                return BLK_STS_OK;
         }

         if (blk_rq_bytes(req)) {
                 iod->sg_table.sgl = iod->first_sgl;
-                ret = sg_alloc_table_chained(&iod->sg_table,
+                if (sg_alloc_table_chained(&iod->sg_table,
                                 blk_rq_nr_phys_segments(req),
-                                iod->sg_table.sgl);
-                if (ret)
-                        return BLK_MQ_RQ_QUEUE_BUSY;
+                                iod->sg_table.sgl))
+                        return BLK_STS_RESOURCE;

                 iod->req.sg = iod->sg_table.sgl;
                 iod->req.sg_cnt = blk_rq_map_sg(req->q, req, iod->sg_table.sgl);
@@ -197,7 +196,7 @@ static int nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
         blk_mq_start_request(req);

         schedule_work(&iod->work);
-        return BLK_MQ_RQ_QUEUE_OK;
+        return BLK_STS_OK;
 }

 static void nvme_loop_submit_async_event(struct nvme_ctrl *arg, int aer_idx)
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 67a67191520f..b5f310b9e910 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1812,15 +1812,15 @@ out_delay:
                 blk_delay_queue(q, SCSI_QUEUE_DELAY);
 }

-static inline int prep_to_mq(int ret)
+static inline blk_status_t prep_to_mq(int ret)
 {
         switch (ret) {
         case BLKPREP_OK:
-                return BLK_MQ_RQ_QUEUE_OK;
+                return BLK_STS_OK;
         case BLKPREP_DEFER:
-                return BLK_MQ_RQ_QUEUE_BUSY;
+                return BLK_STS_RESOURCE;
         default:
-                return BLK_MQ_RQ_QUEUE_ERROR;
+                return BLK_STS_IOERR;
         }
 }

@@ -1892,7 +1892,7 @@ static void scsi_mq_done(struct scsi_cmnd *cmd)
         blk_mq_complete_request(cmd->request);
 }

-static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
+static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
                          const struct blk_mq_queue_data *bd)
 {
         struct request *req = bd->rq;
@@ -1900,14 +1900,14 @@ static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
         struct scsi_device *sdev = q->queuedata;
         struct Scsi_Host *shost = sdev->host;
         struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
-        int ret;
+        blk_status_t ret;
         int reason;

         ret = prep_to_mq(scsi_prep_state_check(sdev, req));
-        if (ret != BLK_MQ_RQ_QUEUE_OK)
+        if (ret != BLK_STS_OK)
                 goto out;

-        ret = BLK_MQ_RQ_QUEUE_BUSY;
+        ret = BLK_STS_RESOURCE;
         if (!get_device(&sdev->sdev_gendev))
                 goto out;

@@ -1920,7 +1920,7 @@ static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx,

         if (!(req->rq_flags & RQF_DONTPREP)) {
                 ret = prep_to_mq(scsi_mq_prep_fn(req));
-                if (ret != BLK_MQ_RQ_QUEUE_OK)
+                if (ret != BLK_STS_OK)
                         goto out_dec_host_busy;
                 req->rq_flags |= RQF_DONTPREP;
         } else {
@@ -1938,11 +1938,11 @@ static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx,
         reason = scsi_dispatch_cmd(cmd);
         if (reason) {
                 scsi_set_blocked(cmd, reason);
-                ret = BLK_MQ_RQ_QUEUE_BUSY;
+                ret = BLK_STS_RESOURCE;
                 goto out_dec_host_busy;
         }

-        return BLK_MQ_RQ_QUEUE_OK;
+        return BLK_STS_OK;

 out_dec_host_busy:
         atomic_dec(&shost->host_busy);
@@ -1955,12 +1955,14 @@ out_put_device:
         put_device(&sdev->sdev_gendev);
 out:
         switch (ret) {
-        case BLK_MQ_RQ_QUEUE_BUSY:
+        case BLK_STS_OK:
+                break;
+        case BLK_STS_RESOURCE:
                 if (atomic_read(&sdev->device_busy) == 0 &&
                     !scsi_device_blocked(sdev))
                         blk_mq_delay_run_hw_queue(hctx, SCSI_QUEUE_DELAY);
                 break;
-        case BLK_MQ_RQ_QUEUE_ERROR:
+        default:
                 /*
                  * Make sure to release all allocated ressources when
                  * we hit an error, as we will never see this command
@@ -1969,8 +1971,6 @@ out:
                 if (req->rq_flags & RQF_DONTPREP)
                         scsi_mq_uninit_cmd(cmd);
                 break;
-        default:
-                break;
         }
         return ret;
 }
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 0cf6735046d3..b144b7b0e104 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -87,7 +87,8 @@ struct blk_mq_queue_data {
         bool last;
 };

-typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data *);
+typedef blk_status_t (queue_rq_fn)(struct blk_mq_hw_ctx *,
+                const struct blk_mq_queue_data *);
 typedef enum blk_eh_timer_return (timeout_fn)(struct request *, bool);
 typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
 typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
@@ -155,10 +156,6 @@ struct blk_mq_ops {
 };

 enum {
-        BLK_MQ_RQ_QUEUE_OK      = 0,    /* queued fine */
-        BLK_MQ_RQ_QUEUE_BUSY    = 1,    /* requeue IO for later */
-        BLK_MQ_RQ_QUEUE_ERROR   = 2,    /* end IO with error */
-
         BLK_MQ_F_SHOULD_MERGE   = 1 << 0,
         BLK_MQ_F_TAG_SHARED     = 1 << 1,
         BLK_MQ_F_SG_MERGE       = 1 << 2,
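The old status codes removed above correspond to the new blk_status_t values
roughly as sketched below.  The helper is purely illustrative: no such
function exists in the tree, each driver was converted by hand in this patch.

/* Illustrative only: the mapping applied throughout this conversion. */
static inline blk_status_t legacy_queue_rq_ret_to_status(int ret)
{
        switch (ret) {
        case 0:         /* was BLK_MQ_RQ_QUEUE_OK */
                return BLK_STS_OK;
        case 1:         /* was BLK_MQ_RQ_QUEUE_BUSY */
                return BLK_STS_RESOURCE;        /* blk-mq requeues the request */
        default:        /* was BLK_MQ_RQ_QUEUE_ERROR */
                return BLK_STS_IOERR;           /* request is completed as an error */
        }
}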