Diffstat (limited to 'drivers/nvme/host/rdma.c')
-rw-r--r--	drivers/nvme/host/rdma.c	46
1 file changed, 28 insertions(+), 18 deletions(-)
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 8d2875b4c56d..ab545fb347a0 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -43,10 +43,6 @@
 
 #define NVME_RDMA_MAX_INLINE_SEGMENTS	1
 
-#define NVME_RDMA_MAX_PAGES_PER_MR	512
-
-#define NVME_RDMA_DEF_RECONNECT_DELAY	20
-
 /*
  * We handle AEN commands ourselves and don't even let the
  * block layer know about them.
@@ -77,7 +73,6 @@ struct nvme_rdma_request {
 	u32			num_sge;
 	int			nents;
 	bool			inline_data;
-	bool			need_inval;
 	struct ib_reg_wr	reg_wr;
 	struct ib_cqe		reg_cqe;
 	struct nvme_rdma_queue	*queue;
@@ -286,7 +281,7 @@ static int nvme_rdma_reinit_request(void *data, struct request *rq)
 	struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
 	int ret = 0;
 
-	if (!req->need_inval)
+	if (!req->mr->need_inval)
 		goto out;
 
 	ib_dereg_mr(req->mr);
@@ -298,7 +293,7 @@ static int nvme_rdma_reinit_request(void *data, struct request *rq)
 		req->mr = NULL;
 	}
 
-	req->need_inval = false;
+	req->mr->need_inval = false;
 
 out:
 	return ret;
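
[Editor's note, not part of the patch] The hunks above and the need_inval hunks below migrate the invalidation flag from the driver's per-request structure to the need_inval member on struct ib_mr, so the "this rkey still needs invalidating" state travels with the memory region instead of being duplicated (and potentially desynchronized) in the request. A minimal standalone sketch of the idea; the *_sketch types and rsp_sketch() are hypothetical stand-ins for struct ib_mr, struct nvme_rdma_request, and the completion path:

	#include <stdbool.h>

	/* Hypothetical stand-in for struct ib_mr. */
	struct ib_mr_sketch {
		unsigned int	rkey;		/* remote key for this registration */
		bool		need_inval;	/* rkey must be invalidated before reuse */
	};

	/* Hypothetical stand-in for struct nvme_rdma_request. */
	struct rdma_request_sketch {
		struct ib_mr_sketch	*mr;
		/* no duplicate need_inval here: the MR owns that state */
	};

	/* Mirrors the completion-path logic: if the target already
	 * invalidated our rkey remotely (IB_WC_WITH_INVALIDATE), clear
	 * the flag so no local invalidation is issued on unmap. */
	static void rsp_sketch(struct rdma_request_sketch *req,
			       bool remote_inval, unsigned int invalidated_rkey)
	{
		if (remote_inval && invalidated_rkey == req->mr->rkey)
			req->mr->need_inval = false;
	}
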
@@ -645,7 +640,8 @@ static int nvme_rdma_init_io_queues(struct nvme_rdma_ctrl *ctrl)
 	int i, ret;
 
 	for (i = 1; i < ctrl->queue_count; i++) {
-		ret = nvme_rdma_init_queue(ctrl, i, ctrl->ctrl.sqsize);
+		ret = nvme_rdma_init_queue(ctrl, i,
+				ctrl->ctrl.opts->queue_size);
 		if (ret) {
 			dev_info(ctrl->ctrl.device,
 				"failed to initialize i/o queue: %d\n", ret);
@@ -849,7 +845,7 @@ static void nvme_rdma_unmap_data(struct nvme_rdma_queue *queue,
 	if (!blk_rq_bytes(rq))
 		return;
 
-	if (req->need_inval) {
+	if (req->mr->need_inval) {
 		res = nvme_rdma_inv_rkey(queue, req);
 		if (res < 0) {
 			dev_err(ctrl->ctrl.device,
@@ -935,7 +931,7 @@ static int nvme_rdma_map_sg_fr(struct nvme_rdma_queue *queue,
 			IB_ACCESS_REMOTE_READ |
 			IB_ACCESS_REMOTE_WRITE;
 
-	req->need_inval = true;
+	req->mr->need_inval = true;
 
 	sg->addr = cpu_to_le64(req->mr->iova);
 	put_unaligned_le24(req->mr->length, sg->length);
@@ -958,7 +954,7 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
 
 	req->num_sge = 1;
 	req->inline_data = false;
-	req->need_inval = false;
+	req->mr->need_inval = false;
 
 	c->common.flags |= NVME_CMD_SGL_METABUF;
 
@@ -1145,7 +1141,7 @@ static int nvme_rdma_process_nvme_rsp(struct nvme_rdma_queue *queue,
 
 	if ((wc->wc_flags & IB_WC_WITH_INVALIDATE) &&
 	    wc->ex.invalidate_rkey == req->mr->rkey)
-		req->need_inval = false;
+		req->mr->need_inval = false;
 
 	blk_mq_complete_request(rq, status);
 
@@ -1278,8 +1274,22 @@ static int nvme_rdma_route_resolved(struct nvme_rdma_queue *queue)
 
 	priv.recfmt = cpu_to_le16(NVME_RDMA_CM_FMT_1_0);
 	priv.qid = cpu_to_le16(nvme_rdma_queue_idx(queue));
-	priv.hrqsize = cpu_to_le16(queue->queue_size);
-	priv.hsqsize = cpu_to_le16(queue->queue_size);
+	/*
+	 * set the admin queue depth to the minimum size
+	 * specified by the Fabrics standard.
+	 */
+	if (priv.qid == 0) {
+		priv.hrqsize = cpu_to_le16(NVMF_AQ_DEPTH);
+		priv.hsqsize = cpu_to_le16(NVMF_AQ_DEPTH - 1);
+	} else {
+		/*
+		 * current interpretation of the fabrics spec
+		 * is at minimum you make hrqsize sqsize+1, or a
+		 * 1's based representation of sqsize.
+		 */
+		priv.hrqsize = cpu_to_le16(queue->queue_size);
+		priv.hsqsize = cpu_to_le16(queue->ctrl->ctrl.sqsize);
+	}
 
 	ret = rdma_connect(queue->cm_id, &param);
 	if (ret) {
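
[Editor's note, not part of the patch] The hunk above encodes the Fabrics convention that hsqsize is a 0's-based value while hrqsize is 1's-based (hrqsize = sqsize + 1), with the admin queue pinned to the spec's minimum depth. A standalone sketch of the computation; fill_priv_sketch() is a hypothetical helper, the NVMF_AQ_DEPTH value of 32 is an assumption here, and the cpu_to_le16() wire conversion is omitted for brevity:

	#include <stdint.h>

	#define NVMF_AQ_DEPTH	32	/* assumed spec-minimum admin queue depth */

	struct cm_priv_sketch {
		uint16_t	qid;
		uint16_t	hrqsize;	/* host receive queue size, 1's based */
		uint16_t	hsqsize;	/* host send queue size, 0's based */
	};

	/* Mirrors the patched logic: the admin queue (qid 0) advertises
	 * the Fabrics minimum; I/O queues report a 0's-based hsqsize and
	 * an hrqsize one larger. */
	static void fill_priv_sketch(struct cm_priv_sketch *priv,
				     uint16_t qid, uint16_t queue_size)
	{
		priv->qid = qid;
		if (qid == 0) {
			priv->hrqsize = NVMF_AQ_DEPTH;
			priv->hsqsize = NVMF_AQ_DEPTH - 1;
		} else {
			priv->hrqsize = queue_size;	/* == sqsize + 1 */
			priv->hsqsize = queue_size - 1;	/* == ctrl sqsize */
		}
	}
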
@@ -1319,7 +1329,7 @@ out_destroy_queue_ib:
 static int nvme_rdma_device_unplug(struct nvme_rdma_queue *queue)
 {
 	struct nvme_rdma_ctrl *ctrl = queue->ctrl;
-	int ret;
+	int ret = 0;
 
 	/* Own the controller deletion */
 	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING))
@@ -1461,7 +1471,7 @@ static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
 	if (rq->cmd_type == REQ_TYPE_FS && req_op(rq) == REQ_OP_FLUSH)
 		flush = true;
 	ret = nvme_rdma_post_send(queue, sqe, req->sge, req->num_sge,
-			req->need_inval ? &req->reg_wr.wr : NULL, flush);
+			req->mr->need_inval ? &req->reg_wr.wr : NULL, flush);
 	if (ret) {
 		nvme_rdma_unmap_data(queue, rq);
 		goto err;
@@ -1816,7 +1826,7 @@ static int nvme_rdma_create_io_queues(struct nvme_rdma_ctrl *ctrl)
 
 	memset(&ctrl->tag_set, 0, sizeof(ctrl->tag_set));
 	ctrl->tag_set.ops = &nvme_rdma_mq_ops;
-	ctrl->tag_set.queue_depth = ctrl->ctrl.sqsize;
+	ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;
 	ctrl->tag_set.reserved_tags = 1; /* fabric connect */
 	ctrl->tag_set.numa_node = NUMA_NO_NODE;
 	ctrl->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
@@ -1914,7 +1924,7 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
 	spin_lock_init(&ctrl->lock);
 
 	ctrl->queue_count = opts->nr_io_queues + 1; /* +1 for admin queue */
-	ctrl->ctrl.sqsize = opts->queue_size;
+	ctrl->ctrl.sqsize = opts->queue_size - 1;
 	ctrl->ctrl.kato = opts->kato;
 
 	ret = -ENOMEM;
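
[Editor's note, not part of the patch] Taken together, the queue-size hunks keep one convention per consumer: ctrl->ctrl.sqsize becomes the 0's-based value the spec wants, while blk-mq and the queue allocation keep using the user's 1's-based opts->queue_size. A worked example, assuming a connect with queue_size = 128:

	ctrl->ctrl.sqsize             = 128 - 1 = 127   (0's based, per spec)
	ctrl->tag_set.queue_depth     = 128             (blk-mq expects 1's based)
	nvme_rdma_init_queue(..., n)  = 128 entries per I/O queue
	priv.hsqsize (I/O queues)     = 127             (= sqsize)
	priv.hrqsize (I/O queues)     = 128             (= sqsize + 1)
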