author	Jens Axboe <axboe@kernel.dk>	2018-07-26 13:48:44 -0400
committer	Jens Axboe <axboe@kernel.dk>	2018-07-26 13:48:44 -0400
commit	78e18063a9696c508dc95581fb8ec1aee539c4ee (patch)
tree	5dfa2c5c01caf7b9dc29b4178902b3fc0073a83a
parent	065990bd198e0e67417c2c34e5e80140d4b8cef7 (diff)
parent	405a7519607e7a364114896264440c0f87b325c0 (diff)
Merge branch 'nvme-4.18' of git://git.infradead.org/nvme into for-linus
Pull NVMe fixes from Christoph:

"Two small fixes each for the FC code and the target."

* 'nvme-4.18' of git://git.infradead.org/nvme:
  nvmet: only check for filebacking on -ENOTBLK
  nvmet: fixup crash on NULL device path
  nvme: if_ready checks to fail io to deleting controller
  nvmet-fc: fix target sgl list on large transfers
 drivers/nvme/host/fabrics.c    | 10
 drivers/nvme/host/fabrics.h    |  3
 drivers/nvme/host/fc.c         |  2
 drivers/nvme/host/rdma.c       |  2
 drivers/nvme/target/configfs.c |  9
 drivers/nvme/target/core.c     |  2
 drivers/nvme/target/fc.c       | 44
 drivers/nvme/target/loop.c     |  2
 8 files changed, 55 insertions(+), 19 deletions(-)
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 903eb4545e26..f7efe5a58cc7 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -539,14 +539,18 @@ static struct nvmf_transport_ops *nvmf_lookup_transport(
 /*
  * For something we're not in a state to send to the device the default action
  * is to busy it and retry it after the controller state is recovered. However,
- * anything marked for failfast or nvme multipath is immediately failed.
+ * if the controller is deleting or if anything is marked for failfast or
+ * nvme multipath it is immediately failed.
  *
  * Note: commands used to initialize the controller will be marked for failfast.
  * Note: nvme cli/ioctl commands are marked for failfast.
  */
-blk_status_t nvmf_fail_nonready_command(struct request *rq)
+blk_status_t nvmf_fail_nonready_command(struct nvme_ctrl *ctrl,
+		struct request *rq)
 {
-	if (!blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
+	if (ctrl->state != NVME_CTRL_DELETING &&
+	    ctrl->state != NVME_CTRL_DEAD &&
+	    !blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
 		return BLK_STS_RESOURCE;
 	nvme_req(rq)->status = NVME_SC_ABORT_REQ;
 	return BLK_STS_IOERR;
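
For illustration, a user-space sketch of the decision above. The enum,
struct, and return strings are stand-ins rather than kernel types; only
the branch structure is taken from the patch:

#include <stdbool.h>
#include <stdio.h>

enum ctrl_state { CTRL_LIVE, CTRL_DELETING, CTRL_DEAD };

/* stand-ins for blk_noretry_request() and REQ_NVME_MPATH */
struct request_model {
	bool noretry;	/* failfast: controller-init and cli/ioctl commands */
	bool mpath;	/* nvme multipath request */
};

static const char *fail_nonready(enum ctrl_state state,
				 const struct request_model *rq)
{
	/* new: a deleting or dead controller never busies the request */
	if (state != CTRL_DELETING && state != CTRL_DEAD &&
	    !rq->noretry && !rq->mpath)
		return "BLK_STS_RESOURCE (busy, retry after recovery)";
	return "BLK_STS_IOERR (fail immediately)";
}

int main(void)
{
	const struct request_model plain = { false, false };

	printf("live ctrl:     %s\n", fail_nonready(CTRL_LIVE, &plain));
	printf("deleting ctrl: %s\n", fail_nonready(CTRL_DELETING, &plain));
	return 0;
}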
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index e1818a27aa2d..aa2fdb2a2e8f 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -162,7 +162,8 @@ void nvmf_unregister_transport(struct nvmf_transport_ops *ops);
 void nvmf_free_options(struct nvmf_ctrl_options *opts);
 int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size);
 bool nvmf_should_reconnect(struct nvme_ctrl *ctrl);
-blk_status_t nvmf_fail_nonready_command(struct request *rq);
+blk_status_t nvmf_fail_nonready_command(struct nvme_ctrl *ctrl,
+		struct request *rq);
 bool __nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
 		bool queue_live);
 
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index 41d45a1b5c62..9bac912173ba 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -2272,7 +2272,7 @@ nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
 
 	if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE ||
 	    !nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
-		return nvmf_fail_nonready_command(rq);
+		return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);
 
 	ret = nvme_setup_cmd(ns, rq, sqe);
 	if (ret)
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 518c5b09038c..66ec5985c9f3 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1639,7 +1639,7 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
 	WARN_ON_ONCE(rq->tag < 0);
 
 	if (!nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
-		return nvmf_fail_nonready_command(rq);
+		return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);
 
 	dev = queue->device->dev;
 	ib_dma_sync_single_for_cpu(dev, sqe->dma,
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index d3f3b3ec4d1a..ebea1373d1b7 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -282,6 +282,7 @@ static ssize_t nvmet_ns_device_path_store(struct config_item *item,
 {
 	struct nvmet_ns *ns = to_nvmet_ns(item);
 	struct nvmet_subsys *subsys = ns->subsys;
+	size_t len;
 	int ret;
 
 	mutex_lock(&subsys->lock);
@@ -289,10 +290,14 @@ static ssize_t nvmet_ns_device_path_store(struct config_item *item,
 	if (ns->enabled)
 		goto out_unlock;
 
-	kfree(ns->device_path);
+	ret = -EINVAL;
+	len = strcspn(page, "\n");
+	if (!len)
+		goto out_unlock;
 
+	kfree(ns->device_path);
 	ret = -ENOMEM;
-	ns->device_path = kstrndup(page, strcspn(page, "\n"), GFP_KERNEL);
+	ns->device_path = kstrndup(page, len, GFP_KERNEL);
 	if (!ns->device_path)
 		goto out_unlock;
 
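The rejection of empty input hinges on strcspn() returning 0 when the
written page is empty or starts with a newline. A quick user-space
check of that behavior (names here are not from the patch):

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* what the store routine sees for: echo "" > device_path */
	const char *page = "\n";
	size_t len = strcspn(page, "\n");	/* chars before the newline */

	printf("len = %zu -> %s\n", len,
	       len ? "accept" : "reject with -EINVAL");
	return 0;
}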
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 74d4b785d2da..9838103f2d62 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -339,7 +339,7 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
 		goto out_unlock;
 
 	ret = nvmet_bdev_ns_enable(ns);
-	if (ret)
+	if (ret == -ENOTBLK)
 		ret = nvmet_file_ns_enable(ns);
 	if (ret)
 		goto out_unlock;
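
A sketch of the changed fallback, with illustrative stand-ins for
nvmet_bdev_ns_enable() and nvmet_file_ns_enable(): only -ENOTBLK now
falls through to the file backend, so a genuine error such as -ENOENT
for a bad path is reported rather than masked by a second failure:

#include <errno.h>
#include <stdio.h>

static int bdev_enable(int simulated_err)
{
	return simulated_err;	/* 0, -ENOTBLK, -ENOENT, ... */
}

static int file_enable(void)
{
	return 0;		/* assume the file backend succeeds */
}

static int ns_enable(int bdev_err)
{
	int ret = bdev_enable(bdev_err);

	if (ret == -ENOTBLK)	/* was: if (ret) */
		ret = file_enable();
	return ret;
}

int main(void)
{
	printf("regular file backing: %d\n", ns_enable(-ENOTBLK));
	printf("missing device path:  %d\n", ns_enable(-ENOENT));
	return 0;
}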
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 408279cb6f2c..29b4b236afd8 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -58,8 +58,8 @@ struct nvmet_fc_ls_iod {
 	struct work_struct		work;
 } __aligned(sizeof(unsigned long long));
 
+/* desired maximum for a single sequence - if sg list allows it */
 #define NVMET_FC_MAX_SEQ_LENGTH		(256 * 1024)
-#define NVMET_FC_MAX_XFR_SGENTS		(NVMET_FC_MAX_SEQ_LENGTH / PAGE_SIZE)
 
 enum nvmet_fcp_datadir {
 	NVMET_FCP_NODATA,
@@ -74,6 +74,7 @@ struct nvmet_fc_fcp_iod {
 	struct nvme_fc_cmd_iu		cmdiubuf;
 	struct nvme_fc_ersp_iu		rspiubuf;
 	dma_addr_t			rspdma;
+	struct scatterlist		*next_sg;
 	struct scatterlist		*data_sg;
 	int				data_sg_cnt;
 	u32				offset;
@@ -1025,8 +1026,7 @@ nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
 	INIT_LIST_HEAD(&newrec->assoc_list);
 	kref_init(&newrec->ref);
 	ida_init(&newrec->assoc_cnt);
-	newrec->max_sg_cnt = min_t(u32, NVMET_FC_MAX_XFR_SGENTS,
-					template->max_sgl_segments);
+	newrec->max_sg_cnt = template->max_sgl_segments;
 
 	ret = nvmet_fc_alloc_ls_iodlist(newrec);
 	if (ret) {
@@ -1722,6 +1722,7 @@ nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
 			((fod->io_dir == NVMET_FCP_WRITE) ?
 				DMA_FROM_DEVICE : DMA_TO_DEVICE));
 				/* note: write from initiator perspective */
+	fod->next_sg = fod->data_sg;
 
 	return 0;
 
@@ -1866,24 +1867,49 @@ nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
 		struct nvmet_fc_fcp_iod *fod, u8 op)
 {
 	struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
+	struct scatterlist *sg = fod->next_sg;
 	unsigned long flags;
-	u32 tlen;
+	u32 remaininglen = fod->req.transfer_len - fod->offset;
+	u32 tlen = 0;
 	int ret;
 
 	fcpreq->op = op;
 	fcpreq->offset = fod->offset;
 	fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC;
 
-	tlen = min_t(u32, tgtport->max_sg_cnt * PAGE_SIZE,
-			(fod->req.transfer_len - fod->offset));
+	/*
+	 * for next sequence:
+	 *   break at a sg element boundary
+	 *   attempt to keep sequence length capped at
+	 *     NVMET_FC_MAX_SEQ_LENGTH but allow sequence to
+	 *     be longer if a single sg element is larger
+	 *     than that amount. This is done to avoid creating
+	 *     a new sg list to use for the tgtport api.
+	 */
+	fcpreq->sg = sg;
+	fcpreq->sg_cnt = 0;
+	while (tlen < remaininglen &&
+	       fcpreq->sg_cnt < tgtport->max_sg_cnt &&
+	       tlen + sg_dma_len(sg) < NVMET_FC_MAX_SEQ_LENGTH) {
+		fcpreq->sg_cnt++;
+		tlen += sg_dma_len(sg);
+		sg = sg_next(sg);
+	}
+	if (tlen < remaininglen && fcpreq->sg_cnt == 0) {
+		fcpreq->sg_cnt++;
+		tlen += min_t(u32, sg_dma_len(sg), remaininglen);
+		sg = sg_next(sg);
+	}
+	if (tlen < remaininglen)
+		fod->next_sg = sg;
+	else
+		fod->next_sg = NULL;
+
 	fcpreq->transfer_length = tlen;
 	fcpreq->transferred_length = 0;
 	fcpreq->fcp_error = 0;
 	fcpreq->rsplen = 0;
 
-	fcpreq->sg = &fod->data_sg[fod->offset / PAGE_SIZE];
-	fcpreq->sg_cnt = DIV_ROUND_UP(tlen, PAGE_SIZE);
-
 	/*
 	 * If the last READDATA request: check if LLDD supports
 	 * combined xfr with response.
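
A user-space model of the carve-up above, with the sg list reduced to a
plain array of element lengths (illustrative only). Each sequence
breaks on an element boundary, stays under NVMET_FC_MAX_SEQ_LENGTH when
it can, and takes a single oversized element whole instead of splitting
it:

#include <stdio.h>

#define MAX_SEQ_LENGTH	(256 * 1024)
#define MAX_SG_CNT	256

int main(void)
{
	unsigned int sg_len[] = { 64 * 1024, 64 * 1024, 512 * 1024, 4096 };
	unsigned int nents = sizeof(sg_len) / sizeof(sg_len[0]);
	unsigned int transfer_len = 0, offset = 0, i = 0;

	for (unsigned int k = 0; k < nents; k++)
		transfer_len += sg_len[k];

	while (offset < transfer_len) {
		unsigned int remaining = transfer_len - offset;
		unsigned int tlen = 0, cnt = 0;

		/* mirror of the while loop in the hunk above */
		while (tlen < remaining && cnt < MAX_SG_CNT &&
		       tlen + sg_len[i] < MAX_SEQ_LENGTH) {
			tlen += sg_len[i++];
			cnt++;
		}
		/* an element larger than the cap forms its own sequence */
		if (tlen < remaining && cnt == 0) {
			tlen += sg_len[i] < remaining ? sg_len[i] : remaining;
			i++;
			cnt++;
		}
		printf("sequence: %u bytes in %u element(s)\n", tlen, cnt);
		offset += tlen;
	}
	return 0;
}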
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index d8d91f04bd7e..ae7586b8be07 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -162,7 +162,7 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
 	blk_status_t ret;
 
 	if (!nvmf_check_ready(&queue->ctrl->ctrl, req, queue_ready))
-		return nvmf_fail_nonready_command(req);
+		return nvmf_fail_nonready_command(&queue->ctrl->ctrl, req);
 
 	ret = nvme_setup_cmd(ns, req, &iod->cmd);
 	if (ret)