author     Jens Axboe <axboe@fb.com>  2017-01-26 11:56:15 -0500
committer  Jens Axboe <axboe@fb.com>  2017-01-26 11:56:15 -0500
commit     0d4ee015d5ea50febb882d00520d62c6de3f725c
tree       6bd2e032e00c34590a8d39d5a43b6a7518d1384d
parent     690e5325b8c7d5db05fc569c0f7b888bb4248272
parent     19e420bb4076ace670addc55300e3b8c4a02dfc6
Merge branch 'nvme-4.10-fixes' of git://git.infradead.org/nvme into for-linus
Pull nvme target fixes from Sagi:
Given that it's -rc6, I removed anything that is not a bug fix.
- nvmet-fc discard fix from Christoph
- queue disconnect fix from James
- nvmet-rdma dma sync fix from Parav
- Some more nvmet fixes
-rw-r--r--  drivers/nvme/host/fc.c          |  6
-rw-r--r--  drivers/nvme/target/configfs.c  |  1
-rw-r--r--  drivers/nvme/target/core.c      | 15
-rw-r--r--  drivers/nvme/target/fc.c        | 36
-rw-r--r--  drivers/nvme/target/nvmet.h     |  1
-rw-r--r--  drivers/nvme/target/rdma.c      | 17

6 files changed, 58 insertions, 18 deletions
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index fcc9dcfdf675..e65041c640cb 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -1663,13 +1663,13 @@ nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
 		return 0;
 
 	freq->sg_table.sgl = freq->first_sgl;
-	ret = sg_alloc_table_chained(&freq->sg_table, rq->nr_phys_segments,
-			freq->sg_table.sgl);
+	ret = sg_alloc_table_chained(&freq->sg_table,
+			blk_rq_nr_phys_segments(rq), freq->sg_table.sgl);
 	if (ret)
 		return -ENOMEM;
 
 	op->nents = blk_rq_map_sg(rq->q, rq, freq->sg_table.sgl);
-	WARN_ON(op->nents > rq->nr_phys_segments);
+	WARN_ON(op->nents > blk_rq_nr_phys_segments(rq));
 	dir = (rq_data_dir(rq) == WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
 	freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl,
 				op->nents, dir);
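The host-side change matters for data-less commands such as discard: those requests report rq->nr_phys_segments == 0 but still carry a special payload that must be mapped. blk_rq_nr_phys_segments() accounts for that case. A sketch of the block-layer helper as it looks in this kernel era, quoted from memory rather than from the tree:

/* approximate shape of the helper used above (see include/linux/blkdev.h) */
static inline unsigned short blk_rq_nr_phys_segments(struct request *rq)
{
	/*
	 * Data-less commands (e.g. discard) attach a special payload and
	 * must still be counted as one mappable segment.
	 */
	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
		return 1;
	return rq->nr_phys_segments;
}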
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index 6f5074153dcd..be8c800078e2 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -631,6 +631,7 @@ static void nvmet_subsys_release(struct config_item *item)
 {
 	struct nvmet_subsys *subsys = to_subsys(item);
 
+	nvmet_subsys_del_ctrls(subsys);
 	nvmet_subsys_put(subsys);
 }
 
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index b1d66ed655c9..fc5ba2f9e15f 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -200,7 +200,7 @@ static void nvmet_keep_alive_timer(struct work_struct *work)
 	pr_err("ctrl %d keep-alive timer (%d seconds) expired!\n",
 		ctrl->cntlid, ctrl->kato);
 
-	ctrl->ops->delete_ctrl(ctrl);
+	nvmet_ctrl_fatal_error(ctrl);
 }
 
 static void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
@@ -816,6 +816,9 @@ static void nvmet_ctrl_free(struct kref *ref)
 	list_del(&ctrl->subsys_entry);
 	mutex_unlock(&subsys->lock);
 
+	flush_work(&ctrl->async_event_work);
+	cancel_work_sync(&ctrl->fatal_err_work);
+
 	ida_simple_remove(&subsys->cntlid_ida, ctrl->cntlid);
 	nvmet_subsys_put(subsys);
 
@@ -935,6 +938,16 @@ static void nvmet_subsys_free(struct kref *ref)
 	kfree(subsys);
 }
 
+void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys)
+{
+	struct nvmet_ctrl *ctrl;
+
+	mutex_lock(&subsys->lock);
+	list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
+		ctrl->ops->delete_ctrl(ctrl);
+	mutex_unlock(&subsys->lock);
+}
+
 void nvmet_subsys_put(struct nvmet_subsys *subsys)
 {
 	kref_put(&subsys->ref, nvmet_subsys_free);
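The keep-alive hunk switches the timer from tearing the controller down directly to raising a fatal error, and the nvmet_ctrl_free hunk pairs with it by flushing the async-event work and cancelling the fatal-error work before the controller memory goes away. A minimal sketch of the deferral pattern this relies on; the names match the nvmet code, but the bodies are illustrative rather than quoted from the tree:

/*
 * Illustrative sketch: the timer path only flags the failure and defers
 * the heavy teardown to process context via fatal_err_work.
 */
static void nvmet_fatal_error_handler(struct work_struct *work)
{
	struct nvmet_ctrl *ctrl =
		container_of(work, struct nvmet_ctrl, fatal_err_work);

	pr_err("ctrl %d fatal error occurred!\n", ctrl->cntlid);
	ctrl->ops->delete_ctrl(ctrl);
}

void nvmet_ctrl_fatal_error(struct nvmet_ctrl *ctrl)
{
	mutex_lock(&ctrl->lock);
	if (!(ctrl->csts & NVME_CSTS_CFS)) {
		ctrl->csts |= NVME_CSTS_CFS;	/* Controller Fatal Status */
		schedule_work(&ctrl->fatal_err_work);
	}
	mutex_unlock(&ctrl->lock);
}

Because delete_ctrl now runs from a work item, nvmet_ctrl_free must cancel that work (and flush the async-event work) before dropping the last reference, which is exactly what the second hunk adds.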
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 173e842f19c9..ba57f9852bde 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -1314,7 +1314,7 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
 			(struct fcnvme_ls_disconnect_rqst *)iod->rqstbuf;
 	struct fcnvme_ls_disconnect_acc *acc =
 			(struct fcnvme_ls_disconnect_acc *)iod->rspbuf;
-	struct nvmet_fc_tgt_queue *queue;
+	struct nvmet_fc_tgt_queue *queue = NULL;
 	struct nvmet_fc_tgt_assoc *assoc;
 	int ret = 0;
 	bool del_assoc = false;
@@ -1348,7 +1348,18 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
 		assoc = nvmet_fc_find_target_assoc(tgtport,
 				be64_to_cpu(rqst->associd.association_id));
 		iod->assoc = assoc;
-		if (!assoc)
+		if (assoc) {
+			if (rqst->discon_cmd.scope ==
+					FCNVME_DISCONN_CONNECTION) {
+				queue = nvmet_fc_find_target_queue(tgtport,
+						be64_to_cpu(
+							rqst->discon_cmd.id));
+				if (!queue) {
+					nvmet_fc_tgt_a_put(assoc);
+					ret = VERR_NO_CONN;
+				}
+			}
+		} else
 			ret = VERR_NO_ASSOC;
 	}
 
@@ -1373,21 +1384,18 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
 			FCNVME_LS_DISCONNECT);
 
 
-	if (rqst->discon_cmd.scope == FCNVME_DISCONN_CONNECTION) {
-		queue = nvmet_fc_find_target_queue(tgtport,
-				be64_to_cpu(rqst->discon_cmd.id));
-		if (queue) {
-			int qid = queue->qid;
+	/* are we to delete a Connection ID (queue) */
+	if (queue) {
+		int qid = queue->qid;
 
 		nvmet_fc_delete_target_queue(queue);
 
 		/* release the get taken by find_target_queue */
 		nvmet_fc_tgt_q_put(queue);
 
 		/* tear association down if io queue terminated */
 		if (!qid)
 			del_assoc = true;
-		}
 	}
 
 	/* release get taken in nvmet_fc_find_target_assoc */
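The net effect of these two fc.c hunks: the Connection ID named by a disconnect LS is now resolved while the request is being validated, so an unknown queue is rejected with VERR_NO_CONN before the accept is formatted, rather than being discovered only afterwards. Condensed into a hypothetical helper (not code from the driver), the validation shape looks like:

/*
 * Hypothetical condensed sketch of the validate-then-act shape: resolve
 * every identifier the LS names before any state is touched.
 */
static int validate_disconnect(struct nvmet_fc_tgtport *tgtport,
			       struct fcnvme_ls_disconnect_rqst *rqst,
			       struct nvmet_fc_tgt_assoc **assocp,
			       struct nvmet_fc_tgt_queue **queuep)
{
	*assocp = nvmet_fc_find_target_assoc(tgtport,
			be64_to_cpu(rqst->associd.association_id));
	if (!*assocp)
		return VERR_NO_ASSOC;

	*queuep = NULL;
	if (rqst->discon_cmd.scope == FCNVME_DISCONN_CONNECTION) {
		*queuep = nvmet_fc_find_target_queue(tgtport,
				be64_to_cpu(rqst->discon_cmd.id));
		if (!*queuep) {
			nvmet_fc_tgt_a_put(*assocp);	/* drop lookup ref */
			return VERR_NO_CONN;
		}
	}
	return 0;
}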
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 23d5eb1c944f..cc7ad06b43a7 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -282,6 +282,7 @@ void nvmet_ctrl_put(struct nvmet_ctrl *ctrl);
 struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
 		enum nvme_subsys_type type);
 void nvmet_subsys_put(struct nvmet_subsys *subsys);
+void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys);
 
 struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid);
 void nvmet_put_namespace(struct nvmet_ns *ns);
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 8c3760a78ac0..60990220bd83 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -438,6 +438,10 @@ static int nvmet_rdma_post_recv(struct nvmet_rdma_device *ndev,
 {
 	struct ib_recv_wr *bad_wr;
 
+	ib_dma_sync_single_for_device(ndev->device,
+		cmd->sge[0].addr, cmd->sge[0].length,
+		DMA_FROM_DEVICE);
+
 	if (ndev->srq)
 		return ib_post_srq_recv(ndev->srq, &cmd->wr, &bad_wr);
 	return ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, &bad_wr);
@@ -538,6 +542,11 @@ static void nvmet_rdma_queue_response(struct nvmet_req *req)
 		first_wr = &rsp->send_wr;
 
 	nvmet_rdma_post_recv(rsp->queue->dev, rsp->cmd);
+
+	ib_dma_sync_single_for_device(rsp->queue->dev->device,
+		rsp->send_sge.addr, rsp->send_sge.length,
+		DMA_TO_DEVICE);
+
 	if (ib_post_send(cm_id->qp, first_wr, &bad_wr)) {
 		pr_err("sending cmd response failed\n");
 		nvmet_rdma_release_rsp(rsp);
@@ -698,6 +707,14 @@ static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue,
 	cmd->n_rdma = 0;
 	cmd->req.port = queue->port;
 
+
+	ib_dma_sync_single_for_cpu(queue->dev->device,
+		cmd->cmd->sge[0].addr, cmd->cmd->sge[0].length,
+		DMA_FROM_DEVICE);
+	ib_dma_sync_single_for_cpu(queue->dev->device,
+		cmd->send_sge.addr, cmd->send_sge.length,
+		DMA_TO_DEVICE);
+
 	if (!nvmet_req_init(&cmd->req, &queue->nvme_cq,
 			&queue->nvme_sq, &nvmet_rdma_ops))
 		return;
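All three rdma.c hunks apply the same streaming-DMA ownership rule: hand a buffer to the CPU before parsing what the HCA wrote into it, and hand it back to the device before the HCA may touch it again (when reposting the recv or posting the response send). A minimal sketch of that hand-off with the plain DMA API, of which the ib_dma_* calls are thin wrappers; the function and parameter names below are placeholders, not driver code:

#include <linux/dma-mapping.h>

/*
 * Placeholder sketch of the ownership hand-off for a receive buffer that
 * the device fills and the CPU then parses.
 */
static void handle_received_capsule(struct device *dev, dma_addr_t buf_dma,
				    size_t len)
{
	/* device -> CPU: make the HCA's writes visible before parsing */
	dma_sync_single_for_cpu(dev, buf_dma, len, DMA_FROM_DEVICE);

	/* ... CPU reads and validates the received command here ... */

	/* CPU -> device: give the buffer back before reposting the recv */
	dma_sync_single_for_device(dev, buf_dma, len, DMA_FROM_DEVICE);
}

On fully cache-coherent setups these syncs are typically no-ops, which is how the missing calls could go unnoticed until the target ran on hardware that needed them.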