Diffstat (limited to 'drivers/nvme/host/rdma.c')
-rw-r--r--   drivers/nvme/host/rdma.c | 83
1 file changed, 45 insertions(+), 38 deletions(-)
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 3e3ce2b0424e..8d2875b4c56d 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -12,13 +12,11 @@
  * more details.
  */
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/delay.h>
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/err.h>
 #include <linux/string.h>
-#include <linux/jiffies.h>
 #include <linux/atomic.h>
 #include <linux/blk-mq.h>
 #include <linux/types.h>
@@ -26,7 +24,6 @@
 #include <linux/mutex.h>
 #include <linux/scatterlist.h>
 #include <linux/nvme.h>
-#include <linux/t10-pi.h>
 #include <asm/unaligned.h>
 
 #include <rdma/ib_verbs.h>
@@ -169,7 +166,6 @@ MODULE_PARM_DESC(register_always,
 static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
                struct rdma_cm_event *event);
 static void nvme_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc);
-static int __nvme_rdma_del_ctrl(struct nvme_rdma_ctrl *ctrl);
 
 /* XXX: really should move to a generic header sooner or later.. */
 static inline void put_unaligned_le24(u32 val, u8 *p)
@@ -687,11 +683,6 @@ static void nvme_rdma_free_ctrl(struct nvme_ctrl *nctrl)
        list_del(&ctrl->list);
        mutex_unlock(&nvme_rdma_ctrl_mutex);
 
-       if (ctrl->ctrl.tagset) {
-               blk_cleanup_queue(ctrl->ctrl.connect_q);
-               blk_mq_free_tag_set(&ctrl->tag_set);
-               nvme_rdma_dev_put(ctrl->device);
-       }
        kfree(ctrl->queues);
        nvmf_free_options(nctrl->opts);
 free_ctrl:
@@ -748,8 +739,11 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
        changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE);
        WARN_ON_ONCE(!changed);
 
-       if (ctrl->queue_count > 1)
+       if (ctrl->queue_count > 1) {
                nvme_start_queues(&ctrl->ctrl);
+               nvme_queue_scan(&ctrl->ctrl);
+               nvme_queue_async_events(&ctrl->ctrl);
+       }
 
        dev_info(ctrl->ctrl.device, "Successfully reconnected\n");
 
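For readability, here is how the reconnect-completion path reads once the hunk above is applied. This is a sketch assembled from the hunk, not code copied from the tree: after the I/O queues are restarted, the controller also re-queues a namespace scan and re-arms async event (AEN) handling, matching what the reset path at the end of this patch does.

        /* Sketch: reconnect completion after this patch (assembled from the hunk above) */
        if (ctrl->queue_count > 1) {
                nvme_start_queues(&ctrl->ctrl);         /* unquiesce the I/O queues */
                nvme_queue_scan(&ctrl->ctrl);           /* rediscover namespaces */
                nvme_queue_async_events(&ctrl->ctrl);   /* re-arm AEN submission */
        }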
@@ -1269,7 +1263,7 @@ static int nvme_rdma_route_resolved(struct nvme_rdma_queue *queue)
 {
        struct nvme_rdma_ctrl *ctrl = queue->ctrl;
        struct rdma_conn_param param = { };
-       struct nvme_rdma_cm_req priv;
+       struct nvme_rdma_cm_req priv = { };
        int ret;
 
        param.qp_num = queue->qp->qp_num;
@@ -1318,37 +1312,39 @@ out_destroy_queue_ib:
  * that caught the event. Since we hold the callout until the controller
  * deletion is completed, we'll deadlock if the controller deletion will
  * call rdma_destroy_id on this queue's cm_id. Thus, we claim ownership
- * of destroying this queue before-hand, destroy the queue resources
- * after the controller deletion completed with the exception of destroying
- * the cm_id implicitely by returning a non-zero rc to the callout.
+ * of destroying this queue before-hand, destroy the queue resources,
+ * then queue the controller deletion which won't destroy this queue and
+ * we destroy the cm_id implicitely by returning a non-zero rc to the callout.
  */
 static int nvme_rdma_device_unplug(struct nvme_rdma_queue *queue)
 {
        struct nvme_rdma_ctrl *ctrl = queue->ctrl;
-       int ret, ctrl_deleted = 0;
+       int ret;
 
-       /* First disable the queue so ctrl delete won't free it */
-       if (!test_and_clear_bit(NVME_RDMA_Q_CONNECTED, &queue->flags))
-               goto out;
+       /* Own the controller deletion */
+       if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING))
+               return 0;
 
-       /* delete the controller */
-       ret = __nvme_rdma_del_ctrl(ctrl);
-       if (!ret) {
-               dev_warn(ctrl->ctrl.device,
-                       "Got rdma device removal event, deleting ctrl\n");
-               flush_work(&ctrl->delete_work);
+       dev_warn(ctrl->ctrl.device,
+               "Got rdma device removal event, deleting ctrl\n");
 
-               /* Return non-zero so the cm_id will destroy implicitly */
-               ctrl_deleted = 1;
+       /* Get rid of reconnect work if its running */
+       cancel_delayed_work_sync(&ctrl->reconnect_work);
 
+       /* Disable the queue so ctrl delete won't free it */
+       if (test_and_clear_bit(NVME_RDMA_Q_CONNECTED, &queue->flags)) {
                /* Free this queue ourselves */
-               rdma_disconnect(queue->cm_id);
-               ib_drain_qp(queue->qp);
+               nvme_rdma_stop_queue(queue);
                nvme_rdma_destroy_queue_ib(queue);
+
+               /* Return non-zero so the cm_id will destroy implicitly */
+               ret = 1;
        }
 
-out:
-       return ctrl_deleted;
+       /* Queue controller deletion */
+       queue_work(nvme_rdma_wq, &ctrl->delete_work);
+       flush_work(&ctrl->delete_work);
+       return ret;
 }
 
 static int nvme_rdma_cm_handler(struct rdma_cm_id *cm_id,
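Because the hunk above interleaves removals and additions, the resulting function is easier to follow in one piece. The following is a sketch of nvme_rdma_device_unplug() as it reads with the patch applied, reassembled from the '+' and context lines above rather than copied from the tree:

static int nvme_rdma_device_unplug(struct nvme_rdma_queue *queue)
{
        struct nvme_rdma_ctrl *ctrl = queue->ctrl;
        int ret = 0;    /* initialized here for clarity; the hunk declares a plain 'int ret;' */

        /* Own the controller deletion */
        if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_DELETING))
                return 0;

        dev_warn(ctrl->ctrl.device,
                "Got rdma device removal event, deleting ctrl\n");

        /* Get rid of reconnect work if its running */
        cancel_delayed_work_sync(&ctrl->reconnect_work);

        /* Disable the queue so ctrl delete won't free it */
        if (test_and_clear_bit(NVME_RDMA_Q_CONNECTED, &queue->flags)) {
                /* Free this queue ourselves */
                nvme_rdma_stop_queue(queue);
                nvme_rdma_destroy_queue_ib(queue);

                /* Return non-zero so the cm_id will destroy implicitly */
                ret = 1;
        }

        /* Queue controller deletion and wait for it to finish */
        queue_work(nvme_rdma_wq, &ctrl->delete_work);
        flush_work(&ctrl->delete_work);
        return ret;
}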
@@ -1648,7 +1644,7 @@ static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl)
                nvme_rdma_free_io_queues(ctrl);
        }
 
-       if (ctrl->ctrl.state == NVME_CTRL_LIVE)
+       if (test_bit(NVME_RDMA_Q_CONNECTED, &ctrl->queues[0].flags))
                nvme_shutdown_ctrl(&ctrl->ctrl);
 
        blk_mq_stop_hw_queues(ctrl->ctrl.admin_q);
@@ -1657,15 +1653,27 @@ static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl)
        nvme_rdma_destroy_admin_queue(ctrl);
 }
 
+static void __nvme_rdma_remove_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown)
+{
+       nvme_uninit_ctrl(&ctrl->ctrl);
+       if (shutdown)
+               nvme_rdma_shutdown_ctrl(ctrl);
+
+       if (ctrl->ctrl.tagset) {
+               blk_cleanup_queue(ctrl->ctrl.connect_q);
+               blk_mq_free_tag_set(&ctrl->tag_set);
+               nvme_rdma_dev_put(ctrl->device);
+       }
+
+       nvme_put_ctrl(&ctrl->ctrl);
+}
+
 static void nvme_rdma_del_ctrl_work(struct work_struct *work)
 {
        struct nvme_rdma_ctrl *ctrl = container_of(work,
                        struct nvme_rdma_ctrl, delete_work);
 
-       nvme_remove_namespaces(&ctrl->ctrl);
-       nvme_rdma_shutdown_ctrl(ctrl);
-       nvme_uninit_ctrl(&ctrl->ctrl);
-       nvme_put_ctrl(&ctrl->ctrl);
+       __nvme_rdma_remove_ctrl(ctrl, true);
 }
 
 static int __nvme_rdma_del_ctrl(struct nvme_rdma_ctrl *ctrl)
@@ -1698,9 +1706,7 @@ static void nvme_rdma_remove_ctrl_work(struct work_struct *work)
        struct nvme_rdma_ctrl *ctrl = container_of(work,
                        struct nvme_rdma_ctrl, delete_work);
 
-       nvme_remove_namespaces(&ctrl->ctrl);
-       nvme_uninit_ctrl(&ctrl->ctrl);
-       nvme_put_ctrl(&ctrl->ctrl);
+       __nvme_rdma_remove_ctrl(ctrl, false);
 }
 
 static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
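Taken together, the last three hunks consolidate controller teardown into the new __nvme_rdma_remove_ctrl() helper: the connect_q/tag_set/device-reference cleanup removed from nvme_rdma_free_ctrl() earlier in this patch now happens there, both delete handlers call the helper instead of open-coding uninit/put, and neither calls nvme_remove_namespaces() directly any more. A sketch of the resulting callers, assembled from the hunks above:

/* Sketch (assembled from the hunks above): both delete handlers share one helper */
static void nvme_rdma_del_ctrl_work(struct work_struct *work)
{
        struct nvme_rdma_ctrl *ctrl = container_of(work,
                        struct nvme_rdma_ctrl, delete_work);

        __nvme_rdma_remove_ctrl(ctrl, true);    /* shutdown = true: run nvme_rdma_shutdown_ctrl() first */
}

static void nvme_rdma_remove_ctrl_work(struct work_struct *work)
{
        struct nvme_rdma_ctrl *ctrl = container_of(work,
                        struct nvme_rdma_ctrl, delete_work);

        __nvme_rdma_remove_ctrl(ctrl, false);   /* shutdown = false: skip the controller shutdown */
}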
@@ -1739,6 +1745,7 @@ static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
        if (ctrl->queue_count > 1) {
                nvme_start_queues(&ctrl->ctrl);
                nvme_queue_scan(&ctrl->ctrl);
+               nvme_queue_async_events(&ctrl->ctrl);
        }
 
        return;