Diffstat (limited to 'drivers/nvme/target/rdma.c')
-rw-r--r--	drivers/nvme/target/rdma.c	19
1 file changed, 4 insertions(+), 15 deletions(-)
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index ddce100be57a..3f7971d3706d 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -122,7 +122,6 @@ struct nvmet_rdma_device {
 	int inline_page_count;
 };
 
-static struct workqueue_struct *nvmet_rdma_delete_wq;
 static bool nvmet_rdma_use_srq;
 module_param_named(use_srq, nvmet_rdma_use_srq, bool, 0444);
 MODULE_PARM_DESC(use_srq, "Use shared receive queue.");
@@ -1274,12 +1273,12 @@ static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
 
 	if (queue->host_qid == 0) {
 		/* Let inflight controller teardown complete */
-		flush_workqueue(nvmet_rdma_delete_wq);
+		flush_scheduled_work();
 	}
 
 	ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn);
 	if (ret) {
-		queue_work(nvmet_rdma_delete_wq, &queue->release_work);
+		schedule_work(&queue->release_work);
 		/* Destroying rdma_cm id is not needed here */
 		return 0;
 	}
@@ -1344,7 +1343,7 @@ static void __nvmet_rdma_queue_disconnect(struct nvmet_rdma_queue *queue)
 
 	if (disconnect) {
 		rdma_disconnect(queue->cm_id);
-		queue_work(nvmet_rdma_delete_wq, &queue->release_work);
+		schedule_work(&queue->release_work);
 	}
 }
 
@@ -1374,7 +1373,7 @@ static void nvmet_rdma_queue_connect_fail(struct rdma_cm_id *cm_id,
 	mutex_unlock(&nvmet_rdma_queue_mutex);
 
 	pr_err("failed to connect queue %d\n", queue->idx);
-	queue_work(nvmet_rdma_delete_wq, &queue->release_work);
+	schedule_work(&queue->release_work);
 }
 
 /**
@@ -1656,17 +1655,8 @@ static int __init nvmet_rdma_init(void)
 	if (ret)
 		goto err_ib_client;
 
-	nvmet_rdma_delete_wq = alloc_workqueue("nvmet-rdma-delete-wq",
-			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
-	if (!nvmet_rdma_delete_wq) {
-		ret = -ENOMEM;
-		goto err_unreg_transport;
-	}
-
 	return 0;
 
-err_unreg_transport:
-	nvmet_unregister_transport(&nvmet_rdma_ops);
 err_ib_client:
 	ib_unregister_client(&nvmet_rdma_ib_client);
 	return ret;
@@ -1674,7 +1664,6 @@ err_ib_client:
 
 static void __exit nvmet_rdma_exit(void)
 {
-	destroy_workqueue(nvmet_rdma_delete_wq);
 	nvmet_unregister_transport(&nvmet_rdma_ops);
 	ib_unregister_client(&nvmet_rdma_ib_client);
 	WARN_ON_ONCE(!list_empty(&nvmet_rdma_queue_list));
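
Note: the hunks above drop the dedicated nvmet-rdma-delete-wq workqueue and fall back to the kernel's shared system workqueue: schedule_work() queues a work item on system_wq, and flush_scheduled_work() waits for everything queued there to finish, which is how the host_qid == 0 path lets in-flight controller teardown complete. A minimal sketch of that pattern follows; it is not part of this patch, and the demo_* names are hypothetical stand-ins for queue->release_work and its handler.

/* demo_wq.c - sketch of the system-workqueue pattern used by this patch */
#include <linux/module.h>
#include <linux/workqueue.h>

static void demo_release_work(struct work_struct *work)
{
	/* Stand-in for the queue release handler: deferred teardown
	 * runs here, on the shared system workqueue. */
	pr_info("demo: deferred release executed\n");
}

static DECLARE_WORK(demo_work, demo_release_work);

static int __init demo_init(void)
{
	/* Mirrors schedule_work(&queue->release_work) in the error paths. */
	schedule_work(&demo_work);

	/* Mirrors flush_scheduled_work() in nvmet_rdma_queue_connect():
	 * block until all work queued on system_wq has completed. */
	flush_scheduled_work();
	return 0;
}

static void __exit demo_exit(void)
{
	flush_scheduled_work();	/* never unload with work still pending */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("system workqueue usage sketch");

The trade-off, as the diff shows, is simplicity over isolation: the private WQ_UNBOUND | WQ_MEM_RECLAIM queue guaranteed forward progress independent of other system_wq users, while schedule_work()/flush_scheduled_work() need no setup or teardown of their own.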