about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorSagi Grimberg <sagi@grimberg.me>2018-06-19 08:34:10 -0400
committerChristoph Hellwig <hch@lst.de>2018-06-20 08:20:28 -0400
commit94e42213cc1ae41c57819539c0130f8dfc69d718 (patch)
tree9e9f01f9576663e64878e24ee25373ee7f712dac
parent3d0641015bf73aaa1cb54c936674959e7805070f (diff)
nvme-rdma: fix possible free of a non-allocated async event buffer
If nvme_rdma_configure_admin_queue fails before we allocated the async event buffer, we will falsely free it because nvme_rdma_free_queue is freeing it. Fix it by allocating the buffer right after nvme_rdma_alloc_queue and free it right before nvme_rdma_queue_free to maintain orderly reverse cleanup sequence.

Reported-by: Israel Rukshin <israelr@mellanox.com>
Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
Reviewed-by: Max Gurtovoy <maxg@mellanox.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
-rw-r--r--drivers/nvme/host/rdma.c24
1 file changed, 11 insertions(+), 13 deletions(-)
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index bcb0e5d6343d..f9affb71ac85 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -560,12 +560,6 @@ static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue)
560 if (!test_and_clear_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags)) 560 if (!test_and_clear_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags))
561 return; 561 return;
562 562
563 if (nvme_rdma_queue_idx(queue) == 0) {
564 nvme_rdma_free_qe(queue->device->dev,
565 &queue->ctrl->async_event_sqe,
566 sizeof(struct nvme_command), DMA_TO_DEVICE);
567 }
568
569 nvme_rdma_destroy_queue_ib(queue); 563 nvme_rdma_destroy_queue_ib(queue);
570 rdma_destroy_id(queue->cm_id); 564 rdma_destroy_id(queue->cm_id);
571} 565}
@@ -739,6 +733,8 @@ static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl,
739 blk_cleanup_queue(ctrl->ctrl.admin_q); 733 blk_cleanup_queue(ctrl->ctrl.admin_q);
740 nvme_rdma_free_tagset(&ctrl->ctrl, ctrl->ctrl.admin_tagset); 734 nvme_rdma_free_tagset(&ctrl->ctrl, ctrl->ctrl.admin_tagset);
741 } 735 }
736 nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
737 sizeof(struct nvme_command), DMA_TO_DEVICE);
742 nvme_rdma_free_queue(&ctrl->queues[0]); 738 nvme_rdma_free_queue(&ctrl->queues[0]);
743} 739}
744 740
@@ -755,11 +751,16 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
755 751
756 ctrl->max_fr_pages = nvme_rdma_get_max_fr_pages(ctrl->device->dev); 752 ctrl->max_fr_pages = nvme_rdma_get_max_fr_pages(ctrl->device->dev);
757 753
754 error = nvme_rdma_alloc_qe(ctrl->device->dev, &ctrl->async_event_sqe,
755 sizeof(struct nvme_command), DMA_TO_DEVICE);
756 if (error)
757 goto out_free_queue;
758
758 if (new) { 759 if (new) {
759 ctrl->ctrl.admin_tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, true); 760 ctrl->ctrl.admin_tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, true);
760 if (IS_ERR(ctrl->ctrl.admin_tagset)) { 761 if (IS_ERR(ctrl->ctrl.admin_tagset)) {
761 error = PTR_ERR(ctrl->ctrl.admin_tagset); 762 error = PTR_ERR(ctrl->ctrl.admin_tagset);
762 goto out_free_queue; 763 goto out_free_async_qe;
763 } 764 }
764 765
765 ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set); 766 ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
@@ -795,12 +796,6 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
795 if (error) 796 if (error)
796 goto out_stop_queue; 797 goto out_stop_queue;
797 798
798 error = nvme_rdma_alloc_qe(ctrl->queues[0].device->dev,
799 &ctrl->async_event_sqe, sizeof(struct nvme_command),
800 DMA_TO_DEVICE);
801 if (error)
802 goto out_stop_queue;
803
804 return 0; 799 return 0;
805 800
806out_stop_queue: 801out_stop_queue:
@@ -811,6 +806,9 @@ out_cleanup_queue:
811out_free_tagset: 806out_free_tagset:
812 if (new) 807 if (new)
813 nvme_rdma_free_tagset(&ctrl->ctrl, ctrl->ctrl.admin_tagset); 808 nvme_rdma_free_tagset(&ctrl->ctrl, ctrl->ctrl.admin_tagset);
809out_free_async_qe:
810 nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
811 sizeof(struct nvme_command), DMA_TO_DEVICE);
814out_free_queue: 812out_free_queue:
815 nvme_rdma_free_queue(&ctrl->queues[0]); 813 nvme_rdma_free_queue(&ctrl->queues[0]);
816 return error; 814 return error;