author		Keith Busch <keith.busch@intel.com>	2014-03-03 18:39:13 -0500
committer	Matthew Wilcox <matthew.r.wilcox@intel.com>	2014-03-24 08:54:40 -0400
commit		4f5099af4f3d5f999d8ab7784472d93e810e3912
tree		f0dc2bc897e723037ae63a5f7f41bb3bff710399	/drivers/block/nvme-scsi.c
parent		5a92e700af2e5e0e6404988d6a7f2ed3dad3f46f
NVMe: IOCTL path RCU protect queue access
This adds RCU-protected access to a queue in the NVMe IOCTL path to fix
potential races between a surprise removal and queue usage in
nvme_submit_sync_cmd. The fix holds rcu_read_lock() to prevent the
nvme_queue from being freed while this path is executing. Because an RCU
read-side critical section cannot sleep, this path will no longer wait
for an available command id should they all be in use at the time a
passthrough IOCTL request is received.

Signed-off-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Matthew Wilcox <matthew.r.wilcox@intel.com>
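The nvme_submit_io_cmd() helper that replaces the get_nvmeq()/put_nvmeq()
pairs below is introduced in nvme-core.c and is not part of this diff. As a
rough sketch of the pattern the message describes (not the patch's actual
code; the queues field, its __rcu annotation, and the helper names are
assumptions for illustration only):

	#include <linux/rcupdate.h>

	/*
	 * Sketch only: look up an I/O queue under rcu_read_lock() so that a
	 * surprise-removal path, which unpublishes the queue pointer and
	 * frees it only after an RCU grace period, cannot free the queue
	 * while a submission is in flight here.
	 */
	static struct nvme_queue *example_lock_nvmeq(struct nvme_dev *dev,
						     int q_idx)
	{
		rcu_read_lock();
		/* dev->queues assumed to be a struct nvme_queue __rcu ** array */
		return rcu_dereference(dev->queues[q_idx]);
	}

	static void example_unlock_nvmeq(void)
	{
		rcu_read_unlock();
	}

Since the read-side critical section may not sleep, a synchronous submission
performed between such calls must allocate its command id without blocking
and fail fast (e.g. with -EBUSY) when none is free, which is why the message
notes that the IOCTL path no longer waits for an available command id.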
Diffstat (limited to 'drivers/block/nvme-scsi.c')
-rw-r--r--	drivers/block/nvme-scsi.c	31
1 file changed, 4 insertions(+), 27 deletions(-)
diff --git a/drivers/block/nvme-scsi.c b/drivers/block/nvme-scsi.c
index 4a0ceb64e269..e157e85bb5d7 100644
--- a/drivers/block/nvme-scsi.c
+++ b/drivers/block/nvme-scsi.c
@@ -2033,7 +2033,6 @@ static int nvme_trans_do_nvme_io(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 	int res = SNTI_TRANSLATION_SUCCESS;
 	int nvme_sc;
 	struct nvme_dev *dev = ns->dev;
-	struct nvme_queue *nvmeq;
 	u32 num_cmds;
 	struct nvme_iod *iod;
 	u64 unit_len;
@@ -2106,18 +2105,7 @@ static int nvme_trans_do_nvme_io(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 
 		nvme_offset += unit_num_blocks;
 
-		nvmeq = get_nvmeq(dev);
-		/*
-		 * Since nvme_submit_sync_cmd sleeps, we can't keep
-		 * preemption disabled.  We may be preempted at any
-		 * point, and be rescheduled to a different CPU.  That
-		 * will cause cacheline bouncing, but no additional
-		 * races since q_lock already protects against other
-		 * CPUs.
-		 */
-		put_nvmeq(nvmeq);
-		nvme_sc = nvme_submit_sync_cmd(nvmeq, &c, NULL,
-						NVME_IO_TIMEOUT);
+		nvme_sc = nvme_submit_io_cmd(dev, &c, NULL);
 		if (nvme_sc != NVME_SC_SUCCESS) {
 			nvme_unmap_user_pages(dev,
 				(is_write) ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
@@ -2644,7 +2632,6 @@ static int nvme_trans_start_stop(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 {
 	int res = SNTI_TRANSLATION_SUCCESS;
 	int nvme_sc;
-	struct nvme_queue *nvmeq;
 	struct nvme_command c;
 	u8 immed, pcmod, pc, no_flush, start;
 
@@ -2671,10 +2658,7 @@ static int nvme_trans_start_stop(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 		c.common.opcode = nvme_cmd_flush;
 		c.common.nsid = cpu_to_le32(ns->ns_id);
 
-		nvmeq = get_nvmeq(ns->dev);
-		put_nvmeq(nvmeq);
-		nvme_sc = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT);
-
+		nvme_sc = nvme_submit_io_cmd(ns->dev, &c, NULL);
 		res = nvme_trans_status_code(hdr, nvme_sc);
 		if (res)
 			goto out;
@@ -2697,15 +2681,12 @@ static int nvme_trans_synchronize_cache(struct nvme_ns *ns,
 	int res = SNTI_TRANSLATION_SUCCESS;
 	int nvme_sc;
 	struct nvme_command c;
-	struct nvme_queue *nvmeq;
 
 	memset(&c, 0, sizeof(c));
 	c.common.opcode = nvme_cmd_flush;
 	c.common.nsid = cpu_to_le32(ns->ns_id);
 
-	nvmeq = get_nvmeq(ns->dev);
-	put_nvmeq(nvmeq);
-	nvme_sc = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT);
+	nvme_sc = nvme_submit_io_cmd(ns->dev, &c, NULL);
 
 	res = nvme_trans_status_code(hdr, nvme_sc);
 	if (res)
@@ -2872,7 +2853,6 @@ static int nvme_trans_unmap(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 	struct nvme_dev *dev = ns->dev;
 	struct scsi_unmap_parm_list *plist;
 	struct nvme_dsm_range *range;
-	struct nvme_queue *nvmeq;
 	struct nvme_command c;
 	int i, nvme_sc, res = -ENOMEM;
 	u16 ndesc, list_len;
@@ -2914,10 +2894,7 @@ static int nvme_trans_unmap(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 	c.dsm.nr = cpu_to_le32(ndesc - 1);
 	c.dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
 
-	nvmeq = get_nvmeq(dev);
-	put_nvmeq(nvmeq);
-
-	nvme_sc = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT);
+	nvme_sc = nvme_submit_io_cmd(dev, &c, NULL);
 	res = nvme_trans_status_code(hdr, nvme_sc);
 
 	dma_free_coherent(&dev->pci_dev->dev, ndesc * sizeof(*range),