author     Keith Busch <keith.busch@intel.com>          2013-07-15 17:02:19 -0400
committer  Matthew Wilcox <matthew.r.wilcox@intel.com>  2013-09-03 16:39:25 -0400
commit     0877cb0d285c7f1d53d0b84b360bdea4be4f3f59 (patch)
tree       3bbad0b26e8cb58d928e4e2d1661da2f22ba29a2 /drivers/block/nvme-core.c
parent     9e59d091b0eb04f223ed037348e3d9e36f30e72b (diff)
NVMe: Group pci related actions in functions
This will make it easier to reuse these outside probe/remove.
Signed-off-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Matthew Wilcox <matthew.r.wilcox@intel.com>
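
As a rough illustration of the reuse this enables (not part of this commit), a later power-management path could call the new helpers instead of duplicating the PCI setup and teardown from probe/remove. The nvme_dev_suspend()/nvme_dev_resume() names below are hypothetical; the sketch only uses functions that exist in this file.

/*
 * Hypothetical sketch, not part of this commit: once PCI bring-up and
 * teardown live in nvme_dev_map()/nvme_dev_unmap(), a suspend/resume
 * path could reuse them rather than duplicating probe/remove logic.
 */
static int nvme_dev_suspend(struct nvme_dev *dev)
{
	/* Quiesce I/O queues first (omitted), then drop the PCI
	 * resources that nvme_dev_map() acquired. */
	nvme_dev_unmap(dev);
	return 0;
}

static int nvme_dev_resume(struct nvme_dev *dev)
{
	int result;

	/* Re-enable the device, BAR mapping and DMA masks in one call. */
	result = nvme_dev_map(dev);
	if (result)
		return result;

	/* The admin queue must be set up again after remapping. */
	return nvme_configure_admin_queue(dev);
}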
Diffstat (limited to 'drivers/block/nvme-core.c')
-rw-r--r--	drivers/block/nvme-core.c	112
1 file changed, 66 insertions(+), 46 deletions(-)
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index 360ac5d32d26..a93f52c48036 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -1191,9 +1191,6 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
 	u64 cap = readq(&dev->bar->cap);
 	struct nvme_queue *nvmeq;
 
-	dev->dbs = ((void __iomem *)dev->bar) + 4096;
-	dev->db_stride = NVME_CAP_STRIDE(cap);
-
 	result = nvme_disable_ctrl(dev, cap);
 	if (result < 0)
 		return result;
@@ -1832,6 +1829,61 @@ static int nvme_dev_add(struct nvme_dev *dev)
 	return res;
 }
 
+static int nvme_dev_map(struct nvme_dev *dev)
+{
+	int bars, result = -ENOMEM;
+	struct pci_dev *pdev = dev->pci_dev;
+
+	if (pci_enable_device_mem(pdev))
+		return result;
+
+	dev->entry[0].vector = pdev->irq;
+	pci_set_master(pdev);
+	bars = pci_select_bars(pdev, IORESOURCE_MEM);
+	if (pci_request_selected_regions(pdev, bars, "nvme"))
+		goto disable_pci;
+
+	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
+		dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+	else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
+		dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+	else
+		goto disable_pci;
+
+	pci_set_drvdata(pdev, dev);
+	dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
+	if (!dev->bar)
+		goto disable;
+
+	dev->db_stride = NVME_CAP_STRIDE(readq(&dev->bar->cap));
+	dev->dbs = ((void __iomem *)dev->bar) + 4096;
+
+	return 0;
+
+ disable:
+	pci_release_regions(pdev);
+ disable_pci:
+	pci_disable_device(pdev);
+	return result;
+}
+
+static void nvme_dev_unmap(struct nvme_dev *dev)
+{
+	if (dev->pci_dev->msi_enabled)
+		pci_disable_msi(dev->pci_dev);
+	else if (dev->pci_dev->msix_enabled)
+		pci_disable_msix(dev->pci_dev);
+
+	if (dev->bar) {
+		iounmap(dev->bar);
+		dev->bar = NULL;
+	}
+
+	pci_release_regions(dev->pci_dev);
+	if (pci_is_enabled(dev->pci_dev))
+		pci_disable_device(dev->pci_dev);
+}
+
 static int nvme_dev_remove(struct nvme_dev *dev)
 {
 	struct nvme_ns *ns, *next;
@@ -1908,15 +1960,9 @@ static void nvme_free_dev(struct kref *kref)
 {
 	struct nvme_dev *dev = container_of(kref, struct nvme_dev, kref);
 	nvme_dev_remove(dev);
-	if (dev->pci_dev->msi_enabled)
-		pci_disable_msi(dev->pci_dev);
-	else if (dev->pci_dev->msix_enabled)
-		pci_disable_msix(dev->pci_dev);
-	iounmap(dev->bar);
+	nvme_dev_unmap(dev);
 	nvme_release_instance(dev);
 	nvme_release_prp_pools(dev);
-	pci_disable_device(dev->pci_dev);
-	pci_release_regions(dev->pci_dev);
 	kfree(dev->queues);
 	kfree(dev->entry);
 	kfree(dev);
@@ -1959,7 +2005,7 @@ static const struct file_operations nvme_dev_fops = {
 
 static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
-	int bars, result = -ENOMEM;
+	int result = -ENOMEM;
 	struct nvme_dev *dev;
 
 	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
@@ -1974,39 +2020,19 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	if (!dev->queues)
 		goto free;
 
-	if (pci_enable_device_mem(pdev))
-		goto free;
-	pci_set_master(pdev);
-	bars = pci_select_bars(pdev, IORESOURCE_MEM);
-	if (pci_request_selected_regions(pdev, bars, "nvme"))
-		goto disable;
-
 	INIT_LIST_HEAD(&dev->namespaces);
 	dev->pci_dev = pdev;
-	pci_set_drvdata(pdev, dev);
-
-	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
-		dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
-	else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
-		dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
-	else
-		goto disable;
-
 	result = nvme_set_instance(dev);
 	if (result)
-		goto disable;
-
-	dev->entry[0].vector = pdev->irq;
+		goto free;
 
 	result = nvme_setup_prp_pools(dev);
 	if (result)
-		goto disable_msix;
+		goto release;
 
-	dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
-	if (!dev->bar) {
-		result = -ENOMEM;
-		goto disable_msix;
-	}
+	result = nvme_dev_map(dev);
+	if (result)
+		goto release_pools;
 
 	result = nvme_configure_admin_queue(dev);
 	if (result)
@@ -2042,17 +2068,11 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
 	nvme_free_queues(dev);
  unmap:
-	iounmap(dev->bar);
- disable_msix:
-	if (dev->pci_dev->msi_enabled)
-		pci_disable_msi(dev->pci_dev);
-	else if (dev->pci_dev->msix_enabled)
-		pci_disable_msix(dev->pci_dev);
-	nvme_release_instance(dev);
+	nvme_dev_unmap(dev);
+ release_pools:
 	nvme_release_prp_pools(dev);
- disable:
-	pci_disable_device(pdev);
-	pci_release_regions(pdev);
+ release:
+	nvme_release_instance(dev);
  free:
 	kfree(dev->queues);
 	kfree(dev->entry);