author		Keith Busch <keith.busch@intel.com>		2013-07-15 17:02:24 -0400
committer	Matthew Wilcox <matthew.r.wilcox@intel.com>	2013-09-03 16:44:25 -0400
commit		9d713c2bfb5e1d6abb18a8b12293631f9fcdc708
tree		a31be5d7d9a6596a1d0f1efc6e01b7d5f560ad93 /drivers/block/nvme-core.c
parent		cd63894630ab17a192bf97427d16dbec10710a6a
NVMe: Handle ioremap failure
Decrement the number of queues required for doorbell remapping until
the memory is successfully mapped at that size.

Additional checks are done so that we don't call free_irq if it has
already been freed.
Signed-off-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Matthew Wilcox <matthew.r.wilcox@intel.com>
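
The core of the fix is a shrink-and-retry loop around ioremap(): if the
enlarged doorbell region cannot be mapped, the driver gives up one I/O queue
at a time until the mapping fits or no queues remain. A minimal sketch of
that idiom, pulled out of the patch into a hypothetical helper (the real
code lives inline in nvme_setup_io_queues()):

#include <linux/io.h>
#include <linux/pci.h>

/* Sketch only: hypothetical helper and parameter names, not driver code. */
static void __iomem *map_doorbell_bar(struct pci_dev *pdev,
				      unsigned *nr_io_queues,
				      unsigned db_stride)
{
	void __iomem *bar;
	size_t size;

	for (;;) {
		/* 4 KB of registers + two doorbells per queue (+1 admin) */
		size = 4096 + ((*nr_io_queues + 1) << (db_stride + 3));
		bar = ioremap(pci_resource_start(pdev, 0), size);
		if (bar)
			return bar;	/* mapped enough for *nr_io_queues */
		if (!--*nr_io_queues)
			return NULL;	/* caller turns this into -ENOMEM */
	}
}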
Diffstat (limited to 'drivers/block/nvme-core.c')
 drivers/block/nvme-core.c | 30 ++++++++++++++++++++++++--------
 1 file changed, 22 insertions(+), 8 deletions(-)
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index 8efa728f1eac..9f2b424c445e 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -1739,10 +1739,15 @@ static int set_queue_count(struct nvme_dev *dev, int count)
 	return min(result & 0xffff, result >> 16) + 1;
 }
 
+static size_t db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues)
+{
+	return 4096 + ((nr_io_queues + 1) << (dev->db_stride + 3));
+}
+
 static int nvme_setup_io_queues(struct nvme_dev *dev)
 {
 	struct pci_dev *pdev = dev->pci_dev;
-	int result, cpu, i, vecs, nr_io_queues, db_bar_size, q_depth;
+	int result, cpu, i, vecs, nr_io_queues, size, q_depth;
 
 	nr_io_queues = num_online_cpus();
 	result = set_queue_count(dev, nr_io_queues);
@@ -1751,17 +1756,24 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
 	if (result < nr_io_queues)
 		nr_io_queues = result;
 
-	/* Deregister the admin queue's interrupt */
-	free_irq(dev->entry[0].vector, dev->queues[0]);
-
-	db_bar_size = 4096 + ((nr_io_queues + 1) << (dev->db_stride + 3));
-	if (db_bar_size > 8192) {
+	size = db_bar_size(dev, nr_io_queues);
+	if (size > 8192) {
 		iounmap(dev->bar);
-		dev->bar = ioremap(pci_resource_start(pdev, 0), db_bar_size);
+		do {
+			dev->bar = ioremap(pci_resource_start(pdev, 0), size);
+			if (dev->bar)
+				break;
+			if (!--nr_io_queues)
+				return -ENOMEM;
+			size = db_bar_size(dev, nr_io_queues);
+		} while (1);
 		dev->dbs = ((void __iomem *)dev->bar) + 4096;
 		dev->queues[0]->q_db = dev->dbs;
 	}
 
+	/* Deregister the admin queue's interrupt */
+	free_irq(dev->entry[0].vector, dev->queues[0]);
+
 	vecs = nr_io_queues;
 	for (i = 0; i < vecs; i++)
 		dev->entry[i].entry = i;
@@ -1799,8 +1811,10 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
 	nr_io_queues = vecs;
 
 	result = queue_request_irq(dev, dev->queues[0], "nvme admin");
-	if (result)
+	if (result) {
+		dev->queues[0]->q_suspended = 1;
 		goto free_queues;
+	}
 
 	/* Free previously allocated queues that are no longer usable */
 	spin_lock(&dev_list_lock);
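
To see when the remap path triggers, note that each queue pair needs two
doorbells (SQ tail and CQ head), each 4 << db_stride bytes apart, i.e.
1 << (db_stride + 3) bytes per queue, and that the doorbells start 4096
bytes into BAR 0. A quick standalone check of the new helper's arithmetic
with illustrative values (a sketch, not driver code; db_stride is passed
directly instead of through struct nvme_dev):

#include <stdio.h>

/* Same arithmetic as the patch's db_bar_size() helper. */
static size_t db_bar_size(unsigned db_stride, unsigned nr_io_queues)
{
	return 4096 + ((nr_io_queues + 1) << (db_stride + 3));
}

int main(void)
{
	/* With db_stride == 0 the default 8192-byte mapping covers the
	 * admin queue plus 511 I/O queues; one more forces a remap. */
	printf("%zu\n", db_bar_size(0, 511));	/* 8192: fits          */
	printf("%zu\n", db_bar_size(0, 512));	/* 8200: > 8192, remap */
	return 0;
}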