diff options
| author | Ramachandra Rao Gajula <rama@fastorsystems.com> | 2013-05-11 18:19:31 -0400 |
|---|---|---|
| committer | Matthew Wilcox <matthew.r.wilcox@intel.com> | 2013-05-31 11:45:52 -0400 |
| commit | fa08a396647767abd24a9e7015cb177121d0cf15 (patch) | |
| tree | 3447b579447e68d3c1a1f888d0123610eb9db7d7 | |
| parent | cf9f123b38345b2c2777e642eb9bb561f4b7de91 (diff) | |
NVMe: Add MSI support
Some devices only have support for MSI, not MSI-X. While MSI is more
limited, it still provides better performance than line-based interrupts.
Signed-off-by: Ramachandra Gajula <rama@fastorsystems.com>
Signed-off-by: Matthew Wilcox <matthew.r.wilcox@intel.com>
| -rw-r--r-- | drivers/block/nvme-core.c | 40 |
1 file changed, 32 insertions(+), 8 deletions(-)
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c index a57d3bcec803..ce79a590b45b 100644 --- a/drivers/block/nvme-core.c +++ b/drivers/block/nvme-core.c | |||
| @@ -1637,7 +1637,8 @@ static int set_queue_count(struct nvme_dev *dev, int count) | |||
| 1637 | 1637 | ||
| 1638 | static int nvme_setup_io_queues(struct nvme_dev *dev) | 1638 | static int nvme_setup_io_queues(struct nvme_dev *dev) |
| 1639 | { | 1639 | { |
| 1640 | int result, cpu, i, nr_io_queues, db_bar_size, q_depth; | 1640 | struct pci_dev *pdev = dev->pci_dev; |
| 1641 | int result, cpu, i, nr_io_queues, db_bar_size, q_depth, q_count; | ||
| 1641 | 1642 | ||
| 1642 | nr_io_queues = num_online_cpus(); | 1643 | nr_io_queues = num_online_cpus(); |
| 1643 | result = set_queue_count(dev, nr_io_queues); | 1644 | result = set_queue_count(dev, nr_io_queues); |
| @@ -1646,14 +1647,14 @@ static int nvme_setup_io_queues(struct nvme_dev *dev) | |||
| 1646 | if (result < nr_io_queues) | 1647 | if (result < nr_io_queues) |
| 1647 | nr_io_queues = result; | 1648 | nr_io_queues = result; |
| 1648 | 1649 | ||
| 1650 | q_count = nr_io_queues; | ||
| 1649 | /* Deregister the admin queue's interrupt */ | 1651 | /* Deregister the admin queue's interrupt */ |
| 1650 | free_irq(dev->entry[0].vector, dev->queues[0]); | 1652 | free_irq(dev->entry[0].vector, dev->queues[0]); |
| 1651 | 1653 | ||
| 1652 | db_bar_size = 4096 + ((nr_io_queues + 1) << (dev->db_stride + 3)); | 1654 | db_bar_size = 4096 + ((nr_io_queues + 1) << (dev->db_stride + 3)); |
| 1653 | if (db_bar_size > 8192) { | 1655 | if (db_bar_size > 8192) { |
| 1654 | iounmap(dev->bar); | 1656 | iounmap(dev->bar); |
| 1655 | dev->bar = ioremap(pci_resource_start(dev->pci_dev, 0), | 1657 | dev->bar = ioremap(pci_resource_start(pdev, 0), db_bar_size); |
| 1656 | db_bar_size); | ||
| 1657 | dev->dbs = ((void __iomem *)dev->bar) + 4096; | 1658 | dev->dbs = ((void __iomem *)dev->bar) + 4096; |
| 1658 | dev->queues[0]->q_db = dev->dbs; | 1659 | dev->queues[0]->q_db = dev->dbs; |
| 1659 | } | 1660 | } |
| @@ -1661,19 +1662,36 @@ static int nvme_setup_io_queues(struct nvme_dev *dev) | |||
| 1661 | for (i = 0; i < nr_io_queues; i++) | 1662 | for (i = 0; i < nr_io_queues; i++) |
| 1662 | dev->entry[i].entry = i; | 1663 | dev->entry[i].entry = i; |
| 1663 | for (;;) { | 1664 | for (;;) { |
| 1664 | result = pci_enable_msix(dev->pci_dev, dev->entry, | 1665 | result = pci_enable_msix(pdev, dev->entry, nr_io_queues); |
| 1665 | nr_io_queues); | ||
| 1666 | if (result == 0) { | 1666 | if (result == 0) { |
| 1667 | break; | 1667 | break; |
| 1668 | } else if (result > 0) { | 1668 | } else if (result > 0) { |
| 1669 | nr_io_queues = result; | 1669 | nr_io_queues = result; |
| 1670 | continue; | 1670 | continue; |
| 1671 | } else { | 1671 | } else { |
| 1672 | nr_io_queues = 1; | 1672 | nr_io_queues = 0; |
| 1673 | break; | 1673 | break; |
| 1674 | } | 1674 | } |
| 1675 | } | 1675 | } |
| 1676 | 1676 | ||
| 1677 | if (nr_io_queues == 0) { | ||
| 1678 | nr_io_queues = q_count; | ||
| 1679 | for (;;) { | ||
| 1680 | result = pci_enable_msi_block(pdev, nr_io_queues); | ||
| 1681 | if (result == 0) { | ||
| 1682 | for (i = 0; i < nr_io_queues; i++) | ||
| 1683 | dev->entry[i].vector = i + pdev->irq; | ||
| 1684 | break; | ||
| 1685 | } else if (result > 0) { | ||
| 1686 | nr_io_queues = result; | ||
| 1687 | continue; | ||
| 1688 | } else { | ||
| 1689 | nr_io_queues = 1; | ||
| 1690 | break; | ||
| 1691 | } | ||
| 1692 | } | ||
| 1693 | } | ||
| 1694 | |||
| 1677 | result = queue_request_irq(dev, dev->queues[0], "nvme admin"); | 1695 | result = queue_request_irq(dev, dev->queues[0], "nvme admin"); |
| 1678 | /* XXX: handle failure here */ | 1696 | /* XXX: handle failure here */ |
| 1679 | 1697 | ||
| @@ -1854,7 +1872,10 @@ static void nvme_free_dev(struct kref *kref) | |||
| 1854 | { | 1872 | { |
| 1855 | struct nvme_dev *dev = container_of(kref, struct nvme_dev, kref); | 1873 | struct nvme_dev *dev = container_of(kref, struct nvme_dev, kref); |
| 1856 | nvme_dev_remove(dev); | 1874 | nvme_dev_remove(dev); |
| 1857 | pci_disable_msix(dev->pci_dev); | 1875 | if (dev->pci_dev->msi_enabled) |
| 1876 | pci_disable_msi(dev->pci_dev); | ||
| 1877 | else if (dev->pci_dev->msix_enabled) | ||
| 1878 | pci_disable_msix(dev->pci_dev); | ||
| 1858 | iounmap(dev->bar); | 1879 | iounmap(dev->bar); |
| 1859 | nvme_release_instance(dev); | 1880 | nvme_release_instance(dev); |
| 1860 | nvme_release_prp_pools(dev); | 1881 | nvme_release_prp_pools(dev); |
| @@ -1987,7 +2008,10 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) | |||
| 1987 | unmap: | 2008 | unmap: |
| 1988 | iounmap(dev->bar); | 2009 | iounmap(dev->bar); |
| 1989 | disable_msix: | 2010 | disable_msix: |
| 1990 | pci_disable_msix(pdev); | 2011 | if (dev->pci_dev->msi_enabled) |
| 2012 | pci_disable_msi(dev->pci_dev); | ||
| 2013 | else if (dev->pci_dev->msix_enabled) | ||
| 2014 | pci_disable_msix(dev->pci_dev); | ||
| 1991 | nvme_release_instance(dev); | 2015 | nvme_release_instance(dev); |
| 1992 | nvme_release_prp_pools(dev); | 2016 | nvme_release_prp_pools(dev); |
| 1993 | disable: | 2017 | disable: |
