author		Linus Torvalds <torvalds@linux-foundation.org>	2013-06-12 02:07:21 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-06-12 02:07:21 -0400
commit		77293e215edef7871a39de4b326f777bc39278ca (patch)
tree		9707a27fb1eaa6d67e95b31504fbe49f06e52db3
parent		af180b81a3f4ea925fae88878f367e676e99bf73 (diff)
parent		fa08a396647767abd24a9e7015cb177121d0cf15 (diff)

Merge branch 'fixes-3.10' of git://git.infradead.org/users/willy/linux-nvme
Pull NVMe fixes from Matthew Wilcox.

* 'fixes-3.10' of git://git.infradead.org/users/willy/linux-nvme:
  NVMe: Add MSI support
  NVMe: Use dma_set_mask() correctly
  Return the result from user admin command IOCTL even in case of failure
  NVMe: Do not cancel command multiple times
  NVMe: fix error return code in nvme_submit_bio_queue()
  NVMe: check for integer overflow in nvme_map_user_pages()
  MAINTAINERS: update NVM EXPRESS DRIVER file list
  NVMe: Fix a signedness bug in nvme_trans_modesel_get_mp
  NVMe: Remove redundant version.h header include
 MAINTAINERS               |  2 +-
 drivers/block/nvme-core.c | 62 ++++++++++++++++++++++++++++++++++++++++++++++++--------------
 drivers/block/nvme-scsi.c |  3 +--
 3 files changed, 50 insertions(+), 17 deletions(-)
diff --git a/MAINTAINERS b/MAINTAINERS
index 0fda00f39849..0c9dc71a3d9f 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -5766,7 +5766,7 @@ M: Matthew Wilcox <willy@linux.intel.com>
 L:	linux-nvme@lists.infradead.org
 T:	git git://git.infradead.org/users/willy/linux-nvme.git
 S:	Supported
-F:	drivers/block/nvme.c
+F:	drivers/block/nvme*
 F:	include/linux/nvme.h
 
 OMAP SUPPORT
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index 8efdfaa44a59..ce79a590b45b 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -629,7 +629,7 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
 	struct nvme_command *cmnd;
 	struct nvme_iod *iod;
 	enum dma_data_direction dma_dir;
-	int cmdid, length, result = -ENOMEM;
+	int cmdid, length, result;
 	u16 control;
 	u32 dsmgmt;
 	int psegs = bio_phys_segments(ns->queue, bio);
@@ -640,6 +640,7 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
 		return result;
 	}
 
+	result = -ENOMEM;
 	iod = nvme_alloc_iod(psegs, bio->bi_size, GFP_ATOMIC);
 	if (!iod)
 		goto nomem;
@@ -977,6 +978,8 @@ static void nvme_cancel_ios(struct nvme_queue *nvmeq, bool timeout)
 
 		if (timeout && !time_after(now, info[cmdid].timeout))
 			continue;
+		if (info[cmdid].ctx == CMD_CTX_CANCELLED)
+			continue;
 		dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d\n", cmdid);
 		ctx = cancel_cmdid(nvmeq, cmdid, &fn);
 		fn(nvmeq->dev, ctx, &cqe);
@@ -1206,7 +1209,7 @@ struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
 
 	if (addr & 3)
 		return ERR_PTR(-EINVAL);
-	if (!length)
+	if (!length || length > INT_MAX - PAGE_SIZE)
 		return ERR_PTR(-EINVAL);
 
 	offset = offset_in_page(addr);
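A note on the new bound: nvme_map_user_pages() goes on to round offset + length up to a whole number of pages and store the result in an int, so an unchecked length near UINT_MAX would wrap the page count. A minimal userspace sketch of the guarded computation; page_span() and the fixed 4 KiB PAGE_SIZE are illustrative stand-ins, not the driver's code:

	#include <limits.h>
	#include <stdio.h>

	#define PAGE_SIZE 4096UL

	/* page_span() is a hypothetical stand-in for the rounding that
	 * nvme_map_user_pages() performs on (offset + length); the result
	 * is stored in an int, so the guard from the patch is what keeps
	 * a huge length from wrapping the page count. */
	static int page_span(unsigned long addr, unsigned length)
	{
		unsigned long offset = addr & (PAGE_SIZE - 1);

		if (length == 0 || length > INT_MAX - PAGE_SIZE)
			return -1;	/* the rejected cases from the patch */

		return (offset + length + PAGE_SIZE - 1) / PAGE_SIZE;
	}

	int main(void)
	{
		printf("%d\n", page_span(0x1000, 8192));	/* 2 pages */
		printf("%d\n", page_span(0x1000, 0xffffffff));	/* -1, rejected */
		return 0;
	}
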
@@ -1227,7 +1230,8 @@ struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
 	sg_init_table(sg, count);
 	for (i = 0; i < count; i++) {
 		sg_set_page(&sg[i], pages[i],
-				min_t(int, length, PAGE_SIZE - offset), offset);
+				min_t(unsigned, length, PAGE_SIZE - offset),
+				offset);
 		length -= (PAGE_SIZE - offset);
 		offset = 0;
 	}
@@ -1435,7 +1439,7 @@ static int nvme_user_admin_cmd(struct nvme_dev *dev,
 		nvme_free_iod(dev, iod);
 	}
 
-	if (!status && copy_to_user(&ucmd->result, &cmd.result,
+	if ((status >= 0) && copy_to_user(&ucmd->result, &cmd.result,
 							sizeof(cmd.result)))
 		status = -EFAULT;
 
@@ -1633,7 +1637,8 @@ static int set_queue_count(struct nvme_dev *dev, int count)
 
 static int nvme_setup_io_queues(struct nvme_dev *dev)
 {
-	int result, cpu, i, nr_io_queues, db_bar_size, q_depth;
+	struct pci_dev *pdev = dev->pci_dev;
+	int result, cpu, i, nr_io_queues, db_bar_size, q_depth, q_count;
 
 	nr_io_queues = num_online_cpus();
 	result = set_queue_count(dev, nr_io_queues);
@@ -1642,14 +1647,14 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
 	if (result < nr_io_queues)
 		nr_io_queues = result;
 
+	q_count = nr_io_queues;
 	/* Deregister the admin queue's interrupt */
 	free_irq(dev->entry[0].vector, dev->queues[0]);
 
 	db_bar_size = 4096 + ((nr_io_queues + 1) << (dev->db_stride + 3));
 	if (db_bar_size > 8192) {
 		iounmap(dev->bar);
-		dev->bar = ioremap(pci_resource_start(dev->pci_dev, 0),
-								db_bar_size);
+		dev->bar = ioremap(pci_resource_start(pdev, 0), db_bar_size);
 		dev->dbs = ((void __iomem *)dev->bar) + 4096;
 		dev->queues[0]->q_db = dev->dbs;
 	}
@@ -1657,19 +1662,36 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
 	for (i = 0; i < nr_io_queues; i++)
 		dev->entry[i].entry = i;
 	for (;;) {
-		result = pci_enable_msix(dev->pci_dev, dev->entry,
-								nr_io_queues);
+		result = pci_enable_msix(pdev, dev->entry, nr_io_queues);
 		if (result == 0) {
 			break;
 		} else if (result > 0) {
 			nr_io_queues = result;
 			continue;
 		} else {
-			nr_io_queues = 1;
+			nr_io_queues = 0;
 			break;
 		}
 	}
 
+	if (nr_io_queues == 0) {
+		nr_io_queues = q_count;
+		for (;;) {
+			result = pci_enable_msi_block(pdev, nr_io_queues);
+			if (result == 0) {
+				for (i = 0; i < nr_io_queues; i++)
+					dev->entry[i].vector = i + pdev->irq;
+				break;
+			} else if (result > 0) {
+				nr_io_queues = result;
+				continue;
+			} else {
+				nr_io_queues = 1;
+				break;
+			}
+		}
+	}
+
 	result = queue_request_irq(dev, dev->queues[0], "nvme admin");
 	/* XXX: handle failure here */
 
@@ -1850,7 +1872,10 @@ static void nvme_free_dev(struct kref *kref)
 {
 	struct nvme_dev *dev = container_of(kref, struct nvme_dev, kref);
 	nvme_dev_remove(dev);
-	pci_disable_msix(dev->pci_dev);
+	if (dev->pci_dev->msi_enabled)
+		pci_disable_msi(dev->pci_dev);
+	else if (dev->pci_dev->msix_enabled)
+		pci_disable_msix(dev->pci_dev);
 	iounmap(dev->bar);
 	nvme_release_instance(dev);
 	nvme_release_prp_pools(dev);
@@ -1923,8 +1948,14 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	INIT_LIST_HEAD(&dev->namespaces);
 	dev->pci_dev = pdev;
 	pci_set_drvdata(pdev, dev);
-	dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
-	dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+
+	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
+		dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+	else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
+		dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
+	else
+		goto disable;
+
 	result = nvme_set_instance(dev);
 	if (result)
 		goto disable;
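The probe change above exists because dma_set_mask() returns 0 on success and a negative errno when the platform cannot honour the mask; the old code threw that return value away. The same fallback isolated in a hypothetical helper for clarity:

	#include <linux/dma-mapping.h>
	#include <linux/pci.h>

	/* Hypothetical helper isolating the fallback above: prefer 64-bit
	 * DMA, degrade to 32-bit, and fail the probe when neither mask is
	 * accepted. dma_set_mask() returns 0 on success and a negative
	 * errno otherwise, so its result must be checked, not discarded. */
	static int nvme_setup_dma_masks(struct pci_dev *pdev)
	{
		if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
			dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
			return 0;
		}
		if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
			dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
			return 0;
		}
		return -ENODEV;	/* no usable DMA configuration */
	}
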
@@ -1977,7 +2008,10 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
  unmap:
 	iounmap(dev->bar);
  disable_msix:
-	pci_disable_msix(pdev);
+	if (dev->pci_dev->msi_enabled)
+		pci_disable_msi(dev->pci_dev);
+	else if (dev->pci_dev->msix_enabled)
+		pci_disable_msix(dev->pci_dev);
 	nvme_release_instance(dev);
 	nvme_release_prp_pools(dev);
  disable:
diff --git a/drivers/block/nvme-scsi.c b/drivers/block/nvme-scsi.c
index fed54b039893..102de2f52b5c 100644
--- a/drivers/block/nvme-scsi.c
+++ b/drivers/block/nvme-scsi.c
@@ -44,7 +44,6 @@
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/types.h>
-#include <linux/version.h>
 #include <scsi/sg.h>
 #include <scsi/scsi.h>
 
@@ -1654,7 +1653,7 @@ static void nvme_trans_modesel_save_bd(struct nvme_ns *ns, u8 *parm_list,
 	}
 }
 
-static u16 nvme_trans_modesel_get_mp(struct nvme_ns *ns, struct sg_io_hdr *hdr,
+static int nvme_trans_modesel_get_mp(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 					u8 *mode_page, u8 page_code)
 {
 	int res = SNTI_TRANSLATION_SUCCESS;
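
Why the u16 to int change above matters: res in this function is an int that can hold a negative error code alongside SNTI_TRANSLATION_SUCCESS, and squeezing a negative value through a u16 return type turns it into a large positive number, so a caller testing for a negative result never sees the failure. A standalone illustration of the truncation:

	#include <stdio.h>

	/* Demonstrates the signedness bug fixed above: a negative errno
	 * returned through a u16 becomes a large positive value, so error
	 * checks of the form "if (res < 0)" in the callers cannot fire. */
	int main(void)
	{
		int err = -22;			/* -EINVAL */
		unsigned short res = err;	/* what the old u16 return did */

		printf("as int: %d, through u16: %u\n", err, res);
		/* prints: as int: -22, through u16: 65514 */
		return 0;
	}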