author		Ingo Molnar <mingo@kernel.org>	2016-01-06 05:02:29 -0500
committer	Ingo Molnar <mingo@kernel.org>	2016-01-06 05:02:29 -0500
commit		567bee2803cb46caeb6011de5b738fde33dc3896 (patch)
tree		05bab01377bffa356bfbe06c4b6193b23b7c24ca /drivers/nvme/host/pci.c
parent		aa0b7ae06387d40a988ce16a189082dee6e570bc (diff)
parent		093e5840ae76f1082633503964d035f40ed0216d (diff)
Merge branch 'sched/urgent' into sched/core, to pick up fixes before merging new patches
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'drivers/nvme/host/pci.c')
-rw-r--r--	drivers/nvme/host/pci.c	71
1 file changed, 56 insertions(+), 15 deletions(-)
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 8187df204695..0c67b57be83c 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -896,19 +896,28 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
 			goto retry_cmd;
 		}
 		if (blk_integrity_rq(req)) {
-			if (blk_rq_count_integrity_sg(req->q, req->bio) != 1)
+			if (blk_rq_count_integrity_sg(req->q, req->bio) != 1) {
+				dma_unmap_sg(dev->dev, iod->sg, iod->nents,
+						dma_dir);
 				goto error_cmd;
+			}
 
 			sg_init_table(iod->meta_sg, 1);
 			if (blk_rq_map_integrity_sg(
-					req->q, req->bio, iod->meta_sg) != 1)
+					req->q, req->bio, iod->meta_sg) != 1) {
+				dma_unmap_sg(dev->dev, iod->sg, iod->nents,
+						dma_dir);
 				goto error_cmd;
+			}
 
 			if (rq_data_dir(req))
 				nvme_dif_remap(req, nvme_dif_prep);
 
-			if (!dma_map_sg(nvmeq->q_dmadev, iod->meta_sg, 1, dma_dir))
+			if (!dma_map_sg(nvmeq->q_dmadev, iod->meta_sg, 1, dma_dir)) {
+				dma_unmap_sg(dev->dev, iod->sg, iod->nents,
+						dma_dir);
 				goto error_cmd;
+			}
 		}
 	}
 
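The three new error paths above all unwind the data scatterlist mapping that was set up earlier in nvme_queue_rq(), so nothing mapped with dma_map_sg() is leaked when the integrity setup fails. A minimal userspace sketch of the same goto-unwind pattern; acquire() and release() are hypothetical stand-ins for the DMA mapping calls, not kernel API:

/*
 * Userspace model of the unwind the hunk adds: once a resource (the
 * data scatterlist mapping) has been acquired, every later failure
 * path must release it before bailing out.
 */
#include <stdio.h>
#include <stdlib.h>

static int acquire(void **res)
{
	*res = malloc(16);		/* stands in for dma_map_sg() */
	return *res ? 0 : -1;
}

static void release(void *res)
{
	free(res);			/* stands in for dma_unmap_sg() */
}

static int setup(int fail_step)
{
	void *mapping;

	if (acquire(&mapping))
		return -1;

	if (fail_step == 1)		/* e.g. integrity sg count mismatch */
		goto error;
	if (fail_step == 2)		/* e.g. integrity sg mapping failure */
		goto error;

	release(mapping);		/* normal completion path */
	return 0;

error:
	release(mapping);		/* the unwind the patch adds */
	return -1;
}

int main(void)
{
	printf("ok=%d fail1=%d fail2=%d\n", setup(0), setup(1), setup(2));
	return 0;
}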
@@ -968,7 +977,8 @@ static void __nvme_process_cq(struct nvme_queue *nvmeq, unsigned int *tag)
 	if (head == nvmeq->cq_head && phase == nvmeq->cq_phase)
 		return;
 
-	writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
+	if (likely(nvmeq->cq_vector >= 0))
+		writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
 	nvmeq->cq_head = head;
 	nvmeq->cq_phase = phase;
 
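The guarded writel() above updates the completion queue head doorbell, which sits one stride past the queue's submission tail doorbell; with cq_vector negative the queue has been disabled, so the register is left alone. A worked example of the doorbell offsets implied by the NVMe register layout (doorbells start at BAR offset 0x1000, spaced 4 << CAP.DSTRD bytes apart); the qid and DSTRD values below are illustrative:

/*
 * Compute submission tail and completion head doorbell offsets for a
 * few queues and doorbell strides. Mirrors the layout behind
 * "nvmeq->q_db + nvmeq->dev->db_stride"; values are not read from
 * real hardware.
 */
#include <stdio.h>

static unsigned long sq_tail_db(unsigned qid, unsigned dstrd)
{
	return 0x1000ul + (2ul * qid) * (4ul << dstrd);
}

static unsigned long cq_head_db(unsigned qid, unsigned dstrd)
{
	return 0x1000ul + (2ul * qid + 1) * (4ul << dstrd);
}

int main(void)
{
	unsigned dstrd, qid;

	for (dstrd = 0; dstrd <= 1; dstrd++)
		for (qid = 0; qid <= 2; qid++)
			printf("DSTRD=%u qid=%u: SQ tail 0x%lx, CQ head 0x%lx\n",
			       dstrd, qid,
			       sq_tail_db(qid, dstrd), cq_head_db(qid, dstrd));
	return 0;
}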
@@ -1727,9 +1737,13 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
 	u32 aqa;
 	u64 cap = lo_hi_readq(&dev->bar->cap);
 	struct nvme_queue *nvmeq;
-	unsigned page_shift = PAGE_SHIFT;
+	/*
+	 * default to a 4K page size, with the intention to update this
+	 * path in the future to accomodate architectures with differing
+	 * kernel and IO page sizes.
+	 */
+	unsigned page_shift = 12;
 	unsigned dev_page_min = NVME_CAP_MPSMIN(cap) + 12;
-	unsigned dev_page_max = NVME_CAP_MPSMAX(cap) + 12;
 
 	if (page_shift < dev_page_min) {
 		dev_err(dev->dev,
@@ -1738,13 +1752,6 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
 				1 << page_shift);
 		return -ENODEV;
 	}
-	if (page_shift > dev_page_max) {
-		dev_info(dev->dev,
-				"Device maximum page size (%u) smaller than "
-				"host (%u); enabling work-around\n",
-				1 << dev_page_max, 1 << page_shift);
-		page_shift = dev_page_max;
-	}
 
 	dev->subsystem = readl(&dev->bar->vs) >= NVME_VS(1, 1) ?
 					NVME_CAP_NSSRC(cap) : 0;
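The two hunks above pin the host to a 4K device page. CAP.MPSMIN and CAP.MPSMAX report the controller's supported memory page sizes as powers of two relative to 4K, so with page_shift fixed at 12 only the MPSMIN check can still fail and the MPSMAX work-around becomes dead code. A standalone sketch of that arithmetic, using made-up CAP field values:

/*
 * "field + 12" turns the CAP.MPSMIN/MPSMAX encoding into a page shift;
 * 1 << shift is the page size in bytes. The sample values below are
 * illustrative only.
 */
#include <stdio.h>

int main(void)
{
	unsigned mpsmin = 0;			/* hypothetical CAP.MPSMIN: 4K */
	unsigned mpsmax = 4;			/* hypothetical CAP.MPSMAX: 64K */
	unsigned page_shift = 12;		/* host now always uses 4K */
	unsigned dev_page_min = mpsmin + 12;
	unsigned dev_page_max = mpsmax + 12;

	printf("device min page %u, max page %u, host page %u\n",
	       1u << dev_page_min, 1u << dev_page_max, 1u << page_shift);

	if (page_shift < dev_page_min)
		printf("host page too small: driver would return -ENODEV\n");
	else
		printf("4K host page accepted; no MPSMAX work-around needed\n");
	return 0;
}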
@@ -2268,7 +2275,7 @@ static void nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid)
 	if (dev->max_hw_sectors) {
 		blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors);
 		blk_queue_max_segments(ns->queue,
-			((dev->max_hw_sectors << 9) / dev->page_size) + 1);
+			(dev->max_hw_sectors / (dev->page_size >> 9)) + 1);
 	}
 	if (dev->stripe_size)
 		blk_queue_chunk_sectors(ns->queue, dev->stripe_size >> 9);
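Both forms of the segment bound above count the device pages spanned by a maximal transfer, but the old expression shifts max_hw_sectors left by 9 before dividing, which wraps a 32-bit value once the limit reaches 4GB worth of sectors. A short worked example with illustrative numbers showing where the two expressions diverge:

/*
 * With a 4GB transfer limit (2^23 512-byte sectors) and 4K pages, the
 * old expression wraps to 0 before the division, while the new one
 * keeps the arithmetic within range.
 */
#include <stdio.h>

int main(void)
{
	unsigned int max_hw_sectors = 1 << 23;	/* 4GB in 512-byte sectors */
	unsigned int page_size = 4096;

	unsigned int old_segs = ((max_hw_sectors << 9) / page_size) + 1;
	unsigned int new_segs = (max_hw_sectors / (page_size >> 9)) + 1;

	printf("old: %u segments (wrapped), new: %u segments\n",
	       old_segs, new_segs);
	return 0;
}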
@@ -2533,8 +2540,17 @@ static void nvme_ns_remove(struct nvme_ns *ns)
 {
 	bool kill = nvme_io_incapable(ns->dev) && !blk_queue_dying(ns->queue);
 
-	if (kill)
+	if (kill) {
 		blk_set_queue_dying(ns->queue);
+
+		/*
+		 * The controller was shutdown first if we got here through
+		 * device removal. The shutdown may requeue outstanding
+		 * requests. These need to be aborted immediately so
+		 * del_gendisk doesn't block indefinitely for their completion.
+		 */
+		blk_mq_abort_requeue_list(ns->queue);
+	}
 	if (ns->disk->flags & GENHD_FL_UP)
 		del_gendisk(ns->disk);
 	if (kill || !blk_queue_dying(ns->queue)) {
@@ -2701,6 +2717,18 @@ static int nvme_dev_map(struct nvme_dev *dev)
 	dev->q_depth = min_t(int, NVME_CAP_MQES(cap) + 1, NVME_Q_DEPTH);
 	dev->db_stride = 1 << NVME_CAP_STRIDE(cap);
 	dev->dbs = ((void __iomem *)dev->bar) + 4096;
+
+	/*
+	 * Temporary fix for the Apple controller found in the MacBook8,1 and
+	 * some MacBook7,1 to avoid controller resets and data loss.
+	 */
+	if (pdev->vendor == PCI_VENDOR_ID_APPLE && pdev->device == 0x2001) {
+		dev->q_depth = 2;
+		dev_warn(dev->dev, "detected Apple NVMe controller, set "
+			"queue depth=%u to work around controller resets\n",
+			dev->q_depth);
+	}
+
 	if (readl(&dev->bar->vs) >= NVME_VS(1, 2))
 		dev->cmb = nvme_map_cmb(dev);
 
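For context, the first line of the hunk above derives the queue depth from CAP.MQES (the zero-based maximum entries per queue, capped by the driver's NVME_Q_DEPTH), and the new quirk then forces it down to 2 for the Apple controller. A self-contained sketch of that derivation; the quirk-table form and the sample CAP/PCI values are inventions for illustration, since the driver itself uses the inline check shown in the hunk:

/*
 * q_depth = min(CAP.MQES + 1, NVME_Q_DEPTH), then quirky devices get a
 * hard override. 0x106b is PCI_VENDOR_ID_APPLE; the other IDs and the
 * MQES value are illustrative.
 */
#include <stdio.h>

#define NVME_Q_DEPTH	1024		/* driver-side cap */

struct quirk {
	unsigned short vendor, device;
	int forced_q_depth;
};

static const struct quirk quirks[] = {
	{ 0x106b, 0x2001, 2 },		/* Apple NVMe in MacBook8,1 */
};

static int q_depth(unsigned mqes, unsigned short vendor, unsigned short device)
{
	int depth = mqes + 1 < NVME_Q_DEPTH ? (int)mqes + 1 : NVME_Q_DEPTH;
	unsigned i;

	for (i = 0; i < sizeof(quirks) / sizeof(quirks[0]); i++)
		if (quirks[i].vendor == vendor && quirks[i].device == device)
			depth = quirks[i].forced_q_depth;
	return depth;
}

int main(void)
{
	printf("generic controller: q_depth=%d\n", q_depth(2047, 0x8086, 0x0953));
	printf("Apple 0x2001:       q_depth=%d\n", q_depth(2047, 0x106b, 0x2001));
	return 0;
}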
@@ -2787,6 +2815,10 @@ static void nvme_del_queue_end(struct nvme_queue *nvmeq)
 {
 	struct nvme_delq_ctx *dq = nvmeq->cmdinfo.ctx;
 	nvme_put_dq(dq);
+
+	spin_lock_irq(&nvmeq->q_lock);
+	nvme_process_cq(nvmeq);
+	spin_unlock_irq(&nvmeq->q_lock);
 }
 
 static int adapter_async_del_queue(struct nvme_queue *nvmeq, u8 opcode,
@@ -2954,6 +2986,15 @@ static void nvme_dev_remove(struct nvme_dev *dev)
 {
 	struct nvme_ns *ns, *next;
 
+	if (nvme_io_incapable(dev)) {
+		/*
+		 * If the device is not capable of IO (surprise hot-removal,
+		 * for example), we need to quiesce prior to deleting the
+		 * namespaces. This will end outstanding requests and prevent
+		 * attempts to sync dirty data.
+		 */
+		nvme_dev_shutdown(dev);
+	}
 	list_for_each_entry_safe(ns, next, &dev->namespaces, list)
 		nvme_ns_remove(ns);
 }