author     Christoph Hellwig <hch@lst.de>        2017-04-13 03:06:43 -0400
committer  Bjorn Helgaas <bhelgaas@google.com>   2017-04-18 14:43:29 -0400
commit     0ff199cb48b4af6f29a1bf15d92d93f44a22eeb4
tree       3f69907be80d6e4cfcb5a184a2f60427debabc85
parent     704e8953d3e9db29d5d93c0bf6973d86fe15e679
nvme/pci: Switch to pci_request_irq()
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Keith Busch <keith.busch@intel.com>
 drivers/nvme/host/pci.c | 30 +++++++++++++-----------------
 1 file changed, 13 insertions(+), 17 deletions(-)
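Background note: pci_request_irq(), added earlier in this series in drivers/pci/irq.c, rolls the pci_irq_vector() lookup, the interrupt-name allocation, and the request_threaded_irq() call into one helper. That is why the per-queue irqname[] buffer and the nvmeq_irq() wrapper can go away below. A rough sketch of what the helper does, assuming the 4.12-era implementation (trimmed, not the verbatim source):

	#include <linux/interrupt.h>
	#include <linux/kernel.h>
	#include <linux/pci.h>
	#include <linux/slab.h>

	/* Sketch only: approximates drivers/pci/irq.c as of this series. */
	int pci_request_irq(struct pci_dev *dev, unsigned int nr,
			    irq_handler_t handler, irq_handler_t thread_fn,
			    void *dev_id, const char *fmt, ...)
	{
		unsigned long irqflags = IRQF_SHARED;
		char *devname;
		va_list ap;
		int ret;

		/* A NULL hard handler means threaded-only; that needs ONESHOT. */
		if (!handler)
			irqflags |= IRQF_ONESHOT;

		/* Allocate the irq name ("nvme0q1", ...) for the caller. */
		va_start(ap, fmt);
		devname = kvasprintf(GFP_KERNEL, fmt, ap);
		va_end(ap);
		if (!devname)
			return -ENOMEM;

		/* Map the vector index to a Linux IRQ number and request it. */
		ret = request_threaded_irq(pci_irq_vector(dev, nr), handler,
					   thread_fn, irqflags, devname, dev_id);
		if (ret)
			kfree(devname);
		return ret;
	}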
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 26a5fd05fe88..925997127a6b 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -117,7 +117,6 @@ static inline struct nvme_dev *to_nvme_dev(struct nvme_ctrl *ctrl)
 struct nvme_queue {
 	struct device *q_dmadev;
 	struct nvme_dev *dev;
-	char irqname[24];	/* nvme4294967295-65535\0 */
 	spinlock_t q_lock;
 	struct nvme_command *sq_cmds;
 	struct nvme_command __iomem *sq_cmds_io;
@@ -204,11 +203,6 @@ static unsigned int nvme_cmd_size(struct nvme_dev *dev)
 		nvme_iod_alloc_size(dev, NVME_INT_BYTES(dev), NVME_INT_PAGES);
 }
 
-static int nvmeq_irq(struct nvme_queue *nvmeq)
-{
-	return pci_irq_vector(to_pci_dev(nvmeq->dev->dev), nvmeq->cq_vector);
-}
-
 static int nvme_admin_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
 				unsigned int hctx_idx)
 {
@@ -962,7 +956,7 @@ static int nvme_suspend_queue(struct nvme_queue *nvmeq)
 		spin_unlock_irq(&nvmeq->q_lock);
 		return 1;
 	}
-	vector = nvmeq_irq(nvmeq);
+	vector = nvmeq->cq_vector;
 	nvmeq->dev->online_queues--;
 	nvmeq->cq_vector = -1;
 	spin_unlock_irq(&nvmeq->q_lock);
@@ -970,7 +964,7 @@ static int nvme_suspend_queue(struct nvme_queue *nvmeq)
 	if (!nvmeq->qid && nvmeq->dev->ctrl.admin_q)
 		blk_mq_stop_hw_queues(nvmeq->dev->ctrl.admin_q);
 
-	free_irq(vector, nvmeq);
+	pci_free_irq(to_pci_dev(nvmeq->dev->dev), vector, nvmeq);
 
 	return 0;
 }
@@ -1055,8 +1049,6 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
 
 	nvmeq->q_dmadev = dev->dev;
 	nvmeq->dev = dev;
-	snprintf(nvmeq->irqname, sizeof(nvmeq->irqname), "nvme%dq%d",
-			dev->ctrl.instance, qid);
 	spin_lock_init(&nvmeq->q_lock);
 	nvmeq->cq_head = 0;
 	nvmeq->cq_phase = 1;
@@ -1079,12 +1071,16 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
 
 static int queue_request_irq(struct nvme_queue *nvmeq)
 {
-	if (use_threaded_interrupts)
-		return request_threaded_irq(nvmeq_irq(nvmeq), nvme_irq_check,
-			nvme_irq, IRQF_SHARED, nvmeq->irqname, nvmeq);
-	else
-		return request_irq(nvmeq_irq(nvmeq), nvme_irq, IRQF_SHARED,
-			nvmeq->irqname, nvmeq);
+	struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev);
+	int nr = nvmeq->dev->ctrl.instance;
+
+	if (use_threaded_interrupts) {
+		return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq_check,
+				nvme_irq, nvmeq, "nvme%dq%d", nr, nvmeq->qid);
+	} else {
+		return pci_request_irq(pdev, nvmeq->cq_vector, nvme_irq,
+				NULL, nvmeq, "nvme%dq%d", nr, nvmeq->qid);
+	}
 }
 
 static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
@@ -1440,7 +1436,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
 	}
 
 	/* Deregister the admin queue's interrupt */
-	free_irq(pci_irq_vector(pdev, 0), adminq);
+	pci_free_irq(pdev, 0, adminq);
 
 	/*
 	 * If we enable msix early due to not intx, disable it again before
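Usage note: with this pair of helpers a driver requests and frees a vector's interrupt by vector index rather than by Linux IRQ number, and the dev_id cookie is what ties the two calls together. A hedged sketch for a hypothetical driver (mydrv_handler, struct mydrv_queue, inst, and qid are illustrative, not from this patch):

	/* Hypothetical per-queue setup using the new API: one call replaces
	 * pci_irq_vector() + snprintf() into a name buffer + request_irq(). */
	static int mydrv_setup_queue_irq(struct pci_dev *pdev,
					 struct mydrv_queue *q, int inst, int qid)
	{
		return pci_request_irq(pdev, qid, mydrv_handler, NULL, q,
				       "mydrv%dq%d", inst, qid);
	}

	/* Teardown is symmetric: same vector index, same dev_id cookie. */
	static void mydrv_teardown_queue_irq(struct pci_dev *pdev,
					     struct mydrv_queue *q, int qid)
	{
		pci_free_irq(pdev, qid, q);
	}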