about summary refs log tree commit diff stats
path: root/drivers/block
diff options
context:
space:
mode:
authorKeith Busch <keith.busch@intel.com>2014-12-22 14:59:04 -0500
committerJens Axboe <axboe@fb.com>2014-12-22 14:59:04 -0500
commit2b25d981790b830f0e045881386866b970bf9066 (patch)
tree660376bfb6dec385f428e22830558a221c6ed3b0 /drivers/block
parentb4c6a028774bcf3f20ed1e66c27a05aa51a8cf55 (diff)
NVMe: Fix double free irq
Sets the vector to an invalid value after it's freed so we don't free it twice.

Signed-off-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'drivers/block')
-rw-r--r--drivers/block/nvme-core.c17
1 file changed, 12 insertions(+), 5 deletions(-)
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index b1d5d8797315..52d0f2d9fecd 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -1131,10 +1131,16 @@ static void nvme_free_queues(struct nvme_dev *dev, int lowest)
1131 */ 1131 */
1132static int nvme_suspend_queue(struct nvme_queue *nvmeq) 1132static int nvme_suspend_queue(struct nvme_queue *nvmeq)
1133{ 1133{
1134 int vector = nvmeq->dev->entry[nvmeq->cq_vector].vector; 1134 int vector;
1135 1135
1136 spin_lock_irq(&nvmeq->q_lock); 1136 spin_lock_irq(&nvmeq->q_lock);
1137 if (nvmeq->cq_vector == -1) {
1138 spin_unlock_irq(&nvmeq->q_lock);
1139 return 1;
1140 }
1141 vector = nvmeq->dev->entry[nvmeq->cq_vector].vector;
1137 nvmeq->dev->online_queues--; 1142 nvmeq->dev->online_queues--;
1143 nvmeq->cq_vector = -1;
1138 spin_unlock_irq(&nvmeq->q_lock); 1144 spin_unlock_irq(&nvmeq->q_lock);
1139 1145
1140 irq_set_affinity_hint(vector, NULL); 1146 irq_set_affinity_hint(vector, NULL);
@@ -1173,7 +1179,7 @@ static void nvme_disable_queue(struct nvme_dev *dev, int qid)
1173} 1179}
1174 1180
1175static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid, 1181static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
1176 int depth, int vector) 1182 int depth)
1177{ 1183{
1178 struct device *dmadev = &dev->pci_dev->dev; 1184 struct device *dmadev = &dev->pci_dev->dev;
1179 struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq), GFP_KERNEL); 1185 struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq), GFP_KERNEL);
@@ -1199,7 +1205,6 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
1199 nvmeq->cq_phase = 1; 1205 nvmeq->cq_phase = 1;
1200 nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride]; 1206 nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
1201 nvmeq->q_depth = depth; 1207 nvmeq->q_depth = depth;
1202 nvmeq->cq_vector = vector;
1203 nvmeq->qid = qid; 1208 nvmeq->qid = qid;
1204 dev->queue_count++; 1209 dev->queue_count++;
1205 dev->queues[qid] = nvmeq; 1210 dev->queues[qid] = nvmeq;
@@ -1244,6 +1249,7 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
1244 struct nvme_dev *dev = nvmeq->dev; 1249 struct nvme_dev *dev = nvmeq->dev;
1245 int result; 1250 int result;
1246 1251
1252 nvmeq->cq_vector = qid - 1;
1247 result = adapter_alloc_cq(dev, qid, nvmeq); 1253 result = adapter_alloc_cq(dev, qid, nvmeq);
1248 if (result < 0) 1254 if (result < 0)
1249 return result; 1255 return result;
@@ -1416,7 +1422,7 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
1416 1422
1417 nvmeq = dev->queues[0]; 1423 nvmeq = dev->queues[0];
1418 if (!nvmeq) { 1424 if (!nvmeq) {
1419 nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH, 0); 1425 nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH);
1420 if (!nvmeq) 1426 if (!nvmeq)
1421 return -ENOMEM; 1427 return -ENOMEM;
1422 } 1428 }
@@ -1443,6 +1449,7 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
1443 if (result) 1449 if (result)
1444 goto free_nvmeq; 1450 goto free_nvmeq;
1445 1451
1452 nvmeq->cq_vector = 0;
1446 result = queue_request_irq(dev, nvmeq, nvmeq->irqname); 1453 result = queue_request_irq(dev, nvmeq, nvmeq->irqname);
1447 if (result) 1454 if (result)
1448 goto free_tags; 1455 goto free_tags;
@@ -1944,7 +1951,7 @@ static void nvme_create_io_queues(struct nvme_dev *dev)
1944 unsigned i; 1951 unsigned i;
1945 1952
1946 for (i = dev->queue_count; i <= dev->max_qid; i++) 1953 for (i = dev->queue_count; i <= dev->max_qid; i++)
1947 if (!nvme_alloc_queue(dev, i, dev->q_depth, i - 1)) 1954 if (!nvme_alloc_queue(dev, i, dev->q_depth))
1948 break; 1955 break;
1949 1956
1950 for (i = dev->online_queues; i <= dev->queue_count - 1; i++) 1957 for (i = dev->online_queues; i <= dev->queue_count - 1; i++)