author		Matthew Wilcox <matthew.r.wilcox@intel.com>	2011-02-10 09:56:01 -0500
committer	Matthew Wilcox <matthew.r.wilcox@intel.com>	2011-11-04 15:52:57 -0400
commit		091b609258b8e01cc45b01a41ca5e496f674d989 (patch)
tree		fc047bbe0b579d2708c4ac36937ce403df8d38f2 /drivers/block/nvme.c
parent		d534df3c730af9073a9ddc076d9fd65cbdca22b3 (diff)
NVMe: Switch to use DMA Pool API
Calling dma_free_coherent from interrupt context causes warnings.
Using the DMA pool API instead delays freeing the underlying pages
until pool destruction, which avoids the problem.
Signed-off-by: Matthew Wilcox <matthew.r.wilcox@intel.com>
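
For context, the lifecycle this patch adopts is the standard dma_pool one:
create the pool at probe time, allocate and free blocks at I/O (including
interrupt) time, destroy the pool at remove time. dma_pool_alloc() and
dma_pool_free() with GFP_ATOMIC are interrupt-safe because blocks are carved
out of pages the pool has already mapped with dma_alloc_coherent(); those
pages are only handed back to dma_free_coherent() inside dma_pool_destroy().
A minimal sketch, with hypothetical example_* names, assuming PAGE_SIZE
blocks at PAGE_SIZE alignment as this driver uses:

#include <linux/dmapool.h>

/* Hypothetical sketch of the dma_pool lifecycle the patch adopts. */
static struct dma_pool *example_pool;

static int example_probe(struct device *dmadev)
{
	/* PAGE_SIZE blocks, PAGE_SIZE-aligned, as for the PRP list pool. */
	example_pool = dma_pool_create("example", dmadev,
					PAGE_SIZE, PAGE_SIZE, 0);
	return example_pool ? 0 : -ENOMEM;
}

static void example_irq_path(void)
{
	dma_addr_t dma;
	void *buf = dma_pool_alloc(example_pool, GFP_ATOMIC, &dma);

	if (buf)	/* safe in IRQ context: just returns the block */
		dma_pool_free(example_pool, buf, dma);
}

static void example_remove(void)
{
	/* Only here are the backing pages dma_free_coherent()ed. */
	dma_pool_destroy(example_pool);
}
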
Diffstat (limited to 'drivers/block/nvme.c')
 drivers/block/nvme.c | 39 ++++++++++++++++++++++++++++++++-------
 1 file changed, 32 insertions(+), 7 deletions(-)
diff --git a/drivers/block/nvme.c b/drivers/block/nvme.c
index 11df0e90edad..80fe6a7a8163 100644
--- a/drivers/block/nvme.c
+++ b/drivers/block/nvme.c
@@ -57,6 +57,7 @@ struct nvme_dev {
 	struct nvme_queue **queues;
 	u32 __iomem *dbs;
 	struct pci_dev *pci_dev;
+	struct dma_pool *prp_page_pool;
 	int instance;
 	int queue_count;
 	u32 ctrl_config;
@@ -88,6 +89,7 @@ struct nvme_ns {
  */
 struct nvme_queue {
 	struct device *q_dmadev;
+	struct nvme_dev *dev;
 	spinlock_t q_lock;
 	struct nvme_command *sq_cmds;
 	volatile struct nvme_completion *cqes;
@@ -247,10 +249,9 @@ static int nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
 	return 0;
 }
 
-static __le64 *alloc_prp_list(struct nvme_queue *nvmeq, int length,
-							dma_addr_t *addr)
+static __le64 *alloc_prp_list(struct nvme_dev *dev, dma_addr_t *addr)
 {
-	return dma_alloc_coherent(nvmeq->q_dmadev, PAGE_SIZE, addr, GFP_ATOMIC);
+	return dma_pool_alloc(dev->prp_page_pool, GFP_ATOMIC, addr);
 }
 
 struct nvme_prps {
@@ -262,6 +263,7 @@ struct nvme_prps {
 static void nvme_free_prps(struct nvme_queue *nvmeq, struct nvme_prps *prps)
 {
 	const int last_prp = PAGE_SIZE / 8 - 1;
+	struct nvme_dev *dev = nvmeq->dev;
 	int i;
 	dma_addr_t prp_dma;
 
@@ -272,8 +274,7 @@ static void nvme_free_prps(struct nvme_queue *nvmeq, struct nvme_prps *prps)
 	for (i = 0; i < prps->npages; i++) {
 		__le64 *prp_list = prps->list[i];
 		dma_addr_t next_prp_dma = le64_to_cpu(prp_list[last_prp]);
-		dma_free_coherent(nvmeq->q_dmadev, PAGE_SIZE, prp_list,
-								prp_dma);
+		dma_pool_free(dev->prp_page_pool, prp_list, prp_dma);
 		prp_dma = next_prp_dma;
 	}
 	kfree(prps);
@@ -320,6 +321,7 @@ static struct nvme_prps *nvme_setup_prps(struct nvme_queue *nvmeq,
 					struct nvme_common_command *cmd,
 					struct scatterlist *sg, int length)
 {
+	struct nvme_dev *dev = nvmeq->dev;
 	int dma_len = sg_dma_len(sg);
 	u64 dma_addr = sg_dma_address(sg);
 	int offset = offset_in_page(dma_addr);
@@ -352,7 +354,7 @@ static struct nvme_prps *nvme_setup_prps(struct nvme_queue *nvmeq,
 	prps = kmalloc(sizeof(*prps) + sizeof(__le64 *) * npages, GFP_ATOMIC);
 	prps->npages = npages;
 	prp_page = 0;
-	prp_list = alloc_prp_list(nvmeq, length, &prp_dma);
+	prp_list = alloc_prp_list(dev, &prp_dma);
 	prps->list[prp_page++] = prp_list;
 	prps->first_dma = prp_dma;
 	cmd->prp2 = cpu_to_le64(prp_dma);
@@ -360,7 +362,7 @@ static struct nvme_prps *nvme_setup_prps(struct nvme_queue *nvmeq,
 	for (;;) {
 		if (i == PAGE_SIZE / 8 - 1) {
 			__le64 *old_prp_list = prp_list;
-			prp_list = alloc_prp_list(nvmeq, length, &prp_dma);
+			prp_list = alloc_prp_list(dev, &prp_dma);
 			prps->list[prp_page++] = prp_list;
 			old_prp_list[i] = cpu_to_le64(prp_dma);
 			i = 0;
@@ -752,6 +754,7 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
 		goto free_cqdma;
 
 	nvmeq->q_dmadev = dmadev;
+	nvmeq->dev = dev;
 	spin_lock_init(&nvmeq->q_lock);
 	nvmeq->cq_head = 0;
 	nvmeq->cq_phase = 1;
@@ -1302,6 +1305,22 @@ static int nvme_dev_remove(struct nvme_dev *dev)
 	return 0;
 }
 
+static int nvme_setup_prp_pools(struct nvme_dev *dev)
+{
+	struct device *dmadev = &dev->pci_dev->dev;
+	dev->prp_page_pool = dma_pool_create("prp list page", dmadev,
+						PAGE_SIZE, PAGE_SIZE, 0);
+	if (!dev->prp_page_pool)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void nvme_release_prp_pools(struct nvme_dev *dev)
+{
+	dma_pool_destroy(dev->prp_page_pool);
+}
+
 /* XXX: Use an ida or something to let remove / add work correctly */
 static void nvme_set_instance(struct nvme_dev *dev)
 {
@@ -1346,6 +1365,10 @@ static int __devinit nvme_probe(struct pci_dev *pdev,
 	nvme_set_instance(dev);
 	dev->entry[0].vector = pdev->irq;
 
+	result = nvme_setup_prp_pools(dev);
+	if (result)
+		goto disable_msix;
+
 	dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
 	if (!dev->bar) {
 		result = -ENOMEM;
@@ -1369,6 +1392,7 @@ static int __devinit nvme_probe(struct pci_dev *pdev,
 disable_msix:
 	pci_disable_msix(pdev);
 	nvme_release_instance(dev);
+	nvme_release_prp_pools(dev);
 disable:
 	pci_disable_device(pdev);
 	pci_release_regions(pdev);
@@ -1386,6 +1410,7 @@ static void __devexit nvme_remove(struct pci_dev *pdev)
 	pci_disable_msix(pdev);
 	iounmap(dev->bar);
 	nvme_release_instance(dev);
+	nvme_release_prp_pools(dev);
 	pci_disable_device(pdev);
 	pci_release_regions(pdev);
 	kfree(dev->queues);