summaryrefslogtreecommitdiffstats
path: root/drivers/nvdimm
diff options
context:
space:
mode:
authorDan Williams <dan.j.williams@intel.com>2016-03-17 23:24:31 -0400
committerDan Williams <dan.j.williams@intel.com>2016-04-22 15:26:23 -0400
commit030b99e39cad33b104474fbe688e0eb23d8209b4 (patch)
treeedbcc412b1dcedd8abe08535686931e0c7868b8f /drivers/nvdimm
parent9d90725ddca347450c4ab177ad680ed76063afd4 (diff)
libnvdimm, pmem: use devm_add_action to release bdev resources
Register a callback to clean up the request_queue and put the gendisk at driver disable time.

Cc: Ross Zwisler <ross.zwisler@linux.intel.com>
Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Diffstat (limited to 'drivers/nvdimm')
-rw-r--r--drivers/nvdimm/pmem.c88
1 file changed, 39 insertions, 49 deletions
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 2238e3af48ae..d936defdc1e2 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -198,6 +198,17 @@ static const struct block_device_operations pmem_fops = {
198 .revalidate_disk = nvdimm_revalidate_disk, 198 .revalidate_disk = nvdimm_revalidate_disk,
199}; 199};
200 200
201static void pmem_release_queue(void *q)
202{
203 blk_cleanup_queue(q);
204}
205
206static void pmem_release_disk(void *disk)
207{
208 del_gendisk(disk);
209 put_disk(disk);
210}
211
201static struct pmem_device *pmem_alloc(struct device *dev, 212static struct pmem_device *pmem_alloc(struct device *dev,
202 struct resource *res, int id) 213 struct resource *res, int id)
203{ 214{
@@ -234,25 +245,22 @@ static struct pmem_device *pmem_alloc(struct device *dev,
234 pmem->phys_addr, pmem->size, 245 pmem->phys_addr, pmem->size,
235 ARCH_MEMREMAP_PMEM); 246 ARCH_MEMREMAP_PMEM);
236 247
237 if (IS_ERR(pmem->virt_addr)) { 248 /*
249 * At release time the queue must be dead before
250 * devm_memremap_pages is unwound
251 */
252 if (devm_add_action(dev, pmem_release_queue, q)) {
238 blk_cleanup_queue(q); 253 blk_cleanup_queue(q);
239 return (void __force *) pmem->virt_addr; 254 return ERR_PTR(-ENOMEM);
240 } 255 }
241 256
257 if (IS_ERR(pmem->virt_addr))
258 return (void __force *) pmem->virt_addr;
259
242 pmem->pmem_queue = q; 260 pmem->pmem_queue = q;
243 return pmem; 261 return pmem;
244} 262}
245 263
246static void pmem_detach_disk(struct pmem_device *pmem)
247{
248 if (!pmem->pmem_disk)
249 return;
250
251 del_gendisk(pmem->pmem_disk);
252 put_disk(pmem->pmem_disk);
253 blk_cleanup_queue(pmem->pmem_queue);
254}
255
256static int pmem_attach_disk(struct device *dev, 264static int pmem_attach_disk(struct device *dev,
257 struct nd_namespace_common *ndns, struct pmem_device *pmem) 265 struct nd_namespace_common *ndns, struct pmem_device *pmem)
258{ 266{
@@ -269,8 +277,10 @@ static int pmem_attach_disk(struct device *dev,
269 pmem->pmem_queue->queuedata = pmem; 277 pmem->pmem_queue->queuedata = pmem;
270 278
271 disk = alloc_disk_node(0, nid); 279 disk = alloc_disk_node(0, nid);
272 if (!disk) { 280 if (!disk)
273 blk_cleanup_queue(pmem->pmem_queue); 281 return -ENOMEM;
282 if (devm_add_action(dev, pmem_release_disk, disk)) {
283 put_disk(disk);
274 return -ENOMEM; 284 return -ENOMEM;
275 } 285 }
276 286
@@ -427,15 +437,6 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
427 return nvdimm_write_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb)); 437 return nvdimm_write_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb));
428} 438}
429 439
430static void nvdimm_namespace_detach_pfn(struct nd_pfn *nd_pfn)
431{
432 struct pmem_device *pmem;
433
434 /* free pmem disk */
435 pmem = dev_get_drvdata(&nd_pfn->dev);
436 pmem_detach_disk(pmem);
437}
438
439/* 440/*
440 * We hotplug memory at section granularity, pad the reserved area from 441 * We hotplug memory at section granularity, pad the reserved area from
441 * the previous section base to the namespace base address. 442 * the previous section base to the namespace base address.
@@ -458,7 +459,6 @@ static unsigned long init_altmap_reserve(resource_size_t base)
458 459
459static int __nvdimm_namespace_attach_pfn(struct nd_pfn *nd_pfn) 460static int __nvdimm_namespace_attach_pfn(struct nd_pfn *nd_pfn)
460{ 461{
461 int rc;
462 struct resource res; 462 struct resource res;
463 struct request_queue *q; 463 struct request_queue *q;
464 struct pmem_device *pmem; 464 struct pmem_device *pmem;
@@ -495,35 +495,33 @@ static int __nvdimm_namespace_attach_pfn(struct nd_pfn *nd_pfn)
495 altmap = & __altmap; 495 altmap = & __altmap;
496 altmap->free = PHYS_PFN(pmem->data_offset - SZ_8K); 496 altmap->free = PHYS_PFN(pmem->data_offset - SZ_8K);
497 altmap->alloc = 0; 497 altmap->alloc = 0;
498 } else { 498 } else
499 rc = -ENXIO; 499 return -ENXIO;
500 goto err;
501 }
502 500
503 /* establish pfn range for lookup, and switch to direct map */ 501 /* establish pfn range for lookup, and switch to direct map */
504 q = pmem->pmem_queue; 502 q = pmem->pmem_queue;
505 memcpy(&res, &nsio->res, sizeof(res)); 503 memcpy(&res, &nsio->res, sizeof(res));
506 res.start += start_pad; 504 res.start += start_pad;
507 res.end -= end_trunc; 505 res.end -= end_trunc;
506 devm_remove_action(dev, pmem_release_queue, q);
508 devm_memunmap(dev, (void __force *) pmem->virt_addr); 507 devm_memunmap(dev, (void __force *) pmem->virt_addr);
509 pmem->virt_addr = (void __pmem *) devm_memremap_pages(dev, &res, 508 pmem->virt_addr = (void __pmem *) devm_memremap_pages(dev, &res,
510 &q->q_usage_counter, altmap); 509 &q->q_usage_counter, altmap);
511 pmem->pfn_flags |= PFN_MAP; 510 pmem->pfn_flags |= PFN_MAP;
512 if (IS_ERR(pmem->virt_addr)) { 511
513 rc = PTR_ERR(pmem->virt_addr); 512 /*
514 goto err; 513 * At release time the queue must be dead before
514 * devm_memremap_pages is unwound
515 */
516 if (devm_add_action(dev, pmem_release_queue, q)) {
517 blk_cleanup_queue(q);
518 return -ENOMEM;
515 } 519 }
520 if (IS_ERR(pmem->virt_addr))
521 return PTR_ERR(pmem->virt_addr);
516 522
517 /* attach pmem disk in "pfn-mode" */ 523 /* attach pmem disk in "pfn-mode" */
518 rc = pmem_attach_disk(dev, ndns, pmem); 524 return pmem_attach_disk(dev, ndns, pmem);
519 if (rc)
520 goto err;
521
522 return rc;
523 err:
524 nvdimm_namespace_detach_pfn(nd_pfn);
525 return rc;
526
527} 525}
528 526
529static int nvdimm_namespace_attach_pfn(struct nd_namespace_common *ndns) 527static int nvdimm_namespace_attach_pfn(struct nd_namespace_common *ndns)
@@ -565,8 +563,8 @@ static int nd_pmem_probe(struct device *dev)
565 563
566 if (is_nd_btt(dev)) { 564 if (is_nd_btt(dev)) {
567 /* btt allocates its own request_queue */ 565 /* btt allocates its own request_queue */
566 devm_remove_action(dev, pmem_release_queue, pmem->pmem_queue);
568 blk_cleanup_queue(pmem->pmem_queue); 567 blk_cleanup_queue(pmem->pmem_queue);
569 pmem->pmem_queue = NULL;
570 return nvdimm_namespace_attach_btt(ndns); 568 return nvdimm_namespace_attach_btt(ndns);
571 } 569 }
572 570
@@ -579,7 +577,6 @@ static int nd_pmem_probe(struct device *dev)
579 * We'll come back as either btt-pmem, or pfn-pmem, so 577 * We'll come back as either btt-pmem, or pfn-pmem, so
580 * drop the queue allocation for now. 578 * drop the queue allocation for now.
581 */ 579 */
582 blk_cleanup_queue(pmem->pmem_queue);
583 return -ENXIO; 580 return -ENXIO;
584 } 581 }
585 582
@@ -588,15 +585,8 @@ static int nd_pmem_probe(struct device *dev)
588 585
589static int nd_pmem_remove(struct device *dev) 586static int nd_pmem_remove(struct device *dev)
590{ 587{
591 struct pmem_device *pmem = dev_get_drvdata(dev);
592
593 if (is_nd_btt(dev)) 588 if (is_nd_btt(dev))
594 nvdimm_namespace_detach_btt(to_nd_btt(dev)); 589 nvdimm_namespace_detach_btt(to_nd_btt(dev));
595 else if (is_nd_pfn(dev))
596 nvdimm_namespace_detach_pfn(to_nd_pfn(dev));
597 else
598 pmem_detach_disk(pmem);
599
600 return 0; 590 return 0;
601} 591}
602 592