author     Dan Williams <dan.j.williams@intel.com>   2016-03-15 19:41:04 -0400
committer  Dan Williams <dan.j.williams@intel.com>   2016-04-22 13:59:54 -0400
commit     298f2bc5db3851cf2e839a0025425256ef852139 (patch)
tree       0a08fc850f39f7160ab9bfe049e5eb66078c2f13 /drivers/nvdimm/pmem.c
parent     c3b46c73264b03000d1e18b22f5caf63332547c9 (diff)
libnvdimm, pmem: kill pmem->ndns
We can derive the common namespace from other information. We also do not
need to cache it because all the usages are in slow paths.

Reviewed-by: Johannes Thumshirn <jthumshirn@suse.de>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
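For illustration, the derivation the message refers to can be condensed into a
small helper. pmem_to_ndns() below is hypothetical and not part of the patch
(the patch open-codes the equivalent branches at each slow-path call site); it
assumes the libnvdimm internal helpers is_nd_btt(), to_nd_btt(), is_nd_pfn(),
to_nd_pfn() and to_ndns() that are visible to drivers/nvdimm/pmem.c:

/*
 * Hypothetical sketch, not part of the patch: recover the common
 * namespace from the device itself instead of from a cached
 * pmem->ndns pointer.  The btt and pfn personality devices each
 * carry their own ->ndns; the raw pmem case is the namespace
 * device itself.
 */
static struct nd_namespace_common *pmem_to_ndns(struct device *dev)
{
	if (is_nd_btt(dev))
		return to_nd_btt(dev)->ndns;
	if (is_nd_pfn(dev))
		return to_nd_pfn(dev)->ndns;
	return to_ndns(dev);
}

Because these lookups only happen in probe/remove/notify paths, recomputing the
namespace is cheap enough that the cached pointer in struct pmem_device can go
away.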
Diffstat (limited to 'drivers/nvdimm/pmem.c')
-rw-r--r--  drivers/nvdimm/pmem.c | 40
1 file changed, 22 insertions(+), 18 deletions(-)
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index f798899338ed..2b51d4d34207 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -35,7 +35,6 @@
 struct pmem_device {
 	struct request_queue	*pmem_queue;
 	struct gendisk		*pmem_disk;
-	struct nd_namespace_common *ndns;
 
 	/* One contiguous memory region per device */
 	phys_addr_t		phys_addr;
@@ -436,9 +435,8 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
 	return -ENXIO;
 }
 
-static int nvdimm_namespace_detach_pfn(struct nd_namespace_common *ndns)
+static int nvdimm_namespace_detach_pfn(struct nd_pfn *nd_pfn)
 {
-	struct nd_pfn *nd_pfn = to_nd_pfn(ndns->claim);
 	struct pmem_device *pmem;
 
 	/* free pmem disk */
@@ -537,7 +535,7 @@ static int __nvdimm_namespace_attach_pfn(struct nd_pfn *nd_pfn)
 
 	return rc;
  err:
-	nvdimm_namespace_detach_pfn(ndns);
+	nvdimm_namespace_detach_pfn(nd_pfn);
 	return rc;
 
 }
@@ -573,7 +571,6 @@ static int nd_pmem_probe(struct device *dev)
 	if (IS_ERR(pmem))
 		return PTR_ERR(pmem);
 
-	pmem->ndns = ndns;
 	dev_set_drvdata(dev, pmem);
 	ndns->rw_bytes = pmem_rw_bytes;
 	if (devm_init_badblocks(dev, &pmem->bb))
@@ -607,9 +604,9 @@ static int nd_pmem_remove(struct device *dev)
 	struct pmem_device *pmem = dev_get_drvdata(dev);
 
 	if (is_nd_btt(dev))
-		nvdimm_namespace_detach_btt(pmem->ndns);
+		nvdimm_namespace_detach_btt(to_nd_btt(dev));
 	else if (is_nd_pfn(dev))
-		nvdimm_namespace_detach_pfn(pmem->ndns);
+		nvdimm_namespace_detach_pfn(to_nd_pfn(dev));
 	else
 		pmem_detach_disk(pmem);
 
@@ -618,26 +615,33 @@ static int nd_pmem_remove(struct device *dev)
 
 static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
 {
-	struct pmem_device *pmem = dev_get_drvdata(dev);
-	struct nd_namespace_common *ndns = pmem->ndns;
 	struct nd_region *nd_region = to_nd_region(dev->parent);
-	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
-	struct resource res = {
-		.start = nsio->res.start + pmem->data_offset,
-		.end = nsio->res.end,
-	};
+	struct pmem_device *pmem = dev_get_drvdata(dev);
+	resource_size_t offset = 0, end_trunc = 0;
+	struct nd_namespace_common *ndns;
+	struct nd_namespace_io *nsio;
+	struct resource res;
 
 	if (event != NVDIMM_REVALIDATE_POISON)
 		return;
 
-	if (is_nd_pfn(dev)) {
+	if (is_nd_btt(dev)) {
+		struct nd_btt *nd_btt = to_nd_btt(dev);
+
+		ndns = nd_btt->ndns;
+	} else if (is_nd_pfn(dev)) {
 		struct nd_pfn *nd_pfn = to_nd_pfn(dev);
 		struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
 
-		res.start += __le32_to_cpu(pfn_sb->start_pad);
-		res.end -= __le32_to_cpu(pfn_sb->end_trunc);
-	}
+		ndns = nd_pfn->ndns;
+		offset = pmem->data_offset + __le32_to_cpu(pfn_sb->start_pad);
+		end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
+	} else
+		ndns = to_ndns(dev);
 
+	nsio = to_nd_namespace_io(&ndns->dev);
+	res.start = nsio->res.start + offset;
+	res.end = nsio->res.end - end_trunc;
 	nvdimm_badblocks_populate(nd_region, &pmem->bb, &res);
 }
 