aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDan Williams <dan.j.williams@intel.com>2016-06-07 19:38:04 -0400
committerDan Williams <dan.j.williams@intel.com>2016-07-11 18:09:26 -0400
commita8a6d2e04c4ffda055db70814c50bd106e44730f (patch)
tree708293a26c301df1473b78ab09b1d89849be37a7
parent29b9aa0aa3837c93ecd804dd3ada39b8cc75607d (diff)
libnvdimm, nfit: remove nfit_spa_map() infrastructure
Now that all shared mappings are handled by devm_nvdimm_memremap() we no longer need nfit_spa_map(), nor do we need to trigger a callback to the bus provider at region disable time.

Signed-off-by: Dan Williams <dan.j.williams@intel.com>
-rw-r--r--drivers/acpi/nfit.c146
-rw-r--r--drivers/acpi/nfit.h21
-rw-r--r--drivers/nvdimm/nd.h1
-rw-r--r--drivers/nvdimm/region_devs.c3
-rw-r--r--include/linux/libnvdimm.h1
5 files changed, 0 insertions, 172 deletions
diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c
index b047dbe13bed..b76c95981547 100644
--- a/drivers/acpi/nfit.c
+++ b/drivers/acpi/nfit.c
@@ -1509,126 +1509,6 @@ static int acpi_nfit_blk_region_do_io(struct nd_blk_region *ndbr,
1509 return rc; 1509 return rc;
1510} 1510}
1511 1511
1512static void nfit_spa_mapping_release(struct kref *kref)
1513{
1514 struct nfit_spa_mapping *spa_map = to_spa_map(kref);
1515 struct acpi_nfit_system_address *spa = spa_map->spa;
1516 struct acpi_nfit_desc *acpi_desc = spa_map->acpi_desc;
1517
1518 WARN_ON(!mutex_is_locked(&acpi_desc->spa_map_mutex));
1519 dev_dbg(acpi_desc->dev, "%s: SPA%d\n", __func__, spa->range_index);
1520 if (spa_map->type == SPA_MAP_APERTURE)
1521 memunmap((void __force *)spa_map->addr.aperture);
1522 else
1523 iounmap(spa_map->addr.base);
1524 release_mem_region(spa->address, spa->length);
1525 list_del(&spa_map->list);
1526 kfree(spa_map);
1527}
1528
1529static struct nfit_spa_mapping *find_spa_mapping(
1530 struct acpi_nfit_desc *acpi_desc,
1531 struct acpi_nfit_system_address *spa)
1532{
1533 struct nfit_spa_mapping *spa_map;
1534
1535 WARN_ON(!mutex_is_locked(&acpi_desc->spa_map_mutex));
1536 list_for_each_entry(spa_map, &acpi_desc->spa_maps, list)
1537 if (spa_map->spa == spa)
1538 return spa_map;
1539
1540 return NULL;
1541}
1542
1543static void nfit_spa_unmap(struct acpi_nfit_desc *acpi_desc,
1544 struct acpi_nfit_system_address *spa)
1545{
1546 struct nfit_spa_mapping *spa_map;
1547
1548 mutex_lock(&acpi_desc->spa_map_mutex);
1549 spa_map = find_spa_mapping(acpi_desc, spa);
1550
1551 if (spa_map)
1552 kref_put(&spa_map->kref, nfit_spa_mapping_release);
1553 mutex_unlock(&acpi_desc->spa_map_mutex);
1554}
1555
1556static void __iomem *__nfit_spa_map(struct acpi_nfit_desc *acpi_desc,
1557 struct acpi_nfit_system_address *spa, enum spa_map_type type)
1558{
1559 resource_size_t start = spa->address;
1560 resource_size_t n = spa->length;
1561 struct nfit_spa_mapping *spa_map;
1562 struct resource *res;
1563
1564 WARN_ON(!mutex_is_locked(&acpi_desc->spa_map_mutex));
1565
1566 spa_map = find_spa_mapping(acpi_desc, spa);
1567 if (spa_map) {
1568 kref_get(&spa_map->kref);
1569 return spa_map->addr.base;
1570 }
1571
1572 spa_map = kzalloc(sizeof(*spa_map), GFP_KERNEL);
1573 if (!spa_map)
1574 return NULL;
1575
1576 INIT_LIST_HEAD(&spa_map->list);
1577 spa_map->spa = spa;
1578 kref_init(&spa_map->kref);
1579 spa_map->acpi_desc = acpi_desc;
1580
1581 res = request_mem_region(start, n, dev_name(acpi_desc->dev));
1582 if (!res)
1583 goto err_mem;
1584
1585 spa_map->type = type;
1586 if (type == SPA_MAP_APERTURE)
1587 spa_map->addr.aperture = (void __pmem *)memremap(start, n,
1588 ARCH_MEMREMAP_PMEM);
1589 else
1590 spa_map->addr.base = ioremap_nocache(start, n);
1591
1592
1593 if (!spa_map->addr.base)
1594 goto err_map;
1595
1596 list_add_tail(&spa_map->list, &acpi_desc->spa_maps);
1597 return spa_map->addr.base;
1598
1599 err_map:
1600 release_mem_region(start, n);
1601 err_mem:
1602 kfree(spa_map);
1603 return NULL;
1604}
1605
1606/**
1607 * nfit_spa_map - interleave-aware managed-mappings of acpi_nfit_system_address ranges
1608 * @nvdimm_bus: NFIT-bus that provided the spa table entry
1609 * @nfit_spa: spa table to map
1610 * @type: aperture or control region
1611 *
1612 * In the case where block-data-window apertures and
1613 * dimm-control-regions are interleaved they will end up sharing a
1614 * single request_mem_region() + ioremap() for the address range. In
1615 * the style of devm nfit_spa_map() mappings are automatically dropped
1616 * when all region devices referencing the same mapping are disabled /
1617 * unbound.
1618 */
1619static __maybe_unused void __iomem *nfit_spa_map(
1620 struct acpi_nfit_desc *acpi_desc,
1621 struct acpi_nfit_system_address *spa, enum spa_map_type type)
1622{
1623 void __iomem *iomem;
1624
1625 mutex_lock(&acpi_desc->spa_map_mutex);
1626 iomem = __nfit_spa_map(acpi_desc, spa, type);
1627 mutex_unlock(&acpi_desc->spa_map_mutex);
1628
1629 return iomem;
1630}
1631
1632static int nfit_blk_init_interleave(struct nfit_blk_mmio *mmio, 1512static int nfit_blk_init_interleave(struct nfit_blk_mmio *mmio,
1633 struct acpi_nfit_interleave *idt, u16 interleave_ways) 1513 struct acpi_nfit_interleave *idt, u16 interleave_ways)
1634{ 1514{
@@ -1773,29 +1653,6 @@ static int acpi_nfit_blk_region_enable(struct nvdimm_bus *nvdimm_bus,
1773 return 0; 1653 return 0;
1774} 1654}
1775 1655
1776static void acpi_nfit_blk_region_disable(struct nvdimm_bus *nvdimm_bus,
1777 struct device *dev)
1778{
1779 struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
1780 struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
1781 struct nd_blk_region *ndbr = to_nd_blk_region(dev);
1782 struct nfit_blk *nfit_blk = nd_blk_region_provider_data(ndbr);
1783 int i;
1784
1785 if (!nfit_blk)
1786 return; /* never enabled */
1787
1788 /* auto-free BLK spa mappings */
1789 for (i = 0; i < 2; i++) {
1790 struct nfit_blk_mmio *mmio = &nfit_blk->mmio[i];
1791
1792 if (mmio->addr.base)
1793 nfit_spa_unmap(acpi_desc, mmio->spa);
1794 }
1795 nd_blk_region_set_provider_data(ndbr, NULL);
1796 /* devm will free nfit_blk */
1797}
1798
1799static int ars_get_cap(struct acpi_nfit_desc *acpi_desc, 1656static int ars_get_cap(struct acpi_nfit_desc *acpi_desc,
1800 struct nd_cmd_ars_cap *cmd, struct nfit_spa *nfit_spa) 1657 struct nd_cmd_ars_cap *cmd, struct nfit_spa *nfit_spa)
1801{ 1658{
@@ -1969,7 +1826,6 @@ static int acpi_nfit_init_mapping(struct acpi_nfit_desc *acpi_desc,
1969 ndr_desc->num_mappings = blk_valid; 1826 ndr_desc->num_mappings = blk_valid;
1970 ndbr_desc = to_blk_region_desc(ndr_desc); 1827 ndbr_desc = to_blk_region_desc(ndr_desc);
1971 ndbr_desc->enable = acpi_nfit_blk_region_enable; 1828 ndbr_desc->enable = acpi_nfit_blk_region_enable;
1972 ndbr_desc->disable = acpi_nfit_blk_region_disable;
1973 ndbr_desc->do_io = acpi_desc->blk_do_io; 1829 ndbr_desc->do_io = acpi_desc->blk_do_io;
1974 nfit_spa->nd_region = nvdimm_blk_region_create(acpi_desc->nvdimm_bus, 1830 nfit_spa->nd_region = nvdimm_blk_region_create(acpi_desc->nvdimm_bus,
1975 ndr_desc); 1831 ndr_desc);
@@ -2509,7 +2365,6 @@ void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev)
2509 nd_desc->clear_to_send = acpi_nfit_clear_to_send; 2365 nd_desc->clear_to_send = acpi_nfit_clear_to_send;
2510 nd_desc->attr_groups = acpi_nfit_attribute_groups; 2366 nd_desc->attr_groups = acpi_nfit_attribute_groups;
2511 2367
2512 INIT_LIST_HEAD(&acpi_desc->spa_maps);
2513 INIT_LIST_HEAD(&acpi_desc->spas); 2368 INIT_LIST_HEAD(&acpi_desc->spas);
2514 INIT_LIST_HEAD(&acpi_desc->dcrs); 2369 INIT_LIST_HEAD(&acpi_desc->dcrs);
2515 INIT_LIST_HEAD(&acpi_desc->bdws); 2370 INIT_LIST_HEAD(&acpi_desc->bdws);
@@ -2517,7 +2372,6 @@ void acpi_nfit_desc_init(struct acpi_nfit_desc *acpi_desc, struct device *dev)
2517 INIT_LIST_HEAD(&acpi_desc->flushes); 2372 INIT_LIST_HEAD(&acpi_desc->flushes);
2518 INIT_LIST_HEAD(&acpi_desc->memdevs); 2373 INIT_LIST_HEAD(&acpi_desc->memdevs);
2519 INIT_LIST_HEAD(&acpi_desc->dimms); 2374 INIT_LIST_HEAD(&acpi_desc->dimms);
2520 mutex_init(&acpi_desc->spa_map_mutex);
2521 mutex_init(&acpi_desc->init_mutex); 2375 mutex_init(&acpi_desc->init_mutex);
2522 INIT_WORK(&acpi_desc->work, acpi_nfit_scrub); 2376 INIT_WORK(&acpi_desc->work, acpi_nfit_scrub);
2523} 2377}
diff --git a/drivers/acpi/nfit.h b/drivers/acpi/nfit.h
index f06fa91c5abf..52078475d969 100644
--- a/drivers/acpi/nfit.h
+++ b/drivers/acpi/nfit.h
@@ -135,9 +135,7 @@ struct acpi_nfit_desc {
135 struct nvdimm_bus_descriptor nd_desc; 135 struct nvdimm_bus_descriptor nd_desc;
136 struct acpi_table_header acpi_header; 136 struct acpi_table_header acpi_header;
137 struct acpi_nfit_header *nfit; 137 struct acpi_nfit_header *nfit;
138 struct mutex spa_map_mutex;
139 struct mutex init_mutex; 138 struct mutex init_mutex;
140 struct list_head spa_maps;
141 struct list_head memdevs; 139 struct list_head memdevs;
142 struct list_head flushes; 140 struct list_head flushes;
143 struct list_head dimms; 141 struct list_head dimms;
@@ -188,25 +186,6 @@ struct nfit_blk {
188 u32 dimm_flags; 186 u32 dimm_flags;
189}; 187};
190 188
191enum spa_map_type {
192 SPA_MAP_CONTROL,
193 SPA_MAP_APERTURE,
194};
195
196struct nfit_spa_mapping {
197 struct acpi_nfit_desc *acpi_desc;
198 struct acpi_nfit_system_address *spa;
199 struct list_head list;
200 struct kref kref;
201 enum spa_map_type type;
202 struct nd_blk_addr addr;
203};
204
205static inline struct nfit_spa_mapping *to_spa_map(struct kref *kref)
206{
207 return container_of(kref, struct nfit_spa_mapping, kref);
208}
209
210static inline struct acpi_nfit_memory_map *__to_nfit_memdev( 189static inline struct acpi_nfit_memory_map *__to_nfit_memdev(
211 struct nfit_mem *nfit_mem) 190 struct nfit_mem *nfit_mem)
212{ 191{
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index d0ac93c31dda..2819e886dfd2 100644
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -119,7 +119,6 @@ struct nd_region {
119 119
120struct nd_blk_region { 120struct nd_blk_region {
121 int (*enable)(struct nvdimm_bus *nvdimm_bus, struct device *dev); 121 int (*enable)(struct nvdimm_bus *nvdimm_bus, struct device *dev);
122 void (*disable)(struct nvdimm_bus *nvdimm_bus, struct device *dev);
123 int (*do_io)(struct nd_blk_region *ndbr, resource_size_t dpa, 122 int (*do_io)(struct nd_blk_region *ndbr, resource_size_t dpa,
124 void *iobuf, u64 len, int rw); 123 void *iobuf, u64 len, int rw);
125 void *blk_provider_data; 124 void *blk_provider_data;
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index 40fcfea26fbb..694b21024871 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -433,8 +433,6 @@ static void nd_region_notify_driver_action(struct nvdimm_bus *nvdimm_bus,
433 433
434 if (is_nd_pmem(dev)) 434 if (is_nd_pmem(dev))
435 return; 435 return;
436
437 to_nd_blk_region(dev)->disable(nvdimm_bus, dev);
438 } 436 }
439 if (dev->parent && is_nd_blk(dev->parent) && probe) { 437 if (dev->parent && is_nd_blk(dev->parent) && probe) {
440 nd_region = to_nd_region(dev->parent); 438 nd_region = to_nd_region(dev->parent);
@@ -698,7 +696,6 @@ static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
698 if (ndbr) { 696 if (ndbr) {
699 nd_region = &ndbr->nd_region; 697 nd_region = &ndbr->nd_region;
700 ndbr->enable = ndbr_desc->enable; 698 ndbr->enable = ndbr_desc->enable;
701 ndbr->disable = ndbr_desc->disable;
702 ndbr->do_io = ndbr_desc->do_io; 699 ndbr->do_io = ndbr_desc->do_io;
703 } 700 }
704 region_buf = ndbr; 701 region_buf = ndbr;
diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h
index 18c3cc48a970..1050f9aa3a3e 100644
--- a/include/linux/libnvdimm.h
+++ b/include/linux/libnvdimm.h
@@ -114,7 +114,6 @@ struct device;
114struct nd_blk_region; 114struct nd_blk_region;
115struct nd_blk_region_desc { 115struct nd_blk_region_desc {
116 int (*enable)(struct nvdimm_bus *nvdimm_bus, struct device *dev); 116 int (*enable)(struct nvdimm_bus *nvdimm_bus, struct device *dev);
117 void (*disable)(struct nvdimm_bus *nvdimm_bus, struct device *dev);
118 int (*do_io)(struct nd_blk_region *ndbr, resource_size_t dpa, 117 int (*do_io)(struct nd_blk_region *ndbr, resource_size_t dpa,
119 void *iobuf, u64 len, int rw); 118 void *iobuf, u64 len, int rw);
120 struct nd_region_desc ndr_desc; 119 struct nd_region_desc ndr_desc;