 drivers/dax/pmem.c                | 20
 drivers/nvdimm/nd.h               |  9
 drivers/nvdimm/pfn_devs.c         | 27
 drivers/nvdimm/pmem.c             | 37
 drivers/nvdimm/pmem.h             |  1
 include/linux/memremap.h          |  6
 kernel/memremap.c                 | 51
 tools/testing/nvdimm/test/iomap.c |  7
 8 files changed, 77 insertions(+), 81 deletions(-)
diff --git a/drivers/dax/pmem.c b/drivers/dax/pmem.c
index 8d8c852ba8f2..31b6ecce4c64 100644
--- a/drivers/dax/pmem.c
+++ b/drivers/dax/pmem.c
@@ -21,6 +21,7 @@
 struct dax_pmem {
 	struct device *dev;
 	struct percpu_ref ref;
+	struct dev_pagemap pgmap;
 	struct completion cmp;
 };
 
@@ -69,20 +70,23 @@ static int dax_pmem_probe(struct device *dev)
 	struct nd_namespace_common *ndns;
 	struct nd_dax *nd_dax = to_nd_dax(dev);
 	struct nd_pfn *nd_pfn = &nd_dax->nd_pfn;
-	struct vmem_altmap __altmap, *altmap = NULL;
 
 	ndns = nvdimm_namespace_common_probe(dev);
 	if (IS_ERR(ndns))
 		return PTR_ERR(ndns);
 	nsio = to_nd_namespace_io(&ndns->dev);
 
+	dax_pmem = devm_kzalloc(dev, sizeof(*dax_pmem), GFP_KERNEL);
+	if (!dax_pmem)
+		return -ENOMEM;
+
 	/* parse the 'pfn' info block via ->rw_bytes */
 	rc = devm_nsio_enable(dev, nsio);
 	if (rc)
 		return rc;
-	altmap = nvdimm_setup_pfn(nd_pfn, &res, &__altmap);
-	if (IS_ERR(altmap))
-		return PTR_ERR(altmap);
+	rc = nvdimm_setup_pfn(nd_pfn, &dax_pmem->pgmap);
+	if (rc)
+		return rc;
 	devm_nsio_disable(dev, nsio);
 
 	pfn_sb = nd_pfn->pfn_sb;
@@ -94,10 +98,6 @@ static int dax_pmem_probe(struct device *dev)
 		return -EBUSY;
 	}
 
-	dax_pmem = devm_kzalloc(dev, sizeof(*dax_pmem), GFP_KERNEL);
-	if (!dax_pmem)
-		return -ENOMEM;
-
 	dax_pmem->dev = dev;
 	init_completion(&dax_pmem->cmp);
 	rc = percpu_ref_init(&dax_pmem->ref, dax_pmem_percpu_release, 0,
@@ -110,7 +110,8 @@ static int dax_pmem_probe(struct device *dev)
 	if (rc)
 		return rc;
 
-	addr = devm_memremap_pages(dev, &res, &dax_pmem->ref, altmap);
+	dax_pmem->pgmap.ref = &dax_pmem->ref;
+	addr = devm_memremap_pages(dev, &dax_pmem->pgmap);
 	if (IS_ERR(addr))
 		return PTR_ERR(addr);
 
@@ -120,6 +121,7 @@ static int dax_pmem_probe(struct device *dev)
 		return rc;
 
 	/* adjust the dax_region resource to the start of data */
+	memcpy(&res, &dax_pmem->pgmap.res, sizeof(res));
 	res.start += le64_to_cpu(pfn_sb->dataoff);
 
 	rc = sscanf(dev_name(&ndns->dev), "namespace%d.%d", &region_id, &id);
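
The net effect in dax_pmem_probe() is that the driver-embedded dev_pagemap is
populated by nvdimm_setup_pfn() and only needs its reference count wired up
before mapping. A condensed sketch of the resulting flow, stitched together
from the hunks above (error handling abbreviated, not the full function):

	dax_pmem = devm_kzalloc(dev, sizeof(*dax_pmem), GFP_KERNEL);
	if (!dax_pmem)
		return -ENOMEM;

	rc = nvdimm_setup_pfn(nd_pfn, &dax_pmem->pgmap);	/* fills res, altmap, type */
	if (rc)
		return rc;

	dax_pmem->pgmap.ref = &dax_pmem->ref;	/* percpu_ref, already live */
	addr = devm_memremap_pages(dev, &dax_pmem->pgmap);
	if (IS_ERR(addr))
		return PTR_ERR(addr);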
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index e958f3724c41..8d6375ee0fda 100644
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -368,15 +368,14 @@ unsigned int pmem_sector_size(struct nd_namespace_common *ndns);
 void nvdimm_badblocks_populate(struct nd_region *nd_region,
 		struct badblocks *bb, const struct resource *res);
 #if IS_ENABLED(CONFIG_ND_CLAIM)
-struct vmem_altmap *nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
-		struct resource *res, struct vmem_altmap *altmap);
+int nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap);
 int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio);
 void devm_nsio_disable(struct device *dev, struct nd_namespace_io *nsio);
 #else
-static inline struct vmem_altmap *nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
-		struct resource *res, struct vmem_altmap *altmap)
+static inline int nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
+		struct dev_pagemap *pgmap)
 {
-	return ERR_PTR(-ENXIO);
+	return -ENXIO;
 }
 static inline int devm_nsio_enable(struct device *dev,
 		struct nd_namespace_io *nsio)
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index 2adada1a5855..f5c4e8c6e29d 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -542,9 +542,10 @@ static unsigned long init_altmap_reserve(resource_size_t base)
 	return reserve;
 }
 
-static struct vmem_altmap *__nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
-		struct resource *res, struct vmem_altmap *altmap)
+static int __nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)
 {
+	struct resource *res = &pgmap->res;
+	struct vmem_altmap *altmap = &pgmap->altmap;
 	struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
 	u64 offset = le64_to_cpu(pfn_sb->dataoff);
 	u32 start_pad = __le32_to_cpu(pfn_sb->start_pad);
@@ -561,11 +562,13 @@ static struct vmem_altmap *__nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
 	res->start += start_pad;
 	res->end -= end_trunc;
 
+	pgmap->type = MEMORY_DEVICE_HOST;
+
 	if (nd_pfn->mode == PFN_MODE_RAM) {
 		if (offset < SZ_8K)
-			return ERR_PTR(-EINVAL);
+			return -EINVAL;
 		nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns);
-		altmap = NULL;
+		pgmap->altmap_valid = false;
 	} else if (nd_pfn->mode == PFN_MODE_PMEM) {
 		nd_pfn->npfns = PFN_SECTION_ALIGN_UP((resource_size(res)
 					- offset) / PAGE_SIZE);
@@ -577,10 +580,11 @@ static struct vmem_altmap *__nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
 		memcpy(altmap, &__altmap, sizeof(*altmap));
 		altmap->free = PHYS_PFN(offset - SZ_8K);
 		altmap->alloc = 0;
+		pgmap->altmap_valid = true;
 	} else
-		return ERR_PTR(-ENXIO);
+		return -ENXIO;
 
-	return altmap;
+	return 0;
 }
 
 static u64 phys_pmem_align_down(struct nd_pfn *nd_pfn, u64 phys)
@@ -708,19 +712,18 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
  * Determine the effective resource range and vmem_altmap from an nd_pfn
  * instance.
  */
-struct vmem_altmap *nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
-		struct resource *res, struct vmem_altmap *altmap)
+int nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)
 {
 	int rc;
 
 	if (!nd_pfn->uuid || !nd_pfn->ndns)
-		return ERR_PTR(-ENODEV);
+		return -ENODEV;
 
 	rc = nd_pfn_init(nd_pfn);
 	if (rc)
-		return ERR_PTR(rc);
+		return rc;
 
-	/* we need a valid pfn_sb before we can init a vmem_altmap */
-	return __nvdimm_setup_pfn(nd_pfn, res, altmap);
+	/* we need a valid pfn_sb before we can init a dev_pagemap */
+	return __nvdimm_setup_pfn(nd_pfn, pgmap);
 }
 EXPORT_SYMBOL_GPL(nvdimm_setup_pfn);
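
The return convention changes with the signature: nvdimm_setup_pfn() used to
hand back an ERR_PTR-encoded vmem_altmap, and now returns a plain errno with
the dev_pagemap as the out parameter. Every call site in this patch follows
the same before/after shape, shown here condensed from the pmem hunks below:

	/* before */
	altmap = nvdimm_setup_pfn(nd_pfn, &pfn_res, &__altmap);
	if (IS_ERR(altmap))
		return PTR_ERR(altmap);

	/* after: the resource and altmap come back inside the pgmap */
	rc = nvdimm_setup_pfn(nd_pfn, &pmem->pgmap);
	if (rc)
		return rc;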
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 7fbc5c5dc8e1..cf074b1ce219 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -298,34 +298,34 @@ static int pmem_attach_disk(struct device *dev,
 {
 	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
 	struct nd_region *nd_region = to_nd_region(dev->parent);
-	struct vmem_altmap __altmap, *altmap = NULL;
 	int nid = dev_to_node(dev), fua, wbc;
 	struct resource *res = &nsio->res;
+	struct resource bb_res;
 	struct nd_pfn *nd_pfn = NULL;
 	struct dax_device *dax_dev;
 	struct nd_pfn_sb *pfn_sb;
 	struct pmem_device *pmem;
-	struct resource pfn_res;
 	struct request_queue *q;
 	struct device *gendev;
 	struct gendisk *disk;
 	void *addr;
+	int rc;
+
+	pmem = devm_kzalloc(dev, sizeof(*pmem), GFP_KERNEL);
+	if (!pmem)
+		return -ENOMEM;
 
 	/* while nsio_rw_bytes is active, parse a pfn info block if present */
 	if (is_nd_pfn(dev)) {
 		nd_pfn = to_nd_pfn(dev);
-		altmap = nvdimm_setup_pfn(nd_pfn, &pfn_res, &__altmap);
-		if (IS_ERR(altmap))
-			return PTR_ERR(altmap);
+		rc = nvdimm_setup_pfn(nd_pfn, &pmem->pgmap);
+		if (rc)
+			return rc;
 	}
 
 	/* we're attaching a block device, disable raw namespace access */
 	devm_nsio_disable(dev, nsio);
 
-	pmem = devm_kzalloc(dev, sizeof(*pmem), GFP_KERNEL);
-	if (!pmem)
-		return -ENOMEM;
-
 	dev_set_drvdata(dev, pmem);
 	pmem->phys_addr = res->start;
 	pmem->size = resource_size(res);
@@ -350,19 +350,22 @@ static int pmem_attach_disk(struct device *dev,
 		return -ENOMEM;
 
 	pmem->pfn_flags = PFN_DEV;
+	pmem->pgmap.ref = &q->q_usage_counter;
 	if (is_nd_pfn(dev)) {
-		addr = devm_memremap_pages(dev, &pfn_res, &q->q_usage_counter,
-				altmap);
+		addr = devm_memremap_pages(dev, &pmem->pgmap);
 		pfn_sb = nd_pfn->pfn_sb;
 		pmem->data_offset = le64_to_cpu(pfn_sb->dataoff);
-		pmem->pfn_pad = resource_size(res) - resource_size(&pfn_res);
+		pmem->pfn_pad = resource_size(res) -
+			resource_size(&pmem->pgmap.res);
 		pmem->pfn_flags |= PFN_MAP;
-		res = &pfn_res; /* for badblocks populate */
-		res->start += pmem->data_offset;
+		memcpy(&bb_res, &pmem->pgmap.res, sizeof(bb_res));
+		bb_res.start += pmem->data_offset;
 	} else if (pmem_should_map_pages(dev)) {
-		addr = devm_memremap_pages(dev, &nsio->res,
-				&q->q_usage_counter, NULL);
+		memcpy(&pmem->pgmap.res, &nsio->res, sizeof(pmem->pgmap.res));
+		pmem->pgmap.altmap_valid = false;
+		addr = devm_memremap_pages(dev, &pmem->pgmap);
 		pmem->pfn_flags |= PFN_MAP;
+		memcpy(&bb_res, &pmem->pgmap.res, sizeof(bb_res));
 	} else
 		addr = devm_memremap(dev, pmem->phys_addr,
 				pmem->size, ARCH_MEMREMAP_PMEM);
@@ -401,7 +404,7 @@ static int pmem_attach_disk(struct device *dev,
 			/ 512);
 	if (devm_init_badblocks(dev, &pmem->bb))
 		return -ENOMEM;
-	nvdimm_badblocks_populate(nd_region, &pmem->bb, res);
+	nvdimm_badblocks_populate(nd_region, &pmem->bb, &bb_res);
 	disk->bb = &pmem->bb;
 
 	dax_dev = alloc_dax(pmem, disk->disk_name, &pmem_dax_ops);
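
Because pgmap.res is now owned by the mapping for the lifetime of the device,
the badblocks range is derived from a local copy instead of mutating the pfn
resource in place. Condensed from the hunks above (pfn case only):

	memcpy(&bb_res, &pmem->pgmap.res, sizeof(bb_res));
	bb_res.start += pmem->data_offset;	/* skip past the info block */
	...
	nvdimm_badblocks_populate(nd_region, &pmem->bb, &bb_res);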
diff --git a/drivers/nvdimm/pmem.h b/drivers/nvdimm/pmem.h
index 6a3cd2a10db6..a64ebc78b5df 100644
--- a/drivers/nvdimm/pmem.h
+++ b/drivers/nvdimm/pmem.h
@@ -22,6 +22,7 @@ struct pmem_device {
 	struct badblocks bb;
 	struct dax_device *dax_dev;
 	struct gendisk *disk;
+	struct dev_pagemap pgmap;
 };
 
 long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
diff --git a/include/linux/memremap.h b/include/linux/memremap.h
index 1cb5f39d25c1..7b4899c06f49 100644
--- a/include/linux/memremap.h
+++ b/include/linux/memremap.h
@@ -123,8 +123,7 @@ struct dev_pagemap {
 };
 
 #ifdef CONFIG_ZONE_DEVICE
-void *devm_memremap_pages(struct device *dev, struct resource *res,
-		struct percpu_ref *ref, struct vmem_altmap *altmap);
+void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap);
 struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
 		struct dev_pagemap *pgmap);
 
@@ -134,8 +133,7 @@ void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns);
 static inline bool is_zone_device_page(const struct page *page);
 #else
 static inline void *devm_memremap_pages(struct device *dev,
-		struct resource *res, struct percpu_ref *ref,
-		struct vmem_altmap *altmap)
+		struct dev_pagemap *pgmap)
 {
 	/*
 	 * Fail attempts to call devm_memremap_pages() without
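
With the consolidated signature, a caller owns the dev_pagemap and fills the
mandatory fields up front. A minimal sketch of the new contract for a
hypothetical driver (the foo_* names are illustrative, not from this patch;
the field requirements follow the kernel-doc notes in kernel/memremap.c
below):

	struct foo_dev {
		struct percpu_ref ref;		/* made live before mapping */
		struct dev_pagemap pgmap;	/* caller-owned, not devres-allocated */
	};

	static void *foo_map(struct device *dev, struct foo_dev *foo,
			struct resource *res)
	{
		memcpy(&foo->pgmap.res, res, sizeof(*res));
		foo->pgmap.ref = &foo->ref;
		foo->pgmap.type = MEMORY_DEVICE_HOST;
		foo->pgmap.altmap_valid = false;	/* no altmap in this sketch */
		return devm_memremap_pages(dev, &foo->pgmap);
	}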
diff --git a/kernel/memremap.c b/kernel/memremap.c
index 9207c44cce20..a9a948cd3d7f 100644
--- a/kernel/memremap.c
+++ b/kernel/memremap.c
@@ -275,9 +275,10 @@ static unsigned long pfn_end(struct dev_pagemap *pgmap)
 #define for_each_device_pfn(pfn, map) \
 	for (pfn = pfn_first(map); pfn < pfn_end(map); pfn++)
 
-static void devm_memremap_pages_release(struct device *dev, void *data)
+static void devm_memremap_pages_release(void *data)
 {
 	struct dev_pagemap *pgmap = data;
+	struct device *dev = pgmap->dev;
 	struct resource *res = &pgmap->res;
 	resource_size_t align_start, align_size;
 	unsigned long pfn;
@@ -316,29 +317,34 @@ static struct dev_pagemap *find_dev_pagemap(resource_size_t phys)
 /**
  * devm_memremap_pages - remap and provide memmap backing for the given resource
  * @dev: hosting device for @res
- * @res: "host memory" address range
- * @ref: a live per-cpu reference count
- * @altmap: optional descriptor for allocating the memmap from @res
+ * @pgmap: pointer to a struct dev_pagemap
  *
  * Notes:
- * 1/ @ref must be 'live' on entry and 'dead' before devm_memunmap_pages() time
- *    (or devm release event). The expected order of events is that @ref has
+ * 1/ At a minimum the res, ref and type members of @pgmap must be initialized
+ *    by the caller before passing it to this function
+ *
+ * 2/ The altmap field may optionally be initialized, in which case altmap_valid
+ *    must be set to true
+ *
+ * 3/ pgmap.ref must be 'live' on entry and 'dead' before devm_memunmap_pages()
+ *    time (or devm release event). The expected order of events is that ref has
 *    been through percpu_ref_kill() before devm_memremap_pages_release(). The
 *    wait for the completion of all references being dropped and
 *    percpu_ref_exit() must occur after devm_memremap_pages_release().
 *
- * 2/ @res is expected to be a host memory range that could feasibly be
+ * 4/ res is expected to be a host memory range that could feasibly be
 *    treated as a "System RAM" range, i.e. not a device mmio range, but
 *    this is not enforced.
 */
-void *devm_memremap_pages(struct device *dev, struct resource *res,
-		struct percpu_ref *ref, struct vmem_altmap *altmap)
+void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
 {
 	resource_size_t align_start, align_size, align_end;
+	struct vmem_altmap *altmap = pgmap->altmap_valid ?
+			&pgmap->altmap : NULL;
 	unsigned long pfn, pgoff, order;
 	pgprot_t pgprot = PAGE_KERNEL;
-	struct dev_pagemap *pgmap;
 	int error, nid, is_ram, i = 0;
+	struct resource *res = &pgmap->res;
 
 	align_start = res->start & ~(SECTION_SIZE - 1);
 	align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
@@ -355,27 +361,10 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
 	if (is_ram == REGION_INTERSECTS)
 		return __va(res->start);
 
-	if (!ref)
+	if (!pgmap->ref)
 		return ERR_PTR(-EINVAL);
 
-	pgmap = devres_alloc_node(devm_memremap_pages_release,
-			sizeof(*pgmap), GFP_KERNEL, dev_to_node(dev));
-	if (!pgmap)
-		return ERR_PTR(-ENOMEM);
-
-	memcpy(&pgmap->res, res, sizeof(*res));
-
 	pgmap->dev = dev;
-	if (altmap) {
-		memcpy(&pgmap->altmap, altmap, sizeof(*altmap));
-		pgmap->altmap_valid = true;
-		altmap = &pgmap->altmap;
-	}
-	pgmap->ref = ref;
-	pgmap->type = MEMORY_DEVICE_HOST;
-	pgmap->page_fault = NULL;
-	pgmap->page_free = NULL;
-	pgmap->data = NULL;
 
 	mutex_lock(&pgmap_lock);
 	error = 0;
@@ -423,11 +412,13 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
 		 */
 		list_del(&page->lru);
 		page->pgmap = pgmap;
-		percpu_ref_get(ref);
+		percpu_ref_get(pgmap->ref);
 		if (!(++i % 1024))
 			cond_resched();
 	}
-	devres_add(dev, pgmap);
+
+	devm_add_action(dev, devm_memremap_pages_release, pgmap);
+
 	return __va(res->start);
 
  err_add_memory:
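
Since the dev_pagemap now lives in caller-owned memory, the
devres_alloc_node()/devres_add() pairing gives way to a plain
devm_add_action() that registers devm_memremap_pages_release() against the
caller's pgmap. The ordering in notes 1 and 3 then falls out of registration
order, because devm actions run in reverse at teardown. A hedged sketch of
the wiring (hypothetical foo_* helpers, modeled on the drivers/dax/pmem.c
flow in this patch):

	/* runs LAST: wait for all page references, then percpu_ref_exit() */
	rc = devm_add_action_or_reset(dev, foo_percpu_exit, &foo->ref);
	if (rc)
		return rc;

	foo->pgmap.ref = &foo->ref;
	addr = devm_memremap_pages(dev, &foo->pgmap);
	if (IS_ERR(addr))
		return PTR_ERR(addr);

	/* runs FIRST: percpu_ref_kill() before devm_memremap_pages_release() */
	rc = devm_add_action_or_reset(dev, foo_percpu_kill, &foo->ref);
	if (rc)
		return rc;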
diff --git a/tools/testing/nvdimm/test/iomap.c b/tools/testing/nvdimm/test/iomap.c
index e1f75a1914a1..ff9d3a5825e1 100644
--- a/tools/testing/nvdimm/test/iomap.c
+++ b/tools/testing/nvdimm/test/iomap.c
@@ -104,15 +104,14 @@ void *__wrap_devm_memremap(struct device *dev, resource_size_t offset,
 }
 EXPORT_SYMBOL(__wrap_devm_memremap);
 
-void *__wrap_devm_memremap_pages(struct device *dev, struct resource *res,
-		struct percpu_ref *ref, struct vmem_altmap *altmap)
+void *__wrap_devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
 {
-	resource_size_t offset = res->start;
+	resource_size_t offset = pgmap->res.start;
 	struct nfit_test_resource *nfit_res = get_nfit_res(offset);
 
 	if (nfit_res)
 		return nfit_res->buf + offset - nfit_res->res.start;
-	return devm_memremap_pages(dev, res, ref, altmap);
+	return devm_memremap_pages(dev, pgmap);
 }
 EXPORT_SYMBOL(__wrap_devm_memremap_pages);
 