author    Linus Torvalds <torvalds@linux-foundation.org>    2016-02-01 18:21:20 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>    2016-02-01 18:21:20 -0500
commit    29a8ea4fbe6beda81300835a739740c35c7abcab (patch)
tree      06689d2880150badb1c234c73e8d7517dabe1ade
parent    36f90b0a2ddd60823fe193a85e60ff1906c2a9b3 (diff)
parent    76e9f0ee52b0be5761e29847e0ef01f23f24f1df (diff)
Merge branch 'libnvdimm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm
Pull libnvdimm fixes from Dan Williams:

 "1/ Fixes to the libnvdimm 'pfn' device that establishes a reserved
     area for storing a struct page array.

  2/ Fixes for dax operations on a raw block device to prevent pagecache
     collisions with dax mappings.

  3/ A fix for pfn_t usage in vm_insert_mixed that lead to a null
     pointer de-reference.

  These have received build success notification from the kbuild robot
  across 153 configs and pass the latest ndctl tests"

* 'libnvdimm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm:
  phys_to_pfn_t: use phys_addr_t
  mm: fix pfn_t to page conversion in vm_insert_mixed
  block: use DAX for partition table reads
  block: revert runtime dax control of the raw block device
  fs, block: force direct-I/O for dax-enabled block devices
  devm_memremap_pages: fix vmem_altmap lifetime + alignment handling
  libnvdimm, pfn: fix restoring memmap location
  libnvdimm: fix mode determination for e820 devices
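Context for the "revert runtime dax control" change below: the series removes the BLKDAXSET ioctl (runtime DAX toggling on a raw block device) but keeps BLKDAXGET, which reports whether the block device inode currently carries S_DAX. The userspace sketch that follows is illustrative only and is not part of this merge; it assumes a kernel from this series (BLKDAXGET was later removed again) and a hypothetical /dev/pmem0 device node.

/*
 * Illustrative sketch, not from this merge: query the DAX state of a raw
 * block device via BLKDAXGET, the ioctl this series keeps while removing
 * BLKDAXSET. Assumes <linux/fs.h> from a kernel of this vintage and a
 * hypothetical /dev/pmem0 namespace.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

int main(void)
{
        const char *dev = "/dev/pmem0";         /* example device node */
        int dax = 0;
        int fd = open(dev, O_RDONLY);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        /* kernel side: put_int(arg, !!(bdev->bd_inode->i_flags & S_DAX)) */
        if (ioctl(fd, BLKDAXGET, &dax) < 0) {
                perror("ioctl(BLKDAXGET)");
                close(fd);
                return 1;
        }
        printf("%s: DAX %s\n", dev, dax ? "enabled" : "disabled");
        close(fd);
        return 0;
}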
-rw-r--r--  block/ioctl.c                      | 38
-rw-r--r--  block/partition-generic.c          | 18
-rw-r--r--  drivers/nvdimm/namespace_devs.c    |  8
-rw-r--r--  drivers/nvdimm/pfn_devs.c          |  4
-rw-r--r--  fs/block_dev.c                     | 28
-rw-r--r--  fs/dax.c                           | 20
-rw-r--r--  include/linux/dax.h                | 11
-rw-r--r--  include/linux/fs.h                 |  5
-rw-r--r--  include/linux/pfn_t.h              |  4
-rw-r--r--  include/uapi/linux/fs.h            |  1
-rw-r--r--  kernel/memremap.c                  | 20
-rw-r--r--  mm/memory.c                        |  9
-rw-r--r--  tools/testing/nvdimm/test/iomap.c  |  2
13 files changed, 75 insertions(+), 93 deletions(-)
diff --git a/block/ioctl.c b/block/ioctl.c
index 77f5d17779d6..d8996bbd7f12 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -434,42 +434,6 @@ bool blkdev_dax_capable(struct block_device *bdev)
 
         return true;
 }
-
-static int blkdev_daxset(struct block_device *bdev, unsigned long argp)
-{
-        unsigned long arg;
-        int rc = 0;
-
-        if (!capable(CAP_SYS_ADMIN))
-                return -EACCES;
-
-        if (get_user(arg, (int __user *)(argp)))
-                return -EFAULT;
-        arg = !!arg;
-        if (arg == !!(bdev->bd_inode->i_flags & S_DAX))
-                return 0;
-
-        if (arg)
-                arg = S_DAX;
-
-        if (arg && !blkdev_dax_capable(bdev))
-                return -ENOTTY;
-
-        inode_lock(bdev->bd_inode);
-        if (bdev->bd_map_count == 0)
-                inode_set_flags(bdev->bd_inode, arg, S_DAX);
-        else
-                rc = -EBUSY;
-        inode_unlock(bdev->bd_inode);
-        return rc;
-}
-#else
-static int blkdev_daxset(struct block_device *bdev, int arg)
-{
-        if (arg)
-                return -ENOTTY;
-        return 0;
-}
 #endif
 
 static int blkdev_flushbuf(struct block_device *bdev, fmode_t mode,
@@ -634,8 +598,6 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
         case BLKTRACESETUP:
         case BLKTRACETEARDOWN:
                 return blk_trace_ioctl(bdev, cmd, argp);
-        case BLKDAXSET:
-                return blkdev_daxset(bdev, arg);
         case BLKDAXGET:
                 return put_int(arg, !!(bdev->bd_inode->i_flags & S_DAX));
                 break;
diff --git a/block/partition-generic.c b/block/partition-generic.c
index 746935a5973c..fefd01b496a0 100644
--- a/block/partition-generic.c
+++ b/block/partition-generic.c
@@ -16,6 +16,7 @@
 #include <linux/kmod.h>
 #include <linux/ctype.h>
 #include <linux/genhd.h>
+#include <linux/dax.h>
 #include <linux/blktrace_api.h>
 
 #include "partitions/check.h"
@@ -550,13 +551,24 @@ int invalidate_partitions(struct gendisk *disk, struct block_device *bdev)
         return 0;
 }
 
-unsigned char *read_dev_sector(struct block_device *bdev, sector_t n, Sector *p)
+static struct page *read_pagecache_sector(struct block_device *bdev, sector_t n)
 {
         struct address_space *mapping = bdev->bd_inode->i_mapping;
+
+        return read_mapping_page(mapping, (pgoff_t)(n >> (PAGE_CACHE_SHIFT-9)),
+                                 NULL);
+}
+
+unsigned char *read_dev_sector(struct block_device *bdev, sector_t n, Sector *p)
+{
         struct page *page;
 
-        page = read_mapping_page(mapping, (pgoff_t)(n >> (PAGE_CACHE_SHIFT-9)),
-                        NULL);
+        /* don't populate page cache for dax capable devices */
+        if (IS_DAX(bdev->bd_inode))
+                page = read_dax_sector(bdev, n);
+        else
+                page = read_pagecache_sector(bdev, n);
+
         if (!IS_ERR(page)) {
                 if (PageError(page))
                         goto fail;
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index 8ebfcaae3f5a..9edf7eb7d17c 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -1277,10 +1277,12 @@ static ssize_t mode_show(struct device *dev,
 
         device_lock(dev);
         claim = ndns->claim;
-        if (pmem_should_map_pages(dev) || (claim && is_nd_pfn(claim)))
-                mode = "memory";
-        else if (claim && is_nd_btt(claim))
+        if (claim && is_nd_btt(claim))
                 mode = "safe";
+        else if (claim && is_nd_pfn(claim))
+                mode = "memory";
+        else if (!claim && pmem_should_map_pages(dev))
+                mode = "memory";
         else
                 mode = "raw";
         rc = sprintf(buf, "%s\n", mode);
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index 0cc9048b86e2..ae81a2f1da50 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -301,10 +301,8 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn)
 
         switch (le32_to_cpu(pfn_sb->mode)) {
         case PFN_MODE_RAM:
-                break;
         case PFN_MODE_PMEM:
-                /* TODO: allocate from PMEM support */
-                return -ENOTTY;
+                break;
         default:
                 return -ENXIO;
         }
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 7b9cd49622b1..afb437484362 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -1736,37 +1736,13 @@ static int blkdev_dax_pmd_fault(struct vm_area_struct *vma, unsigned long addr,
         return __dax_pmd_fault(vma, addr, pmd, flags, blkdev_get_block, NULL);
 }
 
-static void blkdev_vm_open(struct vm_area_struct *vma)
-{
-        struct inode *bd_inode = bdev_file_inode(vma->vm_file);
-        struct block_device *bdev = I_BDEV(bd_inode);
-
-        inode_lock(bd_inode);
-        bdev->bd_map_count++;
-        inode_unlock(bd_inode);
-}
-
-static void blkdev_vm_close(struct vm_area_struct *vma)
-{
-        struct inode *bd_inode = bdev_file_inode(vma->vm_file);
-        struct block_device *bdev = I_BDEV(bd_inode);
-
-        inode_lock(bd_inode);
-        bdev->bd_map_count--;
-        inode_unlock(bd_inode);
-}
-
 static const struct vm_operations_struct blkdev_dax_vm_ops = {
-        .open           = blkdev_vm_open,
-        .close          = blkdev_vm_close,
         .fault          = blkdev_dax_fault,
         .pmd_fault      = blkdev_dax_pmd_fault,
         .pfn_mkwrite    = blkdev_dax_fault,
 };
 
 static const struct vm_operations_struct blkdev_default_vm_ops = {
-        .open           = blkdev_vm_open,
-        .close          = blkdev_vm_close,
         .fault          = filemap_fault,
         .map_pages      = filemap_map_pages,
 };
@@ -1774,18 +1750,14 @@ static const struct vm_operations_struct blkdev_default_vm_ops = {
 static int blkdev_mmap(struct file *file, struct vm_area_struct *vma)
 {
         struct inode *bd_inode = bdev_file_inode(file);
-        struct block_device *bdev = I_BDEV(bd_inode);
 
         file_accessed(file);
-        inode_lock(bd_inode);
-        bdev->bd_map_count++;
         if (IS_DAX(bd_inode)) {
                 vma->vm_ops = &blkdev_dax_vm_ops;
                 vma->vm_flags |= VM_MIXEDMAP | VM_HUGEPAGE;
         } else {
                 vma->vm_ops = &blkdev_default_vm_ops;
         }
-        inode_unlock(bd_inode);
 
         return 0;
 }
diff --git a/fs/dax.c b/fs/dax.c
index 4fd6b0c5c6b5..e0e9358baf35 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -58,6 +58,26 @@ static void dax_unmap_atomic(struct block_device *bdev,
         blk_queue_exit(bdev->bd_queue);
 }
 
+struct page *read_dax_sector(struct block_device *bdev, sector_t n)
+{
+        struct page *page = alloc_pages(GFP_KERNEL, 0);
+        struct blk_dax_ctl dax = {
+                .size = PAGE_SIZE,
+                .sector = n & ~((((int) PAGE_SIZE) / 512) - 1),
+        };
+        long rc;
+
+        if (!page)
+                return ERR_PTR(-ENOMEM);
+
+        rc = dax_map_atomic(bdev, &dax);
+        if (rc < 0)
+                return ERR_PTR(rc);
+        memcpy_from_pmem(page_address(page), dax.addr, PAGE_SIZE);
+        dax_unmap_atomic(bdev, &dax);
+        return page;
+}
+
 /*
  * dax_clear_blocks() is called from within transaction context from XFS,
  * and hence this means the stack from this point must follow GFP_NOFS
diff --git a/include/linux/dax.h b/include/linux/dax.h
index 8204c3dc3800..818e45078929 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -14,6 +14,17 @@ int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t,
                 dax_iodone_t);
 int __dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t,
                 dax_iodone_t);
+
+#ifdef CONFIG_FS_DAX
+struct page *read_dax_sector(struct block_device *bdev, sector_t n);
+#else
+static inline struct page *read_dax_sector(struct block_device *bdev,
+                sector_t n)
+{
+        return ERR_PTR(-ENXIO);
+}
+#endif
+
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 int dax_pmd_fault(struct vm_area_struct *, unsigned long addr, pmd_t *,
                 unsigned int flags, get_block_t, dax_iodone_t);
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 1a2046275cdf..ae681002100a 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -484,9 +484,6 @@ struct block_device {
         int                     bd_fsfreeze_count;
         /* Mutex for freeze */
         struct mutex            bd_fsfreeze_mutex;
-#ifdef CONFIG_FS_DAX
-        int                     bd_map_count;
-#endif
 };
 
 /*
@@ -2907,7 +2904,7 @@ extern void replace_mount_options(struct super_block *sb, char *options);
 
 static inline bool io_is_direct(struct file *filp)
 {
-        return (filp->f_flags & O_DIRECT) || IS_DAX(file_inode(filp));
+        return (filp->f_flags & O_DIRECT) || IS_DAX(filp->f_mapping->host);
 }
 
 static inline int iocb_flags(struct file *file)
diff --git a/include/linux/pfn_t.h b/include/linux/pfn_t.h
index 0703b5360d31..37448ab5fb5c 100644
--- a/include/linux/pfn_t.h
+++ b/include/linux/pfn_t.h
@@ -29,7 +29,7 @@ static inline pfn_t pfn_to_pfn_t(unsigned long pfn)
         return __pfn_to_pfn_t(pfn, 0);
 }
 
-extern pfn_t phys_to_pfn_t(dma_addr_t addr, unsigned long flags);
+extern pfn_t phys_to_pfn_t(phys_addr_t addr, unsigned long flags);
 
 static inline bool pfn_t_has_page(pfn_t pfn)
 {
@@ -48,7 +48,7 @@ static inline struct page *pfn_t_to_page(pfn_t pfn)
         return NULL;
 }
 
-static inline dma_addr_t pfn_t_to_phys(pfn_t pfn)
+static inline phys_addr_t pfn_t_to_phys(pfn_t pfn)
 {
         return PFN_PHYS(pfn_t_to_pfn(pfn));
 }
diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h
index 41e0433b4a83..149bec83a907 100644
--- a/include/uapi/linux/fs.h
+++ b/include/uapi/linux/fs.h
@@ -222,7 +222,6 @@ struct fsxattr {
 #define BLKSECDISCARD _IO(0x12,125)
 #define BLKROTATIONAL _IO(0x12,126)
 #define BLKZEROOUT _IO(0x12,127)
-#define BLKDAXSET _IO(0x12,128)
 #define BLKDAXGET _IO(0x12,129)
 
 #define BMAP_IOCTL 1            /* obsolete - kept for compatibility */
diff --git a/kernel/memremap.c b/kernel/memremap.c
index e517a16cb426..70ee3775de24 100644
--- a/kernel/memremap.c
+++ b/kernel/memremap.c
@@ -150,7 +150,7 @@ void devm_memunmap(struct device *dev, void *addr)
 }
 EXPORT_SYMBOL(devm_memunmap);
 
-pfn_t phys_to_pfn_t(dma_addr_t addr, unsigned long flags)
+pfn_t phys_to_pfn_t(phys_addr_t addr, unsigned long flags)
 {
         return __pfn_to_pfn_t(addr >> PAGE_SHIFT, flags);
 }
@@ -183,7 +183,11 @@ EXPORT_SYMBOL(put_zone_device_page);
 
 static void pgmap_radix_release(struct resource *res)
 {
-        resource_size_t key;
+        resource_size_t key, align_start, align_size, align_end;
+
+        align_start = res->start & ~(SECTION_SIZE - 1);
+        align_size = ALIGN(resource_size(res), SECTION_SIZE);
+        align_end = align_start + align_size - 1;
 
         mutex_lock(&pgmap_lock);
         for (key = res->start; key <= res->end; key += SECTION_SIZE)
@@ -226,12 +230,11 @@ static void devm_memremap_pages_release(struct device *dev, void *data)
                 percpu_ref_put(pgmap->ref);
         }
 
-        pgmap_radix_release(res);
-
         /* pages are dead and unused, undo the arch mapping */
         align_start = res->start & ~(SECTION_SIZE - 1);
         align_size = ALIGN(resource_size(res), SECTION_SIZE);
         arch_remove_memory(align_start, align_size);
+        pgmap_radix_release(res);
         dev_WARN_ONCE(dev, pgmap->altmap && pgmap->altmap->alloc,
                         "%s: failed to free all reserved pages\n", __func__);
 }
@@ -267,7 +270,7 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
 {
         int is_ram = region_intersects(res->start, resource_size(res),
                         "System RAM");
-        resource_size_t key, align_start, align_size;
+        resource_size_t key, align_start, align_size, align_end;
         struct dev_pagemap *pgmap;
         struct page_map *page_map;
         unsigned long pfn;
@@ -309,7 +312,10 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
 
         mutex_lock(&pgmap_lock);
         error = 0;
-        for (key = res->start; key <= res->end; key += SECTION_SIZE) {
+        align_start = res->start & ~(SECTION_SIZE - 1);
+        align_size = ALIGN(resource_size(res), SECTION_SIZE);
+        align_end = align_start + align_size - 1;
+        for (key = align_start; key <= align_end; key += SECTION_SIZE) {
                 struct dev_pagemap *dup;
 
                 rcu_read_lock();
@@ -336,8 +342,6 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
         if (nid < 0)
                 nid = numa_mem_id();
 
-        align_start = res->start & ~(SECTION_SIZE - 1);
-        align_size = ALIGN(resource_size(res), SECTION_SIZE);
         error = arch_add_memory(nid, align_start, align_size, true);
         if (error)
                 goto err_add_memory;
diff --git a/mm/memory.c b/mm/memory.c
index 30991f83d0bf..93ce37989471 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1591,10 +1591,15 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
          * than insert_pfn). If a zero_pfn were inserted into a VM_MIXEDMAP
          * without pte special, it would there be refcounted as a normal page.
          */
-        if (!HAVE_PTE_SPECIAL && pfn_t_valid(pfn)) {
+        if (!HAVE_PTE_SPECIAL && !pfn_t_devmap(pfn) && pfn_t_valid(pfn)) {
                 struct page *page;
 
-                page = pfn_t_to_page(pfn);
+                /*
+                 * At this point we are committed to insert_page()
+                 * regardless of whether the caller specified flags that
+                 * result in pfn_t_has_page() == false.
+                 */
+                page = pfn_to_page(pfn_t_to_pfn(pfn));
                 return insert_page(vma, addr, page, vma->vm_page_prot);
         }
         return insert_pfn(vma, addr, pfn, vma->vm_page_prot);
diff --git a/tools/testing/nvdimm/test/iomap.c b/tools/testing/nvdimm/test/iomap.c
index 7ec7df9e7fc7..0c1a7e65bb81 100644
--- a/tools/testing/nvdimm/test/iomap.c
+++ b/tools/testing/nvdimm/test/iomap.c
@@ -113,7 +113,7 @@ void *__wrap_devm_memremap_pages(struct device *dev, struct resource *res,
 }
 EXPORT_SYMBOL(__wrap_devm_memremap_pages);
 
-pfn_t __wrap_phys_to_pfn_t(dma_addr_t addr, unsigned long flags)
+pfn_t __wrap_phys_to_pfn_t(phys_addr_t addr, unsigned long flags)
 {
         struct nfit_test_resource *nfit_res = get_nfit_res(addr);
 
119 119