Diffstat (limited to 'fs/dax.c')
-rw-r--r--	fs/dax.c	37
1 file changed, 20 insertions(+), 17 deletions(-)
diff --git a/fs/dax.c b/fs/dax.c
index e07fecc93f80..7c634ac797b1 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -35,7 +35,7 @@ int dax_clear_blocks(struct inode *inode, sector_t block, long size)
 
 	might_sleep();
 	do {
-		void *addr;
+		void __pmem *addr;
 		unsigned long pfn;
 		long count;
 
@@ -47,7 +47,7 @@ int dax_clear_blocks(struct inode *inode, sector_t block, long size)
 			unsigned pgsz = PAGE_SIZE - offset_in_page(addr);
 			if (pgsz > count)
 				pgsz = count;
-			clear_pmem((void __pmem *)addr, pgsz);
+			clear_pmem(addr, pgsz);
 			addr += pgsz;
 			size -= pgsz;
 			count -= pgsz;
@@ -62,7 +62,8 @@ int dax_clear_blocks(struct inode *inode, sector_t block, long size)
 }
 EXPORT_SYMBOL_GPL(dax_clear_blocks);
 
-static long dax_get_addr(struct buffer_head *bh, void **addr, unsigned blkbits)
+static long dax_get_addr(struct buffer_head *bh, void __pmem **addr,
+		unsigned blkbits)
 {
 	unsigned long pfn;
 	sector_t sector = bh->b_blocknr << (blkbits - 9);
@@ -70,15 +71,15 @@ static long dax_get_addr(struct buffer_head *bh, void **addr, unsigned blkbits)
 }
 
 /* the clear_pmem() calls are ordered by a wmb_pmem() in the caller */
-static void dax_new_buf(void *addr, unsigned size, unsigned first, loff_t pos,
-			loff_t end)
+static void dax_new_buf(void __pmem *addr, unsigned size, unsigned first,
+		loff_t pos, loff_t end)
 {
 	loff_t final = end - pos + first; /* The final byte of the buffer */
 
 	if (first > 0)
-		clear_pmem((void __pmem *)addr, first);
+		clear_pmem(addr, first);
 	if (final < size)
-		clear_pmem((void __pmem *)addr + final, size - final);
+		clear_pmem(addr + final, size - final);
 }
 
 static bool buffer_written(struct buffer_head *bh)
@@ -106,7 +107,7 @@ static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,
 	loff_t pos = start;
 	loff_t max = start;
 	loff_t bh_max = start;
-	void *addr;
+	void __pmem *addr;
 	bool hole = false;
 	bool need_wmb = false;
 
@@ -158,11 +159,11 @@ static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,
 		}
 
 		if (iov_iter_rw(iter) == WRITE) {
-			len = copy_from_iter_pmem((void __pmem *)addr,
-					max - pos, iter);
+			len = copy_from_iter_pmem(addr, max - pos, iter);
 			need_wmb = true;
 		} else if (!hole)
-			len = copy_to_iter(addr, max - pos, iter);
+			len = copy_to_iter((void __force *)addr, max - pos,
+					iter);
 		else
 			len = iov_iter_zero(max - pos, iter);
 
@@ -268,11 +269,13 @@ static int dax_load_hole(struct address_space *mapping, struct page *page,
 static int copy_user_bh(struct page *to, struct buffer_head *bh,
 			unsigned blkbits, unsigned long vaddr)
 {
-	void *vfrom, *vto;
+	void __pmem *vfrom;
+	void *vto;
+
 	if (dax_get_addr(bh, &vfrom, blkbits) < 0)
 		return -EIO;
 	vto = kmap_atomic(to);
-	copy_user_page(vto, vfrom, vaddr, to);
+	copy_user_page(vto, (void __force *)vfrom, vaddr, to);
 	kunmap_atomic(vto);
 	return 0;
 }
@@ -283,7 +286,7 @@ static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh,
 	struct address_space *mapping = inode->i_mapping;
 	sector_t sector = bh->b_blocknr << (inode->i_blkbits - 9);
 	unsigned long vaddr = (unsigned long)vmf->virtual_address;
-	void *addr;
+	void __pmem *addr;
 	unsigned long pfn;
 	pgoff_t size;
 	int error;
@@ -312,7 +315,7 @@ static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh,
 	}
 
 	if (buffer_unwritten(bh) || buffer_new(bh)) {
-		clear_pmem((void __pmem *)addr, PAGE_SIZE);
+		clear_pmem(addr, PAGE_SIZE);
 		wmb_pmem();
 	}
 
@@ -548,11 +551,11 @@ int dax_zero_page_range(struct inode *inode, loff_t from, unsigned length,
 	if (err < 0)
 		return err;
 	if (buffer_written(&bh)) {
-		void *addr;
+		void __pmem *addr;
 		err = dax_get_addr(&bh, &addr, inode->i_blkbits);
 		if (err < 0)
 			return err;
-		clear_pmem((void __pmem *)addr + offset, length);
+		clear_pmem(addr + offset, length);
 		wmb_pmem();
 	}
 