Diffstat (limited to 'fs/dax.c')
-rw-r--r--	fs/dax.c	60
1 file changed, 30 insertions(+), 30 deletions(-)
diff --git a/fs/dax.c b/fs/dax.c
index 93bf2f990ace..a86d3cc2b389 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -119,7 +119,8 @@ static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,
 		size_t len;
 		if (pos == max) {
 			unsigned blkbits = inode->i_blkbits;
-			sector_t block = pos >> blkbits;
+			long page = pos >> PAGE_SHIFT;
+			sector_t block = page << (PAGE_SHIFT - blkbits);
 			unsigned first = pos - (block << blkbits);
 			long size;
 
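The hunk above changes how dax_io() picks the starting block for its get_block() request: instead of the block that contains pos directly, it now starts from the first block of the page containing pos, so the request handed to get_block() begins on a page boundary. A minimal userspace sketch of that arithmetic, assuming 4 KiB pages and 1 KiB filesystem blocks (the sample values are illustrative, not taken from the patch):

/*
 * Sketch of the new offset arithmetic in dax_io(); PAGE_SHIFT and the
 * sample offset are assumptions for illustration only.
 */
#include <stdio.h>

#define PAGE_SHIFT	12			/* 4 KiB pages */

int main(void)
{
	unsigned blkbits = 10;			/* 1 KiB filesystem blocks */
	long long pos = 0x1c00;			/* byte offset being accessed */

	long page = pos >> PAGE_SHIFT;				/* page index: 1 */
	long long block = page << (PAGE_SHIFT - blkbits);	/* first block of that page: 4 */
	unsigned first = pos - (block << blkbits);		/* offset into the mapping: 0xc00 */

	/* The old expression, pos >> blkbits, would have given block 7 here. */
	printf("page=%ld block=%lld first=%#x\n", page, block, first);
	return 0;
}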
@@ -284,6 +285,7 @@ static int copy_user_bh(struct page *to, struct buffer_head *bh,
 static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh,
 			struct vm_area_struct *vma, struct vm_fault *vmf)
 {
+	struct address_space *mapping = inode->i_mapping;
 	sector_t sector = bh->b_blocknr << (inode->i_blkbits - 9);
 	unsigned long vaddr = (unsigned long)vmf->virtual_address;
 	void __pmem *addr;
@@ -291,6 +293,8 @@ static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh,
 	pgoff_t size;
 	int error;
 
+	i_mmap_lock_read(mapping);
+
 	/*
 	 * Check truncate didn't happen while we were allocating a block.
 	 * If it did, this block may or may not be still allocated to the
@@ -320,6 +324,8 @@ static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh,
 	error = vm_insert_mixed(vma, vaddr, pfn);
 
  out:
+	i_mmap_unlock_read(mapping);
+
 	return error;
 }
 
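Taken together, the dax_insert_mapping() hunks make the helper responsible for its own locking and demote it to the shared side of i_mmap_rwsem: i_mmap_lock_read() now brackets the truncate re-check and vm_insert_mixed(), so several faults on the same file can proceed concurrently, while paths that must exclude them (such as truncate via unmap_mapping_range(), which takes the lock exclusively) still wait for all readers. A rough userspace model of that relationship, with a pthread rwlock standing in for i_mmap_rwsem and invented function names standing in for the fault and truncate paths:

/*
 * Rough analogy only, not kernel code: a pthread rwlock plays the role
 * of i_mmap_rwsem, fault_path() the role of dax_insert_mapping(), and
 * truncate_path() the role of unmap_mapping_range()'s caller.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t i_mmap_lock = PTHREAD_RWLOCK_INITIALIZER;

static void *fault_path(void *arg)
{
	pthread_rwlock_rdlock(&i_mmap_lock);	/* i_mmap_lock_read() */
	printf("fault %ld: holding the read side; other faults may too\n",
			(long)arg);
	pthread_rwlock_unlock(&i_mmap_lock);	/* i_mmap_unlock_read() */
	return NULL;
}

static void truncate_path(void)
{
	pthread_rwlock_wrlock(&i_mmap_lock);	/* excludes every faulter */
	printf("truncate: all readers drained, tearing down mappings\n");
	pthread_rwlock_unlock(&i_mmap_lock);
}

int main(void)
{
	pthread_t t1, t2;

	/* Two faulting threads may hold the read side at the same time. */
	pthread_create(&t1, NULL, fault_path, (void *)0L);
	pthread_create(&t2, NULL, fault_path, (void *)1L);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);

	truncate_path();
	return 0;
}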
@@ -381,17 +387,15 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
 			 * from a read fault and we've raced with a truncate
 			 */
 			error = -EIO;
-			goto unlock;
+			goto unlock_page;
 		}
-	} else {
-		i_mmap_lock_write(mapping);
 	}
 
 	error = get_block(inode, block, &bh, 0);
 	if (!error && (bh.b_size < PAGE_SIZE))
 		error = -EIO;		/* fs corruption? */
 	if (error)
-		goto unlock;
+		goto unlock_page;
 
 	if (!buffer_mapped(&bh) && !buffer_unwritten(&bh) && !vmf->cow_page) {
 		if (vmf->flags & FAULT_FLAG_WRITE) {
@@ -402,9 +406,8 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
 			if (!error && (bh.b_size < PAGE_SIZE))
 				error = -EIO;
 			if (error)
-				goto unlock;
+				goto unlock_page;
 		} else {
-			i_mmap_unlock_write(mapping);
 			return dax_load_hole(mapping, page, vmf);
 		}
 	}
@@ -416,15 +419,17 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
 		else
 			clear_user_highpage(new_page, vaddr);
 		if (error)
-			goto unlock;
+			goto unlock_page;
 		vmf->page = page;
 		if (!page) {
+			i_mmap_lock_read(mapping);
 			/* Check we didn't race with truncate */
 			size = (i_size_read(inode) + PAGE_SIZE - 1) >>
 								PAGE_SHIFT;
 			if (vmf->pgoff >= size) {
+				i_mmap_unlock_read(mapping);
 				error = -EIO;
-				goto unlock;
+				goto out;
 			}
 		}
 		return VM_FAULT_LOCKED;
@@ -460,8 +465,6 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
 			WARN_ON_ONCE(!(vmf->flags & FAULT_FLAG_WRITE));
 	}
 
-	if (!page)
-		i_mmap_unlock_write(mapping);
  out:
 	if (error == -ENOMEM)
 		return VM_FAULT_OOM | major;
@@ -470,14 +473,11 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
 		return VM_FAULT_SIGBUS | major;
 	return VM_FAULT_NOPAGE | major;
 
- unlock:
+ unlock_page:
 	if (page) {
 		unlock_page(page);
 		page_cache_release(page);
-	} else {
-		i_mmap_unlock_write(mapping);
 	}
-
 	goto out;
 }
 EXPORT_SYMBOL(__dax_fault);
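With the helper doing its own locking, the error paths of __dax_fault() no longer have anything to drop besides the page reference, and the unwind label is renamed from unlock to unlock_page to say so: unlock_page: releases only the page lock and reference before jumping back to the common out: exit. The shape is the usual kernel goto-unwind idiom, where each label undoes exactly the state taken before the jump; a small standalone illustration (the resources and names here are invented, not from dax.c):

/*
 * Standalone illustration of labelled unwinding: each label releases
 * only what was acquired before the jump to it.  The file and buffer
 * are made-up resources for the example.
 */
#include <stdio.h>
#include <stdlib.h>

static int process(const char *path)
{
	char *buf;
	int error = 0;

	FILE *f = fopen(path, "r");
	if (!f)
		return -1;		/* nothing acquired yet, plain return */

	buf = malloc(4096);
	if (!buf) {
		error = -1;
		goto close_file;	/* only the file needs releasing */
	}

	if (fread(buf, 1, 4096, f) == 0)
		error = -1;		/* fall through: both get released */

	free(buf);
 close_file:
	fclose(f);
	return error;
}

int main(void)
{
	return process("/etc/hostname") ? 1 : 0;
}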
@@ -555,10 +555,10 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
 	block = (sector_t)pgoff << (PAGE_SHIFT - blkbits);
 
 	bh.b_size = PMD_SIZE;
-	i_mmap_lock_write(mapping);
 	length = get_block(inode, block, &bh, write);
 	if (length)
 		return VM_FAULT_SIGBUS;
+	i_mmap_lock_read(mapping);
 
 	/*
 	 * If the filesystem isn't willing to tell us the length of a hole,
@@ -568,24 +568,14 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
 	if (!buffer_size_valid(&bh) || bh.b_size < PMD_SIZE)
 		goto fallback;
 
-	if (buffer_unwritten(&bh) || buffer_new(&bh)) {
-		int i;
-		for (i = 0; i < PTRS_PER_PMD; i++)
-			clear_pmem(kaddr + i * PAGE_SIZE, PAGE_SIZE);
-		wmb_pmem();
-		count_vm_event(PGMAJFAULT);
-		mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
-		result |= VM_FAULT_MAJOR;
-	}
-
 	/*
 	 * If we allocated new storage, make sure no process has any
 	 * zero pages covering this hole
 	 */
 	if (buffer_new(&bh)) {
-		i_mmap_unlock_write(mapping);
+		i_mmap_unlock_read(mapping);
 		unmap_mapping_range(mapping, pgoff << PAGE_SHIFT, PMD_SIZE, 0);
-		i_mmap_lock_write(mapping);
+		i_mmap_lock_read(mapping);
 	}
 
 	/*
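Because only the shared side of i_mmap_rwsem is held here, the buffer_new() branch above has to let go of it around unmap_mapping_range(), presumably because that function acquires the same lock itself, and then re-take the read side before carrying on. A tiny pthread model of that drop-and-reacquire pattern (an analogy with invented names, not kernel code):

/*
 * Analogy only: the caller cannot hold even the read side of the lock
 * across a callee that takes the same lock, so it drops and re-takes it.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t i_mmap_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Stand-in for unmap_mapping_range(): acquires the lock on its own. */
static void unmap_range(void)
{
	pthread_rwlock_wrlock(&i_mmap_lock);
	printf("zero-page mappings over the new extent torn down\n");
	pthread_rwlock_unlock(&i_mmap_lock);
}

int main(void)
{
	pthread_rwlock_rdlock(&i_mmap_lock);	/* i_mmap_lock_read() */

	/* buffer_new() case: drop the read side so unmap_range() can lock. */
	pthread_rwlock_unlock(&i_mmap_lock);	/* i_mmap_unlock_read() */
	unmap_range();
	pthread_rwlock_rdlock(&i_mmap_lock);	/* i_mmap_lock_read() again */

	/* ... go on to install the PMD mapping under the read side ... */
	pthread_rwlock_unlock(&i_mmap_lock);
	return 0;
}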
@@ -632,15 +622,25 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
 		if ((length < PMD_SIZE) || (pfn & PG_PMD_COLOUR))
 			goto fallback;
 
+		if (buffer_unwritten(&bh) || buffer_new(&bh)) {
+			int i;
+			for (i = 0; i < PTRS_PER_PMD; i++)
+				clear_pmem(kaddr + i * PAGE_SIZE, PAGE_SIZE);
+			wmb_pmem();
+			count_vm_event(PGMAJFAULT);
+			mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
+			result |= VM_FAULT_MAJOR;
+		}
+
 		result |= vmf_insert_pfn_pmd(vma, address, pmd, pfn, write);
 	}
 
  out:
+	i_mmap_unlock_read(mapping);
+
 	if (buffer_unwritten(&bh))
 		complete_unwritten(&bh, !(result & VM_FAULT_ERROR));
 
-	i_mmap_unlock_write(mapping);
-
 	return result;
 
  fallback:
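The last hunk moves the clear_pmem() loop down into the branch where kaddr and pfn have already been set up and the size and alignment checks have passed, so a newly allocated extent is zeroed only when a huge-page mapping will actually be installed; it also makes the common out: path drop the read lock taken earlier. The loop itself clears one PMD worth of persistent memory in page-sized steps; assuming the conventional x86-64 geometry, that is 512 pages covering 2 MiB, as the arithmetic below shows (the constants are the usual values, assumed for illustration):

/*
 * Arithmetic behind the clear_pmem() loop: PTRS_PER_PMD chunks of
 * PAGE_SIZE add up to PMD_SIZE.  Constants are assumed x86-64 values.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)		/* 4 KiB */
#define PTRS_PER_PMD	512				/* PTEs mapped by one PMD */
#define PMD_SIZE	(PTRS_PER_PMD * PAGE_SIZE)	/* 2 MiB */

int main(void)
{
	unsigned long cleared = 0;
	int i;

	/* Mirrors the loop in the hunk: i counts pages, not bytes. */
	for (i = 0; i < PTRS_PER_PMD; i++)
		cleared += PAGE_SIZE;	/* clear_pmem(kaddr + i * PAGE_SIZE, PAGE_SIZE) */

	printf("%d pages, %lu bytes, PMD_SIZE=%lu\n", i, cleared, PMD_SIZE);
	return 0;
}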