about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorJan Kara <jack@suse.cz>2016-05-11 05:58:53 -0400
committerVishal Verma <vishal.l.verma@intel.com>2016-05-17 02:44:10 -0400
commit7795bec89ebf927ea3ad9ed5f396c227e5c73271 (patch)
tree4f9292261310e20cc62dea91ee9dc44aa097981b
parentc3d98e39d5b37320b15f227686575d58f676e6ef (diff)
dax: Remove redundant inode size checks
Callers of dax fault handlers must make sure these calls cannot race with truncate. Thus it is enough to check inode size when entering the function and we don't have to recheck it again later in the handler. Note that inode size itself can be decreased while the fault handler runs but filesystem locking protects against any radix tree or block mapping information changes resulting from the truncate and that is what we really care about. Reviewed-by: Ross Zwisler <ross.zwisler@linux.intel.com> Signed-off-by: Jan Kara <jack@suse.cz> Signed-off-by: Vishal Verma <vishal.l.verma@intel.com>
-rw-r--r--fs/dax.c60
1 file changed, 1 insertion(+), 59 deletions(-)
diff --git a/fs/dax.c b/fs/dax.c
index 237581441bc1..9bc6624251b4 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -305,20 +305,11 @@ EXPORT_SYMBOL_GPL(dax_do_io);
305static int dax_load_hole(struct address_space *mapping, struct page *page, 305static int dax_load_hole(struct address_space *mapping, struct page *page,
306 struct vm_fault *vmf) 306 struct vm_fault *vmf)
307{ 307{
308 unsigned long size;
309 struct inode *inode = mapping->host;
310 if (!page) 308 if (!page)
311 page = find_or_create_page(mapping, vmf->pgoff, 309 page = find_or_create_page(mapping, vmf->pgoff,
312 GFP_KERNEL | __GFP_ZERO); 310 GFP_KERNEL | __GFP_ZERO);
313 if (!page) 311 if (!page)
314 return VM_FAULT_OOM; 312 return VM_FAULT_OOM;
315 /* Recheck i_size under page lock to avoid truncate race */
316 size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
317 if (vmf->pgoff >= size) {
318 unlock_page(page);
319 put_page(page);
320 return VM_FAULT_SIGBUS;
321 }
322 313
323 vmf->page = page; 314 vmf->page = page;
324 return VM_FAULT_LOCKED; 315 return VM_FAULT_LOCKED;
@@ -549,24 +540,10 @@ static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh,
549 .sector = to_sector(bh, inode), 540 .sector = to_sector(bh, inode),
550 .size = bh->b_size, 541 .size = bh->b_size,
551 }; 542 };
552 pgoff_t size;
553 int error; 543 int error;
554 544
555 i_mmap_lock_read(mapping); 545 i_mmap_lock_read(mapping);
556 546
557 /*
558 * Check truncate didn't happen while we were allocating a block.
559 * If it did, this block may or may not be still allocated to the
560 * file. We can't tell the filesystem to free it because we can't
561 * take i_mutex here. In the worst case, the file still has blocks
562 * allocated past the end of the file.
563 */
564 size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
565 if (unlikely(vmf->pgoff >= size)) {
566 error = -EIO;
567 goto out;
568 }
569
570 if (dax_map_atomic(bdev, &dax) < 0) { 547 if (dax_map_atomic(bdev, &dax) < 0) {
571 error = PTR_ERR(dax.addr); 548 error = PTR_ERR(dax.addr);
572 goto out; 549 goto out;
@@ -632,15 +609,6 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
632 put_page(page); 609 put_page(page);
633 goto repeat; 610 goto repeat;
634 } 611 }
635 size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
636 if (unlikely(vmf->pgoff >= size)) {
637 /*
638 * We have a struct page covering a hole in the file
639 * from a read fault and we've raced with a truncate
640 */
641 error = -EIO;
642 goto unlock_page;
643 }
644 } 612 }
645 613
646 error = get_block(inode, block, &bh, 0); 614 error = get_block(inode, block, &bh, 0);
@@ -673,17 +641,8 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
673 if (error) 641 if (error)
674 goto unlock_page; 642 goto unlock_page;
675 vmf->page = page; 643 vmf->page = page;
676 if (!page) { 644 if (!page)
677 i_mmap_lock_read(mapping); 645 i_mmap_lock_read(mapping);
678 /* Check we didn't race with truncate */
679 size = (i_size_read(inode) + PAGE_SIZE - 1) >>
680 PAGE_SHIFT;
681 if (vmf->pgoff >= size) {
682 i_mmap_unlock_read(mapping);
683 error = -EIO;
684 goto out;
685 }
686 }
687 return VM_FAULT_LOCKED; 646 return VM_FAULT_LOCKED;
688 } 647 }
689 648
@@ -861,23 +820,6 @@ int __dax_pmd_fault(struct vm_area_struct *vma, unsigned long address,
861 820
862 i_mmap_lock_read(mapping); 821 i_mmap_lock_read(mapping);
863 822
864 /*
865 * If a truncate happened while we were allocating blocks, we may
866 * leave blocks allocated to the file that are beyond EOF. We can't
867 * take i_mutex here, so just leave them hanging; they'll be freed
868 * when the file is deleted.
869 */
870 size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
871 if (pgoff >= size) {
872 result = VM_FAULT_SIGBUS;
873 goto out;
874 }
875 if ((pgoff | PG_PMD_COLOUR) >= size) {
876 dax_pmd_dbg(&bh, address,
877 "offset + huge page size > file size");
878 goto fallback;
879 }
880
881 if (!write && !buffer_mapped(&bh) && buffer_uptodate(&bh)) { 823 if (!write && !buffer_mapped(&bh) && buffer_uptodate(&bh)) {
882 spinlock_t *ptl; 824 spinlock_t *ptl;
883 pmd_t entry; 825 pmd_t entry;