Diffstat (limited to 'fs/xfs/xfs_file.c')
 fs/xfs/xfs_file.c | 79 ++++++++-----------------------
 1 file changed, 17 insertions(+), 62 deletions(-)
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index b927ea9abe33..c68517b0f248 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -333,10 +333,7 @@ xfs_file_dax_read(
 	struct kiocb		*iocb,
 	struct iov_iter		*to)
 {
-	struct address_space	*mapping = iocb->ki_filp->f_mapping;
-	struct inode		*inode = mapping->host;
-	struct xfs_inode	*ip = XFS_I(inode);
-	struct iov_iter		data = *to;
+	struct xfs_inode	*ip = XFS_I(iocb->ki_filp->f_mapping->host);
 	size_t			count = iov_iter_count(to);
 	ssize_t			ret = 0;
 
@@ -346,11 +343,7 @@ xfs_file_dax_read(
 		return 0; /* skip atime */
 
 	xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
-	ret = dax_do_io(iocb, inode, &data, xfs_get_blocks_direct, NULL, 0);
-	if (ret > 0) {
-		iocb->ki_pos += ret;
-		iov_iter_advance(to, ret);
-	}
+	ret = iomap_dax_rw(iocb, to, &xfs_iomap_ops);
 	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
 
 	file_accessed(iocb->ki_filp);
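Both DAX paths now funnel through iomap_dax_rw() (later renamed dax_iomap_rw()), which drives the filesystem through an iomap_ops vtable instead of a get_block_t callback, and which advances iocb->ki_pos and the iov_iter itself; that is why the manual "iocb->ki_pos += ret; iov_iter_advance()" bookkeeping above can go. For orientation, a sketch of the ops contract being plugged in, as it looked in include/linux/iomap.h around this series (reference only, not part of this patch; the XFS implementations live in fs/xfs/xfs_iomap.c):

	struct iomap_ops {
		/* map the range starting at pos, filling in *iomap */
		int (*iomap_begin)(struct inode *inode, loff_t pos,
				loff_t length, unsigned flags,
				struct iomap *iomap);
		/* finish/undo a mapping; written is the bytes transferred */
		int (*iomap_end)(struct inode *inode, loff_t pos,
				loff_t length, ssize_t written,
				unsigned flags, struct iomap *iomap);
	};

	struct iomap_ops xfs_iomap_ops = {
		.iomap_begin		= xfs_file_iomap_begin,
		.iomap_end		= xfs_file_iomap_end,
	};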
@@ -712,70 +705,32 @@ xfs_file_dax_write(
 	struct kiocb		*iocb,
 	struct iov_iter		*from)
 {
-	struct address_space	*mapping = iocb->ki_filp->f_mapping;
-	struct inode		*inode = mapping->host;
+	struct inode		*inode = iocb->ki_filp->f_mapping->host;
 	struct xfs_inode	*ip = XFS_I(inode);
-	struct xfs_mount	*mp = ip->i_mount;
-	ssize_t			ret = 0;
-	int			unaligned_io = 0;
-	int			iolock;
-	struct iov_iter		data;
+	int			iolock = XFS_IOLOCK_EXCL;
+	ssize_t			ret, error = 0;
+	size_t			count;
+	loff_t			pos;
 
-	/* "unaligned" here means not aligned to a filesystem block */
-	if ((iocb->ki_pos & mp->m_blockmask) ||
-	    ((iocb->ki_pos + iov_iter_count(from)) & mp->m_blockmask)) {
-		unaligned_io = 1;
-		iolock = XFS_IOLOCK_EXCL;
-	} else if (mapping->nrpages) {
-		iolock = XFS_IOLOCK_EXCL;
-	} else {
-		iolock = XFS_IOLOCK_SHARED;
-	}
 	xfs_rw_ilock(ip, iolock);
-
 	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
 	if (ret)
 		goto out;
 
-	/*
-	 * Yes, even DAX files can have page cache attached to them:  A zeroed
-	 * page is inserted into the pagecache when we have to serve a write
-	 * fault on a hole.  It should never be dirtied and can simply be
-	 * dropped from the pagecache once we get real data for the page.
-	 *
-	 * XXX: This is racy against mmap, and there's nothing we can do about
-	 * it. dax_do_io() should really do this invalidation internally as
-	 * it will know if we've allocated over a hole for this specific IO and
-	 * if so it needs to update the mapping tree and invalidate existing
-	 * PTEs over the newly allocated range.  Remove this invalidation when
-	 * dax_do_io() is fixed up.
-	 */
-	if (mapping->nrpages) {
-		loff_t end = iocb->ki_pos + iov_iter_count(from) - 1;
+	pos = iocb->ki_pos;
+	count = iov_iter_count(from);
 
-		ret = invalidate_inode_pages2_range(mapping,
-						    iocb->ki_pos >> PAGE_SHIFT,
-						    end >> PAGE_SHIFT);
-		WARN_ON_ONCE(ret);
-	}
+	trace_xfs_file_dax_write(ip, count, pos);
 
-	if (iolock == XFS_IOLOCK_EXCL && !unaligned_io) {
-		xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
-		iolock = XFS_IOLOCK_SHARED;
+	ret = iomap_dax_rw(iocb, from, &xfs_iomap_ops);
+	if (ret > 0 && iocb->ki_pos > i_size_read(inode)) {
+		i_size_write(inode, iocb->ki_pos);
+		error = xfs_setfilesize(ip, pos, ret);
 	}
 
-	trace_xfs_file_dax_write(ip, iov_iter_count(from), iocb->ki_pos);
-
-	data = *from;
-	ret = dax_do_io(iocb, inode, &data, xfs_get_blocks_direct,
-			xfs_end_io_direct_write, 0);
-	if (ret > 0) {
-		iocb->ki_pos += ret;
-		iov_iter_advance(from, ret);
-	}
 out:
 	xfs_rw_iunlock(ip, iolock);
-	return ret;
+	return error ? error : ret;
 }
 
 STATIC ssize_t
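Note on the write path: iomap_dax_rw() is fully synchronous, so the xfs_end_io_direct_write completion that dax_do_io() was given (and which handled the on-disk size update) has no equivalent here; an extending write instead updates the size inline before returning. Distilled into a stand-alone helper for clarity (the function name is hypothetical; i_size_read()/i_size_write(), VFS_I() and xfs_setfilesize() are the real kernel/XFS primitives used in the hunk above):

	/*
	 * Hypothetical helper mirroring the post-write logic above: if the
	 * write went past the old EOF, raise the in-core size first so that
	 * concurrent readers see it, then transactionally log the new
	 * on-disk size.
	 */
	static int
	xfs_dax_write_extend_isize(
		struct xfs_inode	*ip,
		struct kiocb		*iocb,
		loff_t			pos,	/* ki_pos before the write */
		ssize_t			written)	/* bytes written */
	{
		struct inode		*inode = VFS_I(ip);

		if (written <= 0 || iocb->ki_pos <= i_size_read(inode))
			return 0;
		i_size_write(inode, iocb->ki_pos);
		return xfs_setfilesize(ip, pos, written);
	}

At the out: label, a size-update failure deliberately wins over the byte count ("return error ? error : ret;"), so the caller learns of the log error even though the data itself reached the media.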
@@ -1514,7 +1469,7 @@ xfs_filemap_page_mkwrite(
 	xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
 
 	if (IS_DAX(inode)) {
-		ret = dax_mkwrite(vma, vmf, xfs_get_blocks_dax_fault);
+		ret = iomap_dax_fault(vma, vmf, &xfs_iomap_ops);
 	} else {
 		ret = iomap_page_mkwrite(vma, vmf, &xfs_iomap_ops);
 		ret = block_page_mkwrite_return(ret);
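dax_mkwrite() was only a thin alias for dax_fault() (in this era include/linux/dax.h defined it as, roughly, "#define dax_mkwrite(vma, vmf, gb) dax_fault(vma, vmf, gb)"; treat the exact form as an assumption), so the write-fault and read-fault DAX branches both collapse onto the single iomap_dax_fault() entry point, as the next hunk shows.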
@@ -1548,7 +1503,7 @@ xfs_filemap_fault(
 	 * changes to xfs_get_blocks_direct() to map unwritten extent
 	 * ioend for conversion on read-only mappings.
 	 */
-		ret = dax_fault(vma, vmf, xfs_get_blocks_dax_fault);
+		ret = iomap_dax_fault(vma, vmf, &xfs_iomap_ops);
 	} else
 		ret = filemap_fault(vma, vmf);
 	xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
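For context on where these handlers are invoked from: they are wired up through the file's vm_operations_struct, which this patch leaves untouched. A sketch reconstructed from the same era of fs/xfs/xfs_file.c (field set abbreviated, reference only):

	static const struct vm_operations_struct xfs_file_vm_ops = {
		.fault		= xfs_filemap_fault,
		.pmd_fault	= xfs_filemap_pmd_fault,
		.page_mkwrite	= xfs_filemap_page_mkwrite,
		.pfn_mkwrite	= xfs_filemap_pfn_mkwrite,
	};

With this change, both the read/write syscall path and the fault path resolve extents through the same xfs_iomap_ops, retiring the DAX uses of xfs_get_blocks_direct()/xfs_get_blocks_dax_fault() in this file.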