diff options
author | Christoph Hellwig <hch@infradead.org> | 2011-12-18 15:00:14 -0500 |
---|---|---|
committer | Ben Myers <bpm@sgi.com> | 2012-01-17 16:12:33 -0500 |
commit | d060646436233912178e6b9e3a7f30a41214220f (patch) | |
tree | a460c8e2d89cdcf08d574f497c421decf36fe018 /fs/xfs | |
parent | 5bf1f26227a59b9634e95eb3c7c012b766e5e6a0 (diff) |
xfs: cleanup xfs_file_aio_write
With all the size field updates out of the way xfs_file_aio_write can
be further simplified by pushing all iolock handling into
xfs_file_dio_aio_write and xfs_file_buffered_aio_write and using
the generic helper generic_write_sync for synchronous writes.
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Ben Myers <bpm@sgi.com>
Diffstat (limited to 'fs/xfs')
-rw-r--r-- | fs/xfs/xfs_file.c | 82 |
1 files changed, 37 insertions, 45 deletions
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index 134ff2fe4f4d..7e5bc872f2b4 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c | |||
@@ -724,8 +724,7 @@ xfs_file_dio_aio_write( | |||
724 | const struct iovec *iovp, | 724 | const struct iovec *iovp, |
725 | unsigned long nr_segs, | 725 | unsigned long nr_segs, |
726 | loff_t pos, | 726 | loff_t pos, |
727 | size_t ocount, | 727 | size_t ocount) |
728 | int *iolock) | ||
729 | { | 728 | { |
730 | struct file *file = iocb->ki_filp; | 729 | struct file *file = iocb->ki_filp; |
731 | struct address_space *mapping = file->f_mapping; | 730 | struct address_space *mapping = file->f_mapping; |
@@ -735,10 +734,10 @@ xfs_file_dio_aio_write( | |||
735 | ssize_t ret = 0; | 734 | ssize_t ret = 0; |
736 | size_t count = ocount; | 735 | size_t count = ocount; |
737 | int unaligned_io = 0; | 736 | int unaligned_io = 0; |
737 | int iolock; | ||
738 | struct xfs_buftarg *target = XFS_IS_REALTIME_INODE(ip) ? | 738 | struct xfs_buftarg *target = XFS_IS_REALTIME_INODE(ip) ? |
739 | mp->m_rtdev_targp : mp->m_ddev_targp; | 739 | mp->m_rtdev_targp : mp->m_ddev_targp; |
740 | 740 | ||
741 | *iolock = 0; | ||
742 | if ((pos & target->bt_smask) || (count & target->bt_smask)) | 741 | if ((pos & target->bt_smask) || (count & target->bt_smask)) |
743 | return -XFS_ERROR(EINVAL); | 742 | return -XFS_ERROR(EINVAL); |
744 | 743 | ||
@@ -753,31 +752,31 @@ xfs_file_dio_aio_write( | |||
753 | * EOF zeroing cases and fill out the new inode size as appropriate. | 752 | * EOF zeroing cases and fill out the new inode size as appropriate. |
754 | */ | 753 | */ |
755 | if (unaligned_io || mapping->nrpages) | 754 | if (unaligned_io || mapping->nrpages) |
756 | *iolock = XFS_IOLOCK_EXCL; | 755 | iolock = XFS_IOLOCK_EXCL; |
757 | else | 756 | else |
758 | *iolock = XFS_IOLOCK_SHARED; | 757 | iolock = XFS_IOLOCK_SHARED; |
759 | xfs_rw_ilock(ip, *iolock); | 758 | xfs_rw_ilock(ip, iolock); |
760 | 759 | ||
761 | /* | 760 | /* |
762 | * Recheck if there are cached pages that need invalidate after we got | 761 | * Recheck if there are cached pages that need invalidate after we got |
763 | * the iolock to protect against other threads adding new pages while | 762 | * the iolock to protect against other threads adding new pages while |
764 | * we were waiting for the iolock. | 763 | * we were waiting for the iolock. |
765 | */ | 764 | */ |
766 | if (mapping->nrpages && *iolock == XFS_IOLOCK_SHARED) { | 765 | if (mapping->nrpages && iolock == XFS_IOLOCK_SHARED) { |
767 | xfs_rw_iunlock(ip, *iolock); | 766 | xfs_rw_iunlock(ip, iolock); |
768 | *iolock = XFS_IOLOCK_EXCL; | 767 | iolock = XFS_IOLOCK_EXCL; |
769 | xfs_rw_ilock(ip, *iolock); | 768 | xfs_rw_ilock(ip, iolock); |
770 | } | 769 | } |
771 | 770 | ||
772 | ret = xfs_file_aio_write_checks(file, &pos, &count, iolock); | 771 | ret = xfs_file_aio_write_checks(file, &pos, &count, &iolock); |
773 | if (ret) | 772 | if (ret) |
774 | return ret; | 773 | goto out; |
775 | 774 | ||
776 | if (mapping->nrpages) { | 775 | if (mapping->nrpages) { |
777 | ret = -xfs_flushinval_pages(ip, (pos & PAGE_CACHE_MASK), -1, | 776 | ret = -xfs_flushinval_pages(ip, (pos & PAGE_CACHE_MASK), -1, |
778 | FI_REMAPF_LOCKED); | 777 | FI_REMAPF_LOCKED); |
779 | if (ret) | 778 | if (ret) |
780 | return ret; | 779 | goto out; |
781 | } | 780 | } |
782 | 781 | ||
783 | /* | 782 | /* |
@@ -786,15 +785,18 @@ xfs_file_dio_aio_write( | |||
786 | */ | 785 | */ |
787 | if (unaligned_io) | 786 | if (unaligned_io) |
788 | inode_dio_wait(inode); | 787 | inode_dio_wait(inode); |
789 | else if (*iolock == XFS_IOLOCK_EXCL) { | 788 | else if (iolock == XFS_IOLOCK_EXCL) { |
790 | xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL); | 789 | xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL); |
791 | *iolock = XFS_IOLOCK_SHARED; | 790 | iolock = XFS_IOLOCK_SHARED; |
792 | } | 791 | } |
793 | 792 | ||
794 | trace_xfs_file_direct_write(ip, count, iocb->ki_pos, 0); | 793 | trace_xfs_file_direct_write(ip, count, iocb->ki_pos, 0); |
795 | ret = generic_file_direct_write(iocb, iovp, | 794 | ret = generic_file_direct_write(iocb, iovp, |
796 | &nr_segs, pos, &iocb->ki_pos, count, ocount); | 795 | &nr_segs, pos, &iocb->ki_pos, count, ocount); |
797 | 796 | ||
797 | out: | ||
798 | xfs_rw_iunlock(ip, iolock); | ||
799 | |||
798 | /* No fallback to buffered IO on errors for XFS. */ | 800 | /* No fallback to buffered IO on errors for XFS. */ |
799 | ASSERT(ret < 0 || ret == count); | 801 | ASSERT(ret < 0 || ret == count); |
800 | return ret; | 802 | return ret; |
@@ -806,8 +808,7 @@ xfs_file_buffered_aio_write( | |||
806 | const struct iovec *iovp, | 808 | const struct iovec *iovp, |
807 | unsigned long nr_segs, | 809 | unsigned long nr_segs, |
808 | loff_t pos, | 810 | loff_t pos, |
809 | size_t ocount, | 811 | size_t ocount) |
810 | int *iolock) | ||
811 | { | 812 | { |
812 | struct file *file = iocb->ki_filp; | 813 | struct file *file = iocb->ki_filp; |
813 | struct address_space *mapping = file->f_mapping; | 814 | struct address_space *mapping = file->f_mapping; |
@@ -815,14 +816,14 @@ xfs_file_buffered_aio_write( | |||
815 | struct xfs_inode *ip = XFS_I(inode); | 816 | struct xfs_inode *ip = XFS_I(inode); |
816 | ssize_t ret; | 817 | ssize_t ret; |
817 | int enospc = 0; | 818 | int enospc = 0; |
819 | int iolock = XFS_IOLOCK_EXCL; | ||
818 | size_t count = ocount; | 820 | size_t count = ocount; |
819 | 821 | ||
820 | *iolock = XFS_IOLOCK_EXCL; | 822 | xfs_rw_ilock(ip, iolock); |
821 | xfs_rw_ilock(ip, *iolock); | ||
822 | 823 | ||
823 | ret = xfs_file_aio_write_checks(file, &pos, &count, iolock); | 824 | ret = xfs_file_aio_write_checks(file, &pos, &count, &iolock); |
824 | if (ret) | 825 | if (ret) |
825 | return ret; | 826 | goto out; |
826 | 827 | ||
827 | /* We can write back this queue in page reclaim */ | 828 | /* We can write back this queue in page reclaim */ |
828 | current->backing_dev_info = mapping->backing_dev_info; | 829 | current->backing_dev_info = mapping->backing_dev_info; |
@@ -836,13 +837,15 @@ write_retry: | |||
836 | * page locks and retry *once* | 837 | * page locks and retry *once* |
837 | */ | 838 | */ |
838 | if (ret == -ENOSPC && !enospc) { | 839 | if (ret == -ENOSPC && !enospc) { |
839 | ret = -xfs_flush_pages(ip, 0, -1, 0, FI_NONE); | ||
840 | if (ret) | ||
841 | return ret; | ||
842 | enospc = 1; | 840 | enospc = 1; |
843 | goto write_retry; | 841 | ret = -xfs_flush_pages(ip, 0, -1, 0, FI_NONE); |
842 | if (!ret) | ||
843 | goto write_retry; | ||
844 | } | 844 | } |
845 | |||
845 | current->backing_dev_info = NULL; | 846 | current->backing_dev_info = NULL; |
847 | out: | ||
848 | xfs_rw_iunlock(ip, iolock); | ||
846 | return ret; | 849 | return ret; |
847 | } | 850 | } |
848 | 851 | ||
@@ -858,7 +861,6 @@ xfs_file_aio_write( | |||
858 | struct inode *inode = mapping->host; | 861 | struct inode *inode = mapping->host; |
859 | struct xfs_inode *ip = XFS_I(inode); | 862 | struct xfs_inode *ip = XFS_I(inode); |
860 | ssize_t ret; | 863 | ssize_t ret; |
861 | int iolock; | ||
862 | size_t ocount = 0; | 864 | size_t ocount = 0; |
863 | 865 | ||
864 | XFS_STATS_INC(xs_write_calls); | 866 | XFS_STATS_INC(xs_write_calls); |
@@ -878,32 +880,22 @@ xfs_file_aio_write( | |||
878 | return -EIO; | 880 | return -EIO; |
879 | 881 | ||
880 | if (unlikely(file->f_flags & O_DIRECT)) | 882 | if (unlikely(file->f_flags & O_DIRECT)) |
881 | ret = xfs_file_dio_aio_write(iocb, iovp, nr_segs, pos, | 883 | ret = xfs_file_dio_aio_write(iocb, iovp, nr_segs, pos, ocount); |
882 | ocount, &iolock); | ||
883 | else | 884 | else |
884 | ret = xfs_file_buffered_aio_write(iocb, iovp, nr_segs, pos, | 885 | ret = xfs_file_buffered_aio_write(iocb, iovp, nr_segs, pos, |
885 | ocount, &iolock); | 886 | ocount); |
886 | 887 | ||
887 | if (ret <= 0) | 888 | if (ret > 0) { |
888 | goto out_unlock; | 889 | ssize_t err; |
889 | |||
890 | XFS_STATS_ADD(xs_write_bytes, ret); | ||
891 | 890 | ||
892 | /* Handle various SYNC-type writes */ | 891 | XFS_STATS_ADD(xs_write_bytes, ret); |
893 | if ((file->f_flags & O_DSYNC) || IS_SYNC(inode)) { | ||
894 | loff_t end = pos + ret - 1; | ||
895 | int error; | ||
896 | 892 | ||
897 | xfs_rw_iunlock(ip, iolock); | 893 | /* Handle various SYNC-type writes */ |
898 | error = xfs_file_fsync(file, pos, end, | 894 | err = generic_write_sync(file, pos, ret); |
899 | (file->f_flags & __O_SYNC) ? 0 : 1); | 895 | if (err < 0) |
900 | xfs_rw_ilock(ip, iolock); | 896 | ret = err; |
901 | if (error) | ||
902 | ret = error; | ||
903 | } | 897 | } |
904 | 898 | ||
905 | out_unlock: | ||
906 | xfs_rw_iunlock(ip, iolock); | ||
907 | return ret; | 899 | return ret; |
908 | } | 900 | } |
909 | 901 | ||