diff options
-rw-r--r-- | fs/xfs/xfs_aops.c | 232 |
1 files changed, 122 insertions, 110 deletions
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index f75c7c99cb63..6f5c95f94add 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -747,6 +747,127 @@ xfs_writepage_submit(
747 | return status; | 747 | return status; |
748 | } | 748 | } |
749 | 749 | ||
750 | static int | ||
751 | xfs_writepage_map( | ||
752 | struct xfs_writepage_ctx *wpc, | ||
753 | struct inode *inode, | ||
754 | struct page *page, | ||
755 | loff_t offset, | ||
756 | __uint64_t end_offset) | ||
757 | { | ||
758 | struct buffer_head *bh, *head; | ||
759 | ssize_t len = 1 << inode->i_blkbits; | ||
760 | int error = 0; | ||
761 | int uptodate = 1; | ||
762 | int count = 0; | ||
763 | |||
764 | bh = head = page_buffers(page); | ||
765 | offset = page_offset(page); | ||
766 | |||
767 | do { | ||
768 | if (offset >= end_offset) | ||
769 | break; | ||
770 | if (!buffer_uptodate(bh)) | ||
771 | uptodate = 0; | ||
772 | |||
773 | /* | ||
774 | * set_page_dirty dirties all buffers in a page, independent | ||
775 | * of their state. The dirty state however is entirely | ||
776 | * meaningless for holes (!mapped && uptodate), so skip | ||
777 | * buffers covering holes here. | ||
778 | */ | ||
779 | if (!buffer_mapped(bh) && buffer_uptodate(bh)) { | ||
780 | wpc->imap_valid = false; | ||
781 | continue; | ||
782 | } | ||
783 | |||
784 | if (buffer_unwritten(bh)) { | ||
785 | if (wpc->io_type != XFS_IO_UNWRITTEN) { | ||
786 | wpc->io_type = XFS_IO_UNWRITTEN; | ||
787 | wpc->imap_valid = false; | ||
788 | } | ||
789 | } else if (buffer_delay(bh)) { | ||
790 | if (wpc->io_type != XFS_IO_DELALLOC) { | ||
791 | wpc->io_type = XFS_IO_DELALLOC; | ||
792 | wpc->imap_valid = false; | ||
793 | } | ||
794 | } else if (buffer_uptodate(bh)) { | ||
795 | if (wpc->io_type != XFS_IO_OVERWRITE) { | ||
796 | wpc->io_type = XFS_IO_OVERWRITE; | ||
797 | wpc->imap_valid = false; | ||
798 | } | ||
799 | } else { | ||
800 | if (PageUptodate(page)) | ||
801 | ASSERT(buffer_mapped(bh)); | ||
802 | /* | ||
803 | * This buffer is not uptodate and will not be | ||
804 | * written to disk. Ensure that we will put any | ||
805 | * subsequent writeable buffers into a new | ||
806 | * ioend. | ||
807 | */ | ||
808 | wpc->imap_valid = false; | ||
809 | continue; | ||
810 | } | ||
811 | |||
812 | if (wpc->imap_valid) | ||
813 | wpc->imap_valid = xfs_imap_valid(inode, &wpc->imap, | ||
814 | offset); | ||
815 | if (!wpc->imap_valid) { | ||
816 | error = xfs_map_blocks(inode, offset, &wpc->imap, | ||
817 | wpc->io_type); | ||
818 | if (error) | ||
819 | goto out_error; | ||
820 | wpc->imap_valid = xfs_imap_valid(inode, &wpc->imap, | ||
821 | offset); | ||
822 | } | ||
823 | if (wpc->imap_valid) { | ||
824 | lock_buffer(bh); | ||
825 | if (wpc->io_type != XFS_IO_OVERWRITE) | ||
826 | xfs_map_at_offset(inode, bh, &wpc->imap, offset); | ||
827 | xfs_add_to_ioend(inode, bh, offset, wpc); | ||
828 | count++; | ||
829 | } | ||
830 | |||
831 | if (!wpc->iohead) | ||
832 | wpc->iohead = wpc->ioend; | ||
833 | |||
834 | } while (offset += len, ((bh = bh->b_this_page) != head)); | ||
835 | |||
836 | if (uptodate && bh == head) | ||
837 | SetPageUptodate(page); | ||
838 | |||
839 | xfs_start_page_writeback(page, 1, count); | ||
840 | ASSERT(wpc->iohead || !count); | ||
841 | return 0; | ||
842 | |||
843 | out_error: | ||
844 | /* | ||
845 | * On error, we have to fail the iohead here because we locked buffers | ||
846 | * in the ioend chain. If we don't do this, we'll deadlock invalidating | ||
847 | * the page as that tries to lock the buffers on the page. Also, because | ||
848 | * we may have set pages under writeback, we have to make sure we run | ||
849 | * IO completion to mark the error state of the IO appropriately, so we | ||
850 | * can't cancel the ioend directly here. That means we have to mark this | ||
851 | * page as under writeback if we included any buffers from it in the | ||
852 | * ioend chain so that completion treats it correctly. | ||
853 | * | ||
854 | * If we didn't include the page in the ioend, then we can simply | ||
855 | * discard and unlock it as there are no other users of the page or it's | ||
856 | * buffers right now. The caller will still need to trigger submission | ||
857 | * of outstanding ioends on the writepage context so they are treated | ||
858 | * correctly on error. | ||
859 | */ | ||
860 | if (count) | ||
861 | xfs_start_page_writeback(page, 0, count); | ||
862 | else { | ||
863 | xfs_aops_discard_page(page); | ||
864 | ClearPageUptodate(page); | ||
865 | unlock_page(page); | ||
866 | } | ||
867 | mapping_set_error(page->mapping, error); | ||
868 | return error; | ||
869 | } | ||
870 | |||
750 | /* | 871 | /* |
751 | * Write out a dirty page. | 872 | * Write out a dirty page. |
752 | * | 873 | * |
@@ -763,13 +884,9 @@ xfs_do_writepage(
763 | { | 884 | { |
764 | struct xfs_writepage_ctx *wpc = data; | 885 | struct xfs_writepage_ctx *wpc = data; |
765 | struct inode *inode = page->mapping->host; | 886 | struct inode *inode = page->mapping->host; |
766 | struct buffer_head *bh, *head; | ||
767 | loff_t offset; | 887 | loff_t offset; |
768 | __uint64_t end_offset; | 888 | __uint64_t end_offset; |
769 | pgoff_t end_index; | 889 | pgoff_t end_index; |
770 | ssize_t len; | ||
771 | int err, uptodate = 1; | ||
772 | int count = 0; | ||
773 | 890 | ||
774 | trace_xfs_writepage(inode, page, 0, 0); | 891 | trace_xfs_writepage(inode, page, 0, 0); |
775 | 892 | ||
@@ -862,112 +979,7 @@ xfs_do_writepage(
862 | end_offset = offset; | 979 | end_offset = offset; |
863 | } | 980 | } |
864 | 981 | ||
865 | len = 1 << inode->i_blkbits; | 982 | return xfs_writepage_map(wpc, inode, page, offset, end_offset); |
866 | |||
867 | bh = head = page_buffers(page); | ||
868 | offset = page_offset(page); | ||
869 | |||
870 | do { | ||
871 | if (offset >= end_offset) | ||
872 | break; | ||
873 | if (!buffer_uptodate(bh)) | ||
874 | uptodate = 0; | ||
875 | |||
876 | /* | ||
877 | * set_page_dirty dirties all buffers in a page, independent | ||
878 | * of their state. The dirty state however is entirely | ||
879 | * meaningless for holes (!mapped && uptodate), so skip | ||
880 | * buffers covering holes here. | ||
881 | */ | ||
882 | if (!buffer_mapped(bh) && buffer_uptodate(bh)) { | ||
883 | wpc->imap_valid = false; | ||
884 | continue; | ||
885 | } | ||
886 | |||
887 | if (buffer_unwritten(bh)) { | ||
888 | if (wpc->io_type != XFS_IO_UNWRITTEN) { | ||
889 | wpc->io_type = XFS_IO_UNWRITTEN; | ||
890 | wpc->imap_valid = false; | ||
891 | } | ||
892 | } else if (buffer_delay(bh)) { | ||
893 | if (wpc->io_type != XFS_IO_DELALLOC) { | ||
894 | wpc->io_type = XFS_IO_DELALLOC; | ||
895 | wpc->imap_valid = false; | ||
896 | } | ||
897 | } else if (buffer_uptodate(bh)) { | ||
898 | if (wpc->io_type != XFS_IO_OVERWRITE) { | ||
899 | wpc->io_type = XFS_IO_OVERWRITE; | ||
900 | wpc->imap_valid = false; | ||
901 | } | ||
902 | } else { | ||
903 | if (PageUptodate(page)) | ||
904 | ASSERT(buffer_mapped(bh)); | ||
905 | /* | ||
906 | * This buffer is not uptodate and will not be | ||
907 | * written to disk. Ensure that we will put any | ||
908 | * subsequent writeable buffers into a new | ||
909 | * ioend. | ||
910 | */ | ||
911 | wpc->imap_valid = 0; | ||
912 | continue; | ||
913 | } | ||
914 | |||
915 | if (wpc->imap_valid) | ||
916 | wpc->imap_valid = xfs_imap_valid(inode, &wpc->imap, offset); | ||
917 | if (!wpc->imap_valid) { | ||
918 | err = xfs_map_blocks(inode, offset, &wpc->imap, | ||
919 | wpc->io_type); | ||
920 | if (err) | ||
921 | goto error; | ||
922 | wpc->imap_valid = xfs_imap_valid(inode, &wpc->imap, offset); | ||
923 | } | ||
924 | if (wpc->imap_valid) { | ||
925 | lock_buffer(bh); | ||
926 | if (wpc->io_type != XFS_IO_OVERWRITE) | ||
927 | xfs_map_at_offset(inode, bh, &wpc->imap, offset); | ||
928 | xfs_add_to_ioend(inode, bh, offset, wpc); | ||
929 | count++; | ||
930 | } | ||
931 | |||
932 | if (!wpc->iohead) | ||
933 | wpc->iohead = wpc->ioend; | ||
934 | |||
935 | } while (offset += len, ((bh = bh->b_this_page) != head)); | ||
936 | |||
937 | if (uptodate && bh == head) | ||
938 | SetPageUptodate(page); | ||
939 | |||
940 | xfs_start_page_writeback(page, 1, count); | ||
941 | |||
942 | ASSERT(wpc->iohead || !count); | ||
943 | return 0; | ||
944 | |||
945 | error: | ||
946 | /* | ||
947 | * On error, we have to fail the iohead here because we buffers locked | ||
948 | * in the ioend chain. If we don't do this, we'll deadlock invalidating | ||
949 | * the page as that tries to lock the buffers on the page. Also, because | ||
950 | * we may have set pages under writeback, we have to make sure we run | ||
951 | * IO completion to mark the error state of the IO appropriately, so we | ||
952 | * can't cancel the ioend directly here. That means we have to mark this | ||
953 | * page as under writeback if we included any buffers from it in the | ||
954 | * ioend chain so that completion treats it correctly. | ||
955 | * | ||
956 | * If we didn't include the page in the ioend, then we can simply | ||
957 | * discard and unlock it as there are no other users of the page or it's | ||
958 | * buffers right now. The caller will still need to trigger submission | ||
959 | * of outstanding ioends on the writepage context so they are treated | ||
960 | * correctly on error. | ||
961 | */ | ||
962 | if (count) | ||
963 | xfs_start_page_writeback(page, 0, count); | ||
964 | else { | ||
965 | xfs_aops_discard_page(page); | ||
966 | ClearPageUptodate(page); | ||
967 | unlock_page(page); | ||
968 | } | ||
969 | mapping_set_error(page->mapping, err); | ||
970 | return err; | ||
971 | 983 | ||
972 | redirty: | 984 | redirty: |
973 | redirty_page_for_writepage(wbc, page); | 985 | redirty_page_for_writepage(wbc, page); |