aboutsummaryrefslogtreecommitdiffstats
path: root/fs/btrfs/file.c
diff options
context:
space:
mode:
Diffstat (limited to 'fs/btrfs/file.c')
-rw-r--r--fs/btrfs/file.c115
1 file changed, 73 insertions, 42 deletions
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index c1d3a818731a..f447b783bb84 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -70,6 +70,19 @@ static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
70 70
71 /* Flush processor's dcache for this page */ 71 /* Flush processor's dcache for this page */
72 flush_dcache_page(page); 72 flush_dcache_page(page);
73
74 /*
75 * if we get a partial write, we can end up with
76 * partially up to date pages. These add
77 * a lot of complexity, so make sure they don't
78 * happen by forcing this copy to be retried.
79 *
80 * The rest of the btrfs_file_write code will fall
81 * back to page at a time copies after we return 0.
82 */
83 if (!PageUptodate(page) && copied < count)
84 copied = 0;
85
73 iov_iter_advance(i, copied); 86 iov_iter_advance(i, copied);
74 write_bytes -= copied; 87 write_bytes -= copied;
75 total_copied += copied; 88 total_copied += copied;
@@ -186,6 +199,7 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
186 split = alloc_extent_map(GFP_NOFS); 199 split = alloc_extent_map(GFP_NOFS);
187 if (!split2) 200 if (!split2)
188 split2 = alloc_extent_map(GFP_NOFS); 201 split2 = alloc_extent_map(GFP_NOFS);
202 BUG_ON(!split || !split2);
189 203
190 write_lock(&em_tree->lock); 204 write_lock(&em_tree->lock);
191 em = lookup_extent_mapping(em_tree, start, len); 205 em = lookup_extent_mapping(em_tree, start, len);
@@ -762,6 +776,27 @@ out:
762} 776}
763 777
764/* 778/*
779 * on error we return an unlocked page and the error value
780 * on success we return a locked page and 0
781 */
782static int prepare_uptodate_page(struct page *page, u64 pos)
783{
784 int ret = 0;
785
786 if ((pos & (PAGE_CACHE_SIZE - 1)) && !PageUptodate(page)) {
787 ret = btrfs_readpage(NULL, page);
788 if (ret)
789 return ret;
790 lock_page(page);
791 if (!PageUptodate(page)) {
792 unlock_page(page);
793 return -EIO;
794 }
795 }
796 return 0;
797}
798
799/*
765 * this gets pages into the page cache and locks them down, it also properly 800 * this gets pages into the page cache and locks them down, it also properly
766 * waits for data=ordered extents to finish before allowing the pages to be 801 * waits for data=ordered extents to finish before allowing the pages to be
767 * modified. 802 * modified.
@@ -776,6 +811,7 @@ static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
776 unsigned long index = pos >> PAGE_CACHE_SHIFT; 811 unsigned long index = pos >> PAGE_CACHE_SHIFT;
777 struct inode *inode = fdentry(file)->d_inode; 812 struct inode *inode = fdentry(file)->d_inode;
778 int err = 0; 813 int err = 0;
814 int faili = 0;
779 u64 start_pos; 815 u64 start_pos;
780 u64 last_pos; 816 u64 last_pos;
781 817
@@ -793,15 +829,24 @@ again:
793 for (i = 0; i < num_pages; i++) { 829 for (i = 0; i < num_pages; i++) {
794 pages[i] = grab_cache_page(inode->i_mapping, index + i); 830 pages[i] = grab_cache_page(inode->i_mapping, index + i);
795 if (!pages[i]) { 831 if (!pages[i]) {
796 int c; 832 faili = i - 1;
797 for (c = i - 1; c >= 0; c--) { 833 err = -ENOMEM;
798 unlock_page(pages[c]); 834 goto fail;
799 page_cache_release(pages[c]); 835 }
800 } 836
801 return -ENOMEM; 837 if (i == 0)
838 err = prepare_uptodate_page(pages[i], pos);
839 if (i == num_pages - 1)
840 err = prepare_uptodate_page(pages[i],
841 pos + write_bytes);
842 if (err) {
843 page_cache_release(pages[i]);
844 faili = i - 1;
845 goto fail;
802 } 846 }
803 wait_on_page_writeback(pages[i]); 847 wait_on_page_writeback(pages[i]);
804 } 848 }
849 err = 0;
805 if (start_pos < inode->i_size) { 850 if (start_pos < inode->i_size) {
806 struct btrfs_ordered_extent *ordered; 851 struct btrfs_ordered_extent *ordered;
807 lock_extent_bits(&BTRFS_I(inode)->io_tree, 852 lock_extent_bits(&BTRFS_I(inode)->io_tree,
@@ -841,6 +886,14 @@ again:
841 WARN_ON(!PageLocked(pages[i])); 886 WARN_ON(!PageLocked(pages[i]));
842 } 887 }
843 return 0; 888 return 0;
889fail:
890 while (faili >= 0) {
891 unlock_page(pages[faili]);
892 page_cache_release(pages[faili]);
893 faili--;
894 }
895 return err;
896
844} 897}
845 898
846static ssize_t btrfs_file_aio_write(struct kiocb *iocb, 899static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
@@ -850,7 +903,6 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
850 struct file *file = iocb->ki_filp; 903 struct file *file = iocb->ki_filp;
851 struct inode *inode = fdentry(file)->d_inode; 904 struct inode *inode = fdentry(file)->d_inode;
852 struct btrfs_root *root = BTRFS_I(inode)->root; 905 struct btrfs_root *root = BTRFS_I(inode)->root;
853 struct page *pinned[2];
854 struct page **pages = NULL; 906 struct page **pages = NULL;
855 struct iov_iter i; 907 struct iov_iter i;
856 loff_t *ppos = &iocb->ki_pos; 908 loff_t *ppos = &iocb->ki_pos;
@@ -871,9 +923,6 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
871 will_write = ((file->f_flags & O_DSYNC) || IS_SYNC(inode) || 923 will_write = ((file->f_flags & O_DSYNC) || IS_SYNC(inode) ||
872 (file->f_flags & O_DIRECT)); 924 (file->f_flags & O_DIRECT));
873 925
874 pinned[0] = NULL;
875 pinned[1] = NULL;
876
877 start_pos = pos; 926 start_pos = pos;
878 927
879 vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE); 928 vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);
@@ -961,32 +1010,6 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
961 first_index = pos >> PAGE_CACHE_SHIFT; 1010 first_index = pos >> PAGE_CACHE_SHIFT;
962 last_index = (pos + iov_iter_count(&i)) >> PAGE_CACHE_SHIFT; 1011 last_index = (pos + iov_iter_count(&i)) >> PAGE_CACHE_SHIFT;
963 1012
964 /*
965 * there are lots of better ways to do this, but this code
966 * makes sure the first and last page in the file range are
967 * up to date and ready for cow
968 */
969 if ((pos & (PAGE_CACHE_SIZE - 1))) {
970 pinned[0] = grab_cache_page(inode->i_mapping, first_index);
971 if (!PageUptodate(pinned[0])) {
972 ret = btrfs_readpage(NULL, pinned[0]);
973 BUG_ON(ret);
974 wait_on_page_locked(pinned[0]);
975 } else {
976 unlock_page(pinned[0]);
977 }
978 }
979 if ((pos + iov_iter_count(&i)) & (PAGE_CACHE_SIZE - 1)) {
980 pinned[1] = grab_cache_page(inode->i_mapping, last_index);
981 if (!PageUptodate(pinned[1])) {
982 ret = btrfs_readpage(NULL, pinned[1]);
983 BUG_ON(ret);
984 wait_on_page_locked(pinned[1]);
985 } else {
986 unlock_page(pinned[1]);
987 }
988 }
989
990 while (iov_iter_count(&i) > 0) { 1013 while (iov_iter_count(&i) > 0) {
991 size_t offset = pos & (PAGE_CACHE_SIZE - 1); 1014 size_t offset = pos & (PAGE_CACHE_SIZE - 1);
992 size_t write_bytes = min(iov_iter_count(&i), 1015 size_t write_bytes = min(iov_iter_count(&i),
@@ -1023,8 +1046,20 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
1023 1046
1024 copied = btrfs_copy_from_user(pos, num_pages, 1047 copied = btrfs_copy_from_user(pos, num_pages,
1025 write_bytes, pages, &i); 1048 write_bytes, pages, &i);
1026 dirty_pages = (copied + offset + PAGE_CACHE_SIZE - 1) >> 1049
1027 PAGE_CACHE_SHIFT; 1050 /*
1051 * if we have trouble faulting in the pages, fall
1052 * back to one page at a time
1053 */
1054 if (copied < write_bytes)
1055 nrptrs = 1;
1056
1057 if (copied == 0)
1058 dirty_pages = 0;
1059 else
1060 dirty_pages = (copied + offset +
1061 PAGE_CACHE_SIZE - 1) >>
1062 PAGE_CACHE_SHIFT;
1028 1063
1029 if (num_pages > dirty_pages) { 1064 if (num_pages > dirty_pages) {
1030 if (copied > 0) 1065 if (copied > 0)
@@ -1068,10 +1103,6 @@ out:
1068 err = ret; 1103 err = ret;
1069 1104
1070 kfree(pages); 1105 kfree(pages);
1071 if (pinned[0])
1072 page_cache_release(pinned[0]);
1073 if (pinned[1])
1074 page_cache_release(pinned[1]);
1075 *ppos = pos; 1106 *ppos = pos;
1076 1107
1077 /* 1108 /*