aboutsummaryrefslogtreecommitdiffstats
path: root/fs/ext4
diff options
context:
space:
mode:
authorTheodore Ts'o <tytso@mit.edu>2011-02-26 14:08:11 -0500
committerTheodore Ts'o <tytso@mit.edu>2011-02-26 14:08:11 -0500
commitee6ecbcc5d73672217fdea420d182ecb0cdf310c (patch)
tree4d5b960a2feb20ce1b5866739b39c89dec1c49d8 /fs/ext4
parent9749895644a817cfd28a535bc3ae60e4267bdc50 (diff)
ext4: remove page_skipped hackery in ext4_da_writepages()
Because the ext4 page writeback codepath had been prematurely calling clear_page_dirty_for_io(), if it turned out that a particular page couldn't be written out during a particular pass of write_cache_pages_da(), the page would have to get redirtied by calling redirty_page_for_writeback(). Not only was this wasted work, but redirty_page_for_writeback() would increment wbc->pages_skipped to signal to writeback_sb_inodes() that buffers were locked, and that it should skip this inode until later. Since this signal was incorrect in ext4's case --- which was caused by ext4's historically incorrect use of write_cache_pages() --- ext4_da_writepages() saved and restored wbc->pages_skipped to avoid confusing writeback_sb_inodes(). Now that we've fixed ext4 to call clear_page_dirty_for_io() right before initiating the page I/O, we can nuke the pages_skipped save/restore hackery, and breathe a sigh of relief. Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
Diffstat (limited to 'fs/ext4')
-rw-r--r--fs/ext4/inode.c10
1 files changed, 0 insertions, 10 deletions
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index ae6e2f43d873..617c9cbba182 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -2900,7 +2900,6 @@ static int ext4_da_writepages(struct address_space *mapping,
2900 struct mpage_da_data mpd; 2900 struct mpage_da_data mpd;
2901 struct inode *inode = mapping->host; 2901 struct inode *inode = mapping->host;
2902 int pages_written = 0; 2902 int pages_written = 0;
2903 long pages_skipped;
2904 unsigned int max_pages; 2903 unsigned int max_pages;
2905 int range_cyclic, cycled = 1, io_done = 0; 2904 int range_cyclic, cycled = 1, io_done = 0;
2906 int needed_blocks, ret = 0; 2905 int needed_blocks, ret = 0;
@@ -2986,8 +2985,6 @@ static int ext4_da_writepages(struct address_space *mapping,
2986 mpd.wbc = wbc; 2985 mpd.wbc = wbc;
2987 mpd.inode = mapping->host; 2986 mpd.inode = mapping->host;
2988 2987
2989 pages_skipped = wbc->pages_skipped;
2990
2991retry: 2988retry:
2992 if (wbc->sync_mode == WB_SYNC_ALL) 2989 if (wbc->sync_mode == WB_SYNC_ALL)
2993 tag_pages_for_writeback(mapping, index, end); 2990 tag_pages_for_writeback(mapping, index, end);
@@ -3047,7 +3044,6 @@ retry:
3047 * and try again 3044 * and try again
3048 */ 3045 */
3049 jbd2_journal_force_commit_nested(sbi->s_journal); 3046 jbd2_journal_force_commit_nested(sbi->s_journal);
3050 wbc->pages_skipped = pages_skipped;
3051 ret = 0; 3047 ret = 0;
3052 } else if (ret == MPAGE_DA_EXTENT_TAIL) { 3048 } else if (ret == MPAGE_DA_EXTENT_TAIL) {
3053 /* 3049 /*
@@ -3055,7 +3051,6 @@ retry:
3055 * rest of the pages 3051 * rest of the pages
3056 */ 3052 */
3057 pages_written += mpd.pages_written; 3053 pages_written += mpd.pages_written;
3058 wbc->pages_skipped = pages_skipped;
3059 ret = 0; 3054 ret = 0;
3060 io_done = 1; 3055 io_done = 1;
3061 } else if (wbc->nr_to_write) 3056 } else if (wbc->nr_to_write)
@@ -3073,11 +3068,6 @@ retry:
3073 wbc->range_end = mapping->writeback_index - 1; 3068 wbc->range_end = mapping->writeback_index - 1;
3074 goto retry; 3069 goto retry;
3075 } 3070 }
3076 if (pages_skipped != wbc->pages_skipped)
3077 ext4_msg(inode->i_sb, KERN_CRIT,
3078 "This should not happen leaving %s "
3079 "with nr_to_write = %ld ret = %d",
3080 __func__, wbc->nr_to_write, ret);
3081 3071
3082 /* Update index */ 3072 /* Update index */
3083 wbc->range_cyclic = range_cyclic; 3073 wbc->range_cyclic = range_cyclic;