author     Wu Fengguang <fengguang.wu@intel.com>          2010-08-11 17:17:41 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org> 2010-08-12 11:43:30 -0400
commit     23539afc71937dbaca7de2229669f4475ff4ea7b
tree       1a37d78bc4d387c6cbdc201755dce09297ebe308 /fs/fs-writeback.c
parent     1babe18385d3976043c04237ce837f3736197eb4
writeback: don't redirty tail an inode with dirty pages
Avoid delaying writeback for an expired inode with lots of dirty pages, but
no active dirtier at the moment. Previously we only did that for the kupdate
case. Any filesystem that does delayed allocation or unwritten extent
conversion after IO completion will cause this - for example, XFS.

Signed-off-by: Wu Fengguang <fengguang.wu@intel.com>
Acked-by: Jan Kara <jack@suse.cz>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Jens Axboe <axboe@kernel.dk>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
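To make the behavioural change easier to see than it is in the hunks below, here is a small standalone C sketch of the requeue decision at the end of writeback_single_inode(), before and after this patch. It is a simplified model, not kernel code: the struct, the boolean fields, and the returned strings are illustrative stand-ins (has_dirty_pages conflates I_DIRTY_PAGES with the PAGECACHE_TAG_DIRTY mapping tag, and the nr_to_write bookkeeping is omitted); only the ordering of the checks reflects the patch.

#include <stdbool.h>
#include <stdio.h>

/*
 * Simplified model of the state examined at the end of
 * writeback_single_inode().  These are illustrative stand-ins,
 * not kernel types or flags.
 */
struct inode_model {
	bool has_dirty_pages;	/* pages still tagged dirty in the mapping */
	bool inode_dirty;	/* inode redirtied during writeback (I_DIRTY) */
	bool for_kupdate;	/* wbc->for_kupdate */
};

/*
 * Before the patch: an inode redirtied during writeback (e.g. by XFS
 * delalloc or i_size updates on IO completion) hit the I_DIRTY branch
 * first, so its remaining dirty pages were pushed to the tail of the
 * dirty list unless this was a kupdate pass.
 */
static const char *decide_before(const struct inode_model *i)
{
	if (i->has_dirty_pages && i->for_kupdate)
		return "requeue for more writeout";
	else if (i->inode_dirty)
		return "redirty_tail (pages wait for the next period)";
	else if (i->has_dirty_pages)
		return "requeue for more writeout";
	else
		return "clean";
}

/*
 * After the patch: remaining dirty pages are checked first, so an
 * expired inode with dirty pages is requeued promptly regardless of
 * the kupdate flag; a purely redirtied inode still goes to
 * redirty_tail.
 */
static const char *decide_after(const struct inode_model *i)
{
	if (i->has_dirty_pages)
		return "requeue for more writeout";
	else if (i->inode_dirty)
		return "redirty_tail";
	else
		return "clean";
}

int main(void)
{
	/* The case this patch targets: dirty pages left, inode redirtied,
	 * and not a kupdate pass. */
	struct inode_model i = {
		.has_dirty_pages = true,
		.inode_dirty = true,
		.for_kupdate = false,
	};

	printf("before the patch: %s\n", decide_before(&i));
	printf("after the patch:  %s\n", decide_after(&i));
	return 0;
}

Run as-is, the sketch prints the redirty_tail outcome for the pre-patch ordering and "requeue for more writeout" afterwards, which is the delay the commit message describes removing.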
Diffstat (limited to 'fs/fs-writeback.c')
-rw-r--r--   fs/fs-writeback.c   22
1 file changed, 9 insertions(+), 13 deletions(-)
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index fca43d4d7bf4..1ce364bbb003 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -363,18 +363,7 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
 	spin_lock(&inode_lock);
 	inode->i_state &= ~I_SYNC;
 	if (!(inode->i_state & I_FREEING)) {
-		if ((inode->i_state & I_DIRTY_PAGES) && wbc->for_kupdate) {
-			/*
-			 * More pages get dirtied by a fast dirtier.
-			 */
-			goto select_queue;
-		} else if (inode->i_state & I_DIRTY) {
-			/*
-			 * At least XFS will redirty the inode during the
-			 * writeback (delalloc) and on io completion (isize).
-			 */
-			redirty_tail(inode);
-		} else if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
+		if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
 			/*
 			 * We didn't write back all the pages. nfs_writepages()
 			 * sometimes bales out without doing anything. Redirty
@@ -396,7 +385,6 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
 				 * soon as the queue becomes uncongested.
 				 */
 				inode->i_state |= I_DIRTY_PAGES;
-select_queue:
 				if (wbc->nr_to_write <= 0) {
 					/*
 					 * slice used up: queue for next turn
@@ -419,6 +407,14 @@ select_queue:
 				inode->i_state |= I_DIRTY_PAGES;
 				redirty_tail(inode);
 			}
+		} else if (inode->i_state & I_DIRTY) {
+			/*
+			 * Filesystems can dirty the inode during writeback
+			 * operations, such as delayed allocation during
+			 * submission or metadata updates after data IO
+			 * completion.
+			 */
+			redirty_tail(inode);
 		} else if (atomic_read(&inode->i_count)) {
 			/*
 			 * The inode is clean, inuse