author		Wu Fengguang <fengguang.wu@intel.com>	2009-09-25 00:04:10 -0400
committer	Jens Axboe <jens.axboe@oracle.com>	2009-09-25 12:08:26 -0400
commit		b3af9468aebf5fcb573d0a116b31d2be1d43c0e9
tree		3af10b5fa82213345ea61d4ee23ba4a550a6077e	/fs/fs-writeback.c
parent		9ecc2738ac2371f88dff5d48914b4e35c45203cd
writeback: don't delay inodes redirtied by a fast dirtier
Debug traces show that in per-bdi writeback, the inode under writeback
almost always gets redirtied by a busy dirtier.  We used to call
redirty_tail() in this case, which could delay the inode for up to 30s.
This is unacceptable because it now happens so frequently for plain
cp/dd that the accumulated delays could make writeback of big files
very slow.
So let's distinguish between a data redirty and a metadata-only
redirty.  The former is caused by a busy dirtier, while the latter can
happen in XFS, NFS, etc. when they are doing delalloc or updating
isize.  An inode that is being busily dirtied will now be requeued for
the next io, while an inode redirtied only by the fs will continue to
be delayed, to avoid repeated IO.
CC: Jan Kara <jack@suse.cz>
CC: Theodore Ts'o <tytso@mit.edu>
CC: Dave Chinner <david@fromorbit.com>
CC: Chris Mason <chris.mason@oracle.com>
CC: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Wu Fengguang <fengguang.wu@intel.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'fs/fs-writeback.c')
-rw-r--r--	fs/fs-writeback.c	12
1 file changed, 9 insertions, 3 deletions
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index c6bf775e641a..52aa54540079 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -474,10 +474,15 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
 	spin_lock(&inode_lock);
 	inode->i_state &= ~I_SYNC;
 	if (!(inode->i_state & (I_FREEING | I_CLEAR))) {
-		if (inode->i_state & I_DIRTY) {
+		if ((inode->i_state & I_DIRTY_PAGES) && wbc->for_kupdate) {
 			/*
-			 * Someone redirtied the inode while were writing back
-			 * the pages.
+			 * More pages get dirtied by a fast dirtier.
+			 */
+			goto select_queue;
+		} else if (inode->i_state & I_DIRTY) {
+			/*
+			 * At least XFS will redirty the inode during the
+			 * writeback (delalloc) and on io completion (isize).
 			 */
 			redirty_tail(inode);
 		} else if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
@@ -502,6 +507,7 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
 				 * soon as the queue becomes uncongested.
 				 */
 				inode->i_state |= I_DIRTY_PAGES;
+select_queue:
 				if (wbc->nr_to_write <= 0) {
 					/*
 					 * slice used up: queue for next turn
 
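
For context, here is a condensed sketch of how this part of writeback_single_inode()
reads once both hunks are applied.  The elided pieces and the requeue_io()/redirty_tail()
calls after the select_queue label come from the surrounding fs/fs-writeback.c code of
this kernel version rather than from the diff itself, so treat it as an illustration of
the control flow, not a verbatim excerpt:

	spin_lock(&inode_lock);
	inode->i_state &= ~I_SYNC;
	if (!(inode->i_state & (I_FREEING | I_CLEAR))) {
		if ((inode->i_state & I_DIRTY_PAGES) && wbc->for_kupdate) {
			/* Data redirty by a busy dirtier: skip the delay and
			 * jump straight to the requeue decision below. */
			goto select_queue;
		} else if (inode->i_state & I_DIRTY) {
			/* Metadata-only redirty by the fs (e.g. XFS delalloc,
			 * isize update on io completion): keep delaying it. */
			redirty_tail(inode);
		} else if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
			/* ... unchanged comments elided ... */
			if (wbc->for_kupdate) {
				/* ... */
				inode->i_state |= I_DIRTY_PAGES;
select_queue:
				if (wbc->nr_to_write <= 0) {
					/* slice used up: queue for next turn */
					requeue_io(inode);
				} else {
					/* somehow blocked: retry later */
					redirty_tail(inode);
				}
			} else {
				/* ... */
			}
		}
		/* ... */
	}

The net effect matches the changelog above: an inode that keeps gaining dirty pages
is requeued for the next io instead of being parked by redirty_tail(), while
fs-internal (metadata-only) redirtying continues to be delayed.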