author     Wu Fengguang <fengguang.wu@intel.com>   2011-05-04 21:54:37 -0400
committer  Wu Fengguang <fengguang.wu@intel.com>   2011-07-10 01:09:01 -0400
commit     d46db3d58233be4be980eb1e42eebe7808bcabab (patch)
tree       6d813b33938d915f0c0633e8615d1ffdcc554c96 /mm/backing-dev.c
parent     36715cef0770b7e2547892b7c3197fc024274630 (diff)
writeback: make writeback_control.nr_to_write straight
Pass struct wb_writeback_work all the way down to writeback_sb_inodes(),
and initialize the struct writeback_control there.
struct writeback_control is basically designed to control writeback of a
single file, but we keep abusing it for writing multiple files in
writeback_sb_inodes() and its callers.
This immediately cleans things up: suddenly wbc.nr_to_write vs
work->nr_pages starts to make sense, and instead of saving and restoring
pages_skipped in writeback_sb_inodes() it can always start from a clean
zero value.
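The fs/fs-writeback.c side of the series is outside this diffstat (which is
limited to mm/backing-dev.c). As a rough illustration of the approach
described above, writeback_sb_inodes() would be handed the work item and
build a fresh, single-file writeback_control per inode, along the lines of
the sketch below; the field selection and the writeback_chunk_size() /
writeback_single_inode() helpers are assumptions for illustration, not hunks
from this patch.

/*
 * Sketch only, not the actual patch: writeback_sb_inodes() takes the
 * whole wb_writeback_work and builds a per-inode writeback_control.
 * Locking, requeueing and error handling are elided.
 */
static long writeback_sb_inodes(struct super_block *sb,
				struct bdi_writeback *wb,
				struct wb_writeback_work *work)
{
	long wrote = 0;

	while (!list_empty(&wb->b_io)) {
		struct inode *inode = wb_inode(wb->b_io.prev);
		long write_chunk = writeback_chunk_size(work);
		/* a clean, single-file control: nr_to_write is a fresh
		 * per-file budget and pages_skipped starts at zero */
		struct writeback_control wbc = {
			.sync_mode	= work->sync_mode,
			.for_kupdate	= work->for_kupdate,
			.for_background	= work->for_background,
			.range_cyclic	= work->range_cyclic,
			.nr_to_write	= write_chunk,
		};

		writeback_single_inode(inode, wb, &wbc);

		/* charge what this inode consumed against the per-work
		 * page budget; wbc state never leaks to the next file */
		wrote += write_chunk - wbc.nr_to_write;
		work->nr_pages -= write_chunk - wbc.nr_to_write;
		if (work->nr_pages <= 0)
			break;
	}
	return wrote;
}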
The change also makes for a neat IO pattern improvement: large dirty files
are now written in the full 4MB writeback chunk size, rather than whatever
quota happened to remain in wbc->nr_to_write.
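The 4MB figure corresponds to the traditional MAX_WRITEBACK_PAGES budget of
1024 pages of 4KB each. A plausible shape for the per-file chunk
computation, again an assumption rather than something shown in this
diffstat, is:

/* Sketch only: choose the per-file write chunk from the work item. */
#define MAX_WRITEBACK_PAGES	1024	/* 4MB worth of 4KB pages */

static long writeback_chunk_size(struct wb_writeback_work *work)
{
	/*
	 * Data-integrity sync and tagged writepages want to sweep
	 * everything in one pass, so don't chunk them at all.
	 */
	if (work->sync_mode == WB_SYNC_ALL || work->tagged_writepages)
		return LONG_MAX;

	/* otherwise give each large file the full chunk, capped only
	 * by whatever is left of the per-work page budget */
	return min_t(long, MAX_WRITEBACK_PAGES, work->nr_pages);
}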
Acked-by: Jan Kara <jack@suse.cz>
Proposed-by: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Wu Fengguang <fengguang.wu@intel.com>
Diffstat (limited to 'mm/backing-dev.c')
-rw-r--r--  mm/backing-dev.c | 17
1 files changed, 3 insertions, 14 deletions
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 5f6553ef1ba7..7ba303be5e03 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -260,18 +260,6 @@ int bdi_has_dirty_io(struct backing_dev_info *bdi)
 	return wb_has_dirty_io(&bdi->wb);
 }
 
-static void bdi_flush_io(struct backing_dev_info *bdi)
-{
-	struct writeback_control wbc = {
-		.sync_mode		= WB_SYNC_NONE,
-		.older_than_this	= NULL,
-		.range_cyclic		= 1,
-		.nr_to_write		= 1024,
-	};
-
-	writeback_inodes_wb(&bdi->wb, &wbc);
-}
-
 /*
  * kupdated() used to do this. We cannot do it from the bdi_forker_thread()
  * or we risk deadlocking on ->s_umount. The longer term solution would be
@@ -457,9 +445,10 @@ static int bdi_forker_thread(void *ptr)
 		if (IS_ERR(task)) {
 			/*
 			 * If thread creation fails, force writeout of
-			 * the bdi from the thread.
+			 * the bdi from the thread. Hopefully 1024 is
+			 * large enough for efficient IO.
 			 */
-			bdi_flush_io(bdi);
+			writeback_inodes_wb(&bdi->wb, 1024);
 		} else {
 			/*
 			 * The spinlock makes sure we do not lose
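For context on the new call site above: writeback_inodes_wb() now takes a
plain page budget instead of a struct writeback_control pointer, which is
what makes the fixed wbc in bdi_flush_io() redundant. A minimal sketch of
the assumed post-patch helper follows (it lives in fs/fs-writeback.c,
outside this diffstat; the __writeback_inodes_wb() name is an assumption):

/*
 * Sketch only: the page budget handed in by bdi_forker_thread() is
 * packed into a wb_writeback_work and passed down; the old per-call
 * writeback_control is gone.
 */
void writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages)
{
	struct wb_writeback_work work = {
		.nr_pages	= nr_pages,
		.sync_mode	= WB_SYNC_NONE,
		.range_cyclic	= 1,
	};

	/* list locking and b_io queueing elided in this sketch */
	__writeback_inodes_wb(wb, &work);
}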