author		Wu Fengguang <fengguang.wu@intel.com>	2011-05-04 21:54:37 -0400
committer	Wu Fengguang <fengguang.wu@intel.com>	2011-07-10 01:09:01 -0400
commit		d46db3d58233be4be980eb1e42eebe7808bcabab
tree		6d813b33938d915f0c0633e8615d1ffdcc554c96 /mm/page-writeback.c
parent		36715cef0770b7e2547892b7c3197fc024274630
writeback: make writeback_control.nr_to_write straight
Pass struct wb_writeback_work all the way down to writeback_sb_inodes(),
and initialize the struct writeback_control there.
struct writeback_control is basically designed to control writeback of a
single file, but we keep abusing it for writing multiple files in
writeback_sb_inodes() and its callers.
This immediately cleans things up: wbc.nr_to_write vs. work->nr_pages
suddenly starts to make sense, and instead of saving and restoring
pages_skipped in writeback_sb_inodes(), it can always start from a clean
zero value.
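As a rough sketch of what that looks like (heavily simplified, with the real
function's inode iteration, locking and error handling elided; field and
helper names such as writeback_chunk_size() follow the patch, but the body
here is illustrative rather than the exact kernel code):

	static long writeback_sb_inodes(struct super_block *sb,
					struct bdi_writeback *wb,
					struct wb_writeback_work *work)
	{
		struct writeback_control wbc = {
			.sync_mode	= work->sync_mode,
			.range_cyclic	= work->range_cyclic,
			.range_start	= 0,
			.range_end	= LLONG_MAX,
		};
		long wrote = 0;

		while (!list_empty(&wb->b_io)) {
			struct inode *inode = wb_inode(wb->b_io.prev);
			long write_chunk = writeback_chunk_size(work);

			/* per-file state starts from a clean zero each time */
			wbc.nr_to_write = write_chunk;
			wbc.pages_skipped = 0;

			writeback_single_inode(inode, wb, &wbc);

			/*
			 * work->nr_pages is the multi-file budget;
			 * wbc.nr_to_write only ever covered this one inode.
			 */
			work->nr_pages -= write_chunk - wbc.nr_to_write;
			wrote += write_chunk - wbc.nr_to_write;
		}
		return wrote;
	}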
It also makes a neat IO pattern change: large dirty files are now written
in the full 4MB writeback chunk size, rather than whatever quota remained
in wbc->nr_to_write.
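Assuming 4KB pages, the chunk works out to MAX_WRITEBACK_PAGES = 1024 pages,
i.e. 1024 * 4KB = 4MB. The helper below is only a simplified sketch of the
chunk-size decision, not the literal patch code (the real rounding and the
handling of tagged/integrity writeback differ in detail):

	#define MAX_WRITEBACK_PAGES	1024	/* 1024 * 4KB = 4MB per chunk */

	static long writeback_chunk_size(struct wb_writeback_work *work)
	{
		/*
		 * Integrity syncs must write everything they were asked for;
		 * everything else goes in full-sized chunks, so a big dirty
		 * file is no longer cut short by the caller's leftover quota.
		 */
		if (work->sync_mode == WB_SYNC_ALL)
			return LONG_MAX;
		return MAX_WRITEBACK_PAGES;
	}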
Acked-by: Jan Kara <jack@suse.cz>
Proposed-by: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Wu Fengguang <fengguang.wu@intel.com>
Diffstat (limited to 'mm/page-writeback.c')
-rw-r--r--	mm/page-writeback.c	17
1 file changed, 5 insertions(+), 12 deletions(-)
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 1965d05a29cc..9d6ac2b6d942 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -491,13 +491,6 @@ static void balance_dirty_pages(struct address_space *mapping,
 	struct backing_dev_info *bdi = mapping->backing_dev_info;
 
 	for (;;) {
-		struct writeback_control wbc = {
-			.sync_mode	= WB_SYNC_NONE,
-			.older_than_this = NULL,
-			.nr_to_write	= write_chunk,
-			.range_cyclic	= 1,
-		};
-
 		nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
 					global_page_state(NR_UNSTABLE_NFS);
 		nr_writeback = global_page_state(NR_WRITEBACK);
@@ -559,17 +552,17 @@ static void balance_dirty_pages(struct address_space *mapping,
 		 * threshold otherwise wait until the disk writes catch
 		 * up.
 		 */
-		trace_wbc_balance_dirty_start(&wbc, bdi);
+		trace_balance_dirty_start(bdi);
 		if (bdi_nr_reclaimable > bdi_thresh) {
-			writeback_inodes_wb(&bdi->wb, &wbc);
-			pages_written += write_chunk - wbc.nr_to_write;
-			trace_wbc_balance_dirty_written(&wbc, bdi);
+			pages_written += writeback_inodes_wb(&bdi->wb,
+							     write_chunk);
+			trace_balance_dirty_written(bdi, pages_written);
 			if (pages_written >= write_chunk)
 				break;		/* We've done our duty */
 		}
-		trace_wbc_balance_dirty_wait(&wbc, bdi);
 		__set_current_state(TASK_UNINTERRUPTIBLE);
 		io_schedule_timeout(pause);
+		trace_balance_dirty_wait(bdi);
 
 		/*
 		 * Increase the delay for each loop, up to our previous
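The second hunk relies on writeback_inodes_wb() now taking a plain page
count and reporting how many pages it wrote, instead of having a wbc
threaded through it from balance_dirty_pages(). Roughly (queueing and
locking details elided; __writeback_inodes_wb() is the internal helper the
patch calls into, but the body below is a sketch rather than the exact
code):

	long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages)
	{
		struct wb_writeback_work work = {
			.nr_pages	= nr_pages,
			.sync_mode	= WB_SYNC_NONE,
			.range_cyclic	= 1,
		};

		__writeback_inodes_wb(wb, &work);

		/* whatever budget is left over was not written */
		return nr_pages - work.nr_pages;
	}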