Diffstat (limited to 'mm/page-writeback.c')
 mm/page-writeback.c | 17 +++++------------
 1 file changed, 5 insertions(+), 12 deletions(-)
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 1965d05a29cc..9d6ac2b6d942 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -491,13 +491,6 @@ static void balance_dirty_pages(struct address_space *mapping,
 	struct backing_dev_info *bdi = mapping->backing_dev_info;
 
 	for (;;) {
-		struct writeback_control wbc = {
-			.sync_mode	= WB_SYNC_NONE,
-			.older_than_this = NULL,
-			.nr_to_write	= write_chunk,
-			.range_cyclic	= 1,
-		};
-
 		nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
 					global_page_state(NR_UNSTABLE_NFS);
 		nr_writeback = global_page_state(NR_WRITEBACK);
@@ -559,17 +552,17 @@ static void balance_dirty_pages(struct address_space *mapping,
 		 * threshold otherwise wait until the disk writes catch
 		 * up.
 		 */
-		trace_wbc_balance_dirty_start(&wbc, bdi);
+		trace_balance_dirty_start(bdi);
 		if (bdi_nr_reclaimable > bdi_thresh) {
-			writeback_inodes_wb(&bdi->wb, &wbc);
-			pages_written += write_chunk - wbc.nr_to_write;
-			trace_wbc_balance_dirty_written(&wbc, bdi);
+			pages_written += writeback_inodes_wb(&bdi->wb,
+							     write_chunk);
+			trace_balance_dirty_written(bdi, pages_written);
 			if (pages_written >= write_chunk)
 				break;		/* We've done our duty */
 		}
-		trace_wbc_balance_dirty_wait(&wbc, bdi);
 		__set_current_state(TASK_UNINTERRUPTIBLE);
 		io_schedule_timeout(pause);
+		trace_balance_dirty_wait(bdi);
 
 		/*
 		 * Increase the delay for each loop, up to our previous
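
For readers skimming the hunks above: the patch drops the per-iteration writeback_control that balance_dirty_pages() built on each loop and instead passes the page budget straight to writeback_inodes_wb(), which now reports back how many pages were written. The user-space sketch below is only an illustration of that calling convention; mock_writeback_inodes_wb() and mock_balance_loop() are hypothetical stand-ins, not the kernel code itself.

/*
 * Hedged illustration (plain user-space C, not kernel code): the callee
 * takes a page budget and returns the number of pages it wrote, so the
 * caller no longer needs a control struct just to read back nr_to_write.
 */
#include <stdio.h>

/* Pretend to write back up to nr_pages; return how many were written. */
static long mock_writeback_inodes_wb(long nr_pages)
{
	return nr_pages > 8 ? 8 : nr_pages;	/* arbitrary mock progress */
}

static void mock_balance_loop(long write_chunk)
{
	long pages_written = 0;

	for (;;) {
		/* New-style call: pass the budget, accumulate the result. */
		pages_written += mock_writeback_inodes_wb(write_chunk);
		if (pages_written >= write_chunk)
			break;			/* we've done our duty */
	}
	printf("wrote %ld pages\n", pages_written);
}

int main(void)
{
	mock_balance_loop(32);
	return 0;
}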