Diffstat (limited to 'mm/page-writeback.c')
-rw-r--r--  mm/page-writeback.c  13
1 files changed, 8 insertions, 5 deletions
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 7b0dcea4935b..81627ebcd313 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -541,8 +541,11 @@ static void balance_dirty_pages(struct address_space *mapping)
 		 * filesystems (i.e. NFS) in which data may have been
 		 * written to the server's write cache, but has not yet
 		 * been flushed to permanent storage.
+		 * Only move pages to writeback if this bdi is over its
+		 * threshold otherwise wait until the disk writes catch
+		 * up.
 		 */
-		if (bdi_nr_reclaimable) {
+		if (bdi_nr_reclaimable > bdi_thresh) {
 			writeback_inodes(&wbc);
 			pages_written += write_chunk - wbc.nr_to_write;
 			get_dirty_limits(&background_thresh, &dirty_thresh,
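
The hunk above changes when balance_dirty_pages() starts writeback for a backing device (bdi): instead of calling writeback_inodes() whenever the bdi has any reclaimable dirty pages, it now does so only once the bdi is over its own dirty threshold, and otherwise waits for the disk to catch up. A minimal stand-alone model of the new check (hypothetical helper name, not code from this patch):

	#include <stdbool.h>
	#include <stdio.h>

	/* Hypothetical model of the check above: start writeback only when
	 * this backing device is over its own dirty threshold. */
	static bool bdi_over_thresh(unsigned long bdi_nr_reclaimable,
				    unsigned long bdi_thresh)
	{
		return bdi_nr_reclaimable > bdi_thresh;
	}

	int main(void)
	{
		printf("%d\n", bdi_over_thresh(100, 200));	/* 0: below threshold, just wait */
		printf("%d\n", bdi_over_thresh(300, 200));	/* 1: over threshold, write back */
		return 0;
	}
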
@@ -572,7 +575,7 @@ static void balance_dirty_pages(struct address_space *mapping)
 		if (pages_written >= write_chunk)
 			break;		/* We've done our duty */
 
-		congestion_wait(WRITE, HZ/10);
+		congestion_wait(BLK_RW_ASYNC, HZ/10);
 	}
 
 	if (bdi_nr_reclaimable + bdi_nr_writeback < bdi_thresh &&
@@ -666,7 +669,7 @@ void throttle_vm_writeout(gfp_t gfp_mask)
 		if (global_page_state(NR_UNSTABLE_NFS) +
 			global_page_state(NR_WRITEBACK) <= dirty_thresh)
 				break;
-		congestion_wait(WRITE, HZ/10);
+		congestion_wait(BLK_RW_ASYNC, HZ/10);
 
 		/*
 		 * The caller might hold locks which can prevent IO completion
@@ -712,7 +715,7 @@ static void background_writeout(unsigned long _min_pages)
 		if (wbc.nr_to_write > 0 || wbc.pages_skipped > 0) {
 			/* Wrote less than expected */
 			if (wbc.encountered_congestion || wbc.more_io)
-				congestion_wait(WRITE, HZ/10);
+				congestion_wait(BLK_RW_ASYNC, HZ/10);
 			else
 				break;
 		}
@@ -784,7 +787,7 @@ static void wb_kupdate(unsigned long arg)
 		writeback_inodes(&wbc);
 		if (wbc.nr_to_write > 0) {
 			if (wbc.encountered_congestion || wbc.more_io)
-				congestion_wait(WRITE, HZ/10);
+				congestion_wait(BLK_RW_ASYNC, HZ/10);
 			else
 				break;	/* All the old data is written */
 		}
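
The remaining hunks all make the same substitution: congestion_wait() indexes its wait queues by BLK_RW_SYNC/BLK_RW_ASYNC rather than by the READ/WRITE request direction, so the writeback paths that want to back off until asynchronous write congestion clears now pass BLK_RW_ASYNC instead of WRITE. A minimal sketch of the resulting back-off call, assuming the congestion_wait()/BLK_RW_ASYNC declarations from <linux/backing-dev.h> of that era (not code from this patch):

	#include <linux/backing-dev.h>
	#include <linux/jiffies.h>

	/* Sketch: throttle the caller for up to HZ/10 jiffies, or until the
	 * async (write) congestion state of some backing device clears. */
	static void writeback_backoff(void)
	{
		congestion_wait(BLK_RW_ASYNC, HZ/10);
	}
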