author     Wu Fengguang <fengguang.wu@intel.com>  2010-09-12 15:34:05 -0400
committer  Wu Fengguang <fengguang.wu@intel.com>  2011-07-10 01:09:02 -0400
commit     7762741e3af69720186802e945229b6a5afd5c49
tree       e5ca904b7b31154b1a412bcd3a2160f31581bdb7 /mm
parent     00821b002df7da867bb2c15b4f83f3706371383f
writeback: consolidate variable names in balance_dirty_pages()
Introduce nr_dirty = NR_FILE_DIRTY + NR_WRITEBACK + NR_UNSTABLE_NFS in order
to simplify many tests in the following patches. balance_dirty_pages() will
eventually care only about the dirty sums besides nr_writeback.

Acked-by: Jan Kara <jack@suse.cz>
Signed-off-by: Wu Fengguang <fengguang.wu@intel.com>
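The consolidation is behavior-preserving: the new nr_dirty test is arithmetically identical to the old two-variable sum. A minimal standalone sketch (plain C; the counter values below are made up and stand in for global_page_state(), this is not the kernel code) checking that the old and new tests agree:

    #include <stdio.h>

    /* Hypothetical snapshot of the global page counters; in the kernel
     * these come from global_page_state(NR_FILE_DIRTY) etc. */
    static unsigned long file_dirty   = 900;
    static unsigned long unstable_nfs = 100;
    static unsigned long writeback    = 500;

    int main(void)
    {
            unsigned long background_thresh = 1000;  /* assumed limits */
            unsigned long dirty_thresh      = 2000;

            /* Before the patch: two variables, summed at each test site. */
            unsigned long nr_reclaimable = file_dirty + unstable_nfs;
            unsigned long nr_writeback   = writeback;
            int old_ok = nr_reclaimable + nr_writeback <=
                            (background_thresh + dirty_thresh) / 2;

            /* After the patch: one consolidated sum, tested directly. */
            unsigned long nr_dirty = nr_reclaimable + writeback;
            int new_ok = nr_dirty <= (background_thresh + dirty_thresh) / 2;

            printf("old test: %d, new test: %d (must agree)\n",
                   old_ok, new_ok);
            return 0;
    }

Both tests print 1 for these values; nr_dirty merely names the sum once instead of recomputing it at every comparison site.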
Diffstat (limited to 'mm')
 mm/page-writeback.c | 21 +++++++++++----------
 1 file changed, 11 insertions(+), 10 deletions(-)
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 446bdf7b975b..5f3e1b46ace5 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -565,8 +565,9 @@ static void bdi_update_bandwidth(struct backing_dev_info *bdi,
 static void balance_dirty_pages(struct address_space *mapping,
 				unsigned long write_chunk)
 {
-	long nr_reclaimable, bdi_nr_reclaimable;
-	long nr_writeback, bdi_nr_writeback;
+	unsigned long nr_reclaimable, bdi_nr_reclaimable;
+	unsigned long nr_dirty;  /* = file_dirty + writeback + unstable_nfs */
+	unsigned long bdi_dirty;
 	unsigned long background_thresh;
 	unsigned long dirty_thresh;
 	unsigned long bdi_thresh;
@@ -579,7 +580,7 @@ static void balance_dirty_pages(struct address_space *mapping,
 	for (;;) {
 		nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
 					global_page_state(NR_UNSTABLE_NFS);
-		nr_writeback = global_page_state(NR_WRITEBACK);
+		nr_dirty = nr_reclaimable + global_page_state(NR_WRITEBACK);
 
 		global_dirty_limits(&background_thresh, &dirty_thresh);
 
@@ -588,8 +589,7 @@ static void balance_dirty_pages(struct address_space *mapping,
 		 * catch-up. This avoids (excessively) small writeouts
 		 * when the bdi limits are ramping up.
 		 */
-		if (nr_reclaimable + nr_writeback <=
-				(background_thresh + dirty_thresh) / 2)
+		if (nr_dirty <= (background_thresh + dirty_thresh) / 2)
 			break;
 
 		bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);
@@ -607,10 +607,12 @@ static void balance_dirty_pages(struct address_space *mapping,
 		 */
 		if (bdi_thresh < 2*bdi_stat_error(bdi)) {
 			bdi_nr_reclaimable = bdi_stat_sum(bdi, BDI_RECLAIMABLE);
-			bdi_nr_writeback = bdi_stat_sum(bdi, BDI_WRITEBACK);
+			bdi_dirty = bdi_nr_reclaimable +
+				    bdi_stat_sum(bdi, BDI_WRITEBACK);
 		} else {
 			bdi_nr_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE);
-			bdi_nr_writeback = bdi_stat(bdi, BDI_WRITEBACK);
+			bdi_dirty = bdi_nr_reclaimable +
+				    bdi_stat(bdi, BDI_WRITEBACK);
 		}
 
 		/*
@@ -619,9 +621,8 @@ static void balance_dirty_pages(struct address_space *mapping,
 		 * bdi or process from holding back light ones; The latter is
 		 * the last resort safeguard.
 		 */
-		dirty_exceeded =
-			(bdi_nr_reclaimable + bdi_nr_writeback > bdi_thresh)
-			|| (nr_dirty > dirty_thresh);
+		dirty_exceeded = (bdi_dirty > bdi_thresh) ||
+				 (nr_dirty > dirty_thresh);
 
 		if (!dirty_exceeded)
 			break;
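After the patch, the throttling decision reduces to two comparisons: the bdi's own dirty total against its share of the limit, and the system-wide total against the global limit. A toy sketch under assumed values (the variables below are stand-ins; in the kernel they come from bdi_stat()/bdi_stat_sum() and global_dirty_limits()):

    #include <stdio.h>

    int main(void)
    {
            /* Assumed counter snapshots, not real kernel state. */
            unsigned long bdi_dirty  = 300,  bdi_thresh   = 250;
            unsigned long nr_dirty   = 1400, dirty_thresh = 2000;

            /* A bdi is throttled if either its own dirty total exceeds
             * its per-bdi threshold, or the system-wide total exceeds
             * the global limit (the last-resort safeguard). */
            int dirty_exceeded = (bdi_dirty > bdi_thresh) ||
                                 (nr_dirty > dirty_thresh);

            printf("dirty_exceeded = %d\n", dirty_exceeded);
            return 0;
    }

Here the per-bdi check fires (300 > 250) even though the global total is under its limit, so the writer keeps throttling, preventing a heavy bdi from holding back light ones.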