 mm/page-writeback.c | 21 +++++++++++----------
 1 file changed, 11 insertions(+), 10 deletions(-)

diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 446bdf7b975b..5f3e1b46ace5 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -565,8 +565,9 @@ static void bdi_update_bandwidth(struct backing_dev_info *bdi,
 static void balance_dirty_pages(struct address_space *mapping,
 				unsigned long write_chunk)
 {
-	long nr_reclaimable, bdi_nr_reclaimable;
-	long nr_writeback, bdi_nr_writeback;
+	unsigned long nr_reclaimable, bdi_nr_reclaimable;
+	unsigned long nr_dirty;  /* = file_dirty + writeback + unstable_nfs */
+	unsigned long bdi_dirty;
 	unsigned long background_thresh;
 	unsigned long dirty_thresh;
 	unsigned long bdi_thresh;
@@ -579,7 +580,7 @@ static void balance_dirty_pages(struct address_space *mapping,
 	for (;;) {
 		nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
 					global_page_state(NR_UNSTABLE_NFS);
-		nr_writeback = global_page_state(NR_WRITEBACK);
+		nr_dirty = nr_reclaimable + global_page_state(NR_WRITEBACK);
 
 		global_dirty_limits(&background_thresh, &dirty_thresh);
 
@@ -588,8 +589,7 @@ static void balance_dirty_pages(struct address_space *mapping,
 		 * catch-up. This avoids (excessively) small writeouts
 		 * when the bdi limits are ramping up.
 		 */
-		if (nr_reclaimable + nr_writeback <=
-				(background_thresh + dirty_thresh) / 2)
+		if (nr_dirty <= (background_thresh + dirty_thresh) / 2)
 			break;
 
 		bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);
@@ -607,10 +607,12 @@ static void balance_dirty_pages(struct address_space *mapping,
 		 */
 		if (bdi_thresh < 2*bdi_stat_error(bdi)) {
 			bdi_nr_reclaimable = bdi_stat_sum(bdi, BDI_RECLAIMABLE);
-			bdi_nr_writeback = bdi_stat_sum(bdi, BDI_WRITEBACK);
+			bdi_dirty = bdi_nr_reclaimable +
+				    bdi_stat_sum(bdi, BDI_WRITEBACK);
 		} else {
 			bdi_nr_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE);
-			bdi_nr_writeback = bdi_stat(bdi, BDI_WRITEBACK);
+			bdi_dirty = bdi_nr_reclaimable +
+				    bdi_stat(bdi, BDI_WRITEBACK);
 		}
 
 		/*
@@ -619,9 +621,8 @@ static void balance_dirty_pages(struct address_space *mapping,
 		 * bdi or process from holding back light ones; The latter is
 		 * the last resort safeguard.
 		 */
-		dirty_exceeded =
-			(bdi_nr_reclaimable + bdi_nr_writeback > bdi_thresh)
-			|| (nr_reclaimable + nr_writeback > dirty_thresh);
+		dirty_exceeded = (bdi_dirty > bdi_thresh) ||
+				 (nr_dirty > dirty_thresh);
 
 		if (!dirty_exceeded)
 			break;