Diffstat (limited to 'mm/page-writeback.c')

 mm/page-writeback.c | 47 ++++++++++++++++++++++-------------------------
 1 file changed, 22 insertions(+), 25 deletions(-)
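This change moves the file-writeback counters (NR_FILE_DIRTY, NR_UNSTABLE_NFS, NR_WRITEBACK) from per-zone to per-node accounting: readers switch from zone_page_state()/global_page_state() to node_page_state()/global_node_page_state(), and each writer now pairs the node-level update with an update of a new zone-level counter, NR_ZONE_WRITE_PENDING, so zone-local code keeps an approximate count of pages with writes outstanding.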
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index f7c0fb993fb9..f97591d9fa00 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -498,20 +498,12 @@ static unsigned long node_dirty_limit(struct pglist_data *pgdat)
  */
 bool node_dirty_ok(struct pglist_data *pgdat)
 {
-	int z;
 	unsigned long limit = node_dirty_limit(pgdat);
 	unsigned long nr_pages = 0;
 
-	for (z = 0; z < MAX_NR_ZONES; z++) {
-		struct zone *zone = pgdat->node_zones + z;
-
-		if (!populated_zone(zone))
-			continue;
-
-		nr_pages += zone_page_state(zone, NR_FILE_DIRTY);
-		nr_pages += zone_page_state(zone, NR_UNSTABLE_NFS);
-		nr_pages += zone_page_state(zone, NR_WRITEBACK);
-	}
+	nr_pages += node_page_state(pgdat, NR_FILE_DIRTY);
+	nr_pages += node_page_state(pgdat, NR_UNSTABLE_NFS);
+	nr_pages += node_page_state(pgdat, NR_WRITEBACK);
 
 	return nr_pages <= limit;
 }
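The zone loop in node_dirty_ok() can go away because the node-level counter is exactly the sum the loop computed by hand. A minimal sketch of the equivalence, mirroring the deleted code (illustrative only; sum_zone_states is not a kernel function):

    #include <linux/mmzone.h>
    #include <linux/vmstat.h>

    /* What the old code computed, per counter: */
    static unsigned long sum_zone_states(struct pglist_data *pgdat,
                                         enum zone_stat_item item)
    {
            unsigned long nr = 0;
            int z;

            for (z = 0; z < MAX_NR_ZONES; z++) {
                    struct zone *zone = pgdat->node_zones + z;

                    if (populated_zone(zone))
                            nr += zone_page_state(zone, item);
            }
            return nr;
    }

    /* What the new code reads in one step: node_page_state(pgdat, item). */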
@@ -1601,10 +1593,10 @@ static void balance_dirty_pages(struct address_space *mapping,
 		 * written to the server's write cache, but has not yet
 		 * been flushed to permanent storage.
 		 */
-		nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
-					global_page_state(NR_UNSTABLE_NFS);
+		nr_reclaimable = global_node_page_state(NR_FILE_DIRTY) +
+					global_node_page_state(NR_UNSTABLE_NFS);
 		gdtc->avail = global_dirtyable_memory();
-		gdtc->dirty = nr_reclaimable + global_page_state(NR_WRITEBACK);
+		gdtc->dirty = nr_reclaimable + global_node_page_state(NR_WRITEBACK);
 
 		domain_dirty_limits(gdtc);
 
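The arithmetic in balance_dirty_pages() is unchanged; only the counter source moves. For reference, with the names from the hunk above:

    nr_reclaimable = NR_FILE_DIRTY + NR_UNSTABLE_NFS;  /* cleanable by writeback */
    gdtc->dirty    = nr_reclaimable + NR_WRITEBACK;    /* all pages with I/O pending */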
@@ -1941,8 +1933,8 @@ bool wb_over_bg_thresh(struct bdi_writeback *wb)
 	 * as we're trying to decide whether to put more under writeback.
 	 */
 	gdtc->avail = global_dirtyable_memory();
-	gdtc->dirty = global_page_state(NR_FILE_DIRTY) +
-		      global_page_state(NR_UNSTABLE_NFS);
+	gdtc->dirty = global_node_page_state(NR_FILE_DIRTY) +
+		      global_node_page_state(NR_UNSTABLE_NFS);
 	domain_dirty_limits(gdtc);
 
 	if (gdtc->dirty > gdtc->bg_thresh)
@@ -1986,8 +1978,8 @@ void throttle_vm_writeout(gfp_t gfp_mask)
 		 */
 		dirty_thresh += dirty_thresh / 10;	/* wheeee... */
 
-		if (global_page_state(NR_UNSTABLE_NFS) +
-			global_page_state(NR_WRITEBACK) <= dirty_thresh)
+		if (global_node_page_state(NR_UNSTABLE_NFS) +
+			global_node_page_state(NR_WRITEBACK) <= dirty_thresh)
 			break;
 		congestion_wait(BLK_RW_ASYNC, HZ/10);
 
@@ -2015,8 +2007,8 @@ int dirty_writeback_centisecs_handler(struct ctl_table *table, int write,
 void laptop_mode_timer_fn(unsigned long data)
 {
 	struct request_queue *q = (struct request_queue *)data;
-	int nr_pages = global_page_state(NR_FILE_DIRTY) +
-		global_page_state(NR_UNSTABLE_NFS);
+	int nr_pages = global_node_page_state(NR_FILE_DIRTY) +
+		global_node_page_state(NR_UNSTABLE_NFS);
 	struct bdi_writeback *wb;
 
 	/*
@@ -2467,7 +2459,8 @@ void account_page_dirtied(struct page *page, struct address_space *mapping)
 		wb = inode_to_wb(inode);
 
 		mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_DIRTY);
-		__inc_zone_page_state(page, NR_FILE_DIRTY);
+		__inc_node_page_state(page, NR_FILE_DIRTY);
+		__inc_zone_page_state(page, NR_ZONE_WRITE_PENDING);
 		__inc_zone_page_state(page, NR_DIRTIED);
 		__inc_wb_stat(wb, WB_RECLAIMABLE);
 		__inc_wb_stat(wb, WB_DIRTIED);
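The two-counter pattern introduced here repeats at every accounting site below. A hypothetical helper (not in the patch, which open-codes the pattern at each call site; account_write_pending_inc is an illustrative name) shows the shape:

    #include <linux/mm.h>
    #include <linux/vmstat.h>

    /*
     * Dirty/writeback totals now live in node counters, while
     * NR_ZONE_WRITE_PENDING keeps a per-zone shadow for zone-local
     * heuristics. The __-prefixed variants assume the caller has
     * already disabled interrupts, as account_page_dirtied() has here.
     */
    static inline void account_write_pending_inc(struct page *page,
                                                 enum node_stat_item item)
    {
            __inc_node_page_state(page, item);	/* e.g. NR_FILE_DIRTY */
            __inc_zone_page_state(page, NR_ZONE_WRITE_PENDING);
    }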
@@ -2488,7 +2481,8 @@ void account_page_cleaned(struct page *page, struct address_space *mapping,
 {
 	if (mapping_cap_account_dirty(mapping)) {
 		mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_DIRTY);
-		dec_zone_page_state(page, NR_FILE_DIRTY);
+		dec_node_page_state(page, NR_FILE_DIRTY);
+		dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
 		dec_wb_stat(wb, WB_RECLAIMABLE);
 		task_io_account_cancelled_write(PAGE_SIZE);
 	}
@@ -2744,7 +2738,8 @@ int clear_page_dirty_for_io(struct page *page)
 		wb = unlocked_inode_to_wb_begin(inode, &locked);
 		if (TestClearPageDirty(page)) {
 			mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_DIRTY);
-			dec_zone_page_state(page, NR_FILE_DIRTY);
+			dec_node_page_state(page, NR_FILE_DIRTY);
+			dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
 			dec_wb_stat(wb, WB_RECLAIMABLE);
 			ret = 1;
 		}
@@ -2790,7 +2785,8 @@ int test_clear_page_writeback(struct page *page)
 	}
 	if (ret) {
 		mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_WRITEBACK);
-		dec_zone_page_state(page, NR_WRITEBACK);
+		dec_node_page_state(page, NR_WRITEBACK);
+		dec_zone_page_state(page, NR_ZONE_WRITE_PENDING);
 		inc_zone_page_state(page, NR_WRITTEN);
 	}
 	unlock_page_memcg(page);
@@ -2844,7 +2840,8 @@ int __test_set_page_writeback(struct page *page, bool keep_write)
 	}
 	if (!ret) {
 		mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_WRITEBACK);
-		inc_zone_page_state(page, NR_WRITEBACK);
+		inc_node_page_state(page, NR_WRITEBACK);
+		inc_zone_page_state(page, NR_ZONE_WRITE_PENDING);
 	}
 	unlock_page_memcg(page);
 	return ret;
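Taken together, the accounting sites preserve a simple pairing (an observation from the hunks, not a claim made in the patch text): every inc/dec of node-level NR_FILE_DIRTY or NR_WRITEBACK is matched by a same-direction update of zone-level NR_ZONE_WRITE_PENDING, and clear_page_dirty_for_io() drops the dirty contribution just before __test_set_page_writeback() raises the writeback one. So at any instant NR_ZONE_WRITE_PENDING approximates the number of pages in that zone that are dirty or under writeback.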