Diffstat (limited to 'mm/page-writeback.c')
-rw-r--r--	mm/page-writeback.c	4
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index bf050ab025b7..0b9c5cbe8eba 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -363,7 +363,7 @@ static unsigned long global_dirtyable_memory(void)
 {
 	unsigned long x;
 
-	x = global_page_state(NR_FREE_PAGES);
+	x = global_zone_page_state(NR_FREE_PAGES);
 	/*
 	 * Pages reserved for the kernel should not be considered
 	 * dirtyable, to prevent a situation where reclaim has to
@@ -1405,7 +1405,7 @@ void wb_update_bandwidth(struct bdi_writeback *wb, unsigned long start_time)
  * will look to see if it needs to start dirty throttling.
  *
  * If dirty_poll_interval is too low, big NUMA machines will call the expensive
- * global_page_state() too often. So scale it near-sqrt to the safety margin
+ * global_zone_page_state() too often. So scale it near-sqrt to the safety margin
  * (the number of pages we may dirty without exceeding the dirty limits).
  */
 static unsigned long dirty_poll_interval(unsigned long dirty,
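
The first hunk's comment (cut off at the hunk boundary) describes reserve accounting: pages held back for the kernel are subtracted from the free count before it is treated as dirtyable, clamped so the result cannot underflow. A minimal standalone sketch of that pattern follows; the names dirtyable_pages, free_pages, and reserve_pages are illustrative only, not taken from the kernel source.

/*
 * Standalone sketch (not kernel source) of the accounting pattern the
 * first hunk's comment describes: reserved pages are excluded from the
 * dirtyable total, and the subtraction is clamped at zero.
 */
#include <stdio.h>

static unsigned long dirtyable_pages(unsigned long free_pages,
				     unsigned long reserve_pages)
{
	/* Never subtract more reserved pages than are actually free. */
	unsigned long reserved = reserve_pages < free_pages ?
				 reserve_pages : free_pages;
	return free_pages - reserved;
}

int main(void)
{
	printf("%lu\n", dirtyable_pages(1000, 64));	/* 936 */
	printf("%lu\n", dirtyable_pages(32, 64));	/* 0, clamped */
	return 0;
}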
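
The "near-sqrt" scaling the second hunk's comment refers to can be done with integer arithmetic alone, since sqrt(x) is approximately 2^(log2(x)/2). Below is a minimal userspace sketch of such a poll-interval heuristic: poll roughly every sqrt(margin) dirtied pages, where margin is how many pages may still be dirtied before hitting the limit. It is illustrative only and not copied from mm/page-writeback.c.

/*
 * Sketch of near-sqrt poll-interval scaling: the wider the safety
 * margin, the less often the expensive global counter read is needed.
 */
#include <stdio.h>

static unsigned long ilog2(unsigned long x)
{
	unsigned long log = 0;

	while (x >>= 1)
		log++;
	return log;
}

static unsigned long poll_interval(unsigned long dirty, unsigned long thresh)
{
	if (thresh > dirty)
		return 1UL << (ilog2(thresh - dirty) >> 1);
	return 1;	/* at or over the limit: re-check on every page */
}

int main(void)
{
	/* 1M-page margin -> poll every 1024 pages; 100-page margin -> every 8. */
	printf("%lu\n", poll_interval(0, 1UL << 20));
	printf("%lu\n", poll_interval(900, 1000));
	return 0;
}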