author		Johannes Weiner <hannes@cmpxchg.org>	2016-01-14 18:20:15 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-01-14 19:00:49 -0500
commit		a8d0143730d7b42c9fe6d1435d92ecce6863a62a (patch)
tree		349dcd7f40a1b1d25702180493b93b720242a9bd /mm/page_alloc.c
parent		c20cd45eb01748f0fba77a504f956b000df4ea73 (diff)
mm: page_alloc: generalize the dirty balance reserve
The dirty balance reserve that dirty throttling has to consider is
merely memory not available to userspace allocations. There is nothing
writeback-specific about it. Generalize the name so that it's reusable
outside of that context.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Acked-by: Michal Hocko <mhocko@suse.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
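[For context: the writeback side that consumes this reserve lives in
mm/page-writeback.c and is updated by the same commit, but falls outside
this diffstat, which is limited to mm/page_alloc.c. A rough sketch of
the consumer after the rename -- details abbreviated, so treat the body
as illustrative rather than verbatim:

/*
 * Sketch of the writeback-side consumer (mm/page-writeback.c).
 * The generalized totalreserve_pages stands in for the removed
 * dirty_balance_reserve; surrounding details are abbreviated.
 */
static unsigned long global_dirtyable_memory(void)
{
	unsigned long x;

	x = global_page_state(NR_FREE_PAGES);
	/*
	 * Reserves are not available to userspace allocations,
	 * so they must not count as dirtyable memory.
	 */
	x -= min(x, totalreserve_pages);

	x += global_page_state(NR_INACTIVE_FILE);
	x += global_page_state(NR_ACTIVE_FILE);

	return x + 1;	/* Ensure that we never return 0 */
}
]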
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c	21
1 file changed, 3 insertions(+), 18 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2a6fe377cafc..1e9a56065400 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -114,13 +114,6 @@ static DEFINE_SPINLOCK(managed_page_count_lock);
 unsigned long totalram_pages __read_mostly;
 unsigned long totalreserve_pages __read_mostly;
 unsigned long totalcma_pages __read_mostly;
-/*
- * When calculating the number of globally allowed dirty pages, there
- * is a certain number of per-zone reserves that should not be
- * considered dirtyable memory. This is the sum of those reserves
- * over all existing zones that contribute dirtyable memory.
- */
-unsigned long dirty_balance_reserve __read_mostly;
 
 int percpu_pagelist_fraction;
 gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
@@ -5942,20 +5935,12 @@ static void calculate_totalreserve_pages(void)
 
 			if (max > zone->managed_pages)
 				max = zone->managed_pages;
+
+			zone->totalreserve_pages = max;
+
 			reserve_pages += max;
-			/*
-			 * Lowmem reserves are not available to
-			 * GFP_HIGHUSER page cache allocations and
-			 * kswapd tries to balance zones to their high
-			 * watermark. As a result, neither should be
-			 * regarded as dirtyable memory, to prevent a
-			 * situation where reclaim has to clean pages
-			 * in order to balance the zones.
-			 */
-			zone->dirty_balance_reserve = max;
 		}
 	}
-	dirty_balance_reserve = reserve_pages;
 	totalreserve_pages = reserve_pages;
 }
 
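[The new per-zone store above (zone->totalreserve_pages = max) feeds the
per-zone analogue of the same calculation on the writeback side. A
sketch, again abbreviated from mm/page-writeback.c and illustrative
rather than verbatim:

/*
 * Per-zone dirtyable memory, sketched: the per-zone reserve is
 * subtracted the same way the global one is above.
 */
static unsigned long zone_dirtyable_memory(struct zone *zone)
{
	unsigned long nr_pages;

	nr_pages = zone_page_state(zone, NR_FREE_PAGES);
	/* zone->totalreserve_pages replaces zone->dirty_balance_reserve */
	nr_pages -= min(nr_pages, zone->totalreserve_pages);

	nr_pages += zone_page_state(zone, NR_INACTIVE_FILE);
	nr_pages += zone_page_state(zone, NR_ACTIVE_FILE);

	return nr_pages;
}
]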