 include/linux/mmzone.h |  6 ++++++
 include/linux/swap.h   |  1 +
 mm/page-writeback.c    |  5 +++--
 mm/page_alloc.c        | 19 +++++++++++++++++++
 4 files changed, 29 insertions(+), 2 deletions(-)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 3ac040f19369..ca6ca92418a6 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -317,6 +317,12 @@ struct zone {
 	 */
 	unsigned long lowmem_reserve[MAX_NR_ZONES];
 
+	/*
+	 * This is a per-zone reserve of pages that should not be
+	 * considered dirtyable memory.
+	 */
+	unsigned long dirty_balance_reserve;
+
 #ifdef CONFIG_NUMA
 	int node;
 	/*
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 1e22e126d2ac..06061a7f8e69 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -207,6 +207,7 @@ struct swap_list_t {
 /* linux/mm/page_alloc.c */
 extern unsigned long totalram_pages;
 extern unsigned long totalreserve_pages;
+extern unsigned long dirty_balance_reserve;
 extern unsigned int nr_free_buffer_pages(void);
 extern unsigned int nr_free_pagecache_pages(void);
 
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index c081bf62202b..9ab6de82d8e6 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -157,7 +157,7 @@ static unsigned long highmem_dirtyable_memory(unsigned long total)
 			&NODE_DATA(node)->node_zones[ZONE_HIGHMEM];
 
 		x += zone_page_state(z, NR_FREE_PAGES) +
-		     zone_reclaimable_pages(z);
+		     zone_reclaimable_pages(z) - z->dirty_balance_reserve;
 	}
 	/*
 	 * Make sure that the number of highmem pages is never larger
@@ -181,7 +181,8 @@ static unsigned long determine_dirtyable_memory(void)
 {
 	unsigned long x;
 
-	x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages();
+	x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages() -
+	    dirty_balance_reserve;
 
 	if (!vm_highmem_is_dirtyable)
 		x -= highmem_dirtyable_memory(x);
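
Note: the net effect of the two hunks above on the computed dirty threshold can be illustrated with a small user-space model. The page counts below are made up, and the factor of 20 stands in for the default vm.dirty_ratio; this is a sketch of the arithmetic, not kernel code.

#include <stdio.h>

/* Illustrative page counts; not real kernel state. */
static const unsigned long free_pages        =  50000;
static const unsigned long reclaimable_pages = 200000;
static const unsigned long reserve_pages     =  30000; /* dirty_balance_reserve */

int main(void)
{
	/* Before the patch: reserves were counted as dirtyable. */
	unsigned long before = free_pages + reclaimable_pages;
	/* After the patch: reserves are subtracted from the estimate. */
	unsigned long after = before - reserve_pages;

	/* Assuming a dirty_ratio of 20%, the dirty limit shrinks in
	 * proportion to the excluded reserves. */
	printf("dirty limit before: %lu pages\n", before * 20 / 100);
	printf("dirty limit after:  %lu pages\n", after * 20 / 100);
	return 0;
}
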
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 93baebcc06f3..2cb9eb71e282 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -97,6 +97,14 @@ EXPORT_SYMBOL(node_states);
 
 unsigned long totalram_pages __read_mostly;
 unsigned long totalreserve_pages __read_mostly;
+/*
+ * When calculating the number of globally allowed dirty pages, there
+ * is a certain number of per-zone reserves that should not be
+ * considered dirtyable memory.  This is the sum of those reserves
+ * over all existing zones that contribute dirtyable memory.
+ */
+unsigned long dirty_balance_reserve __read_mostly;
+
 int percpu_pagelist_fraction;
 gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
 
@@ -4822,8 +4830,19 @@ static void calculate_totalreserve_pages(void)
 			if (max > zone->present_pages)
 				max = zone->present_pages;
 			reserve_pages += max;
+			/*
+			 * Lowmem reserves are not available to
+			 * GFP_HIGHUSER page cache allocations and
+			 * kswapd tries to balance zones to their high
+			 * watermark.  As a result, neither should be
+			 * regarded as dirtyable memory, to prevent a
+			 * situation where reclaim has to clean pages
+			 * in order to balance the zones.
+			 */
+			zone->dirty_balance_reserve = max;
 		}
 	}
+	dirty_balance_reserve = reserve_pages;
 	totalreserve_pages = reserve_pages;
 }
 
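
Note: how the hunk above derives the reserve can also be modeled in user space. This is a minimal sketch under simplifying assumptions: struct zone is reduced to the fields this calculation reads, the high_wmark field stands in for high_wmark_pages(), and the zone values are invented for illustration.

#include <stdio.h>

#define MAX_NR_ZONES 2

/* Reduced model of struct zone: only the fields used here. */
struct zone {
	unsigned long lowmem_reserve[MAX_NR_ZONES];
	unsigned long high_wmark;	/* stands in for high_wmark_pages() */
	unsigned long present_pages;
	unsigned long dirty_balance_reserve;
};

static unsigned long dirty_balance_reserve;
static unsigned long totalreserve_pages;

static void calculate_totalreserve_pages(struct zone *zones, int nr)
{
	unsigned long reserve_pages = 0;

	for (int i = 0; i < nr; i++) {
		struct zone *zone = &zones[i];
		unsigned long max = 0;

		/* Find the largest lowmem_reserve entry for this zone. */
		for (int j = 0; j < MAX_NR_ZONES; j++)
			if (zone->lowmem_reserve[j] > max)
				max = zone->lowmem_reserve[j];

		/* kswapd balances the zone to its high watermark, so
		 * those pages are treated as reserved as well. */
		max += zone->high_wmark;

		if (max > zone->present_pages)
			max = zone->present_pages;
		reserve_pages += max;
		/* Per-zone share, read by highmem_dirtyable_memory(). */
		zone->dirty_balance_reserve = max;
	}
	/* Global sum, subtracted in determine_dirtyable_memory(). */
	dirty_balance_reserve = reserve_pages;
	totalreserve_pages = reserve_pages;
}

int main(void)
{
	struct zone zones[2] = {
		{ .lowmem_reserve = { 0, 784 }, .high_wmark = 128,
		  .present_pages = 4096 },
		{ .lowmem_reserve = { 0, 0 },   .high_wmark = 256,
		  .present_pages = 224000 },
	};

	calculate_totalreserve_pages(zones, 2);
	printf("dirty_balance_reserve = %lu pages\n", dirty_balance_reserve);
	printf("totalreserve_pages    = %lu pages\n", totalreserve_pages);
	return 0;
}
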