Diffstat (limited to 'mm')
 mm/page-writeback.c |  5 +++--
 mm/page_alloc.c     | 19 +++++++++++++++++++
 2 files changed, 22 insertions(+), 2 deletions(-)
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index c081bf62202b..9ab6de82d8e6 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -157,7 +157,7 @@ static unsigned long highmem_dirtyable_memory(unsigned long total)
 			&NODE_DATA(node)->node_zones[ZONE_HIGHMEM];
 
 		x += zone_page_state(z, NR_FREE_PAGES) +
-		     zone_reclaimable_pages(z);
+		     zone_reclaimable_pages(z) - z->dirty_balance_reserve;
 	}
 	/*
 	 * Make sure that the number of highmem pages is never larger
@@ -181,7 +181,8 @@ static unsigned long determine_dirtyable_memory(void)
 {
 	unsigned long x;
 
-	x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages();
+	x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages() -
+	    dirty_balance_reserve;
 
 	if (!vm_highmem_is_dirtyable)
 		x -= highmem_dirtyable_memory(x);
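In effect, the writeback change shrinks the global dirtyable-memory estimate from "free + reclaimable" to "free + reclaimable - dirty_balance_reserve", so pages reserved away from the page cache no longer inflate the dirty limits. Below is a minimal standalone sketch of that arithmetic; the page counts are invented and the function only mirrors the shape of determine_dirtyable_memory() after the patch, it does not call any kernel API.

#include <stdio.h>

/* Hypothetical page counts, stand-ins for global_page_state(NR_FREE_PAGES),
 * global_reclaimable_pages() and the new global dirty_balance_reserve. */
static unsigned long free_pages = 50000;
static unsigned long reclaimable_pages = 200000;
static unsigned long dirty_balance_reserve = 12000;

/* Mirrors the patched determine_dirtyable_memory(): reserves that the
 * page cache can never consume are subtracted from dirtyable memory. */
static unsigned long determine_dirtyable_memory(void)
{
	return free_pages + reclaimable_pages - dirty_balance_reserve;
}

int main(void)
{
	/* With a 20% dirty ratio, the allowed dirty pages shrink by
	 * 20% of the reserve compared to the old calculation. */
	unsigned long dirtyable = determine_dirtyable_memory();

	printf("dirtyable: %lu pages, dirty limit at 20%%: %lu pages\n",
	       dirtyable, dirtyable * 20 / 100);
	return 0;
}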
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 93baebcc06f3..2cb9eb71e282 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -97,6 +97,14 @@ EXPORT_SYMBOL(node_states);
 
 unsigned long totalram_pages __read_mostly;
 unsigned long totalreserve_pages __read_mostly;
+/*
+ * When calculating the number of globally allowed dirty pages, there
+ * is a certain number of per-zone reserves that should not be
+ * considered dirtyable memory. This is the sum of those reserves
+ * over all existing zones that contribute dirtyable memory.
+ */
+unsigned long dirty_balance_reserve __read_mostly;
+
 int percpu_pagelist_fraction;
 gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
 
@@ -4822,8 +4830,19 @@ static void calculate_totalreserve_pages(void)
 			if (max > zone->present_pages)
 				max = zone->present_pages;
 			reserve_pages += max;
+			/*
+			 * Lowmem reserves are not available to
+			 * GFP_HIGHUSER page cache allocations and
+			 * kswapd tries to balance zones to their high
+			 * watermark. As a result, neither should be
+			 * regarded as dirtyable memory, to prevent a
+			 * situation where reclaim has to clean pages
+			 * in order to balance the zones.
+			 */
+			zone->dirty_balance_reserve = max;
 		}
 	}
+	dirty_balance_reserve = reserve_pages;
 	totalreserve_pages = reserve_pages;
 }
 
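On the allocator side, each zone's contribution is its largest lowmem reserve plus its high watermark, clamped to the pages actually present in the zone; the per-zone value is recorded in zone->dirty_balance_reserve and the sum becomes the global reserve. A simplified userspace sketch of that accumulation follows; struct zone here is a toy holding only the fields the loop touches (the real code walks every pgdat and derives the watermark via high_wmark_pages()), and all numbers are made up.

#include <stdio.h>

/* Toy zone with only the fields the calculation needs; values used
 * in main() are invented examples, not real kernel numbers. */
struct zone {
	unsigned long present_pages;
	unsigned long max_lowmem_reserve; /* max over zone->lowmem_reserve[] */
	unsigned long high_wmark;
	unsigned long dirty_balance_reserve;
};

static unsigned long dirty_balance_reserve;
static unsigned long totalreserve_pages;

/* Follows the shape of the patched calculate_totalreserve_pages():
 * clamp each zone's reserve to its present pages, remember it per
 * zone, and accumulate the global total. */
static void calculate_totalreserve_pages(struct zone *zones, int nr)
{
	unsigned long reserve_pages = 0;

	for (int i = 0; i < nr; i++) {
		unsigned long max = zones[i].max_lowmem_reserve +
				    zones[i].high_wmark;

		if (max > zones[i].present_pages)
			max = zones[i].present_pages;
		reserve_pages += max;
		zones[i].dirty_balance_reserve = max;
	}
	dirty_balance_reserve = reserve_pages;
	totalreserve_pages = reserve_pages;
}

int main(void)
{
	struct zone zones[2] = {
		{ .present_pages = 4096,   .max_lowmem_reserve = 256,
		  .high_wmark = 128 },
		{ .present_pages = 262144, .max_lowmem_reserve = 0,
		  .high_wmark = 1024 },
	};

	calculate_totalreserve_pages(zones, 2);
	printf("dirty_balance_reserve = %lu pages (totalreserve = %lu)\n",
	       dirty_balance_reserve, totalreserve_pages);
	return 0;
}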