diff options
author | Sonny Rao <sonnyrao@chromium.org> | 2012-12-20 18:05:07 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-12-20 20:40:18 -0500 |
commit | c8b74c2f6604923de91f8aa6539f8bb934736754 (patch) | |
tree | 2aa7cefdda54d71d6a8b9387914fb475514a1bd0 /mm | |
parent | 010fc29a45a2e8dbc08bf45ef80b8622619aaae0 (diff) |
mm: fix calculation of dirtyable memory
The system uses global_dirtyable_memory() to calculate the number of
dirtyable pages (pages that can be allocated to the page cache). A bug
causes an underflow thus making the page count look like a big unsigned
number. This in turn confuses the dirty writeback throttling to
aggressively write back pages as they become dirty (usually 1 page at a
time). This generally only affects systems with highmem because the
underflowed count gets subtracted from the global count of dirtyable
memory.
The problem was introduced with v3.2-4896-gab8fabd
Fix is to ensure we don't get an underflowed total of either highmem or
global dirtyable memory.
Signed-off-by: Sonny Rao <sonnyrao@chromium.org>
Signed-off-by: Puneet Kumar <puneetster@chromium.org>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Tested-by: Damien Wyart <damien.wyart@free.fr>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/page-writeback.c | 25 |
1 file changed, 20 insertions, 5 deletions
diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 6f4271224493..0713bfbf0954 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c | |||
@@ -201,6 +201,18 @@ static unsigned long highmem_dirtyable_memory(unsigned long total) | |||
201 | zone_reclaimable_pages(z) - z->dirty_balance_reserve; | 201 | zone_reclaimable_pages(z) - z->dirty_balance_reserve; |
202 | } | 202 | } |
203 | /* | 203 | /* |
204 | * Unreclaimable memory (kernel memory or anonymous memory | ||
205 | * without swap) can bring down the dirtyable pages below | ||
206 | * the zone's dirty balance reserve and the above calculation | ||
207 | * will underflow. However we still want to add in nodes | ||
208 | * which are below threshold (negative values) to get a more | ||
209 | * accurate calculation but make sure that the total never | ||
210 | * underflows. | ||
211 | */ | ||
212 | if ((long)x < 0) | ||
213 | x = 0; | ||
214 | |||
215 | /* | ||
204 | * Make sure that the number of highmem pages is never larger | 216 | * Make sure that the number of highmem pages is never larger |
205 | * than the number of the total dirtyable memory. This can only | 217 | * than the number of the total dirtyable memory. This can only |
206 | * occur in very strange VM situations but we want to make sure | 218 | * occur in very strange VM situations but we want to make sure |
@@ -222,8 +234,8 @@ static unsigned long global_dirtyable_memory(void) | |||
222 | { | 234 | { |
223 | unsigned long x; | 235 | unsigned long x; |
224 | 236 | ||
225 | x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages() - | 237 | x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages(); |
226 | dirty_balance_reserve; | 238 | x -= min(x, dirty_balance_reserve); |
227 | 239 | ||
228 | if (!vm_highmem_is_dirtyable) | 240 | if (!vm_highmem_is_dirtyable) |
229 | x -= highmem_dirtyable_memory(x); | 241 | x -= highmem_dirtyable_memory(x); |
@@ -290,9 +302,12 @@ static unsigned long zone_dirtyable_memory(struct zone *zone) | |||
290 | * highmem zone can hold its share of dirty pages, so we don't | 302 | * highmem zone can hold its share of dirty pages, so we don't |
291 | * care about vm_highmem_is_dirtyable here. | 303 | * care about vm_highmem_is_dirtyable here. |
292 | */ | 304 | */ |
293 | return zone_page_state(zone, NR_FREE_PAGES) + | 305 | unsigned long nr_pages = zone_page_state(zone, NR_FREE_PAGES) + |
294 | zone_reclaimable_pages(zone) - | 306 | zone_reclaimable_pages(zone); |
295 | zone->dirty_balance_reserve; | 307 | |
308 | /* don't allow this to underflow */ | ||
309 | nr_pages -= min(nr_pages, zone->dirty_balance_reserve); | ||
310 | return nr_pages; | ||
296 | } | 311 | } |
297 | 312 | ||
298 | /** | 313 | /** |