-rw-r--r--	Documentation/sysctl/vm.txt	12
-rw-r--r--	mm/vmscan.c	52
2 files changed, 53 insertions(+), 11 deletions(-)
diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt
index 0ea5adbc5b16..c4de6359d440 100644
--- a/Documentation/sysctl/vm.txt
+++ b/Documentation/sysctl/vm.txt
@@ -315,10 +315,14 @@ min_unmapped_ratio:
 
 This is available only on NUMA kernels.
 
-A percentage of the total pages in each zone. Zone reclaim will only
-occur if more than this percentage of pages are file backed and unmapped.
-This is to insure that a minimal amount of local pages is still available for
-file I/O even if the node is overallocated.
+This is a percentage of the total pages in each zone. Zone reclaim will
+only occur if more than this percentage of pages are in a state that
+zone_reclaim_mode allows to be reclaimed.
+
+If zone_reclaim_mode has the value 4 OR'd, then the percentage is compared
+against all file-backed unmapped pages including swapcache pages and tmpfs
+files. Otherwise, only unmapped pages backed by normal files but not tmpfs
+files and similar are considered.
 
 The default is 1 percent.
 
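Not part of this patch, for context: the ratio is consumed as an absolute
per-zone page count, zone->min_unmapped_pages, which the vmscan.c hunks
below compare against. A minimal sketch of the conversion, modelled on the
sysctl handler in mm/page_alloc.c of kernels from this era (the exact
signature and the use of present_pages vary between kernel versions):

int sysctl_min_unmapped_ratio_sysctl_handler(ctl_table *table, int write,
	struct file *file, void __user *buffer, size_t *length, loff_t *ppos)
{
	struct zone *zone;
	int rc;

	rc = proc_dointvec_minmax(table, write, file, buffer, length, ppos);
	if (rc)
		return rc;

	/* Convert the percentage into an absolute per-zone page count */
	for_each_zone(zone)
		zone->min_unmapped_pages = (zone->present_pages *
				sysctl_min_unmapped_ratio) / 100;
	return 0;
}

At the default of 1 percent, a zone of 1,000,000 pages would keep
min_unmapped_pages at 10,000.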
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 057e44b97aa1..79a98d98ed33 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2356,6 +2356,48 @@ int sysctl_min_unmapped_ratio = 1;
  */
 int sysctl_min_slab_ratio = 5;
 
+static inline unsigned long zone_unmapped_file_pages(struct zone *zone)
+{
+	unsigned long file_mapped = zone_page_state(zone, NR_FILE_MAPPED);
+	unsigned long file_lru = zone_page_state(zone, NR_INACTIVE_FILE) +
+		zone_page_state(zone, NR_ACTIVE_FILE);
+
+	/*
+	 * It's possible for there to be more file mapped pages than
+	 * accounted for by the pages on the file LRU lists because
+	 * tmpfs pages accounted for as ANON can also be FILE_MAPPED
+	 */
+	return (file_lru > file_mapped) ? (file_lru - file_mapped) : 0;
+}
+
+/* Work out how many page cache pages we can reclaim in this reclaim_mode */
+static long zone_pagecache_reclaimable(struct zone *zone)
+{
+	long nr_pagecache_reclaimable;
+	long delta = 0;
+
+	/*
+	 * If RECLAIM_SWAP is set, then all file pages are considered
+	 * potentially reclaimable. Otherwise, we have to worry about
+	 * pages like swapcache and zone_unmapped_file_pages() provides
+	 * a better estimate
+	 */
+	if (zone_reclaim_mode & RECLAIM_SWAP)
+		nr_pagecache_reclaimable = zone_page_state(zone, NR_FILE_PAGES);
+	else
+		nr_pagecache_reclaimable = zone_unmapped_file_pages(zone);
+
+	/* If we can't clean pages, remove dirty pages from consideration */
+	if (!(zone_reclaim_mode & RECLAIM_WRITE))
+		delta += zone_page_state(zone, NR_FILE_DIRTY);
+
+	/* Watch for any possible underflows due to delta */
+	if (unlikely(delta > nr_pagecache_reclaimable))
+		delta = nr_pagecache_reclaimable;
+
+	return nr_pagecache_reclaimable - delta;
+}
+
 /*
  * Try to free up some pages from this zone through reclaim.
  */
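Not part of this patch: a self-contained userspace sketch, with made-up
per-zone counters, showing how the reclaim_mode bits change the estimate
zone_pagecache_reclaimable() produces. The RECLAIM_WRITE and RECLAIM_SWAP
values mirror their definitions in mm/vmscan.c; everything else here is
hypothetical.

#include <stdio.h>

#define RECLAIM_WRITE	(1<<1)	/* Writeout pages during reclaim */
#define RECLAIM_SWAP	(1<<2)	/* Swap pages out during reclaim */

/* Hypothetical per-zone counters, in pages */
static const long nr_file_pages = 1000;		/* NR_FILE_PAGES */
static const long nr_file_dirty = 300;		/* NR_FILE_DIRTY */
static const long nr_unmapped_file = 400;	/* zone_unmapped_file_pages() */

/* Same logic as zone_pagecache_reclaimable() above */
static long pagecache_reclaimable(int reclaim_mode)
{
	long nr = (reclaim_mode & RECLAIM_SWAP) ? nr_file_pages
						: nr_unmapped_file;
	long delta = 0;

	/* Dirty pages only count if reclaim may write them out */
	if (!(reclaim_mode & RECLAIM_WRITE))
		delta += nr_file_dirty;

	if (delta > nr)
		delta = nr;
	return nr - delta;
}

int main(void)
{
	printf("mode 0:     %ld\n", pagecache_reclaimable(0));
	printf("swap only:  %ld\n", pagecache_reclaimable(RECLAIM_SWAP));
	printf("swap|write: %ld\n",
	       pagecache_reclaimable(RECLAIM_SWAP | RECLAIM_WRITE));
	return 0;
}

This prints 100, 700 and 1000 respectively: with neither bit set only clean
unmapped file pages count; OR'ing in 4 (RECLAIM_SWAP) also counts swapcache
and tmpfs pages, as the vm.txt hunk above documents; adding RECLAIM_WRITE
puts dirty pages back into the estimate.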
@@ -2390,9 +2432,7 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 	reclaim_state.reclaimed_slab = 0;
 	p->reclaim_state = &reclaim_state;
 
-	if (zone_page_state(zone, NR_FILE_PAGES) -
-		zone_page_state(zone, NR_FILE_MAPPED) >
-		zone->min_unmapped_pages) {
+	if (zone_pagecache_reclaimable(zone) > zone->min_unmapped_pages) {
 		/*
 		 * Free memory by calling shrink zone with increasing
 		 * priorities until we have enough memory freed.
@@ -2450,10 +2490,8 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 	 * if less than a specified percentage of the zone is used by
 	 * unmapped file backed pages.
 	 */
-	if (zone_page_state(zone, NR_FILE_PAGES) -
-	    zone_page_state(zone, NR_FILE_MAPPED) <= zone->min_unmapped_pages
-	    && zone_page_state(zone, NR_SLAB_RECLAIMABLE)
-		<= zone->min_slab_pages)
+	if (zone_pagecache_reclaimable(zone) <= zone->min_unmapped_pages &&
+	    zone_page_state(zone, NR_SLAB_RECLAIMABLE) <= zone->min_slab_pages)
 		return 0;
 
 	if (zone_is_all_unreclaimable(zone))
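
Not part of this patch: a worked example, with hypothetical numbers, of why
zone_unmapped_file_pages() guards the subtraction. Mapped tmpfs pages are
counted in NR_FILE_MAPPED but live on the anon LRU lists, so NR_FILE_MAPPED
can exceed the number of pages on the file LRUs and a naive subtraction
would wrap:

#include <stdio.h>

int main(void)
{
	unsigned long file_lru = 500;		/* NR_INACTIVE_FILE + NR_ACTIVE_FILE */
	unsigned long file_mapped = 800;	/* NR_FILE_MAPPED, inflated by mapped tmpfs */

	/* Unsigned subtraction wraps to a huge bogus page count */
	printf("naive:   %lu\n", file_lru - file_mapped);

	/* The guarded form used by the patch clamps to zero */
	printf("guarded: %lu\n",
	       file_lru > file_mapped ? file_lru - file_mapped : 0);
	return 0;
}

The old NR_FILE_PAGES - NR_FILE_MAPPED test that the last two hunks remove
had the related flaw of counting tmpfs and swapcache pages as reclaimable
even when zone_reclaim_mode would never allow them to be reclaimed, which is
exactly what zone_pagecache_reclaimable() now corrects for.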