about summary refs log tree commit diff stats
path: root/mm/vmscan.c
diff options
context:
space:
mode:
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--  mm/vmscan.c  27
1 file changed, 14 insertions(+), 13 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index ff2ebe9458a3..5d4c4d02254d 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1503,10 +1503,6 @@ module_init(kswapd_init)
  *
  * If non-zero call zone_reclaim when the number of free pages falls below
  * the watermarks.
- *
- * In the future we may add flags to the mode. However, the page allocator
- * should only have to check that zone_reclaim_mode != 0 before calling
- * zone_reclaim().
  */
 int zone_reclaim_mode __read_mostly;
 
@@ -1524,6 +1520,12 @@ int zone_reclaim_mode __read_mostly;
 #define ZONE_RECLAIM_PRIORITY 4
 
 /*
+ * Percentage of pages in a zone that must be unmapped for zone_reclaim to
+ * occur.
+ */
+int sysctl_min_unmapped_ratio = 1;
+
+/*
  * Try to free up some pages from this zone through reclaim.
  */
 static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
@@ -1590,18 +1592,17 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 	int node_id;
 
 	/*
-	 * Do not reclaim if there are not enough reclaimable pages in this
-	 * zone that would satify this allocations.
+	 * Zone reclaim reclaims unmapped file backed pages.
 	 *
-	 * All unmapped pagecache pages are reclaimable.
-	 *
-	 * Both counters may be temporarily off a bit so we use
-	 * SWAP_CLUSTER_MAX as the boundary. It may also be good to
-	 * leave a few frequently used unmapped pagecache pages around.
+	 * A small portion of unmapped file backed pages is needed for
+	 * file I/O otherwise pages read by file I/O will be immediately
+	 * thrown out if the zone is overallocated. So we do not reclaim
+	 * if less than a specified percentage of the zone is used by
+	 * unmapped file backed pages.
 	 */
 	if (zone_page_state(zone, NR_FILE_PAGES) -
-	    zone_page_state(zone, NR_FILE_MAPPED) < SWAP_CLUSTER_MAX)
+	    zone_page_state(zone, NR_FILE_MAPPED) <= zone->min_unmapped_ratio)
 		return 0;
 
 	/*
 	 * Avoid concurrent zone reclaims, do not reclaim in a zone that does