Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--  mm/vmscan.c  31
1 file changed, 10 insertions(+), 21 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 2f0390161c0e..0960846d649f 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1518,11 +1518,6 @@ int zone_reclaim_mode __read_mostly;
 #define RECLAIM_SLAB (1<<3)	/* Do a global slab shrink if the zone is out of memory */
 
 /*
- * Mininum time between zone reclaim scans
- */
-int zone_reclaim_interval __read_mostly = 30*HZ;
-
-/*
  * Priority for ZONE_RECLAIM. This determines the fraction of pages
  * of a node considered for each zone_reclaim. 4 scans 1/16th of
  * a zone.
@@ -1587,16 +1582,6 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 
 	p->reclaim_state = NULL;
 	current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE);
-
-	if (nr_reclaimed == 0) {
-		/*
-		 * We were unable to reclaim enough pages to stay on node. We
-		 * now allow off node accesses for a certain time period before
-		 * trying again to reclaim pages from the local zone.
-		 */
-		zone->last_unsuccessful_zone_reclaim = jiffies;
-	}
-
 	return nr_reclaimed >= nr_pages;
 }
 
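The hunk above deletes the failure-time bookkeeping, and the first hunk deleted the zone_reclaim_interval knob it fed. Together they formed a simple backoff: after a reclaim pass freed nothing, every allocation for the next 30 seconds went straight off-node. Below is a minimal userspace sketch of that removed mechanism; HZ, jiffies, and the wrap-safe time_before() comparison mirror their kernel counterparts, while the variable layout and the reclaim_backoff_active() helper are invented for illustration.

#include <stdbool.h>

/* Assumed tick rate; the kernel's HZ is config-dependent (100/250/1000). */
#define HZ 250UL

/* Wrap-safe "is a earlier than b" comparison, as in the kernel's jiffies.h. */
#define time_before(a, b) ((long)((a) - (b)) < 0)

static unsigned long jiffies;                          /* monotonic tick counter */
static unsigned long zone_reclaim_interval = 30 * HZ;  /* the removed tunable */
static unsigned long last_unsuccessful_zone_reclaim;   /* stamped on reclaim failure */

/* Hypothetical helper: true while the post-failure backoff window is open. */
static bool reclaim_backoff_active(void)
{
	return time_before(jiffies,
			   last_unsuccessful_zone_reclaim + zone_reclaim_interval);
}

The weakness the patch targets is visible here: the window is purely time-based, so it both delays reclaim when pages become reclaimable early in the window and permits pointless rescans of zones that stay full of mapped pages.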
@@ -1606,13 +1591,17 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 	int node_id;
 
 	/*
-	 * Do not reclaim if there was a recent unsuccessful attempt at zone
-	 * reclaim. In that case we let allocations go off node for the
-	 * zone_reclaim_interval. Otherwise we would scan for each off-node
-	 * page allocation.
+	 * Do not reclaim if there are not enough reclaimable pages in this
+	 * zone that would satify this allocations.
+	 *
+	 * All unmapped pagecache pages are reclaimable.
+	 *
+	 * Both counters may be temporarily off a bit so we use
+	 * SWAP_CLUSTER_MAX as the boundary. It may also be good to
+	 * leave a few frequently used unmapped pagecache pages around.
 	 */
-	if (time_before(jiffies,
-		zone->last_unsuccessful_zone_reclaim + zone_reclaim_interval))
+	if (zone_page_state(zone, NR_FILE_PAGES) -
+	    zone_page_state(zone, NR_FILE_MAPPED) < SWAP_CLUSTER_MAX)
 		return 0;
 
 	/*
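The replacement gate asks a direct question instead of consulting a timer: is there at least SWAP_CLUSTER_MAX worth of unmapped pagecache (NR_FILE_PAGES minus NR_FILE_MAPPED) in the zone? A minimal sketch of that arithmetic follows, with a hypothetical struct and helper standing in for struct zone and zone_page_state(); SWAP_CLUSTER_MAX is 32 in kernels of this era.

#include <stdbool.h>

#define SWAP_CLUSTER_MAX 32UL	/* matches include/linux/swap.h of this era */

/* Hypothetical stand-in for the two vm counters read via zone_page_state(). */
struct zone_counters {
	unsigned long nr_file_pages;	/* all pagecache pages in the zone */
	unsigned long nr_file_mapped;	/* pagecache pages mapped into page tables */
};

/*
 * True when zone reclaim is worth attempting: only unmapped pagecache
 * counts as easily reclaimable, and since both counters may be slightly
 * stale, demand at least SWAP_CLUSTER_MAX pages of slack.
 */
static bool zone_reclaim_worthwhile(const struct zone_counters *zc)
{
	return zc->nr_file_pages - zc->nr_file_mapped >= SWAP_CLUSTER_MAX;
}

/*
 * Example: 1000 pagecache pages of which 990 are mapped leaves 10
 * unmapped pages, below SWAP_CLUSTER_MAX, so the scan is skipped.
 */

Reading two per-zone counters on every allocation attempt is far cheaper than the LRU scan it can avoid, and unlike the timer it never blocks reclaim while reclaimable pages actually exist.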