aboutsummaryrefslogtreecommitdiffstats
path: root/mm/vmscan.c
diff options
context:
space:
mode:
authorChristoph Lameter <clameter@sgi.com>2006-06-30 04:55:37 -0400
committerLinus Torvalds <torvalds@g5.osdl.org>2006-06-30 14:25:35 -0400
commit34aa1330f9b3c5783d269851d467326525207422 (patch)
treea47db4fa53527ea937dee9e763267ab21865ce11 /mm/vmscan.c
parentf3dbd34460ff54962d3e3244b6bcb7f5295356e6 (diff)
[PATCH] zoned vm counters: zone_reclaim: remove /proc/sys/vm/zone_reclaim_interval
The zone_reclaim_interval was necessary because we were not able to determine how many unmapped pages exist in a zone. Therefore we had to scan in intervals to figure out if any pages were unmapped. With the zoned counters and NR_ANON_PAGES we now know the number of pagecache pages and the number of mapped pages in a zone. So we can simply skip the reclaim if there is an insufficient number of unmapped pages. We use SWAP_CLUSTER_MAX as the boundary. Drop all support for /proc/sys/vm/zone_reclaim_interval. Signed-off-by: Christoph Lameter <clameter@sgi.com> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--mm/vmscan.c31
1 files changed, 10 insertions, 21 deletions
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 2f0390161c0e..0960846d649f 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1518,11 +1518,6 @@ int zone_reclaim_mode __read_mostly;
1518#define RECLAIM_SLAB (1<<3) /* Do a global slab shrink if the zone is out of memory */ 1518#define RECLAIM_SLAB (1<<3) /* Do a global slab shrink if the zone is out of memory */
1519 1519
1520/* 1520/*
1521 * Minimum time between zone reclaim scans
1522 */
1523int zone_reclaim_interval __read_mostly = 30*HZ;
1524
1525/*
1526 * Priority for ZONE_RECLAIM. This determines the fraction of pages 1521 * Priority for ZONE_RECLAIM. This determines the fraction of pages
1527 * of a node considered for each zone_reclaim. 4 scans 1/16th of 1522 * of a node considered for each zone_reclaim. 4 scans 1/16th of
1528 * a zone. 1523 * a zone.
@@ -1587,16 +1582,6 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
1587 1582
1588 p->reclaim_state = NULL; 1583 p->reclaim_state = NULL;
1589 current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE); 1584 current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE);
1590
1591 if (nr_reclaimed == 0) {
1592 /*
1593 * We were unable to reclaim enough pages to stay on node. We
1594 * now allow off node accesses for a certain time period before
1595 * trying again to reclaim pages from the local zone.
1596 */
1597 zone->last_unsuccessful_zone_reclaim = jiffies;
1598 }
1599
1600 return nr_reclaimed >= nr_pages; 1585 return nr_reclaimed >= nr_pages;
1601} 1586}
1602 1587
@@ -1606,13 +1591,17 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
1606 int node_id; 1591 int node_id;
1607 1592
1608 /* 1593 /*
1609 * Do not reclaim if there was a recent unsuccessful attempt at zone 1594 * Do not reclaim if there are not enough reclaimable pages in this
1610 * reclaim. In that case we let allocations go off node for the 1595 * zone that would satisfy this allocation.
1611 * zone_reclaim_interval. Otherwise we would scan for each off-node 1596 *
1612 * page allocation. 1597 * All unmapped pagecache pages are reclaimable.
1598 *
1599 * Both counters may be temporarily off a bit so we use
1600 * SWAP_CLUSTER_MAX as the boundary. It may also be good to
1601 * leave a few frequently used unmapped pagecache pages around.
1613 */ 1602 */
1614 if (time_before(jiffies, 1603 if (zone_page_state(zone, NR_FILE_PAGES) -
1615 zone->last_unsuccessful_zone_reclaim + zone_reclaim_interval)) 1604 zone_page_state(zone, NR_FILE_MAPPED) < SWAP_CLUSTER_MAX)
1616 return 0; 1605 return 0;
1617 1606
1618 /* 1607 /*