author    Christoph Lameter <clameter@engr.sgi.com>    2006-02-01 06:05:32 -0500
committer Linus Torvalds <torvalds@g5.osdl.org>        2006-02-01 11:53:16 -0500
commit    a92f71263af9d0ab77c260f709c0c079656221aa (patch)
tree      93aa7bf968ba108cc893b0dcc4de36fbf3b733bf
parent    aa3f18b3391ac305baa01faead3fdf9147daf54b (diff)
[PATCH] zone_reclaim: partial scans instead of full scan
Instead of scanning all the pages in a zone, imitate real swap and scan only a portion of the pages, gradually scanning more if we do not free up enough pages. This avoids a zone suddenly losing all of its unused pagecache pages (we may, after all, access some of them again, so they deserve another chance), while still freeing up large chunks of memory if a zone contains only unused pagecache pages.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
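The core of the change is the new do/while loop in zone_reclaim(): sc.priority starts at ZONE_RECLAIM_PRIORITY + 1, is decremented before each pass, and each pass therefore scans a larger slice of the zone until nr_pages have been reclaimed or the priority reaches 0. Below is a minimal user-space sketch of that escalation, assuming the fraction stated in the new comment (priority p scans roughly 1/2^p of the zone); sim_zone, sim_shrink_zone() and the sample sizes are hypothetical stand-ins for illustration, not kernel code.

/*
 * User-space sketch of the gradual zone_reclaim scan.  The struct, the
 * helper names and the "one in eight scanned pages is reclaimable"
 * assumption are illustrative only; they are not kernel APIs.
 */
#include <stdio.h>

#define ZONE_RECLAIM_PRIORITY 4   /* first pass scans 1/16th of the zone */

struct sim_zone {
	unsigned long present_pages;   /* total pages in the zone */
	unsigned long unused_cache;    /* reclaimable, unmapped pagecache */
};

/* Pages examined in one pass: one 2^priority-th of the zone. */
static unsigned long scan_window(const struct sim_zone *z, int priority)
{
	return z->present_pages >> priority;
}

/* Stand-in for shrink_zone(): of the pages scanned, free the ones that
 * turn out to be unused pagecache.  Returns pages reclaimed this pass. */
static unsigned long sim_shrink_zone(struct sim_zone *z, int priority)
{
	unsigned long scanned = scan_window(z, priority);
	/* Demo assumption: one in eight scanned pages is droppable. */
	unsigned long freed = scanned / 8;

	if (freed > z->unused_cache)
		freed = z->unused_cache;
	z->unused_cache -= freed;
	return freed;
}

int main(void)
{
	struct sim_zone zone = { .present_pages = 1UL << 20,
				 .unused_cache  = 1UL << 18 };
	unsigned long nr_pages = 1UL << 14;   /* pages the caller wants freed */
	unsigned long nr_reclaimed = 0;
	int priority = ZONE_RECLAIM_PRIORITY + 1;

	/* Mirror the patched loop: lower the priority (doubling the scan
	 * window) until enough pages are freed or priority hits 0. */
	do {
		priority--;
		nr_reclaimed += sim_shrink_zone(&zone, priority);
		printf("priority %d: reclaimed %lu of %lu wanted\n",
		       priority, nr_reclaimed, nr_pages);
	} while (nr_reclaimed < nr_pages && priority > 0);

	return 0;
}

Starting one above ZONE_RECLAIM_PRIORITY means the first decrement lands on priority 4 (1/16th of the zone), and each further pass doubles the window, so a zone holding nothing but unused pagecache is still drained within a few passes rather than in one sweep.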
-rw-r--r--    mm/vmscan.c    20
1 file changed, 18 insertions, 2 deletions
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 8277f93148b5..f8b94ea6f722 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1596,6 +1596,14 @@ int zone_reclaim_mode __read_mostly;
  * Mininum time between zone reclaim scans
  */
 #define ZONE_RECLAIM_INTERVAL 30*HZ
+
+/*
+ * Priority for ZONE_RECLAIM. This determines the fraction of pages
+ * of a node considered for each zone_reclaim. 4 scans 1/16th of
+ * a zone.
+ */
+#define ZONE_RECLAIM_PRIORITY 4
+
 /*
  * Try to free up some pages from this zone through reclaim.
  */
@@ -1626,7 +1634,7 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 	sc.may_swap = 0;
 	sc.nr_scanned = 0;
 	sc.nr_reclaimed = 0;
-	sc.priority = 0;
+	sc.priority = ZONE_RECLAIM_PRIORITY + 1;
 	sc.nr_mapped = read_page_state(nr_mapped);
 	sc.gfp_mask = gfp_mask;
 
@@ -1643,7 +1651,15 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 	reclaim_state.reclaimed_slab = 0;
 	p->reclaim_state = &reclaim_state;
 
-	shrink_zone(zone, &sc);
+	/*
+	 * Free memory by calling shrink zone with increasing priorities
+	 * until we have enough memory freed.
+	 */
+	do {
+		sc.priority--;
+		shrink_zone(zone, &sc);
+
+	} while (sc.nr_reclaimed < nr_pages && sc.priority > 0);
 
 	p->reclaim_state = NULL;
 	current->flags &= ~PF_MEMALLOC;