path: root/mm
author	Christoph Lameter <clameter@engr.sgi.com>	2006-02-01 06:05:25 -0500
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-02-01 11:53:14 -0500
commit	8928862398fef04a137e5673ac5fa9e797960c87 (patch)
tree	141f91c4dd56d323e6f5ce2d47cd1f1d22177aa7 /mm
parent	537421be79b94bcf620467f50dd9e38b739c2a00 (diff)
[PATCH] Optimize off-node performance of zone reclaim
Ensure that the performance of off-node pages stays the same as before.
Off-node pagefault tests showed an 18% drop in performance without this
patch.

- Increase the timeout to 30 seconds to reduce the overhead.

- Move all code possible out of the off-node hot path for zone reclaim
  (sorry Andrew, the struct initialization had to be sacrificed).
  read_page_state() bit us there.

- Check for the timeout first, before any other checks.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
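[Editor's note] To make the ordering change concrete, here is a minimal
user-space C sketch of the pattern the patch applies in the diff below:
do the cheap timeout test first, so the common early-return path never
pays for expensive setup. All names here (fake_zone_reclaim,
expensive_counter_read, RECLAIM_INTERVAL, last_unsuccessful_reclaim)
are illustrative stand-ins, not the real mm/vmscan.c API.

	#include <time.h>

	/* Illustrative stand-ins only; not the kernel's types or helpers. */
	struct fake_scan_control {
		int may_writepage, may_swap, nr_scanned, nr_reclaimed, priority;
	};

	static time_t last_unsuccessful_reclaim;
	#define RECLAIM_INTERVAL 30	/* seconds, mirroring the new 30*HZ value */

	/* Stand-in for a costly global read such as read_page_state(). */
	static int expensive_counter_read(void) { return 0; }

	int fake_zone_reclaim(void)
	{
		struct fake_scan_control sc;

		/* Cheapest check first: if the last attempt failed recently,
		 * return before touching any global state. */
		if (time(NULL) < last_unsuccessful_reclaim + RECLAIM_INTERVAL)
			return 0;

		/* Only now pay for the expensive setup. */
		sc.priority = 0;
		sc.nr_scanned = expensive_counter_read();
		/* ... reclaim work would go here ... */
		return sc.nr_scanned;
	}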
Diffstat (limited to 'mm')
-rw-r--r--	mm/vmscan.c	29
1 file changed, 15 insertions(+), 14 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 2e34b61a70c7..465bfa54dfd6 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1589,24 +1589,20 @@ int zone_reclaim_mode __read_mostly;
 /*
  * Mininum time between zone reclaim scans
  */
-#define ZONE_RECLAIM_INTERVAL HZ/2
+#define ZONE_RECLAIM_INTERVAL 30*HZ
 /*
  * Try to free up some pages from this zone through reclaim.
  */
 int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 {
-	int nr_pages = 1 << order;
+	int nr_pages;
 	struct task_struct *p = current;
 	struct reclaim_state reclaim_state;
-	struct scan_control sc = {
-		.gfp_mask = gfp_mask,
-		.may_writepage = 0,
-		.may_swap = 0,
-		.nr_mapped = read_page_state(nr_mapped),
-		.nr_scanned = 0,
-		.nr_reclaimed = 0,
-		.priority = 0
-	};
+	struct scan_control sc;
+
+	if (time_before(jiffies,
+		zone->last_unsuccessful_zone_reclaim + ZONE_RECLAIM_INTERVAL))
+			return 0;
 
 	if (!(gfp_mask & __GFP_WAIT) ||
 		zone->zone_pgdat->node_id != numa_node_id() ||
@@ -1614,12 +1610,17 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 		atomic_read(&zone->reclaim_in_progress) > 0)
 			return 0;
 
-	if (time_before(jiffies,
-		zone->last_unsuccessful_zone_reclaim + ZONE_RECLAIM_INTERVAL))
-			return 0;
+	sc.may_writepage = 0;
+	sc.may_swap = 0;
+	sc.nr_scanned = 0;
+	sc.nr_reclaimed = 0;
+	sc.priority = 0;
+	sc.nr_mapped = read_page_state(nr_mapped);
+	sc.gfp_mask = gfp_mask;
 
 	disable_swap_token();
 
+	nr_pages = 1 << order;
 	if (nr_pages > SWAP_CLUSTER_MAX)
 		sc.swap_cluster_max = nr_pages;
 	else
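[Editor's note] On the time_before() test the patch moves to the top of
zone_reclaim(): jiffies comparisons use signed subtraction so the check
stays correct when the tick counter wraps around. Below is a standalone
sketch of the idiom, simplified from <linux/jiffies.h> (the real kernel
macros also enforce the argument types); the example values are
hypothetical.

	#include <stdio.h>

	/* Simplified wraparound-safe tick comparison, modeled on the
	 * kernel's <linux/jiffies.h> macros. */
	#define time_after(a, b)	((long)((b) - (a)) < 0)
	#define time_before(a, b)	time_after(b, a)

	int main(void)
	{
		unsigned long now = 5;		/* tick counter just wrapped past 0 */
		unsigned long last_fail = -10UL; /* recorded shortly before the wrap */
		unsigned long interval = 30;	/* stands in for 30*HZ */

		/* Signed subtraction makes this true (still inside the
		 * back-off window) even though now < last_fail numerically. */
		if (time_before(now, last_fail + interval))
			printf("still backing off, skip reclaim\n");
		return 0;
	}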