diff options
author | Christoph Lameter <clameter@sgi.com> | 2006-09-26 02:31:53 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@g5.osdl.org> | 2006-09-26 11:48:52 -0400 |
commit | 83e33a4711760469f5c3861b8ffea4947656d4eb (patch) | |
tree | 3c6b534760dee49a77157eb6512aeb329e19bc2c /mm/vmscan.c | |
parent | 0ff38490c836dc379ff7ec45b10a15a662f4e5f6 (diff) |
[PATCH] zone reclaim with slab: avoid unnecessary off node allocations
Minor performance fix.
If we reclaimed enough slab pages from a zone then we can avoid going off
node with the current allocation. Take care of updating nr_reclaimed when
reclaiming from the slab.
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r-- | mm/vmscan.c | 17 |
1 file changed, 12 insertions, 5 deletions
diff --git a/mm/vmscan.c b/mm/vmscan.c index 089e943c4d38..b950f193816e 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c | |||
@@ -1566,6 +1566,7 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) | |||
1566 | .gfp_mask = gfp_mask, | 1566 | .gfp_mask = gfp_mask, |
1567 | .swappiness = vm_swappiness, | 1567 | .swappiness = vm_swappiness, |
1568 | }; | 1568 | }; |
1569 | unsigned long slab_reclaimable; | ||
1569 | 1570 | ||
1570 | disable_swap_token(); | 1571 | disable_swap_token(); |
1571 | cond_resched(); | 1572 | cond_resched(); |
@@ -1592,7 +1593,8 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) | |||
1592 | } while (priority >= 0 && nr_reclaimed < nr_pages); | 1593 | } while (priority >= 0 && nr_reclaimed < nr_pages); |
1593 | } | 1594 | } |
1594 | 1595 | ||
1595 | if (zone_page_state(zone, NR_SLAB_RECLAIMABLE) > zone->min_slab_pages) { | 1596 | slab_reclaimable = zone_page_state(zone, NR_SLAB_RECLAIMABLE); |
1597 | if (slab_reclaimable > zone->min_slab_pages) { | ||
1596 | /* | 1598 | /* |
1597 | * shrink_slab() does not currently allow us to determine how | 1599 | * shrink_slab() does not currently allow us to determine how |
1598 | * many pages were freed in this zone. So we take the current | 1600 | * many pages were freed in this zone. So we take the current |
@@ -1603,12 +1605,17 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order) | |||
1603 | * Note that shrink_slab will free memory on all zones and may | 1605 | * Note that shrink_slab will free memory on all zones and may |
1604 | * take a long time. | 1606 | * take a long time. |
1605 | */ | 1607 | */ |
1606 | unsigned long limit = zone_page_state(zone, | ||
1607 | NR_SLAB_RECLAIMABLE) - nr_pages; | ||
1608 | |||
1609 | while (shrink_slab(sc.nr_scanned, gfp_mask, order) && | 1608 | while (shrink_slab(sc.nr_scanned, gfp_mask, order) && |
1610 | zone_page_state(zone, NR_SLAB_RECLAIMABLE) > limit) | 1609 | zone_page_state(zone, NR_SLAB_RECLAIMABLE) > |
1610 | slab_reclaimable - nr_pages) | ||
1611 | ; | 1611 | ; |
1612 | |||
1613 | /* | ||
1614 | * Update nr_reclaimed by the number of slab pages we | ||
1615 | * reclaimed from this zone. | ||
1616 | */ | ||
1617 | nr_reclaimed += slab_reclaimable - | ||
1618 | zone_page_state(zone, NR_SLAB_RECLAIMABLE); | ||
1612 | } | 1619 | } |
1613 | 1620 | ||
1614 | p->reclaim_state = NULL; | 1621 | p->reclaim_state = NULL; |