author     Martin Hicks <mort@sgi.com>  2005-06-21 20:14:43 -0400
committer  Linus Torvalds <torvalds@ppc970.osdl.org>  2005-06-21 21:46:14 -0400
commit     1e7e5a9048b30c57ba1ddaa6cdf59b21b65cde99 (patch)
tree       26eb9c483718ca1a0fad23597c0dfd3a69e9f080 /mm
parent     0c35bbadc59f5ed105c34471143eceb4c0dd9c95 (diff)
[PATCH] VM: rate limit early reclaim
When early zone reclaim is turned on, the LRU is scanned more frequently whenever a
zone is low on memory. This patch rate-limits zone reclaim by skipping the scan
when another thread (either kswapd or sync reclaim) is already reclaiming from
the zone.
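The serialisation primitive here is just a per-zone counter, initialised to -1, that reclaimers bump while they work. Below is a minimal userspace sketch of the same gate, using C11 atomics in place of the kernel's atomic_t (struct zone_stub, inc_and_test and try_zone_reclaim are illustrative names for this sketch, not kernel API):

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Illustrative stand-in for struct zone and its reclaim_in_progress field. */
    struct zone_stub {
            atomic_int reclaim_in_progress;  /* starts at -1, as in the patch */
    };

    /*
     * Mirrors the kernel's atomic_inc_and_test(): increment, then report
     * whether the result is exactly zero. Starting from -1, only one
     * caller at a time can see zero and win the right to reclaim.
     */
    static bool inc_and_test(atomic_int *v)
    {
            return atomic_fetch_add(v, 1) + 1 == 0;
    }

    static void try_zone_reclaim(struct zone_stub *zone)
    {
            if (!inc_and_test(&zone->reclaim_in_progress))
                    goto out;  /* someone else is reclaiming; skip the scan */

            /* ... the shrink_zone() work would happen here ... */
    out:
            atomic_fetch_sub(&zone->reclaim_in_progress, 1);
    }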
Signed-off-by: Martin Hicks <mort@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'mm')
-rw-r--r--   mm/page_alloc.c |  1 +
-rw-r--r--   mm/vmscan.c     | 10 ++++++++++
2 files changed, 11 insertions(+), 0 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a9da20bc2ed0..2019c1b19254 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1738,6 +1738,7 @@ static void __init free_area_init_core(struct pglist_data *pgdat,
 		zone->nr_scan_inactive = 0;
 		zone->nr_active = 0;
 		zone->nr_inactive = 0;
+		atomic_set(&zone->reclaim_in_progress, -1);
 		if (!size)
 			continue;
 
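Initialising reclaim_in_progress to -1 (rather than 0) is what makes the atomic_inc_and_test() gate in zone_reclaim() below work: that helper returns true only when the increment lands exactly on zero. Traced by hand, the counter evolves as:

    idle zone:                     -1
    first zone_reclaim(), inc:      0   -> inc_and_test true, reclaim runs
    concurrent caller, inc:         1   -> inc_and_test false, scan skipped
    each caller dec on exit:       back to -1 once all have left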
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 7da846960d8a..24da725a30f0 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -900,7 +900,9 @@ shrink_caches(struct zone **zones, struct scan_control *sc)
 		if (zone->all_unreclaimable && sc->priority != DEF_PRIORITY)
 			continue;	/* Let kswapd poll it */
 
+		atomic_inc(&zone->reclaim_in_progress);
 		shrink_zone(zone, sc);
+		atomic_dec(&zone->reclaim_in_progress);
 	}
 }
 
@@ -1111,7 +1113,9 @@ scan:
 			sc.nr_reclaimed = 0;
 			sc.priority = priority;
 			sc.swap_cluster_max = nr_pages? nr_pages : SWAP_CLUSTER_MAX;
+			atomic_inc(&zone->reclaim_in_progress);
 			shrink_zone(zone, &sc);
+			atomic_dec(&zone->reclaim_in_progress);
 			reclaim_state->reclaimed_slab = 0;
 			nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL,
 						lru_pages);
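Both hunks above (the direct-reclaim loop in shrink_caches() and the kswapd loop in balance_pgdat()) use plain atomic_inc()/atomic_dec() rather than the test-and-set form: those paths always reclaim, but while they are inside shrink_zone() the counter sits at zero or above, so a concurrent zone_reclaim() cannot land on zero and backs off. Reusing the stand-ins from the first sketch, the unconditional variant would look roughly like this (kswapd_style_reclaim is an illustrative name):

    /*
     * Unconditional reclaim, as in shrink_caches()/balance_pgdat():
     * always runs, but holds the counter >= 0 for its duration so that
     * try_zone_reclaim() callers bail out in the meantime.
     */
    static void kswapd_style_reclaim(struct zone_stub *zone)
    {
            atomic_fetch_add(&zone->reclaim_in_progress, 1);
            /* ... shrink_zone() equivalent ... */
            atomic_fetch_sub(&zone->reclaim_in_progress, 1);
    }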
@@ -1354,9 +1358,15 @@ int zone_reclaim(struct zone *zone, unsigned int gfp_mask, unsigned int order)
 	else
 		sc.swap_cluster_max = SWAP_CLUSTER_MAX;
 
+	/* Don't reclaim the zone if there are other reclaimers active */
+	if (!atomic_inc_and_test(&zone->reclaim_in_progress))
+		goto out;
+
 	shrink_zone(zone, &sc);
 	total_reclaimed = sc.nr_reclaimed;
 
+out:
+	atomic_dec(&zone->reclaim_in_progress);
 	return total_reclaimed;
 }
 
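Note that the out: label sits before the atomic_dec(), so a caller that loses the race still undoes its own increment; without that, the counter would drift upward and the zone could never be reclaimed through this path again. As a quick sanity check, a small pthread harness (continuing the userspace sketch above; build the pieces into one file with -pthread) should always print -1 once the threads drain:

    #include <pthread.h>
    #include <stdio.h>

    static struct zone_stub z = { .reclaim_in_progress = -1 };

    static void *worker(void *arg)
    {
            (void)arg;
            try_zone_reclaim(&z);   /* at most one thread reclaims at a time */
            return NULL;
    }

    int main(void)
    {
            pthread_t t[4];
            for (int i = 0; i < 4; i++)
                    pthread_create(&t[i], NULL, worker, NULL);
            for (int i = 0; i < 4; i++)
                    pthread_join(t[i], NULL);
            /* every increment was paired with a decrement */
            printf("counter settled at %d\n",
                   atomic_load(&z.reclaim_in_progress));
            return 0;
    }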