path: root/mm/vmscan.c
author	Andrew Morton <akpm@osdl.org>	2006-01-06 03:11:14 -0500
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-01-06 11:33:27 -0500
commit	210fe530305ee50cd889fe9250168228b2994f32 (patch)
tree	4d22a28cf28abded5f77daef5b025c04d1ffea56 /mm/vmscan.c
parent	41e9b63b35b52cf918a4ffdb8d77862ab824aa8b (diff)
[PATCH] vmscan: balancing fix
Revert a patch which went into 2.6.8-rc1.  The changelog for that patch was:

  The shrink_zone() logic can, under some circumstances, cause far too many
  pages to be reclaimed.  Say, we're scanning at high priority and suddenly
  hit a large number of reclaimable pages on the LRU.

  Change things so we bale out when SWAP_CLUSTER_MAX pages have been
  reclaimed.

Problem is, this change caused significant imbalance in inter-zone scan
balancing by truncating scans of larger zones.

Suppose, for example, ZONE_HIGHMEM is 10x the size of ZONE_NORMAL.  The zone
balancing algorithm would require that if we're scanning 100 pages of
ZONE_HIGHMEM, we should scan 10 pages of ZONE_NORMAL.  But this logic will
cause the scanning of ZONE_HIGHMEM to bale out after only 32 pages are
reclaimed.  Thus effectively causing smaller zones to be scanned relatively
harder than large ones.

Now I need to remember what the workload was which caused me to write this
patch originally, then fix it up in a different way...

Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
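To make the arithmetic above concrete, here is a small standalone C sketch (not part of the patch; the zone sizes, variable names, and the assumption that every scanned page is reclaimable are illustrative) showing how a fixed SWAP_CLUSTER_MAX reclaim cutoff truncates the scan of the larger zone while the smaller zone still receives its full proportional scan:

#include <stdio.h>

#define SWAP_CLUSTER_MAX 32UL

int main(void)
{
	/* Hypothetical zone sizes: ZONE_HIGHMEM is 10x ZONE_NORMAL. */
	unsigned long highmem_size = 1000000;
	unsigned long normal_size  = 100000;

	/*
	 * Proportional balancing: scan targets in proportion to zone size,
	 * so 100 pages of ZONE_HIGHMEM implies 10 pages of ZONE_NORMAL.
	 */
	unsigned long highmem_target = 100;
	unsigned long normal_target  = highmem_target * normal_size / highmem_size;

	/*
	 * Reverted behaviour: the scan bales out once SWAP_CLUSTER_MAX pages
	 * have been reclaimed.  Assume every scanned page is reclaimable.
	 */
	unsigned long scanned = 0, reclaimed = 0;
	while (scanned < highmem_target) {
		scanned++;
		reclaimed++;
		if (reclaimed >= SWAP_CLUSTER_MAX)
			break;	/* bale out: the big zone's scan is truncated */
	}

	printf("ZONE_HIGHMEM: target %lu pages, actually scanned %lu\n",
	       highmem_target, scanned);
	printf("ZONE_NORMAL:  target %lu pages, scanned in full (%lu)\n",
	       normal_target, normal_target);
	return 0;
}

Run standalone, this reports ZONE_HIGHMEM scanning only 32 of its 100-page target while ZONE_NORMAL scans all 10, which is the relative over-scanning of small zones that the revert addresses.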
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--	mm/vmscan.c	8
1 file changed, 0 insertions, 8 deletions
diff --git a/mm/vmscan.c b/mm/vmscan.c
index b2baca7645d7..5c8a412b43f4 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -63,9 +63,6 @@ struct scan_control {
 
 	unsigned long nr_mapped;	/* From page_state */
 
-	/* How many pages shrink_cache() should reclaim */
-	int nr_to_reclaim;
-
 	/* Ask shrink_caches, or shrink_zone to scan at this priority */
 	unsigned int priority;
 
@@ -656,7 +653,6 @@ static void shrink_cache(struct zone *zone, struct scan_control *sc)
 		if (current_is_kswapd())
 			mod_page_state(kswapd_steal, nr_freed);
 		mod_page_state_zone(zone, pgsteal, nr_freed);
-		sc->nr_to_reclaim -= nr_freed;
 
 		spin_lock_irq(&zone->lru_lock);
 		/*
@@ -856,8 +852,6 @@ shrink_zone(struct zone *zone, struct scan_control *sc)
 	else
 		nr_inactive = 0;
 
-	sc->nr_to_reclaim = sc->swap_cluster_max;
-
 	while (nr_active || nr_inactive) {
 		if (nr_active) {
 			sc->nr_to_scan = min(nr_active,
@@ -871,8 +865,6 @@ shrink_zone(struct zone *zone, struct scan_control *sc)
 					(unsigned long)sc->swap_cluster_max);
 			nr_inactive -= sc->nr_to_scan;
 			shrink_cache(zone, sc);
-			if (sc->nr_to_reclaim <= 0)
-				break;
 		}
 	}
 