aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorMartin Bligh <mbligh@google.com>2006-10-28 13:38:25 -0400
committerLinus Torvalds <torvalds@g5.osdl.org>2006-10-28 14:30:51 -0400
commitbbdb396a60b2ebf7de3b717991e5d3e28c8b7bbd (patch)
treeefeca197ae8a2421940006ace6c1b719b1811a0c
parent3bb1a852ab6c9cdf211a2f4a2f502340c8c38eca (diff)
[PATCH] Use min of two prio settings in calculating distress for reclaim
If try_to_free_pages / balance_pgdat are called with a gfp_mask specifying GFP_IO and/or GFP_FS, they will reclaim the requisite number of pages, and then reset prev_priority to DEF_PRIORITY (or to some other high (ie: unurgent) value). However, another reclaimer without those gfp_mask flags set (say, GFP_NOIO) may still be struggling to reclaim pages. The concurrent overwrite of zone->prev_priority will cause this GFP_NOIO thread to unexpectedly cease deactivating mapped pages, thus causing reclaim difficulties. The fix is to key the distress calculation not off zone->prev_priority alone, but to also take into account the local caller's priority by using min(zone->prev_priority, sc->priority) Signed-off-by: Martin J. Bligh <mbligh@google.com> Cc: Nick Piggin <nickpiggin@yahoo.com.au> Cc: <stable@kernel.org> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--mm/vmscan.c8
1 files changed, 4 insertions, 4 deletions
diff --git a/mm/vmscan.c b/mm/vmscan.c
index b32560ead5c0..518540a4a2a6 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -760,7 +760,7 @@ static inline int zone_is_near_oom(struct zone *zone)
760 * But we had to alter page->flags anyway. 760 * But we had to alter page->flags anyway.
761 */ 761 */
762static void shrink_active_list(unsigned long nr_pages, struct zone *zone, 762static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
763 struct scan_control *sc) 763 struct scan_control *sc, int priority)
764{ 764{
765 unsigned long pgmoved; 765 unsigned long pgmoved;
766 int pgdeactivate = 0; 766 int pgdeactivate = 0;
@@ -784,7 +784,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
784 * `distress' is a measure of how much trouble we're having 784 * `distress' is a measure of how much trouble we're having
785 * reclaiming pages. 0 -> no problems. 100 -> great trouble. 785 * reclaiming pages. 0 -> no problems. 100 -> great trouble.
786 */ 786 */
787 distress = 100 >> zone->prev_priority; 787 distress = 100 >> min(zone->prev_priority, priority);
788 788
789 /* 789 /*
790 * The point of this algorithm is to decide when to start 790 * The point of this algorithm is to decide when to start
@@ -936,7 +936,7 @@ static unsigned long shrink_zone(int priority, struct zone *zone,
936 nr_to_scan = min(nr_active, 936 nr_to_scan = min(nr_active,
937 (unsigned long)sc->swap_cluster_max); 937 (unsigned long)sc->swap_cluster_max);
938 nr_active -= nr_to_scan; 938 nr_active -= nr_to_scan;
939 shrink_active_list(nr_to_scan, zone, sc); 939 shrink_active_list(nr_to_scan, zone, sc, priority);
940 } 940 }
941 941
942 if (nr_inactive) { 942 if (nr_inactive) {
@@ -1384,7 +1384,7 @@ static unsigned long shrink_all_zones(unsigned long nr_pages, int pass,
1384 if (zone->nr_scan_active >= nr_pages || pass > 3) { 1384 if (zone->nr_scan_active >= nr_pages || pass > 3) {
1385 zone->nr_scan_active = 0; 1385 zone->nr_scan_active = 0;
1386 nr_to_scan = min(nr_pages, zone->nr_active); 1386 nr_to_scan = min(nr_pages, zone->nr_active);
1387 shrink_active_list(nr_to_scan, zone, sc); 1387 shrink_active_list(nr_to_scan, zone, sc, prio);
1388 } 1388 }
1389 } 1389 }
1390 1390