author     Nick Piggin <npiggin@suse.de>           2006-09-26 02:31:28 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>   2006-09-26 11:48:48 -0400
commit     4ff1ffb4870b007b86f21e5f27eeb11498c4c077 (patch)
tree       f168408f90214873e1fa36733d29f2ba002fae46 /mm
parent     408d85441cd5a9bd6bc851d677a10c605ed8db5f (diff)
[PATCH] oom: reclaim_mapped on oom
Potentially it takes several scans of the LRU lists before we can even start
reclaiming pages.

Mapped pages with young ptes can take two passes on the active list plus one
on the inactive list. But reclaim_mapped may not always kick in instantly, so
it could take even more than that.
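For intuition, that lifecycle looks roughly like the sketch below. It is only
an illustration of the claim above, not kernel code; the names are made up.

enum lru_stage { ACTIVE_REFERENCED, ACTIVE_UNREFERENCED, INACTIVE, RECLAIMED };

/* One LRU scan pass over a mapped page, as described in the text above. */
static enum lru_stage lru_scan_step(enum lru_stage s)
{
        switch (s) {
        case ACTIVE_REFERENCED:         /* pass 1: young/referenced bit cleared */
                return ACTIVE_UNREFERENCED;
        case ACTIVE_UNREFERENCED:       /* pass 2: moved to the inactive list */
                return INACTIVE;
        case INACTIVE:                  /* pass 3: finally eligible for reclaim */
                return RECLAIMED;
        default:
                return RECLAIMED;
        }
}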
Raise the threshold for marking a zone as all_unreclaimable from a factor of 4
times the pages in the zone to a factor of 6. Introduce a mechanism to force
reclaim_mapped if we've reached a factor of 3 and still haven't made progress.
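The two thresholds relate as in the following simplified, standalone sketch.
The field names mirror the diff below; the surrounding scaffolding
(zone_counters, zone_gives_up) is illustrative only, not the kernel's code.

struct zone_counters {
        unsigned long pages_scanned;
        unsigned long nr_active;
        unsigned long nr_inactive;
};

/* At 3x the zone's LRU pages scanned, force reclaim of mapped pages. */
static int zone_is_near_oom(const struct zone_counters *z)
{
        return z->pages_scanned >= (z->nr_active + z->nr_inactive) * 3;
}

/* Only at 6x (previously 4x) is the zone written off as all_unreclaimable. */
static int zone_gives_up(const struct zone_counters *z, int nr_slab)
{
        return nr_slab == 0 &&
               z->pages_scanned >= (z->nr_active + z->nr_inactive) * 6;
}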
Previously, a customer doing stress testing was able to easily OOM the box
after using only a small fraction of its swap (~100MB). After the patches, it
would only OOM after having used up all swap (~800MB).
Signed-off-by: Nick Piggin <npiggin@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'mm')
-rw-r--r--  mm/vmscan.c  11
1 file changed, 10 insertions(+), 1 deletion(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index ba18d0c36b8..8f35d7d585c 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -697,6 +697,11 @@ done:
         return nr_reclaimed;
 }
 
+static inline int zone_is_near_oom(struct zone *zone)
+{
+        return zone->pages_scanned >= (zone->nr_active + zone->nr_inactive)*3;
+}
+
 /*
  * This moves pages from the active list to the inactive list.
  *
@@ -732,6 +737,9 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
                 long distress;
                 long swap_tendency;
 
+                if (zone_is_near_oom(zone))
+                        goto force_reclaim_mapped;
+
                 /*
                  * `distress' is a measure of how much trouble we're having
                  * reclaiming pages. 0 -> no problems. 100 -> great trouble.
@@ -767,6 +775,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
                  * memory onto the inactive list.
                  */
                 if (swap_tendency >= 100)
+force_reclaim_mapped:
                         reclaim_mapped = 1;
         }
 
@@ -1161,7 +1170,7 @@ scan:
                 if (zone->all_unreclaimable)
                         continue;
                 if (nr_slab == 0 && zone->pages_scanned >=
-                                (zone->nr_active + zone->nr_inactive) * 4)
+                                (zone->nr_active + zone->nr_inactive) * 6)
                         zone->all_unreclaimable = 1;
                 /*
                  * If we've done a decent amount of scanning and