author		Johannes Weiner <hannes@saeurebad.de>	2008-10-18 23:26:55 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-10-20 11:52:31 -0400
commit		e0f79b8f1f3394bb344b7b83d6f121ac2af327de (patch)
tree		71fbe51998f2e22889bb59d361acf36898d71732 /mm
parent		902d2e8ae0de29f483840ba1134af27343b9564d (diff)
vmscan: don't accumulate scan pressure on unrelated lists
During each reclaim scan we accumulate scan pressure on unrelated lists,
which eventually results in bogus scans and unwanted reclaims.  Scanning
lists with few reclaim candidates produces a lot of rotation and therefore
also disturbs the list balancing, putting even more pressure on the wrong
lists.
In a test case with heavy streaming IO, and therefore a crowded inactive
file page list, swapping started because

  a) anon pages were reclaimed after swap_cluster_max reclaim
     invocations -- the nr_scan of this list had simply accumulated, and

  b) active file pages were scanned because *their* nr_scan had also
     accumulated through the same logic.  This in turn created a lot of
     rotation for file pages and lowered the file list priority, again
     increasing the pressure on anon pages.

The result was an evicted anon working set, even though there were tons
of inactive file pages that should have been taken instead.
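The mechanism can be reproduced outside the kernel.  Below is a minimal
user-space sketch of the accumulation arithmetic in shrink_zone(); the
list sizes, percent split, and iteration count are made-up illustration
values, while SWAP_CLUSTER_MAX (32) and DEF_PRIORITY (12) match the
kernel defaults.  With the "+ 1" in place, the anon list reaches the
batch threshold after exactly swap_cluster_max invocations and gets
scanned, despite receiving 0% of the proportional pressure:

	#include <stdio.h>

	#define SWAP_CLUSTER_MAX 32	/* kernel default batch size */

	int main(void)
	{
		/*
		 * percent[] mimics get_scan_ratio() output: the anon list
		 * gets 0% of the pressure, the file list 100%, as under
		 * pure streaming IO.  List sizes and the iteration count
		 * are made up for illustration.
		 */
		int percent[2] = { 0, 100 };		/* [anon, file] */
		long list_pages[2] = { 100000, 100000 };
		long nr_scan[2] = { 0, 0 };
		int priority = 12;			/* DEF_PRIORITY */
		int i, file;

		for (i = 1; i <= 64; i++) {
			for (file = 0; file <= 1; file++) {
				long scan = list_pages[file] >> priority;

				scan = scan * percent[file] / 100;
				nr_scan[file] += scan + 1;	/* the buggy "+ 1" */
				if (nr_scan[file] >= SWAP_CLUSTER_MAX) {
					printf("invocation %2d: scan %s list (nr_scan=%ld)\n",
					       i, file ? "file" : "anon",
					       nr_scan[file]);
					nr_scan[file] = 0;
				}
			}
		}
		return 0;
	}

Running this prints a scan of the anon list at invocations 32 and 64,
driven purely by the drip of one page per invocation.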
Signed-off-by: Johannes Weiner <hannes@saeurebad.de>
Reviewed-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--	mm/vmscan.c	7
1 file changed, 2 insertions(+), 5 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index ca64e3e0c518..412d7872fc75 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1413,16 +1413,13 @@ static unsigned long shrink_zone(int priority, struct zone *zone,
 		if (scan_global_lru(sc)) {
 			int file = is_file_lru(l);
 			int scan;
-			/*
-			 * Add one to nr_to_scan just to make sure that the
-			 * kernel will slowly sift through each list.
-			 */
+
 			scan = zone_page_state(zone, NR_LRU_BASE + l);
 			if (priority) {
 				scan >>= priority;
 				scan = (scan * percent[file]) / 100;
 			}
-			zone->lru[l].nr_scan += scan + 1;
+			zone->lru[l].nr_scan += scan;
 			nr[l] = zone->lru[l].nr_scan;
 			if (nr[l] >= sc->swap_cluster_max)
 				zone->lru[l].nr_scan = 0;
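Note that the batching itself is unchanged: small per-invocation scan
targets still accumulate in nr_scan and are only acted upon once they
reach swap_cluster_max.  The difference is that the accumulation is now
driven purely by the proportional calculation, so a list whose percent
share is zero no longer drifts toward the threshold one page at a time.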