author     Rik van Riel <riel@redhat.com>            2005-11-28 16:44:07 -0500
committer  Linus Torvalds <torvalds@g5.osdl.org>     2005-11-28 17:42:25 -0500
commit     f7b7fd8f3ebbb2810d6893295aa984acd0fd30db (patch)
tree       01afc1edafc50a3c65ec8576c05c60da53d8d242 /mm/vmscan.c
parent     a93a117eaa0bec426d4671a49bfa96a6fdcd2ac9 (diff)
[PATCH] temporarily disable swap token on memory pressure
Some users (hi Zwane) have seen a problem when running a workload that
eats nearly all of physical memory - the system does an OOM kill, even
when there is still a lot of swap free.

The problem appears to be a very big task that is holding the swap
token, and the VM has a very hard time finding any other page in the
system that is swappable.

Instead of ignoring the swap token when sc->priority reaches 0, we could
simply take the swap token away from the memory hog and make sure we
don't give it back to the memory hog for a few seconds.

This patch resolves the problem Zwane ran into.

Signed-off-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
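The policy is easy to see in miniature. The sketch below is a
self-contained userspace C model of the idea the message describes -
strip the token from its holder under heavy pressure and refuse to hand
it back to that task for a few seconds. It is not the kernel's actual
mm/thrash.c implementation; the names (struct task, token_holder,
recent_hog, TOKEN_GRACE_SECS, try_grab_swap_token) and the time()-based
grace period are illustrative only.

/*
 * Illustrative sketch: models "take the token away from the memory hog
 * and ban it for a few seconds" in userspace. All names hypothetical;
 * this is not the kernel's mm/thrash.c code.
 */
#include <stdio.h>
#include <time.h>

struct task { const char *name; };

static struct task *token_holder;	/* task currently holding the swap token */
static struct task *recent_hog;		/* task the token was just taken from */
static time_t hog_banned_until;		/* end of the grace period */

#define TOKEN_GRACE_SECS 2		/* "a few seconds" */

/* What disable_swap_token() does conceptually when reclaim hits priority 0. */
static void disable_swap_token(void)
{
	recent_hog = token_holder;
	token_holder = NULL;
	hog_banned_until = time(NULL) + TOKEN_GRACE_SECS;
}

/* Token handout path: the previous holder may not reacquire during the ban. */
static int try_grab_swap_token(struct task *t)
{
	if (t == recent_hog && time(NULL) < hog_banned_until)
		return 0;	/* memory hog stays swappable for now */
	token_holder = t;
	return 1;
}

int main(void)
{
	struct task hog = { "memhog" }, other = { "other-task" };

	token_holder = &hog;
	disable_swap_token();	/* reclaim reached priority 0 */
	printf("%s regrab: %d\n", hog.name, try_grab_swap_token(&hog));     /* 0 */
	printf("%s grab: %d\n", other.name, try_grab_swap_token(&other));   /* 1 */
	return 0;
}

This mirrors the hunks below: try_to_free_pages() and balance_pgdat()
call disable_swap_token() only once priority has fallen to 0, while
zone_reclaim() calls it unconditionally because it always scans at
priority 0.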
Diffstat (limited to 'mm/vmscan.c')
 mm/vmscan.c | 11 +++++++++--
 1 file changed, 9 insertions(+), 2 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 28130541270f..078cf920208a 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -407,7 +407,7 @@ static int shrink_list(struct list_head *page_list, struct scan_control *sc)
 		if (PageWriteback(page))
 			goto keep_locked;
 
-		referenced = page_referenced(page, 1, sc->priority <= 0);
+		referenced = page_referenced(page, 1);
 		/* In active use or really unfreeable? Activate it. */
 		if (referenced && page_mapping_inuse(page))
 			goto activate_locked;
@@ -756,7 +756,7 @@ refill_inactive_zone(struct zone *zone, struct scan_control *sc)
 		if (page_mapped(page)) {
 			if (!reclaim_mapped ||
 			    (total_swap_pages == 0 && PageAnon(page)) ||
-			    page_referenced(page, 0, sc->priority <= 0)) {
+			    page_referenced(page, 0)) {
 				list_add(&page->lru, &l_active);
 				continue;
 			}
@@ -960,6 +960,8 @@ int try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
 		sc.nr_reclaimed = 0;
 		sc.priority = priority;
 		sc.swap_cluster_max = SWAP_CLUSTER_MAX;
+		if (!priority)
+			disable_swap_token();
 		shrink_caches(zones, &sc);
 		shrink_slab(sc.nr_scanned, gfp_mask, lru_pages);
 		if (reclaim_state) {
@@ -1056,6 +1058,10 @@ loop_again:
 		int end_zone = 0;	/* Inclusive.  0 = ZONE_DMA */
 		unsigned long lru_pages = 0;
 
+		/* The swap token gets in the way of swapout... */
+		if (!priority)
+			disable_swap_token();
+
 		all_zones_ok = 1;
 
 		if (nr_pages == 0) {
@@ -1360,6 +1366,7 @@ int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 	sc.nr_reclaimed = 0;
 	/* scan at the highest priority */
 	sc.priority = 0;
+	disable_swap_token();
 
 	if (nr_pages > SWAP_CLUSTER_MAX)
 		sc.swap_cluster_max = nr_pages;