author		Andrew Morton <akpm@osdl.org>	2007-01-05 19:37:05 -0500
committer	Linus Torvalds <torvalds@woody.osdl.org>	2007-01-06 02:55:29 -0500
commit		76395d37611e8758dd8bd6c6f5bfcb31e1dc48f9 (patch)
tree		8002f85993ac66e46d4eac3d5bf41bc58581b769 /mm/vmscan.c
parent		d63b70902befe189ba2672925f28ec3f4db41352 (diff)
[PATCH] shrink_all_memory(): fix lru_pages handling
At the end of shrink_all_memory() we forget to recalculate lru_pages: it can
be zero.

Fix that up, and add a helper function for this operation too.

Also, recalculate lru_pages each time around the inner loop to get the
balancing correct.

Cc: "Rafael J. Wysocki" <rjw@sisk.pl>
Cc: Pavel Machek <pavel@ucw.cz>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
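For context: shrink_slab() applies pressure to slab caches in proportion to
scanned/lru_pages, so passing a stale (or zero) lru_pages skews or breaks that
ratio. The sketch below is a minimal userspace illustration of that drift, not
kernel code; the function names mirror the patch, but the bodies are stand-ins.

	/*
	 * Userspace sketch (illustrative only): a cached lru_pages goes
	 * stale as reclaim shrinks the LRU lists, distorting the
	 * scanned/lru_pages pressure ratio that shrink_slab() relies on.
	 */
	#include <stdio.h>

	static unsigned long lru_size = 1024;	/* pretend global LRU page count */

	/* stand-in for count_lru_pages(): re-read the current LRU size */
	static unsigned long count_lru_pages(void)
	{
		return lru_size;
	}

	/* stand-in for shrink_slab(): show the pressure ratio it would use */
	static void shrink_slab(unsigned long scanned, unsigned long lru_pages)
	{
		if (lru_pages == 0)
			printf("scanned=%lu lru_pages=0 -> ratio undefined\n",
			       scanned);
		else
			printf("scanned=%lu lru_pages=%lu -> ratio %.3f\n",
			       scanned, lru_pages,
			       (double)scanned / lru_pages);
	}

	int main(void)
	{
		/* computed once up front, as before the fix */
		unsigned long stale = count_lru_pages();

		for (int pass = 0; pass < 3; pass++) {
			lru_size /= 4;		/* reclaim empties the LRU lists */
			shrink_slab(100, stale);		/* old: ratio drifts */
			shrink_slab(100, count_lru_pages());	/* fixed: tracks reality */
		}
		return 0;
	}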
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--	mm/vmscan.c	33
1 file changed, 16 insertions(+), 17 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 40fea4918390..7430df68cb64 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1406,6 +1406,16 @@ static unsigned long shrink_all_zones(unsigned long nr_pages, int prio,
 	return ret;
 }
 
+static unsigned long count_lru_pages(void)
+{
+	struct zone *zone;
+	unsigned long ret = 0;
+
+	for_each_zone(zone)
+		ret += zone->nr_active + zone->nr_inactive;
+	return ret;
+}
+
 /*
  * Try to free `nr_pages' of memory, system-wide, and return the number of
  * freed pages.
@@ -1420,7 +1430,6 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
 	unsigned long ret = 0;
 	int pass;
 	struct reclaim_state reclaim_state;
-	struct zone *zone;
 	struct scan_control sc = {
 		.gfp_mask = GFP_KERNEL,
 		.may_swap = 0,
@@ -1431,10 +1440,7 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
 
 	current->reclaim_state = &reclaim_state;
 
-	lru_pages = 0;
-	for_each_zone(zone)
-		lru_pages += zone->nr_active + zone->nr_inactive;
-
+	lru_pages = count_lru_pages();
 	nr_slab = global_page_state(NR_SLAB_RECLAIMABLE);
 	/* If slab caches are huge, it's better to hit them first */
 	while (nr_slab >= lru_pages) {
@@ -1461,13 +1467,6 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
 	for (pass = 0; pass < 5; pass++) {
 		int prio;
 
-		/* Needed for shrinking slab caches later on */
-		if (!lru_pages)
-			for_each_zone(zone) {
-				lru_pages += zone->nr_active;
-				lru_pages += zone->nr_inactive;
-			}
-
 		/* Force reclaiming mapped pages in the passes #3 and #4 */
 		if (pass > 2) {
 			sc.may_swap = 1;
@@ -1483,7 +1482,8 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
 				goto out;
 
 			reclaim_state.reclaimed_slab = 0;
-			shrink_slab(sc.nr_scanned, sc.gfp_mask, lru_pages);
+			shrink_slab(sc.nr_scanned, sc.gfp_mask,
+					count_lru_pages());
 			ret += reclaim_state.reclaimed_slab;
 			if (ret >= nr_pages)
 				goto out;
@@ -1491,20 +1491,19 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
 			if (sc.nr_scanned && prio < DEF_PRIORITY - 2)
 				congestion_wait(WRITE, HZ / 10);
 		}
-
-		lru_pages = 0;
 	}
 
 	/*
 	 * If ret = 0, we could not shrink LRUs, but there may be something
 	 * in slab caches
 	 */
-	if (!ret)
+	if (!ret) {
 		do {
 			reclaim_state.reclaimed_slab = 0;
-			shrink_slab(nr_pages, sc.gfp_mask, lru_pages);
+			shrink_slab(nr_pages, sc.gfp_mask, count_lru_pages());
 			ret += reclaim_state.reclaimed_slab;
 		} while (ret < nr_pages && reclaim_state.reclaimed_slab > 0);
+	}
 
 out:
 	current->reclaim_state = NULL;