author		Andrew Morton <akpm@osdl.org>	2006-03-22 03:08:20 -0500
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-03-22 10:54:00 -0500
commit		05ff51376f01fd8837946a4f8144a84f6cc71c19
tree		ca91bcabe6b4bfa71801a39e2921a44ec0bb1003 /mm
parent		69e05944af39fc6c97b09380c8721e38433bd828
[PATCH] vmscan return nr_reclaimed
Change all the vmscan functions to return the number of reclaimed pages and
remove scan_control.nr_reclaimed.

Saves ten-odd bytes of text and makes things clearer and more consistent.
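The shape of the change, as a standalone sketch (a toy program, not kernel code: the bodies, the counts, and the four-batch loop are invented for illustration; only the naming mirrors mm/vmscan.c): instead of every level of the call chain accumulating into a shared scan_control field, each function returns its own count and the caller sums the return values.

	#include <stdio.h>

	struct scan_control {
		unsigned long nr_scanned;	/* still lives in the struct */
		/* nr_reclaimed removed: the count now travels up as return values */
	};

	/* Leaf level: pretend each batch scans eight pages and frees three. */
	static unsigned long shrink_list(struct scan_control *sc)
	{
		sc->nr_scanned += 8;
		return 3;			/* was: sc->nr_reclaimed += 3; */
	}

	static unsigned long shrink_cache(struct scan_control *sc)
	{
		unsigned long nr_reclaimed = 0;
		int batch;

		for (batch = 0; batch < 4; batch++)
			nr_reclaimed += shrink_list(sc);
		return nr_reclaimed;		/* was: void, result left in sc */
	}

	int main(void)
	{
		struct scan_control sc = { .nr_scanned = 0 };
		unsigned long nr_reclaimed = shrink_cache(&sc);

		printf("scanned %lu, reclaimed %lu\n", sc.nr_scanned, nr_reclaimed);
		return 0;
	}

The caller no longer has to zero a field before each call and read it back afterwards; a function's result is visible in its signature, which is where both the text savings and the consistency come from.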
The patch also changes the behaviour of zone_reclaim() when it falls back to slab shrinking. Christoph says
"Setting this to one means that we will rescan and shrink the slab for
each allocation if we are out of zone memory and RECLAIM_SLAB is set. Plus
if we do an order 0 allocation we do not go off node as intended.
"We better set this to zero. This means the allocation will go offnode
despite us having potentially freed lots of memory on the zone. Future
allocations can then again be done from this zone."
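A toy model of the trade-off Christoph describes (again not kernel code: zone_reclaim() here is a two-line stand-in, and the boolean switch is invented so both behaviours can be shown side by side). An order-0 allocation needs one page; if the slab fallback unconditionally reports one page reclaimed, the zone always looks worth retrying, so every allocation rescans it and shrinks the slab again. Reporting zero lets the allocation go off-node instead.

	#include <stdbool.h>
	#include <stdio.h>

	/* Stand-in for __zone_reclaim(): LRU reclaim found nothing, and the
	 * shrink_slab() fallback cannot say how many pages it freed in this
	 * zone.  The only question is what count to report afterwards. */
	static bool zone_reclaim(unsigned long nr_pages, bool pretend_slab_freed_one)
	{
		unsigned long nr_reclaimed = 0;

		/* shrink_slab(...); pages actually freed in this zone: unknown */
		if (pretend_slab_freed_one)
			nr_reclaimed = 1;	/* old behaviour */

		/* Caller treats "true" as "this zone is worth allocating from". */
		return nr_reclaimed >= nr_pages;
	}

	int main(void)
	{
		/* nr_pages == 1 models an order-0 allocation. */
		printf("report 1 page:  stay on node = %d\n", zone_reclaim(1, true));
		printf("report 0 pages: stay on node = %d\n", zone_reclaim(1, false));
		return 0;
	}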
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Christoph Lameter <christoph@lameter.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'mm')
-rw-r--r--	mm/vmscan.c	77
1 file changed, 38 insertions(+), 39 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 62cd7cd257e3..8f6ad13d34f5 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -55,9 +55,6 @@ struct scan_control {
 	/* Incremented by the number of inactive pages that were scanned */
 	unsigned long nr_scanned;
 
-	/* Incremented by the number of pages reclaimed */
-	unsigned long nr_reclaimed;
-
 	unsigned long nr_mapped;	/* From page_state */
 
 	/* This context's GFP mask */
@@ -409,7 +406,7 @@ cannot_free:
 }
 
 /*
- * shrink_list adds the number of reclaimed pages to sc->nr_reclaimed
+ * shrink_list() returns the number of reclaimed pages
 */
 static unsigned long shrink_list(struct list_head *page_list,
					struct scan_control *sc)
@@ -417,7 +414,7 @@ static unsigned long shrink_list(struct list_head *page_list,
 	LIST_HEAD(ret_pages);
 	struct pagevec freed_pvec;
 	int pgactivate = 0;
-	unsigned long reclaimed = 0;
+	unsigned long nr_reclaimed = 0;
 
 	cond_resched();
 
@@ -557,7 +554,7 @@ static unsigned long shrink_list(struct list_head *page_list,
 
 free_it:
 		unlock_page(page);
-		reclaimed++;
+		nr_reclaimed++;
 		if (!pagevec_add(&freed_pvec, page))
 			__pagevec_release_nonlru(&freed_pvec);
 		continue;
@@ -575,8 +572,7 @@ keep:
 	if (pagevec_count(&freed_pvec))
 		__pagevec_release_nonlru(&freed_pvec);
 	mod_page_state(pgactivate, pgactivate);
-	sc->nr_reclaimed += reclaimed;
-	return reclaimed;
+	return nr_reclaimed;
 }
 
 #ifdef CONFIG_MIGRATION
@@ -1107,14 +1103,15 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
 }
 
 /*
- * shrink_cache() adds the number of pages reclaimed to sc->nr_reclaimed
+ * shrink_cache() returns the number of reclaimed pages
 */
-static void shrink_cache(unsigned long max_scan, struct zone *zone,
+static unsigned long shrink_cache(unsigned long max_scan, struct zone *zone,
				struct scan_control *sc)
 {
 	LIST_HEAD(page_list);
 	struct pagevec pvec;
 	unsigned long nr_scanned = 0;
+	unsigned long nr_reclaimed = 0;
 
 	pagevec_init(&pvec, 1);
 
@@ -1138,7 +1135,7 @@ static void shrink_cache(unsigned long max_scan, struct zone *zone,
 
 		nr_scanned += nr_scan;
 		nr_freed = shrink_list(&page_list, sc);
-
+		nr_reclaimed += nr_freed;
 		local_irq_disable();
 		if (current_is_kswapd()) {
 			__mod_page_state_zone(zone, pgscan_kswapd, nr_scan);
@@ -1170,6 +1167,7 @@ static void shrink_cache(unsigned long max_scan, struct zone *zone,
 		spin_unlock_irq(&zone->lru_lock);
 done:
 	pagevec_release(&pvec);
+	return nr_reclaimed;
 }
 
 /*
@@ -1329,12 +1327,13 @@ refill_inactive_zone(unsigned long nr_pages, struct zone *zone,
 /*
  * This is a basic per-zone page freer.  Used by both kswapd and direct reclaim.
  */
-static void shrink_zone(int priority, struct zone *zone,
+static unsigned long shrink_zone(int priority, struct zone *zone,
			struct scan_control *sc)
 {
 	unsigned long nr_active;
 	unsigned long nr_inactive;
 	unsigned long nr_to_scan;
+	unsigned long nr_reclaimed = 0;
 
 	atomic_inc(&zone->reclaim_in_progress);
 
@@ -1368,13 +1367,14 @@ static void shrink_zone(int priority, struct zone *zone,
 			nr_to_scan = min(nr_inactive,
				(unsigned long)sc->swap_cluster_max);
 			nr_inactive -= nr_to_scan;
-			shrink_cache(nr_to_scan, zone, sc);
+			nr_reclaimed += shrink_cache(nr_to_scan, zone, sc);
 		}
 	}
 
 	throttle_vm_writeout();
 
 	atomic_dec(&zone->reclaim_in_progress);
+	return nr_reclaimed;
 }
 
 /*
@@ -1393,9 +1393,10 @@ static void shrink_zone(int priority, struct zone *zone,
  * If a zone is deemed to be full of pinned pages then just give it a light
  * scan then give up on it.
  */
-static void shrink_caches(int priority, struct zone **zones,
+static unsigned long shrink_caches(int priority, struct zone **zones,
		struct scan_control *sc)
 {
+	unsigned long nr_reclaimed = 0;
 	int i;
 
 	for (i = 0; zones[i] != NULL; i++) {
@@ -1414,8 +1415,9 @@ static void shrink_caches(int priority, struct zone **zones,
 		if (zone->all_unreclaimable && priority != DEF_PRIORITY)
 			continue;	/* Let kswapd poll it */
 
-		shrink_zone(priority, zone, sc);
+		nr_reclaimed += shrink_zone(priority, zone, sc);
 	}
+	return nr_reclaimed;
 }
 
 /*
@@ -1436,7 +1438,7 @@ unsigned long try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
 	int priority;
 	int ret = 0;
 	unsigned long total_scanned = 0;
-	unsigned long total_reclaimed = 0;
+	unsigned long nr_reclaimed = 0;
 	struct reclaim_state *reclaim_state = current->reclaim_state;
 	unsigned long lru_pages = 0;
 	int i;
@@ -1462,18 +1464,16 @@ unsigned long try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
 	for (priority = DEF_PRIORITY; priority >= 0; priority--) {
 		sc.nr_mapped = read_page_state(nr_mapped);
 		sc.nr_scanned = 0;
-		sc.nr_reclaimed = 0;
 		if (!priority)
 			disable_swap_token();
-		shrink_caches(priority, zones, &sc);
+		nr_reclaimed += shrink_caches(priority, zones, &sc);
 		shrink_slab(sc.nr_scanned, gfp_mask, lru_pages);
 		if (reclaim_state) {
-			sc.nr_reclaimed += reclaim_state->reclaimed_slab;
+			nr_reclaimed += reclaim_state->reclaimed_slab;
 			reclaim_state->reclaimed_slab = 0;
 		}
 		total_scanned += sc.nr_scanned;
-		total_reclaimed += sc.nr_reclaimed;
-		if (total_reclaimed >= sc.swap_cluster_max) {
+		if (nr_reclaimed >= sc.swap_cluster_max) {
 			ret = 1;
 			goto out;
 		}
@@ -1540,7 +1540,7 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, unsigned long nr_pages,
 	int priority;
 	int i;
 	unsigned long total_scanned;
-	unsigned long total_reclaimed;
+	unsigned long nr_reclaimed;
 	struct reclaim_state *reclaim_state = current->reclaim_state;
 	struct scan_control sc = {
 		.gfp_mask = GFP_KERNEL,
@@ -1550,7 +1550,7 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, unsigned long nr_pages,
 
 loop_again:
 	total_scanned = 0;
-	total_reclaimed = 0;
+	nr_reclaimed = 0;
 	sc.may_writepage = !laptop_mode,
 	sc.nr_mapped = read_page_state(nr_mapped);
 
@@ -1632,13 +1632,11 @@ scan:
 			if (zone->prev_priority > priority)
 				zone->prev_priority = priority;
 			sc.nr_scanned = 0;
-			sc.nr_reclaimed = 0;
-			shrink_zone(priority, zone, &sc);
+			nr_reclaimed += shrink_zone(priority, zone, &sc);
 			reclaim_state->reclaimed_slab = 0;
 			nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL,
						lru_pages);
-			sc.nr_reclaimed += reclaim_state->reclaimed_slab;
-			total_reclaimed += sc.nr_reclaimed;
+			nr_reclaimed += reclaim_state->reclaimed_slab;
 			total_scanned += sc.nr_scanned;
 			if (zone->all_unreclaimable)
 				continue;
@@ -1651,10 +1649,10 @@ scan:
 			 * even in laptop mode
 			 */
 			if (total_scanned > SWAP_CLUSTER_MAX * 2 &&
-			    total_scanned > total_reclaimed+total_reclaimed/2)
+			    total_scanned > nr_reclaimed + nr_reclaimed / 2)
 				sc.may_writepage = 1;
 		}
-		if (nr_pages && to_free > total_reclaimed)
+		if (nr_pages && to_free > nr_reclaimed)
 			continue;	/* swsusp: need to do more work */
 		if (all_zones_ok)
 			break;		/* kswapd: all done */
@@ -1671,7 +1669,7 @@ scan:
	 * matches the direct reclaim path behaviour in terms of impact
	 * on zone->*_priority.
	 */
-	if ((total_reclaimed >= SWAP_CLUSTER_MAX) && (!nr_pages))
+	if ((nr_reclaimed >= SWAP_CLUSTER_MAX) && !nr_pages)
 		break;
 }
 out:
@@ -1685,7 +1683,7 @@ out:
 		goto loop_again;
 	}
 
-	return total_reclaimed;
+	return nr_reclaimed;
 }
 
 /*
@@ -1891,6 +1889,7 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 	struct task_struct *p = current;
 	struct reclaim_state reclaim_state;
 	int priority;
+	unsigned long nr_reclaimed = 0;
 	struct scan_control sc = {
 		.may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
 		.may_swap = !!(zone_reclaim_mode & RECLAIM_SWAP),
@@ -1917,11 +1916,11 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
	 */
 	priority = ZONE_RECLAIM_PRIORITY;
 	do {
-		shrink_zone(priority, zone, &sc);
+		nr_reclaimed += shrink_zone(priority, zone, &sc);
 		priority--;
-	} while (priority >= 0 && sc.nr_reclaimed < nr_pages);
+	} while (priority >= 0 && nr_reclaimed < nr_pages);
 
-	if (sc.nr_reclaimed < nr_pages && (zone_reclaim_mode & RECLAIM_SLAB)) {
+	if (nr_reclaimed < nr_pages && (zone_reclaim_mode & RECLAIM_SLAB)) {
 		/*
 		 * shrink_slab does not currently allow us to determine
 		 * how many pages were freed in the zone. So we just
@@ -1936,10 +1935,10 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 	p->reclaim_state = NULL;
 	current->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE);
 
-	if (sc.nr_reclaimed == 0)
+	if (nr_reclaimed == 0)
 		zone->last_unsuccessful_zone_reclaim = jiffies;
 
-	return sc.nr_reclaimed >= nr_pages;
+	return nr_reclaimed >= nr_pages;
 }
 
 int zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)