aboutsummaryrefslogtreecommitdiffstats
path: root/mm
diff options
context:
space:
mode:
authorAndrew Morton <akpm@osdl.org>2006-03-22 03:08:21 -0500
committerLinus Torvalds <torvalds@g5.osdl.org>2006-03-22 10:54:00 -0500
commit1742f19fa920cdd6905f0db5898524dde22ab2a4 (patch)
tree90a490795f29f96e89503ead5affb4c8f99e5e75 /mm
parent05ff51376f01fd8837946a4f8144a84f6cc71c19 (diff)
[PATCH] vmscan: rename functions
We have: try_to_free_pages ->shrink_caches(struct zone **zones, ..) ->shrink_zone(struct zone *, ...) ->shrink_cache(struct zone *, ...) ->shrink_list(struct list_head *, ...) ->refill_inactive_zone(struct zone *, ...) which is fairly irrational. Rename things so that we have try_to_free_pages ->shrink_zones(struct zone **zones, ..) ->shrink_zone(struct zone *, ...) ->shrink_inactive_list(struct zone *, ...) ->shrink_page_list(struct list_head *, ...) ->shrink_active_list(struct zone *, ...) Cc: Nick Piggin <nickpiggin@yahoo.com.au> Cc: Christoph Lameter <christoph@lameter.com> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'mm')
-rw-r--r--mm/vmscan.c32
1 files changed, 17 insertions, 15 deletions
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 8f6ad13d34f5..2d5d4864de88 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -298,7 +298,8 @@ static void handle_write_error(struct address_space *mapping,
298} 298}
299 299
300/* 300/*
301 * pageout is called by shrink_list() for each dirty page. Calls ->writepage(). 301 * pageout is called by shrink_page_list() for each dirty page.
302 * Calls ->writepage().
302 */ 303 */
303static pageout_t pageout(struct page *page, struct address_space *mapping) 304static pageout_t pageout(struct page *page, struct address_space *mapping)
304{ 305{
@@ -406,10 +407,10 @@ cannot_free:
406} 407}
407 408
408/* 409/*
409 * shrink_list return the number of reclaimed pages 410 * shrink_page_list() returns the number of reclaimed pages
410 */ 411 */
411static unsigned long shrink_list(struct list_head *page_list, 412static unsigned long shrink_page_list(struct list_head *page_list,
412 struct scan_control *sc) 413 struct scan_control *sc)
413{ 414{
414 LIST_HEAD(ret_pages); 415 LIST_HEAD(ret_pages);
415 struct pagevec freed_pvec; 416 struct pagevec freed_pvec;
@@ -1103,10 +1104,11 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
1103} 1104}
1104 1105
1105/* 1106/*
1106 * shrink_cache() return the number of reclaimed pages 1107 * shrink_inactive_list() is a helper for shrink_zone(). It returns the number
1108 * of reclaimed pages
1107 */ 1109 */
1108static unsigned long shrink_cache(unsigned long max_scan, struct zone *zone, 1110static unsigned long shrink_inactive_list(unsigned long max_scan,
1109 struct scan_control *sc) 1111 struct zone *zone, struct scan_control *sc)
1110{ 1112{
1111 LIST_HEAD(page_list); 1113 LIST_HEAD(page_list);
1112 struct pagevec pvec; 1114 struct pagevec pvec;
@@ -1134,7 +1136,7 @@ static unsigned long shrink_cache(unsigned long max_scan, struct zone *zone,
1134 goto done; 1136 goto done;
1135 1137
1136 nr_scanned += nr_scan; 1138 nr_scanned += nr_scan;
1137 nr_freed = shrink_list(&page_list, sc); 1139 nr_freed = shrink_page_list(&page_list, sc);
1138 nr_reclaimed += nr_freed; 1140 nr_reclaimed += nr_freed;
1139 local_irq_disable(); 1141 local_irq_disable();
1140 if (current_is_kswapd()) { 1142 if (current_is_kswapd()) {
@@ -1187,9 +1189,8 @@ done:
1187 * The downside is that we have to touch page->_count against each page. 1189 * The downside is that we have to touch page->_count against each page.
1188 * But we had to alter page->flags anyway. 1190 * But we had to alter page->flags anyway.
1189 */ 1191 */
1190static void 1192static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
1191refill_inactive_zone(unsigned long nr_pages, struct zone *zone, 1193 struct scan_control *sc)
1192 struct scan_control *sc)
1193{ 1194{
1194 unsigned long pgmoved; 1195 unsigned long pgmoved;
1195 int pgdeactivate = 0; 1196 int pgdeactivate = 0;
@@ -1360,14 +1361,15 @@ static unsigned long shrink_zone(int priority, struct zone *zone,
1360 nr_to_scan = min(nr_active, 1361 nr_to_scan = min(nr_active,
1361 (unsigned long)sc->swap_cluster_max); 1362 (unsigned long)sc->swap_cluster_max);
1362 nr_active -= nr_to_scan; 1363 nr_active -= nr_to_scan;
1363 refill_inactive_zone(nr_to_scan, zone, sc); 1364 shrink_active_list(nr_to_scan, zone, sc);
1364 } 1365 }
1365 1366
1366 if (nr_inactive) { 1367 if (nr_inactive) {
1367 nr_to_scan = min(nr_inactive, 1368 nr_to_scan = min(nr_inactive,
1368 (unsigned long)sc->swap_cluster_max); 1369 (unsigned long)sc->swap_cluster_max);
1369 nr_inactive -= nr_to_scan; 1370 nr_inactive -= nr_to_scan;
1370 nr_reclaimed += shrink_cache(nr_to_scan, zone, sc); 1371 nr_reclaimed += shrink_inactive_list(nr_to_scan, zone,
1372 sc);
1371 } 1373 }
1372 } 1374 }
1373 1375
@@ -1393,7 +1395,7 @@ static unsigned long shrink_zone(int priority, struct zone *zone,
1393 * If a zone is deemed to be full of pinned pages then just give it a light 1395 * If a zone is deemed to be full of pinned pages then just give it a light
1394 * scan then give up on it. 1396 * scan then give up on it.
1395 */ 1397 */
1396static unsigned long shrink_caches(int priority, struct zone **zones, 1398static unsigned long shrink_zones(int priority, struct zone **zones,
1397 struct scan_control *sc) 1399 struct scan_control *sc)
1398{ 1400{
1399 unsigned long nr_reclaimed = 0; 1401 unsigned long nr_reclaimed = 0;
@@ -1466,7 +1468,7 @@ unsigned long try_to_free_pages(struct zone **zones, gfp_t gfp_mask)
1466 sc.nr_scanned = 0; 1468 sc.nr_scanned = 0;
1467 if (!priority) 1469 if (!priority)
1468 disable_swap_token(); 1470 disable_swap_token();
1469 nr_reclaimed += shrink_caches(priority, zones, &sc); 1471 nr_reclaimed += shrink_zones(priority, zones, &sc);
1470 shrink_slab(sc.nr_scanned, gfp_mask, lru_pages); 1472 shrink_slab(sc.nr_scanned, gfp_mask, lru_pages);
1471 if (reclaim_state) { 1473 if (reclaim_state) {
1472 nr_reclaimed += reclaim_state->reclaimed_slab; 1474 nr_reclaimed += reclaim_state->reclaimed_slab;