author     Hugh Dickins <hughd@google.com>  2012-01-12 20:20:07 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-01-12 23:13:10 -0500
commit     3f79768f239746d19accd88da96263ef35d6a219 (patch)
tree       08c9dd76fb61386de4c26c393263c1cfc1d0b874 /mm/vmscan.c
parent     f626012db08b9ea71363327d81fe60c2782eea9f (diff)
mm: rearrange putback_inactive_pages
There is sometimes confusion between the global putback_lru_pages() in
migrate.c and the static putback_lru_pages() in vmscan.c: rename the
latter putback_inactive_pages(): it helps shrink_inactive_list() rather
as move_active_pages_to_lru() helps shrink_active_list().

Remove unused scan_control arg from putback_inactive_pages() and from
update_isolated_counts().  Move clear_active_flags() inside
update_isolated_counts().  Move NR_ISOLATED accounting up into
shrink_inactive_list() itself, so the balance is clearer.

Do the spin_lock_irq() before calling putback_inactive_pages() and
spin_unlock_irq() after return from it, so that it better matches
update_isolated_counts() and move_active_pages_to_lru().

Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: KOSAKI Motohiro <kosaki.motohiro@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
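For orientation, the reshaped tail of shrink_inactive_list() after this patch
looks roughly as follows. This is a condensed paraphrase of the diff below,
with error handling, event counters, and most locals omitted:

	spin_lock_irq(&zone->lru_lock);
	/* ... isolate_lru_pages() fills page_list ... */
	update_isolated_counts(mz, &page_list, &nr_anon, &nr_file);
	__mod_zone_page_state(zone, NR_ISOLATED_ANON, nr_anon);
	__mod_zone_page_state(zone, NR_ISOLATED_FILE, nr_file);
	spin_unlock_irq(&zone->lru_lock);

	nr_reclaimed = shrink_page_list(&page_list, mz, sc,
					priority, &nr_dirty, &nr_writeback);

	spin_lock_irq(&zone->lru_lock);
	putback_inactive_pages(mz, &page_list);	/* leaves freeable pages on page_list */
	__mod_zone_page_state(zone, NR_ISOLATED_ANON, -nr_anon);
	__mod_zone_page_state(zone, NR_ISOLATED_FILE, -nr_file);
	spin_unlock_irq(&zone->lru_lock);

	free_hot_cold_page_list(&page_list, 1);

The NR_ISOLATED increment and decrement now sit in the same function, under
the same lock, which is the "balance" the changelog refers to.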
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--  mm/vmscan.c  96
1 file changed, 44 insertions(+), 52 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 9aab5dc51718..2880396f7953 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1284,32 +1284,6 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
 	return nr_taken;
 }
 
-/*
- * clear_active_flags() is a helper for shrink_active_list(), clearing
- * any active bits from the pages in the list.
- */
-static unsigned long clear_active_flags(struct list_head *page_list,
-					unsigned int *count)
-{
-	int nr_active = 0;
-	int lru;
-	struct page *page;
-
-	list_for_each_entry(page, page_list, lru) {
-		int numpages = hpage_nr_pages(page);
-		lru = page_lru_base_type(page);
-		if (PageActive(page)) {
-			lru += LRU_ACTIVE;
-			ClearPageActive(page);
-			nr_active += numpages;
-		}
-		if (count)
-			count[lru] += numpages;
-	}
-
-	return nr_active;
-}
-
 /**
  * isolate_lru_page - tries to isolate a page from its LRU list
  * @page: page to isolate from its LRU list
@@ -1383,26 +1357,21 @@ static int too_many_isolated(struct zone *zone, int file,
 	return isolated > inactive;
 }
 
-/*
- * TODO: Try merging with migrations version of putback_lru_pages
- */
 static noinline_for_stack void
-putback_lru_pages(struct mem_cgroup_zone *mz, struct scan_control *sc,
-		  unsigned long nr_anon, unsigned long nr_file,
-		  struct list_head *page_list)
+putback_inactive_pages(struct mem_cgroup_zone *mz,
+		       struct list_head *page_list)
 {
-	struct page *page;
-	LIST_HEAD(pages_to_free);
-	struct zone *zone = mz->zone;
 	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(mz);
+	struct zone *zone = mz->zone;
+	LIST_HEAD(pages_to_free);
 
 	/*
 	 * Put back any unfreeable pages.
 	 */
-	spin_lock(&zone->lru_lock);
 	while (!list_empty(page_list)) {
+		struct page *page = lru_to_page(page_list);
 		int lru;
-		page = lru_to_page(page_list);
+
 		VM_BUG_ON(PageLRU(page));
 		list_del(&page->lru);
 		if (unlikely(!page_evictable(page, NULL))) {
@@ -1432,26 +1401,40 @@ putback_lru_pages(struct mem_cgroup_zone *mz, struct scan_control *sc,
 			list_add(&page->lru, &pages_to_free);
 		}
 	}
-	__mod_zone_page_state(zone, NR_ISOLATED_ANON, -nr_anon);
-	__mod_zone_page_state(zone, NR_ISOLATED_FILE, -nr_file);
 
-	spin_unlock_irq(&zone->lru_lock);
-	free_hot_cold_page_list(&pages_to_free, 1);
+	/*
+	 * To save our caller's stack, now use input list for pages to free.
+	 */
+	list_splice(&pages_to_free, page_list);
 }
 
 static noinline_for_stack void
 update_isolated_counts(struct mem_cgroup_zone *mz,
-		       struct scan_control *sc,
+		       struct list_head *page_list,
 		       unsigned long *nr_anon,
-		       unsigned long *nr_file,
-		       struct list_head *isolated_list)
+		       unsigned long *nr_file)
 {
-	unsigned long nr_active;
+	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(mz);
 	struct zone *zone = mz->zone;
 	unsigned int count[NR_LRU_LISTS] = { 0, };
-	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(mz);
+	unsigned long nr_active = 0;
+	struct page *page;
+	int lru;
+
+	/*
+	 * Count pages and clear active flags
+	 */
+	list_for_each_entry(page, page_list, lru) {
+		int numpages = hpage_nr_pages(page);
+		lru = page_lru_base_type(page);
+		if (PageActive(page)) {
+			lru += LRU_ACTIVE;
+			ClearPageActive(page);
+			nr_active += numpages;
+		}
+		count[lru] += numpages;
+	}
 
-	nr_active = clear_active_flags(isolated_list, count);
 	__count_vm_events(PGDEACTIVATE, nr_active);
 
 	__mod_zone_page_state(zone, NR_ACTIVE_FILE,
@@ -1465,8 +1448,6 @@ update_isolated_counts(struct mem_cgroup_zone *mz,
 
 	*nr_anon = count[LRU_ACTIVE_ANON] + count[LRU_INACTIVE_ANON];
 	*nr_file = count[LRU_ACTIVE_FILE] + count[LRU_INACTIVE_FILE];
-	__mod_zone_page_state(zone, NR_ISOLATED_ANON, *nr_anon);
-	__mod_zone_page_state(zone, NR_ISOLATED_FILE, *nr_file);
 
 	reclaim_stat->recent_scanned[0] += *nr_anon;
 	reclaim_stat->recent_scanned[1] += *nr_file;
@@ -1571,7 +1552,10 @@ shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
 		return 0;
 	}
 
-	update_isolated_counts(mz, sc, &nr_anon, &nr_file, &page_list);
+	update_isolated_counts(mz, &page_list, &nr_anon, &nr_file);
+
+	__mod_zone_page_state(zone, NR_ISOLATED_ANON, nr_anon);
+	__mod_zone_page_state(zone, NR_ISOLATED_FILE, nr_file);
 
 	spin_unlock_irq(&zone->lru_lock);
 
@@ -1585,12 +1569,20 @@ shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
 					priority, &nr_dirty, &nr_writeback);
 	}
 
-	local_irq_disable();
+	spin_lock_irq(&zone->lru_lock);
+
 	if (current_is_kswapd())
 		__count_vm_events(KSWAPD_STEAL, nr_reclaimed);
 	__count_zone_vm_events(PGSTEAL, zone, nr_reclaimed);
 
-	putback_lru_pages(mz, sc, nr_anon, nr_file, &page_list);
+	putback_inactive_pages(mz, &page_list);
+
+	__mod_zone_page_state(zone, NR_ISOLATED_ANON, -nr_anon);
+	__mod_zone_page_state(zone, NR_ISOLATED_FILE, -nr_file);
+
+	spin_unlock_irq(&zone->lru_lock);
+
+	free_hot_cold_page_list(&page_list, 1);
 
 	/*
 	 * If reclaim is isolating dirty pages under writeback, it implies
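
A note on the "use input list for pages to free" comment in
putback_inactive_pages(): because the caller now holds zone->lru_lock across
the call, the function can no longer call free_hot_cold_page_list() itself;
instead it splices its local pages_to_free back onto the (by then drained)
input list, and the caller frees after unlocking. The same pattern works
anywhere a callee must hand a leftover list back without an extra list
parameter. Below is a minimal userspace sketch of the idea; the types and
helper names are illustrative, not kernel API:

	#include <stdio.h>
	#include <stdlib.h>

	struct node { int val; struct node *next; };

	static struct node *lru;	/* stand-in for the zone's LRU */

	/*
	 * Drain *list: positive values are "unfreeable" and go back on
	 * the LRU; the rest are spliced back into *list so the caller
	 * can free them after dropping its lock.
	 */
	static void putback_filtered(struct node **list)
	{
		struct node *to_free = NULL;	/* local "pages_to_free" */

		while (*list) {
			struct node *n = *list;
			*list = n->next;
			if (n->val > 0) {	/* put back */
				n->next = lru;
				lru = n;
			} else {		/* leave for the caller */
				n->next = to_free;
				to_free = n;
			}
		}
		*list = to_free;	/* reuse the input list: no extra arg */
	}

	int main(void)
	{
		struct node *list = NULL;

		for (int i = -2; i <= 2; i++) {
			struct node *n = malloc(sizeof(*n));
			n->val = i;
			n->next = list;
			list = n;
		}
		putback_filtered(&list);
		while (list) {		/* caller frees, "outside the lock" */
			struct node *n = list;
			list = n->next;
			printf("freeing %d\n", n->val);
			free(n);
		}
		return 0;
	}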