author	Hugh Dickins <hughd@google.com>	2012-01-12 20:19:56 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-01-12 23:13:10 -0500
commit	2bcf887963812c075f80a14e1fad8ec7e1c67acf
tree	132f11eefe904653307a05b77d16f4c41866e486 /mm/vmscan.c
parent	90b3feaec8ffb167abd8903bf111605c2f035aa8
mm: take pagevecs off reclaim stack
Replace pagevecs in putback_lru_pages() and move_active_pages_to_lru()
by lists of pages_to_free: then apply Konstantin Khlebnikov's
free_hot_cold_page_list() to them instead of pagevec_release().

Which simplifies the flow (no need to drop and retake lock whenever
pagevec fills up) and reduces stale addresses in stack backtraces
(which often showed through the pagevecs); but more importantly,
removes another 120 bytes from the deepest stacks in page reclaim.
Although I've not recently seen an actual stack overflow here with a
vanilla kernel, move_active_pages_to_lru() has often featured in deep
backtraces.

However, free_hot_cold_page_list() does not handle compound pages
(nor need it: a Transparent HugePage would have been split by the
time it reaches the call in shrink_page_list()), but it is possible
for putback_lru_pages() or move_active_pages_to_lru() to be left
holding the last reference on a THP, so must exclude the unlikely
compound case before putting on pages_to_free.

Remove pagevec_strip(), its work now done in move_active_pages_to_lru().
The pagevec in scan_mapping_unevictable_pages() remains in mm/vmscan.c,
but that is never on the reclaim path, and cannot be replaced by a list.

Signed-off-by: Hugh Dickins <hughd@google.com>
Reviewed-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Reviewed-by: Konstantin Khlebnikov <khlebnikov@openvz.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
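For context, a minimal self-contained userspace C sketch (not kernel code) of
the control-flow change described above: the old pagevec-style pattern must
drop and retake the LRU lock every time its fixed-size batch fills, while the
new pattern threads pages onto an unbounded pages_to_free list under the lock
and frees them in one pass after the lock is released. All names in the sketch
(fake_page, BATCH_SIZE, free_batch, putback_old, putback_new) are invented for
illustration and do not appear in mm/vmscan.c.

/*
 * Illustrative sketch only -- not kernel code.  It contrasts batching into a
 * bounded array (flushed mid-walk with the lock dropped and retaken) against
 * collecting onto an intrusive list that is drained once after unlocking.
 */
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

#define BATCH_SIZE 14			/* same order as PAGEVEC_SIZE */

struct fake_page {
	int id;
	struct fake_page *next;		/* intrusive link, like page->lru */
};

static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;

static void free_batch(struct fake_page **batch, int n)
{
	for (int i = 0; i < n; i++)
		free(batch[i]);
}

/* Old style: bounded batch, flushed whenever it fills. */
static void putback_old(struct fake_page *pages[], int count)
{
	struct fake_page *batch[BATCH_SIZE];
	int n = 0;

	pthread_mutex_lock(&lru_lock);
	for (int i = 0; i < count; i++) {
		batch[n++] = pages[i];
		if (n == BATCH_SIZE) {
			pthread_mutex_unlock(&lru_lock);
			free_batch(batch, n);	/* flush mid-walk */
			n = 0;
			pthread_mutex_lock(&lru_lock);
		}
	}
	pthread_mutex_unlock(&lru_lock);
	free_batch(batch, n);
}

/* New style: thread pages onto a list, free them once after unlocking. */
static void putback_new(struct fake_page *pages[], int count)
{
	struct fake_page *pages_to_free = NULL;

	pthread_mutex_lock(&lru_lock);
	for (int i = 0; i < count; i++) {
		pages[i]->next = pages_to_free;	/* no flush, no lock dance */
		pages_to_free = pages[i];
	}
	pthread_mutex_unlock(&lru_lock);

	while (pages_to_free) {
		struct fake_page *p = pages_to_free;
		pages_to_free = p->next;
		free(p);
	}
}

int main(void)
{
	struct fake_page *pages[32];

	for (int i = 0; i < 32; i++) {
		pages[i] = malloc(sizeof(*pages[i]));
		pages[i]->id = i;
		pages[i]->next = NULL;
	}
	putback_old(pages, 32);

	for (int i = 0; i < 32; i++) {
		pages[i] = malloc(sizeof(*pages[i]));
		pages[i]->id = i;
		pages[i]->next = NULL;
	}
	putback_new(pages, 32);

	printf("both variants freed 32 pages; only the new one batches without refilling\n");
	return 0;
}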
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--	mm/vmscan.c	58
1 file changed, 40 insertions(+), 18 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 25f90383b391..7724fb8e7498 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1398,12 +1398,10 @@ putback_lru_pages(struct mem_cgroup_zone *mz, struct scan_control *sc,
 			   struct list_head *page_list)
 {
 	struct page *page;
-	struct pagevec pvec;
+	LIST_HEAD(pages_to_free);
 	struct zone *zone = mz->zone;
 	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(mz);
 
-	pagevec_init(&pvec, 1);
-
 	/*
 	 * Put back any unfreeable pages.
 	 */
@@ -1427,17 +1425,24 @@ putback_lru_pages(struct mem_cgroup_zone *mz, struct scan_control *sc,
 			int numpages = hpage_nr_pages(page);
 			reclaim_stat->recent_rotated[file] += numpages;
 		}
-		if (!pagevec_add(&pvec, page)) {
-			spin_unlock_irq(&zone->lru_lock);
-			__pagevec_release(&pvec);
-			spin_lock_irq(&zone->lru_lock);
+		if (put_page_testzero(page)) {
+			__ClearPageLRU(page);
+			__ClearPageActive(page);
+			del_page_from_lru_list(zone, page, lru);
+
+			if (unlikely(PageCompound(page))) {
+				spin_unlock_irq(&zone->lru_lock);
+				(*get_compound_page_dtor(page))(page);
+				spin_lock_irq(&zone->lru_lock);
+			} else
+				list_add(&page->lru, &pages_to_free);
 		}
 	}
 	__mod_zone_page_state(zone, NR_ISOLATED_ANON, -nr_anon);
 	__mod_zone_page_state(zone, NR_ISOLATED_FILE, -nr_file);
 
 	spin_unlock_irq(&zone->lru_lock);
-	pagevec_release(&pvec);
+	free_hot_cold_page_list(&pages_to_free, 1);
 }
 
 static noinline_for_stack void
@@ -1647,13 +1652,23 @@ shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
 
 static void move_active_pages_to_lru(struct zone *zone,
 				     struct list_head *list,
+				     struct list_head *pages_to_free,
 				     enum lru_list lru)
 {
 	unsigned long pgmoved = 0;
-	struct pagevec pvec;
 	struct page *page;
 
-	pagevec_init(&pvec, 1);
+	if (buffer_heads_over_limit) {
+		spin_unlock_irq(&zone->lru_lock);
+		list_for_each_entry(page, list, lru) {
+			if (page_has_private(page) && trylock_page(page)) {
+				if (page_has_private(page))
+					try_to_release_page(page, 0);
+				unlock_page(page);
+			}
+		}
+		spin_lock_irq(&zone->lru_lock);
+	}
 
 	while (!list_empty(list)) {
 		struct lruvec *lruvec;
@@ -1667,12 +1682,17 @@ static void move_active_pages_to_lru(struct zone *zone,
 		list_move(&page->lru, &lruvec->lists[lru]);
 		pgmoved += hpage_nr_pages(page);
 
-		if (!pagevec_add(&pvec, page) || list_empty(list)) {
-			spin_unlock_irq(&zone->lru_lock);
-			if (buffer_heads_over_limit)
-				pagevec_strip(&pvec);
-			__pagevec_release(&pvec);
-			spin_lock_irq(&zone->lru_lock);
+		if (put_page_testzero(page)) {
+			__ClearPageLRU(page);
+			__ClearPageActive(page);
+			del_page_from_lru_list(zone, page, lru);
+
+			if (unlikely(PageCompound(page))) {
+				spin_unlock_irq(&zone->lru_lock);
+				(*get_compound_page_dtor(page))(page);
+				spin_lock_irq(&zone->lru_lock);
+			} else
+				list_add(&page->lru, pages_to_free);
 		}
 	}
 	__mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
@@ -1766,12 +1786,14 @@ static void shrink_active_list(unsigned long nr_pages,
 	 */
 	reclaim_stat->recent_rotated[file] += nr_rotated;
 
-	move_active_pages_to_lru(zone, &l_active,
+	move_active_pages_to_lru(zone, &l_active, &l_hold,
 						LRU_ACTIVE + file * LRU_FILE);
-	move_active_pages_to_lru(zone, &l_inactive,
+	move_active_pages_to_lru(zone, &l_inactive, &l_hold,
 						LRU_BASE + file * LRU_FILE);
 	__mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);
 	spin_unlock_irq(&zone->lru_lock);
+
+	free_hot_cold_page_list(&l_hold, 1);
 }
 
 #ifdef CONFIG_SWAP