about summary refs log tree commit diff stats
path: root/mm/vmscan.c
diff options
context:
space:
mode:
authorJohannes Weiner <hannes@cmpxchg.org>2014-08-08 17:19:24 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2014-08-08 18:57:18 -0400
commit747db954cab64c6b7a95b121b517165f34751898 (patch)
tree35149b31a5f3a0bb85df2e40c79c46ed2df4f4ed /mm/vmscan.c
parent0a31bc97c80c3fa87b32c091d9a930ac19cd0c40 (diff)
mm: memcontrol: use page lists for uncharge batching
Pages are now uncharged at release time, and all sources of batched uncharges operate on lists of pages. Directly use those lists, and get rid of the per-task batching state. This also batches statistics accounting, in addition to the res counter charges, to reduce IRQ-disabling and re-enabling. Signed-off-by: Johannes Weiner <hannes@cmpxchg.org> Acked-by: Michal Hocko <mhocko@suse.cz> Cc: Hugh Dickins <hughd@google.com> Cc: Tejun Heo <tj@kernel.org> Cc: Vladimir Davydov <vdavydov@parallels.com> Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com> Cc: Vladimir Davydov <vdavydov@parallels.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--mm/vmscan.c12
1 file changed, 5 insertions(+), 7 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 7068e838d22b..2836b5373b2e 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -822,7 +822,6 @@ static unsigned long shrink_page_list(struct list_head *page_list,
822 822
823 cond_resched(); 823 cond_resched();
824 824
825 mem_cgroup_uncharge_start();
826 while (!list_empty(page_list)) { 825 while (!list_empty(page_list)) {
827 struct address_space *mapping; 826 struct address_space *mapping;
828 struct page *page; 827 struct page *page;
@@ -1103,7 +1102,6 @@ static unsigned long shrink_page_list(struct list_head *page_list,
1103 */ 1102 */
1104 __clear_page_locked(page); 1103 __clear_page_locked(page);
1105free_it: 1104free_it:
1106 mem_cgroup_uncharge(page);
1107 nr_reclaimed++; 1105 nr_reclaimed++;
1108 1106
1109 /* 1107 /*
@@ -1133,8 +1131,8 @@ keep:
1133 list_add(&page->lru, &ret_pages); 1131 list_add(&page->lru, &ret_pages);
1134 VM_BUG_ON_PAGE(PageLRU(page) || PageUnevictable(page), page); 1132 VM_BUG_ON_PAGE(PageLRU(page) || PageUnevictable(page), page);
1135 } 1133 }
1136 mem_cgroup_uncharge_end();
1137 1134
1135 mem_cgroup_uncharge_list(&free_pages);
1138 free_hot_cold_page_list(&free_pages, true); 1136 free_hot_cold_page_list(&free_pages, true);
1139 1137
1140 list_splice(&ret_pages, page_list); 1138 list_splice(&ret_pages, page_list);
@@ -1437,10 +1435,9 @@ putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list)
1437 __ClearPageActive(page); 1435 __ClearPageActive(page);
1438 del_page_from_lru_list(page, lruvec, lru); 1436 del_page_from_lru_list(page, lruvec, lru);
1439 1437
1440 mem_cgroup_uncharge(page);
1441
1442 if (unlikely(PageCompound(page))) { 1438 if (unlikely(PageCompound(page))) {
1443 spin_unlock_irq(&zone->lru_lock); 1439 spin_unlock_irq(&zone->lru_lock);
1440 mem_cgroup_uncharge(page);
1444 (*get_compound_page_dtor(page))(page); 1441 (*get_compound_page_dtor(page))(page);
1445 spin_lock_irq(&zone->lru_lock); 1442 spin_lock_irq(&zone->lru_lock);
1446 } else 1443 } else
@@ -1548,6 +1545,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
1548 1545
1549 spin_unlock_irq(&zone->lru_lock); 1546 spin_unlock_irq(&zone->lru_lock);
1550 1547
1548 mem_cgroup_uncharge_list(&page_list);
1551 free_hot_cold_page_list(&page_list, true); 1549 free_hot_cold_page_list(&page_list, true);
1552 1550
1553 /* 1551 /*
@@ -1660,10 +1658,9 @@ static void move_active_pages_to_lru(struct lruvec *lruvec,
1660 __ClearPageActive(page); 1658 __ClearPageActive(page);
1661 del_page_from_lru_list(page, lruvec, lru); 1659 del_page_from_lru_list(page, lruvec, lru);
1662 1660
1663 mem_cgroup_uncharge(page);
1664
1665 if (unlikely(PageCompound(page))) { 1661 if (unlikely(PageCompound(page))) {
1666 spin_unlock_irq(&zone->lru_lock); 1662 spin_unlock_irq(&zone->lru_lock);
1663 mem_cgroup_uncharge(page);
1667 (*get_compound_page_dtor(page))(page); 1664 (*get_compound_page_dtor(page))(page);
1668 spin_lock_irq(&zone->lru_lock); 1665 spin_lock_irq(&zone->lru_lock);
1669 } else 1666 } else
@@ -1771,6 +1768,7 @@ static void shrink_active_list(unsigned long nr_to_scan,
1771 __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken); 1768 __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);
1772 spin_unlock_irq(&zone->lru_lock); 1769 spin_unlock_irq(&zone->lru_lock);
1773 1770
1771 mem_cgroup_uncharge_list(&l_hold);
1774 free_hot_cold_page_list(&l_hold, true); 1772 free_hot_cold_page_list(&l_hold, true);
1775} 1773}
1776 1774