Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--  mm/vmscan.c  12
1 file changed, 9 insertions(+), 3 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index d2f65c856350..7068e838d22b 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -577,9 +577,10 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
 
 	if (PageSwapCache(page)) {
 		swp_entry_t swap = { .val = page_private(page) };
+		mem_cgroup_swapout(page, swap);
 		__delete_from_swap_cache(page);
 		spin_unlock_irq(&mapping->tree_lock);
-		swapcache_free(swap, page);
+		swapcache_free(swap);
 	} else {
 		void (*freepage)(struct page *);
 		void *shadow = NULL;
@@ -600,7 +601,6 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
 		shadow = workingset_eviction(mapping, page);
 		__delete_from_page_cache(page, shadow);
 		spin_unlock_irq(&mapping->tree_lock);
-		mem_cgroup_uncharge_cache_page(page);
 
 		if (freepage != NULL)
 			freepage(page);
@@ -1103,6 +1103,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		 */
 		__clear_page_locked(page);
 free_it:
+		mem_cgroup_uncharge(page);
 		nr_reclaimed++;
 
 		/*
@@ -1132,12 +1133,13 @@ keep:
 		list_add(&page->lru, &ret_pages);
 		VM_BUG_ON_PAGE(PageLRU(page) || PageUnevictable(page), page);
 	}
+	mem_cgroup_uncharge_end();
 
 	free_hot_cold_page_list(&free_pages, true);
 
 	list_splice(&ret_pages, page_list);
 	count_vm_events(PGACTIVATE, pgactivate);
-	mem_cgroup_uncharge_end();
+
 	*ret_nr_dirty += nr_dirty;
 	*ret_nr_congested += nr_congested;
 	*ret_nr_unqueued_dirty += nr_unqueued_dirty;
@@ -1435,6 +1437,8 @@ putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list)
 		__ClearPageActive(page);
 		del_page_from_lru_list(page, lruvec, lru);
 
+		mem_cgroup_uncharge(page);
+
 		if (unlikely(PageCompound(page))) {
 			spin_unlock_irq(&zone->lru_lock);
 			(*get_compound_page_dtor(page))(page);
@@ -1656,6 +1660,8 @@ static void move_active_pages_to_lru(struct lruvec *lruvec,
 		__ClearPageActive(page);
 		del_page_from_lru_list(page, lruvec, lru);
 
+		mem_cgroup_uncharge(page);
+
 		if (unlikely(PageCompound(page))) {
 			spin_unlock_irq(&zone->lru_lock);
 			(*get_compound_page_dtor(page))(page);
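
The hunks above share one pattern: the memcg uncharge moves from the unmap paths (the removed mem_cgroup_uncharge_cache_page() call, the old two-argument swapcache_free()) to the points where pages are actually freed (the free_it: label in shrink_page_list() and the LRU putback paths), so every way a reclaimed page can die funnels through a single uncharge point. Below is a minimal userspace sketch of that ordering change; it is an illustration of the pattern only, and the mock_uncharge()/put_page_mock()/free_page_mock() helpers are hypothetical stand-ins, not the kernel API.

#include <stdio.h>
#include <stdlib.h>

/* Toy page with a reference count and a charged flag; "charged"
 * stands in for the page's memcg charge. */
struct page {
	int refcount;
	int charged;
};

/* Stand-in for the memcg uncharge; name is illustrative. */
static void mock_uncharge(struct page *page)
{
	if (page->charged) {
		page->charged = 0;
		printf("uncharged page %p\n", (void *)page);
	}
}

/* New scheme sketched by the diff: the uncharge happens on the free
 * path, once the last reference is gone, rather than at each site
 * that removes the page from a mapping. */
static void free_page_mock(struct page *page)
{
	mock_uncharge(page);
	free(page);
}

static void put_page_mock(struct page *page)
{
	if (--page->refcount == 0)
		free_page_mock(page);
}

int main(void)
{
	struct page *page = calloc(1, sizeof(*page));

	page->refcount = 2;	/* one ref for the mapping, one for us */
	page->charged = 1;

	/* Removing the page from its mapping drops a reference but no
	 * longer uncharges it, unlike the old
	 * mem_cgroup_uncharge_cache_page() call site. */
	put_page_mock(page);

	/* The final put frees, and therefore uncharges, the page. */
	put_page_mock(page);
	return 0;
}

The same logic explains the swap-cache hunk: mem_cgroup_swapout() records the memcg state before the page leaves the swap cache, so swapcache_free() no longer needs the page argument.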