author    Hugh Dickins <hughd@google.com>  2012-05-29 18:07:09 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2012-05-29 19:22:28 -0400
commit    fa9add641b1b1c564db916accac1db346e7a2759 (patch)
tree      875e74ec4d7fed0018fdbc134ad899949c5e3384 /mm/huge_memory.c
parent    75b00af77ed5b5a3d55549f9e0c33f3969b9330c (diff)
mm/memcg: apply add/del_page to lruvec
Take lruvec further: pass it instead of zone to add_page_to_lru_list() and
del_page_from_lru_list(); and have pagevec_lru_move_fn() pass lruvec down
to its target functions.

This cleanup eliminates a swathe of cruft in memcontrol.c, including
mem_cgroup_lru_add_list(), mem_cgroup_lru_del_list() and
mem_cgroup_lru_move_lists() - which never actually touched the lists.

In their place, mem_cgroup_page_lruvec() decides the lruvec, previously a
side-effect of add, and mem_cgroup_update_lru_size() maintains the
lru_size stats.

Whilst these are simplifications in their own right, the goal is to bring
the evaluation of lruvec next to the spin_locking of the lrus, in
preparation for a future patch.

Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Michal Hocko <mhocko@suse.cz>
Acked-by: Konstantin Khlebnikov <khlebnikov@openvz.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
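For illustration, a minimal sketch of the calling convention this patch moves to,
assuming the v3.5-era signatures; the caller shown is hypothetical and not part of
the patch, and the argument order of the list helpers follows the
lru_add_page_tail() call in the diff below:

	/* Before: LRU list helpers took the zone and re-derived the
	 * memcg lruvec internally on every call. */
	spin_lock_irq(&zone->lru_lock);
	del_page_from_lru_list(zone, page, lru);
	add_page_to_lru_list(zone, page, lru);
	spin_unlock_irq(&zone->lru_lock);

	/* After: resolve the lruvec once, right next to taking lru_lock,
	 * then pass it instead of the zone down to the list helpers. */
	spin_lock_irq(&zone->lru_lock);
	lruvec = mem_cgroup_page_lruvec(page, zone);
	del_page_from_lru_list(page, lruvec, lru);
	add_page_to_lru_list(page, lruvec, lru);
	spin_unlock_irq(&zone->lru_lock);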
Diffstat (limited to 'mm/huge_memory.c')
-rw-r--r--  mm/huge_memory.c  8
1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index d0def42c121b..57c4b9309015 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1231,10 +1231,13 @@ static void __split_huge_page_refcount(struct page *page)
 {
 	int i;
 	struct zone *zone = page_zone(page);
+	struct lruvec *lruvec;
 	int tail_count = 0;
 
 	/* prevent PageLRU to go away from under us, and freeze lru stats */
 	spin_lock_irq(&zone->lru_lock);
+	lruvec = mem_cgroup_page_lruvec(page, zone);
+
 	compound_lock(page);
 	/* complete memcg works before add pages to LRU */
 	mem_cgroup_split_huge_fixup(page);
@@ -1309,13 +1312,12 @@ static void __split_huge_page_refcount(struct page *page)
 		BUG_ON(!PageDirty(page_tail));
 		BUG_ON(!PageSwapBacked(page_tail));
 
-
-		lru_add_page_tail(zone, page, page_tail);
+		lru_add_page_tail(page, page_tail, lruvec);
 	}
 	atomic_sub(tail_count, &page->_count);
 	BUG_ON(atomic_read(&page->_count) <= 0);
 
-	__dec_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
+	__mod_zone_page_state(zone, NR_ANON_TRANSPARENT_HUGEPAGES, -1);
 	__mod_zone_page_state(zone, NR_ANON_PAGES, HPAGE_PMD_NR);
 
 	ClearPageCompound(page);
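On the final hunk: __dec_zone_page_state(page, item) re-derives the zone from the
page via page_zone(), whereas __mod_zone_page_state(zone, item, -1) reuses the zone
pointer the function already computed at the top; the net effect on the counter is
the same. A minimal sketch of that equivalence, assuming the v3.5-era vmstat
helpers:

	/* Two equivalent ways to decrement the same per-zone counter by one;
	 * the second form avoids recomputing page_zone(page) when the zone
	 * pointer is already in hand. */
	__dec_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
	__mod_zone_page_state(page_zone(page), NR_ANON_TRANSPARENT_HUGEPAGES, -1);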