author    KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>    2012-01-12 20:18:20 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>       2012-01-12 23:13:05 -0500
commit    e94c8a9cbce1aee4af9e1285802785481b7f93c5 (patch)
tree      14d496e62c2e8d59a8954ff10368d59d8dfaea03 /mm/huge_memory.c
parent    6b208e3f6e35aa76d254c395bdcd984b17c6b626 (diff)
memcg: make mem_cgroup_split_huge_fixup() more efficient
In split_huge_page(), mem_cgroup_split_huge_fixup() is called to handle
page_cgroup modifications.  It takes move_lock_page_cgroup(), modifies
page_cgroup, does LRU accounting jobs, and is called HPAGE_PMD_NR - 1
times (once per tail page).

But thinking again,
  - compound_lock() is held at move_account()...then, it's not necessary
    to take move_lock_page_cgroup().
  - LRU is locked and all tail pages will go onto the same LRU the
    head is now on.
  - page_cgroup is contiguous in the huge page range.

This patch reworks mem_cgroup_split_huge_fixup() so that it is called
once per hugepage, reducing the cost of splitting.

[akpm@linux-foundation.org: fix typo, per Michal]
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
Cc: Balbir Singh <bsingharora@gmail.com>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
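For context, here is a minimal sketch of what the reworked helper on the
mm/memcontrol.c side might look like under the assumptions above (this
diff only shows the call-site change; the exact flag masking and any
statistics updates in the real helper are not reproduced here).  Because
the caller already holds compound_lock() and zone->lru_lock, the fixup
can walk the contiguous page_cgroup array once, with no per-page
move_lock_page_cgroup():

/*
 * Sketch only, not the verbatim mm/memcontrol.c implementation.
 * Assumes the caller, __split_huge_page_refcount(), already holds
 * compound_lock() and zone->lru_lock, and that page_cgroup entries
 * for a huge page are contiguous, so head_pc + i is the page_cgroup
 * of the i-th tail page.
 */
void mem_cgroup_split_huge_fixup(struct page *head)
{
	struct page_cgroup *head_pc = lookup_page_cgroup(head);
	struct page_cgroup *pc;
	int i;

	if (mem_cgroup_disabled())
		return;

	for (i = 1; i < HPAGE_PMD_NR; i++) {
		pc = head_pc + i;		/* contiguous page_cgroup range */
		pc->mem_cgroup = head_pc->mem_cgroup;
		smp_wmb();			/* commit memcg before flags */
		/*
		 * Simplified: the real code masks out flag bits that
		 * must not be copied to tail pages at split time.
		 */
		pc->flags = head_pc->flags;
	}
}

Since the whole walk runs under locks the caller already holds, the
per-tail-page lock/unlock cycles of the old scheme disappear, which is
where the saving comes from.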
Diffstat (limited to 'mm/huge_memory.c')
-rw-r--r--    mm/huge_memory.c    3
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 36b3d988b4ef..db522e160cca 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1207,6 +1207,8 @@ static void __split_huge_page_refcount(struct page *page)
 	/* prevent PageLRU to go away from under us, and freeze lru stats */
 	spin_lock_irq(&zone->lru_lock);
 	compound_lock(page);
+	/* complete memcg works before add pages to LRU */
+	mem_cgroup_split_huge_fixup(page);
 
 	for (i = 1; i < HPAGE_PMD_NR; i++) {
 		struct page *page_tail = page + i;
@@ -1278,7 +1280,6 @@ static void __split_huge_page_refcount(struct page *page)
 		BUG_ON(!PageDirty(page_tail));
 		BUG_ON(!PageSwapBacked(page_tail));
 
-		mem_cgroup_split_huge_fixup(page, page_tail);
 
 		lru_add_page_tail(zone, page, page_tail);
 	}