author     KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>  2011-01-20 17:44:23 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>      2011-01-20 20:02:05 -0500
commit     e401f1761c0b01966e36e41e2c385d455a7b44ee (patch)
tree       3b1ac772f0708ba0b1c963663ce6d0d0f79884b6 /mm
parent     20d9600cb407b0b55fef6ee814b60345c6f58264 (diff)
memcg: modify accounting function for supporting THP better
mem_cgroup_charge_statistics() was designed to account for a single page, but we now have transparent hugepages. To fix problems addressed in a following patch, the function must be changed to take the number of pages as an argument.

The new function takes the following arguments:
 - the type of page, rather than 'pc'
 - the number of pages being accounted

Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: Balbir Singh <balbir@linux.vnet.ibm.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
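For illustration only, here is a minimal, self-contained C sketch of the accounting behaviour this patch introduces. It is not kernel code: the per-cpu mem->stat->count[] updates are modelled with plain integers, and the names (charge_statistics, STAT_CACHE, counters, ...) are invented for the example.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy stand-ins for the memcg per-cpu statistics; names are invented. */
enum { STAT_CACHE, STAT_RSS, STAT_PGPGIN, STAT_PGPGOUT, STAT_EVENTS, NR_STATS };
static long counters[NR_STATS];

/*
 * Mirrors the new calling convention: the caller passes the page type
 * (file cache vs. anonymous) and a signed page count, positive to charge
 * and negative to uncharge, instead of a page_cgroup pointer and a bool.
 */
static void charge_statistics(bool file, int nr_pages)
{
	if (file)
		counters[STAT_CACHE] += nr_pages;
	else
		counters[STAT_RSS] += nr_pages;

	/* pagein/pageout of a big page is still one event; its size is ignored */
	if (nr_pages > 0)
		counters[STAT_PGPGIN]++;
	else
		counters[STAT_PGPGOUT]++;

	/* the events counter is adjusted by nr_pages, as in the diff below */
	counters[STAT_EVENTS] += nr_pages;
}

int main(void)
{
	charge_statistics(false, 512);   /* charge one 2MB THP = 512 base pages */
	charge_statistics(false, -512);  /* uncharge it again */

	assert(counters[STAT_RSS] == 0);
	assert(counters[STAT_PGPGIN] == 1 && counters[STAT_PGPGOUT] == 1);
	printf("rss=%ld pgpgin=%ld pgpgout=%ld\n",
	       counters[STAT_RSS], counters[STAT_PGPGIN], counters[STAT_PGPGOUT]);
	return 0;
}

Passing a signed page count this way lets a caller account an entire hugepage in one call while a pagein or pageout is still recorded as a single event, which is the point of taking nr_pages instead of a bool.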
Diffstat (limited to 'mm')
-rw-r--r--   mm/memcontrol.c   25
1 file changed, 12 insertions(+), 13 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 8ab841031436..6d59a2bd520a 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -600,23 +600,22 @@ static void mem_cgroup_swap_statistics(struct mem_cgroup *mem,
 }
 
 static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
-					 struct page_cgroup *pc,
-					 bool charge)
+					 bool file, int nr_pages)
 {
-	int val = (charge) ? 1 : -1;
-
 	preempt_disable();
 
-	if (PageCgroupCache(pc))
-		__this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_CACHE], val);
+	if (file)
+		__this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_CACHE], nr_pages);
 	else
-		__this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_RSS], val);
+		__this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_RSS], nr_pages);
 
-	if (charge)
+	/* pagein of a big page is an event. So, ignore page size */
+	if (nr_pages > 0)
 		__this_cpu_inc(mem->stat->count[MEM_CGROUP_STAT_PGPGIN_COUNT]);
 	else
 		__this_cpu_inc(mem->stat->count[MEM_CGROUP_STAT_PGPGOUT_COUNT]);
-	__this_cpu_inc(mem->stat->count[MEM_CGROUP_EVENTS]);
+
+	__this_cpu_add(mem->stat->count[MEM_CGROUP_EVENTS], nr_pages);
 
 	preempt_enable();
 }
@@ -2115,7 +2114,7 @@ static void ____mem_cgroup_commit_charge(struct mem_cgroup *mem,
 		break;
 	}
 
-	mem_cgroup_charge_statistics(mem, pc, true);
+	mem_cgroup_charge_statistics(mem, PageCgroupCache(pc), 1);
 }
 
 static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
@@ -2186,14 +2185,14 @@ static void __mem_cgroup_move_account(struct page_cgroup *pc,
 		__this_cpu_inc(to->stat->count[MEM_CGROUP_STAT_FILE_MAPPED]);
 		preempt_enable();
 	}
-	mem_cgroup_charge_statistics(from, pc, false);
+	mem_cgroup_charge_statistics(from, PageCgroupCache(pc), -1);
 	if (uncharge)
 		/* This is not "cancel", but cancel_charge does all we need. */
 		mem_cgroup_cancel_charge(from, PAGE_SIZE);
 
 	/* caller should have done css_get */
 	pc->mem_cgroup = to;
-	mem_cgroup_charge_statistics(to, pc, true);
+	mem_cgroup_charge_statistics(to, PageCgroupCache(pc), 1);
 	/*
 	 * We charges against "to" which may not have any tasks. Then, "to"
 	 * can be under rmdir(). But in current implementation, caller of
@@ -2597,7 +2596,7 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
 	}
 
 	for (i = 0; i < count; i++)
-		mem_cgroup_charge_statistics(mem, pc + i, false);
+		mem_cgroup_charge_statistics(mem, PageCgroupCache(pc), -1);
 
 	ClearPageCgroupUsed(pc);
 	/*