author    KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>  2010-03-10 18:22:31 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2010-03-12 18:52:37 -0500
commit    d2265e6fa3f220ea5fd37522d13390e9675adcf7 (patch)
tree      30e0264bcd379e21507b22243891880b7418f68f /mm
parent    430e48631e72aeab74d844c57b441f98a2e36eee (diff)
memcg : share event counter rather than duplicate
Memcg has two event counters that count "the same" event; only their usages differ. This patch reduces them to one event counter.

The new logic uses an "only increment, no reset" counter plus a mask for each check. The soft limit check was done once per 1000 events, so a similar check can be done by !(new_counter & 0x3ff). The threshold check was done once per 100 events, so a similar check can be done by !(new_counter & 0x7f). All event checks are done right after the EVENT percpu counter is updated.

Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Kirill A. Shutemov <kirill@shutemov.name>
Cc: Balbir Singh <balbir@linux.vnet.ibm.com>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
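The mask trick is easy to verify in isolation. Below is a minimal, self-contained C sketch (not taken from the patch; the simulation loop and the event_check() helper are hypothetical stand-ins for the per-cpu memcg counter) showing how a single "only increment, no reset" counter drives two periodic checks of different granularity:

#include <stdbool.h>
#include <stdio.h>

/* Granularity expressed as a mask shift, as in the patch:
   a check fires once every 2^shift events. */
#define THRESHOLDS_EVENTS_THRESH (7)  /* once in 128 */
#define SOFTLIMIT_EVENTS_THRESH (10)  /* once in 1024 */

/* Fires when the low `shift` bits of the counter are all zero. */
static bool event_check(long long counter, int shift)
{
	return !(counter & ((1LL << shift) - 1));
}

int main(void)
{
	long long events = 0;	/* stand-in for the percpu counter */
	long long thresholds = 0, softlimit = 0;
	int i;

	for (i = 0; i < 4096; i++) {
		events++;	/* only increment, no reset */
		if (event_check(events, THRESHOLDS_EVENTS_THRESH))
			thresholds++;
		if (event_check(events, SOFTLIMIT_EVENTS_THRESH))
			softlimit++;
	}
	/* prints "thresholds=32 softlimit=4" (4096/128 and 4096/1024) */
	printf("thresholds=%lld softlimit=%lld\n", thresholds, softlimit);
	return 0;
}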
Diffstat (limited to 'mm')
-rw-r--r--	mm/memcontrol.c	86
1 file changed, 41 insertions(+), 45 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 006fe142d4ba..f9ae4b4c36eb 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -63,8 +63,15 @@ static int really_do_swap_account __initdata = 1; /* for remember boot option*/
 #define do_swap_account		(0)
 #endif
 
-#define SOFTLIMIT_EVENTS_THRESH (1000)
-#define THRESHOLDS_EVENTS_THRESH (100)
+/*
+ * Per memcg event counter is incremented at every pagein/pageout. This counter
+ * is used for trigger some periodic events. This is straightforward and better
+ * than using jiffies etc. to handle periodic memcg event.
+ *
+ * These values will be used as !((event) & ((1 <<(thresh)) - 1))
+ */
+#define THRESHOLDS_EVENTS_THRESH (7) /* once in 128 */
+#define SOFTLIMIT_EVENTS_THRESH (10) /* once in 1024 */
 
 /*
  * Statistics for memory cgroup.
@@ -79,10 +86,7 @@ enum mem_cgroup_stat_index {
 	MEM_CGROUP_STAT_PGPGIN_COUNT,	/* # of pages paged in */
 	MEM_CGROUP_STAT_PGPGOUT_COUNT,	/* # of pages paged out */
 	MEM_CGROUP_STAT_SWAPOUT, /* # of pages, swapped out */
-	MEM_CGROUP_STAT_SOFTLIMIT, /* decrements on each page in/out.
-					used by soft limit implementation */
-	MEM_CGROUP_STAT_THRESHOLDS, /* decrements on each page in/out.
-					used by threshold implementation */
+	MEM_CGROUP_EVENTS,	/* incremented at every pagein/pageout */
 
 	MEM_CGROUP_STAT_NSTATS,
 };
@@ -154,7 +158,6 @@ struct mem_cgroup_threshold_ary {
 	struct mem_cgroup_threshold entries[0];
 };
 
-static bool mem_cgroup_threshold_check(struct mem_cgroup *mem);
 static void mem_cgroup_threshold(struct mem_cgroup *mem);
 
 /*
@@ -392,19 +395,6 @@ mem_cgroup_remove_exceeded(struct mem_cgroup *mem,
 	spin_unlock(&mctz->lock);
 }
 
-static bool mem_cgroup_soft_limit_check(struct mem_cgroup *mem)
-{
-	bool ret = false;
-	s64 val;
-
-	val = this_cpu_read(mem->stat->count[MEM_CGROUP_STAT_SOFTLIMIT]);
-	if (unlikely(val < 0)) {
-		this_cpu_write(mem->stat->count[MEM_CGROUP_STAT_SOFTLIMIT],
-				SOFTLIMIT_EVENTS_THRESH);
-		ret = true;
-	}
-	return ret;
-}
 
 static void mem_cgroup_update_tree(struct mem_cgroup *mem, struct page *page)
 {
@@ -542,8 +532,7 @@ static void mem_cgroup_charge_statistics(struct mem_cgroup *mem,
 		__this_cpu_inc(mem->stat->count[MEM_CGROUP_STAT_PGPGIN_COUNT]);
 	else
 		__this_cpu_inc(mem->stat->count[MEM_CGROUP_STAT_PGPGOUT_COUNT]);
-	__this_cpu_dec(mem->stat->count[MEM_CGROUP_STAT_SOFTLIMIT]);
-	__this_cpu_dec(mem->stat->count[MEM_CGROUP_STAT_THRESHOLDS]);
+	__this_cpu_inc(mem->stat->count[MEM_CGROUP_EVENTS]);
 
 	preempt_enable();
 }
@@ -563,6 +552,29 @@ static unsigned long mem_cgroup_get_local_zonestat(struct mem_cgroup *mem,
 	return total;
 }
 
+static bool __memcg_event_check(struct mem_cgroup *mem, int event_mask_shift)
+{
+	s64 val;
+
+	val = this_cpu_read(mem->stat->count[MEM_CGROUP_EVENTS]);
+
+	return !(val & ((1 << event_mask_shift) - 1));
+}
+
+/*
+ * Check events in order.
+ *
+ */
+static void memcg_check_events(struct mem_cgroup *mem, struct page *page)
+{
+	/* threshold event is triggered in finer grain than soft limit */
+	if (unlikely(__memcg_event_check(mem, THRESHOLDS_EVENTS_THRESH))) {
+		mem_cgroup_threshold(mem);
+		if (unlikely(__memcg_event_check(mem, SOFTLIMIT_EVENTS_THRESH)))
+			mem_cgroup_update_tree(mem, page);
+	}
+}
+
 static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
 {
 	return container_of(cgroup_subsys_state(cont,
@@ -1686,11 +1698,7 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *mem,
 	 * Insert ancestor (and ancestor's ancestors), to softlimit RB-tree.
 	 * if they exceeds softlimit.
 	 */
-	if (mem_cgroup_soft_limit_check(mem))
-		mem_cgroup_update_tree(mem, pc->page);
-	if (mem_cgroup_threshold_check(mem))
-		mem_cgroup_threshold(mem);
-
+	memcg_check_events(mem, pc->page);
 }
 
 /**
@@ -1760,6 +1768,11 @@ static int mem_cgroup_move_account(struct page_cgroup *pc,
 		ret = 0;
 	}
 	unlock_page_cgroup(pc);
+	/*
+	 * check events
+	 */
+	memcg_check_events(to, pc->page);
+	memcg_check_events(from, pc->page);
 	return ret;
 }
 
@@ -2128,10 +2141,7 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
 	mz = page_cgroup_zoneinfo(pc);
 	unlock_page_cgroup(pc);
 
-	if (mem_cgroup_soft_limit_check(mem))
-		mem_cgroup_update_tree(mem, page);
-	if (mem_cgroup_threshold_check(mem))
-		mem_cgroup_threshold(mem);
+	memcg_check_events(mem, page);
 	/* at swapout, this memcg will be accessed to record to swap */
 	if (ctype != MEM_CGROUP_CHARGE_TYPE_SWAPOUT)
 		css_put(&mem->css);
@@ -3215,20 +3225,6 @@ static int mem_cgroup_swappiness_write(struct cgroup *cgrp, struct cftype *cft,
 	return 0;
 }
 
-static bool mem_cgroup_threshold_check(struct mem_cgroup *mem)
-{
-	bool ret = false;
-	s64 val;
-
-	val = this_cpu_read(mem->stat->count[MEM_CGROUP_STAT_THRESHOLDS]);
-	if (unlikely(val < 0)) {
-		this_cpu_write(mem->stat->count[MEM_CGROUP_STAT_THRESHOLDS],
-				THRESHOLDS_EVENTS_THRESH);
-		ret = true;
-	}
-	return ret;
-}
-
 static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
 {
 	struct mem_cgroup_threshold_ary *t;
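A closing observation on memcg_check_events() (an editorial note, not part of the commit): the soft limit check is nested inside the threshold check, which is safe because any counter value with the low 10 bits clear also has the low 7 bits clear, so a counter value that is due for the coarser soft limit check always passes the finer threshold check first. A tiny standalone assertion of that property:

#include <assert.h>

int main(void)
{
	long long v;

	/* Every multiple of 1024 is also a multiple of 128, so the
	   nested soft limit check can never be skipped. */
	for (v = 0; v < (1LL << 20); v++)
		if (!(v & ((1LL << 10) - 1)))
			assert(!(v & ((1LL << 7) - 1)));
	return 0;
}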