author		KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>	2011-07-08 18:39:43 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-07-09 00:14:44 -0400
commit		453a9bf347f1e22a5bb3605ced43b2366921221d (patch)
tree		dfe355892b8ace86f74f6b8a34ad86da159f697d /mm
parent		4d0c066d29f030d47d19678f8008933e67dd3b72 (diff)
memcg: fix numa scan information update to be triggered by memory event
commit 889976dbcb12 ("memcg: reclaim memory from nodes in round-robin order") added NUMA-node round-robin reclaim for memcg, but the scan information it relies on was refreshed at most once every 10 seconds.

This patch changes the update trigger from jiffies to the memcg's event count: the NUMA scan information is now refreshed once 1024 pagein/pageout events have been observed under a memcg.

[akpm@linux-foundation.org: attempt to repair code layout]
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Ying Han <yinghan@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
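In outline, the change replaces "rebuild the NUMA scan nodemask at most once per 10 seconds" with "rebuild it roughly once per 1024 pagein/pageout events". A minimal user-space sketch of that event-driven trigger, using C11 atomics and hypothetical names (the kernel's real plumbing goes through its per-memcg event targets, visible in the hunks below):

#include <stdatomic.h>
#include <stdbool.h>

#define NUMAINFO_EVENTS_TARGET 1024	/* same threshold the patch adds */

static atomic_int numainfo_events;	/* batches of events seen so far */
static unsigned long events_since_rearm;

/* Hypothetical hook, called once per pagein/pageout. */
static void count_memory_event(void)
{
	if (++events_since_rearm >= NUMAINFO_EVENTS_TARGET) {
		events_since_rearm = 0;
		atomic_fetch_add(&numainfo_events, 1);	/* flag pending work */
	}
}

/* Hypothetical scan-setup hook: rebuild only when events accumulated. */
static bool should_update_nodemask(void)
{
	if (atomic_load(&numainfo_events) == 0)
		return false;		/* idle memcg: skip the rebuild */
	atomic_store(&numainfo_events, 0);
	return true;			/* caller rebuilds scan_nodes now */
}

An idle memcg generates no events, so under this scheme it never pays for a rescan, whereas the jiffies-based throttle rescanned on the same schedule regardless of activity.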
Diffstat (limited to 'mm')
-rw-r--r--	mm/memcontrol.c | 33 +++++++++++++++++++++++++++++++------
1 file changed, 27 insertions(+), 6 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index a7a5cb1bf2c..e013b8e57d2 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -108,10 +108,12 @@ enum mem_cgroup_events_index {
 enum mem_cgroup_events_target {
 	MEM_CGROUP_TARGET_THRESH,
 	MEM_CGROUP_TARGET_SOFTLIMIT,
+	MEM_CGROUP_TARGET_NUMAINFO,
 	MEM_CGROUP_NTARGETS,
 };
 #define THRESHOLDS_EVENTS_TARGET (128)
 #define SOFTLIMIT_EVENTS_TARGET (1024)
+#define NUMAINFO_EVENTS_TARGET	(1024)
 
 struct mem_cgroup_stat_cpu {
 	long count[MEM_CGROUP_STAT_NSTATS];
@@ -237,7 +239,8 @@ struct mem_cgroup {
 	int last_scanned_node;
 #if MAX_NUMNODES > 1
 	nodemask_t	scan_nodes;
-	unsigned long	next_scan_node_update;
+	atomic_t	numainfo_events;
+	atomic_t	numainfo_updating;
 #endif
 	/*
 	 * Should the accounting and control be hierarchical, per subtree?
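The dropped field held an absolute jiffies deadline; its job is split in two here, with numainfo_events saying whether a rebuild is worth doing at all and numainfo_updating ensuring only one task does it. For contrast, a user-space analogue of the removed 10-second throttle (hypothetical names):

#include <stdbool.h>
#include <time.h>

static time_t next_scan_deadline;

static bool may_rescan_time_based(void)
{
	time_t now = time(NULL);
	if (now < next_scan_deadline)
		return false;		/* rescanned within the last 10s */
	next_scan_deadline = now + 10;	/* kernel: jiffies + 10*HZ */
	return true;
}

This is the behaviour the two atomic counters replace.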
@@ -680,6 +683,9 @@ static void __mem_cgroup_target_update(struct mem_cgroup *mem, int target)
 	case MEM_CGROUP_TARGET_SOFTLIMIT:
 		next = val + SOFTLIMIT_EVENTS_TARGET;
 		break;
+	case MEM_CGROUP_TARGET_NUMAINFO:
+		next = val + NUMAINFO_EVENTS_TARGET;
+		break;
 	default:
 		return;
 	}
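Only the new switch arm is visible here; for orientation, the surrounding helpers compare a running event count against a per-target trigger point and, once the check fires, re-arm the trigger one target-interval ahead. A simplified single-counter sketch of that check/re-arm pair (hypothetical names; the real state is per-cpu and hangs off struct mem_cgroup):

#define NUMAINFO_EVENTS_TARGET (1024)

static unsigned long events;		/* running pagein/pageout count */
static unsigned long numainfo_next;	/* trigger point for this target */

static int numainfo_event_check(void)
{
	return events >= numainfo_next;	/* has the target fired? */
}

static void numainfo_target_update(void)
{
	numainfo_next = events + NUMAINFO_EVENTS_TARGET;	/* re-arm */
}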
@@ -698,11 +704,19 @@ static void memcg_check_events(struct mem_cgroup *mem, struct page *page)
 		mem_cgroup_threshold(mem);
 		__mem_cgroup_target_update(mem, MEM_CGROUP_TARGET_THRESH);
 		if (unlikely(__memcg_event_check(mem,
-			MEM_CGROUP_TARGET_SOFTLIMIT))){
+			     MEM_CGROUP_TARGET_SOFTLIMIT))) {
 			mem_cgroup_update_tree(mem, page);
 			__mem_cgroup_target_update(mem,
-				MEM_CGROUP_TARGET_SOFTLIMIT);
+						   MEM_CGROUP_TARGET_SOFTLIMIT);
+		}
+#if MAX_NUMNODES > 1
+		if (unlikely(__memcg_event_check(mem,
+			MEM_CGROUP_TARGET_NUMAINFO))) {
+			atomic_inc(&mem->numainfo_events);
+			__mem_cgroup_target_update(mem,
+				MEM_CGROUP_TARGET_NUMAINFO);
 		}
+#endif
 	}
 }
 
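Because each firing re-arms the NUMAINFO target 1024 events ahead, numainfo_events advances by roughly one per NUMAINFO_EVENTS_TARGET raw events: 10 * 1024 = 10,240 pagein/pageout events cost only about 10 atomic increments on the shared counter. On single-node builds the whole branch compiles away under the MAX_NUMNODES guard.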
@@ -1582,11 +1596,15 @@ static bool test_mem_cgroup_node_reclaimable(struct mem_cgroup *mem,
 static void mem_cgroup_may_update_nodemask(struct mem_cgroup *mem)
 {
 	int nid;
-
-	if (time_after(mem->next_scan_node_update, jiffies))
+	/*
+	 * numainfo_events > 0 means there was at least NUMAINFO_EVENTS_TARGET
+	 * pagein/pageout changes since the last update.
+	 */
+	if (!atomic_read(&mem->numainfo_events))
+		return;
+	if (atomic_inc_return(&mem->numainfo_updating) > 1)
 		return;
 
-	mem->next_scan_node_update = jiffies + 10*HZ;
 	/* make a nodemask where this memcg uses memory from */
 	mem->scan_nodes = node_states[N_HIGH_MEMORY];
 
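atomic_inc_return() returns the post-increment value, so it doubles as a try-lock here: the first task through sees 1 and proceeds to rebuild scan_nodes; any task racing with it sees a value greater than 1 and simply returns, relying on the winner's result. Losers do not spin, which is fine because the nodemask is only a reclaim heuristic and the next event batch triggers a fresh rebuild. A user-space rendering of the idiom with C11 atomics (hypothetical names):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int updating;	/* stand-in for mem->numainfo_updating */

static bool enter_update(void)
{
	/* atomic_fetch_add returns the *old* value, so old + 1 mirrors
	 * the kernel's atomic_inc_return(): only the first caller sees 1. */
	return atomic_fetch_add(&updating, 1) + 1 == 1;
}

static void leave_update(void)
{
	atomic_store(&updating, 0);	/* reopen the gate, as the final
					 * hunk does with atomic_set() */
}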
@@ -1595,6 +1613,9 @@ static void mem_cgroup_may_update_nodemask(struct mem_cgroup *mem)
 		if (!test_mem_cgroup_node_reclaimable(mem, nid, false))
 			node_clear(nid, mem->scan_nodes);
 	}
+
+	atomic_set(&mem->numainfo_events, 0);
+	atomic_set(&mem->numainfo_updating, 0);
 }
 
 /*
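Resetting numainfo_events only after scan_nodes has been rebuilt means event batches that arrive mid-rebuild are treated as satisfied by the rebuild already in progress; clearing numainfo_updating last then reopens the gate for the next updater.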