aboutsummaryrefslogtreecommitdiffstats
path: root/mm
diff options
context:
space:
mode:
authorBalbir Singh <balbir@linux.vnet.ibm.com>2008-04-29 04:00:16 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2008-04-29 11:06:10 -0400
commitcf475ad28ac35cc9ba612d67158f29b73b38b05d (patch)
tree2c7cd568d00357bd42643ea602884e731cc24f26 /mm
parent29486df325e1fe6e1764afcb19e3370804c2b002 (diff)
cgroups: add an owner to the mm_struct
Remove the mem_cgroup member from mm_struct and instead add an owner. This approach was suggested by Paul Menage. The advantage of this approach is that, once the mm->owner is known, using the subsystem id, the cgroup can be determined. It also allows several control groups that are virtually grouped by mm_struct, to exist independent of the memory controller i.e., without adding mem_cgroup's for each controller, to mm_struct. A new config option CONFIG_MM_OWNER is added and the memory resource controller selects this config option. This patch also adds cgroup callbacks to notify subsystems when mm->owner changes. The mm_cgroup_changed callback is called with the task_lock() of the new task held and is called just prior to changing the mm->owner. I am indebted to Paul Menage for the several reviews of this patchset and for helping me make it lighter and simpler. This patch was tested on a powerpc box; it was compiled with both the MM_OWNER config turned on and off. After the thread group leader exits, it's moved to the init_css_set by cgroup_exit(), thus all future charges from running threads would be redirected to the init_css_set's subsystem. Signed-off-by: Balbir Singh <balbir@linux.vnet.ibm.com> Cc: Pavel Emelianov <xemul@openvz.org> Cc: Hugh Dickins <hugh@veritas.com> Cc: Sudhir Kumar <skumar@linux.vnet.ibm.com> Cc: YAMAMOTO Takashi <yamamoto@valinux.co.jp> Cc: Hirokazu Takahashi <taka@valinux.co.jp> Cc: David Rientjes <rientjes@google.com> Cc: Balbir Singh <balbir@linux.vnet.ibm.com> Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> Acked-by: Pekka Enberg <penberg@cs.helsinki.fi> Reviewed-by: Paul Menage <menage@google.com> Cc: Oleg Nesterov <oleg@tv-sign.ru> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--mm/memcontrol.c28
1 file changed, 5 insertions, 23 deletions
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index d12795cc762..49d80814798 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -236,26 +236,12 @@ static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
236 css); 236 css);
237} 237}
238 238
239static struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p) 239struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
240{ 240{
241 return container_of(task_subsys_state(p, mem_cgroup_subsys_id), 241 return container_of(task_subsys_state(p, mem_cgroup_subsys_id),
242 struct mem_cgroup, css); 242 struct mem_cgroup, css);
243} 243}
244 244
245void mm_init_cgroup(struct mm_struct *mm, struct task_struct *p)
246{
247 struct mem_cgroup *mem;
248
249 mem = mem_cgroup_from_task(p);
250 css_get(&mem->css);
251 mm->mem_cgroup = mem;
252}
253
254void mm_free_cgroup(struct mm_struct *mm)
255{
256 css_put(&mm->mem_cgroup->css);
257}
258
259static inline int page_cgroup_locked(struct page *page) 245static inline int page_cgroup_locked(struct page *page)
260{ 246{
261 return bit_spin_is_locked(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup); 247 return bit_spin_is_locked(PAGE_CGROUP_LOCK_BIT, &page->page_cgroup);
@@ -476,6 +462,7 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
476 int zid = zone_idx(z); 462 int zid = zone_idx(z);
477 struct mem_cgroup_per_zone *mz; 463 struct mem_cgroup_per_zone *mz;
478 464
465 BUG_ON(!mem_cont);
479 mz = mem_cgroup_zoneinfo(mem_cont, nid, zid); 466 mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
480 if (active) 467 if (active)
481 src = &mz->active_list; 468 src = &mz->active_list;
@@ -574,7 +561,7 @@ retry:
574 mm = &init_mm; 561 mm = &init_mm;
575 562
576 rcu_read_lock(); 563 rcu_read_lock();
577 mem = rcu_dereference(mm->mem_cgroup); 564 mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
578 /* 565 /*
579 * For every charge from the cgroup, increment reference count 566 * For every charge from the cgroup, increment reference count
580 */ 567 */
@@ -985,10 +972,9 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
985 struct mem_cgroup *mem; 972 struct mem_cgroup *mem;
986 int node; 973 int node;
987 974
988 if (unlikely((cont->parent) == NULL)) { 975 if (unlikely((cont->parent) == NULL))
989 mem = &init_mem_cgroup; 976 mem = &init_mem_cgroup;
990 init_mm.mem_cgroup = mem; 977 else
991 } else
992 mem = kzalloc(sizeof(struct mem_cgroup), GFP_KERNEL); 978 mem = kzalloc(sizeof(struct mem_cgroup), GFP_KERNEL);
993 979
994 if (mem == NULL) 980 if (mem == NULL)
@@ -1067,10 +1053,6 @@ static void mem_cgroup_move_task(struct cgroup_subsys *ss,
1067 if (!thread_group_leader(p)) 1053 if (!thread_group_leader(p))
1068 goto out; 1054 goto out;
1069 1055
1070 css_get(&mem->css);
1071 rcu_assign_pointer(mm->mem_cgroup, mem);
1072 css_put(&old_mem->css);
1073
1074out: 1056out:
1075 mmput(mm); 1057 mmput(mm);
1076} 1058}