author	Tejun Heo <tj@kernel.org>	2014-05-13 12:11:01 -0400
committer	Tejun Heo <tj@kernel.org>	2014-05-13 12:11:01 -0400
commit	ec903c0c858e4963a9e0724bdcadfa837253341c (patch)
tree	b52cd0d5bcac48d2d9c21e6186397c29ef89c33b /mm/memcontrol.c
parent	46cfeb043b04f5878154bea36714709d46028495 (diff)
cgroup: rename css_tryget*() to css_tryget_online*()
Unlike the more usual refcounting, what css_tryget() provides is the distinction between online and offline csses, not protection against upping a refcnt which has already reached zero. cgroup is planning to provide an actual tryget which fails once the refcnt has reached zero. Let's rename the existing trygets so that they clearly indicate that they test onliness.

I thought about keeping the existing names as they are and introducing new names for the planned actual tryget; however, given that each controller participates in the synchronization of the online state, it seems worthwhile to make it explicit that these functions are about the on/offline state.

Rename css_tryget() to css_tryget_online() and css_tryget_from_dir() to css_tryget_online_from_dir(). This is a pure rename.

v2: cgroup_freezer grew new usages of css_tryget(). Update accordingly.

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Michal Hocko <mhocko@suse.cz>
Acked-by: Li Zefan <lizefan@huawei.com>
Cc: Vivek Goyal <vgoyal@redhat.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
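The pattern the rename touches is the usual RCU-protected lookup followed by a conditional reference grab. Below is a minimal sketch modelled directly on the get_mem_cgroup_from_mm() hunk further down; the wrapper name get_memcg_sketch() is illustrative only and not part of the patch, while the helpers it calls (mem_cgroup_from_task(), css_tryget_online(), root_mem_cgroup) are the ones the patched file already uses.

/*
 * Illustrative sketch only: mirrors the get_mem_cgroup_from_mm() hunk
 * in this patch.  The RCU read lock keeps the memcg from being freed
 * during the lookup; css_tryget_online() then succeeds only while the
 * css is still online, so a lookup that races with offlining retries.
 * The root_mem_cgroup fallback covers an mm with no owner task.
 */
static struct mem_cgroup *get_memcg_sketch(struct mm_struct *mm)
{
	struct mem_cgroup *memcg;

	rcu_read_lock();
	do {
		memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
		if (unlikely(!memcg))
			memcg = root_mem_cgroup;
	} while (!css_tryget_online(&memcg->css));
	rcu_read_unlock();
	return memcg;	/* caller drops the reference with css_put() */
}

A failing css_tryget_online() here means the group went offline between the lookup and the reference grab, so the loop retries rather than charging a dying cgroup.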
Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r--	mm/memcontrol.c	46
1 file changed, 24 insertions(+), 22 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index c3f82f69ef58..5cf3246314a2 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -567,7 +567,8 @@ void sock_update_memcg(struct sock *sk)
 	memcg = mem_cgroup_from_task(current);
 	cg_proto = sk->sk_prot->proto_cgroup(memcg);
 	if (!mem_cgroup_is_root(memcg) &&
-	    memcg_proto_active(cg_proto) && css_tryget(&memcg->css)) {
+	    memcg_proto_active(cg_proto) &&
+	    css_tryget_online(&memcg->css)) {
 		sk->sk_cgrp = cg_proto;
 	}
 	rcu_read_unlock();
@@ -834,7 +835,7 @@ retry:
 	 */
 	__mem_cgroup_remove_exceeded(mz->memcg, mz, mctz);
 	if (!res_counter_soft_limit_excess(&mz->memcg->res) ||
-	    !css_tryget(&mz->memcg->css))
+	    !css_tryget_online(&mz->memcg->css))
 		goto retry;
 done:
 	return mz;
@@ -1076,7 +1077,7 @@ static struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
 		memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
 		if (unlikely(!memcg))
 			memcg = root_mem_cgroup;
-	} while (!css_tryget(&memcg->css));
+	} while (!css_tryget_online(&memcg->css));
 	rcu_read_unlock();
 	return memcg;
 }
@@ -1113,7 +1114,8 @@ skip_node:
 	 */
 	if (next_css) {
 		if ((next_css == &root->css) ||
-		    ((next_css->flags & CSS_ONLINE) && css_tryget(next_css)))
+		    ((next_css->flags & CSS_ONLINE) &&
+		     css_tryget_online(next_css)))
 			return mem_cgroup_from_css(next_css);
 
 		prev_css = next_css;
@@ -1159,7 +1161,7 @@ mem_cgroup_iter_load(struct mem_cgroup_reclaim_iter *iter,
 		 * would be returned all the time.
 		 */
 		if (position && position != root &&
-		    !css_tryget(&position->css))
+		    !css_tryget_online(&position->css))
 			position = NULL;
 	}
 	return position;
@@ -2785,9 +2787,9 @@ static void __mem_cgroup_cancel_local_charge(struct mem_cgroup *memcg,
 
 /*
  * A helper function to get mem_cgroup from ID. must be called under
- * rcu_read_lock(). The caller is responsible for calling css_tryget if
- * the mem_cgroup is used for charging. (dropping refcnt from swap can be
- * called against removed memcg.)
+ * rcu_read_lock(). The caller is responsible for calling
+ * css_tryget_online() if the mem_cgroup is used for charging. (dropping
+ * refcnt from swap can be called against removed memcg.)
  */
 static struct mem_cgroup *mem_cgroup_lookup(unsigned short id)
 {
@@ -2810,14 +2812,14 @@ struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page)
 	lock_page_cgroup(pc);
 	if (PageCgroupUsed(pc)) {
 		memcg = pc->mem_cgroup;
-		if (memcg && !css_tryget(&memcg->css))
+		if (memcg && !css_tryget_online(&memcg->css))
 			memcg = NULL;
 	} else if (PageSwapCache(page)) {
 		ent.val = page_private(page);
 		id = lookup_swap_cgroup_id(ent);
 		rcu_read_lock();
 		memcg = mem_cgroup_lookup(id);
-		if (memcg && !css_tryget(&memcg->css))
+		if (memcg && !css_tryget_online(&memcg->css))
 			memcg = NULL;
 		rcu_read_unlock();
 	}
@@ -3473,7 +3475,7 @@ struct kmem_cache *__memcg_kmem_get_cache(struct kmem_cache *cachep,
 	}
 
 	/* The corresponding put will be done in the workqueue. */
-	if (!css_tryget(&memcg->css))
+	if (!css_tryget_online(&memcg->css))
 		goto out;
 	rcu_read_unlock();
 
@@ -4246,8 +4248,8 @@ void mem_cgroup_uncharge_swap(swp_entry_t ent)
 	memcg = mem_cgroup_lookup(id);
 	if (memcg) {
 		/*
-		 * We uncharge this because swap is freed.
-		 * This memcg can be obsolete one. We avoid calling css_tryget
+		 * We uncharge this because swap is freed. This memcg can
+		 * be obsolete one. We avoid calling css_tryget_online().
 		 */
 		if (!mem_cgroup_is_root(memcg))
 			res_counter_uncharge(&memcg->memsw, PAGE_SIZE);
@@ -5840,10 +5842,10 @@ static void kmem_cgroup_css_offline(struct mem_cgroup *memcg)
 	 * which is then paired with css_put during uncharge resp. here.
 	 *
-	 * Although this might sound strange as this path is called from
-	 * css_offline() when the reference might have dropped down to 0
-	 * and shouldn't be incremented anymore (css_tryget would fail)
-	 * we do not have other options because of the kmem allocations
-	 * lifetime.
+	 * Although this might sound strange as this path is called from
+	 * css_offline() when the reference might have dropped down to 0 and
+	 * shouldn't be incremented anymore (css_tryget_online() would
+	 * fail) we do not have other options because of the kmem
+	 * allocations lifetime.
 	 */
 	css_get(&memcg->css);
 
5849 5851
@@ -6051,8 +6053,8 @@ static int memcg_write_event_control(struct cgroup_subsys_state *css,
 	 * automatically removed on cgroup destruction but the removal is
 	 * asynchronous, so take an extra ref on @css.
 	 */
-	cfile_css = css_tryget_from_dir(cfile.file->f_dentry->d_parent,
-					&memory_cgrp_subsys);
+	cfile_css = css_tryget_online_from_dir(cfile.file->f_dentry->d_parent,
+					       &memory_cgrp_subsys);
 	ret = -EINVAL;
 	if (IS_ERR(cfile_css))
 		goto out_put_cfile;
@@ -6496,7 +6498,7 @@ static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
 	/*
 	 * XXX: css_offline() would be where we should reparent all
 	 * memory to prepare the cgroup for destruction. However,
-	 * memcg does not do css_tryget() and res_counter charging
+	 * memcg does not do css_tryget_online() and res_counter charging
 	 * under the same RCU lock region, which means that charging
 	 * could race with offlining. Offlining only happens to
 	 * cgroups with no tasks in them but charges can show up
@@ -6510,9 +6512,9 @@ static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
 	 * lookup_swap_cgroup_id()
 	 *   rcu_read_lock()
 	 *     mem_cgroup_lookup()
-	 *       css_tryget()
+	 *       css_tryget_online()
 	 *     rcu_read_unlock()
-	 * disable css_tryget()
+	 * disable css_tryget_online()
 	 * call_rcu()
 	 *   offline_css()
 	 *     reparent_charges()