author		Tejun Heo <tj@kernel.org>	2014-02-12 09:29:50 -0500
committer	Tejun Heo <tj@kernel.org>	2014-02-12 09:29:50 -0500
commit		e61734c55c24cdf11b07e52a74aec4dc4a7f4bd0
tree		bab256faae539cd38840bfc886317f80385acae4 /mm/memcontrol.c
parent		6f30558f37bfbd428e3854c2c34b5c32117c8f7e
cgroup: remove cgroup->name
cgroup->name handling became quite complicated over time, involving a
dedicated struct cgroup_name for RCU protection. Now that cgroup is
on kernfs, we can drop all of it and simply use kernfs_name/path() and
friends. Replace cgroup->name and all related code with kernfs
name/path constructs.
* Reimplement cgroup_name() and cgroup_path() as thin wrappers on top
  of their kernfs counterparts, which involves semantic changes: they
  now copy into a caller-supplied buffer instead of returning an
  RCU-protected string. pr_cont_cgroup_name() and
  pr_cont_cgroup_path() are added.
* cgroup->name handling dropped from cgroup_rename().
* All users of cgroup_name/path() are updated to the new semantics.
  Users that were formatting the string just to printk it are
  converted to use pr_cont_cgroup_name/path() instead, which
  simplifies things quite a bit. As cgroup_name() no longer requires
  the RCU read lock around it, RCU locking that protected only
  cgroup_name() is removed.
v2: Comment above oom_info_lock updated as suggested by Michal.
v3: dummy_top doesn't have a kn associated, and
    pr_cont_cgroup_name/path() ended up calling the matching kernfs
    functions with a NULL kn, leading to an oops. Test for a NULL kn
    and print "/" if so. This issue was reported by Fengguang Wu.
v4: Rebased on top of 0ab02ca8f887 ("cgroup: protect modifications to
cgroup_idr with cgroup_mutex").
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: Michal Hocko <mhocko@suse.cz>
Acked-by: Li Zefan <lizefan@huawei.com>
Cc: Fengguang Wu <fengguang.wu@intel.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Balbir Singh <bsingharora@gmail.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r--	mm/memcontrol.c	68
1 file changed, 18 insertions(+), 50 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 102ab48ffa13..c1c25494f7ae 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1683,15 +1683,8 @@ static void move_unlock_mem_cgroup(struct mem_cgroup *memcg,
  */
 void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
 {
-	/*
-	 * protects memcg_name and makes sure that parallel ooms do not
-	 * interleave
-	 */
+	/* oom_info_lock ensures that parallel ooms do not interleave */
 	static DEFINE_SPINLOCK(oom_info_lock);
-	struct cgroup *task_cgrp;
-	struct cgroup *mem_cgrp;
-	static char memcg_name[PATH_MAX];
-	int ret;
 	struct mem_cgroup *iter;
 	unsigned int i;
 
@@ -1701,36 +1694,14 @@ void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
 	spin_lock(&oom_info_lock);
 	rcu_read_lock();
 
-	mem_cgrp = memcg->css.cgroup;
-	task_cgrp = task_cgroup(p, memory_cgrp_id);
+	pr_info("Task in ");
+	pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
+	pr_info(" killed as a result of limit of ");
+	pr_cont_cgroup_path(memcg->css.cgroup);
+	pr_info("\n");
 
-	ret = cgroup_path(task_cgrp, memcg_name, PATH_MAX);
-	if (ret < 0) {
-		/*
-		 * Unfortunately, we are unable to convert to a useful name
-		 * But we'll still print out the usage information
-		 */
-		rcu_read_unlock();
-		goto done;
-	}
 	rcu_read_unlock();
 
-	pr_info("Task in %s killed", memcg_name);
-
-	rcu_read_lock();
-	ret = cgroup_path(mem_cgrp, memcg_name, PATH_MAX);
-	if (ret < 0) {
-		rcu_read_unlock();
-		goto done;
-	}
-	rcu_read_unlock();
-
-	/*
-	 * Continues from above, so we don't need an KERN_ level
-	 */
-	pr_cont(" as a result of limit of %s\n", memcg_name);
-done:
-
 	pr_info("memory: usage %llukB, limit %llukB, failcnt %llu\n",
 		res_counter_read_u64(&memcg->res, RES_USAGE) >> 10,
 		res_counter_read_u64(&memcg->res, RES_LIMIT) >> 10,
@@ -1745,13 +1716,8 @@ done:
 		res_counter_read_u64(&memcg->kmem, RES_FAILCNT));
 
 	for_each_mem_cgroup_tree(iter, memcg) {
-		pr_info("Memory cgroup stats");
-
-		rcu_read_lock();
-		ret = cgroup_path(iter->css.cgroup, memcg_name, PATH_MAX);
-		if (!ret)
-			pr_cont(" for %s", memcg_name);
-		rcu_read_unlock();
+		pr_info("Memory cgroup stats for ");
+		pr_cont_cgroup_path(iter->css.cgroup);
 		pr_cont(":");
 
 		for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
@@ -3401,7 +3367,7 @@ static struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *memcg,
 					 struct kmem_cache *s)
 {
 	struct kmem_cache *new = NULL;
-	static char *tmp_name = NULL;
+	static char *tmp_path = NULL, *tmp_name = NULL;
 	static DEFINE_MUTEX(mutex); /* protects tmp_name */
 
 	BUG_ON(!memcg_can_account_kmem(memcg));
@@ -3413,18 +3379,20 @@ static struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *memcg,
 	 * This static temporary buffer is used to prevent from
 	 * pointless shortliving allocation.
 	 */
-	if (!tmp_name) {
-		tmp_name = kmalloc(PATH_MAX, GFP_KERNEL);
+	if (!tmp_path || !tmp_name) {
+		if (!tmp_path)
+			tmp_path = kmalloc(PATH_MAX, GFP_KERNEL);
 		if (!tmp_name)
+			tmp_name = kmalloc(NAME_MAX + 1, GFP_KERNEL);
+		if (!tmp_path || !tmp_name)
 			goto out;
 	}
 
-	rcu_read_lock();
-	snprintf(tmp_name, PATH_MAX, "%s(%d:%s)", s->name,
-		 memcg_cache_id(memcg), cgroup_name(memcg->css.cgroup));
-	rcu_read_unlock();
+	cgroup_name(memcg->css.cgroup, tmp_name, NAME_MAX + 1);
+	snprintf(tmp_path, PATH_MAX, "%s(%d:%s)", s->name,
+		 memcg_cache_id(memcg), tmp_name);
 
-	new = kmem_cache_create_memcg(memcg, tmp_name, s->object_size, s->align,
+	new = kmem_cache_create_memcg(memcg, tmp_path, s->object_size, s->align,
 				      (s->flags & ~SLAB_PANIC), s->ctor, s);
 	if (new)
 		new->allocflags |= __GFP_KMEMCG;
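
Taken together, the two call sites above show the new pattern: names and
paths are snapshotted into caller-owned buffers via kernfs, with no RCU
read lock around the lookup. As a worked example (cgroup names
hypothetical): for a "dentry" cache cloned for a memcg whose cgroup
directory is named "web" and whose memcg_cache_id() is 3, the
"%s(%d:%s)" format yields a cache named "dentry(3:web)", and the OOM
header printed by the pr_info()/pr_cont_cgroup_path() sequence reads
along the lines of "Task in /lxc/web killed as a result of limit of
/lxc".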