[cgit navigation: about | summary | refs | log | tree | commit | diff | stats]
diff options
context:
space:
mode:
-rw-r--r--block/blk-cgroup.h12
-rw-r--r--fs/kernfs/dir.c1
-rw-r--r--include/linux/cgroup.h63
-rw-r--r--kernel/cgroup.c146
-rw-r--r--kernel/cpuset.c27
-rw-r--r--kernel/sched/debug.c3
-rw-r--r--mm/memcontrol.c68
7 files changed, 110 insertions, 210 deletions
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index 453b528c8e19..15a8d640de57 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -241,12 +241,16 @@ static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
241 */ 241 */
242static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen) 242static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
243{ 243{
244 int ret; 244 char *p;
245 245
246 ret = cgroup_path(blkg->blkcg->css.cgroup, buf, buflen); 246 p = cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
247 if (ret) 247 if (!p) {
248 strncpy(buf, "<unavailable>", buflen); 248 strncpy(buf, "<unavailable>", buflen);
249 return ret; 249 return -ENAMETOOLONG;
250 }
251
252 memmove(buf, p, buf + buflen - p);
253 return 0;
250} 254}
251 255
252/** 256/**
diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
index a347792c2e5a..939684ebff1e 100644
--- a/fs/kernfs/dir.c
+++ b/fs/kernfs/dir.c
@@ -112,6 +112,7 @@ char *kernfs_path(struct kernfs_node *kn, char *buf, size_t buflen)
112 spin_unlock_irqrestore(&kernfs_rename_lock, flags); 112 spin_unlock_irqrestore(&kernfs_rename_lock, flags);
113 return p; 113 return p;
114} 114}
115EXPORT_SYMBOL_GPL(kernfs_path);
115 116
116/** 117/**
117 * pr_cont_kernfs_name - pr_cont name of a kernfs_node 118 * pr_cont_kernfs_name - pr_cont name of a kernfs_node
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index b42251a23129..4d6ff7d40cf6 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -138,11 +138,6 @@ enum {
138 CGRP_SANE_BEHAVIOR, 138 CGRP_SANE_BEHAVIOR,
139}; 139};
140 140
141struct cgroup_name {
142 struct rcu_head rcu_head;
143 char name[];
144};
145
146struct cgroup { 141struct cgroup {
147 unsigned long flags; /* "unsigned long" so bitops work */ 142 unsigned long flags; /* "unsigned long" so bitops work */
148 143
@@ -179,19 +174,6 @@ struct cgroup {
179 */ 174 */
180 u64 serial_nr; 175 u64 serial_nr;
181 176
182 /*
183 * This is a copy of dentry->d_name, and it's needed because
184 * we can't use dentry->d_name in cgroup_path().
185 *
186 * You must acquire rcu_read_lock() to access cgrp->name, and
187 * the only place that can change it is rename(), which is
188 * protected by parent dir's i_mutex.
189 *
190 * Normally you should use cgroup_name() wrapper rather than
191 * access it directly.
192 */
193 struct cgroup_name __rcu *name;
194
195 /* Private pointers for each registered subsystem */ 177 /* Private pointers for each registered subsystem */
196 struct cgroup_subsys_state __rcu *subsys[CGROUP_SUBSYS_COUNT]; 178 struct cgroup_subsys_state __rcu *subsys[CGROUP_SUBSYS_COUNT];
197 179
@@ -479,12 +461,6 @@ static inline bool cgroup_sane_behavior(const struct cgroup *cgrp)
479 return cgrp->root->flags & CGRP_ROOT_SANE_BEHAVIOR; 461 return cgrp->root->flags & CGRP_ROOT_SANE_BEHAVIOR;
480} 462}
481 463
482/* Caller should hold rcu_read_lock() */
483static inline const char *cgroup_name(const struct cgroup *cgrp)
484{
485 return rcu_dereference(cgrp->name)->name;
486}
487
488/* returns ino associated with a cgroup, 0 indicates unmounted root */ 464/* returns ino associated with a cgroup, 0 indicates unmounted root */
489static inline ino_t cgroup_ino(struct cgroup *cgrp) 465static inline ino_t cgroup_ino(struct cgroup *cgrp)
490{ 466{
@@ -503,14 +479,47 @@ static inline struct cftype *seq_cft(struct seq_file *seq)
503 479
504struct cgroup_subsys_state *seq_css(struct seq_file *seq); 480struct cgroup_subsys_state *seq_css(struct seq_file *seq);
505 481
482/*
483 * Name / path handling functions. All are thin wrappers around the kernfs
484 * counterparts and can be called under any context.
485 */
486
487static inline int cgroup_name(struct cgroup *cgrp, char *buf, size_t buflen)
488{
489 return kernfs_name(cgrp->kn, buf, buflen);
490}
491
492static inline char * __must_check cgroup_path(struct cgroup *cgrp, char *buf,
493 size_t buflen)
494{
495 return kernfs_path(cgrp->kn, buf, buflen);
496}
497
498static inline void pr_cont_cgroup_name(struct cgroup *cgrp)
499{
500 /* dummy_top doesn't have a kn associated */
501 if (cgrp->kn)
502 pr_cont_kernfs_name(cgrp->kn);
503 else
504 pr_cont("/");
505}
506
507static inline void pr_cont_cgroup_path(struct cgroup *cgrp)
508{
509 /* dummy_top doesn't have a kn associated */
510 if (cgrp->kn)
511 pr_cont_kernfs_path(cgrp->kn);
512 else
513 pr_cont("/");
514}
515
516char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen);
517
506int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts); 518int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts);
507int cgroup_rm_cftypes(struct cftype *cfts); 519int cgroup_rm_cftypes(struct cftype *cfts);
508 520
509bool cgroup_is_descendant(struct cgroup *cgrp, struct cgroup *ancestor); 521bool cgroup_is_descendant(struct cgroup *cgrp, struct cgroup *ancestor);
510 522
511int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen);
512int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen);
513
514int cgroup_task_count(const struct cgroup *cgrp); 523int cgroup_task_count(const struct cgroup *cgrp);
515 524
516/* 525/*
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 59dfb025f1ac..638df032fb94 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -145,8 +145,6 @@ static int cgroup_root_count;
145/* hierarchy ID allocation and mapping, protected by cgroup_mutex */ 145/* hierarchy ID allocation and mapping, protected by cgroup_mutex */
146static DEFINE_IDR(cgroup_hierarchy_idr); 146static DEFINE_IDR(cgroup_hierarchy_idr);
147 147
148static struct cgroup_name root_cgroup_name = { .name = "/" };
149
150/* 148/*
151 * Assign a monotonically increasing serial number to cgroups. It 149 * Assign a monotonically increasing serial number to cgroups. It
152 * guarantees cgroups with bigger numbers are newer than those with smaller 150 * guarantees cgroups with bigger numbers are newer than those with smaller
@@ -888,17 +886,6 @@ static int cgroup_populate_dir(struct cgroup *cgrp, unsigned long subsys_mask);
888static struct kernfs_syscall_ops cgroup_kf_syscall_ops; 886static struct kernfs_syscall_ops cgroup_kf_syscall_ops;
889static const struct file_operations proc_cgroupstats_operations; 887static const struct file_operations proc_cgroupstats_operations;
890 888
891static struct cgroup_name *cgroup_alloc_name(const char *name_str)
892{
893 struct cgroup_name *name;
894
895 name = kmalloc(sizeof(*name) + strlen(name_str) + 1, GFP_KERNEL);
896 if (!name)
897 return NULL;
898 strcpy(name->name, name_str);
899 return name;
900}
901
902static char *cgroup_file_name(struct cgroup *cgrp, const struct cftype *cft, 889static char *cgroup_file_name(struct cgroup *cgrp, const struct cftype *cft,
903 char *buf) 890 char *buf)
904{ 891{
@@ -958,8 +945,6 @@ static void cgroup_free_fn(struct work_struct *work)
958 cgroup_pidlist_destroy_all(cgrp); 945 cgroup_pidlist_destroy_all(cgrp);
959 946
960 kernfs_put(cgrp->kn); 947 kernfs_put(cgrp->kn);
961
962 kfree(rcu_dereference_raw(cgrp->name));
963 kfree(cgrp); 948 kfree(cgrp);
964} 949}
965 950
@@ -1377,7 +1362,6 @@ static void init_cgroup_root(struct cgroupfs_root *root)
1377 INIT_LIST_HEAD(&root->root_list); 1362 INIT_LIST_HEAD(&root->root_list);
1378 root->number_of_cgroups = 1; 1363 root->number_of_cgroups = 1;
1379 cgrp->root = root; 1364 cgrp->root = root;
1380 RCU_INIT_POINTER(cgrp->name, &root_cgroup_name);
1381 init_cgroup_housekeeping(cgrp); 1365 init_cgroup_housekeeping(cgrp);
1382 idr_init(&root->cgroup_idr); 1366 idr_init(&root->cgroup_idr);
1383} 1367}
@@ -1598,57 +1582,6 @@ static struct file_system_type cgroup_fs_type = {
1598static struct kobject *cgroup_kobj; 1582static struct kobject *cgroup_kobj;
1599 1583
1600/** 1584/**
1601 * cgroup_path - generate the path of a cgroup
1602 * @cgrp: the cgroup in question
1603 * @buf: the buffer to write the path into
1604 * @buflen: the length of the buffer
1605 *
1606 * Writes path of cgroup into buf. Returns 0 on success, -errno on error.
1607 *
1608 * We can't generate cgroup path using dentry->d_name, as accessing
1609 * dentry->name must be protected by irq-unsafe dentry->d_lock or parent
1610 * inode's i_mutex, while on the other hand cgroup_path() can be called
1611 * with some irq-safe spinlocks held.
1612 */
1613int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen)
1614{
1615 int ret = -ENAMETOOLONG;
1616 char *start;
1617
1618 if (!cgrp->parent) {
1619 if (strlcpy(buf, "/", buflen) >= buflen)
1620 return -ENAMETOOLONG;
1621 return 0;
1622 }
1623
1624 start = buf + buflen - 1;
1625 *start = '\0';
1626
1627 rcu_read_lock();
1628 do {
1629 const char *name = cgroup_name(cgrp);
1630 int len;
1631
1632 len = strlen(name);
1633 if ((start -= len) < buf)
1634 goto out;
1635 memcpy(start, name, len);
1636
1637 if (--start < buf)
1638 goto out;
1639 *start = '/';
1640
1641 cgrp = cgrp->parent;
1642 } while (cgrp->parent);
1643 ret = 0;
1644 memmove(buf, start, buf + buflen - start);
1645out:
1646 rcu_read_unlock();
1647 return ret;
1648}
1649EXPORT_SYMBOL_GPL(cgroup_path);
1650
1651/**
1652 * task_cgroup_path - cgroup path of a task in the first cgroup hierarchy 1585 * task_cgroup_path - cgroup path of a task in the first cgroup hierarchy
1653 * @task: target task 1586 * @task: target task
1654 * @buf: the buffer to write the path into 1587 * @buf: the buffer to write the path into
@@ -1659,16 +1592,14 @@ EXPORT_SYMBOL_GPL(cgroup_path);
1659 * function grabs cgroup_mutex and shouldn't be used inside locks used by 1592 * function grabs cgroup_mutex and shouldn't be used inside locks used by
1660 * cgroup controller callbacks. 1593 * cgroup controller callbacks.
1661 * 1594 *
1662 * Returns 0 on success, fails with -%ENAMETOOLONG if @buflen is too short. 1595 * Return value is the same as kernfs_path().
1663 */ 1596 */
1664int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen) 1597char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen)
1665{ 1598{
1666 struct cgroupfs_root *root; 1599 struct cgroupfs_root *root;
1667 struct cgroup *cgrp; 1600 struct cgroup *cgrp;
1668 int hierarchy_id = 1, ret = 0; 1601 int hierarchy_id = 1;
1669 1602 char *path = NULL;
1670 if (buflen < 2)
1671 return -ENAMETOOLONG;
1672 1603
1673 mutex_lock(&cgroup_mutex); 1604 mutex_lock(&cgroup_mutex);
1674 1605
@@ -1676,14 +1607,15 @@ int task_cgroup_path(struct task_struct *task, char *buf, size_t buflen)
1676 1607
1677 if (root) { 1608 if (root) {
1678 cgrp = task_cgroup_from_root(task, root); 1609 cgrp = task_cgroup_from_root(task, root);
1679 ret = cgroup_path(cgrp, buf, buflen); 1610 path = cgroup_path(cgrp, buf, buflen);
1680 } else { 1611 } else {
1681 /* if no hierarchy exists, everyone is in "/" */ 1612 /* if no hierarchy exists, everyone is in "/" */
1682 memcpy(buf, "/", 2); 1613 if (strlcpy(buf, "/", buflen) < buflen)
1614 path = buf;
1683 } 1615 }
1684 1616
1685 mutex_unlock(&cgroup_mutex); 1617 mutex_unlock(&cgroup_mutex);
1686 return ret; 1618 return path;
1687} 1619}
1688EXPORT_SYMBOL_GPL(task_cgroup_path); 1620EXPORT_SYMBOL_GPL(task_cgroup_path);
1689 1621
@@ -2211,7 +2143,6 @@ static int cgroup_rename(struct kernfs_node *kn, struct kernfs_node *new_parent,
2211 const char *new_name_str) 2143 const char *new_name_str)
2212{ 2144{
2213 struct cgroup *cgrp = kn->priv; 2145 struct cgroup *cgrp = kn->priv;
2214 struct cgroup_name *name, *old_name;
2215 int ret; 2146 int ret;
2216 2147
2217 if (kernfs_type(kn) != KERNFS_DIR) 2148 if (kernfs_type(kn) != KERNFS_DIR)
@@ -2226,25 +2157,13 @@ static int cgroup_rename(struct kernfs_node *kn, struct kernfs_node *new_parent,
2226 if (cgroup_sane_behavior(cgrp)) 2157 if (cgroup_sane_behavior(cgrp))
2227 return -EPERM; 2158 return -EPERM;
2228 2159
2229 name = cgroup_alloc_name(new_name_str);
2230 if (!name)
2231 return -ENOMEM;
2232
2233 mutex_lock(&cgroup_tree_mutex); 2160 mutex_lock(&cgroup_tree_mutex);
2234 mutex_lock(&cgroup_mutex); 2161 mutex_lock(&cgroup_mutex);
2235 2162
2236 ret = kernfs_rename(kn, new_parent, new_name_str); 2163 ret = kernfs_rename(kn, new_parent, new_name_str);
2237 if (!ret) {
2238 old_name = rcu_dereference_protected(cgrp->name, true);
2239 rcu_assign_pointer(cgrp->name, name);
2240 } else {
2241 old_name = name;
2242 }
2243 2164
2244 mutex_unlock(&cgroup_mutex); 2165 mutex_unlock(&cgroup_mutex);
2245 mutex_unlock(&cgroup_tree_mutex); 2166 mutex_unlock(&cgroup_tree_mutex);
2246
2247 kfree_rcu(old_name, rcu_head);
2248 return ret; 2167 return ret;
2249} 2168}
2250 2169
@@ -3719,14 +3638,13 @@ err_free:
3719/** 3638/**
3720 * cgroup_create - create a cgroup 3639 * cgroup_create - create a cgroup
3721 * @parent: cgroup that will be parent of the new cgroup 3640 * @parent: cgroup that will be parent of the new cgroup
3722 * @name_str: name of the new cgroup 3641 * @name: name of the new cgroup
3723 * @mode: mode to set on new cgroup 3642 * @mode: mode to set on new cgroup
3724 */ 3643 */
3725static long cgroup_create(struct cgroup *parent, const char *name_str, 3644static long cgroup_create(struct cgroup *parent, const char *name,
3726 umode_t mode) 3645 umode_t mode)
3727{ 3646{
3728 struct cgroup *cgrp; 3647 struct cgroup *cgrp;
3729 struct cgroup_name *name;
3730 struct cgroupfs_root *root = parent->root; 3648 struct cgroupfs_root *root = parent->root;
3731 int ssid, err; 3649 int ssid, err;
3732 struct cgroup_subsys *ss; 3650 struct cgroup_subsys *ss;
@@ -3737,13 +3655,6 @@ static long cgroup_create(struct cgroup *parent, const char *name_str,
3737 if (!cgrp) 3655 if (!cgrp)
3738 return -ENOMEM; 3656 return -ENOMEM;
3739 3657
3740 name = cgroup_alloc_name(name_str);
3741 if (!name) {
3742 err = -ENOMEM;
3743 goto err_free_cgrp;
3744 }
3745 rcu_assign_pointer(cgrp->name, name);
3746
3747 mutex_lock(&cgroup_tree_mutex); 3658 mutex_lock(&cgroup_tree_mutex);
3748 3659
3749 /* 3660 /*
@@ -3781,7 +3692,7 @@ static long cgroup_create(struct cgroup *parent, const char *name_str,
3781 set_bit(CGRP_CPUSET_CLONE_CHILDREN, &cgrp->flags); 3692 set_bit(CGRP_CPUSET_CLONE_CHILDREN, &cgrp->flags);
3782 3693
3783 /* create the directory */ 3694 /* create the directory */
3784 kn = kernfs_create_dir(parent->kn, name->name, mode, cgrp); 3695 kn = kernfs_create_dir(parent->kn, name, mode, cgrp);
3785 if (IS_ERR(kn)) { 3696 if (IS_ERR(kn)) {
3786 err = PTR_ERR(kn); 3697 err = PTR_ERR(kn);
3787 goto err_free_id; 3698 goto err_free_id;
@@ -3839,8 +3750,6 @@ err_unlock:
3839 mutex_unlock(&cgroup_mutex); 3750 mutex_unlock(&cgroup_mutex);
3840err_unlock_tree: 3751err_unlock_tree:
3841 mutex_unlock(&cgroup_tree_mutex); 3752 mutex_unlock(&cgroup_tree_mutex);
3842 kfree(rcu_dereference_raw(cgrp->name));
3843err_free_cgrp:
3844 kfree(cgrp); 3753 kfree(cgrp);
3845 return err; 3754 return err;
3846 3755
@@ -4304,12 +4213,12 @@ int proc_cgroup_show(struct seq_file *m, void *v)
4304{ 4213{
4305 struct pid *pid; 4214 struct pid *pid;
4306 struct task_struct *tsk; 4215 struct task_struct *tsk;
4307 char *buf; 4216 char *buf, *path;
4308 int retval; 4217 int retval;
4309 struct cgroupfs_root *root; 4218 struct cgroupfs_root *root;
4310 4219
4311 retval = -ENOMEM; 4220 retval = -ENOMEM;
4312 buf = kmalloc(PAGE_SIZE, GFP_KERNEL); 4221 buf = kmalloc(PATH_MAX, GFP_KERNEL);
4313 if (!buf) 4222 if (!buf)
4314 goto out; 4223 goto out;
4315 4224
@@ -4337,10 +4246,12 @@ int proc_cgroup_show(struct seq_file *m, void *v)
4337 root->name); 4246 root->name);
4338 seq_putc(m, ':'); 4247 seq_putc(m, ':');
4339 cgrp = task_cgroup_from_root(tsk, root); 4248 cgrp = task_cgroup_from_root(tsk, root);
4340 retval = cgroup_path(cgrp, buf, PAGE_SIZE); 4249 path = cgroup_path(cgrp, buf, PATH_MAX);
4341 if (retval < 0) 4250 if (!path) {
4251 retval = -ENAMETOOLONG;
4342 goto out_unlock; 4252 goto out_unlock;
4343 seq_puts(m, buf); 4253 }
4254 seq_puts(m, path);
4344 seq_putc(m, '\n'); 4255 seq_putc(m, '\n');
4345 } 4256 }
4346 4257
@@ -4588,16 +4499,17 @@ static void cgroup_release_agent(struct work_struct *work)
4588 while (!list_empty(&release_list)) { 4499 while (!list_empty(&release_list)) {
4589 char *argv[3], *envp[3]; 4500 char *argv[3], *envp[3];
4590 int i; 4501 int i;
4591 char *pathbuf = NULL, *agentbuf = NULL; 4502 char *pathbuf = NULL, *agentbuf = NULL, *path;
4592 struct cgroup *cgrp = list_entry(release_list.next, 4503 struct cgroup *cgrp = list_entry(release_list.next,
4593 struct cgroup, 4504 struct cgroup,
4594 release_list); 4505 release_list);
4595 list_del_init(&cgrp->release_list); 4506 list_del_init(&cgrp->release_list);
4596 raw_spin_unlock(&release_list_lock); 4507 raw_spin_unlock(&release_list_lock);
4597 pathbuf = kmalloc(PAGE_SIZE, GFP_KERNEL); 4508 pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
4598 if (!pathbuf) 4509 if (!pathbuf)
4599 goto continue_free; 4510 goto continue_free;
4600 if (cgroup_path(cgrp, pathbuf, PAGE_SIZE) < 0) 4511 path = cgroup_path(cgrp, pathbuf, PATH_MAX);
4512 if (!path)
4601 goto continue_free; 4513 goto continue_free;
4602 agentbuf = kstrdup(cgrp->root->release_agent_path, GFP_KERNEL); 4514 agentbuf = kstrdup(cgrp->root->release_agent_path, GFP_KERNEL);
4603 if (!agentbuf) 4515 if (!agentbuf)
@@ -4605,7 +4517,7 @@ static void cgroup_release_agent(struct work_struct *work)
4605 4517
4606 i = 0; 4518 i = 0;
4607 argv[i++] = agentbuf; 4519 argv[i++] = agentbuf;
4608 argv[i++] = pathbuf; 4520 argv[i++] = path;
4609 argv[i] = NULL; 4521 argv[i] = NULL;
4610 4522
4611 i = 0; 4523 i = 0;
@@ -4755,6 +4667,11 @@ static int current_css_set_cg_links_read(struct seq_file *seq, void *v)
4755{ 4667{
4756 struct cgrp_cset_link *link; 4668 struct cgrp_cset_link *link;
4757 struct css_set *cset; 4669 struct css_set *cset;
4670 char *name_buf;
4671
4672 name_buf = kmalloc(NAME_MAX + 1, GFP_KERNEL);
4673 if (!name_buf)
4674 return -ENOMEM;
4758 4675
4759 read_lock(&css_set_lock); 4676 read_lock(&css_set_lock);
4760 rcu_read_lock(); 4677 rcu_read_lock();
@@ -4763,14 +4680,17 @@ static int current_css_set_cg_links_read(struct seq_file *seq, void *v)
4763 struct cgroup *c = link->cgrp; 4680 struct cgroup *c = link->cgrp;
4764 const char *name = "?"; 4681 const char *name = "?";
4765 4682
4766 if (c != cgroup_dummy_top) 4683 if (c != cgroup_dummy_top) {
4767 name = cgroup_name(c); 4684 cgroup_name(c, name_buf, NAME_MAX + 1);
4685 name = name_buf;
4686 }
4768 4687
4769 seq_printf(seq, "Root %d group %s\n", 4688 seq_printf(seq, "Root %d group %s\n",
4770 c->root->hierarchy_id, name); 4689 c->root->hierarchy_id, name);
4771 } 4690 }
4772 rcu_read_unlock(); 4691 rcu_read_unlock();
4773 read_unlock(&css_set_lock); 4692 read_unlock(&css_set_lock);
4693 kfree(name_buf);
4774 return 0; 4694 return 0;
4775} 4695}
4776 4696
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 2d018c795fea..e97a6e88d036 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -2088,10 +2088,9 @@ static void remove_tasks_in_empty_cpuset(struct cpuset *cs)
2088 parent = parent_cs(parent); 2088 parent = parent_cs(parent);
2089 2089
2090 if (cgroup_transfer_tasks(parent->css.cgroup, cs->css.cgroup)) { 2090 if (cgroup_transfer_tasks(parent->css.cgroup, cs->css.cgroup)) {
2091 rcu_read_lock(); 2091 printk(KERN_ERR "cpuset: failed to transfer tasks out of empty cpuset ");
2092 printk(KERN_ERR "cpuset: failed to transfer tasks out of empty cpuset %s\n", 2092 pr_cont_cgroup_name(cs->css.cgroup);
2093 cgroup_name(cs->css.cgroup)); 2093 pr_cont("\n");
2094 rcu_read_unlock();
2095 } 2094 }
2096} 2095}
2097 2096
@@ -2619,19 +2618,17 @@ void cpuset_print_task_mems_allowed(struct task_struct *tsk)
2619 /* Statically allocated to prevent using excess stack. */ 2618 /* Statically allocated to prevent using excess stack. */
2620 static char cpuset_nodelist[CPUSET_NODELIST_LEN]; 2619 static char cpuset_nodelist[CPUSET_NODELIST_LEN];
2621 static DEFINE_SPINLOCK(cpuset_buffer_lock); 2620 static DEFINE_SPINLOCK(cpuset_buffer_lock);
2622
2623 struct cgroup *cgrp = task_cs(tsk)->css.cgroup; 2621 struct cgroup *cgrp = task_cs(tsk)->css.cgroup;
2624 2622
2625 rcu_read_lock();
2626 spin_lock(&cpuset_buffer_lock); 2623 spin_lock(&cpuset_buffer_lock);
2627 2624
2628 nodelist_scnprintf(cpuset_nodelist, CPUSET_NODELIST_LEN, 2625 nodelist_scnprintf(cpuset_nodelist, CPUSET_NODELIST_LEN,
2629 tsk->mems_allowed); 2626 tsk->mems_allowed);
2630 printk(KERN_INFO "%s cpuset=%s mems_allowed=%s\n", 2627 printk(KERN_INFO "%s cpuset=", tsk->comm);
2631 tsk->comm, cgroup_name(cgrp), cpuset_nodelist); 2628 pr_cont_cgroup_name(cgrp);
2629 pr_cont(" mems_allowed=%s\n", cpuset_nodelist);
2632 2630
2633 spin_unlock(&cpuset_buffer_lock); 2631 spin_unlock(&cpuset_buffer_lock);
2634 rcu_read_unlock();
2635} 2632}
2636 2633
2637/* 2634/*
@@ -2681,12 +2678,12 @@ int proc_cpuset_show(struct seq_file *m, void *unused_v)
2681{ 2678{
2682 struct pid *pid; 2679 struct pid *pid;
2683 struct task_struct *tsk; 2680 struct task_struct *tsk;
2684 char *buf; 2681 char *buf, *p;
2685 struct cgroup_subsys_state *css; 2682 struct cgroup_subsys_state *css;
2686 int retval; 2683 int retval;
2687 2684
2688 retval = -ENOMEM; 2685 retval = -ENOMEM;
2689 buf = kmalloc(PAGE_SIZE, GFP_KERNEL); 2686 buf = kmalloc(PATH_MAX, GFP_KERNEL);
2690 if (!buf) 2687 if (!buf)
2691 goto out; 2688 goto out;
2692 2689
@@ -2696,14 +2693,16 @@ int proc_cpuset_show(struct seq_file *m, void *unused_v)
2696 if (!tsk) 2693 if (!tsk)
2697 goto out_free; 2694 goto out_free;
2698 2695
2696 retval = -ENAMETOOLONG;
2699 rcu_read_lock(); 2697 rcu_read_lock();
2700 css = task_css(tsk, cpuset_cgrp_id); 2698 css = task_css(tsk, cpuset_cgrp_id);
2701 retval = cgroup_path(css->cgroup, buf, PAGE_SIZE); 2699 p = cgroup_path(css->cgroup, buf, PATH_MAX);
2702 rcu_read_unlock(); 2700 rcu_read_unlock();
2703 if (retval < 0) 2701 if (!p)
2704 goto out_put_task; 2702 goto out_put_task;
2705 seq_puts(m, buf); 2703 seq_puts(m, p);
2706 seq_putc(m, '\n'); 2704 seq_putc(m, '\n');
2705 retval = 0;
2707out_put_task: 2706out_put_task:
2708 put_task_struct(tsk); 2707 put_task_struct(tsk);
2709out_free: 2708out_free:
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index dd52e7ffb10e..30eee3b5293d 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -111,8 +111,7 @@ static char *task_group_path(struct task_group *tg)
111 if (autogroup_path(tg, group_path, PATH_MAX)) 111 if (autogroup_path(tg, group_path, PATH_MAX))
112 return group_path; 112 return group_path;
113 113
114 cgroup_path(tg->css.cgroup, group_path, PATH_MAX); 114 return cgroup_path(tg->css.cgroup, group_path, PATH_MAX);
115 return group_path;
116} 115}
117#endif 116#endif
118 117
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 102ab48ffa13..c1c25494f7ae 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1683,15 +1683,8 @@ static void move_unlock_mem_cgroup(struct mem_cgroup *memcg,
1683 */ 1683 */
1684void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p) 1684void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
1685{ 1685{
1686 /* 1686 /* oom_info_lock ensures that parallel ooms do not interleave */
1687 * protects memcg_name and makes sure that parallel ooms do not
1688 * interleave
1689 */
1690 static DEFINE_SPINLOCK(oom_info_lock); 1687 static DEFINE_SPINLOCK(oom_info_lock);
1691 struct cgroup *task_cgrp;
1692 struct cgroup *mem_cgrp;
1693 static char memcg_name[PATH_MAX];
1694 int ret;
1695 struct mem_cgroup *iter; 1688 struct mem_cgroup *iter;
1696 unsigned int i; 1689 unsigned int i;
1697 1690
@@ -1701,36 +1694,14 @@ void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
1701 spin_lock(&oom_info_lock); 1694 spin_lock(&oom_info_lock);
1702 rcu_read_lock(); 1695 rcu_read_lock();
1703 1696
1704 mem_cgrp = memcg->css.cgroup; 1697 pr_info("Task in ");
1705 task_cgrp = task_cgroup(p, memory_cgrp_id); 1698 pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
1699 pr_info(" killed as a result of limit of ");
1700 pr_cont_cgroup_path(memcg->css.cgroup);
1701 pr_info("\n");
1706 1702
1707 ret = cgroup_path(task_cgrp, memcg_name, PATH_MAX);
1708 if (ret < 0) {
1709 /*
1710 * Unfortunately, we are unable to convert to a useful name
1711 * But we'll still print out the usage information
1712 */
1713 rcu_read_unlock();
1714 goto done;
1715 }
1716 rcu_read_unlock(); 1703 rcu_read_unlock();
1717 1704
1718 pr_info("Task in %s killed", memcg_name);
1719
1720 rcu_read_lock();
1721 ret = cgroup_path(mem_cgrp, memcg_name, PATH_MAX);
1722 if (ret < 0) {
1723 rcu_read_unlock();
1724 goto done;
1725 }
1726 rcu_read_unlock();
1727
1728 /*
1729 * Continues from above, so we don't need an KERN_ level
1730 */
1731 pr_cont(" as a result of limit of %s\n", memcg_name);
1732done:
1733
1734 pr_info("memory: usage %llukB, limit %llukB, failcnt %llu\n", 1705 pr_info("memory: usage %llukB, limit %llukB, failcnt %llu\n",
1735 res_counter_read_u64(&memcg->res, RES_USAGE) >> 10, 1706 res_counter_read_u64(&memcg->res, RES_USAGE) >> 10,
1736 res_counter_read_u64(&memcg->res, RES_LIMIT) >> 10, 1707 res_counter_read_u64(&memcg->res, RES_LIMIT) >> 10,
@@ -1745,13 +1716,8 @@ done:
1745 res_counter_read_u64(&memcg->kmem, RES_FAILCNT)); 1716 res_counter_read_u64(&memcg->kmem, RES_FAILCNT));
1746 1717
1747 for_each_mem_cgroup_tree(iter, memcg) { 1718 for_each_mem_cgroup_tree(iter, memcg) {
1748 pr_info("Memory cgroup stats"); 1719 pr_info("Memory cgroup stats for ");
1749 1720 pr_cont_cgroup_path(iter->css.cgroup);
1750 rcu_read_lock();
1751 ret = cgroup_path(iter->css.cgroup, memcg_name, PATH_MAX);
1752 if (!ret)
1753 pr_cont(" for %s", memcg_name);
1754 rcu_read_unlock();
1755 pr_cont(":"); 1721 pr_cont(":");
1756 1722
1757 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) { 1723 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
@@ -3401,7 +3367,7 @@ static struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *memcg,
3401 struct kmem_cache *s) 3367 struct kmem_cache *s)
3402{ 3368{
3403 struct kmem_cache *new = NULL; 3369 struct kmem_cache *new = NULL;
3404 static char *tmp_name = NULL; 3370 static char *tmp_path = NULL, *tmp_name = NULL;
3405 static DEFINE_MUTEX(mutex); /* protects tmp_name */ 3371 static DEFINE_MUTEX(mutex); /* protects tmp_name */
3406 3372
3407 BUG_ON(!memcg_can_account_kmem(memcg)); 3373 BUG_ON(!memcg_can_account_kmem(memcg));
@@ -3413,18 +3379,20 @@ static struct kmem_cache *memcg_create_kmem_cache(struct mem_cgroup *memcg,
3413 * This static temporary buffer is used to prevent from 3379 * This static temporary buffer is used to prevent from
3414 * pointless shortliving allocation. 3380 * pointless shortliving allocation.
3415 */ 3381 */
3416 if (!tmp_name) { 3382 if (!tmp_path || !tmp_name) {
3417 tmp_name = kmalloc(PATH_MAX, GFP_KERNEL); 3383 if (!tmp_path)
3384 tmp_path = kmalloc(PATH_MAX, GFP_KERNEL);
3418 if (!tmp_name) 3385 if (!tmp_name)
3386 tmp_name = kmalloc(NAME_MAX + 1, GFP_KERNEL);
3387 if (!tmp_path || !tmp_name)
3419 goto out; 3388 goto out;
3420 } 3389 }
3421 3390
3422 rcu_read_lock(); 3391 cgroup_name(memcg->css.cgroup, tmp_name, NAME_MAX + 1);
3423 snprintf(tmp_name, PATH_MAX, "%s(%d:%s)", s->name, 3392 snprintf(tmp_path, PATH_MAX, "%s(%d:%s)", s->name,
3424 memcg_cache_id(memcg), cgroup_name(memcg->css.cgroup)); 3393 memcg_cache_id(memcg), tmp_name);
3425 rcu_read_unlock();
3426 3394
3427 new = kmem_cache_create_memcg(memcg, tmp_name, s->object_size, s->align, 3395 new = kmem_cache_create_memcg(memcg, tmp_path, s->object_size, s->align,
3428 (s->flags & ~SLAB_PANIC), s->ctor, s); 3396 (s->flags & ~SLAB_PANIC), s->ctor, s);
3429 if (new) 3397 if (new)
3430 new->allocflags |= __GFP_KMEMCG; 3398 new->allocflags |= __GFP_KMEMCG;