author		Ingo Molnar <mingo@elte.hu>	2008-07-28 18:07:55 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-07-28 18:07:55 -0400
commit		cb28a1bbdb4790378e7366d6c9ee1d2340b84f92 (patch)
tree		316436f77dac75335fd2c3ef5f109e71606c50d3 /kernel/cgroup.c
parent		b6d4f7e3ef25beb8c658c97867d98883e69dc544 (diff)
parent		f934fb19ef34730263e6afc01e8ec27a8a71470f (diff)
Merge branch 'linus' into core/generic-dma-coherent
Conflicts:
	arch/x86/Kconfig

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/cgroup.c')
-rw-r--r--	kernel/cgroup.c	312
1 file changed, 145 insertions(+), 167 deletions(-)
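Note: the cgroup.c hunks below repeatedly replace open-coded "while (!list_empty(...))" walks with list_for_each_entry_safe(), which caches the next entry so the current one can be freed mid-walk. A minimal userspace sketch of that idiom follows; the hand-rolled list and names are illustrative only and are not taken from cgroup.c.

/*
 * Userspace sketch of the "safe" iteration idiom: remember the next
 * element before freeing the current one, so deletion does not break
 * the walk.  In the patch itself the kernel's list_for_each_entry_safe()
 * macro provides the saved cursor.
 */
#include <stdio.h>
#include <stdlib.h>

struct node {
	int val;
	struct node *next;
};

static struct node *push(struct node *head, int val)
{
	struct node *n = malloc(sizeof(*n));

	n->val = val;
	n->next = head;
	return n;
}

int main(void)
{
	struct node *head = NULL, *pos, *saved;
	int i;

	for (i = 0; i < 5; i++)
		head = push(head, i);

	/* Free every node: 'saved' plays the role of the *_safe cursor. */
	for (pos = head; pos; pos = saved) {
		saved = pos->next;	/* grab next before freeing pos */
		printf("freeing %d\n", pos->val);
		free(pos);
	}
	return 0;
}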
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 15ac0e1e4f4d..657f8f8d93a5 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -45,6 +45,7 @@
 #include <linux/delayacct.h>
 #include <linux/cgroupstats.h>
 #include <linux/hash.h>
+#include <linux/namei.h>
 
 #include <asm/atomic.h>
 
@@ -89,11 +90,7 @@ struct cgroupfs_root {
 	/* Hierarchy-specific flags */
 	unsigned long flags;
 
-	/* The path to use for release notifications. No locking
-	 * between setting and use - so if userspace updates this
-	 * while child cgroups exist, you could miss a
-	 * notification. We ensure that it's always a valid
-	 * NUL-terminated string */
+	/* The path to use for release notifications. */
 	char release_agent_path[PATH_MAX];
 };
 
@@ -118,7 +115,7 @@ static int root_count;
  * extra work in the fork/exit path if none of the subsystems need to
  * be called.
  */
-static int need_forkexit_callback;
+static int need_forkexit_callback __read_mostly;
 static int need_mm_owner_callback __read_mostly;
 
 /* convenient tests for these bits */
@@ -220,7 +217,7 @@ static struct hlist_head *css_set_hash(struct cgroup_subsys_state *css[])
  * task until after the first call to cgroup_iter_start(). This
  * reduces the fork()/exit() overhead for people who have cgroups
  * compiled into their kernel but not actually in use */
-static int use_task_css_set_links;
+static int use_task_css_set_links __read_mostly;
 
 /* When we create or destroy a css_set, the operation simply
  * takes/releases a reference count on all the cgroups referenced
@@ -241,17 +238,20 @@ static int use_task_css_set_links;
  */
 static void unlink_css_set(struct css_set *cg)
 {
+	struct cg_cgroup_link *link;
+	struct cg_cgroup_link *saved_link;
+
 	write_lock(&css_set_lock);
 	hlist_del(&cg->hlist);
 	css_set_count--;
-	while (!list_empty(&cg->cg_links)) {
-		struct cg_cgroup_link *link;
-		link = list_entry(cg->cg_links.next,
-				  struct cg_cgroup_link, cg_link_list);
+
+	list_for_each_entry_safe(link, saved_link, &cg->cg_links,
+				 cg_link_list) {
 		list_del(&link->cg_link_list);
 		list_del(&link->cgrp_link_list);
 		kfree(link);
 	}
+
 	write_unlock(&css_set_lock);
 }
 
@@ -363,15 +363,14 @@ static struct css_set *find_existing_css_set(
 static int allocate_cg_links(int count, struct list_head *tmp)
 {
 	struct cg_cgroup_link *link;
+	struct cg_cgroup_link *saved_link;
 	int i;
 	INIT_LIST_HEAD(tmp);
 	for (i = 0; i < count; i++) {
 		link = kmalloc(sizeof(*link), GFP_KERNEL);
 		if (!link) {
-			while (!list_empty(tmp)) {
-				link = list_entry(tmp->next,
-						  struct cg_cgroup_link,
-						  cgrp_link_list);
+			list_for_each_entry_safe(link, saved_link, tmp,
+						 cgrp_link_list) {
 				list_del(&link->cgrp_link_list);
 				kfree(link);
 			}
@@ -384,11 +383,10 @@ static int allocate_cg_links(int count, struct list_head *tmp)
 
 static void free_cg_links(struct list_head *tmp)
 {
-	while (!list_empty(tmp)) {
-		struct cg_cgroup_link *link;
-		link = list_entry(tmp->next,
-				  struct cg_cgroup_link,
-				  cgrp_link_list);
+	struct cg_cgroup_link *link;
+	struct cg_cgroup_link *saved_link;
+
+	list_for_each_entry_safe(link, saved_link, tmp, cgrp_link_list) {
 		list_del(&link->cgrp_link_list);
 		kfree(link);
 	}
@@ -415,11 +413,11 @@ static struct css_set *find_css_set(
 
 	/* First see if we already have a cgroup group that matches
 	 * the desired set */
-	write_lock(&css_set_lock);
+	read_lock(&css_set_lock);
 	res = find_existing_css_set(oldcg, cgrp, template);
 	if (res)
 		get_css_set(res);
-	write_unlock(&css_set_lock);
+	read_unlock(&css_set_lock);
 
 	if (res)
 		return res;
@@ -507,10 +505,6 @@ static struct css_set *find_css_set(
  * knows that the cgroup won't be removed, as cgroup_rmdir()
  * needs that mutex.
  *
- * The cgroup_common_file_write handler for operations that modify
- * the cgroup hierarchy holds cgroup_mutex across the entire operation,
- * single threading all such cgroup modifications across the system.
- *
  * The fork and exit callbacks cgroup_fork() and cgroup_exit(), don't
  * (usually) take cgroup_mutex. These are the two most performance
  * critical pieces of code here. The exception occurs on cgroup_exit(),
@@ -1093,6 +1087,8 @@ static void cgroup_kill_sb(struct super_block *sb) {
 	struct cgroupfs_root *root = sb->s_fs_info;
 	struct cgroup *cgrp = &root->top_cgroup;
 	int ret;
+	struct cg_cgroup_link *link;
+	struct cg_cgroup_link *saved_link;
 
 	BUG_ON(!root);
 
@@ -1112,10 +1108,9 @@ static void cgroup_kill_sb(struct super_block *sb) {
 	 * root cgroup
 	 */
 	write_lock(&css_set_lock);
-	while (!list_empty(&cgrp->css_sets)) {
-		struct cg_cgroup_link *link;
-		link = list_entry(cgrp->css_sets.next,
-				  struct cg_cgroup_link, cgrp_link_list);
+
+	list_for_each_entry_safe(link, saved_link, &cgrp->css_sets,
+				 cgrp_link_list) {
 		list_del(&link->cg_link_list);
 		list_del(&link->cgrp_link_list);
 		kfree(link);
@@ -1281,18 +1276,14 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
 }
 
 /*
- * Attach task with pid 'pid' to cgroup 'cgrp'. Call with
- * cgroup_mutex, may take task_lock of task
+ * Attach task with pid 'pid' to cgroup 'cgrp'. Call with cgroup_mutex
+ * held. May take task_lock of task
  */
-static int attach_task_by_pid(struct cgroup *cgrp, char *pidbuf)
+static int attach_task_by_pid(struct cgroup *cgrp, u64 pid)
 {
-	pid_t pid;
 	struct task_struct *tsk;
 	int ret;
 
-	if (sscanf(pidbuf, "%d", &pid) != 1)
-		return -EIO;
-
 	if (pid) {
 		rcu_read_lock();
 		tsk = find_task_by_vpid(pid);
@@ -1318,6 +1309,16 @@ static int attach_task_by_pid(struct cgroup *cgrp, char *pidbuf)
 	return ret;
 }
 
+static int cgroup_tasks_write(struct cgroup *cgrp, struct cftype *cft, u64 pid)
+{
+	int ret;
+	if (!cgroup_lock_live_group(cgrp))
+		return -ENODEV;
+	ret = attach_task_by_pid(cgrp, pid);
+	cgroup_unlock();
+	return ret;
+}
+
 /* The various types of files and directories in a cgroup file system */
 enum cgroup_filetype {
 	FILE_ROOT,
@@ -1327,12 +1328,54 @@ enum cgroup_filetype {
 	FILE_RELEASE_AGENT,
 };
 
+/**
+ * cgroup_lock_live_group - take cgroup_mutex and check that cgrp is alive.
+ * @cgrp: the cgroup to be checked for liveness
+ *
+ * On success, returns true; the lock should be later released with
+ * cgroup_unlock(). On failure returns false with no lock held.
+ */
+bool cgroup_lock_live_group(struct cgroup *cgrp)
+{
+	mutex_lock(&cgroup_mutex);
+	if (cgroup_is_removed(cgrp)) {
+		mutex_unlock(&cgroup_mutex);
+		return false;
+	}
+	return true;
+}
+
+static int cgroup_release_agent_write(struct cgroup *cgrp, struct cftype *cft,
+				      const char *buffer)
+{
+	BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX);
+	if (!cgroup_lock_live_group(cgrp))
+		return -ENODEV;
+	strcpy(cgrp->root->release_agent_path, buffer);
+	cgroup_unlock();
+	return 0;
+}
+
+static int cgroup_release_agent_show(struct cgroup *cgrp, struct cftype *cft,
+				     struct seq_file *seq)
+{
+	if (!cgroup_lock_live_group(cgrp))
+		return -ENODEV;
+	seq_puts(seq, cgrp->root->release_agent_path);
+	seq_putc(seq, '\n');
+	cgroup_unlock();
+	return 0;
+}
+
+/* A buffer size big enough for numbers or short strings */
+#define CGROUP_LOCAL_BUFFER_SIZE 64
+
 static ssize_t cgroup_write_X64(struct cgroup *cgrp, struct cftype *cft,
 				struct file *file,
 				const char __user *userbuf,
 				size_t nbytes, loff_t *unused_ppos)
 {
-	char buffer[64];
+	char buffer[CGROUP_LOCAL_BUFFER_SIZE];
 	int retval = 0;
 	char *end;
 
@@ -1361,68 +1404,36 @@ static ssize_t cgroup_write_X64(struct cgroup *cgrp, struct cftype *cft,
 	return retval;
 }
 
-static ssize_t cgroup_common_file_write(struct cgroup *cgrp,
-					struct cftype *cft,
-					struct file *file,
-					const char __user *userbuf,
-					size_t nbytes, loff_t *unused_ppos)
-{
-	enum cgroup_filetype type = cft->private;
-	char *buffer;
-	int retval = 0;
-
-	if (nbytes >= PATH_MAX)
-		return -E2BIG;
-
-	/* +1 for nul-terminator */
-	buffer = kmalloc(nbytes + 1, GFP_KERNEL);
-	if (buffer == NULL)
-		return -ENOMEM;
-
-	if (copy_from_user(buffer, userbuf, nbytes)) {
-		retval = -EFAULT;
-		goto out1;
-	}
-	buffer[nbytes] = 0;	/* nul-terminate */
-	strstrip(buffer);	/* strip -just- trailing whitespace */
-
-	mutex_lock(&cgroup_mutex);
-
-	/*
-	 * This was already checked for in cgroup_file_write(), but
-	 * check again now we're holding cgroup_mutex.
-	 */
-	if (cgroup_is_removed(cgrp)) {
-		retval = -ENODEV;
-		goto out2;
-	}
-
-	switch (type) {
-	case FILE_TASKLIST:
-		retval = attach_task_by_pid(cgrp, buffer);
-		break;
-	case FILE_NOTIFY_ON_RELEASE:
-		clear_bit(CGRP_RELEASABLE, &cgrp->flags);
-		if (simple_strtoul(buffer, NULL, 10) != 0)
-			set_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
-		else
-			clear_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
-		break;
-	case FILE_RELEASE_AGENT:
-		BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX);
-		strcpy(cgrp->root->release_agent_path, buffer);
-		break;
-	default:
-		retval = -EINVAL;
-		goto out2;
-	}
-
-	if (retval == 0)
+static ssize_t cgroup_write_string(struct cgroup *cgrp, struct cftype *cft,
+				   struct file *file,
+				   const char __user *userbuf,
+				   size_t nbytes, loff_t *unused_ppos)
+{
+	char local_buffer[CGROUP_LOCAL_BUFFER_SIZE];
+	int retval = 0;
+	size_t max_bytes = cft->max_write_len;
+	char *buffer = local_buffer;
+
+	if (!max_bytes)
+		max_bytes = sizeof(local_buffer) - 1;
+	if (nbytes >= max_bytes)
+		return -E2BIG;
+	/* Allocate a dynamic buffer if we need one */
+	if (nbytes >= sizeof(local_buffer)) {
+		buffer = kmalloc(nbytes + 1, GFP_KERNEL);
+		if (buffer == NULL)
+			return -ENOMEM;
+	}
+	if (nbytes && copy_from_user(buffer, userbuf, nbytes))
+		return -EFAULT;
+
+	buffer[nbytes] = 0;	/* nul-terminate */
+	strstrip(buffer);
+	retval = cft->write_string(cgrp, cft, buffer);
+	if (!retval)
 		retval = nbytes;
-out2:
-	mutex_unlock(&cgroup_mutex);
-out1:
-	kfree(buffer);
+	if (buffer != local_buffer)
+		kfree(buffer);
 	return retval;
 }
 
@@ -1438,6 +1449,8 @@ static ssize_t cgroup_file_write(struct file *file, const char __user *buf,
 		return cft->write(cgrp, cft, file, buf, nbytes, ppos);
 	if (cft->write_u64 || cft->write_s64)
 		return cgroup_write_X64(cgrp, cft, file, buf, nbytes, ppos);
+	if (cft->write_string)
+		return cgroup_write_string(cgrp, cft, file, buf, nbytes, ppos);
 	if (cft->trigger) {
 		int ret = cft->trigger(cgrp, (unsigned int)cft->private);
 		return ret ? ret : nbytes;
@@ -1450,7 +1463,7 @@ static ssize_t cgroup_read_u64(struct cgroup *cgrp, struct cftype *cft,
 			       char __user *buf, size_t nbytes,
 			       loff_t *ppos)
 {
-	char tmp[64];
+	char tmp[CGROUP_LOCAL_BUFFER_SIZE];
 	u64 val = cft->read_u64(cgrp, cft);
 	int len = sprintf(tmp, "%llu\n", (unsigned long long) val);
 
@@ -1462,56 +1475,13 @@ static ssize_t cgroup_read_s64(struct cgroup *cgrp, struct cftype *cft,
 			       char __user *buf, size_t nbytes,
 			       loff_t *ppos)
 {
-	char tmp[64];
+	char tmp[CGROUP_LOCAL_BUFFER_SIZE];
 	s64 val = cft->read_s64(cgrp, cft);
 	int len = sprintf(tmp, "%lld\n", (long long) val);
 
 	return simple_read_from_buffer(buf, nbytes, ppos, tmp, len);
 }
 
-static ssize_t cgroup_common_file_read(struct cgroup *cgrp,
-				       struct cftype *cft,
-				       struct file *file,
-				       char __user *buf,
-				       size_t nbytes, loff_t *ppos)
-{
-	enum cgroup_filetype type = cft->private;
-	char *page;
-	ssize_t retval = 0;
-	char *s;
-
-	if (!(page = (char *)__get_free_page(GFP_KERNEL)))
-		return -ENOMEM;
-
-	s = page;
-
-	switch (type) {
-	case FILE_RELEASE_AGENT:
-	{
-		struct cgroupfs_root *root;
-		size_t n;
-		mutex_lock(&cgroup_mutex);
-		root = cgrp->root;
-		n = strnlen(root->release_agent_path,
-			    sizeof(root->release_agent_path));
-		n = min(n, (size_t) PAGE_SIZE);
-		strncpy(s, root->release_agent_path, n);
-		mutex_unlock(&cgroup_mutex);
-		s += n;
-		break;
-	}
-	default:
-		retval = -EINVAL;
-		goto out;
-	}
-	*s++ = '\n';
-
-	retval = simple_read_from_buffer(buf, nbytes, ppos, page, s - page);
-out:
-	free_page((unsigned long)page);
-	return retval;
-}
-
 static ssize_t cgroup_file_read(struct file *file, char __user *buf,
 				size_t nbytes, loff_t *ppos)
 {
@@ -1560,7 +1530,7 @@ static int cgroup_seqfile_show(struct seq_file *m, void *arg)
 	return cft->read_seq_string(state->cgroup, cft, m);
 }
 
-int cgroup_seqfile_release(struct inode *inode, struct file *file)
+static int cgroup_seqfile_release(struct inode *inode, struct file *file)
 {
 	struct seq_file *seq = file->private_data;
 	kfree(seq->private);
@@ -1569,6 +1539,7 @@ int cgroup_seqfile_release(struct inode *inode, struct file *file)
 
 static struct file_operations cgroup_seqfile_operations = {
 	.read = seq_read,
+	.write = cgroup_file_write,
 	.llseek = seq_lseek,
 	.release = cgroup_seqfile_release,
 };
@@ -1756,15 +1727,11 @@ int cgroup_add_files(struct cgroup *cgrp,
 int cgroup_task_count(const struct cgroup *cgrp)
 {
 	int count = 0;
-	struct list_head *l;
+	struct cg_cgroup_link *link;
 
 	read_lock(&css_set_lock);
-	l = cgrp->css_sets.next;
-	while (l != &cgrp->css_sets) {
-		struct cg_cgroup_link *link =
-			list_entry(l, struct cg_cgroup_link, cgrp_link_list);
+	list_for_each_entry(link, &cgrp->css_sets, cgrp_link_list) {
 		count += atomic_read(&link->cg->ref.refcount);
-		l = l->next;
 	}
 	read_unlock(&css_set_lock);
 	return count;
@@ -2227,6 +2194,18 @@ static u64 cgroup_read_notify_on_release(struct cgroup *cgrp,
 	return notify_on_release(cgrp);
 }
 
+static int cgroup_write_notify_on_release(struct cgroup *cgrp,
+					   struct cftype *cft,
+					   u64 val)
+{
+	clear_bit(CGRP_RELEASABLE, &cgrp->flags);
+	if (val)
+		set_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
+	else
+		clear_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
+	return 0;
+}
+
 /*
  * for the common functions, 'private' gives the type of file
  */
@@ -2235,7 +2214,7 @@ static struct cftype files[] = {
 		.name = "tasks",
 		.open = cgroup_tasks_open,
 		.read = cgroup_tasks_read,
-		.write = cgroup_common_file_write,
+		.write_u64 = cgroup_tasks_write,
 		.release = cgroup_tasks_release,
 		.private = FILE_TASKLIST,
 	},
@@ -2243,15 +2222,16 @@ static struct cftype files[] = {
 	{
 		.name = "notify_on_release",
 		.read_u64 = cgroup_read_notify_on_release,
-		.write = cgroup_common_file_write,
+		.write_u64 = cgroup_write_notify_on_release,
 		.private = FILE_NOTIFY_ON_RELEASE,
 	},
 };
 
 static struct cftype cft_release_agent = {
 	.name = "release_agent",
-	.read = cgroup_common_file_read,
-	.write = cgroup_common_file_write,
+	.read_seq_string = cgroup_release_agent_show,
+	.write_string = cgroup_release_agent_write,
+	.max_write_len = PATH_MAX,
 	.private = FILE_RELEASE_AGENT,
 };
 
@@ -2869,16 +2849,17 @@ void cgroup_exit(struct task_struct *tsk, int run_callbacks)
  * cgroup_clone - clone the cgroup the given subsystem is attached to
  * @tsk: the task to be moved
  * @subsys: the given subsystem
+ * @nodename: the name for the new cgroup
  *
  * Duplicate the current cgroup in the hierarchy that the given
  * subsystem is attached to, and move this task into the new
  * child.
  */
-int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys)
+int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys,
+		 char *nodename)
 {
 	struct dentry *dentry;
 	int ret = 0;
-	char nodename[MAX_CGROUP_TYPE_NAMELEN];
 	struct cgroup *parent, *child;
 	struct inode *inode;
 	struct css_set *cg;
@@ -2903,8 +2884,6 @@ int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys)
 	cg = tsk->cgroups;
 	parent = task_cgroup(tsk, subsys->subsys_id);
 
-	snprintf(nodename, MAX_CGROUP_TYPE_NAMELEN, "%d", tsk->pid);
-
 	/* Pin the hierarchy */
 	atomic_inc(&parent->root->sb->s_active);
 
@@ -3078,27 +3057,24 @@ static void cgroup_release_agent(struct work_struct *work)
 	while (!list_empty(&release_list)) {
 		char *argv[3], *envp[3];
 		int i;
-		char *pathbuf;
+		char *pathbuf = NULL, *agentbuf = NULL;
 		struct cgroup *cgrp = list_entry(release_list.next,
 						 struct cgroup,
 						 release_list);
 		list_del_init(&cgrp->release_list);
 		spin_unlock(&release_list_lock);
 		pathbuf = kmalloc(PAGE_SIZE, GFP_KERNEL);
-		if (!pathbuf) {
-			spin_lock(&release_list_lock);
-			continue;
-		}
-
-		if (cgroup_path(cgrp, pathbuf, PAGE_SIZE) < 0) {
-			kfree(pathbuf);
-			spin_lock(&release_list_lock);
-			continue;
-		}
+		if (!pathbuf)
+			goto continue_free;
+		if (cgroup_path(cgrp, pathbuf, PAGE_SIZE) < 0)
+			goto continue_free;
+		agentbuf = kstrdup(cgrp->root->release_agent_path, GFP_KERNEL);
+		if (!agentbuf)
+			goto continue_free;
 
 		i = 0;
-		argv[i++] = cgrp->root->release_agent_path;
-		argv[i++] = (char *)pathbuf;
+		argv[i++] = agentbuf;
+		argv[i++] = pathbuf;
 		argv[i] = NULL;
 
 		i = 0;
@@ -3112,8 +3088,10 @@ static void cgroup_release_agent(struct work_struct *work)
 		 * be a slow process */
 		mutex_unlock(&cgroup_mutex);
 		call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
-		kfree(pathbuf);
 		mutex_lock(&cgroup_mutex);
+ continue_free:
+		kfree(pathbuf);
+		kfree(agentbuf);
 		spin_lock(&release_list_lock);
 	}
 	spin_unlock(&release_list_lock);
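
For orientation, the other recurring change above moves cgroup control files from one generic .write handler to typed handlers: write_u64 for numeric files and write_string (bounded by max_write_len) for text files, so the core does the parsing and bounds checking once. Below is a userspace sketch of that dispatch; the struct and function names are illustrative only and this is not kernel code.

/*
 * Userspace sketch of typed-handler dispatch: each "file" declares
 * exactly one handler and the core parses the written buffer for it.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct cftype_sketch {
	const char *name;
	int (*write_u64)(const char *name, unsigned long long val);
	int (*write_string)(const char *name, const char *buf);
	size_t max_write_len;
};

static int tasks_write(const char *name, unsigned long long pid)
{
	printf("%s: would attach pid %llu\n", name, pid);
	return 0;
}

static int release_agent_write(const char *name, const char *path)
{
	printf("%s: would set release agent to \"%s\"\n", name, path);
	return 0;
}

/* Mirrors the order of checks in cgroup_file_write(): u64 first, then string. */
static int file_write(const struct cftype_sketch *cft, const char *buf)
{
	if (cft->write_u64)
		return cft->write_u64(cft->name, strtoull(buf, NULL, 10));
	if (cft->write_string) {
		if (strlen(buf) >= cft->max_write_len)
			return -E2BIG;
		return cft->write_string(cft->name, buf);
	}
	return -EINVAL;
}

int main(void)
{
	const struct cftype_sketch tasks = {
		.name = "tasks", .write_u64 = tasks_write,
	};
	const struct cftype_sketch agent = {
		.name = "release_agent",
		.write_string = release_agent_write,
		.max_write_len = 4096,
	};

	file_write(&tasks, "1234");
	file_write(&agent, "/sbin/cgroup_release_agent");
	return 0;
}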