Diffstat (limited to 'kernel/cpuset.c')
 kernel/cpuset.c | 1601
 1 file changed, 601 insertions(+), 1000 deletions(-)

diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 64950fa5d321..50f5dc463688 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -4,7 +4,8 @@
  * Processor and Memory placement constraints for sets of tasks.
  *
  * Copyright (C) 2003 BULL SA.
- * Copyright (C) 2004-2006 Silicon Graphics, Inc.
+ * Copyright (C) 2004-2007 Silicon Graphics, Inc.
+ * Copyright (C) 2006 Google, Inc
  *
  * Portions derived from Patrick Mochel's sysfs code.
  * sysfs is Copyright (c) 2001-3 Patrick Mochel
@@ -12,6 +13,7 @@
  * 2003-10-10 Written by Simon Derr.
  * 2003-10-22 Updates by Stephen Hemminger.
  * 2004 May-July Rework by Paul Jackson.
+ * 2006 Rework by Paul Menage to use generic cgroups
  *
  * This file is subject to the terms and conditions of the GNU General Public
  * License.  See the file COPYING in the main directory of the Linux
@@ -36,6 +38,7 @@
 #include <linux/mount.h>
 #include <linux/namei.h>
 #include <linux/pagemap.h>
+#include <linux/prio_heap.h>
 #include <linux/proc_fs.h>
 #include <linux/rcupdate.h>
 #include <linux/sched.h>
@@ -52,8 +55,7 @@
 #include <asm/uaccess.h>
 #include <asm/atomic.h>
 #include <linux/mutex.h>
-
-#define CPUSET_SUPER_MAGIC		0x27e0eb
+#include <linux/kfifo.h>
 
 /*
  * Tracks how many cpusets are currently defined in system.
@@ -62,6 +64,10 @@
  */
 int number_of_cpusets __read_mostly;
 
+/* Retrieve the cpuset from a cgroup */
+struct cgroup_subsys cpuset_subsys;
+struct cpuset;
+
 /* See "Frequency meter" comments, below. */
 
 struct fmeter {
@@ -72,24 +78,13 @@ struct fmeter {
 };
 
 struct cpuset {
+	struct cgroup_subsys_state css;
+
 	unsigned long flags;		/* "unsigned long" so bitops work */
 	cpumask_t cpus_allowed;		/* CPUs allowed to tasks in cpuset */
 	nodemask_t mems_allowed;	/* Memory Nodes allowed to tasks */
 
-	/*
-	 * Count is atomic so can incr (fork) or decr (exit) without a lock.
-	 */
-	atomic_t count;			/* count tasks using this cpuset */
-
-	/*
-	 * We link our 'sibling' struct into our parents 'children'.
-	 * Our children link their 'sibling' into our 'children'.
-	 */
-	struct list_head sibling;	/* my parents children */
-	struct list_head children;	/* my children */
-
 	struct cpuset *parent;		/* my parent */
-	struct dentry *dentry;		/* cpuset fs entry */
 
 	/*
 	 * Copy of global cpuset_mems_generation as of the most
@@ -98,15 +93,32 @@ struct cpuset {
 	int mems_generation;
 
 	struct fmeter fmeter;		/* memory_pressure filter */
+
+	/* partition number for rebuild_sched_domains() */
+	int pn;
 };
 
+/* Retrieve the cpuset for a cgroup */
+static inline struct cpuset *cgroup_cs(struct cgroup *cont)
+{
+	return container_of(cgroup_subsys_state(cont, cpuset_subsys_id),
+			    struct cpuset, css);
+}
+
+/* Retrieve the cpuset for a task */
+static inline struct cpuset *task_cs(struct task_struct *task)
+{
+	return container_of(task_subsys_state(task, cpuset_subsys_id),
+			    struct cpuset, css);
+}
+
+
 /* bits in struct cpuset flags field */
 typedef enum {
 	CS_CPU_EXCLUSIVE,
 	CS_MEM_EXCLUSIVE,
 	CS_MEMORY_MIGRATE,
-	CS_REMOVED,
-	CS_NOTIFY_ON_RELEASE,
+	CS_SCHED_LOAD_BALANCE,
 	CS_SPREAD_PAGE,
 	CS_SPREAD_SLAB,
 } cpuset_flagbits_t;
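The two accessors added above are plain container_of() conversions: struct cpuset embeds its struct cgroup_subsys_state as the css member, so subtracting the member's offset from a css pointer recovers the enclosing cpuset. A minimal userspace sketch of the same pattern (struct names invented for illustration, not the kernel's):

#include <stddef.h>
#include <stdio.h>

/* Illustrative stand-ins for cgroup_subsys_state / cpuset */
struct state { int refcnt; };
struct box { long flags; struct state css; };

/* Same trick as the kernel's container_of(): member ptr -> enclosing struct */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
	struct box b = { .flags = 42 };
	struct state *css = &b.css;	/* what the cgroup layer hands back */
	struct box *owner = container_of(css, struct box, css);

	printf("flags = %ld\n", owner->flags);	/* prints 42 */
	return 0;
}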
@@ -122,14 +134,9 @@ static inline int is_mem_exclusive(const struct cpuset *cs)
 	return test_bit(CS_MEM_EXCLUSIVE, &cs->flags);
 }
 
-static inline int is_removed(const struct cpuset *cs)
+static inline int is_sched_load_balance(const struct cpuset *cs)
 {
-	return test_bit(CS_REMOVED, &cs->flags);
-}
-
-static inline int notify_on_release(const struct cpuset *cs)
-{
-	return test_bit(CS_NOTIFY_ON_RELEASE, &cs->flags);
+	return test_bit(CS_SCHED_LOAD_BALANCE, &cs->flags);
 }
 
 static inline int is_memory_migrate(const struct cpuset *cs)
@@ -172,14 +179,8 @@ static struct cpuset top_cpuset = {
 	.flags = ((1 << CS_CPU_EXCLUSIVE) | (1 << CS_MEM_EXCLUSIVE)),
 	.cpus_allowed = CPU_MASK_ALL,
 	.mems_allowed = NODE_MASK_ALL,
-	.count = ATOMIC_INIT(0),
-	.sibling = LIST_HEAD_INIT(top_cpuset.sibling),
-	.children = LIST_HEAD_INIT(top_cpuset.children),
 };
 
-static struct vfsmount *cpuset_mount;
-static struct super_block *cpuset_sb;
-
 /*
  * We have two global cpuset mutexes below.  They can nest.
  * It is ok to first take manage_mutex, then nest callback_mutex.  We also
@@ -263,297 +264,33 @@ static struct super_block *cpuset_sb;
  * the routine cpuset_update_task_memory_state().
  */
 
-static DEFINE_MUTEX(manage_mutex);
 static DEFINE_MUTEX(callback_mutex);
 
-/*
- * A couple of forward declarations required, due to cyclic reference loop:
- *  cpuset_mkdir -> cpuset_create -> cpuset_populate_dir -> cpuset_add_file
- *  -> cpuset_create_file -> cpuset_dir_inode_operations -> cpuset_mkdir.
- */
-
-static int cpuset_mkdir(struct inode *dir, struct dentry *dentry, int mode);
-static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry);
-
-static struct backing_dev_info cpuset_backing_dev_info = {
-	.ra_pages = 0,		/* No readahead */
-	.capabilities = BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK,
-};
-
-static struct inode *cpuset_new_inode(mode_t mode)
-{
-	struct inode *inode = new_inode(cpuset_sb);
-
-	if (inode) {
-		inode->i_mode = mode;
-		inode->i_uid = current->fsuid;
-		inode->i_gid = current->fsgid;
-		inode->i_blocks = 0;
-		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
-		inode->i_mapping->backing_dev_info = &cpuset_backing_dev_info;
-	}
-	return inode;
-}
-
-static void cpuset_diput(struct dentry *dentry, struct inode *inode)
-{
-	/* is dentry a directory ? if so, kfree() associated cpuset */
-	if (S_ISDIR(inode->i_mode)) {
-		struct cpuset *cs = dentry->d_fsdata;
-		BUG_ON(!(is_removed(cs)));
-		kfree(cs);
-	}
-	iput(inode);
-}
-
-static struct dentry_operations cpuset_dops = {
-	.d_iput = cpuset_diput,
-};
-
-static struct dentry *cpuset_get_dentry(struct dentry *parent, const char *name)
-{
-	struct dentry *d = lookup_one_len(name, parent, strlen(name));
-	if (!IS_ERR(d))
-		d->d_op = &cpuset_dops;
-	return d;
-}
-
-static void remove_dir(struct dentry *d)
-{
-	struct dentry *parent = dget(d->d_parent);
-
-	d_delete(d);
-	simple_rmdir(parent->d_inode, d);
-	dput(parent);
-}
-
-/*
- * NOTE : the dentry must have been dget()'ed
- */
-static void cpuset_d_remove_dir(struct dentry *dentry)
-{
-	struct list_head *node;
-
-	spin_lock(&dcache_lock);
-	node = dentry->d_subdirs.next;
-	while (node != &dentry->d_subdirs) {
-		struct dentry *d = list_entry(node, struct dentry, d_u.d_child);
-		list_del_init(node);
-		if (d->d_inode) {
-			d = dget_locked(d);
-			spin_unlock(&dcache_lock);
-			d_delete(d);
-			simple_unlink(dentry->d_inode, d);
-			dput(d);
-			spin_lock(&dcache_lock);
-		}
-		node = dentry->d_subdirs.next;
-	}
-	list_del_init(&dentry->d_u.d_child);
-	spin_unlock(&dcache_lock);
-	remove_dir(dentry);
-}
-
-static struct super_operations cpuset_ops = {
-	.statfs = simple_statfs,
-	.drop_inode = generic_delete_inode,
-};
-
-static int cpuset_fill_super(struct super_block *sb, void *unused_data,
-							int unused_silent)
-{
-	struct inode *inode;
-	struct dentry *root;
-
-	sb->s_blocksize = PAGE_CACHE_SIZE;
-	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
-	sb->s_magic = CPUSET_SUPER_MAGIC;
-	sb->s_op = &cpuset_ops;
-	cpuset_sb = sb;
-
-	inode = cpuset_new_inode(S_IFDIR | S_IRUGO | S_IXUGO | S_IWUSR);
-	if (inode) {
-		inode->i_op = &simple_dir_inode_operations;
-		inode->i_fop = &simple_dir_operations;
-		/* directories start off with i_nlink == 2 (for "." entry) */
-		inc_nlink(inode);
-	} else {
-		return -ENOMEM;
-	}
-
-	root = d_alloc_root(inode);
-	if (!root) {
-		iput(inode);
-		return -ENOMEM;
-	}
-	sb->s_root = root;
-	return 0;
-}
-
+/* This is ugly, but preserves the userspace API for existing cpuset
+ * users. If someone tries to mount the "cpuset" filesystem, we
+ * silently switch it to mount "cgroup" instead */
 static int cpuset_get_sb(struct file_system_type *fs_type,
 			 int flags, const char *unused_dev_name,
 			 void *data, struct vfsmount *mnt)
 {
-	return get_sb_single(fs_type, flags, data, cpuset_fill_super, mnt);
+	struct file_system_type *cgroup_fs = get_fs_type("cgroup");
+	int ret = -ENODEV;
+	if (cgroup_fs) {
+		char mountopts[] =
+			"cpuset,noprefix,"
+			"release_agent=/sbin/cpuset_release_agent";
+		ret = cgroup_fs->get_sb(cgroup_fs, flags,
+					unused_dev_name, mountopts, mnt);
+		put_filesystem(cgroup_fs);
+	}
+	return ret;
 }
 
 static struct file_system_type cpuset_fs_type = {
 	.name = "cpuset",
 	.get_sb = cpuset_get_sb,
-	.kill_sb = kill_litter_super,
 };
 
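The upshot of this stub: a legacy "mount -t cpuset none /dev/cpuset" keeps working. cpuset_get_sb() looks up the registered "cgroup" filesystem type and re-issues the mount as, in effect, "mount -t cgroup -o cpuset,noprefix,release_agent=/sbin/cpuset_release_agent none /dev/cpuset"; the noprefix option keeps the old control-file names (e.g. "cpus" rather than "cpuset.cpus"), so existing userspace is none the wiser.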
-/* struct cftype:
- *
- * The files in the cpuset filesystem mostly have a very simple read/write
- * handling, some common function will take care of it. Nevertheless some cases
- * (read tasks) are special and therefore I define this structure for every
- * kind of file.
- *
- *
- * When reading/writing to a file:
- *	- the cpuset to use in file->f_path.dentry->d_parent->d_fsdata
- *	- the 'cftype' of the file is file->f_path.dentry->d_fsdata
- */
-
-struct cftype {
-	char *name;
-	int private;
-	int (*open) (struct inode *inode, struct file *file);
-	ssize_t (*read) (struct file *file, char __user *buf, size_t nbytes,
-							loff_t *ppos);
-	int (*write) (struct file *file, const char __user *buf, size_t nbytes,
-							loff_t *ppos);
-	int (*release) (struct inode *inode, struct file *file);
-};
-
-static inline struct cpuset *__d_cs(struct dentry *dentry)
-{
-	return dentry->d_fsdata;
-}
-
-static inline struct cftype *__d_cft(struct dentry *dentry)
-{
-	return dentry->d_fsdata;
-}
-
-/*
- * Call with manage_mutex held.  Writes path of cpuset into buf.
- * Returns 0 on success, -errno on error.
- */
-
-static int cpuset_path(const struct cpuset *cs, char *buf, int buflen)
-{
-	char *start;
-
-	start = buf + buflen;
-
-	*--start = '\0';
-	for (;;) {
-		int len = cs->dentry->d_name.len;
-		if ((start -= len) < buf)
-			return -ENAMETOOLONG;
-		memcpy(start, cs->dentry->d_name.name, len);
-		cs = cs->parent;
-		if (!cs)
-			break;
-		if (!cs->parent)
-			continue;
-		if (--start < buf)
-			return -ENAMETOOLONG;
-		*start = '/';
-	}
-	memmove(buf, start, buf + buflen - start);
-	return 0;
-}
-
-/*
- * Notify userspace when a cpuset is released, by running
- * /sbin/cpuset_release_agent with the name of the cpuset (path
- * relative to the root of cpuset file system) as the argument.
- *
- * Most likely, this user command will try to rmdir this cpuset.
- *
- * This races with the possibility that some other task will be
- * attached to this cpuset before it is removed, or that some other
- * user task will 'mkdir' a child cpuset of this cpuset.  That's ok.
- * The presumed 'rmdir' will fail quietly if this cpuset is no longer
- * unused, and this cpuset will be reprieved from its death sentence,
- * to continue to serve a useful existence.  Next time it's released,
- * we will get notified again, if it still has 'notify_on_release' set.
- *
- * The final arg to call_usermodehelper() is 0, which means don't
- * wait.  The separate /sbin/cpuset_release_agent task is forked by
- * call_usermodehelper(), then control in this thread returns here,
- * without waiting for the release agent task.  We don't bother to
- * wait because the caller of this routine has no use for the exit
- * status of the /sbin/cpuset_release_agent task, so no sense holding
- * our caller up for that.
- *
- * When we had only one cpuset mutex, we had to call this
- * without holding it, to avoid deadlock when call_usermodehelper()
- * allocated memory.  With two locks, we could now call this while
- * holding manage_mutex, but we still don't, so as to minimize
- * the time manage_mutex is held.
- */
-
-static void cpuset_release_agent(const char *pathbuf)
-{
-	char *argv[3], *envp[3];
-	int i;
-
-	if (!pathbuf)
-		return;
-
-	i = 0;
-	argv[i++] = "/sbin/cpuset_release_agent";
-	argv[i++] = (char *)pathbuf;
-	argv[i] = NULL;
-
-	i = 0;
-	/* minimal command environment */
-	envp[i++] = "HOME=/";
-	envp[i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
-	envp[i] = NULL;
-
-	call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
-	kfree(pathbuf);
-}
-
-/*
- * Either cs->count of using tasks transitioned to zero, or the
- * cs->children list of child cpusets just became empty.  If this
- * cs is notify_on_release() and now both the user count is zero and
- * the list of children is empty, prepare cpuset path in a kmalloc'd
- * buffer, to be returned via ppathbuf, so that the caller can invoke
- * cpuset_release_agent() with it later on, once manage_mutex is dropped.
- * Call here with manage_mutex held.
- *
- * This check_for_release() routine is responsible for kmalloc'ing
- * pathbuf.  The above cpuset_release_agent() is responsible for
- * kfree'ing pathbuf.  The caller of these routines is responsible
- * for providing a pathbuf pointer, initialized to NULL, then
- * calling check_for_release() with manage_mutex held and the address
- * of the pathbuf pointer, then dropping manage_mutex, then calling
- * cpuset_release_agent() with pathbuf, as set by check_for_release().
- */
-
-static void check_for_release(struct cpuset *cs, char **ppathbuf)
-{
-	if (notify_on_release(cs) && atomic_read(&cs->count) == 0 &&
-	    list_empty(&cs->children)) {
-		char *buf;
-
-		buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
-		if (!buf)
-			return;
-		if (cpuset_path(cs, buf, PAGE_SIZE) < 0)
-			kfree(buf);
-		else
-			*ppathbuf = buf;
-	}
-}
-
 /*
  * Return in *pmask the portion of a cpusets's cpus_allowed that
  * are online.  If none are online, walk up the cpuset hierarchy
@@ -653,20 +390,19 @@ void cpuset_update_task_memory_state(void)
 	struct task_struct *tsk = current;
 	struct cpuset *cs;
 
-	if (tsk->cpuset == &top_cpuset) {
+	if (task_cs(tsk) == &top_cpuset) {
 		/* Don't need rcu for top_cpuset.  It's never freed. */
 		my_cpusets_mem_gen = top_cpuset.mems_generation;
 	} else {
 		rcu_read_lock();
-		cs = rcu_dereference(tsk->cpuset);
-		my_cpusets_mem_gen = cs->mems_generation;
+		my_cpusets_mem_gen = task_cs(current)->mems_generation;
 		rcu_read_unlock();
 	}
 
 	if (my_cpusets_mem_gen != tsk->cpuset_mems_generation) {
 		mutex_lock(&callback_mutex);
 		task_lock(tsk);
-		cs = tsk->cpuset;	/* Maybe changed when task not locked */
+		cs = task_cs(tsk); /* Maybe changed when task not locked */
 		guarantee_online_mems(cs, &tsk->mems_allowed);
 		tsk->cpuset_mems_generation = cs->mems_generation;
 		if (is_spread_page(cs))
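The mems_generation logic above is a lazy-revalidation pattern: a global generation counter is bumped on every change, each task caches the generation it last synchronized with, and the expensive update runs only when the two differ. A self-contained userspace sketch of the idea (all names invented for illustration):

#include <stdio.h>

/* Global generation, bumped on every change to the shared setting. */
static int settings_generation = 1;
static int shared_setting;

struct worker {
	int cached_setting;
	int seen_generation;	/* generation at last refresh */
};

/* Cheap check on the hot path; copy the setting only when stale. */
static void refresh(struct worker *w)
{
	if (w->seen_generation != settings_generation) {
		w->cached_setting = shared_setting;
		w->seen_generation = settings_generation;
	}
}

int main(void)
{
	struct worker w = { 0, 0 };

	refresh(&w);			/* first use: copies the setting */
	shared_setting = 7;
	settings_generation++;		/* writer publishes a change */
	refresh(&w);			/* stale generation forces a re-copy */
	printf("%d\n", w.cached_setting);	/* prints 7 */
	return 0;
}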
@@ -721,11 +457,12 @@ static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
 
 static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
 {
+	struct cgroup *cont;
 	struct cpuset *c, *par;
 
 	/* Each of our child cpusets must be a subset of us */
-	list_for_each_entry(c, &cur->children, sibling) {
-		if (!is_cpuset_subset(c, trial))
+	list_for_each_entry(cont, &cur->css.cgroup->children, sibling) {
+		if (!is_cpuset_subset(cgroup_cs(cont), trial))
 			return -EBUSY;
 	}
 
@@ -740,7 +477,8 @@ static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
 		return -EACCES;
 
 	/* If either I or some sibling (!= me) is exclusive, we can't overlap */
-	list_for_each_entry(c, &par->children, sibling) {
+	list_for_each_entry(cont, &par->css.cgroup->children, sibling) {
+		c = cgroup_cs(cont);
 		if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) &&
 		    c != cur &&
 		    cpus_intersects(trial->cpus_allowed, c->cpus_allowed))
@@ -751,17 +489,265 @@ static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
 			return -EINVAL;
 	}
 
+	/* Cpusets with tasks can't have empty cpus_allowed or mems_allowed */
+	if (cgroup_task_count(cur->css.cgroup)) {
+		if (cpus_empty(trial->cpus_allowed) ||
+		    nodes_empty(trial->mems_allowed)) {
+			return -ENOSPC;
+		}
+	}
+
 	return 0;
 }
 
 /*
+ * Helper routine for rebuild_sched_domains().
+ * Do cpusets a, b have overlapping cpus_allowed masks?
+ */
+
+static int cpusets_overlap(struct cpuset *a, struct cpuset *b)
+{
+	return cpus_intersects(a->cpus_allowed, b->cpus_allowed);
+}
+
+/*
+ * rebuild_sched_domains()
+ *
+ * If the flag 'sched_load_balance' of any cpuset with non-empty
+ * 'cpus' changes, or if the 'cpus' allowed changes in any cpuset
+ * which has that flag enabled, or if any cpuset with a non-empty
+ * 'cpus' is removed, then call this routine to rebuild the
+ * scheduler's dynamic sched domains.
+ *
+ * This routine builds a partial partition of the systems CPUs
+ * (the set of non-overlapping cpumask_t's in the array 'part'
+ * below), and passes that partial partition to the kernel/sched.c
+ * partition_sched_domains() routine, which will rebuild the
+ * schedulers load balancing domains (sched domains) as specified
+ * by that partial partition.  A 'partial partition' is a set of
+ * non-overlapping subsets whose union is a subset of that set.
+ *
+ * See "What is sched_load_balance" in Documentation/cpusets.txt
+ * for a background explanation of this.
+ *
+ * Does not return errors, on the theory that the callers of this
+ * routine would rather not worry about failures to rebuild sched
+ * domains when operating in the severe memory shortage situations
+ * that could cause allocation failures below.
+ *
+ * Call with cgroup_mutex held.  May take callback_mutex during
+ * call due to the kfifo_alloc() and kmalloc() calls.  May nest
+ * a call to the lock_cpu_hotplug()/unlock_cpu_hotplug() pair.
+ * Must not be called holding callback_mutex, because we must not
+ * call lock_cpu_hotplug() while holding callback_mutex.  Elsewhere
+ * the kernel nests callback_mutex inside lock_cpu_hotplug() calls.
+ * So the reverse nesting would risk an ABBA deadlock.
+ *
+ * The three key local variables below are:
+ *    q  - a kfifo queue of cpuset pointers, used to implement a
+ *	   top-down scan of all cpusets.  This scan loads a pointer
+ *	   to each cpuset marked is_sched_load_balance into the
+ *	   array 'csa'.  For our purposes, rebuilding the schedulers
+ *	   sched domains, we can ignore !is_sched_load_balance cpusets.
+ *  csa  - (for CpuSet Array) Array of pointers to all the cpusets
+ *	   that need to be load balanced, for convenient iterative
+ *	   access by the subsequent code that finds the best partition,
+ *	   i.e the set of domains (subsets) of CPUs such that the
+ *	   cpus_allowed of every cpuset marked is_sched_load_balance
+ *	   is a subset of one of these domains, while there are as
+ *	   many such domains as possible, each as small as possible.
+ * doms  - Conversion of 'csa' to an array of cpumasks, for passing to
+ *	   the kernel/sched.c routine partition_sched_domains() in a
+ *	   convenient format, that can be easily compared to the prior
+ *	   value to determine what partition elements (sched domains)
+ *	   were changed (added or removed.)
+ *
+ * Finding the best partition (set of domains):
+ *	The triple nested loops below over i, j, k scan over the
+ *	load balanced cpusets (using the array of cpuset pointers in
+ *	csa[]) looking for pairs of cpusets that have overlapping
+ *	cpus_allowed, but which don't have the same 'pn' partition
+ *	number and gives them in the same partition number.  It keeps
+ *	looping on the 'restart' label until it can no longer find
+ *	any such pairs.
+ *
+ *	The union of the cpus_allowed masks from the set of
+ *	all cpusets having the same 'pn' value then form the one
+ *	element of the partition (one sched domain) to be passed to
+ *	partition_sched_domains().
+ */
+
+static void rebuild_sched_domains(void)
+{
+	struct kfifo *q;	/* queue of cpusets to be scanned */
+	struct cpuset *cp;	/* scans q */
+	struct cpuset **csa;	/* array of all cpuset ptrs */
+	int csn;		/* how many cpuset ptrs in csa so far */
+	int i, j, k;		/* indices for partition finding loops */
+	cpumask_t *doms;	/* resulting partition; i.e. sched domains */
+	int ndoms;		/* number of sched domains in result */
+	int nslot;		/* next empty doms[] cpumask_t slot */
+
+	q = NULL;
+	csa = NULL;
+	doms = NULL;
+
+	/* Special case for the 99% of systems with one, full, sched domain */
+	if (is_sched_load_balance(&top_cpuset)) {
+		ndoms = 1;
+		doms = kmalloc(sizeof(cpumask_t), GFP_KERNEL);
+		if (!doms)
+			goto rebuild;
+		*doms = top_cpuset.cpus_allowed;
+		goto rebuild;
+	}
+
+	q = kfifo_alloc(number_of_cpusets * sizeof(cp), GFP_KERNEL, NULL);
+	if (IS_ERR(q))
+		goto done;
+	csa = kmalloc(number_of_cpusets * sizeof(cp), GFP_KERNEL);
+	if (!csa)
+		goto done;
+	csn = 0;
+
+	cp = &top_cpuset;
+	__kfifo_put(q, (void *)&cp, sizeof(cp));
+	while (__kfifo_get(q, (void *)&cp, sizeof(cp))) {
+		struct cgroup *cont;
+		struct cpuset *child;	/* scans child cpusets of cp */
+		if (is_sched_load_balance(cp))
+			csa[csn++] = cp;
+		list_for_each_entry(cont, &cp->css.cgroup->children, sibling) {
+			child = cgroup_cs(cont);
+			__kfifo_put(q, (void *)&child, sizeof(cp));
+		}
+	}
+
+	for (i = 0; i < csn; i++)
+		csa[i]->pn = i;
+	ndoms = csn;
+
+restart:
+	/* Find the best partition (set of sched domains) */
+	for (i = 0; i < csn; i++) {
+		struct cpuset *a = csa[i];
+		int apn = a->pn;
+
+		for (j = 0; j < csn; j++) {
+			struct cpuset *b = csa[j];
+			int bpn = b->pn;
+
+			if (apn != bpn && cpusets_overlap(a, b)) {
+				for (k = 0; k < csn; k++) {
+					struct cpuset *c = csa[k];
+
+					if (c->pn == bpn)
+						c->pn = apn;
+				}
+				ndoms--;	/* one less element */
+				goto restart;
+			}
+		}
+	}
+
+	/* Convert <csn, csa> to <ndoms, doms> */
+	doms = kmalloc(ndoms * sizeof(cpumask_t), GFP_KERNEL);
+	if (!doms)
+		goto rebuild;
+
+	for (nslot = 0, i = 0; i < csn; i++) {
+		struct cpuset *a = csa[i];
+		int apn = a->pn;
+
+		if (apn >= 0) {
+			cpumask_t *dp = doms + nslot;
+
+			if (nslot == ndoms) {
+				static int warnings = 10;
+				if (warnings) {
+					printk(KERN_WARNING
+					 "rebuild_sched_domains confused:"
+					 " nslot %d, ndoms %d, csn %d, i %d,"
+					 " apn %d\n",
+					 nslot, ndoms, csn, i, apn);
+					warnings--;
+				}
+				continue;
+			}
+
+			cpus_clear(*dp);
+			for (j = i; j < csn; j++) {
+				struct cpuset *b = csa[j];
+
+				if (apn == b->pn) {
+					cpus_or(*dp, *dp, b->cpus_allowed);
+					b->pn = -1;
+				}
+			}
+			nslot++;
+		}
+	}
+	BUG_ON(nslot != ndoms);
+
+rebuild:
+	/* Have scheduler rebuild sched domains */
+	lock_cpu_hotplug();
+	partition_sched_domains(ndoms, doms);
+	unlock_cpu_hotplug();
+
+done:
+	if (q && !IS_ERR(q))
+		kfifo_free(q);
+	kfree(csa);
+	/* Don't kfree(doms) -- partition_sched_domains() does that. */
+}
705 | static inline int started_after_time(struct task_struct *t1, | ||
706 | struct timespec *time, | ||
707 | struct task_struct *t2) | ||
708 | { | ||
709 | int start_diff = timespec_compare(&t1->start_time, time); | ||
710 | if (start_diff > 0) { | ||
711 | return 1; | ||
712 | } else if (start_diff < 0) { | ||
713 | return 0; | ||
714 | } else { | ||
715 | /* | ||
716 | * Arbitrarily, if two processes started at the same | ||
717 | * time, we'll say that the lower pointer value | ||
718 | * started first. Note that t2 may have exited by now | ||
719 | * so this may not be a valid pointer any longer, but | ||
720 | * that's fine - it still serves to distinguish | ||
721 | * between two tasks started (effectively) | ||
722 | * simultaneously. | ||
723 | */ | ||
724 | return t1 > t2; | ||
725 | } | ||
726 | } | ||
727 | |||
728 | static inline int started_after(void *p1, void *p2) | ||
729 | { | ||
730 | struct task_struct *t1 = p1; | ||
731 | struct task_struct *t2 = p2; | ||
732 | return started_after_time(t1, &t2->start_time, t2); | ||
733 | } | ||
734 | |||
735 | /* | ||
758 | * Call with manage_mutex held. May take callback_mutex during call. | 736 | * Call with manage_mutex held. May take callback_mutex during call. |
759 | */ | 737 | */ |
760 | 738 | ||
761 | static int update_cpumask(struct cpuset *cs, char *buf) | 739 | static int update_cpumask(struct cpuset *cs, char *buf) |
762 | { | 740 | { |
763 | struct cpuset trialcs; | 741 | struct cpuset trialcs; |
764 | int retval; | 742 | int retval, i; |
743 | int is_load_balanced; | ||
744 | struct cgroup_iter it; | ||
745 | struct cgroup *cgrp = cs->css.cgroup; | ||
746 | struct task_struct *p, *dropped; | ||
747 | /* Never dereference latest_task, since it's not refcounted */ | ||
748 | struct task_struct *latest_task = NULL; | ||
749 | struct ptr_heap heap; | ||
750 | struct timespec latest_time = { 0, 0 }; | ||
765 | 751 | ||
766 | /* top_cpuset.cpus_allowed tracks cpu_online_map; it's read-only */ | 752 | /* top_cpuset.cpus_allowed tracks cpu_online_map; it's read-only */ |
767 | if (cs == &top_cpuset) | 753 | if (cs == &top_cpuset) |
@@ -770,11 +756,13 @@ static int update_cpumask(struct cpuset *cs, char *buf)
 	trialcs = *cs;
 
 	/*
-	 * We allow a cpuset's cpus_allowed to be empty; if it has attached
-	 * tasks, we'll catch it later when we validate the change and return
-	 * -ENOSPC.
+	 * An empty cpus_allowed is ok iff there are no tasks in the cpuset.
+	 * Since cpulist_parse() fails on an empty mask, we special case
+	 * that parsing.  The validate_change() call ensures that cpusets
+	 * with tasks have cpus.
 	 */
-	if (!buf[0] || (buf[0] == '\n' && !buf[1])) {
+	buf = strstrip(buf);
+	if (!*buf) {
 		cpus_clear(trialcs.cpus_allowed);
 	} else {
 		retval = cpulist_parse(buf, trialcs.cpus_allowed);
@@ -782,15 +770,79 @@ static int update_cpumask(struct cpuset *cs, char *buf)
 			return retval;
 	}
 	cpus_and(trialcs.cpus_allowed, trialcs.cpus_allowed, cpu_online_map);
-	/* cpus_allowed cannot be empty for a cpuset with attached tasks. */
-	if (atomic_read(&cs->count) && cpus_empty(trialcs.cpus_allowed))
-		return -ENOSPC;
 	retval = validate_change(cs, &trialcs);
 	if (retval < 0)
 		return retval;
+
+	/* Nothing to do if the cpus didn't change */
+	if (cpus_equal(cs->cpus_allowed, trialcs.cpus_allowed))
+		return 0;
+	retval = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, &started_after);
+	if (retval)
+		return retval;
+
+	is_load_balanced = is_sched_load_balance(&trialcs);
+
 	mutex_lock(&callback_mutex);
 	cs->cpus_allowed = trialcs.cpus_allowed;
 	mutex_unlock(&callback_mutex);
+
+again:
+	/*
+	 * Scan tasks in the cpuset, and update the cpumasks of any
+	 * that need an update.  Since we can't call set_cpus_allowed()
+	 * while holding tasklist_lock, gather tasks to be processed
+	 * in a heap structure.  If the statically-sized heap fills up,
+	 * overflow tasks that started later, and in future iterations
+	 * only consider tasks that started after the latest task in
+	 * the previous pass.  This guarantees forward progress and
+	 * that we don't miss any tasks
+	 */
+	heap.size = 0;
+	cgroup_iter_start(cgrp, &it);
+	while ((p = cgroup_iter_next(cgrp, &it))) {
+		/* Only affect tasks that don't have the right cpus_allowed */
+		if (cpus_equal(p->cpus_allowed, cs->cpus_allowed))
+			continue;
+		/*
+		 * Only process tasks that started after the last task
+		 * we processed
+		 */
+		if (!started_after_time(p, &latest_time, latest_task))
+			continue;
+		dropped = heap_insert(&heap, p);
+		if (dropped == NULL) {
+			get_task_struct(p);
+		} else if (dropped != p) {
+			get_task_struct(p);
+			put_task_struct(dropped);
+		}
+	}
+	cgroup_iter_end(cgrp, &it);
+	if (heap.size) {
+		for (i = 0; i < heap.size; i++) {
+			struct task_struct *p = heap.ptrs[i];
+			if (i == 0) {
+				latest_time = p->start_time;
+				latest_task = p;
+			}
+			set_cpus_allowed(p, cs->cpus_allowed);
+			put_task_struct(p);
+		}
+		/*
+		 * If we had to process any tasks at all, scan again
+		 * in case some of them were in the middle of forking
+		 * children that didn't notice the new cpumask
+		 * restriction.  Not the most efficient way to do it,
+		 * but it avoids having to take callback_mutex in the
+		 * fork path
+		 */
+		goto again;
+	}
+	heap_free(&heap);
+	if (is_load_balanced)
+		rebuild_sched_domains();
+
 	return 0;
 }
 
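The comment inside that scan describes a general pattern: gather at most a fixed number of not-yet-processed items per pass, remember the newest one handled, and rescan for strictly newer items until nothing qualifies. A self-contained userspace sketch of that forward-progress argument, with task start times reduced to plain integers (the kernel additionally tie-breaks equal start times by pointer; this sketch assumes distinct values, and its sorted buffer stands in for the kernel's ptr_heap):

#include <stdio.h>

#define CAP 3	/* tiny batch capacity, to force several passes */

/* One pass: gather up to CAP values strictly greater than 'floor',
 * always keeping the CAP smallest such values so nothing is skipped. */
static int collect(const int *start, int n, int floor, int *out)
{
	int cnt = 0, i, j;

	for (i = 0; i < n; i++) {
		if (start[i] <= floor)
			continue;	/* handled in an earlier pass */
		if (cnt < CAP) {
			out[cnt++] = start[i];
		} else {
			int max = 0;	/* index of largest kept value */
			for (j = 1; j < CAP; j++)
				if (out[j] > out[max])
					max = j;
			if (start[i] < out[max])
				out[max] = start[i];	/* overflow the later one */
		}
	}
	return cnt;
}

int main(void)
{
	int start[] = { 5, 1, 4, 2, 8, 7, 6, 3 };	/* "task start times" */
	int batch[CAP], floor = 0, done = 0, cnt, i;

	while ((cnt = collect(start, 8, floor, batch)) > 0) {
		int newest = floor;

		for (i = 0; i < cnt; i++) {
			done++;			/* "process" this task */
			if (batch[i] > newest)
				newest = batch[i];
		}
		floor = newest;	/* next pass: only strictly newer tasks */
	}
	printf("processed %d tasks\n", done);	/* processed 8 tasks */
	return 0;
}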
@@ -839,7 +891,7 @@ static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
 	do_migrate_pages(mm, from, to, MPOL_MF_MOVE_ALL);
 
 	mutex_lock(&callback_mutex);
-	guarantee_online_mems(tsk->cpuset, &tsk->mems_allowed);
+	guarantee_online_mems(task_cs(tsk),&tsk->mems_allowed);
 	mutex_unlock(&callback_mutex);
 }
 
@@ -857,16 +909,19 @@ static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
  * their mempolicies to the cpusets new mems_allowed.
  */
 
+static void *cpuset_being_rebound;
+
 static int update_nodemask(struct cpuset *cs, char *buf)
 {
 	struct cpuset trialcs;
 	nodemask_t oldmem;
-	struct task_struct *g, *p;
+	struct task_struct *p;
 	struct mm_struct **mmarray;
 	int i, n, ntasks;
 	int migrate;
 	int fudge;
 	int retval;
+	struct cgroup_iter it;
 
 	/*
 	 * top_cpuset.mems_allowed tracks node_states[N_HIGH_MEMORY];
@@ -878,29 +933,19 @@ static int update_nodemask(struct cpuset *cs, char *buf)
 	trialcs = *cs;
 
 	/*
-	 * We allow a cpuset's mems_allowed to be empty; if it has attached
-	 * tasks, we'll catch it later when we validate the change and return
-	 * -ENOSPC.
+	 * An empty mems_allowed is ok iff there are no tasks in the cpuset.
+	 * Since nodelist_parse() fails on an empty mask, we special case
+	 * that parsing.  The validate_change() call ensures that cpusets
+	 * with tasks have memory.
 	 */
-	if (!buf[0] || (buf[0] == '\n' && !buf[1])) {
+	buf = strstrip(buf);
+	if (!*buf) {
 		nodes_clear(trialcs.mems_allowed);
 	} else {
 		retval = nodelist_parse(buf, trialcs.mems_allowed);
 		if (retval < 0)
 			goto done;
-		if (!nodes_intersects(trialcs.mems_allowed,
-				node_states[N_HIGH_MEMORY])) {
-			/*
-			 * error if only memoryless nodes specified.
-			 */
-			retval = -ENOSPC;
-			goto done;
-		}
 	}
-	/*
-	 * Exclude memoryless nodes.  We know that trialcs.mems_allowed
-	 * contains at least one node with memory.
-	 */
 	nodes_and(trialcs.mems_allowed, trialcs.mems_allowed,
 			node_states[N_HIGH_MEMORY]);
 	oldmem = cs->mems_allowed;
@@ -908,11 +953,6 @@ static int update_nodemask(struct cpuset *cs, char *buf)
 		retval = 0;		/* Too easy - nothing to do */
 		goto done;
 	}
-	/* mems_allowed cannot be empty for a cpuset with attached tasks. */
-	if (atomic_read(&cs->count) && nodes_empty(trialcs.mems_allowed)) {
-		retval = -ENOSPC;
-		goto done;
-	}
 	retval = validate_change(cs, &trialcs);
 	if (retval < 0)
 		goto done;
@@ -922,7 +962,7 @@ static int update_nodemask(struct cpuset *cs, char *buf)
 	cs->mems_generation = cpuset_mems_generation++;
 	mutex_unlock(&callback_mutex);
 
-	set_cpuset_being_rebound(cs);	/* causes mpol_copy() rebind */
+	cpuset_being_rebound = cs;	/* causes mpol_copy() rebind */
 
 	fudge = 10;				/* spare mmarray[] slots */
 	fudge += cpus_weight(cs->cpus_allowed);	/* imagine one fork-bomb/cpu */
@@ -936,13 +976,13 @@ static int update_nodemask(struct cpuset *cs, char *buf)
 	 * enough mmarray[] w/o using GFP_ATOMIC.
 	 */
 	while (1) {
-		ntasks = atomic_read(&cs->count);	/* guess */
+		ntasks = cgroup_task_count(cs->css.cgroup);  /* guess */
 		ntasks += fudge;
 		mmarray = kmalloc(ntasks * sizeof(*mmarray), GFP_KERNEL);
 		if (!mmarray)
 			goto done;
 		read_lock(&tasklist_lock);		/* block fork */
-		if (atomic_read(&cs->count) <= ntasks)
+		if (cgroup_task_count(cs->css.cgroup) <= ntasks)
 			break;				/* got enough */
 		read_unlock(&tasklist_lock);		/* try again */
 		kfree(mmarray);
@@ -951,21 +991,21 @@ static int update_nodemask(struct cpuset *cs, char *buf)
 	n = 0;
 
 	/* Load up mmarray[] with mm reference for each task in cpuset. */
-	do_each_thread(g, p) {
+	cgroup_iter_start(cs->css.cgroup, &it);
+	while ((p = cgroup_iter_next(cs->css.cgroup, &it))) {
 		struct mm_struct *mm;
 
 		if (n >= ntasks) {
 			printk(KERN_WARNING
 				"Cpuset mempolicy rebind incomplete.\n");
-			continue;
+			break;
 		}
-		if (p->cpuset != cs)
-			continue;
 		mm = get_task_mm(p);
 		if (!mm)
 			continue;
 		mmarray[n++] = mm;
-	} while_each_thread(g, p);
+	}
+	cgroup_iter_end(cs->css.cgroup, &it);
 	read_unlock(&tasklist_lock);
 
 	/*
@@ -993,12 +1033,17 @@ static int update_nodemask(struct cpuset *cs, char *buf)
 
 	/* We're done rebinding vma's to this cpusets new mems_allowed. */
 	kfree(mmarray);
-	set_cpuset_being_rebound(NULL);
+	cpuset_being_rebound = NULL;
 	retval = 0;
 done:
 	return retval;
 }
 
+int current_cpuset_is_being_rebound(void)
+{
+	return task_cs(current) == cpuset_being_rebound;
+}
+
 /*
  * Call with manage_mutex held.
  */
@@ -1015,6 +1060,7 @@ static int update_memory_pressure_enabled(struct cpuset *cs, char *buf)
 /*
  * update_flag - read a 0 or a 1 in a file and update associated flag
  * bit:		the bit to update (CS_CPU_EXCLUSIVE, CS_MEM_EXCLUSIVE,
+ *				CS_SCHED_LOAD_BALANCE,
  *				CS_NOTIFY_ON_RELEASE, CS_MEMORY_MIGRATE,
  *				CS_SPREAD_PAGE, CS_SPREAD_SLAB)
  * cs:		the cpuset to update
@@ -1028,6 +1074,7 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, char *buf)
 	int turning_on;
 	struct cpuset trialcs;
 	int err;
+	int cpus_nonempty, balance_flag_changed;
 
 	turning_on = (simple_strtoul(buf, NULL, 10) != 0);
 
@@ -1040,10 +1087,18 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs, char *buf)
 	err = validate_change(cs, &trialcs);
 	if (err < 0)
 		return err;
+
+	cpus_nonempty = !cpus_empty(trialcs.cpus_allowed);
+	balance_flag_changed = (is_sched_load_balance(cs) !=
+				is_sched_load_balance(&trialcs));
+
 	mutex_lock(&callback_mutex);
 	cs->flags = trialcs.flags;
 	mutex_unlock(&callback_mutex);
 
+	if (cpus_nonempty && balance_flag_changed)
+		rebuild_sched_domains();
+
 	return 0;
 }
 
@@ -1145,85 +1200,34 @@ static int fmeter_getrate(struct fmeter *fmp)
 	return val;
 }
 
-/*
- * Attack task specified by pid in 'pidbuf' to cpuset 'cs', possibly
- * writing the path of the old cpuset in 'ppathbuf' if it needs to be
- * notified on release.
- *
- * Call holding manage_mutex.  May take callback_mutex and task_lock of
- * the task 'pid' during call.
- */
-
-static int attach_task(struct cpuset *cs, char *pidbuf, char **ppathbuf)
+static int cpuset_can_attach(struct cgroup_subsys *ss,
+			     struct cgroup *cont, struct task_struct *tsk)
 {
-	pid_t pid;
-	struct task_struct *tsk;
-	struct cpuset *oldcs;
-	cpumask_t cpus;
-	nodemask_t from, to;
-	struct mm_struct *mm;
-	int retval;
+	struct cpuset *cs = cgroup_cs(cont);
 
-	if (sscanf(pidbuf, "%d", &pid) != 1)
-		return -EIO;
 	if (cpus_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))
 		return -ENOSPC;
 
-	if (pid) {
-		read_lock(&tasklist_lock);
-
-		tsk = find_task_by_pid(pid);
-		if (!tsk || tsk->flags & PF_EXITING) {
-			read_unlock(&tasklist_lock);
-			return -ESRCH;
-		}
-
-		get_task_struct(tsk);
-		read_unlock(&tasklist_lock);
-
-		if ((current->euid) && (current->euid != tsk->uid)
-		    && (current->euid != tsk->suid)) {
-			put_task_struct(tsk);
-			return -EACCES;
-		}
-	} else {
-		tsk = current;
-		get_task_struct(tsk);
-	}
+	return security_task_setscheduler(tsk, 0, NULL);
+}
 
-	retval = security_task_setscheduler(tsk, 0, NULL);
-	if (retval) {
-		put_task_struct(tsk);
-		return retval;
-	}
+static void cpuset_attach(struct cgroup_subsys *ss,
+			  struct cgroup *cont, struct cgroup *oldcont,
+			  struct task_struct *tsk)
+{
+	cpumask_t cpus;
+	nodemask_t from, to;
+	struct mm_struct *mm;
+	struct cpuset *cs = cgroup_cs(cont);
+	struct cpuset *oldcs = cgroup_cs(oldcont);
 
 	mutex_lock(&callback_mutex);
-
-	task_lock(tsk);
-	oldcs = tsk->cpuset;
-	/*
-	 * After getting 'oldcs' cpuset ptr, be sure still not exiting.
-	 * If 'oldcs' might be the top_cpuset due to the_top_cpuset_hack
-	 * then fail this attach_task(), to avoid breaking top_cpuset.count.
-	 */
-	if (tsk->flags & PF_EXITING) {
-		task_unlock(tsk);
-		mutex_unlock(&callback_mutex);
-		put_task_struct(tsk);
-		return -ESRCH;
-	}
-	atomic_inc(&cs->count);
-	rcu_assign_pointer(tsk->cpuset, cs);
-	task_unlock(tsk);
-
 	guarantee_online_cpus(cs, &cpus);
 	set_cpus_allowed(tsk, cpus);
+	mutex_unlock(&callback_mutex);
 
 	from = oldcs->mems_allowed;
 	to = cs->mems_allowed;
-
-	mutex_unlock(&callback_mutex);
-
 	mm = get_task_mm(tsk);
 	if (mm) {
 		mpol_rebind_mm(mm, &to);
@@ -1232,44 +1236,36 @@ static void cpuset_attach(struct cgroup_subsys *ss,
 		mmput(mm);
 	}
 
-	put_task_struct(tsk);
-	synchronize_rcu();
-	if (atomic_dec_and_test(&oldcs->count))
-		check_for_release(oldcs, ppathbuf);
-	return 0;
 }
 
 /* The various types of files and directories in a cpuset file system */
 
 typedef enum {
-	FILE_ROOT,
-	FILE_DIR,
 	FILE_MEMORY_MIGRATE,
 	FILE_CPULIST,
 	FILE_MEMLIST,
 	FILE_CPU_EXCLUSIVE,
 	FILE_MEM_EXCLUSIVE,
-	FILE_NOTIFY_ON_RELEASE,
+	FILE_SCHED_LOAD_BALANCE,
 	FILE_MEMORY_PRESSURE_ENABLED,
 	FILE_MEMORY_PRESSURE,
 	FILE_SPREAD_PAGE,
 	FILE_SPREAD_SLAB,
-	FILE_TASKLIST,
 } cpuset_filetype_t;
 
-static ssize_t cpuset_common_file_write(struct file *file,
+static ssize_t cpuset_common_file_write(struct cgroup *cont,
+					struct cftype *cft,
+					struct file *file,
 					const char __user *userbuf,
 					size_t nbytes, loff_t *unused_ppos)
 {
-	struct cpuset *cs = __d_cs(file->f_path.dentry->d_parent);
-	struct cftype *cft = __d_cft(file->f_path.dentry);
+	struct cpuset *cs = cgroup_cs(cont);
 	cpuset_filetype_t type = cft->private;
 	char *buffer;
-	char *pathbuf = NULL;
 	int retval = 0;
 
 	/* Crude upper limit on largest legitimate cpulist user might write. */
-	if (nbytes > 100 + 6 * max(NR_CPUS, MAX_NUMNODES))
+	if (nbytes > 100U + 6 * max(NR_CPUS, MAX_NUMNODES))
 		return -E2BIG;
 
 	/* +1 for nul-terminator */
@@ -1282,9 +1278,9 @@ static ssize_t cpuset_common_file_write(struct file *file,
 	}
 	buffer[nbytes] = 0;	/* nul-terminate */
 
-	mutex_lock(&manage_mutex);
+	cgroup_lock();
 
-	if (is_removed(cs)) {
+	if (cgroup_is_removed(cont)) {
 		retval = -ENODEV;
 		goto out2;
 	}
@@ -1302,8 +1298,8 @@ static ssize_t cpuset_common_file_write(struct file *file,
 	case FILE_MEM_EXCLUSIVE:
 		retval = update_flag(CS_MEM_EXCLUSIVE, cs, buffer);
 		break;
-	case FILE_NOTIFY_ON_RELEASE:
-		retval = update_flag(CS_NOTIFY_ON_RELEASE, cs, buffer);
+	case FILE_SCHED_LOAD_BALANCE:
+		retval = update_flag(CS_SCHED_LOAD_BALANCE, cs, buffer);
 		break;
 	case FILE_MEMORY_MIGRATE:
 		retval = update_flag(CS_MEMORY_MIGRATE, cs, buffer);
@@ -1322,9 +1318,6 @@ static ssize_t cpuset_common_file_write(struct file *file,
 		retval = update_flag(CS_SPREAD_SLAB, cs, buffer);
 		cs->mems_generation = cpuset_mems_generation++;
 		break;
-	case FILE_TASKLIST:
-		retval = attach_task(cs, buffer, &pathbuf);
-		break;
 	default:
 		retval = -EINVAL;
 		goto out2;
@@ -1333,30 +1326,12 @@ static ssize_t cpuset_common_file_write(struct file *file, | |||
1333 | if (retval == 0) | 1326 | if (retval == 0) |
1334 | retval = nbytes; | 1327 | retval = nbytes; |
1335 | out2: | 1328 | out2: |
1336 | mutex_unlock(&manage_mutex); | 1329 | cgroup_unlock(); |
1337 | cpuset_release_agent(pathbuf); | ||
1338 | out1: | 1330 | out1: |
1339 | kfree(buffer); | 1331 | kfree(buffer); |
1340 | return retval; | 1332 | return retval; |
1341 | } | 1333 | } |
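[Editorial note: cgroup_cs() above, and task_cs() used later in this patch, are small accessors defined earlier in this file. A minimal sketch of the container_of() pattern they rely on — close to, but not verbatim, the source:

static inline struct cpuset *cgroup_cs(struct cgroup *cont)
{
	/* struct cpuset embeds its cgroup_subsys_state as 'css', so the
	 * subsystem state maps back to the enclosing cpuset */
	return container_of(cgroup_subsys_state(cont, cpuset_subsys_id),
			    struct cpuset, css);
}

static inline struct cpuset *task_cs(struct task_struct *task)
{
	return container_of(task_subsys_state(task, cpuset_subsys_id),
			    struct cpuset, css);
}
]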
1342 | 1334 | ||
1343 | static ssize_t cpuset_file_write(struct file *file, const char __user *buf, | ||
1344 | size_t nbytes, loff_t *ppos) | ||
1345 | { | ||
1346 | ssize_t retval = 0; | ||
1347 | struct cftype *cft = __d_cft(file->f_path.dentry); | ||
1348 | if (!cft) | ||
1349 | return -ENODEV; | ||
1350 | |||
1351 | /* special function ? */ | ||
1352 | if (cft->write) | ||
1353 | retval = cft->write(file, buf, nbytes, ppos); | ||
1354 | else | ||
1355 | retval = cpuset_common_file_write(file, buf, nbytes, ppos); | ||
1356 | |||
1357 | return retval; | ||
1358 | } | ||
1359 | |||
1360 | /* | 1335 | /* |
1361 | * These ascii lists should be read in a single call, by using a user | 1336 | * These ascii lists should be read in a single call, by using a user |
1362 | * buffer large enough to hold the entire map. If read in smaller | 1337 | * buffer large enough to hold the entire map. If read in smaller |
@@ -1391,11 +1366,13 @@ static int cpuset_sprintf_memlist(char *page, struct cpuset *cs) | |||
1391 | return nodelist_scnprintf(page, PAGE_SIZE, mask); | 1366 | return nodelist_scnprintf(page, PAGE_SIZE, mask); |
1392 | } | 1367 | } |
1393 | 1368 | ||
1394 | static ssize_t cpuset_common_file_read(struct file *file, char __user *buf, | 1369 | static ssize_t cpuset_common_file_read(struct cgroup *cont, |
1395 | size_t nbytes, loff_t *ppos) | 1370 | struct cftype *cft, |
1371 | struct file *file, | ||
1372 | char __user *buf, | ||
1373 | size_t nbytes, loff_t *ppos) | ||
1396 | { | 1374 | { |
1397 | struct cftype *cft = __d_cft(file->f_path.dentry); | 1375 | struct cpuset *cs = cgroup_cs(cont); |
1398 | struct cpuset *cs = __d_cs(file->f_path.dentry->d_parent); | ||
1399 | cpuset_filetype_t type = cft->private; | 1376 | cpuset_filetype_t type = cft->private; |
1400 | char *page; | 1377 | char *page; |
1401 | ssize_t retval = 0; | 1378 | ssize_t retval = 0; |
@@ -1419,8 +1396,8 @@ static ssize_t cpuset_common_file_read(struct file *file, char __user *buf, | |||
1419 | case FILE_MEM_EXCLUSIVE: | 1396 | case FILE_MEM_EXCLUSIVE: |
1420 | *s++ = is_mem_exclusive(cs) ? '1' : '0'; | 1397 | *s++ = is_mem_exclusive(cs) ? '1' : '0'; |
1421 | break; | 1398 | break; |
1422 | case FILE_NOTIFY_ON_RELEASE: | 1399 | case FILE_SCHED_LOAD_BALANCE: |
1423 | *s++ = notify_on_release(cs) ? '1' : '0'; | 1400 | *s++ = is_sched_load_balance(cs) ? '1' : '0'; |
1424 | break; | 1401 | break; |
1425 | case FILE_MEMORY_MIGRATE: | 1402 | case FILE_MEMORY_MIGRATE: |
1426 | *s++ = is_memory_migrate(cs) ? '1' : '0'; | 1403 | *s++ = is_memory_migrate(cs) ? '1' : '0'; |
@@ -1449,390 +1426,150 @@ out: | |||
1449 | return retval; | 1426 | return retval; |
1450 | } | 1427 | } |
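[Editorial note: as the comment preceding cpuset_common_file_read() warns, these ascii maps should be fetched with one large read(), not a read loop. A hypothetical userspace sketch — path and buffer size are illustrative:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];		/* at least one page: the kernel formats into PAGE_SIZE */
	int fd = open("/dev/cpuset/mygroup/cpus", O_RDONLY);
	ssize_t n;

	if (fd < 0)
		return 1;
	n = read(fd, buf, sizeof(buf) - 1);	/* one call for the whole map */
	if (n >= 0) {
		buf[n] = '\0';
		printf("cpus: %s", buf);
	}
	close(fd);
	return 0;
}
]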
1451 | 1428 | ||
1452 | static ssize_t cpuset_file_read(struct file *file, char __user *buf, size_t nbytes, | ||
1453 | loff_t *ppos) | ||
1454 | { | ||
1455 | ssize_t retval = 0; | ||
1456 | struct cftype *cft = __d_cft(file->f_path.dentry); | ||
1457 | if (!cft) | ||
1458 | return -ENODEV; | ||
1459 | |||
1460 | /* special function ? */ | ||
1461 | if (cft->read) | ||
1462 | retval = cft->read(file, buf, nbytes, ppos); | ||
1463 | else | ||
1464 | retval = cpuset_common_file_read(file, buf, nbytes, ppos); | ||
1465 | |||
1466 | return retval; | ||
1467 | } | ||
1468 | |||
1469 | static int cpuset_file_open(struct inode *inode, struct file *file) | ||
1470 | { | ||
1471 | int err; | ||
1472 | struct cftype *cft; | ||
1473 | |||
1474 | err = generic_file_open(inode, file); | ||
1475 | if (err) | ||
1476 | return err; | ||
1477 | |||
1478 | cft = __d_cft(file->f_path.dentry); | ||
1479 | if (!cft) | ||
1480 | return -ENODEV; | ||
1481 | if (cft->open) | ||
1482 | err = cft->open(inode, file); | ||
1483 | else | ||
1484 | err = 0; | ||
1485 | |||
1486 | return err; | ||
1487 | } | ||
1488 | |||
1489 | static int cpuset_file_release(struct inode *inode, struct file *file) | ||
1490 | { | ||
1491 | struct cftype *cft = __d_cft(file->f_path.dentry); | ||
1492 | if (cft->release) | ||
1493 | return cft->release(inode, file); | ||
1494 | return 0; | ||
1495 | } | ||
1496 | |||
1497 | /* | ||
1498 | * cpuset_rename - Only allow simple rename of directories in place. | ||
1499 | */ | ||
1500 | static int cpuset_rename(struct inode *old_dir, struct dentry *old_dentry, | ||
1501 | struct inode *new_dir, struct dentry *new_dentry) | ||
1502 | { | ||
1503 | if (!S_ISDIR(old_dentry->d_inode->i_mode)) | ||
1504 | return -ENOTDIR; | ||
1505 | if (new_dentry->d_inode) | ||
1506 | return -EEXIST; | ||
1507 | if (old_dir != new_dir) | ||
1508 | return -EIO; | ||
1509 | return simple_rename(old_dir, old_dentry, new_dir, new_dentry); | ||
1510 | } | ||
1511 | |||
1512 | static const struct file_operations cpuset_file_operations = { | ||
1513 | .read = cpuset_file_read, | ||
1514 | .write = cpuset_file_write, | ||
1515 | .llseek = generic_file_llseek, | ||
1516 | .open = cpuset_file_open, | ||
1517 | .release = cpuset_file_release, | ||
1518 | }; | ||
1519 | |||
1520 | static const struct inode_operations cpuset_dir_inode_operations = { | ||
1521 | .lookup = simple_lookup, | ||
1522 | .mkdir = cpuset_mkdir, | ||
1523 | .rmdir = cpuset_rmdir, | ||
1524 | .rename = cpuset_rename, | ||
1525 | }; | ||
1526 | |||
1527 | static int cpuset_create_file(struct dentry *dentry, int mode) | ||
1528 | { | ||
1529 | struct inode *inode; | ||
1530 | |||
1531 | if (!dentry) | ||
1532 | return -ENOENT; | ||
1533 | if (dentry->d_inode) | ||
1534 | return -EEXIST; | ||
1535 | |||
1536 | inode = cpuset_new_inode(mode); | ||
1537 | if (!inode) | ||
1538 | return -ENOMEM; | ||
1539 | |||
1540 | if (S_ISDIR(mode)) { | ||
1541 | inode->i_op = &cpuset_dir_inode_operations; | ||
1542 | inode->i_fop = &simple_dir_operations; | ||
1543 | |||
1544 | /* start off with i_nlink == 2 (for "." entry) */ | ||
1545 | inc_nlink(inode); | ||
1546 | } else if (S_ISREG(mode)) { | ||
1547 | inode->i_size = 0; | ||
1548 | inode->i_fop = &cpuset_file_operations; | ||
1549 | } | ||
1550 | |||
1551 | d_instantiate(dentry, inode); | ||
1552 | dget(dentry); /* Extra count - pin the dentry in core */ | ||
1553 | return 0; | ||
1554 | } | ||
1555 | |||
1556 | /* | ||
1557 | * cpuset_create_dir - create a directory for an object. | ||
1558 | * cs: the cpuset we create the directory for. | ||
1559 | * It must have a valid ->parent field | ||
1560 | * And we are going to fill its ->dentry field. | ||
1561 | * name: The name to give to the cpuset directory. Will be copied. | ||
1562 | * mode: mode to set on new directory. | ||
1563 | */ | ||
1564 | |||
1565 | static int cpuset_create_dir(struct cpuset *cs, const char *name, int mode) | ||
1566 | { | ||
1567 | struct dentry *dentry = NULL; | ||
1568 | struct dentry *parent; | ||
1569 | int error = 0; | ||
1570 | |||
1571 | parent = cs->parent->dentry; | ||
1572 | dentry = cpuset_get_dentry(parent, name); | ||
1573 | if (IS_ERR(dentry)) | ||
1574 | return PTR_ERR(dentry); | ||
1575 | error = cpuset_create_file(dentry, S_IFDIR | mode); | ||
1576 | if (!error) { | ||
1577 | dentry->d_fsdata = cs; | ||
1578 | inc_nlink(parent->d_inode); | ||
1579 | cs->dentry = dentry; | ||
1580 | } | ||
1581 | dput(dentry); | ||
1582 | |||
1583 | return error; | ||
1584 | } | ||
1585 | |||
1586 | static int cpuset_add_file(struct dentry *dir, const struct cftype *cft) | ||
1587 | { | ||
1588 | struct dentry *dentry; | ||
1589 | int error; | ||
1590 | |||
1591 | mutex_lock(&dir->d_inode->i_mutex); | ||
1592 | dentry = cpuset_get_dentry(dir, cft->name); | ||
1593 | if (!IS_ERR(dentry)) { | ||
1594 | error = cpuset_create_file(dentry, 0644 | S_IFREG); | ||
1595 | if (!error) | ||
1596 | dentry->d_fsdata = (void *)cft; | ||
1597 | dput(dentry); | ||
1598 | } else | ||
1599 | error = PTR_ERR(dentry); | ||
1600 | mutex_unlock(&dir->d_inode->i_mutex); | ||
1601 | return error; | ||
1602 | } | ||
1603 | |||
1604 | /* | ||
1605 | * Stuff for reading the 'tasks' file. | ||
1606 | * | ||
1607 | * Reading this file can return large amounts of data if a cpuset has | ||
1608 | * *lots* of attached tasks. So it may need several calls to read(), | ||
1609 | * but we cannot guarantee that the information we produce is correct | ||
1610 | * unless we produce it entirely atomically. | ||
1611 | * | ||
1612 | * Upon tasks file open(), a struct ctr_struct is allocated, that | ||
1613 | * will have a pointer to an array (also allocated here). The struct | ||
1614 | * ctr_struct * is stored in file->private_data. Its resources will | ||
1615 | * be freed by release() when the file is closed. The array is used | ||
1616 | * to sprintf the PIDs and then used by read(). | ||
1617 | */ | ||
1618 | |||
1619 | /* cpusets_tasks_read array */ | ||
1620 | |||
1621 | struct ctr_struct { | ||
1622 | char *buf; | ||
1623 | int bufsz; | ||
1624 | }; | ||
1625 | |||
1626 | /* | ||
1627 | * Load into 'pidarray' up to 'npids' of the tasks using cpuset 'cs'. | ||
1628 | * Return actual number of pids loaded. No need to task_lock(p) | ||
1629 | * when reading out p->cpuset, as we don't really care if it changes | ||
1630 | * on the next cycle, and we are not going to try to dereference it. | ||
1631 | */ | ||
1632 | static int pid_array_load(pid_t *pidarray, int npids, struct cpuset *cs) | ||
1633 | { | ||
1634 | int n = 0; | ||
1635 | struct task_struct *g, *p; | ||
1636 | |||
1637 | read_lock(&tasklist_lock); | ||
1638 | |||
1639 | do_each_thread(g, p) { | ||
1640 | if (p->cpuset == cs) { | ||
1641 | if (unlikely(n == npids)) | ||
1642 | goto array_full; | ||
1643 | pidarray[n++] = p->pid; | ||
1644 | } | ||
1645 | } while_each_thread(g, p); | ||
1646 | |||
1647 | array_full: | ||
1648 | read_unlock(&tasklist_lock); | ||
1649 | return n; | ||
1650 | } | ||
1651 | |||
1652 | static int cmppid(const void *a, const void *b) | ||
1653 | { | ||
1654 | return *(pid_t *)a - *(pid_t *)b; | ||
1655 | } | ||
1656 | |||
1657 | /* | ||
1658 | * Convert array 'a' of 'npids' pid_t's to a string of newline separated | ||
1659 | * decimal pids in 'buf'. Don't write more than 'sz' chars, but return | ||
1660 | * count 'cnt' of how many chars would be written if buf were large enough. | ||
1661 | */ | ||
1662 | static int pid_array_to_buf(char *buf, int sz, pid_t *a, int npids) | ||
1663 | { | ||
1664 | int cnt = 0; | ||
1665 | int i; | ||
1666 | |||
1667 | for (i = 0; i < npids; i++) | ||
1668 | cnt += snprintf(buf + cnt, max(sz - cnt, 0), "%d\n", a[i]); | ||
1669 | return cnt; | ||
1670 | } | ||
1671 | |||
1672 | /* | ||
1673 | * Handle an open on 'tasks' file. Prepare a buffer listing the | ||
1674 | * process id's of tasks currently attached to the cpuset being opened. | ||
1675 | * | ||
1676 | * Does not require any specific cpuset mutexes, and does not take any. | ||
1677 | */ | ||
1678 | static int cpuset_tasks_open(struct inode *unused, struct file *file) | ||
1679 | { | ||
1680 | struct cpuset *cs = __d_cs(file->f_path.dentry->d_parent); | ||
1681 | struct ctr_struct *ctr; | ||
1682 | pid_t *pidarray; | ||
1683 | int npids; | ||
1684 | char c; | ||
1685 | |||
1686 | if (!(file->f_mode & FMODE_READ)) | ||
1687 | return 0; | ||
1688 | |||
1689 | ctr = kmalloc(sizeof(*ctr), GFP_KERNEL); | ||
1690 | if (!ctr) | ||
1691 | goto err0; | ||
1692 | |||
1693 | /* | ||
1694 | * If cpuset gets more users after we read count, we won't have | ||
1695 | * enough space - tough. This race is indistinguishable to the | ||
1696 | * caller from the case that the additional cpuset users didn't | ||
1697 | * show up until sometime later on. | ||
1698 | */ | ||
1699 | npids = atomic_read(&cs->count); | ||
1700 | pidarray = kmalloc(npids * sizeof(pid_t), GFP_KERNEL); | ||
1701 | if (!pidarray) | ||
1702 | goto err1; | ||
1703 | |||
1704 | npids = pid_array_load(pidarray, npids, cs); | ||
1705 | sort(pidarray, npids, sizeof(pid_t), cmppid, NULL); | ||
1706 | |||
1707 | /* Call pid_array_to_buf() twice, first just to get bufsz */ | ||
1708 | ctr->bufsz = pid_array_to_buf(&c, sizeof(c), pidarray, npids) + 1; | ||
1709 | ctr->buf = kmalloc(ctr->bufsz, GFP_KERNEL); | ||
1710 | if (!ctr->buf) | ||
1711 | goto err2; | ||
1712 | ctr->bufsz = pid_array_to_buf(ctr->buf, ctr->bufsz, pidarray, npids); | ||
1713 | |||
1714 | kfree(pidarray); | ||
1715 | file->private_data = ctr; | ||
1716 | return 0; | ||
1717 | |||
1718 | err2: | ||
1719 | kfree(pidarray); | ||
1720 | err1: | ||
1721 | kfree(ctr); | ||
1722 | err0: | ||
1723 | return -ENOMEM; | ||
1724 | } | ||
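[Editorial note: cpuset_tasks_open() sizes its buffer with the classic two-pass snprintf() idiom — pid_array_to_buf() is called once with a 1-byte buffer just to learn the required length, then again for real. A standalone illustration of the idiom, with hypothetical names:

#include <stdio.h>
#include <stdlib.h>

static int pids_to_buf(char *buf, int sz, const int *pids, int n)
{
	int cnt = 0, i;

	for (i = 0; i < n; i++) {
		int room = cnt < sz ? sz - cnt : 0;

		/* snprintf returns the length needed, even when truncated */
		cnt += snprintf(room ? buf + cnt : buf, room, "%d\n", pids[i]);
	}
	return cnt;
}

int main(void)
{
	int pids[] = { 1, 42, 31337 };
	char c;
	int need = pids_to_buf(&c, sizeof(c), pids, 3) + 1;	/* pass 1: measure */
	char *buf = malloc(need);

	if (!buf)
		return 1;
	pids_to_buf(buf, need, pids, 3);			/* pass 2: fill */
	fputs(buf, stdout);
	free(buf);
	return 0;
}
]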
1725 | |||
1726 | static ssize_t cpuset_tasks_read(struct file *file, char __user *buf, | ||
1727 | size_t nbytes, loff_t *ppos) | ||
1728 | { | ||
1729 | struct ctr_struct *ctr = file->private_data; | ||
1730 | 1429 | ||
1731 | return simple_read_from_buffer(buf, nbytes, ppos, ctr->buf, ctr->bufsz); | ||
1732 | } | ||
1733 | 1430 | ||
1734 | static int cpuset_tasks_release(struct inode *unused_inode, struct file *file) | ||
1735 | { | ||
1736 | struct ctr_struct *ctr; | ||
1737 | 1431 | ||
1738 | if (file->f_mode & FMODE_READ) { | ||
1739 | ctr = file->private_data; | ||
1740 | kfree(ctr->buf); | ||
1741 | kfree(ctr); | ||
1742 | } | ||
1743 | return 0; | ||
1744 | } | ||
1745 | 1432 | ||
1746 | /* | 1433 | /* |
1747 | * for the common functions, 'private' gives the type of file | 1434 | * for the common functions, 'private' gives the type of file |
1748 | */ | 1435 | */ |
1749 | 1436 | ||
1750 | static struct cftype cft_tasks = { | ||
1751 | .name = "tasks", | ||
1752 | .open = cpuset_tasks_open, | ||
1753 | .read = cpuset_tasks_read, | ||
1754 | .release = cpuset_tasks_release, | ||
1755 | .private = FILE_TASKLIST, | ||
1756 | }; | ||
1757 | |||
1758 | static struct cftype cft_cpus = { | 1437 | static struct cftype cft_cpus = { |
1759 | .name = "cpus", | 1438 | .name = "cpus", |
1439 | .read = cpuset_common_file_read, | ||
1440 | .write = cpuset_common_file_write, | ||
1760 | .private = FILE_CPULIST, | 1441 | .private = FILE_CPULIST, |
1761 | }; | 1442 | }; |
1762 | 1443 | ||
1763 | static struct cftype cft_mems = { | 1444 | static struct cftype cft_mems = { |
1764 | .name = "mems", | 1445 | .name = "mems", |
1446 | .read = cpuset_common_file_read, | ||
1447 | .write = cpuset_common_file_write, | ||
1765 | .private = FILE_MEMLIST, | 1448 | .private = FILE_MEMLIST, |
1766 | }; | 1449 | }; |
1767 | 1450 | ||
1768 | static struct cftype cft_cpu_exclusive = { | 1451 | static struct cftype cft_cpu_exclusive = { |
1769 | .name = "cpu_exclusive", | 1452 | .name = "cpu_exclusive", |
1453 | .read = cpuset_common_file_read, | ||
1454 | .write = cpuset_common_file_write, | ||
1770 | .private = FILE_CPU_EXCLUSIVE, | 1455 | .private = FILE_CPU_EXCLUSIVE, |
1771 | }; | 1456 | }; |
1772 | 1457 | ||
1773 | static struct cftype cft_mem_exclusive = { | 1458 | static struct cftype cft_mem_exclusive = { |
1774 | .name = "mem_exclusive", | 1459 | .name = "mem_exclusive", |
1460 | .read = cpuset_common_file_read, | ||
1461 | .write = cpuset_common_file_write, | ||
1775 | .private = FILE_MEM_EXCLUSIVE, | 1462 | .private = FILE_MEM_EXCLUSIVE, |
1776 | }; | 1463 | }; |
1777 | 1464 | ||
1778 | static struct cftype cft_notify_on_release = { | 1465 | static struct cftype cft_sched_load_balance = { |
1779 | .name = "notify_on_release", | 1466 | .name = "sched_load_balance", |
1780 | .private = FILE_NOTIFY_ON_RELEASE, | 1467 | .read = cpuset_common_file_read, |
1468 | .write = cpuset_common_file_write, | ||
1469 | .private = FILE_SCHED_LOAD_BALANCE, | ||
1781 | }; | 1470 | }; |
1782 | 1471 | ||
1783 | static struct cftype cft_memory_migrate = { | 1472 | static struct cftype cft_memory_migrate = { |
1784 | .name = "memory_migrate", | 1473 | .name = "memory_migrate", |
1474 | .read = cpuset_common_file_read, | ||
1475 | .write = cpuset_common_file_write, | ||
1785 | .private = FILE_MEMORY_MIGRATE, | 1476 | .private = FILE_MEMORY_MIGRATE, |
1786 | }; | 1477 | }; |
1787 | 1478 | ||
1788 | static struct cftype cft_memory_pressure_enabled = { | 1479 | static struct cftype cft_memory_pressure_enabled = { |
1789 | .name = "memory_pressure_enabled", | 1480 | .name = "memory_pressure_enabled", |
1481 | .read = cpuset_common_file_read, | ||
1482 | .write = cpuset_common_file_write, | ||
1790 | .private = FILE_MEMORY_PRESSURE_ENABLED, | 1483 | .private = FILE_MEMORY_PRESSURE_ENABLED, |
1791 | }; | 1484 | }; |
1792 | 1485 | ||
1793 | static struct cftype cft_memory_pressure = { | 1486 | static struct cftype cft_memory_pressure = { |
1794 | .name = "memory_pressure", | 1487 | .name = "memory_pressure", |
1488 | .read = cpuset_common_file_read, | ||
1489 | .write = cpuset_common_file_write, | ||
1795 | .private = FILE_MEMORY_PRESSURE, | 1490 | .private = FILE_MEMORY_PRESSURE, |
1796 | }; | 1491 | }; |
1797 | 1492 | ||
1798 | static struct cftype cft_spread_page = { | 1493 | static struct cftype cft_spread_page = { |
1799 | .name = "memory_spread_page", | 1494 | .name = "memory_spread_page", |
1495 | .read = cpuset_common_file_read, | ||
1496 | .write = cpuset_common_file_write, | ||
1800 | .private = FILE_SPREAD_PAGE, | 1497 | .private = FILE_SPREAD_PAGE, |
1801 | }; | 1498 | }; |
1802 | 1499 | ||
1803 | static struct cftype cft_spread_slab = { | 1500 | static struct cftype cft_spread_slab = { |
1804 | .name = "memory_spread_slab", | 1501 | .name = "memory_spread_slab", |
1502 | .read = cpuset_common_file_read, | ||
1503 | .write = cpuset_common_file_write, | ||
1805 | .private = FILE_SPREAD_SLAB, | 1504 | .private = FILE_SPREAD_SLAB, |
1806 | }; | 1505 | }; |
1807 | 1506 | ||
1808 | static int cpuset_populate_dir(struct dentry *cs_dentry) | 1507 | static int cpuset_populate(struct cgroup_subsys *ss, struct cgroup *cont) |
1809 | { | 1508 | { |
1810 | int err; | 1509 | int err; |
1811 | 1510 | ||
1812 | if ((err = cpuset_add_file(cs_dentry, &cft_cpus)) < 0) | 1511 | if ((err = cgroup_add_file(cont, ss, &cft_cpus)) < 0) |
1813 | return err; | 1512 | return err; |
1814 | if ((err = cpuset_add_file(cs_dentry, &cft_mems)) < 0) | 1513 | if ((err = cgroup_add_file(cont, ss, &cft_mems)) < 0) |
1815 | return err; | 1514 | return err; |
1816 | if ((err = cpuset_add_file(cs_dentry, &cft_cpu_exclusive)) < 0) | 1515 | if ((err = cgroup_add_file(cont, ss, &cft_cpu_exclusive)) < 0) |
1817 | return err; | 1516 | return err; |
1818 | if ((err = cpuset_add_file(cs_dentry, &cft_mem_exclusive)) < 0) | 1517 | if ((err = cgroup_add_file(cont, ss, &cft_mem_exclusive)) < 0) |
1819 | return err; | 1518 | return err; |
1820 | if ((err = cpuset_add_file(cs_dentry, &cft_notify_on_release)) < 0) | 1519 | if ((err = cgroup_add_file(cont, ss, &cft_memory_migrate)) < 0) |
1821 | return err; | 1520 | return err; |
1822 | if ((err = cpuset_add_file(cs_dentry, &cft_memory_migrate)) < 0) | 1521 | if ((err = cgroup_add_file(cont, ss, &cft_sched_load_balance)) < 0) |
1823 | return err; | 1522 | return err; |
1824 | if ((err = cpuset_add_file(cs_dentry, &cft_memory_pressure)) < 0) | 1523 | if ((err = cgroup_add_file(cont, ss, &cft_memory_pressure)) < 0) |
1825 | return err; | 1524 | return err; |
1826 | if ((err = cpuset_add_file(cs_dentry, &cft_spread_page)) < 0) | 1525 | if ((err = cgroup_add_file(cont, ss, &cft_spread_page)) < 0) |
1827 | return err; | 1526 | return err; |
1828 | if ((err = cpuset_add_file(cs_dentry, &cft_spread_slab)) < 0) | 1527 | if ((err = cgroup_add_file(cont, ss, &cft_spread_slab)) < 0) |
1829 | return err; | ||
1830 | if ((err = cpuset_add_file(cs_dentry, &cft_tasks)) < 0) | ||
1831 | return err; | 1528 | return err; |
1529 | /* memory_pressure_enabled is in root cpuset only */ | ||
1530 | if (err == 0 && !cont->parent) | ||
1531 | err = cgroup_add_file(cont, ss, | ||
1532 | &cft_memory_pressure_enabled); | ||
1832 | return 0; | 1533 | return 0; |
1833 | } | 1534 | } |
1834 | 1535 | ||
1835 | /* | 1536 | /* |
1537 | * post_clone() is called at the end of cgroup_clone(). | ||
1538 | * 'cgroup' was just created automatically as a result of | ||
1539 | * a cgroup_clone(), and the current task is about to | ||
1540 | * be moved into 'cgroup'. | ||
1541 | * | ||
1542 | * Currently we refuse to set up the cgroup - thereby | ||
1543 | * refusing the task to be entered, and as a result refusing | ||
1544 | * the sys_unshare() or clone() which initiated it - if any | ||
1545 | * sibling cpusets have exclusive cpus or mem. | ||
1546 | * | ||
1547 | * If this becomes a problem for some users who wish to | ||
1548 | * allow that scenario, then cpuset_post_clone() could be | ||
1549 | * changed to grant parent->cpus_allowed-sibling_cpus_exclusive | ||
1550 | * (and likewise for mems) to the new cgroup. | ||
1551 | */ | ||
1552 | static void cpuset_post_clone(struct cgroup_subsys *ss, | ||
1553 | struct cgroup *cgroup) | ||
1554 | { | ||
1555 | struct cgroup *parent, *child; | ||
1556 | struct cpuset *cs, *parent_cs; | ||
1557 | |||
1558 | parent = cgroup->parent; | ||
1559 | list_for_each_entry(child, &parent->children, sibling) { | ||
1560 | cs = cgroup_cs(child); | ||
1561 | if (is_mem_exclusive(cs) || is_cpu_exclusive(cs)) | ||
1562 | return; | ||
1563 | } | ||
1564 | cs = cgroup_cs(cgroup); | ||
1565 | parent_cs = cgroup_cs(parent); | ||
1566 | |||
1567 | cs->mems_allowed = parent_cs->mems_allowed; | ||
1568 | cs->cpus_allowed = parent_cs->cpus_allowed; | ||
1569 | return; | ||
1570 | } | ||
1571 | |||
1572 | /* | ||
1836 | * cpuset_create - create a cpuset | 1573 | * cpuset_create - create a cpuset |
1837 | * parent: cpuset that will be parent of the new cpuset. | 1574 | * parent: cpuset that will be parent of the new cpuset. |
1838 | * name: name of the new cpuset. Will be strcpy'ed. | 1575 | * name: name of the new cpuset. Will be strcpy'ed. |
@@ -1841,106 +1578,77 @@ static int cpuset_populate_dir(struct dentry *cs_dentry) | |||
1841 | * Must be called with the mutex on the parent inode held | 1578 | * Must be called with the mutex on the parent inode held |
1842 | */ | 1579 | */ |
1843 | 1580 | ||
1844 | static long cpuset_create(struct cpuset *parent, const char *name, int mode) | 1581 | static struct cgroup_subsys_state *cpuset_create( |
1582 | struct cgroup_subsys *ss, | ||
1583 | struct cgroup *cont) | ||
1845 | { | 1584 | { |
1846 | struct cpuset *cs; | 1585 | struct cpuset *cs; |
1847 | int err; | 1586 | struct cpuset *parent; |
1848 | 1587 | ||
1588 | if (!cont->parent) { | ||
1589 | /* This is early initialization for the top cgroup */ | ||
1590 | top_cpuset.mems_generation = cpuset_mems_generation++; | ||
1591 | return &top_cpuset.css; | ||
1592 | } | ||
1593 | parent = cgroup_cs(cont->parent); | ||
1849 | cs = kmalloc(sizeof(*cs), GFP_KERNEL); | 1594 | cs = kmalloc(sizeof(*cs), GFP_KERNEL); |
1850 | if (!cs) | 1595 | if (!cs) |
1851 | return -ENOMEM; | 1596 | return ERR_PTR(-ENOMEM); |
1852 | 1597 | ||
1853 | mutex_lock(&manage_mutex); | ||
1854 | cpuset_update_task_memory_state(); | 1598 | cpuset_update_task_memory_state(); |
1855 | cs->flags = 0; | 1599 | cs->flags = 0; |
1856 | if (notify_on_release(parent)) | ||
1857 | set_bit(CS_NOTIFY_ON_RELEASE, &cs->flags); | ||
1858 | if (is_spread_page(parent)) | 1600 | if (is_spread_page(parent)) |
1859 | set_bit(CS_SPREAD_PAGE, &cs->flags); | 1601 | set_bit(CS_SPREAD_PAGE, &cs->flags); |
1860 | if (is_spread_slab(parent)) | 1602 | if (is_spread_slab(parent)) |
1861 | set_bit(CS_SPREAD_SLAB, &cs->flags); | 1603 | set_bit(CS_SPREAD_SLAB, &cs->flags); |
1604 | set_bit(CS_SCHED_LOAD_BALANCE, &cs->flags); | ||
1862 | cs->cpus_allowed = CPU_MASK_NONE; | 1605 | cs->cpus_allowed = CPU_MASK_NONE; |
1863 | cs->mems_allowed = NODE_MASK_NONE; | 1606 | cs->mems_allowed = NODE_MASK_NONE; |
1864 | atomic_set(&cs->count, 0); | ||
1865 | INIT_LIST_HEAD(&cs->sibling); | ||
1866 | INIT_LIST_HEAD(&cs->children); | ||
1867 | cs->mems_generation = cpuset_mems_generation++; | 1607 | cs->mems_generation = cpuset_mems_generation++; |
1868 | fmeter_init(&cs->fmeter); | 1608 | fmeter_init(&cs->fmeter); |
1869 | 1609 | ||
1870 | cs->parent = parent; | 1610 | cs->parent = parent; |
1871 | |||
1872 | mutex_lock(&callback_mutex); | ||
1873 | list_add(&cs->sibling, &cs->parent->children); | ||
1874 | number_of_cpusets++; | 1611 | number_of_cpusets++; |
1875 | mutex_unlock(&callback_mutex); | 1612 | return &cs->css ; |
1876 | |||
1877 | err = cpuset_create_dir(cs, name, mode); | ||
1878 | if (err < 0) | ||
1879 | goto err; | ||
1880 | |||
1881 | /* | ||
1882 | * Release manage_mutex before cpuset_populate_dir() because it | ||
1883 | * will down() this new directory's i_mutex and if we race with | ||
1884 | * another mkdir, we might deadlock. | ||
1885 | */ | ||
1886 | mutex_unlock(&manage_mutex); | ||
1887 | |||
1888 | err = cpuset_populate_dir(cs->dentry); | ||
1889 | /* If err < 0, we have a half-filled directory - oh well ;) */ | ||
1890 | return 0; | ||
1891 | err: | ||
1892 | list_del(&cs->sibling); | ||
1893 | mutex_unlock(&manage_mutex); | ||
1894 | kfree(cs); | ||
1895 | return err; | ||
1896 | } | 1613 | } |
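[Editorial note: the new cpuset_create() reports failure through the kernel's ERR_PTR() convention rather than a plain error code, since it must return a pointer. A userspace re-implementation of that idiom, for illustration only:

#include <stdio.h>

#define MAX_ERRNO 4095	/* errno values fit in the never-valid top page of the address space */

static inline void *ERR_PTR(long error)     { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *p = ERR_PTR(-12);			/* -ENOMEM */

	if (IS_ERR(p))
		printf("failed: %ld\n", PTR_ERR(p));
	return 0;
}
]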
1897 | 1614 | ||
1898 | static int cpuset_mkdir(struct inode *dir, struct dentry *dentry, int mode) | 1615 | /* |
1899 | { | 1616 | * Locking note on the strange update_flag() call below: |
1900 | struct cpuset *c_parent = dentry->d_parent->d_fsdata; | 1617 | * |
1901 | 1618 | * If the cpuset being removed has its flag 'sched_load_balance' | |
1902 | /* the vfs holds inode->i_mutex already */ | 1619 | * enabled, then simulate turning sched_load_balance off, which |
1903 | return cpuset_create(c_parent, dentry->d_name.name, mode | S_IFDIR); | 1620 | * will call rebuild_sched_domains(). The lock_cpu_hotplug() |
1904 | } | 1621 | * call in rebuild_sched_domains() must not be made while holding |
1622 | * callback_mutex. Elsewhere the kernel nests callback_mutex inside | ||
1623 | * lock_cpu_hotplug() calls. So the reverse nesting would risk an | ||
1624 | * ABBA deadlock. | ||
1625 | */ | ||
1905 | 1626 | ||
1906 | static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry) | 1627 | static void cpuset_destroy(struct cgroup_subsys *ss, struct cgroup *cont) |
1907 | { | 1628 | { |
1908 | struct cpuset *cs = dentry->d_fsdata; | 1629 | struct cpuset *cs = cgroup_cs(cont); |
1909 | struct dentry *d; | ||
1910 | struct cpuset *parent; | ||
1911 | char *pathbuf = NULL; | ||
1912 | 1630 | ||
1913 | /* the vfs holds both inode->i_mutex already */ | ||
1914 | |||
1915 | mutex_lock(&manage_mutex); | ||
1916 | cpuset_update_task_memory_state(); | 1631 | cpuset_update_task_memory_state(); |
1917 | if (atomic_read(&cs->count) > 0) { | 1632 | |
1918 | mutex_unlock(&manage_mutex); | 1633 | if (is_sched_load_balance(cs)) |
1919 | return -EBUSY; | 1634 | update_flag(CS_SCHED_LOAD_BALANCE, cs, "0"); |
1920 | } | 1635 | |
1921 | if (!list_empty(&cs->children)) { | ||
1922 | mutex_unlock(&manage_mutex); | ||
1923 | return -EBUSY; | ||
1924 | } | ||
1925 | parent = cs->parent; | ||
1926 | mutex_lock(&callback_mutex); | ||
1927 | set_bit(CS_REMOVED, &cs->flags); | ||
1928 | list_del(&cs->sibling); /* delete my sibling from parent->children */ | ||
1929 | spin_lock(&cs->dentry->d_lock); | ||
1930 | d = dget(cs->dentry); | ||
1931 | cs->dentry = NULL; | ||
1932 | spin_unlock(&d->d_lock); | ||
1933 | cpuset_d_remove_dir(d); | ||
1934 | dput(d); | ||
1935 | number_of_cpusets--; | 1636 | number_of_cpusets--; |
1936 | mutex_unlock(&callback_mutex); | 1637 | kfree(cs); |
1937 | if (list_empty(&parent->children)) | ||
1938 | check_for_release(parent, &pathbuf); | ||
1939 | mutex_unlock(&manage_mutex); | ||
1940 | cpuset_release_agent(pathbuf); | ||
1941 | return 0; | ||
1942 | } | 1638 | } |
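[Editorial note: the ABBA hazard the locking note above describes, reduced to a runnable toy — pthread mutexes stand in for callback_mutex and the hotplug lock; build with -lpthread. The program is expected to hang, which is the point:

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t a = PTHREAD_MUTEX_INITIALIZER;	/* plays callback_mutex */
static pthread_mutex_t b = PTHREAD_MUTEX_INITIALIZER;	/* plays the hotplug lock */

static void *path1(void *unused)	/* the forbidden nesting: a, then b */
{
	pthread_mutex_lock(&a);
	usleep(10000);			/* widen the race window */
	pthread_mutex_lock(&b);		/* blocks while path2 holds b */
	pthread_mutex_unlock(&b);
	pthread_mutex_unlock(&a);
	return NULL;
}

static void *path2(void *unused)	/* the rest of the kernel: b, then a */
{
	pthread_mutex_lock(&b);
	usleep(10000);
	pthread_mutex_lock(&a);		/* blocks while path1 holds a: deadlock */
	pthread_mutex_unlock(&a);
	pthread_mutex_unlock(&b);
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;

	pthread_create(&t1, NULL, path1, NULL);
	pthread_create(&t2, NULL, path2, NULL);
	pthread_join(t1, NULL);		/* usually never returns */
	pthread_join(t2, NULL);
	puts("got lucky: no deadlock this run");
	return 0;
}

This is why cpuset_destroy() must make its update_flag() call without holding callback_mutex.]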
1943 | 1639 | ||
1640 | struct cgroup_subsys cpuset_subsys = { | ||
1641 | .name = "cpuset", | ||
1642 | .create = cpuset_create, | ||
1643 | .destroy = cpuset_destroy, | ||
1644 | .can_attach = cpuset_can_attach, | ||
1645 | .attach = cpuset_attach, | ||
1646 | .populate = cpuset_populate, | ||
1647 | .post_clone = cpuset_post_clone, | ||
1648 | .subsys_id = cpuset_subsys_id, | ||
1649 | .early_init = 1, | ||
1650 | }; | ||
1651 | |||
1944 | /* | 1652 | /* |
1945 | * cpuset_init_early - just enough so that the calls to | 1653 | * cpuset_init_early - just enough so that the calls to |
1946 | * cpuset_update_task_memory_state() in early init code | 1654 | * cpuset_update_task_memory_state() in early init code |
@@ -1949,13 +1657,11 @@ static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry) | |||
1949 | 1657 | ||
1950 | int __init cpuset_init_early(void) | 1658 | int __init cpuset_init_early(void) |
1951 | { | 1659 | { |
1952 | struct task_struct *tsk = current; | 1660 | top_cpuset.mems_generation = cpuset_mems_generation++; |
1953 | |||
1954 | tsk->cpuset = &top_cpuset; | ||
1955 | tsk->cpuset->mems_generation = cpuset_mems_generation++; | ||
1956 | return 0; | 1661 | return 0; |
1957 | } | 1662 | } |
1958 | 1663 | ||
1664 | |||
1959 | /** | 1665 | /** |
1960 | * cpuset_init - initialize cpusets at system boot | 1666 | * cpuset_init - initialize cpusets at system boot |
1961 | * | 1667 | * |
@@ -1964,39 +1670,21 @@ int __init cpuset_init_early(void) | |||
1964 | 1670 | ||
1965 | int __init cpuset_init(void) | 1671 | int __init cpuset_init(void) |
1966 | { | 1672 | { |
1967 | struct dentry *root; | 1673 | int err = 0; |
1968 | int err; | ||
1969 | 1674 | ||
1970 | top_cpuset.cpus_allowed = CPU_MASK_ALL; | 1675 | top_cpuset.cpus_allowed = CPU_MASK_ALL; |
1971 | top_cpuset.mems_allowed = NODE_MASK_ALL; | 1676 | top_cpuset.mems_allowed = NODE_MASK_ALL; |
1972 | 1677 | ||
1973 | fmeter_init(&top_cpuset.fmeter); | 1678 | fmeter_init(&top_cpuset.fmeter); |
1974 | top_cpuset.mems_generation = cpuset_mems_generation++; | 1679 | top_cpuset.mems_generation = cpuset_mems_generation++; |
1975 | 1680 | set_bit(CS_SCHED_LOAD_BALANCE, &top_cpuset.flags); | |
1976 | init_task.cpuset = &top_cpuset; | ||
1977 | 1681 | ||
1978 | err = register_filesystem(&cpuset_fs_type); | 1682 | err = register_filesystem(&cpuset_fs_type); |
1979 | if (err < 0) | 1683 | if (err < 0) |
1980 | goto out; | 1684 | return err; |
1981 | cpuset_mount = kern_mount(&cpuset_fs_type); | 1685 | |
1982 | if (IS_ERR(cpuset_mount)) { | ||
1983 | printk(KERN_ERR "cpuset: could not mount!\n"); | ||
1984 | err = PTR_ERR(cpuset_mount); | ||
1985 | cpuset_mount = NULL; | ||
1986 | goto out; | ||
1987 | } | ||
1988 | root = cpuset_mount->mnt_sb->s_root; | ||
1989 | root->d_fsdata = &top_cpuset; | ||
1990 | inc_nlink(root->d_inode); | ||
1991 | top_cpuset.dentry = root; | ||
1992 | root->d_inode->i_op = &cpuset_dir_inode_operations; | ||
1993 | number_of_cpusets = 1; | 1686 | number_of_cpusets = 1; |
1994 | err = cpuset_populate_dir(root); | 1687 | return 0; |
1995 | /* memory_pressure_enabled is in root cpuset only */ | ||
1996 | if (err == 0) | ||
1997 | err = cpuset_add_file(root, &cft_memory_pressure_enabled); | ||
1998 | out: | ||
1999 | return err; | ||
2000 | } | 1688 | } |
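[Editorial note: cpuset_init() still registers the legacy filesystem type, so the old mount-based interface keeps working alongside the cgroup one. A hypothetical, root-only userspace sketch of mounting it; the mount point is conventional, not mandated:

#include <stdio.h>
#include <sys/mount.h>
#include <sys/stat.h>

int main(void)
{
	mkdir("/dev/cpuset", 0755);	/* may already exist; error ignored */
	if (mount("cpuset", "/dev/cpuset", "cpuset", 0, NULL) != 0) {
		perror("mount");
		return 1;
	}
	return 0;
}
]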
2001 | 1689 | ||
2002 | /* | 1690 | /* |
@@ -2022,10 +1710,12 @@ out: | |||
2022 | 1710 | ||
2023 | static void guarantee_online_cpus_mems_in_subtree(const struct cpuset *cur) | 1711 | static void guarantee_online_cpus_mems_in_subtree(const struct cpuset *cur) |
2024 | { | 1712 | { |
1713 | struct cgroup *cont; | ||
2025 | struct cpuset *c; | 1714 | struct cpuset *c; |
2026 | 1715 | ||
2027 | /* Each of our child cpusets mems must be online */ | 1716 | /* Each of our child cpusets mems must be online */ |
2028 | list_for_each_entry(c, &cur->children, sibling) { | 1717 | list_for_each_entry(cont, &cur->css.cgroup->children, sibling) { |
1718 | c = cgroup_cs(cont); | ||
2029 | guarantee_online_cpus_mems_in_subtree(c); | 1719 | guarantee_online_cpus_mems_in_subtree(c); |
2030 | if (!cpus_empty(c->cpus_allowed)) | 1720 | if (!cpus_empty(c->cpus_allowed)) |
2031 | guarantee_online_cpus(c, &c->cpus_allowed); | 1721 | guarantee_online_cpus(c, &c->cpus_allowed); |
@@ -2053,7 +1743,7 @@ static void guarantee_online_cpus_mems_in_subtree(const struct cpuset *cur) | |||
2053 | 1743 | ||
2054 | static void common_cpu_mem_hotplug_unplug(void) | 1744 | static void common_cpu_mem_hotplug_unplug(void) |
2055 | { | 1745 | { |
2056 | mutex_lock(&manage_mutex); | 1746 | cgroup_lock(); |
2057 | mutex_lock(&callback_mutex); | 1747 | mutex_lock(&callback_mutex); |
2058 | 1748 | ||
2059 | guarantee_online_cpus_mems_in_subtree(&top_cpuset); | 1749 | guarantee_online_cpus_mems_in_subtree(&top_cpuset); |
@@ -2061,7 +1751,7 @@ static void common_cpu_mem_hotplug_unplug(void) | |||
2061 | top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY]; | 1751 | top_cpuset.mems_allowed = node_states[N_HIGH_MEMORY]; |
2062 | 1752 | ||
2063 | mutex_unlock(&callback_mutex); | 1753 | mutex_unlock(&callback_mutex); |
2064 | mutex_unlock(&manage_mutex); | 1754 | cgroup_unlock(); |
2065 | } | 1755 | } |
2066 | 1756 | ||
2067 | /* | 1757 | /* |
@@ -2074,8 +1764,8 @@ static void common_cpu_mem_hotplug_unplug(void) | |||
2074 | * cpu_online_map on each CPU hotplug (cpuhp) event. | 1764 | * cpu_online_map on each CPU hotplug (cpuhp) event. |
2075 | */ | 1765 | */ |
2076 | 1766 | ||
2077 | static int cpuset_handle_cpuhp(struct notifier_block *nb, | 1767 | static int cpuset_handle_cpuhp(struct notifier_block *unused_nb, |
2078 | unsigned long phase, void *cpu) | 1768 | unsigned long phase, void *unused_cpu) |
2079 | { | 1769 | { |
2080 | if (phase == CPU_DYING || phase == CPU_DYING_FROZEN) | 1770 | if (phase == CPU_DYING || phase == CPU_DYING_FROZEN) |
2081 | return NOTIFY_DONE; | 1771 | return NOTIFY_DONE; |
@@ -2113,109 +1803,7 @@ void __init cpuset_init_smp(void) | |||
2113 | } | 1803 | } |
2114 | 1804 | ||
2115 | /** | 1805 | /** |
2116 | * cpuset_fork - attach newly forked task to its parents cpuset. | ||
2117 | * @tsk: pointer to task_struct of forking parent process. | ||
2118 | * | ||
2119 | * Description: A task inherits its parent's cpuset at fork(). | ||
2120 | * | ||
2121 | * A pointer to the shared cpuset was automatically copied in fork.c | ||
2122 | * by dup_task_struct(). However, we ignore that copy, since it was | ||
2123 | * not made under the protection of task_lock(), so might no longer be | ||
2124 | * a valid cpuset pointer. attach_task() might have already changed | ||
2125 | * current->cpuset, allowing the previously referenced cpuset to | ||
2126 | * be removed and freed. Instead, we task_lock(current) and copy | ||
2127 | * its present value of current->cpuset for our freshly forked child. | ||
2128 | * | ||
2129 | * At the point that cpuset_fork() is called, 'current' is the parent | ||
2130 | * task, and the passed argument 'child' points to the child task. | ||
2131 | **/ | ||
2132 | |||
2133 | void cpuset_fork(struct task_struct *child) | ||
2134 | { | ||
2135 | task_lock(current); | ||
2136 | child->cpuset = current->cpuset; | ||
2137 | atomic_inc(&child->cpuset->count); | ||
2138 | task_unlock(current); | ||
2139 | } | ||
2140 | |||
2141 | /** | ||
2142 | * cpuset_exit - detach cpuset from exiting task | ||
2143 | * @tsk: pointer to task_struct of exiting process | ||
2144 | * | ||
2145 | * Description: Detach cpuset from @tsk and release it. | ||
2146 | * | ||
2147 | * Note that cpusets marked notify_on_release force every task in | ||
2148 | * them to take the global manage_mutex mutex when exiting. | ||
2149 | * This could impact scaling on very large systems. Be reluctant to | ||
2150 | * use notify_on_release cpusets where very high task exit scaling | ||
2151 | * is required on large systems. | ||
2152 | * | ||
2153 | * Don't even think about dereferencing 'cs' after the cpuset use count | ||
2154 | * goes to zero, except inside a critical section guarded by manage_mutex | ||
2155 | * or callback_mutex. Otherwise a zero cpuset use count is a license to | ||
2156 | * any other task to nuke the cpuset immediately, via cpuset_rmdir(). | ||
2157 | * | ||
2158 | * This routine has to take manage_mutex, not callback_mutex, because | ||
2159 | * it is holding that mutex while calling check_for_release(), | ||
2160 | * which calls kmalloc(), so can't be called holding callback_mutex(). | ||
2161 | * | ||
2162 | * the_top_cpuset_hack: | ||
2163 | * | ||
2164 | * Set the exiting tasks cpuset to the root cpuset (top_cpuset). | ||
2165 | * | ||
2166 | * Don't leave a task unable to allocate memory, as that is an | ||
2167 | * accident waiting to happen should someone add a callout in | ||
2168 | * do_exit() after the cpuset_exit() call that might allocate. | ||
2169 | * If a task tries to allocate memory with an invalid cpuset, | ||
2170 | * it will oops in cpuset_update_task_memory_state(). | ||
2171 | * | ||
2172 | * We call cpuset_exit() while the task is still competent to | ||
2173 | * handle notify_on_release(), then leave the task attached to | ||
2174 | * the root cpuset (top_cpuset) for the remainder of its exit. | ||
2175 | * | ||
2176 | * To do this properly, we would increment the reference count on | ||
2177 | * top_cpuset, and near the very end of the kernel/exit.c do_exit() | ||
2178 | * code we would add a second cpuset function call, to drop that | ||
2179 | * reference. This would just create an unnecessary hot spot on | ||
2180 | * the top_cpuset reference count, to no avail. | ||
2181 | * | ||
2182 | * Normally, holding a reference to a cpuset without bumping its | ||
2183 | * count is unsafe. The cpuset could go away, or someone could | ||
2184 | * attach us to a different cpuset, decrementing the count on | ||
2185 | * the first cpuset that we never incremented. But in this case, | ||
2186 | * top_cpuset isn't going away, and either task has PF_EXITING set, | ||
2187 | * which wards off any attach_task() attempts, or task is a failed | ||
2188 | * fork, never visible to attach_task. | ||
2189 | * | ||
2190 | * Another way to do this would be to set the cpuset pointer | ||
2191 | * to NULL here, and check in cpuset_update_task_memory_state() | ||
2192 | * for a NULL pointer. This hack avoids that NULL check, for no | ||
2193 | * cost (other than this way too long comment ;). | ||
2194 | **/ | ||
2195 | 1806 | ||
2196 | void cpuset_exit(struct task_struct *tsk) | ||
2197 | { | ||
2198 | struct cpuset *cs; | ||
2199 | |||
2200 | task_lock(current); | ||
2201 | cs = tsk->cpuset; | ||
2202 | tsk->cpuset = &top_cpuset; /* the_top_cpuset_hack - see above */ | ||
2203 | task_unlock(current); | ||
2204 | |||
2205 | if (notify_on_release(cs)) { | ||
2206 | char *pathbuf = NULL; | ||
2207 | |||
2208 | mutex_lock(&manage_mutex); | ||
2209 | if (atomic_dec_and_test(&cs->count)) | ||
2210 | check_for_release(cs, &pathbuf); | ||
2211 | mutex_unlock(&manage_mutex); | ||
2212 | cpuset_release_agent(pathbuf); | ||
2213 | } else { | ||
2214 | atomic_dec(&cs->count); | ||
2215 | } | ||
2216 | } | ||
2217 | |||
2218 | /** | ||
2219 | * cpuset_cpus_allowed - return cpus_allowed mask from a tasks cpuset. | 1807 | * cpuset_cpus_allowed - return cpus_allowed mask from a tasks cpuset. |
2220 | * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed. | 1808 | * @tsk: pointer to task_struct from which to obtain cpuset->cpus_allowed. |
2221 | * | 1809 | * |
@@ -2230,10 +1818,23 @@ cpumask_t cpuset_cpus_allowed(struct task_struct *tsk) | |||
2230 | cpumask_t mask; | 1818 | cpumask_t mask; |
2231 | 1819 | ||
2232 | mutex_lock(&callback_mutex); | 1820 | mutex_lock(&callback_mutex); |
1821 | mask = cpuset_cpus_allowed_locked(tsk); | ||
1822 | mutex_unlock(&callback_mutex); | ||
1823 | |||
1824 | return mask; | ||
1825 | } | ||
1826 | |||
1827 | /** | ||
1828 | * cpuset_cpus_allowed_locked - return cpus_allowed mask from a tasks cpuset. | ||
1829 | * Must be called with callback_mutex held. | ||
1830 | **/ | ||
1831 | cpumask_t cpuset_cpus_allowed_locked(struct task_struct *tsk) | ||
1832 | { | ||
1833 | cpumask_t mask; | ||
1834 | |||
2233 | task_lock(tsk); | 1835 | task_lock(tsk); |
2234 | guarantee_online_cpus(tsk->cpuset, &mask); | 1836 | guarantee_online_cpus(task_cs(tsk), &mask); |
2235 | task_unlock(tsk); | 1837 | task_unlock(tsk); |
2236 | mutex_unlock(&callback_mutex); | ||
2237 | 1838 | ||
2238 | return mask; | 1839 | return mask; |
2239 | } | 1840 | } |
@@ -2259,7 +1860,7 @@ nodemask_t cpuset_mems_allowed(struct task_struct *tsk) | |||
2259 | 1860 | ||
2260 | mutex_lock(&callback_mutex); | 1861 | mutex_lock(&callback_mutex); |
2261 | task_lock(tsk); | 1862 | task_lock(tsk); |
2262 | guarantee_online_mems(tsk->cpuset, &mask); | 1863 | guarantee_online_mems(task_cs(tsk), &mask); |
2263 | task_unlock(tsk); | 1864 | task_unlock(tsk); |
2264 | mutex_unlock(&callback_mutex); | 1865 | mutex_unlock(&callback_mutex); |
2265 | 1866 | ||
@@ -2390,7 +1991,7 @@ int __cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask) | |||
2390 | mutex_lock(&callback_mutex); | 1991 | mutex_lock(&callback_mutex); |
2391 | 1992 | ||
2392 | task_lock(current); | 1993 | task_lock(current); |
2393 | cs = nearest_exclusive_ancestor(current->cpuset); | 1994 | cs = nearest_exclusive_ancestor(task_cs(current)); |
2394 | task_unlock(current); | 1995 | task_unlock(current); |
2395 | 1996 | ||
2396 | allowed = node_isset(node, cs->mems_allowed); | 1997 | allowed = node_isset(node, cs->mems_allowed); |
@@ -2550,14 +2151,12 @@ int cpuset_memory_pressure_enabled __read_mostly; | |||
2550 | 2151 | ||
2551 | void __cpuset_memory_pressure_bump(void) | 2152 | void __cpuset_memory_pressure_bump(void) |
2552 | { | 2153 | { |
2553 | struct cpuset *cs; | ||
2554 | |||
2555 | task_lock(current); | 2154 | task_lock(current); |
2556 | cs = current->cpuset; | 2155 | fmeter_markevent(&task_cs(current)->fmeter); |
2557 | fmeter_markevent(&cs->fmeter); | ||
2558 | task_unlock(current); | 2156 | task_unlock(current); |
2559 | } | 2157 | } |
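[Editorial note: fmeter_markevent() bumps the per-cpuset frequency meter declared near the top of this file. A toy userspace model of the same idea — an event counter that decays over time to approximate an event rate. The halving period and floating-point arithmetic are illustrative; the kernel's fmeter uses fixed-point coefficients:

#include <stdio.h>
#include <time.h>

struct toy_fmeter {
	double val;	/* smoothed rate estimate */
	time_t last;	/* when 'val' was last decayed */
};

static void toy_fmeter_decay(struct toy_fmeter *fm, time_t now)
{
	while (fm->last < now) {	/* halve the estimate each elapsed second */
		fm->val /= 2.0;
		fm->last++;
	}
}

static void toy_fmeter_markevent(struct toy_fmeter *fm)
{
	toy_fmeter_decay(fm, time(NULL));
	fm->val += 1.0;
}

int main(void)
{
	struct toy_fmeter fm = { 0.0, time(NULL) };

	toy_fmeter_markevent(&fm);
	toy_fmeter_markevent(&fm);
	printf("rate estimate: %.2f\n", fm.val);
	return 0;
}
]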
2560 | 2158 | ||
2159 | #ifdef CONFIG_PROC_PID_CPUSET | ||
2561 | /* | 2160 | /* |
2562 | * proc_cpuset_show() | 2161 | * proc_cpuset_show() |
2563 | * - Print tasks cpuset path into seq_file. | 2162 | * - Print tasks cpuset path into seq_file. |
@@ -2569,11 +2168,12 @@ void __cpuset_memory_pressure_bump(void) | |||
2569 | * the_top_cpuset_hack in cpuset_exit(), which sets an exiting tasks | 2168 | * the_top_cpuset_hack in cpuset_exit(), which sets an exiting tasks |
2570 | * cpuset to top_cpuset. | 2169 | * cpuset to top_cpuset. |
2571 | */ | 2170 | */ |
2572 | static int proc_cpuset_show(struct seq_file *m, void *v) | 2171 | static int proc_cpuset_show(struct seq_file *m, void *unused_v) |
2573 | { | 2172 | { |
2574 | struct pid *pid; | 2173 | struct pid *pid; |
2575 | struct task_struct *tsk; | 2174 | struct task_struct *tsk; |
2576 | char *buf; | 2175 | char *buf; |
2176 | struct cgroup_subsys_state *css; | ||
2577 | int retval; | 2177 | int retval; |
2578 | 2178 | ||
2579 | retval = -ENOMEM; | 2179 | retval = -ENOMEM; |
@@ -2588,15 +2188,15 @@ static int proc_cpuset_show(struct seq_file *m, void *v) | |||
2588 | goto out_free; | 2188 | goto out_free; |
2589 | 2189 | ||
2590 | retval = -EINVAL; | 2190 | retval = -EINVAL; |
2591 | mutex_lock(&manage_mutex); | 2191 | cgroup_lock(); |
2592 | 2192 | css = task_subsys_state(tsk, cpuset_subsys_id); | |
2593 | retval = cpuset_path(tsk->cpuset, buf, PAGE_SIZE); | 2193 | retval = cgroup_path(css->cgroup, buf, PAGE_SIZE); |
2594 | if (retval < 0) | 2194 | if (retval < 0) |
2595 | goto out_unlock; | 2195 | goto out_unlock; |
2596 | seq_puts(m, buf); | 2196 | seq_puts(m, buf); |
2597 | seq_putc(m, '\n'); | 2197 | seq_putc(m, '\n'); |
2598 | out_unlock: | 2198 | out_unlock: |
2599 | mutex_unlock(&manage_mutex); | 2199 | cgroup_unlock(); |
2600 | put_task_struct(tsk); | 2200 | put_task_struct(tsk); |
2601 | out_free: | 2201 | out_free: |
2602 | kfree(buf); | 2202 | kfree(buf); |
@@ -2616,6 +2216,7 @@ const struct file_operations proc_cpuset_operations = { | |||
2616 | .llseek = seq_lseek, | 2216 | .llseek = seq_lseek, |
2617 | .release = single_release, | 2217 | .release = single_release, |
2618 | }; | 2218 | }; |
2219 | #endif /* CONFIG_PROC_PID_CPUSET */ | ||
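[Editorial note: a hypothetical userspace check of the file proc_cpuset_show() implements above:

#include <stdio.h>

int main(void)
{
	char line[4096];
	FILE *f = fopen("/proc/self/cpuset", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* e.g. "/" for a task in the top cpuset */
	fclose(f);
	return 0;
}
]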
2619 | 2220 | ||
2620 | /* Display task cpus_allowed, mems_allowed in /proc/<pid>/status file. */ | 2221 | /* Display task cpus_allowed, mems_allowed in /proc/<pid>/status file. */ |
2621 | char *cpuset_task_status_allowed(struct task_struct *task, char *buffer) | 2222 | char *cpuset_task_status_allowed(struct task_struct *task, char *buffer) |