author     Linus Torvalds <torvalds@linux-foundation.org>   2012-01-09 15:59:24 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>   2012-01-09 15:59:24 -0500
commit     db0c2bf69aa095d4a6de7b1145f29fe9a7c0f6a3 (patch)
tree       8f38957c01b18edddd44d49ecc3beeac08a20b4e /include/linux/sched.h
parent     ac69e0928054ff29a5049902fb477f9c7605c773 (diff)
parent     0d19ea866562e46989412a0676412fa0983c9ce7 (diff)
Merge branch 'for-3.3' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup
* 'for-3.3' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup: (21 commits)
cgroup: fix to allow mounting a hierarchy by name
cgroup: move assignment out of condition in cgroup_attach_proc()
cgroup: Remove task_lock() from cgroup_post_fork()
cgroup: add sparse annotation to cgroup_iter_start() and cgroup_iter_end()
cgroup: mark cgroup_rmdir_waitq and cgroup_attach_proc() as static
cgroup: only need to check oldcgrp==newgrp once
cgroup: remove redundant get/put of task struct
cgroup: remove redundant get/put of old css_set from migrate
cgroup: Remove unnecessary task_lock before fetching css_set on migration
cgroup: Drop task_lock(parent) on cgroup_fork()
cgroups: remove redundant get/put of css_set from css_set_check_fetched()
resource cgroups: remove bogus cast
cgroup: kill subsys->can_attach_task(), pre_attach() and attach_task()
cgroup, cpuset: don't use ss->pre_attach()
cgroup: don't use subsys->can_attach_task() or ->attach_task()
cgroup: introduce cgroup_taskset and use it in subsys->can_attach(), cancel_attach() and attach()
cgroup: improve old cgroup handling in cgroup_attach_proc()
cgroup: always lock threadgroup during migration
threadgroup: extend threadgroup_lock() to cover exit and exec
threadgroup: rename signal->threadgroup_fork_lock to ->group_rwsem
...
Fix up conflict in kernel/cgroup.c due to commit e0197aae59e5: "cgroups:
fix a css_set not found bug in cgroup_attach_proc" that already
mentioned that the bug is fixed (differently) in Tejun's cgroup
patchset. This one, in other words.
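As background for the interface change named in the shortlog above, the series drops the per-task ->can_attach_task() callback and instead hands ->can_attach() a cgroup_taskset covering every task in the migrating group. A minimal sketch of what a controller callback might look like under the new interface, assuming the cgroup_taskset_first()/cgroup_taskset_next() accessors this series introduces; the "foo" controller and its foo_allowed() per-task check are hypothetical:

/* Hypothetical controller: shows the shape of ->can_attach() after the
 * cgroup_taskset conversion.  foo_allowed() is a made-up per-task check. */
static int foo_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
			  struct cgroup_taskset *tset)
{
	struct task_struct *task;

	/* Walk every task in the migrating set; failing any one task
	 * rejects the whole migration before anything moves. */
	for (task = cgroup_taskset_first(tset); task;
	     task = cgroup_taskset_next(tset))
		if (!foo_allowed(task))
			return -EPERM;
	return 0;
}

Compared with the old per-task ->can_attach_task(), this lets a controller veto an entire threadgroup migration up front rather than discovering a failure halfway through.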
Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--   include/linux/sched.h   73
1 file changed, 54 insertions(+), 19 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index ad93e1ec8c65..f044f66018f2 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -637,13 +637,15 @@ struct signal_struct {
 #endif
 #ifdef CONFIG_CGROUPS
 	/*
-	 * The threadgroup_fork_lock prevents threads from forking with
-	 * CLONE_THREAD while held for writing. Use this for fork-sensitive
-	 * threadgroup-wide operations. It's taken for reading in fork.c in
-	 * copy_process().
-	 * Currently only needed write-side by cgroups.
+	 * group_rwsem prevents new tasks from entering the threadgroup and
+	 * member tasks from exiting, more specifically, setting of
+	 * PF_EXITING.  fork and exit paths are protected with this rwsem
+	 * using threadgroup_change_begin/end().  Users which require
+	 * threadgroup to remain stable should use threadgroup_[un]lock()
+	 * which also takes care of exec path.  Currently, cgroup is the
+	 * only user.
 	 */
-	struct rw_semaphore threadgroup_fork_lock;
+	struct rw_semaphore group_rwsem;
 #endif
 
 	int oom_adj;		/* OOM kill score adjustment (bit shift) */
@@ -2394,29 +2396,62 @@ static inline void unlock_task_sighand(struct task_struct *tsk,
 	spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
 }
 
-/* See the declaration of threadgroup_fork_lock in signal_struct. */
 #ifdef CONFIG_CGROUPS
-static inline void threadgroup_fork_read_lock(struct task_struct *tsk)
+static inline void threadgroup_change_begin(struct task_struct *tsk)
 {
-	down_read(&tsk->signal->threadgroup_fork_lock);
+	down_read(&tsk->signal->group_rwsem);
 }
-static inline void threadgroup_fork_read_unlock(struct task_struct *tsk)
+static inline void threadgroup_change_end(struct task_struct *tsk)
 {
-	up_read(&tsk->signal->threadgroup_fork_lock);
+	up_read(&tsk->signal->group_rwsem);
 }
-static inline void threadgroup_fork_write_lock(struct task_struct *tsk)
+
+/**
+ * threadgroup_lock - lock threadgroup
+ * @tsk: member task of the threadgroup to lock
+ *
+ * Lock the threadgroup @tsk belongs to.  No new task is allowed to enter
+ * and member tasks aren't allowed to exit (as indicated by PF_EXITING) or
+ * perform exec.  This is useful for cases where the threadgroup needs to
+ * stay stable across blockable operations.
+ *
+ * fork and exit paths explicitly call threadgroup_change_{begin|end}() for
+ * synchronization.  While held, no new task will be added to threadgroup
+ * and no existing live task will have its PF_EXITING set.
+ *
+ * During exec, a task goes and puts its thread group through unusual
+ * changes.  After de-threading, exclusive access is assumed to resources
+ * which are usually shared by tasks in the same group - e.g. sighand may
+ * be replaced with a new one.  Also, the exec'ing task takes over group
+ * leader role including its pid.  Exclude these changes while locked by
+ * grabbing cred_guard_mutex which is used to synchronize exec path.
+ */
+static inline void threadgroup_lock(struct task_struct *tsk)
 {
-	down_write(&tsk->signal->threadgroup_fork_lock);
+	/*
+	 * exec uses exit for de-threading, nesting group_rwsem inside
+	 * cred_guard_mutex.  Grab cred_guard_mutex first.
+	 */
+	mutex_lock(&tsk->signal->cred_guard_mutex);
+	down_write(&tsk->signal->group_rwsem);
 }
-static inline void threadgroup_fork_write_unlock(struct task_struct *tsk)
+
+/**
+ * threadgroup_unlock - unlock threadgroup
+ * @tsk: member task of the threadgroup to unlock
+ *
+ * Reverse threadgroup_lock().
+ */
+static inline void threadgroup_unlock(struct task_struct *tsk)
 {
-	up_write(&tsk->signal->threadgroup_fork_lock);
+	up_write(&tsk->signal->group_rwsem);
+	mutex_unlock(&tsk->signal->cred_guard_mutex);
 }
 #else
-static inline void threadgroup_fork_read_lock(struct task_struct *tsk) {}
-static inline void threadgroup_fork_read_unlock(struct task_struct *tsk) {}
-static inline void threadgroup_fork_write_lock(struct task_struct *tsk) {}
-static inline void threadgroup_fork_write_unlock(struct task_struct *tsk) {}
+static inline void threadgroup_change_begin(struct task_struct *tsk) {}
+static inline void threadgroup_change_end(struct task_struct *tsk) {}
+static inline void threadgroup_lock(struct task_struct *tsk) {}
+static inline void threadgroup_unlock(struct task_struct *tsk) {}
 #endif
 
 #ifndef __HAVE_THREAD_FUNCTIONS
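To make the pairing in the diff above concrete, here is a minimal sketch of how the two sides of group_rwsem are meant to be used after this change: fork- and exit-like paths bracket the threadgroup change with the read side (mirroring what copy_process() does in fork.c), while a cgroup-style user holds the write side, plus cred_guard_mutex, for the duration of a migration. The function names and do_migrate_tasks() below are hypothetical stand-ins, not kernel APIs:

/* Read side, as the fork/exit paths take it: concurrent readers are
 * fine, but the change is blocked while threadgroup_lock() is held. */
static void add_thread_to_group(struct task_struct *parent)
{
	threadgroup_change_begin(parent);
	/* ... link the new thread into parent's threadgroup,
	 * as copy_process() does ... */
	threadgroup_change_end(parent);
}

/* Write side, as a cgroup-style user would take it: while held, no
 * task can fork into, exit from, or exec within the threadgroup. */
static int migrate_threadgroup(struct task_struct *leader)
{
	int ret;

	threadgroup_lock(leader);
	ret = do_migrate_tasks(leader);	/* hypothetical migration step */
	threadgroup_unlock(leader);
	return ret;
}

Note the lock ordering baked into threadgroup_lock(): cred_guard_mutex is taken before group_rwsem because exec de-threads through the exit path while already holding cred_guard_mutex, so taking them in the reverse order would deadlock.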