Diffstat (limited to 'include/linux/sched.h')
 include/linux/sched.h | 73 ++++++++++++++++++++++++++++++++++++++++++++++++++++++-------------------
 1 file changed, 54 insertions(+), 19 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index ad93e1ec8c65..f044f66018f2 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -637,13 +637,15 @@ struct signal_struct {
 #endif
 #ifdef CONFIG_CGROUPS
 	/*
-	 * The threadgroup_fork_lock prevents threads from forking with
-	 * CLONE_THREAD while held for writing. Use this for fork-sensitive
-	 * threadgroup-wide operations. It's taken for reading in fork.c in
-	 * copy_process().
-	 * Currently only needed write-side by cgroups.
+	 * group_rwsem prevents new tasks from entering the threadgroup and
+	 * member tasks from exiting or, more specifically, setting
+	 * PF_EXITING.  fork and exit paths are protected with this rwsem
+	 * using threadgroup_change_begin/end().  Users which require the
+	 * threadgroup to remain stable should use threadgroup_[un]lock(),
+	 * which also takes care of the exec path.  Currently, cgroup is the
+	 * only user.
 	 */
-	struct rw_semaphore threadgroup_fork_lock;
+	struct rw_semaphore group_rwsem;
 #endif
 
 	int oom_adj;	/* OOM kill score adjustment (bit shift) */
@@ -2394,29 +2396,62 @@ static inline void unlock_task_sighand(struct task_struct *tsk,
 	spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
 }
 
-/* See the declaration of threadgroup_fork_lock in signal_struct. */
 #ifdef CONFIG_CGROUPS
-static inline void threadgroup_fork_read_lock(struct task_struct *tsk)
+static inline void threadgroup_change_begin(struct task_struct *tsk)
 {
-	down_read(&tsk->signal->threadgroup_fork_lock);
+	down_read(&tsk->signal->group_rwsem);
 }
-static inline void threadgroup_fork_read_unlock(struct task_struct *tsk)
+static inline void threadgroup_change_end(struct task_struct *tsk)
 {
-	up_read(&tsk->signal->threadgroup_fork_lock);
+	up_read(&tsk->signal->group_rwsem);
 }
-static inline void threadgroup_fork_write_lock(struct task_struct *tsk)
+
+/**
+ * threadgroup_lock - lock threadgroup
+ * @tsk: member task of the threadgroup to lock
+ *
+ * Lock the threadgroup @tsk belongs to.  No new task is allowed to enter
+ * and member tasks aren't allowed to exit (as indicated by PF_EXITING) or
+ * perform exec.  This is useful for cases where the threadgroup needs to
+ * stay stable across blockable operations.
+ *
+ * fork and exit paths explicitly call threadgroup_change_{begin|end}() for
+ * synchronization.  While held, no new task will be added to the
+ * threadgroup and no existing live task will have its PF_EXITING set.
+ *
+ * During exec, a task puts its thread group through unusual changes.
+ * After de-threading, exclusive access is assumed to resources which
+ * are usually shared by tasks in the same group - e.g. sighand may be
+ * replaced with a new one.  Also, the exec'ing task takes over group
+ * leader role including its pid.  Exclude these changes while locked by
+ * grabbing cred_guard_mutex, which is used to synchronize the exec path.
+ */
+static inline void threadgroup_lock(struct task_struct *tsk)
 {
-	down_write(&tsk->signal->threadgroup_fork_lock);
+	/*
+	 * exec uses exit for de-threading, nesting group_rwsem inside
+	 * cred_guard_mutex.  Grab cred_guard_mutex first.
+	 */
+	mutex_lock(&tsk->signal->cred_guard_mutex);
+	down_write(&tsk->signal->group_rwsem);
 }
-static inline void threadgroup_fork_write_unlock(struct task_struct *tsk)
+
+/**
+ * threadgroup_unlock - unlock threadgroup
+ * @tsk: member task of the threadgroup to unlock
+ *
+ * Reverse threadgroup_lock().
+ */
+static inline void threadgroup_unlock(struct task_struct *tsk)
 {
-	up_write(&tsk->signal->threadgroup_fork_lock);
+	up_write(&tsk->signal->group_rwsem);
+	mutex_unlock(&tsk->signal->cred_guard_mutex);
 }
 #else
-static inline void threadgroup_fork_read_lock(struct task_struct *tsk) {}
-static inline void threadgroup_fork_read_unlock(struct task_struct *tsk) {}
-static inline void threadgroup_fork_write_lock(struct task_struct *tsk) {}
-static inline void threadgroup_fork_write_unlock(struct task_struct *tsk) {}
+static inline void threadgroup_change_begin(struct task_struct *tsk) {}
+static inline void threadgroup_change_end(struct task_struct *tsk) {}
+static inline void threadgroup_lock(struct task_struct *tsk) {}
+static inline void threadgroup_unlock(struct task_struct *tsk) {}
 #endif
 
 #ifndef __HAVE_THREAD_FUNCTIONS
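
For context, the read side above pairs with fork and exit: each forking or exiting task brackets the sensitive region with threadgroup_change_begin/end(), taking group_rwsem for reading so that forks and exits may run concurrently with one another while still excluding a threadgroup_lock() holder. A minimal sketch of that pattern follows; it is illustrative only, and sketch_fork_thread()/do_copy_process() are hypothetical stand-ins, not the actual kernel/fork.c code:

#include <linux/sched.h>

/* Hypothetical stand-in for the real copy_process() call chain. */
extern struct task_struct *do_copy_process(unsigned long clone_flags);

static struct task_struct *sketch_fork_thread(unsigned long clone_flags)
{
	struct task_struct *child;

	/*
	 * group_rwsem taken for reading: concurrent forks and exits are
	 * fine, but none may overlap a threadgroup_lock() writer.
	 */
	threadgroup_change_begin(current);

	/* The new task enters the threadgroup inside the read section. */
	child = do_copy_process(clone_flags);

	threadgroup_change_end(current);

	return child;
}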
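
On the write side, a user such as cgroup that needs threadgroup membership to stay stable across blockable operations takes threadgroup_lock() on any member task. Another hypothetical sketch, assuming a made-up visit_task() callback:

#include <linux/sched.h>

/* Hypothetical per-task callback; not a kernel API. */
extern void visit_task(struct task_struct *t);

static void sketch_walk_threadgroup(struct task_struct *leader)
{
	struct task_struct *t = leader;

	/*
	 * Takes cred_guard_mutex, then group_rwsem for writing.  Until
	 * threadgroup_unlock(), no task enters or leaves the group, no
	 * member sets PF_EXITING, and no member execs.
	 */
	threadgroup_lock(leader);

	do {
		visit_task(t);	/* membership is stable here */
	} while_each_thread(leader, t);

	threadgroup_unlock(leader);
}

The cred_guard_mutex-before-group_rwsem ordering is forced by exec: de-threading runs through the exit path while exec already holds cred_guard_mutex, so group_rwsem nests inside it, and threadgroup_lock() must take the two in that same order to avoid deadlock.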