author		Ben Blum <bblum@andrew.cmu.edu>	2011-05-26 19:25:18 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-05-26 20:12:34 -0400
commit		4714d1d32d97239fb5ae3e10521d3f133a899b66 (patch)
tree		fb50707cefc386bf4e87cac9661a38dcfe3192df /include/linux/sched.h
parent		dcb3a08e69629ea65a3e9647da730bfaf670497d (diff)
cgroups: read-write lock CLONE_THREAD forking per threadgroup
Adds functionality to read/write lock CLONE_THREAD fork()ing per-threadgroup.

Add an rwsem that lives in a threadgroup's signal_struct that's taken for
reading in the fork path, under CONFIG_CGROUPS. If another part of the kernel
later wants to use such a locking mechanism, the CONFIG_CGROUPS ifdefs should
be changed to a higher-up flag that CGROUPS and the other system would both
depend on.

This is a pre-patch for cgroup-procs-write.patch.

Signed-off-by: Ben Blum <bblum@andrew.cmu.edu>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Cc: Li Zefan <lizf@cn.fujitsu.com>
Cc: Matt Helsley <matthltc@us.ibm.com>
Reviewed-by: Paul Menage <menage@google.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Miao Xie <miaox@cn.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
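As a rough sketch of the write side this rwsem is meant to serve (not part of
this patch: cgroup_attach_proc_sketch() and the per-thread attach step below are
hypothetical stand-ins for the later cgroup-procs-write user), a threadgroup-wide
operation would bracket its thread walk with the write-lock helpers added in the
diff, so no new thread can be CLONE_THREAD-forked into the group mid-operation:

#include <linux/sched.h>
#include <linux/rcupdate.h>

/*
 * Sketch only: cgroup_attach_proc_sketch() is a placeholder for the
 * write-side user added by the follow-up cgroup-procs-write patch.
 * The threadgroup_fork_*() helpers are the ones introduced in this commit.
 */
static int cgroup_attach_proc_sketch(struct task_struct *leader)
{
	struct task_struct *tsk;

	/* Exclude CLONE_THREAD forks in this threadgroup for the duration. */
	threadgroup_fork_write_lock(leader);

	/* Walk the existing threads; RCU protects the list walk itself. */
	rcu_read_lock();
	tsk = leader;
	do {
		/* ... attach tsk to the destination cgroup (elided) ... */
	} while_each_thread(leader, tsk);
	rcu_read_unlock();

	threadgroup_fork_write_unlock(leader);
	return 0;
}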
Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--	include/linux/sched.h	36
1 file changed, 36 insertions(+), 0 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index f18300eddfcb..dc8871295a5a 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -513,6 +513,7 @@ struct thread_group_cputimer {
 	spinlock_t lock;
 };
 
+#include <linux/rwsem.h>
 struct autogroup;
 
 /*
@@ -632,6 +633,16 @@ struct signal_struct {
 	unsigned audit_tty;
 	struct tty_audit_buf *tty_audit_buf;
 #endif
+#ifdef CONFIG_CGROUPS
+	/*
+	 * The threadgroup_fork_lock prevents threads from forking with
+	 * CLONE_THREAD while held for writing. Use this for fork-sensitive
+	 * threadgroup-wide operations. It's taken for reading in fork.c in
+	 * copy_process().
+	 * Currently only needed write-side by cgroups.
+	 */
+	struct rw_semaphore threadgroup_fork_lock;
+#endif
 
 	int oom_adj;		/* OOM kill score adjustment (bit shift) */
 	int oom_score_adj;	/* OOM kill score adjustment */
@@ -2323,6 +2334,31 @@ static inline void unlock_task_sighand(struct task_struct *tsk,
 	spin_unlock_irqrestore(&tsk->sighand->siglock, *flags);
 }
 
+/* See the declaration of threadgroup_fork_lock in signal_struct. */
+#ifdef CONFIG_CGROUPS
+static inline void threadgroup_fork_read_lock(struct task_struct *tsk)
+{
+	down_read(&tsk->signal->threadgroup_fork_lock);
+}
+static inline void threadgroup_fork_read_unlock(struct task_struct *tsk)
+{
+	up_read(&tsk->signal->threadgroup_fork_lock);
+}
+static inline void threadgroup_fork_write_lock(struct task_struct *tsk)
+{
+	down_write(&tsk->signal->threadgroup_fork_lock);
+}
+static inline void threadgroup_fork_write_unlock(struct task_struct *tsk)
+{
+	up_write(&tsk->signal->threadgroup_fork_lock);
+}
+#else
+static inline void threadgroup_fork_read_lock(struct task_struct *tsk) {}
+static inline void threadgroup_fork_read_unlock(struct task_struct *tsk) {}
+static inline void threadgroup_fork_write_lock(struct task_struct *tsk) {}
+static inline void threadgroup_fork_write_unlock(struct task_struct *tsk) {}
+#endif
+
 #ifndef __HAVE_THREAD_FUNCTIONS
 
 #define task_thread_info(task)	((struct thread_info *)(task)->stack)
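The read side lives in kernel/fork.c and is not part of this header-only diff.
A minimal sketch of the pairing the threadgroup_fork_lock comment describes
(placement and the elided body are illustrative, not the actual fork.c hunk):

#include <linux/sched.h>

/*
 * Sketch only: shows the shape of the read-side use in copy_process().
 * The lock is held for reading while the new task is set up and linked
 * into its threadgroup, so a writer (a threadgroup-wide cgroup operation)
 * never races with a CLONE_THREAD fork in progress.
 */
static void copy_process_locking_sketch(struct task_struct *parent)
{
	threadgroup_fork_read_lock(parent);

	/* ... duplicate the task, cgroup_fork(), link it into the
	 *     threadgroup ... (elided; see kernel/fork.c) ... */

	threadgroup_fork_read_unlock(parent);
}

An rwsem fits this pattern: concurrent forks in the same group do not serialize
against each other (all readers), only the rare threadgroup-wide writer excludes
them, and under !CONFIG_CGROUPS the helpers compile away to empty inlines.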