aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorOleg Nesterov <oleg@redhat.com>2010-05-26 17:43:24 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2010-05-27 12:12:47 -0400
commitb3ac022cb9dc5883505a88b159d1b240ad1ef405 (patch)
treebffa035303cbe3c5bde048ac3d3154fb57059e2d
parentdd98acf74762764fbc4382a1d9a244f11a2658cc (diff)
proc: turn signal_struct->count into "int nr_threads"
No functional changes, just s/atomic_t count/int nr_threads/. With the recent changes this counter has a single user, get_nr_threads(). And, none of its callers need the really accurate number of threads, not to mention each caller obviously races with fork/exit. It is only used to report this value to the user-space, except first_tid() uses it to avoid the unnecessary while_each_thread() loop in the unlikely case. It is a bit sad we need a word in struct signal_struct for this; perhaps we can change get_nr_threads() to approximate the number of threads using signal->live and kill ->nr_threads later. [akpm@linux-foundation.org: coding-style fixes] Signed-off-by: Oleg Nesterov <oleg@redhat.com> Cc: Alexey Dobriyan <adobriyan@gmail.com> Cc: "Eric W. Biederman" <ebiederm@xmission.com> Acked-by: Roland McGrath <roland@redhat.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--include/linux/init_task.h2
-rw-r--r--include/linux/sched.h4
-rw-r--r--kernel/exit.c5
-rw-r--r--kernel/fork.c8
4 files changed, 8 insertions, 11 deletions
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 7996fc2c9ba9..0551e0dcb71b 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -16,7 +16,7 @@ extern struct files_struct init_files;
16extern struct fs_struct init_fs; 16extern struct fs_struct init_fs;
17 17
18#define INIT_SIGNALS(sig) { \ 18#define INIT_SIGNALS(sig) { \
19 .count = ATOMIC_INIT(1), \ 19 .nr_threads = 1, \
20 .wait_chldexit = __WAIT_QUEUE_HEAD_INITIALIZER(sig.wait_chldexit),\ 20 .wait_chldexit = __WAIT_QUEUE_HEAD_INITIALIZER(sig.wait_chldexit),\
21 .shared_pending = { \ 21 .shared_pending = { \
22 .list = LIST_HEAD_INIT(sig.shared_pending.list), \ 22 .list = LIST_HEAD_INIT(sig.shared_pending.list), \
diff --git a/include/linux/sched.h b/include/linux/sched.h
index ccd2d1500720..f118809c953f 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -527,8 +527,8 @@ struct thread_group_cputimer {
527 */ 527 */
528struct signal_struct { 528struct signal_struct {
529 atomic_t sigcnt; 529 atomic_t sigcnt;
530 atomic_t count;
531 atomic_t live; 530 atomic_t live;
531 int nr_threads;
532 532
533 wait_queue_head_t wait_chldexit; /* for wait4() */ 533 wait_queue_head_t wait_chldexit; /* for wait4() */
534 534
@@ -2149,7 +2149,7 @@ extern bool current_is_single_threaded(void);
2149 2149
2150static inline int get_nr_threads(struct task_struct *tsk) 2150static inline int get_nr_threads(struct task_struct *tsk)
2151{ 2151{
2152 return atomic_read(&tsk->signal->count); 2152 return tsk->signal->nr_threads;
2153} 2153}
2154 2154
2155/* de_thread depends on thread_group_leader not being a pid based check */ 2155/* de_thread depends on thread_group_leader not being a pid based check */
diff --git a/kernel/exit.c b/kernel/exit.c
index 357d443d5a00..ceffc67b564a 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -83,14 +83,10 @@ static void __exit_signal(struct task_struct *tsk)
83 struct sighand_struct *sighand; 83 struct sighand_struct *sighand;
84 struct tty_struct *uninitialized_var(tty); 84 struct tty_struct *uninitialized_var(tty);
85 85
86 BUG_ON(!sig);
87 BUG_ON(!atomic_read(&sig->count));
88
89 sighand = rcu_dereference_check(tsk->sighand, 86 sighand = rcu_dereference_check(tsk->sighand,
90 rcu_read_lock_held() || 87 rcu_read_lock_held() ||
91 lockdep_tasklist_lock_is_held()); 88 lockdep_tasklist_lock_is_held());
92 spin_lock(&sighand->siglock); 89 spin_lock(&sighand->siglock);
93 atomic_dec(&sig->count);
94 90
95 posix_cpu_timers_exit(tsk); 91 posix_cpu_timers_exit(tsk);
96 if (group_dead) { 92 if (group_dead) {
@@ -130,6 +126,7 @@ static void __exit_signal(struct task_struct *tsk)
130 sig->sum_sched_runtime += tsk->se.sum_exec_runtime; 126 sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
131 } 127 }
132 128
129 sig->nr_threads--;
133 __unhash_process(tsk, group_dead); 130 __unhash_process(tsk, group_dead);
134 131
135 /* 132 /*
diff --git a/kernel/fork.c b/kernel/fork.c
index 40cd099cfc1b..d32410bd4be7 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -877,9 +877,9 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
877 if (!sig) 877 if (!sig)
878 return -ENOMEM; 878 return -ENOMEM;
879 879
880 atomic_set(&sig->sigcnt, 1); 880 sig->nr_threads = 1;
881 atomic_set(&sig->count, 1);
882 atomic_set(&sig->live, 1); 881 atomic_set(&sig->live, 1);
882 atomic_set(&sig->sigcnt, 1);
883 init_waitqueue_head(&sig->wait_chldexit); 883 init_waitqueue_head(&sig->wait_chldexit);
884 if (clone_flags & CLONE_NEWPID) 884 if (clone_flags & CLONE_NEWPID)
885 sig->flags |= SIGNAL_UNKILLABLE; 885 sig->flags |= SIGNAL_UNKILLABLE;
@@ -1256,9 +1256,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
1256 } 1256 }
1257 1257
1258 if (clone_flags & CLONE_THREAD) { 1258 if (clone_flags & CLONE_THREAD) {
1259 atomic_inc(&current->signal->sigcnt); 1259 current->signal->nr_threads++;
1260 atomic_inc(&current->signal->count);
1261 atomic_inc(&current->signal->live); 1260 atomic_inc(&current->signal->live);
1261 atomic_inc(&current->signal->sigcnt);
1262 p->group_leader = current->group_leader; 1262 p->group_leader = current->group_leader;
1263 list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group); 1263 list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);
1264 } 1264 }