diff options
| author | Oleg Nesterov <oleg@redhat.com> | 2010-06-10 19:09:56 -0400 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2010-06-18 04:46:56 -0400 |
| commit | bfac7009180901f57f20a73c53c3e57b1ce75a1b (patch) | |
| tree | f167c0a6bfbf397ad668517bdccbc9d0de162715 /kernel | |
| parent | 48286d5088a3ba76de40a6b70221632a49cab7a1 (diff) | |
sched: thread_group_cputime: Simplify, document the "alive" check
thread_group_cputime() looks as if it is rcu-safe, but in fact this
was wrong until ea6d290c which pins task->signal to task_struct.
It checks ->sighand != NULL under rcu, but this can't help if ->signal
can go away. Fortunately the caller either holds ->siglock, or it is
fastpath_timer_check() which uses current and checks exit_state == 0.
- Since commit ea6d290c, tsk->signal is stable, so we can read it first
and avoid the initialization from INIT_CPUTIME.
- Even if tsk->signal is always valid, we still have to check that it
is safe to use next_thread() under rcu_read_lock(). Currently
the code checks ->sighand != NULL, change it to use pid_alive()
which is commonly used to ensure the task wasn't unhashed before
we take rcu_read_lock().
Add the comment to explain this check.
- Change the main loop to use the while_each_thread() helper.
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <20100610230956.GA25921@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
| -rw-r--r-- | kernel/posix-cpu-timers.c | 21 |
1 file changed, 7 insertions, 14 deletions
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c index 9829646d399c..bf2a6502860a 100644 --- a/kernel/posix-cpu-timers.c +++ b/kernel/posix-cpu-timers.c | |||
| @@ -232,31 +232,24 @@ static int cpu_clock_sample(const clockid_t which_clock, struct task_struct *p, | |||
| 232 | 232 | ||
| 233 | void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times) | 233 | void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times) |
| 234 | { | 234 | { |
| 235 | struct sighand_struct *sighand; | 235 | struct signal_struct *sig = tsk->signal; |
| 236 | struct signal_struct *sig; | ||
| 237 | struct task_struct *t; | 236 | struct task_struct *t; |
| 238 | 237 | ||
| 239 | *times = INIT_CPUTIME; | 238 | times->utime = sig->utime; |
| 239 | times->stime = sig->stime; | ||
| 240 | times->sum_exec_runtime = sig->sum_sched_runtime; | ||
| 240 | 241 | ||
| 241 | rcu_read_lock(); | 242 | rcu_read_lock(); |
| 242 | sighand = rcu_dereference(tsk->sighand); | 243 | /* make sure we can trust tsk->thread_group list */ |
| 243 | if (!sighand) | 244 | if (!likely(pid_alive(tsk))) |
| 244 | goto out; | 245 | goto out; |
| 245 | 246 | ||
| 246 | sig = tsk->signal; | ||
| 247 | |||
| 248 | t = tsk; | 247 | t = tsk; |
| 249 | do { | 248 | do { |
| 250 | times->utime = cputime_add(times->utime, t->utime); | 249 | times->utime = cputime_add(times->utime, t->utime); |
| 251 | times->stime = cputime_add(times->stime, t->stime); | 250 | times->stime = cputime_add(times->stime, t->stime); |
| 252 | times->sum_exec_runtime += t->se.sum_exec_runtime; | 251 | times->sum_exec_runtime += t->se.sum_exec_runtime; |
| 253 | 252 | } while_each_thread(tsk, t); | |
| 254 | t = next_thread(t); | ||
| 255 | } while (t != tsk); | ||
| 256 | |||
| 257 | times->utime = cputime_add(times->utime, sig->utime); | ||
| 258 | times->stime = cputime_add(times->stime, sig->stime); | ||
| 259 | times->sum_exec_runtime += sig->sum_sched_runtime; | ||
| 260 | out: | 253 | out: |
| 261 | rcu_read_unlock(); | 254 | rcu_read_unlock(); |
| 262 | } | 255 | } |
