author     Oleg Nesterov <oleg@redhat.com>       2010-06-10 19:10:18 -0400
committer  Ingo Molnar <mingo@elte.hu>           2010-06-18 04:46:57 -0400
commit     0bdd2ed4138ec04e09b4f8165981efc99e439f55
tree       2bac5b05aa0f248b82facc0f2bc0bf705e306e05 /kernel
parent     bfac7009180901f57f20a73c53c3e57b1ce75a1b
sched: run_posix_cpu_timers: Don't check ->exit_state, use lock_task_sighand()
run_posix_cpu_timers() doesn't do anything once current has passed
exit_notify(); the ->exit_state check was needed to prevent races
with do_wait().
Since commit ea6d290c, ->signal is always valid and can't go away.
We can remove the "tsk->exit_state == 0" check from
fastpath_timer_check() and convert run_posix_cpu_timers() to use
lock_task_sighand().
Note: it would make sense to take the group leader's sighand instead,
since a sub-thread still uses CPU time after release_task(), but that
needs more changes.
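
As a reference for reviewers, a minimal sketch of the locking pattern the
patch converts to. Only lock_task_sighand()/unlock_task_sighand() come from
the patch; the caller example_use() is hypothetical and the body between the
two calls stands in for whatever needs ->sighand/->signal to be stable:

    /* Hypothetical caller, for illustration only. */
    static void example_use(struct task_struct *tsk)
    {
            unsigned long flags;

            /*
             * lock_task_sighand() takes ->siglock with irqs saved in
             * "flags" and returns NULL if ->sighand has already been
             * detached (e.g. after release_task()).
             */
            if (!lock_task_sighand(tsk, &flags))
                    return;         /* no sighand left: nothing to do */

            /* ... work that relies on ->sighand/->signal ... */

            unlock_task_sighand(tsk, &flags);
    }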
Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <20100610231018.GA25942@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
 kernel/posix-cpu-timers.c | 10 ++++------
 1 file changed, 4 insertions(+), 6 deletions(-)
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index bf2a6502860a..d5dbef5e89e6 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -1272,10 +1272,6 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
 {
 	struct signal_struct *sig;
 
-	/* tsk == current, ensure it is safe to use ->signal/sighand */
-	if (unlikely(tsk->exit_state))
-		return 0;
-
 	if (!task_cputime_zero(&tsk->cputime_expires)) {
 		struct task_cputime task_sample = {
 			.utime = tsk->utime,
@@ -1308,6 +1304,7 @@ void run_posix_cpu_timers(struct task_struct *tsk)
 {
 	LIST_HEAD(firing);
 	struct k_itimer *timer, *next;
+	unsigned long flags;
 
 	BUG_ON(!irqs_disabled());
 
@@ -1318,7 +1315,8 @@ void run_posix_cpu_timers(struct task_struct *tsk)
 	if (!fastpath_timer_check(tsk))
 		return;
 
-	spin_lock(&tsk->sighand->siglock);
+	if (!lock_task_sighand(tsk, &flags))
+		return;
 	/*
 	 * Here we take off tsk->signal->cpu_timers[N] and
 	 * tsk->cpu_timers[N] all the timers that are firing, and
@@ -1340,7 +1338,7 @@ void run_posix_cpu_timers(struct task_struct *tsk)
 	 * that gets the timer lock before we do will give it up and
 	 * spin until we've taken care of that timer below.
 	 */
-	spin_unlock(&tsk->sighand->siglock);
+	unlock_task_sighand(tsk, &flags);
 
 	/*
 	 * Now that all the timers on our list have the firing flag,
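
For convenience, a rough skeleton of run_posix_cpu_timers() after this
patch, pieced together from the hunks above; the code that moves expired
timers onto "firing" and the loop that fires them afterwards are elided:

    void run_posix_cpu_timers(struct task_struct *tsk)
    {
            LIST_HEAD(firing);
            struct k_itimer *timer, *next;
            unsigned long flags;

            BUG_ON(!irqs_disabled());

            /* Cheap check first; bail out if nothing can have expired. */
            if (!fastpath_timer_check(tsk))
                    return;

            /* Replaces the old unconditional spin_lock(&tsk->sighand->siglock). */
            if (!lock_task_sighand(tsk, &flags))
                    return;         /* ->sighand already detached */

            /* ... collect expired timers from tsk and tsk->signal onto "firing" ... */

            unlock_task_sighand(tsk, &flags);

            /* ... deliver each timer on "firing" without holding siglock ... */
    }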