Diffstat (limited to 'kernel/sys.c')
-rw-r--r--	kernel/sys.c	13
1 file changed, 1 insertion, 12 deletions
diff --git a/kernel/sys.c b/kernel/sys.c
index 84371fdc660b..7ef7f6054c28 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1206,7 +1206,7 @@ asmlinkage long sys_times(struct tms __user * tbuf)
 		struct task_struct *t;
 		cputime_t utime, stime, cutime, cstime;
 
-		read_lock(&tasklist_lock);
+		spin_lock_irq(&tsk->sighand->siglock);
 		utime = tsk->signal->utime;
 		stime = tsk->signal->stime;
 		t = tsk;
@@ -1216,20 +1216,9 @@ asmlinkage long sys_times(struct tms __user * tbuf)
 			t = next_thread(t);
 		} while (t != tsk);
 
-		/*
-		 * While we have tasklist_lock read-locked, no dying thread
-		 * can be updating current->signal->[us]time. Instead,
-		 * we got their counts included in the live thread loop.
-		 * However, another thread can come in right now and
-		 * do a wait call that updates current->signal->c[us]time.
-		 * To make sure we always see that pair updated atomically,
-		 * we take the siglock around fetching them.
-		 */
-		spin_lock_irq(&tsk->sighand->siglock);
 		cutime = tsk->signal->cutime;
 		cstime = tsk->signal->cstime;
 		spin_unlock_irq(&tsk->sighand->siglock);
-		read_unlock(&tasklist_lock);
 
 		tmp.tms_utime = cputime_to_clock_t(utime);
 		tmp.tms_stime = cputime_to_clock_t(stime);
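
For orientation, here is a sketch of how the accounting block in sys_times() reads once this patch is applied, pieced together from the two hunks above. It is not a buildable excerpt: the tsk = current assignment and the body of the per-thread loop are not part of the shown context and are assumed from the enclosing function of this kernel era.

	/*
	 * Sketch only: after the patch, tsk->sighand->siglock alone covers
	 * both the per-thread walk and the fetch of the c[us]time pair,
	 * replacing the read_lock(&tasklist_lock)/read_unlock() pair.
	 */
	struct task_struct *tsk = current;	/* assumed from the enclosing function */
	struct task_struct *t;
	cputime_t utime, stime, cutime, cstime;

	spin_lock_irq(&tsk->sighand->siglock);
	utime = tsk->signal->utime;
	stime = tsk->signal->stime;
	t = tsk;
	do {
		/* per-thread utime/stime accumulation (elided from the hunk context) */
		t = next_thread(t);
	} while (t != tsk);

	cutime = tsk->signal->cutime;
	cstime = tsk->signal->cstime;
	spin_unlock_irq(&tsk->sighand->siglock);

	tmp.tms_utime = cputime_to_clock_t(utime);
	tmp.tms_stime = cputime_to_clock_t(stime);

Taking the siglock before the per-thread walk means a single lock now protects everything sys_times() samples here, which is why the tasklist_lock read-lock and the comment explaining the old two-lock scheme could be dropped.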