Diffstat (limited to 'kernel/sys.c')
-rw-r--r--	kernel/sys.c	73
1 file changed, 14 insertions, 59 deletions
diff --git a/kernel/sys.c b/kernel/sys.c
index c93d37f71aef..7ef7f6054c28 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1202,69 +1202,24 @@ asmlinkage long sys_times(struct tms __user * tbuf)
 	 */
 	if (tbuf) {
 		struct tms tmp;
+		struct task_struct *tsk = current;
+		struct task_struct *t;
 		cputime_t utime, stime, cutime, cstime;
 
-#ifdef CONFIG_SMP
-		if (thread_group_empty(current)) {
-			/*
-			 * Single thread case without the use of any locks.
-			 *
-			 * We may race with release_task if two threads are
-			 * executing. However, release task first adds up the
-			 * counters (__exit_signal) before removing the task
-			 * from the process tasklist (__unhash_process).
-			 * __exit_signal also acquires and releases the
-			 * siglock which results in the proper memory ordering
-			 * so that the list modifications are always visible
-			 * after the counters have been updated.
-			 *
-			 * If the counters have been updated by the second thread
-			 * but the thread has not yet been removed from the list
-			 * then the other branch will be executing which will
-			 * block on tasklist_lock until the exit handling of the
-			 * other task is finished.
-			 *
-			 * This also implies that the sighand->siglock cannot
-			 * be held by another processor. So we can also
-			 * skip acquiring that lock.
-			 */
-			utime = cputime_add(current->signal->utime, current->utime);
-			stime = cputime_add(current->signal->utime, current->stime);
-			cutime = current->signal->cutime;
-			cstime = current->signal->cstime;
-		} else
-#endif
-		{
+		spin_lock_irq(&tsk->sighand->siglock);
+		utime = tsk->signal->utime;
+		stime = tsk->signal->stime;
+		t = tsk;
+		do {
+			utime = cputime_add(utime, t->utime);
+			stime = cputime_add(stime, t->stime);
+			t = next_thread(t);
+		} while (t != tsk);
 
-			/* Process with multiple threads */
-			struct task_struct *tsk = current;
-			struct task_struct *t;
+		cutime = tsk->signal->cutime;
+		cstime = tsk->signal->cstime;
+		spin_unlock_irq(&tsk->sighand->siglock);
 
-			read_lock(&tasklist_lock);
-			utime = tsk->signal->utime;
-			stime = tsk->signal->stime;
-			t = tsk;
-			do {
-				utime = cputime_add(utime, t->utime);
-				stime = cputime_add(stime, t->stime);
-				t = next_thread(t);
-			} while (t != tsk);
-
-			/*
-			 * While we have tasklist_lock read-locked, no dying thread
-			 * can be updating current->signal->[us]time. Instead,
-			 * we got their counts included in the live thread loop.
-			 * However, another thread can come in right now and
-			 * do a wait call that updates current->signal->c[us]time.
-			 * To make sure we always see that pair updated atomically,
-			 * we take the siglock around fetching them.
-			 */
-			spin_lock_irq(&tsk->sighand->siglock);
-			cutime = tsk->signal->cutime;
-			cstime = tsk->signal->cstime;
-			spin_unlock_irq(&tsk->sighand->siglock);
-			read_unlock(&tasklist_lock);
-		}
 		tmp.tms_utime = cputime_to_clock_t(utime);
 		tmp.tms_stime = cputime_to_clock_t(stime);
 		tmp.tms_cutime = cputime_to_clock_t(cutime);
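
For readers skimming the change: the patch drops both of the old strategies (the CONFIG_SMP lockless single-thread fast path and the tasklist_lock-protected thread walk) and instead takes sighand->siglock once, covering the live-thread summation loop and the cutime/cstime pair in a single critical section. The plain C sketch below illustrates that accumulation pattern in userspace; every name in it (thread_info, signal_info, group_times) is a hypothetical stand-in, with a pthread mutex playing the role of sighand->siglock. It is an illustration under those assumptions, not kernel code.

	/*
	 * Minimal userspace sketch of the accumulation pattern the new
	 * sys_times() body uses: walk the thread group as a circular
	 * list, add each live thread's utime/stime on top of the totals
	 * already accumulated in the signal struct, and read the
	 * c[us]time pair under the same lock so the snapshot stays
	 * consistent.  All names are hypothetical stand-ins.
	 */
	#include <pthread.h>
	#include <stdio.h>

	struct thread_info {
		unsigned long utime;		/* per-thread user time */
		unsigned long stime;		/* per-thread system time */
		struct thread_info *next;	/* circular thread-group list */
	};

	struct signal_info {
		unsigned long utime, stime;	/* times of already-exited threads */
		unsigned long cutime, cstime;	/* times of reaped children */
		pthread_mutex_t siglock;	/* stand-in for sighand->siglock */
	};

	/* Analogue of the patched loop: one lock covers the summation and
	 * the cutime/cstime reads, so no process-wide lock is needed. */
	static void group_times(struct signal_info *sig, struct thread_info *tsk,
				unsigned long out[4])
	{
		struct thread_info *t = tsk;
		unsigned long utime, stime;

		pthread_mutex_lock(&sig->siglock);
		utime = sig->utime;
		stime = sig->stime;
		do {
			utime += t->utime;
			stime += t->stime;
			t = t->next;
		} while (t != tsk);
		out[0] = utime;
		out[1] = stime;
		out[2] = sig->cutime;
		out[3] = sig->cstime;
		pthread_mutex_unlock(&sig->siglock);
	}

	int main(void)
	{
		struct thread_info a = { 5, 2, NULL };
		struct thread_info b = { 7, 3, &a };
		struct signal_info sig = { 1, 1, 10, 4, PTHREAD_MUTEX_INITIALIZER };
		unsigned long out[4];

		a.next = &b;			/* close the two-thread circle */
		group_times(&sig, &a, out);
		/* prints utime=13 stime=6 cutime=10 cstime=4 */
		printf("utime=%lu stime=%lu cutime=%lu cstime=%lu\n",
		       out[0], out[1], out[2], out[3]);
		return 0;
	}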