Diffstat (limited to 'kernel/posix-cpu-timers.c')
 kernel/posix-cpu-timers.c | 48 +++++++++++++++++++++---------------------------
 1 file changed, 21 insertions(+), 27 deletions(-)
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 00bb252f29a2..f66bdd33a6c6 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -232,31 +232,24 @@ static int cpu_clock_sample(const clockid_t which_clock, struct task_struct *p,
 
 void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
 {
-	struct sighand_struct *sighand;
-	struct signal_struct *sig;
+	struct signal_struct *sig = tsk->signal;
 	struct task_struct *t;
 
-	*times = INIT_CPUTIME;
+	times->utime = sig->utime;
+	times->stime = sig->stime;
+	times->sum_exec_runtime = sig->sum_sched_runtime;
 
 	rcu_read_lock();
-	sighand = rcu_dereference(tsk->sighand);
-	if (!sighand)
+	/* make sure we can trust tsk->thread_group list */
+	if (!likely(pid_alive(tsk)))
 		goto out;
 
-	sig = tsk->signal;
-
 	t = tsk;
 	do {
 		times->utime = cputime_add(times->utime, t->utime);
 		times->stime = cputime_add(times->stime, t->stime);
 		times->sum_exec_runtime += t->se.sum_exec_runtime;
-
-		t = next_thread(t);
-	} while (t != tsk);
-
-	times->utime = cputime_add(times->utime, sig->utime);
-	times->stime = cputime_add(times->stime, sig->stime);
-	times->sum_exec_runtime += sig->sum_sched_runtime;
+	} while_each_thread(tsk, t);
 out:
 	rcu_read_unlock();
 }
@@ -363,7 +356,7 @@ int posix_cpu_clock_get(const clockid_t which_clock, struct timespec *tp)
 				}
 			} else {
 				read_lock(&tasklist_lock);
-				if (thread_group_leader(p) && p->signal) {
+				if (thread_group_leader(p) && p->sighand) {
 					error =
 					    cpu_clock_sample_group(which_clock,
 								   p, &rtn);
@@ -439,7 +432,7 @@ int posix_cpu_timer_del(struct k_itimer *timer)
 
 	if (likely(p != NULL)) {
 		read_lock(&tasklist_lock);
-		if (unlikely(p->signal == NULL)) {
+		if (unlikely(p->sighand == NULL)) {
 			/*
 			 * We raced with the reaping of the task.
 			 * The deletion should have cleared us off the list.
@@ -691,10 +684,10 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags,
 	read_lock(&tasklist_lock);
 	/*
 	 * We need the tasklist_lock to protect against reaping that
-	 * clears p->signal. If p has just been reaped, we can no
+	 * clears p->sighand. If p has just been reaped, we can no
 	 * longer get any information about it at all.
 	 */
-	if (unlikely(p->signal == NULL)) {
+	if (unlikely(p->sighand == NULL)) {
 		read_unlock(&tasklist_lock);
 		put_task_struct(p);
 		timer->it.cpu.task = NULL;
@@ -863,7 +856,7 @@ void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
 		clear_dead = p->exit_state;
 	} else {
 		read_lock(&tasklist_lock);
-		if (unlikely(p->signal == NULL)) {
+		if (unlikely(p->sighand == NULL)) {
 			/*
 			 * The process has been reaped.
 			 * We can't even collect a sample any more.
@@ -1199,7 +1192,7 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
 		spin_lock(&p->sighand->siglock);
 	} else {
 		read_lock(&tasklist_lock);
-		if (unlikely(p->signal == NULL)) {
+		if (unlikely(p->sighand == NULL)) {
 			/*
 			 * The process has been reaped.
 			 * We can't even collect a sample any more.
@@ -1279,10 +1272,6 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
 {
 	struct signal_struct *sig;
 
-	/* tsk == current, ensure it is safe to use ->signal/sighand */
-	if (unlikely(tsk->exit_state))
-		return 0;
-
 	if (!task_cputime_zero(&tsk->cputime_expires)) {
 		struct task_cputime task_sample = {
 			.utime = tsk->utime,
@@ -1298,7 +1287,10 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
 	if (sig->cputimer.running) {
 		struct task_cputime group_sample;
 
-		thread_group_cputimer(tsk, &group_sample);
+		spin_lock(&sig->cputimer.lock);
+		group_sample = sig->cputimer.cputime;
+		spin_unlock(&sig->cputimer.lock);
+
 		if (task_cputime_expired(&group_sample, &sig->cputime_expires))
 			return 1;
 	}
@@ -1315,6 +1307,7 @@ void run_posix_cpu_timers(struct task_struct *tsk)
 {
 	LIST_HEAD(firing);
 	struct k_itimer *timer, *next;
+	unsigned long flags;
 
 	BUG_ON(!irqs_disabled());
 
@@ -1325,7 +1318,8 @@ void run_posix_cpu_timers(struct task_struct *tsk)
 	if (!fastpath_timer_check(tsk))
 		return;
 
-	spin_lock(&tsk->sighand->siglock);
+	if (!lock_task_sighand(tsk, &flags))
+		return;
 	/*
 	 * Here we take off tsk->signal->cpu_timers[N] and
 	 * tsk->cpu_timers[N] all the timers that are firing, and
@@ -1347,7 +1341,7 @@ void run_posix_cpu_timers(struct task_struct *tsk)
 	 * that gets the timer lock before we do will give it up and
 	 * spin until we've taken care of that timer below.
 	 */
-	spin_unlock(&tsk->sighand->siglock);
+	unlock_task_sighand(tsk, &flags);
 
 	/*
 	 * Now that all the timers on our list have the firing flag,