Diffstat (limited to 'kernel/posix-cpu-timers.c')
-rw-r--r--  kernel/posix-cpu-timers.c  126
1 file changed, 117 insertions(+), 9 deletions(-)
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 4318c3085788..6f7b869c011d 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -230,6 +230,71 @@ static int cpu_clock_sample(const clockid_t which_clock, struct task_struct *p,
         return 0;
 }
 
+void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
+{
+        struct sighand_struct *sighand;
+        struct signal_struct *sig;
+        struct task_struct *t;
+
+        *times = INIT_CPUTIME;
+
+        rcu_read_lock();
+        sighand = rcu_dereference(tsk->sighand);
+        if (!sighand)
+                goto out;
+
+        sig = tsk->signal;
+
+        t = tsk;
+        do {
+                times->utime = cputime_add(times->utime, t->utime);
+                times->stime = cputime_add(times->stime, t->stime);
+                times->sum_exec_runtime += t->se.sum_exec_runtime;
+
+                t = next_thread(t);
+        } while (t != tsk);
+
+        times->utime = cputime_add(times->utime, sig->utime);
+        times->stime = cputime_add(times->stime, sig->stime);
+        times->sum_exec_runtime += sig->sum_sched_runtime;
+out:
+        rcu_read_unlock();
+}
+
+static void update_gt_cputime(struct task_cputime *a, struct task_cputime *b)
+{
+        if (cputime_gt(b->utime, a->utime))
+                a->utime = b->utime;
+
+        if (cputime_gt(b->stime, a->stime))
+                a->stime = b->stime;
+
+        if (b->sum_exec_runtime > a->sum_exec_runtime)
+                a->sum_exec_runtime = b->sum_exec_runtime;
+}
+
+void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
+{
+        struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
+        struct task_cputime sum;
+        unsigned long flags;
+
+        spin_lock_irqsave(&cputimer->lock, flags);
+        if (!cputimer->running) {
+                cputimer->running = 1;
+                /*
+                 * The POSIX timer interface allows for absolute time expiry
+                 * values through the TIMER_ABSTIME flag, therefore we have
+                 * to synchronize the timer to the clock every time we start
+                 * it.
+                 */
+                thread_group_cputime(tsk, &sum);
+                update_gt_cputime(&cputimer->cputime, &sum);
+        }
+        *times = cputimer->cputime;
+        spin_unlock_irqrestore(&cputimer->lock, flags);
+}
+
 /*
  * Sample a process (thread group) clock for the given group_leader task.
  * Must be called with tasklist_lock held for reading.
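The three helpers added above form a start-once, sample-cheaply scheme: thread_group_cputime() still walks every thread in the group under RCU, but thread_group_cputimer() does that walk only when the group timer is first armed (running == 0), merges the result into the cached totals via update_gt_cputime() so the cache never moves backwards, and from then on just copies totals that the accounting path keeps current under cputimer->lock. Below is a minimal user-space sketch of the same pattern, with a pthread mutex standing in for the kernel spinlock; every name in it (group_cputimer, full_walk_sample, account_tick, group_sample) is illustrative, not a kernel API.

    /*
     * User-space sketch of the thread_group_cputimer() idea: pay for the
     * full per-thread walk once, when a group-wide timer is armed, then let
     * the accounting path keep a cached total that later samples just copy.
     * Build with: cc -pthread sketch.c -o sketch
     */
    #include <pthread.h>
    #include <stdio.h>

    struct group_cputimer {
            pthread_mutex_t lock;           /* stands in for cputimer->lock */
            int running;                    /* is a process-wide timer armed? */
            unsigned long long utime;       /* cached group user time */
            unsigned long long stime;       /* cached group system time */
    };

    static struct group_cputimer ct = { PTHREAD_MUTEX_INITIALIZER, 0, 0, 0 };

    /* stand-in for thread_group_cputime(): the expensive O(nr_threads) walk */
    static void full_walk_sample(unsigned long long *u, unsigned long long *s)
    {
            *u = 1000;      /* pretend these were summed over every thread */
            *s = 500;
    }

    /* what the per-tick accounting path would do while a timer is armed */
    static void account_tick(unsigned long long user)
    {
            pthread_mutex_lock(&ct.lock);
            if (ct.running)                 /* cost is only paid while armed */
                    ct.utime += user;
            pthread_mutex_unlock(&ct.lock);
    }

    /* cheap group sample: seed the cache on first use, then just copy it */
    static void group_sample(unsigned long long *u, unsigned long long *s)
    {
            unsigned long long wu, ws;

            pthread_mutex_lock(&ct.lock);
            if (!ct.running) {
                    ct.running = 1;
                    full_walk_sample(&wu, &ws);
                    /* like update_gt_cputime(): never let the cache go backwards */
                    if (wu > ct.utime)
                            ct.utime = wu;
                    if (ws > ct.stime)
                            ct.stime = ws;
            }
            *u = ct.utime;
            *s = ct.stime;
            pthread_mutex_unlock(&ct.lock);
    }

    int main(void)
    {
            unsigned long long u, s;

            group_sample(&u, &s);           /* first sample does the full walk */
            account_tick(10);               /* ticks keep the cache fresh */
            group_sample(&u, &s);           /* later samples are O(1) */
            printf("utime=%llu stime=%llu\n", u, s);
            return 0;
    }

The point of the running flag is that the per-tick cost is only paid while some process-wide timer is armed; compare stop_process_timers() later in this patch, which clears the flag again once nothing is left to expire.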
@@ -458,7 +523,7 @@ void posix_cpu_timers_exit_group(struct task_struct *tsk)
 {
         struct task_cputime cputime;
 
-        thread_group_cputime(tsk, &cputime);
+        thread_group_cputimer(tsk, &cputime);
         cleanup_timers(tsk->signal->cpu_timers,
                        cputime.utime, cputime.stime, cputime.sum_exec_runtime);
 }
@@ -617,6 +682,33 @@ static void cpu_timer_fire(struct k_itimer *timer)
 }
 
 /*
+ * Sample a process (thread group) timer for the given group_leader task.
+ * Must be called with tasklist_lock held for reading.
+ */
+static int cpu_timer_sample_group(const clockid_t which_clock,
+                                  struct task_struct *p,
+                                  union cpu_time_count *cpu)
+{
+        struct task_cputime cputime;
+
+        thread_group_cputimer(p, &cputime);
+        switch (CPUCLOCK_WHICH(which_clock)) {
+        default:
+                return -EINVAL;
+        case CPUCLOCK_PROF:
+                cpu->cpu = cputime_add(cputime.utime, cputime.stime);
+                break;
+        case CPUCLOCK_VIRT:
+                cpu->cpu = cputime.utime;
+                break;
+        case CPUCLOCK_SCHED:
+                cpu->sched = cputime.sum_exec_runtime + task_delta_exec(p);
+                break;
+        }
+        return 0;
+}
+
+/*
  * Guts of sys_timer_settime for CPU timers.
  * This is called with the timer locked and interrupts disabled.
  * If we return TIMER_RETRY, it's necessary to release the timer's lock
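cpu_timer_sample_group() mirrors cpu_clock_sample_group() but reads the cached group totals: CPUCLOCK_PROF is utime + stime, CPUCLOCK_VIRT is utime alone, and CPUCLOCK_SCHED is the summed sum_exec_runtime plus the not-yet-accounted delta of the running task. From user space, the process-wide CPU clock maintained by this file can be read with clock_getcpuclockid() and clock_gettime(); which CPUCLOCK_* branch services the read depends on how the C library encodes the clockid, so treat that mapping as an assumption rather than a guarantee.

    /*
     * Read this process's CPU-time clock from user space. The clockid
     * returned by clock_getcpuclockid() is serviced by the process-wide
     * sampling code in kernel/posix-cpu-timers.c (the exact CPUCLOCK_*
     * branch depends on the libc encoding).
     * Build with: cc cpuclock.c -o cpuclock -lrt
     */
    #include <stdio.h>
    #include <string.h>
    #include <time.h>

    int main(void)
    {
            clockid_t cid;
            struct timespec ts;
            int err;

            /* pid 0 means "the calling process" */
            err = clock_getcpuclockid(0, &cid);
            if (err != 0) {
                    fprintf(stderr, "clock_getcpuclockid: %s\n", strerror(err));
                    return 1;
            }
            if (clock_gettime(cid, &ts) == -1) {
                    perror("clock_gettime");
                    return 1;
            }
            printf("process CPU time: %ld.%09ld s\n",
                   (long)ts.tv_sec, ts.tv_nsec);
            return 0;
    }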
@@ -677,7 +769,7 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags,
         if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
                 cpu_clock_sample(timer->it_clock, p, &val);
         } else {
-                cpu_clock_sample_group(timer->it_clock, p, &val);
+                cpu_timer_sample_group(timer->it_clock, p, &val);
         }
 
         if (old) {
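This is the timer_settime() path: a per-thread clock is still sampled with cpu_clock_sample(), while a process-wide clock now goes through cpu_timer_sample_group() and therefore the cached cputimer. A small, self-contained user-space program that exercises this path with a process CPU-time timer; the handler name and the one-second expiry are arbitrary choices for the example.

    /*
     * Arm a process-wide CPU-time timer; arming and expiry are what
     * posix_cpu_timer_set() and check_process_timers() handle in the kernel.
     * Build with: cc timer.c -o timer -lrt
     */
    #include <signal.h>
    #include <stdio.h>
    #include <time.h>

    static volatile sig_atomic_t fired;

    static void on_timer(int sig)
    {
            (void)sig;
            fired = 1;
    }

    int main(void)
    {
            struct sigevent sev = { 0 };
            struct itimerspec its = { 0 };
            timer_t tid;

            signal(SIGRTMIN, on_timer);

            sev.sigev_notify = SIGEV_SIGNAL;
            sev.sigev_signo = SIGRTMIN;

            /* CLOCK_PROCESS_CPUTIME_ID counts CPU time of the whole thread group */
            if (timer_create(CLOCK_PROCESS_CPUTIME_ID, &sev, &tid) == -1) {
                    perror("timer_create");
                    return 1;
            }

            its.it_value.tv_sec = 1;        /* fire after 1s of process CPU time */
            if (timer_settime(tid, 0, &its, NULL) == -1) {
                    perror("timer_settime");
                    return 1;
            }

            while (!fired)                  /* burn CPU until the timer fires */
                    ;

            puts("process CPU-time timer fired");
            return 0;
    }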
@@ -825,7 +917,7 @@ void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
                         read_unlock(&tasklist_lock);
                         goto dead;
                 } else {
-                        cpu_clock_sample_group(timer->it_clock, p, &now);
+                        cpu_timer_sample_group(timer->it_clock, p, &now);
                         clear_dead = (unlikely(p->exit_state) &&
                                       thread_group_empty(p));
                 }
@@ -965,6 +1057,19 @@ static void check_thread_timers(struct task_struct *tsk,
         }
 }
 
+static void stop_process_timers(struct task_struct *tsk)
+{
+        struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
+        unsigned long flags;
+
+        if (!cputimer->running)
+                return;
+
+        spin_lock_irqsave(&cputimer->lock, flags);
+        cputimer->running = 0;
+        spin_unlock_irqrestore(&cputimer->lock, flags);
+}
+
 /*
  * Check for any per-thread CPU timers that have fired and move them
  * off the tsk->*_timers list onto the firing list. Per-thread timers
@@ -988,13 +1093,15 @@ static void check_process_timers(struct task_struct *tsk,
             sig->rlim[RLIMIT_CPU].rlim_cur == RLIM_INFINITY &&
             list_empty(&timers[CPUCLOCK_VIRT]) &&
             cputime_eq(sig->it_virt_expires, cputime_zero) &&
-            list_empty(&timers[CPUCLOCK_SCHED]))
+            list_empty(&timers[CPUCLOCK_SCHED])) {
+                stop_process_timers(tsk);
                 return;
+        }
 
         /*
          * Collect the current process totals.
          */
-        thread_group_cputime(tsk, &cputime);
+        thread_group_cputimer(tsk, &cputime);
         utime = cputime.utime;
         ptime = cputime_add(utime, cputime.stime);
         sum_sched_runtime = cputime.sum_exec_runtime;
@@ -1165,7 +1272,7 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
                         clear_dead_task(timer, now);
                         goto out_unlock;
                 }
-                cpu_clock_sample_group(timer->it_clock, p, &now);
+                cpu_timer_sample_group(timer->it_clock, p, &now);
                 bump_cpu_timer(timer, now);
                 /* Leave the tasklist_lock locked for the call below. */
         }
@@ -1260,11 +1367,12 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
         if (!task_cputime_zero(&sig->cputime_expires)) {
                 struct task_cputime group_sample;
 
-                thread_group_cputime(tsk, &group_sample);
+                thread_group_cputimer(tsk, &group_sample);
                 if (task_cputime_expired(&group_sample, &sig->cputime_expires))
                         return 1;
         }
-        return 0;
+
+        return sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY;
 }
 
 /*
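The extra return condition closes a gap opened by stop_process_timers(): once no posix timers are armed the group timer stops running and the cached sample can no longer be trusted to catch a finite RLIMIT_CPU, so the fastpath keeps reporting work whenever the soft limit is set and check_process_timers() continues to enforce it. The user-visible contract being protected is ordinary setrlimit()/SIGXCPU behaviour, sketched below with an arbitrary one-second soft limit.

    /*
     * RLIMIT_CPU enforcement goes through the same check_process_timers()
     * path; this only demonstrates the user-visible contract (SIGXCPU after
     * rlim_cur seconds of CPU time), not the kernel internals.
     */
    #include <signal.h>
    #include <stdio.h>
    #include <sys/resource.h>
    #include <unistd.h>

    static void on_xcpu(int sig)
    {
            (void)sig;
            /* async-signal-safe reporting, then exit */
            (void)write(STDOUT_FILENO, "SIGXCPU: soft CPU limit hit\n", 28);
            _exit(0);
    }

    int main(void)
    {
            struct rlimit rl = { .rlim_cur = 1, .rlim_max = 2 };

            signal(SIGXCPU, on_xcpu);

            /* 1s soft limit; the kernel raises SIGXCPU when it is crossed */
            if (setrlimit(RLIMIT_CPU, &rl) == -1) {
                    perror("setrlimit");
                    return 1;
            }

            for (;;)        /* spin until the limit fires */
                    ;
    }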
@@ -1342,7 +1450,7 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
         struct list_head *head;
 
         BUG_ON(clock_idx == CPUCLOCK_SCHED);
-        cpu_clock_sample_group(clock_idx, tsk, &now);
+        cpu_timer_sample_group(clock_idx, tsk, &now);
 
         if (oldval) {
                 if (!cputime_eq(*oldval, cputime_zero)) {
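In this era of the code, set_process_cpu_timer() also arms the classic process-wide interval timers (ITIMER_PROF and ITIMER_VIRTUAL from do_setitimer()), which is why it now samples through cpu_timer_sample_group() as well. A short user-space sketch of that interface follows; the 100 ms period and the tick count are arbitrary example values.

    /*
     * setitimer(ITIMER_PROF, ...) arms a process-wide CPU-time expiry that
     * set_process_cpu_timer() maintains; this shows only the user-space side.
     */
    #include <signal.h>
    #include <stdio.h>
    #include <sys/time.h>

    static volatile sig_atomic_t ticks;

    static void on_prof(int sig)
    {
            (void)sig;
            ticks++;
    }

    int main(void)
    {
            struct itimerval it = { 0 };

            signal(SIGPROF, on_prof);

            /* SIGPROF every 100 ms of combined user+system CPU time */
            it.it_interval.tv_usec = 100000;
            it.it_value.tv_usec = 100000;
            if (setitimer(ITIMER_PROF, &it, NULL) == -1) {
                    perror("setitimer");
                    return 1;
            }

            while (ticks < 10)      /* burn CPU until ten profiling ticks arrive */
                    ;
            printf("got %d SIGPROF ticks\n", (int)ticks);
            return 0;
    }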