Diffstat (limited to 'kernel/posix-cpu-timers.c')
-rw-r--r--   kernel/posix-cpu-timers.c   136
1 file changed, 78 insertions, 58 deletions
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index db107c9bbc05..e976e505648d 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -261,6 +261,40 @@ out:
 	rcu_read_unlock();
 }
 
+static void update_gt_cputime(struct task_cputime *a, struct task_cputime *b)
+{
+	if (cputime_gt(b->utime, a->utime))
+		a->utime = b->utime;
+
+	if (cputime_gt(b->stime, a->stime))
+		a->stime = b->stime;
+
+	if (b->sum_exec_runtime > a->sum_exec_runtime)
+		a->sum_exec_runtime = b->sum_exec_runtime;
+}
+
+void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
+{
+	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
+	struct task_cputime sum;
+	unsigned long flags;
+
+	spin_lock_irqsave(&cputimer->lock, flags);
+	if (!cputimer->running) {
+		cputimer->running = 1;
+		/*
+		 * The POSIX timer interface allows for absolute time expiry
+		 * values through the TIMER_ABSTIME flag, therefore we have
+		 * to synchronize the timer to the clock every time we start
+		 * it.
+		 */
+		thread_group_cputime(tsk, &sum);
+		update_gt_cputime(&cputimer->cputime, &sum);
+	}
+	*times = cputimer->cputime;
+	spin_unlock_irqrestore(&cputimer->lock, flags);
+}
+
 /*
  * Sample a process (thread group) clock for the given group_leader task.
  * Must be called with tasklist_lock held for reading.
@@ -488,7 +522,7 @@ void posix_cpu_timers_exit_group(struct task_struct *tsk)
 {
 	struct task_cputime cputime;
 
-	thread_group_cputime(tsk, &cputime);
+	thread_group_cputimer(tsk, &cputime);
 	cleanup_timers(tsk->signal->cpu_timers,
 		       cputime.utime, cputime.stime, cputime.sum_exec_runtime);
 }
@@ -507,29 +541,6 @@ static void clear_dead_task(struct k_itimer *timer, union cpu_time_count now)
 }
 
 /*
- * Enable the process wide cpu timer accounting.
- *
- * serialized using ->sighand->siglock
- */
-static void start_process_timers(struct task_struct *tsk)
-{
-	tsk->signal->cputimer.running = 1;
-	barrier();
-}
-
-/*
- * Release the process wide timer accounting -- timer stops ticking when
- * nobody cares about it.
- *
- * serialized using ->sighand->siglock
- */
-static void stop_process_timers(struct task_struct *tsk)
-{
-	tsk->signal->cputimer.running = 0;
-	barrier();
-}
-
-/*
  * Insert the timer on the appropriate list before any timers that
  * expire later. This must be called with the tasklist_lock held
  * for reading, and interrupts disabled.
@@ -549,9 +560,6 @@ static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
 	BUG_ON(!irqs_disabled());
 	spin_lock(&p->sighand->siglock);
 
-	if (!CPUCLOCK_PERTHREAD(timer->it_clock))
-		start_process_timers(p);
-
 	listpos = head;
 	if (CPUCLOCK_WHICH(timer->it_clock) == CPUCLOCK_SCHED) {
 		list_for_each_entry(next, head, entry) {
@@ -673,6 +681,33 @@ static void cpu_timer_fire(struct k_itimer *timer)
 }
 
 /*
+ * Sample a process (thread group) timer for the given group_leader task.
+ * Must be called with tasklist_lock held for reading.
+ */
+static int cpu_timer_sample_group(const clockid_t which_clock,
+				  struct task_struct *p,
+				  union cpu_time_count *cpu)
+{
+	struct task_cputime cputime;
+
+	thread_group_cputimer(p, &cputime);
+	switch (CPUCLOCK_WHICH(which_clock)) {
+	default:
+		return -EINVAL;
+	case CPUCLOCK_PROF:
+		cpu->cpu = cputime_add(cputime.utime, cputime.stime);
+		break;
+	case CPUCLOCK_VIRT:
+		cpu->cpu = cputime.utime;
+		break;
+	case CPUCLOCK_SCHED:
+		cpu->sched = cputime.sum_exec_runtime + task_delta_exec(p);
+		break;
+	}
+	return 0;
+}
+
+/*
  * Guts of sys_timer_settime for CPU timers.
  * This is called with the timer locked and interrupts disabled.
  * If we return TIMER_RETRY, it's necessary to release the timer's lock
@@ -733,7 +768,7 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags,
 	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
 		cpu_clock_sample(timer->it_clock, p, &val);
 	} else {
-		cpu_clock_sample_group(timer->it_clock, p, &val);
+		cpu_timer_sample_group(timer->it_clock, p, &val);
 	}
 
 	if (old) {
@@ -881,7 +916,7 @@ void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
 			read_unlock(&tasklist_lock);
 			goto dead;
 		} else {
-			cpu_clock_sample_group(timer->it_clock, p, &now);
+			cpu_timer_sample_group(timer->it_clock, p, &now);
 			clear_dead = (unlikely(p->exit_state) &&
 				      thread_group_empty(p));
 		}
@@ -1021,6 +1056,19 @@ static void check_thread_timers(struct task_struct *tsk,
 	}
 }
 
+static void stop_process_timers(struct task_struct *tsk)
+{
+	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
+	unsigned long flags;
+
+	if (!cputimer->running)
+		return;
+
+	spin_lock_irqsave(&cputimer->lock, flags);
+	cputimer->running = 0;
+	spin_unlock_irqrestore(&cputimer->lock, flags);
+}
+
 /*
  * Check for any per-thread CPU timers that have fired and move them
  * off the tsk->*_timers list onto the firing list. Per-thread timers
@@ -1223,7 +1271,7 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
 			clear_dead_task(timer, now);
 			goto out_unlock;
 		}
-		cpu_clock_sample_group(timer->it_clock, p, &now);
+		cpu_timer_sample_group(timer->it_clock, p, &now);
 		bump_cpu_timer(timer, now);
 		/* Leave the tasklist_lock locked for the call below. */
 	}
@@ -1388,33 +1436,6 @@ void run_posix_cpu_timers(struct task_struct *tsk)
 }
 
 /*
- * Sample a process (thread group) timer for the given group_leader task.
- * Must be called with tasklist_lock held for reading.
- */
-static int cpu_timer_sample_group(const clockid_t which_clock,
-				  struct task_struct *p,
-				  union cpu_time_count *cpu)
-{
-	struct task_cputime cputime;
-
-	thread_group_cputimer(p, &cputime);
-	switch (CPUCLOCK_WHICH(which_clock)) {
-	default:
-		return -EINVAL;
-	case CPUCLOCK_PROF:
-		cpu->cpu = cputime_add(cputime.utime, cputime.stime);
-		break;
-	case CPUCLOCK_VIRT:
-		cpu->cpu = cputime.utime;
-		break;
-	case CPUCLOCK_SCHED:
-		cpu->sched = cputime.sum_exec_runtime + task_delta_exec(p);
-		break;
-	}
-	return 0;
-}
-
-/*
  * Set one of the process-wide special case CPU timers.
  * The tsk->sighand->siglock must be held by the caller.
  * The *newval argument is relative and we update it to be absolute, *oldval
@@ -1427,7 +1448,6 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
 	struct list_head *head;
 
 	BUG_ON(clock_idx == CPUCLOCK_SCHED);
-	start_process_timers(tsk);
 	cpu_timer_sample_group(clock_idx, tsk, &now);
 
 	if (oldval) {
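
For reference, the user-visible case this change is about (a process-wide CPU timer armed with an absolute expiry through TIMER_ABSTIME) can be exercised from user space. The sketch below is an illustration only and not part of the commit; it assumes a Linux/glibc system (link with -lrt on older glibc).

/*
 * Illustration only (not from the patch): arm an absolute-expiry timer on
 * the per-process CPU clock, the case the clock sync in
 * thread_group_cputimer() is there to keep correct.
 */
#define _POSIX_C_SOURCE 200809L
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

int main(void)
{
	timer_t timer;
	struct sigevent sev = {
		.sigev_notify = SIGEV_SIGNAL,
		.sigev_signo  = SIGALRM,	/* default action terminates the process */
	};
	struct itimerspec its = { 0 };
	struct timespec now;
	volatile unsigned long spin = 0;

	/* Timer on the process-wide CPU clock handled by posix-cpu-timers.c. */
	if (timer_create(CLOCK_PROCESS_CPUTIME_ID, &sev, &timer) == -1) {
		perror("timer_create");
		return EXIT_FAILURE;
	}

	/* Read the current process CPU time... */
	if (clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &now) == -1) {
		perror("clock_gettime");
		return EXIT_FAILURE;
	}

	/*
	 * ...and request an absolute expiry 100ms of CPU time from now.
	 * If the group cputimer is not synchronized to the clock when it
	 * starts, such an absolute expiry is compared against a stale value.
	 */
	its.it_value = now;
	its.it_value.tv_nsec += 100 * 1000 * 1000;
	if (its.it_value.tv_nsec >= 1000000000L) {
		its.it_value.tv_sec += 1;
		its.it_value.tv_nsec -= 1000000000L;
	}
	if (timer_settime(timer, TIMER_ABSTIME, &its, NULL) == -1) {
		perror("timer_settime");
		return EXIT_FAILURE;
	}

	/* Burn CPU until SIGALRM arrives. */
	for (;;)
		spin++;
}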