Diffstat (limited to 'kernel/posix-cpu-timers.c')
-rw-r--r-- | kernel/posix-cpu-timers.c | 162
1 files changed, 89 insertions, 73 deletions
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index bece7c0b67b2..5c9dc228747b 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -8,17 +8,18 @@
 #include <linux/math64.h>
 #include <asm/uaccess.h>
 #include <linux/kernel_stat.h>
+#include <trace/events/timer.h>
 
 /*
  * Called after updating RLIMIT_CPU to set timer expiration if necessary.
  */
 void update_rlimit_cpu(unsigned long rlim_new)
 {
-	cputime_t cputime;
+	cputime_t cputime = secs_to_cputime(rlim_new);
+	struct signal_struct *const sig = current->signal;
 
-	cputime = secs_to_cputime(rlim_new);
-	if (cputime_eq(current->signal->it_prof_expires, cputime_zero) ||
-	    cputime_gt(current->signal->it_prof_expires, cputime)) {
+	if (cputime_eq(sig->it[CPUCLOCK_PROF].expires, cputime_zero) ||
+	    cputime_gt(sig->it[CPUCLOCK_PROF].expires, cputime)) {
 		spin_lock_irq(&current->sighand->siglock);
 		set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL);
 		spin_unlock_irq(&current->sighand->siglock);
@@ -521,11 +522,12 @@ void posix_cpu_timers_exit(struct task_struct *tsk)
 }
 void posix_cpu_timers_exit_group(struct task_struct *tsk)
 {
-	struct task_cputime cputime;
+	struct signal_struct *const sig = tsk->signal;
 
-	thread_group_cputimer(tsk, &cputime);
 	cleanup_timers(tsk->signal->cpu_timers,
-		       cputime.utime, cputime.stime, cputime.sum_exec_runtime);
+		       cputime_add(tsk->utime, sig->utime),
+		       cputime_add(tsk->stime, sig->stime),
+		       tsk->se.sum_exec_runtime + sig->sum_sched_runtime);
 }
 
 static void clear_dead_task(struct k_itimer *timer, union cpu_time_count now)
@@ -541,6 +543,17 @@ static void clear_dead_task(struct k_itimer *timer, union cpu_time_count now)
 					     now);
 }
 
+static inline int expires_gt(cputime_t expires, cputime_t new_exp)
+{
+	return cputime_eq(expires, cputime_zero) ||
+	       cputime_gt(expires, new_exp);
+}
+
+static inline int expires_le(cputime_t expires, cputime_t new_exp)
+{
+	return !cputime_eq(expires, cputime_zero) &&
+	       cputime_le(expires, new_exp);
+}
 /*
  * Insert the timer on the appropriate list before any timers that
  * expire later.  This must be called with the tasklist_lock held
@@ -585,34 +598,32 @@ static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
 	 */
 
 	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
+		union cpu_time_count *exp = &nt->expires;
+
 		switch (CPUCLOCK_WHICH(timer->it_clock)) {
 		default:
 			BUG();
 		case CPUCLOCK_PROF:
-			if (cputime_eq(p->cputime_expires.prof_exp,
-				       cputime_zero) ||
-			    cputime_gt(p->cputime_expires.prof_exp,
-				       nt->expires.cpu))
-				p->cputime_expires.prof_exp =
-					nt->expires.cpu;
+			if (expires_gt(p->cputime_expires.prof_exp,
+				       exp->cpu))
+				p->cputime_expires.prof_exp = exp->cpu;
 			break;
 		case CPUCLOCK_VIRT:
-			if (cputime_eq(p->cputime_expires.virt_exp,
-				       cputime_zero) ||
-			    cputime_gt(p->cputime_expires.virt_exp,
-				       nt->expires.cpu))
-				p->cputime_expires.virt_exp =
-					nt->expires.cpu;
+			if (expires_gt(p->cputime_expires.virt_exp,
+				       exp->cpu))
+				p->cputime_expires.virt_exp = exp->cpu;
 			break;
 		case CPUCLOCK_SCHED:
 			if (p->cputime_expires.sched_exp == 0 ||
-			    p->cputime_expires.sched_exp >
-					nt->expires.sched)
+			    p->cputime_expires.sched_exp > exp->sched)
 				p->cputime_expires.sched_exp =
-					nt->expires.sched;
+					exp->sched;
 			break;
 		}
 	} else {
+		struct signal_struct *const sig = p->signal;
+		union cpu_time_count *exp = &timer->it.cpu.expires;
+
 		/*
 		 * For a process timer, set the cached expiration time.
 		 */
@@ -620,30 +631,23 @@ static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
 		default:
 			BUG();
 		case CPUCLOCK_VIRT:
-			if (!cputime_eq(p->signal->it_virt_expires,
-					cputime_zero) &&
-			    cputime_lt(p->signal->it_virt_expires,
-				       timer->it.cpu.expires.cpu))
+			if (expires_le(sig->it[CPUCLOCK_VIRT].expires,
+				       exp->cpu))
 				break;
-			p->signal->cputime_expires.virt_exp =
-				timer->it.cpu.expires.cpu;
+			sig->cputime_expires.virt_exp = exp->cpu;
 			break;
 		case CPUCLOCK_PROF:
-			if (!cputime_eq(p->signal->it_prof_expires,
-					cputime_zero) &&
-			    cputime_lt(p->signal->it_prof_expires,
-				       timer->it.cpu.expires.cpu))
+			if (expires_le(sig->it[CPUCLOCK_PROF].expires,
+				       exp->cpu))
 				break;
-			i = p->signal->rlim[RLIMIT_CPU].rlim_cur;
+			i = sig->rlim[RLIMIT_CPU].rlim_cur;
 			if (i != RLIM_INFINITY &&
-			    i <= cputime_to_secs(timer->it.cpu.expires.cpu))
+			    i <= cputime_to_secs(exp->cpu))
 				break;
-			p->signal->cputime_expires.prof_exp =
-				timer->it.cpu.expires.cpu;
+			sig->cputime_expires.prof_exp = exp->cpu;
 			break;
 		case CPUCLOCK_SCHED:
-			p->signal->cputime_expires.sched_exp =
-				timer->it.cpu.expires.sched;
+			sig->cputime_expires.sched_exp = exp->sched;
 			break;
 		}
 	}
@@ -1070,6 +1074,40 @@ static void stop_process_timers(struct task_struct *tsk)
 	spin_unlock_irqrestore(&cputimer->lock, flags);
 }
 
+static u32 onecputick;
+
+static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
+			     cputime_t *expires, cputime_t cur_time, int signo)
+{
+	if (cputime_eq(it->expires, cputime_zero))
+		return;
+
+	if (cputime_ge(cur_time, it->expires)) {
+		if (!cputime_eq(it->incr, cputime_zero)) {
+			it->expires = cputime_add(it->expires, it->incr);
+			it->error += it->incr_error;
+			if (it->error >= onecputick) {
+				it->expires = cputime_sub(it->expires,
+							  cputime_one_jiffy);
+				it->error -= onecputick;
+			}
+		} else {
+			it->expires = cputime_zero;
+		}
+
+		trace_itimer_expire(signo == SIGPROF ?
+				    ITIMER_PROF : ITIMER_VIRTUAL,
+				    tsk->signal->leader_pid, cur_time);
+		__group_send_sig_info(signo, SEND_SIG_PRIV, tsk);
+	}
+
+	if (!cputime_eq(it->expires, cputime_zero) &&
+	    (cputime_eq(*expires, cputime_zero) ||
+	     cputime_lt(it->expires, *expires))) {
+		*expires = it->expires;
+	}
+}
+
 /*
  * Check for any per-thread CPU timers that have fired and move them
  * off the tsk->*_timers list onto the firing list.  Per-thread timers
@@ -1089,10 +1127,10 @@ static void check_process_timers(struct task_struct *tsk,
 	 * Don't sample the current process CPU clocks if there are no timers.
 	 */
 	if (list_empty(&timers[CPUCLOCK_PROF]) &&
-	    cputime_eq(sig->it_prof_expires, cputime_zero) &&
+	    cputime_eq(sig->it[CPUCLOCK_PROF].expires, cputime_zero) &&
 	    sig->rlim[RLIMIT_CPU].rlim_cur == RLIM_INFINITY &&
 	    list_empty(&timers[CPUCLOCK_VIRT]) &&
-	    cputime_eq(sig->it_virt_expires, cputime_zero) &&
+	    cputime_eq(sig->it[CPUCLOCK_VIRT].expires, cputime_zero) &&
 	    list_empty(&timers[CPUCLOCK_SCHED])) {
 		stop_process_timers(tsk);
 		return;
@@ -1152,38 +1190,11 @@ static void check_process_timers(struct task_struct *tsk,
 	/*
 	 * Check for the special case process timers.
 	 */
-	if (!cputime_eq(sig->it_prof_expires, cputime_zero)) {
-		if (cputime_ge(ptime, sig->it_prof_expires)) {
-			/* ITIMER_PROF fires and reloads. */
-			sig->it_prof_expires = sig->it_prof_incr;
-			if (!cputime_eq(sig->it_prof_expires, cputime_zero)) {
-				sig->it_prof_expires = cputime_add(
-					sig->it_prof_expires, ptime);
-			}
-			__group_send_sig_info(SIGPROF, SEND_SIG_PRIV, tsk);
-		}
-		if (!cputime_eq(sig->it_prof_expires, cputime_zero) &&
-		    (cputime_eq(prof_expires, cputime_zero) ||
-		     cputime_lt(sig->it_prof_expires, prof_expires))) {
-			prof_expires = sig->it_prof_expires;
-		}
-	}
-	if (!cputime_eq(sig->it_virt_expires, cputime_zero)) {
-		if (cputime_ge(utime, sig->it_virt_expires)) {
-			/* ITIMER_VIRTUAL fires and reloads. */
-			sig->it_virt_expires = sig->it_virt_incr;
-			if (!cputime_eq(sig->it_virt_expires, cputime_zero)) {
-				sig->it_virt_expires = cputime_add(
-					sig->it_virt_expires, utime);
-			}
-			__group_send_sig_info(SIGVTALRM, SEND_SIG_PRIV, tsk);
-		}
-		if (!cputime_eq(sig->it_virt_expires, cputime_zero) &&
-		    (cputime_eq(virt_expires, cputime_zero) ||
-		     cputime_lt(sig->it_virt_expires, virt_expires))) {
-			virt_expires = sig->it_virt_expires;
-		}
-	}
+	check_cpu_itimer(tsk, &sig->it[CPUCLOCK_PROF], &prof_expires, ptime,
+			 SIGPROF);
+	check_cpu_itimer(tsk, &sig->it[CPUCLOCK_VIRT], &virt_expires, utime,
+			 SIGVTALRM);
+
 	if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
 		unsigned long psecs = cputime_to_secs(ptime);
 		cputime_t x;
@@ -1456,7 +1467,7 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
 		if (!cputime_eq(*oldval, cputime_zero)) {
 			if (cputime_le(*oldval, now.cpu)) {
 				/* Just about to fire. */
-				*oldval = jiffies_to_cputime(1);
+				*oldval = cputime_one_jiffy;
 			} else {
 				*oldval = cputime_sub(*oldval, now.cpu);
 			}
@@ -1702,10 +1713,15 @@ static __init int init_posix_cpu_timers(void)
 		.nsleep = thread_cpu_nsleep,
 		.nsleep_restart = thread_cpu_nsleep_restart,
 	};
+	struct timespec ts;
 
 	register_posix_clock(CLOCK_PROCESS_CPUTIME_ID, &process);
 	register_posix_clock(CLOCK_THREAD_CPUTIME_ID, &thread);
 
+	cputime_to_timespec(cputime_one_jiffy, &ts);
+	onecputick = ts.tv_nsec;
+	WARN_ON(ts.tv_sec != 0);
+
 	return 0;
 }
 __initcall(init_posix_cpu_timers);