Diffstat (limited to 'kernel/posix-cpu-timers.c')
 -rw-r--r--  kernel/posix-cpu-timers.c | 155
 1 file changed, 85 insertions(+), 70 deletions(-)
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index e33a21cb9407..5c9dc228747b 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -8,17 +8,18 @@
 #include <linux/math64.h>
 #include <asm/uaccess.h>
 #include <linux/kernel_stat.h>
+#include <trace/events/timer.h>
 
 /*
  * Called after updating RLIMIT_CPU to set timer expiration if necessary.
  */
 void update_rlimit_cpu(unsigned long rlim_new)
 {
-	cputime_t cputime;
+	cputime_t cputime = secs_to_cputime(rlim_new);
+	struct signal_struct *const sig = current->signal;
 
-	cputime = secs_to_cputime(rlim_new);
-	if (cputime_eq(current->signal->it_prof_expires, cputime_zero) ||
-	    cputime_gt(current->signal->it_prof_expires, cputime)) {
+	if (cputime_eq(sig->it[CPUCLOCK_PROF].expires, cputime_zero) ||
+	    cputime_gt(sig->it[CPUCLOCK_PROF].expires, cputime)) {
 		spin_lock_irq(&current->sighand->siglock);
 		set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL);
 		spin_unlock_irq(&current->sighand->siglock);
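
Note: the rewritten code indexes a per-clock array, sig->it[CPUCLOCK_PROF] / sig->it[CPUCLOCK_VIRT], where the old code used separate it_prof_expires / it_virt_expires fields. The sketch below is only an inference from the fields this diff touches (expires, incr, error, incr_error); the authoritative definition lives in the signal_struct declaration in the kernel headers, not in this file, and may differ in detail.

/* Illustrative reconstruction only -- not taken from this patch. */
#include <stdint.h>

typedef unsigned long cputime_t;	/* stand-in for the kernel type */

struct cpu_itimer {
	cputime_t expires;	/* next expiry; cputime_zero means disarmed */
	cputime_t incr;		/* reload interval for a periodic itimer */
	uint32_t error;		/* accumulated rounding error, presumably in ns */
	uint32_t incr_error;	/* rounding error contributed per reload */
};

struct signal_struct_excerpt {		/* illustrative stand-in, not the real signal_struct */
	struct cpu_itimer it[2];	/* indexed by CPUCLOCK_PROF / CPUCLOCK_VIRT */
};
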
@@ -542,6 +543,17 @@ static void clear_dead_task(struct k_itimer *timer, union cpu_time_count now)
 					     now);
 }
 
+static inline int expires_gt(cputime_t expires, cputime_t new_exp)
+{
+	return cputime_eq(expires, cputime_zero) ||
+	       cputime_gt(expires, new_exp);
+}
+
+static inline int expires_le(cputime_t expires, cputime_t new_exp)
+{
+	return !cputime_eq(expires, cputime_zero) &&
+	       cputime_le(expires, new_exp);
+}
 /*
  * Insert the timer on the appropriate list before any timers that
  * expire later.  This must be called with the tasklist_lock held
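
For reference, the two helpers encode the convention used throughout this file: an expiry equal to cputime_zero means "not armed". expires_gt() answers "should the cached expiry be replaced by this earlier one?", and expires_le() answers "is an armed expiry already no later than this candidate?". A stand-alone sketch with plain integers in place of cputime_t (all names here are illustrative only):

#include <assert.h>

typedef unsigned long long fake_cputime_t;	/* 0 plays the role of cputime_zero */

/* True if no expiry is cached, or the cached one is later than the candidate. */
static int expires_gt(fake_cputime_t expires, fake_cputime_t new_exp)
{
	return expires == 0 || expires > new_exp;
}

/* True if an expiry is armed and is not later than the candidate. */
static int expires_le(fake_cputime_t expires, fake_cputime_t new_exp)
{
	return expires != 0 && expires <= new_exp;
}

int main(void)
{
	assert(expires_gt(0, 100));	/* nothing cached yet: take the new value */
	assert(expires_gt(200, 100));	/* new timer fires earlier: take it */
	assert(!expires_gt(50, 100));	/* cached expiry already earlier: keep it */
	assert(expires_le(50, 100));	/* armed and at least as early: caller can skip the update */
	return 0;
}
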
@@ -586,34 +598,32 @@ static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
 	 */
 
 	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
+		union cpu_time_count *exp = &nt->expires;
+
 		switch (CPUCLOCK_WHICH(timer->it_clock)) {
 		default:
 			BUG();
 		case CPUCLOCK_PROF:
-			if (cputime_eq(p->cputime_expires.prof_exp,
-				       cputime_zero) ||
-			    cputime_gt(p->cputime_expires.prof_exp,
-				       nt->expires.cpu))
-				p->cputime_expires.prof_exp =
-					nt->expires.cpu;
+			if (expires_gt(p->cputime_expires.prof_exp,
+				       exp->cpu))
+				p->cputime_expires.prof_exp = exp->cpu;
 			break;
 		case CPUCLOCK_VIRT:
-			if (cputime_eq(p->cputime_expires.virt_exp,
-				       cputime_zero) ||
-			    cputime_gt(p->cputime_expires.virt_exp,
-				       nt->expires.cpu))
-				p->cputime_expires.virt_exp =
-					nt->expires.cpu;
+			if (expires_gt(p->cputime_expires.virt_exp,
+				       exp->cpu))
+				p->cputime_expires.virt_exp = exp->cpu;
 			break;
 		case CPUCLOCK_SCHED:
 			if (p->cputime_expires.sched_exp == 0 ||
-			    p->cputime_expires.sched_exp >
-					nt->expires.sched)
+			    p->cputime_expires.sched_exp > exp->sched)
 				p->cputime_expires.sched_exp =
-					nt->expires.sched;
+					exp->sched;
 			break;
 		}
 	} else {
+		struct signal_struct *const sig = p->signal;
+		union cpu_time_count *exp = &timer->it.cpu.expires;
+
 		/*
 		 * For a process timer, set the cached expiration time.
 		 */
@@ -621,30 +631,23 @@ static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
 		default:
 			BUG();
 		case CPUCLOCK_VIRT:
-			if (!cputime_eq(p->signal->it_virt_expires,
-					cputime_zero) &&
-			    cputime_lt(p->signal->it_virt_expires,
-				       timer->it.cpu.expires.cpu))
+			if (expires_le(sig->it[CPUCLOCK_VIRT].expires,
+				       exp->cpu))
 				break;
-			p->signal->cputime_expires.virt_exp =
-				timer->it.cpu.expires.cpu;
+			sig->cputime_expires.virt_exp = exp->cpu;
 			break;
 		case CPUCLOCK_PROF:
-			if (!cputime_eq(p->signal->it_prof_expires,
-					cputime_zero) &&
-			    cputime_lt(p->signal->it_prof_expires,
-				       timer->it.cpu.expires.cpu))
+			if (expires_le(sig->it[CPUCLOCK_PROF].expires,
+				       exp->cpu))
 				break;
-			i = p->signal->rlim[RLIMIT_CPU].rlim_cur;
+			i = sig->rlim[RLIMIT_CPU].rlim_cur;
 			if (i != RLIM_INFINITY &&
-			    i <= cputime_to_secs(timer->it.cpu.expires.cpu))
+			    i <= cputime_to_secs(exp->cpu))
 				break;
-			p->signal->cputime_expires.prof_exp =
-				timer->it.cpu.expires.cpu;
+			sig->cputime_expires.prof_exp = exp->cpu;
 			break;
 		case CPUCLOCK_SCHED:
-			p->signal->cputime_expires.sched_exp =
-				timer->it.cpu.expires.sched;
+			sig->cputime_expires.sched_exp = exp->sched;
 			break;
 		}
 	}
@@ -1071,6 +1074,40 @@ static void stop_process_timers(struct task_struct *tsk)
 	spin_unlock_irqrestore(&cputimer->lock, flags);
 }
 
+static u32 onecputick;
+
+static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
+			     cputime_t *expires, cputime_t cur_time, int signo)
+{
+	if (cputime_eq(it->expires, cputime_zero))
+		return;
+
+	if (cputime_ge(cur_time, it->expires)) {
+		if (!cputime_eq(it->incr, cputime_zero)) {
+			it->expires = cputime_add(it->expires, it->incr);
+			it->error += it->incr_error;
+			if (it->error >= onecputick) {
+				it->expires = cputime_sub(it->expires,
+							  cputime_one_jiffy);
+				it->error -= onecputick;
+			}
+		} else {
+			it->expires = cputime_zero;
+		}
+
+		trace_itimer_expire(signo == SIGPROF ?
+				    ITIMER_PROF : ITIMER_VIRTUAL,
+				    tsk->signal->leader_pid, cur_time);
+		__group_send_sig_info(signo, SEND_SIG_PRIV, tsk);
+	}
+
+	if (!cputime_eq(it->expires, cputime_zero) &&
+	    (cputime_eq(*expires, cputime_zero) ||
+	     cputime_lt(it->expires, *expires))) {
+		*expires = it->expires;
+	}
+}
+
 /*
  * Check for any per-thread CPU timers that have fired and move them
  * off the tsk->*_timers list onto the firing list.  Per-thread timers
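
The reload branch above compensates for interval rounding: the period in it->incr is a whole number of cpu ticks, and it->incr_error presumably carries the sub-tick remainder (in nanoseconds) that the rounding introduced. Each reload accumulates that remainder into it->error, and once it reaches one tick's worth of nanoseconds (onecputick, set up in init_posix_cpu_timers() below), one jiffy is subtracted so the long-run firing rate matches the requested interval. A self-contained model of that accumulation, with plain integers standing in for cputime_t and made-up numbers:

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_TICK 10000000ULL	/* 10 ms tick, i.e. HZ == 100 (assumed for the example) */

struct fake_itimer {
	uint64_t expires;	/* in ticks; 0 means disarmed */
	uint64_t incr;		/* interval rounded up to whole ticks */
	uint32_t error;		/* accumulated rounding error, ns */
	uint32_t incr_error;	/* ns of over-counting added per reload */
};

/* One reload step, mirroring the periodic branch of check_cpu_itimer(). */
static void reload(struct fake_itimer *it)
{
	it->expires += it->incr;
	it->error += it->incr_error;
	if (it->error >= NSEC_PER_TICK) {
		it->expires -= 1;		/* drop one tick (cputime_one_jiffy) */
		it->error -= NSEC_PER_TICK;
	}
}

int main(void)
{
	/* Requested interval 25 ms == 2.5 ticks: incr = 3 ticks, 5 ms of error per reload. */
	struct fake_itimer it = { .expires = 3, .incr = 3, .error = 0, .incr_error = 5000000 };

	for (int i = 0; i < 6; i++) {
		reload(&it);
		printf("reload %d: expires=%llu ticks, error=%u ns\n",
		       i + 1, (unsigned long long)it.expires, it.error);
	}
	/* Every second reload drops a tick, so the average period stays at 25 ms. */
	return 0;
}
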
@@ -1090,10 +1127,10 @@ static void check_process_timers(struct task_struct *tsk,
 	 * Don't sample the current process CPU clocks if there are no timers.
 	 */
 	if (list_empty(&timers[CPUCLOCK_PROF]) &&
-	    cputime_eq(sig->it_prof_expires, cputime_zero) &&
+	    cputime_eq(sig->it[CPUCLOCK_PROF].expires, cputime_zero) &&
 	    sig->rlim[RLIMIT_CPU].rlim_cur == RLIM_INFINITY &&
 	    list_empty(&timers[CPUCLOCK_VIRT]) &&
-	    cputime_eq(sig->it_virt_expires, cputime_zero) &&
+	    cputime_eq(sig->it[CPUCLOCK_VIRT].expires, cputime_zero) &&
 	    list_empty(&timers[CPUCLOCK_SCHED])) {
 		stop_process_timers(tsk);
 		return;
@@ -1153,38 +1190,11 @@ static void check_process_timers(struct task_struct *tsk,
 	/*
 	 * Check for the special case process timers.
 	 */
-	if (!cputime_eq(sig->it_prof_expires, cputime_zero)) {
-		if (cputime_ge(ptime, sig->it_prof_expires)) {
-			/* ITIMER_PROF fires and reloads.  */
-			sig->it_prof_expires = sig->it_prof_incr;
-			if (!cputime_eq(sig->it_prof_expires, cputime_zero)) {
-				sig->it_prof_expires = cputime_add(
-					sig->it_prof_expires, ptime);
-			}
-			__group_send_sig_info(SIGPROF, SEND_SIG_PRIV, tsk);
-		}
-		if (!cputime_eq(sig->it_prof_expires, cputime_zero) &&
-		    (cputime_eq(prof_expires, cputime_zero) ||
-		     cputime_lt(sig->it_prof_expires, prof_expires))) {
-			prof_expires = sig->it_prof_expires;
-		}
-	}
-	if (!cputime_eq(sig->it_virt_expires, cputime_zero)) {
-		if (cputime_ge(utime, sig->it_virt_expires)) {
-			/* ITIMER_VIRTUAL fires and reloads.  */
-			sig->it_virt_expires = sig->it_virt_incr;
-			if (!cputime_eq(sig->it_virt_expires, cputime_zero)) {
-				sig->it_virt_expires = cputime_add(
-					sig->it_virt_expires, utime);
-			}
-			__group_send_sig_info(SIGVTALRM, SEND_SIG_PRIV, tsk);
-		}
-		if (!cputime_eq(sig->it_virt_expires, cputime_zero) &&
-		    (cputime_eq(virt_expires, cputime_zero) ||
-		     cputime_lt(sig->it_virt_expires, virt_expires))) {
-			virt_expires = sig->it_virt_expires;
-		}
-	}
+	check_cpu_itimer(tsk, &sig->it[CPUCLOCK_PROF], &prof_expires, ptime,
+			 SIGPROF);
+	check_cpu_itimer(tsk, &sig->it[CPUCLOCK_VIRT], &virt_expires, utime,
+			 SIGVTALRM);
+
 	if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
 		unsigned long psecs = cputime_to_secs(ptime);
 		cputime_t x;
@@ -1457,7 +1467,7 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
 		if (!cputime_eq(*oldval, cputime_zero)) {
 			if (cputime_le(*oldval, now.cpu)) {
 				/* Just about to fire. */
-				*oldval = jiffies_to_cputime(1);
+				*oldval = cputime_one_jiffy;
 			} else {
 				*oldval = cputime_sub(*oldval, now.cpu);
 			}
@@ -1703,10 +1713,15 @@ static __init int init_posix_cpu_timers(void)
 		.nsleep = thread_cpu_nsleep,
 		.nsleep_restart = thread_cpu_nsleep_restart,
 	};
+	struct timespec ts;
 
 	register_posix_clock(CLOCK_PROCESS_CPUTIME_ID, &process);
 	register_posix_clock(CLOCK_THREAD_CPUTIME_ID, &thread);
 
+	cputime_to_timespec(cputime_one_jiffy, &ts);
+	onecputick = ts.tv_nsec;
+	WARN_ON(ts.tv_sec != 0);
+
 	return 0;
 }
 __initcall(init_posix_cpu_timers);
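
onecputick thus ends up holding the length of one cpu tick in nanoseconds: cputime_one_jiffy is converted to a timespec, whose tv_sec part must be zero (hence the WARN_ON) and whose tv_nsec part is kept. A rough user-space equivalent of that computation, assuming a tick is simply 1/HZ of a second (HZ is hard-coded only for the example):

#include <assert.h>
#include <stdio.h>
#include <stdint.h>
#include <time.h>

#define HZ 100	/* assumed tick rate for the example (commonly 100, 250 or 1000) */

int main(void)
{
	/* Stands in for cputime_to_timespec(cputime_one_jiffy, &ts). */
	struct timespec ts = { .tv_sec = 0, .tv_nsec = 1000000000L / HZ };

	uint32_t onecputick = (uint32_t)ts.tv_nsec;
	assert(ts.tv_sec == 0);		/* mirrors WARN_ON(ts.tv_sec != 0) */

	printf("onecputick = %u ns\n", (unsigned)onecputick);	/* 10000000 for HZ == 100 */
	return 0;
}
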
