about summary refs log tree commit diff stats
path: root/kernel/posix-cpu-timers.c
diff options
context:
space:
mode:
authorThomas Gleixner <tglx@linutronix.de>2009-08-29 04:34:18 -0400
committerThomas Gleixner <tglx@linutronix.de>2009-08-29 04:34:29 -0400
commitf71bb0ac5e85410601b0db29d7b1635345ea61a4 (patch)
tree7c3ef70ef008db87d8b71e5de0632766ecd64d2f /kernel/posix-cpu-timers.c
parent7285dd7fd375763bfb8ab1ac9cf3f1206f503c16 (diff)
parenta42548a18866e87092db93b771e6c5b060d78401 (diff)
Merge branch 'timers/posixtimers' into timers/tracing
Merge reason: timer tracepoint patches depend on both branches.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel/posix-cpu-timers.c')
-rw-r--r--  kernel/posix-cpu-timers.c  150
1 files changed, 80 insertions, 70 deletions
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index e33a21cb9407..12161f74744e 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -14,11 +14,11 @@
14 */ 14 */
15void update_rlimit_cpu(unsigned long rlim_new) 15void update_rlimit_cpu(unsigned long rlim_new)
16{ 16{
17 cputime_t cputime; 17 cputime_t cputime = secs_to_cputime(rlim_new);
18 struct signal_struct *const sig = current->signal;
18 19
19 cputime = secs_to_cputime(rlim_new); 20 if (cputime_eq(sig->it[CPUCLOCK_PROF].expires, cputime_zero) ||
20 if (cputime_eq(current->signal->it_prof_expires, cputime_zero) || 21 cputime_gt(sig->it[CPUCLOCK_PROF].expires, cputime)) {
21 cputime_gt(current->signal->it_prof_expires, cputime)) {
22 spin_lock_irq(&current->sighand->siglock); 22 spin_lock_irq(&current->sighand->siglock);
23 set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL); 23 set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL);
24 spin_unlock_irq(&current->sighand->siglock); 24 spin_unlock_irq(&current->sighand->siglock);
@@ -542,6 +542,17 @@ static void clear_dead_task(struct k_itimer *timer, union cpu_time_count now)
542 now); 542 now);
543} 543}
544 544
545static inline int expires_gt(cputime_t expires, cputime_t new_exp)
546{
547 return cputime_eq(expires, cputime_zero) ||
548 cputime_gt(expires, new_exp);
549}
550
551static inline int expires_le(cputime_t expires, cputime_t new_exp)
552{
553 return !cputime_eq(expires, cputime_zero) &&
554 cputime_le(expires, new_exp);
555}
545/* 556/*
546 * Insert the timer on the appropriate list before any timers that 557 * Insert the timer on the appropriate list before any timers that
547 * expire later. This must be called with the tasklist_lock held 558 * expire later. This must be called with the tasklist_lock held
@@ -586,34 +597,32 @@ static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
586 */ 597 */
587 598
588 if (CPUCLOCK_PERTHREAD(timer->it_clock)) { 599 if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
600 union cpu_time_count *exp = &nt->expires;
601
589 switch (CPUCLOCK_WHICH(timer->it_clock)) { 602 switch (CPUCLOCK_WHICH(timer->it_clock)) {
590 default: 603 default:
591 BUG(); 604 BUG();
592 case CPUCLOCK_PROF: 605 case CPUCLOCK_PROF:
593 if (cputime_eq(p->cputime_expires.prof_exp, 606 if (expires_gt(p->cputime_expires.prof_exp,
594 cputime_zero) || 607 exp->cpu))
595 cputime_gt(p->cputime_expires.prof_exp, 608 p->cputime_expires.prof_exp = exp->cpu;
596 nt->expires.cpu))
597 p->cputime_expires.prof_exp =
598 nt->expires.cpu;
599 break; 609 break;
600 case CPUCLOCK_VIRT: 610 case CPUCLOCK_VIRT:
601 if (cputime_eq(p->cputime_expires.virt_exp, 611 if (expires_gt(p->cputime_expires.virt_exp,
602 cputime_zero) || 612 exp->cpu))
603 cputime_gt(p->cputime_expires.virt_exp, 613 p->cputime_expires.virt_exp = exp->cpu;
604 nt->expires.cpu))
605 p->cputime_expires.virt_exp =
606 nt->expires.cpu;
607 break; 614 break;
608 case CPUCLOCK_SCHED: 615 case CPUCLOCK_SCHED:
609 if (p->cputime_expires.sched_exp == 0 || 616 if (p->cputime_expires.sched_exp == 0 ||
610 p->cputime_expires.sched_exp > 617 p->cputime_expires.sched_exp > exp->sched)
611 nt->expires.sched)
612 p->cputime_expires.sched_exp = 618 p->cputime_expires.sched_exp =
613 nt->expires.sched; 619 exp->sched;
614 break; 620 break;
615 } 621 }
616 } else { 622 } else {
623 struct signal_struct *const sig = p->signal;
624 union cpu_time_count *exp = &timer->it.cpu.expires;
625
617 /* 626 /*
618 * For a process timer, set the cached expiration time. 627 * For a process timer, set the cached expiration time.
619 */ 628 */
@@ -621,30 +630,23 @@ static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
621 default: 630 default:
622 BUG(); 631 BUG();
623 case CPUCLOCK_VIRT: 632 case CPUCLOCK_VIRT:
624 if (!cputime_eq(p->signal->it_virt_expires, 633 if (expires_le(sig->it[CPUCLOCK_VIRT].expires,
625 cputime_zero) && 634 exp->cpu))
626 cputime_lt(p->signal->it_virt_expires,
627 timer->it.cpu.expires.cpu))
628 break; 635 break;
629 p->signal->cputime_expires.virt_exp = 636 sig->cputime_expires.virt_exp = exp->cpu;
630 timer->it.cpu.expires.cpu;
631 break; 637 break;
632 case CPUCLOCK_PROF: 638 case CPUCLOCK_PROF:
633 if (!cputime_eq(p->signal->it_prof_expires, 639 if (expires_le(sig->it[CPUCLOCK_PROF].expires,
634 cputime_zero) && 640 exp->cpu))
635 cputime_lt(p->signal->it_prof_expires,
636 timer->it.cpu.expires.cpu))
637 break; 641 break;
638 i = p->signal->rlim[RLIMIT_CPU].rlim_cur; 642 i = sig->rlim[RLIMIT_CPU].rlim_cur;
639 if (i != RLIM_INFINITY && 643 if (i != RLIM_INFINITY &&
640 i <= cputime_to_secs(timer->it.cpu.expires.cpu)) 644 i <= cputime_to_secs(exp->cpu))
641 break; 645 break;
642 p->signal->cputime_expires.prof_exp = 646 sig->cputime_expires.prof_exp = exp->cpu;
643 timer->it.cpu.expires.cpu;
644 break; 647 break;
645 case CPUCLOCK_SCHED: 648 case CPUCLOCK_SCHED:
646 p->signal->cputime_expires.sched_exp = 649 sig->cputime_expires.sched_exp = exp->sched;
647 timer->it.cpu.expires.sched;
648 break; 650 break;
649 } 651 }
650 } 652 }
@@ -1071,6 +1073,36 @@ static void stop_process_timers(struct task_struct *tsk)
1071 spin_unlock_irqrestore(&cputimer->lock, flags); 1073 spin_unlock_irqrestore(&cputimer->lock, flags);
1072} 1074}
1073 1075
1076static u32 onecputick;
1077
1078static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
1079 cputime_t *expires, cputime_t cur_time, int signo)
1080{
1081 if (cputime_eq(it->expires, cputime_zero))
1082 return;
1083
1084 if (cputime_ge(cur_time, it->expires)) {
1085 if (!cputime_eq(it->incr, cputime_zero)) {
1086 it->expires = cputime_add(it->expires, it->incr);
1087 it->error += it->incr_error;
1088 if (it->error >= onecputick) {
1089 it->expires = cputime_sub(it->expires,
1090 cputime_one_jiffy);
1091 it->error -= onecputick;
1092 }
1093 } else
1094 it->expires = cputime_zero;
1095
1096 __group_send_sig_info(signo, SEND_SIG_PRIV, tsk);
1097 }
1098
1099 if (!cputime_eq(it->expires, cputime_zero) &&
1100 (cputime_eq(*expires, cputime_zero) ||
1101 cputime_lt(it->expires, *expires))) {
1102 *expires = it->expires;
1103 }
1104}
1105
1074/* 1106/*
1075 * Check for any per-thread CPU timers that have fired and move them 1107 * Check for any per-thread CPU timers that have fired and move them
1076 * off the tsk->*_timers list onto the firing list. Per-thread timers 1108 * off the tsk->*_timers list onto the firing list. Per-thread timers
@@ -1090,10 +1122,10 @@ static void check_process_timers(struct task_struct *tsk,
1090 * Don't sample the current process CPU clocks if there are no timers. 1122 * Don't sample the current process CPU clocks if there are no timers.
1091 */ 1123 */
1092 if (list_empty(&timers[CPUCLOCK_PROF]) && 1124 if (list_empty(&timers[CPUCLOCK_PROF]) &&
1093 cputime_eq(sig->it_prof_expires, cputime_zero) && 1125 cputime_eq(sig->it[CPUCLOCK_PROF].expires, cputime_zero) &&
1094 sig->rlim[RLIMIT_CPU].rlim_cur == RLIM_INFINITY && 1126 sig->rlim[RLIMIT_CPU].rlim_cur == RLIM_INFINITY &&
1095 list_empty(&timers[CPUCLOCK_VIRT]) && 1127 list_empty(&timers[CPUCLOCK_VIRT]) &&
1096 cputime_eq(sig->it_virt_expires, cputime_zero) && 1128 cputime_eq(sig->it[CPUCLOCK_VIRT].expires, cputime_zero) &&
1097 list_empty(&timers[CPUCLOCK_SCHED])) { 1129 list_empty(&timers[CPUCLOCK_SCHED])) {
1098 stop_process_timers(tsk); 1130 stop_process_timers(tsk);
1099 return; 1131 return;
@@ -1153,38 +1185,11 @@ static void check_process_timers(struct task_struct *tsk,
1153 /* 1185 /*
1154 * Check for the special case process timers. 1186 * Check for the special case process timers.
1155 */ 1187 */
1156 if (!cputime_eq(sig->it_prof_expires, cputime_zero)) { 1188 check_cpu_itimer(tsk, &sig->it[CPUCLOCK_PROF], &prof_expires, ptime,
1157 if (cputime_ge(ptime, sig->it_prof_expires)) { 1189 SIGPROF);
1158 /* ITIMER_PROF fires and reloads. */ 1190 check_cpu_itimer(tsk, &sig->it[CPUCLOCK_VIRT], &virt_expires, utime,
1159 sig->it_prof_expires = sig->it_prof_incr; 1191 SIGVTALRM);
1160 if (!cputime_eq(sig->it_prof_expires, cputime_zero)) { 1192
1161 sig->it_prof_expires = cputime_add(
1162 sig->it_prof_expires, ptime);
1163 }
1164 __group_send_sig_info(SIGPROF, SEND_SIG_PRIV, tsk);
1165 }
1166 if (!cputime_eq(sig->it_prof_expires, cputime_zero) &&
1167 (cputime_eq(prof_expires, cputime_zero) ||
1168 cputime_lt(sig->it_prof_expires, prof_expires))) {
1169 prof_expires = sig->it_prof_expires;
1170 }
1171 }
1172 if (!cputime_eq(sig->it_virt_expires, cputime_zero)) {
1173 if (cputime_ge(utime, sig->it_virt_expires)) {
1174 /* ITIMER_VIRTUAL fires and reloads. */
1175 sig->it_virt_expires = sig->it_virt_incr;
1176 if (!cputime_eq(sig->it_virt_expires, cputime_zero)) {
1177 sig->it_virt_expires = cputime_add(
1178 sig->it_virt_expires, utime);
1179 }
1180 __group_send_sig_info(SIGVTALRM, SEND_SIG_PRIV, tsk);
1181 }
1182 if (!cputime_eq(sig->it_virt_expires, cputime_zero) &&
1183 (cputime_eq(virt_expires, cputime_zero) ||
1184 cputime_lt(sig->it_virt_expires, virt_expires))) {
1185 virt_expires = sig->it_virt_expires;
1186 }
1187 }
1188 if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) { 1193 if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
1189 unsigned long psecs = cputime_to_secs(ptime); 1194 unsigned long psecs = cputime_to_secs(ptime);
1190 cputime_t x; 1195 cputime_t x;
@@ -1457,7 +1462,7 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
1457 if (!cputime_eq(*oldval, cputime_zero)) { 1462 if (!cputime_eq(*oldval, cputime_zero)) {
1458 if (cputime_le(*oldval, now.cpu)) { 1463 if (cputime_le(*oldval, now.cpu)) {
1459 /* Just about to fire. */ 1464 /* Just about to fire. */
1460 *oldval = jiffies_to_cputime(1); 1465 *oldval = cputime_one_jiffy;
1461 } else { 1466 } else {
1462 *oldval = cputime_sub(*oldval, now.cpu); 1467 *oldval = cputime_sub(*oldval, now.cpu);
1463 } 1468 }
@@ -1703,10 +1708,15 @@ static __init int init_posix_cpu_timers(void)
1703 .nsleep = thread_cpu_nsleep, 1708 .nsleep = thread_cpu_nsleep,
1704 .nsleep_restart = thread_cpu_nsleep_restart, 1709 .nsleep_restart = thread_cpu_nsleep_restart,
1705 }; 1710 };
1711 struct timespec ts;
1706 1712
1707 register_posix_clock(CLOCK_PROCESS_CPUTIME_ID, &process); 1713 register_posix_clock(CLOCK_PROCESS_CPUTIME_ID, &process);
1708 register_posix_clock(CLOCK_THREAD_CPUTIME_ID, &thread); 1714 register_posix_clock(CLOCK_THREAD_CPUTIME_ID, &thread);
1709 1715
1716 cputime_to_timespec(cputime_one_jiffy, &ts);
1717 onecputick = ts.tv_nsec;
1718 WARN_ON(ts.tv_sec != 0);
1719
1710 return 0; 1720 return 0;
1711} 1721}
1712__initcall(init_posix_cpu_timers); 1722__initcall(init_posix_cpu_timers);