Diffstat (limited to 'kernel/posix-cpu-timers.c')
 kernel/posix-cpu-timers.c | 346 ++++++++++++++++----------------
 1 file changed, 131 insertions(+), 215 deletions(-)
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 438ff4523513..9829646d399c 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -11,19 +11,18 @@
 #include <trace/events/timer.h>
 
 /*
- * Called after updating RLIMIT_CPU to set timer expiration if necessary.
+ * Called after updating RLIMIT_CPU to run cpu timer and update
+ * tsk->signal->cputime_expires expiration cache if necessary. Needs
+ * siglock protection since other code may update expiration cache as
+ * well.
  */
 void update_rlimit_cpu(unsigned long rlim_new)
 {
 	cputime_t cputime = secs_to_cputime(rlim_new);
-	struct signal_struct *const sig = current->signal;
 
-	if (cputime_eq(sig->it[CPUCLOCK_PROF].expires, cputime_zero) ||
-	    cputime_gt(sig->it[CPUCLOCK_PROF].expires, cputime)) {
-		spin_lock_irq(&current->sighand->siglock);
-		set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL);
-		spin_unlock_irq(&current->sighand->siglock);
-	}
+	spin_lock_irq(&current->sighand->siglock);
+	set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL);
+	spin_unlock_irq(&current->sighand->siglock);
 }
 
 static int check_clock(const clockid_t which_clock)
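The rewritten update_rlimit_cpu() above is reached from an ordinary setrlimit(RLIMIT_CPU) call. As a reference point, an illustrative userspace sketch (not part of this patch) that exercises that path until the soft limit delivers SIGXCPU:

/* Illustrative only, not part of the patch. Burn CPU until the soft
 * RLIMIT_CPU limit (1s) makes the kernel deliver SIGXCPU. */
#include <signal.h>
#include <stdio.h>
#include <sys/resource.h>
#include <unistd.h>

static void on_sigxcpu(int sig)
{
	(void)sig;
	write(STDERR_FILENO, "got SIGXCPU\n", 12);
	_exit(0);
}

int main(void)
{
	struct rlimit rl = { .rlim_cur = 1, .rlim_max = 2 };	/* seconds */

	signal(SIGXCPU, on_sigxcpu);
	if (setrlimit(RLIMIT_CPU, &rl)) {	/* ends up in update_rlimit_cpu() */
		perror("setrlimit");
		return 1;
	}
	for (;;)
		;	/* accumulate process CPU time */
}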
@@ -364,7 +363,7 @@ int posix_cpu_clock_get(const clockid_t which_clock, struct timespec *tp)
 			}
 		} else {
 			read_lock(&tasklist_lock);
-			if (thread_group_leader(p) && p->signal) {
+			if (thread_group_leader(p) && p->sighand) {
 				error =
 				    cpu_clock_sample_group(which_clock,
 							   p, &rtn);
@@ -440,7 +439,7 @@ int posix_cpu_timer_del(struct k_itimer *timer)
 
 	if (likely(p != NULL)) {
 		read_lock(&tasklist_lock);
-		if (unlikely(p->signal == NULL)) {
+		if (unlikely(p->sighand == NULL)) {
 			/*
 			 * We raced with the reaping of the task.
 			 * The deletion should have cleared us off the list.
@@ -548,111 +547,62 @@ static inline int expires_gt(cputime_t expires, cputime_t new_exp)
 		cputime_gt(expires, new_exp);
 }
 
-static inline int expires_le(cputime_t expires, cputime_t new_exp)
-{
-	return !cputime_eq(expires, cputime_zero) &&
-	       cputime_le(expires, new_exp);
-}
 /*
  * Insert the timer on the appropriate list before any timers that
  * expire later. This must be called with the tasklist_lock held
- * for reading, and interrupts disabled.
+ * for reading, interrupts disabled and p->sighand->siglock taken.
  */
-static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
+static void arm_timer(struct k_itimer *timer)
 {
 	struct task_struct *p = timer->it.cpu.task;
 	struct list_head *head, *listpos;
+	struct task_cputime *cputime_expires;
 	struct cpu_timer_list *const nt = &timer->it.cpu;
 	struct cpu_timer_list *next;
-	unsigned long i;
 
-	head = (CPUCLOCK_PERTHREAD(timer->it_clock) ?
-		p->cpu_timers : p->signal->cpu_timers);
+	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
+		head = p->cpu_timers;
+		cputime_expires = &p->cputime_expires;
+	} else {
+		head = p->signal->cpu_timers;
+		cputime_expires = &p->signal->cputime_expires;
+	}
 	head += CPUCLOCK_WHICH(timer->it_clock);
 
-	BUG_ON(!irqs_disabled());
-	spin_lock(&p->sighand->siglock);
-
 	listpos = head;
-	if (CPUCLOCK_WHICH(timer->it_clock) == CPUCLOCK_SCHED) {
-		list_for_each_entry(next, head, entry) {
-			if (next->expires.sched > nt->expires.sched)
-				break;
-			listpos = &next->entry;
-		}
-	} else {
-		list_for_each_entry(next, head, entry) {
-			if (cputime_gt(next->expires.cpu, nt->expires.cpu))
-				break;
-			listpos = &next->entry;
-		}
+	list_for_each_entry(next, head, entry) {
+		if (cpu_time_before(timer->it_clock, nt->expires, next->expires))
+			break;
+		listpos = &next->entry;
 	}
 	list_add(&nt->entry, listpos);
 
 	if (listpos == head) {
+		union cpu_time_count *exp = &nt->expires;
+
 		/*
-		 * We are the new earliest-expiring timer.
-		 * If we are a thread timer, there can always
-		 * be a process timer telling us to stop earlier.
+		 * We are the new earliest-expiring POSIX 1.b timer, hence
+		 * need to update expiration cache. Take into account that
+		 * for process timers we share expiration cache with itimers
+		 * and RLIMIT_CPU and for thread timers with RLIMIT_RTTIME.
 		 */
 
-		if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
-			union cpu_time_count *exp = &nt->expires;
-
-			switch (CPUCLOCK_WHICH(timer->it_clock)) {
-			default:
-				BUG();
-			case CPUCLOCK_PROF:
-				if (expires_gt(p->cputime_expires.prof_exp,
-					       exp->cpu))
-					p->cputime_expires.prof_exp = exp->cpu;
-				break;
-			case CPUCLOCK_VIRT:
-				if (expires_gt(p->cputime_expires.virt_exp,
-					       exp->cpu))
-					p->cputime_expires.virt_exp = exp->cpu;
-				break;
-			case CPUCLOCK_SCHED:
-				if (p->cputime_expires.sched_exp == 0 ||
-				    p->cputime_expires.sched_exp > exp->sched)
-					p->cputime_expires.sched_exp =
-						exp->sched;
-				break;
-			}
-		} else {
-			struct signal_struct *const sig = p->signal;
-			union cpu_time_count *exp = &timer->it.cpu.expires;
-
-			/*
-			 * For a process timer, set the cached expiration time.
-			 */
-			switch (CPUCLOCK_WHICH(timer->it_clock)) {
-			default:
-				BUG();
-			case CPUCLOCK_VIRT:
-				if (expires_le(sig->it[CPUCLOCK_VIRT].expires,
-					       exp->cpu))
-					break;
-				sig->cputime_expires.virt_exp = exp->cpu;
-				break;
-			case CPUCLOCK_PROF:
-				if (expires_le(sig->it[CPUCLOCK_PROF].expires,
-					       exp->cpu))
-					break;
-				i = sig->rlim[RLIMIT_CPU].rlim_cur;
-				if (i != RLIM_INFINITY &&
-				    i <= cputime_to_secs(exp->cpu))
-					break;
-				sig->cputime_expires.prof_exp = exp->cpu;
-				break;
-			case CPUCLOCK_SCHED:
-				sig->cputime_expires.sched_exp = exp->sched;
-				break;
-			}
+		switch (CPUCLOCK_WHICH(timer->it_clock)) {
+		case CPUCLOCK_PROF:
+			if (expires_gt(cputime_expires->prof_exp, exp->cpu))
+				cputime_expires->prof_exp = exp->cpu;
+			break;
+		case CPUCLOCK_VIRT:
+			if (expires_gt(cputime_expires->virt_exp, exp->cpu))
+				cputime_expires->virt_exp = exp->cpu;
+			break;
+		case CPUCLOCK_SCHED:
+			if (cputime_expires->sched_exp == 0 ||
+			    cputime_expires->sched_exp > exp->sched)
+				cputime_expires->sched_exp = exp->sched;
+			break;
 		}
 	}
-
-	spin_unlock(&p->sighand->siglock);
 }
 
 /*
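The reworked arm_timer() above is reached whenever a POSIX 1.b CPU timer is armed. An illustrative userspace sketch (not part of this patch) that lands there via timer_settime() on the process CPU clock; link with -lrt on older glibc:

/* Illustrative only, not part of the patch. Arm a process-wide CPU-time
 * POSIX timer; timer_settime() reaches posix_cpu_timer_set(), which calls
 * the reworked arm_timer(). The default SIGPROF disposition terminates
 * the process after one second of process CPU time. */
#include <signal.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	timer_t tid;
	struct sigevent sev = {
		.sigev_notify = SIGEV_SIGNAL,
		.sigev_signo  = SIGPROF,
	};
	struct itimerspec its = {
		.it_value = { .tv_sec = 1 },	/* one-shot, 1s of CPU time */
	};

	if (timer_create(CLOCK_PROCESS_CPUTIME_ID, &sev, &tid)) {
		perror("timer_create");
		return 1;
	}
	if (timer_settime(tid, 0, &its, NULL)) {
		perror("timer_settime");
		return 1;
	}
	for (;;)
		;	/* burn CPU until SIGPROF fires */
}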
@@ -660,7 +610,12 @@ static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
  */
 static void cpu_timer_fire(struct k_itimer *timer)
 {
-	if (unlikely(timer->sigq == NULL)) {
+	if ((timer->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
+		/*
+		 * User don't want any signal.
+		 */
+		timer->it.cpu.expires.sched = 0;
+	} else if (unlikely(timer->sigq == NULL)) {
 		/*
 		 * This a special case for clock_nanosleep,
 		 * not a normal timer from sys_timer_create.
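The new SIGEV_NONE branch above handles timers whose owner asked for no notification at all; such timers can only be observed by polling. An illustrative userspace sketch (not part of this patch):

/* Illustrative only, not part of the patch. A SIGEV_NONE CPU timer is
 * never delivered; it can only be polled with timer_gettime(), and the
 * cpu_timer_fire() branch above simply clears it once it expires.
 * Link with -lrt on older glibc. */
#include <signal.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	timer_t tid;
	struct sigevent sev = { .sigev_notify = SIGEV_NONE };
	struct itimerspec its = { .it_value = { .tv_sec = 1 } }, left;

	if (timer_create(CLOCK_PROCESS_CPUTIME_ID, &sev, &tid)) {
		perror("timer_create");
		return 1;
	}
	if (timer_settime(tid, 0, &its, NULL)) {
		perror("timer_settime");
		return 1;
	}
	do {	/* the polling loop itself burns the CPU time */
		if (timer_gettime(tid, &left)) {
			perror("timer_gettime");
			return 1;
		}
	} while (left.it_value.tv_sec || left.it_value.tv_nsec);
	puts("SIGEV_NONE timer expired");
	return 0;
}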
@@ -721,7 +676,7 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags,
 			struct itimerspec *new, struct itimerspec *old)
 {
 	struct task_struct *p = timer->it.cpu.task;
-	union cpu_time_count old_expires, new_expires, val;
+	union cpu_time_count old_expires, new_expires, old_incr, val;
 	int ret;
 
 	if (unlikely(p == NULL)) {
@@ -736,10 +691,10 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags,
 	read_lock(&tasklist_lock);
 	/*
 	 * We need the tasklist_lock to protect against reaping that
-	 * clears p->signal. If p has just been reaped, we can no
+	 * clears p->sighand. If p has just been reaped, we can no
 	 * longer get any information about it at all.
 	 */
-	if (unlikely(p->signal == NULL)) {
+	if (unlikely(p->sighand == NULL)) {
 		read_unlock(&tasklist_lock);
 		put_task_struct(p);
 		timer->it.cpu.task = NULL;
@@ -752,6 +707,7 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags,
 	BUG_ON(!irqs_disabled());
 
 	ret = 0;
+	old_incr = timer->it.cpu.incr;
 	spin_lock(&p->sighand->siglock);
 	old_expires = timer->it.cpu.expires;
 	if (unlikely(timer->it.cpu.firing)) {
@@ -759,7 +715,6 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags,
 		ret = TIMER_RETRY;
 	} else
 		list_del_init(&timer->it.cpu.entry);
-	spin_unlock(&p->sighand->siglock);
 
 	/*
 	 * We need to sample the current value to convert the new
@@ -813,6 +768,7 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags,
 		 * disable this firing since we are already reporting
 		 * it as an overrun (thanks to bump_cpu_timer above).
 		 */
+		spin_unlock(&p->sighand->siglock);
 		read_unlock(&tasklist_lock);
 		goto out;
 	}
@@ -828,11 +784,11 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags,
 	 */
 	timer->it.cpu.expires = new_expires;
 	if (new_expires.sched != 0 &&
-	    (timer->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE &&
 	    cpu_time_before(timer->it_clock, val, new_expires)) {
-		arm_timer(timer, val);
+		arm_timer(timer);
 	}
 
+	spin_unlock(&p->sighand->siglock);
 	read_unlock(&tasklist_lock);
 
 	/*
@@ -853,7 +809,6 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags,
 	timer->it_overrun = -1;
 
 	if (new_expires.sched != 0 &&
-	    (timer->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE &&
 	    !cpu_time_before(timer->it_clock, val, new_expires)) {
 		/*
 		 * The designated time already passed, so we notify
@@ -867,7 +822,7 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags,
  out:
 	if (old) {
 		sample_to_timespec(timer->it_clock,
-				   timer->it.cpu.incr, &old->it_interval);
+				   old_incr, &old->it_interval);
 	}
 	return ret;
 }
@@ -908,7 +863,7 @@ void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
 		clear_dead = p->exit_state;
 	} else {
 		read_lock(&tasklist_lock);
-		if (unlikely(p->signal == NULL)) {
+		if (unlikely(p->sighand == NULL)) {
 			/*
 			 * The process has been reaped.
 			 * We can't even collect a sample any more.
@@ -927,25 +882,6 @@ void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
 		read_unlock(&tasklist_lock);
 	}
 
-	if ((timer->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
-		if (timer->it.cpu.incr.sched == 0 &&
-		    cpu_time_before(timer->it_clock,
-				    timer->it.cpu.expires, now)) {
-			/*
-			 * Do-nothing timer expired and has no reload,
-			 * so it's as if it was never set.
-			 */
-			timer->it.cpu.expires.sched = 0;
-			itp->it_value.tv_sec = itp->it_value.tv_nsec = 0;
-			return;
-		}
-		/*
-		 * Account for any expirations and reloads that should
-		 * have happened.
-		 */
-		bump_cpu_timer(timer, now);
-	}
-
 	if (unlikely(clear_dead)) {
 		/*
 		 * We've noticed that the thread is dead, but
@@ -982,6 +918,7 @@ static void check_thread_timers(struct task_struct *tsk,
 	int maxfire;
 	struct list_head *timers = tsk->cpu_timers;
 	struct signal_struct *const sig = tsk->signal;
+	unsigned long soft;
 
 	maxfire = 20;
 	tsk->cputime_expires.prof_exp = cputime_zero;
@@ -1030,9 +967,10 @@ static void check_thread_timers(struct task_struct *tsk,
 	/*
 	 * Check for the special case thread timers.
 	 */
-	if (sig->rlim[RLIMIT_RTTIME].rlim_cur != RLIM_INFINITY) {
-		unsigned long hard = sig->rlim[RLIMIT_RTTIME].rlim_max;
-		unsigned long *soft = &sig->rlim[RLIMIT_RTTIME].rlim_cur;
+	soft = ACCESS_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_cur);
+	if (soft != RLIM_INFINITY) {
+		unsigned long hard =
+			ACCESS_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_max);
 
 		if (hard != RLIM_INFINITY &&
 		    tsk->rt.timeout > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
@@ -1043,14 +981,13 @@ static void check_thread_timers(struct task_struct *tsk,
 			__group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
 			return;
 		}
-		if (tsk->rt.timeout > DIV_ROUND_UP(*soft, USEC_PER_SEC/HZ)) {
+		if (tsk->rt.timeout > DIV_ROUND_UP(soft, USEC_PER_SEC/HZ)) {
 			/*
 			 * At the soft limit, send a SIGXCPU every second.
 			 */
-			if (sig->rlim[RLIMIT_RTTIME].rlim_cur
-			    < sig->rlim[RLIMIT_RTTIME].rlim_max) {
-				sig->rlim[RLIMIT_RTTIME].rlim_cur +=
-					USEC_PER_SEC;
+			if (soft < hard) {
+				soft += USEC_PER_SEC;
+				sig->rlim[RLIMIT_RTTIME].rlim_cur = soft;
 			}
 			printk(KERN_INFO
 			       "RT Watchdog Timeout: %s[%d]\n",
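The RLIMIT_RTTIME watchdog above polices runaway realtime tasks. An illustrative userspace sketch (not part of this patch) of a task that would trip it; sched_setscheduler() needs root, CAP_SYS_NICE, or a suitable RLIMIT_RTPRIO:

/* Illustrative only, not part of the patch. RLIMIT_RTTIME values are
 * microseconds of realtime CPU: SIGXCPU at the soft limit (0.5s),
 * SIGKILL at the hard limit (1s). */
#include <sched.h>
#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
	struct rlimit rl = { .rlim_cur = 500000, .rlim_max = 1000000 };
	struct sched_param sp = { .sched_priority = 10 };

	if (setrlimit(RLIMIT_RTTIME, &rl)) {
		perror("setrlimit");
		return 1;
	}
	if (sched_setscheduler(0, SCHED_FIFO, &sp)) {
		perror("sched_setscheduler");
		return 1;
	}
	for (;;)
		;	/* spin without sleeping: tsk->rt.timeout keeps growing */
}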
@@ -1060,14 +997,11 @@ static void check_thread_timers(struct task_struct *tsk,
 	}
 }
 
-static void stop_process_timers(struct task_struct *tsk)
+static void stop_process_timers(struct signal_struct *sig)
 {
-	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
+	struct thread_group_cputimer *cputimer = &sig->cputimer;
 	unsigned long flags;
 
-	if (!cputimer->running)
-		return;
-
 	spin_lock_irqsave(&cputimer->lock, flags);
 	cputimer->running = 0;
 	spin_unlock_irqrestore(&cputimer->lock, flags);
@@ -1107,6 +1041,23 @@ static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
 	}
 }
 
+/**
+ * task_cputime_zero - Check a task_cputime struct for all zero fields.
+ *
+ * @cputime: The struct to compare.
+ *
+ * Checks @cputime to see if all fields are zero. Returns true if all fields
+ * are zero, false if any field is nonzero.
+ */
+static inline int task_cputime_zero(const struct task_cputime *cputime)
+{
+	if (cputime_eq(cputime->utime, cputime_zero) &&
+	    cputime_eq(cputime->stime, cputime_zero) &&
+	    cputime->sum_exec_runtime == 0)
+		return 1;
+	return 0;
+}
+
 /*
  * Check for any per-thread CPU timers that have fired and move them
  * off the tsk->*_timers list onto the firing list. Per-thread timers
@@ -1121,19 +1072,7 @@ static void check_process_timers(struct task_struct *tsk,
 	unsigned long long sum_sched_runtime, sched_expires;
 	struct list_head *timers = sig->cpu_timers;
 	struct task_cputime cputime;
-
-	/*
-	 * Don't sample the current process CPU clocks if there are no timers.
-	 */
-	if (list_empty(&timers[CPUCLOCK_PROF]) &&
-	    cputime_eq(sig->it[CPUCLOCK_PROF].expires, cputime_zero) &&
-	    sig->rlim[RLIMIT_CPU].rlim_cur == RLIM_INFINITY &&
-	    list_empty(&timers[CPUCLOCK_VIRT]) &&
-	    cputime_eq(sig->it[CPUCLOCK_VIRT].expires, cputime_zero) &&
-	    list_empty(&timers[CPUCLOCK_SCHED])) {
-		stop_process_timers(tsk);
-		return;
-	}
+	unsigned long soft;
 
 	/*
 	 * Collect the current process totals.
@@ -1193,11 +1132,13 @@ static void check_process_timers(struct task_struct *tsk,
 				 SIGPROF);
 	check_cpu_itimer(tsk, &sig->it[CPUCLOCK_VIRT], &virt_expires, utime,
 			 SIGVTALRM);
-
-	if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
+	soft = ACCESS_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
+	if (soft != RLIM_INFINITY) {
 		unsigned long psecs = cputime_to_secs(ptime);
+		unsigned long hard =
+			ACCESS_ONCE(sig->rlim[RLIMIT_CPU].rlim_max);
 		cputime_t x;
-		if (psecs >= sig->rlim[RLIMIT_CPU].rlim_max) {
+		if (psecs >= hard) {
 			/*
 			 * At the hard limit, we just die.
 			 * No need to calculate anything else now.
@@ -1205,35 +1146,28 @@ static void check_process_timers(struct task_struct *tsk,
 			__group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk);
 			return;
 		}
-		if (psecs >= sig->rlim[RLIMIT_CPU].rlim_cur) {
+		if (psecs >= soft) {
 			/*
 			 * At the soft limit, send a SIGXCPU every second.
 			 */
 			__group_send_sig_info(SIGXCPU, SEND_SIG_PRIV, tsk);
-			if (sig->rlim[RLIMIT_CPU].rlim_cur
-			    < sig->rlim[RLIMIT_CPU].rlim_max) {
-				sig->rlim[RLIMIT_CPU].rlim_cur++;
+			if (soft < hard) {
+				soft++;
+				sig->rlim[RLIMIT_CPU].rlim_cur = soft;
 			}
 		}
-		x = secs_to_cputime(sig->rlim[RLIMIT_CPU].rlim_cur);
+		x = secs_to_cputime(soft);
 		if (cputime_eq(prof_expires, cputime_zero) ||
 		    cputime_lt(x, prof_expires)) {
 			prof_expires = x;
 		}
 	}
 
-	if (!cputime_eq(prof_expires, cputime_zero) &&
-	    (cputime_eq(sig->cputime_expires.prof_exp, cputime_zero) ||
-	     cputime_gt(sig->cputime_expires.prof_exp, prof_expires)))
-		sig->cputime_expires.prof_exp = prof_expires;
-	if (!cputime_eq(virt_expires, cputime_zero) &&
-	    (cputime_eq(sig->cputime_expires.virt_exp, cputime_zero) ||
-	     cputime_gt(sig->cputime_expires.virt_exp, virt_expires)))
-		sig->cputime_expires.virt_exp = virt_expires;
-	if (sched_expires != 0 &&
-	    (sig->cputime_expires.sched_exp == 0 ||
-	     sig->cputime_expires.sched_exp > sched_expires))
-		sig->cputime_expires.sched_exp = sched_expires;
+	sig->cputime_expires.prof_exp = prof_expires;
+	sig->cputime_expires.virt_exp = virt_expires;
+	sig->cputime_expires.sched_exp = sched_expires;
+	if (task_cputime_zero(&sig->cputime_expires))
+		stop_process_timers(sig);
 }
 
 /*
@@ -1262,9 +1196,10 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
 			goto out;
 		}
 		read_lock(&tasklist_lock); /* arm_timer needs it. */
+		spin_lock(&p->sighand->siglock);
 	} else {
 		read_lock(&tasklist_lock);
-		if (unlikely(p->signal == NULL)) {
+		if (unlikely(p->sighand == NULL)) {
 			/*
 			 * The process has been reaped.
 			 * We can't even collect a sample any more.
@@ -1282,6 +1217,7 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
 			clear_dead_task(timer, now);
 			goto out_unlock;
 		}
+		spin_lock(&p->sighand->siglock);
 		cpu_timer_sample_group(timer->it_clock, p, &now);
 		bump_cpu_timer(timer, now);
 		/* Leave the tasklist_lock locked for the call below. */
@@ -1290,7 +1226,9 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
 	/*
 	 * Now re-arm for the new expiry time.
 	 */
-	arm_timer(timer, now);
+	BUG_ON(!irqs_disabled());
+	arm_timer(timer);
+	spin_unlock(&p->sighand->siglock);
 
 out_unlock:
 	read_unlock(&tasklist_lock);
@@ -1302,23 +1240,6 @@ out:
 }
 
 /**
- * task_cputime_zero - Check a task_cputime struct for all zero fields.
- *
- * @cputime: The struct to compare.
- *
- * Checks @cputime to see if all fields are zero. Returns true if all fields
- * are zero, false if any field is nonzero.
- */
-static inline int task_cputime_zero(const struct task_cputime *cputime)
-{
-	if (cputime_eq(cputime->utime, cputime_zero) &&
-	    cputime_eq(cputime->stime, cputime_zero) &&
-	    cputime->sum_exec_runtime == 0)
-		return 1;
-	return 0;
-}
-
-/**
  * task_cputime_expired - Compare two task_cputime entities.
  *
  * @sample: The task_cputime structure to be checked for expiration.
@@ -1374,7 +1295,7 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
 	}
 
 	sig = tsk->signal;
-	if (!task_cputime_zero(&sig->cputime_expires)) {
+	if (sig->cputimer.running) {
 		struct task_cputime group_sample;
 
 		thread_group_cputimer(tsk, &group_sample);
@@ -1382,7 +1303,7 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
 			return 1;
 	}
 
-	return sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY;
+	return 0;
 }
 
 /*
@@ -1411,7 +1332,12 @@ void run_posix_cpu_timers(struct task_struct *tsk)
 	 * put them on the firing list.
 	 */
 	check_thread_timers(tsk, &firing);
-	check_process_timers(tsk, &firing);
+	/*
+	 * If there are any active process wide timers (POSIX 1.b, itimers,
+	 * RLIMIT_CPU) cputimer must be running.
+	 */
+	if (tsk->signal->cputimer.running)
+		check_process_timers(tsk, &firing);
 
 	/*
 	 * We must release these locks before taking any timer's lock.
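The cputimer->running check above relies on any active process-wide timer keeping the group cputimer running; an armed ITIMER_PROF is one such timer. An illustrative userspace sketch (not part of this patch):

/* Illustrative only, not part of the patch. While this ITIMER_PROF is
 * armed, sig->cputimer stays running, so run_posix_cpu_timers() above
 * still calls check_process_timers() for the process. */
#include <signal.h>
#include <stdio.h>
#include <sys/time.h>

static volatile sig_atomic_t fired;

static void on_sigprof(int sig)
{
	(void)sig;
	fired = 1;
}

int main(void)
{
	struct itimerval itv = {
		.it_value = { .tv_usec = 200000 },	/* 200ms of CPU time */
	};

	signal(SIGPROF, on_sigprof);
	if (setitimer(ITIMER_PROF, &itv, NULL)) {
		perror("setitimer");
		return 1;
	}
	while (!fired)
		;	/* burn CPU until the profiling timer fires */
	puts("ITIMER_PROF fired");
	return 0;
}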
@@ -1448,21 +1374,23 @@ void run_posix_cpu_timers(struct task_struct *tsk)
 }
 
 /*
- * Set one of the process-wide special case CPU timers.
+ * Set one of the process-wide special case CPU timers or RLIMIT_CPU.
  * The tsk->sighand->siglock must be held by the caller.
- * The *newval argument is relative and we update it to be absolute, *oldval
- * is absolute and we update it to be relative.
  */
 void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
 			   cputime_t *newval, cputime_t *oldval)
 {
 	union cpu_time_count now;
-	struct list_head *head;
 
 	BUG_ON(clock_idx == CPUCLOCK_SCHED);
 	cpu_timer_sample_group(clock_idx, tsk, &now);
 
 	if (oldval) {
+		/*
+		 * We are setting itimer. The *oldval is absolute and we update
+		 * it to be relative, *newval argument is relative and we update
+		 * it to be absolute.
+		 */
 		if (!cputime_eq(*oldval, cputime_zero)) {
 			if (cputime_le(*oldval, now.cpu)) {
 				/* Just about to fire. */
@@ -1475,33 +1403,21 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
 		if (cputime_eq(*newval, cputime_zero))
 			return;
 		*newval = cputime_add(*newval, now.cpu);
-
-		/*
-		 * If the RLIMIT_CPU timer will expire before the
-		 * ITIMER_PROF timer, we have nothing else to do.
-		 */
-		if (tsk->signal->rlim[RLIMIT_CPU].rlim_cur
-		    < cputime_to_secs(*newval))
-			return;
 	}
 
 	/*
-	 * Check whether there are any process timers already set to fire
-	 * before this one. If so, we don't have anything more to do.
+	 * Update expiration cache if we are the earliest timer, or eventually
+	 * RLIMIT_CPU limit is earlier than prof_exp cpu timer expire.
 	 */
-	head = &tsk->signal->cpu_timers[clock_idx];
-	if (list_empty(head) ||
-	    cputime_ge(list_first_entry(head,
-				struct cpu_timer_list, entry)->expires.cpu,
-		       *newval)) {
-		switch (clock_idx) {
-		case CPUCLOCK_PROF:
+	switch (clock_idx) {
+	case CPUCLOCK_PROF:
+		if (expires_gt(tsk->signal->cputime_expires.prof_exp, *newval))
 			tsk->signal->cputime_expires.prof_exp = *newval;
-			break;
-		case CPUCLOCK_VIRT:
+		break;
+	case CPUCLOCK_VIRT:
+		if (expires_gt(tsk->signal->cputime_expires.virt_exp, *newval))
 			tsk->signal->cputime_expires.virt_exp = *newval;
-			break;
-		}
+		break;
 	}
 }
 
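set_process_cpu_timer() above is the common entry point for itimers and RLIMIT_CPU; the absolute/relative conversion its new comment describes is what produces setitimer()'s old-value out-parameter. An illustrative userspace sketch (not part of this patch):

/* Illustrative only, not part of the patch. Replacing an armed
 * ITIMER_VIRTUAL returns the remaining time via the old-value argument:
 * the absolute-to-relative conversion done in set_process_cpu_timer(). */
#include <stdio.h>
#include <sys/time.h>

int main(void)
{
	struct itimerval set = { .it_value = { .tv_sec = 5 } }, old;

	if (setitimer(ITIMER_VIRTUAL, &set, NULL)) {
		perror("setitimer");
		return 1;
	}
	set.it_value.tv_sec = 1;	/* re-arm; 'old' gets what was left */
	if (setitimer(ITIMER_VIRTUAL, &set, &old)) {
		perror("setitimer");
		return 1;
	}
	printf("previous timer had %ld.%06lds left\n",
	       (long)old.it_value.tv_sec, (long)old.it_value.tv_usec);
	return 0;
}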