Diffstat (limited to 'kernel/posix-cpu-timers.c')
-rw-r--r--  kernel/posix-cpu-timers.c  348
1 file changed, 125 insertions(+), 223 deletions(-)
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index bc7704b3a443..6842eeba5879 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -11,19 +11,18 @@
 #include <trace/events/timer.h>
 
 /*
- * Called after updating RLIMIT_CPU to set timer expiration if necessary.
+ * Called after updating RLIMIT_CPU to run cpu timer and update
+ * tsk->signal->cputime_expires expiration cache if necessary. Needs
+ * siglock protection since other code may update expiration cache as
+ * well.
  */
-void update_rlimit_cpu(unsigned long rlim_new)
+void update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new)
 {
         cputime_t cputime = secs_to_cputime(rlim_new);
-        struct signal_struct *const sig = current->signal;
 
-        if (cputime_eq(sig->it[CPUCLOCK_PROF].expires, cputime_zero) ||
-            cputime_gt(sig->it[CPUCLOCK_PROF].expires, cputime)) {
-                spin_lock_irq(&current->sighand->siglock);
-                set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL);
-                spin_unlock_irq(&current->sighand->siglock);
-        }
+        spin_lock_irq(&task->sighand->siglock);
+        set_process_cpu_timer(task, CPUCLOCK_PROF, &cputime, NULL);
+        spin_unlock_irq(&task->sighand->siglock);
 }
 
 static int check_clock(const clockid_t which_clock)
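The new signature lets callers adjust RLIMIT_CPU for a task other than current, with update_rlimit_cpu() taking siglock itself. A minimal caller sketch, assuming a live task whose sighand cannot go away under us (example_set_cpu_limit is a hypothetical name, not part of this patch):

    /* Sketch only: a do_prlimit()-style caller just passes the target
     * task; update_rlimit_cpu() now handles the siglock internally. */
    static void example_set_cpu_limit(struct task_struct *task, unsigned long secs)
    {
            update_rlimit_cpu(task, secs);
    }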
@@ -233,31 +232,24 @@ static int cpu_clock_sample(const clockid_t which_clock, struct task_struct *p,
 
 void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
 {
-        struct sighand_struct *sighand;
-        struct signal_struct *sig;
+        struct signal_struct *sig = tsk->signal;
         struct task_struct *t;
 
-        *times = INIT_CPUTIME;
+        times->utime = sig->utime;
+        times->stime = sig->stime;
+        times->sum_exec_runtime = sig->sum_sched_runtime;
 
         rcu_read_lock();
-        sighand = rcu_dereference(tsk->sighand);
-        if (!sighand)
+        /* make sure we can trust tsk->thread_group list */
+        if (!likely(pid_alive(tsk)))
                 goto out;
 
-        sig = tsk->signal;
-
         t = tsk;
         do {
                 times->utime = cputime_add(times->utime, t->utime);
                 times->stime = cputime_add(times->stime, t->stime);
                 times->sum_exec_runtime += t->se.sum_exec_runtime;
-
-                t = next_thread(t);
-        } while (t != tsk);
-
-        times->utime = cputime_add(times->utime, sig->utime);
-        times->stime = cputime_add(times->stime, sig->stime);
-        times->sum_exec_runtime += sig->sum_sched_runtime;
+        } while_each_thread(tsk, t);
 out:
         rcu_read_unlock();
 }
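The open-coded next_thread() loop becomes the standard thread-group iterator, and pid_alive() replaces the rcu_dereference() of tsk->sighand as the "is this list safe to walk" check. For reference, the iterator of this era expands to essentially the loop it replaces:

    /* From include/linux/sched.h of this era (shown for reference). */
    #define while_each_thread(g, t) \
            while ((t = next_thread(t)) != g)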
@@ -364,7 +356,7 @@ int posix_cpu_clock_get(const clockid_t which_clock, struct timespec *tp)
                         }
                 } else {
                         read_lock(&tasklist_lock);
-                        if (thread_group_leader(p) && p->signal) {
+                        if (thread_group_leader(p) && p->sighand) {
                                 error =
                                     cpu_clock_sample_group(which_clock,
                                                            p, &rtn);
@@ -440,7 +432,7 @@ int posix_cpu_timer_del(struct k_itimer *timer)
 
         if (likely(p != NULL)) {
                 read_lock(&tasklist_lock);
-                if (unlikely(p->signal == NULL)) {
+                if (unlikely(p->sighand == NULL)) {
                         /*
                          * We raced with the reaping of the task.
                          * The deletion should have cleared us off the list.
@@ -548,111 +540,62 @@ static inline int expires_gt(cputime_t expires, cputime_t new_exp)
                cputime_gt(expires, new_exp);
 }
 
-static inline int expires_le(cputime_t expires, cputime_t new_exp)
-{
-        return !cputime_eq(expires, cputime_zero) &&
-               cputime_le(expires, new_exp);
-}
 /*
  * Insert the timer on the appropriate list before any timers that
  * expire later.  This must be called with the tasklist_lock held
- * for reading, and interrupts disabled.
+ * for reading, interrupts disabled and p->sighand->siglock taken.
  */
-static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
+static void arm_timer(struct k_itimer *timer)
 {
         struct task_struct *p = timer->it.cpu.task;
         struct list_head *head, *listpos;
+        struct task_cputime *cputime_expires;
         struct cpu_timer_list *const nt = &timer->it.cpu;
         struct cpu_timer_list *next;
-        unsigned long i;
 
-        head = (CPUCLOCK_PERTHREAD(timer->it_clock) ?
-                p->cpu_timers : p->signal->cpu_timers);
+        if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
+                head = p->cpu_timers;
+                cputime_expires = &p->cputime_expires;
+        } else {
+                head = p->signal->cpu_timers;
+                cputime_expires = &p->signal->cputime_expires;
+        }
         head += CPUCLOCK_WHICH(timer->it_clock);
 
-        BUG_ON(!irqs_disabled());
-        spin_lock(&p->sighand->siglock);
-
         listpos = head;
-        if (CPUCLOCK_WHICH(timer->it_clock) == CPUCLOCK_SCHED) {
-                list_for_each_entry(next, head, entry) {
-                        if (next->expires.sched > nt->expires.sched)
-                                break;
-                        listpos = &next->entry;
-                }
-        } else {
-                list_for_each_entry(next, head, entry) {
-                        if (cputime_gt(next->expires.cpu, nt->expires.cpu))
-                                break;
-                        listpos = &next->entry;
-                }
+        list_for_each_entry(next, head, entry) {
+                if (cpu_time_before(timer->it_clock, nt->expires, next->expires))
+                        break;
+                listpos = &next->entry;
         }
         list_add(&nt->entry, listpos);
 
         if (listpos == head) {
+                union cpu_time_count *exp = &nt->expires;
+
                 /*
-                 * We are the new earliest-expiring timer.
-                 * If we are a thread timer, there can always
-                 * be a process timer telling us to stop earlier.
+                 * We are the new earliest-expiring POSIX 1.b timer, hence
+                 * need to update expiration cache. Take into account that
+                 * for process timers we share expiration cache with itimers
+                 * and RLIMIT_CPU and for thread timers with RLIMIT_RTTIME.
                  */
 
-                if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
-                        union cpu_time_count *exp = &nt->expires;
-
-                        switch (CPUCLOCK_WHICH(timer->it_clock)) {
-                        default:
-                                BUG();
-                        case CPUCLOCK_PROF:
-                                if (expires_gt(p->cputime_expires.prof_exp,
-                                               exp->cpu))
-                                        p->cputime_expires.prof_exp = exp->cpu;
-                                break;
-                        case CPUCLOCK_VIRT:
-                                if (expires_gt(p->cputime_expires.virt_exp,
-                                               exp->cpu))
-                                        p->cputime_expires.virt_exp = exp->cpu;
-                                break;
-                        case CPUCLOCK_SCHED:
-                                if (p->cputime_expires.sched_exp == 0 ||
-                                    p->cputime_expires.sched_exp > exp->sched)
-                                        p->cputime_expires.sched_exp =
-                                                exp->sched;
-                                break;
-                        }
-                } else {
-                        struct signal_struct *const sig = p->signal;
-                        union cpu_time_count *exp = &timer->it.cpu.expires;
-
-                        /*
-                         * For a process timer, set the cached expiration time.
-                         */
-                        switch (CPUCLOCK_WHICH(timer->it_clock)) {
-                        default:
-                                BUG();
-                        case CPUCLOCK_VIRT:
-                                if (expires_le(sig->it[CPUCLOCK_VIRT].expires,
-                                               exp->cpu))
-                                        break;
-                                sig->cputime_expires.virt_exp = exp->cpu;
-                                break;
-                        case CPUCLOCK_PROF:
-                                if (expires_le(sig->it[CPUCLOCK_PROF].expires,
-                                               exp->cpu))
-                                        break;
-                                i = sig->rlim[RLIMIT_CPU].rlim_cur;
-                                if (i != RLIM_INFINITY &&
-                                    i <= cputime_to_secs(exp->cpu))
-                                        break;
-                                sig->cputime_expires.prof_exp = exp->cpu;
-                                break;
-                        case CPUCLOCK_SCHED:
-                                sig->cputime_expires.sched_exp = exp->sched;
-                                break;
-                        }
+                switch (CPUCLOCK_WHICH(timer->it_clock)) {
+                case CPUCLOCK_PROF:
+                        if (expires_gt(cputime_expires->prof_exp, exp->cpu))
+                                cputime_expires->prof_exp = exp->cpu;
+                        break;
+                case CPUCLOCK_VIRT:
+                        if (expires_gt(cputime_expires->virt_exp, exp->cpu))
+                                cputime_expires->virt_exp = exp->cpu;
+                        break;
+                case CPUCLOCK_SCHED:
+                        if (cputime_expires->sched_exp == 0 ||
+                            cputime_expires->sched_exp > exp->sched)
+                                cputime_expires->sched_exp = exp->sched;
+                        break;
                 }
         }
-
-        spin_unlock(&p->sighand->siglock);
 }
 
 /*
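The two per-clock insertion loops collapse into one because cpu_time_before() already dispatches on the clock type. Paraphrased from the helper defined earlier in this file:

    static inline int cpu_time_before(const clockid_t which_clock,
                                      union cpu_time_count now,
                                      union cpu_time_count then)
    {
            if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED)
                    return now.sched < then.sched;        /* raw sched_clock ns */
            else
                    return cputime_lt(now.cpu, then.cpu); /* cputime_t ticks */
    }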
@@ -660,7 +603,12 @@ static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
  */
 static void cpu_timer_fire(struct k_itimer *timer)
 {
-        if (unlikely(timer->sigq == NULL)) {
+        if ((timer->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
+                /*
+                 * User don't want any signal.
+                 */
+                timer->it.cpu.expires.sched = 0;
+        } else if (unlikely(timer->sigq == NULL)) {
                 /*
                  * This a special case for clock_nanosleep,
                  * not a normal timer from sys_timer_create.
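A SIGEV_NONE CPU timer that fires is now simply disarmed at fire time instead of being special-cased when read back. A userspace sketch of such a timer (error handling elided; link with -lrt; make_silent_cpu_timer is a hypothetical helper):

    #include <signal.h>
    #include <time.h>

    /* A silent process CPU-time timer: it never queues a signal; the
     * application polls the remaining time with timer_gettime(). */
    static timer_t make_silent_cpu_timer(void)
    {
            struct sigevent sev = { .sigev_notify = SIGEV_NONE };
            struct itimerspec its = { .it_value.tv_sec = 1 };
            timer_t id;

            timer_create(CLOCK_PROCESS_CPUTIME_ID, &sev, &id);
            timer_settime(id, 0, &its, NULL);
            return id;
    }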
@@ -721,7 +669,7 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags,
                         struct itimerspec *new, struct itimerspec *old)
 {
         struct task_struct *p = timer->it.cpu.task;
-        union cpu_time_count old_expires, new_expires, val;
+        union cpu_time_count old_expires, new_expires, old_incr, val;
         int ret;
 
         if (unlikely(p == NULL)) {
@@ -736,10 +684,10 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags,
         read_lock(&tasklist_lock);
         /*
          * We need the tasklist_lock to protect against reaping that
-         * clears p->signal. If p has just been reaped, we can no
+         * clears p->sighand. If p has just been reaped, we can no
          * longer get any information about it at all.
          */
-        if (unlikely(p->signal == NULL)) {
+        if (unlikely(p->sighand == NULL)) {
                 read_unlock(&tasklist_lock);
                 put_task_struct(p);
                 timer->it.cpu.task = NULL;
@@ -752,6 +700,7 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags,
         BUG_ON(!irqs_disabled());
 
         ret = 0;
+        old_incr = timer->it.cpu.incr;
         spin_lock(&p->sighand->siglock);
         old_expires = timer->it.cpu.expires;
         if (unlikely(timer->it.cpu.firing)) {
@@ -759,7 +708,6 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags,
                 ret = TIMER_RETRY;
         } else
                 list_del_init(&timer->it.cpu.entry);
-        spin_unlock(&p->sighand->siglock);
 
         /*
          * We need to sample the current value to convert the new
@@ -813,6 +761,7 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags,
                  * disable this firing since we are already reporting
                  * it as an overrun (thanks to bump_cpu_timer above).
                  */
+                spin_unlock(&p->sighand->siglock);
                 read_unlock(&tasklist_lock);
                 goto out;
         }
@@ -828,11 +777,11 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags,
          */
         timer->it.cpu.expires = new_expires;
         if (new_expires.sched != 0 &&
-            (timer->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE &&
             cpu_time_before(timer->it_clock, val, new_expires)) {
-                arm_timer(timer, val);
+                arm_timer(timer);
         }
 
+        spin_unlock(&p->sighand->siglock);
         read_unlock(&tasklist_lock);
 
         /*
@@ -853,7 +802,6 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags,
         timer->it_overrun = -1;
 
         if (new_expires.sched != 0 &&
-            (timer->it_sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE &&
             !cpu_time_before(timer->it_clock, val, new_expires)) {
                 /*
                  * The designated time already passed, so we notify
@@ -867,7 +815,7 @@ int posix_cpu_timer_set(struct k_itimer *timer, int flags,
  out:
         if (old) {
                 sample_to_timespec(timer->it_clock,
-                                   timer->it.cpu.incr, &old->it_interval);
+                                   old_incr, &old->it_interval);
         }
         return ret;
 }
@@ -908,7 +856,7 @@ void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
                 clear_dead = p->exit_state;
         } else {
                 read_lock(&tasklist_lock);
-                if (unlikely(p->signal == NULL)) {
+                if (unlikely(p->sighand == NULL)) {
                         /*
                          * The process has been reaped.
                          * We can't even collect a sample any more.
@@ -927,25 +875,6 @@ void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
                 read_unlock(&tasklist_lock);
         }
 
-        if ((timer->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE) {
-                if (timer->it.cpu.incr.sched == 0 &&
-                    cpu_time_before(timer->it_clock,
-                                    timer->it.cpu.expires, now)) {
-                        /*
-                         * Do-nothing timer expired and has no reload,
-                         * so it's as if it was never set.
-                         */
-                        timer->it.cpu.expires.sched = 0;
-                        itp->it_value.tv_sec = itp->it_value.tv_nsec = 0;
-                        return;
-                }
-                /*
-                 * Account for any expirations and reloads that should
-                 * have happened.
-                 */
-                bump_cpu_timer(timer, now);
-        }
-
         if (unlikely(clear_dead)) {
                 /*
                  * We've noticed that the thread is dead, but
@@ -1066,16 +995,9 @@ static void stop_process_timers(struct signal_struct *sig)
         struct thread_group_cputimer *cputimer = &sig->cputimer;
         unsigned long flags;
 
-        if (!cputimer->running)
-                return;
-
         spin_lock_irqsave(&cputimer->lock, flags);
         cputimer->running = 0;
         spin_unlock_irqrestore(&cputimer->lock, flags);
-
-        sig->cputime_expires.prof_exp = cputime_zero;
-        sig->cputime_expires.virt_exp = cputime_zero;
-        sig->cputime_expires.sched_exp = 0;
 }
 
 static u32 onecputick;
@@ -1112,6 +1034,23 @@ static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
         }
 }
 
+/**
+ * task_cputime_zero - Check a task_cputime struct for all zero fields.
+ *
+ * @cputime:    The struct to compare.
+ *
+ * Checks @cputime to see if all fields are zero.  Returns true if all fields
+ * are zero, false if any field is nonzero.
+ */
+static inline int task_cputime_zero(const struct task_cputime *cputime)
+{
+        if (cputime_eq(cputime->utime, cputime_zero) &&
+            cputime_eq(cputime->stime, cputime_zero) &&
+            cputime->sum_exec_runtime == 0)
+                return 1;
+        return 0;
+}
+
 /*
  * Check for any per-thread CPU timers that have fired and move them
  * off the tsk->*_timers list onto the firing list.  Per-thread timers
@@ -1129,19 +1068,6 @@ static void check_process_timers(struct task_struct *tsk,
         unsigned long soft;
 
         /*
-         * Don't sample the current process CPU clocks if there are no timers.
-         */
-        if (list_empty(&timers[CPUCLOCK_PROF]) &&
-            cputime_eq(sig->it[CPUCLOCK_PROF].expires, cputime_zero) &&
-            sig->rlim[RLIMIT_CPU].rlim_cur == RLIM_INFINITY &&
-            list_empty(&timers[CPUCLOCK_VIRT]) &&
-            cputime_eq(sig->it[CPUCLOCK_VIRT].expires, cputime_zero) &&
-            list_empty(&timers[CPUCLOCK_SCHED])) {
-                stop_process_timers(sig);
-                return;
-        }
-
-        /*
          * Collect the current process totals.
          */
         thread_group_cputimer(tsk, &cputime);
@@ -1230,18 +1156,11 @@ static void check_process_timers(struct task_struct *tsk,
                 }
         }
 
-        if (!cputime_eq(prof_expires, cputime_zero) &&
-            (cputime_eq(sig->cputime_expires.prof_exp, cputime_zero) ||
-             cputime_gt(sig->cputime_expires.prof_exp, prof_expires)))
-                sig->cputime_expires.prof_exp = prof_expires;
-        if (!cputime_eq(virt_expires, cputime_zero) &&
-            (cputime_eq(sig->cputime_expires.virt_exp, cputime_zero) ||
-             cputime_gt(sig->cputime_expires.virt_exp, virt_expires)))
-                sig->cputime_expires.virt_exp = virt_expires;
-        if (sched_expires != 0 &&
-            (sig->cputime_expires.sched_exp == 0 ||
-             sig->cputime_expires.sched_exp > sched_expires))
-                sig->cputime_expires.sched_exp = sched_expires;
+        sig->cputime_expires.prof_exp = prof_expires;
+        sig->cputime_expires.virt_exp = virt_expires;
+        sig->cputime_expires.sched_exp = sched_expires;
+        if (task_cputime_zero(&sig->cputime_expires))
+                stop_process_timers(sig);
 }
 
 /*
@@ -1270,9 +1189,10 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
                         goto out;
                 }
                 read_lock(&tasklist_lock); /* arm_timer needs it.  */
+                spin_lock(&p->sighand->siglock);
         } else {
                 read_lock(&tasklist_lock);
-                if (unlikely(p->signal == NULL)) {
+                if (unlikely(p->sighand == NULL)) {
                         /*
                          * The process has been reaped.
                          * We can't even collect a sample any more.
@@ -1290,6 +1210,7 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
                         clear_dead_task(timer, now);
                         goto out_unlock;
                 }
+                spin_lock(&p->sighand->siglock);
                 cpu_timer_sample_group(timer->it_clock, p, &now);
                 bump_cpu_timer(timer, now);
                 /* Leave the tasklist_lock locked for the call below.  */
@@ -1298,7 +1219,9 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
         /*
          * Now re-arm for the new expiry time.
          */
-        arm_timer(timer, now);
+        BUG_ON(!irqs_disabled());
+        arm_timer(timer);
+        spin_unlock(&p->sighand->siglock);
 
 out_unlock:
         read_unlock(&tasklist_lock);
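Condensed from the hunks above, the re-arm path now nests the locks in this order (a sketch, not a verbatim excerpt):

    read_lock(&tasklist_lock);              /* keeps p from being reaped */
    spin_lock(&p->sighand->siglock);        /* guards timer lists and caches */
    /* ... sample the clock, bump_cpu_timer() ... */
    arm_timer(timer);
    spin_unlock(&p->sighand->siglock);
    read_unlock(&tasklist_lock);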
@@ -1310,23 +1233,6 @@ out:
 }
 
 /**
- * task_cputime_zero - Check a task_cputime struct for all zero fields.
- *
- * @cputime:    The struct to compare.
- *
- * Checks @cputime to see if all fields are zero.  Returns true if all fields
- * are zero, false if any field is nonzero.
- */
-static inline int task_cputime_zero(const struct task_cputime *cputime)
-{
-        if (cputime_eq(cputime->utime, cputime_zero) &&
-            cputime_eq(cputime->stime, cputime_zero) &&
-            cputime->sum_exec_runtime == 0)
-                return 1;
-        return 0;
-}
-
-/**
  * task_cputime_expired - Compare two task_cputime entities.
  *
  * @sample:     The task_cputime structure to be checked for expiration.
@@ -1366,10 +1272,6 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
 {
         struct signal_struct *sig;
 
-        /* tsk == current, ensure it is safe to use ->signal/sighand */
-        if (unlikely(tsk->exit_state))
-                return 0;
-
         if (!task_cputime_zero(&tsk->cputime_expires)) {
                 struct task_cputime task_sample = {
                         .utime = tsk->utime,
@@ -1382,15 +1284,18 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
         }
 
         sig = tsk->signal;
-        if (!task_cputime_zero(&sig->cputime_expires)) {
+        if (sig->cputimer.running) {
                 struct task_cputime group_sample;
 
-                thread_group_cputimer(tsk, &group_sample);
+                spin_lock(&sig->cputimer.lock);
+                group_sample = sig->cputimer.cputime;
+                spin_unlock(&sig->cputimer.lock);
+
                 if (task_cputime_expired(&group_sample, &sig->cputime_expires))
                         return 1;
         }
 
-        return sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY;
+        return 0;
 }
 
 /*
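The fast path no longer re-sums every thread via thread_group_cputimer(); it just copies the group totals that the scheduler tick already maintains, under cputimer.lock. For reference, the cached structure (from include/linux/sched.h of this era):

    struct thread_group_cputimer {
            struct task_cputime cputime;    /* running group totals */
            int running;                    /* any process-wide timer armed? */
            spinlock_t lock;                /* protects the fields above */
    };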
@@ -1402,6 +1307,7 @@ void run_posix_cpu_timers(struct task_struct *tsk)
 {
         LIST_HEAD(firing);
         struct k_itimer *timer, *next;
+        unsigned long flags;
 
         BUG_ON(!irqs_disabled());
 
@@ -1412,14 +1318,20 @@ void run_posix_cpu_timers(struct task_struct *tsk)
         if (!fastpath_timer_check(tsk))
                 return;
 
-        spin_lock(&tsk->sighand->siglock);
+        if (!lock_task_sighand(tsk, &flags))
+                return;
         /*
          * Here we take off tsk->signal->cpu_timers[N] and
          * tsk->cpu_timers[N] all the timers that are firing, and
          * put them on the firing list.
          */
         check_thread_timers(tsk, &firing);
-        check_process_timers(tsk, &firing);
+        /*
+         * If there are any active process wide timers (POSIX 1.b, itimers,
+         * RLIMIT_CPU) cputimer must be running.
+         */
+        if (tsk->signal->cputimer.running)
+                check_process_timers(tsk, &firing);
 
         /*
          * We must release these locks before taking any timer's lock.
@@ -1429,7 +1341,7 @@ void run_posix_cpu_timers(struct task_struct *tsk)
          * that gets the timer lock before we do will give it up and
          * spin until we've taken care of that timer below.
          */
-        spin_unlock(&tsk->sighand->siglock);
+        unlock_task_sighand(tsk, &flags);
 
         /*
          * Now that all the timers on our list have the firing flag,
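lock_task_sighand() is what makes the earlier exit_state check in fastpath_timer_check() unnecessary: it takes ->siglock only if the task has not yet been reaped. The canonical pattern, for reference:

    unsigned long flags;

    if (!lock_task_sighand(tsk, &flags))
            return;         /* task was already reaped; no sighand */
    /* ... work under tsk->sighand->siglock, interrupts saved ... */
    unlock_task_sighand(tsk, &flags);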
@@ -1456,21 +1368,23 @@ void run_posix_cpu_timers(struct task_struct *tsk)
 }
 
 /*
- * Set one of the process-wide special case CPU timers.
+ * Set one of the process-wide special case CPU timers or RLIMIT_CPU.
  * The tsk->sighand->siglock must be held by the caller.
- * The *newval argument is relative and we update it to be absolute, *oldval
- * is absolute and we update it to be relative.
  */
 void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
                            cputime_t *newval, cputime_t *oldval)
 {
         union cpu_time_count now;
-        struct list_head *head;
 
         BUG_ON(clock_idx == CPUCLOCK_SCHED);
         cpu_timer_sample_group(clock_idx, tsk, &now);
 
         if (oldval) {
+                /*
+                 * We are setting itimer. The *oldval is absolute and we update
+                 * it to be relative, *newval argument is relative and we update
+                 * it to be absolute.
+                 */
                 if (!cputime_eq(*oldval, cputime_zero)) {
                         if (cputime_le(*oldval, now.cpu)) {
                                 /* Just about to fire. */
@@ -1483,33 +1397,21 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
                 if (cputime_eq(*newval, cputime_zero))
                         return;
                 *newval = cputime_add(*newval, now.cpu);
-
-                /*
-                 * If the RLIMIT_CPU timer will expire before the
-                 * ITIMER_PROF timer, we have nothing else to do.
-                 */
-                if (tsk->signal->rlim[RLIMIT_CPU].rlim_cur
-                    < cputime_to_secs(*newval))
-                        return;
         }
 
         /*
-         * Check whether there are any process timers already set to fire
-         * before this one.  If so, we don't have anything more to do.
+         * Update expiration cache if we are the earliest timer, or eventually
+         * RLIMIT_CPU limit is earlier than prof_exp cpu timer expire.
          */
-        head = &tsk->signal->cpu_timers[clock_idx];
-        if (list_empty(head) ||
-            cputime_ge(list_first_entry(head,
-                                struct cpu_timer_list, entry)->expires.cpu,
-                       *newval)) {
-                switch (clock_idx) {
-                case CPUCLOCK_PROF:
+        switch (clock_idx) {
+        case CPUCLOCK_PROF:
+                if (expires_gt(tsk->signal->cputime_expires.prof_exp, *newval))
                         tsk->signal->cputime_expires.prof_exp = *newval;
-                        break;
-                case CPUCLOCK_VIRT:
+                break;
+        case CPUCLOCK_VIRT:
+                if (expires_gt(tsk->signal->cputime_expires.virt_exp, *newval))
                         tsk->signal->cputime_expires.virt_exp = *newval;
-                        break;
-                }
-        }
+                break;
+        }
 }
 
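A worked example of the conversion in the oldval branch above, with hypothetical numbers: the thread group has consumed 3s of CPU time (now.cpu), an earlier ITIMER_PROF would fire at 4s absolute, and setitimer() installs a new 2s interval:

    *oldval: 4s absolute -> reported back as 4s - 3s = 1s relative
    *newval: 2s relative -> stored as 2s + 3s = 5s absolute

The switch then lowers cputime_expires.prof_exp to 5s only if the cache was zero or later than 5s, which is exactly the expires_gt() test.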