Diffstat (limited to 'kernel/posix-cpu-timers.c')
-rw-r--r--	kernel/posix-cpu-timers.c	471
1 file changed, 252 insertions, 219 deletions
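For orientation before the diff: the new code below relies on a few structures and fields that are defined outside this file (the diffstat covers kernel/posix-cpu-timers.c only), chiefly struct task_cputime, a per-CPU totals pointer reached through signal->cputime.totals, and cached expiration fields named prof_exp, virt_exp and sched_exp. The sketch that follows is inferred purely from how those names are used in this patch; it is an approximation for reading convenience, not the actual header change.

/*
 * Reader's sketch, inferred from usage in this patch.  The real definitions
 * live in a shared header and may differ; the type name thread_group_cputime
 * and the field comments are assumptions.
 */
struct task_cputime {
	cputime_t		utime;			/* user CPU time */
	cputime_t		stime;			/* system CPU time */
	unsigned long long	sum_exec_runtime;	/* scheduler runtime, in ns */
};

struct thread_group_cputime {
	struct task_cputime	*totals;	/* per-CPU accumulators (alloc_percpu) */
};

Both task_struct and signal_struct also appear to carry a cputime_expires member whose prof_exp/virt_exp (cputime_t) and sched_exp (nanoseconds) fields cache the earliest pending expiration; the new fastpath_timer_check() compares sampled times against them.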
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index c42a03aef36f..dba1c334c3e8 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -8,6 +8,99 @@
 #include <linux/math64.h>
 #include <asm/uaccess.h>
 
+#ifdef CONFIG_SMP
+/*
+ * Allocate the thread_group_cputime structure appropriately for SMP kernels
+ * and fill in the current values of the fields.  Called from copy_signal()
+ * via thread_group_cputime_clone_thread() when adding a second or subsequent
+ * thread to a thread group.  Assumes interrupts are enabled when called.
+ */
+int thread_group_cputime_alloc_smp(struct task_struct *tsk)
+{
+	struct signal_struct *sig = tsk->signal;
+	struct task_cputime *cputime;
+
+	/*
+	 * If we have multiple threads and we don't already have a
+	 * per-CPU task_cputime struct, allocate one and fill it in with
+	 * the times accumulated so far.
+	 */
+	if (sig->cputime.totals)
+		return 0;
+	cputime = alloc_percpu(struct task_cputime);
+	if (cputime == NULL)
+		return -ENOMEM;
+	read_lock(&tasklist_lock);
+	spin_lock_irq(&tsk->sighand->siglock);
+	if (sig->cputime.totals) {
+		spin_unlock_irq(&tsk->sighand->siglock);
+		read_unlock(&tasklist_lock);
+		free_percpu(cputime);
+		return 0;
+	}
+	sig->cputime.totals = cputime;
+	cputime = per_cpu_ptr(sig->cputime.totals, get_cpu());
+	cputime->utime = tsk->utime;
+	cputime->stime = tsk->stime;
+	cputime->sum_exec_runtime = tsk->se.sum_exec_runtime;
+	put_cpu_no_resched();
+	spin_unlock_irq(&tsk->sighand->siglock);
+	read_unlock(&tasklist_lock);
+	return 0;
+}
+
+/**
+ * thread_group_cputime_smp - Sum the thread group time fields across all CPUs.
+ *
+ * @tsk:	The task we use to identify the thread group.
+ * @times:	task_cputime structure in which we return the summed fields.
+ *
+ * Walk the list of CPUs to sum the per-CPU time fields in the thread group
+ * time structure.
+ */
+void thread_group_cputime_smp(
+	struct task_struct *tsk,
+	struct task_cputime *times)
+{
+	struct signal_struct *sig;
+	int i;
+	struct task_cputime *tot;
+
+	sig = tsk->signal;
+	if (unlikely(!sig) || !sig->cputime.totals) {
+		times->utime = tsk->utime;
+		times->stime = tsk->stime;
+		times->sum_exec_runtime = tsk->se.sum_exec_runtime;
+		return;
+	}
+	times->stime = times->utime = cputime_zero;
+	times->sum_exec_runtime = 0;
+	for_each_possible_cpu(i) {
+		tot = per_cpu_ptr(tsk->signal->cputime.totals, i);
+		times->utime = cputime_add(times->utime, tot->utime);
+		times->stime = cputime_add(times->stime, tot->stime);
+		times->sum_exec_runtime += tot->sum_exec_runtime;
+	}
+}
+
+#endif /* CONFIG_SMP */
+
+/*
+ * Called after updating RLIMIT_CPU to set timer expiration if necessary.
+ */
+void update_rlimit_cpu(unsigned long rlim_new)
+{
+	cputime_t cputime;
+
+	cputime = secs_to_cputime(rlim_new);
+	if (cputime_eq(current->signal->it_prof_expires, cputime_zero) ||
+	    cputime_lt(current->signal->it_prof_expires, cputime)) {
+		spin_lock_irq(&current->sighand->siglock);
+		set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL);
+		spin_unlock_irq(&current->sighand->siglock);
+	}
+}
+
 static int check_clock(const clockid_t which_clock)
 {
 	int error = 0;
@@ -158,10 +251,6 @@ static inline cputime_t virt_ticks(struct task_struct *p)
 {
 	return p->utime;
 }
-static inline unsigned long long sched_ns(struct task_struct *p)
-{
-	return task_sched_runtime(p);
-}
 
 int posix_cpu_clock_getres(const clockid_t which_clock, struct timespec *tp)
 {
@@ -211,7 +300,7 @@ static int cpu_clock_sample(const clockid_t which_clock, struct task_struct *p,
 		cpu->cpu = virt_ticks(p);
 		break;
 	case CPUCLOCK_SCHED:
-		cpu->sched = sched_ns(p);
+		cpu->sched = task_sched_runtime(p);
 		break;
 	}
 	return 0;
@@ -226,31 +315,20 @@ static int cpu_clock_sample_group_locked(unsigned int clock_idx,
 					 struct task_struct *p,
 					 union cpu_time_count *cpu)
 {
-	struct task_struct *t = p;
-	switch (clock_idx) {
+	struct task_cputime cputime;
+
+	thread_group_cputime(p, &cputime);
+	switch (clock_idx) {
 	default:
 		return -EINVAL;
 	case CPUCLOCK_PROF:
-		cpu->cpu = cputime_add(p->signal->utime, p->signal->stime);
-		do {
-			cpu->cpu = cputime_add(cpu->cpu, prof_ticks(t));
-			t = next_thread(t);
-		} while (t != p);
+		cpu->cpu = cputime_add(cputime.utime, cputime.stime);
 		break;
 	case CPUCLOCK_VIRT:
-		cpu->cpu = p->signal->utime;
-		do {
-			cpu->cpu = cputime_add(cpu->cpu, virt_ticks(t));
-			t = next_thread(t);
-		} while (t != p);
+		cpu->cpu = cputime.utime;
 		break;
 	case CPUCLOCK_SCHED:
-		cpu->sched = p->signal->sum_sched_runtime;
-		/* Add in each other live thread. */
-		while ((t = next_thread(t)) != p) {
-			cpu->sched += t->se.sum_exec_runtime;
-		}
-		cpu->sched += sched_ns(p);
+		cpu->sched = thread_group_sched_runtime(p);
 		break;
 	}
 	return 0;
@@ -471,80 +549,11 @@ void posix_cpu_timers_exit(struct task_struct *tsk)
 }
 void posix_cpu_timers_exit_group(struct task_struct *tsk)
 {
-	cleanup_timers(tsk->signal->cpu_timers,
-		       cputime_add(tsk->utime, tsk->signal->utime),
-		       cputime_add(tsk->stime, tsk->signal->stime),
-		       tsk->se.sum_exec_runtime + tsk->signal->sum_sched_runtime);
-}
-
-
-/*
- * Set the expiry times of all the threads in the process so one of them
- * will go off before the process cumulative expiry total is reached.
- */
-static void process_timer_rebalance(struct task_struct *p,
-				    unsigned int clock_idx,
-				    union cpu_time_count expires,
-				    union cpu_time_count val)
-{
-	cputime_t ticks, left;
-	unsigned long long ns, nsleft;
-	struct task_struct *t = p;
-	unsigned int nthreads = atomic_read(&p->signal->live);
-
-	if (!nthreads)
-		return;
+	struct task_cputime cputime;
 
-	switch (clock_idx) {
-	default:
-		BUG();
-		break;
-	case CPUCLOCK_PROF:
-		left = cputime_div_non_zero(cputime_sub(expires.cpu, val.cpu),
-					    nthreads);
-		do {
-			if (likely(!(t->flags & PF_EXITING))) {
-				ticks = cputime_add(prof_ticks(t), left);
-				if (cputime_eq(t->it_prof_expires,
-					       cputime_zero) ||
-				    cputime_gt(t->it_prof_expires, ticks)) {
-					t->it_prof_expires = ticks;
-				}
-			}
-			t = next_thread(t);
-		} while (t != p);
-		break;
-	case CPUCLOCK_VIRT:
-		left = cputime_div_non_zero(cputime_sub(expires.cpu, val.cpu),
-					    nthreads);
-		do {
-			if (likely(!(t->flags & PF_EXITING))) {
-				ticks = cputime_add(virt_ticks(t), left);
-				if (cputime_eq(t->it_virt_expires,
-					       cputime_zero) ||
-				    cputime_gt(t->it_virt_expires, ticks)) {
-					t->it_virt_expires = ticks;
-				}
-			}
-			t = next_thread(t);
-		} while (t != p);
-		break;
-	case CPUCLOCK_SCHED:
-		nsleft = expires.sched - val.sched;
-		do_div(nsleft, nthreads);
-		nsleft = max_t(unsigned long long, nsleft, 1);
-		do {
-			if (likely(!(t->flags & PF_EXITING))) {
-				ns = t->se.sum_exec_runtime + nsleft;
-				if (t->it_sched_expires == 0 ||
-				    t->it_sched_expires > ns) {
-					t->it_sched_expires = ns;
-				}
-			}
-			t = next_thread(t);
-		} while (t != p);
-		break;
-	}
+	thread_group_cputime(tsk, &cputime);
+	cleanup_timers(tsk->signal->cpu_timers,
+		       cputime.utime, cputime.stime, cputime.sum_exec_runtime);
 }
 
 static void clear_dead_task(struct k_itimer *timer, union cpu_time_count now)
@@ -608,29 +617,32 @@ static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
 		default:
 			BUG();
 		case CPUCLOCK_PROF:
-			if (cputime_eq(p->it_prof_expires,
+			if (cputime_eq(p->cputime_expires.prof_exp,
 				       cputime_zero) ||
-			    cputime_gt(p->it_prof_expires,
+			    cputime_gt(p->cputime_expires.prof_exp,
 				       nt->expires.cpu))
-				p->it_prof_expires = nt->expires.cpu;
+				p->cputime_expires.prof_exp =
+					nt->expires.cpu;
 			break;
 		case CPUCLOCK_VIRT:
-			if (cputime_eq(p->it_virt_expires,
+			if (cputime_eq(p->cputime_expires.virt_exp,
 				       cputime_zero) ||
-			    cputime_gt(p->it_virt_expires,
+			    cputime_gt(p->cputime_expires.virt_exp,
 				       nt->expires.cpu))
-				p->it_virt_expires = nt->expires.cpu;
+				p->cputime_expires.virt_exp =
+					nt->expires.cpu;
 			break;
 		case CPUCLOCK_SCHED:
-			if (p->it_sched_expires == 0 ||
-			    p->it_sched_expires > nt->expires.sched)
-				p->it_sched_expires = nt->expires.sched;
+			if (p->cputime_expires.sched_exp == 0 ||
+			    p->cputime_expires.sched_exp >
+					nt->expires.sched)
+				p->cputime_expires.sched_exp =
+					nt->expires.sched;
 			break;
 		}
 	} else {
 		/*
-		 * For a process timer, we must balance
-		 * all the live threads' expirations.
+		 * For a process timer, set the cached expiration time.
 		 */
 		switch (CPUCLOCK_WHICH(timer->it_clock)) {
 		default:
@@ -641,7 +653,9 @@ static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
 			    cputime_lt(p->signal->it_virt_expires,
 				       timer->it.cpu.expires.cpu))
 				break;
-			goto rebalance;
+			p->signal->cputime_expires.virt_exp =
+				timer->it.cpu.expires.cpu;
+			break;
 		case CPUCLOCK_PROF:
 			if (!cputime_eq(p->signal->it_prof_expires,
 					cputime_zero) &&
@@ -652,13 +666,12 @@ static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
 			if (i != RLIM_INFINITY &&
 			    i <= cputime_to_secs(timer->it.cpu.expires.cpu))
 				break;
-			goto rebalance;
+			p->signal->cputime_expires.prof_exp =
+				timer->it.cpu.expires.cpu;
+			break;
 		case CPUCLOCK_SCHED:
-		rebalance:
-			process_timer_rebalance(
-				timer->it.cpu.task,
-				CPUCLOCK_WHICH(timer->it_clock),
-				timer->it.cpu.expires, now);
+			p->signal->cputime_expires.sched_exp =
+				timer->it.cpu.expires.sched;
 			break;
 		}
 	}
@@ -969,13 +982,13 @@ static void check_thread_timers(struct task_struct *tsk,
 	struct signal_struct *const sig = tsk->signal;
 
 	maxfire = 20;
-	tsk->it_prof_expires = cputime_zero;
+	tsk->cputime_expires.prof_exp = cputime_zero;
 	while (!list_empty(timers)) {
 		struct cpu_timer_list *t = list_first_entry(timers,
						      struct cpu_timer_list,
						      entry);
 		if (!--maxfire || cputime_lt(prof_ticks(tsk), t->expires.cpu)) {
-			tsk->it_prof_expires = t->expires.cpu;
+			tsk->cputime_expires.prof_exp = t->expires.cpu;
 			break;
 		}
 		t->firing = 1;
@@ -984,13 +997,13 @@ static void check_thread_timers(struct task_struct *tsk,
 
 	++timers;
 	maxfire = 20;
-	tsk->it_virt_expires = cputime_zero;
+	tsk->cputime_expires.virt_exp = cputime_zero;
 	while (!list_empty(timers)) {
 		struct cpu_timer_list *t = list_first_entry(timers,
						      struct cpu_timer_list,
						      entry);
 		if (!--maxfire || cputime_lt(virt_ticks(tsk), t->expires.cpu)) {
-			tsk->it_virt_expires = t->expires.cpu;
+			tsk->cputime_expires.virt_exp = t->expires.cpu;
 			break;
 		}
 		t->firing = 1;
@@ -999,13 +1012,13 @@ static void check_thread_timers(struct task_struct *tsk,
 
 	++timers;
 	maxfire = 20;
-	tsk->it_sched_expires = 0;
+	tsk->cputime_expires.sched_exp = 0;
 	while (!list_empty(timers)) {
 		struct cpu_timer_list *t = list_first_entry(timers,
						      struct cpu_timer_list,
						      entry);
 		if (!--maxfire || tsk->se.sum_exec_runtime < t->expires.sched) {
-			tsk->it_sched_expires = t->expires.sched;
+			tsk->cputime_expires.sched_exp = t->expires.sched;
 			break;
 		}
 		t->firing = 1;
@@ -1055,10 +1068,10 @@ static void check_process_timers(struct task_struct *tsk,
 {
 	int maxfire;
 	struct signal_struct *const sig = tsk->signal;
-	cputime_t utime, stime, ptime, virt_expires, prof_expires;
+	cputime_t utime, ptime, virt_expires, prof_expires;
 	unsigned long long sum_sched_runtime, sched_expires;
-	struct task_struct *t;
 	struct list_head *timers = sig->cpu_timers;
+	struct task_cputime cputime;
 
 	/*
 	 * Don't sample the current process CPU clocks if there are no timers.
@@ -1074,18 +1087,10 @@ static void check_process_timers(struct task_struct *tsk,
 	/*
 	 * Collect the current process totals.
 	 */
-	utime = sig->utime;
-	stime = sig->stime;
-	sum_sched_runtime = sig->sum_sched_runtime;
-	t = tsk;
-	do {
-		utime = cputime_add(utime, t->utime);
-		stime = cputime_add(stime, t->stime);
-		sum_sched_runtime += t->se.sum_exec_runtime;
-		t = next_thread(t);
-	} while (t != tsk);
-	ptime = cputime_add(utime, stime);
-
+	thread_group_cputime(tsk, &cputime);
+	utime = cputime.utime;
+	ptime = cputime_add(utime, cputime.stime);
+	sum_sched_runtime = cputime.sum_exec_runtime;
 	maxfire = 20;
 	prof_expires = cputime_zero;
 	while (!list_empty(timers)) {
@@ -1193,60 +1198,18 @@ static void check_process_timers(struct task_struct *tsk,
 		}
 	}
 
-	if (!cputime_eq(prof_expires, cputime_zero) ||
-	    !cputime_eq(virt_expires, cputime_zero) ||
-	    sched_expires != 0) {
-		/*
-		 * Rebalance the threads' expiry times for the remaining
-		 * process CPU timers.
-		 */
-
-		cputime_t prof_left, virt_left, ticks;
-		unsigned long long sched_left, sched;
-		const unsigned int nthreads = atomic_read(&sig->live);
-
-		if (!nthreads)
-			return;
-
-		prof_left = cputime_sub(prof_expires, utime);
-		prof_left = cputime_sub(prof_left, stime);
-		prof_left = cputime_div_non_zero(prof_left, nthreads);
-		virt_left = cputime_sub(virt_expires, utime);
-		virt_left = cputime_div_non_zero(virt_left, nthreads);
-		if (sched_expires) {
-			sched_left = sched_expires - sum_sched_runtime;
-			do_div(sched_left, nthreads);
-			sched_left = max_t(unsigned long long, sched_left, 1);
-		} else {
-			sched_left = 0;
-		}
-		t = tsk;
-		do {
-			if (unlikely(t->flags & PF_EXITING))
-				continue;
-
-			ticks = cputime_add(cputime_add(t->utime, t->stime),
-					    prof_left);
-			if (!cputime_eq(prof_expires, cputime_zero) &&
-			    (cputime_eq(t->it_prof_expires, cputime_zero) ||
-			     cputime_gt(t->it_prof_expires, ticks))) {
-				t->it_prof_expires = ticks;
-			}
-
-			ticks = cputime_add(t->utime, virt_left);
-			if (!cputime_eq(virt_expires, cputime_zero) &&
-			    (cputime_eq(t->it_virt_expires, cputime_zero) ||
-			     cputime_gt(t->it_virt_expires, ticks))) {
-				t->it_virt_expires = ticks;
-			}
-
-			sched = t->se.sum_exec_runtime + sched_left;
-			if (sched_expires && (t->it_sched_expires == 0 ||
-					      t->it_sched_expires > sched)) {
-				t->it_sched_expires = sched;
-			}
-		} while ((t = next_thread(t)) != tsk);
-	}
+	if (!cputime_eq(prof_expires, cputime_zero) &&
+	    (cputime_eq(sig->cputime_expires.prof_exp, cputime_zero) ||
+	     cputime_gt(sig->cputime_expires.prof_exp, prof_expires)))
+		sig->cputime_expires.prof_exp = prof_expires;
+	if (!cputime_eq(virt_expires, cputime_zero) &&
+	    (cputime_eq(sig->cputime_expires.virt_exp, cputime_zero) ||
+	     cputime_gt(sig->cputime_expires.virt_exp, virt_expires)))
+		sig->cputime_expires.virt_exp = virt_expires;
+	if (sched_expires != 0 &&
+	    (sig->cputime_expires.sched_exp == 0 ||
+	     sig->cputime_expires.sched_exp > sched_expires))
+		sig->cputime_expires.sched_exp = sched_expires;
 }
 
 /*
@@ -1314,6 +1277,78 @@ out:
 	++timer->it_requeue_pending;
 }
 
+/**
+ * task_cputime_zero - Check a task_cputime struct for all zero fields.
+ *
+ * @cputime:	The struct to compare.
+ *
+ * Checks @cputime to see if all fields are zero.  Returns true if all fields
+ * are zero, false if any field is nonzero.
+ */
+static inline int task_cputime_zero(const struct task_cputime *cputime)
+{
+	if (cputime_eq(cputime->utime, cputime_zero) &&
+	    cputime_eq(cputime->stime, cputime_zero) &&
+	    cputime->sum_exec_runtime == 0)
+		return 1;
+	return 0;
+}
+
+/**
+ * task_cputime_expired - Compare two task_cputime entities.
+ *
+ * @sample:	The task_cputime structure to be checked for expiration.
+ * @expires:	Expiration times, against which @sample will be checked.
+ *
+ * Checks @sample against @expires to see if any field of @sample has expired.
+ * Returns true if any field of the former is greater than the corresponding
+ * field of the latter if the latter field is set.  Otherwise returns false.
+ */
+static inline int task_cputime_expired(const struct task_cputime *sample,
+					const struct task_cputime *expires)
+{
+	if (!cputime_eq(expires->utime, cputime_zero) &&
+	    cputime_ge(sample->utime, expires->utime))
+		return 1;
+	if (!cputime_eq(expires->stime, cputime_zero) &&
+	    cputime_ge(cputime_add(sample->utime, sample->stime),
+		       expires->stime))
+		return 1;
+	if (expires->sum_exec_runtime != 0 &&
+	    sample->sum_exec_runtime >= expires->sum_exec_runtime)
+		return 1;
+	return 0;
+}
+
+/**
+ * fastpath_timer_check - POSIX CPU timers fast path.
+ *
+ * @tsk:	The task (thread) being checked.
+ * @sig:	The signal pointer for that task.
+ *
+ * If there are no timers set return false.  Otherwise snapshot the task and
+ * thread group timers, then compare them with the corresponding expiration
+ * times.  Returns true if a timer has expired, else returns false.
+ */
+static inline int fastpath_timer_check(struct task_struct *tsk,
+					struct signal_struct *sig)
+{
+	struct task_cputime task_sample = {
+		.utime = tsk->utime,
+		.stime = tsk->stime,
+		.sum_exec_runtime = tsk->se.sum_exec_runtime
+	};
+	struct task_cputime group_sample;
+
+	if (task_cputime_zero(&tsk->cputime_expires) &&
+	    task_cputime_zero(&sig->cputime_expires))
+		return 0;
+	if (task_cputime_expired(&task_sample, &tsk->cputime_expires))
+		return 1;
+	thread_group_cputime(tsk, &group_sample);
+	return task_cputime_expired(&group_sample, &sig->cputime_expires);
+}
+
 /*
  * This is called from the timer interrupt handler.  The irq handler has
  * already updated our counts.  We need to check if any timers fire now.
@@ -1323,30 +1358,29 @@ void run_posix_cpu_timers(struct task_struct *tsk)
 {
 	LIST_HEAD(firing);
 	struct k_itimer *timer, *next;
+	struct signal_struct *sig;
+	struct sighand_struct *sighand;
+	unsigned long flags;
 
 	BUG_ON(!irqs_disabled());
 
-#define UNEXPIRED(clock) \
-		(cputime_eq(tsk->it_##clock##_expires, cputime_zero) || \
-		 cputime_lt(clock##_ticks(tsk), tsk->it_##clock##_expires))
-
-	if (UNEXPIRED(prof) && UNEXPIRED(virt) &&
-	    (tsk->it_sched_expires == 0 ||
-	     tsk->se.sum_exec_runtime < tsk->it_sched_expires))
-		return;
-
-#undef	UNEXPIRED
-
+	/* Pick up tsk->signal and make sure it's valid. */
+	sig = tsk->signal;
 	/*
-	 * Double-check with locks held.
+	 * The fast path checks that there are no expired thread or thread
+	 * group timers.  If that's so, just return.  Also check that
+	 * tsk->signal is non-NULL; this probably can't happen but cover the
+	 * possibility anyway.
 	 */
-	read_lock(&tasklist_lock);
-	if (likely(tsk->signal != NULL)) {
-		spin_lock(&tsk->sighand->siglock);
-
+	if (unlikely(!sig) || !fastpath_timer_check(tsk, sig)) {
+		return;
+	}
+	sighand = lock_task_sighand(tsk, &flags);
+	if (likely(sighand)) {
 		/*
-		 * Here we take off tsk->cpu_timers[N] and tsk->signal->cpu_timers[N]
-		 * all the timers that are firing, and put them on the firing list.
+		 * Here we take off tsk->signal->cpu_timers[N] and
+		 * tsk->cpu_timers[N] all the timers that are firing, and
+		 * put them on the firing list.
 		 */
 		check_thread_timers(tsk, &firing);
 		check_process_timers(tsk, &firing);
@@ -1359,9 +1393,8 @@ void run_posix_cpu_timers(struct task_struct *tsk)
 		 * that gets the timer lock before we do will give it up and
 		 * spin until we've taken care of that timer below.
 		 */
-		spin_unlock(&tsk->sighand->siglock);
 	}
-	read_unlock(&tasklist_lock);
+	unlock_task_sighand(tsk, &flags);
 
 	/*
 	 * Now that all the timers on our list have the firing flag,
@@ -1389,10 +1422,9 @@ void run_posix_cpu_timers(struct task_struct *tsk)
 
 /*
  * Set one of the process-wide special case CPU timers.
- * The tasklist_lock and tsk->sighand->siglock must be held by the caller.
- * The oldval argument is null for the RLIMIT_CPU timer, where *newval is
- * absolute; non-null for ITIMER_*, where *newval is relative and we update
- * it to be absolute, *oldval is absolute and we update it to be relative.
+ * The tsk->sighand->siglock must be held by the caller.
+ * The *newval argument is relative and we update it to be absolute, *oldval
+ * is absolute and we update it to be relative.
  */
 void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
 			   cputime_t *newval, cputime_t *oldval)
@@ -1435,13 +1467,14 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
 		    cputime_ge(list_first_entry(head,
 				  struct cpu_timer_list, entry)->expires.cpu,
 			       *newval)) {
-			/*
-			 * Rejigger each thread's expiry time so that one will
-			 * notice before we hit the process-cumulative expiry time.
-			 */
-			union cpu_time_count expires = { .sched = 0 };
-			expires.cpu = *newval;
-			process_timer_rebalance(tsk, clock_idx, expires, now);
+			switch (clock_idx) {
+			case CPUCLOCK_PROF:
+				tsk->signal->cputime_expires.prof_exp = *newval;
+				break;
+			case CPUCLOCK_VIRT:
+				tsk->signal->cputime_expires.virt_exp = *newval;
+				break;
+			}
 		}
 	}
 
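The update_rlimit_cpu() helper added at the top of this patch is, per its own comment, meant to run after RLIMIT_CPU is updated; the call site is in another file and not part of this diff. A plausible sketch of that caller, stated as an assumption rather than a quote from the patch:

	/*
	 * Hypothetical call site (not shown in this diff): after a successful
	 * setrlimit(RLIMIT_CPU, ...) stores the new limit, refresh the
	 * process-wide CPU timer so the tighter limit takes effect promptly.
	 */
	if (resource == RLIMIT_CPU && new_rlim.rlim_cur != RLIM_INFINITY)
		update_rlimit_cpu(new_rlim.rlim_cur);

Because set_process_cpu_timer() now just stores into signal->cputime_expires instead of rebalancing every thread, such a caller no longer needs the tasklist_lock, which matches the updated locking comment above set_process_cpu_timer().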