Diffstat (limited to 'kernel/posix-cpu-timers.c')
 kernel/posix-cpu-timers.c | 132
 1 file changed, 52 insertions(+), 80 deletions(-)
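Note on the conversion as a whole: cputime_t is now usable as an ordinary arithmetic scalar, so the cputime_add/cputime_sub/cputime_lt/cputime_eq/cputime_zero helpers used throughout this file collapse into plain C operators and a literal 0. A minimal before/after sketch of the pattern (the _demo names are illustrative, not from the tree):

/* before: cputime_t treated as opaque, touched only through helpers */
static inline cputime_t prof_ticks_old_demo(cputime_t utime, cputime_t stime)
{
        return cputime_add(utime, stime);
}

/* after: the same computation with plain operators */
static inline cputime_t prof_ticks_new_demo(cputime_t utime, cputime_t stime)
{
        return utime + stime;
}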
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index e7cb76dc18f5..125cb67daa21 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -78,7 +78,7 @@ static inline int cpu_time_before(const clockid_t which_clock,
         if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
                 return now.sched < then.sched;
         } else {
-                return cputime_lt(now.cpu, then.cpu);
+                return now.cpu < then.cpu;
         }
 }
 static inline void cpu_time_add(const clockid_t which_clock,
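For reference, the union these cpu_time_* helpers branch over is, in kernels of this vintage, declared along these lines in include/linux/posix-timers.h (quoted from memory, verify against the tree): the CPUCLOCK_SCHED case is kept in nanoseconds while PROF/VIRT use cputime units, which is why every helper needs the CPUCLOCK_WHICH() test.

union cpu_time_count {
        cputime_t cpu;                  /* CPUCLOCK_PROF / CPUCLOCK_VIRT */
        unsigned long long sched;       /* CPUCLOCK_SCHED, in nanoseconds */
};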
@@ -88,7 +88,7 @@ static inline void cpu_time_add(const clockid_t which_clock,
         if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
                 acc->sched += val.sched;
         } else {
-                acc->cpu = cputime_add(acc->cpu, val.cpu);
+                acc->cpu += val.cpu;
         }
 }
 static inline union cpu_time_count cpu_time_sub(const clockid_t which_clock,
@@ -98,25 +98,12 @@ static inline union cpu_time_count cpu_time_sub(const clockid_t which_clock,
         if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
                 a.sched -= b.sched;
         } else {
-                a.cpu = cputime_sub(a.cpu, b.cpu);
+                a.cpu -= b.cpu;
         }
         return a;
 }
 
 /*
- * Divide and limit the result to res >= 1
- *
- * This is necessary to prevent signal delivery starvation, when the result of
- * the division would be rounded down to 0.
- */
-static inline cputime_t cputime_div_non_zero(cputime_t time, unsigned long div)
-{
-        cputime_t res = cputime_div(time, div);
-
-        return max_t(cputime_t, res, 1);
-}
-
-/*
  * Update expiry time from increment, and increase overrun count,
  * given the current clock sample.
  */
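The cputime_div_non_zero() helper is simply dropped here; for completeness, its open-coded equivalent under a scalar cputime_t would be an ordinary division clamped to at least 1 (sketch only, this function no longer exists in the tree):

static inline cputime_t cputime_div_non_zero_demo(cputime_t time, unsigned long div)
{
        cputime_t res = time / div;

        /* never return 0, so periodic signal delivery cannot starve */
        return max_t(cputime_t, res, 1);
}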
@@ -148,28 +135,26 @@ static void bump_cpu_timer(struct k_itimer *timer,
         } else {
                 cputime_t delta, incr;
 
-                if (cputime_lt(now.cpu, timer->it.cpu.expires.cpu))
+                if (now.cpu < timer->it.cpu.expires.cpu)
                         return;
                 incr = timer->it.cpu.incr.cpu;
-                delta = cputime_sub(cputime_add(now.cpu, incr),
-                                    timer->it.cpu.expires.cpu);
+                delta = now.cpu + incr - timer->it.cpu.expires.cpu;
                 /* Don't use (incr*2 < delta), incr*2 might overflow. */
-                for (i = 0; cputime_lt(incr, cputime_sub(delta, incr)); i++)
-                        incr = cputime_add(incr, incr);
-                for (; i >= 0; incr = cputime_halve(incr), i--) {
-                        if (cputime_lt(delta, incr))
+                for (i = 0; incr < delta - incr; i++)
+                        incr += incr;
+                for (; i >= 0; incr = incr >> 1, i--) {
+                        if (delta < incr)
                                 continue;
-                        timer->it.cpu.expires.cpu =
-                                cputime_add(timer->it.cpu.expires.cpu, incr);
+                        timer->it.cpu.expires.cpu += incr;
                         timer->it_overrun += 1 << i;
-                        delta = cputime_sub(delta, incr);
+                        delta -= incr;
                 }
         }
 }
 
 static inline cputime_t prof_ticks(struct task_struct *p)
 {
-        return cputime_add(p->utime, p->stime);
+        return p->utime + p->stime;
 }
 static inline cputime_t virt_ticks(struct task_struct *p)
 {
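The bump_cpu_timer() hunk above is the one place where the conversion touches non-trivial arithmetic: the doubling/halving loops advance the expiry past the current sample in whole increments and count how many periods were skipped, without ever forming incr*2 (which could overflow). A standalone model with plain integers (hypothetical user-space code, not part of the patch):

/* Advance *expires past 'now' in steps of 'incr' and return how many
 * whole periods were skipped, mirroring the bump_cpu_timer() logic. */
static unsigned long long bump_demo(unsigned long long *expires,
                                    unsigned long long incr,
                                    unsigned long long now)
{
        unsigned long long delta, overruns = 0;
        int i;

        if (now < *expires)
                return 0;                       /* not expired yet */
        delta = now + incr - *expires;
        /* double incr until it covers more than half of delta ... */
        for (i = 0; incr < delta - incr; i++)
                incr += incr;
        /* ... then walk back down, consuming the largest chunks first */
        for (; i >= 0; incr >>= 1, i--) {
                if (delta < incr)
                        continue;
                *expires += incr;
                overruns += 1ULL << i;
                delta -= incr;
        }
        return overruns;
}

With expires = 10, incr = 3 and now = 16 this yields three overruns and a new expiry of 19, matching firings at 10, 13 and 16.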
@@ -248,8 +233,8 @@ void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
 
         t = tsk;
         do {
-                times->utime = cputime_add(times->utime, t->utime);
-                times->stime = cputime_add(times->stime, t->stime);
+                times->utime += t->utime;
+                times->stime += t->stime;
                 times->sum_exec_runtime += task_sched_runtime(t);
         } while_each_thread(tsk, t);
 out:
@@ -258,10 +243,10 @@ out:
 
 static void update_gt_cputime(struct task_cputime *a, struct task_cputime *b)
 {
-        if (cputime_gt(b->utime, a->utime))
+        if (b->utime > a->utime)
                 a->utime = b->utime;
 
-        if (cputime_gt(b->stime, a->stime))
+        if (b->stime > a->stime)
                 a->stime = b->stime;
 
         if (b->sum_exec_runtime > a->sum_exec_runtime)
@@ -306,7 +291,7 @@ static int cpu_clock_sample_group(const clockid_t which_clock,
                 return -EINVAL;
         case CPUCLOCK_PROF:
                 thread_group_cputime(p, &cputime);
-                cpu->cpu = cputime_add(cputime.utime, cputime.stime);
+                cpu->cpu = cputime.utime + cputime.stime;
                 break;
         case CPUCLOCK_VIRT:
                 thread_group_cputime(p, &cputime);
@@ -470,26 +455,24 @@ static void cleanup_timers(struct list_head *head,
                            unsigned long long sum_exec_runtime)
 {
         struct cpu_timer_list *timer, *next;
-        cputime_t ptime = cputime_add(utime, stime);
+        cputime_t ptime = utime + stime;
 
         list_for_each_entry_safe(timer, next, head, entry) {
                 list_del_init(&timer->entry);
-                if (cputime_lt(timer->expires.cpu, ptime)) {
-                        timer->expires.cpu = cputime_zero;
+                if (timer->expires.cpu < ptime) {
+                        timer->expires.cpu = 0;
                 } else {
-                        timer->expires.cpu = cputime_sub(timer->expires.cpu,
-                                                         ptime);
+                        timer->expires.cpu -= ptime;
                 }
         }
 
         ++head;
         list_for_each_entry_safe(timer, next, head, entry) {
                 list_del_init(&timer->entry);
-                if (cputime_lt(timer->expires.cpu, utime)) {
-                        timer->expires.cpu = cputime_zero;
+                if (timer->expires.cpu < utime) {
+                        timer->expires.cpu = 0;
                 } else {
-                        timer->expires.cpu = cputime_sub(timer->expires.cpu,
-                                                         utime);
+                        timer->expires.cpu -= utime;
                 }
         }
 
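Both loops in cleanup_timers() now spell out the same idiom with plain operators: rebase each remaining absolute expiry by the CPU time the exiting task has already consumed, clamping at 0 so an unsigned cputime_t cannot wrap. A compact restatement (illustrative helper name, not from the tree):

/* Rebase an absolute expiry after 'elapsed' time has been consumed,
 * clamping at 0 instead of wrapping around. */
static inline cputime_t rebase_expiry_demo(cputime_t expires, cputime_t elapsed)
{
        return expires < elapsed ? 0 : expires - elapsed;
}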
@@ -520,8 +503,7 @@ void posix_cpu_timers_exit_group(struct task_struct *tsk)
         struct signal_struct *const sig = tsk->signal;
 
         cleanup_timers(tsk->signal->cpu_timers,
-                       cputime_add(tsk->utime, sig->utime),
-                       cputime_add(tsk->stime, sig->stime),
+                       tsk->utime + sig->utime, tsk->stime + sig->stime,
                        tsk->se.sum_exec_runtime + sig->sum_sched_runtime);
 }
 
@@ -540,8 +522,7 @@ static void clear_dead_task(struct k_itimer *timer, union cpu_time_count now)
 
 static inline int expires_gt(cputime_t expires, cputime_t new_exp)
 {
-        return cputime_eq(expires, cputime_zero) ||
-                cputime_gt(expires, new_exp);
+        return expires == 0 || expires > new_exp;
 }
 
 /*
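The simplified expires_gt() keeps this file's convention that a cputime_t of 0 means "no expiry armed": it answers whether a cached expiry should be replaced by an earlier candidate. Its usage pattern reduces to something like the following (hedged sketch, not quoted from the patch):

/* Pull the cached expiry earlier if it is unset or later than new_exp. */
static inline void tighten_expiry_demo(cputime_t *cached, cputime_t new_exp)
{
        if (*cached == 0 || *cached > new_exp)  /* i.e. expires_gt(*cached, new_exp) */
                *cached = new_exp;
}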
@@ -651,7 +632,7 @@ static int cpu_timer_sample_group(const clockid_t which_clock,
         default:
                 return -EINVAL;
         case CPUCLOCK_PROF:
-                cpu->cpu = cputime_add(cputime.utime, cputime.stime);
+                cpu->cpu = cputime.utime + cputime.stime;
                 break;
         case CPUCLOCK_VIRT:
                 cpu->cpu = cputime.utime;
@@ -918,12 +899,12 @@ static void check_thread_timers(struct task_struct *tsk,
         unsigned long soft;
 
         maxfire = 20;
-        tsk->cputime_expires.prof_exp = cputime_zero;
+        tsk->cputime_expires.prof_exp = 0;
         while (!list_empty(timers)) {
                 struct cpu_timer_list *t = list_first_entry(timers,
                                                       struct cpu_timer_list,
                                                       entry);
-                if (!--maxfire || cputime_lt(prof_ticks(tsk), t->expires.cpu)) {
+                if (!--maxfire || prof_ticks(tsk) < t->expires.cpu) {
                         tsk->cputime_expires.prof_exp = t->expires.cpu;
                         break;
                 }
@@ -933,12 +914,12 @@ static void check_thread_timers(struct task_struct *tsk,
 
         ++timers;
         maxfire = 20;
-        tsk->cputime_expires.virt_exp = cputime_zero;
+        tsk->cputime_expires.virt_exp = 0;
         while (!list_empty(timers)) {
                 struct cpu_timer_list *t = list_first_entry(timers,
                                                       struct cpu_timer_list,
                                                       entry);
-                if (!--maxfire || cputime_lt(virt_ticks(tsk), t->expires.cpu)) {
+                if (!--maxfire || virt_ticks(tsk) < t->expires.cpu) {
                         tsk->cputime_expires.virt_exp = t->expires.cpu;
                         break;
                 }
@@ -1009,20 +990,19 @@ static u32 onecputick;
 static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
                              cputime_t *expires, cputime_t cur_time, int signo)
 {
-        if (cputime_eq(it->expires, cputime_zero))
+        if (!it->expires)
                 return;
 
-        if (cputime_ge(cur_time, it->expires)) {
-                if (!cputime_eq(it->incr, cputime_zero)) {
-                        it->expires = cputime_add(it->expires, it->incr);
+        if (cur_time >= it->expires) {
+                if (it->incr) {
+                        it->expires += it->incr;
                         it->error += it->incr_error;
                         if (it->error >= onecputick) {
-                                it->expires = cputime_sub(it->expires,
-                                                          cputime_one_jiffy);
+                                it->expires -= cputime_one_jiffy;
                                 it->error -= onecputick;
                         }
                 } else {
-                        it->expires = cputime_zero;
+                        it->expires = 0;
                 }
 
                 trace_itimer_expire(signo == SIGPROF ?
@@ -1031,9 +1011,7 @@ static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
                 __group_send_sig_info(signo, SEND_SIG_PRIV, tsk);
         }
 
-        if (!cputime_eq(it->expires, cputime_zero) &&
-            (cputime_eq(*expires, cputime_zero) ||
-             cputime_lt(it->expires, *expires))) {
+        if (it->expires && (!*expires || it->expires < *expires)) {
                 *expires = it->expires;
         }
 }
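The rearming branch of check_cpu_itimer() above carries a small error-feedback scheme: it->error accumulates the per-period conversion remainder (it->incr_error) left over when the requested interval was rounded to cputime granularity, and once it reaches one whole tick (onecputick) the next expiry is pulled one jiffy closer. A simplified user-space model (all _demo names hypothetical):

struct itimer_demo {
        unsigned long long expires;     /* next expiry, in ticks */
        unsigned long long incr;        /* interval rounded to ticks */
        unsigned int incr_error;        /* rounding slack per period */
        unsigned int error;             /* accumulated slack */
};

/* Rearm after an expiry, compensating for per-period rounding. */
static void rearm_demo(struct itimer_demo *it, unsigned int onecputick)
{
        it->expires += it->incr;
        it->error += it->incr_error;
        if (it->error >= onecputick) {
                it->expires -= 1;       /* one whole tick earlier */
                it->error -= onecputick;
        }
}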
@@ -1048,9 +1026,7 @@ static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
  */
 static inline int task_cputime_zero(const struct task_cputime *cputime)
 {
-        if (cputime_eq(cputime->utime, cputime_zero) &&
-            cputime_eq(cputime->stime, cputime_zero) &&
-            cputime->sum_exec_runtime == 0)
+        if (!cputime->utime && !cputime->stime && !cputime->sum_exec_runtime)
                 return 1;
         return 0;
 }
@@ -1076,15 +1052,15 @@ static void check_process_timers(struct task_struct *tsk,
          */
         thread_group_cputimer(tsk, &cputime);
         utime = cputime.utime;
-        ptime = cputime_add(utime, cputime.stime);
+        ptime = utime + cputime.stime;
         sum_sched_runtime = cputime.sum_exec_runtime;
         maxfire = 20;
-        prof_expires = cputime_zero;
+        prof_expires = 0;
         while (!list_empty(timers)) {
                 struct cpu_timer_list *tl = list_first_entry(timers,
                                                       struct cpu_timer_list,
                                                       entry);
-                if (!--maxfire || cputime_lt(ptime, tl->expires.cpu)) {
+                if (!--maxfire || ptime < tl->expires.cpu) {
                         prof_expires = tl->expires.cpu;
                         break;
                 }
@@ -1094,12 +1070,12 @@ static void check_process_timers(struct task_struct *tsk,
 
         ++timers;
         maxfire = 20;
-        virt_expires = cputime_zero;
+        virt_expires = 0;
         while (!list_empty(timers)) {
                 struct cpu_timer_list *tl = list_first_entry(timers,
                                                       struct cpu_timer_list,
                                                       entry);
-                if (!--maxfire || cputime_lt(utime, tl->expires.cpu)) {
+                if (!--maxfire || utime < tl->expires.cpu) {
                         virt_expires = tl->expires.cpu;
                         break;
                 }
@@ -1154,8 +1130,7 @@ static void check_process_timers(struct task_struct *tsk,
                         }
                 }
                 x = secs_to_cputime(soft);
-                if (cputime_eq(prof_expires, cputime_zero) ||
-                    cputime_lt(x, prof_expires)) {
+                if (!prof_expires || x < prof_expires) {
                         prof_expires = x;
                 }
         }
@@ -1249,12 +1224,9 @@ out:
 static inline int task_cputime_expired(const struct task_cputime *sample,
                                         const struct task_cputime *expires)
 {
-        if (!cputime_eq(expires->utime, cputime_zero) &&
-            cputime_ge(sample->utime, expires->utime))
+        if (expires->utime && sample->utime >= expires->utime)
                 return 1;
-        if (!cputime_eq(expires->stime, cputime_zero) &&
-            cputime_ge(cputime_add(sample->utime, sample->stime),
-                       expires->stime))
+        if (expires->stime && sample->utime + sample->stime >= expires->stime)
                 return 1;
         if (expires->sum_exec_runtime != 0 &&
             sample->sum_exec_runtime >= expires->sum_exec_runtime)
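A note on the field names in task_cputime_expired(): the profiling sample (sample->utime + sample->stime) is compared against expires->stime because struct task_cputime doubles as the expiry cache, with alias macros along these lines in include/linux/sched.h of this era (quoted from memory, verify against the tree):

struct task_cputime {
        cputime_t utime;
        cputime_t stime;
        unsigned long long sum_exec_runtime;
};
/* Alternate field names when the struct caches expiry limits: */
#define virt_exp        utime
#define prof_exp        stime
#define sched_exp       sum_exec_runtime

That is why check_thread_timers() above writes cputime_expires.prof_exp and cputime_expires.virt_exp while this helper reads ->stime and ->utime.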
@@ -1389,18 +1361,18 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
          * it to be relative, *newval argument is relative and we update
          * it to be absolute.
          */
-        if (!cputime_eq(*oldval, cputime_zero)) {
-                if (cputime_le(*oldval, now.cpu)) {
+        if (*oldval) {
+                if (*oldval <= now.cpu) {
                         /* Just about to fire. */
                         *oldval = cputime_one_jiffy;
                 } else {
-                        *oldval = cputime_sub(*oldval, now.cpu);
+                        *oldval -= now.cpu;
                 }
         }
 
-        if (cputime_eq(*newval, cputime_zero))
+        if (!*newval)
                 return;
-        *newval = cputime_add(*newval, now.cpu);
+        *newval += now.cpu;
 }
 
 /*
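The tail of set_process_cpu_timer() does the relative/absolute conversion in both directions around the same sample: the previous expiry (*oldval, absolute) is handed back as time remaining, and the new setting (*newval, relative) is rebased onto the current group time before being armed. A compact restatement of that conversion (illustrative helper, not part of the patch):

/* Convert an old absolute expiry to time-remaining and a new relative
 * value to an absolute expiry, both against sample time 'now'. */
static void rebase_itimer_demo(cputime_t *oldval, cputime_t *newval,
                               cputime_t now)
{
        if (*oldval) {
                if (*oldval <= now)
                        *oldval = cputime_one_jiffy;    /* about to fire */
                else
                        *oldval -= now;
        }
        if (*newval)
                *newval += now;
}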
