diff options
Diffstat (limited to 'kernel/posix-cpu-timers.c')
-rw-r--r-- | kernel/posix-cpu-timers.c | 78 |
1 file changed, 46 insertions, 32 deletions
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c index 125cb67daa21..8fd709c9bb58 100644 --- a/kernel/posix-cpu-timers.c +++ b/kernel/posix-cpu-timers.c | |||
@@ -9,6 +9,7 @@ | |||
9 | #include <asm/uaccess.h> | 9 | #include <asm/uaccess.h> |
10 | #include <linux/kernel_stat.h> | 10 | #include <linux/kernel_stat.h> |
11 | #include <trace/events/timer.h> | 11 | #include <trace/events/timer.h> |
12 | #include <linux/random.h> | ||
12 | 13 | ||
13 | /* | 14 | /* |
14 | * Called after updating RLIMIT_CPU to run cpu timer and update | 15 | * Called after updating RLIMIT_CPU to run cpu timer and update |
@@ -154,11 +155,19 @@ static void bump_cpu_timer(struct k_itimer *timer, | |||
154 | 155 | ||
155 | static inline cputime_t prof_ticks(struct task_struct *p) | 156 | static inline cputime_t prof_ticks(struct task_struct *p) |
156 | { | 157 | { |
157 | return p->utime + p->stime; | 158 | cputime_t utime, stime; |
159 | |||
160 | task_cputime(p, &utime, &stime); | ||
161 | |||
162 | return utime + stime; | ||
158 | } | 163 | } |
159 | static inline cputime_t virt_ticks(struct task_struct *p) | 164 | static inline cputime_t virt_ticks(struct task_struct *p) |
160 | { | 165 | { |
161 | return p->utime; | 166 | cputime_t utime; |
167 | |||
168 | task_cputime(p, &utime, NULL); | ||
169 | |||
170 | return utime; | ||
162 | } | 171 | } |
163 | 172 | ||
164 | static int | 173 | static int |
@@ -217,30 +226,6 @@ static int cpu_clock_sample(const clockid_t which_clock, struct task_struct *p, | |||
217 | return 0; | 226 | return 0; |
218 | } | 227 | } |
219 | 228 | ||
220 | void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times) | ||
221 | { | ||
222 | struct signal_struct *sig = tsk->signal; | ||
223 | struct task_struct *t; | ||
224 | |||
225 | times->utime = sig->utime; | ||
226 | times->stime = sig->stime; | ||
227 | times->sum_exec_runtime = sig->sum_sched_runtime; | ||
228 | |||
229 | rcu_read_lock(); | ||
230 | /* make sure we can trust tsk->thread_group list */ | ||
231 | if (!likely(pid_alive(tsk))) | ||
232 | goto out; | ||
233 | |||
234 | t = tsk; | ||
235 | do { | ||
236 | times->utime += t->utime; | ||
237 | times->stime += t->stime; | ||
238 | times->sum_exec_runtime += task_sched_runtime(t); | ||
239 | } while_each_thread(tsk, t); | ||
240 | out: | ||
241 | rcu_read_unlock(); | ||
242 | } | ||
243 | |||
244 | static void update_gt_cputime(struct task_cputime *a, struct task_cputime *b) | 229 | static void update_gt_cputime(struct task_cputime *a, struct task_cputime *b) |
245 | { | 230 | { |
246 | if (b->utime > a->utime) | 231 | if (b->utime > a->utime) |
@@ -494,16 +479,23 @@ static void cleanup_timers(struct list_head *head, | |||
494 | */ | 479 | */ |
495 | void posix_cpu_timers_exit(struct task_struct *tsk) | 480 | void posix_cpu_timers_exit(struct task_struct *tsk) |
496 | { | 481 | { |
482 | cputime_t utime, stime; | ||
483 | |||
484 | add_device_randomness((const void*) &tsk->se.sum_exec_runtime, | ||
485 | sizeof(unsigned long long)); | ||
486 | task_cputime(tsk, &utime, &stime); | ||
497 | cleanup_timers(tsk->cpu_timers, | 487 | cleanup_timers(tsk->cpu_timers, |
498 | tsk->utime, tsk->stime, tsk->se.sum_exec_runtime); | 488 | utime, stime, tsk->se.sum_exec_runtime); |
499 | 489 | ||
500 | } | 490 | } |
501 | void posix_cpu_timers_exit_group(struct task_struct *tsk) | 491 | void posix_cpu_timers_exit_group(struct task_struct *tsk) |
502 | { | 492 | { |
503 | struct signal_struct *const sig = tsk->signal; | 493 | struct signal_struct *const sig = tsk->signal; |
494 | cputime_t utime, stime; | ||
504 | 495 | ||
496 | task_cputime(tsk, &utime, &stime); | ||
505 | cleanup_timers(tsk->signal->cpu_timers, | 497 | cleanup_timers(tsk->signal->cpu_timers, |
506 | tsk->utime + sig->utime, tsk->stime + sig->stime, | 498 | utime + sig->utime, stime + sig->stime, |
507 | tsk->se.sum_exec_runtime + sig->sum_sched_runtime); | 499 | tsk->se.sum_exec_runtime + sig->sum_sched_runtime); |
508 | } | 500 | } |
509 | 501 | ||
@@ -1247,11 +1239,14 @@ static inline int task_cputime_expired(const struct task_cputime *sample, | |||
1247 | static inline int fastpath_timer_check(struct task_struct *tsk) | 1239 | static inline int fastpath_timer_check(struct task_struct *tsk) |
1248 | { | 1240 | { |
1249 | struct signal_struct *sig; | 1241 | struct signal_struct *sig; |
1242 | cputime_t utime, stime; | ||
1243 | |||
1244 | task_cputime(tsk, &utime, &stime); | ||
1250 | 1245 | ||
1251 | if (!task_cputime_zero(&tsk->cputime_expires)) { | 1246 | if (!task_cputime_zero(&tsk->cputime_expires)) { |
1252 | struct task_cputime task_sample = { | 1247 | struct task_cputime task_sample = { |
1253 | .utime = tsk->utime, | 1248 | .utime = utime, |
1254 | .stime = tsk->stime, | 1249 | .stime = stime, |
1255 | .sum_exec_runtime = tsk->se.sum_exec_runtime | 1250 | .sum_exec_runtime = tsk->se.sum_exec_runtime |
1256 | }; | 1251 | }; |
1257 | 1252 | ||
@@ -1422,8 +1417,10 @@ static int do_cpu_nanosleep(const clockid_t which_clock, int flags, | |||
1422 | while (!signal_pending(current)) { | 1417 | while (!signal_pending(current)) { |
1423 | if (timer.it.cpu.expires.sched == 0) { | 1418 | if (timer.it.cpu.expires.sched == 0) { |
1424 | /* | 1419 | /* |
1425 | * Our timer fired and was reset. | 1420 | * Our timer fired and was reset, below |
1421 | * deletion can not fail. | ||
1426 | */ | 1422 | */ |
1423 | posix_cpu_timer_del(&timer); | ||
1427 | spin_unlock_irq(&timer.it_lock); | 1424 | spin_unlock_irq(&timer.it_lock); |
1428 | return 0; | 1425 | return 0; |
1429 | } | 1426 | } |
@@ -1441,9 +1438,26 @@ static int do_cpu_nanosleep(const clockid_t which_clock, int flags, | |||
1441 | * We were interrupted by a signal. | 1438 | * We were interrupted by a signal. |
1442 | */ | 1439 | */ |
1443 | sample_to_timespec(which_clock, timer.it.cpu.expires, rqtp); | 1440 | sample_to_timespec(which_clock, timer.it.cpu.expires, rqtp); |
1444 | posix_cpu_timer_set(&timer, 0, &zero_it, it); | 1441 | error = posix_cpu_timer_set(&timer, 0, &zero_it, it); |
1442 | if (!error) { | ||
1443 | /* | ||
1444 | * Timer is now unarmed, deletion can not fail. | ||
1445 | */ | ||
1446 | posix_cpu_timer_del(&timer); | ||
1447 | } | ||
1445 | spin_unlock_irq(&timer.it_lock); | 1448 | spin_unlock_irq(&timer.it_lock); |
1446 | 1449 | ||
1450 | while (error == TIMER_RETRY) { | ||
1451 | /* | ||
1452 | * We need to handle case when timer was or is in the | ||
1453 | * middle of firing. In other cases we already freed | ||
1454 | * resources. | ||
1455 | */ | ||
1456 | spin_lock_irq(&timer.it_lock); | ||
1457 | error = posix_cpu_timer_del(&timer); | ||
1458 | spin_unlock_irq(&timer.it_lock); | ||
1459 | } | ||
1460 | |||
1447 | if ((it->it_value.tv_sec | it->it_value.tv_nsec) == 0) { | 1461 | if ((it->it_value.tv_sec | it->it_value.tv_nsec) == 0) { |
1448 | /* | 1462 | /* |
1449 | * It actually did fire already. | 1463 | * It actually did fire already. |