author		Frederic Weisbecker <fweisbec@gmail.com>	2017-01-30 22:09:34 -0500
committer	Ingo Molnar <mingo@kernel.org>	2017-02-01 03:13:54 -0500
commit		ebd7e7fc4bc63be5eaf9da903b8060b02dd711ea (patch)
tree		ac4a9204986f4b9f4a59ec798ecb6ffcaf3a0781 /kernel/time
parent		715eb7a9243a058a0722aa2f6ba703ede9113e76 (diff)
timers/posix-timers: Convert internals to use nsecs
Use the new nsec based cputime accessors as part of the whole cputime
conversion from cputime_t to nsecs. Also convert posix-cpu-timers to
use nsec based internal counters to simplify it.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Stanislaw Gruszka <sgruszka@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Wanpeng Li <wanpeng.li@hotmail.com>
Link: http://lkml.kernel.org/r/1485832191-26889-19-git-send-email-fweisbec@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
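The whole conversion boils down to one pattern, sketched below as a minimal
standalone illustration (not part of the patch; prof_sample_ns() is a made-up
name mirroring prof_ticks() in the diff): the nsec based task_cputime()
accessor fills plain u64 nanosecond values, so the cputime_t round trip
through cputime_to_expires() disappears.

/*
 * Minimal sketch of the post-patch sampling pattern, assuming the
 * nsec based task_cputime() accessor. prof_sample_ns() is a
 * hypothetical helper mirroring prof_ticks() from the diff below.
 */
static inline u64 prof_sample_ns(struct task_struct *p)
{
	u64 utime, stime;

	task_cputime(p, &utime, &stime);	/* fills nanoseconds directly */
	return utime + stime;			/* no cputime_to_expires() needed */
}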
Diffstat (limited to 'kernel/time')
-rw-r--r--	kernel/time/itimer.c	6
-rw-r--r--	kernel/time/posix-cpu-timers.c	210
2 files changed, 90 insertions(+), 126 deletions(-)
diff --git a/kernel/time/itimer.c b/kernel/time/itimer.c
index f2d5097bcb6d..bb01ff445ce9 100644
--- a/kernel/time/itimer.c
+++ b/kernel/time/itimer.c
@@ -53,15 +53,15 @@ static void get_cpu_itimer(struct task_struct *tsk, unsigned int clock_id,
 	cval = it->expires;
 	cinterval = it->incr;
 	if (cval) {
-		struct task_cputime_t cputime;
+		struct task_cputime cputime;
 		cputime_t t;
 
 		thread_group_cputimer(tsk, &cputime);
 		if (clock_id == CPUCLOCK_PROF)
-			t = cputime.utime + cputime.stime;
+			t = nsecs_to_cputime(cputime.utime + cputime.stime);
 		else
 			/* CPUCLOCK_VIRT */
-			t = cputime.utime;
+			t = nsecs_to_cputime(cputime.utime);
 
 		if (cval < t)
 			/* about to fire */
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index 8349e02b1c0c..45be3cec0dc2 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -50,39 +50,14 @@ static int check_clock(const clockid_t which_clock)
 	return error;
 }
 
-static inline unsigned long long
-timespec_to_sample(const clockid_t which_clock, const struct timespec *tp)
-{
-	unsigned long long ret;
-
-	ret = 0;		/* high half always zero when .cpu used */
-	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
-		ret = (unsigned long long)tp->tv_sec * NSEC_PER_SEC + tp->tv_nsec;
-	} else {
-		ret = cputime_to_expires(timespec_to_cputime(tp));
-	}
-	return ret;
-}
-
-static void sample_to_timespec(const clockid_t which_clock,
-			       unsigned long long expires,
-			       struct timespec *tp)
-{
-	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED)
-		*tp = ns_to_timespec(expires);
-	else
-		cputime_to_timespec((__force cputime_t)expires, tp);
-}
-
 /*
  * Update expiry time from increment, and increase overrun count,
  * given the current clock sample.
  */
-static void bump_cpu_timer(struct k_itimer *timer,
-			   unsigned long long now)
+static void bump_cpu_timer(struct k_itimer *timer, u64 now)
 {
 	int i;
-	unsigned long long delta, incr;
+	u64 delta, incr;
 
 	if (timer->it.cpu.incr == 0)
 		return;
@@ -115,28 +90,28 @@ static void bump_cpu_timer(struct k_itimer *timer,
  * Checks @cputime to see if all fields are zero. Returns true if all fields
  * are zero, false if any field is nonzero.
  */
-static inline int task_cputime_zero(const struct task_cputime_t *cputime)
+static inline int task_cputime_zero(const struct task_cputime *cputime)
 {
 	if (!cputime->utime && !cputime->stime && !cputime->sum_exec_runtime)
 		return 1;
 	return 0;
 }
 
-static inline unsigned long long prof_ticks(struct task_struct *p)
+static inline u64 prof_ticks(struct task_struct *p)
 {
-	cputime_t utime, stime;
+	u64 utime, stime;
 
-	task_cputime_t(p, &utime, &stime);
+	task_cputime(p, &utime, &stime);
 
-	return cputime_to_expires(utime + stime);
+	return utime + stime;
 }
-static inline unsigned long long virt_ticks(struct task_struct *p)
+static inline u64 virt_ticks(struct task_struct *p)
 {
-	cputime_t utime, stime;
+	u64 utime, stime;
 
-	task_cputime_t(p, &utime, &stime);
+	task_cputime(p, &utime, &stime);
 
-	return cputime_to_expires(utime);
+	return utime;
 }
 
 static int
@@ -176,8 +151,8 @@ posix_cpu_clock_set(const clockid_t which_clock, const struct timespec *tp)
 /*
  * Sample a per-thread clock for the given task.
  */
-static int cpu_clock_sample(const clockid_t which_clock, struct task_struct *p,
-			    unsigned long long *sample)
+static int cpu_clock_sample(const clockid_t which_clock,
+			    struct task_struct *p, u64 *sample)
 {
 	switch (CPUCLOCK_WHICH(which_clock)) {
 	default:
@@ -210,7 +185,7 @@ retry:
 	}
 }
 
-static void update_gt_cputime(struct task_cputime_atomic *cputime_atomic, struct task_cputime_t *sum)
+static void update_gt_cputime(struct task_cputime_atomic *cputime_atomic, struct task_cputime *sum)
 {
 	__update_gt_cputime(&cputime_atomic->utime, sum->utime);
 	__update_gt_cputime(&cputime_atomic->stime, sum->stime);
@@ -218,7 +193,7 @@ static void update_gt_cputime(struct task_cputime_atomic *cputime_atomic, struct
 }
 
 /* Sample task_cputime_atomic values in "atomic_timers", store results in "times". */
-static inline void sample_cputime_atomic(struct task_cputime_t *times,
+static inline void sample_cputime_atomic(struct task_cputime *times,
 					 struct task_cputime_atomic *atomic_times)
 {
 	times->utime = atomic64_read(&atomic_times->utime);
@@ -226,10 +201,10 @@ static inline void sample_cputime_atomic(struct task_cputime_t *times,
 	times->sum_exec_runtime = atomic64_read(&atomic_times->sum_exec_runtime);
 }
 
-void thread_group_cputimer(struct task_struct *tsk, struct task_cputime_t *times)
+void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
 {
 	struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
-	struct task_cputime_t sum;
+	struct task_cputime sum;
 
 	/* Check if cputimer isn't running. This is accessed without locking. */
 	if (!READ_ONCE(cputimer->running)) {
@@ -238,7 +213,7 @@ void thread_group_cputimer(struct task_struct *tsk, struct task_cputime_t *times
 		 * values through the TIMER_ABSTIME flag, therefore we have
 		 * to synchronize the timer to the clock every time we start it.
 		 */
-		thread_group_cputime_t(tsk, &sum);
+		thread_group_cputime(tsk, &sum);
 		update_gt_cputime(&cputimer->cputime_atomic, &sum);
 
 		/*
@@ -260,23 +235,23 @@ void thread_group_cputimer(struct task_struct *tsk, struct task_cputime_t *times
  */
 static int cpu_clock_sample_group(const clockid_t which_clock,
 				  struct task_struct *p,
-				  unsigned long long *sample)
+				  u64 *sample)
 {
-	struct task_cputime_t cputime;
+	struct task_cputime cputime;
 
 	switch (CPUCLOCK_WHICH(which_clock)) {
 	default:
 		return -EINVAL;
 	case CPUCLOCK_PROF:
-		thread_group_cputime_t(p, &cputime);
-		*sample = cputime_to_expires(cputime.utime + cputime.stime);
+		thread_group_cputime(p, &cputime);
+		*sample = cputime.utime + cputime.stime;
 		break;
 	case CPUCLOCK_VIRT:
-		thread_group_cputime_t(p, &cputime);
-		*sample = cputime_to_expires(cputime.utime);
+		thread_group_cputime(p, &cputime);
+		*sample = cputime.utime;
 		break;
 	case CPUCLOCK_SCHED:
-		thread_group_cputime_t(p, &cputime);
+		thread_group_cputime(p, &cputime);
 		*sample = cputime.sum_exec_runtime;
 		break;
 	}
@@ -288,7 +263,7 @@ static int posix_cpu_clock_get_task(struct task_struct *tsk,
 				    struct timespec *tp)
 {
 	int err = -EINVAL;
-	unsigned long long rtn;
+	u64 rtn;
 
 	if (CPUCLOCK_PERTHREAD(which_clock)) {
 		if (same_thread_group(tsk, current))
@@ -299,7 +274,7 @@ static int posix_cpu_clock_get_task(struct task_struct *tsk,
 	}
 
 	if (!err)
-		sample_to_timespec(which_clock, rtn, tp);
+		*tp = ns_to_timespec(rtn);
 
 	return err;
 }
@@ -453,7 +428,7 @@ void posix_cpu_timers_exit_group(struct task_struct *tsk)
 	cleanup_timers(tsk->signal->cpu_timers);
 }
 
-static inline int expires_gt(cputime_t expires, cputime_t new_exp)
+static inline int expires_gt(u64 expires, u64 new_exp)
 {
 	return expires == 0 || expires > new_exp;
 }
@@ -466,7 +441,7 @@ static void arm_timer(struct k_itimer *timer)
 {
 	struct task_struct *p = timer->it.cpu.task;
 	struct list_head *head, *listpos;
-	struct task_cputime_t *cputime_expires;
+	struct task_cputime *cputime_expires;
 	struct cpu_timer_list *const nt = &timer->it.cpu;
 	struct cpu_timer_list *next;
 
@@ -488,7 +463,7 @@ static void arm_timer(struct k_itimer *timer)
 	list_add(&nt->entry, listpos);
 
 	if (listpos == head) {
-		unsigned long long exp = nt->expires;
+		u64 exp = nt->expires;
 
 		/*
 		 * We are the new earliest-expiring POSIX 1.b timer, hence
@@ -499,16 +474,15 @@ static void arm_timer(struct k_itimer *timer)
 
 		switch (CPUCLOCK_WHICH(timer->it_clock)) {
 		case CPUCLOCK_PROF:
-			if (expires_gt(cputime_expires->prof_exp, expires_to_cputime(exp)))
-				cputime_expires->prof_exp = expires_to_cputime(exp);
+			if (expires_gt(cputime_expires->prof_exp, exp))
+				cputime_expires->prof_exp = exp;
 			break;
 		case CPUCLOCK_VIRT:
-			if (expires_gt(cputime_expires->virt_exp, expires_to_cputime(exp)))
-				cputime_expires->virt_exp = expires_to_cputime(exp);
+			if (expires_gt(cputime_expires->virt_exp, exp))
+				cputime_expires->virt_exp = exp;
 			break;
 		case CPUCLOCK_SCHED:
-			if (cputime_expires->sched_exp == 0 ||
-			    cputime_expires->sched_exp > exp)
+			if (expires_gt(cputime_expires->sched_exp, exp))
 				cputime_expires->sched_exp = exp;
 			break;
 		}
@@ -559,20 +533,19 @@ static void cpu_timer_fire(struct k_itimer *timer)
  * traversal.
  */
 static int cpu_timer_sample_group(const clockid_t which_clock,
-				  struct task_struct *p,
-				  unsigned long long *sample)
+				  struct task_struct *p, u64 *sample)
 {
-	struct task_cputime_t cputime;
+	struct task_cputime cputime;
 
 	thread_group_cputimer(p, &cputime);
 	switch (CPUCLOCK_WHICH(which_clock)) {
 	default:
 		return -EINVAL;
 	case CPUCLOCK_PROF:
-		*sample = cputime_to_expires(cputime.utime + cputime.stime);
+		*sample = cputime.utime + cputime.stime;
 		break;
 	case CPUCLOCK_VIRT:
-		*sample = cputime_to_expires(cputime.utime);
+		*sample = cputime.utime;
 		break;
 	case CPUCLOCK_SCHED:
 		*sample = cputime.sum_exec_runtime;
@@ -593,12 +566,12 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
 	unsigned long flags;
 	struct sighand_struct *sighand;
 	struct task_struct *p = timer->it.cpu.task;
-	unsigned long long old_expires, new_expires, old_incr, val;
+	u64 old_expires, new_expires, old_incr, val;
 	int ret;
 
 	WARN_ON_ONCE(p == NULL);
 
-	new_expires = timespec_to_sample(timer->it_clock, &new->it_value);
+	new_expires = timespec_to_ns(&new->it_value);
 
 	/*
 	 * Protect against sighand release/switch in exit/exec and p->cpu_timers
@@ -659,9 +632,7 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
 			bump_cpu_timer(timer, val);
 			if (val < timer->it.cpu.expires) {
 				old_expires = timer->it.cpu.expires - val;
-				sample_to_timespec(timer->it_clock,
-						   old_expires,
-						   &old->it_value);
+				old->it_value = ns_to_timespec(old_expires);
 			} else {
 				old->it_value.tv_nsec = 1;
 				old->it_value.tv_sec = 0;
@@ -699,8 +670,7 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
 	 * Install the new reload setting, and
 	 * set up the signal and overrun bookkeeping.
 	 */
-	timer->it.cpu.incr = timespec_to_sample(timer->it_clock,
-						&new->it_interval);
+	timer->it.cpu.incr = timespec_to_ns(&new->it_interval);
 
 	/*
 	 * This acts as a modification timestamp for the timer,
@@ -723,17 +693,15 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
 
 	ret = 0;
  out:
-	if (old) {
-		sample_to_timespec(timer->it_clock,
-				   old_incr, &old->it_interval);
-	}
+	if (old)
+		old->it_interval = ns_to_timespec(old_incr);
 
 	return ret;
 }
 
 static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
 {
-	unsigned long long now;
+	u64 now;
 	struct task_struct *p = timer->it.cpu.task;
 
 	WARN_ON_ONCE(p == NULL);
@@ -741,8 +709,7 @@ static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
 	/*
 	 * Easy part: convert the reload time.
 	 */
-	sample_to_timespec(timer->it_clock,
-			   timer->it.cpu.incr, &itp->it_interval);
+	itp->it_interval = ns_to_timespec(timer->it.cpu.incr);
 
 	if (timer->it.cpu.expires == 0) {	/* Timer not armed at all. */
 		itp->it_value.tv_sec = itp->it_value.tv_nsec = 0;
@@ -761,7 +728,7 @@ static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
 		/*
 		 * Protect against sighand release/switch in exit/exec and
 		 * also make timer sampling safe if it ends up calling
-		 * thread_group_cputime_t().
+		 * thread_group_cputime().
 		 */
 		sighand = lock_task_sighand(p, &flags);
 		if (unlikely(sighand == NULL)) {
@@ -771,8 +738,7 @@ static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
 			 * Call the timer disarmed, nothing else to do.
 			 */
 			timer->it.cpu.expires = 0;
-			sample_to_timespec(timer->it_clock, timer->it.cpu.expires,
-					   &itp->it_value);
+			itp->it_value = ns_to_timespec(timer->it.cpu.expires);
 			return;
 		} else {
 			cpu_timer_sample_group(timer->it_clock, p, &now);
@@ -781,9 +747,7 @@ static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
 	}
 
 	if (now < timer->it.cpu.expires) {
-		sample_to_timespec(timer->it_clock,
-				   timer->it.cpu.expires - now,
-				   &itp->it_value);
+		itp->it_value = ns_to_timespec(timer->it.cpu.expires - now);
 	} else {
 		/*
 		 * The timer should have expired already, but the firing
@@ -826,8 +790,8 @@ static void check_thread_timers(struct task_struct *tsk,
 {
 	struct list_head *timers = tsk->cpu_timers;
 	struct signal_struct *const sig = tsk->signal;
-	struct task_cputime_t *tsk_expires = &tsk->cputime_expires;
-	unsigned long long expires;
+	struct task_cputime *tsk_expires = &tsk->cputime_expires;
+	u64 expires;
 	unsigned long soft;
 
 	/*
@@ -838,10 +802,10 @@ static void check_thread_timers(struct task_struct *tsk,
 		return;
 
 	expires = check_timers_list(timers, firing, prof_ticks(tsk));
-	tsk_expires->prof_exp = expires_to_cputime(expires);
+	tsk_expires->prof_exp = expires;
 
 	expires = check_timers_list(++timers, firing, virt_ticks(tsk));
-	tsk_expires->virt_exp = expires_to_cputime(expires);
+	tsk_expires->virt_exp = expires;
 
 	tsk_expires->sched_exp = check_timers_list(++timers, firing,
 						   tsk->se.sum_exec_runtime);
@@ -891,13 +855,12 @@ static inline void stop_process_timers(struct signal_struct *sig)
 }
 
 static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
-			     unsigned long long *expires,
-			     unsigned long long cur_time, int signo)
+			     u64 *expires, u64 cur_time, int signo)
 {
 	if (!it->expires)
 		return;
 
-	if (cur_time >= it->expires) {
+	if (cur_time >= cputime_to_nsecs(it->expires)) {
 		if (it->incr) {
 			it->expires += it->incr;
 			it->error += it->incr_error;
@@ -915,8 +878,8 @@ static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
 		__group_send_sig_info(signo, SEND_SIG_PRIV, tsk);
 	}
 
-	if (it->expires && (!*expires || it->expires < *expires)) {
-		*expires = it->expires;
+	if (it->expires && (!*expires || cputime_to_nsecs(it->expires) < *expires)) {
+		*expires = cputime_to_nsecs(it->expires);
 	}
 }
 
@@ -929,10 +892,10 @@ static void check_process_timers(struct task_struct *tsk,
 				 struct list_head *firing)
 {
 	struct signal_struct *const sig = tsk->signal;
-	unsigned long long utime, ptime, virt_expires, prof_expires;
-	unsigned long long sum_sched_runtime, sched_expires;
+	u64 utime, ptime, virt_expires, prof_expires;
+	u64 sum_sched_runtime, sched_expires;
 	struct list_head *timers = sig->cpu_timers;
-	struct task_cputime_t cputime;
+	struct task_cputime cputime;
 	unsigned long soft;
 
 	/*
@@ -952,8 +915,8 @@ static void check_process_timers(struct task_struct *tsk,
 	 * Collect the current process totals.
 	 */
 	thread_group_cputimer(tsk, &cputime);
-	utime = cputime_to_expires(cputime.utime);
-	ptime = utime + cputime_to_expires(cputime.stime);
+	utime = cputime.utime;
+	ptime = utime + cputime.stime;
 	sum_sched_runtime = cputime.sum_exec_runtime;
 
 	prof_expires = check_timers_list(timers, firing, ptime);
@@ -969,10 +932,10 @@ static void check_process_timers(struct task_struct *tsk,
 			 SIGVTALRM);
 	soft = READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
 	if (soft != RLIM_INFINITY) {
-		unsigned long psecs = cputime_to_secs(ptime);
+		unsigned long psecs = div_u64(ptime, NSEC_PER_SEC);
 		unsigned long hard =
 			READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_max);
-		cputime_t x;
+		u64 x;
 		if (psecs >= hard) {
 			/*
 			 * At the hard limit, we just die.
@@ -991,14 +954,13 @@ static void check_process_timers(struct task_struct *tsk,
 				sig->rlim[RLIMIT_CPU].rlim_cur = soft;
 			}
 		}
-		x = secs_to_cputime(soft);
-		if (!prof_expires || x < prof_expires) {
+		x = soft * NSEC_PER_SEC;
+		if (!prof_expires || x < prof_expires)
 			prof_expires = x;
-		}
 	}
 
-	sig->cputime_expires.prof_exp = expires_to_cputime(prof_expires);
-	sig->cputime_expires.virt_exp = expires_to_cputime(virt_expires);
+	sig->cputime_expires.prof_exp = prof_expires;
+	sig->cputime_expires.virt_exp = virt_expires;
 	sig->cputime_expires.sched_exp = sched_expires;
 	if (task_cputime_zero(&sig->cputime_expires))
 		stop_process_timers(sig);
@@ -1015,7 +977,7 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
 	struct sighand_struct *sighand;
 	unsigned long flags;
 	struct task_struct *p = timer->it.cpu.task;
-	unsigned long long now;
+	u64 now;
 
 	WARN_ON_ONCE(p == NULL);
 
@@ -1035,7 +997,7 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
 	} else {
 		/*
 		 * Protect arm_timer() and timer sampling in case of call to
-		 * thread_group_cputime_t().
+		 * thread_group_cputime().
 		 */
 		sighand = lock_task_sighand(p, &flags);
 		if (unlikely(sighand == NULL)) {
@@ -1078,8 +1040,8 @@ out:
  * Returns true if any field of the former is greater than the corresponding
  * field of the latter if the latter field is set. Otherwise returns false.
  */
-static inline int task_cputime_expired(const struct task_cputime_t *sample,
-					const struct task_cputime_t *expires)
+static inline int task_cputime_expired(const struct task_cputime *sample,
+					const struct task_cputime *expires)
 {
 	if (expires->utime && sample->utime >= expires->utime)
 		return 1;
@@ -1106,9 +1068,9 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
 	struct signal_struct *sig;
 
 	if (!task_cputime_zero(&tsk->cputime_expires)) {
-		struct task_cputime_t task_sample;
+		struct task_cputime task_sample;
 
-		task_cputime_t(tsk, &task_sample.utime, &task_sample.stime);
+		task_cputime(tsk, &task_sample.utime, &task_sample.stime);
 		task_sample.sum_exec_runtime = tsk->se.sum_exec_runtime;
 		if (task_cputime_expired(&task_sample, &tsk->cputime_expires))
 			return 1;
@@ -1131,7 +1093,7 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
 	 */
 	if (READ_ONCE(sig->cputimer.running) &&
 	    !READ_ONCE(sig->cputimer.checking_timer)) {
-		struct task_cputime_t group_sample;
+		struct task_cputime group_sample;
 
 		sample_cputime_atomic(&group_sample, &sig->cputimer.cputime_atomic);
 
@@ -1214,7 +1176,7 @@ void run_posix_cpu_timers(struct task_struct *tsk)
 void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
 			   cputime_t *newval, cputime_t *oldval)
 {
-	unsigned long long now;
+	u64 now, new;
 
 	WARN_ON_ONCE(clock_idx == CPUCLOCK_SCHED);
 	cpu_timer_sample_group(clock_idx, tsk, &now);
@@ -1226,31 +1188,33 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
 		 * it to be absolute.
 		 */
 		if (*oldval) {
-			if (*oldval <= now) {
+			if (cputime_to_nsecs(*oldval) <= now) {
 				/* Just about to fire. */
 				*oldval = cputime_one_jiffy;
 			} else {
-				*oldval -= now;
+				*oldval -= nsecs_to_cputime(now);
 			}
 		}
 
 		if (!*newval)
 			return;
-		*newval += now;
+		*newval += nsecs_to_cputime(now);
 	}
 
+	new = cputime_to_nsecs(*newval);
+
 	/*
 	 * Update expiration cache if we are the earliest timer, or eventually
 	 * RLIMIT_CPU limit is earlier than prof_exp cpu timer expire.
 	 */
 	switch (clock_idx) {
 	case CPUCLOCK_PROF:
-		if (expires_gt(tsk->signal->cputime_expires.prof_exp, *newval))
-			tsk->signal->cputime_expires.prof_exp = *newval;
+		if (expires_gt(tsk->signal->cputime_expires.prof_exp, new))
+			tsk->signal->cputime_expires.prof_exp = new;
 		break;
 	case CPUCLOCK_VIRT:
-		if (expires_gt(tsk->signal->cputime_expires.virt_exp, *newval))
-			tsk->signal->cputime_expires.virt_exp = *newval;
+		if (expires_gt(tsk->signal->cputime_expires.virt_exp, new))
+			tsk->signal->cputime_expires.virt_exp = new;
 		break;
 	}
 
@@ -1308,7 +1272,7 @@ static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
 		/*
 		 * We were interrupted by a signal.
 		 */
-		sample_to_timespec(which_clock, timer.it.cpu.expires, rqtp);
+		*rqtp = ns_to_timespec(timer.it.cpu.expires);
 		error = posix_cpu_timer_set(&timer, 0, &zero_it, it);
 		if (!error) {
 			/*