Diffstat (limited to 'kernel/time/posix-cpu-timers.c')
 kernel/time/posix-cpu-timers.c | 170 +++++++-------------
 1 file changed, 59 insertions(+), 111 deletions(-)
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index e9e8c10f0d9a..b4377a5e4269 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -20,10 +20,10 @@
  */
 void update_rlimit_cpu(struct task_struct *task, unsigned long rlim_new)
 {
-	cputime_t cputime = secs_to_cputime(rlim_new);
+	u64 nsecs = rlim_new * NSEC_PER_SEC;
 
 	spin_lock_irq(&task->sighand->siglock);
-	set_process_cpu_timer(task, CPUCLOCK_PROF, &cputime, NULL);
+	set_process_cpu_timer(task, CPUCLOCK_PROF, &nsecs, NULL);
 	spin_unlock_irq(&task->sighand->siglock);
 }
 
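The hunk above replaces the cputime_t conversion with a plain multiply: RLIMIT_CPU is expressed in whole seconds, and the timer expiry is now tracked in nanoseconds. A minimal userspace sketch of the same arithmetic (names here are illustrative, not from the kernel):

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

/* Illustrative stand-in for the new update_rlimit_cpu() math: widen
 * before multiplying so a large seconds value cannot overflow 32-bit
 * arithmetic. */
static uint64_t rlim_secs_to_nsecs(unsigned long rlim_new)
{
	return (uint64_t)rlim_new * NSEC_PER_SEC;
}

int main(void)
{
	/* A 5 second CPU limit becomes 5,000,000,000 ns. */
	printf("%llu\n", (unsigned long long)rlim_secs_to_nsecs(5));
	return 0;
}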
@@ -50,39 +50,14 @@ static int check_clock(const clockid_t which_clock)
 	return error;
 }
 
-static inline unsigned long long
-timespec_to_sample(const clockid_t which_clock, const struct timespec *tp)
-{
-	unsigned long long ret;
-
-	ret = 0;		/* high half always zero when .cpu used */
-	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
-		ret = (unsigned long long)tp->tv_sec * NSEC_PER_SEC + tp->tv_nsec;
-	} else {
-		ret = cputime_to_expires(timespec_to_cputime(tp));
-	}
-	return ret;
-}
-
-static void sample_to_timespec(const clockid_t which_clock,
-			       unsigned long long expires,
-			       struct timespec *tp)
-{
-	if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED)
-		*tp = ns_to_timespec(expires);
-	else
-		cputime_to_timespec((__force cputime_t)expires, tp);
-}
-
 /*
  * Update expiry time from increment, and increase overrun count,
  * given the current clock sample.
  */
-static void bump_cpu_timer(struct k_itimer *timer,
-			   unsigned long long now)
+static void bump_cpu_timer(struct k_itimer *timer, u64 now)
 {
 	int i;
-	unsigned long long delta, incr;
+	u64 delta, incr;
 
 	if (timer->it.cpu.incr == 0)
 		return;
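With every clock kept in nanoseconds, the removed timespec_to_sample()/sample_to_timespec() pair no longer needs to branch on CPUCLOCK_SCHED; the generic timespec_to_ns()/ns_to_timespec() helpers used later in this patch do the whole job. A simplified userspace sketch of those two conversions, assuming non-negative input (the kernel's ns_to_timespec() also normalizes negative remainders):

#include <stdint.h>
#include <time.h>

#define NSEC_PER_SEC 1000000000LL

/* Simplified equivalent of timespec_to_ns(): widen tv_sec first so
 * the multiply is done in 64 bits. */
static int64_t sketch_timespec_to_ns(const struct timespec *ts)
{
	return (int64_t)ts->tv_sec * NSEC_PER_SEC + ts->tv_nsec;
}

/* Simplified equivalent of ns_to_timespec(): split back into the
 * second/nanosecond pair. */
static struct timespec sketch_ns_to_timespec(int64_t nsec)
{
	struct timespec ts = {
		.tv_sec  = (time_t)(nsec / NSEC_PER_SEC),
		.tv_nsec = (long)(nsec % NSEC_PER_SEC),
	};
	return ts;
}

int main(void)
{
	struct timespec t = { .tv_sec = 1, .tv_nsec = 500000000L };
	int64_t ns = sketch_timespec_to_ns(&t);	/* 1500000000 */
	struct timespec back = sketch_ns_to_timespec(ns);

	return !(back.tv_sec == 1 && back.tv_nsec == 500000000L);
}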
@@ -122,21 +97,21 @@ static inline int task_cputime_zero(const struct task_cputime *cputime)
 	return 0;
 }
 
-static inline unsigned long long prof_ticks(struct task_struct *p)
+static inline u64 prof_ticks(struct task_struct *p)
 {
-	cputime_t utime, stime;
+	u64 utime, stime;
 
 	task_cputime(p, &utime, &stime);
 
-	return cputime_to_expires(utime + stime);
+	return utime + stime;
 }
-static inline unsigned long long virt_ticks(struct task_struct *p)
+static inline u64 virt_ticks(struct task_struct *p)
 {
-	cputime_t utime, stime;
+	u64 utime, stime;
 
 	task_cputime(p, &utime, &stime);
 
-	return cputime_to_expires(utime);
+	return utime;
 }
 
 static int
@@ -176,8 +151,8 @@ posix_cpu_clock_set(const clockid_t which_clock, const struct timespec *tp)
 /*
  * Sample a per-thread clock for the given task.
  */
-static int cpu_clock_sample(const clockid_t which_clock, struct task_struct *p,
-			    unsigned long long *sample)
+static int cpu_clock_sample(const clockid_t which_clock,
+			    struct task_struct *p, u64 *sample)
 {
 	switch (CPUCLOCK_WHICH(which_clock)) {
 	default:
@@ -260,7 +235,7 @@ void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
  */
 static int cpu_clock_sample_group(const clockid_t which_clock,
 				  struct task_struct *p,
-				  unsigned long long *sample)
+				  u64 *sample)
 {
 	struct task_cputime cputime;
 
@@ -269,11 +244,11 @@ static int cpu_clock_sample_group(const clockid_t which_clock,
 		return -EINVAL;
 	case CPUCLOCK_PROF:
 		thread_group_cputime(p, &cputime);
-		*sample = cputime_to_expires(cputime.utime + cputime.stime);
+		*sample = cputime.utime + cputime.stime;
 		break;
 	case CPUCLOCK_VIRT:
 		thread_group_cputime(p, &cputime);
-		*sample = cputime_to_expires(cputime.utime);
+		*sample = cputime.utime;
 		break;
 	case CPUCLOCK_SCHED:
 		thread_group_cputime(p, &cputime);
@@ -288,7 +263,7 @@ static int posix_cpu_clock_get_task(struct task_struct *tsk,
 				    struct timespec *tp)
 {
 	int err = -EINVAL;
-	unsigned long long rtn;
+	u64 rtn;
 
 	if (CPUCLOCK_PERTHREAD(which_clock)) {
 		if (same_thread_group(tsk, current))
@@ -299,7 +274,7 @@ static int posix_cpu_clock_get_task(struct task_struct *tsk,
 	}
 
 	if (!err)
-		sample_to_timespec(which_clock, rtn, tp);
+		*tp = ns_to_timespec(rtn);
 
 	return err;
 }
@@ -453,7 +428,7 @@ void posix_cpu_timers_exit_group(struct task_struct *tsk)
 	cleanup_timers(tsk->signal->cpu_timers);
 }
 
-static inline int expires_gt(cputime_t expires, cputime_t new_exp)
+static inline int expires_gt(u64 expires, u64 new_exp)
 {
 	return expires == 0 || expires > new_exp;
 }
@@ -488,7 +463,7 @@ static void arm_timer(struct k_itimer *timer)
 	list_add(&nt->entry, listpos);
 
 	if (listpos == head) {
-		unsigned long long exp = nt->expires;
+		u64 exp = nt->expires;
 
 		/*
 		 * We are the new earliest-expiring POSIX 1.b timer, hence
@@ -499,16 +474,15 @@ static void arm_timer(struct k_itimer *timer)
 
 		switch (CPUCLOCK_WHICH(timer->it_clock)) {
 		case CPUCLOCK_PROF:
-			if (expires_gt(cputime_expires->prof_exp, expires_to_cputime(exp)))
-				cputime_expires->prof_exp = expires_to_cputime(exp);
+			if (expires_gt(cputime_expires->prof_exp, exp))
+				cputime_expires->prof_exp = exp;
 			break;
 		case CPUCLOCK_VIRT:
-			if (expires_gt(cputime_expires->virt_exp, expires_to_cputime(exp)))
-				cputime_expires->virt_exp = expires_to_cputime(exp);
+			if (expires_gt(cputime_expires->virt_exp, exp))
+				cputime_expires->virt_exp = exp;
 			break;
 		case CPUCLOCK_SCHED:
-			if (cputime_expires->sched_exp == 0 ||
-			    cputime_expires->sched_exp > exp)
+			if (expires_gt(cputime_expires->sched_exp, exp))
 				cputime_expires->sched_exp = exp;
 			break;
 		}
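Once prof_exp and virt_exp are u64 nanoseconds like sched_exp, all three arms can share expires_gt(); the CPUCLOCK_SCHED case previously open-coded the identical "unarmed, or armed later" test. A small self-check of that equivalence:

#include <assert.h>
#include <stdint.h>

/* Mirror of the helper: true if the slot is unarmed (0) or armed
 * later than the candidate expiry. */
static int expires_gt(uint64_t expires, uint64_t new_exp)
{
	return expires == 0 || expires > new_exp;
}

int main(void)
{
	uint64_t exp = 500;
	uint64_t samples[] = { 0, 100, 500, 1000 };

	for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		uint64_t sched_exp = samples[i];
		/* the open-coded CPUCLOCK_SCHED test this patch removes */
		int open_coded = (sched_exp == 0 || sched_exp > exp);

		assert(open_coded == expires_gt(sched_exp, exp));
	}
	return 0;
}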
@@ -559,8 +533,7 @@ static void cpu_timer_fire(struct k_itimer *timer)
  * traversal.
  */
 static int cpu_timer_sample_group(const clockid_t which_clock,
-				  struct task_struct *p,
-				  unsigned long long *sample)
+				  struct task_struct *p, u64 *sample)
 {
 	struct task_cputime cputime;
 
@@ -569,10 +542,10 @@ static int cpu_timer_sample_group(const clockid_t which_clock,
 	default:
 		return -EINVAL;
 	case CPUCLOCK_PROF:
-		*sample = cputime_to_expires(cputime.utime + cputime.stime);
+		*sample = cputime.utime + cputime.stime;
 		break;
 	case CPUCLOCK_VIRT:
-		*sample = cputime_to_expires(cputime.utime);
+		*sample = cputime.utime;
 		break;
 	case CPUCLOCK_SCHED:
 		*sample = cputime.sum_exec_runtime;
@@ -593,12 +566,12 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
 	unsigned long flags;
 	struct sighand_struct *sighand;
 	struct task_struct *p = timer->it.cpu.task;
-	unsigned long long old_expires, new_expires, old_incr, val;
+	u64 old_expires, new_expires, old_incr, val;
 	int ret;
 
 	WARN_ON_ONCE(p == NULL);
 
-	new_expires = timespec_to_sample(timer->it_clock, &new->it_value);
+	new_expires = timespec_to_ns(&new->it_value);
 
 	/*
 	 * Protect against sighand release/switch in exit/exec and p->cpu_timers
@@ -659,9 +632,7 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
 			bump_cpu_timer(timer, val);
 			if (val < timer->it.cpu.expires) {
 				old_expires = timer->it.cpu.expires - val;
-				sample_to_timespec(timer->it_clock,
-						   old_expires,
-						   &old->it_value);
+				old->it_value = ns_to_timespec(old_expires);
 			} else {
 				old->it_value.tv_nsec = 1;
 				old->it_value.tv_sec = 0;
@@ -699,8 +670,7 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
 	 * Install the new reload setting, and
 	 * set up the signal and overrun bookkeeping.
 	 */
-	timer->it.cpu.incr = timespec_to_sample(timer->it_clock,
-						&new->it_interval);
+	timer->it.cpu.incr = timespec_to_ns(&new->it_interval);
 
 	/*
 	 * This acts as a modification timestamp for the timer,
@@ -723,17 +693,15 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
 
 	ret = 0;
  out:
-	if (old) {
-		sample_to_timespec(timer->it_clock,
-				   old_incr, &old->it_interval);
-	}
+	if (old)
+		old->it_interval = ns_to_timespec(old_incr);
 
 	return ret;
 }
 
 static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
 {
-	unsigned long long now;
+	u64 now;
 	struct task_struct *p = timer->it.cpu.task;
 
 	WARN_ON_ONCE(p == NULL);
@@ -741,8 +709,7 @@ static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
 	/*
 	 * Easy part: convert the reload time.
 	 */
-	sample_to_timespec(timer->it_clock,
-			   timer->it.cpu.incr, &itp->it_interval);
+	itp->it_interval = ns_to_timespec(timer->it.cpu.incr);
 
 	if (timer->it.cpu.expires == 0) {	/* Timer not armed at all.  */
 		itp->it_value.tv_sec = itp->it_value.tv_nsec = 0;
@@ -771,8 +738,7 @@ static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
 			 * Call the timer disarmed, nothing else to do.
 			 */
 			timer->it.cpu.expires = 0;
-			sample_to_timespec(timer->it_clock, timer->it.cpu.expires,
-					   &itp->it_value);
+			itp->it_value = ns_to_timespec(timer->it.cpu.expires);
 			return;
 		} else {
 			cpu_timer_sample_group(timer->it_clock, p, &now);
@@ -781,9 +747,7 @@ static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
 	}
 
 	if (now < timer->it.cpu.expires) {
-		sample_to_timespec(timer->it_clock,
-				   timer->it.cpu.expires - now,
-				   &itp->it_value);
+		itp->it_value = ns_to_timespec(timer->it.cpu.expires - now);
 	} else {
 		/*
 		 * The timer should have expired already, but the firing
@@ -827,7 +791,7 @@ static void check_thread_timers(struct task_struct *tsk,
 	struct list_head *timers = tsk->cpu_timers;
 	struct signal_struct *const sig = tsk->signal;
 	struct task_cputime *tsk_expires = &tsk->cputime_expires;
-	unsigned long long expires;
+	u64 expires;
 	unsigned long soft;
 
 	/*
@@ -838,10 +802,10 @@ static void check_thread_timers(struct task_struct *tsk,
 		return;
 
 	expires = check_timers_list(timers, firing, prof_ticks(tsk));
-	tsk_expires->prof_exp = expires_to_cputime(expires);
+	tsk_expires->prof_exp = expires;
 
 	expires = check_timers_list(++timers, firing, virt_ticks(tsk));
-	tsk_expires->virt_exp = expires_to_cputime(expires);
+	tsk_expires->virt_exp = expires;
 
 	tsk_expires->sched_exp = check_timers_list(++timers, firing,
 						   tsk->se.sum_exec_runtime);
@@ -890,26 +854,17 @@ static inline void stop_process_timers(struct signal_struct *sig)
 	tick_dep_clear_signal(sig, TICK_DEP_BIT_POSIX_TIMER);
 }
 
-static u32 onecputick;
-
 static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
-			     unsigned long long *expires,
-			     unsigned long long cur_time, int signo)
+			     u64 *expires, u64 cur_time, int signo)
 {
 	if (!it->expires)
 		return;
 
 	if (cur_time >= it->expires) {
-		if (it->incr) {
+		if (it->incr)
 			it->expires += it->incr;
-			it->error += it->incr_error;
-			if (it->error >= onecputick) {
-				it->expires -= cputime_one_jiffy;
-				it->error -= onecputick;
-			}
-		} else {
+		else
 			it->expires = 0;
-		}
 
 		trace_itimer_expire(signo == SIGPROF ?
 				    ITIMER_PROF : ITIMER_VIRTUAL,
@@ -917,9 +872,8 @@ static void check_cpu_itimer(struct task_struct *tsk, struct cpu_itimer *it,
 		__group_send_sig_info(signo, SEND_SIG_PRIV, tsk);
 	}
 
-	if (it->expires && (!*expires || it->expires < *expires)) {
+	if (it->expires && (!*expires || it->expires < *expires))
 		*expires = it->expires;
-	}
 }
 
 /*
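The dropped error/incr_error fields existed because a nanosecond interval had to be rounded to cputime granularity, and the lost remainder was paid back a jiffy at a time once it accumulated to a full tick. With it->expires and it->incr both in nanoseconds the rearm is exact. For contrast, a userspace sketch of the removed bookkeeping (field names mirror the deleted code; the tick values are arbitrary example inputs):

#include <stdint.h>

/* Sketch of the removed jiffies-era bookkeeping: incr was rounded
 * to cputime granularity, so the lost remainder had to be repaid
 * once it summed to one tick. */
struct old_cpu_itimer {
	uint64_t expires;	/* next expiry                */
	uint64_t incr;		/* interval, rounded          */
	uint32_t error;		/* accumulated rounding loss  */
	uint32_t incr_error;	/* loss per interval          */
};

static void old_style_rearm(struct old_cpu_itimer *it,
			    uint32_t onecputick, uint64_t one_jiffy)
{
	it->expires += it->incr;
	it->error += it->incr_error;
	if (it->error >= onecputick) {
		it->expires -= one_jiffy;
		it->error -= onecputick;
	}
}

int main(void)
{
	struct old_cpu_itimer it = {
		.expires = 0, .incr = 10000000, .error = 0, .incr_error = 300,
	};

	old_style_rearm(&it, 1000000, 10000000);
	return 0;
}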
@@ -931,8 +885,8 @@ static void check_process_timers(struct task_struct *tsk,
 			 struct list_head *firing)
 {
 	struct signal_struct *const sig = tsk->signal;
-	unsigned long long utime, ptime, virt_expires, prof_expires;
-	unsigned long long sum_sched_runtime, sched_expires;
+	u64 utime, ptime, virt_expires, prof_expires;
+	u64 sum_sched_runtime, sched_expires;
 	struct list_head *timers = sig->cpu_timers;
 	struct task_cputime cputime;
 	unsigned long soft;
@@ -954,8 +908,8 @@ static void check_process_timers(struct task_struct *tsk,
 	 * Collect the current process totals.
 	 */
 	thread_group_cputimer(tsk, &cputime);
-	utime = cputime_to_expires(cputime.utime);
-	ptime = utime + cputime_to_expires(cputime.stime);
+	utime = cputime.utime;
+	ptime = utime + cputime.stime;
 	sum_sched_runtime = cputime.sum_exec_runtime;
 
 	prof_expires = check_timers_list(timers, firing, ptime);
@@ -971,10 +925,10 @@ static void check_process_timers(struct task_struct *tsk,
 			 SIGVTALRM);
 	soft = READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
 	if (soft != RLIM_INFINITY) {
-		unsigned long psecs = cputime_to_secs(ptime);
+		unsigned long psecs = div_u64(ptime, NSEC_PER_SEC);
 		unsigned long hard =
 			READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_max);
-		cputime_t x;
+		u64 x;
 		if (psecs >= hard) {
 			/*
 			 * At the hard limit, we just die.
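cputime_to_secs() gives way to an explicit div_u64(ptime, NSEC_PER_SEC). div_u64() is the kernel's 64-by-32 division helper, used because a plain 64-bit '/' is not a native instruction on all 32-bit architectures the kernel supports. A userspace model of the computation:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

/* Userspace model of div_u64(): in the kernel the helper routes
 * 32-bit targets to a library divide; in plain C the '/' operator
 * expresses the same result. */
static uint64_t sketch_div_u64(uint64_t dividend, uint32_t divisor)
{
	return dividend / divisor;
}

int main(void)
{
	uint64_t ptime = 3700000000ULL;	/* 3.7 s of CPU time, in ns */
	unsigned long psecs =
		(unsigned long)sketch_div_u64(ptime, NSEC_PER_SEC);

	printf("%lu\n", psecs);	/* prints 3 */
	return 0;
}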
@@ -993,14 +947,13 @@ static void check_process_timers(struct task_struct *tsk,
 				sig->rlim[RLIMIT_CPU].rlim_cur = soft;
 			}
 		}
-		x = secs_to_cputime(soft);
-		if (!prof_expires || x < prof_expires) {
+		x = soft * NSEC_PER_SEC;
+		if (!prof_expires || x < prof_expires)
 			prof_expires = x;
-		}
 	}
 
-	sig->cputime_expires.prof_exp = expires_to_cputime(prof_expires);
-	sig->cputime_expires.virt_exp = expires_to_cputime(virt_expires);
+	sig->cputime_expires.prof_exp = prof_expires;
+	sig->cputime_expires.virt_exp = virt_expires;
 	sig->cputime_expires.sched_exp = sched_expires;
 	if (task_cputime_zero(&sig->cputime_expires))
 		stop_process_timers(sig);
@@ -1017,7 +970,7 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
 	struct sighand_struct *sighand;
 	unsigned long flags;
 	struct task_struct *p = timer->it.cpu.task;
-	unsigned long long now;
+	u64 now;
 
 	WARN_ON_ONCE(p == NULL);
 
@@ -1214,9 +1167,9 @@ void run_posix_cpu_timers(struct task_struct *tsk)
  * The tsk->sighand->siglock must be held by the caller.
  */
 void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
-			   cputime_t *newval, cputime_t *oldval)
+			   u64 *newval, u64 *oldval)
 {
-	unsigned long long now;
+	u64 now;
 
 	WARN_ON_ONCE(clock_idx == CPUCLOCK_SCHED);
 	cpu_timer_sample_group(clock_idx, tsk, &now);
@@ -1230,7 +1183,7 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
 	if (*oldval) {
 		if (*oldval <= now) {
 			/* Just about to fire. */
-			*oldval = cputime_one_jiffy;
+			*oldval = TICK_NSEC;
 		} else {
 			*oldval -= now;
 		}
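cputime_one_jiffy is replaced by TICK_NSEC (NSEC_PER_SEC / HZ): an already-due itimer is reported as one tick of remaining time rather than zero. A sketch of that floor, assuming HZ=1000 so one tick is 1 ms:

#include <assert.h>
#include <stdint.h>

/* Assumption: TICK_NSEC = NSEC_PER_SEC / HZ, i.e. 1 ms at HZ=1000. */
#define SKETCH_TICK_NSEC 1000000ULL

/* Mirror of the set_process_cpu_timer() logic: a timer that is due
 * (or overdue) reports one tick remaining instead of zero. */
static uint64_t remaining_or_one_tick(uint64_t oldval, uint64_t now)
{
	if (oldval <= now)
		return SKETCH_TICK_NSEC;	/* just about to fire */
	return oldval - now;
}

int main(void)
{
	assert(remaining_or_one_tick(100, 200) == SKETCH_TICK_NSEC);
	assert(remaining_or_one_tick(500, 200) == 300);
	return 0;
}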
@@ -1310,7 +1263,7 @@ static int do_cpu_nanosleep(const clockid_t which_clock, int flags,
 		/*
 		 * We were interrupted by a signal.
 		 */
-		sample_to_timespec(which_clock, timer.it.cpu.expires, rqtp);
+		*rqtp = ns_to_timespec(timer.it.cpu.expires);
 		error = posix_cpu_timer_set(&timer, 0, &zero_it, it);
 		if (!error) {
 			/*
@@ -1476,15 +1429,10 @@ static __init int init_posix_cpu_timers(void)
 		.clock_get	= thread_cpu_clock_get,
 		.timer_create	= thread_cpu_timer_create,
 	};
-	struct timespec ts;
 
 	posix_timers_register_clock(CLOCK_PROCESS_CPUTIME_ID, &process);
 	posix_timers_register_clock(CLOCK_THREAD_CPUTIME_ID, &thread);
 
-	cputime_to_timespec(cputime_one_jiffy, &ts);
-	onecputick = ts.tv_nsec;
-	WARN_ON(ts.tv_sec != 0);
-
 	return 0;
 }
 __initcall(init_posix_cpu_timers);