Diffstat (limited to 'kernel/posix-cpu-timers.c')
 kernel/posix-cpu-timers.c | 327 ++++++++++++++++++-----------------------
 1 file changed, 135 insertions(+), 192 deletions(-)
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index c7f31aa272f7..3b8946416a5f 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -233,7 +233,8 @@ void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
 
 /*
  * Sample a process (thread group) clock for the given group_leader task.
- * Must be called with tasklist_lock held for reading.
+ * Must be called with task sighand lock held for safe while_each_thread()
+ * traversal.
  */
 static int cpu_clock_sample_group(const clockid_t which_clock,
				  struct task_struct *p,
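Note: the new comment reflects the locking model this patch moves to. Group-wide sampling walks the thread list with while_each_thread(), and holding the group's sighand->siglock keeps that list stable against exit/exec. A minimal sketch of the traversal shape being protected (illustrative only; the real consumer is thread_group_cputime() in kernel/sched/cputime.c):

	/* Sketch: sum a per-thread statistic across the whole group.
	 * The caller must hold sighand->siglock (as the new comment
	 * requires) so while_each_thread() cannot race with exit/exec
	 * reshuffling the thread list. */
	struct task_struct *t = tsk;
	unsigned long long sum = 0;

	do {
		sum += t->se.sum_exec_runtime;
	} while_each_thread(tsk, t);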
@@ -260,30 +261,53 @@ static int cpu_clock_sample_group(const clockid_t which_clock,
 	return 0;
 }
 
+static int posix_cpu_clock_get_task(struct task_struct *tsk,
+				    const clockid_t which_clock,
+				    struct timespec *tp)
+{
+	int err = -EINVAL;
+	unsigned long long rtn;
+
+	if (CPUCLOCK_PERTHREAD(which_clock)) {
+		if (same_thread_group(tsk, current))
+			err = cpu_clock_sample(which_clock, tsk, &rtn);
+	} else {
+		unsigned long flags;
+		struct sighand_struct *sighand;
+
+		/*
+		 * while_each_thread() is not yet entirely RCU safe,
+		 * keep locking the group while sampling process
+		 * clock for now.
+		 */
+		sighand = lock_task_sighand(tsk, &flags);
+		if (!sighand)
+			return err;
+
+		if (tsk == current || thread_group_leader(tsk))
+			err = cpu_clock_sample_group(which_clock, tsk, &rtn);
+
+		unlock_task_sighand(tsk, &flags);
+	}
+
+	if (!err)
+		sample_to_timespec(which_clock, rtn, tp);
+
+	return err;
+}
+
 
 static int posix_cpu_clock_get(const clockid_t which_clock, struct timespec *tp)
 {
 	const pid_t pid = CPUCLOCK_PID(which_clock);
-	int error = -EINVAL;
-	unsigned long long rtn;
+	int err = -EINVAL;
 
 	if (pid == 0) {
 		/*
 		 * Special case constant value for our own clocks.
 		 * We don't have to do any lookup to find ourselves.
 		 */
-		if (CPUCLOCK_PERTHREAD(which_clock)) {
-			/*
-			 * Sampling just ourselves we can do with no locking.
-			 */
-			error = cpu_clock_sample(which_clock,
-						 current, &rtn);
-		} else {
-			read_lock(&tasklist_lock);
-			error = cpu_clock_sample_group(which_clock,
-						       current, &rtn);
-			read_unlock(&tasklist_lock);
-		}
+		err = posix_cpu_clock_get_task(current, which_clock, tp);
 	} else {
 		/*
 		 * Find the given PID, and validate that the caller
@@ -292,29 +316,12 @@ static int posix_cpu_clock_get(const clockid_t which_clock, struct timespec *tp)
 		struct task_struct *p;
 		rcu_read_lock();
 		p = find_task_by_vpid(pid);
-		if (p) {
-			if (CPUCLOCK_PERTHREAD(which_clock)) {
-				if (same_thread_group(p, current)) {
-					error = cpu_clock_sample(which_clock,
-								 p, &rtn);
-				}
-			} else {
-				read_lock(&tasklist_lock);
-				if (thread_group_leader(p) && p->sighand) {
-					error =
-					    cpu_clock_sample_group(which_clock,
-								   p, &rtn);
-				}
-				read_unlock(&tasklist_lock);
-			}
-		}
+		if (p)
+			err = posix_cpu_clock_get_task(p, which_clock, tp);
 		rcu_read_unlock();
 	}
 
-	if (error)
-		return error;
-	sample_to_timespec(which_clock, rtn, tp);
-	return 0;
+	return err;
 }
 
 
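For context, posix_cpu_clock_get() is the kernel backend for clock_gettime() on CPU-time clock IDs. A minimal userspace sketch that exercises both branches above (standard POSIX API, nothing patch-specific):

	#include <stdio.h>
	#include <time.h>

	int main(void)
	{
		struct timespec ts;

		/* Per-thread clock: the CPUCLOCK_PERTHREAD branch above */
		if (clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts) == 0)
			printf("thread : %ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);

		/* Whole-process clock: the group-sampling branch, now
		 * taken under the sighand lock instead of tasklist_lock */
		if (clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts) == 0)
			printf("process: %ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);

		return 0;
	}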
@@ -371,36 +378,40 @@ static int posix_cpu_timer_create(struct k_itimer *new_timer)
  */
 static int posix_cpu_timer_del(struct k_itimer *timer)
 {
-	struct task_struct *p = timer->it.cpu.task;
 	int ret = 0;
+	unsigned long flags;
+	struct sighand_struct *sighand;
+	struct task_struct *p = timer->it.cpu.task;
 
-	if (likely(p != NULL)) {
-		read_lock(&tasklist_lock);
-		if (unlikely(p->sighand == NULL)) {
-			/*
-			 * We raced with the reaping of the task.
-			 * The deletion should have cleared us off the list.
-			 */
-			BUG_ON(!list_empty(&timer->it.cpu.entry));
-		} else {
-			spin_lock(&p->sighand->siglock);
-			if (timer->it.cpu.firing)
-				ret = TIMER_RETRY;
-			else
-				list_del(&timer->it.cpu.entry);
-			spin_unlock(&p->sighand->siglock);
-		}
-		read_unlock(&tasklist_lock);
+	WARN_ON_ONCE(p == NULL);
 
-		if (!ret)
-			put_task_struct(p);
+	/*
+	 * Protect against sighand release/switch in exit/exec and process/
+	 * thread timer list entry concurrent read/writes.
+	 */
+	sighand = lock_task_sighand(p, &flags);
+	if (unlikely(sighand == NULL)) {
+		/*
+		 * We raced with the reaping of the task.
+		 * The deletion should have cleared us off the list.
+		 */
+		WARN_ON_ONCE(!list_empty(&timer->it.cpu.entry));
+	} else {
+		if (timer->it.cpu.firing)
+			ret = TIMER_RETRY;
+		else
+			list_del(&timer->it.cpu.entry);
+
+		unlock_task_sighand(p, &flags);
 	}
 
+	if (!ret)
+		put_task_struct(p);
+
 	return ret;
 }
 
-static void cleanup_timers_list(struct list_head *head,
-				unsigned long long curr)
+static void cleanup_timers_list(struct list_head *head)
 {
 	struct cpu_timer_list *timer, *next;
 
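This is the template the rest of the patch repeats: lock_task_sighand() takes sighand->siglock with interrupts disabled and returns NULL once the task has been reaped, folding the old tasklist_lock + p->sighand NULL check + siglock sequence into one call. The general shape, as a sketch:

	unsigned long flags;
	struct sighand_struct *sighand;

	/* Returns with sighand->siglock held and irqs saved in flags;
	 * NULL means the task has already been reaped. */
	sighand = lock_task_sighand(p, &flags);
	if (!sighand)
		return -ESRCH;	/* or: treat the timer as disarmed */

	/* ... safely touch p->cpu_timers / p->signal->cpu_timers ... */

	unlock_task_sighand(p, &flags);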
@@ -414,16 +425,11 @@ static void cleanup_timers_list(struct list_head *head,
 	 * time for later timer_gettime calls to return.
 	 * This must be called with the siglock held.
 	 */
-static void cleanup_timers(struct list_head *head,
-			   cputime_t utime, cputime_t stime,
-			   unsigned long long sum_exec_runtime)
+static void cleanup_timers(struct list_head *head)
 {
-
-	cputime_t ptime = utime + stime;
-
-	cleanup_timers_list(head, cputime_to_expires(ptime));
-	cleanup_timers_list(++head, cputime_to_expires(utime));
-	cleanup_timers_list(++head, sum_exec_runtime);
+	cleanup_timers_list(head);
+	cleanup_timers_list(++head);
+	cleanup_timers_list(++head);
 }
 
 /*
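The ++head stepping relies on the argument pointing at the first element of the three-entry cpu_timers[] array of list heads, one list per clock class (CPUCLOCK_PROF, CPUCLOCK_VIRT, CPUCLOCK_SCHED). A more explicit, equivalent sketch of the new helper (hypothetical name, assuming that array layout):

	/* Sketch: equivalent of the pointer-stepping version above,
	 * assuming head points at cpu_timers[0] of a cpu_timers[3]
	 * array. */
	static void cleanup_timers_explicit(struct list_head *head)
	{
		int i;

		for (i = CPUCLOCK_PROF; i <= CPUCLOCK_SCHED; i++)
			cleanup_timers_list(&head[i]);
	}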
@@ -433,41 +439,14 @@ static void cleanup_timers(struct list_head *head,
  */
 void posix_cpu_timers_exit(struct task_struct *tsk)
 {
-	cputime_t utime, stime;
-
 	add_device_randomness((const void*) &tsk->se.sum_exec_runtime,
 						sizeof(unsigned long long));
-	task_cputime(tsk, &utime, &stime);
-	cleanup_timers(tsk->cpu_timers,
-		       utime, stime, tsk->se.sum_exec_runtime);
+	cleanup_timers(tsk->cpu_timers);
 
 }
 void posix_cpu_timers_exit_group(struct task_struct *tsk)
 {
-	struct signal_struct *const sig = tsk->signal;
-	cputime_t utime, stime;
-
-	task_cputime(tsk, &utime, &stime);
-	cleanup_timers(tsk->signal->cpu_timers,
-		       utime + sig->utime, stime + sig->stime,
-		       tsk->se.sum_exec_runtime + sig->sum_sched_runtime);
-}
-
-static void clear_dead_task(struct k_itimer *itimer, unsigned long long now)
-{
-	struct cpu_timer_list *timer = &itimer->it.cpu;
-
-	/*
-	 * That's all for this thread or process.
-	 * We leave our residual in expires to be reported.
-	 */
-	put_task_struct(timer->task);
-	timer->task = NULL;
-	if (timer->expires < now) {
-		timer->expires = 0;
-	} else {
-		timer->expires -= now;
-	}
+	cleanup_timers(tsk->signal->cpu_timers);
 }
 
 static inline int expires_gt(cputime_t expires, cputime_t new_exp)
@@ -477,8 +456,7 @@ static inline int expires_gt(cputime_t expires, cputime_t new_exp)
477 456
478/* 457/*
479 * Insert the timer on the appropriate list before any timers that 458 * Insert the timer on the appropriate list before any timers that
480 * expire later. This must be called with the tasklist_lock held 459 * expire later. This must be called with the sighand lock held.
481 * for reading, interrupts disabled and p->sighand->siglock taken.
482 */ 460 */
483static void arm_timer(struct k_itimer *timer) 461static void arm_timer(struct k_itimer *timer)
484{ 462{
@@ -569,7 +547,8 @@ static void cpu_timer_fire(struct k_itimer *timer)
 
 /*
  * Sample a process (thread group) timer for the given group_leader task.
- * Must be called with tasklist_lock held for reading.
+ * Must be called with task sighand lock held for safe while_each_thread()
+ * traversal.
  */
 static int cpu_timer_sample_group(const clockid_t which_clock,
				  struct task_struct *p,
@@ -608,7 +587,8 @@ static DECLARE_WORK(nohz_kick_work, nohz_kick_work_fn);
  */
 static void posix_cpu_timer_kick_nohz(void)
 {
-	schedule_work(&nohz_kick_work);
+	if (context_tracking_is_enabled())
+		schedule_work(&nohz_kick_work);
 }
 
 bool posix_cpu_timers_can_stop_tick(struct task_struct *tsk)
@@ -631,43 +611,39 @@ static inline void posix_cpu_timer_kick_nohz(void) { }
  * If we return TIMER_RETRY, it's necessary to release the timer's lock
  * and try again. (This happens when the timer is in the middle of firing.)
  */
-static int posix_cpu_timer_set(struct k_itimer *timer, int flags,
+static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
 			       struct itimerspec *new, struct itimerspec *old)
 {
+	unsigned long flags;
+	struct sighand_struct *sighand;
 	struct task_struct *p = timer->it.cpu.task;
 	unsigned long long old_expires, new_expires, old_incr, val;
 	int ret;
 
-	if (unlikely(p == NULL)) {
-		/*
-		 * Timer refers to a dead task's clock.
-		 */
-		return -ESRCH;
-	}
+	WARN_ON_ONCE(p == NULL);
 
 	new_expires = timespec_to_sample(timer->it_clock, &new->it_value);
 
-	read_lock(&tasklist_lock);
 	/*
-	 * We need the tasklist_lock to protect against reaping that
-	 * clears p->sighand. If p has just been reaped, we can no
+	 * Protect against sighand release/switch in exit/exec and p->cpu_timers
+	 * and p->signal->cpu_timers read/write in arm_timer()
+	 */
+	sighand = lock_task_sighand(p, &flags);
+	/*
+	 * If p has just been reaped, we can no
 	 * longer get any information about it at all.
 	 */
-	if (unlikely(p->sighand == NULL)) {
-		read_unlock(&tasklist_lock);
-		put_task_struct(p);
-		timer->it.cpu.task = NULL;
+	if (unlikely(sighand == NULL)) {
 		return -ESRCH;
 	}
 
 	/*
 	 * Disarm any old timer after extracting its expiry time.
 	 */
-	BUG_ON(!irqs_disabled());
+	WARN_ON_ONCE(!irqs_disabled());
 
 	ret = 0;
 	old_incr = timer->it.cpu.incr;
-	spin_lock(&p->sighand->siglock);
 	old_expires = timer->it.cpu.expires;
 	if (unlikely(timer->it.cpu.firing)) {
 		timer->it.cpu.firing = -1;
@@ -724,12 +700,11 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int flags,
 		 * disable this firing since we are already reporting
 		 * it as an overrun (thanks to bump_cpu_timer above).
 		 */
-		spin_unlock(&p->sighand->siglock);
-		read_unlock(&tasklist_lock);
+		unlock_task_sighand(p, &flags);
 		goto out;
 	}
 
-	if (new_expires != 0 && !(flags & TIMER_ABSTIME)) {
+	if (new_expires != 0 && !(timer_flags & TIMER_ABSTIME)) {
 		new_expires += val;
 	}
 
@@ -743,9 +718,7 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int flags,
 		arm_timer(timer);
 	}
 
-	spin_unlock(&p->sighand->siglock);
-	read_unlock(&tasklist_lock);
-
+	unlock_task_sighand(p, &flags);
 	/*
 	 * Install the new reload setting, and
 	 * set up the signal and overrun bookkeeping.
@@ -787,7 +760,8 @@ static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
 {
 	unsigned long long now;
 	struct task_struct *p = timer->it.cpu.task;
-	int clear_dead;
+
+	WARN_ON_ONCE(p == NULL);
 
 	/*
 	 * Easy part: convert the reload time.
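With clear_dead_task() removed, a timer now pins its task with a reference until posix_cpu_timer_del(), so p == NULL is a programming error rather than a reachable state; hence the demotions from BUG_ON() to WARN_ON_ONCE() throughout. As an illustrative sketch, WARN_ON_ONCE() also evaluates to its condition, so such a check could guard a bail-out if one were needed:

	/* Sketch only: WARN_ON_ONCE() prints one backtrace and lets
	 * execution continue, unlike BUG_ON(), and it evaluates to
	 * the tested condition: */
	if (WARN_ON_ONCE(p == NULL))
		return;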
@@ -800,52 +774,34 @@ static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
 		return;
 	}
 
-	if (unlikely(p == NULL)) {
-		/*
-		 * This task already died and the timer will never fire.
-		 * In this case, expires is actually the dead value.
-		 */
- dead:
-		sample_to_timespec(timer->it_clock, timer->it.cpu.expires,
-				   &itp->it_value);
-		return;
-	}
-
 	/*
 	 * Sample the clock to take the difference with the expiry time.
 	 */
 	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
 		cpu_clock_sample(timer->it_clock, p, &now);
-		clear_dead = p->exit_state;
 	} else {
-		read_lock(&tasklist_lock);
-		if (unlikely(p->sighand == NULL)) {
+		struct sighand_struct *sighand;
+		unsigned long flags;
+
+		/*
+		 * Protect against sighand release/switch in exit/exec and
+		 * also make timer sampling safe if it ends up calling
+		 * thread_group_cputime().
+		 */
+		sighand = lock_task_sighand(p, &flags);
+		if (unlikely(sighand == NULL)) {
 			/*
 			 * The process has been reaped.
 			 * We can't even collect a sample any more.
 			 * Call the timer disarmed, nothing else to do.
 			 */
-			put_task_struct(p);
-			timer->it.cpu.task = NULL;
 			timer->it.cpu.expires = 0;
-			read_unlock(&tasklist_lock);
-			goto dead;
+			sample_to_timespec(timer->it_clock, timer->it.cpu.expires,
+					   &itp->it_value);
 		} else {
 			cpu_timer_sample_group(timer->it_clock, p, &now);
-			clear_dead = (unlikely(p->exit_state) &&
-				      thread_group_empty(p));
+			unlock_task_sighand(p, &flags);
 		}
-		read_unlock(&tasklist_lock);
-	}
-
-	if (unlikely(clear_dead)) {
-		/*
-		 * We've noticed that the thread is dead, but
-		 * not yet reaped. Take this opportunity to
-		 * drop our task ref.
-		 */
-		clear_dead_task(timer, now);
-		goto dead;
 	}
 
 	if (now < timer->it.cpu.expires) {
@@ -1059,14 +1015,12 @@ static void check_process_timers(struct task_struct *tsk,
  */
 void posix_cpu_timer_schedule(struct k_itimer *timer)
 {
+	struct sighand_struct *sighand;
+	unsigned long flags;
 	struct task_struct *p = timer->it.cpu.task;
 	unsigned long long now;
 
-	if (unlikely(p == NULL))
-		/*
-		 * The task was cleaned up already, no future firings.
-		 */
-		goto out;
+	WARN_ON_ONCE(p == NULL);
 
 	/*
 	 * Fetch the current sample and update the timer's expiry time.
@@ -1074,49 +1028,45 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
 	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
 		cpu_clock_sample(timer->it_clock, p, &now);
 		bump_cpu_timer(timer, now);
-		if (unlikely(p->exit_state)) {
-			clear_dead_task(timer, now);
+		if (unlikely(p->exit_state))
+			goto out;
+
+		/* Protect timer list r/w in arm_timer() */
+		sighand = lock_task_sighand(p, &flags);
+		if (!sighand)
 			goto out;
-		}
-		read_lock(&tasklist_lock); /* arm_timer needs it. */
-		spin_lock(&p->sighand->siglock);
 	} else {
-		read_lock(&tasklist_lock);
-		if (unlikely(p->sighand == NULL)) {
+		/*
+		 * Protect arm_timer() and timer sampling in case of call to
+		 * thread_group_cputime().
+		 */
+		sighand = lock_task_sighand(p, &flags);
+		if (unlikely(sighand == NULL)) {
 			/*
 			 * The process has been reaped.
 			 * We can't even collect a sample any more.
 			 */
-			put_task_struct(p);
-			timer->it.cpu.task = p = NULL;
 			timer->it.cpu.expires = 0;
-			goto out_unlock;
+			goto out;
 		} else if (unlikely(p->exit_state) && thread_group_empty(p)) {
-			/*
-			 * We've noticed that the thread is dead, but
-			 * not yet reaped. Take this opportunity to
-			 * drop our task ref.
-			 */
-			cpu_timer_sample_group(timer->it_clock, p, &now);
-			clear_dead_task(timer, now);
-			goto out_unlock;
+			unlock_task_sighand(p, &flags);
+			/* Optimization: if the process is dying, no need to rearm */
+			goto out;
 		}
-		spin_lock(&p->sighand->siglock);
 		cpu_timer_sample_group(timer->it_clock, p, &now);
 		bump_cpu_timer(timer, now);
-		/* Leave the tasklist_lock locked for the call below. */
+		/* Leave the sighand locked for the call below. */
 	}
 
 	/*
 	 * Now re-arm for the new expiry time.
 	 */
-	BUG_ON(!irqs_disabled());
+	WARN_ON_ONCE(!irqs_disabled());
 	arm_timer(timer);
-	spin_unlock(&p->sighand->siglock);
-
-out_unlock:
-	read_unlock(&tasklist_lock);
+	unlock_task_sighand(p, &flags);
 
+	/* Kick full dynticks CPUs in case they need to tick on the new timer */
+	posix_cpu_timer_kick_nohz();
 out:
 	timer->it_overrun_last = timer->it_overrun;
 	timer->it_overrun = -1;
@@ -1200,7 +1150,7 @@ void run_posix_cpu_timers(struct task_struct *tsk)
 	struct k_itimer *timer, *next;
 	unsigned long flags;
 
-	BUG_ON(!irqs_disabled());
+	WARN_ON_ONCE(!irqs_disabled());
 
 	/*
 	 * The fast path checks that there are no expired thread or thread
@@ -1256,13 +1206,6 @@ void run_posix_cpu_timers(struct task_struct *tsk)
 		cpu_timer_fire(timer);
 		spin_unlock(&timer->it_lock);
 	}
-
-	/*
-	 * In case some timers were rescheduled after the queue got emptied,
-	 * wake up full dynticks CPUs.
-	 */
-	if (tsk->signal->cputimer.running)
-		posix_cpu_timer_kick_nohz();
 }
 
 /*
1268/* 1211/*
@@ -1274,7 +1217,7 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
1274{ 1217{
1275 unsigned long long now; 1218 unsigned long long now;
1276 1219
1277 BUG_ON(clock_idx == CPUCLOCK_SCHED); 1220 WARN_ON_ONCE(clock_idx == CPUCLOCK_SCHED);
1278 cpu_timer_sample_group(clock_idx, tsk, &now); 1221 cpu_timer_sample_group(clock_idx, tsk, &now);
1279 1222
1280 if (oldval) { 1223 if (oldval) {
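End to end, these are the paths a per-process CPU timer exercises; a minimal userspace sketch using the standard POSIX timer API (link with -lrt on older glibc):

	#include <signal.h>
	#include <time.h>

	int main(void)
	{
		timer_t tid;
		struct sigevent sev = {
			.sigev_notify = SIGEV_SIGNAL,
			.sigev_signo  = SIGALRM,
		};
		struct itimerspec its = {
			/* fire after 1s of process CPU time */
			.it_value = { .tv_sec = 1 },
		};

		/* timer_create() -> posix_cpu_timer_create() */
		if (timer_create(CLOCK_PROCESS_CPUTIME_ID, &sev, &tid) != 0)
			return 1;
		/* timer_settime() -> posix_cpu_timer_set() / arm_timer() */
		if (timer_settime(tid, 0, &its, NULL) != 0)
			return 1;
		/* ... burn CPU, handle SIGALRM ... */
		/* timer_delete() -> posix_cpu_timer_del() */
		timer_delete(tid);
		return 0;
	}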