author	Ingo Molnar <mingo@kernel.org>	2013-12-10 06:32:36 -0500
committer	Ingo Molnar <mingo@kernel.org>	2013-12-10 06:32:36 -0500
commit	0e6601eee039893a3f6420596ae4588d90d13cbe (patch)
tree	ec485a59bc5cc37b195d50313d3888583345310d
parent	a934a56e1284f1863c9c800ff9c63183c25aec93 (diff)
parent	531f64fd6f46a3f2a3edb1b97ecc827c775932c5 (diff)
Merge branch 'timers/posix-timers-for-tip-v2' of git://git.kernel.org/pub/scm/linux/kernel/git/frederic/linux-dynticks into timers/core
Pull posix cpu timer changes for v3.14 from Frederic Weisbecker:

 * Remove dying thread/process timers caching that was complicating the
   code for no significant win.

 * Remove early task reference release on dying timer sample read. Again
   it was not worth the code complication. The other timer's resources
   aren't released until timer_delete() is called anyway (or when the
   whole process dies).

 * Remove leftover arguments in reaped target cleanup

 * Consolidate some timer sampling code

 * Remove use of tasklist lock

 * Robustify sighand locking against exec and exit by using the safer
   lock_task_sighand() API instead of sighand raw locking.

 * Convert some unnecessary BUG_ON() to WARN_ON()

Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--	kernel/posix-cpu-timers.c	312
1 file changed, 129 insertions, 183 deletions
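The recurring conversion in this diff replaces read_lock(&tasklist_lock) plus raw p->sighand checks with lock_task_sighand(), which takes p->sighand->siglock with interrupts saved and returns NULL once the task has been reaped. A minimal sketch of that pattern, for orientation before the diff (the function sample_with_sighand_lock() is illustrative only, not part of the patch):

	#include <linux/sched.h>

	static int sample_with_sighand_lock(struct task_struct *p)
	{
		struct sighand_struct *sighand;
		unsigned long flags;

		/* Pins p->sighand under siglock, irqs saved in flags. */
		sighand = lock_task_sighand(p, &flags);
		if (unlikely(sighand == NULL))
			return -ESRCH;	/* task already reaped, nothing to sample */

		/* p->cpu_timers and p->signal->cpu_timers are stable here. */

		unlock_task_sighand(p, &flags);
		return 0;
	}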
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 79747b7d9420..3b8946416a5f 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -233,7 +233,8 @@ void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
 
 /*
  * Sample a process (thread group) clock for the given group_leader task.
- * Must be called with tasklist_lock held for reading.
+ * Must be called with task sighand lock held for safe while_each_thread()
+ * traversal.
  */
 static int cpu_clock_sample_group(const clockid_t which_clock,
 				  struct task_struct *p,
@@ -260,30 +261,53 @@ static int cpu_clock_sample_group(const clockid_t which_clock,
 	return 0;
 }
 
+static int posix_cpu_clock_get_task(struct task_struct *tsk,
+				    const clockid_t which_clock,
+				    struct timespec *tp)
+{
+	int err = -EINVAL;
+	unsigned long long rtn;
+
+	if (CPUCLOCK_PERTHREAD(which_clock)) {
+		if (same_thread_group(tsk, current))
+			err = cpu_clock_sample(which_clock, tsk, &rtn);
+	} else {
+		unsigned long flags;
+		struct sighand_struct *sighand;
+
+		/*
+		 * while_each_thread() is not yet entirely RCU safe,
+		 * keep locking the group while sampling process
+		 * clock for now.
+		 */
+		sighand = lock_task_sighand(tsk, &flags);
+		if (!sighand)
+			return err;
+
+		if (tsk == current || thread_group_leader(tsk))
+			err = cpu_clock_sample_group(which_clock, tsk, &rtn);
+
+		unlock_task_sighand(tsk, &flags);
+	}
+
+	if (!err)
+		sample_to_timespec(which_clock, rtn, tp);
+
+	return err;
+}
+
 
 static int posix_cpu_clock_get(const clockid_t which_clock, struct timespec *tp)
 {
 	const pid_t pid = CPUCLOCK_PID(which_clock);
-	int error = -EINVAL;
-	unsigned long long rtn;
+	int err = -EINVAL;
 
 	if (pid == 0) {
 		/*
 		 * Special case constant value for our own clocks.
 		 * We don't have to do any lookup to find ourselves.
 		 */
-		if (CPUCLOCK_PERTHREAD(which_clock)) {
-			/*
-			 * Sampling just ourselves we can do with no locking.
-			 */
-			error = cpu_clock_sample(which_clock,
-						 current, &rtn);
-		} else {
-			read_lock(&tasklist_lock);
-			error = cpu_clock_sample_group(which_clock,
-						       current, &rtn);
-			read_unlock(&tasklist_lock);
-		}
+		err = posix_cpu_clock_get_task(current, which_clock, tp);
 	} else {
 		/*
 		 * Find the given PID, and validate that the caller
@@ -292,29 +316,12 @@ static int posix_cpu_clock_get(const clockid_t which_clock, struct timespec *tp)
 		struct task_struct *p;
 		rcu_read_lock();
 		p = find_task_by_vpid(pid);
-		if (p) {
-			if (CPUCLOCK_PERTHREAD(which_clock)) {
-				if (same_thread_group(p, current)) {
-					error = cpu_clock_sample(which_clock,
-								 p, &rtn);
-				}
-			} else {
-				read_lock(&tasklist_lock);
-				if (thread_group_leader(p) && p->sighand) {
-					error =
-					    cpu_clock_sample_group(which_clock,
-								   p, &rtn);
-				}
-				read_unlock(&tasklist_lock);
-			}
-		}
+		if (p)
+			err = posix_cpu_clock_get_task(p, which_clock, tp);
 		rcu_read_unlock();
 	}
 
-	if (error)
-		return error;
-	sample_to_timespec(which_clock, rtn, tp);
-	return 0;
+	return err;
 }
 
 
@@ -371,36 +378,40 @@ static int posix_cpu_timer_create(struct k_itimer *new_timer)
  */
 static int posix_cpu_timer_del(struct k_itimer *timer)
 {
-	struct task_struct *p = timer->it.cpu.task;
 	int ret = 0;
+	unsigned long flags;
+	struct sighand_struct *sighand;
+	struct task_struct *p = timer->it.cpu.task;
 
-	if (likely(p != NULL)) {
-		read_lock(&tasklist_lock);
-		if (unlikely(p->sighand == NULL)) {
-			/*
-			 * We raced with the reaping of the task.
-			 * The deletion should have cleared us off the list.
-			 */
-			BUG_ON(!list_empty(&timer->it.cpu.entry));
-		} else {
-			spin_lock(&p->sighand->siglock);
-			if (timer->it.cpu.firing)
-				ret = TIMER_RETRY;
-			else
-				list_del(&timer->it.cpu.entry);
-			spin_unlock(&p->sighand->siglock);
-		}
-		read_unlock(&tasklist_lock);
+	WARN_ON_ONCE(p == NULL);
 
-		if (!ret)
-			put_task_struct(p);
+	/*
+	 * Protect against sighand release/switch in exit/exec and process/
+	 * thread timer list entry concurrent read/writes.
+	 */
+	sighand = lock_task_sighand(p, &flags);
+	if (unlikely(sighand == NULL)) {
+		/*
+		 * We raced with the reaping of the task.
+		 * The deletion should have cleared us off the list.
+		 */
+		WARN_ON_ONCE(!list_empty(&timer->it.cpu.entry));
+	} else {
+		if (timer->it.cpu.firing)
+			ret = TIMER_RETRY;
+		else
+			list_del(&timer->it.cpu.entry);
+
+		unlock_task_sighand(p, &flags);
 	}
 
+	if (!ret)
+		put_task_struct(p);
+
 	return ret;
 }
 
-static void cleanup_timers_list(struct list_head *head,
-				unsigned long long curr)
+static void cleanup_timers_list(struct list_head *head)
 {
 	struct cpu_timer_list *timer, *next;
 
@@ -414,16 +425,11 @@ static void cleanup_timers_list(struct list_head *head,
  * time for later timer_gettime calls to return.
  * This must be called with the siglock held.
  */
-static void cleanup_timers(struct list_head *head,
-			   cputime_t utime, cputime_t stime,
-			   unsigned long long sum_exec_runtime)
+static void cleanup_timers(struct list_head *head)
 {
-
-	cputime_t ptime = utime + stime;
-
-	cleanup_timers_list(head, cputime_to_expires(ptime));
-	cleanup_timers_list(++head, cputime_to_expires(utime));
-	cleanup_timers_list(++head, sum_exec_runtime);
+	cleanup_timers_list(head);
+	cleanup_timers_list(++head);
+	cleanup_timers_list(++head);
 }
 
 /*
@@ -433,41 +439,14 @@ static void cleanup_timers(struct list_head *head,
  */
 void posix_cpu_timers_exit(struct task_struct *tsk)
 {
-	cputime_t utime, stime;
-
 	add_device_randomness((const void*) &tsk->se.sum_exec_runtime,
 						sizeof(unsigned long long));
-	task_cputime(tsk, &utime, &stime);
-	cleanup_timers(tsk->cpu_timers,
-		       utime, stime, tsk->se.sum_exec_runtime);
+	cleanup_timers(tsk->cpu_timers);
 
 }
 void posix_cpu_timers_exit_group(struct task_struct *tsk)
 {
-	struct signal_struct *const sig = tsk->signal;
-	cputime_t utime, stime;
-
-	task_cputime(tsk, &utime, &stime);
-	cleanup_timers(tsk->signal->cpu_timers,
-		       utime + sig->utime, stime + sig->stime,
-		       tsk->se.sum_exec_runtime + sig->sum_sched_runtime);
-}
-
-static void clear_dead_task(struct k_itimer *itimer, unsigned long long now)
-{
-	struct cpu_timer_list *timer = &itimer->it.cpu;
-
-	/*
-	 * That's all for this thread or process.
-	 * We leave our residual in expires to be reported.
-	 */
-	put_task_struct(timer->task);
-	timer->task = NULL;
-	if (timer->expires < now) {
-		timer->expires = 0;
-	} else {
-		timer->expires -= now;
-	}
+	cleanup_timers(tsk->signal->cpu_timers);
 }
 
 static inline int expires_gt(cputime_t expires, cputime_t new_exp)
@@ -477,8 +456,7 @@ static inline int expires_gt(cputime_t expires, cputime_t new_exp)
 
 /*
  * Insert the timer on the appropriate list before any timers that
- * expire later. This must be called with the tasklist_lock held
- * for reading, interrupts disabled and p->sighand->siglock taken.
+ * expire later. This must be called with the sighand lock held.
  */
 static void arm_timer(struct k_itimer *timer)
 {
@@ -569,7 +547,8 @@ static void cpu_timer_fire(struct k_itimer *timer)
 
 /*
  * Sample a process (thread group) timer for the given group_leader task.
- * Must be called with tasklist_lock held for reading.
+ * Must be called with task sighand lock held for safe while_each_thread()
+ * traversal.
  */
 static int cpu_timer_sample_group(const clockid_t which_clock,
 				  struct task_struct *p,
@@ -632,43 +611,39 @@ static inline void posix_cpu_timer_kick_nohz(void) { }
 * If we return TIMER_RETRY, it's necessary to release the timer's lock
 * and try again. (This happens when the timer is in the middle of firing.)
 */
-static int posix_cpu_timer_set(struct k_itimer *timer, int flags,
+static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
 			       struct itimerspec *new, struct itimerspec *old)
 {
+	unsigned long flags;
+	struct sighand_struct *sighand;
 	struct task_struct *p = timer->it.cpu.task;
 	unsigned long long old_expires, new_expires, old_incr, val;
 	int ret;
 
-	if (unlikely(p == NULL)) {
-		/*
-		 * Timer refers to a dead task's clock.
-		 */
-		return -ESRCH;
-	}
+	WARN_ON_ONCE(p == NULL);
 
 	new_expires = timespec_to_sample(timer->it_clock, &new->it_value);
 
-	read_lock(&tasklist_lock);
 	/*
-	 * We need the tasklist_lock to protect against reaping that
-	 * clears p->sighand. If p has just been reaped, we can no
+	 * Protect against sighand release/switch in exit/exec and p->cpu_timers
+	 * and p->signal->cpu_timers read/write in arm_timer()
+	 */
+	sighand = lock_task_sighand(p, &flags);
+	/*
+	 * If p has just been reaped, we can no
 	 * longer get any information about it at all.
 	 */
-	if (unlikely(p->sighand == NULL)) {
-		read_unlock(&tasklist_lock);
-		put_task_struct(p);
-		timer->it.cpu.task = NULL;
+	if (unlikely(sighand == NULL)) {
 		return -ESRCH;
 	}
 
 	/*
 	 * Disarm any old timer after extracting its expiry time.
 	 */
-	BUG_ON(!irqs_disabled());
+	WARN_ON_ONCE(!irqs_disabled());
 
 	ret = 0;
 	old_incr = timer->it.cpu.incr;
-	spin_lock(&p->sighand->siglock);
 	old_expires = timer->it.cpu.expires;
 	if (unlikely(timer->it.cpu.firing)) {
 		timer->it.cpu.firing = -1;
@@ -725,12 +700,11 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int flags,
 		 * disable this firing since we are already reporting
 		 * it as an overrun (thanks to bump_cpu_timer above).
 		 */
-		spin_unlock(&p->sighand->siglock);
-		read_unlock(&tasklist_lock);
+		unlock_task_sighand(p, &flags);
 		goto out;
 	}
 
-	if (new_expires != 0 && !(flags & TIMER_ABSTIME)) {
+	if (new_expires != 0 && !(timer_flags & TIMER_ABSTIME)) {
 		new_expires += val;
 	}
 
@@ -744,9 +718,7 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int flags,
 		arm_timer(timer);
 	}
 
-	spin_unlock(&p->sighand->siglock);
-	read_unlock(&tasklist_lock);
-
+	unlock_task_sighand(p, &flags);
 	/*
 	 * Install the new reload setting, and
 	 * set up the signal and overrun bookkeeping.
@@ -788,7 +760,8 @@ static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
 {
 	unsigned long long now;
 	struct task_struct *p = timer->it.cpu.task;
-	int clear_dead;
+
+	WARN_ON_ONCE(p == NULL);
 
 	/*
 	 * Easy part: convert the reload time.
@@ -801,52 +774,34 @@ static void posix_cpu_timer_get(struct k_itimer *timer, struct itimerspec *itp)
 		return;
 	}
 
-	if (unlikely(p == NULL)) {
-		/*
-		 * This task already died and the timer will never fire.
-		 * In this case, expires is actually the dead value.
-		 */
- dead:
-		sample_to_timespec(timer->it_clock, timer->it.cpu.expires,
-				   &itp->it_value);
-		return;
-	}
-
 	/*
 	 * Sample the clock to take the difference with the expiry time.
 	 */
 	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
 		cpu_clock_sample(timer->it_clock, p, &now);
-		clear_dead = p->exit_state;
 	} else {
-		read_lock(&tasklist_lock);
-		if (unlikely(p->sighand == NULL)) {
+		struct sighand_struct *sighand;
+		unsigned long flags;
+
+		/*
+		 * Protect against sighand release/switch in exit/exec and
+		 * also make timer sampling safe if it ends up calling
+		 * thread_group_cputime().
+		 */
+		sighand = lock_task_sighand(p, &flags);
+		if (unlikely(sighand == NULL)) {
 			/*
 			 * The process has been reaped.
 			 * We can't even collect a sample any more.
 			 * Call the timer disarmed, nothing else to do.
 			 */
-			put_task_struct(p);
-			timer->it.cpu.task = NULL;
 			timer->it.cpu.expires = 0;
-			read_unlock(&tasklist_lock);
-			goto dead;
+			sample_to_timespec(timer->it_clock, timer->it.cpu.expires,
+					   &itp->it_value);
 		} else {
 			cpu_timer_sample_group(timer->it_clock, p, &now);
-			clear_dead = (unlikely(p->exit_state) &&
-				      thread_group_empty(p));
+			unlock_task_sighand(p, &flags);
 		}
-		read_unlock(&tasklist_lock);
-	}
-
-	if (unlikely(clear_dead)) {
-		/*
-		 * We've noticed that the thread is dead, but
-		 * not yet reaped. Take this opportunity to
-		 * drop our task ref.
-		 */
-		clear_dead_task(timer, now);
-		goto dead;
 	}
 
 	if (now < timer->it.cpu.expires) {
@@ -1060,14 +1015,12 @@ static void check_process_timers(struct task_struct *tsk,
  */
 void posix_cpu_timer_schedule(struct k_itimer *timer)
 {
+	struct sighand_struct *sighand;
+	unsigned long flags;
 	struct task_struct *p = timer->it.cpu.task;
 	unsigned long long now;
 
-	if (unlikely(p == NULL))
-		/*
-		 * The task was cleaned up already, no future firings.
-		 */
-		goto out;
+	WARN_ON_ONCE(p == NULL);
 
 	/*
 	 * Fetch the current sample and update the timer's expiry time.
@@ -1075,52 +1028,45 @@ void posix_cpu_timer_schedule(struct k_itimer *timer)
 	if (CPUCLOCK_PERTHREAD(timer->it_clock)) {
 		cpu_clock_sample(timer->it_clock, p, &now);
 		bump_cpu_timer(timer, now);
-		if (unlikely(p->exit_state)) {
-			clear_dead_task(timer, now);
+		if (unlikely(p->exit_state))
+			goto out;
+
+		/* Protect timer list r/w in arm_timer() */
+		sighand = lock_task_sighand(p, &flags);
+		if (!sighand)
 			goto out;
-		}
-		read_lock(&tasklist_lock); /* arm_timer needs it. */
-		spin_lock(&p->sighand->siglock);
 	} else {
-		read_lock(&tasklist_lock);
-		if (unlikely(p->sighand == NULL)) {
+		/*
+		 * Protect arm_timer() and timer sampling in case of call to
+		 * thread_group_cputime().
+		 */
+		sighand = lock_task_sighand(p, &flags);
+		if (unlikely(sighand == NULL)) {
 			/*
 			 * The process has been reaped.
 			 * We can't even collect a sample any more.
 			 */
-			put_task_struct(p);
-			timer->it.cpu.task = p = NULL;
 			timer->it.cpu.expires = 0;
-			read_unlock(&tasklist_lock);
 			goto out;
 		} else if (unlikely(p->exit_state) && thread_group_empty(p)) {
-			/*
-			 * We've noticed that the thread is dead, but
-			 * not yet reaped. Take this opportunity to
-			 * drop our task ref.
-			 */
-			cpu_timer_sample_group(timer->it_clock, p, &now);
-			clear_dead_task(timer, now);
-			read_unlock(&tasklist_lock);
+			unlock_task_sighand(p, &flags);
+			/* Optimizations: if the process is dying, no need to rearm */
 			goto out;
 		}
-		spin_lock(&p->sighand->siglock);
 		cpu_timer_sample_group(timer->it_clock, p, &now);
 		bump_cpu_timer(timer, now);
-		/* Leave the tasklist_lock locked for the call below. */
+		/* Leave the sighand locked for the call below. */
 	}
 
 	/*
 	 * Now re-arm for the new expiry time.
 	 */
-	BUG_ON(!irqs_disabled());
+	WARN_ON_ONCE(!irqs_disabled());
 	arm_timer(timer);
-	spin_unlock(&p->sighand->siglock);
-	read_unlock(&tasklist_lock);
+	unlock_task_sighand(p, &flags);
 
 	/* Kick full dynticks CPUs in case they need to tick on the new timer */
 	posix_cpu_timer_kick_nohz();
-
 out:
 	timer->it_overrun_last = timer->it_overrun;
 	timer->it_overrun = -1;
@@ -1204,7 +1150,7 @@ void run_posix_cpu_timers(struct task_struct *tsk)
 	struct k_itimer *timer, *next;
 	unsigned long flags;
 
-	BUG_ON(!irqs_disabled());
+	WARN_ON_ONCE(!irqs_disabled());
 
 	/*
 	 * The fast path checks that there are no expired thread or thread
@@ -1271,7 +1217,7 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
 {
 	unsigned long long now;
 
-	BUG_ON(clock_idx == CPUCLOCK_SCHED);
+	WARN_ON_ONCE(clock_idx == CPUCLOCK_SCHED);
 	cpu_timer_sample_group(clock_idx, tsk, &now);
 
 	if (oldval) {