Diffstat (limited to 'kernel/timer.c')
-rw-r--r--	kernel/timer.c	110
1 file changed, 73 insertions(+), 37 deletions(-)
diff --git a/kernel/timer.c b/kernel/timer.c
index ef1c385bc572..b4555568b4e4 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -600,11 +600,14 @@ static struct tvec_base *lock_timer_base(struct timer_list *timer,
 	}
 }
 
-int __mod_timer(struct timer_list *timer, unsigned long expires)
+static inline int
+__mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
 {
 	struct tvec_base *base, *new_base;
 	unsigned long flags;
-	int ret = 0;
+	int ret;
+
+	ret = 0;
 
 	timer_stats_timer_set_start_info(timer);
 	BUG_ON(!timer->function);
@@ -614,6 +617,9 @@ int __mod_timer(struct timer_list *timer, unsigned long expires)
 	if (timer_pending(timer)) {
 		detach_timer(timer, 0);
 		ret = 1;
+	} else {
+		if (pending_only)
+			goto out_unlock;
 	}
 
 	debug_timer_activate(timer);
@@ -640,42 +646,28 @@ int __mod_timer(struct timer_list *timer, unsigned long expires)
 
 	timer->expires = expires;
 	internal_add_timer(base, timer);
+
+out_unlock:
 	spin_unlock_irqrestore(&base->lock, flags);
 
 	return ret;
 }
 
-EXPORT_SYMBOL(__mod_timer);
-
 /**
- * add_timer_on - start a timer on a particular CPU
- * @timer: the timer to be added
- * @cpu: the CPU to start it on
+ * mod_timer_pending - modify a pending timer's timeout
+ * @timer: the pending timer to be modified
+ * @expires: new timeout in jiffies
  *
- * This is not very scalable on SMP. Double adds are not possible.
+ * mod_timer_pending() is the same for pending timers as mod_timer(),
+ * but will not re-activate and modify already deleted timers.
+ *
+ * It is useful for unserialized use of timers.
  */
-void add_timer_on(struct timer_list *timer, int cpu)
+int mod_timer_pending(struct timer_list *timer, unsigned long expires)
 {
-	struct tvec_base *base = per_cpu(tvec_bases, cpu);
-	unsigned long flags;
-
-	timer_stats_timer_set_start_info(timer);
-	BUG_ON(timer_pending(timer) || !timer->function);
-	spin_lock_irqsave(&base->lock, flags);
-	timer_set_base(timer, base);
-	debug_timer_activate(timer);
-	internal_add_timer(base, timer);
-	/*
-	 * Check whether the other CPU is idle and needs to be
-	 * triggered to reevaluate the timer wheel when nohz is
-	 * active. We are protected against the other CPU fiddling
-	 * with the timer by holding the timer base lock. This also
-	 * makes sure that a CPU on the way to idle can not evaluate
-	 * the timer wheel.
-	 */
-	wake_up_idle_cpu(cpu);
-	spin_unlock_irqrestore(&base->lock, flags);
+	return __mod_timer(timer, expires, true);
 }
+EXPORT_SYMBOL(mod_timer_pending);
 
 /**
  * mod_timer - modify a timer's timeout
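
For context, a rough usage sketch of the interface added above (not part of the patch; struct my_conn, my_conn_touch() and IDLE_TIMEOUT are made-up names for illustration). mod_timer_pending() only pushes out a timer that is still pending, so a path that has already deleted the timer will not see it re-armed behind its back:

#include <linux/timer.h>
#include <linux/jiffies.h>

#define IDLE_TIMEOUT	(5 * HZ)		/* illustrative value */

struct my_conn {
	struct timer_list idle_timer;		/* armed elsewhere */
};

static void my_conn_touch(struct my_conn *conn)
{
	/*
	 * Extend the idle timeout, but only if the timer is still
	 * pending.  If another path has already deleted the timer
	 * (e.g. during teardown), mod_timer_pending() returns 0 and
	 * does not re-activate it.
	 */
	mod_timer_pending(&conn->idle_timer, jiffies + IDLE_TIMEOUT);
}
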
@@ -699,9 +691,6 @@ void add_timer_on(struct timer_list *timer, int cpu)
  */
 int mod_timer(struct timer_list *timer, unsigned long expires)
 {
-	BUG_ON(!timer->function);
-
-	timer_stats_timer_set_start_info(timer);
 	/*
 	 * This is a common optimization triggered by the
 	 * networking code - if the timer is re-modified
@@ -710,12 +699,62 @@ int mod_timer(struct timer_list *timer, unsigned long expires)
 	if (timer->expires == expires && timer_pending(timer))
 		return 1;
 
-	return __mod_timer(timer, expires);
+	return __mod_timer(timer, expires, false);
 }
-
 EXPORT_SYMBOL(mod_timer);
 
 /**
+ * add_timer - start a timer
+ * @timer: the timer to be added
+ *
+ * The kernel will do a ->function(->data) callback from the
+ * timer interrupt at the ->expires point in the future. The
+ * current time is 'jiffies'.
+ *
+ * The timer's ->expires, ->function (and if the handler uses it, ->data)
+ * fields must be set prior calling this function.
+ *
+ * Timers with an ->expires field in the past will be executed in the next
+ * timer tick.
+ */
+void add_timer(struct timer_list *timer)
+{
+	BUG_ON(timer_pending(timer));
+	mod_timer(timer, timer->expires);
+}
+EXPORT_SYMBOL(add_timer);
+
+/**
+ * add_timer_on - start a timer on a particular CPU
+ * @timer: the timer to be added
+ * @cpu: the CPU to start it on
+ *
+ * This is not very scalable on SMP. Double adds are not possible.
+ */
+void add_timer_on(struct timer_list *timer, int cpu)
+{
+	struct tvec_base *base = per_cpu(tvec_bases, cpu);
+	unsigned long flags;
+
+	timer_stats_timer_set_start_info(timer);
+	BUG_ON(timer_pending(timer) || !timer->function);
+	spin_lock_irqsave(&base->lock, flags);
+	timer_set_base(timer, base);
+	debug_timer_activate(timer);
+	internal_add_timer(base, timer);
+	/*
+	 * Check whether the other CPU is idle and needs to be
+	 * triggered to reevaluate the timer wheel when nohz is
+	 * active. We are protected against the other CPU fiddling
+	 * with the timer by holding the timer base lock. This also
+	 * makes sure that a CPU on the way to idle can not evaluate
+	 * the timer wheel.
+	 */
+	wake_up_idle_cpu(cpu);
+	spin_unlock_irqrestore(&base->lock, flags);
+}
+
+/**
  * del_timer - deactive a timer.
  * @timer: the timer to be deactivated
  *
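
A short sketch of the add_timer()/add_timer_on() calling convention described in the kernel-doc above (struct my_dev, my_poll() and my_dev_start() are illustrative only, not from this patch). As the comment notes, ->expires, ->function and (if used) ->data must be set before the timer is added:

#include <linux/timer.h>
#include <linux/jiffies.h>

struct my_dev {
	struct timer_list poll_timer;
};

static void my_poll(unsigned long data)
{
	struct my_dev *dev = (struct my_dev *)data;
	/* ... periodic work for dev, possibly re-arming with mod_timer() ... */
}

static void my_dev_start(struct my_dev *dev, int cpu)
{
	/* ->function and ->data via setup_timer(), ->expires set by hand */
	setup_timer(&dev->poll_timer, my_poll, (unsigned long)dev);
	dev->poll_timer.expires = jiffies + HZ;

	if (cpu >= 0)
		add_timer_on(&dev->poll_timer, cpu);	/* pin to one CPU */
	else
		add_timer(&dev->poll_timer);		/* any CPU */
}
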
@@ -744,7 +783,6 @@ int del_timer(struct timer_list *timer)
 
 	return ret;
 }
-
 EXPORT_SYMBOL(del_timer);
 
 #ifdef CONFIG_SMP
@@ -778,7 +816,6 @@ out:
 
 	return ret;
 }
-
 EXPORT_SYMBOL(try_to_del_timer_sync);
 
 /**
@@ -816,7 +853,6 @@ int del_timer_sync(struct timer_list *timer)
 		cpu_relax();
 	}
 }
-
 EXPORT_SYMBOL(del_timer_sync);
 #endif
 
@@ -1314,7 +1350,7 @@ signed long __sched schedule_timeout(signed long timeout)
 	expire = timeout + jiffies;
 
 	setup_timer_on_stack(&timer, process_timeout, (unsigned long)current);
-	__mod_timer(&timer, expire);
+	__mod_timer(&timer, expire, false);
 	schedule();
 	del_singleshot_timer_sync(&timer);
 
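
The last hunk only threads the new pending_only argument through schedule_timeout()'s internal timer; the public calling convention is unchanged. As a reminder of that convention (the my_sleep_one_second() helper is illustrative), callers set the task state before sleeping:

#include <linux/sched.h>
#include <linux/jiffies.h>

static signed long my_sleep_one_second(void)
{
	/* State must be set first, or schedule_timeout() returns right away. */
	set_current_state(TASK_INTERRUPTIBLE);
	return schedule_timeout(HZ);	/* 0 = timed out, >0 = jiffies left if woken early */
}
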