author    | Ingo Molnar <mingo@elte.hu> | 2009-02-18 06:23:29 -0500
committer | Ingo Molnar <mingo@elte.hu> | 2009-02-18 13:26:33 -0500
commit    | 74019224ac34b044b44a31dd89a54e3477db4896 (patch)
tree      | bf7b04319c0c4329eb6cef0788737c14d2fa9030 /kernel/timer.c
parent    | 5955c7a2cfb6a35429adea5dc480002b15ca8cfc (diff)
timers: add mod_timer_pending()
Impact: new timer API
Based on an idea from Martin Josefsson, with help from
Patrick McHardy and Stephen Hemminger:

introduce the mod_timer_pending() API, a mod_timer() variant
that is a no-op on timers which have already been removed
(regular mod_timer() re-activates non-pending timers).

This is useful to the networking code: timer-forwarding calls
via mod_timer_pending() need no serialization, while a single
del_timer*() still stops the timer from being re-activated.
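As a usage illustration only (not part of the patch), here is a minimal
sketch of that pattern. The struct, field and function names below are
made up; mod_timer_pending(), del_timer_sync(), jiffies and HZ are the
real kernel interfaces being exercised.

#include <linux/timer.h>
#include <linux/jiffies.h>

/* Hypothetical object owning an idle timer, assumed to have been
 * set up with setup_timer() and armed elsewhere. */
struct my_conn {
	struct timer_list idle_timer;
};

/*
 * Hot path, possibly running on several CPUs at once without a lock:
 * push the timeout forward. If the timer has already been deleted,
 * mod_timer_pending() does nothing and the timer is not re-armed.
 */
static void my_conn_touch(struct my_conn *c)
{
	mod_timer_pending(&c->idle_timer, jiffies + 30 * HZ);
}

/*
 * Teardown path: a single del_timer_sync() is enough; concurrent
 * my_conn_touch() callers can no longer re-activate the timer
 * (plain mod_timer() would re-arm it here).
 */
static void my_conn_stop(struct my_conn *c)
{
	del_timer_sync(&c->idle_timer);
}

Like mod_timer(), mod_timer_pending() returns 1 if the timer was still
pending and got forwarded, and 0 if it had already been removed, so a
caller can check whether the forward actually took effect.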
Also, while at it:

- optimize the regular mod_timer() path some more: the
  timer-stats call and a debug check were needlessly duplicated
  in __mod_timer().

- make the exports come straight after their functions, as
  most other exports in timer.c already do.

- eliminate __mod_timer() as an external API and convert its
  users to mod_timer() (a conversion sketch follows below).
The regular mod_timer() code path is not impacted
significantly, due to inlining optimizations and due to
the simplifications.
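For the __mod_timer() removal mentioned in the list above, a sketch of
what the caller-side conversion looks like; 'timer' and 'delay' are
placeholders, not code taken from the patch:

/* Before this patch: __mod_timer() was an exported API. */
__mod_timer(timer, jiffies + delay);

/* After: __mod_timer() is static in kernel/timer.c; callers use
 * mod_timer(), which keeps the same return convention
 * (1 if the timer was pending, 0 if it was not). */
mod_timer(timer, jiffies + delay);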
Based-on-patch-from: Stephen Hemminger <shemminger@vyatta.com>
Acked-by: Stephen Hemminger <shemminger@vyatta.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Patrick McHardy <kaber@trash.net>
Cc: netdev@vger.kernel.org
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/timer.c')
-rw-r--r-- | kernel/timer.c | 110
1 file changed, 73 insertions(+), 37 deletions(-)
diff --git a/kernel/timer.c b/kernel/timer.c
index 13dd64fe143d..9b77fc9a9ac8 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -589,11 +589,14 @@ static struct tvec_base *lock_timer_base(struct timer_list *timer,
 	}
 }
 
-int __mod_timer(struct timer_list *timer, unsigned long expires)
+static inline int
+__mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
 {
 	struct tvec_base *base, *new_base;
 	unsigned long flags;
-	int ret = 0;
+	int ret;
+
+	ret = 0;
 
 	timer_stats_timer_set_start_info(timer);
 	BUG_ON(!timer->function);
@@ -603,6 +606,9 @@ int __mod_timer(struct timer_list *timer, unsigned long expires)
 	if (timer_pending(timer)) {
 		detach_timer(timer, 0);
 		ret = 1;
+	} else {
+		if (pending_only)
+			goto out_unlock;
 	}
 
 	debug_timer_activate(timer);
@@ -629,42 +635,28 @@ int __mod_timer(struct timer_list *timer, unsigned long expires)
 
 	timer->expires = expires;
 	internal_add_timer(base, timer);
+
+out_unlock:
 	spin_unlock_irqrestore(&base->lock, flags);
 
 	return ret;
 }
 
-EXPORT_SYMBOL(__mod_timer);
-
 /**
- * add_timer_on - start a timer on a particular CPU
- * @timer: the timer to be added
- * @cpu: the CPU to start it on
+ * mod_timer_pending - modify a pending timer's timeout
+ * @timer: the pending timer to be modified
+ * @expires: new timeout in jiffies
  *
- * This is not very scalable on SMP. Double adds are not possible.
+ * mod_timer_pending() is the same for pending timers as mod_timer(),
+ * but will not re-activate and modify already deleted timers.
+ *
+ * It is useful for unserialized use of timers.
  */
-void add_timer_on(struct timer_list *timer, int cpu)
+int mod_timer_pending(struct timer_list *timer, unsigned long expires)
 {
-	struct tvec_base *base = per_cpu(tvec_bases, cpu);
-	unsigned long flags;
-
-	timer_stats_timer_set_start_info(timer);
-	BUG_ON(timer_pending(timer) || !timer->function);
-	spin_lock_irqsave(&base->lock, flags);
-	timer_set_base(timer, base);
-	debug_timer_activate(timer);
-	internal_add_timer(base, timer);
-	/*
-	 * Check whether the other CPU is idle and needs to be
-	 * triggered to reevaluate the timer wheel when nohz is
-	 * active. We are protected against the other CPU fiddling
-	 * with the timer by holding the timer base lock. This also
-	 * makes sure that a CPU on the way to idle can not evaluate
-	 * the timer wheel.
-	 */
-	wake_up_idle_cpu(cpu);
-	spin_unlock_irqrestore(&base->lock, flags);
+	return __mod_timer(timer, expires, true);
 }
+EXPORT_SYMBOL(mod_timer_pending);
 
 /**
  * mod_timer - modify a timer's timeout
@@ -688,9 +680,6 @@ void add_timer_on(struct timer_list *timer, int cpu)
  */
 int mod_timer(struct timer_list *timer, unsigned long expires)
 {
-	BUG_ON(!timer->function);
-
-	timer_stats_timer_set_start_info(timer);
 	/*
 	 * This is a common optimization triggered by the
 	 * networking code - if the timer is re-modified
@@ -699,12 +688,62 @@ int mod_timer(struct timer_list *timer, unsigned long expires)
 	if (timer->expires == expires && timer_pending(timer))
 		return 1;
 
-	return __mod_timer(timer, expires);
+	return __mod_timer(timer, expires, false);
 }
-
 EXPORT_SYMBOL(mod_timer);
 
 /**
+ * add_timer - start a timer
+ * @timer: the timer to be added
+ *
+ * The kernel will do a ->function(->data) callback from the
+ * timer interrupt at the ->expires point in the future. The
+ * current time is 'jiffies'.
+ *
+ * The timer's ->expires, ->function (and if the handler uses it, ->data)
+ * fields must be set prior calling this function.
+ *
+ * Timers with an ->expires field in the past will be executed in the next
+ * timer tick.
+ */
+void add_timer(struct timer_list *timer)
+{
+	BUG_ON(timer_pending(timer));
+	mod_timer(timer, timer->expires);
+}
+EXPORT_SYMBOL(add_timer);
+
+/**
+ * add_timer_on - start a timer on a particular CPU
+ * @timer: the timer to be added
+ * @cpu: the CPU to start it on
+ *
+ * This is not very scalable on SMP. Double adds are not possible.
+ */
+void add_timer_on(struct timer_list *timer, int cpu)
+{
+	struct tvec_base *base = per_cpu(tvec_bases, cpu);
+	unsigned long flags;
+
+	timer_stats_timer_set_start_info(timer);
+	BUG_ON(timer_pending(timer) || !timer->function);
+	spin_lock_irqsave(&base->lock, flags);
+	timer_set_base(timer, base);
+	debug_timer_activate(timer);
+	internal_add_timer(base, timer);
+	/*
+	 * Check whether the other CPU is idle and needs to be
+	 * triggered to reevaluate the timer wheel when nohz is
+	 * active. We are protected against the other CPU fiddling
+	 * with the timer by holding the timer base lock. This also
+	 * makes sure that a CPU on the way to idle can not evaluate
+	 * the timer wheel.
+	 */
+	wake_up_idle_cpu(cpu);
+	spin_unlock_irqrestore(&base->lock, flags);
+}
+
+/**
  * del_timer - deactive a timer.
  * @timer: the timer to be deactivated
  *
@@ -733,7 +772,6 @@ int del_timer(struct timer_list *timer)
 
 	return ret;
 }
-
 EXPORT_SYMBOL(del_timer);
 
 #ifdef CONFIG_SMP
@@ -767,7 +805,6 @@ out:
 
 	return ret;
 }
-
 EXPORT_SYMBOL(try_to_del_timer_sync);
 
 /**
@@ -796,7 +833,6 @@ int del_timer_sync(struct timer_list *timer)
 		cpu_relax();
 	}
 }
-
 EXPORT_SYMBOL(del_timer_sync);
 #endif
 
@@ -1268,7 +1304,7 @@ signed long __sched schedule_timeout(signed long timeout)
 	expire = timeout + jiffies;
 
 	setup_timer_on_stack(&timer, process_timeout, (unsigned long)current);
-	__mod_timer(&timer, expire);
+	__mod_timer(&timer, expire, false);
 	schedule();
 	del_singleshot_timer_sync(&timer);
 