-rw-r--r--  arch/powerpc/platforms/cell/spufs/sched.c   |   2
-rw-r--r--  drivers/infiniband/hw/ipath/ipath_driver.c  |   6
-rw-r--r--  include/linux/timer.h                       |  22
-rw-r--r--  kernel/relay.c                              |   2
-rw-r--r--  kernel/timer.c                              | 110
5 files changed, 80 insertions, 62 deletions
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 6a0ad196aeb3..f085369301b1 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -508,7 +508,7 @@ static void __spu_add_to_rq(struct spu_context *ctx)
 		list_add_tail(&ctx->rq, &spu_prio->runq[ctx->prio]);
 		set_bit(ctx->prio, spu_prio->bitmap);
 		if (!spu_prio->nr_waiting++)
-			__mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
+			mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
 	}
 }
 
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
index 69c0ce321b4e..cb9daa6ac029 100644
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -2715,7 +2715,7 @@ static void ipath_hol_signal_up(struct ipath_devdata *dd)
  * to prevent HoL blocking, then start the HoL timer that
  * periodically continues, then stop procs, so they can detect
  * link down if they want, and do something about it.
- * Timer may already be running, so use __mod_timer, not add_timer.
+ * Timer may already be running, so use mod_timer, not add_timer.
  */
 void ipath_hol_down(struct ipath_devdata *dd)
 {
@@ -2724,7 +2724,7 @@ void ipath_hol_down(struct ipath_devdata *dd)
 	dd->ipath_hol_next = IPATH_HOL_DOWNCONT;
 	dd->ipath_hol_timer.expires = jiffies +
 		msecs_to_jiffies(ipath_hol_timeout_ms);
-	__mod_timer(&dd->ipath_hol_timer, dd->ipath_hol_timer.expires);
+	mod_timer(&dd->ipath_hol_timer, dd->ipath_hol_timer.expires);
 }
 
 /*
@@ -2763,7 +2763,7 @@ void ipath_hol_event(unsigned long opaque)
 	else {
 		dd->ipath_hol_timer.expires = jiffies +
 			msecs_to_jiffies(ipath_hol_timeout_ms);
-		__mod_timer(&dd->ipath_hol_timer,
+		mod_timer(&dd->ipath_hol_timer,
 			dd->ipath_hol_timer.expires);
 	}
 }
diff --git a/include/linux/timer.h b/include/linux/timer.h
index daf9685b861c..e2d662e3416e 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -86,8 +86,8 @@ static inline int timer_pending(const struct timer_list * timer)
 
 extern void add_timer_on(struct timer_list *timer, int cpu);
 extern int del_timer(struct timer_list * timer);
-extern int __mod_timer(struct timer_list *timer, unsigned long expires);
 extern int mod_timer(struct timer_list *timer, unsigned long expires);
+extern int mod_timer_pending(struct timer_list *timer, unsigned long expires);
 
 /*
  * The jiffies value which is added to now, when there is no timer
@@ -146,25 +146,7 @@ static inline void timer_stats_timer_clear_start_info(struct timer_list *timer)
 }
 #endif
 
-/**
- * add_timer - start a timer
- * @timer: the timer to be added
- *
- * The kernel will do a ->function(->data) callback from the
- * timer interrupt at the ->expires point in the future. The
- * current time is 'jiffies'.
- *
- * The timer's ->expires, ->function (and if the handler uses it, ->data)
- * fields must be set prior calling this function.
- *
- * Timers with an ->expires field in the past will be executed in the next
- * timer tick.
- */
-static inline void add_timer(struct timer_list *timer)
-{
-	BUG_ON(timer_pending(timer));
-	__mod_timer(timer, timer->expires);
-}
+extern void add_timer(struct timer_list *timer);
 
 #ifdef CONFIG_SMP
 extern int try_to_del_timer_sync(struct timer_list *timer);
diff --git a/kernel/relay.c b/kernel/relay.c
index 9d79b7854fa6..8f2179c8056f 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -750,7 +750,7 @@ size_t relay_switch_subbuf(struct rchan_buf *buf, size_t length)
 			 * from the scheduler (trying to re-grab
 			 * rq->lock), so defer it.
 			 */
-			__mod_timer(&buf->timer, jiffies + 1);
+			mod_timer(&buf->timer, jiffies + 1);
 	}
 
 	old = buf->data;
diff --git a/kernel/timer.c b/kernel/timer.c
index 13dd64fe143d..9b77fc9a9ac8 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -589,11 +589,14 @@ static struct tvec_base *lock_timer_base(struct timer_list *timer,
 	}
 }
 
-int __mod_timer(struct timer_list *timer, unsigned long expires)
+static inline int
+__mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
 {
 	struct tvec_base *base, *new_base;
 	unsigned long flags;
-	int ret = 0;
+	int ret;
+
+	ret = 0;
 
 	timer_stats_timer_set_start_info(timer);
 	BUG_ON(!timer->function);
@@ -603,6 +606,9 @@ int __mod_timer(struct timer_list *timer, unsigned long expires)
 	if (timer_pending(timer)) {
 		detach_timer(timer, 0);
 		ret = 1;
+	} else {
+		if (pending_only)
+			goto out_unlock;
 	}
 
 	debug_timer_activate(timer);
@@ -629,42 +635,28 @@ int __mod_timer(struct timer_list *timer, unsigned long expires)
 
 	timer->expires = expires;
 	internal_add_timer(base, timer);
+
+out_unlock:
 	spin_unlock_irqrestore(&base->lock, flags);
 
 	return ret;
 }
 
-EXPORT_SYMBOL(__mod_timer);
-
 /**
- * add_timer_on - start a timer on a particular CPU
- * @timer: the timer to be added
- * @cpu: the CPU to start it on
+ * mod_timer_pending - modify a pending timer's timeout
+ * @timer: the pending timer to be modified
+ * @expires: new timeout in jiffies
  *
- * This is not very scalable on SMP. Double adds are not possible.
+ * mod_timer_pending() is the same for pending timers as mod_timer(),
+ * but will not re-activate and modify already deleted timers.
+ *
+ * It is useful for unserialized use of timers.
  */
-void add_timer_on(struct timer_list *timer, int cpu)
+int mod_timer_pending(struct timer_list *timer, unsigned long expires)
 {
-	struct tvec_base *base = per_cpu(tvec_bases, cpu);
-	unsigned long flags;
-
-	timer_stats_timer_set_start_info(timer);
-	BUG_ON(timer_pending(timer) || !timer->function);
-	spin_lock_irqsave(&base->lock, flags);
-	timer_set_base(timer, base);
-	debug_timer_activate(timer);
-	internal_add_timer(base, timer);
-	/*
-	 * Check whether the other CPU is idle and needs to be
-	 * triggered to reevaluate the timer wheel when nohz is
-	 * active. We are protected against the other CPU fiddling
-	 * with the timer by holding the timer base lock. This also
-	 * makes sure that a CPU on the way to idle can not evaluate
-	 * the timer wheel.
-	 */
-	wake_up_idle_cpu(cpu);
-	spin_unlock_irqrestore(&base->lock, flags);
+	return __mod_timer(timer, expires, true);
 }
+EXPORT_SYMBOL(mod_timer_pending);
 
 /**
  * mod_timer - modify a timer's timeout
@@ -688,9 +680,6 @@ void add_timer_on(struct timer_list *timer, int cpu)
  */
 int mod_timer(struct timer_list *timer, unsigned long expires)
 {
-	BUG_ON(!timer->function);
-
-	timer_stats_timer_set_start_info(timer);
 	/*
 	 * This is a common optimization triggered by the
 	 * networking code - if the timer is re-modified
@@ -699,12 +688,62 @@ int mod_timer(struct timer_list *timer, unsigned long expires)
 	if (timer->expires == expires && timer_pending(timer))
 		return 1;
 
-	return __mod_timer(timer, expires);
+	return __mod_timer(timer, expires, false);
 }
-
 EXPORT_SYMBOL(mod_timer);
 
 /**
+ * add_timer - start a timer
+ * @timer: the timer to be added
+ *
+ * The kernel will do a ->function(->data) callback from the
+ * timer interrupt at the ->expires point in the future. The
+ * current time is 'jiffies'.
+ *
+ * The timer's ->expires, ->function (and if the handler uses it, ->data)
+ * fields must be set prior calling this function.
+ *
+ * Timers with an ->expires field in the past will be executed in the next
+ * timer tick.
+ */
+void add_timer(struct timer_list *timer)
+{
+	BUG_ON(timer_pending(timer));
+	mod_timer(timer, timer->expires);
+}
+EXPORT_SYMBOL(add_timer);
+
+/**
+ * add_timer_on - start a timer on a particular CPU
+ * @timer: the timer to be added
+ * @cpu: the CPU to start it on
+ *
+ * This is not very scalable on SMP. Double adds are not possible.
+ */
+void add_timer_on(struct timer_list *timer, int cpu)
+{
+	struct tvec_base *base = per_cpu(tvec_bases, cpu);
+	unsigned long flags;
+
+	timer_stats_timer_set_start_info(timer);
+	BUG_ON(timer_pending(timer) || !timer->function);
+	spin_lock_irqsave(&base->lock, flags);
+	timer_set_base(timer, base);
+	debug_timer_activate(timer);
+	internal_add_timer(base, timer);
+	/*
+	 * Check whether the other CPU is idle and needs to be
+	 * triggered to reevaluate the timer wheel when nohz is
+	 * active. We are protected against the other CPU fiddling
+	 * with the timer by holding the timer base lock. This also
+	 * makes sure that a CPU on the way to idle can not evaluate
+	 * the timer wheel.
+	 */
+	wake_up_idle_cpu(cpu);
+	spin_unlock_irqrestore(&base->lock, flags);
+}
+
+/**
  * del_timer - deactive a timer.
  * @timer: the timer to be deactivated
  *
@@ -733,7 +772,6 @@ int del_timer(struct timer_list *timer)
 
 	return ret;
 }
-
 EXPORT_SYMBOL(del_timer);
 
 #ifdef CONFIG_SMP
@@ -767,7 +805,6 @@ out:
 
 	return ret;
 }
-
 EXPORT_SYMBOL(try_to_del_timer_sync);
 
 /**
@@ -796,7 +833,6 @@ int del_timer_sync(struct timer_list *timer)
 		cpu_relax();
 	}
 }
-
EXPORT_SYMBOL(del_timer_sync);
 #endif
 
@@ -1268,7 +1304,7 @@ signed long __sched schedule_timeout(signed long timeout)
 	expire = timeout + jiffies;
 
 	setup_timer_on_stack(&timer, process_timeout, (unsigned long)current);
-	__mod_timer(&timer, expire);
+	__mod_timer(&timer, expire, false);
 	schedule();
 	del_singleshot_timer_sync(&timer);
 
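
For illustration, here is a minimal sketch of how a call site might use the mod_timer_pending() interface introduced by this change alongside plain mod_timer(). The struct example_dev, its field names, the callback, and the one-second timeout are all hypothetical; only setup_timer(), mod_timer(), mod_timer_pending() and del_timer_sync() are real timer API calls of this kernel era (timer callbacks still take an unsigned long argument here).

#include <linux/jiffies.h>
#include <linux/timer.h>

/* Hypothetical example device; only the timer API calls below are real. */
struct example_dev {
	struct timer_list watchdog;
};

static void example_watchdog_fire(unsigned long data)
{
	/* Timer callback: 'data' carries the device pointer (pre-4.15 style). */
	struct example_dev *dev = (struct example_dev *)data;
	/* ... handle the watchdog timeout for 'dev' ... */
}

static void example_start(struct example_dev *dev)
{
	setup_timer(&dev->watchdog, example_watchdog_fire, (unsigned long)dev);
	/* mod_timer() arms the timer whether or not it is already pending. */
	mod_timer(&dev->watchdog, jiffies + HZ);
}

static void example_saw_activity(struct example_dev *dev)
{
	/*
	 * mod_timer_pending() pushes the timeout out only while the timer is
	 * still pending; if another path has already deleted it, the timer is
	 * not re-activated and the call returns 0.
	 */
	mod_timer_pending(&dev->watchdog, jiffies + HZ);
}

static void example_stop(struct example_dev *dev)
{
	del_timer_sync(&dev->watchdog);
}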