Diffstat (limited to 'kernel/timer.c')
-rw-r--r--	kernel/timer.c	178
1 file changed, 130 insertions(+), 48 deletions(-)
diff --git a/kernel/timer.c b/kernel/timer.c
index 13dd64fe143d..b4555568b4e4 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -491,14 +491,18 @@ static inline void debug_timer_free(struct timer_list *timer)
 	debug_object_free(timer, &timer_debug_descr);
 }
 
-static void __init_timer(struct timer_list *timer);
+static void __init_timer(struct timer_list *timer,
+			 const char *name,
+			 struct lock_class_key *key);
 
-void init_timer_on_stack(struct timer_list *timer)
+void init_timer_on_stack_key(struct timer_list *timer,
+			     const char *name,
+			     struct lock_class_key *key)
 {
 	debug_object_init_on_stack(timer, &timer_debug_descr);
-	__init_timer(timer);
+	__init_timer(timer, name, key);
 }
-EXPORT_SYMBOL_GPL(init_timer_on_stack);
+EXPORT_SYMBOL_GPL(init_timer_on_stack_key);
 
 void destroy_timer_on_stack(struct timer_list *timer)
 {
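
The hunk above only changes how the on-stack variant is initialized; the call pattern for users stays the same. As a rough sketch (the two functions below are invented for illustration, and callers normally go through the setup_timer_on_stack()/init_timer_on_stack() wrappers rather than calling init_timer_on_stack_key() directly):

static void example_timeout(unsigned long data)		/* hypothetical */
{
	/* ... */
}

static void example_wait(void)				/* hypothetical */
{
	struct timer_list timer;

	setup_timer_on_stack(&timer, example_timeout, 0);
	mod_timer(&timer, jiffies + HZ);
	/* ... wait for something ... */
	del_timer_sync(&timer);
	destroy_timer_on_stack(&timer);
}

With this patch the wrapper presumably lands in init_timer_on_stack_key(), so each such call site gets its own lockdep class and a human-readable name.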
@@ -512,7 +516,9 @@ static inline void debug_timer_activate(struct timer_list *timer) { }
 static inline void debug_timer_deactivate(struct timer_list *timer) { }
 #endif
 
-static void __init_timer(struct timer_list *timer)
+static void __init_timer(struct timer_list *timer,
+			 const char *name,
+			 struct lock_class_key *key)
 {
 	timer->entry.next = NULL;
 	timer->base = __raw_get_cpu_var(tvec_bases);
@@ -521,6 +527,7 @@ static void __init_timer(struct timer_list *timer)
 	timer->start_pid = -1;
 	memset(timer->start_comm, 0, TASK_COMM_LEN);
 #endif
+	lockdep_init_map(&timer->lockdep_map, name, key, 0);
 }
 
 /**
@@ -530,19 +537,23 @@ static void __init_timer(struct timer_list *timer)
  * init_timer() must be done to a timer prior calling *any* of the
  * other timer functions.
  */
-void init_timer(struct timer_list *timer)
+void init_timer_key(struct timer_list *timer,
+		    const char *name,
+		    struct lock_class_key *key)
 {
 	debug_timer_init(timer);
-	__init_timer(timer);
+	__init_timer(timer, name, key);
 }
-EXPORT_SYMBOL(init_timer);
+EXPORT_SYMBOL(init_timer_key);
 
-void init_timer_deferrable(struct timer_list *timer)
+void init_timer_deferrable_key(struct timer_list *timer,
+			       const char *name,
+			       struct lock_class_key *key)
 {
-	init_timer(timer);
+	init_timer_key(timer, name, key);
 	timer_set_deferrable(timer);
 }
-EXPORT_SYMBOL(init_timer_deferrable);
+EXPORT_SYMBOL(init_timer_deferrable_key);
 
 static inline void detach_timer(struct timer_list *timer,
 				int clear_pending)
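
None of the existing init_timer()/init_timer_deferrable()/init_timer_on_stack() callers appear in this per-file diff, so they are presumably converted in include/linux/timer.h. A sketch of the usual lockdep _key wrapper pattern such a header change would use (this is an assumption about the companion header hunk, not part of the diff shown here):

/* Assumed shape of the header-side wrapper; not taken from this diff. */
#define init_timer(timer)						\
	do {								\
		static struct lock_class_key __key;			\
		init_timer_key((timer), #timer, &__key);		\
	} while (0)

The static key gives every init_timer() call site its own lock class, and the stringified argument becomes the name that lockdep_init_map() stores in the hunk above.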
@@ -589,11 +600,14 @@ static struct tvec_base *lock_timer_base(struct timer_list *timer,
 	}
 }
 
-int __mod_timer(struct timer_list *timer, unsigned long expires)
+static inline int
+__mod_timer(struct timer_list *timer, unsigned long expires, bool pending_only)
 {
 	struct tvec_base *base, *new_base;
 	unsigned long flags;
-	int ret = 0;
+	int ret;
+
+	ret = 0;
 
 	timer_stats_timer_set_start_info(timer);
 	BUG_ON(!timer->function);
@@ -603,6 +617,9 @@ int __mod_timer(struct timer_list *timer, unsigned long expires)
 	if (timer_pending(timer)) {
 		detach_timer(timer, 0);
 		ret = 1;
+	} else {
+		if (pending_only)
+			goto out_unlock;
 	}
 
 	debug_timer_activate(timer);
@@ -629,42 +646,28 @@ int __mod_timer(struct timer_list *timer, unsigned long expires)
 
 	timer->expires = expires;
 	internal_add_timer(base, timer);
+
+out_unlock:
 	spin_unlock_irqrestore(&base->lock, flags);
 
 	return ret;
 }
 
-EXPORT_SYMBOL(__mod_timer);
-
 /**
- * add_timer_on - start a timer on a particular CPU
- * @timer: the timer to be added
- * @cpu: the CPU to start it on
+ * mod_timer_pending - modify a pending timer's timeout
+ * @timer: the pending timer to be modified
+ * @expires: new timeout in jiffies
  *
- * This is not very scalable on SMP. Double adds are not possible.
+ * mod_timer_pending() is the same for pending timers as mod_timer(),
+ * but will not re-activate and modify already deleted timers.
+ *
+ * It is useful for unserialized use of timers.
  */
-void add_timer_on(struct timer_list *timer, int cpu)
+int mod_timer_pending(struct timer_list *timer, unsigned long expires)
 {
-	struct tvec_base *base = per_cpu(tvec_bases, cpu);
-	unsigned long flags;
-
-	timer_stats_timer_set_start_info(timer);
-	BUG_ON(timer_pending(timer) || !timer->function);
-	spin_lock_irqsave(&base->lock, flags);
-	timer_set_base(timer, base);
-	debug_timer_activate(timer);
-	internal_add_timer(base, timer);
-	/*
-	 * Check whether the other CPU is idle and needs to be
-	 * triggered to reevaluate the timer wheel when nohz is
-	 * active. We are protected against the other CPU fiddling
-	 * with the timer by holding the timer base lock. This also
-	 * makes sure that a CPU on the way to idle can not evaluate
-	 * the timer wheel.
-	 */
-	wake_up_idle_cpu(cpu);
-	spin_unlock_irqrestore(&base->lock, flags);
+	return __mod_timer(timer, expires, true);
 }
+EXPORT_SYMBOL(mod_timer_pending);
 
 /**
  * mod_timer - modify a timer's timeout
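
A small usage sketch of the new mod_timer_pending() (the watchdog structure and both functions are invented for illustration): it only pushes a deadline out while the timer is still queued, so an unserialized "touch" path cannot re-arm a timer that has already been deleted or has fired.

struct example_watchdog {		/* hypothetical */
	struct timer_list timer;
	unsigned long timeout;		/* in jiffies */
};

static void example_watchdog_touch(struct example_watchdog *wd)
{
	/* Does nothing if the timer already expired or was deleted. */
	mod_timer_pending(&wd->timer, jiffies + wd->timeout);
}

static void example_watchdog_stop(struct example_watchdog *wd)
{
	del_timer_sync(&wd->timer);
	/* example_watchdog_touch() can no longer re-arm wd->timer. */
}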
@@ -688,9 +691,6 @@ void add_timer_on(struct timer_list *timer, int cpu)
  */
 int mod_timer(struct timer_list *timer, unsigned long expires)
 {
-	BUG_ON(!timer->function);
-
-	timer_stats_timer_set_start_info(timer);
 	/*
 	 * This is a common optimization triggered by the
 	 * networking code - if the timer is re-modified
@@ -699,12 +699,62 @@ int mod_timer(struct timer_list *timer, unsigned long expires)
 	if (timer->expires == expires && timer_pending(timer))
 		return 1;
 
-	return __mod_timer(timer, expires);
+	return __mod_timer(timer, expires, false);
 }
-
 EXPORT_SYMBOL(mod_timer);
 
 /**
+ * add_timer - start a timer
+ * @timer: the timer to be added
+ *
+ * The kernel will do a ->function(->data) callback from the
+ * timer interrupt at the ->expires point in the future. The
+ * current time is 'jiffies'.
+ *
+ * The timer's ->expires, ->function (and if the handler uses it, ->data)
+ * fields must be set prior calling this function.
+ *
+ * Timers with an ->expires field in the past will be executed in the next
+ * timer tick.
+ */
+void add_timer(struct timer_list *timer)
+{
+	BUG_ON(timer_pending(timer));
+	mod_timer(timer, timer->expires);
+}
+EXPORT_SYMBOL(add_timer);
+
+/**
+ * add_timer_on - start a timer on a particular CPU
+ * @timer: the timer to be added
+ * @cpu: the CPU to start it on
+ *
+ * This is not very scalable on SMP. Double adds are not possible.
+ */
+void add_timer_on(struct timer_list *timer, int cpu)
+{
+	struct tvec_base *base = per_cpu(tvec_bases, cpu);
+	unsigned long flags;
+
+	timer_stats_timer_set_start_info(timer);
+	BUG_ON(timer_pending(timer) || !timer->function);
+	spin_lock_irqsave(&base->lock, flags);
+	timer_set_base(timer, base);
+	debug_timer_activate(timer);
+	internal_add_timer(base, timer);
+	/*
+	 * Check whether the other CPU is idle and needs to be
+	 * triggered to reevaluate the timer wheel when nohz is
+	 * active. We are protected against the other CPU fiddling
+	 * with the timer by holding the timer base lock. This also
+	 * makes sure that a CPU on the way to idle can not evaluate
+	 * the timer wheel.
+	 */
+	wake_up_idle_cpu(cpu);
+	spin_unlock_irqrestore(&base->lock, flags);
+}
+
+/**
  * del_timer - deactive a timer.
  * @timer: the timer to be deactivated
  *
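
The add_timer() kernel-doc above spells out the contract: ->function (and ->data, if used) and ->expires must be set before the call, and the timer must not already be pending. A minimal illustration with made-up names:

static struct timer_list example_poll_timer;		/* hypothetical */

static void example_poll(unsigned long data)
{
	/* ... */
}

static void example_start_polling(void)
{
	setup_timer(&example_poll_timer, example_poll, 0);	/* sets ->function/->data */
	example_poll_timer.expires = jiffies + HZ / 10;		/* must precede add_timer() */
	add_timer(&example_poll_timer);				/* BUG_ON() if already pending */
}

add_timer_on() follows the same contract but queues the timer on a specific CPU's base.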
@@ -733,7 +783,6 @@ int del_timer(struct timer_list *timer)
 
 	return ret;
 }
-
 EXPORT_SYMBOL(del_timer);
 
 #ifdef CONFIG_SMP
@@ -767,7 +816,6 @@ out:
 
 	return ret;
 }
-
 EXPORT_SYMBOL(try_to_del_timer_sync);
 
 /**
@@ -789,6 +837,15 @@ EXPORT_SYMBOL(try_to_del_timer_sync);
  */
 int del_timer_sync(struct timer_list *timer)
 {
+#ifdef CONFIG_LOCKDEP
+	unsigned long flags;
+
+	local_irq_save(flags);
+	lock_map_acquire(&timer->lockdep_map);
+	lock_map_release(&timer->lockdep_map);
+	local_irq_restore(flags);
+#endif
+
 	for (;;) {
 		int ret = try_to_del_timer_sync(timer);
 		if (ret >= 0)
@@ -796,7 +853,6 @@ int del_timer_sync(struct timer_list *timer)
 		cpu_relax();
 	}
 }
-
 EXPORT_SYMBOL(del_timer_sync);
 #endif
 
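
The lock_map_acquire()/lock_map_release() pair added to del_timer_sync() lets lockdep see the dependency between the timer and any locks held around the call. A hypothetical example of the pattern it is meant to flag (all names invented): the callback takes a lock that is also held while waiting for the callback to finish, which can deadlock if del_timer_sync() runs concurrently with the handler.

static DEFINE_SPINLOCK(example_lock);		/* hypothetical */
static struct timer_list example_timer;

static void example_timer_fn(unsigned long data)
{
	spin_lock(&example_lock);		/* lockdep records: timer class -> example_lock */
	/* ... */
	spin_unlock(&example_lock);
}

static void example_teardown(void)
{
	spin_lock_bh(&example_lock);
	del_timer_sync(&example_timer);		/* lockdep records: example_lock -> timer class */
	spin_unlock_bh(&example_lock);
}

With the annotations in this patch (here and around fn(data) in __run_timers() below), lockdep sees both orderings and reports the inversion even if the actual deadlock window is never hit.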
@@ -861,10 +917,36 @@ static inline void __run_timers(struct tvec_base *base)
 
 			set_running_timer(base, timer);
 			detach_timer(timer, 1);
+
 			spin_unlock_irq(&base->lock);
 			{
 				int preempt_count = preempt_count();
+
+#ifdef CONFIG_LOCKDEP
+				/*
+				 * It is permissible to free the timer from
+				 * inside the function that is called from
+				 * it, this we need to take into account for
+				 * lockdep too. To avoid bogus "held lock
+				 * freed" warnings as well as problems when
+				 * looking into timer->lockdep_map, make a
+				 * copy and use that here.
+				 */
+				struct lockdep_map lockdep_map =
+					timer->lockdep_map;
+#endif
+				/*
+				 * Couple the lock chain with the lock chain at
+				 * del_timer_sync() by acquiring the lock_map
+				 * around the fn() call here and in
+				 * del_timer_sync().
+				 */
+				lock_map_acquire(&lockdep_map);
+
 				fn(data);
+
+				lock_map_release(&lockdep_map);
+
 				if (preempt_count != preempt_count()) {
 					printk(KERN_ERR "huh, entered %p "
 					       "with preempt_count %08x, exited"
@@ -1268,7 +1350,7 @@ signed long __sched schedule_timeout(signed long timeout)
 	expire = timeout + jiffies;
 
 	setup_timer_on_stack(&timer, process_timeout, (unsigned long)current);
-	__mod_timer(&timer, expire);
+	__mod_timer(&timer, expire, false);
 	schedule();
 	del_singleshot_timer_sync(&timer);
 