Diffstat (limited to 'kernel/hrtimer.c')
-rw-r--r--  kernel/hrtimer.c | 143
1 file changed, 70 insertions, 73 deletions
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index bda9cb924276..1455b7651b6b 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -32,7 +32,6 @@
  */
 
 #include <linux/cpu.h>
-#include <linux/irq.h>
 #include <linux/module.h>
 #include <linux/percpu.h>
 #include <linux/hrtimer.h>
@@ -635,7 +634,6 @@ static inline void hrtimer_init_timer_hres(struct hrtimer *timer)
 {
 }
 
-static void __run_hrtimer(struct hrtimer *timer);
 
 /*
  * When High resolution timers are active, try to reprogram. Note, that in case
@@ -647,13 +645,9 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
                                             struct hrtimer_clock_base *base)
 {
         if (base->cpu_base->hres_active && hrtimer_reprogram(timer, base)) {
-                /*
-                 * XXX: recursion check?
-                 * hrtimer_forward() should round up with timer granularity
-                 * so that we never get into inf recursion here,
-                 * it doesn't do that though
-                 */
-                __run_hrtimer(timer);
+                spin_unlock(&base->cpu_base->lock);
+                raise_softirq_irqoff(HRTIMER_SOFTIRQ);
+                spin_lock(&base->cpu_base->lock);
                 return 1;
         }
         return 0;
@@ -706,11 +700,6 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
 }
 static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { }
 static inline void hrtimer_init_timer_hres(struct hrtimer *timer) { }
-static inline int hrtimer_reprogram(struct hrtimer *timer,
-                                    struct hrtimer_clock_base *base)
-{
-        return 0;
-}
 
 #endif /* CONFIG_HIGH_RES_TIMERS */
 
@@ -781,9 +770,11 @@ EXPORT_SYMBOL_GPL(hrtimer_forward);
  *
  * The timer is inserted in expiry order. Insertion into the
  * red black tree is O(log(n)). Must hold the base lock.
+ *
+ * Returns 1 when the new timer is the leftmost timer in the tree.
  */
-static void enqueue_hrtimer(struct hrtimer *timer,
-                            struct hrtimer_clock_base *base, int reprogram)
+static int enqueue_hrtimer(struct hrtimer *timer,
+                           struct hrtimer_clock_base *base)
 {
         struct rb_node **link = &base->active.rb_node;
         struct rb_node *parent = NULL;
@@ -815,20 +806,8 @@ static void enqueue_hrtimer(struct hrtimer *timer,
          * Insert the timer to the rbtree and check whether it
          * replaces the first pending timer
          */
-        if (leftmost) {
-                /*
-                 * Reprogram the clock event device. When the timer is already
-                 * expired hrtimer_enqueue_reprogram has either called the
-                 * callback or added it to the pending list and raised the
-                 * softirq.
-                 *
-                 * This is a NOP for !HIGHRES
-                 */
-                if (reprogram && hrtimer_enqueue_reprogram(timer, base))
-                        return;
-
+        if (leftmost)
                 base->first = &timer->node;
-        }
 
         rb_link_node(&timer->node, parent, link);
         rb_insert_color(&timer->node, &base->active);
@@ -837,6 +816,8 @@ static void enqueue_hrtimer(struct hrtimer *timer,
          * state of a possibly running callback.
          */
         timer->state |= HRTIMER_STATE_ENQUEUED;
+
+        return leftmost;
 }
 
 /*
@@ -913,7 +894,7 @@ hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, unsigned long delta_n
 {
         struct hrtimer_clock_base *base, *new_base;
         unsigned long flags;
-        int ret;
+        int ret, leftmost;
 
         base = lock_hrtimer_base(timer, &flags);
 
@@ -941,12 +922,16 @@ hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, unsigned long delta_n
 
         timer_stats_hrtimer_set_start_info(timer);
 
+        leftmost = enqueue_hrtimer(timer, new_base);
+
         /*
          * Only allow reprogramming if the new base is on this CPU.
          * (it might still be on another CPU if the timer was pending)
+         *
+         * XXX send_remote_softirq() ?
          */
-        enqueue_hrtimer(timer, new_base,
-                        new_base->cpu_base == &__get_cpu_var(hrtimer_bases));
+        if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases))
+                hrtimer_enqueue_reprogram(timer, new_base);
 
         unlock_hrtimer_base(timer, &flags);
 
@@ -1158,13 +1143,13 @@ static void __run_hrtimer(struct hrtimer *timer)
         spin_lock(&cpu_base->lock);
 
         /*
-         * Note: We clear the CALLBACK bit after enqueue_hrtimer to avoid
-         * reprogramming of the event hardware. This happens at the end of this
-         * function anyway.
+         * Note: We clear the CALLBACK bit after enqueue_hrtimer and
+         * we do not reprogramm the event hardware. Happens either in
+         * hrtimer_start_range_ns() or in hrtimer_interrupt()
          */
         if (restart != HRTIMER_NORESTART) {
                 BUG_ON(timer->state != HRTIMER_STATE_CALLBACK);
-                enqueue_hrtimer(timer, base, 0);
+                enqueue_hrtimer(timer, base);
         }
         timer->state &= ~HRTIMER_STATE_CALLBACK;
 }
@@ -1244,6 +1229,22 @@ void hrtimer_interrupt(struct clock_event_device *dev)
         }
 }
 
+/*
+ * local version of hrtimer_peek_ahead_timers() called with interrupts
+ * disabled.
+ */
+static void __hrtimer_peek_ahead_timers(void)
+{
+        struct tick_device *td;
+
+        if (!hrtimer_hres_active())
+                return;
+
+        td = &__get_cpu_var(tick_cpu_device);
+        if (td && td->evtdev)
+                hrtimer_interrupt(td->evtdev);
+}
+
 /**
  * hrtimer_peek_ahead_timers -- run soft-expired timers now
  *
@@ -1255,20 +1256,23 @@ void hrtimer_interrupt(struct clock_event_device *dev)
  */
 void hrtimer_peek_ahead_timers(void)
 {
-        struct tick_device *td;
         unsigned long flags;
 
-        if (!hrtimer_hres_active())
-                return;
-
         local_irq_save(flags);
-        td = &__get_cpu_var(tick_cpu_device);
-        if (td && td->evtdev)
-                hrtimer_interrupt(td->evtdev);
+        __hrtimer_peek_ahead_timers();
         local_irq_restore(flags);
 }
 
-#endif /* CONFIG_HIGH_RES_TIMERS */
+static void run_hrtimer_softirq(struct softirq_action *h)
+{
+        hrtimer_peek_ahead_timers();
+}
+
+#else /* CONFIG_HIGH_RES_TIMERS */
+
+static inline void __hrtimer_peek_ahead_timers(void) { }
+
+#endif /* !CONFIG_HIGH_RES_TIMERS */
 
 /*
  * Called from timer softirq every jiffy, expire hrtimers:
@@ -1514,39 +1518,36 @@ static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
                 __remove_hrtimer(timer, old_base, HRTIMER_STATE_MIGRATE, 0);
                 timer->base = new_base;
                 /*
-                 * Enqueue the timers on the new cpu, but do not reprogram
-                 * the timer as that would enable a deadlock between
-                 * hrtimer_enqueue_reprogramm() running the timer and us still
-                 * holding a nested base lock.
-                 *
-                 * Instead we tickle the hrtimer interrupt after the migration
-                 * is done, which will run all expired timers and re-programm
-                 * the timer device.
+                 * Enqueue the timers on the new cpu. This does not
+                 * reprogram the event device in case the timer
+                 * expires before the earliest on this CPU, but we run
+                 * hrtimer_interrupt after we migrated everything to
+                 * sort out already expired timers and reprogram the
+                 * event device.
                  */
-                enqueue_hrtimer(timer, new_base, 0);
+                enqueue_hrtimer(timer, new_base);
 
                 /* Clear the migration state bit */
                 timer->state &= ~HRTIMER_STATE_MIGRATE;
         }
 }
 
-static int migrate_hrtimers(int scpu)
+static void migrate_hrtimers(int scpu)
 {
         struct hrtimer_cpu_base *old_base, *new_base;
-        int dcpu, i;
+        int i;
 
         BUG_ON(cpu_online(scpu));
-        old_base = &per_cpu(hrtimer_bases, scpu);
-        new_base = &get_cpu_var(hrtimer_bases);
-
-        dcpu = smp_processor_id();
-
         tick_cancel_sched_timer(scpu);
+
+        local_irq_disable();
+        old_base = &per_cpu(hrtimer_bases, scpu);
+        new_base = &__get_cpu_var(hrtimer_bases);
         /*
          * The caller is globally serialized and nobody else
          * takes two locks at once, deadlock is not possible.
          */
-        spin_lock_irq(&new_base->lock);
+        spin_lock(&new_base->lock);
         spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
 
         for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
@@ -1555,15 +1556,11 @@ static int migrate_hrtimers(int scpu)
         }
 
         spin_unlock(&old_base->lock);
-        spin_unlock_irq(&new_base->lock);
-        put_cpu_var(hrtimer_bases);
+        spin_unlock(&new_base->lock);
 
-        return dcpu;
-}
-
-static void tickle_timers(void *arg)
-{
-        hrtimer_peek_ahead_timers();
+        /* Check, if we got expired work to do */
+        __hrtimer_peek_ahead_timers();
+        local_irq_enable();
 }
 
 #endif /* CONFIG_HOTPLUG_CPU */
@@ -1584,11 +1581,8 @@ static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
         case CPU_DEAD:
         case CPU_DEAD_FROZEN:
         {
-                int dcpu;
-
                 clockevents_notify(CLOCK_EVT_NOTIFY_CPU_DEAD, &scpu);
-                dcpu = migrate_hrtimers(scpu);
-                smp_call_function_single(dcpu, tickle_timers, NULL, 0);
+                migrate_hrtimers(scpu);
                 break;
         }
 #endif
@@ -1609,6 +1603,9 @@ void __init hrtimers_init(void)
         hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
                            (void *)(long)smp_processor_id());
         register_cpu_notifier(&hrtimers_nb);
+#ifdef CONFIG_HIGH_RES_TIMERS
+        open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq);
+#endif
 }
 
 /**