Diffstat (limited to 'kernel/hrtimer.c')
-rw-r--r-- | kernel/hrtimer.c | 162 |
1 file changed, 85 insertions(+), 77 deletions(-)
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index dbbbf7d43080..c541ee527ecb 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -64,17 +64,20 @@ DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
 	.clock_base =
 	{
 		{
-			.index = CLOCK_REALTIME,
-			.get_time = &ktime_get_real,
+			.index = HRTIMER_BASE_MONOTONIC,
+			.clockid = CLOCK_MONOTONIC,
+			.get_time = &ktime_get,
 			.resolution = KTIME_LOW_RES,
 		},
 		{
-			.index = CLOCK_MONOTONIC,
-			.get_time = &ktime_get,
+			.index = HRTIMER_BASE_REALTIME,
+			.clockid = CLOCK_REALTIME,
+			.get_time = &ktime_get_real,
 			.resolution = KTIME_LOW_RES,
 		},
 		{
-			.index = CLOCK_BOOTTIME,
+			.index = HRTIMER_BASE_BOOTTIME,
+			.clockid = CLOCK_BOOTTIME,
 			.get_time = &ktime_get_boottime,
 			.resolution = KTIME_LOW_RES,
 		},
@@ -196,7 +199,7 @@ switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base,
 	struct hrtimer_cpu_base *new_cpu_base;
 	int this_cpu = smp_processor_id();
 	int cpu = hrtimer_get_target(this_cpu, pinned);
-	int basenum = hrtimer_clockid_to_base(base->index);
+	int basenum = base->index;
 
 again:
 	new_cpu_base = &per_cpu(hrtimer_bases, cpu);
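
Taken together, the two hunks above decouple a clock base's array slot from the POSIX clock it serves: .index now holds the HRTIMER_BASE_* position and the new .clockid field carries the CLOCK_* id, so switch_hrtimer_base() can read base->index directly instead of translating through hrtimer_clockid_to_base(). A minimal userspace sketch of that split (hypothetical names, not the kernel's structures):

```c
#include <stdio.h>
#include <time.h>

#ifndef CLOCK_BOOTTIME
#define CLOCK_BOOTTIME 7	/* Linux value, for older libc headers */
#endif

/* Array positions of the clock bases (stand-in for HRTIMER_BASE_*). */
enum base_index { BASE_MONOTONIC, BASE_REALTIME, BASE_BOOTTIME, MAX_BASES };

/* Each base records both its own slot and the POSIX clock it serves,
 * mirroring the .index / .clockid split introduced by the patch. */
struct clock_base {
	enum base_index index;
	clockid_t clockid;
};

static const struct clock_base bases[MAX_BASES] = {
	[BASE_MONOTONIC] = { BASE_MONOTONIC, CLOCK_MONOTONIC },
	[BASE_REALTIME]  = { BASE_REALTIME,  CLOCK_REALTIME  },
	[BASE_BOOTTIME]  = { BASE_BOOTTIME,  CLOCK_BOOTTIME  },
};

int main(void)
{
	/* With the slot stored in the base itself, "which array entry is
	 * this?" becomes a plain field read, not a clockid lookup. */
	for (int i = 0; i < MAX_BASES; i++)
		printf("slot %d serves clockid %d\n",
		       bases[i].index, (int)bases[i].clockid);
	return 0;
}
```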
@@ -621,66 +624,6 @@ static int hrtimer_reprogram(struct hrtimer *timer,
 	return res;
 }
 
-
-/*
- * Retrigger next event is called after clock was set
- *
- * Called with interrupts disabled via on_each_cpu()
- */
-static void retrigger_next_event(void *arg)
-{
-	struct hrtimer_cpu_base *base;
-	struct timespec realtime_offset, wtm, sleep;
-
-	if (!hrtimer_hres_active())
-		return;
-
-	get_xtime_and_monotonic_and_sleep_offset(&realtime_offset, &wtm,
-							&sleep);
-	set_normalized_timespec(&realtime_offset, -wtm.tv_sec, -wtm.tv_nsec);
-
-	base = &__get_cpu_var(hrtimer_bases);
-
-	/* Adjust CLOCK_REALTIME offset */
-	raw_spin_lock(&base->lock);
-	base->clock_base[HRTIMER_BASE_REALTIME].offset =
-		timespec_to_ktime(realtime_offset);
-	base->clock_base[HRTIMER_BASE_BOOTTIME].offset =
-		timespec_to_ktime(sleep);
-
-	hrtimer_force_reprogram(base, 0);
-	raw_spin_unlock(&base->lock);
-}
-
-/*
- * Clock realtime was set
- *
- * Change the offset of the realtime clock vs. the monotonic
- * clock.
- *
- * We might have to reprogram the high resolution timer interrupt. On
- * SMP we call the architecture specific code to retrigger _all_ high
- * resolution timer interrupts. On UP we just disable interrupts and
- * call the high resolution interrupt code.
- */
-void clock_was_set(void)
-{
-	/* Retrigger the CPU local events everywhere */
-	on_each_cpu(retrigger_next_event, NULL, 1);
-}
-
-/*
- * During resume we might have to reprogram the high resolution timer
- * interrupt (on the local CPU):
- */
-void hres_timers_resume(void)
-{
-	WARN_ONCE(!irqs_disabled(),
-		  KERN_INFO "hres_timers_resume() called with IRQs enabled!");
-
-	retrigger_next_event(NULL);
-}
-
 /*
  * Initialize the high resolution related parts of cpu_base
  */
@@ -715,11 +658,39 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
 }
 
 /*
+ * Retrigger next event is called after clock was set
+ *
+ * Called with interrupts disabled via on_each_cpu()
+ */
+static void retrigger_next_event(void *arg)
+{
+	struct hrtimer_cpu_base *base = &__get_cpu_var(hrtimer_bases);
+	struct timespec realtime_offset, xtim, wtm, sleep;
+
+	if (!hrtimer_hres_active())
+		return;
+
+	/* Optimized out for !HIGH_RES */
+	get_xtime_and_monotonic_and_sleep_offset(&xtim, &wtm, &sleep);
+	set_normalized_timespec(&realtime_offset, -wtm.tv_sec, -wtm.tv_nsec);
+
+	/* Adjust CLOCK_REALTIME offset */
+	raw_spin_lock(&base->lock);
+	base->clock_base[HRTIMER_BASE_REALTIME].offset =
+		timespec_to_ktime(realtime_offset);
+	base->clock_base[HRTIMER_BASE_BOOTTIME].offset =
+		timespec_to_ktime(sleep);
+
+	hrtimer_force_reprogram(base, 0);
+	raw_spin_unlock(&base->lock);
+}
+
+/*
  * Switch to high resolution mode
  */
 static int hrtimer_switch_to_hres(void)
 {
-	int cpu = smp_processor_id();
+	int i, cpu = smp_processor_id();
 	struct hrtimer_cpu_base *base = &per_cpu(hrtimer_bases, cpu);
 	unsigned long flags;
 
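
The relocated retrigger_next_event() above recomputes the per-base offsets after the clock was set: the CLOCK_REALTIME offset is the negation of wall_to_monotonic (renormalized so nanoseconds stay in range), and the accumulated sleep time becomes the CLOCK_BOOTTIME offset. A small userspace re-creation of that normalization step (set_normalized_timespec() rewritten here purely for illustration; the values are made up):

```c
#include <stdio.h>

#define NSEC_PER_SEC 1000000000L

struct ts { long long sec; long nsec; };

/* Userspace stand-in for set_normalized_timespec(): fold nanoseconds
 * back into the [0, NSEC_PER_SEC) range, adjusting seconds. */
static struct ts normalize(long long sec, long long nsec)
{
	while (nsec >= NSEC_PER_SEC) { nsec -= NSEC_PER_SEC; sec++; }
	while (nsec < 0)             { nsec += NSEC_PER_SEC; sec--; }
	return (struct ts){ sec, (long)nsec };
}

int main(void)
{
	/* Made-up wall_to_monotonic value (monotonic = realtime + wtm). */
	struct ts wtm = { -1305212345, 123456789 };

	/* The patch derives the REALTIME base offset by negating
	 * wall_to_monotonic: offset = realtime - monotonic. */
	struct ts realtime_offset = normalize(-wtm.sec, -(long long)wtm.nsec);

	printf("realtime offset = %lld.%09ld s\n",
	       realtime_offset.sec, realtime_offset.nsec);
	return 0;
}
```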
@@ -735,9 +706,8 @@ static int hrtimer_switch_to_hres(void)
 		return 0;
 	}
 	base->hres_active = 1;
-	base->clock_base[HRTIMER_BASE_REALTIME].resolution = KTIME_HIGH_RES;
-	base->clock_base[HRTIMER_BASE_MONOTONIC].resolution = KTIME_HIGH_RES;
-	base->clock_base[HRTIMER_BASE_BOOTTIME].resolution = KTIME_HIGH_RES;
+	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
+		base->clock_base[i].resolution = KTIME_HIGH_RES;
 
 	tick_setup_sched_timer();
 
@@ -761,9 +731,43 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
 	return 0;
 }
 static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { }
+static inline void retrigger_next_event(void *arg) { }
 
 #endif /* CONFIG_HIGH_RES_TIMERS */
 
+/*
+ * Clock realtime was set
+ *
+ * Change the offset of the realtime clock vs. the monotonic
+ * clock.
+ *
+ * We might have to reprogram the high resolution timer interrupt. On
+ * SMP we call the architecture specific code to retrigger _all_ high
+ * resolution timer interrupts. On UP we just disable interrupts and
+ * call the high resolution interrupt code.
+ */
+void clock_was_set(void)
+{
+#ifdef CONFIG_HIGHRES_TIMERS
+	/* Retrigger the CPU local events everywhere */
+	on_each_cpu(retrigger_next_event, NULL, 1);
+#endif
+	timerfd_clock_was_set();
+}
+
+/*
+ * During resume we might have to reprogram the high resolution timer
+ * interrupt (on the local CPU):
+ */
+void hrtimers_resume(void)
+{
+	WARN_ONCE(!irqs_disabled(),
+		  KERN_INFO "hrtimers_resume() called with IRQs enabled!");
+
+	retrigger_next_event(NULL);
+	timerfd_clock_was_set();
+}
+
 static inline void timer_stats_hrtimer_set_start_info(struct hrtimer *timer)
 {
 #ifdef CONFIG_TIMER_STATS
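
clock_was_set() and hrtimers_resume() now live outside the CONFIG_HIGH_RES_TIMERS block, which is why the hunk also adds an empty retrigger_next_event() stub for the low-res build: hrtimers_resume() can call it unconditionally and the stub compiles away. A hedged sketch of that stub pattern (build with or without -DHIGH_RES; names are illustrative, not the kernel's):

```c
#include <stdio.h>

/* With -DHIGH_RES the real function is built; without it an empty
 * inline stub takes its place, the same trick the patch uses for
 * !CONFIG_HIGH_RES_TIMERS. */
#ifdef HIGH_RES
static void retrigger_next_event(void *arg)
{
	(void)arg;
	printf("reprogramming the per-CPU timer hardware\n");
}
#else
static inline void retrigger_next_event(void *arg) { (void)arg; }
#endif

static void hrtimers_resume(void)
{
	/* No #ifdef needed here: in the low-res build the call is a no-op. */
	retrigger_next_event(NULL);
	printf("notifying timerfd waiters of the clock change\n");
}

int main(void)
{
	hrtimers_resume();
	return 0;
}
```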
@@ -856,6 +860,7 @@ static int enqueue_hrtimer(struct hrtimer *timer,
 	debug_activate(timer);
 
 	timerqueue_add(&base->active, &timer->node);
+	base->cpu_base->active_bases |= 1 << base->index;
 
 	/*
 	 * HRTIMER_STATE_ENQUEUED is or'ed to the current state to preserve the
@@ -897,6 +902,8 @@ static void __remove_hrtimer(struct hrtimer *timer,
 #endif
 	}
 	timerqueue_del(&base->active, &timer->node);
+	if (!timerqueue_getnext(&base->active))
+		base->cpu_base->active_bases &= ~(1 << base->index);
 out:
 	timer->state = newstate;
 }
@@ -1234,7 +1241,6 @@ static void __run_hrtimer(struct hrtimer *timer, ktime_t *now)
 void hrtimer_interrupt(struct clock_event_device *dev)
 {
 	struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
-	struct hrtimer_clock_base *base;
 	ktime_t expires_next, now, entry_time, delta;
 	int i, retries = 0;
 
@@ -1256,12 +1262,15 @@ retry:
 	 */
 	cpu_base->expires_next.tv64 = KTIME_MAX;
 
-	base = cpu_base->clock_base;
-
 	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
-		ktime_t basenow;
+		struct hrtimer_clock_base *base;
 		struct timerqueue_node *node;
+		ktime_t basenow;
+
+		if (!(cpu_base->active_bases & (1 << i)))
+			continue;
 
+		base = cpu_base->clock_base + i;
 		basenow = ktime_add(now, base->offset);
 
 		while ((node = timerqueue_getnext(&base->active))) {
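
The enqueue and remove hunks further up set and clear one active_bases bit per clock base, and the loop above uses that bit to skip bases with nothing queued instead of walking all HRTIMER_MAX_CLOCK_BASES unconditionally. A minimal userspace sketch of the same bookkeeping (hypothetical names, three bases assumed):

```c
#include <stdio.h>

#define MAX_BASES 3

static unsigned int active_bases;	/* one bit per clock base */
static int queued[MAX_BASES];		/* stand-in for each base's timerqueue */

/* Mirrors enqueue_hrtimer(): queueing a timer marks its base active. */
static void enqueue(int base)
{
	queued[base]++;
	active_bases |= 1U << base;
}

/* Mirrors __remove_hrtimer(): clear the bit once the base runs empty. */
static void dequeue(int base)
{
	if (queued[base] && --queued[base] == 0)
		active_bases &= ~(1U << base);
}

int main(void)
{
	enqueue(0);
	enqueue(2);
	dequeue(2);

	/* The interrupt path only scans bases whose bit is still set. */
	for (int i = 0; i < MAX_BASES; i++) {
		if (!(active_bases & (1U << i)))
			continue;	/* nothing pending on this base */
		printf("scanning base %d (%d timer(s) queued)\n", i, queued[i]);
	}
	return 0;
}
```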
@@ -1294,7 +1303,6 @@ retry:
 
 			__run_hrtimer(timer, &basenow);
 		}
-		base++;
 	}
 
 	/*
@@ -1525,7 +1533,7 @@ long __sched hrtimer_nanosleep_restart(struct restart_block *restart)
 	struct timespec __user *rmtp;
 	int ret = 0;
 
-	hrtimer_init_on_stack(&t.timer, restart->nanosleep.index,
+	hrtimer_init_on_stack(&t.timer, restart->nanosleep.clockid,
 				HRTIMER_MODE_ABS);
 	hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires);
 
@@ -1577,7 +1585,7 @@ long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp,
 
 	restart = &current_thread_info()->restart_block;
 	restart->fn = hrtimer_nanosleep_restart;
-	restart->nanosleep.index = t.timer.base->index;
+	restart->nanosleep.clockid = t.timer.base->clockid;
 	restart->nanosleep.rmtp = rmtp;
 	restart->nanosleep.expires = hrtimer_get_expires_tv64(&t.timer);
 
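
The last two hunks rename the nanosleep restart field from index to clockid to match the new hrtimer_clock_base layout: what a restarted sleep has to remember is the POSIX clock it was armed on together with its absolute expiry. A rough userspace analogue of that restart bookkeeping, using clock_nanosleep() with TIMER_ABSTIME (the struct below is illustrative, not the kernel's restart_block):

```c
#include <stdio.h>
#include <time.h>
#include <errno.h>

/* What a restartable sleep carries across an interruption:
 * the clock it was armed on and its absolute expiry time. */
struct nanosleep_restart {
	clockid_t clockid;		/* was ".index" before this patch */
	struct timespec expires;	/* absolute expiry */
};

static int finish_sleep(const struct nanosleep_restart *r)
{
	int err;

	/* TIMER_ABSTIME means re-sleeping after a signal does not drift:
	 * the expiry stays absolute on the saved clock. */
	do {
		err = clock_nanosleep(r->clockid, TIMER_ABSTIME,
				      &r->expires, NULL);
	} while (err == EINTR);
	return err;
}

int main(void)
{
	struct nanosleep_restart r = { .clockid = CLOCK_MONOTONIC };

	clock_gettime(r.clockid, &r.expires);
	r.expires.tv_sec += 1;			/* sleep ~1 second, absolute */

	printf("clock_nanosleep returned %d\n", finish_sleep(&r));
	return 0;
}
```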