author		Thomas Gleixner <tglx@linutronix.de>	2011-05-20 10:18:50 -0400
committer	Thomas Gleixner <tglx@linutronix.de>	2011-05-23 07:59:53 -0400
commit		9ec2690758a5467f24beb301cca5098078073bba (patch)
tree		e5bc78f690d12635a56460ea6f54b49318221dc8 /kernel
parent		250f972d85effad5b6e10da4bbd877e6a4b503b6 (diff)
timerfd: Manage cancelable timers in timerfd
Peter is concerned about the extra scan of CLOCK_REALTIME_COS in the timer interrupt. Yes, I did not think about it, because the solution was so elegant. I didn't like the extra list in timerfd when it was proposed some time ago, but with an RCU-based list the list walk is less horrible than the original global lock, which was held over the list iteration.

Requested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Peter Zijlstra <peterz@infradead.org>
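For context: the hrtimer.c diff below only removes the CLOCK_REALTIME_COS base; the timerfd side that replaces it is not shown here. The idea is that timerfd keeps its own RCU-protected list of contexts that armed cancel-on-clock-set, and timerfd_clock_was_set() (called from clock_was_set() and hrtimers_resume() below) walks that list. The following is a minimal sketch of such an RCU list walk; the names cancel_list, cancel_lock, timerfd_cancel_arm and the struct timerfd_ctx fields are illustrative assumptions, not taken from fs/timerfd.c.

#include <linux/hrtimer.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

/* Illustrative sketch, not the actual fs/timerfd.c implementation. */
static LIST_HEAD(cancel_list);			/* contexts with cancel-on-set armed */
static DEFINE_SPINLOCK(cancel_lock);		/* serializes list add/remove */

struct timerfd_ctx {
	struct hrtimer tmr;
	wait_queue_head_t wqh;
	struct list_head clist;			/* linkage on cancel_list */
	bool might_cancel;			/* timer opted in to cancel-on-set */
	bool canceled;				/* clock was set since last read() */
};

/* Writer side: additions take the spinlock, readers rely solely on RCU. */
static void timerfd_cancel_arm(struct timerfd_ctx *ctx)
{
	spin_lock(&cancel_lock);
	if (!ctx->might_cancel) {
		ctx->might_cancel = true;
		list_add_rcu(&ctx->clist, &cancel_list);
	}
	spin_unlock(&cancel_lock);
}

/*
 * Called from clock_was_set() / hrtimers_resume(): a lockless RCU walk,
 * so setting the clock only costs a pass over the contexts that armed
 * cancelation instead of an extra clock base scan in the timer interrupt.
 */
void timerfd_clock_was_set(void)
{
	struct timerfd_ctx *ctx;
	unsigned long flags;

	rcu_read_lock();
	list_for_each_entry_rcu(ctx, &cancel_list, clist) {
		if (!ctx->might_cancel)
			continue;
		spin_lock_irqsave(&ctx->wqh.lock, flags);
		ctx->canceled = true;		/* let read() report the cancelation */
		wake_up_locked(&ctx->wqh);	/* unblock read()/poll() waiters */
		spin_unlock_irqrestore(&ctx->wqh.lock, flags);
	}
	rcu_read_unlock();
}

The design point is the one raised above: readers never take a global lock across the iteration, so a clock set pays only for contexts that actually asked for cancelation, while the rare list mutations stay serialized on cancel_lock.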
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/hrtimer.c	94
1 file changed, 32 insertions(+), 62 deletions(-)
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index eabcbd781433..26dd32f9f6b2 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -78,11 +78,6 @@ DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
 		.get_time = &ktime_get_boottime,
 		.resolution = KTIME_LOW_RES,
 	},
-	{
-		.index = CLOCK_REALTIME_COS,
-		.get_time = &ktime_get_real,
-		.resolution = KTIME_LOW_RES,
-	},
 	}
 };
 
@@ -90,7 +85,6 @@ static const int hrtimer_clock_to_base_table[MAX_CLOCKS] = {
 	[CLOCK_REALTIME]	= HRTIMER_BASE_REALTIME,
 	[CLOCK_MONOTONIC]	= HRTIMER_BASE_MONOTONIC,
 	[CLOCK_BOOTTIME]	= HRTIMER_BASE_BOOTTIME,
-	[CLOCK_REALTIME_COS]	= HRTIMER_BASE_REALTIME_COS,
 };
 
 static inline int hrtimer_clockid_to_base(clockid_t clock_id)
@@ -116,7 +110,6 @@ static void hrtimer_get_softirq_time(struct hrtimer_cpu_base *base)
 	base->clock_base[HRTIMER_BASE_REALTIME].softirq_time = xtim;
 	base->clock_base[HRTIMER_BASE_MONOTONIC].softirq_time = mono;
 	base->clock_base[HRTIMER_BASE_BOOTTIME].softirq_time = boot;
-	base->clock_base[HRTIMER_BASE_REALTIME_COS].softirq_time = xtim;
 }
 
 /*
@@ -486,8 +479,6 @@ static inline void debug_deactivate(struct hrtimer *timer)
 	trace_hrtimer_cancel(timer);
 }
 
-static void hrtimer_expire_cancelable(struct hrtimer_cpu_base *cpu_base);
-
 /* High resolution timer related functions */
 #ifdef CONFIG_HIGH_RES_TIMERS
 
@@ -663,7 +654,33 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
 	return 0;
 }
 
-static void retrigger_next_event(void *arg);
+/*
+ * Retrigger next event is called after clock was set
+ *
+ * Called with interrupts disabled via on_each_cpu()
+ */
+static void retrigger_next_event(void *arg)
+{
+	struct hrtimer_cpu_base *base = &__get_cpu_var(hrtimer_bases);
+	struct timespec realtime_offset, xtim, wtm, sleep;
+
+	if (!hrtimer_hres_active())
+		return;
+
+	/* Optimized out for !HIGH_RES */
+	get_xtime_and_monotonic_and_sleep_offset(&xtim, &wtm, &sleep);
+	set_normalized_timespec(&realtime_offset, -wtm.tv_sec, -wtm.tv_nsec);
+
+	/* Adjust CLOCK_REALTIME offset */
+	raw_spin_lock(&base->lock);
+	base->clock_base[HRTIMER_BASE_REALTIME].offset =
+		timespec_to_ktime(realtime_offset);
+	base->clock_base[HRTIMER_BASE_BOOTTIME].offset =
+		timespec_to_ktime(sleep);
+
+	hrtimer_force_reprogram(base, 0);
+	raw_spin_unlock(&base->lock);
+}
 
 /*
  * Switch to high resolution mode
@@ -711,46 +728,11 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
 	return 0;
 }
 static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { }
+static inline void retrigger_next_event(void *arg) { }
 
 #endif /* CONFIG_HIGH_RES_TIMERS */
 
 /*
- * Retrigger next event is called after clock was set
- *
- * Called with interrupts disabled via on_each_cpu()
- */
-static void retrigger_next_event(void *arg)
-{
-	struct hrtimer_cpu_base *base = &__get_cpu_var(hrtimer_bases);
-	struct timespec realtime_offset, xtim, wtm, sleep;
-
-	if (!hrtimer_hres_active()) {
-		raw_spin_lock(&base->lock);
-		hrtimer_expire_cancelable(base);
-		raw_spin_unlock(&base->lock);
-		return;
-	}
-
-	/* Optimized out for !HIGH_RES */
-	get_xtime_and_monotonic_and_sleep_offset(&xtim, &wtm, &sleep);
-	set_normalized_timespec(&realtime_offset, -wtm.tv_sec, -wtm.tv_nsec);
-
-	/* Adjust CLOCK_REALTIME offset */
-	raw_spin_lock(&base->lock);
-	base->clock_base[HRTIMER_BASE_REALTIME].offset =
-		timespec_to_ktime(realtime_offset);
-	base->clock_base[HRTIMER_BASE_BOOTTIME].offset =
-		timespec_to_ktime(sleep);
-	base->clock_base[HRTIMER_BASE_REALTIME_COS].offset =
-		timespec_to_ktime(realtime_offset);
-
-	hrtimer_expire_cancelable(base);
-
-	hrtimer_force_reprogram(base, 0);
-	raw_spin_unlock(&base->lock);
-}
-
-/*
  * Clock realtime was set
  *
  * Change the offset of the realtime clock vs. the monotonic
@@ -763,8 +745,11 @@ static void retrigger_next_event(void *arg)
  */
 void clock_was_set(void)
 {
+#ifdef CONFIG_HIGHRES_TIMERS
 	/* Retrigger the CPU local events everywhere */
 	on_each_cpu(retrigger_next_event, NULL, 1);
+#endif
+	timerfd_clock_was_set();
 }
 
 /*
@@ -777,6 +762,7 @@ void hrtimers_resume(void)
 		  KERN_INFO "hrtimers_resume() called with IRQs enabled!");
 
 	retrigger_next_event(NULL);
+	timerfd_clock_was_set();
 }
 
 static inline void timer_stats_hrtimer_set_start_info(struct hrtimer *timer)
@@ -1240,22 +1226,6 @@ static void __run_hrtimer(struct hrtimer *timer, ktime_t *now)
 	timer->state &= ~HRTIMER_STATE_CALLBACK;
 }
 
-static void hrtimer_expire_cancelable(struct hrtimer_cpu_base *cpu_base)
-{
-	struct timerqueue_node *node;
-	struct hrtimer_clock_base *base;
-	ktime_t now = ktime_get_real();
-
-	base = &cpu_base->clock_base[HRTIMER_BASE_REALTIME_COS];
-
-	while ((node = timerqueue_getnext(&base->active))) {
-		struct hrtimer *timer;
-
-		timer = container_of(node, struct hrtimer, node);
-		__run_hrtimer(timer, &now);
-	}
-}
-
 #ifdef CONFIG_HIGH_RES_TIMERS
 
 /*