about summary refs log tree commit diff stats
path: root/kernel/hrtimer.c
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/hrtimer.c')
-rw-r--r--  kernel/hrtimer.c  57
1 file changed, 44 insertions(+), 13 deletions(-)
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 476cb0c0b4a4..1b3033105b40 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -59,6 +59,7 @@ ktime_t ktime_get(void)
59 59
60 return timespec_to_ktime(now); 60 return timespec_to_ktime(now);
61} 61}
62EXPORT_SYMBOL_GPL(ktime_get);
62 63
63/** 64/**
64 * ktime_get_real - get the real (wall-) time in ktime_t format 65 * ktime_get_real - get the real (wall-) time in ktime_t format
@@ -135,7 +136,7 @@ EXPORT_SYMBOL_GPL(ktime_get_ts);
135static void hrtimer_get_softirq_time(struct hrtimer_cpu_base *base) 136static void hrtimer_get_softirq_time(struct hrtimer_cpu_base *base)
136{ 137{
137 ktime_t xtim, tomono; 138 ktime_t xtim, tomono;
138 struct timespec xts; 139 struct timespec xts, tom;
139 unsigned long seq; 140 unsigned long seq;
140 141
141 do { 142 do {
@@ -145,10 +146,11 @@ static void hrtimer_get_softirq_time(struct hrtimer_cpu_base *base)
145#else 146#else
146 xts = xtime; 147 xts = xtime;
147#endif 148#endif
149 tom = wall_to_monotonic;
148 } while (read_seqretry(&xtime_lock, seq)); 150 } while (read_seqretry(&xtime_lock, seq));
149 151
150 xtim = timespec_to_ktime(xts); 152 xtim = timespec_to_ktime(xts);
151 tomono = timespec_to_ktime(wall_to_monotonic); 153 tomono = timespec_to_ktime(tom);
152 base->clock_base[CLOCK_REALTIME].softirq_time = xtim; 154 base->clock_base[CLOCK_REALTIME].softirq_time = xtim;
153 base->clock_base[CLOCK_MONOTONIC].softirq_time = 155 base->clock_base[CLOCK_MONOTONIC].softirq_time =
154 ktime_add(xtim, tomono); 156 ktime_add(xtim, tomono);
@@ -277,6 +279,8 @@ ktime_t ktime_add_ns(const ktime_t kt, u64 nsec)
277 279
278 return ktime_add(kt, tmp); 280 return ktime_add(kt, tmp);
279} 281}
282
283EXPORT_SYMBOL_GPL(ktime_add_ns);
280# endif /* !CONFIG_KTIME_SCALAR */ 284# endif /* !CONFIG_KTIME_SCALAR */
281 285
282/* 286/*
@@ -458,6 +462,18 @@ void clock_was_set(void)
458} 462}
459 463
460/* 464/*
465 * During resume we might have to reprogram the high resolution timer
466 * interrupt (on the local CPU):
467 */
468void hres_timers_resume(void)
469{
470 WARN_ON_ONCE(num_online_cpus() > 1);
471
472 /* Retrigger the CPU local events: */
473 retrigger_next_event(NULL);
474}
475
476/*
461 * Check, whether the timer is on the callback pending list 477 * Check, whether the timer is on the callback pending list
462 */ 478 */
463static inline int hrtimer_cb_pending(const struct hrtimer *timer) 479static inline int hrtimer_cb_pending(const struct hrtimer *timer)
@@ -540,19 +556,19 @@ static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
540/* 556/*
541 * Switch to high resolution mode 557 * Switch to high resolution mode
542 */ 558 */
543static void hrtimer_switch_to_hres(void) 559static int hrtimer_switch_to_hres(void)
544{ 560{
545 struct hrtimer_cpu_base *base = &__get_cpu_var(hrtimer_bases); 561 struct hrtimer_cpu_base *base = &__get_cpu_var(hrtimer_bases);
546 unsigned long flags; 562 unsigned long flags;
547 563
548 if (base->hres_active) 564 if (base->hres_active)
549 return; 565 return 1;
550 566
551 local_irq_save(flags); 567 local_irq_save(flags);
552 568
553 if (tick_init_highres()) { 569 if (tick_init_highres()) {
554 local_irq_restore(flags); 570 local_irq_restore(flags);
555 return; 571 return 0;
556 } 572 }
557 base->hres_active = 1; 573 base->hres_active = 1;
558 base->clock_base[CLOCK_REALTIME].resolution = KTIME_HIGH_RES; 574 base->clock_base[CLOCK_REALTIME].resolution = KTIME_HIGH_RES;
@@ -565,13 +581,14 @@ static void hrtimer_switch_to_hres(void)
565 local_irq_restore(flags); 581 local_irq_restore(flags);
566 printk(KERN_INFO "Switched to high resolution mode on CPU %d\n", 582 printk(KERN_INFO "Switched to high resolution mode on CPU %d\n",
567 smp_processor_id()); 583 smp_processor_id());
584 return 1;
568} 585}
569 586
570#else 587#else
571 588
572static inline int hrtimer_hres_active(void) { return 0; } 589static inline int hrtimer_hres_active(void) { return 0; }
573static inline int hrtimer_is_hres_enabled(void) { return 0; } 590static inline int hrtimer_is_hres_enabled(void) { return 0; }
574static inline void hrtimer_switch_to_hres(void) { } 591static inline int hrtimer_switch_to_hres(void) { return 0; }
575static inline void hrtimer_force_reprogram(struct hrtimer_cpu_base *base) { } 592static inline void hrtimer_force_reprogram(struct hrtimer_cpu_base *base) { }
576static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer, 593static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
577 struct hrtimer_clock_base *base) 594 struct hrtimer_clock_base *base)
@@ -643,6 +660,12 @@ hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
643 orun++; 660 orun++;
644 } 661 }
645 timer->expires = ktime_add(timer->expires, interval); 662 timer->expires = ktime_add(timer->expires, interval);
663 /*
664 * Make sure, that the result did not wrap with a very large
665 * interval.
666 */
667 if (timer->expires.tv64 < 0)
668 timer->expires = ktime_set(KTIME_SEC_MAX, 0);
646 669
647 return orun; 670 return orun;
648} 671}
@@ -806,7 +829,12 @@ hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
806 829
807 timer_stats_hrtimer_set_start_info(timer); 830 timer_stats_hrtimer_set_start_info(timer);
808 831
809 enqueue_hrtimer(timer, new_base, base == new_base); 832 /*
833 * Only allow reprogramming if the new base is on this CPU.
834 * (it might still be on another CPU if the timer was pending)
835 */
836 enqueue_hrtimer(timer, new_base,
837 new_base->cpu_base == &__get_cpu_var(hrtimer_bases));
810 838
811 unlock_hrtimer_base(timer, &flags); 839 unlock_hrtimer_base(timer, &flags);
812 840
@@ -1130,6 +1158,9 @@ static inline void run_hrtimer_queue(struct hrtimer_cpu_base *cpu_base,
1130 if (base->softirq_time.tv64 <= timer->expires.tv64) 1158 if (base->softirq_time.tv64 <= timer->expires.tv64)
1131 break; 1159 break;
1132 1160
1161#ifdef CONFIG_HIGH_RES_TIMERS
1162 WARN_ON_ONCE(timer->cb_mode == HRTIMER_CB_IRQSAFE_NO_SOFTIRQ);
1163#endif
1133 timer_stats_account_hrtimer(timer); 1164 timer_stats_account_hrtimer(timer);
1134 1165
1135 fn = timer->function; 1166 fn = timer->function;
@@ -1173,7 +1204,8 @@ void hrtimer_run_queues(void)
1173 * deadlock vs. xtime_lock. 1204 * deadlock vs. xtime_lock.
1174 */ 1205 */
1175 if (tick_check_oneshot_change(!hrtimer_is_hres_enabled())) 1206 if (tick_check_oneshot_change(!hrtimer_is_hres_enabled()))
1176 hrtimer_switch_to_hres(); 1207 if (hrtimer_switch_to_hres())
1208 return;
1177 1209
1178 hrtimer_get_softirq_time(cpu_base); 1210 hrtimer_get_softirq_time(cpu_base);
1179 1211
@@ -1355,17 +1387,16 @@ static void migrate_hrtimers(int cpu)
1355 tick_cancel_sched_timer(cpu); 1387 tick_cancel_sched_timer(cpu);
1356 1388
1357 local_irq_disable(); 1389 local_irq_disable();
1358 1390 double_spin_lock(&new_base->lock, &old_base->lock,
1359 spin_lock(&new_base->lock); 1391 smp_processor_id() < cpu);
1360 spin_lock(&old_base->lock);
1361 1392
1362 for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) { 1393 for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
1363 migrate_hrtimer_list(&old_base->clock_base[i], 1394 migrate_hrtimer_list(&old_base->clock_base[i],
1364 &new_base->clock_base[i]); 1395 &new_base->clock_base[i]);
1365 } 1396 }
1366 spin_unlock(&old_base->lock);
1367 spin_unlock(&new_base->lock);
1368 1397
1398 double_spin_unlock(&new_base->lock, &old_base->lock,
1399 smp_processor_id() < cpu);
1369 local_irq_enable(); 1400 local_irq_enable();
1370 put_cpu_var(hrtimer_bases); 1401 put_cpu_var(hrtimer_bases);
1371} 1402}