author		Linus Torvalds <torvalds@linux-foundation.org>	2015-02-09 19:33:07 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-02-09 19:33:07 -0500
commit		0ba97bc4b4b054b71cd348dab838a7545a27b893 (patch)
tree		6ceac2634bbf46410ddc015c98689c150e1e8571 /kernel
parent		5b9b28a63f2e47dac5ff3a2503bfe3ade8796aa0 (diff)
parent		4ebbda5251374d532ba8939de4241d769d1420b6 (diff)
Merge branch 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull timer updates from Ingo Molnar:
"The main changes in this cycle were:
- rework hrtimer expiry calculation in hrtimer_interrupt(): the
previous code had a subtle bug where expiry caching would miss an
expiry, resulting in occasional bogus (late) expiry of hrtimers.
- continuing Y2038 fixes
- ktime division optimization
- misc smaller fixes and cleanups"
* 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
hrtimer: Make __hrtimer_get_next_event() static
rtc: Convert rtc_set_ntp_time() to use timespec64
rtc: Remove redundant rtc_valid_tm() from rtc_hctosys()
rtc: Modify rtc_hctosys() to address y2038 issues
rtc: Update rtc-dev to use y2038-safe time interfaces
rtc: Update interface.c to use y2038-safe time interfaces
time: Expose get_monotonic_boottime64 for in-kernel use
time: Expose getboottime64 for in-kernel uses
ktime: Optimize ktime_divns for constant divisors
hrtimer: Prevent stale expiry time in hrtimer_interrupt()
ktime.h: Introduce ktime_ms_delta
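
[Note: the last entry adds a helper in include/linux/ktime.h, which lies outside this kernel/-limited diffstat. By analogy with the existing ktime_us_delta(), ktime_ms_delta() is presumably just a subtract-and-convert wrapper; a sketch of its likely shape:]

	static inline s64 ktime_ms_delta(const ktime_t later, const ktime_t earlier)
	{
		/* difference of two ktime_t values, scaled to milliseconds */
		return ktime_to_ms(ktime_sub(later, earlier));
	}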
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/time/hrtimer.c		112
-rw-r--r--	kernel/time/ntp.c		  4
-rw-r--r--	kernel/time/timekeeping.c	 12
3 files changed, 60 insertions, 68 deletions
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index d8c724cda37b..3f5e183c3d97 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -266,7 +266,7 @@ lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
 /*
  * Divide a ktime value by a nanosecond value
  */
-u64 ktime_divns(const ktime_t kt, s64 div)
+u64 __ktime_divns(const ktime_t kt, s64 div)
 {
 	u64 dclc;
 	int sft = 0;
@@ -282,7 +282,7 @@ u64 ktime_divns(const ktime_t kt, s64 div)
 
 	return dclc;
 }
-EXPORT_SYMBOL_GPL(ktime_divns);
+EXPORT_SYMBOL_GPL(__ktime_divns);
 #endif /* BITS_PER_LONG >= 64 */
 
 /*
@@ -440,6 +440,37 @@ static inline void debug_deactivate(struct hrtimer *timer)
 	trace_hrtimer_cancel(timer);
 }
 
+#if defined(CONFIG_NO_HZ_COMMON) || defined(CONFIG_HIGH_RES_TIMERS)
+static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base)
+{
+	struct hrtimer_clock_base *base = cpu_base->clock_base;
+	ktime_t expires, expires_next = { .tv64 = KTIME_MAX };
+	int i;
+
+	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
+		struct timerqueue_node *next;
+		struct hrtimer *timer;
+
+		next = timerqueue_getnext(&base->active);
+		if (!next)
+			continue;
+
+		timer = container_of(next, struct hrtimer, node);
+		expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
+		if (expires.tv64 < expires_next.tv64)
+			expires_next = expires;
+	}
+	/*
+	 * clock_was_set() might have changed base->offset of any of
+	 * the clock bases so the result might be negative. Fix it up
+	 * to prevent a false positive in clockevents_program_event().
+	 */
+	if (expires_next.tv64 < 0)
+		expires_next.tv64 = 0;
+	return expires_next;
+}
+#endif
+
 /* High resolution timer related functions */
 #ifdef CONFIG_HIGH_RES_TIMERS
 
@@ -488,32 +519,7 @@ static inline int hrtimer_hres_active(void)
 static void
 hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
 {
-	int i;
-	struct hrtimer_clock_base *base = cpu_base->clock_base;
-	ktime_t expires, expires_next;
-
-	expires_next.tv64 = KTIME_MAX;
-
-	for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
-		struct hrtimer *timer;
-		struct timerqueue_node *next;
-
-		next = timerqueue_getnext(&base->active);
-		if (!next)
-			continue;
-		timer = container_of(next, struct hrtimer, node);
-
-		expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
-		/*
-		 * clock_was_set() has changed base->offset so the
-		 * result might be negative. Fix it up to prevent a
-		 * false positive in clockevents_program_event()
-		 */
-		if (expires.tv64 < 0)
-			expires.tv64 = 0;
-		if (expires.tv64 < expires_next.tv64)
-			expires_next = expires;
-	}
+	ktime_t expires_next = __hrtimer_get_next_event(cpu_base);
 
 	if (skip_equal && expires_next.tv64 == cpu_base->expires_next.tv64)
 		return;
@@ -587,6 +593,15 @@ static int hrtimer_reprogram(struct hrtimer *timer,
 		return 0;
 
 	/*
+	 * When the target cpu of the timer is currently executing
+	 * hrtimer_interrupt(), then we do not touch the clock event
+	 * device. hrtimer_interrupt() will reevaluate all clock bases
+	 * before reprogramming the device.
+	 */
+	if (cpu_base->in_hrtirq)
+		return 0;
+
+	/*
 	 * If a hang was detected in the last timer interrupt then we
 	 * do not schedule a timer which is earlier than the expiry
 	 * which we enforced in the hang detection. We want the system
@@ -1104,29 +1119,14 @@ EXPORT_SYMBOL_GPL(hrtimer_get_remaining);
 ktime_t hrtimer_get_next_event(void)
 {
 	struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
-	struct hrtimer_clock_base *base = cpu_base->clock_base;
-	ktime_t delta, mindelta = { .tv64 = KTIME_MAX };
+	ktime_t mindelta = { .tv64 = KTIME_MAX };
 	unsigned long flags;
-	int i;
 
 	raw_spin_lock_irqsave(&cpu_base->lock, flags);
 
-	if (!hrtimer_hres_active()) {
-		for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
-			struct hrtimer *timer;
-			struct timerqueue_node *next;
-
-			next = timerqueue_getnext(&base->active);
-			if (!next)
-				continue;
-
-			timer = container_of(next, struct hrtimer, node);
-			delta.tv64 = hrtimer_get_expires_tv64(timer);
-			delta = ktime_sub(delta, base->get_time());
-			if (delta.tv64 < mindelta.tv64)
-				mindelta.tv64 = delta.tv64;
-		}
-	}
+	if (!hrtimer_hres_active())
+		mindelta = ktime_sub(__hrtimer_get_next_event(cpu_base),
+				     ktime_get());
 
 	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
 
@@ -1253,7 +1253,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 	raw_spin_lock(&cpu_base->lock);
 	entry_time = now = hrtimer_update_base(cpu_base);
 retry:
-	expires_next.tv64 = KTIME_MAX;
+	cpu_base->in_hrtirq = 1;
 	/*
 	 * We set expires_next to KTIME_MAX here with cpu_base->lock
 	 * held to prevent that a timer is enqueued in our queue via
@@ -1291,28 +1291,20 @@ retry:
 			 * are right-of a not yet expired timer, because that
 			 * timer will have to trigger a wakeup anyway.
 			 */
-
-			if (basenow.tv64 < hrtimer_get_softexpires_tv64(timer)) {
-				ktime_t expires;
-
-				expires = ktime_sub(hrtimer_get_expires(timer),
-						    base->offset);
-				if (expires.tv64 < 0)
-					expires.tv64 = KTIME_MAX;
-				if (expires.tv64 < expires_next.tv64)
-					expires_next = expires;
+			if (basenow.tv64 < hrtimer_get_softexpires_tv64(timer))
 				break;
-			}
 
 			__run_hrtimer(timer, &basenow);
 		}
 	}
-
+	/* Reevaluate the clock bases for the next expiry */
+	expires_next = __hrtimer_get_next_event(cpu_base);
 	/*
 	 * Store the new expiry value so the migration code can verify
 	 * against it.
 	 */
 	cpu_base->expires_next = expires_next;
+	cpu_base->in_hrtirq = 0;
 	raw_spin_unlock(&cpu_base->lock);
 
 	/* Reprogramming necessary ? */
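
[Note: the ktime_divns() -> __ktime_divns() rename above is only the kernel/ half of "ktime: Optimize ktime_divns for constant divisors"; the other half lives in include/linux/ktime.h, outside this diffstat. The idea is that a compile-time-constant divisor fitting in 32 bits can be divided inline via do_div(), while everything else still falls back to the out-of-line slow path. A hedged sketch of what that wrapper plausibly looks like, not the verbatim upstream hunk:]

	#if BITS_PER_LONG < 64
	extern u64 __ktime_divns(const ktime_t kt, s64 div);

	static inline u64 ktime_divns(const ktime_t kt, s64 div)
	{
		/*
		 * For a divisor that is a compile-time constant fitting
		 * in 32 bits, do_div() lets the compiler emit a cheap
		 * 64/32 division (or strength-reduce it away entirely)
		 * instead of calling the shift-and-subtract loop.
		 */
		if (__builtin_constant_p(div) && !(div >> 32)) {
			u64 ns = kt.tv64;

			do_div(ns, div);	/* divides ns in place */
			return ns;
		}
		return __ktime_divns(kt, div);
	}
	#else
	/* On 64-bit the hardware divider handles any divisor directly. */
	# define ktime_divns(kt, div)	(u64)((kt).tv64 / (div))
	#endif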
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index 28bf91c60a0b..4b585e0fdd22 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -488,13 +488,13 @@ static void sync_cmos_clock(struct work_struct *work)
 
 	getnstimeofday64(&now);
 	if (abs(now.tv_nsec - (NSEC_PER_SEC / 2)) <= tick_nsec * 5) {
-		struct timespec adjust = timespec64_to_timespec(now);
+		struct timespec64 adjust = now;
 
 		fail = -ENODEV;
 		if (persistent_clock_is_local)
 			adjust.tv_sec -= (sys_tz.tz_minuteswest * 60);
 #ifdef CONFIG_GENERIC_CMOS_UPDATE
-		fail = update_persistent_clock(adjust);
+		fail = update_persistent_clock(timespec64_to_timespec(adjust));
 #endif
 #ifdef CONFIG_RTC_SYSTOHC
 		if (fail == -ENODEV)
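
[Note: this hunk shows the recurring y2038 pattern in the series: do the arithmetic in struct timespec64 and narrow to the legacy struct timespec only at the boundary of an interface that still takes it (update_persistent_clock() here). On 64-bit builds the two types coincide; on 32-bit the conversion helper in include/linux/time64.h amounts to a truncating copy, roughly as sketched:]

	static inline struct timespec timespec64_to_timespec(const struct timespec64 ts64)
	{
		struct timespec ret;

		/* tv_sec narrows from 64-bit time64_t to 32-bit time_t here */
		ret.tv_sec = (time_t)ts64.tv_sec;
		ret.tv_nsec = ts64.tv_nsec;
		return ret;
	}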
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 6a931852082f..b124af259800 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -1659,24 +1659,24 @@ out:
 }
 
 /**
- * getboottime - Return the real time of system boot.
- * @ts:		pointer to the timespec to be set
+ * getboottime64 - Return the real time of system boot.
+ * @ts:		pointer to the timespec64 to be set
  *
- * Returns the wall-time of boot in a timespec.
+ * Returns the wall-time of boot in a timespec64.
  *
  * This is based on the wall_to_monotonic offset and the total suspend
  * time. Calls to settimeofday will affect the value returned (which
  * basically means that however wrong your real time clock is at boot time,
  * you get the right time here).
  */
-void getboottime(struct timespec *ts)
+void getboottime64(struct timespec64 *ts)
 {
 	struct timekeeper *tk = &tk_core.timekeeper;
 	ktime_t t = ktime_sub(tk->offs_real, tk->offs_boot);
 
-	*ts = ktime_to_timespec(t);
+	*ts = ktime_to_timespec64(t);
 }
-EXPORT_SYMBOL_GPL(getboottime);
+EXPORT_SYMBOL_GPL(getboottime64);
 
 unsigned long get_seconds(void)
 {
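
[Note: with getboottime64() exported, in-kernel users can obtain the boot wall-time without going through the 32-bit-unsafe struct timespec. A minimal, hypothetical caller, illustrative only and not part of this merge:]

	#include <linux/timekeeping.h>
	#include <linux/printk.h>

	static void report_boot_time(void)
	{
		struct timespec64 boot;

		getboottime64(&boot);	/* y2038-safe wall-clock time of boot */
		pr_info("system booted at %lld.%09ld\n",
			(long long)boot.tv_sec, boot.tv_nsec);
	}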