Diffstat (limited to 'kernel/time/hrtimer.c')
 kernel/time/hrtimer.c | 52 ++++++++++++++++++++++++++--------------------------
 1 file changed, 26 insertions(+), 26 deletions(-)
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 161e340395d5..c7f780113884 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -171,7 +171,7 @@ hrtimer_check_target(struct hrtimer *timer, struct hrtimer_clock_base *new_base)
 		return 0;
 
 	expires = ktime_sub(hrtimer_get_expires(timer), new_base->offset);
-	return expires.tv64 <= new_base->cpu_base->expires_next.tv64;
+	return expires <= new_base->cpu_base->expires_next;
 #else
 	return 0;
 #endif
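
Every hunk in this diff is the same mechanical conversion: ktime_t used to be a union wrapping a single s64, so all arithmetic and comparisons had to go through the .tv64 member; once it becomes a plain s64 typedef, the scalar operators apply directly. A simplified sketch of the before/after definitions (condensed from include/linux/ktime.h):

	/* Before: opaque union, accessed via .tv64 */
	union ktime {
		s64	tv64;
	};
	typedef union ktime ktime_t;

	/* After: plain signed 64-bit nanosecond count */
	typedef s64 ktime_t;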
@@ -313,7 +313,7 @@ ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs)
 	 * We use KTIME_SEC_MAX here, the maximum timeout which we can
 	 * return to user space in a timespec:
 	 */
-	if (res.tv64 < 0 || res.tv64 < lhs.tv64 || res.tv64 < rhs.tv64)
+	if (res < 0 || res < lhs || res < rhs)
 		res = ktime_set(KTIME_SEC_MAX, 0);
 
 	return res;
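
For context, this guard saturates instead of wrapping: when the sum of two non-negative times comes out negative or smaller than either operand, the addition overflowed, and the result is clamped to KTIME_SEC_MAX seconds, the largest value that still converts safely to a user-space timespec. A reconstruction (not part of this diff) of how the surrounding function reads after the change, assuming ktime_add_unsafe() as the wrapping helper it guards:

	ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs)
	{
		ktime_t res = ktime_add_unsafe(lhs, rhs);

		/*
		 * We use KTIME_SEC_MAX here, the maximum timeout which we can
		 * return to user space in a timespec:
		 */
		if (res < 0 || res < lhs || res < rhs)
			res = ktime_set(KTIME_SEC_MAX, 0);

		return res;
	}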
@@ -465,8 +465,8 @@ static inline void hrtimer_update_next_timer(struct hrtimer_cpu_base *cpu_base,
 static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base)
 {
 	struct hrtimer_clock_base *base = cpu_base->clock_base;
-	ktime_t expires, expires_next = { .tv64 = KTIME_MAX };
 	unsigned int active = cpu_base->active_bases;
+	ktime_t expires, expires_next = KTIME_MAX;
 
 	hrtimer_update_next_timer(cpu_base, NULL);
 	for (; active; base++, active >>= 1) {
@@ -479,7 +479,7 @@ static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base)
 		next = timerqueue_getnext(&base->active);
 		timer = container_of(next, struct hrtimer, node);
 		expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
-		if (expires.tv64 < expires_next.tv64) {
+		if (expires < expires_next) {
 			expires_next = expires;
 			hrtimer_update_next_timer(cpu_base, timer);
 		}
@@ -489,8 +489,8 @@ static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base)
 	 * the clock bases so the result might be negative. Fix it up
 	 * to prevent a false positive in clockevents_program_event().
 	 */
-	if (expires_next.tv64 < 0)
-		expires_next.tv64 = 0;
+	if (expires_next < 0)
+		expires_next = 0;
 	return expires_next;
 }
 #endif
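
__hrtimer_get_next_event() is a min-reduction over the active clock bases, seeded with the KTIME_MAX "nothing pending" sentinel. With the scalar type that idiom needs no member access at all; a hypothetical stand-alone sketch (userspace types, not kernel code):

	#include <stdint.h>

	typedef int64_t ktime_t;
	#define KTIME_MAX INT64_MAX

	/* Return the earliest of n expiry times, or KTIME_MAX if n == 0. */
	static ktime_t earliest(const ktime_t *expiry, int n)
	{
		ktime_t next = KTIME_MAX;

		for (int i = 0; i < n; i++)
			if (expiry[i] < next)	/* plain s64 compare, no .tv64 */
				next = expiry[i];
		return next;
	}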
@@ -561,10 +561,10 @@ hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
 
 	expires_next = __hrtimer_get_next_event(cpu_base);
 
-	if (skip_equal && expires_next.tv64 == cpu_base->expires_next.tv64)
+	if (skip_equal && expires_next == cpu_base->expires_next)
 		return;
 
-	cpu_base->expires_next.tv64 = expires_next.tv64;
+	cpu_base->expires_next = expires_next;
 
 	/*
 	 * If a hang was detected in the last timer interrupt then we
@@ -622,10 +622,10 @@ static void hrtimer_reprogram(struct hrtimer *timer,
 	 * CLOCK_REALTIME timer might be requested with an absolute
 	 * expiry time which is less than base->offset. Set it to 0.
 	 */
-	if (expires.tv64 < 0)
-		expires.tv64 = 0;
+	if (expires < 0)
+		expires = 0;
 
-	if (expires.tv64 >= cpu_base->expires_next.tv64)
+	if (expires >= cpu_base->expires_next)
 		return;
 
 	/* Update the pointer to the next expiring timer */
@@ -653,7 +653,7 @@ static void hrtimer_reprogram(struct hrtimer *timer,
  */
 static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base)
 {
-	base->expires_next.tv64 = KTIME_MAX;
+	base->expires_next = KTIME_MAX;
 	base->hres_active = 0;
 }
 
@@ -827,21 +827,21 @@ u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
 
 	delta = ktime_sub(now, hrtimer_get_expires(timer));
 
-	if (delta.tv64 < 0)
+	if (delta < 0)
 		return 0;
 
 	if (WARN_ON(timer->state & HRTIMER_STATE_ENQUEUED))
 		return 0;
 
-	if (interval.tv64 < hrtimer_resolution)
-		interval.tv64 = hrtimer_resolution;
+	if (interval < hrtimer_resolution)
+		interval = hrtimer_resolution;
 
-	if (unlikely(delta.tv64 >= interval.tv64)) {
+	if (unlikely(delta >= interval)) {
 		s64 incr = ktime_to_ns(interval);
 
 		orun = ktime_divns(delta, incr);
 		hrtimer_add_expires_ns(timer, incr * orun);
-		if (hrtimer_get_expires_tv64(timer) > now.tv64)
+		if (hrtimer_get_expires_tv64(timer) > now)
 			return orun;
 		/*
 		 * This (and the ktime_add() below) is the
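
To make the overrun arithmetic concrete, a hypothetical worked example with made-up numbers: if a 100 ms periodic timer is serviced 1.05 s late, ktime_divns() yields orun = 10 and the expiry advances by ten intervals (1.0 s), which still lies before now, so the code just past this hunk adds one final interval and returns orun + 1 = 11:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		const int64_t NSEC_PER_MSEC = 1000000;
		int64_t delta = 1050 * NSEC_PER_MSEC;	/* how late we are  */
		int64_t incr  =  100 * NSEC_PER_MSEC;	/* timer interval   */
		uint64_t orun = delta / incr;		/* 10 whole periods */

		/* expiry += orun * incr leaves it 50 ms in the past, so
		 * hrtimer_forward() adds one more interval before returning.
		 */
		printf("missed periods: %llu, returned overrun: %llu\n",
		       (unsigned long long)orun, (unsigned long long)orun + 1);
		return 0;
	}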
@@ -1104,7 +1104,7 @@ u64 hrtimer_get_next_event(void)
 	raw_spin_lock_irqsave(&cpu_base->lock, flags);
 
 	if (!__hrtimer_hres_active(cpu_base))
-		expires = __hrtimer_get_next_event(cpu_base).tv64;
+		expires = __hrtimer_get_next_event(cpu_base);
 
 	raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
 
@@ -1296,7 +1296,7 @@ static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now)
 		 * are right-of a not yet expired timer, because that
 		 * timer will have to trigger a wakeup anyway.
 		 */
-		if (basenow.tv64 < hrtimer_get_softexpires_tv64(timer))
+		if (basenow < hrtimer_get_softexpires_tv64(timer))
 			break;
 
 		__run_hrtimer(cpu_base, base, timer, &basenow);
@@ -1318,7 +1318,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
 
 	BUG_ON(!cpu_base->hres_active);
 	cpu_base->nr_events++;
-	dev->next_event.tv64 = KTIME_MAX;
+	dev->next_event = KTIME_MAX;
 
 	raw_spin_lock(&cpu_base->lock);
 	entry_time = now = hrtimer_update_base(cpu_base);
@@ -1331,7 +1331,7 @@ retry:
 	 * timers which run their callback and need to be requeued on
 	 * this CPU.
 	 */
-	cpu_base->expires_next.tv64 = KTIME_MAX;
+	cpu_base->expires_next = KTIME_MAX;
 
 	__hrtimer_run_queues(cpu_base, now);
 
@@ -1379,13 +1379,13 @@ retry:
 	cpu_base->hang_detected = 1;
 	raw_spin_unlock(&cpu_base->lock);
 	delta = ktime_sub(now, entry_time);
-	if ((unsigned int)delta.tv64 > cpu_base->max_hang_time)
-		cpu_base->max_hang_time = (unsigned int) delta.tv64;
+	if ((unsigned int)delta > cpu_base->max_hang_time)
+		cpu_base->max_hang_time = (unsigned int) delta;
 	/*
 	 * Limit it to a sensible value as we enforce a longer
 	 * delay. Give the CPU at least 100ms to catch up.
 	 */
-	if (delta.tv64 > 100 * NSEC_PER_MSEC)
+	if (delta > 100 * NSEC_PER_MSEC)
 		expires_next = ktime_add_ns(now, 100 * NSEC_PER_MSEC);
 	else
 		expires_next = ktime_add(now, delta);
@@ -1495,7 +1495,7 @@ static int update_rmtp(struct hrtimer *timer, struct timespec __user *rmtp)
 	ktime_t rem;
 
 	rem = hrtimer_expires_remaining(timer);
-	if (rem.tv64 <= 0)
+	if (rem <= 0)
 		return 0;
 	rmt = ktime_to_timespec(rem);
 
@@ -1693,7 +1693,7 @@ schedule_hrtimeout_range_clock(ktime_t *expires, u64 delta,
 	 * Optimize when a zero timeout value is given. It does not
 	 * matter whether this is an absolute or a relative time.
 	 */
-	if (expires && !expires->tv64) {
+	if (expires && *expires == 0) {
 		__set_current_state(TASK_RUNNING);
 		return 0;
 	}
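
The zero-timeout fast path now tests the dereferenced scalar rather than the .tv64 member. A minimal hedged sketch of a caller hitting that path (signature taken from the hunk header; error handling and the normal sleep path are omitted, and the mode/clock arguments here are illustrative):

	ktime_t timeout = 0;

	/* Returns 0 immediately: a zero expiry never sleeps,
	 * whether the mode is absolute or relative.
	 */
	schedule_hrtimeout_range_clock(&timeout, 0, HRTIMER_MODE_REL,
				       CLOCK_MONOTONIC);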