Diffstat (limited to 'kernel/time')
-rw-r--r--  kernel/time/Makefile         |  2
-rw-r--r--  kernel/time/alarmtimer.c     | 20
-rw-r--r--  kernel/time/clocksource.c    | 62
-rw-r--r--  kernel/time/tick-broadcast.c |  2
-rw-r--r--  kernel/time/timekeeping.c    | 39
5 files changed, 105 insertions(+), 20 deletions(-)
diff --git a/kernel/time/Makefile b/kernel/time/Makefile
index e2fd74b8e8c..cae2ad7491b 100644
--- a/kernel/time/Makefile
+++ b/kernel/time/Makefile
@@ -1,5 +1,5 @@
 obj-y += timekeeping.o ntp.o clocksource.o jiffies.o timer_list.o timecompare.o
-obj-y += timeconv.o posix-clock.o alarmtimer.o
+obj-y += timeconv.o posix-clock.o #alarmtimer.o
 
 obj-$(CONFIG_GENERIC_CLOCKEVENTS_BUILD) += clockevents.o
 obj-$(CONFIG_GENERIC_CLOCKEVENTS) += tick-common.o
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index 59f369f98a0..8b70c76910a 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -181,7 +181,7 @@ static enum hrtimer_restart alarmtimer_fired(struct hrtimer *timer)
 		struct alarm *alarm;
 		ktime_t expired = next->expires;
 
-		if (expired.tv64 >= now.tv64)
+		if (expired.tv64 > now.tv64)
 			break;
 
 		alarm = container_of(next, struct alarm, node);
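The comparison flip above is easy to misread: with ">=", an alarm whose expiry equals the current time was left queued until the next hrtimer event; with ">", it fires in the current pass. A standalone sketch of the two predicates, using illustrative values that are not from the patch:

    /* Sketch: old vs. new expiry test when an alarm expires exactly
     * "now". The int64_t values stand in for ktime_t.tv64. */
    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        int64_t now = 1000000, expired = 1000000;   /* same instant */

        /* old test: an alarm expiring exactly now is NOT processed */
        if (expired >= now)
            printf("old: break early, alarm deferred\n");

        /* new test: an alarm expiring exactly now IS processed */
        if (expired > now)
            printf("new: break early\n");
        else
            printf("new: alarm fires in this pass\n");
        return 0;
    }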
@@ -441,6 +441,8 @@ static int alarm_timer_create(struct k_itimer *new_timer)
 static void alarm_timer_get(struct k_itimer *timr,
 				struct itimerspec *cur_setting)
 {
+	memset(cur_setting, 0, sizeof(struct itimerspec));
+
 	cur_setting->it_interval =
 		ktime_to_timespec(timr->it.alarmtimer.period);
 	cur_setting->it_value =
@@ -479,11 +481,17 @@ static int alarm_timer_set(struct k_itimer *timr, int flags,
 	if (!rtcdev)
 		return -ENOTSUPP;
 
-	/* Save old values */
-	old_setting->it_interval =
-		ktime_to_timespec(timr->it.alarmtimer.period);
-	old_setting->it_value =
-		ktime_to_timespec(timr->it.alarmtimer.node.expires);
+	/*
+	 * XXX HACK! Currently we can DOS a system if the interval
+	 * period on alarmtimers is too small. Cap the interval here
+	 * to 100us and solve this properly in a future patch! -jstultz
+	 */
+	if ((new_setting->it_interval.tv_sec == 0) &&
+			(new_setting->it_interval.tv_nsec < 100000))
+		new_setting->it_interval.tv_nsec = 100000;
+
+	if (old_setting)
+		alarm_timer_get(timr, old_setting);
 
 	/* If the timer was already set, cancel it */
 	alarm_cancel(&timr->it.alarmtimer);
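For context on the cap: the interval arrives straight from user space via timer_settime(). A hedged user-space sketch of the pattern the 100us floor defends against; CLOCK_REALTIME_ALARM, the signal setup, linking with -lrt, and having permission to create alarm timers are assumptions about the caller's environment, not part of the patch:

    /* Sketch: arm an alarm timer with a 1 ns interval. Without the cap
     * above, the kernel re-arms and expires the hrtimer as fast as it
     * can service it; with the cap, it refires at most every 100 us. */
    #include <signal.h>
    #include <time.h>
    #include <unistd.h>

    int main(void)
    {
        timer_t id;
        struct sigevent sev = {
            .sigev_notify = SIGEV_SIGNAL,
            .sigev_signo  = SIGALRM,
        };
        struct itimerspec its = {
            .it_value    = { .tv_sec = 0, .tv_nsec = 1 },
            .it_interval = { .tv_sec = 0, .tv_nsec = 1 },
        };

        signal(SIGALRM, SIG_IGN);   /* keep the process alive on expiry */
        if (timer_create(CLOCK_REALTIME_ALARM, &sev, &id) == 0)
            timer_settime(id, 0, &its, NULL);
        for (;;)
            pause();
    }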
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index e0980f0d9a0..8f77da18fef 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -494,6 +494,22 @@ void clocksource_touch_watchdog(void)
 }
 
 /**
+ * clocksource_max_adjustment - Returns max adjustment amount
+ * @cs: Pointer to clocksource
+ *
+ */
+static u32 clocksource_max_adjustment(struct clocksource *cs)
+{
+	u64 ret;
+	/*
+	 * We won't try to correct for more than 11% adjustments (110,000 ppm).
+	 */
+	ret = (u64)cs->mult * 11;
+	do_div(ret, 100);
+	return (u32)ret;
+}
+
+/**
  * clocksource_max_deferment - Returns max time the clocksource can be deferred
  * @cs: Pointer to clocksource
  *
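A worked instance of the helper just added, using an assumed shift-24 style mult purely for illustration (do_div() is replaced by plain division since this runs in user space):

    /* Worked example of the 11% cap computed by
     * clocksource_max_adjustment() for an assumed mult. */
    #include <stdio.h>
    #include <stdint.h>

    static uint32_t max_adjustment(uint32_t mult)
    {
        uint64_t ret = (uint64_t)mult * 11;   /* same math as the patch */
        ret /= 100;                           /* do_div(ret, 100) in-kernel */
        return (uint32_t)ret;
    }

    int main(void)
    {
        uint32_t mult = 0x01000000;   /* 16777216, an assumed value */

        printf("maxadj = %u (%.1f%% of mult)\n", max_adjustment(mult),
               100.0 * max_adjustment(mult) / mult);
        return 0;   /* prints: maxadj = 1845493 (11.0% of mult) */
    }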
@@ -505,25 +521,28 @@ static u64 clocksource_max_deferment(struct clocksource *cs)
 	/*
 	 * Calculate the maximum number of cycles that we can pass to the
 	 * cyc2ns function without overflowing a 64-bit signed result. The
-	 * maximum number of cycles is equal to ULLONG_MAX/cs->mult which
-	 * is equivalent to the below.
-	 * max_cycles < (2^63)/cs->mult
-	 * max_cycles < 2^(log2((2^63)/cs->mult))
-	 * max_cycles < 2^(log2(2^63) - log2(cs->mult))
-	 * max_cycles < 2^(63 - log2(cs->mult))
-	 * max_cycles < 1 << (63 - log2(cs->mult))
+	 * maximum number of cycles is equal to ULLONG_MAX/(cs->mult+cs->maxadj)
+	 * which is equivalent to the below.
+	 * max_cycles < (2^63)/(cs->mult + cs->maxadj)
+	 * max_cycles < 2^(log2((2^63)/(cs->mult + cs->maxadj)))
+	 * max_cycles < 2^(log2(2^63) - log2(cs->mult + cs->maxadj))
+	 * max_cycles < 2^(63 - log2(cs->mult + cs->maxadj))
+	 * max_cycles < 1 << (63 - log2(cs->mult + cs->maxadj))
 	 * Please note that we add 1 to the result of the log2 to account for
 	 * any rounding errors, ensure the above inequality is satisfied and
 	 * no overflow will occur.
 	 */
-	max_cycles = 1ULL << (63 - (ilog2(cs->mult) + 1));
+	max_cycles = 1ULL << (63 - (ilog2(cs->mult + cs->maxadj) + 1));
 
 	/*
 	 * The actual maximum number of cycles we can defer the clocksource is
 	 * determined by the minimum of max_cycles and cs->mask.
+	 * Note: Here we subtract the maxadj to make sure we don't sleep for
+	 * too long if there's a large negative adjustment.
 	 */
 	max_cycles = min_t(u64, max_cycles, (u64) cs->mask);
-	max_nsecs = clocksource_cyc2ns(max_cycles, cs->mult, cs->shift);
+	max_nsecs = clocksource_cyc2ns(max_cycles, cs->mult - cs->maxadj,
+					cs->shift);
 
 	/*
 	 * To ensure that the clocksource does not wrap whilst we are idle,
@@ -531,7 +550,7 @@ static u64 clocksource_max_deferment(struct clocksource *cs)
 	 * note a margin of 12.5% is used because this can be computed with
 	 * a shift, versus say 10% which would require division.
 	 */
-	return max_nsecs - (max_nsecs >> 5);
+	return max_nsecs - (max_nsecs >> 3);
 }
 
 #ifndef CONFIG_ARCH_USES_GETTIMEOFFSET
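This one-character change is a genuine fix: the comment above it already promised a 12.5% safety margin, but >> 5 only reserved 3.125%. A quick standalone check of the shift arithmetic, with an assumed max_nsecs:

    /* x - (x >> 3) keeps 87.5% of x (a 12.5% margin); the old
     * x - (x >> 5) kept 96.875% (only a 3.125% margin). */
    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t max_nsecs = 1000000000000ULL;   /* assumed: 1000 s in ns */

        printf("new: defer up to %llu ns (87.5%% of max)\n",
               (unsigned long long)(max_nsecs - (max_nsecs >> 3)));
        printf("old: defer up to %llu ns (96.875%% of max)\n",
               (unsigned long long)(max_nsecs - (max_nsecs >> 5)));
        return 0;
    }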
@@ -642,7 +661,6 @@ static void clocksource_enqueue(struct clocksource *cs)
 void __clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq)
 {
 	u64 sec;
-
 	/*
 	 * Calc the maximum number of seconds which we can run before
 	 * wrapping around. For clocksources which have a mask > 32bit
@@ -653,7 +671,7 @@ void __clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq)
 	 * ~ 0.06ppm granularity for NTP. We apply the same 12.5%
 	 * margin as we do in clocksource_max_deferment()
 	 */
-	sec = (cs->mask - (cs->mask >> 5));
+	sec = (cs->mask - (cs->mask >> 3));
 	do_div(sec, freq);
 	do_div(sec, scale);
 	if (!sec)
@@ -663,6 +681,20 @@ void __clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq)
 
 	clocks_calc_mult_shift(&cs->mult, &cs->shift, freq,
 			       NSEC_PER_SEC / scale, sec * scale);
+
+	/*
+	 * For clocksources that have large mults, guard against overflow.
+	 * Since mult may be adjusted by ntp, add an extra safety margin.
+	 *
+	 */
+	cs->maxadj = clocksource_max_adjustment(cs);
+	while ((cs->mult + cs->maxadj < cs->mult)
+		|| (cs->mult - cs->maxadj > cs->mult)) {
+		cs->mult >>= 1;
+		cs->shift--;
+		cs->maxadj = clocksource_max_adjustment(cs);
+	}
+
 	cs->max_idle_ns = clocksource_max_deferment(cs);
 }
 EXPORT_SYMBOL_GPL(__clocksource_updatefreq_scale);
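The loop conditions are idiomatic unsigned-wrap tests: for u32 values, a + b < a holds exactly when the addition overflowed, and a - b > a exactly when the subtraction underflowed (only possible here if maxadj exceeded mult). Halving mult while decrementing shift preserves the mult/2^shift ratio. A standalone trace under an assumed, deliberately large mult:

    /* Trace of the overflow-guard loop with an assumed oversized mult. */
    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t mult = 4000000000u;                  /* assumed, near U32_MAX */
        uint32_t maxadj = (uint64_t)mult * 11 / 100;  /* the 11% cap */
        int shift = 24;                               /* assumed starting shift */

        while (mult + maxadj < mult || mult - maxadj > mult) {
            mult >>= 1;     /* halve mult ...                    */
            shift--;        /* ... and keep mult/2^shift constant */
            maxadj = (uint64_t)mult * 11 / 100;
        }
        /* prints: settled at mult=2000000000 shift=23 maxadj=220000000 */
        printf("settled at mult=%u shift=%d maxadj=%u\n", mult, shift, maxadj);
        return 0;
    }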
@@ -703,6 +735,12 @@ EXPORT_SYMBOL_GPL(__clocksource_register_scale);
  */
 int clocksource_register(struct clocksource *cs)
 {
+	/* calculate max adjustment for given mult/shift */
+	cs->maxadj = clocksource_max_adjustment(cs);
+	WARN_ONCE(cs->mult + cs->maxadj < cs->mult,
+		"Clocksource %s might overflow on 11%% adjustment\n",
+		cs->name);
+
 	/* calculate max idle time permitted for this clocksource */
 	cs->max_idle_ns = clocksource_max_deferment(cs);
 
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index c7218d13273..7a90d021b79 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -71,7 +71,7 @@ int tick_check_broadcast_device(struct clock_event_device *dev)
 	    (dev->features & CLOCK_EVT_FEAT_C3STOP))
 		return 0;
 
-	clockevents_exchange_device(NULL, dev);
+	clockevents_exchange_device(tick_broadcast_device.evtdev, dev);
 	tick_broadcast_device.evtdev = dev;
 	if (!cpumask_empty(tick_get_broadcast_mask()))
 		tick_broadcast_start_periodic(dev);
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 342408cf68d..6f9798bf240 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -249,6 +249,8 @@ ktime_t ktime_get(void)
 		secs = xtime.tv_sec + wall_to_monotonic.tv_sec;
 		nsecs = xtime.tv_nsec + wall_to_monotonic.tv_nsec;
 		nsecs += timekeeping_get_ns();
+		/* If arch requires, add in gettimeoffset() */
+		nsecs += arch_gettimeoffset();
 
 	} while (read_seqretry(&xtime_lock, seq));
 	/*
@@ -280,6 +282,8 @@ void ktime_get_ts(struct timespec *ts)
 		*ts = xtime;
 		tomono = wall_to_monotonic;
 		nsecs = timekeeping_get_ns();
+		/* If arch requires, add in gettimeoffset() */
+		nsecs += arch_gettimeoffset();
 
 	} while (read_seqretry(&xtime_lock, seq));
 
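Both hunks add the arch offset inside the existing seqlock read loop, which is what makes the addition safe: if the timekeeper writes concurrently, read_seqretry() forces the entire sample, now including the offset, to be retaken together. A condensed sketch of the pattern as used by the surrounding code (kernel-only APIs, not compilable standalone):

    unsigned long seq;
    s64 secs, nsecs;

    do {
        seq = read_seqbegin(&xtime_lock);      /* snapshot begins */
        secs  = xtime.tv_sec + wall_to_monotonic.tv_sec;
        nsecs = xtime.tv_nsec + wall_to_monotonic.tv_nsec;
        nsecs += timekeeping_get_ns();
        nsecs += arch_gettimeoffset();          /* new: covered by the same
                                                 * retry guarantee */
    } while (read_seqretry(&xtime_lock, seq));  /* retry if a writer raced */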
@@ -604,6 +608,12 @@ static struct timespec timekeeping_suspend_time;
  */
 static void __timekeeping_inject_sleeptime(struct timespec *delta)
 {
+	if (!timespec_valid(delta)) {
+		printk(KERN_WARNING "__timekeeping_inject_sleeptime: Invalid "
+					"sleep delta value!\n");
+		return;
+	}
+
 	xtime = timespec_add(xtime, *delta);
 	wall_to_monotonic = timespec_sub(wall_to_monotonic, *delta);
 	total_sleep_time = timespec_add(total_sleep_time, *delta);
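The guard rejects garbage deltas that a misbehaving RTC driver could hand back across suspend. For reference, timespec_valid() amounts to the following check; this is a sketch of the include/linux/time.h definition of this era, so consult the tree for the authoritative version:

    static inline bool timespec_valid(const struct timespec *ts)
    {
        /* Dates before 1970 are bogus */
        if (ts->tv_sec < 0)
            return false;
        /* Can't have more nanoseconds than a second */
        if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
            return false;
        return true;
    }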
@@ -686,12 +696,34 @@ static void timekeeping_resume(void)
 static int timekeeping_suspend(void)
 {
 	unsigned long flags;
+	struct timespec		delta, delta_delta;
+	static struct timespec	old_delta;
 
 	read_persistent_clock(&timekeeping_suspend_time);
 
 	write_seqlock_irqsave(&xtime_lock, flags);
 	timekeeping_forward_now();
 	timekeeping_suspended = 1;
+
+	/*
+	 * To avoid drift caused by repeated suspend/resumes,
+	 * which each can add ~1 second drift error,
+	 * try to compensate so the difference in system time
+	 * and persistent_clock time stays close to constant.
+	 */
+	delta = timespec_sub(xtime, timekeeping_suspend_time);
+	delta_delta = timespec_sub(delta, old_delta);
+	if (abs(delta_delta.tv_sec) >= 2) {
+		/*
+		 * if delta_delta is too large, assume time correction
+		 * has occurred and set old_delta to the current delta.
+		 */
+		old_delta = delta;
+	} else {
+		/* Otherwise adjust the suspend timestamp to compensate */
+		timekeeping_suspend_time =
+			timespec_add(timekeeping_suspend_time, delta_delta);
+	}
 	write_sequnlock_irqrestore(&xtime_lock, flags);
 
 	clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL);
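The drift being compensated comes from read_persistent_clock() returning whole seconds on many platforms, so each suspend/resume can shift system time by up to ~1s against the RTC. Tracking delta (system time minus persistent clock) and folding delta_delta back into the suspend timestamp keeps that difference near-constant, unless a real clock step (>= 2s) is detected. A numeric trace with made-up, whole-second values:

    /* Trace of the compensation across two suspend cycles; all values
     * are made up, and RTC reads are assumed truncated to seconds. */
    #include <stdio.h>
    #include <stdlib.h>

    static long old_delta;   /* persists across calls, like the static above */

    static long compensate(long xtime, long rtc)
    {
        long delta = xtime - rtc;           /* truncation error shows up here */
        long delta_delta = delta - old_delta;

        if (labs(delta_delta) >= 2)
            old_delta = delta;              /* assume a genuine time step */
        else
            rtc += delta_delta;             /* fold error into the timestamp */
        return rtc;
    }

    int main(void)
    {
        /* on each cycle the RTC read lags system time by 1s of truncation;
         * the compensated timestamps keep xtime - rtc constant at 0 */
        printf("suspend 1: rtc=%ld\n", compensate(10000, 9999));
        printf("suspend 2: rtc=%ld\n", compensate(20000, 19999));
        return 0;
    }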
@@ -792,6 +824,13 @@ static void timekeeping_adjust(s64 offset)
 	} else
 		return;
 
+	WARN_ONCE(timekeeper.clock->maxadj &&
+		(timekeeper.mult + adj > timekeeper.clock->mult +
+					timekeeper.clock->maxadj),
+		"Adjusting %s more than 11%% (%ld vs %ld)\n",
+		timekeeper.clock->name, (long)timekeeper.mult + adj,
+		(long)timekeeper.clock->mult +
+					timekeeper.clock->maxadj);
 	timekeeper.mult += adj;
 	timekeeper.xtime_interval += interval;
 	timekeeper.xtime_nsec -= offset;