Diffstat (limited to 'kernel/time')

-rw-r--r--  kernel/time/Kconfig           |  4
-rw-r--r--  kernel/time/clocksource.c     | 33
-rw-r--r--  kernel/time/tick-broadcast.c  |  2
-rw-r--r--  kernel/time/tick-sched.c      | 32
-rw-r--r--  kernel/time/timekeeping.c     | 93

5 files changed, 67 insertions, 97 deletions
diff --git a/kernel/time/Kconfig b/kernel/time/Kconfig
index 95ed42951e0a..f06a8a365648 100644
--- a/kernel/time/Kconfig
+++ b/kernel/time/Kconfig
@@ -6,7 +6,7 @@ config TICK_ONESHOT
 
 config NO_HZ
         bool "Tickless System (Dynamic Ticks)"
-        depends on GENERIC_TIME && GENERIC_CLOCKEVENTS
+        depends on !ARCH_USES_GETTIMEOFFSET && GENERIC_CLOCKEVENTS
         select TICK_ONESHOT
         help
           This option enables a tickless system: timer interrupts will
@@ -15,7 +15,7 @@ config NO_HZ
 
 config HIGH_RES_TIMERS
         bool "High Resolution Timer Support"
-        depends on GENERIC_TIME && GENERIC_CLOCKEVENTS
+        depends on !ARCH_USES_GETTIMEOFFSET && GENERIC_CLOCKEVENTS
         select TICK_ONESHOT
         help
           This option enables high resolution timer support. If your
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index f08e99c1d561..c18d7efa1b4b 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -531,7 +531,7 @@ static u64 clocksource_max_deferment(struct clocksource *cs)
         return max_nsecs - (max_nsecs >> 5);
 }
 
-#ifdef CONFIG_GENERIC_TIME
+#ifndef CONFIG_ARCH_USES_GETTIMEOFFSET
 
 /**
  * clocksource_select - Select the best clocksource available
@@ -577,7 +577,7 @@ static void clocksource_select(void)
         }
 }
 
-#else /* CONFIG_GENERIC_TIME */
+#else /* !CONFIG_ARCH_USES_GETTIMEOFFSET */
 
 static inline void clocksource_select(void) { }
 
@@ -639,19 +639,18 @@ static void clocksource_enqueue(struct clocksource *cs)
 #define MAX_UPDATE_LENGTH 5 /* Seconds */
 
 /**
- * __clocksource_register_scale - Used to install new clocksources
+ * __clocksource_updatefreq_scale - Used update clocksource with new freq
  * @t:          clocksource to be registered
  * @scale:      Scale factor multiplied against freq to get clocksource hz
  * @freq:       clocksource frequency (cycles per second) divided by scale
  *
- * Returns -EBUSY if registration fails, zero otherwise.
+ * This should only be called from the clocksource->enable() method.
  *
  * This *SHOULD NOT* be called directly! Please use the
- * clocksource_register_hz() or clocksource_register_khz helper functions.
+ * clocksource_updatefreq_hz() or clocksource_updatefreq_khz helper functions.
  */
-int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
+void __clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq)
 {
-
         /*
          * Ideally we want to use some of the limits used in
          * clocksource_max_deferment, to provide a more informed
@@ -662,7 +661,27 @@ int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
                         NSEC_PER_SEC/scale,
                         MAX_UPDATE_LENGTH*scale);
         cs->max_idle_ns = clocksource_max_deferment(cs);
+}
+EXPORT_SYMBOL_GPL(__clocksource_updatefreq_scale);
+
+/**
+ * __clocksource_register_scale - Used to install new clocksources
+ * @t:          clocksource to be registered
+ * @scale:      Scale factor multiplied against freq to get clocksource hz
+ * @freq:       clocksource frequency (cycles per second) divided by scale
+ *
+ * Returns -EBUSY if registration fails, zero otherwise.
+ *
+ * This *SHOULD NOT* be called directly! Please use the
+ * clocksource_register_hz() or clocksource_register_khz helper functions.
+ */
+int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
+{
+
+        /* Intialize mult/shift and max_idle_ns */
+        __clocksource_updatefreq_scale(cs, scale, freq);
 
+        /* Add clocksource to the clcoksource list */
         mutex_lock(&clocksource_mutex);
         clocksource_enqueue(cs);
         clocksource_select();
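Note: the split above separates frequency (re)calculation from registration, so a driver whose input clock rate is only known, or can change, at ->enable() time can refresh mult/shift and max_idle_ns without re-registering. A minimal sketch of such a driver, using the clocksource_updatefreq_hz() helper named in the kernel-doc above (assumed to wrap __clocksource_updatefreq_scale(cs, 1, hz)); the myclk_* accessors are hypothetical stand-ins for a real timer block:

#include <linux/clocksource.h>
#include <linux/init.h>

/* Hypothetical hardware accessors, stand-ins for a real timer block. */
extern u32 myclk_get_rate(void);                /* current input clock, in Hz */
extern u32 myclk_read_counter(void);

static cycle_t myclk_read(struct clocksource *cs)
{
        return (cycle_t)myclk_read_counter();
}

static int myclk_enable(struct clocksource *cs)
{
        /*
         * The input clock may have been reprogrammed while the
         * clocksource was disabled; recompute mult/shift and
         * max_idle_ns for the rate that is valid now, without
         * re-registering the clocksource.
         */
        clocksource_updatefreq_hz(cs, myclk_get_rate());
        return 0;
}

static struct clocksource myclk_cs = {
        .name   = "myclk",
        .rating = 200,
        .read   = myclk_read,
        .mask   = CLOCKSOURCE_MASK(32),
        .flags  = CLOCK_SOURCE_IS_CONTINUOUS,
        .enable = myclk_enable,
};

static int __init myclk_init(void)
{
        /* Computes the initial mult/shift and enqueues the clocksource. */
        return clocksource_register_hz(&myclk_cs, myclk_get_rate());
}

clocksource_register_hz() still performs the initial mult/shift computation, by calling into the same __clocksource_updatefreq_scale() path as shown in the hunk above.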
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index b3bafd5fc66d..48b2761b5668 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -188,7 +188,7 @@ static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
         /*
          * Setup the next period for devices, which do not have
          * periodic mode. We read dev->next_event first and add to it
-         * when the event alrady expired. clockevents_program_event()
+         * when the event already expired. clockevents_program_event()
          * sets dev->next_event only when the event is really
          * programmed to the device.
          */
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 1d7b9bc1c034..3e216e01bbd1 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -154,14 +154,14 @@ static void tick_nohz_update_jiffies(ktime_t now)
  * Updates the per cpu time idle statistics counters
  */
 static void
-update_ts_time_stats(struct tick_sched *ts, ktime_t now, u64 *last_update_time)
+update_ts_time_stats(int cpu, struct tick_sched *ts, ktime_t now, u64 *last_update_time)
 {
         ktime_t delta;
 
         if (ts->idle_active) {
                 delta = ktime_sub(now, ts->idle_entrytime);
                 ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
-                if (nr_iowait_cpu() > 0)
+                if (nr_iowait_cpu(cpu) > 0)
                         ts->iowait_sleeptime = ktime_add(ts->iowait_sleeptime, delta);
                 ts->idle_entrytime = now;
         }
@@ -175,19 +175,19 @@ static void tick_nohz_stop_idle(int cpu, ktime_t now)
 {
         struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
 
-        update_ts_time_stats(ts, now, NULL);
+        update_ts_time_stats(cpu, ts, now, NULL);
         ts->idle_active = 0;
 
         sched_clock_idle_wakeup_event(0);
 }
 
-static ktime_t tick_nohz_start_idle(struct tick_sched *ts)
+static ktime_t tick_nohz_start_idle(int cpu, struct tick_sched *ts)
 {
         ktime_t now;
 
         now = ktime_get();
 
-        update_ts_time_stats(ts, now, NULL);
+        update_ts_time_stats(cpu, ts, now, NULL);
 
         ts->idle_entrytime = now;
         ts->idle_active = 1;
@@ -216,7 +216,7 @@ u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
         if (!tick_nohz_enabled)
                 return -1;
 
-        update_ts_time_stats(ts, ktime_get(), last_update_time);
+        update_ts_time_stats(cpu, ts, ktime_get(), last_update_time);
 
         return ktime_to_us(ts->idle_sleeptime);
 }
@@ -242,7 +242,7 @@ u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time)
         if (!tick_nohz_enabled)
                 return -1;
 
-        update_ts_time_stats(ts, ktime_get(), last_update_time);
+        update_ts_time_stats(cpu, ts, ktime_get(), last_update_time);
 
         return ktime_to_us(ts->iowait_sleeptime);
 }
@@ -284,7 +284,7 @@ void tick_nohz_stop_sched_tick(int inidle)
          */
         ts->inidle = 1;
 
-        now = tick_nohz_start_idle(ts);
+        now = tick_nohz_start_idle(cpu, ts);
 
         /*
          * If this cpu is offline and it is the one which updates
@@ -315,9 +315,6 @@ void tick_nohz_stop_sched_tick(int inidle)
                 goto end;
         }
 
-        if (nohz_ratelimit(cpu))
-                goto end;
-
         ts->idle_calls++;
         /* Read jiffies and the time when jiffies were updated last */
         do {
@@ -408,13 +405,7 @@ void tick_nohz_stop_sched_tick(int inidle)
          * the scheduler tick in nohz_restart_sched_tick.
          */
         if (!ts->tick_stopped) {
-                if (select_nohz_load_balancer(1)) {
-                        /*
-                         * sched tick not stopped!
-                         */
-                        cpumask_clear_cpu(cpu, nohz_cpu_mask);
-                        goto out;
-                }
+                select_nohz_load_balancer(1);
 
                 ts->idle_tick = hrtimer_get_expires(&ts->sched_timer);
                 ts->tick_stopped = 1;
@@ -783,7 +774,6 @@ void tick_setup_sched_timer(void)
 {
         struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
         ktime_t now = ktime_get();
-        u64 offset;
 
         /*
          * Emulate tick processing via per-CPU hrtimers:
@@ -793,10 +783,6 @@ void tick_setup_sched_timer(void)
 
         /* Get the next period (per cpu) */
         hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update());
-        offset = ktime_to_ns(tick_period) >> 1;
-        do_div(offset, num_possible_cpus());
-        offset *= smp_processor_id();
-        hrtimer_add_expires_ns(&ts->sched_timer, offset);
 
         for (;;) {
                 hrtimer_forward(&ts->sched_timer, now, tick_period);
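Note: threading cpu through update_ts_time_stats() lets nr_iowait_cpu(cpu) charge idle time as iowait against the CPU actually being accounted, which matters because get_cpu_idle_time_us() and get_cpu_iowait_time_us() can update a remote CPU's statistics. A consumer-side sketch of those two accessors (illustrative only, not part of this patch; the pr_info() reporting stands in for a real governor's bookkeeping):

#include <linux/kernel.h>
#include <linux/tick.h>

/*
 * Illustrative consumer: sample per-CPU idle and iowait residency via
 * the accessors above. Both return microseconds, or (u64)-1 when
 * NO_HZ idle accounting is not active.
 */
static void report_idle_residency(int cpu)
{
        u64 wall, idle_us, iowait_us;

        idle_us = get_cpu_idle_time_us(cpu, &wall);
        if ((s64)idle_us < 0)
                return;         /* tick-based kernel: use kstat instead */

        iowait_us = get_cpu_iowait_time_us(cpu, NULL);

        pr_info("cpu%d: %llu us idle, %llu us iowait, sampled at %llu us\n",
                cpu, (unsigned long long)idle_us,
                (unsigned long long)iowait_us,
                (unsigned long long)wall);
}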
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index caf8d4d4f5c8..49010d822f72 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -153,8 +153,8 @@ __cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);
  * - wall_to_monotonic is no longer the boot time, getboottime must be
  * used instead.
  */
-struct timespec xtime __attribute__ ((aligned (16)));
-struct timespec wall_to_monotonic __attribute__ ((aligned (16)));
+static struct timespec xtime __attribute__ ((aligned (16)));
+static struct timespec wall_to_monotonic __attribute__ ((aligned (16)));
 static struct timespec total_sleep_time;
 
 /*
@@ -170,11 +170,10 @@ void timekeeping_leap_insert(int leapsecond)
 {
         xtime.tv_sec += leapsecond;
         wall_to_monotonic.tv_sec -= leapsecond;
-        update_vsyscall(&xtime, timekeeper.clock, timekeeper.mult);
+        update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
+                        timekeeper.mult);
 }
 
-#ifdef CONFIG_GENERIC_TIME
-
 /**
  * timekeeping_forward_now - update clock to the current time
  *
@@ -328,7 +327,8 @@ int do_settimeofday(struct timespec *tv)
         timekeeper.ntp_error = 0;
         ntp_clear();
 
-        update_vsyscall(&xtime, timekeeper.clock, timekeeper.mult);
+        update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
+                        timekeeper.mult);
 
         write_sequnlock_irqrestore(&xtime_lock, flags);
 
@@ -376,52 +376,6 @@ void timekeeping_notify(struct clocksource *clock)
         tick_clock_notify();
 }
 
-#else /* GENERIC_TIME */
-
-static inline void timekeeping_forward_now(void) { }
-
-/**
- * ktime_get - get the monotonic time in ktime_t format
- *
- * returns the time in ktime_t format
- */
-ktime_t ktime_get(void)
-{
-        struct timespec now;
-
-        ktime_get_ts(&now);
-
-        return timespec_to_ktime(now);
-}
-EXPORT_SYMBOL_GPL(ktime_get);
-
-/**
- * ktime_get_ts - get the monotonic clock in timespec format
- * @ts:         pointer to timespec variable
- *
- * The function calculates the monotonic clock from the realtime
- * clock and the wall_to_monotonic offset and stores the result
- * in normalized timespec format in the variable pointed to by @ts.
- */
-void ktime_get_ts(struct timespec *ts)
-{
-        struct timespec tomono;
-        unsigned long seq;
-
-        do {
-                seq = read_seqbegin(&xtime_lock);
-                getnstimeofday(ts);
-                tomono = wall_to_monotonic;
-
-        } while (read_seqretry(&xtime_lock, seq));
-
-        set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec,
-                                ts->tv_nsec + tomono.tv_nsec);
-}
-EXPORT_SYMBOL_GPL(ktime_get_ts);
-
-#endif /* !GENERIC_TIME */
-
 /**
  * ktime_get_real - get the real (wall-) time in ktime_t format
  *
@@ -579,9 +533,9 @@ static int timekeeping_resume(struct sys_device *dev)
 
         if (timespec_compare(&ts, &timekeeping_suspend_time) > 0) {
                 ts = timespec_sub(ts, timekeeping_suspend_time);
-                xtime = timespec_add_safe(xtime, ts);
+                xtime = timespec_add(xtime, ts);
                 wall_to_monotonic = timespec_sub(wall_to_monotonic, ts);
-                total_sleep_time = timespec_add_safe(total_sleep_time, ts);
+                total_sleep_time = timespec_add(total_sleep_time, ts);
         }
         /* re-base the last cycle value */
         timekeeper.clock->cycle_last = timekeeper.clock->read(timekeeper.clock);
@@ -736,6 +690,7 @@ static void timekeeping_adjust(s64 offset)
 static cycle_t logarithmic_accumulation(cycle_t offset, int shift)
 {
         u64 nsecps = (u64)NSEC_PER_SEC << timekeeper.shift;
+        u64 raw_nsecs;
 
         /* If the offset is smaller then a shifted interval, do nothing */
         if (offset < timekeeper.cycle_interval<<shift)
@@ -752,12 +707,15 @@ static cycle_t logarithmic_accumulation(cycle_t offset, int shift)
                 second_overflow();
         }
 
-        /* Accumulate into raw time */
-        raw_time.tv_nsec += timekeeper.raw_interval << shift;;
-        while (raw_time.tv_nsec >= NSEC_PER_SEC) {
-                raw_time.tv_nsec -= NSEC_PER_SEC;
-                raw_time.tv_sec++;
+        /* Accumulate raw time */
+        raw_nsecs = timekeeper.raw_interval << shift;
+        raw_nsecs += raw_time.tv_nsec;
+        if (raw_nsecs >= NSEC_PER_SEC) {
+                u64 raw_secs = raw_nsecs;
+                raw_nsecs = do_div(raw_secs, NSEC_PER_SEC);
+                raw_time.tv_sec += raw_secs;
         }
+        raw_time.tv_nsec = raw_nsecs;
 
         /* Accumulate error between NTP and clock interval */
         timekeeper.ntp_error += tick_length << shift;
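Note: the rewritten accumulation keeps the nanosecond total in a u64 and folds whole seconds in with a single division instead of looping one second per iteration, which also avoids overflowing raw_time.tv_nsec on 32-bit architectures when many shifted intervals are folded in at once. do_div() divides its 64-bit first argument in place and returns the remainder. The same idiom as a standalone helper (illustrative, not part of the patch):

#include <asm/div64.h>          /* do_div() */
#include <linux/time.h>

/*
 * Fold a (possibly large) number of accumulated nanoseconds into a
 * timespec, mirroring the do_div() idiom above: do_div(x, base)
 * divides x in place and returns the remainder.
 */
static void fold_nsecs(struct timespec *t, u64 nsecs)
{
        nsecs += t->tv_nsec;
        if (nsecs >= NSEC_PER_SEC) {
                u64 secs = nsecs;

                nsecs = do_div(secs, NSEC_PER_SEC);     /* secs = quotient */
                t->tv_sec += secs;
        }
        t->tv_nsec = nsecs;
}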
@@ -784,10 +742,11 @@ void update_wall_time(void)
                 return;
 
         clock = timekeeper.clock;
-#ifdef CONFIG_GENERIC_TIME
-        offset = (clock->read(clock) - clock->cycle_last) & clock->mask;
-#else
+
+#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
         offset = timekeeper.cycle_interval;
+#else
+        offset = (clock->read(clock) - clock->cycle_last) & clock->mask;
 #endif
         timekeeper.xtime_nsec = (s64)xtime.tv_nsec << timekeeper.shift;
 
@@ -856,7 +815,8 @@ void update_wall_time(void)
         }
 
         /* check to see if there is a new clocksource to use */
-        update_vsyscall(&xtime, timekeeper.clock, timekeeper.mult);
+        update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
+                        timekeeper.mult);
 }
 
 /**
@@ -887,7 +847,7 @@ EXPORT_SYMBOL_GPL(getboottime);
  */
 void monotonic_to_bootbased(struct timespec *ts)
 {
-        *ts = timespec_add_safe(*ts, total_sleep_time);
+        *ts = timespec_add(*ts, total_sleep_time);
 }
 EXPORT_SYMBOL_GPL(monotonic_to_bootbased);
 
@@ -902,6 +862,11 @@ struct timespec __current_kernel_time(void)
         return xtime;
 }
 
+struct timespec __get_wall_to_monotonic(void)
+{
+        return wall_to_monotonic;
+}
+
 struct timespec current_kernel_time(void)
 {
         struct timespec now;
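Note: update_vsyscall() now also receives wall_to_monotonic, so architectures with a vDSO can export the wall-to-monotonic offset alongside xtime (the offset itself is static now, hence the __get_wall_to_monotonic() accessor above). A sketch of what an arch hook might do with the extra argument (hypothetical arch code, not part of this diff; the vdso_data layout and names are illustrative):

#include <linux/clocksource.h>
#include <linux/seqlock.h>
#include <linux/time.h>

/* Hypothetical arch vDSO data page; layout and names are illustrative. */
struct vdso_data {
        seqcount_t              seq;            /* protects the snapshot */
        cycle_t                 cycle_last;
        u32                     mult;
        struct timespec         wall_time;
        struct timespec         wall_to_mono;   /* the newly passed offset */
};
extern struct vdso_data *vdso;                  /* assumed arch mapping */

void update_vsyscall(struct timespec *wall_time, struct timespec *wtm,
                     struct clocksource *clock, u32 mult)
{
        write_seqcount_begin(&vdso->seq);

        vdso->cycle_last   = clock->cycle_last;
        vdso->mult         = mult;
        vdso->wall_time    = *wall_time;
        vdso->wall_to_mono = *wtm;              /* new in this series */

        write_seqcount_end(&vdso->seq);
}

A userspace vDSO reader retries on a seqcount mismatch and can derive CLOCK_MONOTONIC as wall_time plus wall_to_mono without entering the kernel.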
