author    Andrea Bastoni <bastoni@cs.unc.edu>    2010-10-23 01:01:49 -0400
committer Andrea Bastoni <bastoni@cs.unc.edu>    2010-10-23 01:01:49 -0400
commit    3dd41424090a0ca3a660218d06afe6ff4441bad3 (patch)
tree      511ef1bb1799027fc5aad574adce49120ecadd87 /kernel/time
parent    5c5456402d467969b217d7fdd6670f8c8600f5a8 (diff)
parent    f6f94e2ab1b33f0082ac22d71f66385a60d8157f (diff)
Merge commit 'v2.6.36' into wip-merge-2.6.36
Conflicts:
	Makefile
	arch/x86/include/asm/unistd_32.h
	arch/x86/kernel/syscall_table_32.S
	kernel/sched.c
	kernel/time/tick-sched.c

Relevant API and function changes (resolved in this commit):
- (API) .enqueue_task() (enqueue_task_litmus) and .dequeue_task() (dequeue_task_litmus) [litmus/sched_litmus.c]
- (API) .select_task_rq() (select_task_rq_litmus) [litmus/sched_litmus.c]
- (API) sysrq_dump_trace_buffer() and sysrq_handle_kill_rt_tasks() [litmus/sched_trace.c]
- struct kfifo internal buffer name changed (buffer -> buf) [litmus/sched_trace.c]
- add_wait_queue_exclusive_locked -> __add_wait_queue_tail_exclusive [litmus/fmlp.c]
- syscall numbers for both x86_32 and x86_64
Diffstat (limited to 'kernel/time')
-rw-r--r--	kernel/time/Kconfig            	  4
-rw-r--r--	kernel/time/clocksource.c      	 71
-rw-r--r--	kernel/time/ntp.c              	  2
-rw-r--r--	kernel/time/tick-broadcast.c   	  2
-rw-r--r--	kernel/time/tick-sched.c       	 93
-rw-r--r--	kernel/time/timekeeping.c      	128
-rw-r--r--	kernel/time/timer_list.c       	  1
7 files changed, 189 insertions(+), 112 deletions(-)
diff --git a/kernel/time/Kconfig b/kernel/time/Kconfig
index 95ed42951e0..f06a8a36564 100644
--- a/kernel/time/Kconfig
+++ b/kernel/time/Kconfig
@@ -6,7 +6,7 @@ config TICK_ONESHOT
 
 config NO_HZ
 	bool "Tickless System (Dynamic Ticks)"
-	depends on GENERIC_TIME && GENERIC_CLOCKEVENTS
+	depends on !ARCH_USES_GETTIMEOFFSET && GENERIC_CLOCKEVENTS
 	select TICK_ONESHOT
 	help
 	  This option enables a tickless system: timer interrupts will
@@ -15,7 +15,7 @@ config NO_HZ
 
 config HIGH_RES_TIMERS
 	bool "High Resolution Timer Support"
-	depends on GENERIC_TIME && GENERIC_CLOCKEVENTS
+	depends on !ARCH_USES_GETTIMEOFFSET && GENERIC_CLOCKEVENTS
 	select TICK_ONESHOT
 	help
 	  This option enables high resolution timer support. If your
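Background note (not part of the commit): upstream 2.6.36 removed the CONFIG_GENERIC_TIME symbol because every architecture now uses generic timekeeping; the few legacy architectures that still implement arch_gettimeoffset() select CONFIG_ARCH_USES_GETTIMEOFFSET instead, so the old positive dependency becomes a negation. A minimal C sketch of the equivalent guard inversion, mirroring the clocksource.c change below:

/* Old spelling: generic path compiled only when GENERIC_TIME was set. */
#ifdef CONFIG_GENERIC_TIME
/* ... generic timekeeping path ... */
#endif

/*
 * New spelling: compile the same path unless the architecture still
 * uses the legacy arch_gettimeoffset() interface.
 */
#ifndef CONFIG_ARCH_USES_GETTIMEOFFSET
/* ... generic timekeeping path ... */
#endif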
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 1f5dde63745..c18d7efa1b4 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -531,7 +531,7 @@ static u64 clocksource_max_deferment(struct clocksource *cs)
 	return max_nsecs - (max_nsecs >> 5);
 }
 
-#ifdef CONFIG_GENERIC_TIME
+#ifndef CONFIG_ARCH_USES_GETTIMEOFFSET
 
 /**
  * clocksource_select - Select the best clocksource available
@@ -577,7 +577,7 @@ static void clocksource_select(void)
 	}
 }
 
-#else /* CONFIG_GENERIC_TIME */
+#else /* !CONFIG_ARCH_USES_GETTIMEOFFSET */
 
 static inline void clocksource_select(void) { }
 
@@ -625,6 +625,73 @@ static void clocksource_enqueue(struct clocksource *cs)
 	list_add(&cs->list, entry);
 }
 
+
+/*
+ * Maximum time we expect to go between ticks. This includes idle
+ * tickless time. It provides the trade-off between selecting a
+ * mult/shift pair that is very precise but can only handle a short
+ * period of time, vs. a mult/shift pair that can handle long periods
+ * of time but isn't as precise.
+ *
+ * This is a subsystem constant, and actual hardware limitations
+ * may override it (i.e. clocksources that wrap every 3 seconds).
+ */
+#define MAX_UPDATE_LENGTH 5 /* Seconds */
+
+/**
+ * __clocksource_updatefreq_scale - Used to update clocksource with new freq
+ * @cs: clocksource to be updated
+ * @scale: Scale factor multiplied against freq to get clocksource hz
+ * @freq: clocksource frequency (cycles per second) divided by scale
+ *
+ * This should only be called from the clocksource->enable() method.
+ *
+ * This *SHOULD NOT* be called directly! Please use the
+ * clocksource_updatefreq_hz() or clocksource_updatefreq_khz() helper
+ * functions.
+ */
+void __clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq)
+{
+	/*
+	 * Ideally we want to use some of the limits used in
+	 * clocksource_max_deferment, to provide a more informed
+	 * MAX_UPDATE_LENGTH. But for now this just gets the
+	 * register interface working properly.
+	 */
+	clocks_calc_mult_shift(&cs->mult, &cs->shift, freq,
+			       NSEC_PER_SEC/scale,
+			       MAX_UPDATE_LENGTH*scale);
+	cs->max_idle_ns = clocksource_max_deferment(cs);
+}
+EXPORT_SYMBOL_GPL(__clocksource_updatefreq_scale);
+
+/**
+ * __clocksource_register_scale - Used to install new clocksources
+ * @cs: clocksource to be registered
+ * @scale: Scale factor multiplied against freq to get clocksource hz
+ * @freq: clocksource frequency (cycles per second) divided by scale
+ *
+ * Returns -EBUSY if registration fails, zero otherwise.
+ *
+ * This *SHOULD NOT* be called directly! Please use the
+ * clocksource_register_hz() or clocksource_register_khz() helper functions.
+ */
+int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
+{
+
+	/* Initialize mult/shift and max_idle_ns */
+	__clocksource_updatefreq_scale(cs, scale, freq);
+
+	/* Add clocksource to the clocksource list */
+	mutex_lock(&clocksource_mutex);
+	clocksource_enqueue(cs);
+	clocksource_select();
+	clocksource_enqueue_watchdog(cs);
+	mutex_unlock(&clocksource_mutex);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(__clocksource_register_scale);
+
+
 /**
  * clocksource_register - Used to install new clocksources
  * @t: clocksource to be registered
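With the new registration path above, a driver hands the core a raw frequency and lets mult/shift be derived internally instead of computing them by hand. A minimal, hypothetical driver sketch (the foo_* names, register layout, and 24 MHz rate are illustrative, not from this commit); clocksource_register_hz() is the helper that wraps __clocksource_register_scale(cs, 1, hz):

#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/io.h>

static void __iomem *foo_counter_base;	/* mapped elsewhere; illustrative */

/* Read the free-running 32-bit hardware counter. */
static cycle_t foo_read(struct clocksource *cs)
{
	return (cycle_t)readl(foo_counter_base);
}

static struct clocksource foo_clocksource = {
	.name	= "foo-timer",
	.rating	= 200,
	.read	= foo_read,
	.mask	= CLOCKSOURCE_MASK(32),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

static int __init foo_clocksource_init(void)
{
	/*
	 * 24 MHz input clock; mult/shift and max_idle_ns are computed
	 * internally by __clocksource_updatefreq_scale().
	 */
	return clocksource_register_hz(&foo_clocksource, 24000000);
}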
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index 7c0f180d6e9..c63116863a8 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -69,7 +69,7 @@ static s64 time_freq;
 /* time at last adjustment (secs): */
 static long time_reftime;
 
-long time_adjust;
+static long time_adjust;
 
 /* constant (boot-param configurable) NTP tick adjustment (upscaled) */
 static s64 ntp_tick_adj;
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index b3bafd5fc66..48b2761b566 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -188,7 +188,7 @@ static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
 	/*
 	 * Setup the next period for devices, which do not have
 	 * periodic mode. We read dev->next_event first and add to it
-	 * when the event alrady expired. clockevents_program_event()
+	 * when the event already expired. clockevents_program_event()
 	 * sets dev->next_event only when the event is really
 	 * programmed to the device.
 	 */
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 0adc54bd7c7..bb2d8b7850a 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -150,35 +150,65 @@ static void tick_nohz_update_jiffies(ktime_t now)
 	touch_softlockup_watchdog();
 }
 
+/*
+ * Updates the per-cpu time idle statistics counters
+ */
+static void
+update_ts_time_stats(int cpu, struct tick_sched *ts, ktime_t now, u64 *last_update_time)
+{
+	ktime_t delta;
+
+	if (ts->idle_active) {
+		delta = ktime_sub(now, ts->idle_entrytime);
+		ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
+		if (nr_iowait_cpu(cpu) > 0)
+			ts->iowait_sleeptime = ktime_add(ts->iowait_sleeptime, delta);
+		ts->idle_entrytime = now;
+	}
+
+	if (last_update_time)
+		*last_update_time = ktime_to_us(now);
+
+}
+
 static void tick_nohz_stop_idle(int cpu, ktime_t now)
 {
 	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
-	ktime_t delta;
 
-	delta = ktime_sub(now, ts->idle_entrytime);
-	ts->idle_lastupdate = now;
-	ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
+	update_ts_time_stats(cpu, ts, now, NULL);
 	ts->idle_active = 0;
 
 	sched_clock_idle_wakeup_event(0);
 }
 
-static ktime_t tick_nohz_start_idle(struct tick_sched *ts)
+static ktime_t tick_nohz_start_idle(int cpu, struct tick_sched *ts)
 {
-	ktime_t now, delta;
+	ktime_t now;
 
 	now = ktime_get();
-	if (ts->idle_active) {
-		delta = ktime_sub(now, ts->idle_entrytime);
-		ts->idle_lastupdate = now;
-		ts->idle_sleeptime = ktime_add(ts->idle_sleeptime, delta);
-	}
+
+	update_ts_time_stats(cpu, ts, now, NULL);
+
 	ts->idle_entrytime = now;
 	ts->idle_active = 1;
 	sched_clock_idle_sleep_event();
 	return now;
 }
 
+/**
+ * get_cpu_idle_time_us - get the total idle time of a cpu
+ * @cpu: CPU number to query
+ * @last_update_time: variable to store update time in
+ *
+ * Return the cumulative idle time (since boot) for a given
+ * CPU, in microseconds. The idle time returned includes
+ * the iowait time (unlike what "top" and co report).
+ *
+ * This time is measured via accounting rather than sampling,
+ * and is as accurate as ktime_get() is.
+ *
+ * This function returns -1 if NOHZ is not enabled.
+ */
 u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
 {
 	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
@@ -186,15 +216,38 @@ u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
 	if (!tick_nohz_enabled)
 		return -1;
 
-	if (ts->idle_active)
-		*last_update_time = ktime_to_us(ts->idle_lastupdate);
-	else
-		*last_update_time = ktime_to_us(ktime_get());
+	update_ts_time_stats(cpu, ts, ktime_get(), last_update_time);
 
 	return ktime_to_us(ts->idle_sleeptime);
 }
 EXPORT_SYMBOL_GPL(get_cpu_idle_time_us);
 
+/*
+ * get_cpu_iowait_time_us - get the total iowait time of a cpu
+ * @cpu: CPU number to query
+ * @last_update_time: variable to store update time in
+ *
+ * Return the cumulative iowait time (since boot) for a given
+ * CPU, in microseconds.
+ *
+ * This time is measured via accounting rather than sampling,
+ * and is as accurate as ktime_get() is.
+ *
+ * This function returns -1 if NOHZ is not enabled.
+ */
+u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time)
+{
+	struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
+
+	if (!tick_nohz_enabled)
+		return -1;
+
+	update_ts_time_stats(cpu, ts, ktime_get(), last_update_time);
+
+	return ktime_to_us(ts->iowait_sleeptime);
+}
+EXPORT_SYMBOL_GPL(get_cpu_iowait_time_us);
+
 /**
  * tick_nohz_stop_sched_tick - stop the idle tick from the idle task
  *
@@ -231,7 +284,7 @@ void tick_nohz_stop_sched_tick(int inidle)
 	 */
 	ts->inidle = 1;
 
-	now = tick_nohz_start_idle(ts);
+	now = tick_nohz_start_idle(cpu, ts);
 
 	/*
 	 * If this cpu is offline and it is the one which updates
@@ -352,13 +405,7 @@ void tick_nohz_stop_sched_tick(int inidle)
 	 * the scheduler tick in nohz_restart_sched_tick.
 	 */
 	if (!ts->tick_stopped) {
-		if (select_nohz_load_balancer(1)) {
-			/*
-			 * sched tick not stopped!
-			 */
-			cpumask_clear_cpu(cpu, nohz_cpu_mask);
-			goto out;
-		}
+		select_nohz_load_balancer(1);
 
 		ts->idle_tick = hrtimer_get_expires(&ts->sched_timer);
 		ts->tick_stopped = 1;
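The two exported helpers above are meant for consumers such as cpufreq governors. A minimal usage sketch (foo_report_idle and the use of cpu 0 are illustrative, not from this commit); both helpers return (u64)-1 when NOHZ is disabled, and the idle figure includes iowait:

#include <linux/kernel.h>
#include <linux/tick.h>

/* Hypothetical consumer of the per-cpu idle/iowait counters. */
static int foo_report_idle(int cpu)
{
	u64 wall_us, idle_us, iowait_us;

	idle_us   = get_cpu_idle_time_us(cpu, &wall_us); /* includes iowait */
	iowait_us = get_cpu_iowait_time_us(cpu, &wall_us);
	if (idle_us == (u64)-1 || iowait_us == (u64)-1)
		return -ENODEV;	/* NOHZ not enabled */

	pr_info("cpu%d: %llu us idle (%llu us iowait) as of %llu us\n",
		cpu, (unsigned long long)idle_us,
		(unsigned long long)iowait_us,
		(unsigned long long)wall_us);
	return 0;
}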
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 39f6177fafa..49010d822f7 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -153,8 +153,8 @@ __cacheline_aligned_in_smp DEFINE_SEQLOCK(xtime_lock);
  * - wall_to_monotonic is no longer the boot time, getboottime must be
  *   used instead.
  */
-struct timespec xtime __attribute__ ((aligned (16)));
-struct timespec wall_to_monotonic __attribute__ ((aligned (16)));
+static struct timespec xtime __attribute__ ((aligned (16)));
+static struct timespec wall_to_monotonic __attribute__ ((aligned (16)));
 static struct timespec total_sleep_time;
 
 /*
@@ -165,23 +165,15 @@ struct timespec raw_time;
 /* flag for if timekeeping is suspended */
 int __read_mostly timekeeping_suspended;
 
-static struct timespec xtime_cache __attribute__ ((aligned (16)));
-void update_xtime_cache(u64 nsec)
-{
-	xtime_cache = xtime;
-	timespec_add_ns(&xtime_cache, nsec);
-}
-
 /* must hold xtime_lock */
 void timekeeping_leap_insert(int leapsecond)
 {
 	xtime.tv_sec += leapsecond;
 	wall_to_monotonic.tv_sec -= leapsecond;
-	update_vsyscall(&xtime, timekeeper.clock, timekeeper.mult);
+	update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
+			timekeeper.mult);
 }
 
-#ifdef CONFIG_GENERIC_TIME
-
 /**
  * timekeeping_forward_now - update clock to the current time
  *
@@ -332,12 +324,11 @@ int do_settimeofday(struct timespec *tv)
 
 	xtime = *tv;
 
-	update_xtime_cache(0);
-
 	timekeeper.ntp_error = 0;
 	ntp_clear();
 
-	update_vsyscall(&xtime, timekeeper.clock, timekeeper.mult);
+	update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
+			timekeeper.mult);
 
 	write_sequnlock_irqrestore(&xtime_lock, flags);
 
@@ -385,52 +376,6 @@ void timekeeping_notify(struct clocksource *clock)
 	tick_clock_notify();
 }
 
-#else /* GENERIC_TIME */
-
-static inline void timekeeping_forward_now(void) { }
-
-/**
- * ktime_get - get the monotonic time in ktime_t format
- *
- * returns the time in ktime_t format
- */
-ktime_t ktime_get(void)
-{
-	struct timespec now;
-
-	ktime_get_ts(&now);
-
-	return timespec_to_ktime(now);
-}
-EXPORT_SYMBOL_GPL(ktime_get);
-
-/**
- * ktime_get_ts - get the monotonic clock in timespec format
- * @ts: pointer to timespec variable
- *
- * The function calculates the monotonic clock from the realtime
- * clock and the wall_to_monotonic offset and stores the result
- * in normalized timespec format in the variable pointed to by @ts.
- */
-void ktime_get_ts(struct timespec *ts)
-{
-	struct timespec tomono;
-	unsigned long seq;
-
-	do {
-		seq = read_seqbegin(&xtime_lock);
-		getnstimeofday(ts);
-		tomono = wall_to_monotonic;
-
-	} while (read_seqretry(&xtime_lock, seq));
-
-	set_normalized_timespec(ts, ts->tv_sec + tomono.tv_sec,
-				ts->tv_nsec + tomono.tv_nsec);
-}
-EXPORT_SYMBOL_GPL(ktime_get_ts);
-
-#endif /* !GENERIC_TIME */
-
 /**
  * ktime_get_real - get the real (wall-) time in ktime_t format
  *
@@ -559,7 +504,6 @@ void __init timekeeping_init(void)
 	}
 	set_normalized_timespec(&wall_to_monotonic,
 				-boot.tv_sec, -boot.tv_nsec);
-	update_xtime_cache(0);
 	total_sleep_time.tv_sec = 0;
 	total_sleep_time.tv_nsec = 0;
 	write_sequnlock_irqrestore(&xtime_lock, flags);
@@ -589,11 +533,10 @@ static int timekeeping_resume(struct sys_device *dev)
 
 	if (timespec_compare(&ts, &timekeeping_suspend_time) > 0) {
 		ts = timespec_sub(ts, timekeeping_suspend_time);
-		xtime = timespec_add_safe(xtime, ts);
+		xtime = timespec_add(xtime, ts);
 		wall_to_monotonic = timespec_sub(wall_to_monotonic, ts);
-		total_sleep_time = timespec_add_safe(total_sleep_time, ts);
+		total_sleep_time = timespec_add(total_sleep_time, ts);
 	}
-	update_xtime_cache(0);
 	/* re-base the last cycle value */
 	timekeeper.clock->cycle_last = timekeeper.clock->read(timekeeper.clock);
 	timekeeper.ntp_error = 0;
@@ -747,6 +690,7 @@ static void timekeeping_adjust(s64 offset)
 static cycle_t logarithmic_accumulation(cycle_t offset, int shift)
 {
 	u64 nsecps = (u64)NSEC_PER_SEC << timekeeper.shift;
+	u64 raw_nsecs;
 
 	/* If the offset is smaller then a shifted interval, do nothing */
 	if (offset < timekeeper.cycle_interval<<shift)
@@ -763,12 +707,15 @@ static cycle_t logarithmic_accumulation(cycle_t offset, int shift)
 		second_overflow();
 	}
 
-	/* Accumulate into raw time */
-	raw_time.tv_nsec += timekeeper.raw_interval << shift;;
-	while (raw_time.tv_nsec >= NSEC_PER_SEC) {
-		raw_time.tv_nsec -= NSEC_PER_SEC;
-		raw_time.tv_sec++;
+	/* Accumulate raw time */
+	raw_nsecs = timekeeper.raw_interval << shift;
+	raw_nsecs += raw_time.tv_nsec;
+	if (raw_nsecs >= NSEC_PER_SEC) {
+		u64 raw_secs = raw_nsecs;
+		raw_nsecs = do_div(raw_secs, NSEC_PER_SEC);
+		raw_time.tv_sec += raw_secs;
 	}
+	raw_time.tv_nsec = raw_nsecs;
 
 	/* Accumulate error between NTP and clock interval */
 	timekeeper.ntp_error += tick_length << shift;
@@ -788,7 +735,6 @@ void update_wall_time(void)
 {
 	struct clocksource *clock;
 	cycle_t offset;
-	u64 nsecs;
 	int shift = 0, maxshift;
 
 	/* Make sure we're fully resumed: */
@@ -796,10 +742,11 @@ void update_wall_time(void)
 		return;
 
 	clock = timekeeper.clock;
-#ifdef CONFIG_GENERIC_TIME
-	offset = (clock->read(clock) - clock->cycle_last) & clock->mask;
-#else
+
+#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
 	offset = timekeeper.cycle_interval;
+#else
+	offset = (clock->read(clock) - clock->cycle_last) & clock->mask;
 #endif
 	timekeeper.xtime_nsec = (s64)xtime.tv_nsec << timekeeper.shift;
 
@@ -847,7 +794,9 @@ void update_wall_time(void)
 		timekeeper.ntp_error += neg << timekeeper.ntp_error_shift;
 	}
 
-	/* store full nanoseconds into xtime after rounding it up and
+
+	/*
+	 * Store full nanoseconds into xtime after rounding it up and
 	 * add the remainder to the error difference.
 	 */
 	xtime.tv_nsec = ((s64) timekeeper.xtime_nsec >> timekeeper.shift) + 1;
@@ -855,11 +804,19 @@ void update_wall_time(void)
 	timekeeper.ntp_error += timekeeper.xtime_nsec <<
 				timekeeper.ntp_error_shift;
 
-	nsecs = clocksource_cyc2ns(offset, timekeeper.mult, timekeeper.shift);
-	update_xtime_cache(nsecs);
+	/*
+	 * Finally, make sure that after the rounding
+	 * xtime.tv_nsec isn't larger than NSEC_PER_SEC
+	 */
+	if (unlikely(xtime.tv_nsec >= NSEC_PER_SEC)) {
+		xtime.tv_nsec -= NSEC_PER_SEC;
+		xtime.tv_sec++;
+		second_overflow();
+	}
 
 	/* check to see if there is a new clocksource to use */
-	update_vsyscall(&xtime, timekeeper.clock, timekeeper.mult);
+	update_vsyscall(&xtime, &wall_to_monotonic, timekeeper.clock,
+			timekeeper.mult);
 }
 
865/** 822/**
@@ -890,19 +847,24 @@ EXPORT_SYMBOL_GPL(getboottime);
  */
 void monotonic_to_bootbased(struct timespec *ts)
 {
-	*ts = timespec_add_safe(*ts, total_sleep_time);
+	*ts = timespec_add(*ts, total_sleep_time);
 }
 EXPORT_SYMBOL_GPL(monotonic_to_bootbased);
 
 unsigned long get_seconds(void)
 {
-	return xtime_cache.tv_sec;
+	return xtime.tv_sec;
 }
 EXPORT_SYMBOL(get_seconds);
 
 struct timespec __current_kernel_time(void)
 {
-	return xtime_cache;
+	return xtime;
+}
+
+struct timespec __get_wall_to_monotonic(void)
+{
+	return wall_to_monotonic;
 }
 
 struct timespec current_kernel_time(void)
@@ -913,7 +875,7 @@ struct timespec current_kernel_time(void)
 	do {
 		seq = read_seqbegin(&xtime_lock);
 
-		now = xtime_cache;
+		now = xtime;
 	} while (read_seqretry(&xtime_lock, seq));
 
 	return now;
@@ -928,7 +890,7 @@ struct timespec get_monotonic_coarse(void)
 	do {
 		seq = read_seqbegin(&xtime_lock);
 
-		now = xtime_cache;
+		now = xtime;
 		mono = wall_to_monotonic;
 	} while (read_seqretry(&xtime_lock, seq));
 
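The raw-time change in logarithmic_accumulation() above is easy to misread because of do_div()'s in-place semantics: do_div(x, base) stores the quotient back into x and returns the remainder, so one 64-bit division replaces the old one-second-at-a-time while loop. A minimal sketch of the same carry logic (foo_carry_raw_nsecs is an illustrative name, not kernel API):

#include <linux/time.h>		/* struct timespec, NSEC_PER_SEC */
#include <asm/div64.h>		/* do_div() */

static void foo_carry_raw_nsecs(struct timespec *raw, u64 nsecs)
{
	nsecs += raw->tv_nsec;
	if (nsecs >= NSEC_PER_SEC) {
		u64 secs = nsecs;

		nsecs = do_div(secs, NSEC_PER_SEC);	/* remainder */
		raw->tv_sec += secs;			/* quotient  */
	}
	raw->tv_nsec = nsecs;
}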
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
index 1a4a7dd7877..ab8f5e33fa9 100644
--- a/kernel/time/timer_list.c
+++ b/kernel/time/timer_list.c
@@ -176,6 +176,7 @@ static void print_cpu(struct seq_file *m, int cpu, u64 now)
 	P_ns(idle_waketime);
 	P_ns(idle_exittime);
 	P_ns(idle_sleeptime);
+	P_ns(iowait_sleeptime);
 	P(last_jiffies);
 	P(next_jiffies);
 	P_ns(idle_expires);