author	Daniel Vetter <daniel.vetter@ffwll.ch>	2013-03-19 04:47:30 -0400
committer	Daniel Vetter <daniel.vetter@ffwll.ch>	2013-03-19 04:47:30 -0400
commit	0d4a42f6bd298e826620585e766a154ab460617a (patch)
tree	406d8f7778691d858dbe3e48e4bbb10e99c0a58a /kernel/time
parent	d62b4892f3d9f7dd2002e5309be10719d6805b0f (diff)
parent	a937536b868b8369b98967929045f1df54234323 (diff)
Merge tag 'v3.9-rc3' into drm-intel-next-queued
Backmerge so that I can merge Imre Deak's coalesced sg entries fixes,
which depend upon the new for_each_sg_page introduced in

  commit a321e91b6d73ed011ffceed384c40d2785cf723b
  Author: Imre Deak <imre.deak@intel.com>
  Date:   Wed Feb 27 17:02:56 2013 -0800

      lib/scatterlist: add simple page iterator

The merge itself is just two trivial conflicts:

Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
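For context, for_each_sg_page() walks every page of a scatterlist regardless
of how entries were coalesced. A minimal sketch of its use, assuming the
3.9-era lib/scatterlist API (the iterator is real; the function around it is
illustrative only):

#include <linux/scatterlist.h>

/* counts the pages behind a (possibly coalesced) sg_table */
static unsigned int example_count_pages(struct sg_table *st)
{
	struct sg_page_iter piter;
	unsigned int n = 0;

	for_each_sg_page(st->sgl, &piter, st->nents, 0)
		n++;

	return n;
}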
Diffstat (limited to 'kernel/time')
-rw-r--r--	kernel/time/Kconfig		 9
-rw-r--r--	kernel/time/clockevents.c	 1
-rw-r--r--	kernel/time/ntp.c		48
-rw-r--r--	kernel/time/tick-broadcast.c	38
-rw-r--r--	kernel/time/tick-sched.c	14
-rw-r--r--	kernel/time/timekeeping.c	71
6 files changed, 142 insertions(+), 39 deletions(-)
diff --git a/kernel/time/Kconfig b/kernel/time/Kconfig
index 8601f0db1261..24510d84efd7 100644
--- a/kernel/time/Kconfig
+++ b/kernel/time/Kconfig
@@ -12,6 +12,11 @@ config CLOCKSOURCE_WATCHDOG
 config ARCH_CLOCKSOURCE_DATA
 	bool
 
+# Platforms has a persistent clock
+config ALWAYS_USE_PERSISTENT_CLOCK
+	bool
+	default n
+
 # Timekeeping vsyscall support
 config GENERIC_TIME_VSYSCALL
 	bool
@@ -38,6 +43,10 @@ config GENERIC_CLOCKEVENTS_BUILD
 	default y
 	depends on GENERIC_CLOCKEVENTS
 
+# Architecture can handle broadcast in a driver-agnostic way
+config ARCH_HAS_TICK_BROADCAST
+	bool
+
 # Clockevents broadcasting infrastructure
 config GENERIC_CLOCKEVENTS_BROADCAST
 	bool
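ARCH_HAS_TICK_BROADCAST advertises that the architecture supplies its own
tick_broadcast() implementation, which the core then installs as the default
dev->broadcast hook (see the tick-broadcast.c changes below). A hedged
arch-side sketch, where arch_send_timer_ipi() is a made-up stand-in for
whatever IPI primitive the port has:

#include <linux/cpumask.h>

/* hypothetical arch implementation enabled by ARCH_HAS_TICK_BROADCAST */
void tick_broadcast(const struct cpumask *mask)
{
	arch_send_timer_ipi(mask);	/* made-up IPI helper */
}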
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 30b6de0d977c..c6d6400ee137 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -339,6 +339,7 @@ void clockevents_config_and_register(struct clock_event_device *dev,
 	clockevents_config(dev, freq);
 	clockevents_register_device(dev);
 }
+EXPORT_SYMBOL_GPL(clockevents_config_and_register);
 
 /**
  * clockevents_update_freq - Update frequency and reprogram a clock event device.
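The new export lets modular clockevent drivers configure and register a
device in one call. A sketch of such a driver; every "example_" name and all
numeric parameters (1 MHz rate, delta limits) are made up:

#include <linux/clockchips.h>

static int example_set_next_event(unsigned long delta,
				  struct clock_event_device *evt)
{
	/* program the hypothetical hardware comparator here */
	return 0;
}

static void example_set_mode(enum clock_event_mode mode,
			     struct clock_event_device *evt)
{
	/* switch the hypothetical hardware between modes here */
}

static struct clock_event_device example_evt = {
	.name		= "example-timer",
	.features	= CLOCK_EVT_FEAT_ONESHOT,
	.rating		= 300,
	.set_next_event	= example_set_next_event,
	.set_mode	= example_set_mode,
};

static int __init example_timer_init(void)
{
	/* derives mult/shift from the clock rate, then registers */
	clockevents_config_and_register(&example_evt, 1000000, 0xf, 0x7fffffff);
	return 0;
}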
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index 24174b4d669b..072bb066bb7d 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -15,6 +15,7 @@
 #include <linux/time.h>
 #include <linux/mm.h>
 #include <linux/module.h>
+#include <linux/rtc.h>
 
 #include "tick-internal.h"
 
@@ -22,7 +23,7 @@
  * NTP timekeeping variables:
  */
 
-DEFINE_SPINLOCK(ntp_lock);
+DEFINE_RAW_SPINLOCK(ntp_lock);
 
 
 /* USER_HZ period (usecs): */
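On the spin_lock to raw_spin_lock conversion above and throughout this file:
a raw spinlock always busy-waits, even on PREEMPT_RT where ordinary spinlocks
become sleeping locks, and ntp_lock is taken from hard-interrupt timekeeping
paths. The pattern as a standalone sketch (example_lock and example_update()
are hypothetical):

#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(example_lock);

static void example_update(void)
{
	unsigned long flags;

	/* disables local interrupts and spins; never sleeps, even on RT */
	raw_spin_lock_irqsave(&example_lock, flags);
	/* ... update shared NTP state ... */
	raw_spin_unlock_irqrestore(&example_lock, flags);
}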
@@ -347,7 +348,7 @@ void ntp_clear(void)
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&ntp_lock, flags);
+	raw_spin_lock_irqsave(&ntp_lock, flags);
 
 	time_adjust = 0;	/* stop active adjtime() */
 	time_status |= STA_UNSYNC;
@@ -361,7 +362,7 @@ void ntp_clear(void)
 
 	/* Clear PPS state variables */
 	pps_clear();
-	spin_unlock_irqrestore(&ntp_lock, flags);
+	raw_spin_unlock_irqrestore(&ntp_lock, flags);
 
 }
 
@@ -371,9 +372,9 @@ u64 ntp_tick_length(void)
 	unsigned long flags;
 	s64 ret;
 
-	spin_lock_irqsave(&ntp_lock, flags);
+	raw_spin_lock_irqsave(&ntp_lock, flags);
 	ret = tick_length;
-	spin_unlock_irqrestore(&ntp_lock, flags);
+	raw_spin_unlock_irqrestore(&ntp_lock, flags);
 	return ret;
 }
 
@@ -394,7 +395,7 @@ int second_overflow(unsigned long secs)
 	int leap = 0;
 	unsigned long flags;
 
-	spin_lock_irqsave(&ntp_lock, flags);
+	raw_spin_lock_irqsave(&ntp_lock, flags);
 
 	/*
 	 * Leap second processing. If in leap-insert state at the end of the
@@ -478,13 +479,12 @@ int second_overflow(unsigned long secs)
 	time_adjust = 0;
 
 out:
-	spin_unlock_irqrestore(&ntp_lock, flags);
+	raw_spin_unlock_irqrestore(&ntp_lock, flags);
 
 	return leap;
 }
 
-#ifdef CONFIG_GENERIC_CMOS_UPDATE
-
+#if defined(CONFIG_GENERIC_CMOS_UPDATE) || defined(CONFIG_RTC_SYSTOHC)
 static void sync_cmos_clock(struct work_struct *work);
 
 static DECLARE_DELAYED_WORK(sync_cmos_work, sync_cmos_clock);
@@ -510,14 +510,26 @@ static void sync_cmos_clock(struct work_struct *work)
 	}
 
 	getnstimeofday(&now);
-	if (abs(now.tv_nsec - (NSEC_PER_SEC / 2)) <= tick_nsec / 2)
-		fail = update_persistent_clock(now);
+	if (abs(now.tv_nsec - (NSEC_PER_SEC / 2)) <= tick_nsec / 2) {
+		struct timespec adjust = now;
+
+		fail = -ENODEV;
+		if (persistent_clock_is_local)
+			adjust.tv_sec -= (sys_tz.tz_minuteswest * 60);
+#ifdef CONFIG_GENERIC_CMOS_UPDATE
+		fail = update_persistent_clock(adjust);
+#endif
+#ifdef CONFIG_RTC_SYSTOHC
+		if (fail == -ENODEV)
+			fail = rtc_set_ntp_time(adjust);
+#endif
+	}
 
 	next.tv_nsec = (NSEC_PER_SEC / 2) - now.tv_nsec - (TICK_NSEC / 2);
 	if (next.tv_nsec <= 0)
 		next.tv_nsec += NSEC_PER_SEC;
 
-	if (!fail)
+	if (!fail || fail == -ENODEV)
 		next.tv_sec = 659;
 	else
 		next.tv_sec = 0;
@@ -660,7 +672,7 @@ int do_adjtimex(struct timex *txc)
 
 	getnstimeofday(&ts);
 
-	spin_lock_irq(&ntp_lock);
+	raw_spin_lock_irq(&ntp_lock);
 
 	if (txc->modes & ADJ_ADJTIME) {
 		long save_adjust = time_adjust;
@@ -702,7 +714,7 @@ int do_adjtimex(struct timex *txc)
 	/* fill PPS status fields */
 	pps_fill_timex(txc);
 
-	spin_unlock_irq(&ntp_lock);
+	raw_spin_unlock_irq(&ntp_lock);
 
 	txc->time.tv_sec = ts.tv_sec;
 	txc->time.tv_usec = ts.tv_nsec;
@@ -900,7 +912,7 @@ void hardpps(const struct timespec *phase_ts, const struct timespec *raw_ts)
 
 	pts_norm = pps_normalize_ts(*phase_ts);
 
-	spin_lock_irqsave(&ntp_lock, flags);
+	raw_spin_lock_irqsave(&ntp_lock, flags);
 
 	/* clear the error bits, they will be set again if needed */
 	time_status &= ~(STA_PPSJITTER | STA_PPSWANDER | STA_PPSERROR);
@@ -913,7 +925,7 @@ void hardpps(const struct timespec *phase_ts, const struct timespec *raw_ts)
 	 * just start the frequency interval */
 	if (unlikely(pps_fbase.tv_sec == 0)) {
 		pps_fbase = *raw_ts;
-		spin_unlock_irqrestore(&ntp_lock, flags);
+		raw_spin_unlock_irqrestore(&ntp_lock, flags);
 		return;
 	}
 
@@ -928,7 +940,7 @@ void hardpps(const struct timespec *phase_ts, const struct timespec *raw_ts)
 		time_status |= STA_PPSJITTER;
 		/* restart the frequency calibration interval */
 		pps_fbase = *raw_ts;
-		spin_unlock_irqrestore(&ntp_lock, flags);
+		raw_spin_unlock_irqrestore(&ntp_lock, flags);
 		pr_err("hardpps: PPSJITTER: bad pulse\n");
 		return;
 	}
@@ -945,7 +957,7 @@ void hardpps(const struct timespec *phase_ts, const struct timespec *raw_ts)
 
 	hardpps_update_phase(pts_norm.nsec);
 
-	spin_unlock_irqrestore(&ntp_lock, flags);
+	raw_spin_unlock_irqrestore(&ntp_lock, flags);
 }
 EXPORT_SYMBOL(hardpps);
 
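The sync_cmos_clock() change above, condensed: try the architecture's
persistent clock first, then fall back to the RTC framework. This standalone
helper is a sketch for illustration (example_sync_hw_clock() is not kernel
code; the calls inside it are the ones the diff uses):

#include <linux/rtc.h>
#include <linux/time.h>

static int example_sync_hw_clock(struct timespec now)
{
	int err = -ENODEV;

	if (persistent_clock_is_local)
		now.tv_sec -= (sys_tz.tz_minuteswest * 60);
#ifdef CONFIG_GENERIC_CMOS_UPDATE
	err = update_persistent_clock(now);
#endif
#ifdef CONFIG_RTC_SYSTOHC
	if (err == -ENODEV)		/* no CMOS update hook: try the RTC */
		err = rtc_set_ntp_time(now);
#endif
	/* -ENODEV means "nothing to sync": retry in 659 s, not immediately */
	return err;
}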
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index f113755695e2..2fb8cb88df8d 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -18,6 +18,7 @@
 #include <linux/percpu.h>
 #include <linux/profile.h>
 #include <linux/sched.h>
+#include <linux/smp.h>
 
 #include "tick-internal.h"
 
@@ -86,6 +87,22 @@ int tick_is_broadcast_device(struct clock_event_device *dev)
 	return (dev && tick_broadcast_device.evtdev == dev);
 }
 
+static void err_broadcast(const struct cpumask *mask)
+{
+	pr_crit_once("Failed to broadcast timer tick. Some CPUs may be unresponsive.\n");
+}
+
+static void tick_device_setup_broadcast_func(struct clock_event_device *dev)
+{
+	if (!dev->broadcast)
+		dev->broadcast = tick_broadcast;
+	if (!dev->broadcast) {
+		pr_warn_once("%s depends on broadcast, but no broadcast function available\n",
+			     dev->name);
+		dev->broadcast = err_broadcast;
+	}
+}
+
 /*
  * Check, if the device is disfunctional and a place holder, which
  * needs to be handled by the broadcast device.
@@ -105,6 +122,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
 	 */
 	if (!tick_device_is_functional(dev)) {
 		dev->event_handler = tick_handle_periodic;
+		tick_device_setup_broadcast_func(dev);
 		cpumask_set_cpu(cpu, tick_get_broadcast_mask());
 		tick_broadcast_start_periodic(tick_broadcast_device.evtdev);
 		ret = 1;
@@ -116,15 +134,33 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
 		 */
 		if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
 			int cpu = smp_processor_id();
-
 			cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
 			tick_broadcast_clear_oneshot(cpu);
+		} else {
+			tick_device_setup_broadcast_func(dev);
 		}
 	}
 	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
 	return ret;
 }
 
+#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
+int tick_receive_broadcast(void)
+{
+	struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
+	struct clock_event_device *evt = td->evtdev;
+
+	if (!evt)
+		return -ENODEV;
+
+	if (!evt->event_handler)
+		return -EINVAL;
+
+	evt->event_handler(evt);
+	return 0;
+}
+#endif
+
 /*
  * Broadcast the event to the cpus, which are set in the mask (mangled).
  */
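On the receiving end, the new tick_receive_broadcast() above gives an
architecture's broadcast-IPI handler a single entry point that runs the local
tick event handler. A sketch with a made-up handler name:

/* hypothetical arch dispatch for the timer-broadcast IPI */
void example_handle_timer_ipi(void)
{
	if (tick_receive_broadcast())
		pr_warn_once("broadcast tick arrived with no usable event device\n");
}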
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index d58e552d9fd1..a19a39952c1b 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -20,6 +20,7 @@
 #include <linux/profile.h>
 #include <linux/sched.h>
 #include <linux/module.h>
+#include <linux/irq_work.h>
 
 #include <asm/irq_regs.h>
 
@@ -28,7 +29,7 @@
 /*
  * Per cpu nohz control structure
  */
-static DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched);
+DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched);
 
 /*
  * The time, when the last jiffy update happened. Protected by jiffies_lock.
@@ -331,8 +332,8 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
 		time_delta = timekeeping_max_deferment();
 	} while (read_seqretry(&jiffies_lock, seq));
 
-	if (rcu_needs_cpu(cpu, &rcu_delta_jiffies) || printk_needs_cpu(cpu) ||
-	    arch_needs_cpu(cpu)) {
+	if (rcu_needs_cpu(cpu, &rcu_delta_jiffies) ||
+	    arch_needs_cpu(cpu) || irq_work_needs_cpu()) {
 		next_jiffies = last_jiffies + 1;
 		delta_jiffies = 1;
 	} else {
@@ -553,6 +554,7 @@ void tick_nohz_idle_enter(void)
 
 	local_irq_enable();
 }
+EXPORT_SYMBOL_GPL(tick_nohz_idle_enter);
 
 /**
  * tick_nohz_irq_exit - update next tick event from interrupt exit
@@ -631,8 +633,11 @@ static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now)
 
 static void tick_nohz_account_idle_ticks(struct tick_sched *ts)
 {
-#ifndef CONFIG_VIRT_CPU_ACCOUNTING
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 	unsigned long ticks;
+
+	if (vtime_accounting_enabled())
+		return;
 	/*
 	 * We stopped the tick in idle. Update process times would miss the
 	 * time we slept as update_process_times does only a 1 tick
@@ -681,6 +686,7 @@ void tick_nohz_idle_exit(void)
 
 	local_irq_enable();
 }
+EXPORT_SYMBOL_GPL(tick_nohz_idle_exit);
 
 static int tick_nohz_reprogram(struct tick_sched *ts, ktime_t now)
 {
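The two EXPORT_SYMBOL_GPL additions let idle code outside the core (for
example, modular arch or cpuidle glue) bracket its low-power wait with the
nohz hooks. A sketch; example_enter_lowpower() is hypothetical:

#include <linux/sched.h>
#include <linux/tick.h>

static void example_idle_loop(void)
{
	while (!need_resched()) {
		tick_nohz_idle_enter();		/* stop the periodic tick */
		example_enter_lowpower();	/* hypothetical wait-for-interrupt */
		tick_nohz_idle_exit();		/* restart tick, account idle time */
	}
}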
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index cbc6acb0db3f..9a0bc98fbe1d 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -29,6 +29,9 @@ static struct timekeeper timekeeper;
 /* flag for if timekeeping is suspended */
 int __read_mostly timekeeping_suspended;
 
+/* Flag for if there is a persistent clock on this platform */
+bool __read_mostly persistent_clock_exist = false;
+
 static inline void tk_normalize_xtime(struct timekeeper *tk)
 {
 	while (tk->xtime_nsec >= ((u64)NSEC_PER_SEC << tk->shift)) {
@@ -135,6 +138,20 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
 }
 
 /* Timekeeper helper functions. */
+
+#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
+u32 (*arch_gettimeoffset)(void);
+
+u32 get_arch_timeoffset(void)
+{
+	if (likely(arch_gettimeoffset))
+		return arch_gettimeoffset();
+	return 0;
+}
+#else
+static inline u32 get_arch_timeoffset(void) { return 0; }
+#endif
+
 static inline s64 timekeeping_get_ns(struct timekeeper *tk)
 {
 	cycle_t cycle_now, cycle_delta;
@@ -151,8 +168,8 @@ static inline s64 timekeeping_get_ns(struct timekeeper *tk)
 	nsec = cycle_delta * tk->mult + tk->xtime_nsec;
 	nsec >>= tk->shift;
 
-	/* If arch requires, add in gettimeoffset() */
-	return nsec + arch_gettimeoffset();
+	/* If arch requires, add in get_arch_timeoffset() */
+	return nsec + get_arch_timeoffset();
 }
 
 static inline s64 timekeeping_get_ns_raw(struct timekeeper *tk)
@@ -171,8 +188,8 @@ static inline s64 timekeeping_get_ns_raw(struct timekeeper *tk)
 	/* convert delta to nanoseconds. */
 	nsec = clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift);
 
-	/* If arch requires, add in gettimeoffset() */
-	return nsec + arch_gettimeoffset();
+	/* If arch requires, add in get_arch_timeoffset() */
+	return nsec + get_arch_timeoffset();
 }
 
 static RAW_NOTIFIER_HEAD(pvclock_gtod_chain);
@@ -254,8 +271,8 @@ static void timekeeping_forward_now(struct timekeeper *tk)
 
 	tk->xtime_nsec += cycle_delta * tk->mult;
 
-	/* If arch requires, add in gettimeoffset() */
-	tk->xtime_nsec += (u64)arch_gettimeoffset() << tk->shift;
+	/* If arch requires, add in get_arch_timeoffset() */
+	tk->xtime_nsec += (u64)get_arch_timeoffset() << tk->shift;
 
 	tk_normalize_xtime(tk);
 
@@ -264,19 +281,18 @@ static void timekeeping_forward_now(struct timekeeper *tk)
 }
 
 /**
- * getnstimeofday - Returns the time of day in a timespec
+ * __getnstimeofday - Returns the time of day in a timespec.
  * @ts:		pointer to the timespec to be set
  *
- * Returns the time of day in a timespec.
+ * Updates the time of day in the timespec.
+ * Returns 0 on success, or -ve when suspended (timespec will be undefined).
  */
-void getnstimeofday(struct timespec *ts)
+int __getnstimeofday(struct timespec *ts)
 {
 	struct timekeeper *tk = &timekeeper;
 	unsigned long seq;
 	s64 nsecs = 0;
 
-	WARN_ON(timekeeping_suspended);
-
 	do {
 		seq = read_seqbegin(&tk->lock);
 
@@ -287,6 +303,26 @@ void getnstimeofday(struct timespec *ts)
 
 	ts->tv_nsec = 0;
 	timespec_add_ns(ts, nsecs);
+
+	/*
+	 * Do not bail out early, in case there were callers still using
+	 * the value, even in the face of the WARN_ON.
+	 */
+	if (unlikely(timekeeping_suspended))
+		return -EAGAIN;
+	return 0;
+}
+EXPORT_SYMBOL(__getnstimeofday);
+
+/**
+ * getnstimeofday - Returns the time of day in a timespec.
+ * @ts:		pointer to the timespec to be set
+ *
+ * Returns the time of day in a timespec (WARN if suspended).
+ */
+void getnstimeofday(struct timespec *ts)
+{
+	WARN_ON(__getnstimeofday(ts));
 }
 EXPORT_SYMBOL(getnstimeofday);
 
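Callers that may legitimately run while timekeeping is suspended can now use
the non-WARNing variant and handle the -EAGAIN themselves. A sketch of such a
caller (example_stamp() is hypothetical):

static void example_stamp(struct timespec *ts)
{
	if (__getnstimeofday(ts) < 0) {
		/* suspended: the value is undefined, substitute zero */
		ts->tv_sec = 0;
		ts->tv_nsec = 0;
	}
}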
@@ -640,12 +676,14 @@ void __init timekeeping_init(void)
 	struct timespec now, boot, tmp;
 
 	read_persistent_clock(&now);
+
 	if (!timespec_valid_strict(&now)) {
 		pr_warn("WARNING: Persistent clock returned invalid value!\n"
 			"         Check your CMOS/BIOS settings.\n");
 		now.tv_sec = 0;
 		now.tv_nsec = 0;
-	}
+	} else if (now.tv_sec || now.tv_nsec)
+		persistent_clock_exist = true;
 
 	read_boot_clock(&boot);
 	if (!timespec_valid_strict(&boot)) {
@@ -718,11 +756,12 @@ void timekeeping_inject_sleeptime(struct timespec *delta)
 {
 	struct timekeeper *tk = &timekeeper;
 	unsigned long flags;
-	struct timespec ts;
 
-	/* Make sure we don't set the clock twice */
-	read_persistent_clock(&ts);
-	if (!(ts.tv_sec == 0 && ts.tv_nsec == 0))
+	/*
+	 * Make sure we don't set the clock twice, as timekeeping_resume()
+	 * already did it
+	 */
+	if (has_persistent_clock())
 		return;
 
 	write_seqlock_irqsave(&tk->lock, flags);
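has_persistent_clock(), used above, is not defined anywhere in this diff;
given the persistent_clock_exist flag added to timekeeping.c, it is
presumably a trivial inline along these lines (a sketch, not quoted from the
patch):

extern bool persistent_clock_exist;

static inline bool has_persistent_clock(void)
{
	return persistent_clock_exist;
}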