author    Linus Torvalds <torvalds@linux-foundation.org>  2014-08-05 20:46:42 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2014-08-05 20:46:42 -0400
commit    e7fda6c4c3c1a7d6996dd75fd84670fa0b5d448f (patch)
tree      daa51c16462c318b890acf7f01fba5827275dd74 /include
parent    08d69a25714429850cf9ef71f22d8cdc9189d93f (diff)
parent    953dec21aed4038464fec02f96a2f1b8701a5bce (diff)
Merge branch 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull timer and time updates from Thomas Gleixner:
 "A rather large update of timers, timekeeping & co

   - Core timekeeping code is year-2038 safe now for 32bit machines.
     Now we just need to fix all in kernel users and the gazillion of
     user space interfaces which rely on timespec/timeval :)

   - Better cache layout for the timekeeping internal data structures.

   - Proper nanosecond based interfaces for in kernel users.

   - Tree wide cleanup of code which wants nanoseconds but does hoops
     and loops to convert back and forth from timespecs.  Some of it
     definitely belongs into the ugly code museum.

   - Consolidation of the timekeeping interface zoo.

   - A fast NMI safe accessor to clock monotonic for tracing.  This is
     a long standing request to support correlated user/kernel space
     traces.  With proper NTP frequency correction it's also suitable
     for correlation of traces across separate machines.

   - Checkpoint/restart support for timerfd.

   - A few NOHZ[_FULL] improvements in the [hr]timer code.

   - Code move from kernel to kernel/time of all time* related code.

   - New clocksource/event drivers from the ARM universe.  I'm really
     impressed that despite an architected timer in the newer chips SoC
     manufacturers insist on inventing new and differently broken SoC
     specific timers.

 [ Ed. "Impressed"?  I don't think that word means what you think it means ]

   - Another round of code move from arch to drivers.  Looks like most
     of the legacy mess in ARM regarding timers is sorted out except
     for a few obnoxious strongholds.

   - The usual updates and fixlets all over the place"

* 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (114 commits)
  timekeeping: Fixup typo in update_vsyscall_old definition
  clocksource: document some basic timekeeping concepts
  timekeeping: Use cached ntp_tick_length when accumulating error
  timekeeping: Rework frequency adjustments to work better w/ nohz
  timekeeping: Minor fixup for timespec64->timespec assignment
  ftrace: Provide trace clocks monotonic
  timekeeping: Provide fast and NMI safe access to CLOCK_MONOTONIC
  seqcount: Add raw_write_seqcount_latch()
  seqcount: Provide raw_read_seqcount()
  timekeeping: Use tk_read_base as argument for timekeeping_get_ns()
  timekeeping: Create struct tk_read_base and use it in struct timekeeper
  timekeeping: Restructure the timekeeper some more
  clocksource: Get rid of cycle_last
  clocksource: Move cycle_last validation to core code
  clocksource: Make delta calculation a function
  wireless: ath9k: Get rid of timespec conversions
  drm: vmwgfx: Use nsec based interfaces
  drm: i915: Use nsec based interfaces
  timekeeping: Provide ktime_get_raw()
  hangcheck-timer: Use ktime_get_ns()
  ...
Diffstat (limited to 'include')
-rw-r--r--  include/clocksource/pxa.h             18
-rw-r--r--  include/linux/clocksource.h            2
-rw-r--r--  include/linux/hrtimer.h               16
-rw-r--r--  include/linux/iio/iio.h                9
-rw-r--r--  include/linux/io.h                     2
-rw-r--r--  include/linux/ktime.h                228
-rw-r--r--  include/linux/mlx5/driver.h            4
-rw-r--r--  include/linux/of_address.h            11
-rw-r--r--  include/linux/sched.h                  8
-rw-r--r--  include/linux/seqlock.h               27
-rw-r--r--  include/linux/sh_timer.h               5
-rw-r--r--  include/linux/time.h                  65
-rw-r--r--  include/linux/time64.h               190
-rw-r--r--  include/linux/timekeeper_internal.h  150
-rw-r--r--  include/linux/timekeeping.h          209
-rw-r--r--  include/linux/timerfd.h                5
16 files changed, 601 insertions, 348 deletions
diff --git a/include/clocksource/pxa.h b/include/clocksource/pxa.h
new file mode 100644
index 000000000000..1efbe5a66958
--- /dev/null
+++ b/include/clocksource/pxa.h
@@ -0,0 +1,18 @@
1/*
2 * PXA clocksource, clockevents, and OST interrupt handlers.
3 *
4 * Copyright (C) 2014 Robert Jarzmik
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; version 2 of the License.
9 *
10 */
11
12#ifndef _CLOCKSOURCE_PXA_H
13#define _CLOCKSOURCE_PXA_H
14
15extern void pxa_timer_nodt_init(int irq, void __iomem *base,
16 unsigned long clock_tick_rate);
17
18#endif
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index a16b497d5159..653f0e2b6ca9 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -162,7 +162,6 @@ extern u64 timecounter_cyc2time(struct timecounter *tc,
162 * @archdata: arch-specific data 162 * @archdata: arch-specific data
163 * @suspend: suspend function for the clocksource, if necessary 163 * @suspend: suspend function for the clocksource, if necessary
164 * @resume: resume function for the clocksource, if necessary 164 * @resume: resume function for the clocksource, if necessary
165 * @cycle_last: most recent cycle counter value seen by ::read()
166 * @owner: module reference, must be set by clocksource in modules 165 * @owner: module reference, must be set by clocksource in modules
167 */ 166 */
168struct clocksource { 167struct clocksource {
@@ -171,7 +170,6 @@ struct clocksource {
171 * clocksource itself is cacheline aligned. 170 * clocksource itself is cacheline aligned.
172 */ 171 */
173 cycle_t (*read)(struct clocksource *cs); 172 cycle_t (*read)(struct clocksource *cs);
174 cycle_t cycle_last;
175 cycle_t mask; 173 cycle_t mask;
176 u32 mult; 174 u32 mult;
177 u32 shift; 175 u32 shift;
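
With cycle_last gone from struct clocksource, the timekeeping core keeps the last readout itself and derives the elapsed cycles on its own. A minimal sketch of that masked delta calculation, with an illustrative name (the series adds a comparable clocksource_delta() helper on the kernel/time side):

static inline u64 example_clocksource_delta(u64 now, u64 last, u64 mask)
{
	/* Two's-complement subtraction plus masking also copes with
	 * counters narrower than 64 bit that wrapped since 'last'. */
	return (now - last) & mask;
}
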
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index e7a8d3fa91d5..a036d058a249 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -165,6 +165,7 @@ enum hrtimer_base_type {
165 * struct hrtimer_cpu_base - the per cpu clock bases 165 * struct hrtimer_cpu_base - the per cpu clock bases
166 * @lock: lock protecting the base and associated clock bases 166 * @lock: lock protecting the base and associated clock bases
167 * and timers 167 * and timers
168 * @cpu: cpu number
168 * @active_bases: Bitfield to mark bases with active timers 169 * @active_bases: Bitfield to mark bases with active timers
169 * @clock_was_set: Indicates that clock was set from irq context. 170 * @clock_was_set: Indicates that clock was set from irq context.
170 * @expires_next: absolute time of the next event which was scheduled 171 * @expires_next: absolute time of the next event which was scheduled
@@ -179,6 +180,7 @@ enum hrtimer_base_type {
179 */ 180 */
180struct hrtimer_cpu_base { 181struct hrtimer_cpu_base {
181 raw_spinlock_t lock; 182 raw_spinlock_t lock;
183 unsigned int cpu;
182 unsigned int active_bases; 184 unsigned int active_bases;
183 unsigned int clock_was_set; 185 unsigned int clock_was_set;
184#ifdef CONFIG_HIGH_RES_TIMERS 186#ifdef CONFIG_HIGH_RES_TIMERS
@@ -324,14 +326,6 @@ static inline void timerfd_clock_was_set(void) { }
324#endif 326#endif
325extern void hrtimers_resume(void); 327extern void hrtimers_resume(void);
326 328
327extern ktime_t ktime_get(void);
328extern ktime_t ktime_get_real(void);
329extern ktime_t ktime_get_boottime(void);
330extern ktime_t ktime_get_monotonic_offset(void);
331extern ktime_t ktime_get_clocktai(void);
332extern ktime_t ktime_get_update_offsets(ktime_t *offs_real, ktime_t *offs_boot,
333 ktime_t *offs_tai);
334
335DECLARE_PER_CPU(struct tick_device, tick_cpu_device); 329DECLARE_PER_CPU(struct tick_device, tick_cpu_device);
336 330
337 331
@@ -452,12 +446,6 @@ extern void hrtimer_run_pending(void);
452/* Bootup initialization: */ 446/* Bootup initialization: */
453extern void __init hrtimers_init(void); 447extern void __init hrtimers_init(void);
454 448
455#if BITS_PER_LONG < 64
456extern u64 ktime_divns(const ktime_t kt, s64 div);
457#else /* BITS_PER_LONG < 64 */
458# define ktime_divns(kt, div) (u64)((kt).tv64 / (div))
459#endif
460
461/* Show pending timers: */ 449/* Show pending timers: */
462extern void sysrq_timer_list_show(void); 450extern void sysrq_timer_list_show(void);
463 451
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
index ccde91725f98..15dc6bc2bdd2 100644
--- a/include/linux/iio/iio.h
+++ b/include/linux/iio/iio.h
@@ -277,14 +277,7 @@ static inline bool iio_channel_has_info(const struct iio_chan_spec *chan,
277 **/ 277 **/
278static inline s64 iio_get_time_ns(void) 278static inline s64 iio_get_time_ns(void)
279{ 279{
280 struct timespec ts; 280 return ktime_get_real_ns();
281 /*
282 * calls getnstimeofday.
283 * If hrtimers then up to ns accurate, if not microsecond.
284 */
285 ktime_get_real_ts(&ts);
286
287 return timespec_to_ns(&ts);
288} 281}
289 282
290/* Device operating modes */ 283/* Device operating modes */
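
The helper now boils down to ktime_get_real_ns(), so a driver's timestamp path needs no timespec round trip. A hedged sketch of typical use (device and event code are placeholders):

static void example_report_event(struct iio_dev *indio_dev, u64 event_code)
{
	/* one call instead of ktime_get_real_ts() plus timespec_to_ns() */
	iio_push_event(indio_dev, event_code, iio_get_time_ns());
}
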
diff --git a/include/linux/io.h b/include/linux/io.h
index b76e6e545806..d5fc9b8d8b03 100644
--- a/include/linux/io.h
+++ b/include/linux/io.h
@@ -58,6 +58,8 @@ static inline void devm_ioport_unmap(struct device *dev, void __iomem *addr)
58} 58}
59#endif 59#endif
60 60
61#define IOMEM_ERR_PTR(err) (__force void __iomem *)ERR_PTR(err)
62
61void __iomem *devm_ioremap(struct device *dev, resource_size_t offset, 63void __iomem *devm_ioremap(struct device *dev, resource_size_t offset,
62 unsigned long size); 64 unsigned long size);
63void __iomem *devm_ioremap_nocache(struct device *dev, resource_size_t offset, 65void __iomem *devm_ioremap_nocache(struct device *dev, resource_size_t offset,
diff --git a/include/linux/ktime.h b/include/linux/ktime.h
index de9e46e6bcc9..c9d645ad98ff 100644
--- a/include/linux/ktime.h
+++ b/include/linux/ktime.h
@@ -27,43 +27,19 @@
27/* 27/*
28 * ktime_t: 28 * ktime_t:
29 * 29 *
30 * On 64-bit CPUs a single 64-bit variable is used to store the hrtimers 30 * A single 64-bit variable is used to store the hrtimers
31 * internal representation of time values in scalar nanoseconds. The 31 * internal representation of time values in scalar nanoseconds. The
32 * design plays out best on 64-bit CPUs, where most conversions are 32 * design plays out best on 64-bit CPUs, where most conversions are
33 * NOPs and most arithmetic ktime_t operations are plain arithmetic 33 * NOPs and most arithmetic ktime_t operations are plain arithmetic
34 * operations. 34 * operations.
35 * 35 *
36 * On 32-bit CPUs an optimized representation of the timespec structure
37 * is used to avoid expensive conversions from and to timespecs. The
38 * endian-aware order of the tv struct members is chosen to allow
39 * mathematical operations on the tv64 member of the union too, which
40 * for certain operations produces better code.
41 *
42 * For architectures with efficient support for 64/32-bit conversions the
43 * plain scalar nanosecond based representation can be selected by the
44 * config switch CONFIG_KTIME_SCALAR.
45 */ 36 */
46union ktime { 37union ktime {
47 s64 tv64; 38 s64 tv64;
48#if BITS_PER_LONG != 64 && !defined(CONFIG_KTIME_SCALAR)
49 struct {
50# ifdef __BIG_ENDIAN
51 s32 sec, nsec;
52# else
53 s32 nsec, sec;
54# endif
55 } tv;
56#endif
57}; 39};
58 40
59typedef union ktime ktime_t; /* Kill this */ 41typedef union ktime ktime_t; /* Kill this */
60 42
61/*
62 * ktime_t definitions when using the 64-bit scalar representation:
63 */
64
65#if (BITS_PER_LONG == 64) || defined(CONFIG_KTIME_SCALAR)
66
67/** 43/**
68 * ktime_set - Set a ktime_t variable from a seconds/nanoseconds value 44 * ktime_set - Set a ktime_t variable from a seconds/nanoseconds value
69 * @secs: seconds to set 45 * @secs: seconds to set
@@ -71,13 +47,12 @@ typedef union ktime ktime_t; /* Kill this */
71 * 47 *
72 * Return: The ktime_t representation of the value. 48 * Return: The ktime_t representation of the value.
73 */ 49 */
74static inline ktime_t ktime_set(const long secs, const unsigned long nsecs) 50static inline ktime_t ktime_set(const s64 secs, const unsigned long nsecs)
75{ 51{
76#if (BITS_PER_LONG == 64)
77 if (unlikely(secs >= KTIME_SEC_MAX)) 52 if (unlikely(secs >= KTIME_SEC_MAX))
78 return (ktime_t){ .tv64 = KTIME_MAX }; 53 return (ktime_t){ .tv64 = KTIME_MAX };
79#endif 54
80 return (ktime_t) { .tv64 = (s64)secs * NSEC_PER_SEC + (s64)nsecs }; 55 return (ktime_t) { .tv64 = secs * NSEC_PER_SEC + (s64)nsecs };
81} 56}
82 57
83/* Subtract two ktime_t variables. rem = lhs -rhs: */ 58/* Subtract two ktime_t variables. rem = lhs -rhs: */
@@ -108,6 +83,12 @@ static inline ktime_t timespec_to_ktime(struct timespec ts)
108 return ktime_set(ts.tv_sec, ts.tv_nsec); 83 return ktime_set(ts.tv_sec, ts.tv_nsec);
109} 84}
110 85
86/* convert a timespec64 to ktime_t format: */
87static inline ktime_t timespec64_to_ktime(struct timespec64 ts)
88{
89 return ktime_set(ts.tv_sec, ts.tv_nsec);
90}
91
111/* convert a timeval to ktime_t format: */ 92/* convert a timeval to ktime_t format: */
112static inline ktime_t timeval_to_ktime(struct timeval tv) 93static inline ktime_t timeval_to_ktime(struct timeval tv)
113{ 94{
@@ -117,159 +98,15 @@ static inline ktime_t timeval_to_ktime(struct timeval tv)
117/* Map the ktime_t to timespec conversion to ns_to_timespec function */ 98/* Map the ktime_t to timespec conversion to ns_to_timespec function */
118#define ktime_to_timespec(kt) ns_to_timespec((kt).tv64) 99#define ktime_to_timespec(kt) ns_to_timespec((kt).tv64)
119 100
101/* Map the ktime_t to timespec conversion to ns_to_timespec function */
102#define ktime_to_timespec64(kt) ns_to_timespec64((kt).tv64)
103
120/* Map the ktime_t to timeval conversion to ns_to_timeval function */ 104/* Map the ktime_t to timeval conversion to ns_to_timeval function */
121#define ktime_to_timeval(kt) ns_to_timeval((kt).tv64) 105#define ktime_to_timeval(kt) ns_to_timeval((kt).tv64)
122 106
123/* Convert ktime_t to nanoseconds - NOP in the scalar storage format: */ 107/* Convert ktime_t to nanoseconds - NOP in the scalar storage format: */
124#define ktime_to_ns(kt) ((kt).tv64) 108#define ktime_to_ns(kt) ((kt).tv64)
125 109
126#else /* !((BITS_PER_LONG == 64) || defined(CONFIG_KTIME_SCALAR)) */
127
128/*
129 * Helper macros/inlines to get the ktime_t math right in the timespec
130 * representation. The macros are sometimes ugly - their actual use is
131 * pretty okay-ish, given the circumstances. We do all this for
132 * performance reasons. The pure scalar nsec_t based code was nice and
133 * simple, but created too many 64-bit / 32-bit conversions and divisions.
134 *
135 * Be especially aware that negative values are represented in a way
136 * that the tv.sec field is negative and the tv.nsec field is greater
137 * or equal to zero but less than nanoseconds per second. This is the
138 * same representation which is used by timespecs.
139 *
140 * tv.sec < 0 and 0 >= tv.nsec < NSEC_PER_SEC
141 */
142
143/* Set a ktime_t variable to a value in sec/nsec representation: */
144static inline ktime_t ktime_set(const long secs, const unsigned long nsecs)
145{
146 return (ktime_t) { .tv = { .sec = secs, .nsec = nsecs } };
147}
148
149/**
150 * ktime_sub - subtract two ktime_t variables
151 * @lhs: minuend
152 * @rhs: subtrahend
153 *
154 * Return: The remainder of the subtraction.
155 */
156static inline ktime_t ktime_sub(const ktime_t lhs, const ktime_t rhs)
157{
158 ktime_t res;
159
160 res.tv64 = lhs.tv64 - rhs.tv64;
161 if (res.tv.nsec < 0)
162 res.tv.nsec += NSEC_PER_SEC;
163
164 return res;
165}
166
167/**
168 * ktime_add - add two ktime_t variables
169 * @add1: addend1
170 * @add2: addend2
171 *
172 * Return: The sum of @add1 and @add2.
173 */
174static inline ktime_t ktime_add(const ktime_t add1, const ktime_t add2)
175{
176 ktime_t res;
177
178 res.tv64 = add1.tv64 + add2.tv64;
179 /*
180 * performance trick: the (u32) -NSEC gives 0x00000000Fxxxxxxx
181 * so we subtract NSEC_PER_SEC and add 1 to the upper 32 bit.
182 *
183 * it's equivalent to:
184 * tv.nsec -= NSEC_PER_SEC
185 * tv.sec ++;
186 */
187 if (res.tv.nsec >= NSEC_PER_SEC)
188 res.tv64 += (u32)-NSEC_PER_SEC;
189
190 return res;
191}
192
193/**
194 * ktime_add_ns - Add a scalar nanoseconds value to a ktime_t variable
195 * @kt: addend
196 * @nsec: the scalar nsec value to add
197 *
198 * Return: The sum of @kt and @nsec in ktime_t format.
199 */
200extern ktime_t ktime_add_ns(const ktime_t kt, u64 nsec);
201
202/**
203 * ktime_sub_ns - Subtract a scalar nanoseconds value from a ktime_t variable
204 * @kt: minuend
205 * @nsec: the scalar nsec value to subtract
206 *
207 * Return: The subtraction of @nsec from @kt in ktime_t format.
208 */
209extern ktime_t ktime_sub_ns(const ktime_t kt, u64 nsec);
210
211/**
212 * timespec_to_ktime - convert a timespec to ktime_t format
213 * @ts: the timespec variable to convert
214 *
215 * Return: A ktime_t variable with the converted timespec value.
216 */
217static inline ktime_t timespec_to_ktime(const struct timespec ts)
218{
219 return (ktime_t) { .tv = { .sec = (s32)ts.tv_sec,
220 .nsec = (s32)ts.tv_nsec } };
221}
222
223/**
224 * timeval_to_ktime - convert a timeval to ktime_t format
225 * @tv: the timeval variable to convert
226 *
227 * Return: A ktime_t variable with the converted timeval value.
228 */
229static inline ktime_t timeval_to_ktime(const struct timeval tv)
230{
231 return (ktime_t) { .tv = { .sec = (s32)tv.tv_sec,
232 .nsec = (s32)(tv.tv_usec *
233 NSEC_PER_USEC) } };
234}
235
236/**
237 * ktime_to_timespec - convert a ktime_t variable to timespec format
238 * @kt: the ktime_t variable to convert
239 *
240 * Return: The timespec representation of the ktime value.
241 */
242static inline struct timespec ktime_to_timespec(const ktime_t kt)
243{
244 return (struct timespec) { .tv_sec = (time_t) kt.tv.sec,
245 .tv_nsec = (long) kt.tv.nsec };
246}
247
248/**
249 * ktime_to_timeval - convert a ktime_t variable to timeval format
250 * @kt: the ktime_t variable to convert
251 *
252 * Return: The timeval representation of the ktime value.
253 */
254static inline struct timeval ktime_to_timeval(const ktime_t kt)
255{
256 return (struct timeval) {
257 .tv_sec = (time_t) kt.tv.sec,
258 .tv_usec = (suseconds_t) (kt.tv.nsec / NSEC_PER_USEC) };
259}
260
261/**
262 * ktime_to_ns - convert a ktime_t variable to scalar nanoseconds
263 * @kt: the ktime_t variable to convert
264 *
265 * Return: The scalar nanoseconds representation of @kt.
266 */
267static inline s64 ktime_to_ns(const ktime_t kt)
268{
269 return (s64) kt.tv.sec * NSEC_PER_SEC + kt.tv.nsec;
270}
271
272#endif /* !((BITS_PER_LONG == 64) || defined(CONFIG_KTIME_SCALAR)) */
273 110
274/** 111/**
275 * ktime_equal - Compares two ktime_t variables to see if they are equal 112 * ktime_equal - Compares two ktime_t variables to see if they are equal
@@ -328,16 +165,20 @@ static inline bool ktime_before(const ktime_t cmp1, const ktime_t cmp2)
328 return ktime_compare(cmp1, cmp2) < 0; 165 return ktime_compare(cmp1, cmp2) < 0;
329} 166}
330 167
168#if BITS_PER_LONG < 64
169extern u64 ktime_divns(const ktime_t kt, s64 div);
170#else /* BITS_PER_LONG < 64 */
171# define ktime_divns(kt, div) (u64)((kt).tv64 / (div))
172#endif
173
331static inline s64 ktime_to_us(const ktime_t kt) 174static inline s64 ktime_to_us(const ktime_t kt)
332{ 175{
333 struct timeval tv = ktime_to_timeval(kt); 176 return ktime_divns(kt, NSEC_PER_USEC);
334 return (s64) tv.tv_sec * USEC_PER_SEC + tv.tv_usec;
335} 177}
336 178
337static inline s64 ktime_to_ms(const ktime_t kt) 179static inline s64 ktime_to_ms(const ktime_t kt)
338{ 180{
339 struct timeval tv = ktime_to_timeval(kt); 181 return ktime_divns(kt, NSEC_PER_MSEC);
340 return (s64) tv.tv_sec * MSEC_PER_SEC + tv.tv_usec / USEC_PER_MSEC;
341} 182}
342 183
343static inline s64 ktime_us_delta(const ktime_t later, const ktime_t earlier) 184static inline s64 ktime_us_delta(const ktime_t later, const ktime_t earlier)
@@ -381,6 +222,25 @@ static inline __must_check bool ktime_to_timespec_cond(const ktime_t kt,
381 } 222 }
382} 223}
383 224
225/**
226 * ktime_to_timespec64_cond - convert a ktime_t variable to timespec64
227 * format only if the variable contains data
228 * @kt: the ktime_t variable to convert
229 * @ts: the timespec variable to store the result in
230 *
231 * Return: %true if there was a successful conversion, %false if kt was 0.
232 */
233static inline __must_check bool ktime_to_timespec64_cond(const ktime_t kt,
234 struct timespec64 *ts)
235{
236 if (kt.tv64) {
237 *ts = ktime_to_timespec64(kt);
238 return true;
239 } else {
240 return false;
241 }
242}
243
384/* 244/*
385 * The resolution of the clocks. The resolution value is returned in 245 * The resolution of the clocks. The resolution value is returned in
386 * the clock_getres() system call to give application programmers an 246 * the clock_getres() system call to give application programmers an
@@ -390,12 +250,6 @@ static inline __must_check bool ktime_to_timespec_cond(const ktime_t kt,
390#define LOW_RES_NSEC TICK_NSEC 250#define LOW_RES_NSEC TICK_NSEC
391#define KTIME_LOW_RES (ktime_t){ .tv64 = LOW_RES_NSEC } 251#define KTIME_LOW_RES (ktime_t){ .tv64 = LOW_RES_NSEC }
392 252
393/* Get the monotonic time in timespec format: */
394extern void ktime_get_ts(struct timespec *ts);
395
396/* Get the real (wall-) time in timespec format: */
397#define ktime_get_real_ts(ts) getnstimeofday(ts)
398
399static inline ktime_t ns_to_ktime(u64 ns) 253static inline ktime_t ns_to_ktime(u64 ns)
400{ 254{
401 static const ktime_t ktime_zero = { .tv64 = 0 }; 255 static const ktime_t ktime_zero = { .tv64 = 0 };
@@ -410,4 +264,6 @@ static inline ktime_t ms_to_ktime(u64 ms)
410 return ktime_add_ms(ktime_zero, ms); 264 return ktime_add_ms(ktime_zero, ms);
411} 265}
412 266
267# include <linux/timekeeping.h>
268
413#endif 269#endif
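
With the union reduced to the scalar tv64 member on all architectures, ktime_t arithmetic is plain 64-bit math everywhere. An illustrative snippet (not part of the patch) using the interfaces this header keeps:

static void ktime_example(void)
{
	ktime_t start = ktime_get();
	ktime_t limit = ktime_add_ns(start, 500 * NSEC_PER_USEC);

	/* ... some work ... */

	if (ktime_compare(ktime_get(), limit) > 0)
		pr_warn("took more than %lld us\n",
			ktime_us_delta(ktime_get(), start));
}
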
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 2bce4aad2570..52d631ca32cf 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -604,8 +604,8 @@ struct mlx5_cmd_work_ent {
604 int page_queue; 604 int page_queue;
605 u8 status; 605 u8 status;
606 u8 token; 606 u8 token;
607 struct timespec ts1; 607 u64 ts1;
608 struct timespec ts2; 608 u64 ts2;
609 u16 op; 609 u16 op;
610}; 610};
611 611
diff --git a/include/linux/of_address.h b/include/linux/of_address.h
index c13b8782a4eb..fb7b7221e063 100644
--- a/include/linux/of_address.h
+++ b/include/linux/of_address.h
@@ -109,7 +109,12 @@ static inline bool of_dma_is_coherent(struct device_node *np)
109extern int of_address_to_resource(struct device_node *dev, int index, 109extern int of_address_to_resource(struct device_node *dev, int index,
110 struct resource *r); 110 struct resource *r);
111void __iomem *of_iomap(struct device_node *node, int index); 111void __iomem *of_iomap(struct device_node *node, int index);
112void __iomem *of_io_request_and_map(struct device_node *device,
113 int index, char *name);
112#else 114#else
115
116#include <linux/io.h>
117
113static inline int of_address_to_resource(struct device_node *dev, int index, 118static inline int of_address_to_resource(struct device_node *dev, int index,
114 struct resource *r) 119 struct resource *r)
115{ 120{
@@ -120,6 +125,12 @@ static inline void __iomem *of_iomap(struct device_node *device, int index)
120{ 125{
121 return NULL; 126 return NULL;
122} 127}
128
129static inline void __iomem *of_io_request_and_map(struct device_node *device,
130 int index, char *name)
131{
132 return IOMEM_ERR_PTR(-EINVAL);
133}
123#endif 134#endif
124 135
125#if defined(CONFIG_OF_ADDRESS) && defined(CONFIG_PCI) 136#if defined(CONFIG_OF_ADDRESS) && defined(CONFIG_PCI)
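
of_io_request_and_map() returns an IOMEM_ERR_PTR()-encoded error rather than NULL, which is why the stub branch now pulls in linux/io.h. A hedged sketch of how a hypothetical timer driver init might use it:

static int example_timer_init(struct device_node *np)
{
	void __iomem *base;

	base = of_io_request_and_map(np, 0, "example-timer");
	if (IS_ERR(base))
		return PTR_ERR(base);	/* region busy or mapping failed */

	/* ... register clocksource/clockevents using 'base' ... */
	return 0;
}
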
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 42cac4dc2157..66124d63371a 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -813,7 +813,7 @@ struct task_delay_info {
813 * associated with the operation is added to XXX_delay. 813 * associated with the operation is added to XXX_delay.
814 * XXX_delay contains the accumulated delay time in nanoseconds. 814 * XXX_delay contains the accumulated delay time in nanoseconds.
815 */ 815 */
816 struct timespec blkio_start, blkio_end; /* Shared by blkio, swapin */ 816 u64 blkio_start; /* Shared by blkio, swapin */
817 u64 blkio_delay; /* wait for sync block io completion */ 817 u64 blkio_delay; /* wait for sync block io completion */
818 u64 swapin_delay; /* wait for swapin block io completion */ 818 u64 swapin_delay; /* wait for swapin block io completion */
819 u32 blkio_count; /* total count of the number of sync block */ 819 u32 blkio_count; /* total count of the number of sync block */
@@ -821,7 +821,7 @@ struct task_delay_info {
821 u32 swapin_count; /* total count of the number of swapin block */ 821 u32 swapin_count; /* total count of the number of swapin block */
822 /* io operations performed */ 822 /* io operations performed */
823 823
824 struct timespec freepages_start, freepages_end; 824 u64 freepages_start;
825 u64 freepages_delay; /* wait for memory reclaim */ 825 u64 freepages_delay; /* wait for memory reclaim */
826 u32 freepages_count; /* total count of memory reclaim */ 826 u32 freepages_count; /* total count of memory reclaim */
827}; 827};
@@ -1364,8 +1364,8 @@ struct task_struct {
1364 } vtime_snap_whence; 1364 } vtime_snap_whence;
1365#endif 1365#endif
1366 unsigned long nvcsw, nivcsw; /* context switch counts */ 1366 unsigned long nvcsw, nivcsw; /* context switch counts */
1367 struct timespec start_time; /* monotonic time */ 1367 u64 start_time; /* monotonic time in nsec */
1368 struct timespec real_start_time; /* boot based time */ 1368 u64 real_start_time; /* boot based time in nsec */
1369/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */ 1369/* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */
1370 unsigned long min_flt, maj_flt; 1370 unsigned long min_flt, maj_flt;
1371 1371
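
The delay accounting and start time fields move from struct timespec to plain u64 nanoseconds, so the bookkeeping becomes straight integer arithmetic. A simplified sketch of the pattern (not the actual kernel/delayacct.c code):

static void example_account_blkio(struct task_delay_info *d, u64 start_ns)
{
	/* start_ns was sampled with ktime_get_ns() before the task slept */
	d->blkio_delay += ktime_get_ns() - start_ns;
	d->blkio_count++;
}
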
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 8cf350325dc6..cc359636cfa3 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -117,6 +117,22 @@ repeat:
117} 117}
118 118
119/** 119/**
120 * raw_read_seqcount - Read the raw seqcount
121 * @s: pointer to seqcount_t
122 * Returns: count to be passed to read_seqcount_retry
123 *
124 * raw_read_seqcount opens a read critical section of the given
125 * seqcount without any lockdep checking and without checking or
126 * masking the LSB. Calling code is responsible for handling that.
127 */
128static inline unsigned raw_read_seqcount(const seqcount_t *s)
129{
130 unsigned ret = ACCESS_ONCE(s->sequence);
131 smp_rmb();
132 return ret;
133}
134
135/**
120 * raw_read_seqcount_begin - start seq-read critical section w/o lockdep 136 * raw_read_seqcount_begin - start seq-read critical section w/o lockdep
121 * @s: pointer to seqcount_t 137 * @s: pointer to seqcount_t
122 * Returns: count to be passed to read_seqcount_retry 138 * Returns: count to be passed to read_seqcount_retry
@@ -218,6 +234,17 @@ static inline void raw_write_seqcount_end(seqcount_t *s)
218} 234}
219 235
220/* 236/*
237 * raw_write_seqcount_latch - redirect readers to even/odd copy
238 * @s: pointer to seqcount_t
239 */
240static inline void raw_write_seqcount_latch(seqcount_t *s)
241{
242 smp_wmb(); /* prior stores before incrementing "sequence" */
243 s->sequence++;
244 smp_wmb(); /* increment "sequence" before following stores */
245}
246
247/*
221 * Sequence counter only version assumes that callers are using their 248 * Sequence counter only version assumes that callers are using their
222 * own mutexing. 249 * own mutexing.
223 */ 250 */
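
raw_read_seqcount() and raw_write_seqcount_latch() are the building blocks for the NMI safe clock monotonic accessor: the writer keeps two copies of the data and bumps the sequence around each update, so a reader, even one interrupting the writer, can always pick a consistent copy. A minimal sketch of the idiom, with made-up names (latch_u64 and friends):

struct latch_u64 {
	seqcount_t seq;
	u64 val[2];
};

static void latch_u64_update(struct latch_u64 *l, u64 value)
{
	raw_write_seqcount_latch(&l->seq);	/* readers switch to val[1] */
	l->val[0] = value;
	raw_write_seqcount_latch(&l->seq);	/* readers switch back to val[0] */
	l->val[1] = value;
}

static u64 latch_u64_read(struct latch_u64 *l)
{
	unsigned int seq;
	u64 value;

	do {
		seq = raw_read_seqcount(&l->seq);
		value = l->val[seq & 0x01];
	} while (read_seqcount_retry(&l->seq, seq));

	return value;
}

This is essentially the structure the new ktime_get_mono_fast_ns() uses, with struct tk_read_base as the payload.
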
diff --git a/include/linux/sh_timer.h b/include/linux/sh_timer.h
index 8e1e036d6d45..64638b058076 100644
--- a/include/linux/sh_timer.h
+++ b/include/linux/sh_timer.h
@@ -2,11 +2,6 @@
2#define __SH_TIMER_H__ 2#define __SH_TIMER_H__
3 3
4struct sh_timer_config { 4struct sh_timer_config {
5 char *name;
6 long channel_offset;
7 int timer_bit;
8 unsigned long clockevent_rating;
9 unsigned long clocksource_rating;
10 unsigned int channels_mask; 5 unsigned int channels_mask;
11}; 6};
12 7
diff --git a/include/linux/time.h b/include/linux/time.h
index d5d229b2e5af..8c42cf8d2444 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -4,19 +4,10 @@
4# include <linux/cache.h> 4# include <linux/cache.h>
5# include <linux/seqlock.h> 5# include <linux/seqlock.h>
6# include <linux/math64.h> 6# include <linux/math64.h>
7#include <uapi/linux/time.h> 7# include <linux/time64.h>
8 8
9extern struct timezone sys_tz; 9extern struct timezone sys_tz;
10 10
11/* Parameters used to convert the timespec values: */
12#define MSEC_PER_SEC 1000L
13#define USEC_PER_MSEC 1000L
14#define NSEC_PER_USEC 1000L
15#define NSEC_PER_MSEC 1000000L
16#define USEC_PER_SEC 1000000L
17#define NSEC_PER_SEC 1000000000L
18#define FSEC_PER_SEC 1000000000000000LL
19
20#define TIME_T_MAX (time_t)((1UL << ((sizeof(time_t) << 3) - 1)) - 1) 11#define TIME_T_MAX (time_t)((1UL << ((sizeof(time_t) << 3) - 1)) - 1)
21 12
22static inline int timespec_equal(const struct timespec *a, 13static inline int timespec_equal(const struct timespec *a,
@@ -84,13 +75,6 @@ static inline struct timespec timespec_sub(struct timespec lhs,
84 return ts_delta; 75 return ts_delta;
85} 76}
86 77
87#define KTIME_MAX ((s64)~((u64)1 << 63))
88#if (BITS_PER_LONG == 64)
89# define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC)
90#else
91# define KTIME_SEC_MAX LONG_MAX
92#endif
93
94/* 78/*
95 * Returns true if the timespec is norm, false if denorm: 79 * Returns true if the timespec is norm, false if denorm:
96 */ 80 */
@@ -115,27 +99,7 @@ static inline bool timespec_valid_strict(const struct timespec *ts)
115 return true; 99 return true;
116} 100}
117 101
118extern bool persistent_clock_exist; 102extern struct timespec timespec_trunc(struct timespec t, unsigned gran);
119
120static inline bool has_persistent_clock(void)
121{
122 return persistent_clock_exist;
123}
124
125extern void read_persistent_clock(struct timespec *ts);
126extern void read_boot_clock(struct timespec *ts);
127extern int persistent_clock_is_local;
128extern int update_persistent_clock(struct timespec now);
129void timekeeping_init(void);
130extern int timekeeping_suspended;
131
132unsigned long get_seconds(void);
133struct timespec current_kernel_time(void);
134struct timespec __current_kernel_time(void); /* does not take xtime_lock */
135struct timespec get_monotonic_coarse(void);
136void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim,
137 struct timespec *wtom, struct timespec *sleep);
138void timekeeping_inject_sleeptime(struct timespec *delta);
139 103
140#define CURRENT_TIME (current_kernel_time()) 104#define CURRENT_TIME (current_kernel_time())
141#define CURRENT_TIME_SEC ((struct timespec) { get_seconds(), 0 }) 105#define CURRENT_TIME_SEC ((struct timespec) { get_seconds(), 0 })
@@ -153,33 +117,14 @@ void timekeeping_inject_sleeptime(struct timespec *delta);
153extern u32 (*arch_gettimeoffset)(void); 117extern u32 (*arch_gettimeoffset)(void);
154#endif 118#endif
155 119
156extern void do_gettimeofday(struct timeval *tv);
157extern int do_settimeofday(const struct timespec *tv);
158extern int do_sys_settimeofday(const struct timespec *tv,
159 const struct timezone *tz);
160#define do_posix_clock_monotonic_gettime(ts) ktime_get_ts(ts)
161extern long do_utimes(int dfd, const char __user *filename, struct timespec *times, int flags);
162struct itimerval; 120struct itimerval;
163extern int do_setitimer(int which, struct itimerval *value, 121extern int do_setitimer(int which, struct itimerval *value,
164 struct itimerval *ovalue); 122 struct itimerval *ovalue);
165extern unsigned int alarm_setitimer(unsigned int seconds);
166extern int do_getitimer(int which, struct itimerval *value); 123extern int do_getitimer(int which, struct itimerval *value);
167extern int __getnstimeofday(struct timespec *tv);
168extern void getnstimeofday(struct timespec *tv);
169extern void getrawmonotonic(struct timespec *ts);
170extern void getnstime_raw_and_real(struct timespec *ts_raw,
171 struct timespec *ts_real);
172extern void getboottime(struct timespec *ts);
173extern void monotonic_to_bootbased(struct timespec *ts);
174extern void get_monotonic_boottime(struct timespec *ts);
175 124
176extern struct timespec timespec_trunc(struct timespec t, unsigned gran); 125extern unsigned int alarm_setitimer(unsigned int seconds);
177extern int timekeeping_valid_for_hres(void); 126
178extern u64 timekeeping_max_deferment(void); 127extern long do_utimes(int dfd, const char __user *filename, struct timespec *times, int flags);
179extern int timekeeping_inject_offset(struct timespec *ts);
180extern s32 timekeeping_get_tai_offset(void);
181extern void timekeeping_set_tai_offset(s32 tai_offset);
182extern void timekeeping_clocktai(struct timespec *ts);
183 128
184struct tms; 129struct tms;
185extern void do_sys_times(struct tms *); 130extern void do_sys_times(struct tms *);
diff --git a/include/linux/time64.h b/include/linux/time64.h
new file mode 100644
index 000000000000..a3831478d9cf
--- /dev/null
+++ b/include/linux/time64.h
@@ -0,0 +1,190 @@
1#ifndef _LINUX_TIME64_H
2#define _LINUX_TIME64_H
3
4#include <uapi/linux/time.h>
5
6typedef __s64 time64_t;
7
8/*
9 * This wants to go into uapi/linux/time.h once we agreed about the
10 * userspace interfaces.
11 */
12#if __BITS_PER_LONG == 64
13# define timespec64 timespec
14#else
15struct timespec64 {
16 time64_t tv_sec; /* seconds */
17 long tv_nsec; /* nanoseconds */
18};
19#endif
20
21/* Parameters used to convert the timespec values: */
22#define MSEC_PER_SEC 1000L
23#define USEC_PER_MSEC 1000L
24#define NSEC_PER_USEC 1000L
25#define NSEC_PER_MSEC 1000000L
26#define USEC_PER_SEC 1000000L
27#define NSEC_PER_SEC 1000000000L
28#define FSEC_PER_SEC 1000000000000000LL
29
30/* Located here for timespec[64]_valid_strict */
31#define KTIME_MAX ((s64)~((u64)1 << 63))
32#define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC)
33
34#if __BITS_PER_LONG == 64
35
36static inline struct timespec timespec64_to_timespec(const struct timespec64 ts64)
37{
38 return ts64;
39}
40
41static inline struct timespec64 timespec_to_timespec64(const struct timespec ts)
42{
43 return ts;
44}
45
46# define timespec64_equal timespec_equal
47# define timespec64_compare timespec_compare
48# define set_normalized_timespec64 set_normalized_timespec
49# define timespec64_add_safe timespec_add_safe
50# define timespec64_add timespec_add
51# define timespec64_sub timespec_sub
52# define timespec64_valid timespec_valid
53# define timespec64_valid_strict timespec_valid_strict
54# define timespec64_to_ns timespec_to_ns
55# define ns_to_timespec64 ns_to_timespec
56# define timespec64_add_ns timespec_add_ns
57
58#else
59
60static inline struct timespec timespec64_to_timespec(const struct timespec64 ts64)
61{
62 struct timespec ret;
63
64 ret.tv_sec = (time_t)ts64.tv_sec;
65 ret.tv_nsec = ts64.tv_nsec;
66 return ret;
67}
68
69static inline struct timespec64 timespec_to_timespec64(const struct timespec ts)
70{
71 struct timespec64 ret;
72
73 ret.tv_sec = ts.tv_sec;
74 ret.tv_nsec = ts.tv_nsec;
75 return ret;
76}
77
78static inline int timespec64_equal(const struct timespec64 *a,
79 const struct timespec64 *b)
80{
81 return (a->tv_sec == b->tv_sec) && (a->tv_nsec == b->tv_nsec);
82}
83
84/*
85 * lhs < rhs: return <0
86 * lhs == rhs: return 0
87 * lhs > rhs: return >0
88 */
89static inline int timespec64_compare(const struct timespec64 *lhs, const struct timespec64 *rhs)
90{
91 if (lhs->tv_sec < rhs->tv_sec)
92 return -1;
93 if (lhs->tv_sec > rhs->tv_sec)
94 return 1;
95 return lhs->tv_nsec - rhs->tv_nsec;
96}
97
98extern void set_normalized_timespec64(struct timespec64 *ts, time64_t sec, s64 nsec);
99
100/*
101 * timespec64_add_safe assumes both values are positive and checks for
102 * overflow. It will return TIME_T_MAX if the returned value would be
103 * smaller then either of the arguments.
104 */
105extern struct timespec64 timespec64_add_safe(const struct timespec64 lhs,
106 const struct timespec64 rhs);
107
108
109static inline struct timespec64 timespec64_add(struct timespec64 lhs,
110 struct timespec64 rhs)
111{
112 struct timespec64 ts_delta;
113 set_normalized_timespec64(&ts_delta, lhs.tv_sec + rhs.tv_sec,
114 lhs.tv_nsec + rhs.tv_nsec);
115 return ts_delta;
116}
117
118/*
119 * sub = lhs - rhs, in normalized form
120 */
121static inline struct timespec64 timespec64_sub(struct timespec64 lhs,
122 struct timespec64 rhs)
123{
124 struct timespec64 ts_delta;
125 set_normalized_timespec64(&ts_delta, lhs.tv_sec - rhs.tv_sec,
126 lhs.tv_nsec - rhs.tv_nsec);
127 return ts_delta;
128}
129
130/*
131 * Returns true if the timespec64 is norm, false if denorm:
132 */
133static inline bool timespec64_valid(const struct timespec64 *ts)
134{
135 /* Dates before 1970 are bogus */
136 if (ts->tv_sec < 0)
137 return false;
138 /* Can't have more nanoseconds then a second */
139 if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
140 return false;
141 return true;
142}
143
144static inline bool timespec64_valid_strict(const struct timespec64 *ts)
145{
146 if (!timespec64_valid(ts))
147 return false;
148 /* Disallow values that could overflow ktime_t */
149 if ((unsigned long long)ts->tv_sec >= KTIME_SEC_MAX)
150 return false;
151 return true;
152}
153
154/**
155 * timespec64_to_ns - Convert timespec64 to nanoseconds
156 * @ts: pointer to the timespec64 variable to be converted
157 *
158 * Returns the scalar nanosecond representation of the timespec64
159 * parameter.
160 */
161static inline s64 timespec64_to_ns(const struct timespec64 *ts)
162{
163 return ((s64) ts->tv_sec * NSEC_PER_SEC) + ts->tv_nsec;
164}
165
166/**
167 * ns_to_timespec64 - Convert nanoseconds to timespec64
168 * @nsec: the nanoseconds value to be converted
169 *
170 * Returns the timespec64 representation of the nsec parameter.
171 */
172extern struct timespec64 ns_to_timespec64(const s64 nsec);
173
174/**
175 * timespec64_add_ns - Adds nanoseconds to a timespec64
176 * @a: pointer to timespec64 to be incremented
177 * @ns: unsigned nanoseconds value to be added
178 *
179 * This must always be inlined because its used from the x86-64 vdso,
180 * which cannot call other kernel functions.
181 */
182static __always_inline void timespec64_add_ns(struct timespec64 *a, u64 ns)
183{
184 a->tv_sec += __iter_div_u64_rem(a->tv_nsec + ns, NSEC_PER_SEC, &ns);
185 a->tv_nsec = ns;
186}
187
188#endif
189
190#endif /* _LINUX_TIME64_H */
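
timespec64 keeps a 64-bit tv_sec on 32-bit builds (and simply aliases timespec on 64-bit), which is what makes the core year-2038 safe. A small illustrative use of the new helpers:

static void timespec64_example(void)
{
	/* 2100-01-01, a date that does not fit in a signed 32-bit time_t */
	struct timespec64 ts = { .tv_sec = 4102444800LL, .tv_nsec = 0 };

	timespec64_add_ns(&ts, 1500);
	pr_info("%lld.%09ld = %lld ns\n", (long long)ts.tv_sec, ts.tv_nsec,
		(long long)timespec64_to_ns(&ts));
}
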
diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h
index c1825eb436ed..95640dcd1899 100644
--- a/include/linux/timekeeper_internal.h
+++ b/include/linux/timekeeper_internal.h
@@ -10,77 +10,100 @@
10#include <linux/jiffies.h> 10#include <linux/jiffies.h>
11#include <linux/time.h> 11#include <linux/time.h>
12 12
13/* Structure holding internal timekeeping values. */ 13/**
14struct timekeeper { 14 * struct tk_read_base - base structure for timekeeping readout
15 /* Current clocksource used for timekeeping. */ 15 * @clock: Current clocksource used for timekeeping.
16 * @read: Read function of @clock
17 * @mask: Bitmask for two's complement subtraction of non 64bit clocks
18 * @cycle_last: @clock cycle value at last update
19 * @mult: NTP adjusted multiplier for scaled math conversion
20 * @shift: Shift value for scaled math conversion
21 * @xtime_nsec: Shifted (fractional) nano seconds offset for readout
22 * @base_mono: ktime_t (nanoseconds) base time for readout
23 *
24 * This struct has size 56 byte on 64 bit. Together with a seqcount it
25 * occupies a single 64byte cache line.
26 *
27 * The struct is separate from struct timekeeper as it is also used
28 * for a fast NMI safe accessor to clock monotonic.
29 */
30struct tk_read_base {
16 struct clocksource *clock; 31 struct clocksource *clock;
17 /* NTP adjusted clock multiplier */ 32 cycle_t (*read)(struct clocksource *cs);
33 cycle_t mask;
34 cycle_t cycle_last;
18 u32 mult; 35 u32 mult;
19 /* The shift value of the current clocksource. */
20 u32 shift; 36 u32 shift;
21 /* Number of clock cycles in one NTP interval. */ 37 u64 xtime_nsec;
38 ktime_t base_mono;
39};
40
41/**
42 * struct timekeeper - Structure holding internal timekeeping values.
43 * @tkr: The readout base structure
44 * @xtime_sec: Current CLOCK_REALTIME time in seconds
45 * @wall_to_monotonic: CLOCK_REALTIME to CLOCK_MONOTONIC offset
46 * @offs_real: Offset clock monotonic -> clock realtime
47 * @offs_boot: Offset clock monotonic -> clock boottime
48 * @offs_tai: Offset clock monotonic -> clock tai
49 * @tai_offset: The current UTC to TAI offset in seconds
50 * @base_raw: Monotonic raw base time in ktime_t format
51 * @raw_time: Monotonic raw base time in timespec64 format
52 * @cycle_interval: Number of clock cycles in one NTP interval
53 * @xtime_interval: Number of clock shifted nano seconds in one NTP
54 * interval.
55 * @xtime_remainder: Shifted nano seconds left over when rounding
56 * @cycle_interval
57 * @raw_interval: Raw nano seconds accumulated per NTP interval.
58 * @ntp_error: Difference between accumulated time and NTP time in ntp
59 * shifted nano seconds.
60 * @ntp_error_shift: Shift conversion between clock shifted nano seconds and
61 * ntp shifted nano seconds.
62 *
63 * Note: For timespec(64) based interfaces wall_to_monotonic is what
64 * we need to add to xtime (or xtime corrected for sub jiffie times)
65 * to get to monotonic time. Monotonic is pegged at zero at system
66 * boot time, so wall_to_monotonic will be negative, however, we will
67 * ALWAYS keep the tv_nsec part positive so we can use the usual
68 * normalization.
69 *
70 * wall_to_monotonic is moved after resume from suspend for the
71 * monotonic time not to jump. We need to add total_sleep_time to
72 * wall_to_monotonic to get the real boot based time offset.
73 *
74 * wall_to_monotonic is no longer the boot time, getboottime must be
75 * used instead.
76 */
77struct timekeeper {
78 struct tk_read_base tkr;
79 u64 xtime_sec;
80 struct timespec64 wall_to_monotonic;
81 ktime_t offs_real;
82 ktime_t offs_boot;
83 ktime_t offs_tai;
84 s32 tai_offset;
85 ktime_t base_raw;
86 struct timespec64 raw_time;
87
88 /* The following members are for timekeeping internal use */
22 cycle_t cycle_interval; 89 cycle_t cycle_interval;
23 /* Last cycle value (also stored in clock->cycle_last) */
24 cycle_t cycle_last;
25 /* Number of clock shifted nano seconds in one NTP interval. */
26 u64 xtime_interval; 90 u64 xtime_interval;
27 /* shifted nano seconds left over when rounding cycle_interval */
28 s64 xtime_remainder; 91 s64 xtime_remainder;
29 /* Raw nano seconds accumulated per NTP interval. */
30 u32 raw_interval; 92 u32 raw_interval;
31 93 /* The ntp_tick_length() value currently being used.
32 /* Current CLOCK_REALTIME time in seconds */ 94 * This cached copy ensures we consistently apply the tick
33 u64 xtime_sec; 95 * length for an entire tick, as ntp_tick_length may change
34 /* Clock shifted nano seconds */ 96 * mid-tick, and we don't want to apply that new value to
35 u64 xtime_nsec; 97 * the tick in progress.
36 98 */
99 u64 ntp_tick;
37 /* Difference between accumulated time and NTP time in ntp 100 /* Difference between accumulated time and NTP time in ntp
38 * shifted nano seconds. */ 101 * shifted nano seconds. */
39 s64 ntp_error; 102 s64 ntp_error;
40 /* Shift conversion between clock shifted nano seconds and
41 * ntp shifted nano seconds. */
42 u32 ntp_error_shift; 103 u32 ntp_error_shift;
43 104 u32 ntp_err_mult;
44 /*
45 * wall_to_monotonic is what we need to add to xtime (or xtime corrected
46 * for sub jiffie times) to get to monotonic time. Monotonic is pegged
47 * at zero at system boot time, so wall_to_monotonic will be negative,
48 * however, we will ALWAYS keep the tv_nsec part positive so we can use
49 * the usual normalization.
50 *
51 * wall_to_monotonic is moved after resume from suspend for the
52 * monotonic time not to jump. We need to add total_sleep_time to
53 * wall_to_monotonic to get the real boot based time offset.
54 *
55 * - wall_to_monotonic is no longer the boot time, getboottime must be
56 * used instead.
57 */
58 struct timespec wall_to_monotonic;
59 /* Offset clock monotonic -> clock realtime */
60 ktime_t offs_real;
61 /* time spent in suspend */
62 struct timespec total_sleep_time;
63 /* Offset clock monotonic -> clock boottime */
64 ktime_t offs_boot;
65 /* The raw monotonic time for the CLOCK_MONOTONIC_RAW posix clock. */
66 struct timespec raw_time;
67 /* The current UTC to TAI offset in seconds */
68 s32 tai_offset;
69 /* Offset clock monotonic -> clock tai */
70 ktime_t offs_tai;
71
72}; 105};
73 106
74static inline struct timespec tk_xtime(struct timekeeper *tk)
75{
76 struct timespec ts;
77
78 ts.tv_sec = tk->xtime_sec;
79 ts.tv_nsec = (long)(tk->xtime_nsec >> tk->shift);
80 return ts;
81}
82
83
84#ifdef CONFIG_GENERIC_TIME_VSYSCALL 107#ifdef CONFIG_GENERIC_TIME_VSYSCALL
85 108
86extern void update_vsyscall(struct timekeeper *tk); 109extern void update_vsyscall(struct timekeeper *tk);
@@ -89,17 +112,10 @@ extern void update_vsyscall_tz(void);
89#elif defined(CONFIG_GENERIC_TIME_VSYSCALL_OLD) 112#elif defined(CONFIG_GENERIC_TIME_VSYSCALL_OLD)
90 113
91extern void update_vsyscall_old(struct timespec *ts, struct timespec *wtm, 114extern void update_vsyscall_old(struct timespec *ts, struct timespec *wtm,
92 struct clocksource *c, u32 mult); 115 struct clocksource *c, u32 mult,
116 cycle_t cycle_last);
93extern void update_vsyscall_tz(void); 117extern void update_vsyscall_tz(void);
94 118
95static inline void update_vsyscall(struct timekeeper *tk)
96{
97 struct timespec xt;
98
99 xt = tk_xtime(tk);
100 update_vsyscall_old(&xt, &tk->wall_to_monotonic, tk->clock, tk->mult);
101}
102
103#else 119#else
104 120
105static inline void update_vsyscall(struct timekeeper *tk) 121static inline void update_vsyscall(struct timekeeper *tk)
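
struct tk_read_base gathers everything needed to turn a clocksource readout into nanoseconds into a single cache line, shared by the regular timekeeper and the NMI safe fast accessor. Roughly, the readout works like the following simplified sketch (not the literal timekeeping_get_ns() code, which also adds an arch offset where required):

static u64 example_read_ns(struct tk_read_base *tkr)
{
	cycle_t now = tkr->read(tkr->clock);
	cycle_t delta = (now - tkr->cycle_last) & tkr->mask;

	/* scaled math: cycles * mult, plus the fractional nsec, then >> shift */
	return (delta * tkr->mult + tkr->xtime_nsec) >> tkr->shift;
}
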
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h
new file mode 100644
index 000000000000..1caa6b04fdc5
--- /dev/null
+++ b/include/linux/timekeeping.h
@@ -0,0 +1,209 @@
1#ifndef _LINUX_TIMEKEEPING_H
2#define _LINUX_TIMEKEEPING_H
3
4/* Included from linux/ktime.h */
5
6void timekeeping_init(void);
7extern int timekeeping_suspended;
8
9/*
10 * Get and set timeofday
11 */
12extern void do_gettimeofday(struct timeval *tv);
13extern int do_settimeofday(const struct timespec *tv);
14extern int do_sys_settimeofday(const struct timespec *tv,
15 const struct timezone *tz);
16
17/*
18 * Kernel time accessors
19 */
20unsigned long get_seconds(void);
21struct timespec current_kernel_time(void);
22/* does not take xtime_lock */
23struct timespec __current_kernel_time(void);
24
25/*
26 * timespec based interfaces
27 */
28struct timespec get_monotonic_coarse(void);
29extern void getrawmonotonic(struct timespec *ts);
30extern void ktime_get_ts64(struct timespec64 *ts);
31
32extern int __getnstimeofday64(struct timespec64 *tv);
33extern void getnstimeofday64(struct timespec64 *tv);
34
35#if BITS_PER_LONG == 64
36static inline int __getnstimeofday(struct timespec *ts)
37{
38 return __getnstimeofday64(ts);
39}
40
41static inline void getnstimeofday(struct timespec *ts)
42{
43 getnstimeofday64(ts);
44}
45
46static inline void ktime_get_ts(struct timespec *ts)
47{
48 ktime_get_ts64(ts);
49}
50
51static inline void ktime_get_real_ts(struct timespec *ts)
52{
53 getnstimeofday64(ts);
54}
55
56#else
57static inline int __getnstimeofday(struct timespec *ts)
58{
59 struct timespec64 ts64;
60 int ret = __getnstimeofday64(&ts64);
61
62 *ts = timespec64_to_timespec(ts64);
63 return ret;
64}
65
66static inline void getnstimeofday(struct timespec *ts)
67{
68 struct timespec64 ts64;
69
70 getnstimeofday64(&ts64);
71 *ts = timespec64_to_timespec(ts64);
72}
73
74static inline void ktime_get_ts(struct timespec *ts)
75{
76 struct timespec64 ts64;
77
78 ktime_get_ts64(&ts64);
79 *ts = timespec64_to_timespec(ts64);
80}
81
82static inline void ktime_get_real_ts(struct timespec *ts)
83{
84 struct timespec64 ts64;
85
86 getnstimeofday64(&ts64);
87 *ts = timespec64_to_timespec(ts64);
88}
89#endif
90
91extern void getboottime(struct timespec *ts);
92
93#define do_posix_clock_monotonic_gettime(ts) ktime_get_ts(ts)
94#define ktime_get_real_ts64(ts) getnstimeofday64(ts)
95
96/*
97 * ktime_t based interfaces
98 */
99
100enum tk_offsets {
101 TK_OFFS_REAL,
102 TK_OFFS_BOOT,
103 TK_OFFS_TAI,
104 TK_OFFS_MAX,
105};
106
107extern ktime_t ktime_get(void);
108extern ktime_t ktime_get_with_offset(enum tk_offsets offs);
109extern ktime_t ktime_mono_to_any(ktime_t tmono, enum tk_offsets offs);
110extern ktime_t ktime_get_raw(void);
111
112/**
113 * ktime_get_real - get the real (wall-) time in ktime_t format
114 */
115static inline ktime_t ktime_get_real(void)
116{
117 return ktime_get_with_offset(TK_OFFS_REAL);
118}
119
120/**
121 * ktime_get_boottime - Returns monotonic time since boot in ktime_t format
122 *
123 * This is similar to CLOCK_MONTONIC/ktime_get, but also includes the
124 * time spent in suspend.
125 */
126static inline ktime_t ktime_get_boottime(void)
127{
128 return ktime_get_with_offset(TK_OFFS_BOOT);
129}
130
131/**
132 * ktime_get_clocktai - Returns the TAI time of day in ktime_t format
133 */
134static inline ktime_t ktime_get_clocktai(void)
135{
136 return ktime_get_with_offset(TK_OFFS_TAI);
137}
138
139/**
140 * ktime_mono_to_real - Convert monotonic time to clock realtime
141 */
142static inline ktime_t ktime_mono_to_real(ktime_t mono)
143{
144 return ktime_mono_to_any(mono, TK_OFFS_REAL);
145}
146
147static inline u64 ktime_get_ns(void)
148{
149 return ktime_to_ns(ktime_get());
150}
151
152static inline u64 ktime_get_real_ns(void)
153{
154 return ktime_to_ns(ktime_get_real());
155}
156
157static inline u64 ktime_get_boot_ns(void)
158{
159 return ktime_to_ns(ktime_get_boottime());
160}
161
162static inline u64 ktime_get_raw_ns(void)
163{
164 return ktime_to_ns(ktime_get_raw());
165}
166
167extern u64 ktime_get_mono_fast_ns(void);
168
169/*
170 * Timespec interfaces utilizing the ktime based ones
171 */
172static inline void get_monotonic_boottime(struct timespec *ts)
173{
174 *ts = ktime_to_timespec(ktime_get_boottime());
175}
176
177static inline void timekeeping_clocktai(struct timespec *ts)
178{
179 *ts = ktime_to_timespec(ktime_get_clocktai());
180}
181
182/*
183 * RTC specific
184 */
185extern void timekeeping_inject_sleeptime(struct timespec *delta);
186
187/*
188 * PPS accessor
189 */
190extern void getnstime_raw_and_real(struct timespec *ts_raw,
191 struct timespec *ts_real);
192
193/*
194 * Persistent clock related interfaces
195 */
196extern bool persistent_clock_exist;
197extern int persistent_clock_is_local;
198
199static inline bool has_persistent_clock(void)
200{
201 return persistent_clock_exist;
202}
203
204extern void read_persistent_clock(struct timespec *ts);
205extern void read_boot_clock(struct timespec *ts);
206extern int update_persistent_clock(struct timespec now);
207
208
209#endif
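
The point of the consolidation is that code which only wants a scalar nanosecond value no longer has to bounce through a timespec. An illustrative before/after:

/* old style: timespec detour */
static u64 sample_ns_old(void)
{
	struct timespec ts;

	ktime_get_ts(&ts);
	return timespec_to_ns(&ts);
}

/* new style: direct accessor from this header */
static u64 sample_ns_new(void)
{
	return ktime_get_ns();
}
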
diff --git a/include/linux/timerfd.h b/include/linux/timerfd.h
index d3b57fa12225..bd36ce431e32 100644
--- a/include/linux/timerfd.h
+++ b/include/linux/timerfd.h
@@ -11,6 +11,9 @@
11/* For O_CLOEXEC and O_NONBLOCK */ 11/* For O_CLOEXEC and O_NONBLOCK */
12#include <linux/fcntl.h> 12#include <linux/fcntl.h>
13 13
14/* For _IO helpers */
15#include <linux/ioctl.h>
16
14/* 17/*
15 * CAREFUL: Check include/asm-generic/fcntl.h when defining 18 * CAREFUL: Check include/asm-generic/fcntl.h when defining
16 * new flags, since they might collide with O_* ones. We want 19 * new flags, since they might collide with O_* ones. We want
@@ -29,4 +32,6 @@
29/* Flags for timerfd_settime. */ 32/* Flags for timerfd_settime. */
30#define TFD_SETTIME_FLAGS (TFD_TIMER_ABSTIME | TFD_TIMER_CANCEL_ON_SET) 33#define TFD_SETTIME_FLAGS (TFD_TIMER_ABSTIME | TFD_TIMER_CANCEL_ON_SET)
31 34
35#define TFD_IOC_SET_TICKS _IOW('T', 0, u64)
36
32#endif /* _LINUX_TIMERFD_H */ 37#endif /* _LINUX_TIMERFD_H */
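
TFD_IOC_SET_TICKS is the kernel side of the timerfd checkpoint/restart support mentioned in the merge message: it lets a restore tool re-inject the number of expirations that had accumulated at checkpoint time. A hedged userspace sketch (the ioctl is only available when the kernel is built with checkpoint/restore support):

#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/timerfd.h>
#include <linux/ioctl.h>

#ifndef TFD_IOC_SET_TICKS
#define TFD_IOC_SET_TICKS _IOW('T', 0, uint64_t)
#endif

static int restore_timerfd_ticks(int tfd, uint64_t ticks)
{
	/* tfd is a timerfd_create() descriptor being restored */
	return ioctl(tfd, TFD_IOC_SET_TICKS, &ticks);
}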