path: root/include/linux/ktime.h
Diffstat (limited to 'include/linux/ktime.h')
-rw-r--r--	include/linux/ktime.h	68
1 file changed, 23 insertions(+), 45 deletions(-)
diff --git a/include/linux/ktime.h b/include/linux/ktime.h
index 0fb7ffb1775f..8e573deda55e 100644
--- a/include/linux/ktime.h
+++ b/include/linux/ktime.h
@@ -24,21 +24,8 @@
 #include <linux/time.h>
 #include <linux/jiffies.h>
 
-/*
- * ktime_t:
- *
- * A single 64-bit variable is used to store the hrtimers
- * internal representation of time values in scalar nanoseconds. The
- * design plays out best on 64-bit CPUs, where most conversions are
- * NOPs and most arithmetic ktime_t operations are plain arithmetic
- * operations.
- *
- */
-union ktime {
-	s64	tv64;
-};
-
-typedef union ktime ktime_t;		/* Kill this */
+/* Nanosecond scalar representation for kernel time values */
+typedef s64	ktime_t;
 
 /**
  * ktime_set - Set a ktime_t variable from a seconds/nanoseconds value
@@ -50,39 +37,34 @@ typedef union ktime ktime_t; /* Kill this */
 static inline ktime_t ktime_set(const s64 secs, const unsigned long nsecs)
 {
 	if (unlikely(secs >= KTIME_SEC_MAX))
-		return (ktime_t){ .tv64 = KTIME_MAX };
+		return KTIME_MAX;
 
-	return (ktime_t) { .tv64 = secs * NSEC_PER_SEC + (s64)nsecs };
+	return secs * NSEC_PER_SEC + (s64)nsecs;
 }
 
 /* Subtract two ktime_t variables. rem = lhs -rhs: */
-#define ktime_sub(lhs, rhs) \
-	({ (ktime_t){ .tv64 = (lhs).tv64 - (rhs).tv64 }; })
+#define ktime_sub(lhs, rhs)	((lhs) - (rhs))
 
 /* Add two ktime_t variables. res = lhs + rhs: */
-#define ktime_add(lhs, rhs) \
-	({ (ktime_t){ .tv64 = (lhs).tv64 + (rhs).tv64 }; })
+#define ktime_add(lhs, rhs)	((lhs) + (rhs))
 
 /*
  * Same as ktime_add(), but avoids undefined behaviour on overflow; however,
  * this means that you must check the result for overflow yourself.
  */
-#define ktime_add_unsafe(lhs, rhs) \
-	({ (ktime_t){ .tv64 = (u64) (lhs).tv64 + (rhs).tv64 }; })
+#define ktime_add_unsafe(lhs, rhs)	((u64) (lhs) + (rhs))
 
 /*
  * Add a ktime_t variable and a scalar nanosecond value.
  * res = kt + nsval:
  */
-#define ktime_add_ns(kt, nsval) \
-	({ (ktime_t){ .tv64 = (kt).tv64 + (nsval) }; })
+#define ktime_add_ns(kt, nsval)		((kt) + (nsval))
 
 /*
  * Subtract a scalar nanosecod from a ktime_t variable
  * res = kt - nsval:
  */
-#define ktime_sub_ns(kt, nsval) \
-	({ (ktime_t){ .tv64 = (kt).tv64 - (nsval) }; })
+#define ktime_sub_ns(kt, nsval)		((kt) - (nsval))
 
 /* convert a timespec to ktime_t format: */
 static inline ktime_t timespec_to_ktime(struct timespec ts)
@@ -103,16 +85,16 @@ static inline ktime_t timeval_to_ktime(struct timeval tv)
 }
 
 /* Map the ktime_t to timespec conversion to ns_to_timespec function */
-#define ktime_to_timespec(kt)		ns_to_timespec((kt).tv64)
+#define ktime_to_timespec(kt)		ns_to_timespec((kt))
 
 /* Map the ktime_t to timespec conversion to ns_to_timespec function */
-#define ktime_to_timespec64(kt)		ns_to_timespec64((kt).tv64)
+#define ktime_to_timespec64(kt)		ns_to_timespec64((kt))
 
 /* Map the ktime_t to timeval conversion to ns_to_timeval function */
-#define ktime_to_timeval(kt)		ns_to_timeval((kt).tv64)
+#define ktime_to_timeval(kt)		ns_to_timeval((kt))
 
 /* Convert ktime_t to nanoseconds - NOP in the scalar storage format: */
-#define ktime_to_ns(kt)			((kt).tv64)
+#define ktime_to_ns(kt)			(kt)
 
 
 /**
@@ -126,7 +108,7 @@ static inline ktime_t timeval_to_ktime(struct timeval tv)
  */
 static inline int ktime_equal(const ktime_t cmp1, const ktime_t cmp2)
 {
-	return cmp1.tv64 == cmp2.tv64;
+	return cmp1 == cmp2;
 }
 
 /**
@@ -141,9 +123,9 @@ static inline int ktime_equal(const ktime_t cmp1, const ktime_t cmp2)
  */
 static inline int ktime_compare(const ktime_t cmp1, const ktime_t cmp2)
 {
-	if (cmp1.tv64 < cmp2.tv64)
+	if (cmp1 < cmp2)
 		return -1;
-	if (cmp1.tv64 > cmp2.tv64)
+	if (cmp1 > cmp2)
 		return 1;
 	return 0;
 }
@@ -182,7 +164,7 @@ static inline s64 ktime_divns(const ktime_t kt, s64 div)
 	 */
 	BUG_ON(div < 0);
 	if (__builtin_constant_p(div) && !(div >> 32)) {
-		s64 ns = kt.tv64;
+		s64 ns = kt;
 		u64 tmp = ns < 0 ? -ns : ns;
 
 		do_div(tmp, div);
@@ -199,7 +181,7 @@ static inline s64 ktime_divns(const ktime_t kt, s64 div)
 	 * so catch them on 64bit as well.
 	 */
 	WARN_ON(div < 0);
-	return kt.tv64 / div;
+	return kt / div;
 }
 #endif
 
@@ -256,7 +238,7 @@ extern ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs);
 static inline __must_check bool ktime_to_timespec_cond(const ktime_t kt,
 						       struct timespec *ts)
 {
-	if (kt.tv64) {
+	if (kt) {
 		*ts = ktime_to_timespec(kt);
 		return true;
 	} else {
@@ -275,7 +257,7 @@ static inline __must_check bool ktime_to_timespec_cond(const ktime_t kt,
 static inline __must_check bool ktime_to_timespec64_cond(const ktime_t kt,
 						       struct timespec64 *ts)
 {
-	if (kt.tv64) {
+	if (kt) {
 		*ts = ktime_to_timespec64(kt);
 		return true;
 	} else {
@@ -290,20 +272,16 @@ static inline __must_check bool ktime_to_timespec64_cond(const ktime_t kt,
  * this resolution values.
  */
 #define LOW_RES_NSEC		TICK_NSEC
-#define KTIME_LOW_RES		(ktime_t){ .tv64 = LOW_RES_NSEC }
+#define KTIME_LOW_RES		(LOW_RES_NSEC)
 
 static inline ktime_t ns_to_ktime(u64 ns)
 {
-	static const ktime_t ktime_zero = { .tv64 = 0 };
-
-	return ktime_add_ns(ktime_zero, ns);
+	return ns;
 }
 
 static inline ktime_t ms_to_ktime(u64 ms)
 {
-	static const ktime_t ktime_zero = { .tv64 = 0 };
-
-	return ktime_add_ms(ktime_zero, ms);
+	return ms * NSEC_PER_MSEC;
 }
 
 # include <linux/timekeeping.h>
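
For illustration only (not part of this patch): a minimal sketch of how a call site reads once ktime_t is a plain s64 scalar. The helper names example_deadline_passed()/example_extend_deadline() and the 500us slack value are made up for the example; ktime_compare(), ktime_add_ns() and NSEC_PER_USEC are the existing kernel helpers and constants touched or used above.

#include <linux/ktime.h>

/* Hypothetical example: has the deadline passed? */
static bool example_deadline_passed(ktime_t now, ktime_t deadline)
{
	/* Before this patch, this would have compared now.tv64 and deadline.tv64. */
	return ktime_compare(now, deadline) >= 0;
}

/* Hypothetical example: push a deadline out by 500us of slack. */
static ktime_t example_extend_deadline(ktime_t deadline)
{
	/* A plain '+' now works on the s64, but the helper keeps call sites uniform. */
	return ktime_add_ns(deadline, 500 * NSEC_PER_USEC);
}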