-rw-r--r--  arch/arm/Kconfig          |   1 -
-rw-r--r--  arch/hexagon/Kconfig      |   1 -
-rw-r--r--  arch/s390/Kconfig         |   1 -
-rw-r--r--  arch/x86/Kconfig          |   1 -
-rw-r--r--  include/linux/ktime.h     | 173 +-
-rw-r--r--  include/linux/time.h      |  11 +-
-rw-r--r--  kernel/time/Kconfig       |   4 -
-rw-r--r--  kernel/time/hrtimer.c     |  54 -
-rw-r--r--  kernel/time/timekeeping.c |   7 +-
9 files changed, 7 insertions(+), 246 deletions(-)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 05a71511ab3c..b9f6728331c8 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -64,7 +64,6 @@ config ARM
 	select HAVE_UID16
 	select HAVE_VIRT_CPU_ACCOUNTING_GEN
 	select IRQ_FORCED_THREADING
-	select KTIME_SCALAR
 	select MODULES_USE_ELF_REL
 	select NO_BOOTMEM
 	select OLD_SIGACTION
diff --git a/arch/hexagon/Kconfig b/arch/hexagon/Kconfig
index 0fd6138f6203..4dc89d1f9c48 100644
--- a/arch/hexagon/Kconfig
+++ b/arch/hexagon/Kconfig
@@ -23,7 +23,6 @@ config HEXAGON
 	select GENERIC_IOMAP
 	select GENERIC_SMP_IDLE_THREAD
 	select STACKTRACE_SUPPORT
-	select KTIME_SCALAR
 	select GENERIC_CLOCKEVENTS
 	select GENERIC_CLOCKEVENTS_BROADCAST
 	select MODULES_USE_ELF_RELA
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index bb63499fc5d3..1afc7a686702 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -137,7 +137,6 @@ config S390
 	select HAVE_SYSCALL_TRACEPOINTS
 	select HAVE_UID16 if 32BIT
 	select HAVE_VIRT_CPU_ACCOUNTING
-	select KTIME_SCALAR if 32BIT
 	select MODULES_USE_ELF_RELA
 	select NO_BOOTMEM
 	select OLD_SIGACTION
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index a8f749ef0fdc..7fa17b5ce668 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -111,7 +111,6 @@ config X86
 	select ARCH_CLOCKSOURCE_DATA
 	select GENERIC_CLOCKEVENTS_BROADCAST if X86_64 || (X86_32 && X86_LOCAL_APIC)
 	select GENERIC_TIME_VSYSCALL
-	select KTIME_SCALAR if X86_32
 	select GENERIC_STRNCPY_FROM_USER
 	select GENERIC_STRNLEN_USER
 	select HAVE_CONTEXT_TRACKING if X86_64
diff --git a/include/linux/ktime.h b/include/linux/ktime.h
index de9e46e6bcc9..fbc64f8481b7 100644
--- a/include/linux/ktime.h
+++ b/include/linux/ktime.h
@@ -27,43 +27,19 @@
 /*
  * ktime_t:
  *
- * On 64-bit CPUs a single 64-bit variable is used to store the hrtimers
+ * A single 64-bit variable is used to store the hrtimers
  * internal representation of time values in scalar nanoseconds. The
  * design plays out best on 64-bit CPUs, where most conversions are
  * NOPs and most arithmetic ktime_t operations are plain arithmetic
  * operations.
  *
- * On 32-bit CPUs an optimized representation of the timespec structure
- * is used to avoid expensive conversions from and to timespecs. The
- * endian-aware order of the tv struct members is chosen to allow
- * mathematical operations on the tv64 member of the union too, which
- * for certain operations produces better code.
- *
- * For architectures with efficient support for 64/32-bit conversions the
- * plain scalar nanosecond based representation can be selected by the
- * config switch CONFIG_KTIME_SCALAR.
  */
 union ktime {
 	s64	tv64;
-#if BITS_PER_LONG != 64 && !defined(CONFIG_KTIME_SCALAR)
-	struct {
-# ifdef __BIG_ENDIAN
-	s32	sec, nsec;
-# else
-	s32	nsec, sec;
-# endif
-	} tv;
-#endif
 };
 
 typedef union ktime ktime_t;		/* Kill this */
 
-/*
- * ktime_t definitions when using the 64-bit scalar representation:
- */
-
-#if (BITS_PER_LONG == 64) || defined(CONFIG_KTIME_SCALAR)
-
 /**
  * ktime_set - Set a ktime_t variable from a seconds/nanoseconds value
  * @secs:	seconds to set
@@ -123,153 +99,6 @@ static inline ktime_t timeval_to_ktime(struct timeval tv)
 /* Convert ktime_t to nanoseconds - NOP in the scalar storage format: */
 #define ktime_to_ns(kt)			((kt).tv64)
 
-#else	/* !((BITS_PER_LONG == 64) || defined(CONFIG_KTIME_SCALAR)) */
-
-/*
- * Helper macros/inlines to get the ktime_t math right in the timespec
- * representation. The macros are sometimes ugly - their actual use is
- * pretty okay-ish, given the circumstances. We do all this for
- * performance reasons. The pure scalar nsec_t based code was nice and
- * simple, but created too many 64-bit / 32-bit conversions and divisions.
- *
- * Be especially aware that negative values are represented in a way
- * that the tv.sec field is negative and the tv.nsec field is greater
- * or equal to zero but less than nanoseconds per second. This is the
- * same representation which is used by timespecs.
- *
- * tv.sec < 0 and 0 >= tv.nsec < NSEC_PER_SEC
- */
-
-/* Set a ktime_t variable to a value in sec/nsec representation: */
-static inline ktime_t ktime_set(const long secs, const unsigned long nsecs)
-{
-	return (ktime_t) { .tv = { .sec = secs, .nsec = nsecs } };
-}
-
-/**
- * ktime_sub - subtract two ktime_t variables
- * @lhs:	minuend
- * @rhs:	subtrahend
- *
- * Return: The remainder of the subtraction.
- */
-static inline ktime_t ktime_sub(const ktime_t lhs, const ktime_t rhs)
-{
-	ktime_t res;
-
-	res.tv64 = lhs.tv64 - rhs.tv64;
-	if (res.tv.nsec < 0)
-		res.tv.nsec += NSEC_PER_SEC;
-
-	return res;
-}
-
-/**
- * ktime_add - add two ktime_t variables
- * @add1:	addend1
- * @add2:	addend2
- *
- * Return: The sum of @add1 and @add2.
- */
-static inline ktime_t ktime_add(const ktime_t add1, const ktime_t add2)
-{
-	ktime_t res;
-
-	res.tv64 = add1.tv64 + add2.tv64;
-	/*
-	 * performance trick: the (u32) -NSEC gives 0x00000000Fxxxxxxx
-	 * so we subtract NSEC_PER_SEC and add 1 to the upper 32 bit.
-	 *
-	 * it's equivalent to:
-	 *	tv.nsec -= NSEC_PER_SEC
-	 *	tv.sec ++;
-	 */
-	if (res.tv.nsec >= NSEC_PER_SEC)
-		res.tv64 += (u32)-NSEC_PER_SEC;
-
-	return res;
-}
-
-/**
- * ktime_add_ns - Add a scalar nanoseconds value to a ktime_t variable
- * @kt:		addend
- * @nsec:	the scalar nsec value to add
- *
- * Return: The sum of @kt and @nsec in ktime_t format.
- */
-extern ktime_t ktime_add_ns(const ktime_t kt, u64 nsec);
-
-/**
- * ktime_sub_ns - Subtract a scalar nanoseconds value from a ktime_t variable
- * @kt:		minuend
- * @nsec:	the scalar nsec value to subtract
- *
- * Return: The subtraction of @nsec from @kt in ktime_t format.
- */
-extern ktime_t ktime_sub_ns(const ktime_t kt, u64 nsec);
-
-/**
- * timespec_to_ktime - convert a timespec to ktime_t format
- * @ts:		the timespec variable to convert
- *
- * Return: A ktime_t variable with the converted timespec value.
- */
-static inline ktime_t timespec_to_ktime(const struct timespec ts)
-{
-	return (ktime_t) { .tv = { .sec = (s32)ts.tv_sec,
-				   .nsec = (s32)ts.tv_nsec } };
-}
-
-/**
- * timeval_to_ktime - convert a timeval to ktime_t format
- * @tv:		the timeval variable to convert
- *
- * Return: A ktime_t variable with the converted timeval value.
- */
-static inline ktime_t timeval_to_ktime(const struct timeval tv)
-{
-	return (ktime_t) { .tv = { .sec = (s32)tv.tv_sec,
-				   .nsec = (s32)(tv.tv_usec *
-						 NSEC_PER_USEC) } };
-}
-
-/**
- * ktime_to_timespec - convert a ktime_t variable to timespec format
- * @kt:		the ktime_t variable to convert
- *
- * Return: The timespec representation of the ktime value.
- */
-static inline struct timespec ktime_to_timespec(const ktime_t kt)
-{
-	return (struct timespec) { .tv_sec = (time_t) kt.tv.sec,
-				   .tv_nsec = (long) kt.tv.nsec };
-}
-
-/**
- * ktime_to_timeval - convert a ktime_t variable to timeval format
- * @kt:		the ktime_t variable to convert
- *
- * Return: The timeval representation of the ktime value.
- */
-static inline struct timeval ktime_to_timeval(const ktime_t kt)
-{
-	return (struct timeval) {
-		.tv_sec = (time_t) kt.tv.sec,
-		.tv_usec = (suseconds_t) (kt.tv.nsec / NSEC_PER_USEC) };
-}
-
-/**
- * ktime_to_ns - convert a ktime_t variable to scalar nanoseconds
- * @kt:		the ktime_t variable to convert
- *
- * Return: The scalar nanoseconds representation of @kt.
- */
-static inline s64 ktime_to_ns(const ktime_t kt)
-{
-	return (s64) kt.tv.sec * NSEC_PER_SEC + kt.tv.nsec;
-}
-
-#endif	/* !((BITS_PER_LONG == 64) || defined(CONFIG_KTIME_SCALAR)) */
 
 /**
  * ktime_equal - Compares two ktime_t variables to see if they are equal
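
With the sec/nsec union and the #else branch above gone, ktime_t is a plain signed 64-bit nanosecond count on every architecture, and the remaining helpers collapse to ordinary 64-bit arithmetic. The following is a minimal userspace sketch of that scalar representation, written to illustrate the hunks above: the names mirror the kernel helpers (including the KTIME_SEC_MAX clamp in ktime_set()), but it is a model for illustration, not the header code itself.

/*
 * Userspace model of the scalar-only ktime_t.  Illustration only;
 * the in-tree helpers live in include/linux/ktime.h.
 */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC	1000000000LL
#define KTIME_MAX	((int64_t)~((uint64_t)1 << 63))
#define KTIME_SEC_MAX	(KTIME_MAX / NSEC_PER_SEC)

typedef union { int64_t tv64; } ktime_t;	/* plain s64 nanoseconds */

static ktime_t ktime_set(long secs, unsigned long nsecs)
{
	/* clamp instead of overflowing, mirroring the kernel helper */
	if (secs >= KTIME_SEC_MAX)
		return (ktime_t){ .tv64 = KTIME_MAX };
	return (ktime_t){ .tv64 = (int64_t)secs * NSEC_PER_SEC + (int64_t)nsecs };
}

static ktime_t ktime_add(ktime_t a, ktime_t b)
{
	return (ktime_t){ .tv64 = a.tv64 + b.tv64 };	/* plain arithmetic */
}

static int64_t ktime_to_ns(ktime_t kt)
{
	return kt.tv64;					/* NOP conversion */
}

int main(void)
{
	ktime_t t = ktime_add(ktime_set(1, 500000000UL), ktime_set(0, 750000000UL));

	/* prints 2250000000: no sec/nsec normalization step is needed */
	printf("%lld\n", (long long)ktime_to_ns(t));
	return 0;
}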
diff --git a/include/linux/time.h b/include/linux/time.h
index f6d990d1c79a..129f0bd36a8d 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -19,6 +19,10 @@ extern struct timezone sys_tz;
 
 #define TIME_T_MAX	(time_t)((1UL << ((sizeof(time_t) << 3) - 1)) - 1)
 
+/* Located here for timespec_valid_strict */
+#define KTIME_MAX			((s64)~((u64)1 << 63))
+#define KTIME_SEC_MAX			(KTIME_MAX / NSEC_PER_SEC)
+
 static inline int timespec_equal(const struct timespec *a,
 				 const struct timespec *b)
 {
@@ -84,13 +88,6 @@ static inline struct timespec timespec_sub(struct timespec lhs,
 	return ts_delta;
 }
 
-#define KTIME_MAX			((s64)~((u64)1 << 63))
-#if (BITS_PER_LONG == 64)
-# define KTIME_SEC_MAX			(KTIME_MAX / NSEC_PER_SEC)
-#else
-# define KTIME_SEC_MAX			LONG_MAX
-#endif
-
 /*
  * Returns true if the timespec is norm, false if denorm:
  */
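
The include/linux/time.h hunks move KTIME_MAX/KTIME_SEC_MAX above the timespec helpers so that a strict validity check can reject seconds values that would overflow a 64-bit nanosecond ktime_t; the old 32-bit LONG_MAX special case disappears because every ktime_t is now 64 bits wide. Below is a hedged userspace sketch of that overflow guard, modeled on the kernel's timespec_valid_strict() but stripped down; it is an illustration of why the constant moved, not the kernel inline itself.

/*
 * Sketch of a strict timespec check that needs KTIME_SEC_MAX:
 * reject anything whose seconds would overflow an s64 nanosecond count.
 */
#include <stdbool.h>
#include <stdint.h>
#include <time.h>

#define NSEC_PER_SEC	1000000000LL
#define KTIME_MAX	((int64_t)~((uint64_t)1 << 63))
#define KTIME_SEC_MAX	(KTIME_MAX / NSEC_PER_SEC)

static bool timespec_valid_strict(const struct timespec *ts)
{
	if (ts->tv_sec < 0)				/* no dates before the epoch */
		return false;
	if (ts->tv_nsec < 0 || ts->tv_nsec >= NSEC_PER_SEC)	/* must be normalized */
		return false;
	/* disallow values that would overflow ktime_t */
	if ((unsigned long long)ts->tv_sec >= (unsigned long long)KTIME_SEC_MAX)
		return false;
	return true;
}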
diff --git a/kernel/time/Kconfig b/kernel/time/Kconfig
index f448513a45ed..feccfd888732 100644
--- a/kernel/time/Kconfig
+++ b/kernel/time/Kconfig
@@ -20,10 +20,6 @@ config GENERIC_TIME_VSYSCALL
 config GENERIC_TIME_VSYSCALL_OLD
 	bool
 
-# ktime_t scalar 64bit nsec representation
-config KTIME_SCALAR
-	bool
-
 # Old style timekeeping
 config ARCH_USES_GETTIMEOFFSET
 	bool
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 2f4ef8a1e5ff..19f211051c35 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -261,60 +261,6 @@ lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
  * too large for inlining:
  */
 #if BITS_PER_LONG < 64
-# ifndef CONFIG_KTIME_SCALAR
-/**
- * ktime_add_ns - Add a scalar nanoseconds value to a ktime_t variable
- * @kt:		addend
- * @nsec:	the scalar nsec value to add
- *
- * Returns the sum of kt and nsec in ktime_t format
- */
-ktime_t ktime_add_ns(const ktime_t kt, u64 nsec)
-{
-	ktime_t tmp;
-
-	if (likely(nsec < NSEC_PER_SEC)) {
-		tmp.tv64 = nsec;
-	} else {
-		unsigned long rem = do_div(nsec, NSEC_PER_SEC);
-
-		/* Make sure nsec fits into long */
-		if (unlikely(nsec > KTIME_SEC_MAX))
-			return (ktime_t){ .tv64 = KTIME_MAX };
-
-		tmp = ktime_set((long)nsec, rem);
-	}
-
-	return ktime_add(kt, tmp);
-}
-
-EXPORT_SYMBOL_GPL(ktime_add_ns);
-
-/**
- * ktime_sub_ns - Subtract a scalar nanoseconds value from a ktime_t variable
- * @kt:		minuend
- * @nsec:	the scalar nsec value to subtract
- *
- * Returns the subtraction of @nsec from @kt in ktime_t format
- */
-ktime_t ktime_sub_ns(const ktime_t kt, u64 nsec)
-{
-	ktime_t tmp;
-
-	if (likely(nsec < NSEC_PER_SEC)) {
-		tmp.tv64 = nsec;
-	} else {
-		unsigned long rem = do_div(nsec, NSEC_PER_SEC);
-
-		tmp = ktime_set((long)nsec, rem);
-	}
-
-	return ktime_sub(kt, tmp);
-}
-
-EXPORT_SYMBOL_GPL(ktime_sub_ns);
-# endif /* !CONFIG_KTIME_SCALAR */
-
 /*
  * Divide a ktime value by a nanosecond value
  */
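
The out-of-line ktime_add_ns()/ktime_sub_ns() removed above existed only to split a scalar nanosecond value into the 32-bit sec/nsec pair via do_div(). With the scalar-only ktime_t they reduce to a single 64-bit add or subtract, handled by the inline helpers in ktime.h. The sketch below shows that degenerate form; it is an illustration of why the hrtimer.c copies are no longer needed, not the exact in-tree inlines.

/*
 * Scalar-only equivalents of the removed helpers: no do_div(),
 * no remainder handling, just 64-bit arithmetic.  Sketch only.
 */
#include <stdint.h>

typedef union { int64_t tv64; } ktime_t;

static inline ktime_t ktime_add_ns(ktime_t kt, uint64_t nsec)
{
	return (ktime_t){ .tv64 = kt.tv64 + (int64_t)nsec };
}

static inline ktime_t ktime_sub_ns(ktime_t kt, uint64_t nsec)
{
	return (ktime_t){ .tv64 = kt.tv64 - (int64_t)nsec };
}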
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index b94fa3652aaa..cafef242d8f9 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -344,11 +344,8 @@ ktime_t ktime_get(void)
 		nsecs = timekeeping_get_ns(tk) + tk->wall_to_monotonic.tv_nsec;
 
 	} while (read_seqcount_retry(&timekeeper_seq, seq));
-	/*
-	 * Use ktime_set/ktime_add_ns to create a proper ktime on
-	 * 32-bit architectures without CONFIG_KTIME_SCALAR.
-	 */
-	return ktime_add_ns(ktime_set(secs, 0), nsecs);
+
+	return ktime_set(secs, nsecs);
 }
 EXPORT_SYMBOL_GPL(ktime_get);
 
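
The ktime_get() simplification is safe because, with a scalar ktime_t, ktime_set(secs, nsecs) is just secs * NSEC_PER_SEC + nsecs, so an unnormalized nsecs (it can exceed NSEC_PER_SEC after wall_to_monotonic.tv_nsec is added in) lands on the same value as the old two-step ktime_add_ns() construction; that detour was only needed for the 32-bit sec/nsec union. A small userspace sketch under that assumption, not the timekeeping code itself:

/*
 * Demonstrates that the old and new ktime_get() return expressions
 * agree once ktime_t is a plain 64-bit nanosecond scalar.
 */
#include <assert.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000LL

typedef union { int64_t tv64; } ktime_t;

static ktime_t ktime_set(long secs, unsigned long nsecs)
{
	return (ktime_t){ .tv64 = (int64_t)secs * NSEC_PER_SEC + (int64_t)nsecs };
}

static ktime_t ktime_add_ns(ktime_t kt, uint64_t nsec)
{
	return (ktime_t){ .tv64 = kt.tv64 + (int64_t)nsec };
}

int main(void)
{
	long secs = 1234;
	unsigned long nsecs = 1700000000UL;	/* deliberately >= NSEC_PER_SEC */

	ktime_t old_way = ktime_add_ns(ktime_set(secs, 0), nsecs);	/* removed code path */
	ktime_t new_way = ktime_set(secs, nsecs);			/* new return */

	assert(old_way.tv64 == new_way.tv64);
	return 0;
}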