path: root/kernel/time.c
Diffstat (limited to 'kernel/time.c')
-rw-r--r--	kernel/time.c	40
1 file changed, 38 insertions(+), 2 deletions(-)
diff --git a/kernel/time.c b/kernel/time.c
index 29511943871a..c6324d96009e 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -136,7 +136,6 @@ static inline void warp_clock(void)
 	write_seqlock_irq(&xtime_lock);
 	wall_to_monotonic.tv_sec -= sys_tz.tz_minuteswest * 60;
 	xtime.tv_sec += sys_tz.tz_minuteswest * 60;
-	update_xtime_cache(0);
 	write_sequnlock_irq(&xtime_lock);
 	clock_was_set();
 }
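
Note: the hunk above is the write side of the xtime seqlock. For context, here is a minimal, illustrative sketch of the matching read side, modeled on how kernel code samples a seqlock-protected time value; the helper name read_wall_time is hypothetical and not part of this patch:

/* Hypothetical reader for a seqlock-protected timespec; illustrative only. */
#include <linux/seqlock.h>
#include <linux/time.h>

static struct timespec read_wall_time(seqlock_t *lock, const struct timespec *xt)
{
	struct timespec ts;
	unsigned seq;

	do {
		seq = read_seqbegin(lock);	/* snapshot the sequence counter */
		ts = *xt;			/* copy the protected value */
	} while (read_seqretry(lock, seq));	/* retry if a writer intervened */

	return ts;
}

Writers such as warp_clock() take write_seqlock_irq(), which bumps the sequence counter so that concurrent readers retry and never observe a torn update.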
@@ -370,13 +369,20 @@ EXPORT_SYMBOL(mktime);
  * 0 <= tv_nsec < NSEC_PER_SEC
  * For negative values only the tv_sec field is negative !
  */
-void set_normalized_timespec(struct timespec *ts, time_t sec, long nsec)
+void set_normalized_timespec(struct timespec *ts, time_t sec, s64 nsec)
 {
 	while (nsec >= NSEC_PER_SEC) {
+		/*
+		 * The following asm() prevents the compiler from
+		 * optimising this loop into a modulo operation. See
+		 * also __iter_div_u64_rem() in include/linux/time.h
+		 */
+		asm("" : "+rm"(nsec));
 		nsec -= NSEC_PER_SEC;
 		++sec;
 	}
 	while (nsec < 0) {
+		asm("" : "+rm"(nsec));
 		nsec += NSEC_PER_SEC;
 		--sec;
 	}
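
The empty asm() is the interesting part of this hunk: with nsec widened to s64, the compiler would otherwise recognise the subtract-and-increment loops and replace them with a 64-bit division/modulo, which is slower in the common case where nsec needs zero or one adjustments. The "+rm" constraint makes nsec look modified by opaque code on every iteration, so the loop structure survives. A minimal user-space rendering of the same trick, assuming GCC/Clang extended asm (the demo_ names are ours, not the kernel's):

/* User-space sketch of the patch's anti-optimisation barrier. */
#include <stdint.h>
#include <time.h>

#define DEMO_NSEC_PER_SEC 1000000000LL

static void demo_set_normalized(struct timespec *ts, time_t sec, int64_t nsec)
{
	while (nsec >= DEMO_NSEC_PER_SEC) {
		asm("" : "+rm"(nsec));	/* hide nsec from the optimiser */
		nsec -= DEMO_NSEC_PER_SEC;
		++sec;
	}
	while (nsec < 0) {
		asm("" : "+rm"(nsec));
		nsec += DEMO_NSEC_PER_SEC;
		--sec;
	}
	ts->tv_sec = sec;
	ts->tv_nsec = (long)nsec;	/* normalised: 0 <= nsec < 10^9 */
}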
@@ -655,6 +661,36 @@ u64 nsec_to_clock_t(u64 x)
 #endif
 }
 
+/**
+ * nsecs_to_jiffies - Convert nsecs in u64 to jiffies
+ *
+ * @n:	nsecs in u64
+ *
+ * Unlike {m,u}secs_to_jiffies, type of input is not unsigned int but u64.
+ * And this doesn't return MAX_JIFFY_OFFSET since this function is designed
+ * for scheduler, not for use in device drivers to calculate timeout value.
+ *
+ * note:
+ *   NSEC_PER_SEC = 10^9 = (5^9 * 2^9) = (1953125 * 512)
+ *   ULLONG_MAX ns = 18446744073.709551615 secs = about 584 years
+ */
+unsigned long nsecs_to_jiffies(u64 n)
+{
+#if (NSEC_PER_SEC % HZ) == 0
+	/* Common case, HZ = 100, 128, 200, 250, 256, 500, 512, 1000 etc. */
+	return div_u64(n, NSEC_PER_SEC / HZ);
+#elif (HZ % 512) == 0
+	/* overflow after 292 years if HZ = 1024 */
+	return div_u64(n * HZ / 512, NSEC_PER_SEC / 512);
+#else
+	/*
+	 * Generic case - optimized for cases where HZ is a multiple of 3.
+	 * overflow after 64.99 years, exact for HZ = 60, 72, 90, 120 etc.
+	 */
+	return div_u64(n * 9, (9ull * NSEC_PER_SEC + HZ / 2) / HZ);
+#endif
+}
+
 #if (BITS_PER_LONG < 64)
 u64 get_jiffies_64(void)
 {
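
A quick check of the overflow notes in nsecs_to_jiffies(): in the HZ % 512 == 0 branch the numerator is n * HZ / 512 (n * 2 for HZ = 1024), so the u64 product overflows at ULLONG_MAX / 2 ns, i.e. about 584 / 2 = 292 years; in the generic branch the product n * 9 overflows at ULLONG_MAX / 9 ns, about 584 / 9 = 64.99 years. Below is a small user-space sketch of the common-case branch, assuming HZ = 1000 so that one jiffy is 10^6 ns (the demo_ names are ours; div_u64 reduces to plain 64-bit division here):

/* User-space sketch of the common case: NSEC_PER_SEC % HZ == 0. */
#include <stdio.h>
#include <stdint.h>

#define DEMO_HZ           1000ULL
#define DEMO_NSEC_PER_SEC 1000000000ULL

static unsigned long demo_nsecs_to_jiffies(uint64_t n)
{
	return n / (DEMO_NSEC_PER_SEC / DEMO_HZ);	/* 1 jiffy = 10^6 ns */
}

int main(void)
{
	printf("%lu\n", demo_nsecs_to_jiffies(1500000));	    /* 1.5 ms -> 1 jiffy */
	printf("%lu\n", demo_nsecs_to_jiffies(DEMO_NSEC_PER_SEC));  /* 1 s -> 1000 jiffies */
	return 0;
}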