author     Linus Torvalds <torvalds@linux-foundation.org>   2014-08-05 20:46:42 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2014-08-05 20:46:42 -0400
commit     e7fda6c4c3c1a7d6996dd75fd84670fa0b5d448f (patch)
tree       daa51c16462c318b890acf7f01fba5827275dd74 /kernel
parent     08d69a25714429850cf9ef71f22d8cdc9189d93f (diff)
parent     953dec21aed4038464fec02f96a2f1b8701a5bce (diff)
Merge branch 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull timer and time updates from Thomas Gleixner:
"A rather large update of timers, timekeeping & co
- Core timekeeping code is year-2038 safe now for 32bit machines.
Now we just need to fix all in kernel users and the gazillion of
user space interfaces which rely on timespec/timeval :)
- Better cache layout for the timekeeping internal data structures.
- Proper nanosecond based interfaces for in kernel users.
- Tree wide cleanup of code which wants nanoseconds but does hoops
and loops to convert back and forth from timespecs. Some of it
definitely belongs into the ugly code museum.
- Consolidation of the timekeeping interface zoo.
- A fast NMI safe accessor to clock monotonic for tracing. This is a
long standing request to support correlated user/kernel space
traces. With proper NTP frequency correction it's also suitable
for correlation of traces across separate machines. [ Ed. a sketch
of the underlying latch pattern follows this quote ]
- Checkpoint/restart support for timerfd.
- A few NOHZ[_FULL] improvements in the [hr]timer code.
- Code move from kernel to kernel/time of all time* related code.
- New clocksource/event drivers from the ARM universe. I'm really
impressed that despite an architected timer in the newer chips SoC
manufacturers insist on inventing new and differently broken SoC
specific timers.
[ Ed. "Impressed"? I don't think that word means what you think it means ]
- Another round of code move from arch to drivers. Looks like most
of the legacy mess in ARM regarding timers is sorted out except for
a few obnoxious strongholds.
- The usual updates and fixlets all over the place"
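
[ Ed. note: the NMI safe accessor above is built on a seqcount latch. The writer keeps two copies of the timekeeping snapshot and bumps a sequence counter before updating each copy, so a reader that interrupts the writer mid-update - even from NMI context - always finds one copy that is not being written. Below is a minimal stand-alone sketch of the pattern; the names (latch_write, latch_read, struct snap) are illustrative rather than kernel API, and C11 atomics stand in for the kernel's smp_wmb()/smp_rmb() barriers. ]

#include <stdatomic.h>
#include <stdint.h>
#include <string.h>

/* Illustrative payload; the kernel's real one is struct tk_read_base. */
struct snap {
	uint64_t base_ns;	/* nanoseconds at the last update */
	uint64_t cycles;	/* clocksource cycles at the last update */
};

static _Atomic unsigned int latch_seq;	/* lowest bit picks the live copy */
static struct snap latch_copy[2];

/* Writer: never waits for readers (cf. update_fast_timekeeper() below). */
static void latch_write(const struct snap *val)
{
	/* seq goes odd: readers are forced onto latch_copy[1] */
	atomic_fetch_add(&latch_seq, 1);
	memcpy(&latch_copy[0], val, sizeof(*val));
	/* seq goes even: readers come back to latch_copy[0] */
	atomic_fetch_add(&latch_seq, 1);
	memcpy(&latch_copy[1], val, sizeof(*val));
}

/*
 * Reader: usable from an NMI-like context. If it interrupted latch_write()
 * mid-update, seq cannot change underneath it, so the loop terminates after
 * one pass, using the copy that is not currently being written.
 */
static struct snap latch_read(void)
{
	unsigned int seq;
	struct snap s;

	do {
		seq = atomic_load(&latch_seq);
		s = latch_copy[seq & 1];
	} while (atomic_load(&latch_seq) != seq);

	return s;
}

[ The price, documented at ktime_get_mono_fast_ns() in the diff below, is that a reader racing an update can see a timestamp a few nanoseconds off relative to a neighbouring reader; callers have to tolerate that. ]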
* 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (114 commits)
timekeeping: Fixup typo in update_vsyscall_old definition
clocksource: document some basic timekeeping concepts
timekeeping: Use cached ntp_tick_length when accumulating error
timekeeping: Rework frequency adjustments to work better w/ nohz
timekeeping: Minor fixup for timespec64->timespec assignment
ftrace: Provide trace clocks monotonic
timekeeping: Provide fast and NMI safe access to CLOCK_MONOTONIC
seqcount: Add raw_write_seqcount_latch()
seqcount: Provide raw_read_seqcount()
timekeeping: Use tk_read_base as argument for timekeeping_get_ns()
timekeeping: Create struct tk_read_base and use it in struct timekeeper
timekeeping: Restructure the timekeeper some more
clocksource: Get rid of cycle_last
clocksource: Move cycle_last validation to core code
clocksource: Make delta calculation a function
wireless: ath9k: Get rid of timespec conversions
drm: vmwgfx: Use nsec based interfaces
drm: i915: Use nsec based interfaces
timekeeping: Provide ktime_get_raw()
hangcheck-timer: Use ktime_get_ns()
...
Diffstat (limited to 'kernel')

 kernel/Makefile                                                          |   25
 kernel/acct.c                                                            |   10
 kernel/debug/kdb/kdb_main.c                                              |    2
 kernel/delayacct.c                                                       |   62
 kernel/fork.c                                                            |    5
 kernel/time/Kconfig                                                      |    9
 kernel/time/Makefile                                                     |   19
 kernel/time/clocksource.c                                                |   12
 kernel/time/hrtimer.c (renamed from kernel/hrtimer.c)                    |  125
 kernel/time/itimer.c (renamed from kernel/itimer.c)                      |    0
 kernel/time/ntp.c                                                        |   15
 kernel/time/ntp_internal.h                                               |    2
 kernel/time/posix-cpu-timers.c (renamed from kernel/posix-cpu-timers.c)  |    0
 kernel/time/posix-timers.c (renamed from kernel/posix-timers.c)          |    2
 kernel/time/tick-internal.h                                              |    2
 kernel/time/time.c (renamed from kernel/time.c)                          |   64
 kernel/time/timeconst.bc (renamed from kernel/timeconst.bc)              |    0
 kernel/time/timekeeping.c                                                | 1147
 kernel/time/timekeeping.h                                                |   20
 kernel/time/timekeeping_debug.c                                          |    2
 kernel/time/timekeeping_internal.h                                       |   17
 kernel/time/timer.c (renamed from kernel/timer.c)                        |   34
 kernel/time/udelay_test.c                                                |  168
 kernel/trace/trace.c                                                     |   11
 kernel/tsacct.c                                                          |   19

 25 files changed, 1024 insertions, 748 deletions
diff --git a/kernel/Makefile b/kernel/Makefile
index f2a8b6246ce9..973a40cf8068 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -3,12 +3,11 @@
 #
 
 obj-y     = fork.o exec_domain.o panic.o \
-	    cpu.o exit.o itimer.o time.o softirq.o resource.o \
-	    sysctl.o sysctl_binary.o capability.o ptrace.o timer.o user.o \
+	    cpu.o exit.o softirq.o resource.o \
+	    sysctl.o sysctl_binary.o capability.o ptrace.o user.o \
 	    signal.o sys.o kmod.o workqueue.o pid.o task_work.o \
-	    extable.o params.o posix-timers.o \
-	    kthread.o sys_ni.o posix-cpu-timers.o \
-	    hrtimer.o nsproxy.o \
+	    extable.o params.o \
+	    kthread.o sys_ni.o nsproxy.o \
 	    notifier.o ksysfs.o cred.o reboot.o \
 	    async.o range.o groups.o smpboot.o
 
@@ -110,22 +109,6 @@ targets += config_data.h
 $(obj)/config_data.h: $(obj)/config_data.gz FORCE
 	$(call filechk,ikconfiggz)
 
-$(obj)/time.o: $(obj)/timeconst.h
-
-quiet_cmd_hzfile = HZFILE $@
-      cmd_hzfile = echo "hz=$(CONFIG_HZ)" > $@
-
-targets += hz.bc
-$(obj)/hz.bc: $(objtree)/include/config/hz.h FORCE
-	$(call if_changed,hzfile)
-
-quiet_cmd_bc = BC $@
-      cmd_bc = bc -q $(filter-out FORCE,$^) > $@
-
-targets += timeconst.h
-$(obj)/timeconst.h: $(obj)/hz.bc $(src)/timeconst.bc FORCE
-	$(call if_changed,bc)
-
 ###############################################################################
 #
 # Roll all the X.509 certificates that we can find together and pull them into
diff --git a/kernel/acct.c b/kernel/acct.c
index 808a86ff229d..a1844f14c6d6 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -458,9 +458,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
 	acct_t ac;
 	mm_segment_t fs;
 	unsigned long flim;
-	u64 elapsed;
-	u64 run_time;
-	struct timespec uptime;
+	u64 elapsed, run_time;
 	struct tty_struct *tty;
 	const struct cred *orig_cred;
 
@@ -484,10 +482,8 @@ static void do_acct_process(struct bsd_acct_struct *acct,
 	strlcpy(ac.ac_comm, current->comm, sizeof(ac.ac_comm));
 
 	/* calculate run_time in nsec*/
-	do_posix_clock_monotonic_gettime(&uptime);
-	run_time = (u64)uptime.tv_sec*NSEC_PER_SEC + uptime.tv_nsec;
-	run_time -= (u64)current->group_leader->start_time.tv_sec * NSEC_PER_SEC
-		       + current->group_leader->start_time.tv_nsec;
+	run_time = ktime_get_ns();
+	run_time -= current->group_leader->start_time;
 	/* convert nsec -> AHZ */
 	elapsed = nsec_to_AHZ(run_time);
 #if ACCT_VERSION==3
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
index 2f7c760305ca..379650b984f8 100644
--- a/kernel/debug/kdb/kdb_main.c
+++ b/kernel/debug/kdb/kdb_main.c
@@ -2472,7 +2472,7 @@ static void kdb_gmtime(struct timespec *tv, struct kdb_tm *tm)
 static void kdb_sysinfo(struct sysinfo *val)
 {
 	struct timespec uptime;
-	do_posix_clock_monotonic_gettime(&uptime);
+	ktime_get_ts(&uptime);
 	memset(val, 0, sizeof(*val));
 	val->uptime = uptime.tv_sec;
 	val->loads[0] = avenrun[0];
diff --git a/kernel/delayacct.c b/kernel/delayacct.c
index 54996b71e66d..ef90b04d783f 100644
--- a/kernel/delayacct.c
+++ b/kernel/delayacct.c
@@ -46,42 +46,25 @@ void __delayacct_tsk_init(struct task_struct *tsk)
 }
 
 /*
- * Start accounting for a delay statistic using
- * its starting timestamp (@start)
+ * Finish delay accounting for a statistic using its timestamps (@start),
+ * accumalator (@total) and @count
  */
-
-static inline void delayacct_start(struct timespec *start)
+static void delayacct_end(u64 *start, u64 *total, u32 *count)
 {
-	do_posix_clock_monotonic_gettime(start);
-}
-
-/*
- * Finish delay accounting for a statistic using
- * its timestamps (@start, @end), accumalator (@total) and @count
- */
-
-static void delayacct_end(struct timespec *start, struct timespec *end,
-				u64 *total, u32 *count)
-{
-	struct timespec ts;
-	s64 ns;
+	s64 ns = ktime_get_ns() - *start;
 	unsigned long flags;
 
-	do_posix_clock_monotonic_gettime(end);
-	ts = timespec_sub(*end, *start);
-	ns = timespec_to_ns(&ts);
-	if (ns < 0)
-		return;
-
-	spin_lock_irqsave(&current->delays->lock, flags);
-	*total += ns;
-	(*count)++;
-	spin_unlock_irqrestore(&current->delays->lock, flags);
+	if (ns > 0) {
+		spin_lock_irqsave(&current->delays->lock, flags);
+		*total += ns;
+		(*count)++;
+		spin_unlock_irqrestore(&current->delays->lock, flags);
+	}
 }
 
 void __delayacct_blkio_start(void)
 {
-	delayacct_start(&current->delays->blkio_start);
+	current->delays->blkio_start = ktime_get_ns();
 }
 
 void __delayacct_blkio_end(void)
@@ -89,35 +72,29 @@ void __delayacct_blkio_end(void)
 	if (current->delays->flags & DELAYACCT_PF_SWAPIN)
 		/* Swapin block I/O */
 		delayacct_end(&current->delays->blkio_start,
-			&current->delays->blkio_end,
 			&current->delays->swapin_delay,
 			&current->delays->swapin_count);
 	else	/* Other block I/O */
 		delayacct_end(&current->delays->blkio_start,
-			&current->delays->blkio_end,
 			&current->delays->blkio_delay,
 			&current->delays->blkio_count);
 }
 
 int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
 {
-	s64 tmp;
-	unsigned long t1;
-	unsigned long long t2, t3;
-	unsigned long flags;
-	struct timespec ts;
 	cputime_t utime, stime, stimescaled, utimescaled;
+	unsigned long long t2, t3;
+	unsigned long flags, t1;
+	s64 tmp;
 
-	tmp = (s64)d->cpu_run_real_total;
 	task_cputime(tsk, &utime, &stime);
-	cputime_to_timespec(utime + stime, &ts);
-	tmp += timespec_to_ns(&ts);
+	tmp = (s64)d->cpu_run_real_total;
+	tmp += cputime_to_nsecs(utime + stime);
 	d->cpu_run_real_total = (tmp < (s64)d->cpu_run_real_total) ? 0 : tmp;
 
-	tmp = (s64)d->cpu_scaled_run_real_total;
 	task_cputime_scaled(tsk, &utimescaled, &stimescaled);
-	cputime_to_timespec(utimescaled + stimescaled, &ts);
-	tmp += timespec_to_ns(&ts);
+	tmp = (s64)d->cpu_scaled_run_real_total;
+	tmp += cputime_to_nsecs(utimescaled + stimescaled);
 	d->cpu_scaled_run_real_total =
 		(tmp < (s64)d->cpu_scaled_run_real_total) ? 0 : tmp;
 
@@ -169,13 +146,12 @@ __u64 __delayacct_blkio_ticks(struct task_struct *tsk)
 
 void __delayacct_freepages_start(void)
 {
-	delayacct_start(&current->delays->freepages_start);
+	current->delays->freepages_start = ktime_get_ns();
 }
 
 void __delayacct_freepages_end(void)
 {
 	delayacct_end(&current->delays->freepages_start,
-		&current->delays->freepages_end,
 		&current->delays->freepages_delay,
 		&current->delays->freepages_count);
 }
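
With the conversion above, every delayacct start/end pair reduces to "stamp ktime_get_ns() at the start, subtract it at the end", and the intermediate *_end timestamp fields disappear from struct task_delay_info. A stand-alone sketch of that pattern (hypothetical names: struct stopwatch and now_ns() merely stand in for the per-task delay fields and ktime_get_ns()):

#include <stdint.h>

/* Hypothetical stand-in for the per-task delay accounting fields. */
struct stopwatch {
	uint64_t start;		/* ns timestamp taken when the delay begins */
	uint64_t total;		/* accumulated delay in ns */
	uint32_t count;		/* number of completed intervals */
};

/* Stand-in for ktime_get_ns(): a monotonic nanosecond clock. */
extern uint64_t now_ns(void);

static void delay_start(struct stopwatch *w)
{
	w->start = now_ns();
}

static void delay_end(struct stopwatch *w)
{
	int64_t ns = (int64_t)(now_ns() - w->start);

	/* Mirrors delayacct_end(): only positive deltas are accumulated. */
	if (ns > 0) {
		w->total += ns;
		w->count++;
	}
}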
diff --git a/kernel/fork.c b/kernel/fork.c
index 962885edbe53..5f1bf3bebb4f 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1261,9 +1261,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 
 	posix_cpu_timers_init(p);
 
-	do_posix_clock_monotonic_gettime(&p->start_time);
-	p->real_start_time = p->start_time;
-	monotonic_to_bootbased(&p->real_start_time);
+	p->start_time = ktime_get_ns();
+	p->real_start_time = ktime_get_boot_ns();
 	p->io_context = NULL;
 	p->audit_context = NULL;
 	if (clone_flags & CLONE_THREAD)
diff --git a/kernel/time/Kconfig b/kernel/time/Kconfig
index f448513a45ed..d626dc98e8df 100644
--- a/kernel/time/Kconfig
+++ b/kernel/time/Kconfig
@@ -12,6 +12,11 @@ config CLOCKSOURCE_WATCHDOG
 config ARCH_CLOCKSOURCE_DATA
 	bool
 
+# Clocksources require validation of the clocksource against the last
+# cycle update - x86/TSC misfeature
+config CLOCKSOURCE_VALIDATE_LAST_CYCLE
+	bool
+
 # Timekeeping vsyscall support
 config GENERIC_TIME_VSYSCALL
 	bool
@@ -20,10 +25,6 @@ config GENERIC_TIME_VSYSCALL
 config GENERIC_TIME_VSYSCALL_OLD
 	bool
 
-# ktime_t scalar 64bit nsec representation
-config KTIME_SCALAR
-	bool
-
 # Old style timekeeping
 config ARCH_USES_GETTIMEOFFSET
 	bool
diff --git a/kernel/time/Makefile b/kernel/time/Makefile
index 57a413fd0ebf..7347426fa68d 100644
--- a/kernel/time/Makefile
+++ b/kernel/time/Makefile
@@ -1,3 +1,4 @@
+obj-y += time.o timer.o hrtimer.o itimer.o posix-timers.o posix-cpu-timers.o
 obj-y += timekeeping.o ntp.o clocksource.o jiffies.o timer_list.o
 obj-y += timeconv.o posix-clock.o alarmtimer.o
 
@@ -12,3 +13,21 @@ obj-$(CONFIG_TICK_ONESHOT) += tick-oneshot.o
 obj-$(CONFIG_TICK_ONESHOT) += tick-sched.o
 obj-$(CONFIG_TIMER_STATS) += timer_stats.o
 obj-$(CONFIG_DEBUG_FS) += timekeeping_debug.o
+obj-$(CONFIG_TEST_UDELAY) += udelay_test.o
+
+$(obj)/time.o: $(obj)/timeconst.h
+
+quiet_cmd_hzfile = HZFILE $@
+      cmd_hzfile = echo "hz=$(CONFIG_HZ)" > $@
+
+targets += hz.bc
+$(obj)/hz.bc: $(objtree)/include/config/hz.h FORCE
+	$(call if_changed,hzfile)
+
+quiet_cmd_bc = BC $@
+      cmd_bc = bc -q $(filter-out FORCE,$^) > $@
+
+targets += timeconst.h
+$(obj)/timeconst.h: $(obj)/hz.bc $(src)/timeconst.bc FORCE
+	$(call if_changed,bc)
+
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index ba3e502c955a..2e949cc9c9f1 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -32,6 +32,7 @@
 #include <linux/kthread.h>
 
 #include "tick-internal.h"
+#include "timekeeping_internal.h"
 
 void timecounter_init(struct timecounter *tc,
 		      const struct cyclecounter *cc,
@@ -249,7 +250,7 @@ void clocksource_mark_unstable(struct clocksource *cs)
 static void clocksource_watchdog(unsigned long data)
 {
 	struct clocksource *cs;
-	cycle_t csnow, wdnow;
+	cycle_t csnow, wdnow, delta;
 	int64_t wd_nsec, cs_nsec;
 	int next_cpu, reset_pending;
 
@@ -282,11 +283,12 @@ static void clocksource_watchdog(unsigned long data)
 			continue;
 		}
 
-		wd_nsec = clocksource_cyc2ns((wdnow - cs->wd_last) & watchdog->mask,
-					     watchdog->mult, watchdog->shift);
+		delta = clocksource_delta(wdnow, cs->wd_last, watchdog->mask);
+		wd_nsec = clocksource_cyc2ns(delta, watchdog->mult,
+					     watchdog->shift);
 
-		cs_nsec = clocksource_cyc2ns((csnow - cs->cs_last) &
-					     cs->mask, cs->mult, cs->shift);
+		delta = clocksource_delta(csnow, cs->cs_last, cs->mask);
+		cs_nsec = clocksource_cyc2ns(delta, cs->mult, cs->shift);
 		cs->cs_last = csnow;
 		cs->wd_last = wdnow;
 
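
clocksource_delta() itself is added to kernel/time/timekeeping_internal.h, whose diff is not shown in this excerpt (the diffstat lists it at 17 lines). Based on the call sites above and the CLOCKSOURCE_VALIDATE_LAST_CYCLE symbol introduced in the Kconfig hunk earlier, its assumed shape is the familiar mask-and-subtract, with the validating variant clamping an apparently negative delta - a readout behind cycle_last, the x86/TSC misfeature - to zero:

/*
 * Assumed shape of the helper factored out by this series; the real
 * definition lives in kernel/time/timekeeping_internal.h.
 */
#ifdef CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE
static inline cycle_t clocksource_delta(cycle_t now, cycle_t last, cycle_t mask)
{
	cycle_t ret = (now - last) & mask;

	/* Treat a readout behind cycle_last as "no time elapsed". */
	return (s64) ret > 0 ? ret : 0;
}
#else
static inline cycle_t clocksource_delta(cycle_t now, cycle_t last, cycle_t mask)
{
	return (now - last) & mask;
}
#endif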
diff --git a/kernel/hrtimer.c b/kernel/time/hrtimer.c
index 3ab28993f6e0..1c2fe7de2842 100644
--- a/kernel/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -54,6 +54,8 @@
 
 #include <trace/events/timer.h>
 
+#include "timekeeping.h"
+
 /*
  * The timer bases:
  *
@@ -114,21 +116,18 @@ static inline int hrtimer_clockid_to_base(clockid_t clock_id)
  */
 static void hrtimer_get_softirq_time(struct hrtimer_cpu_base *base)
 {
-	ktime_t xtim, mono, boot;
-	struct timespec xts, tom, slp;
-	s32 tai_offset;
+	ktime_t xtim, mono, boot, tai;
+	ktime_t off_real, off_boot, off_tai;
 
-	get_xtime_and_monotonic_and_sleep_offset(&xts, &tom, &slp);
-	tai_offset = timekeeping_get_tai_offset();
+	mono = ktime_get_update_offsets_tick(&off_real, &off_boot, &off_tai);
+	boot = ktime_add(mono, off_boot);
+	xtim = ktime_add(mono, off_real);
+	tai = ktime_add(xtim, off_tai);
 
-	xtim = timespec_to_ktime(xts);
-	mono = ktime_add(xtim, timespec_to_ktime(tom));
-	boot = ktime_add(mono, timespec_to_ktime(slp));
 	base->clock_base[HRTIMER_BASE_REALTIME].softirq_time = xtim;
 	base->clock_base[HRTIMER_BASE_MONOTONIC].softirq_time = mono;
 	base->clock_base[HRTIMER_BASE_BOOTTIME].softirq_time = boot;
-	base->clock_base[HRTIMER_BASE_TAI].softirq_time =
-			ktime_add(xtim, ktime_set(tai_offset, 0));
+	base->clock_base[HRTIMER_BASE_TAI].softirq_time = tai;
 }
 
 /*
@@ -264,60 +263,6 @@ lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
  * too large for inlining:
  */
 #if BITS_PER_LONG < 64
-# ifndef CONFIG_KTIME_SCALAR
-/**
- * ktime_add_ns - Add a scalar nanoseconds value to a ktime_t variable
- * @kt:		addend
- * @nsec:	the scalar nsec value to add
- *
- * Returns the sum of kt and nsec in ktime_t format
- */
-ktime_t ktime_add_ns(const ktime_t kt, u64 nsec)
-{
-	ktime_t tmp;
-
-	if (likely(nsec < NSEC_PER_SEC)) {
-		tmp.tv64 = nsec;
-	} else {
-		unsigned long rem = do_div(nsec, NSEC_PER_SEC);
-
-		/* Make sure nsec fits into long */
-		if (unlikely(nsec > KTIME_SEC_MAX))
-			return (ktime_t){ .tv64 = KTIME_MAX };
-
-		tmp = ktime_set((long)nsec, rem);
-	}
-
-	return ktime_add(kt, tmp);
-}
-
-EXPORT_SYMBOL_GPL(ktime_add_ns);
-
-/**
- * ktime_sub_ns - Subtract a scalar nanoseconds value from a ktime_t variable
- * @kt:		minuend
- * @nsec:	the scalar nsec value to subtract
- *
- * Returns the subtraction of @nsec from @kt in ktime_t format
- */
-ktime_t ktime_sub_ns(const ktime_t kt, u64 nsec)
-{
-	ktime_t tmp;
-
-	if (likely(nsec < NSEC_PER_SEC)) {
-		tmp.tv64 = nsec;
-	} else {
-		unsigned long rem = do_div(nsec, NSEC_PER_SEC);
-
-		tmp = ktime_set((long)nsec, rem);
-	}
-
-	return ktime_sub(kt, tmp);
-}
-
-EXPORT_SYMBOL_GPL(ktime_sub_ns);
-# endif /* !CONFIG_KTIME_SCALAR */
-
 /*
  * Divide a ktime value by a nanosecond value
  */
@@ -337,6 +282,7 @@ u64 ktime_divns(const ktime_t kt, s64 div)
 
 	return dclc;
 }
+EXPORT_SYMBOL_GPL(ktime_divns);
 #endif /* BITS_PER_LONG >= 64 */
 
 /*
@@ -602,6 +548,11 @@ hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
  * timers, we have to check, whether it expires earlier than the timer for
  * which the clock event device was armed.
  *
+ * Note, that in case the state has HRTIMER_STATE_CALLBACK set, no reprogramming
+ * and no expiry check happens. The timer gets enqueued into the rbtree. The
+ * reprogramming and expiry check is done in the hrtimer_interrupt or in the
+ * softirq.
+ *
  * Called with interrupts disabled and base->cpu_base.lock held
  */
 static int hrtimer_reprogram(struct hrtimer *timer,
@@ -662,25 +613,13 @@ static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base)
 	base->hres_active = 0;
 }
 
-/*
- * When High resolution timers are active, try to reprogram. Note, that in case
- * the state has HRTIMER_STATE_CALLBACK set, no reprogramming and no expiry
- * check happens. The timer gets enqueued into the rbtree. The reprogramming
- * and expiry check is done in the hrtimer_interrupt or in the softirq.
- */
-static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
-					    struct hrtimer_clock_base *base)
-{
-	return base->cpu_base->hres_active && hrtimer_reprogram(timer, base);
-}
-
 static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
 {
 	ktime_t *offs_real = &base->clock_base[HRTIMER_BASE_REALTIME].offset;
 	ktime_t *offs_boot = &base->clock_base[HRTIMER_BASE_BOOTTIME].offset;
 	ktime_t *offs_tai = &base->clock_base[HRTIMER_BASE_TAI].offset;
 
-	return ktime_get_update_offsets(offs_real, offs_boot, offs_tai);
+	return ktime_get_update_offsets_now(offs_real, offs_boot, offs_tai);
 }
 
 /*
@@ -755,8 +694,8 @@ static inline int hrtimer_is_hres_enabled(void) { return 0; }
 static inline int hrtimer_switch_to_hres(void) { return 0; }
 static inline void
 hrtimer_force_reprogram(struct hrtimer_cpu_base *base, int skip_equal) { }
-static inline int hrtimer_enqueue_reprogram(struct hrtimer *timer,
-					    struct hrtimer_clock_base *base)
+static inline int hrtimer_reprogram(struct hrtimer *timer,
+				    struct hrtimer_clock_base *base)
 {
 	return 0;
 }
@@ -1013,14 +952,25 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
 
 	leftmost = enqueue_hrtimer(timer, new_base);
 
-	/*
-	 * Only allow reprogramming if the new base is on this CPU.
-	 * (it might still be on another CPU if the timer was pending)
-	 *
-	 * XXX send_remote_softirq() ?
-	 */
-	if (leftmost && new_base->cpu_base == &__get_cpu_var(hrtimer_bases)
-		&& hrtimer_enqueue_reprogram(timer, new_base)) {
+	if (!leftmost) {
+		unlock_hrtimer_base(timer, &flags);
+		return ret;
+	}
+
+	if (!hrtimer_is_hres_active(timer)) {
+		/*
+		 * Kick to reschedule the next tick to handle the new timer
+		 * on dynticks target.
+		 */
+		wake_up_nohz_cpu(new_base->cpu_base->cpu);
+	} else if (new_base->cpu_base == &__get_cpu_var(hrtimer_bases) &&
+			hrtimer_reprogram(timer, new_base)) {
+		/*
+		 * Only allow reprogramming if the new base is on this CPU.
+		 * (it might still be on another CPU if the timer was pending)
+		 *
+		 * XXX send_remote_softirq() ?
+		 */
 		if (wakeup) {
 			/*
 			 * We need to drop cpu_base->lock to avoid a
@@ -1680,6 +1630,7 @@ static void init_hrtimers_cpu(int cpu)
 		timerqueue_init_head(&cpu_base->clock_base[i].active);
 	}
 
+	cpu_base->cpu = cpu;
 	hrtimer_init_hres(cpu_base);
 }
 
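
The hrtimer_get_softirq_time() rework above leans on one invariant: every clock base is CLOCK_MONOTONIC plus a per-base ktime_t offset, so a single monotonic readout plus three offsets reconstructs all four bases. A toy illustration of that arithmetic (sample values invented; ktime_t treated as the plain s64 nanosecond scalar it now is everywhere, given the KTIME_SCALAR removal above):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

typedef int64_t ktime;	/* ktime_t: a scalar s64 nanosecond count */

int main(void)
{
	/* Invented sample values, all in nanoseconds. */
	ktime mono     = 5000000000LL;		/* 5 s: time since boot minus suspend */
	ktime off_real = 1407000000000000000LL;	/* wall clock minus monotonic */
	ktime off_boot = 2000000000LL;		/* 2 s spent suspended */
	ktime off_tai  = 35000000000LL;		/* TAI minus realtime (leap seconds) */

	/* The same derivation hrtimer_get_softirq_time() performs: */
	ktime xtim = mono + off_real;	/* CLOCK_REALTIME */
	ktime boot = mono + off_boot;	/* CLOCK_BOOTTIME */
	ktime tai  = xtim + off_tai;	/* CLOCK_TAI */

	printf("real=%" PRId64 " boot=%" PRId64 " tai=%" PRId64 "\n",
	       xtim, boot, tai);
	return 0;
}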
diff --git a/kernel/itimer.c b/kernel/time/itimer.c
index 8d262b467573..8d262b467573 100644
--- a/kernel/itimer.c
+++ b/kernel/time/itimer.c
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index 33db43a39515..87a346fd6d61 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -466,7 +466,8 @@ static DECLARE_DELAYED_WORK(sync_cmos_work, sync_cmos_clock);
 
 static void sync_cmos_clock(struct work_struct *work)
 {
-	struct timespec now, next;
+	struct timespec64 now;
+	struct timespec next;
 	int fail = 1;
 
 	/*
@@ -485,9 +486,9 @@ static void sync_cmos_clock(struct work_struct *work)
 		return;
 	}
 
-	getnstimeofday(&now);
+	getnstimeofday64(&now);
 	if (abs(now.tv_nsec - (NSEC_PER_SEC / 2)) <= tick_nsec * 5) {
-		struct timespec adjust = now;
+		struct timespec adjust = timespec64_to_timespec(now);
 
 		fail = -ENODEV;
 		if (persistent_clock_is_local)
@@ -531,7 +532,7 @@ void ntp_notify_cmos_timer(void) { }
 /*
  * Propagate a new txc->status value into the NTP state:
  */
-static inline void process_adj_status(struct timex *txc, struct timespec *ts)
+static inline void process_adj_status(struct timex *txc, struct timespec64 *ts)
 {
 	if ((time_status & STA_PLL) && !(txc->status & STA_PLL)) {
 		time_state = TIME_OK;
@@ -554,7 +555,7 @@
 
 
 static inline void process_adjtimex_modes(struct timex *txc,
-						struct timespec *ts,
+						struct timespec64 *ts,
 						s32 *time_tai)
 {
 	if (txc->modes & ADJ_STATUS)
@@ -640,7 +641,7 @@ int ntp_validate_timex(struct timex *txc)
  * adjtimex mainly allows reading (and writing, if superuser) of
  * kernel time-keeping variables. used by xntpd.
  */
-int __do_adjtimex(struct timex *txc, struct timespec *ts, s32 *time_tai)
+int __do_adjtimex(struct timex *txc, struct timespec64 *ts, s32 *time_tai)
 {
 	int result;
 
@@ -684,7 +685,7 @@ int __do_adjtimex(struct timex *txc, struct timespec64 *ts, s32 *time_tai)
 	/* fill PPS status fields */
 	pps_fill_timex(txc);
 
-	txc->time.tv_sec = ts->tv_sec;
+	txc->time.tv_sec = (time_t)ts->tv_sec;
 	txc->time.tv_usec = ts->tv_nsec;
 	if (!(time_status & STA_NANO))
 		txc->time.tv_usec /= NSEC_PER_USEC;
diff --git a/kernel/time/ntp_internal.h b/kernel/time/ntp_internal.h
index 1950cb4ca2a4..bbd102ad9df7 100644
--- a/kernel/time/ntp_internal.h
+++ b/kernel/time/ntp_internal.h
@@ -7,6 +7,6 @@ extern void ntp_clear(void);
 extern u64 ntp_tick_length(void);
 extern int second_overflow(unsigned long secs);
 extern int ntp_validate_timex(struct timex *);
-extern int __do_adjtimex(struct timex *, struct timespec *, s32 *);
+extern int __do_adjtimex(struct timex *, struct timespec64 *, s32 *);
 extern void __hardpps(const struct timespec *, const struct timespec *);
 #endif /* _LINUX_NTP_INTERNAL_H */
diff --git a/kernel/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index 3b8946416a5f..3b8946416a5f 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
diff --git a/kernel/posix-timers.c b/kernel/time/posix-timers.c
index 424c2d4265c9..42b463ad90f2 100644
--- a/kernel/posix-timers.c
+++ b/kernel/time/posix-timers.c
@@ -49,6 +49,8 @@
 #include <linux/export.h>
 #include <linux/hashtable.h>
 
+#include "timekeeping.h"
+
 /*
  * Management arrays for POSIX timers. Timers are now kept in static hash table
  * with 512 entries.
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index 7ab92b19965a..c19c1d84b6f3 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -4,6 +4,8 @@
 #include <linux/hrtimer.h>
 #include <linux/tick.h>
 
+#include "timekeeping.h"
+
 extern seqlock_t jiffies_lock;
 
 #define CS_NAME_LEN	32
diff --git a/kernel/time.c b/kernel/time/time.c
index 7c7964c33ae7..f0294ba14634 100644
--- a/kernel/time.c
+++ b/kernel/time/time.c
@@ -42,6 +42,7 @@
 #include <asm/unistd.h>
 
 #include "timeconst.h"
+#include "timekeeping.h"
 
 /*
  * The timezone where the local system is located. Used as a default by some
@@ -420,6 +421,68 @@ struct timeval ns_to_timeval(const s64 nsec)
 }
 EXPORT_SYMBOL(ns_to_timeval);
 
+#if BITS_PER_LONG == 32
+/**
+ * set_normalized_timespec - set timespec sec and nsec parts and normalize
+ *
+ * @ts:		pointer to timespec variable to be set
+ * @sec:	seconds to set
+ * @nsec:	nanoseconds to set
+ *
+ * Set seconds and nanoseconds field of a timespec variable and
+ * normalize to the timespec storage format
+ *
+ * Note: The tv_nsec part is always in the range of
+ *	0 <= tv_nsec < NSEC_PER_SEC
+ * For negative values only the tv_sec field is negative !
+ */
+void set_normalized_timespec64(struct timespec64 *ts, time64_t sec, s64 nsec)
+{
+	while (nsec >= NSEC_PER_SEC) {
+		/*
+		 * The following asm() prevents the compiler from
+		 * optimising this loop into a modulo operation. See
+		 * also __iter_div_u64_rem() in include/linux/time.h
+		 */
+		asm("" : "+rm"(nsec));
+		nsec -= NSEC_PER_SEC;
+		++sec;
+	}
+	while (nsec < 0) {
+		asm("" : "+rm"(nsec));
+		nsec += NSEC_PER_SEC;
+		--sec;
+	}
+	ts->tv_sec = sec;
+	ts->tv_nsec = nsec;
+}
+EXPORT_SYMBOL(set_normalized_timespec64);
+
+/**
+ * ns_to_timespec64 - Convert nanoseconds to timespec64
+ * @nsec:	the nanoseconds value to be converted
+ *
+ * Returns the timespec64 representation of the nsec parameter.
+ */
+struct timespec64 ns_to_timespec64(const s64 nsec)
+{
+	struct timespec64 ts;
+	s32 rem;
+
+	if (!nsec)
+		return (struct timespec64) {0, 0};
+
+	ts.tv_sec = div_s64_rem(nsec, NSEC_PER_SEC, &rem);
+	if (unlikely(rem < 0)) {
+		ts.tv_sec--;
+		rem += NSEC_PER_SEC;
+	}
+	ts.tv_nsec = rem;
+
+	return ts;
+}
+EXPORT_SYMBOL(ns_to_timespec64);
+#endif
 /*
  * When we convert to jiffies then we interpret incoming values
  * the following way:
@@ -694,6 +757,7 @@ unsigned long nsecs_to_jiffies(u64 n)
 {
 	return (unsigned long)nsecs_to_jiffies64(n);
 }
+EXPORT_SYMBOL_GPL(nsecs_to_jiffies);
 
 /*
  * Add two timespec values and do a safety check for overflow.
diff --git a/kernel/timeconst.bc b/kernel/time/timeconst.bc
index 511bdf2cafda..511bdf2cafda 100644
--- a/kernel/timeconst.bc
+++ b/kernel/time/timeconst.bc
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 32d8d6aaedb8..f36b02838a47 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c | |||
@@ -32,11 +32,34 @@ | |||
32 | #define TK_MIRROR (1 << 1) | 32 | #define TK_MIRROR (1 << 1) |
33 | #define TK_CLOCK_WAS_SET (1 << 2) | 33 | #define TK_CLOCK_WAS_SET (1 << 2) |
34 | 34 | ||
35 | static struct timekeeper timekeeper; | 35 | /* |
36 | * The most important data for readout fits into a single 64 byte | ||
37 | * cache line. | ||
38 | */ | ||
39 | static struct { | ||
40 | seqcount_t seq; | ||
41 | struct timekeeper timekeeper; | ||
42 | } tk_core ____cacheline_aligned; | ||
43 | |||
36 | static DEFINE_RAW_SPINLOCK(timekeeper_lock); | 44 | static DEFINE_RAW_SPINLOCK(timekeeper_lock); |
37 | static seqcount_t timekeeper_seq; | ||
38 | static struct timekeeper shadow_timekeeper; | 45 | static struct timekeeper shadow_timekeeper; |
39 | 46 | ||
47 | /** | ||
48 | * struct tk_fast - NMI safe timekeeper | ||
49 | * @seq: Sequence counter for protecting updates. The lowest bit | ||
50 | * is the index for the tk_read_base array | ||
51 | * @base: tk_read_base array. Access is indexed by the lowest bit of | ||
52 | * @seq. | ||
53 | * | ||
54 | * See @update_fast_timekeeper() below. | ||
55 | */ | ||
56 | struct tk_fast { | ||
57 | seqcount_t seq; | ||
58 | struct tk_read_base base[2]; | ||
59 | }; | ||
60 | |||
61 | static struct tk_fast tk_fast_mono ____cacheline_aligned; | ||
62 | |||
40 | /* flag for if timekeeping is suspended */ | 63 | /* flag for if timekeeping is suspended */ |
41 | int __read_mostly timekeeping_suspended; | 64 | int __read_mostly timekeeping_suspended; |
42 | 65 | ||
@@ -45,49 +68,54 @@ bool __read_mostly persistent_clock_exist = false; | |||
45 | 68 | ||
46 | static inline void tk_normalize_xtime(struct timekeeper *tk) | 69 | static inline void tk_normalize_xtime(struct timekeeper *tk) |
47 | { | 70 | { |
48 | while (tk->xtime_nsec >= ((u64)NSEC_PER_SEC << tk->shift)) { | 71 | while (tk->tkr.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr.shift)) { |
49 | tk->xtime_nsec -= (u64)NSEC_PER_SEC << tk->shift; | 72 | tk->tkr.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr.shift; |
50 | tk->xtime_sec++; | 73 | tk->xtime_sec++; |
51 | } | 74 | } |
52 | } | 75 | } |
53 | 76 | ||
54 | static void tk_set_xtime(struct timekeeper *tk, const struct timespec *ts) | 77 | static inline struct timespec64 tk_xtime(struct timekeeper *tk) |
78 | { | ||
79 | struct timespec64 ts; | ||
80 | |||
81 | ts.tv_sec = tk->xtime_sec; | ||
82 | ts.tv_nsec = (long)(tk->tkr.xtime_nsec >> tk->tkr.shift); | ||
83 | return ts; | ||
84 | } | ||
85 | |||
86 | static void tk_set_xtime(struct timekeeper *tk, const struct timespec64 *ts) | ||
55 | { | 87 | { |
56 | tk->xtime_sec = ts->tv_sec; | 88 | tk->xtime_sec = ts->tv_sec; |
57 | tk->xtime_nsec = (u64)ts->tv_nsec << tk->shift; | 89 | tk->tkr.xtime_nsec = (u64)ts->tv_nsec << tk->tkr.shift; |
58 | } | 90 | } |
59 | 91 | ||
60 | static void tk_xtime_add(struct timekeeper *tk, const struct timespec *ts) | 92 | static void tk_xtime_add(struct timekeeper *tk, const struct timespec64 *ts) |
61 | { | 93 | { |
62 | tk->xtime_sec += ts->tv_sec; | 94 | tk->xtime_sec += ts->tv_sec; |
63 | tk->xtime_nsec += (u64)ts->tv_nsec << tk->shift; | 95 | tk->tkr.xtime_nsec += (u64)ts->tv_nsec << tk->tkr.shift; |
64 | tk_normalize_xtime(tk); | 96 | tk_normalize_xtime(tk); |
65 | } | 97 | } |
66 | 98 | ||
67 | static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec wtm) | 99 | static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec64 wtm) |
68 | { | 100 | { |
69 | struct timespec tmp; | 101 | struct timespec64 tmp; |
70 | 102 | ||
71 | /* | 103 | /* |
72 | * Verify consistency of: offset_real = -wall_to_monotonic | 104 | * Verify consistency of: offset_real = -wall_to_monotonic |
73 | * before modifying anything | 105 | * before modifying anything |
74 | */ | 106 | */ |
75 | set_normalized_timespec(&tmp, -tk->wall_to_monotonic.tv_sec, | 107 | set_normalized_timespec64(&tmp, -tk->wall_to_monotonic.tv_sec, |
76 | -tk->wall_to_monotonic.tv_nsec); | 108 | -tk->wall_to_monotonic.tv_nsec); |
77 | WARN_ON_ONCE(tk->offs_real.tv64 != timespec_to_ktime(tmp).tv64); | 109 | WARN_ON_ONCE(tk->offs_real.tv64 != timespec64_to_ktime(tmp).tv64); |
78 | tk->wall_to_monotonic = wtm; | 110 | tk->wall_to_monotonic = wtm; |
79 | set_normalized_timespec(&tmp, -wtm.tv_sec, -wtm.tv_nsec); | 111 | set_normalized_timespec64(&tmp, -wtm.tv_sec, -wtm.tv_nsec); |
80 | tk->offs_real = timespec_to_ktime(tmp); | 112 | tk->offs_real = timespec64_to_ktime(tmp); |
81 | tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tk->tai_offset, 0)); | 113 | tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tk->tai_offset, 0)); |
82 | } | 114 | } |
83 | 115 | ||
84 | static void tk_set_sleep_time(struct timekeeper *tk, struct timespec t) | 116 | static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta) |
85 | { | 117 | { |
86 | /* Verify consistency before modifying */ | 118 | tk->offs_boot = ktime_add(tk->offs_boot, delta); |
87 | WARN_ON_ONCE(tk->offs_boot.tv64 != timespec_to_ktime(tk->total_sleep_time).tv64); | ||
88 | |||
89 | tk->total_sleep_time = t; | ||
90 | tk->offs_boot = timespec_to_ktime(t); | ||
91 | } | 119 | } |
92 | 120 | ||
93 | /** | 121 | /** |
@@ -107,9 +135,11 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock) | |||
107 | u64 tmp, ntpinterval; | 135 | u64 tmp, ntpinterval; |
108 | struct clocksource *old_clock; | 136 | struct clocksource *old_clock; |
109 | 137 | ||
110 | old_clock = tk->clock; | 138 | old_clock = tk->tkr.clock; |
111 | tk->clock = clock; | 139 | tk->tkr.clock = clock; |
112 | tk->cycle_last = clock->cycle_last = clock->read(clock); | 140 | tk->tkr.read = clock->read; |
141 | tk->tkr.mask = clock->mask; | ||
142 | tk->tkr.cycle_last = tk->tkr.read(clock); | ||
113 | 143 | ||
114 | /* Do the ns -> cycle conversion first, using original mult */ | 144 | /* Do the ns -> cycle conversion first, using original mult */ |
115 | tmp = NTP_INTERVAL_LENGTH; | 145 | tmp = NTP_INTERVAL_LENGTH; |
@@ -133,78 +163,212 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock) | |||
133 | if (old_clock) { | 163 | if (old_clock) { |
134 | int shift_change = clock->shift - old_clock->shift; | 164 | int shift_change = clock->shift - old_clock->shift; |
135 | if (shift_change < 0) | 165 | if (shift_change < 0) |
136 | tk->xtime_nsec >>= -shift_change; | 166 | tk->tkr.xtime_nsec >>= -shift_change; |
137 | else | 167 | else |
138 | tk->xtime_nsec <<= shift_change; | 168 | tk->tkr.xtime_nsec <<= shift_change; |
139 | } | 169 | } |
140 | tk->shift = clock->shift; | 170 | tk->tkr.shift = clock->shift; |
141 | 171 | ||
142 | tk->ntp_error = 0; | 172 | tk->ntp_error = 0; |
143 | tk->ntp_error_shift = NTP_SCALE_SHIFT - clock->shift; | 173 | tk->ntp_error_shift = NTP_SCALE_SHIFT - clock->shift; |
174 | tk->ntp_tick = ntpinterval << tk->ntp_error_shift; | ||
144 | 175 | ||
145 | /* | 176 | /* |
146 | * The timekeeper keeps its own mult values for the currently | 177 | * The timekeeper keeps its own mult values for the currently |
147 | * active clocksource. These value will be adjusted via NTP | 178 | * active clocksource. These value will be adjusted via NTP |
148 | * to counteract clock drifting. | 179 | * to counteract clock drifting. |
149 | */ | 180 | */ |
150 | tk->mult = clock->mult; | 181 | tk->tkr.mult = clock->mult; |
182 | tk->ntp_err_mult = 0; | ||
151 | } | 183 | } |
152 | 184 | ||
153 | /* Timekeeper helper functions. */ | 185 | /* Timekeeper helper functions. */ |
154 | 186 | ||
155 | #ifdef CONFIG_ARCH_USES_GETTIMEOFFSET | 187 | #ifdef CONFIG_ARCH_USES_GETTIMEOFFSET |
156 | u32 (*arch_gettimeoffset)(void); | 188 | static u32 default_arch_gettimeoffset(void) { return 0; } |
157 | 189 | u32 (*arch_gettimeoffset)(void) = default_arch_gettimeoffset; | |
158 | u32 get_arch_timeoffset(void) | ||
159 | { | ||
160 | if (likely(arch_gettimeoffset)) | ||
161 | return arch_gettimeoffset(); | ||
162 | return 0; | ||
163 | } | ||
164 | #else | 190 | #else |
165 | static inline u32 get_arch_timeoffset(void) { return 0; } | 191 | static inline u32 arch_gettimeoffset(void) { return 0; } |
166 | #endif | 192 | #endif |
167 | 193 | ||
168 | static inline s64 timekeeping_get_ns(struct timekeeper *tk) | 194 | static inline s64 timekeeping_get_ns(struct tk_read_base *tkr) |
169 | { | 195 | { |
170 | cycle_t cycle_now, cycle_delta; | 196 | cycle_t cycle_now, delta; |
171 | struct clocksource *clock; | ||
172 | s64 nsec; | 197 | s64 nsec; |
173 | 198 | ||
174 | /* read clocksource: */ | 199 | /* read clocksource: */ |
175 | clock = tk->clock; | 200 | cycle_now = tkr->read(tkr->clock); |
176 | cycle_now = clock->read(clock); | ||
177 | 201 | ||
178 | /* calculate the delta since the last update_wall_time: */ | 202 | /* calculate the delta since the last update_wall_time: */ |
179 | cycle_delta = (cycle_now - clock->cycle_last) & clock->mask; | 203 | delta = clocksource_delta(cycle_now, tkr->cycle_last, tkr->mask); |
180 | 204 | ||
181 | nsec = cycle_delta * tk->mult + tk->xtime_nsec; | 205 | nsec = delta * tkr->mult + tkr->xtime_nsec; |
182 | nsec >>= tk->shift; | 206 | nsec >>= tkr->shift; |
183 | 207 | ||
184 | /* If arch requires, add in get_arch_timeoffset() */ | 208 | /* If arch requires, add in get_arch_timeoffset() */ |
185 | return nsec + get_arch_timeoffset(); | 209 | return nsec + arch_gettimeoffset(); |
186 | } | 210 | } |
187 | 211 | ||
188 | static inline s64 timekeeping_get_ns_raw(struct timekeeper *tk) | 212 | static inline s64 timekeeping_get_ns_raw(struct timekeeper *tk) |
189 | { | 213 | { |
190 | cycle_t cycle_now, cycle_delta; | 214 | struct clocksource *clock = tk->tkr.clock; |
191 | struct clocksource *clock; | 215 | cycle_t cycle_now, delta; |
192 | s64 nsec; | 216 | s64 nsec; |
193 | 217 | ||
194 | /* read clocksource: */ | 218 | /* read clocksource: */ |
195 | clock = tk->clock; | 219 | cycle_now = tk->tkr.read(clock); |
196 | cycle_now = clock->read(clock); | ||
197 | 220 | ||
198 | /* calculate the delta since the last update_wall_time: */ | 221 | /* calculate the delta since the last update_wall_time: */ |
199 | cycle_delta = (cycle_now - clock->cycle_last) & clock->mask; | 222 | delta = clocksource_delta(cycle_now, tk->tkr.cycle_last, tk->tkr.mask); |
200 | 223 | ||
201 | /* convert delta to nanoseconds. */ | 224 | /* convert delta to nanoseconds. */ |
202 | nsec = clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift); | 225 | nsec = clocksource_cyc2ns(delta, clock->mult, clock->shift); |
203 | 226 | ||
204 | /* If arch requires, add in get_arch_timeoffset() */ | 227 | /* If arch requires, add in get_arch_timeoffset() */ |
205 | return nsec + get_arch_timeoffset(); | 228 | return nsec + arch_gettimeoffset(); |
229 | } | ||
230 | |||
231 | /** | ||
232 | * update_fast_timekeeper - Update the fast and NMI safe monotonic timekeeper. | ||
233 | * @tk: The timekeeper from which we take the update | ||
234 | * @tkf: The fast timekeeper to update | ||
235 | * @tbase: The time base for the fast timekeeper (mono/raw) | ||
236 | * | ||
237 | * We want to use this from any context including NMI and tracing / | ||
238 | * instrumenting the timekeeping code itself. | ||
239 | * | ||
240 | * So we handle this differently than the other timekeeping accessor | ||
241 | * functions which retry when the sequence count has changed. The | ||
242 | * update side does: | ||
243 | * | ||
244 | * smp_wmb(); <- Ensure that the last base[1] update is visible | ||
245 | * tkf->seq++; | ||
246 | * smp_wmb(); <- Ensure that the seqcount update is visible | ||
247 | * update(tkf->base[0], tk); | ||
248 | * smp_wmb(); <- Ensure that the base[0] update is visible | ||
249 | * tkf->seq++; | ||
250 | * smp_wmb(); <- Ensure that the seqcount update is visible | ||
251 | * update(tkf->base[1], tk); | ||
252 | * | ||
253 | * The reader side does: | ||
254 | * | ||
255 | * do { | ||
256 | * seq = tkf->seq; | ||
257 | * smp_rmb(); | ||
258 | * idx = seq & 0x01; | ||
259 | * now = now(tkf->base[idx]); | ||
260 | * smp_rmb(); | ||
261 | * } while (seq != tkf->seq) | ||
262 | * | ||
263 | * As long as we update base[0] readers are forced off to | ||
264 | * base[1]. Once base[0] is updated readers are redirected to base[0] | ||
265 | * and the base[1] update takes place. | ||
266 | * | ||
267 | * So if a NMI hits the update of base[0] then it will use base[1] | ||
268 | * which is still consistent. In the worst case this can result is a | ||
269 | * slightly wrong timestamp (a few nanoseconds). See | ||
270 | * @ktime_get_mono_fast_ns. | ||
271 | */ | ||
272 | static void update_fast_timekeeper(struct timekeeper *tk) | ||
273 | { | ||
274 | struct tk_read_base *base = tk_fast_mono.base; | ||
275 | |||
276 | /* Force readers off to base[1] */ | ||
277 | raw_write_seqcount_latch(&tk_fast_mono.seq); | ||
278 | |||
279 | /* Update base[0] */ | ||
280 | memcpy(base, &tk->tkr, sizeof(*base)); | ||
281 | |||
282 | /* Force readers back to base[0] */ | ||
283 | raw_write_seqcount_latch(&tk_fast_mono.seq); | ||
284 | |||
285 | /* Update base[1] */ | ||
286 | memcpy(base + 1, base, sizeof(*base)); | ||
206 | } | 287 | } |
207 | 288 | ||
289 | /** | ||
290 | * ktime_get_mono_fast_ns - Fast NMI safe access to clock monotonic | ||
291 | * | ||
292 | * This timestamp is not guaranteed to be monotonic across an update. | ||
293 | * The timestamp is calculated by: | ||
294 | * | ||
295 | * now = base_mono + clock_delta * slope | ||
296 | * | ||
297 | * So if the update lowers the slope, readers who are forced to the | ||
298 | * not yet updated second array are still using the old steeper slope. | ||
299 | * | ||
300 | * tmono | ||
301 | * ^ | ||
302 | * | o n | ||
303 | * | o n | ||
304 | * | u | ||
305 | * | o | ||
306 | * |o | ||
307 | * |12345678---> reader order | ||
308 | * | ||
309 | * o = old slope | ||
310 | * u = update | ||
311 | * n = new slope | ||
312 | * | ||
313 | * So reader 6 will observe time going backwards versus reader 5. | ||
314 | * | ||
315 | * While other CPUs are likely to be able observe that, the only way | ||
316 | * for a CPU local observation is when an NMI hits in the middle of | ||
317 | * the update. Timestamps taken from that NMI context might be ahead | ||
318 | * of the following timestamps. Callers need to be aware of that and | ||
319 | * deal with it. | ||
320 | */ | ||
321 | u64 notrace ktime_get_mono_fast_ns(void) | ||
322 | { | ||
323 | struct tk_read_base *tkr; | ||
324 | unsigned int seq; | ||
325 | u64 now; | ||
326 | |||
327 | do { | ||
328 | seq = raw_read_seqcount(&tk_fast_mono.seq); | ||
329 | tkr = tk_fast_mono.base + (seq & 0x01); | ||
330 | now = ktime_to_ns(tkr->base_mono) + timekeeping_get_ns(tkr); | ||
331 | |||
332 | } while (read_seqcount_retry(&tk_fast_mono.seq, seq)); | ||
333 | return now; | ||
334 | } | ||
335 | EXPORT_SYMBOL_GPL(ktime_get_mono_fast_ns); | ||
336 | |||
337 | #ifdef CONFIG_GENERIC_TIME_VSYSCALL_OLD | ||
338 | |||
339 | static inline void update_vsyscall(struct timekeeper *tk) | ||
340 | { | ||
341 | struct timespec xt; | ||
342 | |||
343 | xt = timespec64_to_timespec(tk_xtime(tk)); | ||
344 | update_vsyscall_old(&xt, &tk->wall_to_monotonic, tk->tkr.clock, tk->tkr.mult, | ||
345 | tk->tkr.cycle_last); | ||
346 | } | ||
347 | |||
348 | static inline void old_vsyscall_fixup(struct timekeeper *tk) | ||
349 | { | ||
350 | s64 remainder; | ||
351 | |||
352 | /* | ||
353 | * Store only full nanoseconds into xtime_nsec after rounding | ||
354 | * it up and add the remainder to the error difference. | ||
355 | * XXX - This is necessary to avoid small 1ns inconsistnecies caused | ||
356 | * by truncating the remainder in vsyscalls. However, it causes | ||
357 | * additional work to be done in timekeeping_adjust(). Once | ||
358 | * the vsyscall implementations are converted to use xtime_nsec | ||
359 | * (shifted nanoseconds), and CONFIG_GENERIC_TIME_VSYSCALL_OLD | ||
360 | * users are removed, this can be killed. | ||
361 | */ | ||
362 | remainder = tk->tkr.xtime_nsec & ((1ULL << tk->tkr.shift) - 1); | ||
363 | tk->tkr.xtime_nsec -= remainder; | ||
364 | tk->tkr.xtime_nsec += 1ULL << tk->tkr.shift; | ||
365 | tk->ntp_error += remainder << tk->ntp_error_shift; | ||
366 | tk->ntp_error -= (1ULL << tk->tkr.shift) << tk->ntp_error_shift; | ||
367 | } | ||
368 | #else | ||
369 | #define old_vsyscall_fixup(tk) | ||
370 | #endif | ||
371 | |||
208 | static RAW_NOTIFIER_HEAD(pvclock_gtod_chain); | 372 | static RAW_NOTIFIER_HEAD(pvclock_gtod_chain); |
209 | 373 | ||
210 | static void update_pvclock_gtod(struct timekeeper *tk, bool was_set) | 374 | static void update_pvclock_gtod(struct timekeeper *tk, bool was_set) |
@@ -217,7 +381,7 @@ static void update_pvclock_gtod(struct timekeeper *tk, bool was_set) | |||
217 | */ | 381 | */ |
218 | int pvclock_gtod_register_notifier(struct notifier_block *nb) | 382 | int pvclock_gtod_register_notifier(struct notifier_block *nb) |
219 | { | 383 | { |
220 | struct timekeeper *tk = &timekeeper; | 384 | struct timekeeper *tk = &tk_core.timekeeper; |
221 | unsigned long flags; | 385 | unsigned long flags; |
222 | int ret; | 386 | int ret; |
223 | 387 | ||
@@ -247,6 +411,29 @@ int pvclock_gtod_unregister_notifier(struct notifier_block *nb) | |||
247 | } | 411 | } |
248 | EXPORT_SYMBOL_GPL(pvclock_gtod_unregister_notifier); | 412 | EXPORT_SYMBOL_GPL(pvclock_gtod_unregister_notifier); |
249 | 413 | ||
414 | /* | ||
415 | * Update the ktime_t based scalar nsec members of the timekeeper | ||
416 | */ | ||
417 | static inline void tk_update_ktime_data(struct timekeeper *tk) | ||
418 | { | ||
419 | s64 nsec; | ||
420 | |||
421 | /* | ||
422 | * The xtime based monotonic readout is: | ||
423 | * nsec = (xtime_sec + wtm_sec) * 1e9 + wtm_nsec + now(); | ||
424 | * The ktime based monotonic readout is: | ||
425 | * nsec = base_mono + now(); | ||
426 | * ==> base_mono = (xtime_sec + wtm_sec) * 1e9 + wtm_nsec | ||
427 | */ | ||
428 | nsec = (s64)(tk->xtime_sec + tk->wall_to_monotonic.tv_sec); | ||
429 | nsec *= NSEC_PER_SEC; | ||
430 | nsec += tk->wall_to_monotonic.tv_nsec; | ||
431 | tk->tkr.base_mono = ns_to_ktime(nsec); | ||
432 | |||
433 | /* Update the monotonic raw base */ | ||
434 | tk->base_raw = timespec64_to_ktime(tk->raw_time); | ||
435 | } | ||
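A quick numeric check of the derivation in the comment, with invented values: xtime_sec = 1000 and wall_to_monotonic = { .tv_sec = -970, .tv_nsec = 250000000 } give

	nsec = (1000 + -970) * NSEC_PER_SEC + 250000000
	     = 30250000000;	/* base_mono = 30.25 s */

so ktime_get() only has to add the current clocksource delta to a precomputed base instead of recombining the timespec fields on every read.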
436 | |||
250 | /* must hold timekeeper_lock */ | 437 | /* must hold timekeeper_lock */ |
251 | static void timekeeping_update(struct timekeeper *tk, unsigned int action) | 438 | static void timekeeping_update(struct timekeeper *tk, unsigned int action) |
252 | { | 439 | { |
@@ -257,8 +444,13 @@ static void timekeeping_update(struct timekeeper *tk, unsigned int action) | |||
257 | update_vsyscall(tk); | 444 | update_vsyscall(tk); |
258 | update_pvclock_gtod(tk, action & TK_CLOCK_WAS_SET); | 445 | update_pvclock_gtod(tk, action & TK_CLOCK_WAS_SET); |
259 | 446 | ||
447 | tk_update_ktime_data(tk); | ||
448 | |||
260 | if (action & TK_MIRROR) | 449 | if (action & TK_MIRROR) |
261 | memcpy(&shadow_timekeeper, &timekeeper, sizeof(timekeeper)); | 450 | memcpy(&shadow_timekeeper, &tk_core.timekeeper, |
451 | sizeof(tk_core.timekeeper)); | ||
452 | |||
453 | update_fast_timekeeper(tk); | ||
262 | } | 454 | } |
263 | 455 | ||
264 | /** | 456 | /** |
@@ -270,49 +462,48 @@ static void timekeeping_update(struct timekeeper *tk, unsigned int action) | |||
270 | */ | 462 | */ |
271 | static void timekeeping_forward_now(struct timekeeper *tk) | 463 | static void timekeeping_forward_now(struct timekeeper *tk) |
272 | { | 464 | { |
273 | cycle_t cycle_now, cycle_delta; | 465 | struct clocksource *clock = tk->tkr.clock; |
274 | struct clocksource *clock; | 466 | cycle_t cycle_now, delta; |
275 | s64 nsec; | 467 | s64 nsec; |
276 | 468 | ||
277 | clock = tk->clock; | 469 | cycle_now = tk->tkr.read(clock); |
278 | cycle_now = clock->read(clock); | 470 | delta = clocksource_delta(cycle_now, tk->tkr.cycle_last, tk->tkr.mask); |
279 | cycle_delta = (cycle_now - clock->cycle_last) & clock->mask; | 471 | tk->tkr.cycle_last = cycle_now; |
280 | tk->cycle_last = clock->cycle_last = cycle_now; | ||
281 | 472 | ||
282 | tk->xtime_nsec += cycle_delta * tk->mult; | 473 | tk->tkr.xtime_nsec += delta * tk->tkr.mult; |
283 | 474 | ||
284 | /* If arch requires, add in get_arch_timeoffset() */ | 475 | /* If arch requires, add in get_arch_timeoffset() */ |
285 | tk->xtime_nsec += (u64)get_arch_timeoffset() << tk->shift; | 476 | tk->tkr.xtime_nsec += (u64)arch_gettimeoffset() << tk->tkr.shift; |
286 | 477 | ||
287 | tk_normalize_xtime(tk); | 478 | tk_normalize_xtime(tk); |
288 | 479 | ||
289 | nsec = clocksource_cyc2ns(cycle_delta, clock->mult, clock->shift); | 480 | nsec = clocksource_cyc2ns(delta, clock->mult, clock->shift); |
290 | timespec_add_ns(&tk->raw_time, nsec); | 481 | timespec64_add_ns(&tk->raw_time, nsec); |
291 | } | 482 | } |
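clocksource_delta() comes from the "Make delta calculation a function" patch in this series; presumably it is just the old open-coded expression behind a name:

	static inline cycle_t clocksource_delta(cycle_t now, cycle_t last,
						cycle_t mask)
	{
		return (now - last) & mask;
	}

The mask keeps the subtraction correct for clocksources narrower than 64 bits, where the counter wraps inside cycle_t.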
292 | 483 | ||
293 | /** | 484 | /** |
294 | * __getnstimeofday - Returns the time of day in a timespec. | 485 | * __getnstimeofday64 - Returns the time of day in a timespec64. |
295 | * @ts: pointer to the timespec to be set | 486 | * @ts: pointer to the timespec to be set |
296 | * | 487 | * |
297 | * Updates the time of day in the timespec. | 488 | * Updates the time of day in the timespec. |
298 | * Returns 0 on success, or -ve when suspended (timespec will be undefined). | 489 | * Returns 0 on success, or -ve when suspended (timespec will be undefined). |
299 | */ | 490 | */ |
300 | int __getnstimeofday(struct timespec *ts) | 491 | int __getnstimeofday64(struct timespec64 *ts) |
301 | { | 492 | { |
302 | struct timekeeper *tk = &timekeeper; | 493 | struct timekeeper *tk = &tk_core.timekeeper; |
303 | unsigned long seq; | 494 | unsigned long seq; |
304 | s64 nsecs = 0; | 495 | s64 nsecs = 0; |
305 | 496 | ||
306 | do { | 497 | do { |
307 | seq = read_seqcount_begin(&timekeeper_seq); | 498 | seq = read_seqcount_begin(&tk_core.seq); |
308 | 499 | ||
309 | ts->tv_sec = tk->xtime_sec; | 500 | ts->tv_sec = tk->xtime_sec; |
310 | nsecs = timekeeping_get_ns(tk); | 501 | nsecs = timekeeping_get_ns(&tk->tkr); |
311 | 502 | ||
312 | } while (read_seqcount_retry(&timekeeper_seq, seq)); | 503 | } while (read_seqcount_retry(&tk_core.seq, seq)); |
313 | 504 | ||
314 | ts->tv_nsec = 0; | 505 | ts->tv_nsec = 0; |
315 | timespec_add_ns(ts, nsecs); | 506 | timespec64_add_ns(ts, nsecs); |
316 | 507 | ||
317 | /* | 508 | /* |
318 | * Do not bail out early, in case there were callers still using | 509 | * Do not bail out early, in case there were callers still using |
@@ -322,116 +513,138 @@ int __getnstimeofday(struct timespec *ts) | |||
322 | return -EAGAIN; | 513 | return -EAGAIN; |
323 | return 0; | 514 | return 0; |
324 | } | 515 | } |
325 | EXPORT_SYMBOL(__getnstimeofday); | 516 | EXPORT_SYMBOL(__getnstimeofday64); |
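A hypothetical caller honouring the -EAGAIN contract (consume_timestamp() is invented for illustration):

	struct timespec64 ts;

	if (__getnstimeofday64(&ts))
		return -EAGAIN;	/* suspended: ts contents are undefined */
	consume_timestamp(&ts);

Callers that cannot tolerate the failure use getnstimeofday64() below, which turns the error into a WARN instead.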
326 | 517 | ||
327 | /** | 518 | /** |
328 | * getnstimeofday - Returns the time of day in a timespec. | 519 | * getnstimeofday64 - Returns the time of day in a timespec64. |
329 | * @ts: pointer to the timespec to be set | 520 | * @ts: pointer to the timespec to be set |
330 | * | 521 | * |
331 | * Returns the time of day in a timespec (WARN if suspended). | 522 | * Returns the time of day in a timespec (WARN if suspended). |
332 | */ | 523 | */ |
333 | void getnstimeofday(struct timespec *ts) | 524 | void getnstimeofday64(struct timespec64 *ts) |
334 | { | 525 | { |
335 | WARN_ON(__getnstimeofday(ts)); | 526 | WARN_ON(__getnstimeofday64(ts)); |
336 | } | 527 | } |
337 | EXPORT_SYMBOL(getnstimeofday); | 528 | EXPORT_SYMBOL(getnstimeofday64); |
338 | 529 | ||
339 | ktime_t ktime_get(void) | 530 | ktime_t ktime_get(void) |
340 | { | 531 | { |
341 | struct timekeeper *tk = &timekeeper; | 532 | struct timekeeper *tk = &tk_core.timekeeper; |
342 | unsigned int seq; | 533 | unsigned int seq; |
343 | s64 secs, nsecs; | 534 | ktime_t base; |
535 | s64 nsecs; | ||
344 | 536 | ||
345 | WARN_ON(timekeeping_suspended); | 537 | WARN_ON(timekeeping_suspended); |
346 | 538 | ||
347 | do { | 539 | do { |
348 | seq = read_seqcount_begin(&timekeeper_seq); | 540 | seq = read_seqcount_begin(&tk_core.seq); |
349 | secs = tk->xtime_sec + tk->wall_to_monotonic.tv_sec; | 541 | base = tk->tkr.base_mono; |
350 | nsecs = timekeeping_get_ns(tk) + tk->wall_to_monotonic.tv_nsec; | 542 | nsecs = timekeeping_get_ns(&tk->tkr); |
351 | 543 | ||
352 | } while (read_seqcount_retry(&timekeeper_seq, seq)); | 544 | } while (read_seqcount_retry(&tk_core.seq, seq)); |
353 | /* | 545 | |
354 | * Use ktime_set/ktime_add_ns to create a proper ktime on | 546 | return ktime_add_ns(base, nsecs); |
355 | * 32-bit architectures without CONFIG_KTIME_SCALAR. | ||
356 | */ | ||
357 | return ktime_add_ns(ktime_set(secs, 0), nsecs); | ||
358 | } | 547 | } |
359 | EXPORT_SYMBOL_GPL(ktime_get); | 548 | EXPORT_SYMBOL_GPL(ktime_get); |
360 | 549 | ||
361 | /** | 550 | static ktime_t *offsets[TK_OFFS_MAX] = { |
362 | * ktime_get_ts - get the monotonic clock in timespec format | 551 | [TK_OFFS_REAL] = &tk_core.timekeeper.offs_real, |
363 | * @ts: pointer to timespec variable | 552 | [TK_OFFS_BOOT] = &tk_core.timekeeper.offs_boot, |
364 | * | 553 | [TK_OFFS_TAI] = &tk_core.timekeeper.offs_tai, |
365 | * The function calculates the monotonic clock from the realtime | 554 | }; |
366 | * clock and the wall_to_monotonic offset and stores the result | 555 | |
367 | * in normalized timespec format in the variable pointed to by @ts. | 556 | ktime_t ktime_get_with_offset(enum tk_offsets offs) |
368 | */ | ||
369 | void ktime_get_ts(struct timespec *ts) | ||
370 | { | 557 | { |
371 | struct timekeeper *tk = &timekeeper; | 558 | struct timekeeper *tk = &tk_core.timekeeper; |
372 | struct timespec tomono; | ||
373 | s64 nsec; | ||
374 | unsigned int seq; | 559 | unsigned int seq; |
560 | ktime_t base, *offset = offsets[offs]; | ||
561 | s64 nsecs; | ||
375 | 562 | ||
376 | WARN_ON(timekeeping_suspended); | 563 | WARN_ON(timekeeping_suspended); |
377 | 564 | ||
378 | do { | 565 | do { |
379 | seq = read_seqcount_begin(&timekeeper_seq); | 566 | seq = read_seqcount_begin(&tk_core.seq); |
380 | ts->tv_sec = tk->xtime_sec; | 567 | base = ktime_add(tk->tkr.base_mono, *offset); |
381 | nsec = timekeeping_get_ns(tk); | 568 | nsecs = timekeeping_get_ns(&tk->tkr); |
382 | tomono = tk->wall_to_monotonic; | ||
383 | 569 | ||
384 | } while (read_seqcount_retry(&timekeeper_seq, seq)); | 570 | } while (read_seqcount_retry(&tk_core.seq, seq)); |
385 | 571 | ||
386 | ts->tv_sec += tomono.tv_sec; | 572 | return ktime_add_ns(base, nsecs); |
387 | ts->tv_nsec = 0; | ||
388 | timespec_add_ns(ts, nsec + tomono.tv_nsec); | ||
389 | } | ||
390 | EXPORT_SYMBOL_GPL(ktime_get_ts); | ||
391 | 573 | ||
574 | } | ||
575 | EXPORT_SYMBOL_GPL(ktime_get_with_offset); | ||
392 | 576 | ||
393 | /** | 577 | /** |
394 | * timekeeping_clocktai - Returns the TAI time of day in a timespec | 578 | * ktime_mono_to_any() - convert monotonic time to any other time |
395 | * @ts: pointer to the timespec to be set | 579 | * @tmono: time to convert. |
396 | * | 580 | * @offs: which offset to use |
397 | * Returns the time of day in a timespec. | ||
398 | */ | 581 | */ |
399 | void timekeeping_clocktai(struct timespec *ts) | 582 | ktime_t ktime_mono_to_any(ktime_t tmono, enum tk_offsets offs) |
400 | { | 583 | { |
401 | struct timekeeper *tk = &timekeeper; | 584 | ktime_t *offset = offsets[offs]; |
402 | unsigned long seq; | 585 | unsigned long seq; |
403 | u64 nsecs; | 586 | ktime_t tconv; |
404 | |||
405 | WARN_ON(timekeeping_suspended); | ||
406 | 587 | ||
407 | do { | 588 | do { |
408 | seq = read_seqcount_begin(&timekeeper_seq); | 589 | seq = read_seqcount_begin(&tk_core.seq); |
590 | tconv = ktime_add(tmono, *offset); | ||
591 | } while (read_seqcount_retry(&tk_core.seq, seq)); | ||
409 | 592 | ||
410 | ts->tv_sec = tk->xtime_sec + tk->tai_offset; | 593 | return tconv; |
411 | nsecs = timekeeping_get_ns(tk); | 594 | } |
595 | EXPORT_SYMBOL_GPL(ktime_mono_to_any); | ||
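Typical use is converting a saved monotonic timestamp into another clock domain after the fact, e.g. (hypothetical driver code):

	ktime_t mono = ktime_get();	/* timestamp an event */

	/* later: report the same instant as wall-clock time */
	ktime_t real = ktime_mono_to_any(mono, TK_OFFS_REAL);

Reading the offset under the seqcount means the conversion uses a consistent offset even if settimeofday() runs concurrently.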
412 | 596 | ||
413 | } while (read_seqcount_retry(&timekeeper_seq, seq)); | 597 | /** |
598 | * ktime_get_raw - Returns the raw monotonic time in ktime_t format | ||
599 | */ | ||
600 | ktime_t ktime_get_raw(void) | ||
601 | { | ||
602 | struct timekeeper *tk = &tk_core.timekeeper; | ||
603 | unsigned int seq; | ||
604 | ktime_t base; | ||
605 | s64 nsecs; | ||
414 | 606 | ||
415 | ts->tv_nsec = 0; | 607 | do { |
416 | timespec_add_ns(ts, nsecs); | 608 | seq = read_seqcount_begin(&tk_core.seq); |
609 | base = tk->base_raw; | ||
610 | nsecs = timekeeping_get_ns_raw(tk); | ||
417 | 611 | ||
418 | } | 612 | } while (read_seqcount_retry(&tk_core.seq, seq)); |
419 | EXPORT_SYMBOL(timekeeping_clocktai); | ||
420 | 613 | ||
614 | return ktime_add_ns(base, nsecs); | ||
615 | } | ||
616 | EXPORT_SYMBOL_GPL(ktime_get_raw); | ||
421 | 617 | ||
422 | /** | 618 | /** |
423 | * ktime_get_clocktai - Returns the TAI time of day in a ktime | 619 | * ktime_get_ts64 - get the monotonic clock in timespec64 format |
620 | * @ts: pointer to timespec variable | ||
424 | * | 621 | * |
425 | * Returns the time of day in a ktime. | 622 | * The function calculates the monotonic clock from the realtime |
623 | * clock and the wall_to_monotonic offset and stores the result | ||
624 | * in normalized timespec format in the variable pointed to by @ts. | ||
426 | */ | 625 | */ |
427 | ktime_t ktime_get_clocktai(void) | 626 | void ktime_get_ts64(struct timespec64 *ts) |
428 | { | 627 | { |
429 | struct timespec ts; | 628 | struct timekeeper *tk = &tk_core.timekeeper; |
629 | struct timespec64 tomono; | ||
630 | s64 nsec; | ||
631 | unsigned int seq; | ||
632 | |||
633 | WARN_ON(timekeeping_suspended); | ||
430 | 634 | ||
431 | timekeeping_clocktai(&ts); | 635 | do { |
432 | return timespec_to_ktime(ts); | 636 | seq = read_seqcount_begin(&tk_core.seq); |
637 | ts->tv_sec = tk->xtime_sec; | ||
638 | nsec = timekeeping_get_ns(&tk->tkr); | ||
639 | tomono = tk->wall_to_monotonic; | ||
640 | |||
641 | } while (read_seqcount_retry(&tk_core.seq, seq)); | ||
642 | |||
643 | ts->tv_sec += tomono.tv_sec; | ||
644 | ts->tv_nsec = 0; | ||
645 | timespec64_add_ns(ts, nsec + tomono.tv_nsec); | ||
433 | } | 646 | } |
434 | EXPORT_SYMBOL(ktime_get_clocktai); | 647 | EXPORT_SYMBOL_GPL(ktime_get_ts64); |
435 | 648 | ||
436 | #ifdef CONFIG_NTP_PPS | 649 | #ifdef CONFIG_NTP_PPS |
437 | 650 | ||
@@ -446,23 +659,23 @@ EXPORT_SYMBOL(ktime_get_clocktai); | |||
446 | */ | 659 | */ |
447 | void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real) | 660 | void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real) |
448 | { | 661 | { |
449 | struct timekeeper *tk = &timekeeper; | 662 | struct timekeeper *tk = &tk_core.timekeeper; |
450 | unsigned long seq; | 663 | unsigned long seq; |
451 | s64 nsecs_raw, nsecs_real; | 664 | s64 nsecs_raw, nsecs_real; |
452 | 665 | ||
453 | WARN_ON_ONCE(timekeeping_suspended); | 666 | WARN_ON_ONCE(timekeeping_suspended); |
454 | 667 | ||
455 | do { | 668 | do { |
456 | seq = read_seqcount_begin(&timekeeper_seq); | 669 | seq = read_seqcount_begin(&tk_core.seq); |
457 | 670 | ||
458 | *ts_raw = tk->raw_time; | 671 | *ts_raw = timespec64_to_timespec(tk->raw_time); |
459 | ts_real->tv_sec = tk->xtime_sec; | 672 | ts_real->tv_sec = tk->xtime_sec; |
460 | ts_real->tv_nsec = 0; | 673 | ts_real->tv_nsec = 0; |
461 | 674 | ||
462 | nsecs_raw = timekeeping_get_ns_raw(tk); | 675 | nsecs_raw = timekeeping_get_ns_raw(tk); |
463 | nsecs_real = timekeeping_get_ns(tk); | 676 | nsecs_real = timekeeping_get_ns(&tk->tkr); |
464 | 677 | ||
465 | } while (read_seqcount_retry(&timekeeper_seq, seq)); | 678 | } while (read_seqcount_retry(&tk_core.seq, seq)); |
466 | 679 | ||
467 | timespec_add_ns(ts_raw, nsecs_raw); | 680 | timespec_add_ns(ts_raw, nsecs_raw); |
468 | timespec_add_ns(ts_real, nsecs_real); | 681 | timespec_add_ns(ts_real, nsecs_real); |
@@ -479,9 +692,9 @@ EXPORT_SYMBOL(getnstime_raw_and_real); | |||
479 | */ | 692 | */ |
480 | void do_gettimeofday(struct timeval *tv) | 693 | void do_gettimeofday(struct timeval *tv) |
481 | { | 694 | { |
482 | struct timespec now; | 695 | struct timespec64 now; |
483 | 696 | ||
484 | getnstimeofday(&now); | 697 | getnstimeofday64(&now); |
485 | tv->tv_sec = now.tv_sec; | 698 | tv->tv_sec = now.tv_sec; |
486 | tv->tv_usec = now.tv_nsec/1000; | 699 | tv->tv_usec = now.tv_nsec/1000; |
487 | } | 700 | } |
@@ -495,15 +708,15 @@ EXPORT_SYMBOL(do_gettimeofday); | |||
495 | */ | 708 | */ |
496 | int do_settimeofday(const struct timespec *tv) | 709 | int do_settimeofday(const struct timespec *tv) |
497 | { | 710 | { |
498 | struct timekeeper *tk = &timekeeper; | 711 | struct timekeeper *tk = &tk_core.timekeeper; |
499 | struct timespec ts_delta, xt; | 712 | struct timespec64 ts_delta, xt, tmp; |
500 | unsigned long flags; | 713 | unsigned long flags; |
501 | 714 | ||
502 | if (!timespec_valid_strict(tv)) | 715 | if (!timespec_valid_strict(tv)) |
503 | return -EINVAL; | 716 | return -EINVAL; |
504 | 717 | ||
505 | raw_spin_lock_irqsave(&timekeeper_lock, flags); | 718 | raw_spin_lock_irqsave(&timekeeper_lock, flags); |
506 | write_seqcount_begin(&timekeeper_seq); | 719 | write_seqcount_begin(&tk_core.seq); |
507 | 720 | ||
508 | timekeeping_forward_now(tk); | 721 | timekeeping_forward_now(tk); |
509 | 722 | ||
@@ -511,13 +724,14 @@ int do_settimeofday(const struct timespec *tv) | |||
511 | ts_delta.tv_sec = tv->tv_sec - xt.tv_sec; | 724 | ts_delta.tv_sec = tv->tv_sec - xt.tv_sec; |
512 | ts_delta.tv_nsec = tv->tv_nsec - xt.tv_nsec; | 725 | ts_delta.tv_nsec = tv->tv_nsec - xt.tv_nsec; |
513 | 726 | ||
514 | tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, ts_delta)); | 727 | tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, ts_delta)); |
515 | 728 | ||
516 | tk_set_xtime(tk, tv); | 729 | tmp = timespec_to_timespec64(*tv); |
730 | tk_set_xtime(tk, &tmp); | ||
517 | 731 | ||
518 | timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET); | 732 | timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET); |
519 | 733 | ||
520 | write_seqcount_end(&timekeeper_seq); | 734 | write_seqcount_end(&tk_core.seq); |
521 | raw_spin_unlock_irqrestore(&timekeeper_lock, flags); | 735 | raw_spin_unlock_irqrestore(&timekeeper_lock, flags); |
522 | 736 | ||
523 | /* signal hrtimers about time change */ | 737 | /* signal hrtimers about time change */ |
@@ -535,33 +749,35 @@ EXPORT_SYMBOL(do_settimeofday); | |||
535 | */ | 749 | */ |
536 | int timekeeping_inject_offset(struct timespec *ts) | 750 | int timekeeping_inject_offset(struct timespec *ts) |
537 | { | 751 | { |
538 | struct timekeeper *tk = &timekeeper; | 752 | struct timekeeper *tk = &tk_core.timekeeper; |
539 | unsigned long flags; | 753 | unsigned long flags; |
540 | struct timespec tmp; | 754 | struct timespec64 ts64, tmp; |
541 | int ret = 0; | 755 | int ret = 0; |
542 | 756 | ||
543 | if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC) | 757 | if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC) |
544 | return -EINVAL; | 758 | return -EINVAL; |
545 | 759 | ||
760 | ts64 = timespec_to_timespec64(*ts); | ||
761 | |||
546 | raw_spin_lock_irqsave(&timekeeper_lock, flags); | 762 | raw_spin_lock_irqsave(&timekeeper_lock, flags); |
547 | write_seqcount_begin(&timekeeper_seq); | 763 | write_seqcount_begin(&tk_core.seq); |
548 | 764 | ||
549 | timekeeping_forward_now(tk); | 765 | timekeeping_forward_now(tk); |
550 | 766 | ||
551 | /* Make sure the proposed value is valid */ | 767 | /* Make sure the proposed value is valid */ |
552 | tmp = timespec_add(tk_xtime(tk), *ts); | 768 | tmp = timespec64_add(tk_xtime(tk), ts64); |
553 | if (!timespec_valid_strict(&tmp)) { | 769 | if (!timespec64_valid_strict(&tmp)) { |
554 | ret = -EINVAL; | 770 | ret = -EINVAL; |
555 | goto error; | 771 | goto error; |
556 | } | 772 | } |
557 | 773 | ||
558 | tk_xtime_add(tk, ts); | 774 | tk_xtime_add(tk, &ts64); |
559 | tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, *ts)); | 775 | tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, ts64)); |
560 | 776 | ||
561 | error: /* even if we error out, we forwarded the time, so call update */ | 777 | error: /* even if we error out, we forwarded the time, so call update */ |
562 | timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET); | 778 | timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET); |
563 | 779 | ||
564 | write_seqcount_end(&timekeeper_seq); | 780 | write_seqcount_end(&tk_core.seq); |
565 | raw_spin_unlock_irqrestore(&timekeeper_lock, flags); | 781 | raw_spin_unlock_irqrestore(&timekeeper_lock, flags); |
566 | 782 | ||
567 | /* signal hrtimers about time change */ | 783 | /* signal hrtimers about time change */ |
@@ -578,14 +794,14 @@ EXPORT_SYMBOL(timekeeping_inject_offset); | |||
578 | */ | 794 | */ |
579 | s32 timekeeping_get_tai_offset(void) | 795 | s32 timekeeping_get_tai_offset(void) |
580 | { | 796 | { |
581 | struct timekeeper *tk = &timekeeper; | 797 | struct timekeeper *tk = &tk_core.timekeeper; |
582 | unsigned int seq; | 798 | unsigned int seq; |
583 | s32 ret; | 799 | s32 ret; |
584 | 800 | ||
585 | do { | 801 | do { |
586 | seq = read_seqcount_begin(&timekeeper_seq); | 802 | seq = read_seqcount_begin(&tk_core.seq); |
587 | ret = tk->tai_offset; | 803 | ret = tk->tai_offset; |
588 | } while (read_seqcount_retry(&timekeeper_seq, seq)); | 804 | } while (read_seqcount_retry(&tk_core.seq, seq)); |
589 | 805 | ||
590 | return ret; | 806 | return ret; |
591 | } | 807 | } |
@@ -606,14 +822,14 @@ static void __timekeeping_set_tai_offset(struct timekeeper *tk, s32 tai_offset) | |||
606 | */ | 822 | */ |
607 | void timekeeping_set_tai_offset(s32 tai_offset) | 823 | void timekeeping_set_tai_offset(s32 tai_offset) |
608 | { | 824 | { |
609 | struct timekeeper *tk = &timekeeper; | 825 | struct timekeeper *tk = &tk_core.timekeeper; |
610 | unsigned long flags; | 826 | unsigned long flags; |
611 | 827 | ||
612 | raw_spin_lock_irqsave(&timekeeper_lock, flags); | 828 | raw_spin_lock_irqsave(&timekeeper_lock, flags); |
613 | write_seqcount_begin(&timekeeper_seq); | 829 | write_seqcount_begin(&tk_core.seq); |
614 | __timekeeping_set_tai_offset(tk, tai_offset); | 830 | __timekeeping_set_tai_offset(tk, tai_offset); |
615 | timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET); | 831 | timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET); |
616 | write_seqcount_end(&timekeeper_seq); | 832 | write_seqcount_end(&tk_core.seq); |
617 | raw_spin_unlock_irqrestore(&timekeeper_lock, flags); | 833 | raw_spin_unlock_irqrestore(&timekeeper_lock, flags); |
618 | clock_was_set(); | 834 | clock_was_set(); |
619 | } | 835 | } |
@@ -625,14 +841,14 @@ void timekeeping_set_tai_offset(s32 tai_offset) | |||
625 | */ | 841 | */ |
626 | static int change_clocksource(void *data) | 842 | static int change_clocksource(void *data) |
627 | { | 843 | { |
628 | struct timekeeper *tk = &timekeeper; | 844 | struct timekeeper *tk = &tk_core.timekeeper; |
629 | struct clocksource *new, *old; | 845 | struct clocksource *new, *old; |
630 | unsigned long flags; | 846 | unsigned long flags; |
631 | 847 | ||
632 | new = (struct clocksource *) data; | 848 | new = (struct clocksource *) data; |
633 | 849 | ||
634 | raw_spin_lock_irqsave(&timekeeper_lock, flags); | 850 | raw_spin_lock_irqsave(&timekeeper_lock, flags); |
635 | write_seqcount_begin(&timekeeper_seq); | 851 | write_seqcount_begin(&tk_core.seq); |
636 | 852 | ||
637 | timekeeping_forward_now(tk); | 853 | timekeeping_forward_now(tk); |
638 | /* | 854 | /* |
@@ -641,7 +857,7 @@ static int change_clocksource(void *data) | |||
641 | */ | 857 | */ |
642 | if (try_module_get(new->owner)) { | 858 | if (try_module_get(new->owner)) { |
643 | if (!new->enable || new->enable(new) == 0) { | 859 | if (!new->enable || new->enable(new) == 0) { |
644 | old = tk->clock; | 860 | old = tk->tkr.clock; |
645 | tk_setup_internals(tk, new); | 861 | tk_setup_internals(tk, new); |
646 | if (old->disable) | 862 | if (old->disable) |
647 | old->disable(old); | 863 | old->disable(old); |
@@ -652,7 +868,7 @@ static int change_clocksource(void *data) | |||
652 | } | 868 | } |
653 | timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET); | 869 | timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET); |
654 | 870 | ||
655 | write_seqcount_end(&timekeeper_seq); | 871 | write_seqcount_end(&tk_core.seq); |
656 | raw_spin_unlock_irqrestore(&timekeeper_lock, flags); | 872 | raw_spin_unlock_irqrestore(&timekeeper_lock, flags); |
657 | 873 | ||
658 | return 0; | 874 | return 0; |
@@ -667,29 +883,14 @@ static int change_clocksource(void *data) | |||
667 | */ | 883 | */ |
668 | int timekeeping_notify(struct clocksource *clock) | 884 | int timekeeping_notify(struct clocksource *clock) |
669 | { | 885 | { |
670 | struct timekeeper *tk = &timekeeper; | 886 | struct timekeeper *tk = &tk_core.timekeeper; |
671 | 887 | ||
672 | if (tk->clock == clock) | 888 | if (tk->tkr.clock == clock) |
673 | return 0; | 889 | return 0; |
674 | stop_machine(change_clocksource, clock, NULL); | 890 | stop_machine(change_clocksource, clock, NULL); |
675 | tick_clock_notify(); | 891 | tick_clock_notify(); |
676 | return tk->clock == clock ? 0 : -1; | 892 | return tk->tkr.clock == clock ? 0 : -1; |
677 | } | ||
678 | |||
679 | /** | ||
680 | * ktime_get_real - get the real (wall-) time in ktime_t format | ||
681 | * | ||
682 | * returns the time in ktime_t format | ||
683 | */ | ||
684 | ktime_t ktime_get_real(void) | ||
685 | { | ||
686 | struct timespec now; | ||
687 | |||
688 | getnstimeofday(&now); | ||
689 | |||
690 | return timespec_to_ktime(now); | ||
691 | } | 893 | } |
692 | EXPORT_SYMBOL_GPL(ktime_get_real); | ||
693 | 894 | ||
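On why change_clocksource() runs under stop_machine(): the switch rewrites clock, mult, shift and cycle_last as a unit, and stop_machine() guarantees no other CPU is in the tick or read path with the old parameters. The call chain when a driver registers a better clocksource is roughly (my_cs is hypothetical):

	/*
	 * clocksource_register_hz(&my_cs, rate)
	 *   -> clocksource_select()
	 *     -> timekeeping_notify(&my_cs)
	 *       -> stop_machine(change_clocksource, &my_cs, NULL)
	 */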
694 | /** | 895 | /** |
695 | * getrawmonotonic - Returns the raw monotonic time in a timespec | 896 | * getrawmonotonic - Returns the raw monotonic time in a timespec |
@@ -699,18 +900,20 @@ EXPORT_SYMBOL_GPL(ktime_get_real); | |||
699 | */ | 900 | */ |
700 | void getrawmonotonic(struct timespec *ts) | 901 | void getrawmonotonic(struct timespec *ts) |
701 | { | 902 | { |
702 | struct timekeeper *tk = &timekeeper; | 903 | struct timekeeper *tk = &tk_core.timekeeper; |
904 | struct timespec64 ts64; | ||
703 | unsigned long seq; | 905 | unsigned long seq; |
704 | s64 nsecs; | 906 | s64 nsecs; |
705 | 907 | ||
706 | do { | 908 | do { |
707 | seq = read_seqcount_begin(&timekeeper_seq); | 909 | seq = read_seqcount_begin(&tk_core.seq); |
708 | nsecs = timekeeping_get_ns_raw(tk); | 910 | nsecs = timekeeping_get_ns_raw(tk); |
709 | *ts = tk->raw_time; | 911 | ts64 = tk->raw_time; |
710 | 912 | ||
711 | } while (read_seqcount_retry(&timekeeper_seq, seq)); | 913 | } while (read_seqcount_retry(&tk_core.seq, seq)); |
712 | 914 | ||
713 | timespec_add_ns(ts, nsecs); | 915 | timespec64_add_ns(&ts64, nsecs); |
916 | *ts = timespec64_to_timespec(ts64); | ||
714 | } | 917 | } |
715 | EXPORT_SYMBOL(getrawmonotonic); | 918 | EXPORT_SYMBOL(getrawmonotonic); |
716 | 919 | ||
@@ -719,16 +922,16 @@ EXPORT_SYMBOL(getrawmonotonic); | |||
719 | */ | 922 | */ |
720 | int timekeeping_valid_for_hres(void) | 923 | int timekeeping_valid_for_hres(void) |
721 | { | 924 | { |
722 | struct timekeeper *tk = &timekeeper; | 925 | struct timekeeper *tk = &tk_core.timekeeper; |
723 | unsigned long seq; | 926 | unsigned long seq; |
724 | int ret; | 927 | int ret; |
725 | 928 | ||
726 | do { | 929 | do { |
727 | seq = read_seqcount_begin(&timekeeper_seq); | 930 | seq = read_seqcount_begin(&tk_core.seq); |
728 | 931 | ||
729 | ret = tk->clock->flags & CLOCK_SOURCE_VALID_FOR_HRES; | 932 | ret = tk->tkr.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES; |
730 | 933 | ||
731 | } while (read_seqcount_retry(&timekeeper_seq, seq)); | 934 | } while (read_seqcount_retry(&tk_core.seq, seq)); |
732 | 935 | ||
733 | return ret; | 936 | return ret; |
734 | } | 937 | } |
@@ -738,16 +941,16 @@ int timekeeping_valid_for_hres(void) | |||
738 | */ | 941 | */ |
739 | u64 timekeeping_max_deferment(void) | 942 | u64 timekeeping_max_deferment(void) |
740 | { | 943 | { |
741 | struct timekeeper *tk = &timekeeper; | 944 | struct timekeeper *tk = &tk_core.timekeeper; |
742 | unsigned long seq; | 945 | unsigned long seq; |
743 | u64 ret; | 946 | u64 ret; |
744 | 947 | ||
745 | do { | 948 | do { |
746 | seq = read_seqcount_begin(&timekeeper_seq); | 949 | seq = read_seqcount_begin(&tk_core.seq); |
747 | 950 | ||
748 | ret = tk->clock->max_idle_ns; | 951 | ret = tk->tkr.clock->max_idle_ns; |
749 | 952 | ||
750 | } while (read_seqcount_retry(&timekeeper_seq, seq)); | 953 | } while (read_seqcount_retry(&tk_core.seq, seq)); |
751 | 954 | ||
752 | return ret; | 955 | return ret; |
753 | } | 956 | } |
@@ -787,14 +990,15 @@ void __weak read_boot_clock(struct timespec *ts) | |||
787 | */ | 990 | */ |
788 | void __init timekeeping_init(void) | 991 | void __init timekeeping_init(void) |
789 | { | 992 | { |
790 | struct timekeeper *tk = &timekeeper; | 993 | struct timekeeper *tk = &tk_core.timekeeper; |
791 | struct clocksource *clock; | 994 | struct clocksource *clock; |
792 | unsigned long flags; | 995 | unsigned long flags; |
793 | struct timespec now, boot, tmp; | 996 | struct timespec64 now, boot, tmp; |
794 | 997 | struct timespec ts; | |
795 | read_persistent_clock(&now); | ||
796 | 998 | ||
797 | if (!timespec_valid_strict(&now)) { | 999 | read_persistent_clock(&ts); |
1000 | now = timespec_to_timespec64(ts); | ||
1001 | if (!timespec64_valid_strict(&now)) { | ||
798 | pr_warn("WARNING: Persistent clock returned invalid value!\n" | 1002 | pr_warn("WARNING: Persistent clock returned invalid value!\n" |
799 | " Check your CMOS/BIOS settings.\n"); | 1003 | " Check your CMOS/BIOS settings.\n"); |
800 | now.tv_sec = 0; | 1004 | now.tv_sec = 0; |
@@ -802,8 +1006,9 @@ void __init timekeeping_init(void) | |||
802 | } else if (now.tv_sec || now.tv_nsec) | 1006 | } else if (now.tv_sec || now.tv_nsec) |
803 | persistent_clock_exist = true; | 1007 | persistent_clock_exist = true; |
804 | 1008 | ||
805 | read_boot_clock(&boot); | 1009 | read_boot_clock(&ts); |
806 | if (!timespec_valid_strict(&boot)) { | 1010 | boot = timespec_to_timespec64(ts); |
1011 | if (!timespec64_valid_strict(&boot)) { | ||
807 | pr_warn("WARNING: Boot clock returned invalid value!\n" | 1012 | pr_warn("WARNING: Boot clock returned invalid value!\n" |
808 | " Check your CMOS/BIOS settings.\n"); | 1013 | " Check your CMOS/BIOS settings.\n"); |
809 | boot.tv_sec = 0; | 1014 | boot.tv_sec = 0; |
@@ -811,7 +1016,7 @@ void __init timekeeping_init(void) | |||
811 | } | 1016 | } |
812 | 1017 | ||
813 | raw_spin_lock_irqsave(&timekeeper_lock, flags); | 1018 | raw_spin_lock_irqsave(&timekeeper_lock, flags); |
814 | write_seqcount_begin(&timekeeper_seq); | 1019 | write_seqcount_begin(&tk_core.seq); |
815 | ntp_init(); | 1020 | ntp_init(); |
816 | 1021 | ||
817 | clock = clocksource_default_clock(); | 1022 | clock = clocksource_default_clock(); |
@@ -822,24 +1027,21 @@ void __init timekeeping_init(void) | |||
822 | tk_set_xtime(tk, &now); | 1027 | tk_set_xtime(tk, &now); |
823 | tk->raw_time.tv_sec = 0; | 1028 | tk->raw_time.tv_sec = 0; |
824 | tk->raw_time.tv_nsec = 0; | 1029 | tk->raw_time.tv_nsec = 0; |
1030 | tk->base_raw.tv64 = 0; | ||
825 | if (boot.tv_sec == 0 && boot.tv_nsec == 0) | 1031 | if (boot.tv_sec == 0 && boot.tv_nsec == 0) |
826 | boot = tk_xtime(tk); | 1032 | boot = tk_xtime(tk); |
827 | 1033 | ||
828 | set_normalized_timespec(&tmp, -boot.tv_sec, -boot.tv_nsec); | 1034 | set_normalized_timespec64(&tmp, -boot.tv_sec, -boot.tv_nsec); |
829 | tk_set_wall_to_mono(tk, tmp); | 1035 | tk_set_wall_to_mono(tk, tmp); |
830 | 1036 | ||
831 | tmp.tv_sec = 0; | 1037 | timekeeping_update(tk, TK_MIRROR); |
832 | tmp.tv_nsec = 0; | ||
833 | tk_set_sleep_time(tk, tmp); | ||
834 | |||
835 | memcpy(&shadow_timekeeper, &timekeeper, sizeof(timekeeper)); | ||
836 | 1038 | ||
837 | write_seqcount_end(&timekeeper_seq); | 1039 | write_seqcount_end(&tk_core.seq); |
838 | raw_spin_unlock_irqrestore(&timekeeper_lock, flags); | 1040 | raw_spin_unlock_irqrestore(&timekeeper_lock, flags); |
839 | } | 1041 | } |
840 | 1042 | ||
841 | /* time in seconds when suspend began */ | 1043 | /* time in seconds when suspend began */ |
842 | static struct timespec timekeeping_suspend_time; | 1044 | static struct timespec64 timekeeping_suspend_time; |
843 | 1045 | ||
844 | /** | 1046 | /** |
845 | * __timekeeping_inject_sleeptime - Internal function to add sleep interval | 1047 | * __timekeeping_inject_sleeptime - Internal function to add sleep interval |
@@ -849,17 +1051,17 @@ static struct timespec timekeeping_suspend_time; | |||
849 | * adds the sleep offset to the timekeeping variables. | 1051 | * adds the sleep offset to the timekeeping variables. |
850 | */ | 1052 | */ |
851 | static void __timekeeping_inject_sleeptime(struct timekeeper *tk, | 1053 | static void __timekeeping_inject_sleeptime(struct timekeeper *tk, |
852 | struct timespec *delta) | 1054 | struct timespec64 *delta) |
853 | { | 1055 | { |
854 | if (!timespec_valid_strict(delta)) { | 1056 | if (!timespec64_valid_strict(delta)) { |
855 | printk_deferred(KERN_WARNING | 1057 | printk_deferred(KERN_WARNING |
856 | "__timekeeping_inject_sleeptime: Invalid " | 1058 | "__timekeeping_inject_sleeptime: Invalid " |
857 | "sleep delta value!\n"); | 1059 | "sleep delta value!\n"); |
858 | return; | 1060 | return; |
859 | } | 1061 | } |
860 | tk_xtime_add(tk, delta); | 1062 | tk_xtime_add(tk, delta); |
861 | tk_set_wall_to_mono(tk, timespec_sub(tk->wall_to_monotonic, *delta)); | 1063 | tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, *delta)); |
862 | tk_set_sleep_time(tk, timespec_add(tk->total_sleep_time, *delta)); | 1064 | tk_update_sleep_time(tk, timespec64_to_ktime(*delta)); |
863 | tk_debug_account_sleep_time(delta); | 1065 | tk_debug_account_sleep_time(delta); |
864 | } | 1066 | } |
865 | 1067 | ||
@@ -875,7 +1077,8 @@ static void __timekeeping_inject_sleeptime(struct timekeeper *tk, | |||
875 | */ | 1077 | */ |
876 | void timekeeping_inject_sleeptime(struct timespec *delta) | 1078 | void timekeeping_inject_sleeptime(struct timespec *delta) |
877 | { | 1079 | { |
878 | struct timekeeper *tk = &timekeeper; | 1080 | struct timekeeper *tk = &tk_core.timekeeper; |
1081 | struct timespec64 tmp; | ||
879 | unsigned long flags; | 1082 | unsigned long flags; |
880 | 1083 | ||
881 | /* | 1084 | /* |
@@ -886,15 +1089,16 @@ void timekeeping_inject_sleeptime(struct timespec *delta) | |||
886 | return; | 1089 | return; |
887 | 1090 | ||
888 | raw_spin_lock_irqsave(&timekeeper_lock, flags); | 1091 | raw_spin_lock_irqsave(&timekeeper_lock, flags); |
889 | write_seqcount_begin(&timekeeper_seq); | 1092 | write_seqcount_begin(&tk_core.seq); |
890 | 1093 | ||
891 | timekeeping_forward_now(tk); | 1094 | timekeeping_forward_now(tk); |
892 | 1095 | ||
893 | __timekeeping_inject_sleeptime(tk, delta); | 1096 | tmp = timespec_to_timespec64(*delta); |
1097 | __timekeeping_inject_sleeptime(tk, &tmp); | ||
894 | 1098 | ||
895 | timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET); | 1099 | timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET); |
896 | 1100 | ||
897 | write_seqcount_end(&timekeeper_seq); | 1101 | write_seqcount_end(&tk_core.seq); |
898 | raw_spin_unlock_irqrestore(&timekeeper_lock, flags); | 1102 | raw_spin_unlock_irqrestore(&timekeeper_lock, flags); |
899 | 1103 | ||
900 | /* signal hrtimers about time change */ | 1104 | /* signal hrtimers about time change */ |
@@ -910,20 +1114,22 @@ void timekeeping_inject_sleeptime(struct timespec *delta) | |||
910 | */ | 1114 | */ |
911 | static void timekeeping_resume(void) | 1115 | static void timekeeping_resume(void) |
912 | { | 1116 | { |
913 | struct timekeeper *tk = &timekeeper; | 1117 | struct timekeeper *tk = &tk_core.timekeeper; |
914 | struct clocksource *clock = tk->clock; | 1118 | struct clocksource *clock = tk->tkr.clock; |
915 | unsigned long flags; | 1119 | unsigned long flags; |
916 | struct timespec ts_new, ts_delta; | 1120 | struct timespec64 ts_new, ts_delta; |
1121 | struct timespec tmp; | ||
917 | cycle_t cycle_now, cycle_delta; | 1122 | cycle_t cycle_now, cycle_delta; |
918 | bool suspendtime_found = false; | 1123 | bool suspendtime_found = false; |
919 | 1124 | ||
920 | read_persistent_clock(&ts_new); | 1125 | read_persistent_clock(&tmp); |
1126 | ts_new = timespec_to_timespec64(tmp); | ||
921 | 1127 | ||
922 | clockevents_resume(); | 1128 | clockevents_resume(); |
923 | clocksource_resume(); | 1129 | clocksource_resume(); |
924 | 1130 | ||
925 | raw_spin_lock_irqsave(&timekeeper_lock, flags); | 1131 | raw_spin_lock_irqsave(&timekeeper_lock, flags); |
926 | write_seqcount_begin(&timekeeper_seq); | 1132 | write_seqcount_begin(&tk_core.seq); |
927 | 1133 | ||
928 | /* | 1134 | /* |
929 | * After system resumes, we need to calculate the suspended time and | 1135 | * After system resumes, we need to calculate the suspended time and |
@@ -937,15 +1143,16 @@ static void timekeeping_resume(void) | |||
937 | * The less preferred source will only be tried if there is no better | 1143 | * The less preferred source will only be tried if there is no better |
938 | * usable source. The rtc part is handled separately in rtc core code. | 1144 | * usable source. The rtc part is handled separately in rtc core code. |
939 | */ | 1145 | */ |
940 | cycle_now = clock->read(clock); | 1146 | cycle_now = tk->tkr.read(clock); |
941 | if ((clock->flags & CLOCK_SOURCE_SUSPEND_NONSTOP) && | 1147 | if ((clock->flags & CLOCK_SOURCE_SUSPEND_NONSTOP) && |
942 | cycle_now > clock->cycle_last) { | 1148 | cycle_now > tk->tkr.cycle_last) { |
943 | u64 num, max = ULLONG_MAX; | 1149 | u64 num, max = ULLONG_MAX; |
944 | u32 mult = clock->mult; | 1150 | u32 mult = clock->mult; |
945 | u32 shift = clock->shift; | 1151 | u32 shift = clock->shift; |
946 | s64 nsec = 0; | 1152 | s64 nsec = 0; |
947 | 1153 | ||
948 | cycle_delta = (cycle_now - clock->cycle_last) & clock->mask; | 1154 | cycle_delta = clocksource_delta(cycle_now, tk->tkr.cycle_last, |
1155 | tk->tkr.mask); | ||
949 | 1156 | ||
950 | /* | 1157 | /* |
951 | * "cycle_delta * mutl" may cause 64 bits overflow, if the | 1158 | * "cycle_delta * mutl" may cause 64 bits overflow, if the |
@@ -960,10 +1167,10 @@ static void timekeeping_resume(void) | |||
960 | } | 1167 | } |
961 | nsec += ((u64) cycle_delta * mult) >> shift; | 1168 | nsec += ((u64) cycle_delta * mult) >> shift; |
962 | 1169 | ||
963 | ts_delta = ns_to_timespec(nsec); | 1170 | ts_delta = ns_to_timespec64(nsec); |
964 | suspendtime_found = true; | 1171 | suspendtime_found = true; |
965 | } else if (timespec_compare(&ts_new, &timekeeping_suspend_time) > 0) { | 1172 | } else if (timespec64_compare(&ts_new, &timekeeping_suspend_time) > 0) { |
966 | ts_delta = timespec_sub(ts_new, timekeeping_suspend_time); | 1173 | ts_delta = timespec64_sub(ts_new, timekeeping_suspend_time); |
967 | suspendtime_found = true; | 1174 | suspendtime_found = true; |
968 | } | 1175 | } |
969 | 1176 | ||
@@ -971,11 +1178,11 @@ static void timekeeping_resume(void) | |||
971 | __timekeeping_inject_sleeptime(tk, &ts_delta); | 1178 | __timekeeping_inject_sleeptime(tk, &ts_delta); |
972 | 1179 | ||
973 | /* Re-base the last cycle value */ | 1180 | /* Re-base the last cycle value */ |
974 | tk->cycle_last = clock->cycle_last = cycle_now; | 1181 | tk->tkr.cycle_last = cycle_now; |
975 | tk->ntp_error = 0; | 1182 | tk->ntp_error = 0; |
976 | timekeeping_suspended = 0; | 1183 | timekeeping_suspended = 0; |
977 | timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET); | 1184 | timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET); |
978 | write_seqcount_end(&timekeeper_seq); | 1185 | write_seqcount_end(&tk_core.seq); |
979 | raw_spin_unlock_irqrestore(&timekeeper_lock, flags); | 1186 | raw_spin_unlock_irqrestore(&timekeeper_lock, flags); |
980 | 1187 | ||
981 | touch_softlockup_watchdog(); | 1188 | touch_softlockup_watchdog(); |
@@ -988,12 +1195,14 @@ static void timekeeping_resume(void) | |||
988 | 1195 | ||
989 | static int timekeeping_suspend(void) | 1196 | static int timekeeping_suspend(void) |
990 | { | 1197 | { |
991 | struct timekeeper *tk = &timekeeper; | 1198 | struct timekeeper *tk = &tk_core.timekeeper; |
992 | unsigned long flags; | 1199 | unsigned long flags; |
993 | struct timespec delta, delta_delta; | 1200 | struct timespec64 delta, delta_delta; |
994 | static struct timespec old_delta; | 1201 | static struct timespec64 old_delta; |
1202 | struct timespec tmp; | ||
995 | 1203 | ||
996 | read_persistent_clock(&timekeeping_suspend_time); | 1204 | read_persistent_clock(&tmp); |
1205 | timekeeping_suspend_time = timespec_to_timespec64(tmp); | ||
997 | 1206 | ||
998 | /* | 1207 | /* |
999 | * On some systems the persistent_clock cannot be detected at | 1208 | ||
@@ -1004,7 +1213,7 @@ static int timekeeping_suspend(void) | |||
1004 | persistent_clock_exist = true; | 1213 | persistent_clock_exist = true; |
1005 | 1214 | ||
1006 | raw_spin_lock_irqsave(&timekeeper_lock, flags); | 1215 | raw_spin_lock_irqsave(&timekeeper_lock, flags); |
1007 | write_seqcount_begin(&timekeeper_seq); | 1216 | write_seqcount_begin(&tk_core.seq); |
1008 | timekeeping_forward_now(tk); | 1217 | timekeeping_forward_now(tk); |
1009 | timekeeping_suspended = 1; | 1218 | timekeeping_suspended = 1; |
1010 | 1219 | ||
@@ -1014,8 +1223,8 @@ static int timekeeping_suspend(void) | |||
1014 | * try to compensate so the difference in system time | 1223 | * try to compensate so the difference in system time |
1015 | * and persistent_clock time stays close to constant. | 1224 | * and persistent_clock time stays close to constant. |
1016 | */ | 1225 | */ |
1017 | delta = timespec_sub(tk_xtime(tk), timekeeping_suspend_time); | 1226 | delta = timespec64_sub(tk_xtime(tk), timekeeping_suspend_time); |
1018 | delta_delta = timespec_sub(delta, old_delta); | 1227 | delta_delta = timespec64_sub(delta, old_delta); |
1019 | if (abs(delta_delta.tv_sec) >= 2) { | 1228 | if (abs(delta_delta.tv_sec) >= 2) { |
1020 | /* | 1229 | /* |
1021 | * if delta_delta is too large, assume time correction | 1230 | * if delta_delta is too large, assume time correction |
@@ -1025,11 +1234,11 @@ static int timekeeping_suspend(void) | |||
1025 | } else { | 1234 | } else { |
1026 | /* Otherwise try to adjust old_system to compensate */ | 1235 | /* Otherwise try to adjust old_system to compensate */ |
1027 | timekeeping_suspend_time = | 1236 | timekeeping_suspend_time = |
1028 | timespec_add(timekeeping_suspend_time, delta_delta); | 1237 | timespec64_add(timekeeping_suspend_time, delta_delta); |
1029 | } | 1238 | } |
1030 | 1239 | ||
1031 | timekeeping_update(tk, TK_MIRROR); | 1240 | timekeeping_update(tk, TK_MIRROR); |
1032 | write_seqcount_end(&timekeeper_seq); | 1241 | write_seqcount_end(&tk_core.seq); |
1033 | raw_spin_unlock_irqrestore(&timekeeper_lock, flags); | 1242 | raw_spin_unlock_irqrestore(&timekeeper_lock, flags); |
1034 | 1243 | ||
1035 | clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL); | 1244 | clockevents_notify(CLOCK_EVT_NOTIFY_SUSPEND, NULL); |
@@ -1050,125 +1259,34 @@ static int __init timekeeping_init_ops(void) | |||
1050 | register_syscore_ops(&timekeeping_syscore_ops); | 1259 | register_syscore_ops(&timekeeping_syscore_ops); |
1051 | return 0; | 1260 | return 0; |
1052 | } | 1261 | } |
1053 | |||
1054 | device_initcall(timekeeping_init_ops); | 1262 | device_initcall(timekeeping_init_ops); |
1055 | 1263 | ||
1056 | /* | 1264 | /* |
1057 | * If the error is already larger, we look ahead even further | 1265 | * Apply a multiplier adjustment to the timekeeper |
1058 | * to compensate for late or lost adjustments. | ||
1059 | */ | 1266 | */ |
1060 | static __always_inline int timekeeping_bigadjust(struct timekeeper *tk, | 1267 | static __always_inline void timekeeping_apply_adjustment(struct timekeeper *tk, |
1061 | s64 error, s64 *interval, | 1268 | s64 offset, |
1062 | s64 *offset) | 1269 | bool negative, |
1270 | int adj_scale) | ||
1063 | { | 1271 | { |
1064 | s64 tick_error, i; | 1272 | s64 interval = tk->cycle_interval; |
1065 | u32 look_ahead, adj; | 1273 | s32 mult_adj = 1; |
1066 | s32 error2, mult; | ||
1067 | |||
1068 | /* | ||
1069 | * Use the current error value to determine how much to look ahead. | ||
1070 | * The larger the error the slower we adjust for it to avoid problems | ||
1071 | * with losing too many ticks, otherwise we would overadjust and | ||
1072 | * produce an even larger error. The smaller the adjustment the | ||
1073 | * faster we try to adjust for it, as lost ticks can do less harm | ||
1074 | * here. This is tuned so that an error of about 1 msec is adjusted | ||
1075 | * within about 1 sec (or 2^20 nsec in 2^SHIFT_HZ ticks). | ||
1076 | */ | ||
1077 | error2 = tk->ntp_error >> (NTP_SCALE_SHIFT + 22 - 2 * SHIFT_HZ); | ||
1078 | error2 = abs(error2); | ||
1079 | for (look_ahead = 0; error2 > 0; look_ahead++) | ||
1080 | error2 >>= 2; | ||
1081 | 1274 | ||
1082 | /* | 1275 | if (negative) { |
1083 | * Now calculate the error in (1 << look_ahead) ticks, but first | 1276 | mult_adj = -mult_adj; |
1084 | * remove the single look ahead already included in the error. | 1277 | interval = -interval; |
1085 | */ | 1278 | offset = -offset; |
1086 | tick_error = ntp_tick_length() >> (tk->ntp_error_shift + 1); | ||
1087 | tick_error -= tk->xtime_interval >> 1; | ||
1088 | error = ((error - tick_error) >> look_ahead) + tick_error; | ||
1089 | |||
1090 | /* Finally calculate the adjustment shift value. */ | ||
1091 | i = *interval; | ||
1092 | mult = 1; | ||
1093 | if (error < 0) { | ||
1094 | error = -error; | ||
1095 | *interval = -*interval; | ||
1096 | *offset = -*offset; | ||
1097 | mult = -1; | ||
1098 | } | 1279 | } |
1099 | for (adj = 0; error > i; adj++) | 1280 | mult_adj <<= adj_scale; |
1100 | error >>= 1; | 1281 | interval <<= adj_scale; |
1101 | 1282 | offset <<= adj_scale; | |
1102 | *interval <<= adj; | ||
1103 | *offset <<= adj; | ||
1104 | return mult << adj; | ||
1105 | } | ||
1106 | |||
1107 | /* | ||
1108 | * Adjust the multiplier to reduce the error value, | ||
1109 | * this is optimized for the most common adjustments of -1,0,1, | ||
1110 | * for other values we can do a bit more work. | ||
1111 | */ | ||
1112 | static void timekeeping_adjust(struct timekeeper *tk, s64 offset) | ||
1113 | { | ||
1114 | s64 error, interval = tk->cycle_interval; | ||
1115 | int adj; | ||
1116 | 1283 | ||
1117 | /* | 1284 | /* |
1118 | * The point of this is to check if the error is greater than half | ||
1119 | * an interval. | ||
1120 | * | ||
1121 | * First we shift it down from NTP_SHIFT to clocksource->shifted nsecs. | ||
1122 | * | ||
1123 | * Note we subtract one in the shift, so that error is really error*2. | ||
1124 | * This "saves" dividing(shifting) interval twice, but keeps the | ||
1125 | * (error > interval) comparison as still measuring if error is | ||
1126 | * larger than half an interval. | ||
1127 | * | ||
1128 | * Note: It does not "save" on aggravation when reading the code. | ||
1129 | */ | ||
1130 | error = tk->ntp_error >> (tk->ntp_error_shift - 1); | ||
1131 | if (error > interval) { | ||
1132 | /* | ||
1133 | * We now divide error by 4(via shift), which checks if | ||
1134 | * the error is greater than twice the interval. | ||
1135 | * If it is greater, we need a bigadjust, if its smaller, | ||
1136 | * we can adjust by 1. | ||
1137 | */ | ||
1138 | error >>= 2; | ||
1139 | if (likely(error <= interval)) | ||
1140 | adj = 1; | ||
1141 | else | ||
1142 | adj = timekeeping_bigadjust(tk, error, &interval, &offset); | ||
1143 | } else { | ||
1144 | if (error < -interval) { | ||
1145 | /* See comment above, this is just switched for the negative */ | ||
1146 | error >>= 2; | ||
1147 | if (likely(error >= -interval)) { | ||
1148 | adj = -1; | ||
1149 | interval = -interval; | ||
1150 | offset = -offset; | ||
1151 | } else { | ||
1152 | adj = timekeeping_bigadjust(tk, error, &interval, &offset); | ||
1153 | } | ||
1154 | } else { | ||
1155 | goto out_adjust; | ||
1156 | } | ||
1157 | } | ||
1158 | |||
1159 | if (unlikely(tk->clock->maxadj && | ||
1160 | (tk->mult + adj > tk->clock->mult + tk->clock->maxadj))) { | ||
1161 | printk_deferred_once(KERN_WARNING | ||
1162 | "Adjusting %s more than 11%% (%ld vs %ld)\n", | ||
1163 | tk->clock->name, (long)tk->mult + adj, | ||
1164 | (long)tk->clock->mult + tk->clock->maxadj); | ||
1165 | } | ||
1166 | /* | ||
1167 | * So the following can be confusing. | 1285 | * So the following can be confusing. |
1168 | * | 1286 | * |
1169 | * To keep things simple, let's assume adj == 1 for now. | 1287 | * To keep things simple, let's assume mult_adj == 1 for now. |
1170 | * | 1288 | * |
1171 | * When adj != 1, remember that the interval and offset values | 1289 | * When mult_adj != 1, remember that the interval and offset values |
1172 | * have been appropriately scaled so the math is the same. | 1290 | * have been appropriately scaled so the math is the same. |
1173 | * | 1291 | * |
1174 | * The basic idea here is that we're increasing the multiplier | 1292 | * The basic idea here is that we're increasing the multiplier |
@@ -1212,12 +1330,78 @@ static void timekeeping_adjust(struct timekeeper *tk, s64 offset) | |||
1212 | * | 1330 | * |
1213 | * XXX - TODO: Doc ntp_error calculation. | 1331 | * XXX - TODO: Doc ntp_error calculation. |
1214 | */ | 1332 | */ |
1215 | tk->mult += adj; | 1333 | tk->tkr.mult += mult_adj; |
1216 | tk->xtime_interval += interval; | 1334 | tk->xtime_interval += interval; |
1217 | tk->xtime_nsec -= offset; | 1335 | tk->tkr.xtime_nsec -= offset; |
1218 | tk->ntp_error -= (interval - offset) << tk->ntp_error_shift; | 1336 | tk->ntp_error -= (interval - offset) << tk->ntp_error_shift; |
1337 | } | ||
1338 | |||
1339 | /* | ||
1340 | * Calculate the multiplier adjustment needed to match the frequency | ||
1341 | * specified by NTP | ||
1342 | */ | ||
1343 | static __always_inline void timekeeping_freqadjust(struct timekeeper *tk, | ||
1344 | s64 offset) | ||
1345 | { | ||
1346 | s64 interval = tk->cycle_interval; | ||
1347 | s64 xinterval = tk->xtime_interval; | ||
1348 | s64 tick_error; | ||
1349 | bool negative; | ||
1350 | u32 adj; | ||
1351 | |||
1352 | /* Remove any current error adj from freq calculation */ | ||
1353 | if (tk->ntp_err_mult) | ||
1354 | xinterval -= tk->cycle_interval; | ||
1355 | |||
1356 | tk->ntp_tick = ntp_tick_length(); | ||
1357 | |||
1358 | /* Calculate current error per tick */ | ||
1359 | tick_error = ntp_tick_length() >> tk->ntp_error_shift; | ||
1360 | tick_error -= (xinterval + tk->xtime_remainder); | ||
1361 | |||
1362 | /* Don't worry about correcting it if it's small */ | ||
1363 | if (likely((tick_error >= 0) && (tick_error <= interval))) | ||
1364 | return; | ||
1365 | |||
1366 | /* preserve the direction of correction */ | ||
1367 | negative = (tick_error < 0); | ||
1368 | |||
1369 | /* Sort out the magnitude of the correction */ | ||
1370 | tick_error = abs(tick_error); | ||
1371 | for (adj = 0; tick_error > interval; adj++) | ||
1372 | tick_error >>= 1; | ||
1373 | |||
1374 | /* scale the corrections */ | ||
1375 | timekeeping_apply_adjustment(tk, offset, negative, adj); | ||
1376 | } | ||
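A worked example of the magnitude loop, with invented numbers: for tick_error = 8 * cycle_interval the loop halves three times (8 -> 4 -> 2 -> 1 intervals) and stops, so adj = 3 and timekeeping_apply_adjustment() scales mult_adj, interval and offset by 1 << 3. The correction applied is the smallest power of two covering the per-tick error; whatever it leaves behind is picked up on later ticks.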
1377 | |||
1378 | /* | ||
1379 | * Adjust the timekeeper's multiplier to the correct frequency | ||
1380 | * and also to reduce the accumulated error value. | ||
1381 | */ | ||
1382 | static void timekeeping_adjust(struct timekeeper *tk, s64 offset) | ||
1383 | { | ||
1384 | /* Correct for the current frequency error */ | ||
1385 | timekeeping_freqadjust(tk, offset); | ||
1386 | |||
1387 | /* Next make a small adjustment to fix any cumulative error */ | ||
1388 | if (!tk->ntp_err_mult && (tk->ntp_error > 0)) { | ||
1389 | tk->ntp_err_mult = 1; | ||
1390 | timekeeping_apply_adjustment(tk, offset, 0, 0); | ||
1391 | } else if (tk->ntp_err_mult && (tk->ntp_error <= 0)) { | ||
1392 | /* Undo any existing error adjustment */ | ||
1393 | timekeeping_apply_adjustment(tk, offset, 1, 0); | ||
1394 | tk->ntp_err_mult = 0; | ||
1395 | } | ||
1396 | |||
1397 | if (unlikely(tk->tkr.clock->maxadj && | ||
1398 | (tk->tkr.mult > tk->tkr.clock->mult + tk->tkr.clock->maxadj))) { | ||
1399 | printk_once(KERN_WARNING | ||
1400 | "Adjusting %s more than 11%% (%ld vs %ld)\n", | ||
1401 | tk->tkr.clock->name, (long)tk->tkr.mult, | ||
1402 | (long)tk->tkr.clock->mult + tk->tkr.clock->maxadj); | ||
1403 | } | ||
1219 | 1404 | ||
1220 | out_adjust: | ||
1221 | /* | 1405 | /* |
1222 | * It may be possible that when we entered this function, xtime_nsec | 1406 | * It may be possible that when we entered this function, xtime_nsec |
1223 | * was very small. Further, if we're slightly speeding the clocksource | 1407 | * was very small. Further, if we're slightly speeding the clocksource |
@@ -1232,12 +1416,11 @@ out_adjust: | |||
1232 | * We'll correct this error next time through this function, when | 1416 | * We'll correct this error next time through this function, when |
1233 | * xtime_nsec is not as small. | 1417 | * xtime_nsec is not as small. |
1234 | */ | 1418 | */ |
1235 | if (unlikely((s64)tk->xtime_nsec < 0)) { | 1419 | if (unlikely((s64)tk->tkr.xtime_nsec < 0)) { |
1236 | s64 neg = -(s64)tk->xtime_nsec; | 1420 | s64 neg = -(s64)tk->tkr.xtime_nsec; |
1237 | tk->xtime_nsec = 0; | 1421 | tk->tkr.xtime_nsec = 0; |
1238 | tk->ntp_error += neg << tk->ntp_error_shift; | 1422 | tk->ntp_error += neg << tk->ntp_error_shift; |
1239 | } | 1423 | } |
1240 | |||
1241 | } | 1424 | } |
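On the 11% in the warning: maxadj comes from clocksource_max_adjustment() in kernel/time/clocksource.c, which (assuming it is unchanged by this series) caps corrections at 11% of the nominal mult:

	static u32 clocksource_max_adjustment(struct clocksource *cs)
	{
		u64 ret;
		/*
		 * We won't try to correct for more than 11% adjustments
		 * (110,000 ppm)
		 */
		ret = (u64)cs->mult * 11;
		do_div(ret, 100);
		return (u32)ret;
	}

The slack exists because adjtimex()'s tick parameter can legitimately ask for roughly +/-10%; needing more than that suggests a mis-rated clocksource.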
1242 | 1425 | ||
1243 | /** | 1426 | /** |
@@ -1250,26 +1433,26 @@ out_adjust: | |||
1250 | */ | 1433 | */ |
1251 | static inline unsigned int accumulate_nsecs_to_secs(struct timekeeper *tk) | 1434 | static inline unsigned int accumulate_nsecs_to_secs(struct timekeeper *tk) |
1252 | { | 1435 | { |
1253 | u64 nsecps = (u64)NSEC_PER_SEC << tk->shift; | 1436 | u64 nsecps = (u64)NSEC_PER_SEC << tk->tkr.shift; |
1254 | unsigned int clock_set = 0; | 1437 | unsigned int clock_set = 0; |
1255 | 1438 | ||
1256 | while (tk->xtime_nsec >= nsecps) { | 1439 | while (tk->tkr.xtime_nsec >= nsecps) { |
1257 | int leap; | 1440 | int leap; |
1258 | 1441 | ||
1259 | tk->xtime_nsec -= nsecps; | 1442 | tk->tkr.xtime_nsec -= nsecps; |
1260 | tk->xtime_sec++; | 1443 | tk->xtime_sec++; |
1261 | 1444 | ||
1262 | /* Figure out if it's a leap sec and apply if needed */ | 1445 | ||
1263 | leap = second_overflow(tk->xtime_sec); | 1446 | leap = second_overflow(tk->xtime_sec); |
1264 | if (unlikely(leap)) { | 1447 | if (unlikely(leap)) { |
1265 | struct timespec ts; | 1448 | struct timespec64 ts; |
1266 | 1449 | ||
1267 | tk->xtime_sec += leap; | 1450 | tk->xtime_sec += leap; |
1268 | 1451 | ||
1269 | ts.tv_sec = leap; | 1452 | ts.tv_sec = leap; |
1270 | ts.tv_nsec = 0; | 1453 | ts.tv_nsec = 0; |
1271 | tk_set_wall_to_mono(tk, | 1454 | tk_set_wall_to_mono(tk, |
1272 | timespec_sub(tk->wall_to_monotonic, ts)); | 1455 | timespec64_sub(tk->wall_to_monotonic, ts)); |
1273 | 1456 | ||
1274 | __timekeeping_set_tai_offset(tk, tk->tai_offset - leap); | 1457 | __timekeeping_set_tai_offset(tk, tk->tai_offset - leap); |
1275 | 1458 | ||
@@ -1301,9 +1484,9 @@ static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset, | |||
1301 | 1484 | ||
1302 | /* Accumulate one shifted interval */ | 1485 | /* Accumulate one shifted interval */ |
1303 | offset -= interval; | 1486 | offset -= interval; |
1304 | tk->cycle_last += interval; | 1487 | tk->tkr.cycle_last += interval; |
1305 | 1488 | ||
1306 | tk->xtime_nsec += tk->xtime_interval << shift; | 1489 | tk->tkr.xtime_nsec += tk->xtime_interval << shift; |
1307 | *clock_set |= accumulate_nsecs_to_secs(tk); | 1490 | *clock_set |= accumulate_nsecs_to_secs(tk); |
1308 | 1491 | ||
1309 | /* Accumulate raw time */ | 1492 | /* Accumulate raw time */ |
@@ -1317,48 +1500,20 @@ static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset, | |||
1317 | tk->raw_time.tv_nsec = raw_nsecs; | 1500 | tk->raw_time.tv_nsec = raw_nsecs; |
1318 | 1501 | ||
1319 | /* Accumulate error between NTP and clock interval */ | 1502 | /* Accumulate error between NTP and clock interval */ |
1320 | tk->ntp_error += ntp_tick_length() << shift; | 1503 | tk->ntp_error += tk->ntp_tick << shift; |
1321 | tk->ntp_error -= (tk->xtime_interval + tk->xtime_remainder) << | 1504 | tk->ntp_error -= (tk->xtime_interval + tk->xtime_remainder) << |
1322 | (tk->ntp_error_shift + shift); | 1505 | (tk->ntp_error_shift + shift); |
1323 | 1506 | ||
1324 | return offset; | 1507 | return offset; |
1325 | } | 1508 | } |
1326 | 1509 | ||
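logarithmic_accumulation() is driven by a chunk-size loop in update_wall_time(), largely elided by these hunks; a sketch of it, to show where the shift argument comes from:

	/* accumulate in the largest power-of-two chunks that fit */
	shift = ilog2(offset) - ilog2(tk->cycle_interval);
	shift = max(0, shift);
	/* bound shift so the shifted ntp_error math cannot overflow */
	maxshift = (64 - (ilog2(ntp_tick_length()) + 1)) - 1;
	shift = min(shift, maxshift);
	while (offset >= tk->cycle_interval) {
		offset = logarithmic_accumulation(tk, offset, shift,
						  &clock_set);
		if (offset < tk->cycle_interval << shift)
			shift--;
	}

So a backlog of, say, 13 intervals is consumed as 8 + 4 + 1 rather than as 13 single-interval passes.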
1327 | #ifdef CONFIG_GENERIC_TIME_VSYSCALL_OLD | ||
1328 | static inline void old_vsyscall_fixup(struct timekeeper *tk) | ||
1329 | { | ||
1330 | s64 remainder; | ||
1331 | |||
1332 | /* | ||
1333 | * Store only full nanoseconds into xtime_nsec after rounding | ||
1334 | * it up and add the remainder to the error difference. | ||
1335 | * XXX - This is necessary to avoid small 1ns inconsistencies caused | ||
1336 | * by truncating the remainder in vsyscalls. However, it causes | ||
1337 | * additional work to be done in timekeeping_adjust(). Once | ||
1338 | * the vsyscall implementations are converted to use xtime_nsec | ||
1339 | * (shifted nanoseconds), and CONFIG_GENERIC_TIME_VSYSCALL_OLD | ||
1340 | * users are removed, this can be killed. | ||
1341 | */ | ||
1342 | remainder = tk->xtime_nsec & ((1ULL << tk->shift) - 1); | ||
1343 | tk->xtime_nsec -= remainder; | ||
1344 | tk->xtime_nsec += 1ULL << tk->shift; | ||
1345 | tk->ntp_error += remainder << tk->ntp_error_shift; | ||
1346 | tk->ntp_error -= (1ULL << tk->shift) << tk->ntp_error_shift; | ||
1347 | } | ||
1348 | #else | ||
1349 | #define old_vsyscall_fixup(tk) | ||
1350 | #endif | ||
1351 | |||
1352 | |||
1353 | |||
1354 | /** | 1510 | /** |
1355 | * update_wall_time - Uses the current clocksource to increment the wall time | 1511 | * update_wall_time - Uses the current clocksource to increment the wall time |
1356 | * | 1512 | * |
1357 | */ | 1513 | */ |
1358 | void update_wall_time(void) | 1514 | void update_wall_time(void) |
1359 | { | 1515 | { |
1360 | struct clocksource *clock; | 1516 | struct timekeeper *real_tk = &tk_core.timekeeper; |
1361 | struct timekeeper *real_tk = &timekeeper; | ||
1362 | struct timekeeper *tk = &shadow_timekeeper; | 1517 | struct timekeeper *tk = &shadow_timekeeper; |
1363 | cycle_t offset; | 1518 | cycle_t offset; |
1364 | int shift = 0, maxshift; | 1519 | int shift = 0, maxshift; |
@@ -1371,12 +1526,11 @@ void update_wall_time(void) | |||
1371 | if (unlikely(timekeeping_suspended)) | 1526 | if (unlikely(timekeeping_suspended)) |
1372 | goto out; | 1527 | goto out; |
1373 | 1528 | ||
1374 | clock = real_tk->clock; | ||
1375 | |||
1376 | #ifdef CONFIG_ARCH_USES_GETTIMEOFFSET | 1529 | #ifdef CONFIG_ARCH_USES_GETTIMEOFFSET |
1377 | offset = real_tk->cycle_interval; | 1530 | offset = real_tk->cycle_interval; |
1378 | #else | 1531 | #else |
1379 | offset = (clock->read(clock) - clock->cycle_last) & clock->mask; | 1532 | offset = clocksource_delta(tk->tkr.read(tk->tkr.clock), |
1533 | tk->tkr.cycle_last, tk->tkr.mask); | ||
1380 | #endif | 1534 | #endif |
1381 | 1535 | ||
1382 | /* Check if there's really nothing to do */ | 1536 | /* Check if there's really nothing to do */ |
@@ -1418,9 +1572,7 @@ void update_wall_time(void) | |||
1418 | */ | 1572 | */ |
1419 | clock_set |= accumulate_nsecs_to_secs(tk); | 1573 | clock_set |= accumulate_nsecs_to_secs(tk); |
1420 | 1574 | ||
1421 | write_seqcount_begin(&timekeeper_seq); | 1575 | write_seqcount_begin(&tk_core.seq); |
1422 | /* Update clock->cycle_last with the new value */ | ||
1423 | clock->cycle_last = tk->cycle_last; | ||
1424 | /* | 1576 | /* |
1425 | * Update the real timekeeper. | 1577 | * Update the real timekeeper. |
1426 | * | 1578 | * |
@@ -1428,12 +1580,12 @@ void update_wall_time(void) | |||
1428 | * requires changes to all other timekeeper usage sites as | 1580 | * requires changes to all other timekeeper usage sites as |
1429 | * well, i.e. move the timekeeper pointer getter into the | 1581 | * well, i.e. move the timekeeper pointer getter into the |
1430 | * spinlocked/seqcount protected sections. And we trade this | 1582 | * spinlocked/seqcount protected sections. And we trade this |
1431 | * memcpy under the timekeeper_seq against one before we start | 1583 | * memcpy under the tk_core.seq against one before we start |
1432 | * updating. | 1584 | * updating. |
1433 | */ | 1585 | */ |
1434 | memcpy(real_tk, tk, sizeof(*tk)); | 1586 | memcpy(real_tk, tk, sizeof(*tk)); |
1435 | timekeeping_update(real_tk, clock_set); | 1587 | timekeeping_update(real_tk, clock_set); |
1436 | write_seqcount_end(&timekeeper_seq); | 1588 | write_seqcount_end(&tk_core.seq); |
1437 | out: | 1589 | out: |
1438 | raw_spin_unlock_irqrestore(&timekeeper_lock, flags); | 1590 | raw_spin_unlock_irqrestore(&timekeeper_lock, flags); |
1439 | if (clock_set) | 1591 | if (clock_set) |
@@ -1454,83 +1606,16 @@ out: | |||
1454 | */ | 1606 | */ |
1455 | void getboottime(struct timespec *ts) | 1607 | void getboottime(struct timespec *ts) |
1456 | { | 1608 | { |
1457 | struct timekeeper *tk = &timekeeper; | 1609 | struct timekeeper *tk = &tk_core.timekeeper; |
1458 | struct timespec boottime = { | 1610 | ktime_t t = ktime_sub(tk->offs_real, tk->offs_boot); |
1459 | .tv_sec = tk->wall_to_monotonic.tv_sec + | ||
1460 | tk->total_sleep_time.tv_sec, | ||
1461 | .tv_nsec = tk->wall_to_monotonic.tv_nsec + | ||
1462 | tk->total_sleep_time.tv_nsec | ||
1463 | }; | ||
1464 | |||
1465 | set_normalized_timespec(ts, -boottime.tv_sec, -boottime.tv_nsec); | ||
1466 | } | ||
1467 | EXPORT_SYMBOL_GPL(getboottime); | ||
1468 | |||
1469 | /** | ||
1470 | * get_monotonic_boottime - Returns monotonic time since boot | ||
1471 | * @ts: pointer to the timespec to be set | ||
1472 | * | ||
1473 | * Returns the monotonic time since boot in a timespec. | ||
1474 | * | ||
1475 | * This is similar to CLOCK_MONOTONIC/ktime_get_ts, but also | ||
1476 | * includes the time spent in suspend. | ||
1477 | */ | ||
1478 | void get_monotonic_boottime(struct timespec *ts) | ||
1479 | { | ||
1480 | struct timekeeper *tk = &timekeeper; | ||
1481 | struct timespec tomono, sleep; | ||
1482 | s64 nsec; | ||
1483 | unsigned int seq; | ||
1484 | |||
1485 | WARN_ON(timekeeping_suspended); | ||
1486 | |||
1487 | do { | ||
1488 | seq = read_seqcount_begin(&timekeeper_seq); | ||
1489 | ts->tv_sec = tk->xtime_sec; | ||
1490 | nsec = timekeeping_get_ns(tk); | ||
1491 | tomono = tk->wall_to_monotonic; | ||
1492 | sleep = tk->total_sleep_time; | ||
1493 | |||
1494 | } while (read_seqcount_retry(&timekeeper_seq, seq)); | ||
1495 | |||
1496 | ts->tv_sec += tomono.tv_sec + sleep.tv_sec; | ||
1497 | ts->tv_nsec = 0; | ||
1498 | timespec_add_ns(ts, nsec + tomono.tv_nsec + sleep.tv_nsec); | ||
1499 | } | ||
1500 | EXPORT_SYMBOL_GPL(get_monotonic_boottime); | ||
1501 | |||
1502 | /** | ||
1503 | * ktime_get_boottime - Returns monotonic time since boot in a ktime | ||
1504 | * | ||
1505 | * Returns the monotonic time since boot in a ktime | ||
1506 | * | ||
1507 | * This is similar to CLOCK_MONOTONIC/ktime_get, but also | ||
1508 | * includes the time spent in suspend. | ||
1509 | */ | ||
1510 | ktime_t ktime_get_boottime(void) | ||
1511 | { | ||
1512 | struct timespec ts; | ||
1513 | |||
1514 | get_monotonic_boottime(&ts); | ||
1515 | return timespec_to_ktime(ts); | ||
1516 | } | ||
1517 | EXPORT_SYMBOL_GPL(ktime_get_boottime); | ||
1518 | |||
1519 | /** | ||
1520 | * monotonic_to_bootbased - Convert the monotonic time to boot based. | ||
1521 | * @ts: pointer to the timespec to be converted | ||
1522 | */ | ||
1523 | void monotonic_to_bootbased(struct timespec *ts) | ||
1524 | { | ||
1525 | struct timekeeper *tk = &timekeeper; | ||
1526 | 1611 | ||
1527 | *ts = timespec_add(*ts, tk->total_sleep_time); | 1612 | *ts = ktime_to_timespec(t); |
1528 | } | 1613 | } |
1529 | EXPORT_SYMBOL_GPL(monotonic_to_bootbased); | 1614 | EXPORT_SYMBOL_GPL(getboottime); |
1530 | 1615 | ||
1531 | unsigned long get_seconds(void) | 1616 | unsigned long get_seconds(void) |
1532 | { | 1617 | { |
1533 | struct timekeeper *tk = &timekeeper; | 1618 | struct timekeeper *tk = &tk_core.timekeeper; |
1534 | 1619 | ||
1535 | return tk->xtime_sec; | 1620 | return tk->xtime_sec; |
1536 | } | 1621 | } |
@@ -1538,43 +1623,44 @@ EXPORT_SYMBOL(get_seconds); | |||
1538 | 1623 | ||
1539 | struct timespec __current_kernel_time(void) | 1624 | struct timespec __current_kernel_time(void) |
1540 | { | 1625 | { |
1541 | struct timekeeper *tk = &timekeeper; | 1626 | struct timekeeper *tk = &tk_core.timekeeper; |
1542 | 1627 | ||
1543 | return tk_xtime(tk); | 1628 | return timespec64_to_timespec(tk_xtime(tk)); |
1544 | } | 1629 | } |
1545 | 1630 | ||
1546 | struct timespec current_kernel_time(void) | 1631 | struct timespec current_kernel_time(void) |
1547 | { | 1632 | { |
1548 | struct timekeeper *tk = &timekeeper; | 1633 | struct timekeeper *tk = &tk_core.timekeeper; |
1549 | struct timespec now; | 1634 | struct timespec64 now; |
1550 | unsigned long seq; | 1635 | unsigned long seq; |
1551 | 1636 | ||
1552 | do { | 1637 | do { |
1553 | seq = read_seqcount_begin(&timekeeper_seq); | 1638 | seq = read_seqcount_begin(&tk_core.seq); |
1554 | 1639 | ||
1555 | now = tk_xtime(tk); | 1640 | now = tk_xtime(tk); |
1556 | } while (read_seqcount_retry(&timekeeper_seq, seq)); | 1641 | } while (read_seqcount_retry(&tk_core.seq, seq)); |
1557 | 1642 | ||
1558 | return now; | 1643 | return timespec64_to_timespec(now); |
1559 | } | 1644 | } |
1560 | EXPORT_SYMBOL(current_kernel_time); | 1645 | EXPORT_SYMBOL(current_kernel_time); |
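[ Ed. note: every reader in this file follows the same lockless pattern against tk_core.seq — snapshot the fields, then retry if a writer ran in between. A minimal sketch of the reader side; "some_field" is a placeholder, not a real timekeeper member: ]

/* Sketch of the seqcount read loop used by the accessors above. */
static u64 tk_read_sketch(void)
{
	unsigned int seq;
	u64 val;

	do {
		seq = read_seqcount_begin(&tk_core.seq);
		val = tk_core.timekeeper.some_field;	/* snapshot under the seqcount */
	} while (read_seqcount_retry(&tk_core.seq, seq));

	return val;
}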
1561 | 1646 | ||
1562 | struct timespec get_monotonic_coarse(void) | 1647 | struct timespec get_monotonic_coarse(void) |
1563 | { | 1648 | { |
1564 | struct timekeeper *tk = &timekeeper; | 1649 | struct timekeeper *tk = &tk_core.timekeeper; |
1565 | struct timespec now, mono; | 1650 | struct timespec64 now, mono; |
1566 | unsigned long seq; | 1651 | unsigned long seq; |
1567 | 1652 | ||
1568 | do { | 1653 | do { |
1569 | seq = read_seqcount_begin(&timekeeper_seq); | 1654 | seq = read_seqcount_begin(&tk_core.seq); |
1570 | 1655 | ||
1571 | now = tk_xtime(tk); | 1656 | now = tk_xtime(tk); |
1572 | mono = tk->wall_to_monotonic; | 1657 | mono = tk->wall_to_monotonic; |
1573 | } while (read_seqcount_retry(&timekeeper_seq, seq)); | 1658 | } while (read_seqcount_retry(&tk_core.seq, seq)); |
1574 | 1659 | ||
1575 | set_normalized_timespec(&now, now.tv_sec + mono.tv_sec, | 1660 | set_normalized_timespec64(&now, now.tv_sec + mono.tv_sec, |
1576 | now.tv_nsec + mono.tv_nsec); | 1661 | now.tv_nsec + mono.tv_nsec); |
1577 | return now; | 1662 | |
1663 | return timespec64_to_timespec(now); | ||
1578 | } | 1664 | } |
1579 | 1665 | ||
1580 | /* | 1666 | /* |
@@ -1587,29 +1673,38 @@ void do_timer(unsigned long ticks) | |||
1587 | } | 1673 | } |
1588 | 1674 | ||
1589 | /** | 1675 | /** |
1590 | * get_xtime_and_monotonic_and_sleep_offset() - get xtime, wall_to_monotonic, | 1676 | * ktime_get_update_offsets_tick - hrtimer helper |
1591 | * and sleep offsets. | 1677 | * @offs_real: pointer to storage for monotonic -> realtime offset |
1592 | * @xtim: pointer to timespec to be set with xtime | 1678 | * @offs_boot: pointer to storage for monotonic -> boottime offset |
1593 | * @wtom: pointer to timespec to be set with wall_to_monotonic | 1679 | * @offs_tai: pointer to storage for monotonic -> clock tai offset |
1594 | * @sleep: pointer to timespec to be set with time in suspend | 1680 | * |
1681 | * Returns monotonic time at last tick and various offsets | ||
1595 | */ | 1682 | */ |
1596 | void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim, | 1683 | ktime_t ktime_get_update_offsets_tick(ktime_t *offs_real, ktime_t *offs_boot, |
1597 | struct timespec *wtom, struct timespec *sleep) | 1684 | ktime_t *offs_tai) |
1598 | { | 1685 | { |
1599 | struct timekeeper *tk = &timekeeper; | 1686 | struct timekeeper *tk = &tk_core.timekeeper; |
1600 | unsigned long seq; | 1687 | unsigned int seq; |
1688 | ktime_t base; | ||
1689 | u64 nsecs; | ||
1601 | 1690 | ||
1602 | do { | 1691 | do { |
1603 | seq = read_seqcount_begin(&timekeeper_seq); | 1692 | seq = read_seqcount_begin(&tk_core.seq); |
1604 | *xtim = tk_xtime(tk); | 1693 | |
1605 | *wtom = tk->wall_to_monotonic; | 1694 | base = tk->tkr.base_mono; |
1606 | *sleep = tk->total_sleep_time; | 1695 | nsecs = tk->tkr.xtime_nsec >> tk->tkr.shift; |
1607 | } while (read_seqcount_retry(&timekeeper_seq, seq)); | 1696 | |
1697 | *offs_real = tk->offs_real; | ||
1698 | *offs_boot = tk->offs_boot; | ||
1699 | *offs_tai = tk->offs_tai; | ||
1700 | } while (read_seqcount_retry(&tk_core.seq, seq)); | ||
1701 | |||
1702 | return ktime_add_ns(base, nsecs); | ||
1608 | } | 1703 | } |
1609 | 1704 | ||
1610 | #ifdef CONFIG_HIGH_RES_TIMERS | 1705 | #ifdef CONFIG_HIGH_RES_TIMERS |
1611 | /** | 1706 | /** |
1612 | * ktime_get_update_offsets - hrtimer helper | 1707 | * ktime_get_update_offsets_now - hrtimer helper |
1613 | * @offs_real: pointer to storage for monotonic -> realtime offset | 1708 | * @offs_real: pointer to storage for monotonic -> realtime offset |
1614 | * @offs_boot: pointer to storage for monotonic -> boottime offset | 1709 | * @offs_boot: pointer to storage for monotonic -> boottime offset |
1615 | * @offs_tai: pointer to storage for monotonic -> clock tai offset | 1710 | * @offs_tai: pointer to storage for monotonic -> clock tai offset |
@@ -1617,57 +1712,37 @@ void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim, | |||
1617 | * Returns current monotonic time and updates the offsets | 1712 | * Returns current monotonic time and updates the offsets |
1618 | * Called from hrtimer_interrupt() or retrigger_next_event() | 1713 | * Called from hrtimer_interrupt() or retrigger_next_event() |
1619 | */ | 1714 | */ |
1620 | ktime_t ktime_get_update_offsets(ktime_t *offs_real, ktime_t *offs_boot, | 1715 | ktime_t ktime_get_update_offsets_now(ktime_t *offs_real, ktime_t *offs_boot, |
1621 | ktime_t *offs_tai) | 1716 | ktime_t *offs_tai) |
1622 | { | 1717 | { |
1623 | struct timekeeper *tk = &timekeeper; | 1718 | struct timekeeper *tk = &tk_core.timekeeper; |
1624 | ktime_t now; | ||
1625 | unsigned int seq; | 1719 | unsigned int seq; |
1626 | u64 secs, nsecs; | 1720 | ktime_t base; |
1721 | u64 nsecs; | ||
1627 | 1722 | ||
1628 | do { | 1723 | do { |
1629 | seq = read_seqcount_begin(&timekeeper_seq); | 1724 | seq = read_seqcount_begin(&tk_core.seq); |
1630 | 1725 | ||
1631 | secs = tk->xtime_sec; | 1726 | base = tk->tkr.base_mono; |
1632 | nsecs = timekeeping_get_ns(tk); | 1727 | nsecs = timekeeping_get_ns(&tk->tkr); |
1633 | 1728 | ||
1634 | *offs_real = tk->offs_real; | 1729 | *offs_real = tk->offs_real; |
1635 | *offs_boot = tk->offs_boot; | 1730 | *offs_boot = tk->offs_boot; |
1636 | *offs_tai = tk->offs_tai; | 1731 | *offs_tai = tk->offs_tai; |
1637 | } while (read_seqcount_retry(&timekeeper_seq, seq)); | 1732 | } while (read_seqcount_retry(&tk_core.seq, seq)); |
1638 | 1733 | ||
1639 | now = ktime_add_ns(ktime_set(secs, 0), nsecs); | 1734 | return ktime_add_ns(base, nsecs); |
1640 | now = ktime_sub(now, *offs_real); | ||
1641 | return now; | ||
1642 | } | 1735 | } |
1643 | #endif | 1736 | #endif |
1644 | 1737 | ||
1645 | /** | 1738 | /** |
1646 | * ktime_get_monotonic_offset() - get wall_to_monotonic in ktime_t format | ||
1647 | */ | ||
1648 | ktime_t ktime_get_monotonic_offset(void) | ||
1649 | { | ||
1650 | struct timekeeper *tk = &timekeeper; | ||
1651 | unsigned long seq; | ||
1652 | struct timespec wtom; | ||
1653 | |||
1654 | do { | ||
1655 | seq = read_seqcount_begin(&timekeeper_seq); | ||
1656 | wtom = tk->wall_to_monotonic; | ||
1657 | } while (read_seqcount_retry(&timekeeper_seq, seq)); | ||
1658 | |||
1659 | return timespec_to_ktime(wtom); | ||
1660 | } | ||
1661 | EXPORT_SYMBOL_GPL(ktime_get_monotonic_offset); | ||
1662 | |||
1663 | /** | ||
1664 | * do_adjtimex() - Accessor function to NTP __do_adjtimex function | 1739 | * do_adjtimex() - Accessor function to NTP __do_adjtimex function |
1665 | */ | 1740 | */ |
1666 | int do_adjtimex(struct timex *txc) | 1741 | int do_adjtimex(struct timex *txc) |
1667 | { | 1742 | { |
1668 | struct timekeeper *tk = &timekeeper; | 1743 | struct timekeeper *tk = &tk_core.timekeeper; |
1669 | unsigned long flags; | 1744 | unsigned long flags; |
1670 | struct timespec ts; | 1745 | struct timespec64 ts; |
1671 | s32 orig_tai, tai; | 1746 | s32 orig_tai, tai; |
1672 | int ret; | 1747 | int ret; |
1673 | 1748 | ||
@@ -1687,10 +1762,10 @@ int do_adjtimex(struct timex *txc) | |||
1687 | return ret; | 1762 | return ret; |
1688 | } | 1763 | } |
1689 | 1764 | ||
1690 | getnstimeofday(&ts); | 1765 | getnstimeofday64(&ts); |
1691 | 1766 | ||
1692 | raw_spin_lock_irqsave(&timekeeper_lock, flags); | 1767 | raw_spin_lock_irqsave(&timekeeper_lock, flags); |
1693 | write_seqcount_begin(&timekeeper_seq); | 1768 | write_seqcount_begin(&tk_core.seq); |
1694 | 1769 | ||
1695 | orig_tai = tai = tk->tai_offset; | 1770 | orig_tai = tai = tk->tai_offset; |
1696 | ret = __do_adjtimex(txc, &ts, &tai); | 1771 | ret = __do_adjtimex(txc, &ts, &tai); |
@@ -1699,7 +1774,7 @@ int do_adjtimex(struct timex *txc) | |||
1699 | __timekeeping_set_tai_offset(tk, tai); | 1774 | __timekeeping_set_tai_offset(tk, tai); |
1700 | timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET); | 1775 | timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET); |
1701 | } | 1776 | } |
1702 | write_seqcount_end(&timekeeper_seq); | 1777 | write_seqcount_end(&tk_core.seq); |
1703 | raw_spin_unlock_irqrestore(&timekeeper_lock, flags); | 1778 | raw_spin_unlock_irqrestore(&timekeeper_lock, flags); |
1704 | 1779 | ||
1705 | if (tai != orig_tai) | 1780 | if (tai != orig_tai) |
@@ -1719,11 +1794,11 @@ void hardpps(const struct timespec *phase_ts, const struct timespec *raw_ts) | |||
1719 | unsigned long flags; | 1794 | unsigned long flags; |
1720 | 1795 | ||
1721 | raw_spin_lock_irqsave(&timekeeper_lock, flags); | 1796 | raw_spin_lock_irqsave(&timekeeper_lock, flags); |
1722 | write_seqcount_begin(&timekeeper_seq); | 1797 | write_seqcount_begin(&tk_core.seq); |
1723 | 1798 | ||
1724 | __hardpps(phase_ts, raw_ts); | 1799 | __hardpps(phase_ts, raw_ts); |
1725 | 1800 | ||
1726 | write_seqcount_end(&timekeeper_seq); | 1801 | write_seqcount_end(&tk_core.seq); |
1727 | raw_spin_unlock_irqrestore(&timekeeper_lock, flags); | 1802 | raw_spin_unlock_irqrestore(&timekeeper_lock, flags); |
1728 | } | 1803 | } |
1729 | EXPORT_SYMBOL(hardpps); | 1804 | EXPORT_SYMBOL(hardpps); |
diff --git a/kernel/time/timekeeping.h b/kernel/time/timekeeping.h new file mode 100644 index 000000000000..adc1fc98bde3 --- /dev/null +++ b/kernel/time/timekeeping.h | |||
@@ -0,0 +1,20 @@ | |||
1 | #ifndef _KERNEL_TIME_TIMEKEEPING_H | ||
2 | #define _KERNEL_TIME_TIMEKEEPING_H | ||
3 | /* | ||
4 | * Internal interfaces for kernel/time/ | ||
5 | */ | ||
6 | extern ktime_t ktime_get_update_offsets_tick(ktime_t *offs_real, | ||
7 | ktime_t *offs_boot, | ||
8 | ktime_t *offs_tai); | ||
9 | extern ktime_t ktime_get_update_offsets_now(ktime_t *offs_real, | ||
10 | ktime_t *offs_boot, | ||
11 | ktime_t *offs_tai); | ||
12 | |||
13 | extern int timekeeping_valid_for_hres(void); | ||
14 | extern u64 timekeeping_max_deferment(void); | ||
15 | extern int timekeeping_inject_offset(struct timespec *ts); | ||
16 | extern s32 timekeeping_get_tai_offset(void); | ||
17 | extern void timekeeping_set_tai_offset(s32 tai_offset); | ||
18 | extern void timekeeping_clocktai(struct timespec *ts); | ||
19 | |||
20 | #endif | ||
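[ Ed. note: a fragment illustrating the calling convention of the _now() variant — per the kernel-doc earlier in this patch it is called from hrtimer_interrupt(); this sketch only shows how a consumer snapshots the base and offsets together: ]

static void snapshot_bases_sketch(void)
{
	ktime_t offs_real, offs_boot, offs_tai;
	ktime_t basenow;

	/* one consistent read of monotonic "now" plus all three offsets */
	basenow = ktime_get_update_offsets_now(&offs_real, &offs_boot, &offs_tai);

	/* realtime "now" for expiry comparisons is basenow + offs_real */
	(void)ktime_add(basenow, offs_real);
}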
diff --git a/kernel/time/timekeeping_debug.c b/kernel/time/timekeeping_debug.c index 4d54f97558df..f6bd65236712 100644 --- a/kernel/time/timekeeping_debug.c +++ b/kernel/time/timekeeping_debug.c | |||
@@ -67,7 +67,7 @@ static int __init tk_debug_sleep_time_init(void) | |||
67 | } | 67 | } |
68 | late_initcall(tk_debug_sleep_time_init); | 68 | late_initcall(tk_debug_sleep_time_init); |
69 | 69 | ||
70 | void tk_debug_account_sleep_time(struct timespec *t) | 70 | void tk_debug_account_sleep_time(struct timespec64 *t) |
71 | { | 71 | { |
72 | sleep_time_bin[fls(t->tv_sec)]++; | 72 | sleep_time_bin[fls(t->tv_sec)]++; |
73 | } | 73 | } |
diff --git a/kernel/time/timekeeping_internal.h b/kernel/time/timekeeping_internal.h index 13323ea08ffa..4ea005a7f9da 100644 --- a/kernel/time/timekeeping_internal.h +++ b/kernel/time/timekeeping_internal.h | |||
@@ -3,12 +3,27 @@ | |||
3 | /* | 3 | /* |
4 | * timekeeping debug functions | 4 | * timekeeping debug functions |
5 | */ | 5 | */ |
6 | #include <linux/clocksource.h> | ||
6 | #include <linux/time.h> | 7 | #include <linux/time.h> |
7 | 8 | ||
8 | #ifdef CONFIG_DEBUG_FS | 9 | #ifdef CONFIG_DEBUG_FS |
9 | extern void tk_debug_account_sleep_time(struct timespec *t); | 10 | extern void tk_debug_account_sleep_time(struct timespec64 *t); |
10 | #else | 11 | #else |
11 | #define tk_debug_account_sleep_time(x) | 12 | #define tk_debug_account_sleep_time(x) |
12 | #endif | 13 | #endif |
13 | 14 | ||
15 | #ifdef CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE | ||
16 | static inline cycle_t clocksource_delta(cycle_t now, cycle_t last, cycle_t mask) | ||
17 | { | ||
18 | cycle_t ret = (now - last) & mask; | ||
19 | |||
20 | return (s64) ret > 0 ? ret : 0; | ||
21 | } | ||
22 | #else | ||
23 | static inline cycle_t clocksource_delta(cycle_t now, cycle_t last, cycle_t mask) | ||
24 | { | ||
25 | return (now - last) & mask; | ||
26 | } | ||
27 | #endif | ||
28 | |||
14 | #endif /* _TIMEKEEPING_INTERNAL_H */ | 29 | #endif /* _TIMEKEEPING_INTERNAL_H */ |
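[ Ed. note: the mask keeps the subtraction correct across counter wraparound for clocksources narrower than 64 bits; the VALIDATE_LAST_CYCLE variant additionally clamps a delta that went "backwards" on a full-width counter. Two worked examples: ]

/*
 * 32-bit clocksource (mask = 0xffffffff), counter wrapped:
 *	now = 0x00000010, last = 0xfffffff0
 *	(now - last) & mask = 0x20 -> 32 cycles; the wrap is handled.
 *
 * 64-bit clocksource (mask = ~0ULL), counter jumped backwards:
 *	now = last - 5 -> (now - last) & mask = 0xfffffffffffffffb
 *	(s64) of that is -5, so the validating variant returns 0
 *	instead of advancing time by a bogus near-2^64 cycle delta.
 */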
diff --git a/kernel/timer.c b/kernel/time/timer.c index 3bb01a323b2a..aca5dfe2fa3d 100644 --- a/kernel/timer.c +++ b/kernel/time/timer.c | |||
@@ -82,6 +82,7 @@ struct tvec_base { | |||
82 | unsigned long next_timer; | 82 | unsigned long next_timer; |
83 | unsigned long active_timers; | 83 | unsigned long active_timers; |
84 | unsigned long all_timers; | 84 | unsigned long all_timers; |
85 | int cpu; | ||
85 | struct tvec_root tv1; | 86 | struct tvec_root tv1; |
86 | struct tvec tv2; | 87 | struct tvec tv2; |
87 | struct tvec tv3; | 88 | struct tvec tv3; |
@@ -409,6 +410,22 @@ static void internal_add_timer(struct tvec_base *base, struct timer_list *timer) | |||
409 | base->next_timer = timer->expires; | 410 | base->next_timer = timer->expires; |
410 | } | 411 | } |
411 | base->all_timers++; | 412 | base->all_timers++; |
413 | |||
414 | /* | ||
415 | * Check whether the other CPU is in dynticks mode and needs | ||
416 | * to be triggered to reevaluate the timer wheel. | ||
417 | * We are protected against the other CPU fiddling | ||
418 | * with the timer by holding the timer base lock. This also | ||
419 | * makes sure that a CPU on the way to stop its tick can not | ||
420 | * evaluate the timer wheel. | ||
421 | * | ||
422 | * Spare the IPI for deferrable timers on idle targets though. | ||
423 | * The next busy ticks will take care of it. Except full dynticks | ||
424 | * require special care against races with idle_cpu(), let's deal | ||
425 | * with that later. | ||
426 | */ | ||
427 | if (!tbase_get_deferrable(base) || tick_nohz_full_cpu(base->cpu)) | ||
428 | wake_up_nohz_cpu(base->cpu); | ||
412 | } | 429 | } |
413 | 430 | ||
414 | #ifdef CONFIG_TIMER_STATS | 431 | #ifdef CONFIG_TIMER_STATS |
@@ -948,22 +965,6 @@ void add_timer_on(struct timer_list *timer, int cpu) | |||
948 | timer_set_base(timer, base); | 965 | timer_set_base(timer, base); |
949 | debug_activate(timer, timer->expires); | 966 | debug_activate(timer, timer->expires); |
950 | internal_add_timer(base, timer); | 967 | internal_add_timer(base, timer); |
951 | /* | ||
952 | * Check whether the other CPU is in dynticks mode and needs | ||
953 | * to be triggered to reevaluate the timer wheel. | ||
954 | * We are protected against the other CPU fiddling | ||
955 | * with the timer by holding the timer base lock. This also | ||
956 | * makes sure that a CPU on the way to stop its tick can not | ||
957 | * evaluate the timer wheel. | ||
958 | * | ||
959 | * Spare the IPI for deferrable timers on idle targets though. | ||
960 | * The next busy ticks will take care of it. Except full dynticks | ||
961 | * require special care against races with idle_cpu(), lets deal | ||
962 | * with that later. | ||
963 | */ | ||
964 | if (!tbase_get_deferrable(timer->base) || tick_nohz_full_cpu(cpu)) | ||
965 | wake_up_nohz_cpu(cpu); | ||
966 | |||
967 | spin_unlock_irqrestore(&base->lock, flags); | 968 | spin_unlock_irqrestore(&base->lock, flags); |
968 | } | 969 | } |
969 | EXPORT_SYMBOL_GPL(add_timer_on); | 970 | EXPORT_SYMBOL_GPL(add_timer_on); |
@@ -1568,6 +1569,7 @@ static int init_timers_cpu(int cpu) | |||
1568 | } | 1569 | } |
1569 | spin_lock_init(&base->lock); | 1570 | spin_lock_init(&base->lock); |
1570 | tvec_base_done[cpu] = 1; | 1571 | tvec_base_done[cpu] = 1; |
1572 | base->cpu = cpu; | ||
1571 | } else { | 1573 | } else { |
1572 | base = per_cpu(tvec_bases, cpu); | 1574 | base = per_cpu(tvec_bases, cpu); |
1573 | } | 1575 | } |
diff --git a/kernel/time/udelay_test.c b/kernel/time/udelay_test.c new file mode 100644 index 000000000000..e622ba365a13 --- /dev/null +++ b/kernel/time/udelay_test.c | |||
@@ -0,0 +1,168 @@ | |||
1 | /* | ||
2 | * udelay() test kernel module | ||
3 | * | ||
4 | * Tests are run by writing to and reading from /sys/kernel/debug/udelay_test | ||
5 | * Tests are configured by writing: USECS ITERATIONS | ||
6 | * Tests are executed by reading from the same file. | ||
7 | * Specifying usecs of 0 or negative values will run multiple tests. | ||
8 | * | ||
9 | * Copyright (C) 2014 Google, Inc. | ||
10 | * | ||
11 | * This software is licensed under the terms of the GNU General Public | ||
12 | * License version 2, as published by the Free Software Foundation, and | ||
13 | * may be copied, distributed, and modified under those terms. | ||
14 | * | ||
15 | * This program is distributed in the hope that it will be useful, | ||
16 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
18 | * GNU General Public License for more details. | ||
19 | */ | ||
20 | |||
21 | #include <linux/debugfs.h> | ||
22 | #include <linux/delay.h> | ||
23 | #include <linux/ktime.h> | ||
24 | #include <linux/module.h> | ||
25 | #include <linux/uaccess.h> | ||
26 | |||
27 | #define DEFAULT_ITERATIONS 100 | ||
28 | |||
29 | #define DEBUGFS_FILENAME "udelay_test" | ||
30 | |||
31 | static DEFINE_MUTEX(udelay_test_lock); | ||
32 | static struct dentry *udelay_test_debugfs_file; | ||
33 | static int udelay_test_usecs; | ||
34 | static int udelay_test_iterations = DEFAULT_ITERATIONS; | ||
35 | |||
36 | static int udelay_test_single(struct seq_file *s, int usecs, uint32_t iters) | ||
37 | { | ||
38 | int min = 0, max = 0, fail_count = 0; | ||
39 | uint64_t sum = 0; | ||
40 | uint64_t avg; | ||
41 | int i; | ||
42 | /* Allow udelay to be up to 0.5% fast */ | ||
43 | int allowed_error_ns = usecs * 5; | ||
44 | |||
45 | for (i = 0; i < iters; ++i) { | ||
46 | struct timespec ts1, ts2; | ||
47 | int time_passed; | ||
48 | |||
49 | ktime_get_ts(&ts1); | ||
50 | udelay(usecs); | ||
51 | ktime_get_ts(&ts2); | ||
52 | time_passed = timespec_to_ns(&ts2) - timespec_to_ns(&ts1); | ||
53 | |||
54 | if (i == 0 || time_passed < min) | ||
55 | min = time_passed; | ||
56 | if (i == 0 || time_passed > max) | ||
57 | max = time_passed; | ||
58 | if ((time_passed + allowed_error_ns) / 1000 < usecs) | ||
59 | ++fail_count; | ||
60 | WARN_ON(time_passed < 0); | ||
61 | sum += time_passed; | ||
62 | } | ||
63 | |||
64 | avg = sum; | ||
65 | do_div(avg, iters); | ||
66 | seq_printf(s, "%d usecs x %d: exp=%d allowed=%d min=%d avg=%lld max=%d", | ||
67 | usecs, iters, usecs * 1000, | ||
68 | (usecs * 1000) - allowed_error_ns, min, avg, max); | ||
69 | if (fail_count) | ||
70 | seq_printf(s, " FAIL=%d", fail_count); | ||
71 | seq_puts(s, "\n"); | ||
72 | |||
73 | return 0; | ||
74 | } | ||
75 | |||
76 | static int udelay_test_show(struct seq_file *s, void *v) | ||
77 | { | ||
78 | int usecs; | ||
79 | int iters; | ||
80 | int ret = 0; | ||
81 | |||
82 | mutex_lock(&udelay_test_lock); | ||
83 | usecs = udelay_test_usecs; | ||
84 | iters = udelay_test_iterations; | ||
85 | mutex_unlock(&udelay_test_lock); | ||
86 | |||
87 | if (usecs > 0 && iters > 0) { | ||
88 | return udelay_test_single(s, usecs, iters); | ||
89 | } else if (usecs == 0) { | ||
90 | struct timespec ts; | ||
91 | |||
92 | ktime_get_ts(&ts); | ||
93 | seq_printf(s, "udelay() test (lpj=%ld kt=%ld.%09ld)\n", | ||
94 | loops_per_jiffy, ts.tv_sec, ts.tv_nsec); | ||
95 | seq_puts(s, "usage:\n"); | ||
96 | seq_puts(s, "echo USECS [ITERS] > " DEBUGFS_FILENAME "\n"); | ||
97 | seq_puts(s, "cat " DEBUGFS_FILENAME "\n"); | ||
98 | } | ||
99 | |||
100 | return ret; | ||
101 | } | ||
102 | |||
103 | static int udelay_test_open(struct inode *inode, struct file *file) | ||
104 | { | ||
105 | return single_open(file, udelay_test_show, inode->i_private); | ||
106 | } | ||
107 | |||
108 | static ssize_t udelay_test_write(struct file *file, const char __user *buf, | ||
109 | size_t count, loff_t *pos) | ||
110 | { | ||
111 | char lbuf[32]; | ||
112 | int ret; | ||
113 | int usecs; | ||
114 | int iters; | ||
115 | |||
116 | if (count >= sizeof(lbuf)) | ||
117 | return -EINVAL; | ||
118 | |||
119 | if (copy_from_user(lbuf, buf, count)) | ||
120 | return -EFAULT; | ||
121 | lbuf[count] = '\0'; | ||
122 | |||
123 | ret = sscanf(lbuf, "%d %d", &usecs, &iters); | ||
124 | if (ret < 1) | ||
125 | return -EINVAL; | ||
126 | else if (ret < 2) | ||
127 | iters = DEFAULT_ITERATIONS; | ||
128 | |||
129 | mutex_lock(&udelay_test_lock); | ||
130 | udelay_test_usecs = usecs; | ||
131 | udelay_test_iterations = iters; | ||
132 | mutex_unlock(&udelay_test_lock); | ||
133 | |||
134 | return count; | ||
135 | } | ||
136 | |||
137 | static const struct file_operations udelay_test_debugfs_ops = { | ||
138 | .owner = THIS_MODULE, | ||
139 | .open = udelay_test_open, | ||
140 | .read = seq_read, | ||
141 | .write = udelay_test_write, | ||
142 | .llseek = seq_lseek, | ||
143 | .release = single_release, | ||
144 | }; | ||
145 | |||
146 | static int __init udelay_test_init(void) | ||
147 | { | ||
148 | mutex_lock(&udelay_test_lock); | ||
149 | udelay_test_debugfs_file = debugfs_create_file(DEBUGFS_FILENAME, | ||
150 | S_IRUSR, NULL, NULL, &udelay_test_debugfs_ops); | ||
151 | mutex_unlock(&udelay_test_lock); | ||
152 | |||
153 | return 0; | ||
154 | } | ||
155 | |||
156 | module_init(udelay_test_init); | ||
157 | |||
158 | static void __exit udelay_test_exit(void) | ||
159 | { | ||
160 | mutex_lock(&udelay_test_lock); | ||
161 | debugfs_remove(udelay_test_debugfs_file); | ||
162 | mutex_unlock(&udelay_test_lock); | ||
163 | } | ||
164 | |||
165 | module_exit(udelay_test_exit); | ||
166 | |||
167 | MODULE_AUTHOR("David Riley <davidriley@chromium.org>"); | ||
168 | MODULE_LICENSE("GPL"); | ||
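[ Ed. note: usage, following the module's own help text; paths assume debugfs is mounted at /sys/kernel/debug and the test is built in or loaded: ]

	echo "100 1000" > /sys/kernel/debug/udelay_test    # test udelay(100), 1000 iterations
	cat /sys/kernel/debug/udelay_test                  # run the test; prints min/avg/max
	echo "0" > /sys/kernel/debug/udelay_test           # usecs == 0: a later read prints the help text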
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 8bb80fe08767..8a528392b1f4 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
@@ -820,11 +820,12 @@ static struct { | |||
820 | const char *name; | 820 | const char *name; |
821 | int in_ns; /* is this clock in nanoseconds? */ | 821 | int in_ns; /* is this clock in nanoseconds? */ |
822 | } trace_clocks[] = { | 822 | } trace_clocks[] = { |
823 | { trace_clock_local, "local", 1 }, | 823 | { trace_clock_local, "local", 1 }, |
824 | { trace_clock_global, "global", 1 }, | 824 | { trace_clock_global, "global", 1 }, |
825 | { trace_clock_counter, "counter", 0 }, | 825 | { trace_clock_counter, "counter", 0 }, |
826 | { trace_clock_jiffies, "uptime", 0 }, | 826 | { trace_clock_jiffies, "uptime", 0 }, |
827 | { trace_clock, "perf", 1 }, | 827 | { trace_clock, "perf", 1 }, |
828 | { ktime_get_mono_fast_ns, "mono", 1 }, | ||
828 | ARCH_TRACE_CLOCKS | 829 | ARCH_TRACE_CLOCKS |
829 | }; | 830 | }; |
830 | 831 | ||
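[ Ed. note: the new "mono" entry is selectable like any other trace clock; assuming the usual tracing debugfs layout: ]

	echo mono > /sys/kernel/debug/tracing/trace_clock
	cat /sys/kernel/debug/tracing/trace_clock          # the selected clock is shown in brackets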
diff --git a/kernel/tsacct.c b/kernel/tsacct.c index a1dd9a1b1327..975cb49e32bf 100644 --- a/kernel/tsacct.c +++ b/kernel/tsacct.c | |||
@@ -31,20 +31,19 @@ void bacct_add_tsk(struct user_namespace *user_ns, | |||
31 | struct taskstats *stats, struct task_struct *tsk) | 31 | struct taskstats *stats, struct task_struct *tsk) |
32 | { | 32 | { |
33 | const struct cred *tcred; | 33 | const struct cred *tcred; |
34 | struct timespec uptime, ts; | ||
35 | cputime_t utime, stime, utimescaled, stimescaled; | 34 | cputime_t utime, stime, utimescaled, stimescaled; |
36 | u64 ac_etime; | 35 | u64 delta; |
37 | 36 | ||
38 | BUILD_BUG_ON(TS_COMM_LEN < TASK_COMM_LEN); | 37 | BUILD_BUG_ON(TS_COMM_LEN < TASK_COMM_LEN); |
39 | 38 | ||
40 | /* calculate task elapsed time in timespec */ | 39 | /* calculate task elapsed time in nsec */ |
41 | do_posix_clock_monotonic_gettime(&uptime); | 40 | delta = ktime_get_ns() - tsk->start_time; |
42 | ts = timespec_sub(uptime, tsk->start_time); | 41 | /* Convert to microseconds */ |
43 | /* rebase elapsed time to usec (should never be negative) */ | 42 | do_div(delta, NSEC_PER_USEC); |
44 | ac_etime = timespec_to_ns(&ts); | 43 | stats->ac_etime = delta; |
45 | do_div(ac_etime, NSEC_PER_USEC); | 44 | /* Convert to seconds for btime */ |
46 | stats->ac_etime = ac_etime; | 45 | do_div(delta, USEC_PER_SEC); |
47 | stats->ac_btime = get_seconds() - ts.tv_sec; | 46 | stats->ac_btime = get_seconds() - delta; |
48 | if (thread_group_leader(tsk)) { | 47 | if (thread_group_leader(tsk)) { |
49 | stats->ac_exitcode = tsk->exit_code; | 48 | stats->ac_exitcode = tsk->exit_code; |
50 | if (tsk->flags & PF_FORKNOEXEC) | 49 | if (tsk->flags & PF_FORKNOEXEC) |