author    | Linus Torvalds <torvalds@linux-foundation.org> | 2014-08-05 20:46:42 -0400
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-08-05 20:46:42 -0400
commit    | e7fda6c4c3c1a7d6996dd75fd84670fa0b5d448f (patch)
tree      | daa51c16462c318b890acf7f01fba5827275dd74 /arch
parent    | 08d69a25714429850cf9ef71f22d8cdc9189d93f (diff)
parent    | 953dec21aed4038464fec02f96a2f1b8701a5bce (diff)
Merge branch 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull timer and time updates from Thomas Gleixner:
"A rather large update of timers, timekeeping & co
- Core timekeeping code is year-2038 safe now for 32bit machines.
Now we just need to fix all in kernel users and the gazillion of
user space interfaces which rely on timespec/timeval :)
- Better cache layout for the timekeeping internal data structures.
- Proper nanosecond based interfaces for in kernel users.
- Tree wide cleanup of code which wants nanoseconds but does hoops
and loops to convert back and forth from timespecs. Some of it
definitely belongs into the ugly code museum.
- Consolidation of the timekeeping interface zoo.
- A fast NMI safe accessor to clock monotonic for tracing. This is a
long standing request to support correlated user/kernel space
traces. With proper NTP frequency correction it's also suitable
      for correlation of traces across separate machines.
- Checkpoint/restart support for timerfd.
- A few NOHZ[_FULL] improvements in the [hr]timer code.
- Code move from kernel to kernel/time of all time* related code.
- New clocksource/event drivers from the ARM universe. I'm really
impressed that despite an architected timer in the newer chips SoC
manufacturers insist on inventing new and differently broken SoC
specific timers.
[ Ed. "Impressed"? I don't think that word means what you think it means ]
- Another round of code move from arch to drivers. Looks like most
of the legacy mess in ARM regarding timers is sorted out except for
a few obnoxious strongholds.
- The usual updates and fixlets all over the place"
* 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (114 commits)
timekeeping: Fixup typo in update_vsyscall_old definition
clocksource: document some basic timekeeping concepts
timekeeping: Use cached ntp_tick_length when accumulating error
timekeeping: Rework frequency adjustments to work better w/ nohz
timekeeping: Minor fixup for timespec64->timespec assignment
ftrace: Provide trace clocks monotonic
timekeeping: Provide fast and NMI safe access to CLOCK_MONOTONIC
seqcount: Add raw_write_seqcount_latch()
seqcount: Provide raw_read_seqcount()
timekeeping: Use tk_read_base as argument for timekeeping_get_ns()
timekeeping: Create struct tk_read_base and use it in struct timekeeper
timekeeping: Restructure the timekeeper some more
clocksource: Get rid of cycle_last
clocksource: Move cycle_last validation to core code
clocksource: Make delta calculation a function
wireless: ath9k: Get rid of timespec conversions
drm: vmwgfx: Use nsec based interfaces
drm: i915: Use nsec based interfaces
timekeeping: Provide ktime_get_raw()
hangcheck-timer: Use ktime_get_ns()
...
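The nanosecond-based in-kernel interfaces called out in the pull message above replace the widespread pattern of filling a struct timespec and immediately flattening it back to nanoseconds, which is exactly what most of the arch diffs below do. A minimal before/after sketch (the wrapper function names are illustrative, not from the tree; the timekeeping calls themselves are the real APIs):

```c
#include <linux/timekeeping.h>

/* Old pattern, removed throughout this merge: detour through a timespec. */
static u64 stamp_ns_old(void)
{
	struct timespec ts;

	ktime_get_ts(&ts);		/* CLOCK_MONOTONIC as a timespec */
	return timespec_to_ns(&ts);	/* ...only to turn it back into ns */
}

/* New pattern: ask the timekeeping core for nanoseconds directly. */
static u64 stamp_ns_new(void)
{
	return ktime_get_ns();		/* CLOCK_MONOTONIC in ns */
}

/*
 * Related accessors used by this series:
 *   ktime_get_real_ns()      - CLOCK_REALTIME in ns (bL_switcher below)
 *   ktime_get_boot_ns()      - boot-based clock in ns (KVM below)
 *   ktime_get_mono_fast_ns() - NMI-safe monotonic accessor for tracing
 */
```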
Diffstat (limited to 'arch')
-rw-r--r-- | arch/arm/Kconfig | 2
-rw-r--r-- | arch/arm/common/bL_switcher.c | 16
-rw-r--r-- | arch/arm/mach-pxa/Makefile | 2
-rw-r--r-- | arch/arm/mach-pxa/generic.c | 11
-rw-r--r-- | arch/arm/mach-pxa/time.c | 162
-rw-r--r-- | arch/arm64/kernel/vdso.c | 10
-rw-r--r-- | arch/hexagon/Kconfig | 1
-rw-r--r-- | arch/ia64/kernel/time.c | 4
-rw-r--r-- | arch/powerpc/kernel/time.c | 4
-rw-r--r-- | arch/powerpc/platforms/cell/spu_base.c | 11
-rw-r--r-- | arch/powerpc/platforms/cell/spufs/context.c | 4
-rw-r--r-- | arch/powerpc/platforms/cell/spufs/file.c | 4
-rw-r--r-- | arch/powerpc/platforms/cell/spufs/sched.c | 4
-rw-r--r-- | arch/s390/Kconfig | 1
-rw-r--r-- | arch/s390/kernel/time.c | 16
-rw-r--r-- | arch/tile/kernel/time.c | 13
-rw-r--r-- | arch/tile/kernel/vdso/vgettimeofday.c | 7
-rw-r--r-- | arch/x86/Kconfig | 2
-rw-r--r-- | arch/x86/kernel/tsc.c | 21
-rw-r--r-- | arch/x86/kernel/vsyscall_gtod.c | 23
-rw-r--r-- | arch/x86/kvm/x86.c | 62

21 files changed, 92 insertions(+), 288 deletions(-)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 1e14b9068a39..d31c500653a2 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -65,7 +65,6 @@ config ARM
 	select HAVE_UID16
 	select HAVE_VIRT_CPU_ACCOUNTING_GEN
 	select IRQ_FORCED_THREADING
-	select KTIME_SCALAR
 	select MODULES_USE_ELF_REL
 	select NO_BOOTMEM
 	select OLD_SIGACTION
@@ -648,6 +647,7 @@ config ARCH_PXA
 	select AUTO_ZRELADDR
 	select CLKDEV_LOOKUP
 	select CLKSRC_MMIO
+	select CLKSRC_OF
 	select GENERIC_CLOCKEVENTS
 	select GPIO_PXA
 	select HAVE_IDE
diff --git a/arch/arm/common/bL_switcher.c b/arch/arm/common/bL_switcher.c
index 490f3dced749..6eaddc47c43d 100644
--- a/arch/arm/common/bL_switcher.c
+++ b/arch/arm/common/bL_switcher.c
@@ -58,16 +58,6 @@ static int read_mpidr(void)
 }
 
 /*
- * Get a global nanosecond time stamp for tracing.
- */
-static s64 get_ns(void)
-{
-	struct timespec ts;
-	getnstimeofday(&ts);
-	return timespec_to_ns(&ts);
-}
-
-/*
  * bL switcher core code.
  */
 
@@ -224,7 +214,7 @@ static int bL_switch_to(unsigned int new_cluster_id)
 	 */
 	local_irq_disable();
 	local_fiq_disable();
-	trace_cpu_migrate_begin(get_ns(), ob_mpidr);
+	trace_cpu_migrate_begin(ktime_get_real_ns(), ob_mpidr);
 
 	/* redirect GIC's SGIs to our counterpart */
 	gic_migrate_target(bL_gic_id[ib_cpu][ib_cluster]);
@@ -267,7 +257,7 @@ static int bL_switch_to(unsigned int new_cluster_id)
 				tdev->evtdev->next_event, 1);
 	}
 
-	trace_cpu_migrate_finish(get_ns(), ib_mpidr);
+	trace_cpu_migrate_finish(ktime_get_real_ns(), ib_mpidr);
 	local_fiq_enable();
 	local_irq_enable();
 
@@ -558,7 +548,7 @@ int bL_switcher_get_logical_index(u32 mpidr)
 
 static void bL_switcher_trace_trigger_cpu(void *__always_unused info)
 {
-	trace_cpu_migrate_current(get_ns(), read_mpidr());
+	trace_cpu_migrate_current(ktime_get_real_ns(), read_mpidr());
 }
 
 int bL_switcher_trace_trigger(void)
diff --git a/arch/arm/mach-pxa/Makefile b/arch/arm/mach-pxa/Makefile
index 648867a8caa8..2fe1824c6dcb 100644
--- a/arch/arm/mach-pxa/Makefile
+++ b/arch/arm/mach-pxa/Makefile
@@ -4,7 +4,7 @@
 
 # Common support (must be linked before board specific support)
 obj-y += clock.o devices.o generic.o irq.o \
-	time.o reset.o
+	reset.o
 obj-$(CONFIG_PM) += pm.o sleep.o standby.o
 
 # Generic drivers that other drivers may depend upon
diff --git a/arch/arm/mach-pxa/generic.c b/arch/arm/mach-pxa/generic.c
index 42254175fcf4..6f38e1af45af 100644
--- a/arch/arm/mach-pxa/generic.c
+++ b/arch/arm/mach-pxa/generic.c
@@ -25,11 +25,13 @@
 #include <asm/mach/map.h>
 #include <asm/mach-types.h>
 
+#include <mach/irqs.h>
 #include <mach/reset.h>
 #include <mach/smemc.h>
 #include <mach/pxa3xx-regs.h>
 
 #include "generic.h"
+#include <clocksource/pxa.h>
 
 void clear_reset_status(unsigned int mask)
 {
@@ -57,6 +59,15 @@ unsigned long get_clock_tick_rate(void)
 EXPORT_SYMBOL(get_clock_tick_rate);
 
 /*
+ * For non device-tree builds, keep legacy timer init
+ */
+void pxa_timer_init(void)
+{
+	pxa_timer_nodt_init(IRQ_OST0, io_p2v(0x40a00000),
+			    get_clock_tick_rate());
+}
+
+/*
  * Get the clock frequency as reflected by CCCR and the turbo flag.
  * We assume these values have been applied via a fcs.
  * If info is not 0 we also display the current settings.
diff --git a/arch/arm/mach-pxa/time.c b/arch/arm/mach-pxa/time.c
deleted file mode 100644
index fca174e3865d..000000000000
--- a/arch/arm/mach-pxa/time.c
+++ /dev/null
@@ -1,162 +0,0 @@
-/*
- * arch/arm/mach-pxa/time.c
- *
- * PXA clocksource, clockevents, and OST interrupt handlers.
- * Copyright (c) 2007 by Bill Gatliff <bgat@billgatliff.com>.
- *
- * Derived from Nicolas Pitre's PXA timer handler Copyright (c) 2001
- * by MontaVista Software, Inc. (Nico, your code rocks!)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/clockchips.h>
-#include <linux/sched_clock.h>
-
-#include <asm/div64.h>
-#include <asm/mach/irq.h>
-#include <asm/mach/time.h>
-#include <mach/regs-ost.h>
-#include <mach/irqs.h>
-
-/*
- * This is PXA's sched_clock implementation. This has a resolution
- * of at least 308 ns and a maximum value of 208 days.
- *
- * The return value is guaranteed to be monotonic in that range as
- * long as there is always less than 582 seconds between successive
- * calls to sched_clock() which should always be the case in practice.
- */
-
-static u64 notrace pxa_read_sched_clock(void)
-{
-	return readl_relaxed(OSCR);
-}
-
-
-#define MIN_OSCR_DELTA 16
-
-static irqreturn_t
-pxa_ost0_interrupt(int irq, void *dev_id)
-{
-	struct clock_event_device *c = dev_id;
-
-	/* Disarm the compare/match, signal the event. */
-	writel_relaxed(readl_relaxed(OIER) & ~OIER_E0, OIER);
-	writel_relaxed(OSSR_M0, OSSR);
-	c->event_handler(c);
-
-	return IRQ_HANDLED;
-}
-
-static int
-pxa_osmr0_set_next_event(unsigned long delta, struct clock_event_device *dev)
-{
-	unsigned long next, oscr;
-
-	writel_relaxed(readl_relaxed(OIER) | OIER_E0, OIER);
-	next = readl_relaxed(OSCR) + delta;
-	writel_relaxed(next, OSMR0);
-	oscr = readl_relaxed(OSCR);
-
-	return (signed)(next - oscr) <= MIN_OSCR_DELTA ? -ETIME : 0;
-}
-
-static void
-pxa_osmr0_set_mode(enum clock_event_mode mode, struct clock_event_device *dev)
-{
-	switch (mode) {
-	case CLOCK_EVT_MODE_ONESHOT:
-		writel_relaxed(readl_relaxed(OIER) & ~OIER_E0, OIER);
-		writel_relaxed(OSSR_M0, OSSR);
-		break;
-
-	case CLOCK_EVT_MODE_UNUSED:
-	case CLOCK_EVT_MODE_SHUTDOWN:
-		/* initializing, released, or preparing for suspend */
-		writel_relaxed(readl_relaxed(OIER) & ~OIER_E0, OIER);
-		writel_relaxed(OSSR_M0, OSSR);
-		break;
-
-	case CLOCK_EVT_MODE_RESUME:
-	case CLOCK_EVT_MODE_PERIODIC:
-		break;
-	}
-}
-
-#ifdef CONFIG_PM
-static unsigned long osmr[4], oier, oscr;
-
-static void pxa_timer_suspend(struct clock_event_device *cedev)
-{
-	osmr[0] = readl_relaxed(OSMR0);
-	osmr[1] = readl_relaxed(OSMR1);
-	osmr[2] = readl_relaxed(OSMR2);
-	osmr[3] = readl_relaxed(OSMR3);
-	oier = readl_relaxed(OIER);
-	oscr = readl_relaxed(OSCR);
-}
-
-static void pxa_timer_resume(struct clock_event_device *cedev)
-{
-	/*
-	 * Ensure that we have at least MIN_OSCR_DELTA between match
-	 * register 0 and the OSCR, to guarantee that we will receive
-	 * the one-shot timer interrupt. We adjust OSMR0 in preference
-	 * to OSCR to guarantee that OSCR is monotonically incrementing.
-	 */
-	if (osmr[0] - oscr < MIN_OSCR_DELTA)
-		osmr[0] += MIN_OSCR_DELTA;
-
-	writel_relaxed(osmr[0], OSMR0);
-	writel_relaxed(osmr[1], OSMR1);
-	writel_relaxed(osmr[2], OSMR2);
-	writel_relaxed(osmr[3], OSMR3);
-	writel_relaxed(oier, OIER);
-	writel_relaxed(oscr, OSCR);
-}
-#else
-#define pxa_timer_suspend NULL
-#define pxa_timer_resume NULL
-#endif
-
-static struct clock_event_device ckevt_pxa_osmr0 = {
-	.name		= "osmr0",
-	.features	= CLOCK_EVT_FEAT_ONESHOT,
-	.rating		= 200,
-	.set_next_event	= pxa_osmr0_set_next_event,
-	.set_mode	= pxa_osmr0_set_mode,
-	.suspend	= pxa_timer_suspend,
-	.resume		= pxa_timer_resume,
-};
-
-static struct irqaction pxa_ost0_irq = {
-	.name		= "ost0",
-	.flags		= IRQF_TIMER | IRQF_IRQPOLL,
-	.handler	= pxa_ost0_interrupt,
-	.dev_id		= &ckevt_pxa_osmr0,
-};
-
-void __init pxa_timer_init(void)
-{
-	unsigned long clock_tick_rate = get_clock_tick_rate();
-
-	writel_relaxed(0, OIER);
-	writel_relaxed(OSSR_M0 | OSSR_M1 | OSSR_M2 | OSSR_M3, OSSR);
-
-	sched_clock_register(pxa_read_sched_clock, 32, clock_tick_rate);
-
-	ckevt_pxa_osmr0.cpumask = cpumask_of(0);
-
-	setup_irq(IRQ_OST0, &pxa_ost0_irq);
-
-	clocksource_mmio_init(OSCR, "oscr0", clock_tick_rate, 200, 32,
-		clocksource_mmio_readl_up);
-	clockevents_config_and_register(&ckevt_pxa_osmr0, clock_tick_rate,
-		MIN_OSCR_DELTA * 2, 0x7fffffff);
-}
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index 24f2e8c62479..a81a446a5786 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -219,7 +219,7 @@ struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
 void update_vsyscall(struct timekeeper *tk)
 {
 	struct timespec xtime_coarse;
-	u32 use_syscall = strcmp(tk->clock->name, "arch_sys_counter");
+	u32 use_syscall = strcmp(tk->tkr.clock->name, "arch_sys_counter");
 
 	++vdso_data->tb_seq_count;
 	smp_wmb();
@@ -232,11 +232,11 @@ void update_vsyscall(struct timekeeper *tk)
 	vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec;
 
 	if (!use_syscall) {
-		vdso_data->cs_cycle_last = tk->clock->cycle_last;
+		vdso_data->cs_cycle_last = tk->tkr.cycle_last;
 		vdso_data->xtime_clock_sec = tk->xtime_sec;
-		vdso_data->xtime_clock_nsec = tk->xtime_nsec;
-		vdso_data->cs_mult = tk->mult;
-		vdso_data->cs_shift = tk->shift;
+		vdso_data->xtime_clock_nsec = tk->tkr.xtime_nsec;
+		vdso_data->cs_mult = tk->tkr.mult;
+		vdso_data->cs_shift = tk->tkr.shift;
 	}
 
 	smp_wmb();
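The tk->clock->... to tk->tkr.... conversions in this hunk (and in the s390, tile, x86 vsyscall and KVM hunks further down) come from the tk_read_base restructuring listed in the shortlog above: the fields a clock readout needs are grouped into one structure embedded in struct timekeeper for better cache layout. A rough sketch of the relevant layout at this point in the series, reconstructed from memory rather than copied from the header, so field order and types are approximate:

```c
/* Approximation of include/linux/timekeeper_internal.h after this merge. */
struct tk_read_base {
	struct clocksource	*clock;		/* current clocksource */
	cycle_t			(*read)(struct clocksource *cs);
	cycle_t			mask;		/* mask for the delta calculation */
	cycle_t			cycle_last;	/* clocksource value at last update */
	u32			mult;		/* ns = (delta * mult) >> shift */
	u32			shift;
	u64			xtime_nsec;	/* shifted (sub-ns) remainder */
	ktime_t			base_mono;	/* CLOCK_MONOTONIC base */
};

struct timekeeper {
	struct tk_read_base	tkr;		/* what update_vsyscall() copies from */
	u64			xtime_sec;	/* CLOCK_REALTIME seconds */
	/* ... */
};
```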
diff --git a/arch/hexagon/Kconfig b/arch/hexagon/Kconfig
index 0fd6138f6203..4dc89d1f9c48 100644
--- a/arch/hexagon/Kconfig
+++ b/arch/hexagon/Kconfig
@@ -23,7 +23,6 @@ config HEXAGON
 	select GENERIC_IOMAP
 	select GENERIC_SMP_IDLE_THREAD
 	select STACKTRACE_SUPPORT
-	select KTIME_SCALAR
 	select GENERIC_CLOCKEVENTS
 	select GENERIC_CLOCKEVENTS_BROADCAST
 	select MODULES_USE_ELF_RELA
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index 71c52bc7c28d..3e71ef85e439 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -441,7 +441,7 @@ void update_vsyscall_tz(void)
 }
 
 void update_vsyscall_old(struct timespec *wall, struct timespec *wtm,
-			 struct clocksource *c, u32 mult)
+			 struct clocksource *c, u32 mult, cycle_t cycle_last)
 {
 	write_seqcount_begin(&fsyscall_gtod_data.seq);
 
@@ -450,7 +450,7 @@ void update_vsyscall_old(struct timespec *wall, struct timespec *wtm,
 	fsyscall_gtod_data.clk_mult = mult;
 	fsyscall_gtod_data.clk_shift = c->shift;
 	fsyscall_gtod_data.clk_fsys_mmio = c->archdata.fsys_mmio;
-	fsyscall_gtod_data.clk_cycle_last = c->cycle_last;
+	fsyscall_gtod_data.clk_cycle_last = cycle_last;
 
 	/* copy kernel time structures */
 	fsyscall_gtod_data.wall_time.tv_sec = wall->tv_sec;
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 9fff9cdcc519..368ab374d33c 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -741,7 +741,7 @@ static cycle_t timebase_read(struct clocksource *cs)
 }
 
 void update_vsyscall_old(struct timespec *wall_time, struct timespec *wtm,
-			struct clocksource *clock, u32 mult)
+			struct clocksource *clock, u32 mult, cycle_t cycle_last)
 {
 	u64 new_tb_to_xs, new_stamp_xsec;
 	u32 frac_sec;
@@ -774,7 +774,7 @@ void update_vsyscall_old(struct timespec *wall_time, struct timespec *wtm,
 	 * We expect the caller to have done the first increment of
 	 * vdso_data->tb_update_count already.
 	 */
-	vdso_data->tb_orig_stamp = clock->cycle_last;
+	vdso_data->tb_orig_stamp = cycle_last;
 	vdso_data->stamp_xsec = new_stamp_xsec;
 	vdso_data->tb_to_xs = new_tb_to_xs;
 	vdso_data->wtom_clock_sec = wtm->tv_sec;
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index f85db3a69b4a..2930d1e81a05 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -611,7 +611,6 @@ static int __init create_spu(void *data)
 	int ret;
 	static int number;
 	unsigned long flags;
-	struct timespec ts;
 
 	ret = -ENOMEM;
 	spu = kzalloc(sizeof (*spu), GFP_KERNEL);
@@ -652,8 +651,7 @@ static int __init create_spu(void *data)
 	mutex_unlock(&spu_full_list_mutex);
 
 	spu->stats.util_state = SPU_UTIL_IDLE_LOADED;
-	ktime_get_ts(&ts);
-	spu->stats.tstamp = timespec_to_ns(&ts);
+	spu->stats.tstamp = ktime_get_ns();
 
 	INIT_LIST_HEAD(&spu->aff_list);
 
@@ -676,7 +674,6 @@ static const char *spu_state_names[] = {
 static unsigned long long spu_acct_time(struct spu *spu,
 		enum spu_utilization_state state)
 {
-	struct timespec ts;
 	unsigned long long time = spu->stats.times[state];
 
 	/*
@@ -684,10 +681,8 @@ static unsigned long long spu_acct_time(struct spu *spu,
 	 * statistics are not updated. Apply the time delta from the
 	 * last recorded state of the spu.
 	 */
-	if (spu->stats.util_state == state) {
-		ktime_get_ts(&ts);
-		time += timespec_to_ns(&ts) - spu->stats.tstamp;
-	}
+	if (spu->stats.util_state == state)
+		time += ktime_get_ns() - spu->stats.tstamp;
 
 	return time / NSEC_PER_MSEC;
 }
diff --git a/arch/powerpc/platforms/cell/spufs/context.c b/arch/powerpc/platforms/cell/spufs/context.c
index 9c6790d17eda..3b4152faeb1f 100644
--- a/arch/powerpc/platforms/cell/spufs/context.c
+++ b/arch/powerpc/platforms/cell/spufs/context.c
@@ -36,7 +36,6 @@ atomic_t nr_spu_contexts = ATOMIC_INIT(0);
 struct spu_context *alloc_spu_context(struct spu_gang *gang)
 {
 	struct spu_context *ctx;
-	struct timespec ts;
 
 	ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
 	if (!ctx)
@@ -67,8 +66,7 @@ struct spu_context *alloc_spu_context(struct spu_gang *gang)
 	__spu_update_sched_info(ctx);
 	spu_set_timeslice(ctx);
 	ctx->stats.util_state = SPU_UTIL_IDLE_LOADED;
-	ktime_get_ts(&ts);
-	ctx->stats.tstamp = timespec_to_ns(&ts);
+	ctx->stats.tstamp = ktime_get_ns();
 
 	atomic_inc(&nr_spu_contexts);
 	goto out;
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index 90986923a53a..d966bbe58b8f 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -2338,7 +2338,6 @@ static const char *ctx_state_names[] = {
 static unsigned long long spufs_acct_time(struct spu_context *ctx,
 		enum spu_utilization_state state)
 {
-	struct timespec ts;
 	unsigned long long time = ctx->stats.times[state];
 
 	/*
@@ -2351,8 +2350,7 @@ static unsigned long long spufs_acct_time(struct spu_context *ctx,
 	 * of the spu context.
 	 */
 	if (ctx->spu && ctx->stats.util_state == state) {
-		ktime_get_ts(&ts);
-		time += timespec_to_ns(&ts) - ctx->stats.tstamp;
+		time += ktime_get_ns() - ctx->stats.tstamp;
 	}
 
 	return time / NSEC_PER_MSEC;
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 4a0a64fe25df..998f632e7cce 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -1039,13 +1039,11 @@ void spuctx_switch_state(struct spu_context *ctx,
 {
 	unsigned long long curtime;
 	signed long long delta;
-	struct timespec ts;
 	struct spu *spu;
 	enum spu_utilization_state old_state;
 	int node;
 
-	ktime_get_ts(&ts);
-	curtime = timespec_to_ns(&ts);
+	curtime = ktime_get_ns();
 	delta = curtime - ctx->stats.tstamp;
 
 	WARN_ON(!mutex_is_locked(&ctx->state_mutex));
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index f5af5f6ef0f4..720a11d339eb 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -136,7 +136,6 @@ config S390
 	select HAVE_SYSCALL_TRACEPOINTS
 	select HAVE_UID16 if 32BIT
 	select HAVE_VIRT_CPU_ACCOUNTING
-	select KTIME_SCALAR if 32BIT
 	select MODULES_USE_ELF_RELA
 	select NO_BOOTMEM
 	select OLD_SIGACTION
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 0931b110c826..4cef607f3711 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -214,26 +214,26 @@ void update_vsyscall(struct timekeeper *tk)
 {
 	u64 nsecps;
 
-	if (tk->clock != &clocksource_tod)
+	if (tk->tkr.clock != &clocksource_tod)
 		return;
 
 	/* Make userspace gettimeofday spin until we're done. */
 	++vdso_data->tb_update_count;
 	smp_wmb();
-	vdso_data->xtime_tod_stamp = tk->clock->cycle_last;
+	vdso_data->xtime_tod_stamp = tk->tkr.cycle_last;
 	vdso_data->xtime_clock_sec = tk->xtime_sec;
-	vdso_data->xtime_clock_nsec = tk->xtime_nsec;
+	vdso_data->xtime_clock_nsec = tk->tkr.xtime_nsec;
 	vdso_data->wtom_clock_sec =
 		tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
-	vdso_data->wtom_clock_nsec = tk->xtime_nsec +
-		+ ((u64) tk->wall_to_monotonic.tv_nsec << tk->shift);
-	nsecps = (u64) NSEC_PER_SEC << tk->shift;
+	vdso_data->wtom_clock_nsec = tk->tkr.xtime_nsec +
+		+ ((u64) tk->wall_to_monotonic.tv_nsec << tk->tkr.shift);
+	nsecps = (u64) NSEC_PER_SEC << tk->tkr.shift;
 	while (vdso_data->wtom_clock_nsec >= nsecps) {
 		vdso_data->wtom_clock_nsec -= nsecps;
 		vdso_data->wtom_clock_sec++;
 	}
-	vdso_data->tk_mult = tk->mult;
-	vdso_data->tk_shift = tk->shift;
+	vdso_data->tk_mult = tk->tkr.mult;
+	vdso_data->tk_shift = tk->tkr.shift;
 	smp_wmb();
 	++vdso_data->tb_update_count;
 }
diff --git a/arch/tile/kernel/time.c b/arch/tile/kernel/time.c
index 462dcd0c1700..d8fbc289e680 100644
--- a/arch/tile/kernel/time.c
+++ b/arch/tile/kernel/time.c
@@ -260,9 +260,8 @@ void update_vsyscall_tz(void)
 
 void update_vsyscall(struct timekeeper *tk)
 {
-	struct timespec wall_time = tk_xtime(tk);
 	struct timespec *wtm = &tk->wall_to_monotonic;
-	struct clocksource *clock = tk->clock;
+	struct clocksource *clock = tk->tkr.clock;
 
 	if (clock != &cycle_counter_cs)
 		return;
@@ -270,13 +269,13 @@ void update_vsyscall(struct timekeeper *tk)
 	/* Userspace gettimeofday will spin while this value is odd. */
 	++vdso_data->tb_update_count;
 	smp_wmb();
-	vdso_data->xtime_tod_stamp = clock->cycle_last;
-	vdso_data->xtime_clock_sec = wall_time.tv_sec;
-	vdso_data->xtime_clock_nsec = wall_time.tv_nsec;
+	vdso_data->xtime_tod_stamp = tk->tkr.cycle_last;
+	vdso_data->xtime_clock_sec = tk->xtime_sec;
+	vdso_data->xtime_clock_nsec = tk->tkr.xtime_nsec;
 	vdso_data->wtom_clock_sec = wtm->tv_sec;
 	vdso_data->wtom_clock_nsec = wtm->tv_nsec;
-	vdso_data->mult = clock->mult;
-	vdso_data->shift = clock->shift;
+	vdso_data->mult = tk->tkr.mult;
+	vdso_data->shift = tk->tkr.shift;
 	smp_wmb();
 	++vdso_data->tb_update_count;
 }
diff --git a/arch/tile/kernel/vdso/vgettimeofday.c b/arch/tile/kernel/vdso/vgettimeofday.c
index 51ec8e46f5f9..e933fb9fbf5c 100644
--- a/arch/tile/kernel/vdso/vgettimeofday.c
+++ b/arch/tile/kernel/vdso/vgettimeofday.c
@@ -83,10 +83,11 @@ int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
 		if (count & 1)
 			continue;
 
-		cycles = (get_cycles() - vdso_data->xtime_tod_stamp);
-		ns = (cycles * vdso_data->mult) >> vdso_data->shift;
 		sec = vdso_data->xtime_clock_sec;
-		ns += vdso_data->xtime_clock_nsec;
+		cycles = get_cycles() - vdso_data->xtime_tod_stamp;
+		ns = (cycles * vdso_data->mult) + vdso_data->xtime_clock_nsec;
+		ns >>= vdso_data->shift;
+
 		if (ns >= NSEC_PER_SEC) {
 			ns -= NSEC_PER_SEC;
 			sec += 1;
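The reordering in this vdso hunk follows from xtime_clock_nsec now carrying tk->tkr.xtime_nsec, which is kept in shifted (sub-nanosecond) units: the remainder has to be added before the final shift rather than after. A compact restatement of the two computations, with illustrative helper names and the same variables as the hunk, just to make the arithmetic explicit:

```c
#include <stdint.h>

/* Before: the exported remainder was plain nanoseconds, added after scaling. */
static inline uint64_t vdso_ns_old(uint64_t cycles, uint32_t mult, uint32_t shift,
				   uint64_t xtime_clock_nsec)
{
	return ((cycles * mult) >> shift) + xtime_clock_nsec;
}

/* After: the remainder is exported in shifted (sub-ns) units, so add it first. */
static inline uint64_t vdso_ns_new(uint64_t cycles, uint32_t mult, uint32_t shift,
				   uint64_t xtime_clock_nsec_shifted)
{
	return ((cycles * mult) + xtime_clock_nsec_shifted) >> shift;
}
```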
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 6b71f0417293..6cfeb082a422 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -108,9 +108,9 @@ config X86
 	select CLOCKSOURCE_WATCHDOG
 	select GENERIC_CLOCKEVENTS
 	select ARCH_CLOCKSOURCE_DATA
+	select CLOCKSOURCE_VALIDATE_LAST_CYCLE
 	select GENERIC_CLOCKEVENTS_BROADCAST if X86_64 || (X86_32 && X86_LOCAL_APIC)
 	select GENERIC_TIME_VSYSCALL
-	select KTIME_SCALAR if X86_32
 	select GENERIC_STRNCPY_FROM_USER
 	select GENERIC_STRNLEN_USER
 	select HAVE_CONTEXT_TRACKING if X86_64
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 56b0c338061e..b6025f9e36c6 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -950,7 +950,7 @@ core_initcall(cpufreq_tsc);
 static struct clocksource clocksource_tsc;
 
 /*
- * We compare the TSC to the cycle_last value in the clocksource
+ * We used to compare the TSC to the cycle_last value in the clocksource
  * structure to avoid a nasty time-warp. This can be observed in a
  * very small window right after one CPU updated cycle_last under
  * xtime/vsyscall_gtod lock and the other CPU reads a TSC value which
@@ -960,26 +960,23 @@ static struct clocksource clocksource_tsc;
  * due to the unsigned delta calculation of the time keeping core
  * code, which is necessary to support wrapping clocksources like pm
  * timer.
+ *
+ * This sanity check is now done in the core timekeeping code.
+ * checking the result of read_tsc() - cycle_last for being negative.
+ * That works because CLOCKSOURCE_MASK(64) does not mask out any bit.
  */
 static cycle_t read_tsc(struct clocksource *cs)
 {
-	cycle_t ret = (cycle_t)get_cycles();
-
-	return ret >= clocksource_tsc.cycle_last ?
-		ret : clocksource_tsc.cycle_last;
-}
-
-static void resume_tsc(struct clocksource *cs)
-{
-	if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC_S3))
-		clocksource_tsc.cycle_last = 0;
+	return (cycle_t)get_cycles();
 }
 
+/*
+ * .mask MUST be CLOCKSOURCE_MASK(64). See comment above read_tsc()
+ */
 static struct clocksource clocksource_tsc = {
 	.name = "tsc",
 	.rating = 300,
 	.read = read_tsc,
-	.resume = resume_tsc,
 	.mask = CLOCKSOURCE_MASK(64),
 	.flags = CLOCK_SOURCE_IS_CONTINUOUS |
 		CLOCK_SOURCE_MUST_VERIFY,
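With CLOCKSOURCE_VALIDATE_LAST_CYCLE selected for x86 (see the Kconfig hunk above), the clamp that read_tsc() and resume_tsc() used to provide happens centrally when the timekeeping core turns a readout into a delta. Roughly, as a paraphrase of the helper added by the "Move cycle_last validation to core code" commit rather than a verbatim copy of the kernel source:

```c
/*
 * Sketch of the core-side delta helper: a readout that appears to be
 * behind cycle_last is treated as "no time elapsed" instead of
 * producing a huge unsigned delta (i.e. a time warp).
 */
static inline cycle_t clocksource_delta(cycle_t now, cycle_t last, cycle_t mask)
{
	cycle_t ret = (now - last) & mask;

	return (s64) ret > 0 ? ret : 0;
}
```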
diff --git a/arch/x86/kernel/vsyscall_gtod.c b/arch/x86/kernel/vsyscall_gtod.c
index 9531fbb123ba..c7d791f32b98 100644
--- a/arch/x86/kernel/vsyscall_gtod.c
+++ b/arch/x86/kernel/vsyscall_gtod.c
@@ -31,29 +31,30 @@ void update_vsyscall(struct timekeeper *tk)
 	gtod_write_begin(vdata);
 
 	/* copy vsyscall data */
-	vdata->vclock_mode = tk->clock->archdata.vclock_mode;
-	vdata->cycle_last = tk->clock->cycle_last;
-	vdata->mask = tk->clock->mask;
-	vdata->mult = tk->mult;
-	vdata->shift = tk->shift;
+	vdata->vclock_mode = tk->tkr.clock->archdata.vclock_mode;
+	vdata->cycle_last = tk->tkr.cycle_last;
+	vdata->mask = tk->tkr.mask;
+	vdata->mult = tk->tkr.mult;
+	vdata->shift = tk->tkr.shift;
 
 	vdata->wall_time_sec = tk->xtime_sec;
-	vdata->wall_time_snsec = tk->xtime_nsec;
+	vdata->wall_time_snsec = tk->tkr.xtime_nsec;
 
 	vdata->monotonic_time_sec = tk->xtime_sec
 			+ tk->wall_to_monotonic.tv_sec;
-	vdata->monotonic_time_snsec = tk->xtime_nsec
+	vdata->monotonic_time_snsec = tk->tkr.xtime_nsec
 			+ ((u64)tk->wall_to_monotonic.tv_nsec
-				<< tk->shift);
+				<< tk->tkr.shift);
 	while (vdata->monotonic_time_snsec >=
-			(((u64)NSEC_PER_SEC) << tk->shift)) {
+			(((u64)NSEC_PER_SEC) << tk->tkr.shift)) {
 		vdata->monotonic_time_snsec -=
-			((u64)NSEC_PER_SEC) << tk->shift;
+			((u64)NSEC_PER_SEC) << tk->tkr.shift;
 		vdata->monotonic_time_sec++;
 	}
 
 	vdata->wall_time_coarse_sec = tk->xtime_sec;
-	vdata->wall_time_coarse_nsec = (long)(tk->xtime_nsec >> tk->shift);
+	vdata->wall_time_coarse_nsec = (long)(tk->tkr.xtime_nsec >>
+						tk->tkr.shift);
 
 	vdata->monotonic_time_coarse_sec =
 		vdata->wall_time_coarse_sec + tk->wall_to_monotonic.tv_sec;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index b86d329b953a..ca3d760dd581 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1020,9 +1020,8 @@ struct pvclock_gtod_data {
 		u32 shift;
 	} clock;
 
-	/* open coded 'struct timespec' */
-	u64 monotonic_time_snsec;
-	time_t monotonic_time_sec;
+	u64 boot_ns;
+	u64 nsec_base;
 };
 
 static struct pvclock_gtod_data pvclock_gtod_data;
@@ -1030,27 +1029,21 @@ static struct pvclock_gtod_data pvclock_gtod_data;
 static void update_pvclock_gtod(struct timekeeper *tk)
 {
 	struct pvclock_gtod_data *vdata = &pvclock_gtod_data;
+	u64 boot_ns;
+
+	boot_ns = ktime_to_ns(ktime_add(tk->tkr.base_mono, tk->offs_boot));
 
 	write_seqcount_begin(&vdata->seq);
 
 	/* copy pvclock gtod data */
-	vdata->clock.vclock_mode = tk->clock->archdata.vclock_mode;
-	vdata->clock.cycle_last = tk->clock->cycle_last;
-	vdata->clock.mask = tk->clock->mask;
-	vdata->clock.mult = tk->mult;
-	vdata->clock.shift = tk->shift;
+	vdata->clock.vclock_mode = tk->tkr.clock->archdata.vclock_mode;
+	vdata->clock.cycle_last = tk->tkr.cycle_last;
+	vdata->clock.mask = tk->tkr.mask;
+	vdata->clock.mult = tk->tkr.mult;
+	vdata->clock.shift = tk->tkr.shift;
 
-	vdata->monotonic_time_sec = tk->xtime_sec
-			+ tk->wall_to_monotonic.tv_sec;
-	vdata->monotonic_time_snsec = tk->xtime_nsec
-			+ (tk->wall_to_monotonic.tv_nsec
-				<< tk->shift);
-	while (vdata->monotonic_time_snsec >=
-			(((u64)NSEC_PER_SEC) << tk->shift)) {
-		vdata->monotonic_time_snsec -=
-			((u64)NSEC_PER_SEC) << tk->shift;
-		vdata->monotonic_time_sec++;
-	}
+	vdata->boot_ns = boot_ns;
+	vdata->nsec_base = tk->tkr.xtime_nsec;
 
 	write_seqcount_end(&vdata->seq);
 }
@@ -1145,11 +1138,7 @@ static void kvm_get_time_scale(uint32_t scaled_khz, uint32_t base_khz,
 
 static inline u64 get_kernel_ns(void)
 {
-	struct timespec ts;
-
-	ktime_get_ts(&ts);
-	monotonic_to_bootbased(&ts);
-	return timespec_to_ns(&ts);
+	return ktime_get_boot_ns();
 }
 
 #ifdef CONFIG_X86_64
@@ -1414,23 +1403,22 @@ static inline u64 vgettsc(cycle_t *cycle_now)
 	return v * gtod->clock.mult;
 }
 
-static int do_monotonic(struct timespec *ts, cycle_t *cycle_now)
+static int do_monotonic_boot(s64 *t, cycle_t *cycle_now)
 {
+	struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
 	unsigned long seq;
-	u64 ns;
 	int mode;
-	struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
+	u64 ns;
 
-	ts->tv_nsec = 0;
 	do {
 		seq = read_seqcount_begin(&gtod->seq);
 		mode = gtod->clock.vclock_mode;
-		ts->tv_sec = gtod->monotonic_time_sec;
-		ns = gtod->monotonic_time_snsec;
+		ns = gtod->nsec_base;
 		ns += vgettsc(cycle_now);
 		ns >>= gtod->clock.shift;
+		ns += gtod->boot_ns;
 	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
-	timespec_add_ns(ts, ns);
+	*t = ns;
 
 	return mode;
 }
@@ -1438,19 +1426,11 @@ static int do_monotonic(struct timespec *ts, cycle_t *cycle_now)
 /* returns true if host is using tsc clocksource */
 static bool kvm_get_time_and_clockread(s64 *kernel_ns, cycle_t *cycle_now)
 {
-	struct timespec ts;
-
 	/* checked again under seqlock below */
 	if (pvclock_gtod_data.clock.vclock_mode != VCLOCK_TSC)
 		return false;
 
-	if (do_monotonic(&ts, cycle_now) != VCLOCK_TSC)
-		return false;
-
-	monotonic_to_bootbased(&ts);
-	*kernel_ns = timespec_to_ns(&ts);
-
-	return true;
+	return do_monotonic_boot(kernel_ns, cycle_now) == VCLOCK_TSC;
 }
 #endif
 