author		Linus Torvalds <torvalds@linux-foundation.org>	2016-03-15 15:13:56 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-03-15 15:13:56 -0400
commit		8a284c062ec923c924c79e3b1b5199b8d72904fc (patch)
tree		aa016cb632e01e4b3c989db102137a87adc5b239
parent		208de21477679175384b5dc1e6dcf97bd568e8cb (diff)
parent		6436257b491cc0d456c39330dfc22126148d5ed7 (diff)
Merge branch 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull timer updates from Thomas Gleixner:
"The timer department delivers this time:
- Support for cross clock domain timestamps in the core code plus a
first user. That allows more precise timestamping for PTP and
later for audio and other peripherals.
The ptp/e1000e patches have been acked by the relevant maintainers
and are carried in the timer tree to avoid merge ordering issues.
- Support for unregistering the current clocksource watchdog. That
lifts a limitation for switching clocksources which has been there
from day 1
- The usual pile of fixes and updates to the core and the drivers.
Nothing outstanding and exciting"
* 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (26 commits)
time/timekeeping: Work around false positive GCC warning
e1000e: Adds hardware supported cross timestamp on e1000e nic
ptp: Add PTP_SYS_OFFSET_PRECISE for driver crosstimestamping
x86/tsc: Always Running Timer (ART) correlated clocksource
hrtimer: Revert CLOCK_MONOTONIC_RAW support
time: Add history to cross timestamp interface supporting slower devices
time: Add driver cross timestamp interface for higher precision time synchronization
time: Remove duplicated code in ktime_get_raw_and_real()
time: Add timekeeping snapshot code capturing system time and counter
time: Add cycles to nanoseconds translation
jiffies: Use CLOCKSOURCE_MASK instead of constant
clocksource: Introduce clocksource_freq2mult()
clockevents/drivers/exynos_mct: Implement ->set_state_oneshot_stopped()
clockevents/drivers/arm_global_timer: Implement ->set_state_oneshot_stopped()
clockevents/drivers/arm_arch_timer: Implement ->set_state_oneshot_stopped()
clocksource/drivers/arm_global_timer: Register delay timer
clocksource/drivers/lpc32xx: Support timer-based ARM delay
clocksource/drivers/lpc32xx: Support periodic mode
clocksource/drivers/lpc32xx: Don't use the prescaler counter for clockevents
clocksource/drivers/rockchip: Add err handle for rk_timer_init
...
25 files changed, 728 insertions, 106 deletions
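The PTP pieces in this pull (the new cross_timestamping capability flag and the PTP_SYS_OFFSET_PRECISE ioctl, both visible in the hunks below) are driven from userspace roughly as follows. This is a minimal sketch, not part of the commit; the /dev/ptp0 path and the bare-bones error handling are assumptions.

```c
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/ptp_clock.h>

int main(void)
{
	struct ptp_clock_caps caps;
	struct ptp_sys_offset_precise off = {0};
	int fd = open("/dev/ptp0", O_RDWR);	/* assumed device node */

	if (fd < 0 || ioctl(fd, PTP_CLOCK_GETCAPS, &caps))
		return 1;
	if (!caps.cross_timestamping)
		return 1;			/* driver has no ->getcrosststamp() */

	if (ioctl(fd, PTP_SYS_OFFSET_PRECISE, &off))
		return 1;			/* e.g. -EOPNOTSUPP or a hardware sync timeout */

	printf("device   %lld.%09u\n", (long long)off.device.sec, off.device.nsec);
	printf("realtime %lld.%09u\n", (long long)off.sys_realtime.sec, off.sys_realtime.nsec);
	printf("monoraw  %lld.%09u\n", (long long)off.sys_monoraw.sec, off.sys_monoraw.nsec);
	return 0;
}
```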
diff --git a/Documentation/ptp/testptp.c b/Documentation/ptp/testptp.c
index 6c6247aaa7b9..d99012f41602 100644
--- a/Documentation/ptp/testptp.c
+++ b/Documentation/ptp/testptp.c
@@ -277,13 +277,15 @@ int main(int argc, char *argv[]) | |||
277 | " %d external time stamp channels\n" | 277 | " %d external time stamp channels\n" |
278 | " %d programmable periodic signals\n" | 278 | " %d programmable periodic signals\n" |
279 | " %d pulse per second\n" | 279 | " %d pulse per second\n" |
280 | " %d programmable pins\n", | 280 | " %d programmable pins\n" |
281 | " %d cross timestamping\n", | ||
281 | caps.max_adj, | 282 | caps.max_adj, |
282 | caps.n_alarm, | 283 | caps.n_alarm, |
283 | caps.n_ext_ts, | 284 | caps.n_ext_ts, |
284 | caps.n_per_out, | 285 | caps.n_per_out, |
285 | caps.pps, | 286 | caps.pps, |
286 | caps.n_pins); | 287 | caps.n_pins, |
288 | caps.cross_timestamping); | ||
287 | } | 289 | } |
288 | } | 290 | } |
289 | 291 | ||
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 9e0567f4c081..074b7604bd51 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -85,7 +85,7 @@ | |||
85 | #define X86_FEATURE_P4 ( 3*32+ 7) /* "" P4 */ | 85 | #define X86_FEATURE_P4 ( 3*32+ 7) /* "" P4 */ |
86 | #define X86_FEATURE_CONSTANT_TSC ( 3*32+ 8) /* TSC ticks at a constant rate */ | 86 | #define X86_FEATURE_CONSTANT_TSC ( 3*32+ 8) /* TSC ticks at a constant rate */ |
87 | #define X86_FEATURE_UP ( 3*32+ 9) /* smp kernel running on up */ | 87 | #define X86_FEATURE_UP ( 3*32+ 9) /* smp kernel running on up */ |
88 | /* free, was #define X86_FEATURE_FXSAVE_LEAK ( 3*32+10) * "" FXSAVE leaks FOP/FIP/FOP */ | 88 | #define X86_FEATURE_ART ( 3*32+10) /* Platform has always running timer (ART) */ |
89 | #define X86_FEATURE_ARCH_PERFMON ( 3*32+11) /* Intel Architectural PerfMon */ | 89 | #define X86_FEATURE_ARCH_PERFMON ( 3*32+11) /* Intel Architectural PerfMon */ |
90 | #define X86_FEATURE_PEBS ( 3*32+12) /* Precise-Event Based Sampling */ | 90 | #define X86_FEATURE_PEBS ( 3*32+12) /* Precise-Event Based Sampling */ |
91 | #define X86_FEATURE_BTS ( 3*32+13) /* Branch Trace Store */ | 91 | #define X86_FEATURE_BTS ( 3*32+13) /* Branch Trace Store */ |
diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h
index 6d7c5479bcea..174c4212780a 100644
--- a/arch/x86/include/asm/tsc.h
+++ b/arch/x86/include/asm/tsc.h
@@ -29,6 +29,8 @@ static inline cycles_t get_cycles(void) | |||
29 | return rdtsc(); | 29 | return rdtsc(); |
30 | } | 30 | } |
31 | 31 | ||
32 | extern struct system_counterval_t convert_art_to_tsc(cycle_t art); | ||
33 | |||
32 | extern void tsc_init(void); | 34 | extern void tsc_init(void); |
33 | extern void mark_tsc_unstable(char *reason); | 35 | extern void mark_tsc_unstable(char *reason); |
34 | extern int unsynchronized_tsc(void); | 36 | extern int unsynchronized_tsc(void); |
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 5a6cb4684e0f..56380440d862 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -43,6 +43,11 @@ static DEFINE_STATIC_KEY_FALSE(__use_tsc); | |||
43 | 43 | ||
44 | int tsc_clocksource_reliable; | 44 | int tsc_clocksource_reliable; |
45 | 45 | ||
46 | static u32 art_to_tsc_numerator; | ||
47 | static u32 art_to_tsc_denominator; | ||
48 | static u64 art_to_tsc_offset; | ||
49 | struct clocksource *art_related_clocksource; | ||
50 | |||
46 | /* | 51 | /* |
47 | * Use a ring-buffer like data structure, where a writer advances the head by | 52 | * Use a ring-buffer like data structure, where a writer advances the head by |
48 | * writing a new data entry and a reader advances the tail when it observes a | 53 | * writing a new data entry and a reader advances the tail when it observes a |
@@ -964,6 +969,37 @@ core_initcall(cpufreq_tsc); | |||
964 | 969 | ||
965 | #endif /* CONFIG_CPU_FREQ */ | 970 | #endif /* CONFIG_CPU_FREQ */ |
966 | 971 | ||
972 | #define ART_CPUID_LEAF (0x15) | ||
973 | #define ART_MIN_DENOMINATOR (1) | ||
974 | |||
975 | |||
976 | /* | ||
977 | * If ART is present detect the numerator:denominator to convert to TSC | ||
978 | */ | ||
979 | static void detect_art(void) | ||
980 | { | ||
981 | unsigned int unused[2]; | ||
982 | |||
983 | if (boot_cpu_data.cpuid_level < ART_CPUID_LEAF) | ||
984 | return; | ||
985 | |||
986 | cpuid(ART_CPUID_LEAF, &art_to_tsc_denominator, | ||
987 | &art_to_tsc_numerator, unused, unused+1); | ||
988 | |||
989 | /* Don't enable ART in a VM, non-stop TSC required */ | ||
990 | if (boot_cpu_has(X86_FEATURE_HYPERVISOR) || | ||
991 | !boot_cpu_has(X86_FEATURE_NONSTOP_TSC) || | ||
992 | art_to_tsc_denominator < ART_MIN_DENOMINATOR) | ||
993 | return; | ||
994 | |||
995 | if (rdmsrl_safe(MSR_IA32_TSC_ADJUST, &art_to_tsc_offset)) | ||
996 | return; | ||
997 | |||
998 | /* Make this sticky over multiple CPU init calls */ | ||
999 | setup_force_cpu_cap(X86_FEATURE_ART); | ||
1000 | } | ||
1001 | |||
1002 | |||
967 | /* clocksource code */ | 1003 | /* clocksource code */ |
968 | 1004 | ||
969 | static struct clocksource clocksource_tsc; | 1005 | static struct clocksource clocksource_tsc; |
@@ -1071,6 +1107,25 @@ int unsynchronized_tsc(void) | |||
1071 | return 0; | 1107 | return 0; |
1072 | } | 1108 | } |
1073 | 1109 | ||
1110 | /* | ||
1111 | * Convert ART to TSC given numerator/denominator found in detect_art() | ||
1112 | */ | ||
1113 | struct system_counterval_t convert_art_to_tsc(cycle_t art) | ||
1114 | { | ||
1115 | u64 tmp, res, rem; | ||
1116 | |||
1117 | rem = do_div(art, art_to_tsc_denominator); | ||
1118 | |||
1119 | res = art * art_to_tsc_numerator; | ||
1120 | tmp = rem * art_to_tsc_numerator; | ||
1121 | |||
1122 | do_div(tmp, art_to_tsc_denominator); | ||
1123 | res += tmp + art_to_tsc_offset; | ||
1124 | |||
1125 | return (struct system_counterval_t) {.cs = art_related_clocksource, | ||
1126 | .cycles = res}; | ||
1127 | } | ||
1128 | EXPORT_SYMBOL(convert_art_to_tsc); | ||
1074 | 1129 | ||
1075 | static void tsc_refine_calibration_work(struct work_struct *work); | 1130 | static void tsc_refine_calibration_work(struct work_struct *work); |
1076 | static DECLARE_DELAYED_WORK(tsc_irqwork, tsc_refine_calibration_work); | 1131 | static DECLARE_DELAYED_WORK(tsc_irqwork, tsc_refine_calibration_work); |
@@ -1142,6 +1197,8 @@ static void tsc_refine_calibration_work(struct work_struct *work) | |||
1142 | (unsigned long)tsc_khz % 1000); | 1197 | (unsigned long)tsc_khz % 1000); |
1143 | 1198 | ||
1144 | out: | 1199 | out: |
1200 | if (boot_cpu_has(X86_FEATURE_ART)) | ||
1201 | art_related_clocksource = &clocksource_tsc; | ||
1145 | clocksource_register_khz(&clocksource_tsc, tsc_khz); | 1202 | clocksource_register_khz(&clocksource_tsc, tsc_khz); |
1146 | } | 1203 | } |
1147 | 1204 | ||
@@ -1235,6 +1292,8 @@ void __init tsc_init(void) | |||
1235 | mark_tsc_unstable("TSCs unsynchronized"); | 1292 | mark_tsc_unstable("TSCs unsynchronized"); |
1236 | 1293 | ||
1237 | check_system_tsc_reliable(); | 1294 | check_system_tsc_reliable(); |
1295 | |||
1296 | detect_art(); | ||
1238 | } | 1297 | } |
1239 | 1298 | ||
1240 | #ifdef CONFIG_SMP | 1299 | #ifdef CONFIG_SMP |
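The convert_art_to_tsc() hunk above computes TSC = ART * numerator / denominator + offset, splitting the division around do_div() so the intermediate product cannot overflow 64 bits. Below is a standalone sketch of the same arithmetic; the numerator/denominator values are made up (the real ones come from CPUID leaf 0x15 in detect_art(), and the offset from MSR_IA32_TSC_ADJUST).

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical CPUID.15H ratio and offset; real parts report their own. */
static const uint32_t num = 250, den = 3;
static const uint64_t offset = 0;

/* Same split as convert_art_to_tsc(): (art/den)*num + ((art%den)*num)/den */
static uint64_t art_to_tsc(uint64_t art)
{
	uint64_t quot = art / den;
	uint64_t rem  = art % den;

	return quot * num + (rem * num) / den + offset;
}

int main(void)
{
	printf("%llu\n", (unsigned long long)art_to_tsc(1000000));
	return 0;
}
```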
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 33db7406c0e2..c346be650892 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -160,6 +160,7 @@ config CLKSRC_EFM32 | |||
160 | config CLKSRC_LPC32XX | 160 | config CLKSRC_LPC32XX |
161 | bool "Clocksource for LPC32XX" if COMPILE_TEST | 161 | bool "Clocksource for LPC32XX" if COMPILE_TEST |
162 | depends on GENERIC_CLOCKEVENTS && HAS_IOMEM | 162 | depends on GENERIC_CLOCKEVENTS && HAS_IOMEM |
163 | depends on ARM | ||
163 | select CLKSRC_MMIO | 164 | select CLKSRC_MMIO |
164 | select CLKSRC_OF | 165 | select CLKSRC_OF |
165 | help | 166 | help |
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index c64d543d64bf..f0dd9d42bc7b 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -32,6 +32,14 @@ | |||
32 | #define CNTTIDR 0x08 | 32 | #define CNTTIDR 0x08 |
33 | #define CNTTIDR_VIRT(n) (BIT(1) << ((n) * 4)) | 33 | #define CNTTIDR_VIRT(n) (BIT(1) << ((n) * 4)) |
34 | 34 | ||
35 | #define CNTACR(n) (0x40 + ((n) * 4)) | ||
36 | #define CNTACR_RPCT BIT(0) | ||
37 | #define CNTACR_RVCT BIT(1) | ||
38 | #define CNTACR_RFRQ BIT(2) | ||
39 | #define CNTACR_RVOFF BIT(3) | ||
40 | #define CNTACR_RWVT BIT(4) | ||
41 | #define CNTACR_RWPT BIT(5) | ||
42 | |||
35 | #define CNTVCT_LO 0x08 | 43 | #define CNTVCT_LO 0x08 |
36 | #define CNTVCT_HI 0x0c | 44 | #define CNTVCT_HI 0x0c |
37 | #define CNTFRQ 0x10 | 45 | #define CNTFRQ 0x10 |
@@ -266,10 +274,12 @@ static void __arch_timer_setup(unsigned type, | |||
266 | if (arch_timer_use_virtual) { | 274 | if (arch_timer_use_virtual) { |
267 | clk->irq = arch_timer_ppi[VIRT_PPI]; | 275 | clk->irq = arch_timer_ppi[VIRT_PPI]; |
268 | clk->set_state_shutdown = arch_timer_shutdown_virt; | 276 | clk->set_state_shutdown = arch_timer_shutdown_virt; |
277 | clk->set_state_oneshot_stopped = arch_timer_shutdown_virt; | ||
269 | clk->set_next_event = arch_timer_set_next_event_virt; | 278 | clk->set_next_event = arch_timer_set_next_event_virt; |
270 | } else { | 279 | } else { |
271 | clk->irq = arch_timer_ppi[PHYS_SECURE_PPI]; | 280 | clk->irq = arch_timer_ppi[PHYS_SECURE_PPI]; |
272 | clk->set_state_shutdown = arch_timer_shutdown_phys; | 281 | clk->set_state_shutdown = arch_timer_shutdown_phys; |
282 | clk->set_state_oneshot_stopped = arch_timer_shutdown_phys; | ||
273 | clk->set_next_event = arch_timer_set_next_event_phys; | 283 | clk->set_next_event = arch_timer_set_next_event_phys; |
274 | } | 284 | } |
275 | } else { | 285 | } else { |
@@ -279,10 +289,12 @@ static void __arch_timer_setup(unsigned type, | |||
279 | clk->cpumask = cpu_all_mask; | 289 | clk->cpumask = cpu_all_mask; |
280 | if (arch_timer_mem_use_virtual) { | 290 | if (arch_timer_mem_use_virtual) { |
281 | clk->set_state_shutdown = arch_timer_shutdown_virt_mem; | 291 | clk->set_state_shutdown = arch_timer_shutdown_virt_mem; |
292 | clk->set_state_oneshot_stopped = arch_timer_shutdown_virt_mem; | ||
282 | clk->set_next_event = | 293 | clk->set_next_event = |
283 | arch_timer_set_next_event_virt_mem; | 294 | arch_timer_set_next_event_virt_mem; |
284 | } else { | 295 | } else { |
285 | clk->set_state_shutdown = arch_timer_shutdown_phys_mem; | 296 | clk->set_state_shutdown = arch_timer_shutdown_phys_mem; |
297 | clk->set_state_oneshot_stopped = arch_timer_shutdown_phys_mem; | ||
286 | clk->set_next_event = | 298 | clk->set_next_event = |
287 | arch_timer_set_next_event_phys_mem; | 299 | arch_timer_set_next_event_phys_mem; |
288 | } | 300 | } |
@@ -757,7 +769,6 @@ static void __init arch_timer_mem_init(struct device_node *np) | |||
757 | } | 769 | } |
758 | 770 | ||
759 | cnttidr = readl_relaxed(cntctlbase + CNTTIDR); | 771 | cnttidr = readl_relaxed(cntctlbase + CNTTIDR); |
760 | iounmap(cntctlbase); | ||
761 | 772 | ||
762 | /* | 773 | /* |
763 | * Try to find a virtual capable frame. Otherwise fall back to a | 774 | * Try to find a virtual capable frame. Otherwise fall back to a |
@@ -765,20 +776,31 @@ static void __init arch_timer_mem_init(struct device_node *np) | |||
765 | */ | 776 | */ |
766 | for_each_available_child_of_node(np, frame) { | 777 | for_each_available_child_of_node(np, frame) { |
767 | int n; | 778 | int n; |
779 | u32 cntacr; | ||
768 | 780 | ||
769 | if (of_property_read_u32(frame, "frame-number", &n)) { | 781 | if (of_property_read_u32(frame, "frame-number", &n)) { |
770 | pr_err("arch_timer: Missing frame-number\n"); | 782 | pr_err("arch_timer: Missing frame-number\n"); |
771 | of_node_put(best_frame); | ||
772 | of_node_put(frame); | 783 | of_node_put(frame); |
773 | return; | 784 | goto out; |
774 | } | 785 | } |
775 | 786 | ||
776 | if (cnttidr & CNTTIDR_VIRT(n)) { | 787 | /* Try enabling everything, and see what sticks */ |
788 | cntacr = CNTACR_RFRQ | CNTACR_RWPT | CNTACR_RPCT | | ||
789 | CNTACR_RWVT | CNTACR_RVOFF | CNTACR_RVCT; | ||
790 | writel_relaxed(cntacr, cntctlbase + CNTACR(n)); | ||
791 | cntacr = readl_relaxed(cntctlbase + CNTACR(n)); | ||
792 | |||
793 | if ((cnttidr & CNTTIDR_VIRT(n)) && | ||
794 | !(~cntacr & (CNTACR_RWVT | CNTACR_RVCT))) { | ||
777 | of_node_put(best_frame); | 795 | of_node_put(best_frame); |
778 | best_frame = frame; | 796 | best_frame = frame; |
779 | arch_timer_mem_use_virtual = true; | 797 | arch_timer_mem_use_virtual = true; |
780 | break; | 798 | break; |
781 | } | 799 | } |
800 | |||
801 | if (~cntacr & (CNTACR_RWPT | CNTACR_RPCT)) | ||
802 | continue; | ||
803 | |||
782 | of_node_put(best_frame); | 804 | of_node_put(best_frame); |
783 | best_frame = of_node_get(frame); | 805 | best_frame = of_node_get(frame); |
784 | } | 806 | } |
@@ -786,24 +808,26 @@ static void __init arch_timer_mem_init(struct device_node *np) | |||
786 | base = arch_counter_base = of_iomap(best_frame, 0); | 808 | base = arch_counter_base = of_iomap(best_frame, 0); |
787 | if (!base) { | 809 | if (!base) { |
788 | pr_err("arch_timer: Can't map frame's registers\n"); | 810 | pr_err("arch_timer: Can't map frame's registers\n"); |
789 | of_node_put(best_frame); | 811 | goto out; |
790 | return; | ||
791 | } | 812 | } |
792 | 813 | ||
793 | if (arch_timer_mem_use_virtual) | 814 | if (arch_timer_mem_use_virtual) |
794 | irq = irq_of_parse_and_map(best_frame, 1); | 815 | irq = irq_of_parse_and_map(best_frame, 1); |
795 | else | 816 | else |
796 | irq = irq_of_parse_and_map(best_frame, 0); | 817 | irq = irq_of_parse_and_map(best_frame, 0); |
797 | of_node_put(best_frame); | 818 | |
798 | if (!irq) { | 819 | if (!irq) { |
799 | pr_err("arch_timer: Frame missing %s irq", | 820 | pr_err("arch_timer: Frame missing %s irq", |
800 | arch_timer_mem_use_virtual ? "virt" : "phys"); | 821 | arch_timer_mem_use_virtual ? "virt" : "phys"); |
801 | return; | 822 | goto out; |
802 | } | 823 | } |
803 | 824 | ||
804 | arch_timer_detect_rate(base, np); | 825 | arch_timer_detect_rate(base, np); |
805 | arch_timer_mem_register(base, irq); | 826 | arch_timer_mem_register(base, irq); |
806 | arch_timer_common_init(); | 827 | arch_timer_common_init(); |
828 | out: | ||
829 | iounmap(cntctlbase); | ||
830 | of_node_put(best_frame); | ||
807 | } | 831 | } |
808 | CLOCKSOURCE_OF_DECLARE(armv7_arch_timer_mem, "arm,armv7-timer-mem", | 832 | CLOCKSOURCE_OF_DECLARE(armv7_arch_timer_mem, "arm,armv7-timer-mem", |
809 | arch_timer_mem_init); | 833 | arch_timer_mem_init); |
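The frame-selection change above enables every CNTACR access bit, reads the register back, and keeps a frame only if the bits it needs stuck: `!(~cntacr & mask)` is true exactly when all bits of `mask` are set. A small restatement of that test (bit values copied from the new defines; the helper name is illustrative):

```c
#include <stdbool.h>
#include <stdint.h>

#define CNTACR_RPCT (1u << 0)	/* read physical counter */
#define CNTACR_RVCT (1u << 1)	/* read virtual counter */
#define CNTACR_RWVT (1u << 4)	/* read/write virtual timer */
#define CNTACR_RWPT (1u << 5)	/* read/write physical timer */

/* True only if every bit in mask read back as set. */
static bool frame_has_access(uint32_t cntacr, uint32_t mask)
{
	return !(~cntacr & mask);
}

int main(void)
{
	uint32_t cntacr = CNTACR_RVCT | CNTACR_RWVT;	/* what this frame kept */

	/* A virtual frame needs RWVT+RVCT; a physical fallback needs RWPT+RPCT. */
	return frame_has_access(cntacr, CNTACR_RWVT | CNTACR_RVCT) ? 0 : 1;
}
```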
diff --git a/drivers/clocksource/arm_global_timer.c b/drivers/clocksource/arm_global_timer.c
index d189d8cb69f7..9df0d1699d22 100644
--- a/drivers/clocksource/arm_global_timer.c
+++ b/drivers/clocksource/arm_global_timer.c
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/clockchips.h> | 16 | #include <linux/clockchips.h> |
17 | #include <linux/cpu.h> | 17 | #include <linux/cpu.h> |
18 | #include <linux/clk.h> | 18 | #include <linux/clk.h> |
19 | #include <linux/delay.h> | ||
19 | #include <linux/err.h> | 20 | #include <linux/err.h> |
20 | #include <linux/io.h> | 21 | #include <linux/io.h> |
21 | #include <linux/of.h> | 22 | #include <linux/of.h> |
@@ -174,6 +175,7 @@ static int gt_clockevents_init(struct clock_event_device *clk) | |||
174 | clk->set_state_shutdown = gt_clockevent_shutdown; | 175 | clk->set_state_shutdown = gt_clockevent_shutdown; |
175 | clk->set_state_periodic = gt_clockevent_set_periodic; | 176 | clk->set_state_periodic = gt_clockevent_set_periodic; |
176 | clk->set_state_oneshot = gt_clockevent_shutdown; | 177 | clk->set_state_oneshot = gt_clockevent_shutdown; |
178 | clk->set_state_oneshot_stopped = gt_clockevent_shutdown; | ||
177 | clk->set_next_event = gt_clockevent_set_next_event; | 179 | clk->set_next_event = gt_clockevent_set_next_event; |
178 | clk->cpumask = cpumask_of(cpu); | 180 | clk->cpumask = cpumask_of(cpu); |
179 | clk->rating = 300; | 181 | clk->rating = 300; |
@@ -221,6 +223,21 @@ static u64 notrace gt_sched_clock_read(void) | |||
221 | } | 223 | } |
222 | #endif | 224 | #endif |
223 | 225 | ||
226 | static unsigned long gt_read_long(void) | ||
227 | { | ||
228 | return readl_relaxed(gt_base + GT_COUNTER0); | ||
229 | } | ||
230 | |||
231 | static struct delay_timer gt_delay_timer = { | ||
232 | .read_current_timer = gt_read_long, | ||
233 | }; | ||
234 | |||
235 | static void __init gt_delay_timer_init(void) | ||
236 | { | ||
237 | gt_delay_timer.freq = gt_clk_rate; | ||
238 | register_current_timer_delay(&gt_delay_timer); | ||
239 | } | ||
240 | |||
224 | static void __init gt_clocksource_init(void) | 241 | static void __init gt_clocksource_init(void) |
225 | { | 242 | { |
226 | writel(0, gt_base + GT_CONTROL); | 243 | writel(0, gt_base + GT_CONTROL); |
@@ -317,6 +334,7 @@ static void __init global_timer_of_register(struct device_node *np) | |||
317 | /* Immediately configure the timer on the boot CPU */ | 334 | /* Immediately configure the timer on the boot CPU */ |
318 | gt_clocksource_init(); | 335 | gt_clocksource_init(); |
319 | gt_clockevents_init(this_cpu_ptr(gt_evt)); | 336 | gt_clockevents_init(this_cpu_ptr(gt_evt)); |
337 | gt_delay_timer_init(); | ||
320 | 338 | ||
321 | return; | 339 | return; |
322 | 340 | ||
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index ff44082a0827..be09bc0b5e26 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -313,6 +313,7 @@ static struct clock_event_device mct_comp_device = { | |||
313 | .set_state_periodic = mct_set_state_periodic, | 313 | .set_state_periodic = mct_set_state_periodic, |
314 | .set_state_shutdown = mct_set_state_shutdown, | 314 | .set_state_shutdown = mct_set_state_shutdown, |
315 | .set_state_oneshot = mct_set_state_shutdown, | 315 | .set_state_oneshot = mct_set_state_shutdown, |
316 | .set_state_oneshot_stopped = mct_set_state_shutdown, | ||
316 | .tick_resume = mct_set_state_shutdown, | 317 | .tick_resume = mct_set_state_shutdown, |
317 | }; | 318 | }; |
318 | 319 | ||
@@ -452,6 +453,7 @@ static int exynos4_local_timer_setup(struct mct_clock_event_device *mevt) | |||
452 | evt->set_state_periodic = set_state_periodic; | 453 | evt->set_state_periodic = set_state_periodic; |
453 | evt->set_state_shutdown = set_state_shutdown; | 454 | evt->set_state_shutdown = set_state_shutdown; |
454 | evt->set_state_oneshot = set_state_shutdown; | 455 | evt->set_state_oneshot = set_state_shutdown; |
456 | evt->set_state_oneshot_stopped = set_state_shutdown; | ||
455 | evt->tick_resume = set_state_shutdown; | 457 | evt->tick_resume = set_state_shutdown; |
456 | evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT; | 458 | evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT; |
457 | evt->rating = 450; | 459 | evt->rating = 450; |
diff --git a/drivers/clocksource/rockchip_timer.c b/drivers/clocksource/rockchip_timer.c
index 8c77a529d0d4..b991b288c803 100644
--- a/drivers/clocksource/rockchip_timer.c
+++ b/drivers/clocksource/rockchip_timer.c
@@ -122,23 +122,23 @@ static void __init rk_timer_init(struct device_node *np) | |||
122 | pclk = of_clk_get_by_name(np, "pclk"); | 122 | pclk = of_clk_get_by_name(np, "pclk"); |
123 | if (IS_ERR(pclk)) { | 123 | if (IS_ERR(pclk)) { |
124 | pr_err("Failed to get pclk for '%s'\n", TIMER_NAME); | 124 | pr_err("Failed to get pclk for '%s'\n", TIMER_NAME); |
125 | return; | 125 | goto out_unmap; |
126 | } | 126 | } |
127 | 127 | ||
128 | if (clk_prepare_enable(pclk)) { | 128 | if (clk_prepare_enable(pclk)) { |
129 | pr_err("Failed to enable pclk for '%s'\n", TIMER_NAME); | 129 | pr_err("Failed to enable pclk for '%s'\n", TIMER_NAME); |
130 | return; | 130 | goto out_unmap; |
131 | } | 131 | } |
132 | 132 | ||
133 | timer_clk = of_clk_get_by_name(np, "timer"); | 133 | timer_clk = of_clk_get_by_name(np, "timer"); |
134 | if (IS_ERR(timer_clk)) { | 134 | if (IS_ERR(timer_clk)) { |
135 | pr_err("Failed to get timer clock for '%s'\n", TIMER_NAME); | 135 | pr_err("Failed to get timer clock for '%s'\n", TIMER_NAME); |
136 | return; | 136 | goto out_timer_clk; |
137 | } | 137 | } |
138 | 138 | ||
139 | if (clk_prepare_enable(timer_clk)) { | 139 | if (clk_prepare_enable(timer_clk)) { |
140 | pr_err("Failed to enable timer clock\n"); | 140 | pr_err("Failed to enable timer clock\n"); |
141 | return; | 141 | goto out_timer_clk; |
142 | } | 142 | } |
143 | 143 | ||
144 | bc_timer.freq = clk_get_rate(timer_clk); | 144 | bc_timer.freq = clk_get_rate(timer_clk); |
@@ -146,7 +146,7 @@ static void __init rk_timer_init(struct device_node *np) | |||
146 | irq = irq_of_parse_and_map(np, 0); | 146 | irq = irq_of_parse_and_map(np, 0); |
147 | if (!irq) { | 147 | if (!irq) { |
148 | pr_err("Failed to map interrupts for '%s'\n", TIMER_NAME); | 148 | pr_err("Failed to map interrupts for '%s'\n", TIMER_NAME); |
149 | return; | 149 | goto out_irq; |
150 | } | 150 | } |
151 | 151 | ||
152 | ce->name = TIMER_NAME; | 152 | ce->name = TIMER_NAME; |
@@ -164,10 +164,19 @@ static void __init rk_timer_init(struct device_node *np) | |||
164 | ret = request_irq(irq, rk_timer_interrupt, IRQF_TIMER, TIMER_NAME, ce); | 164 | ret = request_irq(irq, rk_timer_interrupt, IRQF_TIMER, TIMER_NAME, ce); |
165 | if (ret) { | 165 | if (ret) { |
166 | pr_err("Failed to initialize '%s': %d\n", TIMER_NAME, ret); | 166 | pr_err("Failed to initialize '%s': %d\n", TIMER_NAME, ret); |
167 | return; | 167 | goto out_irq; |
168 | } | 168 | } |
169 | 169 | ||
170 | clockevents_config_and_register(ce, bc_timer.freq, 1, UINT_MAX); | 170 | clockevents_config_and_register(ce, bc_timer.freq, 1, UINT_MAX); |
171 | |||
172 | return; | ||
173 | |||
174 | out_irq: | ||
175 | clk_disable_unprepare(timer_clk); | ||
176 | out_timer_clk: | ||
177 | clk_disable_unprepare(pclk); | ||
178 | out_unmap: | ||
179 | iounmap(bc_timer.base); | ||
171 | } | 180 | } |
172 | 181 | ||
173 | CLOCKSOURCE_OF_DECLARE(rk_timer, "rockchip,rk3288-timer", rk_timer_init); | 182 | CLOCKSOURCE_OF_DECLARE(rk_timer, "rockchip,rk3288-timer", rk_timer_init); |
diff --git a/drivers/clocksource/time-lpc32xx.c b/drivers/clocksource/time-lpc32xx.c
index 1316876b487a..daae61e8c820 100644
--- a/drivers/clocksource/time-lpc32xx.c
+++ b/drivers/clocksource/time-lpc32xx.c
@@ -18,6 +18,7 @@ | |||
18 | #include <linux/clk.h> | 18 | #include <linux/clk.h> |
19 | #include <linux/clockchips.h> | 19 | #include <linux/clockchips.h> |
20 | #include <linux/clocksource.h> | 20 | #include <linux/clocksource.h> |
21 | #include <linux/delay.h> | ||
21 | #include <linux/interrupt.h> | 22 | #include <linux/interrupt.h> |
22 | #include <linux/irq.h> | 23 | #include <linux/irq.h> |
23 | #include <linux/kernel.h> | 24 | #include <linux/kernel.h> |
@@ -43,6 +44,7 @@ | |||
43 | struct lpc32xx_clock_event_ddata { | 44 | struct lpc32xx_clock_event_ddata { |
44 | struct clock_event_device evtdev; | 45 | struct clock_event_device evtdev; |
45 | void __iomem *base; | 46 | void __iomem *base; |
47 | u32 ticks_per_jiffy; | ||
46 | }; | 48 | }; |
47 | 49 | ||
48 | /* Needed for the sched clock */ | 50 | /* Needed for the sched clock */ |
@@ -53,6 +55,15 @@ static u64 notrace lpc32xx_read_sched_clock(void) | |||
53 | return readl(clocksource_timer_counter); | 55 | return readl(clocksource_timer_counter); |
54 | } | 56 | } |
55 | 57 | ||
58 | static unsigned long lpc32xx_delay_timer_read(void) | ||
59 | { | ||
60 | return readl(clocksource_timer_counter); | ||
61 | } | ||
62 | |||
63 | static struct delay_timer lpc32xx_delay_timer = { | ||
64 | .read_current_timer = lpc32xx_delay_timer_read, | ||
65 | }; | ||
66 | |||
56 | static int lpc32xx_clkevt_next_event(unsigned long delta, | 67 | static int lpc32xx_clkevt_next_event(unsigned long delta, |
57 | struct clock_event_device *evtdev) | 68 | struct clock_event_device *evtdev) |
58 | { | 69 | { |
@@ -60,14 +71,13 @@ static int lpc32xx_clkevt_next_event(unsigned long delta, | |||
60 | container_of(evtdev, struct lpc32xx_clock_event_ddata, evtdev); | 71 | container_of(evtdev, struct lpc32xx_clock_event_ddata, evtdev); |
61 | 72 | ||
62 | /* | 73 | /* |
63 | * Place timer in reset and program the delta in the prescale | 74 | * Place timer in reset and program the delta in the match |
64 | * register (PR). When the prescale counter matches the value | 75 | * channel 0 (MR0). When the timer counter matches the value |
65 | * in PR the counter register is incremented and the compare | 76 | * in MR0 register the match will trigger an interrupt. |
66 | * match will trigger. After setup the timer is released from | 77 | * After setup the timer is released from reset and enabled. |
67 | * reset and enabled. | ||
68 | */ | 78 | */ |
69 | writel_relaxed(LPC32XX_TIMER_TCR_CRST, ddata->base + LPC32XX_TIMER_TCR); | 79 | writel_relaxed(LPC32XX_TIMER_TCR_CRST, ddata->base + LPC32XX_TIMER_TCR); |
70 | writel_relaxed(delta, ddata->base + LPC32XX_TIMER_PR); | 80 | writel_relaxed(delta, ddata->base + LPC32XX_TIMER_MR0); |
71 | writel_relaxed(LPC32XX_TIMER_TCR_CEN, ddata->base + LPC32XX_TIMER_TCR); | 81 | writel_relaxed(LPC32XX_TIMER_TCR_CEN, ddata->base + LPC32XX_TIMER_TCR); |
72 | 82 | ||
73 | return 0; | 83 | return 0; |
@@ -86,11 +96,39 @@ static int lpc32xx_clkevt_shutdown(struct clock_event_device *evtdev) | |||
86 | 96 | ||
87 | static int lpc32xx_clkevt_oneshot(struct clock_event_device *evtdev) | 97 | static int lpc32xx_clkevt_oneshot(struct clock_event_device *evtdev) |
88 | { | 98 | { |
99 | struct lpc32xx_clock_event_ddata *ddata = | ||
100 | container_of(evtdev, struct lpc32xx_clock_event_ddata, evtdev); | ||
101 | |||
89 | /* | 102 | /* |
90 | * When using oneshot, we must also disable the timer | 103 | * When using oneshot, we must also disable the timer |
91 | * to wait for the first call to set_next_event(). | 104 | * to wait for the first call to set_next_event(). |
92 | */ | 105 | */ |
93 | return lpc32xx_clkevt_shutdown(evtdev); | 106 | writel_relaxed(0, ddata->base + LPC32XX_TIMER_TCR); |
107 | |||
108 | /* Enable interrupt, reset on match and stop on match (MCR). */ | ||
109 | writel_relaxed(LPC32XX_TIMER_MCR_MR0I | LPC32XX_TIMER_MCR_MR0R | | ||
110 | LPC32XX_TIMER_MCR_MR0S, ddata->base + LPC32XX_TIMER_MCR); | ||
111 | return 0; | ||
112 | } | ||
113 | |||
114 | static int lpc32xx_clkevt_periodic(struct clock_event_device *evtdev) | ||
115 | { | ||
116 | struct lpc32xx_clock_event_ddata *ddata = | ||
117 | container_of(evtdev, struct lpc32xx_clock_event_ddata, evtdev); | ||
118 | |||
119 | /* Enable interrupt and reset on match. */ | ||
120 | writel_relaxed(LPC32XX_TIMER_MCR_MR0I | LPC32XX_TIMER_MCR_MR0R, | ||
121 | ddata->base + LPC32XX_TIMER_MCR); | ||
122 | |||
123 | /* | ||
124 | * Place timer in reset and program the delta in the match | ||
125 | * channel 0 (MR0). | ||
126 | */ | ||
127 | writel_relaxed(LPC32XX_TIMER_TCR_CRST, ddata->base + LPC32XX_TIMER_TCR); | ||
128 | writel_relaxed(ddata->ticks_per_jiffy, ddata->base + LPC32XX_TIMER_MR0); | ||
129 | writel_relaxed(LPC32XX_TIMER_TCR_CEN, ddata->base + LPC32XX_TIMER_TCR); | ||
130 | |||
131 | return 0; | ||
94 | } | 132 | } |
95 | 133 | ||
96 | static irqreturn_t lpc32xx_clock_event_handler(int irq, void *dev_id) | 134 | static irqreturn_t lpc32xx_clock_event_handler(int irq, void *dev_id) |
@@ -108,11 +146,13 @@ static irqreturn_t lpc32xx_clock_event_handler(int irq, void *dev_id) | |||
108 | static struct lpc32xx_clock_event_ddata lpc32xx_clk_event_ddata = { | 146 | static struct lpc32xx_clock_event_ddata lpc32xx_clk_event_ddata = { |
109 | .evtdev = { | 147 | .evtdev = { |
110 | .name = "lpc3220 clockevent", | 148 | .name = "lpc3220 clockevent", |
111 | .features = CLOCK_EVT_FEAT_ONESHOT, | 149 | .features = CLOCK_EVT_FEAT_ONESHOT | |
150 | CLOCK_EVT_FEAT_PERIODIC, | ||
112 | .rating = 300, | 151 | .rating = 300, |
113 | .set_next_event = lpc32xx_clkevt_next_event, | 152 | .set_next_event = lpc32xx_clkevt_next_event, |
114 | .set_state_shutdown = lpc32xx_clkevt_shutdown, | 153 | .set_state_shutdown = lpc32xx_clkevt_shutdown, |
115 | .set_state_oneshot = lpc32xx_clkevt_oneshot, | 154 | .set_state_oneshot = lpc32xx_clkevt_oneshot, |
155 | .set_state_periodic = lpc32xx_clkevt_periodic, | ||
116 | }, | 156 | }, |
117 | }; | 157 | }; |
118 | 158 | ||
@@ -162,6 +202,8 @@ static int __init lpc32xx_clocksource_init(struct device_node *np) | |||
162 | } | 202 | } |
163 | 203 | ||
164 | clocksource_timer_counter = base + LPC32XX_TIMER_TC; | 204 | clocksource_timer_counter = base + LPC32XX_TIMER_TC; |
205 | lpc32xx_delay_timer.freq = rate; | ||
206 | register_current_timer_delay(&lpc32xx_delay_timer); | ||
165 | sched_clock_register(lpc32xx_read_sched_clock, 32, rate); | 207 | sched_clock_register(lpc32xx_read_sched_clock, 32, rate); |
166 | 208 | ||
167 | return 0; | 209 | return 0; |
@@ -210,18 +252,16 @@ static int __init lpc32xx_clockevent_init(struct device_node *np) | |||
210 | 252 | ||
211 | /* | 253 | /* |
212 | * Disable timer and clear any pending interrupt (IR) on match | 254 | * Disable timer and clear any pending interrupt (IR) on match |
213 | * channel 0 (MR0). Configure a compare match value of 1 on MR0 | 255 | * channel 0 (MR0). Clear the prescaler as it's not used. |
214 | * and enable interrupt, reset on match and stop on match (MCR). | ||
215 | */ | 256 | */ |
216 | writel_relaxed(0, base + LPC32XX_TIMER_TCR); | 257 | writel_relaxed(0, base + LPC32XX_TIMER_TCR); |
258 | writel_relaxed(0, base + LPC32XX_TIMER_PR); | ||
217 | writel_relaxed(0, base + LPC32XX_TIMER_CTCR); | 259 | writel_relaxed(0, base + LPC32XX_TIMER_CTCR); |
218 | writel_relaxed(LPC32XX_TIMER_IR_MR0INT, base + LPC32XX_TIMER_IR); | 260 | writel_relaxed(LPC32XX_TIMER_IR_MR0INT, base + LPC32XX_TIMER_IR); |
219 | writel_relaxed(1, base + LPC32XX_TIMER_MR0); | ||
220 | writel_relaxed(LPC32XX_TIMER_MCR_MR0I | LPC32XX_TIMER_MCR_MR0R | | ||
221 | LPC32XX_TIMER_MCR_MR0S, base + LPC32XX_TIMER_MCR); | ||
222 | 261 | ||
223 | rate = clk_get_rate(clk); | 262 | rate = clk_get_rate(clk); |
224 | lpc32xx_clk_event_ddata.base = base; | 263 | lpc32xx_clk_event_ddata.base = base; |
264 | lpc32xx_clk_event_ddata.ticks_per_jiffy = DIV_ROUND_CLOSEST(rate, HZ); | ||
225 | clockevents_config_and_register(&lpc32xx_clk_event_ddata.evtdev, | 265 | clockevents_config_and_register(&lpc32xx_clk_event_ddata.evtdev, |
226 | rate, 1, -1); | 266 | rate, 1, -1); |
227 | 267 | ||
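For the new periodic mode, MR0 is loaded with ticks_per_jiffy = DIV_ROUND_CLOSEST(rate, HZ), i.e. one match (and timer reset) per kernel tick. A quick worked example; the 13 MHz rate and HZ=100 are only illustrative.

```c
#include <stdio.h>

#define HZ 100					/* assumed tick rate */
/* Unsigned-only simplification of the kernel's DIV_ROUND_CLOSEST(). */
#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
	unsigned long rate = 13000000;		/* example timer clock */

	/* 130000 ticks per jiffy -> one interrupt every 10 ms at HZ=100 */
	printf("ticks_per_jiffy = %lu\n", DIV_ROUND_CLOSEST(rate, HZ));
	return 0;
}
```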
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index fa593dd3efe1..3772f3ac956e 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -83,6 +83,15 @@ config E1000E | |||
83 | To compile this driver as a module, choose M here. The module | 83 | To compile this driver as a module, choose M here. The module |
84 | will be called e1000e. | 84 | will be called e1000e. |
85 | 85 | ||
86 | config E1000E_HWTS | ||
87 | bool "Support HW cross-timestamp on PCH devices" | ||
88 | default y | ||
89 | depends on E1000E && X86 | ||
90 | ---help--- | ||
91 | Say Y to enable hardware supported cross-timestamping on PCH | ||
92 | devices. The cross-timestamp is available through the PTP clock | ||
93 | driver precise cross-timestamp ioctl (PTP_SYS_OFFSET_PRECISE). | ||
94 | |||
86 | config IGB | 95 | config IGB |
87 | tristate "Intel(R) 82575/82576 PCI-Express Gigabit Ethernet support" | 96 | tristate "Intel(R) 82575/82576 PCI-Express Gigabit Ethernet support" |
88 | depends on PCI | 97 | depends on PCI |
diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h
index f7c7804d79e5..0641c0098738 100644
--- a/drivers/net/ethernet/intel/e1000e/defines.h
+++ b/drivers/net/ethernet/intel/e1000e/defines.h
@@ -528,6 +528,11 @@ | |||
528 | #define E1000_RXCW_C 0x20000000 /* Receive config */ | 528 | #define E1000_RXCW_C 0x20000000 /* Receive config */ |
529 | #define E1000_RXCW_SYNCH 0x40000000 /* Receive config synch */ | 529 | #define E1000_RXCW_SYNCH 0x40000000 /* Receive config synch */ |
530 | 530 | ||
531 | /* HH Time Sync */ | ||
532 | #define E1000_TSYNCTXCTL_MAX_ALLOWED_DLY_MASK 0x0000F000 /* max delay */ | ||
533 | #define E1000_TSYNCTXCTL_SYNC_COMP 0x40000000 /* sync complete */ | ||
534 | #define E1000_TSYNCTXCTL_START_SYNC 0x80000000 /* initiate sync */ | ||
535 | |||
531 | #define E1000_TSYNCTXCTL_VALID 0x00000001 /* Tx timestamp valid */ | 536 | #define E1000_TSYNCTXCTL_VALID 0x00000001 /* Tx timestamp valid */ |
532 | #define E1000_TSYNCTXCTL_ENABLED 0x00000010 /* enable Tx timestamping */ | 537 | #define E1000_TSYNCTXCTL_ENABLED 0x00000010 /* enable Tx timestamping */ |
533 | 538 | ||
diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
index 25a0ad5102d6..e2ff3ef75d5d 100644
--- a/drivers/net/ethernet/intel/e1000e/ptp.c
+++ b/drivers/net/ethernet/intel/e1000e/ptp.c
@@ -26,6 +26,12 @@ | |||
26 | 26 | ||
27 | #include "e1000.h" | 27 | #include "e1000.h" |
28 | 28 | ||
29 | #ifdef CONFIG_E1000E_HWTS | ||
30 | #include <linux/clocksource.h> | ||
31 | #include <linux/ktime.h> | ||
32 | #include <asm/tsc.h> | ||
33 | #endif | ||
34 | |||
29 | /** | 35 | /** |
30 | * e1000e_phc_adjfreq - adjust the frequency of the hardware clock | 36 | * e1000e_phc_adjfreq - adjust the frequency of the hardware clock |
31 | * @ptp: ptp clock structure | 37 | * @ptp: ptp clock structure |
@@ -98,6 +104,78 @@ static int e1000e_phc_adjtime(struct ptp_clock_info *ptp, s64 delta) | |||
98 | return 0; | 104 | return 0; |
99 | } | 105 | } |
100 | 106 | ||
107 | #ifdef CONFIG_E1000E_HWTS | ||
108 | #define MAX_HW_WAIT_COUNT (3) | ||
109 | |||
110 | /** | ||
111 | * e1000e_phc_get_syncdevicetime - Callback given to timekeeping code reads system/device registers | ||
112 | * @device: current device time | ||
113 | * @system: system counter value read synchronously with device time | ||
114 | * @ctx: context provided by timekeeping code | ||
115 | * | ||
116 | * Read device and system (ART) clock simultaneously and return the corrected | ||
117 | * clock values in ns. | ||
118 | **/ | ||
119 | static int e1000e_phc_get_syncdevicetime(ktime_t *device, | ||
120 | struct system_counterval_t *system, | ||
121 | void *ctx) | ||
122 | { | ||
123 | struct e1000_adapter *adapter = (struct e1000_adapter *)ctx; | ||
124 | struct e1000_hw *hw = &adapter->hw; | ||
125 | unsigned long flags; | ||
126 | int i; | ||
127 | u32 tsync_ctrl; | ||
128 | cycle_t dev_cycles; | ||
129 | cycle_t sys_cycles; | ||
130 | |||
131 | tsync_ctrl = er32(TSYNCTXCTL); | ||
132 | tsync_ctrl |= E1000_TSYNCTXCTL_START_SYNC | | ||
133 | E1000_TSYNCTXCTL_MAX_ALLOWED_DLY_MASK; | ||
134 | ew32(TSYNCTXCTL, tsync_ctrl); | ||
135 | for (i = 0; i < MAX_HW_WAIT_COUNT; ++i) { | ||
136 | udelay(1); | ||
137 | tsync_ctrl = er32(TSYNCTXCTL); | ||
138 | if (tsync_ctrl & E1000_TSYNCTXCTL_SYNC_COMP) | ||
139 | break; | ||
140 | } | ||
141 | |||
142 | if (i == MAX_HW_WAIT_COUNT) | ||
143 | return -ETIMEDOUT; | ||
144 | |||
145 | dev_cycles = er32(SYSSTMPH); | ||
146 | dev_cycles <<= 32; | ||
147 | dev_cycles |= er32(SYSSTMPL); | ||
148 | spin_lock_irqsave(&adapter->systim_lock, flags); | ||
149 | *device = ns_to_ktime(timecounter_cyc2time(&adapter->tc, dev_cycles)); | ||
150 | spin_unlock_irqrestore(&adapter->systim_lock, flags); | ||
151 | |||
152 | sys_cycles = er32(PLTSTMPH); | ||
153 | sys_cycles <<= 32; | ||
154 | sys_cycles |= er32(PLTSTMPL); | ||
155 | *system = convert_art_to_tsc(sys_cycles); | ||
156 | |||
157 | return 0; | ||
158 | } | ||
159 | |||
160 | /** | ||
161 | * e1000e_phc_getsynctime - Reads the current system/device cross timestamp | ||
162 | * @ptp: ptp clock structure | ||
163 | * @cts: structure containing timestamp | ||
164 | * | ||
165 | * Read device and system (ART) clock simultaneously and return the scaled | ||
166 | * clock values in ns. | ||
167 | **/ | ||
168 | static int e1000e_phc_getcrosststamp(struct ptp_clock_info *ptp, | ||
169 | struct system_device_crosststamp *xtstamp) | ||
170 | { | ||
171 | struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter, | ||
172 | ptp_clock_info); | ||
173 | |||
174 | return get_device_system_crosststamp(e1000e_phc_get_syncdevicetime, | ||
175 | adapter, NULL, xtstamp); | ||
176 | } | ||
177 | #endif/*CONFIG_E1000E_HWTS*/ | ||
178 | |||
101 | /** | 179 | /** |
102 | * e1000e_phc_gettime - Reads the current time from the hardware clock | 180 | * e1000e_phc_gettime - Reads the current time from the hardware clock |
103 | * @ptp: ptp clock structure | 181 | * @ptp: ptp clock structure |
@@ -236,6 +314,13 @@ void e1000e_ptp_init(struct e1000_adapter *adapter) | |||
236 | break; | 314 | break; |
237 | } | 315 | } |
238 | 316 | ||
317 | #ifdef CONFIG_E1000E_HWTS | ||
318 | /* CPU must have ART and GBe must be from Sunrise Point or greater */ | ||
319 | if (hw->mac.type >= e1000_pch_spt && boot_cpu_has(X86_FEATURE_ART)) | ||
320 | adapter->ptp_clock_info.getcrosststamp = | ||
321 | e1000e_phc_getcrosststamp; | ||
322 | #endif/*CONFIG_E1000E_HWTS*/ | ||
323 | |||
239 | INIT_DELAYED_WORK(&adapter->systim_overflow_work, | 324 | INIT_DELAYED_WORK(&adapter->systim_overflow_work, |
240 | e1000e_systim_overflow_work); | 325 | e1000e_systim_overflow_work); |
241 | 326 | ||
diff --git a/drivers/net/ethernet/intel/e1000e/regs.h b/drivers/net/ethernet/intel/e1000e/regs.h
index 1d5e0b77062a..0cb4d365e5ad 100644
--- a/drivers/net/ethernet/intel/e1000e/regs.h
+++ b/drivers/net/ethernet/intel/e1000e/regs.h
@@ -245,6 +245,10 @@ | |||
245 | #define E1000_SYSTIML 0x0B600 /* System time register Low - RO */ | 245 | #define E1000_SYSTIML 0x0B600 /* System time register Low - RO */ |
246 | #define E1000_SYSTIMH 0x0B604 /* System time register High - RO */ | 246 | #define E1000_SYSTIMH 0x0B604 /* System time register High - RO */ |
247 | #define E1000_TIMINCA 0x0B608 /* Increment attributes register - RW */ | 247 | #define E1000_TIMINCA 0x0B608 /* Increment attributes register - RW */ |
248 | #define E1000_SYSSTMPL 0x0B648 /* HH Timesync system stamp low register */ | ||
249 | #define E1000_SYSSTMPH 0x0B64C /* HH Timesync system stamp hi register */ | ||
250 | #define E1000_PLTSTMPL 0x0B640 /* HH Timesync platform stamp low register */ | ||
251 | #define E1000_PLTSTMPH 0x0B644 /* HH Timesync platform stamp hi register */ | ||
248 | #define E1000_RXMTRL 0x0B634 /* Time sync Rx EtherType and Msg Type - RW */ | 252 | #define E1000_RXMTRL 0x0B634 /* Time sync Rx EtherType and Msg Type - RW */ |
249 | #define E1000_RXUDP 0x0B638 /* Time Sync Rx UDP Port - RW */ | 253 | #define E1000_RXUDP 0x0B638 /* Time Sync Rx UDP Port - RW */ |
250 | 254 | ||
diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
index da7bae991552..579fd65299a0 100644
--- a/drivers/ptp/ptp_chardev.c
+++ b/drivers/ptp/ptp_chardev.c
@@ -22,6 +22,7 @@ | |||
22 | #include <linux/poll.h> | 22 | #include <linux/poll.h> |
23 | #include <linux/sched.h> | 23 | #include <linux/sched.h> |
24 | #include <linux/slab.h> | 24 | #include <linux/slab.h> |
25 | #include <linux/timekeeping.h> | ||
25 | 26 | ||
26 | #include "ptp_private.h" | 27 | #include "ptp_private.h" |
27 | 28 | ||
@@ -120,11 +121,13 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg) | |||
120 | struct ptp_clock_caps caps; | 121 | struct ptp_clock_caps caps; |
121 | struct ptp_clock_request req; | 122 | struct ptp_clock_request req; |
122 | struct ptp_sys_offset *sysoff = NULL; | 123 | struct ptp_sys_offset *sysoff = NULL; |
124 | struct ptp_sys_offset_precise precise_offset; | ||
123 | struct ptp_pin_desc pd; | 125 | struct ptp_pin_desc pd; |
124 | struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock); | 126 | struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock); |
125 | struct ptp_clock_info *ops = ptp->info; | 127 | struct ptp_clock_info *ops = ptp->info; |
126 | struct ptp_clock_time *pct; | 128 | struct ptp_clock_time *pct; |
127 | struct timespec64 ts; | 129 | struct timespec64 ts; |
130 | struct system_device_crosststamp xtstamp; | ||
128 | int enable, err = 0; | 131 | int enable, err = 0; |
129 | unsigned int i, pin_index; | 132 | unsigned int i, pin_index; |
130 | 133 | ||
@@ -138,6 +141,7 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg) | |||
138 | caps.n_per_out = ptp->info->n_per_out; | 141 | caps.n_per_out = ptp->info->n_per_out; |
139 | caps.pps = ptp->info->pps; | 142 | caps.pps = ptp->info->pps; |
140 | caps.n_pins = ptp->info->n_pins; | 143 | caps.n_pins = ptp->info->n_pins; |
144 | caps.cross_timestamping = ptp->info->getcrosststamp != NULL; | ||
141 | if (copy_to_user((void __user *)arg, &caps, sizeof(caps))) | 145 | if (copy_to_user((void __user *)arg, &caps, sizeof(caps))) |
142 | err = -EFAULT; | 146 | err = -EFAULT; |
143 | break; | 147 | break; |
@@ -180,6 +184,29 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg) | |||
180 | err = ops->enable(ops, &req, enable); | 184 | err = ops->enable(ops, &req, enable); |
181 | break; | 185 | break; |
182 | 186 | ||
187 | case PTP_SYS_OFFSET_PRECISE: | ||
188 | if (!ptp->info->getcrosststamp) { | ||
189 | err = -EOPNOTSUPP; | ||
190 | break; | ||
191 | } | ||
192 | err = ptp->info->getcrosststamp(ptp->info, &xtstamp); | ||
193 | if (err) | ||
194 | break; | ||
195 | |||
196 | ts = ktime_to_timespec64(xtstamp.device); | ||
197 | precise_offset.device.sec = ts.tv_sec; | ||
198 | precise_offset.device.nsec = ts.tv_nsec; | ||
199 | ts = ktime_to_timespec64(xtstamp.sys_realtime); | ||
200 | precise_offset.sys_realtime.sec = ts.tv_sec; | ||
201 | precise_offset.sys_realtime.nsec = ts.tv_nsec; | ||
202 | ts = ktime_to_timespec64(xtstamp.sys_monoraw); | ||
203 | precise_offset.sys_monoraw.sec = ts.tv_sec; | ||
204 | precise_offset.sys_monoraw.nsec = ts.tv_nsec; | ||
205 | if (copy_to_user((void __user *)arg, &precise_offset, | ||
206 | sizeof(precise_offset))) | ||
207 | err = -EFAULT; | ||
208 | break; | ||
209 | |||
183 | case PTP_SYS_OFFSET: | 210 | case PTP_SYS_OFFSET: |
184 | sysoff = kmalloc(sizeof(*sysoff), GFP_KERNEL); | 211 | sysoff = kmalloc(sizeof(*sysoff), GFP_KERNEL); |
185 | if (!sysoff) { | 212 | if (!sysoff) { |
diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h
index bdcf358dfce2..0d442e34c349 100644
--- a/include/linux/clockchips.h
+++ b/include/linux/clockchips.h
@@ -190,9 +190,9 @@ extern void clockevents_config_and_register(struct clock_event_device *dev, | |||
190 | extern int clockevents_update_freq(struct clock_event_device *ce, u32 freq); | 190 | extern int clockevents_update_freq(struct clock_event_device *ce, u32 freq); |
191 | 191 | ||
192 | static inline void | 192 | static inline void |
193 | clockevents_calc_mult_shift(struct clock_event_device *ce, u32 freq, u32 minsec) | 193 | clockevents_calc_mult_shift(struct clock_event_device *ce, u32 freq, u32 maxsec) |
194 | { | 194 | { |
195 | return clocks_calc_mult_shift(&ce->mult, &ce->shift, NSEC_PER_SEC, freq, minsec); | 195 | return clocks_calc_mult_shift(&ce->mult, &ce->shift, NSEC_PER_SEC, freq, maxsec); |
196 | } | 196 | } |
197 | 197 | ||
198 | extern void clockevents_suspend(void); | 198 | extern void clockevents_suspend(void); |
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index 6013021a3b39..a307bf62974f 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -118,6 +118,23 @@ struct clocksource { | |||
118 | /* simplify initialization of mask field */ | 118 | /* simplify initialization of mask field */ |
119 | #define CLOCKSOURCE_MASK(bits) (cycle_t)((bits) < 64 ? ((1ULL<<(bits))-1) : -1) | 119 | #define CLOCKSOURCE_MASK(bits) (cycle_t)((bits) < 64 ? ((1ULL<<(bits))-1) : -1) |
120 | 120 | ||
121 | static inline u32 clocksource_freq2mult(u32 freq, u32 shift_constant, u64 from) | ||
122 | { | ||
123 | /* freq = cyc/from | ||
124 | * mult/2^shift = ns/cyc | ||
125 | * mult = ns/cyc * 2^shift | ||
126 | * mult = from/freq * 2^shift | ||
127 | * mult = from * 2^shift / freq | ||
128 | * mult = (from<<shift) / freq | ||
129 | */ | ||
130 | u64 tmp = ((u64)from) << shift_constant; | ||
131 | |||
132 | tmp += freq/2; /* round for do_div */ | ||
133 | do_div(tmp, freq); | ||
134 | |||
135 | return (u32)tmp; | ||
136 | } | ||
137 | |||
121 | /** | 138 | /** |
122 | * clocksource_khz2mult - calculates mult from khz and shift | 139 | * clocksource_khz2mult - calculates mult from khz and shift |
123 | * @khz: Clocksource frequency in KHz | 140 | * @khz: Clocksource frequency in KHz |
@@ -128,19 +145,7 @@ struct clocksource { | |||
128 | */ | 145 | */ |
129 | static inline u32 clocksource_khz2mult(u32 khz, u32 shift_constant) | 146 | static inline u32 clocksource_khz2mult(u32 khz, u32 shift_constant) |
130 | { | 147 | { |
131 | /* khz = cyc/(Million ns) | 148 | return clocksource_freq2mult(khz, shift_constant, NSEC_PER_MSEC); |
132 | * mult/2^shift = ns/cyc | ||
133 | * mult = ns/cyc * 2^shift | ||
134 | * mult = 1Million/khz * 2^shift | ||
135 | * mult = 1000000 * 2^shift / khz | ||
136 | * mult = (1000000<<shift) / khz | ||
137 | */ | ||
138 | u64 tmp = ((u64)1000000) << shift_constant; | ||
139 | |||
140 | tmp += khz/2; /* round for do_div */ | ||
141 | do_div(tmp, khz); | ||
142 | |||
143 | return (u32)tmp; | ||
144 | } | 149 | } |
145 | 150 | ||
146 | /** | 151 | /** |
@@ -154,19 +159,7 @@ static inline u32 clocksource_khz2mult(u32 khz, u32 shift_constant) | |||
154 | */ | 159 | */ |
155 | static inline u32 clocksource_hz2mult(u32 hz, u32 shift_constant) | 160 | static inline u32 clocksource_hz2mult(u32 hz, u32 shift_constant) |
156 | { | 161 | { |
157 | /* hz = cyc/(Billion ns) | 162 | return clocksource_freq2mult(hz, shift_constant, NSEC_PER_SEC); |
158 | * mult/2^shift = ns/cyc | ||
159 | * mult = ns/cyc * 2^shift | ||
160 | * mult = 1Billion/hz * 2^shift | ||
161 | * mult = 1000000000 * 2^shift / hz | ||
162 | * mult = (1000000000<<shift) / hz | ||
163 | */ | ||
164 | u64 tmp = ((u64)1000000000) << shift_constant; | ||
165 | |||
166 | tmp += hz/2; /* round for do_div */ | ||
167 | do_div(tmp, hz); | ||
168 | |||
169 | return (u32)tmp; | ||
170 | } | 163 | } |
171 | 164 | ||
172 | /** | 165 | /** |
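The new clocksource_freq2mult() helper computes mult = (from << shift) / freq with rounding, so that later ns = (cycles * mult) >> shift; clocksource_khz2mult() is now freq2mult with from = NSEC_PER_MSEC and clocksource_hz2mult() with from = NSEC_PER_SEC. A userspace sketch of the same math with a hypothetical 1 MHz clock and a shift of 20:

```c
#include <stdint.h>
#include <stdio.h>

/* Userspace copy of the clocksource_freq2mult() arithmetic. */
static uint32_t freq2mult(uint32_t freq, uint32_t shift, uint64_t from)
{
	uint64_t tmp = from << shift;

	tmp += freq / 2;			/* round for the division */
	return (uint32_t)(tmp / freq);
}

int main(void)
{
	/* clocksource_khz2mult(1000, 20): 1 MHz clock, from = NSEC_PER_MSEC. */
	uint32_t mult = freq2mult(1000, 20, 1000000);
	uint64_t cycles = 5000;

	/* 5000 cycles of a 1 MHz clock are 5,000,000 ns. */
	printf("mult=%u ns=%llu\n", mult,
	       (unsigned long long)((cycles * mult) >> 20));
	return 0;
}
```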
diff --git a/include/linux/pps_kernel.h b/include/linux/pps_kernel.h
index 54bf1484d41f..35ac903956c7 100644
--- a/include/linux/pps_kernel.h
+++ b/include/linux/pps_kernel.h
@@ -111,22 +111,17 @@ static inline void timespec_to_pps_ktime(struct pps_ktime *kt, | |||
111 | kt->nsec = ts.tv_nsec; | 111 | kt->nsec = ts.tv_nsec; |
112 | } | 112 | } |
113 | 113 | ||
114 | #ifdef CONFIG_NTP_PPS | ||
115 | |||
116 | static inline void pps_get_ts(struct pps_event_time *ts) | 114 | static inline void pps_get_ts(struct pps_event_time *ts) |
117 | { | 115 | { |
118 | ktime_get_raw_and_real_ts64(&ts->ts_raw, &ts->ts_real); | 116 | struct system_time_snapshot snap; |
119 | } | ||
120 | 117 | ||
121 | #else /* CONFIG_NTP_PPS */ | 118 | ktime_get_snapshot(&snap); |
122 | 119 | ts->ts_real = ktime_to_timespec64(snap.real); | |
123 | static inline void pps_get_ts(struct pps_event_time *ts) | 120 | #ifdef CONFIG_NTP_PPS |
124 | { | 121 | ts->ts_raw = ktime_to_timespec64(snap.raw); |
125 | ktime_get_real_ts64(&ts->ts_real); | 122 | #endif |
126 | } | 123 | } |
127 | 124 | ||
128 | #endif /* CONFIG_NTP_PPS */ | ||
129 | |||
130 | /* Subtract known time delay from PPS event time(s) */ | 125 | /* Subtract known time delay from PPS event time(s) */ |
131 | static inline void pps_sub_ts(struct pps_event_time *ts, struct timespec64 delta) | 126 | static inline void pps_sub_ts(struct pps_event_time *ts, struct timespec64 delta) |
132 | { | 127 | { |
diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h
index b8b73066d137..6b15e168148a 100644
--- a/include/linux/ptp_clock_kernel.h
+++ b/include/linux/ptp_clock_kernel.h
@@ -38,6 +38,7 @@ struct ptp_clock_request { | |||
38 | }; | 38 | }; |
39 | }; | 39 | }; |
40 | 40 | ||
41 | struct system_device_crosststamp; | ||
41 | /** | 42 | /** |
42 | * struct ptp_clock_info - decribes a PTP hardware clock | 43 | * struct ptp_clock_info - decribes a PTP hardware clock |
43 | * | 44 | * |
@@ -67,6 +68,11 @@ struct ptp_clock_request { | |||
67 | * @gettime64: Reads the current time from the hardware clock. | 68 | * @gettime64: Reads the current time from the hardware clock. |
68 | * parameter ts: Holds the result. | 69 | * parameter ts: Holds the result. |
69 | * | 70 | * |
71 | * @getcrosststamp: Reads the current time from the hardware clock and | ||
72 | * system clock simultaneously. | ||
73 | * parameter cts: Contains timestamp (device,system) pair, | ||
74 | * where system time is realtime and monotonic. | ||
75 | * | ||
70 | * @settime64: Set the current time on the hardware clock. | 76 | * @settime64: Set the current time on the hardware clock. |
71 | * parameter ts: Time value to set. | 77 | * parameter ts: Time value to set. |
72 | * | 78 | * |
@@ -105,6 +111,8 @@ struct ptp_clock_info { | |||
105 | int (*adjfreq)(struct ptp_clock_info *ptp, s32 delta); | 111 | int (*adjfreq)(struct ptp_clock_info *ptp, s32 delta); |
106 | int (*adjtime)(struct ptp_clock_info *ptp, s64 delta); | 112 | int (*adjtime)(struct ptp_clock_info *ptp, s64 delta); |
107 | int (*gettime64)(struct ptp_clock_info *ptp, struct timespec64 *ts); | 113 | int (*gettime64)(struct ptp_clock_info *ptp, struct timespec64 *ts); |
114 | int (*getcrosststamp)(struct ptp_clock_info *ptp, | ||
115 | struct system_device_crosststamp *cts); | ||
108 | int (*settime64)(struct ptp_clock_info *p, const struct timespec64 *ts); | 116 | int (*settime64)(struct ptp_clock_info *p, const struct timespec64 *ts); |
109 | int (*enable)(struct ptp_clock_info *ptp, | 117 | int (*enable)(struct ptp_clock_info *ptp, |
110 | struct ptp_clock_request *request, int on); | 118 | struct ptp_clock_request *request, int on); |
diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h
index 25247220b4b7..e88005459035 100644
--- a/include/linux/timekeeper_internal.h
+++ b/include/linux/timekeeper_internal.h
@@ -50,6 +50,7 @@ struct tk_read_base { | |||
50 | * @offs_tai: Offset clock monotonic -> clock tai | 50 | * @offs_tai: Offset clock monotonic -> clock tai |
51 | * @tai_offset: The current UTC to TAI offset in seconds | 51 | * @tai_offset: The current UTC to TAI offset in seconds |
52 | * @clock_was_set_seq: The sequence number of clock was set events | 52 | * @clock_was_set_seq: The sequence number of clock was set events |
53 | * @cs_was_changed_seq: The sequence number of clocksource change events | ||
53 | * @next_leap_ktime: CLOCK_MONOTONIC time value of a pending leap-second | 54 | * @next_leap_ktime: CLOCK_MONOTONIC time value of a pending leap-second |
54 | * @raw_time: Monotonic raw base time in timespec64 format | 55 | * @raw_time: Monotonic raw base time in timespec64 format |
55 | * @cycle_interval: Number of clock cycles in one NTP interval | 56 | * @cycle_interval: Number of clock cycles in one NTP interval |
@@ -91,6 +92,7 @@ struct timekeeper { | |||
91 | ktime_t offs_tai; | 92 | ktime_t offs_tai; |
92 | s32 tai_offset; | 93 | s32 tai_offset; |
93 | unsigned int clock_was_set_seq; | 94 | unsigned int clock_was_set_seq; |
95 | u8 cs_was_changed_seq; | ||
94 | ktime_t next_leap_ktime; | 96 | ktime_t next_leap_ktime; |
95 | struct timespec64 raw_time; | 97 | struct timespec64 raw_time; |
96 | 98 | ||
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h
index ec89d846324c..96f37bee3bc1 100644
--- a/include/linux/timekeeping.h
+++ b/include/linux/timekeeping.h
@@ -267,6 +267,64 @@ extern void ktime_get_raw_and_real_ts64(struct timespec64 *ts_raw, | |||
267 | struct timespec64 *ts_real); | 267 | struct timespec64 *ts_real); |
268 | 268 | ||
269 | /* | 269 | /* |
270 | * struct system_time_snapshot - simultaneous raw/real time capture with | ||
271 | * counter value | ||
272 | * @cycles: Clocksource counter value to produce the system times | ||
273 | * @real: Realtime system time | ||
274 | * @raw: Monotonic raw system time | ||
275 | * @clock_was_set_seq: The sequence number of clock was set events | ||
276 | * @cs_was_changed_seq: The sequence number of clocksource change events | ||
277 | */ | ||
278 | struct system_time_snapshot { | ||
279 | cycle_t cycles; | ||
280 | ktime_t real; | ||
281 | ktime_t raw; | ||
282 | unsigned int clock_was_set_seq; | ||
283 | u8 cs_was_changed_seq; | ||
284 | }; | ||
285 | |||
286 | /* | ||
287 | * struct system_device_crosststamp - system/device cross-timestamp | ||
288 | * (syncronized capture) | ||
289 | * @device: Device time | ||
290 | * @sys_realtime: Realtime simultaneous with device time | ||
291 | * @sys_monoraw: Monotonic raw simultaneous with device time | ||
292 | */ | ||
293 | struct system_device_crosststamp { | ||
294 | ktime_t device; | ||
295 | ktime_t sys_realtime; | ||
296 | ktime_t sys_monoraw; | ||
297 | }; | ||
298 | |||
299 | /* | ||
300 | * struct system_counterval_t - system counter value with the pointer to the | ||
301 | * corresponding clocksource | ||
302 | * @cycles: System counter value | ||
303 | * @cs: Clocksource corresponding to system counter value. Used by | ||
304 | * timekeeping code to verify comparibility of two cycle values | ||
305 | */ | ||
306 | struct system_counterval_t { | ||
307 | cycle_t cycles; | ||
308 | struct clocksource *cs; | ||
309 | }; | ||
310 | |||
311 | /* | ||
312 | * Get cross timestamp between system clock and device clock | ||
313 | */ | ||
314 | extern int get_device_system_crosststamp( | ||
315 | int (*get_time_fn)(ktime_t *device_time, | ||
316 | struct system_counterval_t *system_counterval, | ||
317 | void *ctx), | ||
318 | void *ctx, | ||
319 | struct system_time_snapshot *history, | ||
320 | struct system_device_crosststamp *xtstamp); | ||
321 | |||
322 | /* | ||
323 | * Simultaneously snapshot realtime and monotonic raw clocks | ||
324 | */ | ||
325 | extern void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot); | ||
326 | |||
327 | /* | ||
270 | * Persistent clock related interfaces | 328 | * Persistent clock related interfaces |
271 | */ | 329 | */ |
272 | extern int persistent_clock_is_local; | 330 | extern int persistent_clock_is_local; |
diff --git a/include/uapi/linux/ptp_clock.h b/include/uapi/linux/ptp_clock.h
index f0b7bfe5da92..ac6dded80ffa 100644
--- a/include/uapi/linux/ptp_clock.h
+++ b/include/uapi/linux/ptp_clock.h
@@ -51,7 +51,9 @@ struct ptp_clock_caps { | |||
51 | int n_per_out; /* Number of programmable periodic signals. */ | 51 | int n_per_out; /* Number of programmable periodic signals. */ |
52 | int pps; /* Whether the clock supports a PPS callback. */ | 52 | int pps; /* Whether the clock supports a PPS callback. */ |
53 | int n_pins; /* Number of input/output pins. */ | 53 | int n_pins; /* Number of input/output pins. */ |
54 | int rsv[14]; /* Reserved for future use. */ | 54 | /* Whether the clock supports precise system-device cross timestamps */ |
55 | int cross_timestamping; | ||
56 | int rsv[13]; /* Reserved for future use. */ | ||
55 | }; | 57 | }; |
56 | 58 | ||
57 | struct ptp_extts_request { | 59 | struct ptp_extts_request { |
@@ -81,6 +83,13 @@ struct ptp_sys_offset { | |||
81 | struct ptp_clock_time ts[2 * PTP_MAX_SAMPLES + 1]; | 83 | struct ptp_clock_time ts[2 * PTP_MAX_SAMPLES + 1]; |
82 | }; | 84 | }; |
83 | 85 | ||
86 | struct ptp_sys_offset_precise { | ||
87 | struct ptp_clock_time device; | ||
88 | struct ptp_clock_time sys_realtime; | ||
89 | struct ptp_clock_time sys_monoraw; | ||
90 | unsigned int rsv[4]; /* Reserved for future use. */ | ||
91 | }; | ||
92 | |||
84 | enum ptp_pin_function { | 93 | enum ptp_pin_function { |
85 | PTP_PF_NONE, | 94 | PTP_PF_NONE, |
86 | PTP_PF_EXTTS, | 95 | PTP_PF_EXTTS, |
@@ -124,6 +133,8 @@ struct ptp_pin_desc { | |||
124 | #define PTP_SYS_OFFSET _IOW(PTP_CLK_MAGIC, 5, struct ptp_sys_offset) | 133 | #define PTP_SYS_OFFSET _IOW(PTP_CLK_MAGIC, 5, struct ptp_sys_offset) |
125 | #define PTP_PIN_GETFUNC _IOWR(PTP_CLK_MAGIC, 6, struct ptp_pin_desc) | 134 | #define PTP_PIN_GETFUNC _IOWR(PTP_CLK_MAGIC, 6, struct ptp_pin_desc) |
126 | #define PTP_PIN_SETFUNC _IOW(PTP_CLK_MAGIC, 7, struct ptp_pin_desc) | 135 | #define PTP_PIN_SETFUNC _IOW(PTP_CLK_MAGIC, 7, struct ptp_pin_desc) |
136 | #define PTP_SYS_OFFSET_PRECISE \ | ||
137 | _IOWR(PTP_CLK_MAGIC, 8, struct ptp_sys_offset_precise) | ||
127 | 138 | ||
128 | struct ptp_extts_event { | 139 | struct ptp_extts_event { |
129 | struct ptp_clock_time t; /* Time event occurred. */ | 140 |
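From userspace the new capability is reached through the PTP character device. A minimal sketch, assuming /dev/ptp0 and a driver that advertises cross_timestamping in its capabilities (the comment on the capability check is an assumption about driver support, not text from this series):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ptp_clock.h>

int main(void)
{
	struct ptp_clock_caps caps;
	struct ptp_sys_offset_precise pct;
	int fd = open("/dev/ptp0", O_RDWR);	/* device node is an assumption */

	if (fd < 0)
		return 1;
	if (ioctl(fd, PTP_CLOCK_GETCAPS, &caps) || !caps.cross_timestamping)
		goto out;			/* no precise cross timestamp support */
	if (!ioctl(fd, PTP_SYS_OFFSET_PRECISE, &pct))
		printf("device %lld.%09u realtime %lld.%09u monoraw %lld.%09u\n",
		       (long long)pct.device.sec, pct.device.nsec,
		       (long long)pct.sys_realtime.sec, pct.sys_realtime.nsec,
		       (long long)pct.sys_monoraw.sec, pct.sys_monoraw.nsec);
out:
	close(fd);
	return 0;
}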
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c index 664de539299b..56ece145a814 100644 --- a/kernel/time/clocksource.c +++ b/kernel/time/clocksource.c | |||
@@ -323,13 +323,42 @@ static void clocksource_enqueue_watchdog(struct clocksource *cs) | |||
323 | /* cs is a watchdog. */ | 323 | /* cs is a watchdog. */ |
324 | if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) | 324 | if (cs->flags & CLOCK_SOURCE_IS_CONTINUOUS) |
325 | cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES; | 325 | cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES; |
326 | } | ||
327 | spin_unlock_irqrestore(&watchdog_lock, flags); | ||
328 | } | ||
329 | |||
330 | static void clocksource_select_watchdog(bool fallback) | ||
331 | { | ||
332 | struct clocksource *cs, *old_wd; | ||
333 | unsigned long flags; | ||
334 | |||
335 | spin_lock_irqsave(&watchdog_lock, flags); | ||
336 | /* save current watchdog */ | ||
337 | old_wd = watchdog; | ||
338 | if (fallback) | ||
339 | watchdog = NULL; | ||
340 | |||
341 | list_for_each_entry(cs, &clocksource_list, list) { | ||
342 | /* cs is a clocksource to be watched. */ | ||
343 | if (cs->flags & CLOCK_SOURCE_MUST_VERIFY) | ||
344 | continue; | ||
345 | |||
346 | /* Skip current if we were requested for a fallback. */ | ||
347 | if (fallback && cs == old_wd) | ||
348 | continue; | ||
349 | |||
326 | /* Pick the best watchdog. */ | 350 | /* Pick the best watchdog. */ |
327 | if (!watchdog || cs->rating > watchdog->rating) { | 351 | if (!watchdog || cs->rating > watchdog->rating) |
328 | watchdog = cs; | 352 | watchdog = cs; |
329 | /* Reset watchdog cycles */ | ||
330 | clocksource_reset_watchdog(); | ||
331 | } | ||
332 | } | 353 | } |
354 | /* If we failed to find a fallback restore the old one. */ | ||
355 | if (!watchdog) | ||
356 | watchdog = old_wd; | ||
357 | |||
358 | /* If we changed the watchdog we need to reset cycles. */ | ||
359 | if (watchdog != old_wd) | ||
360 | clocksource_reset_watchdog(); | ||
361 | |||
333 | /* Check if the watchdog timer needs to be started. */ | 362 | /* Check if the watchdog timer needs to be started. */ |
334 | clocksource_start_watchdog(); | 363 | clocksource_start_watchdog(); |
335 | spin_unlock_irqrestore(&watchdog_lock, flags); | 364 | spin_unlock_irqrestore(&watchdog_lock, flags); |
@@ -404,6 +433,7 @@ static void clocksource_enqueue_watchdog(struct clocksource *cs) | |||
404 | cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES; | 433 | cs->flags |= CLOCK_SOURCE_VALID_FOR_HRES; |
405 | } | 434 | } |
406 | 435 | ||
436 | static void clocksource_select_watchdog(bool fallback) { } | ||
407 | static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { } | 437 | static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { } |
408 | static inline void clocksource_resume_watchdog(void) { } | 438 | static inline void clocksource_resume_watchdog(void) { } |
409 | static inline int __clocksource_watchdog_kthread(void) { return 0; } | 439 | static inline int __clocksource_watchdog_kthread(void) { return 0; } |
@@ -736,6 +766,7 @@ int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq) | |||
736 | clocksource_enqueue(cs); | 766 | clocksource_enqueue(cs); |
737 | clocksource_enqueue_watchdog(cs); | 767 | clocksource_enqueue_watchdog(cs); |
738 | clocksource_select(); | 768 | clocksource_select(); |
769 | clocksource_select_watchdog(false); | ||
739 | mutex_unlock(&clocksource_mutex); | 770 | mutex_unlock(&clocksource_mutex); |
740 | return 0; | 771 | return 0; |
741 | } | 772 | } |
@@ -758,6 +789,7 @@ void clocksource_change_rating(struct clocksource *cs, int rating) | |||
758 | mutex_lock(&clocksource_mutex); | 789 | mutex_lock(&clocksource_mutex); |
759 | __clocksource_change_rating(cs, rating); | 790 | __clocksource_change_rating(cs, rating); |
760 | clocksource_select(); | 791 | clocksource_select(); |
792 | clocksource_select_watchdog(false); | ||
761 | mutex_unlock(&clocksource_mutex); | 793 | mutex_unlock(&clocksource_mutex); |
762 | } | 794 | } |
763 | EXPORT_SYMBOL(clocksource_change_rating); | 795 | EXPORT_SYMBOL(clocksource_change_rating); |
@@ -767,12 +799,12 @@ EXPORT_SYMBOL(clocksource_change_rating); | |||
767 | */ | 799 | */ |
768 | static int clocksource_unbind(struct clocksource *cs) | 800 | static int clocksource_unbind(struct clocksource *cs) |
769 | { | 801 | { |
770 | /* | 802 | if (clocksource_is_watchdog(cs)) { |
771 | * I really can't convince myself to support this on hardware | 803 | /* Select and try to install a replacement watchdog. */ |
772 | * designed by lobotomized monkeys. | 804 | clocksource_select_watchdog(true); |
773 | */ | 805 | if (clocksource_is_watchdog(cs)) |
774 | if (clocksource_is_watchdog(cs)) | 806 | return -EBUSY; |
775 | return -EBUSY; | 807 | } |
776 | 808 | ||
777 | if (cs == curr_clocksource) { | 809 | if (cs == curr_clocksource) { |
778 | /* Select and try to install a replacement clock source */ | 810 | /* Select and try to install a replacement clock source */ |
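The effect of the watchdog rework is easiest to see from a clocksource driver's point of view: unregistering a clocksource that happens to be acting as the watchdog no longer fails outright, because clocksource_unbind() now tries to install a replacement watchdog first. A hedged sketch, with foo_cs_read() and the 1 MHz frequency made up for illustration:

#include <linux/clocksource.h>
#include <linux/init.h>

static cycle_t foo_cs_read(struct clocksource *cs)
{
	return 0;	/* read the hardware counter here */
}

static struct clocksource foo_cs = {
	.name	= "foo",
	.rating	= 300,
	.read	= foo_cs_read,
	.mask	= CLOCKSOURCE_MASK(32),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

static int __init foo_timer_init(void)
{
	return clocksource_register_hz(&foo_cs, 1000000);	/* 1 MHz, illustrative */
}

static void __exit foo_timer_exit(void)
{
	/*
	 * Before this merge this could fail with -EBUSY whenever foo_cs had
	 * been picked as the watchdog; now a fallback watchdog is selected
	 * first, so the clocksource can be torn down.
	 */
	clocksource_unregister(&foo_cs);
}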
diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c index 347fecf86a3f..555e21f7b966 100644 --- a/kernel/time/jiffies.c +++ b/kernel/time/jiffies.c | |||
@@ -68,7 +68,7 @@ static struct clocksource clocksource_jiffies = { | |||
68 | .name = "jiffies", | 68 | .name = "jiffies", |
69 | .rating = 1, /* lowest valid rating*/ | 69 | .rating = 1, /* lowest valid rating*/ |
70 | .read = jiffies_read, | 70 | .read = jiffies_read, |
71 | .mask = 0xffffffff, /*32bits*/ | 71 | .mask = CLOCKSOURCE_MASK(32), |
72 | .mult = NSEC_PER_JIFFY << JIFFIES_SHIFT, /* details above */ | 72 | .mult = NSEC_PER_JIFFY << JIFFIES_SHIFT, /* details above */ |
73 | .shift = JIFFIES_SHIFT, | 73 | .shift = JIFFIES_SHIFT, |
74 | .max_cycles = 10, | 74 | .max_cycles = 10, |
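The jiffies change is purely cosmetic. A small build-time sanity check of the equivalence (not from this series):

#include <linux/bug.h>
#include <linux/clocksource.h>

static inline void jiffies_mask_check(void)
{
	/* CLOCKSOURCE_MASK(32) expands to the same 32-bit mask as the old literal */
	BUILD_BUG_ON(CLOCKSOURCE_MASK(32) != 0xffffffffULL);
}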
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 34b4cedfa80d..9c629bbed572 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c | |||
@@ -233,6 +233,7 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock) | |||
233 | u64 tmp, ntpinterval; | 233 | u64 tmp, ntpinterval; |
234 | struct clocksource *old_clock; | 234 | struct clocksource *old_clock; |
235 | 235 | ||
236 | ++tk->cs_was_changed_seq; | ||
236 | old_clock = tk->tkr_mono.clock; | 237 | old_clock = tk->tkr_mono.clock; |
237 | tk->tkr_mono.clock = clock; | 238 | tk->tkr_mono.clock = clock; |
238 | tk->tkr_mono.read = clock->read; | 239 | tk->tkr_mono.read = clock->read; |
@@ -298,17 +299,34 @@ u32 (*arch_gettimeoffset)(void) = default_arch_gettimeoffset; | |||
298 | static inline u32 arch_gettimeoffset(void) { return 0; } | 299 | static inline u32 arch_gettimeoffset(void) { return 0; } |
299 | #endif | 300 | #endif |
300 | 301 | ||
302 | static inline s64 timekeeping_delta_to_ns(struct tk_read_base *tkr, | ||
303 | cycle_t delta) | ||
304 | { | ||
305 | s64 nsec; | ||
306 | |||
307 | nsec = delta * tkr->mult + tkr->xtime_nsec; | ||
308 | nsec >>= tkr->shift; | ||
309 | |||
310 | /* If arch requires, add in get_arch_timeoffset() */ | ||
311 | return nsec + arch_gettimeoffset(); | ||
312 | } | ||
313 | |||
301 | static inline s64 timekeeping_get_ns(struct tk_read_base *tkr) | 314 | static inline s64 timekeeping_get_ns(struct tk_read_base *tkr) |
302 | { | 315 | { |
303 | cycle_t delta; | 316 | cycle_t delta; |
304 | s64 nsec; | ||
305 | 317 | ||
306 | delta = timekeeping_get_delta(tkr); | 318 | delta = timekeeping_get_delta(tkr); |
319 | return timekeeping_delta_to_ns(tkr, delta); | ||
320 | } | ||
307 | 321 | ||
308 | nsec = (delta * tkr->mult + tkr->xtime_nsec) >> tkr->shift; | 322 | static inline s64 timekeeping_cycles_to_ns(struct tk_read_base *tkr, |
323 | cycle_t cycles) | ||
324 | { | ||
325 | cycle_t delta; | ||
309 | 326 | ||
310 | /* If arch requires, add in get_arch_timeoffset() */ | 327 | /* calculate the delta since the last update_wall_time */ |
311 | return nsec + arch_gettimeoffset(); | 328 | delta = clocksource_delta(cycles, tkr->cycle_last, tkr->mask); |
329 | return timekeeping_delta_to_ns(tkr, delta); | ||
312 | } | 330 | } |
313 | 331 | ||
314 | /** | 332 | /** |
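The refactoring above splits the delta-to-nanoseconds arithmetic out of timekeeping_get_ns() so the same conversion can be applied to an arbitrary counter value, which the cross-timestamp code further down needs. A standalone userspace sketch of that arithmetic, with made-up mult/shift values (real ones come from the clocksource):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t mask = 0xffffffffffffffffULL;	/* full 64-bit counter */
	uint64_t cycle_last = 1000000;		/* counter at last update_wall_time() */
	uint64_t cycles = 1024000;		/* counter value to convert */
	uint32_t mult = 1 << 22, shift = 22;	/* exactly 1 ns per cycle, illustrative */
	uint64_t xtime_nsec = 0;		/* accumulated fractional ns << shift */

	/* clocksource_delta() + timekeeping_delta_to_ns() rolled into one */
	uint64_t delta = (cycles - cycle_last) & mask;
	uint64_t nsec = (delta * mult + xtime_nsec) >> shift;

	printf("%llu cycles -> %llu ns since the last update\n",
	       (unsigned long long)delta, (unsigned long long)nsec);
	return 0;
}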
@@ -857,44 +875,262 @@ time64_t __ktime_get_real_seconds(void) | |||
857 | return tk->xtime_sec; | 875 | return tk->xtime_sec; |
858 | } | 876 | } |
859 | 877 | ||
878 | /** | ||
879 | * ktime_get_snapshot - snapshots the realtime/monotonic raw clocks with counter | ||
880 | * @systime_snapshot: pointer to struct receiving the system time snapshot | ||
881 | */ | ||
882 | void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot) | ||
883 | { | ||
884 | struct timekeeper *tk = &tk_core.timekeeper; | ||
885 | unsigned long seq; | ||
886 | ktime_t base_raw; | ||
887 | ktime_t base_real; | ||
888 | s64 nsec_raw; | ||
889 | s64 nsec_real; | ||
890 | cycle_t now; | ||
860 | 891 | ||
861 | #ifdef CONFIG_NTP_PPS | 892 | WARN_ON_ONCE(timekeeping_suspended); |
893 | |||
894 | do { | ||
895 | seq = read_seqcount_begin(&tk_core.seq); | ||
896 | |||
897 | now = tk->tkr_mono.read(tk->tkr_mono.clock); | ||
898 | systime_snapshot->cs_was_changed_seq = tk->cs_was_changed_seq; | ||
899 | systime_snapshot->clock_was_set_seq = tk->clock_was_set_seq; | ||
900 | base_real = ktime_add(tk->tkr_mono.base, | ||
901 | tk_core.timekeeper.offs_real); | ||
902 | base_raw = tk->tkr_raw.base; | ||
903 | nsec_real = timekeeping_cycles_to_ns(&tk->tkr_mono, now); | ||
904 | nsec_raw = timekeeping_cycles_to_ns(&tk->tkr_raw, now); | ||
905 | } while (read_seqcount_retry(&tk_core.seq, seq)); | ||
906 | |||
907 | systime_snapshot->cycles = now; | ||
908 | systime_snapshot->real = ktime_add_ns(base_real, nsec_real); | ||
909 | systime_snapshot->raw = ktime_add_ns(base_raw, nsec_raw); | ||
910 | } | ||
911 | EXPORT_SYMBOL_GPL(ktime_get_snapshot); | ||
912 | |||
913 | /* Scale base by mult/div checking for overflow */ | ||
914 | static int scale64_check_overflow(u64 mult, u64 div, u64 *base) | ||
915 | { | ||
916 | u64 tmp, rem; | ||
917 | |||
918 | tmp = div64_u64_rem(*base, div, &rem); | ||
919 | |||
920 | if (((int)sizeof(u64)*8 - fls64(mult) < fls64(tmp)) || | ||
921 | ((int)sizeof(u64)*8 - fls64(mult) < fls64(rem))) | ||
922 | return -EOVERFLOW; | ||
923 | tmp *= mult; | ||
924 | rem *= mult; | ||
925 | |||
926 | do_div(rem, div); | ||
927 | *base = tmp + rem; | ||
928 | return 0; | ||
929 | } | ||
862 | 930 | ||
863 | /** | 931 | /** |
864 | * ktime_get_raw_and_real_ts64 - get day and raw monotonic time in timespec format | 932 | * adjust_historical_crosststamp - adjust crosstimestamp previous to current interval |
865 | * @ts_raw: pointer to the timespec to be set to raw monotonic time | 933 | * @history: Snapshot representing start of history |
866 | * @ts_real: pointer to the timespec to be set to the time of day | 934 | * @partial_history_cycles: Cycle offset into history (fractional part) |
935 | * @total_history_cycles: Total history length in cycles | ||
936 | * @discontinuity: True indicates the clock was set during the history period | ||
937 | * @ts: Cross timestamp that should be adjusted using | ||
938 | * partial/total ratio | ||
867 | * | 939 | * |
868 | * This function reads both the time of day and raw monotonic time at the | 940 | * Helper function used by get_device_system_crosststamp() to correct the |
869 | * same time atomically and stores the resulting timestamps in timespec | 941 | * crosstimestamp corresponding to the start of the current interval to the |
870 | * format. | 942 | * system counter value (timestamp point) provided by the driver. The |
943 | * total_history_* quantities are the total history starting at the provided | ||
944 | * reference point and ending at the start of the current interval. The cycle | ||
945 | * count between the driver timestamp point and the start of the current | ||
946 | * interval is partial_history_cycles. | ||
871 | */ | 947 | */ |
872 | void ktime_get_raw_and_real_ts64(struct timespec64 *ts_raw, struct timespec64 *ts_real) | 948 | static int adjust_historical_crosststamp(struct system_time_snapshot *history, |
949 | cycle_t partial_history_cycles, | ||
950 | cycle_t total_history_cycles, | ||
951 | bool discontinuity, | ||
952 | struct system_device_crosststamp *ts) | ||
873 | { | 953 | { |
874 | struct timekeeper *tk = &tk_core.timekeeper; | 954 | struct timekeeper *tk = &tk_core.timekeeper; |
875 | unsigned long seq; | 955 | u64 corr_raw, corr_real; |
876 | s64 nsecs_raw, nsecs_real; | 956 | bool interp_forward; |
957 | int ret; | ||
877 | 958 | ||
878 | WARN_ON_ONCE(timekeeping_suspended); | 959 | if (total_history_cycles == 0 || partial_history_cycles == 0) |
960 | return 0; | ||
961 | |||
962 | /* Interpolate shortest distance from beginning or end of history */ | ||
963 | interp_forward = partial_history_cycles > total_history_cycles/2 ? | ||
964 | true : false; | ||
965 | partial_history_cycles = interp_forward ? | ||
966 | total_history_cycles - partial_history_cycles : | ||
967 | partial_history_cycles; | ||
968 | |||
969 | /* | ||
970 | * Scale the monotonic raw time delta by: | ||
971 | * partial_history_cycles / total_history_cycles | ||
972 | */ | ||
973 | corr_raw = (u64)ktime_to_ns( | ||
974 | ktime_sub(ts->sys_monoraw, history->raw)); | ||
975 | ret = scale64_check_overflow(partial_history_cycles, | ||
976 | total_history_cycles, &corr_raw); | ||
977 | if (ret) | ||
978 | return ret; | ||
979 | |||
980 | /* | ||
981 | * If there is a discontinuity in the history, scale monotonic raw | ||
982 | * correction by: | ||
983 | * mult(real)/mult(raw) yielding the realtime correction | ||
984 | * Otherwise, calculate the realtime correction similar to monotonic | ||
985 | * raw calculation | ||
986 | */ | ||
987 | if (discontinuity) { | ||
988 | corr_real = mul_u64_u32_div | ||
989 | (corr_raw, tk->tkr_mono.mult, tk->tkr_raw.mult); | ||
990 | } else { | ||
991 | corr_real = (u64)ktime_to_ns( | ||
992 | ktime_sub(ts->sys_realtime, history->real)); | ||
993 | ret = scale64_check_overflow(partial_history_cycles, | ||
994 | total_history_cycles, &corr_real); | ||
995 | if (ret) | ||
996 | return ret; | ||
997 | } | ||
998 | |||
999 | /* Fixup monotonic raw and real time values | ||
1000 | if (interp_forward) { | ||
1001 | ts->sys_monoraw = ktime_add_ns(history->raw, corr_raw); | ||
1002 | ts->sys_realtime = ktime_add_ns(history->real, corr_real); | ||
1003 | } else { | ||
1004 | ts->sys_monoraw = ktime_sub_ns(ts->sys_monoraw, corr_raw); | ||
1005 | ts->sys_realtime = ktime_sub_ns(ts->sys_realtime, corr_real); | ||
1006 | } | ||
1007 | |||
1008 | return 0; | ||
1009 | } | ||
1010 | |||
1011 | /* | ||
1012 | * cycle_between - true if test occurs chronologically between before and after | ||
1013 | */ | ||
1014 | static bool cycle_between(cycle_t before, cycle_t test, cycle_t after) | ||
1015 | { | ||
1016 | if (test > before && test < after) | ||
1017 | return true; | ||
1018 | if (test < before && before > after) | ||
1019 | return true; | ||
1020 | return false; | ||
1021 | } | ||
1022 | |||
1023 | /** | ||
1024 | * get_device_system_crosststamp - Synchronously capture system/device timestamp | ||
1025 | * @get_time_fn: Callback to get simultaneous device time and | ||
1026 | * system counter from the device driver | ||
1027 | * @ctx: Context passed to get_time_fn() | ||
1028 | * @history_begin: Historical reference point used to interpolate system | ||
1029 | * time when counter provided by the driver is before the current interval | ||
1030 | * @xtstamp: Receives simultaneously captured system and device time | ||
1031 | * | ||
1032 | * Reads a timestamp from a device and correlates it to system time | ||
1033 | */ | ||
1034 | int get_device_system_crosststamp(int (*get_time_fn) | ||
1035 | (ktime_t *device_time, | ||
1036 | struct system_counterval_t *sys_counterval, | ||
1037 | void *ctx), | ||
1038 | void *ctx, | ||
1039 | struct system_time_snapshot *history_begin, | ||
1040 | struct system_device_crosststamp *xtstamp) | ||
1041 | { | ||
1042 | struct system_counterval_t system_counterval; | ||
1043 | struct timekeeper *tk = &tk_core.timekeeper; | ||
1044 | cycle_t cycles, now, interval_start; | ||
1045 | unsigned int clock_was_set_seq = 0; | ||
1046 | ktime_t base_real, base_raw; | ||
1047 | s64 nsec_real, nsec_raw; | ||
1048 | u8 cs_was_changed_seq; | ||
1049 | unsigned long seq; | ||
1050 | bool do_interp; | ||
1051 | int ret; | ||
879 | 1052 | ||
880 | do { | 1053 | do { |
881 | seq = read_seqcount_begin(&tk_core.seq); | 1054 | seq = read_seqcount_begin(&tk_core.seq); |
1055 | /* | ||
1056 | * Try to synchronously capture device time and a system | ||
1057 | * counter value calling back into the device driver | ||
1058 | */ | ||
1059 | ret = get_time_fn(&xtstamp->device, &system_counterval, ctx); | ||
1060 | if (ret) | ||
1061 | return ret; | ||
1062 | |||
1063 | /* | ||
1064 | * Verify that the clocksource associated with the captured | ||
1065 | * system counter value is the same as the currently installed | ||
1066 | * timekeeper clocksource | ||
1067 | */ | ||
1068 | if (tk->tkr_mono.clock != system_counterval.cs) | ||
1069 | return -ENODEV; | ||
1070 | cycles = system_counterval.cycles; | ||
882 | 1071 | ||
883 | *ts_raw = tk->raw_time; | 1072 | /* |
884 | ts_real->tv_sec = tk->xtime_sec; | 1073 | * Check whether the system counter value provided by the |
885 | ts_real->tv_nsec = 0; | 1074 | * device driver is on the current timekeeping interval. |
1075 | */ | ||
1076 | now = tk->tkr_mono.read(tk->tkr_mono.clock); | ||
1077 | interval_start = tk->tkr_mono.cycle_last; | ||
1078 | if (!cycle_between(interval_start, cycles, now)) { | ||
1079 | clock_was_set_seq = tk->clock_was_set_seq; | ||
1080 | cs_was_changed_seq = tk->cs_was_changed_seq; | ||
1081 | cycles = interval_start; | ||
1082 | do_interp = true; | ||
1083 | } else { | ||
1084 | do_interp = false; | ||
1085 | } | ||
886 | 1086 | ||
887 | nsecs_raw = timekeeping_get_ns(&tk->tkr_raw); | 1087 | base_real = ktime_add(tk->tkr_mono.base, |
888 | nsecs_real = timekeeping_get_ns(&tk->tkr_mono); | 1088 | tk_core.timekeeper.offs_real); |
1089 | base_raw = tk->tkr_raw.base; | ||
889 | 1090 | ||
1091 | nsec_real = timekeeping_cycles_to_ns(&tk->tkr_mono, | ||
1092 | system_counterval.cycles); | ||
1093 | nsec_raw = timekeeping_cycles_to_ns(&tk->tkr_raw, | ||
1094 | system_counterval.cycles); | ||
890 | } while (read_seqcount_retry(&tk_core.seq, seq)); | 1095 | } while (read_seqcount_retry(&tk_core.seq, seq)); |
891 | 1096 | ||
892 | timespec64_add_ns(ts_raw, nsecs_raw); | 1097 | xtstamp->sys_realtime = ktime_add_ns(base_real, nsec_real); |
893 | timespec64_add_ns(ts_real, nsecs_real); | 1098 | xtstamp->sys_monoraw = ktime_add_ns(base_raw, nsec_raw); |
894 | } | ||
895 | EXPORT_SYMBOL(ktime_get_raw_and_real_ts64); | ||
896 | 1099 | ||
897 | #endif /* CONFIG_NTP_PPS */ | 1100 | /* |
1101 | * Interpolate if necessary, adjusting back from the start of the | ||
1102 | * current interval | ||
1103 | */ | ||
1104 | if (do_interp) { | ||
1105 | cycle_t partial_history_cycles, total_history_cycles; | ||
1106 | bool discontinuity; | ||
1107 | |||
1108 | /* | ||
1109 | * Check that the counter value occurs after the provided | ||
1110 | * history reference and that the history doesn't cross a | ||
1111 | * clocksource change | ||
1112 | */ | ||
1113 | if (!history_begin || | ||
1114 | !cycle_between(history_begin->cycles, | ||
1115 | system_counterval.cycles, cycles) || | ||
1116 | history_begin->cs_was_changed_seq != cs_was_changed_seq) | ||
1117 | return -EINVAL; | ||
1118 | partial_history_cycles = cycles - system_counterval.cycles; | ||
1119 | total_history_cycles = cycles - history_begin->cycles; | ||
1120 | discontinuity = | ||
1121 | history_begin->clock_was_set_seq != clock_was_set_seq; | ||
1122 | |||
1123 | ret = adjust_historical_crosststamp(history_begin, | ||
1124 | partial_history_cycles, | ||
1125 | total_history_cycles, | ||
1126 | discontinuity, xtstamp); | ||
1127 | if (ret) | ||
1128 | return ret; | ||
1129 | } | ||
1130 | |||
1131 | return 0; | ||
1132 | } | ||
1133 | EXPORT_SYMBOL_GPL(get_device_system_crosststamp); | ||
898 | 1134 | ||
899 | /** | 1135 | /** |
900 | * do_gettimeofday - Returns the time of day in a timeval | 1136 | * do_gettimeofday - Returns the time of day in a timeval |