author     Linus Torvalds <torvalds@linux-foundation.org>   2015-11-03 17:13:41 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>   2015-11-03 17:13:41 -0500
commit     7b2a4306f9e7d64bb408a6df3bb419500578068a
tree       c8ce206194c47c8bd50ee6b5828e0c734d571200
parent     316dde2fe95b33657de1fc2db54bfc16aa065790
parent     03f136a2074b2b8890da4a24df7104558ad0da48
Merge branch 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull timer updates from Thomas Gleixner:
"The timer departement provides:
- More y2038 work in the area of ntp and pps.
- Optimization of posix cpu timers
- New time related selftests
- Some new clocksource drivers
- The usual pile of fixes, cleanups and improvements"
* 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (25 commits)
timeconst: Update path in comment
timers/x86/hpet: Type adjustments
clocksource/drivers/armada-370-xp: Implement ARM delay timer
clocksource/drivers/tango_xtal: Add new timer for Tango SoCs
clocksource/drivers/imx: Allow timer irq affinity change
clocksource/drivers/exynos_mct: Use container_of() instead of this_cpu_ptr()
clocksource/drivers/h8300_*: Remove unneeded memset()s
clocksource/drivers/sh_cmt: Remove unneeded memset() in sh_cmt_setup()
clocksource/drivers/em_sti: Remove unneeded memset()s
clocksource/drivers/mediatek: Use GPT as sched clock source
clockevents/drivers/mtk: Fix spurious interrupt leading to crash
posix_cpu_timer: Reduce unnecessary sighand lock contention
posix_cpu_timer: Convert cputimer->running to bool
posix_cpu_timer: Check thread timers only when there are active thread timers
posix_cpu_timer: Optimize fastpath_timer_check()
timers, kselftest: Add 'adjtick' test to validate adjtimex() tick adjustments
timers: Use __fls in apply_slack()
clocksource: Remove return statement from void functions
net: sfc: avoid using timespec
ntp/pps: use y2038 safe types in pps_event_time
...
34 files changed, 468 insertions, 120 deletions
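For context on the y2038 items above: on 32-bit architectures the legacy struct timespec carries a 32-bit seconds field, which overflows in January 2038, whereas struct timespec64 always uses 64-bit seconds. A rough, simplified sketch of the difference (stand-in names, not the literal kernel definitions):

/* Simplified for illustration only -- the real definitions live in the
 * kernel's time headers and differ in detail. */
struct timespec_legacy {        /* stand-in for the old struct timespec       */
        long tv_sec;            /* 32 bits on 32-bit targets: wraps after     */
        long tv_nsec;           /* 2038-01-19                                  */
};

struct timespec64_like {        /* stand-in for struct timespec64 */
        long long tv_sec;       /* always 64-bit seconds          */
        long tv_nsec;
};

Most of the ntp, pps and sfc hunks below are mechanical substitutions of timespec64 and its helpers (timespec64_sub(), timespec64_add_ns(), timespec64_compare(), ktime_to_timespec64()) for the 32-bit variants.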
diff --git a/arch/x86/include/asm/hpet.h b/arch/x86/include/asm/hpet.h
index 5fa9fb0f8809..cc285ec4b2c1 100644
--- a/arch/x86/include/asm/hpet.h
+++ b/arch/x86/include/asm/hpet.h
@@ -63,10 +63,10 @@
 /* hpet memory map physical address */
 extern unsigned long hpet_address;
 extern unsigned long force_hpet_address;
-extern int boot_hpet_disable;
+extern bool boot_hpet_disable;
 extern u8 hpet_blockid;
-extern int hpet_force_user;
-extern u8 hpet_msi_disable;
+extern bool hpet_force_user;
+extern bool hpet_msi_disable;
 extern int is_hpet_enabled(void);
 extern int hpet_enable(void);
 extern void hpet_disable(void);
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c
index 9f9cc682e561..db9a675e751b 100644
--- a/arch/x86/kernel/early-quirks.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -584,7 +584,7 @@ static void __init intel_graphics_stolen(int num, int slot, int func)
 static void __init force_disable_hpet(int num, int slot, int func)
 {
 #ifdef CONFIG_HPET_TIMER
-        boot_hpet_disable = 1;
+        boot_hpet_disable = true;
         pr_info("x86/hpet: Will disable the HPET for this platform because it's not reliable\n");
 #endif
 }
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 88b4da373081..b8e6ff5cd5d0 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -37,10 +37,10 @@
  */
 unsigned long hpet_address;
 u8 hpet_blockid; /* OS timer block num */
-u8 hpet_msi_disable;
+bool hpet_msi_disable;
 
 #ifdef CONFIG_PCI_MSI
-static unsigned long hpet_num_timers;
+static unsigned int hpet_num_timers;
 #endif
 static void __iomem *hpet_virt_address;
 
@@ -86,9 +86,9 @@ static inline void hpet_clear_mapping(void)
 /*
  * HPET command line enable / disable
  */
-int boot_hpet_disable;
-int hpet_force_user;
-static int hpet_verbose;
+bool boot_hpet_disable;
+bool hpet_force_user;
+static bool hpet_verbose;
 
 static int __init hpet_setup(char *str)
 {
@@ -98,11 +98,11 @@ static int __init hpet_setup(char *str)
                 if (next)
                         *next++ = 0;
                 if (!strncmp("disable", str, 7))
-                        boot_hpet_disable = 1;
+                        boot_hpet_disable = true;
                 if (!strncmp("force", str, 5))
-                        hpet_force_user = 1;
+                        hpet_force_user = true;
                 if (!strncmp("verbose", str, 7))
-                        hpet_verbose = 1;
+                        hpet_verbose = true;
                 str = next;
         }
         return 1;
@@ -111,7 +111,7 @@ __setup("hpet=", hpet_setup);
 
 static int __init disable_hpet(char *str)
 {
-        boot_hpet_disable = 1;
+        boot_hpet_disable = true;
         return 1;
 }
 __setup("nohpet", disable_hpet);
@@ -124,7 +124,7 @@ static inline int is_hpet_capable(void)
 /*
  * HPET timer interrupt enable / disable
  */
-static int hpet_legacy_int_enabled;
+static bool hpet_legacy_int_enabled;
 
 /**
  * is_hpet_enabled - check whether the hpet timer interrupt is enabled
@@ -230,7 +230,7 @@ static struct clock_event_device hpet_clockevent;
 
 static void hpet_stop_counter(void)
 {
-        unsigned long cfg = hpet_readl(HPET_CFG);
+        u32 cfg = hpet_readl(HPET_CFG);
         cfg &= ~HPET_CFG_ENABLE;
         hpet_writel(cfg, HPET_CFG);
 }
@@ -272,7 +272,7 @@ static void hpet_enable_legacy_int(void)
 
         cfg |= HPET_CFG_LEGACY;
         hpet_writel(cfg, HPET_CFG);
-        hpet_legacy_int_enabled = 1;
+        hpet_legacy_int_enabled = true;
 }
 
 static void hpet_legacy_clockevent_register(void)
@@ -983,7 +983,7 @@ void hpet_disable(void)
                 cfg = *hpet_boot_cfg;
         else if (hpet_legacy_int_enabled) {
                 cfg &= ~HPET_CFG_LEGACY;
-                hpet_legacy_int_enabled = 0;
+                hpet_legacy_int_enabled = false;
         }
         cfg &= ~HPET_CFG_ENABLE;
         hpet_writel(cfg, HPET_CFG);
@@ -1121,8 +1121,7 @@ EXPORT_SYMBOL_GPL(hpet_rtc_timer_init);
 
 static void hpet_disable_rtc_channel(void)
 {
-        unsigned long cfg;
-        cfg = hpet_readl(HPET_T1_CFG);
+        u32 cfg = hpet_readl(HPET_T1_CFG);
         cfg &= ~HPET_TN_ENABLE;
         hpet_writel(cfg, HPET_T1_CFG);
 }
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
index 176a0f99d4da..cc457ff818ad 100644
--- a/arch/x86/kernel/quirks.c
+++ b/arch/x86/kernel/quirks.c
@@ -524,7 +524,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E6XX_CU,
  */
 static void force_disable_hpet_msi(struct pci_dev *unused)
 {
-        hpet_msi_disable = 1;
+        hpet_msi_disable = true;
 }
 
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS,
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index a7726db13abb..50b68bc20720 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -279,6 +279,10 @@ config CLKSRC_MIPS_GIC
         depends on MIPS_GIC
         select CLKSRC_OF
 
+config CLKSRC_TANGO_XTAL
+        bool
+        select CLKSRC_OF
+
 config CLKSRC_PXA
         def_bool y if ARCH_PXA || ARCH_SA1100
         select CLKSRC_OF if OF
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 5c00863c3e33..fc9348dc4f92 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -56,6 +56,7 @@ obj-$(CONFIG_ARCH_KEYSTONE) += timer-keystone.o
 obj-$(CONFIG_ARCH_INTEGRATOR_AP) += timer-integrator-ap.o
 obj-$(CONFIG_CLKSRC_VERSATILE) += versatile.o
 obj-$(CONFIG_CLKSRC_MIPS_GIC) += mips-gic-timer.o
+obj-$(CONFIG_CLKSRC_TANGO_XTAL) += tango_xtal.o
 obj-$(CONFIG_CLKSRC_IMX_GPT) += timer-imx-gpt.o
 obj-$(CONFIG_ASM9260_TIMER) += asm9260_timer.o
 obj-$(CONFIG_H8300) += h8300_timer8.o
diff --git a/drivers/clocksource/em_sti.c b/drivers/clocksource/em_sti.c
index 7a97a34dba70..19bb1792d647 100644
--- a/drivers/clocksource/em_sti.c
+++ b/drivers/clocksource/em_sti.c
@@ -228,7 +228,6 @@ static int em_sti_register_clocksource(struct em_sti_priv *p)
 {
         struct clocksource *cs = &p->cs;
 
-        memset(cs, 0, sizeof(*cs));
         cs->name = dev_name(&p->pdev->dev);
         cs->rating = 200;
         cs->read = em_sti_clocksource_read;
@@ -285,7 +284,6 @@ static void em_sti_register_clockevent(struct em_sti_priv *p)
 {
         struct clock_event_device *ced = &p->ced;
 
-        memset(ced, 0, sizeof(*ced));
         ced->name = dev_name(&p->pdev->dev);
         ced->features = CLOCK_EVT_FEAT_ONESHOT;
         ced->rating = 200;
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index 029f96ab131a..ff44082a0827 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -382,24 +382,28 @@ static void exynos4_mct_tick_start(unsigned long cycles,
 static int exynos4_tick_set_next_event(unsigned long cycles,
                                        struct clock_event_device *evt)
 {
-        struct mct_clock_event_device *mevt = this_cpu_ptr(&percpu_mct_tick);
+        struct mct_clock_event_device *mevt;
 
+        mevt = container_of(evt, struct mct_clock_event_device, evt);
         exynos4_mct_tick_start(cycles, mevt);
-
         return 0;
 }
 
 static int set_state_shutdown(struct clock_event_device *evt)
 {
-        exynos4_mct_tick_stop(this_cpu_ptr(&percpu_mct_tick));
+        struct mct_clock_event_device *mevt;
+
+        mevt = container_of(evt, struct mct_clock_event_device, evt);
+        exynos4_mct_tick_stop(mevt);
         return 0;
 }
 
 static int set_state_periodic(struct clock_event_device *evt)
 {
-        struct mct_clock_event_device *mevt = this_cpu_ptr(&percpu_mct_tick);
+        struct mct_clock_event_device *mevt;
         unsigned long cycles_per_jiffy;
 
+        mevt = container_of(evt, struct mct_clock_event_device, evt);
         cycles_per_jiffy = (((unsigned long long)NSEC_PER_SEC / HZ * evt->mult)
                             >> evt->shift);
         exynos4_mct_tick_stop(mevt);
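The exynos_mct hunk above swaps this_cpu_ptr() lookups for container_of(), which recovers the driver-private wrapper from the clock_event_device pointer the core hands to the callback, independent of which CPU the callback runs on. A minimal sketch of the pattern, with hypothetical names rather than the driver's own:

#include <linux/clockchips.h>
#include <linux/kernel.h>

struct example_event_device {
        void __iomem *base;                     /* device registers            */
        struct clock_event_device evt;          /* member embedded in wrapper  */
};

static int example_set_next_event(unsigned long cycles,
                                  struct clock_event_device *evt)
{
        /* Walk back from the embedded member to its enclosing structure. */
        struct example_event_device *dev =
                container_of(evt, struct example_event_device, evt);

        /* ... program 'cycles' into the hardware via dev->base ... */
        return 0;
}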
diff --git a/drivers/clocksource/h8300_timer16.c b/drivers/clocksource/h8300_timer16.c
index 82941c1e9e33..0e076c6fc006 100644
--- a/drivers/clocksource/h8300_timer16.c
+++ b/drivers/clocksource/h8300_timer16.c
@@ -153,7 +153,6 @@ static int timer16_setup(struct timer16_priv *p, struct platform_device *pdev)
         int ret, irq;
         unsigned int ch;
 
-        memset(p, 0, sizeof(*p));
         p->pdev = pdev;
 
         res[REG_CH] = platform_get_resource(p->pdev,
diff --git a/drivers/clocksource/h8300_timer8.c b/drivers/clocksource/h8300_timer8.c
index f9b3b7033a97..44375d8b9bc4 100644
--- a/drivers/clocksource/h8300_timer8.c
+++ b/drivers/clocksource/h8300_timer8.c
@@ -215,7 +215,6 @@ static int timer8_setup(struct timer8_priv *p,
         int irq;
         int ret;
 
-        memset(p, 0, sizeof(*p));
         p->pdev = pdev;
 
         res = platform_get_resource(p->pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/clocksource/h8300_tpu.c b/drivers/clocksource/h8300_tpu.c
index 64195fdd78bf..5487410bfabb 100644
--- a/drivers/clocksource/h8300_tpu.c
+++ b/drivers/clocksource/h8300_tpu.c
@@ -123,7 +123,6 @@ static int __init tpu_setup(struct tpu_priv *p, struct platform_device *pdev)
 {
         struct resource *res[2];
 
-        memset(p, 0, sizeof(*p));
         p->pdev = pdev;
 
         res[CH_L] = platform_get_resource(p->pdev, IORESOURCE_MEM, CH_L);
diff --git a/drivers/clocksource/mtk_timer.c b/drivers/clocksource/mtk_timer.c
index 50f0641c65b6..fbfc74685e6a 100644
--- a/drivers/clocksource/mtk_timer.c
+++ b/drivers/clocksource/mtk_timer.c
@@ -24,6 +24,7 @@
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/of_irq.h>
+#include <linux/sched_clock.h>
 #include <linux/slab.h>
 
 #define GPT_IRQ_EN_REG 0x00
@@ -59,6 +60,13 @@ struct mtk_clock_event_device {
         struct clock_event_device dev;
 };
 
+static void __iomem *gpt_sched_reg __read_mostly;
+
+static u64 notrace mtk_read_sched_clock(void)
+{
+        return readl_relaxed(gpt_sched_reg);
+}
+
 static inline struct mtk_clock_event_device *to_mtk_clk(
                 struct clock_event_device *c)
 {
@@ -141,14 +149,6 @@ static irqreturn_t mtk_timer_interrupt(int irq, void *dev_id)
         return IRQ_HANDLED;
 }
 
-static void mtk_timer_global_reset(struct mtk_clock_event_device *evt)
-{
-        /* Disable all interrupts */
-        writel(0x0, evt->gpt_base + GPT_IRQ_EN_REG);
-        /* Acknowledge all interrupts */
-        writel(0x3f, evt->gpt_base + GPT_IRQ_ACK_REG);
-}
-
 static void
 mtk_timer_setup(struct mtk_clock_event_device *evt, u8 timer, u8 option)
 {
@@ -168,6 +168,12 @@ static void mtk_timer_enable_irq(struct mtk_clock_event_device *evt, u8 timer)
 {
         u32 val;
 
+        /* Disable all interrupts */
+        writel(0x0, evt->gpt_base + GPT_IRQ_EN_REG);
+
+        /* Acknowledge all spurious pending interrupts */
+        writel(0x3f, evt->gpt_base + GPT_IRQ_ACK_REG);
+
         val = readl(evt->gpt_base + GPT_IRQ_EN_REG);
         writel(val | GPT_IRQ_ENABLE(timer),
                evt->gpt_base + GPT_IRQ_EN_REG);
@@ -220,8 +226,6 @@ static void __init mtk_timer_init(struct device_node *node)
         }
         rate = clk_get_rate(clk);
 
-        mtk_timer_global_reset(evt);
-
         if (request_irq(evt->dev.irq, mtk_timer_interrupt,
                         IRQF_TIMER | IRQF_IRQPOLL, "mtk_timer", evt)) {
                 pr_warn("failed to setup irq %d\n", evt->dev.irq);
@@ -234,6 +238,8 @@ static void __init mtk_timer_init(struct device_node *node)
         mtk_timer_setup(evt, GPT_CLK_SRC, TIMER_CTRL_OP_FREERUN);
         clocksource_mmio_init(evt->gpt_base + TIMER_CNT_REG(GPT_CLK_SRC),
                         node->name, rate, 300, 32, clocksource_mmio_readl_up);
+        gpt_sched_reg = evt->gpt_base + TIMER_CNT_REG(GPT_CLK_SRC);
+        sched_clock_register(mtk_read_sched_clock, 32, rate);
 
         /* Configure clock event */
         mtk_timer_setup(evt, GPT_CLK_EVT, TIMER_CTRL_OP_REPEAT);
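The MediaTek change above and the new tango_xtal driver below follow the same recipe for providing a scheduler clock: read a free-running up-counter in a notrace callback and register it with sched_clock_register() along with its width and rate. A bare-bones sketch of that recipe (illustrative names, not taken from either driver):

#include <linux/io.h>
#include <linux/sched_clock.h>

static void __iomem *example_counter;   /* MMIO address of a free-running counter */

static u64 notrace example_read_sched_clock(void)
{
        /* Keep this fast and untraced: it is called from scheduler hot paths. */
        return readl_relaxed(example_counter);
}

static void __init example_register_sched_clock(void __iomem *base,
                                                unsigned long rate)
{
        example_counter = base;
        /* 32-bit wide counter ticking at 'rate' Hz. */
        sched_clock_register(example_read_sched_clock, 32, rate);
}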
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index ba73a6eb8d66..103c49362c68 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -962,7 +962,6 @@ static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev)
         unsigned int i;
         int ret;
 
-        memset(cmt, 0, sizeof(*cmt));
         cmt->pdev = pdev;
         raw_spin_lock_init(&cmt->lock);
 
diff --git a/drivers/clocksource/tango_xtal.c b/drivers/clocksource/tango_xtal.c
new file mode 100644
index 000000000000..d297b30d2bc0
--- /dev/null
+++ b/drivers/clocksource/tango_xtal.c
@@ -0,0 +1,66 @@
+#include <linux/clocksource.h>
+#include <linux/sched_clock.h>
+#include <linux/of_address.h>
+#include <linux/printk.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/clk.h>
+
+static void __iomem *xtal_in_cnt;
+static struct delay_timer delay_timer;
+
+static unsigned long notrace read_xtal_counter(void)
+{
+        return readl_relaxed(xtal_in_cnt);
+}
+
+static u64 notrace read_sched_clock(void)
+{
+        return read_xtal_counter();
+}
+
+static cycle_t read_clocksource(struct clocksource *cs)
+{
+        return read_xtal_counter();
+}
+
+static struct clocksource tango_xtal = {
+        .name = "tango-xtal",
+        .rating = 350,
+        .read = read_clocksource,
+        .mask = CLOCKSOURCE_MASK(32),
+        .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static void __init tango_clocksource_init(struct device_node *np)
+{
+        struct clk *clk;
+        int xtal_freq, ret;
+
+        xtal_in_cnt = of_iomap(np, 0);
+        if (xtal_in_cnt == NULL) {
+                pr_err("%s: invalid address\n", np->full_name);
+                return;
+        }
+
+        clk = of_clk_get(np, 0);
+        if (IS_ERR(clk)) {
+                pr_err("%s: invalid clock\n", np->full_name);
+                return;
+        }
+
+        xtal_freq = clk_get_rate(clk);
+        delay_timer.freq = xtal_freq;
+        delay_timer.read_current_timer = read_xtal_counter;
+
+        ret = clocksource_register_hz(&tango_xtal, xtal_freq);
+        if (ret != 0) {
+                pr_err("%s: registration failed\n", np->full_name);
+                return;
+        }
+
+        sched_clock_register(read_sched_clock, 32, xtal_freq);
+        register_current_timer_delay(&delay_timer);
+}
+
+CLOCKSOURCE_OF_DECLARE(tango, "sigma,tick-counter", tango_clocksource_init);
diff --git a/drivers/clocksource/time-armada-370-xp.c b/drivers/clocksource/time-armada-370-xp.c
index 2162796fd504..d93ec3c4f139 100644
--- a/drivers/clocksource/time-armada-370-xp.c
+++ b/drivers/clocksource/time-armada-370-xp.c
@@ -45,6 +45,8 @@
 #include <linux/percpu.h>
 #include <linux/syscore_ops.h>
 
+#include <asm/delay.h>
+
 /*
  * Timer block registers.
  */
@@ -249,6 +251,15 @@ struct syscore_ops armada_370_xp_timer_syscore_ops = {
         .resume = armada_370_xp_timer_resume,
 };
 
+static unsigned long armada_370_delay_timer_read(void)
+{
+        return ~readl(timer_base + TIMER0_VAL_OFF);
+}
+
+static struct delay_timer armada_370_delay_timer = {
+        .read_current_timer = armada_370_delay_timer_read,
+};
+
 static void __init armada_370_xp_timer_common_init(struct device_node *np)
 {
         u32 clr = 0, set = 0;
@@ -287,6 +298,9 @@ static void __init armada_370_xp_timer_common_init(struct device_node *np)
                         TIMER0_RELOAD_EN | enable_mask,
                         TIMER0_RELOAD_EN | enable_mask);
 
+        armada_370_delay_timer.freq = timer_clk;
+        register_current_timer_delay(&armada_370_delay_timer);
+
         /*
          * Set scale and timer for sched_clock.
          */
diff --git a/drivers/clocksource/timer-imx-gpt.c b/drivers/clocksource/timer-imx-gpt.c
index 839aba92fc39..99ec96769dda 100644
--- a/drivers/clocksource/timer-imx-gpt.c
+++ b/drivers/clocksource/timer-imx-gpt.c
@@ -305,13 +305,14 @@ static int __init mxc_clockevent_init(struct imx_timer *imxtm)
         struct irqaction *act = &imxtm->act;
 
         ced->name = "mxc_timer1";
-        ced->features = CLOCK_EVT_FEAT_ONESHOT;
+        ced->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_DYNIRQ;
         ced->set_state_shutdown = mxc_shutdown;
         ced->set_state_oneshot = mxc_set_oneshot;
         ced->tick_resume = mxc_shutdown;
         ced->set_next_event = imxtm->gpt->set_next_event;
         ced->rating = 200;
         ced->cpumask = cpumask_of(0);
+        ced->irq = imxtm->irq;
         clockevents_config_and_register(ced, clk_get_rate(imxtm->clk_per),
                                         0xff, 0xfffffffe);
 
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index ad62615a93dc..c771e0af4e06 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -401,8 +401,8 @@ size_t efx_ptp_update_stats(struct efx_nic *efx, u64 *stats)
 /* For Siena platforms NIC time is s and ns */
 static void efx_ptp_ns_to_s_ns(s64 ns, u32 *nic_major, u32 *nic_minor)
 {
-        struct timespec ts = ns_to_timespec(ns);
-        *nic_major = ts.tv_sec;
+        struct timespec64 ts = ns_to_timespec64(ns);
+        *nic_major = (u32)ts.tv_sec;
         *nic_minor = ts.tv_nsec;
 }
 
@@ -431,8 +431,8 @@ static ktime_t efx_ptp_s_ns_to_ktime_correction(u32 nic_major, u32 nic_minor,
  */
 static void efx_ptp_ns_to_s27(s64 ns, u32 *nic_major, u32 *nic_minor)
 {
-        struct timespec ts = ns_to_timespec(ns);
-        u32 maj = ts.tv_sec;
+        struct timespec64 ts = ns_to_timespec64(ns);
+        u32 maj = (u32)ts.tv_sec;
         u32 min = (u32)(((u64)ts.tv_nsec * NS_TO_S27_MULT +
                          (1ULL << (NS_TO_S27_SHIFT - 1))) >> NS_TO_S27_SHIFT);
 
@@ -646,28 +646,28 @@ static void efx_ptp_send_times(struct efx_nic *efx,
                                struct pps_event_time *last_time)
 {
         struct pps_event_time now;
-        struct timespec limit;
+        struct timespec64 limit;
         struct efx_ptp_data *ptp = efx->ptp_data;
-        struct timespec start;
+        struct timespec64 start;
         int *mc_running = ptp->start.addr;
 
         pps_get_ts(&now);
         start = now.ts_real;
         limit = now.ts_real;
-        timespec_add_ns(&limit, SYNCHRONISE_PERIOD_NS);
+        timespec64_add_ns(&limit, SYNCHRONISE_PERIOD_NS);
 
         /* Write host time for specified period or until MC is done */
-        while ((timespec_compare(&now.ts_real, &limit) < 0) &&
+        while ((timespec64_compare(&now.ts_real, &limit) < 0) &&
                ACCESS_ONCE(*mc_running)) {
-                struct timespec update_time;
+                struct timespec64 update_time;
                 unsigned int host_time;
 
                 /* Don't update continuously to avoid saturating the PCIe bus */
                 update_time = now.ts_real;
-                timespec_add_ns(&update_time, SYNCHRONISATION_GRANULARITY_NS);
+                timespec64_add_ns(&update_time, SYNCHRONISATION_GRANULARITY_NS);
                 do {
                         pps_get_ts(&now);
-                } while ((timespec_compare(&now.ts_real, &update_time) < 0) &&
+                } while ((timespec64_compare(&now.ts_real, &update_time) < 0) &&
                          ACCESS_ONCE(*mc_running));
 
                 /* Synchronise NIC with single word of time only */
@@ -723,7 +723,7 @@ efx_ptp_process_times(struct efx_nic *efx, MCDI_DECLARE_STRUCT_PTR(synch_buf),
         struct efx_ptp_data *ptp = efx->ptp_data;
         u32 last_sec;
         u32 start_sec;
-        struct timespec delta;
+        struct timespec64 delta;
         ktime_t mc_time;
 
         if (number_readings == 0)
@@ -737,14 +737,14 @@ efx_ptp_process_times(struct efx_nic *efx, MCDI_DECLARE_STRUCT_PTR(synch_buf),
          */
         for (i = 0; i < number_readings; i++) {
                 s32 window, corrected;
-                struct timespec wait;
+                struct timespec64 wait;
 
                 efx_ptp_read_timeset(
                         MCDI_ARRAY_STRUCT_PTR(synch_buf,
                                               PTP_OUT_SYNCHRONIZE_TIMESET, i),
                         &ptp->timeset[i]);
 
-                wait = ktime_to_timespec(
+                wait = ktime_to_timespec64(
                         ptp->nic_to_kernel_time(0, ptp->timeset[i].wait, 0));
                 window = ptp->timeset[i].window;
                 corrected = window - wait.tv_nsec;
@@ -803,7 +803,7 @@ efx_ptp_process_times(struct efx_nic *efx, MCDI_DECLARE_STRUCT_PTR(synch_buf),
                                                ptp->timeset[last_good].minor, 0);
 
         /* Calculate delay from NIC top of second to last_time */
-        delta.tv_nsec += ktime_to_timespec(mc_time).tv_nsec;
+        delta.tv_nsec += ktime_to_timespec64(mc_time).tv_nsec;
 
         /* Set PPS timestamp to match NIC top of second */
         ptp->host_time_pps = *last_time;
diff --git a/drivers/pps/kapi.c b/drivers/pps/kapi.c
index cdad4d95b20e..805c749ac1ad 100644
--- a/drivers/pps/kapi.c
+++ b/drivers/pps/kapi.c
@@ -179,8 +179,8 @@ void pps_event(struct pps_device *pps, struct pps_event_time *ts, int event,
         /* check event type */
         BUG_ON((event & (PPS_CAPTUREASSERT | PPS_CAPTURECLEAR)) == 0);
 
-        dev_dbg(pps->dev, "PPS event at %ld.%09ld\n",
-                ts->ts_real.tv_sec, ts->ts_real.tv_nsec);
+        dev_dbg(pps->dev, "PPS event at %lld.%09ld\n",
+                (s64)ts->ts_real.tv_sec, ts->ts_real.tv_nsec);
 
         timespec_to_pps_ktime(&ts_real, ts->ts_real);
 
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index e38681f4912d..810a34f60424 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -59,7 +59,8 @@ extern struct fs_struct init_fs;
         .rlim = INIT_RLIMITS, \
         .cputimer = { \
                 .cputime_atomic = INIT_CPUTIME_ATOMIC, \
-                .running = 0, \
+                .running = false, \
+                .checking_timer = false, \
         }, \
         INIT_PREV_CPUTIME(sig) \
         .cred_guard_mutex = \
diff --git a/include/linux/pps_kernel.h b/include/linux/pps_kernel.h
index 1d2cd21242e8..54bf1484d41f 100644
--- a/include/linux/pps_kernel.h
+++ b/include/linux/pps_kernel.h
@@ -48,9 +48,9 @@ struct pps_source_info {
 
 struct pps_event_time {
 #ifdef CONFIG_NTP_PPS
-        struct timespec ts_raw;
+        struct timespec64 ts_raw;
 #endif /* CONFIG_NTP_PPS */
-        struct timespec ts_real;
+        struct timespec64 ts_real;
 };
 
 /* The main struct */
@@ -105,7 +105,7 @@ extern void pps_event(struct pps_device *pps,
 struct pps_device *pps_lookup_dev(void const *cookie);
 
 static inline void timespec_to_pps_ktime(struct pps_ktime *kt,
-                struct timespec ts)
+                struct timespec64 ts)
 {
         kt->sec = ts.tv_sec;
         kt->nsec = ts.tv_nsec;
@@ -115,24 +115,24 @@ static inline void timespec_to_pps_ktime(struct pps_ktime *kt,
 
 static inline void pps_get_ts(struct pps_event_time *ts)
 {
-        getnstime_raw_and_real(&ts->ts_raw, &ts->ts_real);
+        ktime_get_raw_and_real_ts64(&ts->ts_raw, &ts->ts_real);
 }
 
 #else /* CONFIG_NTP_PPS */
 
 static inline void pps_get_ts(struct pps_event_time *ts)
 {
-        getnstimeofday(&ts->ts_real);
+        ktime_get_real_ts64(&ts->ts_real);
 }
 
 #endif /* CONFIG_NTP_PPS */
 
 /* Subtract known time delay from PPS event time(s) */
-static inline void pps_sub_ts(struct pps_event_time *ts, struct timespec delta)
+static inline void pps_sub_ts(struct pps_event_time *ts, struct timespec64 delta)
 {
-        ts->ts_real = timespec_sub(ts->ts_real, delta);
+        ts->ts_real = timespec64_sub(ts->ts_real, delta);
 #ifdef CONFIG_NTP_PPS
-        ts->ts_raw = timespec_sub(ts->ts_raw, delta);
+        ts->ts_raw = timespec64_sub(ts->ts_raw, delta);
 #endif
 }
 
diff --git a/include/linux/sched.h b/include/linux/sched.h
index b7b9501b41af..f87559df5b75 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -617,15 +617,18 @@ struct task_cputime_atomic {
 /**
  * struct thread_group_cputimer - thread group interval timer counts
  * @cputime_atomic: atomic thread group interval timers.
- * @running: non-zero when there are timers running and
- *           @cputime receives updates.
+ * @running: true when there are timers running and
+ *           @cputime_atomic receives updates.
+ * @checking_timer: true when a thread in the group is in the
+ *                  process of checking for thread group timers.
  *
  * This structure contains the version of task_cputime, above, that is
  * used for thread group CPU timer calculations.
  */
 struct thread_group_cputimer {
         struct task_cputime_atomic cputime_atomic;
-        int running;
+        bool running;
+        bool checking_timer;
 };
 
 #include <linux/rwsem.h>
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h
index ba0ae09cbb21..ec89d846324c 100644
--- a/include/linux/timekeeping.h
+++ b/include/linux/timekeeping.h
@@ -263,8 +263,8 @@ extern void timekeeping_inject_sleeptime64(struct timespec64 *delta);
 /*
  * PPS accessor
  */
-extern void getnstime_raw_and_real(struct timespec *ts_raw,
-                                   struct timespec *ts_real);
+extern void ktime_get_raw_and_real_ts64(struct timespec64 *ts_raw,
+                                        struct timespec64 *ts_real);
 
 /*
  * Persistent clock related interfaces
diff --git a/include/linux/timex.h b/include/linux/timex.h
index 9d3f1a5b6178..39c25dbebfe8 100644
--- a/include/linux/timex.h
+++ b/include/linux/timex.h
@@ -152,7 +152,7 @@ extern unsigned long tick_nsec; /* SHIFTED_HZ period (nsec) */
 #define NTP_INTERVAL_LENGTH (NSEC_PER_SEC/NTP_INTERVAL_FREQ)
 
 extern int do_adjtimex(struct timex *);
-extern void hardpps(const struct timespec *, const struct timespec *);
+extern void hardpps(const struct timespec64 *, const struct timespec64 *);
 
 int read_current_timer(unsigned long *timer_val);
 void ntp_notify_cmos_timer(void);
diff --git a/kernel/fork.c b/kernel/fork.c
index 2845623fb582..6ac894244d39 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1101,7 +1101,7 @@ static void posix_cpu_timers_init_group(struct signal_struct *sig)
         cpu_limit = READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
         if (cpu_limit != RLIM_INFINITY) {
                 sig->cputime_expires.prof_exp = secs_to_cputime(cpu_limit);
-                sig->cputimer.running = 1;
+                sig->cputimer.running = true;
         }
 
         /* The timer lists. */
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 3a38775b50c2..0d8fe8b8f727 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -479,7 +479,7 @@ static u32 clocksource_max_adjustment(struct clocksource *cs)
  * return half the number of nanoseconds the hardware counter can technically
  * cover. This is done so that we can potentially detect problems caused by
  * delayed timers or bad hardware, which might result in time intervals that
- * are larger then what the math used can handle without overflows.
+ * are larger than what the math used can handle without overflows.
  */
 u64 clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask, u64 *max_cyc)
 {
@@ -595,16 +595,15 @@ static void __clocksource_select(bool skipcur)
  */
 static void clocksource_select(void)
 {
-        return __clocksource_select(false);
+        __clocksource_select(false);
 }
 
 static void clocksource_select_fallback(void)
 {
-        return __clocksource_select(true);
+        __clocksource_select(true);
 }
 
 #else /* !CONFIG_ARCH_USES_GETTIMEOFFSET */
-
 static inline void clocksource_select(void) { }
 static inline void clocksource_select_fallback(void) { }
 
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 457a373e2181..435b8850dd80 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -59,7 +59,7 @@
 /*
  * The timer bases:
  *
- * There are more clockids then hrtimer bases. Thus, we index
+ * There are more clockids than hrtimer bases. Thus, we index
  * into the timer bases by the hrtimer_base_type enum. When trying
  * to reach a base using a clockid, hrtimer_clockid_to_base()
  * is used to convert from clockid to the proper hrtimer_base_type.
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index df68cb875248..149cc8086aea 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -99,7 +99,7 @@ static time64_t ntp_next_leap_sec = TIME64_MAX;
 static int pps_valid; /* signal watchdog counter */
 static long pps_tf[3]; /* phase median filter */
 static long pps_jitter; /* current jitter (ns) */
-static struct timespec pps_fbase; /* beginning of the last freq interval */
+static struct timespec64 pps_fbase; /* beginning of the last freq interval */
 static int pps_shift; /* current interval duration (s) (shift) */
 static int pps_intcnt; /* interval counter */
 static s64 pps_freq; /* frequency offset (scaled ns/s) */
@@ -509,7 +509,7 @@ static DECLARE_DELAYED_WORK(sync_cmos_work, sync_cmos_clock);
 static void sync_cmos_clock(struct work_struct *work)
 {
         struct timespec64 now;
-        struct timespec next;
+        struct timespec64 next;
         int fail = 1;
 
         /*
@@ -559,7 +559,7 @@ static void sync_cmos_clock(struct work_struct *work)
                 next.tv_nsec -= NSEC_PER_SEC;
         }
         queue_delayed_work(system_power_efficient_wq,
-                           &sync_cmos_work, timespec_to_jiffies(&next));
+                           &sync_cmos_work, timespec64_to_jiffies(&next));
 }
 
 void ntp_notify_cmos_timer(void)
@@ -773,13 +773,13 @@ int __do_adjtimex(struct timex *txc, struct timespec64 *ts, s32 *time_tai)
 * pps_normtime.nsec has a range of ( -NSEC_PER_SEC / 2, NSEC_PER_SEC / 2 ]
 * while timespec.tv_nsec has a range of [0, NSEC_PER_SEC) */
 struct pps_normtime {
-        __kernel_time_t sec; /* seconds */
+        s64 sec; /* seconds */
         long nsec; /* nanoseconds */
 };
 
 /* normalize the timestamp so that nsec is in the
    ( -NSEC_PER_SEC / 2, NSEC_PER_SEC / 2 ] interval */
-static inline struct pps_normtime pps_normalize_ts(struct timespec ts)
+static inline struct pps_normtime pps_normalize_ts(struct timespec64 ts)
 {
         struct pps_normtime norm = {
                 .sec = ts.tv_sec,
@@ -861,7 +861,7 @@ static long hardpps_update_freq(struct pps_normtime freq_norm)
                 pps_errcnt++;
                 pps_dec_freq_interval();
                 printk_deferred(KERN_ERR
-                        "hardpps: PPSERROR: interval too long - %ld s\n",
+                        "hardpps: PPSERROR: interval too long - %lld s\n",
                         freq_norm.sec);
                 return 0;
         }
@@ -948,7 +948,7 @@ static void hardpps_update_phase(long error)
  * This code is based on David Mills's reference nanokernel
  * implementation. It was mostly rewritten but keeps the same idea.
  */
-void __hardpps(const struct timespec *phase_ts, const struct timespec *raw_ts)
+void __hardpps(const struct timespec64 *phase_ts, const struct timespec64 *raw_ts)
 {
         struct pps_normtime pts_norm, freq_norm;
 
@@ -969,7 +969,7 @@ void __hardpps(const struct timespec *phase_ts, const struct timespec *raw_ts)
         }
 
         /* ok, now we have a base for frequency calculation */
-        freq_norm = pps_normalize_ts(timespec_sub(*raw_ts, pps_fbase));
+        freq_norm = pps_normalize_ts(timespec64_sub(*raw_ts, pps_fbase));
 
         /* check that the signal is in the range
          * [1s - MAXFREQ us, 1s + MAXFREQ us], otherwise reject it */
diff --git a/kernel/time/ntp_internal.h b/kernel/time/ntp_internal.h
index 65430504ca26..af924470eac0 100644
--- a/kernel/time/ntp_internal.h
+++ b/kernel/time/ntp_internal.h
@@ -9,5 +9,5 @@ extern ktime_t ntp_get_next_leap(void);
 extern int second_overflow(unsigned long secs);
 extern int ntp_validate_timex(struct timex *);
 extern int __do_adjtimex(struct timex *, struct timespec64 *, s32 *);
-extern void __hardpps(const struct timespec *, const struct timespec *);
+extern void __hardpps(const struct timespec64 *, const struct timespec64 *);
 #endif /* _LINUX_NTP_INTERNAL_H */
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index 892e3dae0aac..f5e86d282d52 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -249,7 +249,7 @@ void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
                  * but barriers are not required because update_gt_cputime()
                  * can handle concurrent updates.
                  */
-                WRITE_ONCE(cputimer->running, 1);
+                WRITE_ONCE(cputimer->running, true);
         }
         sample_cputime_atomic(times, &cputimer->cputime_atomic);
 }
@@ -864,6 +864,13 @@ static void check_thread_timers(struct task_struct *tsk,
         unsigned long long expires;
         unsigned long soft;
 
+        /*
+         * If cputime_expires is zero, then there are no active
+         * per thread CPU timers.
+         */
+        if (task_cputime_zero(&tsk->cputime_expires))
+                return;
+
         expires = check_timers_list(timers, firing, prof_ticks(tsk));
         tsk_expires->prof_exp = expires_to_cputime(expires);
 
@@ -911,7 +918,7 @@ static inline void stop_process_timers(struct signal_struct *sig)
         struct thread_group_cputimer *cputimer = &sig->cputimer;
 
         /* Turn off cputimer->running. This is done without locking. */
-        WRITE_ONCE(cputimer->running, 0);
+        WRITE_ONCE(cputimer->running, false);
 }
 
 static u32 onecputick;
@@ -962,6 +969,19 @@ static void check_process_timers(struct task_struct *tsk,
         unsigned long soft;
 
         /*
+         * If cputimer is not running, then there are no active
+         * process wide timers (POSIX 1.b, itimers, RLIMIT_CPU).
+         */
+        if (!READ_ONCE(tsk->signal->cputimer.running))
+                return;
+
+        /*
+         * Signify that a thread is checking for process timers.
+         * Write access to this field is protected by the sighand lock.
+         */
+        sig->cputimer.checking_timer = true;
+
+        /*
          * Collect the current process totals.
          */
         thread_group_cputimer(tsk, &cputime);
@@ -1015,6 +1035,8 @@ static void check_process_timers(struct task_struct *tsk,
         sig->cputime_expires.sched_exp = sched_expires;
         if (task_cputime_zero(&sig->cputime_expires))
                 stop_process_timers(sig);
+
+        sig->cputimer.checking_timer = false;
 }
 
 /*
@@ -1117,24 +1139,33 @@ static inline int task_cputime_expired(const struct task_cputime *sample,
 static inline int fastpath_timer_check(struct task_struct *tsk)
 {
         struct signal_struct *sig;
-        cputime_t utime, stime;
-
-        task_cputime(tsk, &utime, &stime);
 
         if (!task_cputime_zero(&tsk->cputime_expires)) {
-                struct task_cputime task_sample = {
-                        .utime = utime,
-                        .stime = stime,
-                        .sum_exec_runtime = tsk->se.sum_exec_runtime
-                };
+                struct task_cputime task_sample;
 
+                task_cputime(tsk, &task_sample.utime, &task_sample.stime);
+                task_sample.sum_exec_runtime = tsk->se.sum_exec_runtime;
                 if (task_cputime_expired(&task_sample, &tsk->cputime_expires))
                         return 1;
         }
 
         sig = tsk->signal;
-        /* Check if cputimer is running. This is accessed without locking. */
-        if (READ_ONCE(sig->cputimer.running)) {
+        /*
+         * Check if thread group timers expired when the cputimer is
+         * running and no other thread in the group is already checking
+         * for thread group cputimers. These fields are read without the
+         * sighand lock. However, this is fine because this is meant to
+         * be a fastpath heuristic to determine whether we should try to
+         * acquire the sighand lock to check/handle timers.
+         *
+         * In the worst case scenario, if 'running' or 'checking_timer' gets
+         * set but the current thread doesn't see the change yet, we'll wait
+         * until the next thread in the group gets a scheduler interrupt to
+         * handle the timer. This isn't an issue in practice because these
+         * types of delays with signals actually getting sent are expected.
+         */
+        if (READ_ONCE(sig->cputimer.running) &&
+            !READ_ONCE(sig->cputimer.checking_timer)) {
                 struct task_cputime group_sample;
 
                 sample_cputime_atomic(&group_sample, &sig->cputimer.cputime_atomic);
@@ -1174,12 +1205,8 @@ void run_posix_cpu_timers(struct task_struct *tsk)
          * put them on the firing list.
          */
         check_thread_timers(tsk, &firing);
-        /*
-         * If there are any active process wide timers (POSIX 1.b, itimers,
-         * RLIMIT_CPU) cputimer must be running.
-         */
-        if (READ_ONCE(tsk->signal->cputimer.running))
-                check_process_timers(tsk, &firing);
+
+        check_process_timers(tsk, &firing);
 
         /*
          * We must release these locks before taking any timer's lock.
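Taken together, the posix-cpu-timers hunks above let at most one thread per group fall into the sighand-locked slow path while a sibling is already scanning the group timers. Stripped to its essentials, the handoff looks roughly like this (hypothetical helper names; the real logic sits in fastpath_timer_check() and check_process_timers()):

#include <linux/compiler.h>
#include <linux/sched.h>

/* Lockless fastpath: only bother taking the sighand lock when group timers
 * may be armed and nobody else is already checking them. */
static bool group_timers_need_check(struct signal_struct *sig)
{
        return READ_ONCE(sig->cputimer.running) &&
               !READ_ONCE(sig->cputimer.checking_timer);
}

/* Slow path, entered with the sighand lock held: announce that this thread
 * is doing the scan so that siblings stay on the fastpath meanwhile. */
static void scan_group_timers(struct signal_struct *sig)
{
        sig->cputimer.checking_timer = true;    /* writes protected by sighand lock */

        /* ... walk the thread-group CPU timers and queue expired ones ... */

        sig->cputimer.checking_timer = false;
}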
diff --git a/kernel/time/timeconst.bc b/kernel/time/timeconst.bc
index c7388dee8635..c48688904f9f 100644
--- a/kernel/time/timeconst.bc
+++ b/kernel/time/timeconst.bc
@@ -39,7 +39,7 @@ define fmuls(b,n,d) {
 }
 
 define timeconst(hz) {
-        print "/* Automatically generated by kernel/timeconst.bc */\n"
+        print "/* Automatically generated by kernel/time/timeconst.bc */\n"
         print "/* Time conversion constants for HZ == ", hz, " */\n"
         print "\n"
 
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index 44d2cc0436f4..b1356b7ae570 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c | |||
@@ -849,7 +849,7 @@ EXPORT_SYMBOL_GPL(ktime_get_real_seconds); | |||
849 | #ifdef CONFIG_NTP_PPS | 849 | #ifdef CONFIG_NTP_PPS |
850 | 850 | ||
851 | /** | 851 | /** |
852 | * getnstime_raw_and_real - get day and raw monotonic time in timespec format | 852 | * ktime_get_raw_and_real_ts64 - get day and raw monotonic time in timespec format |
853 | * @ts_raw: pointer to the timespec to be set to raw monotonic time | 853 | * @ts_raw: pointer to the timespec to be set to raw monotonic time |
854 | * @ts_real: pointer to the timespec to be set to the time of day | 854 | * @ts_real: pointer to the timespec to be set to the time of day |
855 | * | 855 | * |
@@ -857,7 +857,7 @@ EXPORT_SYMBOL_GPL(ktime_get_real_seconds); | |||
857 | * same time atomically and stores the resulting timestamps in timespec | 857 | * same time atomically and stores the resulting timestamps in timespec |
858 | * format. | 858 | * format. |
859 | */ | 859 | */ |
860 | void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real) | 860 | void ktime_get_raw_and_real_ts64(struct timespec64 *ts_raw, struct timespec64 *ts_real) |
861 | { | 861 | { |
862 | struct timekeeper *tk = &tk_core.timekeeper; | 862 | struct timekeeper *tk = &tk_core.timekeeper; |
863 | unsigned long seq; | 863 | unsigned long seq; |
@@ -868,7 +868,7 @@ void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real) | |||
868 | do { | 868 | do { |
869 | seq = read_seqcount_begin(&tk_core.seq); | 869 | seq = read_seqcount_begin(&tk_core.seq); |
870 | 870 | ||
871 | *ts_raw = timespec64_to_timespec(tk->raw_time); | 871 | *ts_raw = tk->raw_time; |
872 | ts_real->tv_sec = tk->xtime_sec; | 872 | ts_real->tv_sec = tk->xtime_sec; |
873 | ts_real->tv_nsec = 0; | 873 | ts_real->tv_nsec = 0; |
874 | 874 | ||
@@ -877,10 +877,10 @@ void getnstime_raw_and_real(struct timespec *ts_raw, struct timespec *ts_real) | |||
877 | 877 | ||
878 | } while (read_seqcount_retry(&tk_core.seq, seq)); | 878 | } while (read_seqcount_retry(&tk_core.seq, seq)); |
879 | 879 | ||
880 | timespec_add_ns(ts_raw, nsecs_raw); | 880 | timespec64_add_ns(ts_raw, nsecs_raw); |
881 | timespec_add_ns(ts_real, nsecs_real); | 881 | timespec64_add_ns(ts_real, nsecs_real); |
882 | } | 882 | } |
883 | EXPORT_SYMBOL(getnstime_raw_and_real); | 883 | EXPORT_SYMBOL(ktime_get_raw_and_real_ts64); |
884 | 884 | ||
885 | #endif /* CONFIG_NTP_PPS */ | 885 | #endif /* CONFIG_NTP_PPS */ |
886 | 886 | ||
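
The do/while loop above is the timekeeper's seqcount read pattern: sample the sequence counter, copy the fields, and retry if a writer ran in between. A simplified user-space sketch of the same idea follows; the names are hypothetical and default sequentially consistent C11 atomics stand in for the kernel's seqcount primitives.

#include <stdatomic.h>
#include <stdint.h>

/* Hypothetical timekeeper snapshot guarded by a sequence counter. */
struct tk_snapshot {
	atomic_uint seq;          /* even = stable, odd = write in progress */
	_Atomic int64_t raw_nsec; /* stands in for tk->raw_time */
	_Atomic int64_t real_sec; /* stands in for tk->xtime_sec */
};

static void tk_write(struct tk_snapshot *tk, int64_t raw, int64_t sec)
{
	atomic_fetch_add(&tk->seq, 1);   /* counter becomes odd */
	atomic_store(&tk->raw_nsec, raw);
	atomic_store(&tk->real_sec, sec);
	atomic_fetch_add(&tk->seq, 1);   /* counter even again */
}

static void tk_read(struct tk_snapshot *tk, int64_t *raw, int64_t *sec)
{
	unsigned int start;

	do {
		start = atomic_load(&tk->seq);
		*raw = atomic_load(&tk->raw_nsec);
		*sec = atomic_load(&tk->real_sec);
		/* Retry if a write was in flight or completed meanwhile. */
	} while ((start & 1) || start != atomic_load(&tk->seq));
}

int main(void)
{
	struct tk_snapshot tk = { 0 };
	int64_t raw, sec;

	tk_write(&tk, 123456789, 1446566021);
	tk_read(&tk, &raw, &sec);
	return (raw == 123456789 && sec == 1446566021) ? 0 : 1;
}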
@@ -1674,7 +1674,7 @@ static void timekeeping_adjust(struct timekeeper *tk, s64 offset) | |||
1674 | /** | 1674 | /** |
1675 | * accumulate_nsecs_to_secs - Accumulates nsecs into secs | 1675 | * accumulate_nsecs_to_secs - Accumulates nsecs into secs |
1676 | * | 1676 | * |
1677 | * Helper function that accumulates a the nsecs greater then a second | 1677 | * Helper function that accumulates the nsecs greater than a second |
1678 | * from the xtime_nsec field to the xtime_secs field. | 1678 | * from the xtime_nsec field to the xtime_secs field. |
1679 | * It also calls into the NTP code to handle leapsecond processing. | 1679 | * It also calls into the NTP code to handle leapsecond processing. |
1680 | * | 1680 | * |
@@ -1726,7 +1726,7 @@ static cycle_t logarithmic_accumulation(struct timekeeper *tk, cycle_t offset, | |||
1726 | cycle_t interval = tk->cycle_interval << shift; | 1726 | cycle_t interval = tk->cycle_interval << shift; |
1727 | u64 raw_nsecs; | 1727 | u64 raw_nsecs; |
1728 | 1728 | ||
1729 | /* If the offset is smaller then a shifted interval, do nothing */ | 1729 | /* If the offset is smaller than a shifted interval, do nothing */ |
1730 | if (offset < interval) | 1730 | if (offset < interval) |
1731 | return offset; | 1731 | return offset; |
1732 | 1732 | ||
@@ -2025,7 +2025,7 @@ int do_adjtimex(struct timex *txc) | |||
2025 | /** | 2025 | /** |
2026 | * hardpps() - Accessor function to NTP __hardpps function | 2026 | * hardpps() - Accessor function to NTP __hardpps function |
2027 | */ | 2027 | */ |
2028 | void hardpps(const struct timespec *phase_ts, const struct timespec *raw_ts) | 2028 | void hardpps(const struct timespec64 *phase_ts, const struct timespec64 *raw_ts) |
2029 | { | 2029 | { |
2030 | unsigned long flags; | 2030 | unsigned long flags; |
2031 | 2031 | ||
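
The timespec to timespec64 conversions in this file are part of the y2038 work called out in the merge message: struct timespec carries a time_t seconds field, which is 32 bits wide on many ABIs, whereas timespec64 always uses 64-bit seconds. A small stand-alone illustration of where a signed 32-bit seconds counter runs out (not kernel code):

#include <stdint.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	/* Largest value a signed 32-bit time_t can represent. */
	time_t last = (time_t)INT32_MAX;
	struct tm tm;

	gmtime_r(&last, &tm);
	/* Prints 2038-01-19 03:14:07 UTC; one second later, a 32-bit
	 * counter wraps to negative values. */
	printf("%04d-%02d-%02d %02d:%02d:%02d UTC\n",
	       tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
	       tm.tm_hour, tm.tm_min, tm.tm_sec);
	return 0;
}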
diff --git a/kernel/time/timer.c b/kernel/time/timer.c index 84190f02b521..74591ba9474f 100644 --- a/kernel/time/timer.c +++ b/kernel/time/timer.c | |||
@@ -461,10 +461,17 @@ void __timer_stats_timer_set_start_info(struct timer_list *timer, void *addr) | |||
461 | 461 | ||
462 | static void timer_stats_account_timer(struct timer_list *timer) | 462 | static void timer_stats_account_timer(struct timer_list *timer) |
463 | { | 463 | { |
464 | if (likely(!timer->start_site)) | 464 | void *site; |
465 | |||
466 | /* | ||
467 | * start_site can be concurrently reset by | ||
468 | * timer_stats_timer_clear_start_info() | ||
469 | */ | ||
470 | site = READ_ONCE(timer->start_site); | ||
471 | if (likely(!site)) | ||
465 | return; | 472 | return; |
466 | 473 | ||
467 | timer_stats_update_stats(timer, timer->start_pid, timer->start_site, | 474 | timer_stats_update_stats(timer, timer->start_pid, site, |
468 | timer->function, timer->start_comm, | 475 | timer->function, timer->start_comm, |
469 | timer->flags); | 476 | timer->flags); |
470 | } | 477 | } |
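
The change above is a classic check-then-use fix: timer->start_site is loaded exactly once into a local, so the NULL check and the later use cannot observe two different values if timer_stats_timer_clear_start_info() clears the field concurrently. A user-space sketch of the pattern, with hypothetical names and a C11 atomic load playing the role of READ_ONCE():

#include <stdatomic.h>
#include <stdio.h>

/* Hypothetical record whose start_site may be cleared by another thread. */
struct timer_record {
	void *_Atomic start_site;
	const char *comm;
};

static void account_timer(struct timer_record *t)
{
	/* Snapshot the pointer once; every later use sees the same value. */
	void *site = atomic_load(&t->start_site);

	if (!site)
		return;

	printf("timer started by %s at %p\n", t->comm, site);
}

int main(void)
{
	int marker;
	struct timer_record t = { .comm = "demo" };

	atomic_store(&t.start_site, &marker);
	account_timer(&t);
	return 0;
}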
@@ -867,7 +874,7 @@ unsigned long apply_slack(struct timer_list *timer, unsigned long expires) | |||
867 | if (mask == 0) | 874 | if (mask == 0) |
868 | return expires; | 875 | return expires; |
869 | 876 | ||
870 | bit = find_last_bit(&mask, BITS_PER_LONG); | 877 | bit = __fls(mask); |
871 | 878 | ||
872 | mask = (1UL << bit) - 1; | 879 | mask = (1UL << bit) - 1; |
873 | 880 | ||
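
__fls() returns the index of the most significant set bit (the mask == 0 early return above keeps it from being called with zero), so (1UL << bit) - 1 yields a mask of every lower bit, which apply_slack() then uses to round the timer expiry onto a coarser boundary so nearby timers can be batched. A quick user-space check of that bit arithmetic, with __builtin_clzl standing in for __fls() (illustrative only):

#include <stdio.h>

/* User-space stand-in for the kernel's __fls(): index of the most
 * significant set bit; undefined for word == 0, matching the
 * mask == 0 early return above. */
static unsigned int fls_index(unsigned long word)
{
	return (unsigned int)(sizeof(unsigned long) * 8 - 1) -
	       (unsigned int)__builtin_clzl(word);
}

int main(void)
{
	unsigned long mask = 0x1234UL;          /* example input */
	unsigned int bit = fls_index(mask);     /* 12: bit 12 is the top set bit */
	unsigned long lower = (1UL << bit) - 1; /* 0xfff: all bits below it */

	printf("bit=%u lower=%#lx\n", bit, lower);
	return 0;
}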
diff --git a/tools/testing/selftests/timers/Makefile b/tools/testing/selftests/timers/Makefile index 89a3f44bf355..4a1be1b75a7f 100644 --- a/tools/testing/selftests/timers/Makefile +++ b/tools/testing/selftests/timers/Makefile | |||
@@ -8,7 +8,7 @@ LDFLAGS += -lrt -lpthread | |||
8 | TEST_PROGS = posix_timers nanosleep nsleep-lat set-timer-lat mqueue-lat \ | 8 | TEST_PROGS = posix_timers nanosleep nsleep-lat set-timer-lat mqueue-lat \ |
9 | inconsistency-check raw_skew threadtest rtctest | 9 | inconsistency-check raw_skew threadtest rtctest |
10 | 10 | ||
11 | TEST_PROGS_EXTENDED = alarmtimer-suspend valid-adjtimex change_skew \ | 11 | TEST_PROGS_EXTENDED = alarmtimer-suspend valid-adjtimex adjtick change_skew \ |
12 | skew_consistency clocksource-switch leap-a-day \ | 12 | skew_consistency clocksource-switch leap-a-day \ |
13 | leapcrash set-tai set-2038 | 13 | leapcrash set-tai set-2038 |
14 | 14 | ||
@@ -24,6 +24,7 @@ include ../lib.mk | |||
24 | run_destructive_tests: run_tests | 24 | run_destructive_tests: run_tests |
25 | ./alarmtimer-suspend | 25 | ./alarmtimer-suspend |
26 | ./valid-adjtimex | 26 | ./valid-adjtimex |
27 | ./adjtick | ||
27 | ./change_skew | 28 | ./change_skew |
28 | ./skew_consistency | 29 | ./skew_consistency |
29 | ./clocksource-switch | 30 | ./clocksource-switch |
diff --git a/tools/testing/selftests/timers/adjtick.c b/tools/testing/selftests/timers/adjtick.c new file mode 100644 index 000000000000..9887fd538fec --- /dev/null +++ b/tools/testing/selftests/timers/adjtick.c | |||
@@ -0,0 +1,221 @@ | |||
1 | /* adjtimex() tick adjustment test | ||
2 | * by: John Stultz <john.stultz@linaro.org> | ||
3 | * (C) Copyright Linaro Limited 2015 | ||
4 | * Licensed under the GPLv2 | ||
5 | * | ||
6 | * To build: | ||
7 | * $ gcc adjtick.c -o adjtick -lrt | ||
8 | * | ||
9 | * This program is free software: you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License as published by | ||
11 | * the Free Software Foundation, either version 2 of the License, or | ||
12 | * (at your option) any later version. | ||
13 | * | ||
14 | * This program is distributed in the hope that it will be useful, | ||
15 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
17 | * GNU General Public License for more details. | ||
18 | */ | ||
19 | #include <stdio.h> | ||
20 | #include <unistd.h> | ||
21 | #include <stdlib.h> | ||
22 | #include <sys/time.h> | ||
23 | #include <sys/timex.h> | ||
24 | #include <time.h> | ||
25 | |||
26 | #ifdef KTEST | ||
27 | #include "../kselftest.h" | ||
28 | #else | ||
29 | static inline int ksft_exit_pass(void) | ||
30 | { | ||
31 | exit(0); | ||
32 | } | ||
33 | static inline int ksft_exit_fail(void) | ||
34 | { | ||
35 | exit(1); | ||
36 | } | ||
37 | #endif | ||
38 | |||
39 | #define CLOCK_MONOTONIC_RAW 4 | ||
40 | |||
41 | #define NSEC_PER_SEC 1000000000LL | ||
42 | #define USEC_PER_SEC 1000000 | ||
43 | |||
44 | #define MILLION 1000000 | ||
45 | |||
46 | long systick; | ||
47 | |||
48 | long long llabs(long long val) | ||
49 | { | ||
50 | if (val < 0) | ||
51 | val = -val; | ||
52 | return val; | ||
53 | } | ||
54 | |||
55 | unsigned long long ts_to_nsec(struct timespec ts) | ||
56 | { | ||
57 | return ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec; | ||
58 | } | ||
59 | |||
60 | struct timespec nsec_to_ts(long long ns) | ||
61 | { | ||
62 | struct timespec ts; | ||
63 | |||
64 | ts.tv_sec = ns/NSEC_PER_SEC; | ||
65 | ts.tv_nsec = ns%NSEC_PER_SEC; | ||
66 | |||
67 | return ts; | ||
68 | } | ||
69 | |||
70 | long long diff_timespec(struct timespec start, struct timespec end) | ||
71 | { | ||
72 | long long start_ns, end_ns; | ||
73 | |||
74 | start_ns = ts_to_nsec(start); | ||
75 | end_ns = ts_to_nsec(end); | ||
76 | |||
77 | return end_ns - start_ns; | ||
78 | } | ||
79 | |||
80 | void get_monotonic_and_raw(struct timespec *mon, struct timespec *raw) | ||
81 | { | ||
82 | struct timespec start, mid, end; | ||
83 | long long diff = 0, tmp; | ||
84 | int i; | ||
85 | |||
86 | clock_gettime(CLOCK_MONOTONIC, mon); | ||
87 | clock_gettime(CLOCK_MONOTONIC_RAW, raw); | ||
88 | |||
89 | /* Try to get a more tightly bound pairing */ | ||
90 | for (i = 0; i < 3; i++) { | ||
91 | long long newdiff; | ||
92 | |||
93 | clock_gettime(CLOCK_MONOTONIC, &start); | ||
94 | clock_gettime(CLOCK_MONOTONIC_RAW, &mid); | ||
95 | clock_gettime(CLOCK_MONOTONIC, &end); | ||
96 | |||
97 | newdiff = diff_timespec(start, end); | ||
98 | if (diff == 0 || newdiff < diff) { | ||
99 | diff = newdiff; | ||
100 | *raw = mid; | ||
101 | tmp = (ts_to_nsec(start) + ts_to_nsec(end))/2; | ||
102 | *mon = nsec_to_ts(tmp); | ||
103 | } | ||
104 | } | ||
105 | } | ||
106 | |||
107 | long long get_ppm_drift(void) | ||
108 | { | ||
109 | struct timespec mon_start, raw_start, mon_end, raw_end; | ||
110 | long long delta1, delta2, eppm; | ||
111 | |||
112 | get_monotonic_and_raw(&mon_start, &raw_start); | ||
113 | |||
114 | sleep(15); | ||
115 | |||
116 | get_monotonic_and_raw(&mon_end, &raw_end); | ||
117 | |||
118 | delta1 = diff_timespec(mon_start, mon_end); | ||
119 | delta2 = diff_timespec(raw_start, raw_end); | ||
120 | |||
121 | eppm = (delta1*MILLION)/delta2 - MILLION; | ||
122 | |||
123 | return eppm; | ||
124 | } | ||
125 | |||
126 | int check_tick_adj(long tickval) | ||
127 | { | ||
128 | long long eppm, ppm; | ||
129 | struct timex tx1; | ||
130 | |||
131 | tx1.modes = ADJ_TICK; | ||
132 | tx1.modes |= ADJ_OFFSET; | ||
133 | tx1.modes |= ADJ_FREQUENCY; | ||
134 | tx1.modes |= ADJ_STATUS; | ||
135 | |||
136 | tx1.status = STA_PLL; | ||
137 | tx1.offset = 0; | ||
138 | tx1.freq = 0; | ||
139 | tx1.tick = tickval; | ||
140 | |||
141 | adjtimex(&tx1); | ||
142 | |||
143 | sleep(1); | ||
144 | |||
145 | ppm = ((long long)tickval * MILLION)/systick - MILLION; | ||
146 | printf("Estimating tick (act: %ld usec, %lld ppm): ", tickval, ppm); | ||
147 | |||
148 | eppm = get_ppm_drift(); | ||
149 | printf("%lld usec, %lld ppm", systick + (systick * eppm / MILLION), eppm); | ||
150 | |||
151 | tx1.modes = 0; | ||
152 | adjtimex(&tx1); | ||
153 | |||
154 | if (tx1.offset || tx1.freq || tx1.tick != tickval) { | ||
155 | printf(" [ERROR]\n"); | ||
156 | printf("\tUnexpected adjtimex return values, make sure ntpd is not running.\n"); | ||
157 | return -1; | ||
158 | } | ||
159 | |||
160 | /* | ||
161 | * Here we use 100ppm difference as an error bound. | ||
162 | * We likely should see better, but some coarse clocksources | ||
163 | * cannot match the HZ tick size accurately, so we have an | ||
164 | * internal correction factor that doesn't scale exactly | ||
165 | * with the adjustment, resulting in > 10ppm error during | ||
166 | * a 10% adjustment. 100ppm also gives us more breathing | ||
167 | * room for interruptions during the measurement. | ||
168 | */ | ||
169 | if (llabs(eppm - ppm) > 100) { | ||
170 | printf(" [FAILED]\n"); | ||
171 | return -1; | ||
172 | } | ||
173 | printf(" [OK]\n"); | ||
174 | |||
175 | return 0; | ||
176 | } | ||
177 | |||
178 | int main(int argv, char **argc) | ||
179 | { | ||
180 | struct timespec raw; | ||
181 | long tick, max, interval, err; | ||
182 | struct timex tx1; | ||
183 | |||
184 | err = 0; | ||
185 | setbuf(stdout, NULL); | ||
186 | |||
187 | if (clock_gettime(CLOCK_MONOTONIC_RAW, &raw)) { | ||
188 | printf("ERR: NO CLOCK_MONOTONIC_RAW\n"); | ||
189 | return -1; | ||
190 | } | ||
191 | |||
192 | printf("Each iteration takes about 15 seconds\n"); | ||
193 | |||
194 | systick = sysconf(_SC_CLK_TCK); | ||
195 | systick = USEC_PER_SEC/sysconf(_SC_CLK_TCK); | ||
196 | max = systick/10; /* +/- 10% */ | ||
197 | interval = max/4; /* in 4 steps each side */ | ||
198 | |||
199 | for (tick = (systick - max); tick < (systick + max); tick += interval) { | ||
200 | if (check_tick_adj(tick)) { | ||
201 | err = 1; | ||
202 | break; | ||
203 | } | ||
204 | } | ||
205 | |||
206 | /* Reset things to zero */ | ||
207 | tx1.modes = ADJ_TICK; | ||
208 | tx1.modes |= ADJ_OFFSET; | ||
209 | tx1.modes |= ADJ_FREQUENCY; | ||
210 | |||
211 | tx1.offset = 0; | ||
212 | tx1.freq = 0; | ||
213 | tx1.tick = systick; | ||
214 | |||
215 | adjtimex(&tx1); | ||
216 | |||
217 | if (err) | ||
218 | return ksft_exit_fail(); | ||
219 | |||
220 | return ksft_exit_pass(); | ||
221 | } | ||
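
check_tick_adj() compares the requested frequency offset against the measured one, both in parts per million. For example, with HZ == 100 the nominal tick is systick = 1000000/100 = 10000 usec, so asking for tick = 9000 requests (9000 * 1000000)/10000 - 1000000 = -100000 ppm, i.e. a clock running 10% slow, and the eppm measured from CLOCK_MONOTONIC versus CLOCK_MONOTONIC_RAW must land within 100 ppm of that. A tiny stand-alone check of the arithmetic (HZ = 100 is only an assumed example, not something the selftest hardcodes):

#include <stdio.h>

#define MILLION 1000000

int main(void)
{
	long systick = MILLION / 100;  /* nominal tick for an assumed HZ of 100 */
	long tickval = 9000;           /* 10% below nominal */
	long long ppm;

	ppm = ((long long)tickval * MILLION) / systick - MILLION;

	/* Prints -100000: shortening the tick by 10% slows the clock
	 * by 100000 ppm. */
	printf("requested ppm = %lld\n", ppm);
	return 0;
}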