path: root/kernel/time
author     Linus Torvalds <torvalds@linux-foundation.org>  2013-11-11 20:36:00 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-11-11 20:36:00 -0500
commit     87093826aa0172d9135ca1f301c4298a258ceee6 (patch)
tree       c73f1db366d6f616a81c9c6a8a9611208a6c05ac /kernel/time
parent     39cf275a1a18ba3c7eb9b986c5c9b35b57332798 (diff)
parent     ee5872befc9324fa4c2583c24d7ee7120314a2b7 (diff)
Merge branch 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull timer changes from Ingo Molnar:
 "Main changes in this cycle were:

   - Updated full dynticks support.

   - Event stream support for architected (ARM) timers.

   - ARM clocksource driver updates.

   - Move arm64 to using the generic sched_clock framework & resulting
     cleanup in the generic sched_clock code.

   - Misc fixes and cleanups"

* 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (50 commits)
  x86/time: Honor ACPI FADT flag indicating absence of a CMOS RTC
  clocksource: sun4i: remove IRQF_DISABLED
  clocksource: sun4i: Report the minimum tick that we can program
  clocksource: sun4i: Select CLKSRC_MMIO
  clocksource: Provide timekeeping for efm32 SoCs
  clocksource: em_sti: convert to clk_prepare/unprepare
  time: Fix signedness bug in sysfs_get_uname() and its callers
  timekeeping: Fix some trivial typos in comments
  alarmtimer: return EINVAL instead of ENOTSUPP if rtcdev doesn't exist
  clocksource: arch_timer: Do not register arch_sys_counter twice
  timer stats: Add a 'Collection: active/inactive' line to timer usage statistics
  sched_clock: Remove sched_clock_func() hook
  arch_timer: Move to generic sched_clock framework
  clocksource: tcb_clksrc: Remove IRQF_DISABLED
  clocksource: tcb_clksrc: Improve driver robustness
  clocksource: tcb_clksrc: Replace clk_enable/disable with clk_prepare_enable/disable_unprepare
  clocksource: arm_arch_timer: Use clocksource for suspend timekeeping
  clocksource: dw_apb_timer_of: Mark a few more functions as __init
  clocksource: Put nodes passed to CLOCKSOURCE_OF_DECLARE callbacks centrally
  arm: zynq: Enable arm_global_timer
  ...
Diffstat (limited to 'kernel/time')
-rw-r--r--  kernel/time/Kconfig             2
-rw-r--r--  kernel/time/alarmtimer.c        4
-rw-r--r--  kernel/time/clockevents.c       2
-rw-r--r--  kernel/time/clocksource.c      52
-rw-r--r--  kernel/time/ntp.c               3
-rw-r--r--  kernel/time/sched_clock.c     114
-rw-r--r--  kernel/time/tick-broadcast.c    1
-rw-r--r--  kernel/time/tick-internal.h     2
-rw-r--r--  kernel/time/timekeeping.c       3
-rw-r--r--  kernel/time/timer_stats.c       8
10 files changed, 107 insertions, 84 deletions
diff --git a/kernel/time/Kconfig b/kernel/time/Kconfig
index 2b62fe86f9ec..3ce6e8c5f3fc 100644
--- a/kernel/time/Kconfig
+++ b/kernel/time/Kconfig
@@ -100,7 +100,7 @@ config NO_HZ_FULL
 	# RCU_USER_QS dependency
 	depends on HAVE_CONTEXT_TRACKING
 	# VIRT_CPU_ACCOUNTING_GEN dependency
-	depends on 64BIT
+	depends on HAVE_VIRT_CPU_ACCOUNTING_GEN
 	select NO_HZ_COMMON
 	select RCU_USER_QS
 	select RCU_NOCB_CPU
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index eec50fcef9e4..88c9c65a430d 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -490,7 +490,7 @@ static int alarm_clock_getres(const clockid_t which_clock, struct timespec *tp)
 	clockid_t baseid = alarm_bases[clock2alarm(which_clock)].base_clockid;
 
 	if (!alarmtimer_get_rtcdev())
-		return -ENOTSUPP;
+		return -EINVAL;
 
 	return hrtimer_get_res(baseid, tp);
 }
@@ -507,7 +507,7 @@ static int alarm_clock_get(clockid_t which_clock, struct timespec *tp)
 	struct alarm_base *base = &alarm_bases[clock2alarm(which_clock)];
 
 	if (!alarmtimer_get_rtcdev())
-		return -ENOTSUPP;
+		return -EINVAL;
 
 	*tp = ktime_to_timespec(base->gettime());
 	return 0;
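
Note: this errno change is user-visible. POSIX specifies EINVAL for clock_getres()/clock_gettime() failures, whereas ENOTSUPP (524) is a kernel-internal value with no userspace definition. A minimal probe sketch, assuming a kernel without a usable RTC and a libc that exposes CLOCK_BOOTTIME_ALARM:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <time.h>

int main(void)
{
	struct timespec res;

	/* With no rtcdev registered, this now fails with EINVAL
	 * rather than the nonstandard ENOTSUPP (524). */
	if (clock_getres(CLOCK_BOOTTIME_ALARM, &res) == -1)
		printf("clock_getres failed: %s\n",
		       errno == EINVAL ? "EINVAL" : strerror(errno));
	else
		printf("resolution: %ld ns\n", res.tv_nsec);
	return 0;
}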
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 662c5798a685..086ad6043bcb 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -619,7 +619,7 @@ static ssize_t sysfs_unbind_tick_dev(struct device *dev,
 				    const char *buf, size_t count)
 {
 	char name[CS_NAME_LEN];
-	size_t ret = sysfs_get_uname(buf, name, count);
+	ssize_t ret = sysfs_get_uname(buf, name, count);
 	struct clock_event_device *ce;
 
 	if (ret < 0)
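
The size_t to ssize_t change here (and in clocksource.c below) fixes a classic signedness bug: sysfs_get_uname() returns a negative errno on failure, but storing that in an unsigned size_t makes the "ret < 0" test always false. A self-contained illustration of the pattern (not kernel code):

#include <stdio.h>
#include <sys/types.h>

/* stand-in for a function returning a length or a negative errno */
static ssize_t might_fail(void)
{
	return -22;	/* -EINVAL */
}

int main(void)
{
	size_t  u = might_fail();	/* -22 wraps to a huge positive value */
	ssize_t s = might_fail();

	printf("unsigned: error detected = %d\n", u < 0);	/* 0: dead test */
	printf("signed:   error detected = %d\n", s < 0);	/* 1 */
	return 0;
}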
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 50a8736757f3..ba3e502c955a 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -479,6 +479,7 @@ static inline void clocksource_dequeue_watchdog(struct clocksource *cs) { }
 static inline void clocksource_resume_watchdog(void) { }
 static inline int __clocksource_watchdog_kthread(void) { return 0; }
 static bool clocksource_is_watchdog(struct clocksource *cs) { return false; }
+void clocksource_mark_unstable(struct clocksource *cs) { }
 
 #endif /* CONFIG_CLOCKSOURCE_WATCHDOG */
 
@@ -537,40 +538,55 @@ static u32 clocksource_max_adjustment(struct clocksource *cs)
 }
 
 /**
- * clocksource_max_deferment - Returns max time the clocksource can be deferred
- * @cs:	Pointer to clocksource
- *
+ * clocks_calc_max_nsecs - Returns maximum nanoseconds that can be converted
+ * @mult:	cycle to nanosecond multiplier
+ * @shift:	cycle to nanosecond divisor (power of two)
+ * @maxadj:	maximum adjustment value to mult (~11%)
+ * @mask:	bitmask for two's complement subtraction of non 64 bit counters
  */
-static u64 clocksource_max_deferment(struct clocksource *cs)
+u64 clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask)
 {
 	u64 max_nsecs, max_cycles;
 
 	/*
 	 * Calculate the maximum number of cycles that we can pass to the
 	 * cyc2ns function without overflowing a 64-bit signed result. The
-	 * maximum number of cycles is equal to ULLONG_MAX/(cs->mult+cs->maxadj)
+	 * maximum number of cycles is equal to ULLONG_MAX/(mult+maxadj)
 	 * which is equivalent to the below.
-	 * max_cycles < (2^63)/(cs->mult + cs->maxadj)
-	 * max_cycles < 2^(log2((2^63)/(cs->mult + cs->maxadj)))
-	 * max_cycles < 2^(log2(2^63) - log2(cs->mult + cs->maxadj))
-	 * max_cycles < 2^(63 - log2(cs->mult + cs->maxadj))
-	 * max_cycles < 1 << (63 - log2(cs->mult + cs->maxadj))
+	 * max_cycles < (2^63)/(mult + maxadj)
+	 * max_cycles < 2^(log2((2^63)/(mult + maxadj)))
+	 * max_cycles < 2^(log2(2^63) - log2(mult + maxadj))
+	 * max_cycles < 2^(63 - log2(mult + maxadj))
+	 * max_cycles < 1 << (63 - log2(mult + maxadj))
 	 * Please note that we add 1 to the result of the log2 to account for
 	 * any rounding errors, ensure the above inequality is satisfied and
 	 * no overflow will occur.
 	 */
-	max_cycles = 1ULL << (63 - (ilog2(cs->mult + cs->maxadj) + 1));
+	max_cycles = 1ULL << (63 - (ilog2(mult + maxadj) + 1));
 
 	/*
 	 * The actual maximum number of cycles we can defer the clocksource is
-	 * determined by the minimum of max_cycles and cs->mask.
+	 * determined by the minimum of max_cycles and mask.
 	 * Note: Here we subtract the maxadj to make sure we don't sleep for
 	 * too long if there's a large negative adjustment.
 	 */
-	max_cycles = min_t(u64, max_cycles, (u64) cs->mask);
-	max_nsecs = clocksource_cyc2ns(max_cycles, cs->mult - cs->maxadj,
-				       cs->shift);
+	max_cycles = min(max_cycles, mask);
+	max_nsecs = clocksource_cyc2ns(max_cycles, mult - maxadj, shift);
+
+	return max_nsecs;
+}
+
+/**
+ * clocksource_max_deferment - Returns max time the clocksource can be deferred
+ * @cs:	Pointer to clocksource
+ *
+ */
+static u64 clocksource_max_deferment(struct clocksource *cs)
+{
+	u64 max_nsecs;
 
+	max_nsecs = clocks_calc_max_nsecs(cs->mult, cs->shift, cs->maxadj,
+					  cs->mask);
 	/*
 	 * To ensure that the clocksource does not wrap whilst we are idle,
 	 * limit the time the clocksource can be deferred by 12.5%. Please
@@ -893,7 +909,7 @@ sysfs_show_current_clocksources(struct device *dev,
 	return count;
 }
 
-size_t sysfs_get_uname(const char *buf, char *dst, size_t cnt)
+ssize_t sysfs_get_uname(const char *buf, char *dst, size_t cnt)
 {
 	size_t ret = cnt;
 
@@ -924,7 +940,7 @@ static ssize_t sysfs_override_clocksource(struct device *dev,
 					   struct device_attribute *attr,
 					   const char *buf, size_t count)
 {
-	size_t ret;
+	ssize_t ret;
 
 	mutex_lock(&clocksource_mutex);
 
@@ -952,7 +968,7 @@ static ssize_t sysfs_unbind_clocksource(struct device *dev,
 {
 	struct clocksource *cs;
 	char name[CS_NAME_LEN];
-	size_t ret;
+	ssize_t ret;
 
 	ret = sysfs_get_uname(buf, name, count);
 	if (ret < 0)
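
As a sanity check on the factored-out helper, here is a standalone sketch of the clocks_calc_max_nsecs() arithmetic, using illustrative values for a hypothetical ~19.2 MHz, 56-bit counter; the mult/shift/maxadj numbers below are made up for the example, not taken from any real clocksource:

#include <stdint.h>
#include <stdio.h>

static int ilog2_u32(uint32_t v)	/* floor(log2(v)), v > 0 */
{
	int r = -1;

	while (v) {
		v >>= 1;
		r++;
	}
	return r;
}

static uint64_t cyc2ns(uint64_t cyc, uint32_t mult, uint32_t shift)
{
	return (cyc * mult) >> shift;
}

static uint64_t calc_max_nsecs(uint32_t mult, uint32_t shift,
			       uint32_t maxadj, uint64_t mask)
{
	/* largest cycle count whose cyc2ns result fits in 63 bits */
	uint64_t max_cycles = 1ULL << (63 - (ilog2_u32(mult + maxadj) + 1));

	if (max_cycles > mask)
		max_cycles = mask;
	/* use mult - maxadj so a large negative adjustment can't overrun */
	return cyc2ns(max_cycles, mult - maxadj, shift);
}

int main(void)
{
	/* ~52.08 ns per 19.2 MHz tick: mult/2^shift ~= 26667/512 */
	uint32_t mult = 26667, shift = 9, maxadj = 2933;	/* ~11% */
	uint64_t mask = (1ULL << 56) - 1;			/* 56-bit counter */

	/* here the 2^63 overflow bound, not the mask, is the limit */
	printf("max deferment: %llu ns\n",
	       (unsigned long long)calc_max_nsecs(mult, shift, maxadj, mask));
	return 0;
}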
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index bb2215174f05..af8d1d4f3d55 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -475,6 +475,7 @@ static void sync_cmos_clock(struct work_struct *work)
 	 * called as close as possible to 500 ms before the new second starts.
 	 * This code is run on a timer.  If the clock is set, that timer
 	 * may not expire at the correct time.  Thus, we adjust...
+	 * We want the clock to be within a couple of ticks from the target.
 	 */
 	if (!ntp_synced()) {
 		/*
@@ -485,7 +486,7 @@ static void sync_cmos_clock(struct work_struct *work)
 	}
 
 	getnstimeofday(&now);
-	if (abs(now.tv_nsec - (NSEC_PER_SEC / 2)) <= tick_nsec / 2) {
+	if (abs(now.tv_nsec - (NSEC_PER_SEC / 2)) <= tick_nsec * 5) {
 		struct timespec adjust = now;
 
 		fail = -ENODEV;
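
A back-of-envelope check of the widened tolerance, assuming HZ=100 so tick_nsec is about 10 ms: the window around the 500 ms target grows from +/-5 ms (tick_nsec / 2) to +/-50 ms (tick_nsec * 5), so a work item that fires at, say, 530 ms past the second now still qualifies:

#include <stdio.h>
#include <stdlib.h>

#define NSEC_PER_SEC 1000000000L

int main(void)
{
	long tick_nsec = NSEC_PER_SEC / 100;	/* HZ = 100 */
	long old_win = tick_nsec / 2;		/* +/- 5 ms  */
	long new_win = tick_nsec * 5;		/* +/- 50 ms */
	long tv_nsec = 530000000;		/* fired at 530 ms */

	printf("old window: +/-%ld ms, new window: +/-%ld ms\n",
	       old_win / 1000000, new_win / 1000000);
	printf("530 ms qualifies: old=%d new=%d\n",
	       labs(tv_nsec - NSEC_PER_SEC / 2) <= old_win,
	       labs(tv_nsec - NSEC_PER_SEC / 2) <= new_win);
	return 0;
}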
diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c
index 0b479a6a22bb..68b799375981 100644
--- a/kernel/time/sched_clock.c
+++ b/kernel/time/sched_clock.c
@@ -8,25 +8,28 @@
 #include <linux/clocksource.h>
 #include <linux/init.h>
 #include <linux/jiffies.h>
+#include <linux/ktime.h>
 #include <linux/kernel.h>
 #include <linux/moduleparam.h>
 #include <linux/sched.h>
 #include <linux/syscore_ops.h>
-#include <linux/timer.h>
+#include <linux/hrtimer.h>
 #include <linux/sched_clock.h>
+#include <linux/seqlock.h>
+#include <linux/bitops.h>
 
 struct clock_data {
+	ktime_t wrap_kt;
 	u64 epoch_ns;
-	u32 epoch_cyc;
-	u32 epoch_cyc_copy;
+	u64 epoch_cyc;
+	seqcount_t seq;
 	unsigned long rate;
 	u32 mult;
 	u32 shift;
 	bool suspended;
 };
 
-static void sched_clock_poll(unsigned long wrap_ticks);
-static DEFINE_TIMER(sched_clock_timer, sched_clock_poll, 0, 0);
+static struct hrtimer sched_clock_timer;
 static int irqtime = -1;
 
 core_param(irqtime, irqtime, int, 0400);
@@ -35,42 +38,46 @@ static struct clock_data cd = {
 	.mult	= NSEC_PER_SEC / HZ,
 };
 
-static u32 __read_mostly sched_clock_mask = 0xffffffff;
+static u64 __read_mostly sched_clock_mask;
 
-static u32 notrace jiffy_sched_clock_read(void)
+static u64 notrace jiffy_sched_clock_read(void)
 {
-	return (u32)(jiffies - INITIAL_JIFFIES);
+	/*
+	 * We don't need to use get_jiffies_64 on 32-bit arches here
+	 * because we register with BITS_PER_LONG
+	 */
+	return (u64)(jiffies - INITIAL_JIFFIES);
 }
 
-static u32 __read_mostly (*read_sched_clock)(void) = jiffy_sched_clock_read;
+static u32 __read_mostly (*read_sched_clock_32)(void);
+
+static u64 notrace read_sched_clock_32_wrapper(void)
+{
+	return read_sched_clock_32();
+}
+
+static u64 __read_mostly (*read_sched_clock)(void) = jiffy_sched_clock_read;
 
 static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift)
 {
 	return (cyc * mult) >> shift;
 }
 
-static unsigned long long notrace sched_clock_32(void)
+unsigned long long notrace sched_clock(void)
 {
 	u64 epoch_ns;
-	u32 epoch_cyc;
-	u32 cyc;
+	u64 epoch_cyc;
+	u64 cyc;
+	unsigned long seq;
 
 	if (cd.suspended)
 		return cd.epoch_ns;
 
-	/*
-	 * Load the epoch_cyc and epoch_ns atomically. We do this by
-	 * ensuring that we always write epoch_cyc, epoch_ns and
-	 * epoch_cyc_copy in strict order, and read them in strict order.
-	 * If epoch_cyc and epoch_cyc_copy are not equal, then we're in
-	 * the middle of an update, and we should repeat the load.
-	 */
 	do {
+		seq = read_seqcount_begin(&cd.seq);
 		epoch_cyc = cd.epoch_cyc;
-		smp_rmb();
 		epoch_ns = cd.epoch_ns;
-		smp_rmb();
-	} while (epoch_cyc != cd.epoch_cyc_copy);
+	} while (read_seqcount_retry(&cd.seq, seq));
 
 	cyc = read_sched_clock();
 	cyc = (cyc - epoch_cyc) & sched_clock_mask;
@@ -83,49 +90,46 @@ static unsigned long long notrace sched_clock_32(void)
 static void notrace update_sched_clock(void)
 {
 	unsigned long flags;
-	u32 cyc;
+	u64 cyc;
 	u64 ns;
 
 	cyc = read_sched_clock();
 	ns = cd.epoch_ns +
 		cyc_to_ns((cyc - cd.epoch_cyc) & sched_clock_mask,
 			  cd.mult, cd.shift);
-	/*
-	 * Write epoch_cyc and epoch_ns in a way that the update is
-	 * detectable in cyc_to_fixed_sched_clock().
-	 */
+
 	raw_local_irq_save(flags);
-	cd.epoch_cyc_copy = cyc;
-	smp_wmb();
+	write_seqcount_begin(&cd.seq);
 	cd.epoch_ns = ns;
-	smp_wmb();
 	cd.epoch_cyc = cyc;
+	write_seqcount_end(&cd.seq);
 	raw_local_irq_restore(flags);
 }
 
-static void sched_clock_poll(unsigned long wrap_ticks)
+static enum hrtimer_restart sched_clock_poll(struct hrtimer *hrt)
 {
-	mod_timer(&sched_clock_timer, round_jiffies(jiffies + wrap_ticks));
 	update_sched_clock();
+	hrtimer_forward_now(hrt, cd.wrap_kt);
+	return HRTIMER_RESTART;
 }
 
-void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
+void __init sched_clock_register(u64 (*read)(void), int bits,
+				 unsigned long rate)
 {
-	unsigned long r, w;
+	unsigned long r;
 	u64 res, wrap;
 	char r_unit;
 
 	if (cd.rate > rate)
 		return;
 
-	BUG_ON(bits > 32);
 	WARN_ON(!irqs_disabled());
 	read_sched_clock = read;
-	sched_clock_mask = (1ULL << bits) - 1;
+	sched_clock_mask = CLOCKSOURCE_MASK(bits);
 	cd.rate = rate;
 
 	/* calculate the mult/shift to convert counter ticks to ns. */
-	clocks_calc_mult_shift(&cd.mult, &cd.shift, rate, NSEC_PER_SEC, 0);
+	clocks_calc_mult_shift(&cd.mult, &cd.shift, rate, NSEC_PER_SEC, 3600);
 
 	r = rate;
 	if (r >= 4000000) {
@@ -138,20 +142,14 @@ void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
 	r_unit = ' ';
 
 	/* calculate how many ns until we wrap */
-	wrap = cyc_to_ns((1ULL << bits) - 1, cd.mult, cd.shift);
-	do_div(wrap, NSEC_PER_MSEC);
-	w = wrap;
+	wrap = clocks_calc_max_nsecs(cd.mult, cd.shift, 0, sched_clock_mask);
+	cd.wrap_kt = ns_to_ktime(wrap - (wrap >> 3));
 
 	/* calculate the ns resolution of this counter */
 	res = cyc_to_ns(1ULL, cd.mult, cd.shift);
-	pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lums\n",
-		bits, r, r_unit, res, w);
+	pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lluns\n",
+		bits, r, r_unit, res, wrap);
 
-	/*
-	 * Start the timer to keep sched_clock() properly updated and
-	 * sets the initial epoch.
-	 */
-	sched_clock_timer.data = msecs_to_jiffies(w - (w / 10));
 	update_sched_clock();
 
 	/*
@@ -166,11 +164,10 @@ void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
 	pr_debug("Registered %pF as sched_clock source\n", read);
 }
 
-unsigned long long __read_mostly (*sched_clock_func)(void) = sched_clock_32;
-
-unsigned long long notrace sched_clock(void)
+void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
 {
-	return sched_clock_func();
+	read_sched_clock_32 = read;
+	sched_clock_register(read_sched_clock_32_wrapper, bits, rate);
 }
 
 void __init sched_clock_postinit(void)
@@ -180,14 +177,22 @@ void __init sched_clock_postinit(void)
 	 * make it the final one one.
 	 */
 	if (read_sched_clock == jiffy_sched_clock_read)
-		setup_sched_clock(jiffy_sched_clock_read, 32, HZ);
+		sched_clock_register(jiffy_sched_clock_read, BITS_PER_LONG, HZ);
 
-	sched_clock_poll(sched_clock_timer.data);
+	update_sched_clock();
+
+	/*
+	 * Start the timer to keep sched_clock() properly updated and
+	 * sets the initial epoch.
+	 */
+	hrtimer_init(&sched_clock_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	sched_clock_timer.function = sched_clock_poll;
+	hrtimer_start(&sched_clock_timer, cd.wrap_kt, HRTIMER_MODE_REL);
 }
 
 static int sched_clock_suspend(void)
 {
-	sched_clock_poll(sched_clock_timer.data);
+	sched_clock_poll(&sched_clock_timer);
 	cd.suspended = true;
 	return 0;
 }
@@ -195,7 +200,6 @@ static int sched_clock_suspend(void)
 static void sched_clock_resume(void)
 {
 	cd.epoch_cyc = read_sched_clock();
-	cd.epoch_cyc_copy = cd.epoch_cyc;
 	cd.suspended = false;
 }
 
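
The conversion above replaces the hand-rolled epoch_cyc/epoch_cyc_copy ordering protocol with a standard seqcount: the writer bumps the sequence around the update, and readers retry if they observe a change. A didactic userspace analogue of the pattern using C11 atomics (deliberately simplified; the kernel's <linux/seqlock.h> primitives handle memory-ordering and lockdep details this sketch glosses over):

#include <stdatomic.h>
#include <stdint.h>

struct epoch {
	atomic_uint seq;	/* odd while an update is in flight */
	uint64_t ns;
	uint64_t cyc;
};

/* writer side: mirrors write_seqcount_begin()/end() */
static void epoch_update(struct epoch *e, uint64_t ns, uint64_t cyc)
{
	atomic_fetch_add(&e->seq, 1);	/* begin: seq becomes odd */
	e->ns = ns;
	e->cyc = cyc;
	atomic_fetch_add(&e->seq, 1);	/* end: seq becomes even again */
}

/* reader side: mirrors read_seqcount_begin()/retry() */
static void epoch_snapshot(struct epoch *e, uint64_t *ns, uint64_t *cyc)
{
	unsigned int seq;

	do {
		while ((seq = atomic_load(&e->seq)) & 1)
			;		/* writer in progress, wait */
		*ns = e->ns;		/* possibly torn snapshot... */
		*cyc = e->cyc;
	} while (atomic_load(&e->seq) != seq);	/* ...but retry discards it */
}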
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 218bcb565fed..9532690daaa9 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -70,6 +70,7 @@ static bool tick_check_broadcast_device(struct clock_event_device *curdev,
 					struct clock_event_device *newdev)
 {
 	if ((newdev->features & CLOCK_EVT_FEAT_DUMMY) ||
+	    (newdev->features & CLOCK_EVT_FEAT_PERCPU) ||
 	    (newdev->features & CLOCK_EVT_FEAT_C3STOP))
 		return false;
 
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index bc906cad709b..18e71f7fbc2a 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -31,7 +31,7 @@ extern void tick_install_replacement(struct clock_event_device *dev);
 
 extern void clockevents_shutdown(struct clock_event_device *dev);
 
-extern size_t sysfs_get_uname(const char *buf, char *dst, size_t cnt);
+extern ssize_t sysfs_get_uname(const char *buf, char *dst, size_t cnt);
 
 /*
  * NO_HZ / high resolution timer shared code
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 947ba25a95a0..3abf53418b67 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -1613,9 +1613,10 @@ void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim,
  * ktime_get_update_offsets - hrtimer helper
  * @offs_real:	pointer to storage for monotonic -> realtime offset
  * @offs_boot:	pointer to storage for monotonic -> boottime offset
+ * @offs_tai:	pointer to storage for monotonic -> clock tai offset
  *
  * Returns current monotonic time and updates the offsets
- * Called from hrtimer_interupt() or retrigger_next_event()
+ * Called from hrtimer_interrupt() or retrigger_next_event()
  */
 ktime_t ktime_get_update_offsets(ktime_t *offs_real, ktime_t *offs_boot,
 							ktime_t *offs_tai)
diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
index 0b537f27b559..1fb08f21302e 100644
--- a/kernel/time/timer_stats.c
+++ b/kernel/time/timer_stats.c
@@ -298,15 +298,15 @@ static int tstats_show(struct seq_file *m, void *v)
 	period = ktime_to_timespec(time);
 	ms = period.tv_nsec / 1000000;
 
-	seq_puts(m, "Timer Stats Version: v0.2\n");
+	seq_puts(m, "Timer Stats Version: v0.3\n");
 	seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
 	if (atomic_read(&overflow_count))
-		seq_printf(m, "Overflow: %d entries\n",
-			   atomic_read(&overflow_count));
+		seq_printf(m, "Overflow: %d entries\n", atomic_read(&overflow_count));
+	seq_printf(m, "Collection: %s\n", timer_stats_active ? "active" : "inactive");
 
 	for (i = 0; i < nr_entries; i++) {
 		entry = entries + i;
 		if (entry->timer_flag & TIMER_STATS_FLAG_DEFERRABLE) {
 			seq_printf(m, "%4luD, %5d %-16s ",
 				entry->count, entry->pid, entry->comm);
 		} else {
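
With the two changes above, the header of /proc/timer_stats gains a Collection line. Composed from the format strings in this hunk, with illustrative sample values, it would read:

Timer Stats Version: v0.3
Sample period: 3.888 s
Collection: active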