author	Stephen Boyd <sboyd@codeaurora.org>	2013-07-18 19:21:17 -0400
committer	John Stultz <john.stultz@linaro.org>	2013-07-30 14:24:21 -0400
commit	e7e3ff1bfe9c42ee31172e9afdc0383a9e595e29 (patch)
tree	3d3adc5cb561e0923947b7b200c29ddc79e050f5 /kernel/time
parent	a08ca5d1089da03724f96fa0870c64968e66765b (diff)
sched_clock: Add support for >32 bit sched_clock
The ARM architected system counter has at least 56 usable bits. Add
support for counters with more than 32 bits to the generic sched_clock
implementation so we can increase the time between wakeups due to
dealing with wrap-around on these devices while benefiting from the
irqtime accounting and suspend/resume handling that the generic
sched_clock code already has. On my system using 56 bits over 32 bits
changes the wraparound time from a few minutes to an hour. For faster
running counters (GHz range) this is even more important because we
may not be able to execute the timer in time to deal with the
wraparound if only 32 bits are used.

We choose a maxsec value of 3600 seconds because we assume no system
will go idle for more than an hour. In the future we may need to
increase this value.

Note: All users should switch over to the 64-bit read function so we
can remove setup_sched_clock() in favor of sched_clock_register().

Cc: Russell King <linux@arm.linux.org.uk>
Signed-off-by: Stephen Boyd <sboyd@codeaurora.org>
Signed-off-by: John Stultz <john.stultz@linaro.org>
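To see where the numbers in the message come from: the raw wrap period of an
N-bit counter is 2^N / rate. The sketch below is a plain userspace
back-of-the-envelope check, assuming a hypothetical 19.2 MHz counter (the
patch does not state the author's counter frequency). Note that with 56 bits
the practical horizon is bounded by the 3600 s maxsec passed to
clocks_calc_mult_shift(), not by the counter width itself.

/* Userspace sketch only; the 19.2 MHz rate is an assumption. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const double rate = 19200000.0;		/* assumed counter rate, Hz */
	double wrap32 = (double)(1ULL << 32) / rate;
	double wrap56 = (double)(1ULL << 56) / rate;

	/* 32 bits: ~224 s, i.e. "a few minutes" */
	printf("32-bit wrap: %.0f s (%.1f min)\n", wrap32, wrap32 / 60.0);
	/* 56 bits: decades of raw range; the usable horizon is instead
	 * capped by the 3600 s maxsec given to clocks_calc_mult_shift(),
	 * i.e. "an hour" */
	printf("56-bit wrap: %.3g s\n", wrap56);
	return 0;
}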
Diffstat (limited to 'kernel/time')
-rw-r--r--	kernel/time/sched_clock.c	|  46 ++++++++++++++++++++++++++++++++--------------
1 file changed, 32 insertions(+), 14 deletions(-)
diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c
index c018ffc59937..f388baeaf2b6 100644
--- a/kernel/time/sched_clock.c
+++ b/kernel/time/sched_clock.c
@@ -16,11 +16,12 @@
 #include <linux/hrtimer.h>
 #include <linux/sched_clock.h>
 #include <linux/seqlock.h>
+#include <linux/bitops.h>
 
 struct clock_data {
 	ktime_t wrap_kt;
 	u64 epoch_ns;
-	u32 epoch_cyc;
+	u64 epoch_cyc;
 	seqcount_t seq;
 	unsigned long rate;
 	u32 mult;
@@ -37,14 +38,25 @@ static struct clock_data cd = {
 	.mult	= NSEC_PER_SEC / HZ,
 };
 
-static u32 __read_mostly sched_clock_mask = 0xffffffff;
+static u64 __read_mostly sched_clock_mask;
 
-static u32 notrace jiffy_sched_clock_read(void)
+static u64 notrace jiffy_sched_clock_read(void)
 {
-	return (u32)(jiffies - INITIAL_JIFFIES);
+	/*
+	 * We don't need to use get_jiffies_64 on 32-bit arches here
+	 * because we register with BITS_PER_LONG
+	 */
+	return (u64)(jiffies - INITIAL_JIFFIES);
+}
+
+static u32 __read_mostly (*read_sched_clock_32)(void);
+
+static u64 notrace read_sched_clock_32_wrapper(void)
+{
+	return read_sched_clock_32();
 }
 
-static u32 __read_mostly (*read_sched_clock)(void) = jiffy_sched_clock_read;
+static u64 __read_mostly (*read_sched_clock)(void) = jiffy_sched_clock_read;
 
 static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift)
 {
@@ -54,8 +66,8 @@ static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift)
 static unsigned long long notrace sched_clock_32(void)
 {
 	u64 epoch_ns;
-	u32 epoch_cyc;
-	u32 cyc;
+	u64 epoch_cyc;
+	u64 cyc;
 	unsigned long seq;
 
 	if (cd.suspended)
@@ -78,7 +90,7 @@ static unsigned long long notrace sched_clock_32(void)
 static void notrace update_sched_clock(void)
 {
 	unsigned long flags;
-	u32 cyc;
+	u64 cyc;
 	u64 ns;
 
 	cyc = read_sched_clock();
@@ -101,7 +113,8 @@ static enum hrtimer_restart sched_clock_poll(struct hrtimer *hrt)
 	return HRTIMER_RESTART;
 }
 
-void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
+void __init sched_clock_register(u64 (*read)(void), int bits,
+				 unsigned long rate)
 {
 	unsigned long r;
 	u64 res, wrap;
@@ -110,14 +123,13 @@ void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
 	if (cd.rate > rate)
 		return;
 
-	BUG_ON(bits > 32);
 	WARN_ON(!irqs_disabled());
 	read_sched_clock = read;
-	sched_clock_mask = (1 << bits) - 1;
+	sched_clock_mask = CLOCKSOURCE_MASK(bits);
 	cd.rate = rate;
 
 	/* calculate the mult/shift to convert counter ticks to ns. */
-	clocks_calc_mult_shift(&cd.mult, &cd.shift, rate, NSEC_PER_SEC, 0);
+	clocks_calc_mult_shift(&cd.mult, &cd.shift, rate, NSEC_PER_SEC, 3600);
 
 	r = rate;
 	if (r >= 4000000) {
@@ -130,7 +142,7 @@ void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
 		r_unit = ' ';
 
 	/* calculate how many ns until we wrap */
-	wrap = cyc_to_ns((1ULL << bits) - 1, cd.mult, cd.shift);
+	wrap = clocks_calc_max_nsecs(cd.mult, cd.shift, 0, sched_clock_mask);
 	cd.wrap_kt = ns_to_ktime(wrap - (wrap >> 3));
 
 	/* calculate the ns resolution of this counter */
@@ -152,6 +164,12 @@ void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
 	pr_debug("Registered %pF as sched_clock source\n", read);
 }
 
+void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
+{
+	read_sched_clock_32 = read;
+	sched_clock_register(read_sched_clock_32_wrapper, bits, rate);
+}
+
 unsigned long long __read_mostly (*sched_clock_func)(void) = sched_clock_32;
 
 unsigned long long notrace sched_clock(void)
@@ -166,7 +184,7 @@ void __init sched_clock_postinit(void)
 	 * make it the final one one.
 	 */
 	if (read_sched_clock == jiffy_sched_clock_read)
-		setup_sched_clock(jiffy_sched_clock_read, 32, HZ);
+		sched_clock_register(jiffy_sched_clock_read, BITS_PER_LONG, HZ);
 
 	update_sched_clock();
 
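As a usage note for the migration called out in the commit message: with this
patch in place, a driver exposing a wide counter can hand
sched_clock_register() a 64-bit read hook directly instead of going through
the setup_sched_clock() shim. The sketch below is illustrative and not from
this patch: counter_base, the register offsets, and the 56-bit / 19.2 MHz
figures are assumptions standing in for real hardware.

#include <linux/init.h>
#include <linux/io.h>
#include <linux/sched_clock.h>

static void __iomem *counter_base;	/* assumed: mapped elsewhere by the driver */

/* Read a hypothetical 56-bit up-counter split across two 32-bit registers,
 * retrying until the high word is stable across the low-word read. The core
 * masks the result with sched_clock_mask, so no masking is needed here. */
static u64 notrace example_counter_read(void)
{
	u32 hi, lo;

	do {
		hi = readl_relaxed(counter_base + 0x4);
		lo = readl_relaxed(counter_base + 0x0);
	} while (hi != readl_relaxed(counter_base + 0x4));

	return ((u64)hi << 32) | lo;
}

static void __init example_sched_clock_init(void)
{
	/* bits and rate (Hz) are illustrative */
	sched_clock_register(example_counter_read, 56, 19200000);
}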