author     Stephen Boyd <sboyd@codeaurora.org>    2013-06-17 18:40:58 -0400
committer  John Stultz <john.stultz@linaro.org>   2013-06-17 18:56:11 -0400
commit     336ae1180df5f69b9e0fb6561bec01c5f64361cf (patch)
tree       416cd47092f970dd03e8b655d6204bd9fdc83e6f /kernel/time
parent     38ff87f77af0b5a93fc8581cff1d6e5692ab8970 (diff)
ARM: sched_clock: Load cycle count after epoch stabilizes
There is a small race between when the cycle count is read from the hardware
and when the epoch stabilizes. Consider this scenario:

 CPU0                            CPU1
 ----                            ----
 cyc = read_sched_clock()
 cyc_to_sched_clock()
                                 update_sched_clock()
                                  ...
                                  cd.epoch_cyc = cyc;
  epoch_cyc = cd.epoch_cyc;
  ...
  epoch_ns + cyc_to_ns((cyc - epoch_cyc)

The cyc on cpu0 was read before the epoch changed. But we calculate the
nanoseconds based on the new epoch by subtracting the new epoch from the old
cycle count. Since the epoch is most likely larger than the old cycle count,
we calculate a large number that will be converted to nanoseconds and added
to epoch_ns, causing time to jump forward too much.

Fix this problem by reading the hardware after the epoch has stabilized.

Cc: Russell King <linux@arm.linux.org.uk>
Signed-off-by: Stephen Boyd <sboyd@codeaurora.org>
Signed-off-by: John Stultz <john.stultz@linaro.org>
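For illustration, here is a minimal user-space sketch of the reader-side ordering the fix establishes: the epoch pair is loaded and re-checked until it is stable, and only then is the counter sampled. It uses C11 atomics in place of the kernel's smp_rmb(), and the names clock_data, read_cycles() and cycles_to_ns() are stand-ins rather than the kernel's own symbols; the updater side is omitted.

#include <stdint.h>
#include <stdio.h>
#include <stdatomic.h>

/* Illustrative clock state; the epoch fields are atomic so a concurrent
 * updater (not shown) could republish them, mirroring cd in sched_clock.c. */
struct clock_data {
        _Atomic uint64_t epoch_ns;
        _Atomic uint32_t epoch_cyc;
        _Atomic uint32_t epoch_cyc_copy;
        uint32_t mult;
        uint32_t shift;
        uint32_t mask;
};

static struct clock_data cd = { .mult = 1, .shift = 0, .mask = 0xffffffffu };

/* Stand-in for the hardware counter (read_sched_clock() in the kernel). */
static uint32_t read_cycles(void)
{
        static _Atomic uint32_t fake_counter;
        return atomic_fetch_add(&fake_counter, 1);
}

static uint64_t cycles_to_ns(uint64_t cyc, uint32_t mult, uint32_t shift)
{
        return (cyc * mult) >> shift;
}

/* Reader: retry until epoch_cyc and epoch_ns form a consistent pair,
 * then sample the counter, so cyc can never predate epoch_cyc. */
static uint64_t sched_clock_sketch(void)
{
        uint64_t epoch_ns;
        uint32_t epoch_cyc, cyc;

        do {
                epoch_cyc = atomic_load_explicit(&cd.epoch_cyc,
                                                 memory_order_acquire);
                epoch_ns = atomic_load_explicit(&cd.epoch_ns,
                                                memory_order_acquire);
        } while (epoch_cyc != atomic_load_explicit(&cd.epoch_cyc_copy,
                                                   memory_order_acquire));

        cyc = read_cycles();    /* sampled only after the epoch is stable */
        return epoch_ns + cycles_to_ns((cyc - epoch_cyc) & cd.mask,
                                       cd.mult, cd.shift);
}

int main(void)
{
        printf("sketch clock: %llu ns\n",
               (unsigned long long)sched_clock_sketch());
        return 0;
}

The epoch_cyc_copy re-check plays the role of a sequence counter: if the epoch pair changed mid-read, the loop retries, so the counter sample taken afterwards can only be newer than the epoch it is compared against.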
Diffstat (limited to 'kernel/time')
-rw-r--r--  kernel/time/sched_clock.c | 19 ++++++++-----------
1 file changed, 8 insertions(+), 11 deletions(-)
diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c
index aad1ae6077ef..a326f27d7f09 100644
--- a/kernel/time/sched_clock.c
+++ b/kernel/time/sched_clock.c
@@ -49,10 +49,14 @@ static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift)
 	return (cyc * mult) >> shift;
 }
 
-static unsigned long long notrace cyc_to_sched_clock(u32 cyc, u32 mask)
+static unsigned long long notrace sched_clock_32(void)
 {
 	u64 epoch_ns;
 	u32 epoch_cyc;
+	u32 cyc;
+
+	if (cd.suspended)
+		return cd.epoch_ns;
 
 	/*
 	 * Load the epoch_cyc and epoch_ns atomically. We do this by
@@ -68,7 +72,9 @@ static unsigned long long notrace cyc_to_sched_clock(u32 cyc, u32 mask)
 		smp_rmb();
 	} while (epoch_cyc != cd.epoch_cyc_copy);
 
-	return epoch_ns + cyc_to_ns((cyc - epoch_cyc) & mask, cd.mult, cd.shift);
+	cyc = read_sched_clock();
+	cyc = (cyc - epoch_cyc) & sched_clock_mask;
+	return epoch_ns + cyc_to_ns(cyc, cd.mult, cd.shift);
 }
 
 /*
@@ -160,19 +166,10 @@ void __init setup_sched_clock(u32 (*read)(void), int bits, unsigned long rate)
 	pr_debug("Registered %pF as sched_clock source\n", read);
 }
 
-static unsigned long long notrace sched_clock_32(void)
-{
-	u32 cyc = read_sched_clock();
-	return cyc_to_sched_clock(cyc, sched_clock_mask);
-}
-
 unsigned long long __read_mostly (*sched_clock_func)(void) = sched_clock_32;
 
 unsigned long long notrace sched_clock(void)
 {
-	if (cd.suspended)
-		return cd.epoch_ns;
-
 	return sched_clock_func();
 }
 