author     Stephen Boyd <sboyd@codeaurora.org>        2013-07-18 19:21:15 -0400
committer  John Stultz <john.stultz@linaro.org>       2013-07-30 14:24:20 -0400
commit     85c3d2dd15be4d577a37ffb8bbbd019fc8e3280a (patch)
tree       db6f0d5ed69e55b7509793c111dc0dc5a57d43fd /kernel/time/sched_clock.c
parent     87d8b9eb7eb6669aad6435a51e9862362141ba76 (diff)
sched_clock: Use seqcount instead of rolling our own
We're going to increase the cyc value to 64 bits in the near future.
Doing that is going to break the custom seqcount implementation in the
sched_clock code because 64 bit numbers aren't guaranteed to be atomic.
Replace the cyc_copy with a seqcount to avoid this problem.

Cc: Russell King <linux@arm.linux.org.uk>
Acked-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Stephen Boyd <sboyd@codeaurora.org>
Signed-off-by: John Stultz <john.stultz@linaro.org>
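For illustration only, here is a minimal, hypothetical userspace sketch of the
seqcount idea using C11 atomics. It is not the kernel's <linux/seqlock.h> API,
and every name below (clock_data_sketch, sketch_update_epoch, sketch_read_epoch)
is made up. The writer bumps a sequence counter to an odd value before updating
the epoch fields and back to an even value afterwards; a reader that sees an odd
or changed counter retries, so it can never observe a torn 64-bit epoch_ns on a
32-bit machine.

/*
 * Hypothetical sketch only -- not the kernel seqlock API.  A single writer
 * bumps "seq" to odd, updates the fields, then bumps it back to even.
 * Readers snapshot the fields and retry if the counter was odd or changed
 * underneath them, so they never see a half-updated epoch.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct clock_data_sketch {
	_Atomic uint64_t epoch_ns;	/* relaxed accesses, guarded by seq */
	_Atomic uint32_t epoch_cyc;
	atomic_uint seq;		/* even: stable, odd: update in flight */
};

static struct clock_data_sketch cd_sketch;

static void sketch_update_epoch(uint64_t ns, uint32_t cyc)
{
	unsigned int s = atomic_load_explicit(&cd_sketch.seq, memory_order_relaxed);

	/* Odd sequence tells readers an update is in progress. */
	atomic_store_explicit(&cd_sketch.seq, s + 1, memory_order_relaxed);
	atomic_thread_fence(memory_order_release);
	atomic_store_explicit(&cd_sketch.epoch_ns, ns, memory_order_relaxed);
	atomic_store_explicit(&cd_sketch.epoch_cyc, cyc, memory_order_relaxed);
	/* Back to even: the new snapshot is published. */
	atomic_store_explicit(&cd_sketch.seq, s + 2, memory_order_release);
}

static void sketch_read_epoch(uint64_t *ns, uint32_t *cyc)
{
	unsigned int s;

	do {
		/* Spin until no update is in flight (even count). */
		while ((s = atomic_load_explicit(&cd_sketch.seq,
						 memory_order_acquire)) & 1)
			;
		*ns = atomic_load_explicit(&cd_sketch.epoch_ns, memory_order_relaxed);
		*cyc = atomic_load_explicit(&cd_sketch.epoch_cyc, memory_order_relaxed);
		atomic_thread_fence(memory_order_acquire);
		/* Retry if the writer ran while we were reading. */
	} while (atomic_load_explicit(&cd_sketch.seq, memory_order_relaxed) != s);
}

int main(void)
{
	uint64_t ns;
	uint32_t cyc;

	sketch_update_epoch(123456789ULL, 42);
	sketch_read_epoch(&ns, &cyc);
	printf("epoch_ns=%llu epoch_cyc=%u\n", (unsigned long long)ns, cyc);
	return 0;
}

In the patch below, the same retry loop is expressed with
read_seqcount_begin()/read_seqcount_retry() on the reader side and
write_seqcount_begin()/write_seqcount_end() on the writer side, with the
writer still running under raw_local_irq_save().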
Diffstat (limited to 'kernel/time/sched_clock.c')
 -rw-r--r--  kernel/time/sched_clock.c | 27
 1 file changed, 8 insertions(+), 19 deletions(-)
diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c
index a326f27d7f09..396f7b9dccc9 100644
--- a/kernel/time/sched_clock.c
+++ b/kernel/time/sched_clock.c
@@ -14,11 +14,12 @@
 #include <linux/syscore_ops.h>
 #include <linux/timer.h>
 #include <linux/sched_clock.h>
+#include <linux/seqlock.h>
 
 struct clock_data {
 	u64 epoch_ns;
 	u32 epoch_cyc;
-	u32 epoch_cyc_copy;
+	seqcount_t seq;
 	unsigned long rate;
 	u32 mult;
 	u32 shift;
@@ -54,23 +55,16 @@ static unsigned long long notrace sched_clock_32(void)
 	u64 epoch_ns;
 	u32 epoch_cyc;
 	u32 cyc;
+	unsigned long seq;
 
 	if (cd.suspended)
 		return cd.epoch_ns;
 
-	/*
-	 * Load the epoch_cyc and epoch_ns atomically. We do this by
-	 * ensuring that we always write epoch_cyc, epoch_ns and
-	 * epoch_cyc_copy in strict order, and read them in strict order.
-	 * If epoch_cyc and epoch_cyc_copy are not equal, then we're in
-	 * the middle of an update, and we should repeat the load.
-	 */
 	do {
+		seq = read_seqcount_begin(&cd.seq);
 		epoch_cyc = cd.epoch_cyc;
-		smp_rmb();
 		epoch_ns = cd.epoch_ns;
-		smp_rmb();
-	} while (epoch_cyc != cd.epoch_cyc_copy);
+	} while (read_seqcount_retry(&cd.seq, seq));
 
 	cyc = read_sched_clock();
 	cyc = (cyc - epoch_cyc) & sched_clock_mask;
@@ -90,16 +84,12 @@ static void notrace update_sched_clock(void)
 	ns = cd.epoch_ns +
 		cyc_to_ns((cyc - cd.epoch_cyc) & sched_clock_mask,
 			  cd.mult, cd.shift);
-	/*
-	 * Write epoch_cyc and epoch_ns in a way that the update is
-	 * detectable in cyc_to_fixed_sched_clock().
-	 */
+
 	raw_local_irq_save(flags);
-	cd.epoch_cyc_copy = cyc;
-	smp_wmb();
+	write_seqcount_begin(&cd.seq);
 	cd.epoch_ns = ns;
-	smp_wmb();
 	cd.epoch_cyc = cyc;
+	write_seqcount_end(&cd.seq);
 	raw_local_irq_restore(flags);
 }
 
@@ -195,7 +185,6 @@ static int sched_clock_suspend(void)
 static void sched_clock_resume(void)
 {
 	cd.epoch_cyc = read_sched_clock();
-	cd.epoch_cyc_copy = cd.epoch_cyc;
 	cd.suspended = false;
 }
 