author    Daniel Thompson <daniel.thompson@linaro.org>	2015-03-26 15:23:22 -0400
committer Ingo Molnar <mingo@kernel.org>	2015-03-27 03:33:56 -0400
commit    8710e914027e4f64058ebbf0501cc6db3cc8454f (patch)
tree      aab1f5735303d0fae22a7208784e99f7a323a66a /kernel/time
parent    2557d215a36d18c8bbaa35f65749349ac603360b (diff)
timers, sched/clock: Match scope of read and write seqcounts
Currently the scope of the raw_write_seqcount_begin/end() in sched_clock_register()
far exceeds the scope of the read section in sched_clock(). This gives the
impression of safety during cursory review but achieves little.

Note that this is likely to be a latent issue at present because
sched_clock_register() is typically called before we enable interrupts; however
the issue does risk bugs being needlessly introduced as the code evolves.

This patch fixes the problem by increasing the scope of the read locking
performed by sched_clock() to cover all data modified by sched_clock_register().

We also improve clarity by moving writes to struct clock_data that do not impact
sched_clock() outside of the critical section.

Signed-off-by: Daniel Thompson <daniel.thompson@linaro.org>
[ Reworked it slightly to apply to tip/timers/core ]
Signed-off-by: John Stultz <john.stultz@linaro.org>
Reviewed-by: Stephen Boyd <sboyd@codeaurora.org>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Russell King <linux@arm.linux.org.uk>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will.deacon@arm.com>
Link: http://lkml.kernel.org/r/1427397806-20889-2-git-send-email-john.stultz@linaro.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
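For readers unfamiliar with the locking pattern the commit message refers to, the sketch below (illustrative only, not part of this patch; the demo_* names are hypothetical) shows the seqcount discipline being enforced: the writer updates every field the reader consumes inside raw_write_seqcount_begin()/end(), and the reader loads all of those fields inside its retry loop, so that the sequence check actually covers the data being used.

/* Minimal seqcount sketch -- hypothetical names, not taken from the patch. */
#include <linux/seqlock.h>

static seqcount_t demo_seq = SEQCNT_ZERO(demo_seq);
static u64 demo_epoch_ns;
static u64 demo_epoch_cyc;

/* Writer: every field the reader touches is updated inside the write section. */
static void demo_update(u64 ns, u64 cyc)
{
	raw_write_seqcount_begin(&demo_seq);
	demo_epoch_ns  = ns;
	demo_epoch_cyc = cyc;
	raw_write_seqcount_end(&demo_seq);
}

/* Reader: all loads sit inside the retry loop, matching the writer's scope. */
static u64 demo_snapshot_sum(void)
{
	u64 ns, cyc;
	unsigned long seq;

	do {
		seq = raw_read_seqcount_begin(&demo_seq);
		ns  = demo_epoch_ns;
		cyc = demo_epoch_cyc;
	} while (read_seqcount_retry(&demo_seq, seq));

	return ns + cyc;
}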
Diffstat (limited to 'kernel/time')
-rw-r--r--  kernel/time/sched_clock.c | 26
1 file changed, 11 insertions(+), 15 deletions(-)
diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c
index ca3bc5c7027c..1751e956add9 100644
--- a/kernel/time/sched_clock.c
+++ b/kernel/time/sched_clock.c
@@ -58,23 +58,21 @@ static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift)
 
 unsigned long long notrace sched_clock(void)
 {
-	u64 epoch_ns;
-	u64 epoch_cyc;
-	u64 cyc;
+	u64 cyc, res;
 	unsigned long seq;
 
-	if (cd.suspended)
-		return cd.epoch_ns;
-
 	do {
 		seq = raw_read_seqcount_begin(&cd.seq);
-		epoch_cyc = cd.epoch_cyc;
-		epoch_ns = cd.epoch_ns;
+
+		res = cd.epoch_ns;
+		if (!cd.suspended) {
+			cyc = read_sched_clock();
+			cyc = (cyc - cd.epoch_cyc) & sched_clock_mask;
+			res += cyc_to_ns(cyc, cd.mult, cd.shift);
+		}
 	} while (read_seqcount_retry(&cd.seq, seq));
 
-	cyc = read_sched_clock();
-	cyc = (cyc - epoch_cyc) & sched_clock_mask;
-	return epoch_ns + cyc_to_ns(cyc, cd.mult, cd.shift);
+	return res;
 }
 
 /*
@@ -111,7 +109,6 @@ void __init sched_clock_register(u64 (*read)(void), int bits,
 {
 	u64 res, wrap, new_mask, new_epoch, cyc, ns;
 	u32 new_mult, new_shift;
-	ktime_t new_wrap_kt;
 	unsigned long r;
 	char r_unit;
 
@@ -124,10 +121,11 @@ void __init sched_clock_register(u64 (*read)(void), int bits,
 	clocks_calc_mult_shift(&new_mult, &new_shift, rate, NSEC_PER_SEC, 3600);
 
 	new_mask = CLOCKSOURCE_MASK(bits);
+	cd.rate = rate;
 
 	/* calculate how many nanosecs until we risk wrapping */
 	wrap = clocks_calc_max_nsecs(new_mult, new_shift, 0, new_mask, NULL);
-	new_wrap_kt = ns_to_ktime(wrap);
+	cd.wrap_kt = ns_to_ktime(wrap);
 
 	/* update epoch for new counter and update epoch_ns from old counter*/
 	new_epoch = read();
@@ -138,8 +136,6 @@ void __init sched_clock_register(u64 (*read)(void), int bits,
 	raw_write_seqcount_begin(&cd.seq);
 	read_sched_clock = read;
 	sched_clock_mask = new_mask;
-	cd.rate = rate;
-	cd.wrap_kt = new_wrap_kt;
 	cd.mult = new_mult;
 	cd.shift = new_shift;
 	cd.epoch_cyc = new_epoch;