author     Stephen Boyd <sboyd@codeaurora.org>    2014-02-17 13:45:36 -0500
committer  Thomas Gleixner <tglx@linutronix.de>   2014-02-19 11:07:22 -0500
commit     5ae8aabeaec3fe69c4fb21cbe5b17b72b35b5892 (patch)
tree       5cd5d476049486e228d60e59de46811f4161f1dd /kernel
parent     6d0abeca3242a88cab8232e4acd7e2bf088f3bc2 (diff)
sched_clock: Prevent callers from seeing half-updated data
The generic sched_clock registration function was previously done lockless, due to the fact that it was expected to be called only once. However, now there are systems that may register multiple sched_clock sources, for which the lack of locking has caused problems: If two sched_clock sources are registered we may end up in a situation where a call to sched_clock() may be accessing the epoch cycle count for the old counter and the cycle count for the new counter. This can lead to confusing results where sched_clock() values jump and then are reset to 0 (due to the way the registration function forces the epoch_ns to be 0).

Fix this by reorganizing the registration function to hold the seqlock for as short a time as possible while we update the clock_data structure for a new counter. We also put any accumulated time into epoch_ns instead of resetting the time to 0 so that the clock doesn't reset after each successful registration.

[jstultz: Added extra context to the commit message]

Reported-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Stephen Boyd <sboyd@codeaurora.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Josh Cartwright <joshc@codeaurora.org>
Link: http://lkml.kernel.org/r/1392662736-7803-2-git-send-email-john.stultz@linaro.org
Signed-off-by: John Stultz <john.stultz@linaro.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
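The core of the fix is the seqcount write-side critical section: every field of the clock_data structure is updated between raw_write_seqcount_begin() and raw_write_seqcount_end(), so a concurrent sched_clock() reader sees either the complete old state or the complete new state, never a mixture. Below is a minimal, self-contained user-space sketch of that pattern, written with C11 atomics rather than the kernel's seqcount_t from <linux/seqlock.h>; the structure layout, field values and memory ordering are simplified assumptions for illustration only, not the kernel implementation.

/*
 * Hypothetical user-space sketch of the seqcount pattern this patch relies
 * on.  Ordering is deliberately simplified; the point is that readers retry
 * whenever a writer is, or has been, in the middle of an update.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct clock_data {
        atomic_uint seq;        /* even: stable, odd: write in progress */
        uint64_t epoch_ns;
        uint64_t epoch_cyc;
        uint32_t mult, shift;
};

static struct clock_data cd;

/* write side: bump seq to odd, update all fields, bump back to even */
static void clock_data_update(uint64_t epoch_ns, uint64_t epoch_cyc,
                              uint32_t mult, uint32_t shift)
{
        atomic_fetch_add_explicit(&cd.seq, 1, memory_order_release);
        cd.epoch_ns = epoch_ns;
        cd.epoch_cyc = epoch_cyc;
        cd.mult = mult;
        cd.shift = shift;
        atomic_fetch_add_explicit(&cd.seq, 1, memory_order_release);
}

/* read side: retry while a writer is active or a write slipped in between */
static uint64_t clock_data_read_ns(uint64_t cyc)
{
        unsigned int start;
        uint64_t ns;

        do {
                start = atomic_load_explicit(&cd.seq, memory_order_acquire);
                ns = cd.epoch_ns +
                     (((cyc - cd.epoch_cyc) * cd.mult) >> cd.shift);
        } while (start & 1 ||
                 start != atomic_load_explicit(&cd.seq, memory_order_acquire));

        return ns;
}

int main(void)
{
        clock_data_update(0, 1000, 1 << 8, 8); /* 1 cycle == 1 ns here */
        printf("%llu ns\n", (unsigned long long)clock_data_read_ns(1500));
        return 0;
}

A reader racing with clock_data_update() observes either an odd sequence number or one that changed during the read and retries, which is exactly why all of the fields must be written inside a single write section rather than scattered across the registration function.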
Diffstat (limited to 'kernel')
-rw-r--r--   kernel/time/sched_clock.c   46
1 file changed, 29 insertions(+), 17 deletions(-)
diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c
index 0abb36464281..4d23dc4d8139 100644
--- a/kernel/time/sched_clock.c
+++ b/kernel/time/sched_clock.c
@@ -116,20 +116,42 @@ static enum hrtimer_restart sched_clock_poll(struct hrtimer *hrt)
 void __init sched_clock_register(u64 (*read)(void), int bits,
                                  unsigned long rate)
 {
+        u64 res, wrap, new_mask, new_epoch, cyc, ns;
+        u32 new_mult, new_shift;
+        ktime_t new_wrap_kt;
         unsigned long r;
-        u64 res, wrap;
         char r_unit;
 
         if (cd.rate > rate)
                 return;
 
         WARN_ON(!irqs_disabled());
-        read_sched_clock = read;
-        sched_clock_mask = CLOCKSOURCE_MASK(bits);
-        cd.rate = rate;
 
         /* calculate the mult/shift to convert counter ticks to ns. */
-        clocks_calc_mult_shift(&cd.mult, &cd.shift, rate, NSEC_PER_SEC, 3600);
+        clocks_calc_mult_shift(&new_mult, &new_shift, rate, NSEC_PER_SEC, 3600);
+
+        new_mask = CLOCKSOURCE_MASK(bits);
+
+        /* calculate how many ns until we wrap */
+        wrap = clocks_calc_max_nsecs(new_mult, new_shift, 0, new_mask);
+        new_wrap_kt = ns_to_ktime(wrap - (wrap >> 3));
+
+        /* update epoch for new counter and update epoch_ns from old counter*/
+        new_epoch = read();
+        cyc = read_sched_clock();
+        ns = cd.epoch_ns + cyc_to_ns((cyc - cd.epoch_cyc) & sched_clock_mask,
+                          cd.mult, cd.shift);
+
+        raw_write_seqcount_begin(&cd.seq);
+        read_sched_clock = read;
+        sched_clock_mask = new_mask;
+        cd.rate = rate;
+        cd.wrap_kt = new_wrap_kt;
+        cd.mult = new_mult;
+        cd.shift = new_shift;
+        cd.epoch_cyc = new_epoch;
+        cd.epoch_ns = ns;
+        raw_write_seqcount_end(&cd.seq);
 
         r = rate;
         if (r >= 4000000) {
@@ -141,22 +163,12 @@ void __init sched_clock_register(u64 (*read)(void), int bits,
         } else
                 r_unit = ' ';
 
-        /* calculate how many ns until we wrap */
-        wrap = clocks_calc_max_nsecs(cd.mult, cd.shift, 0, sched_clock_mask);
-        cd.wrap_kt = ns_to_ktime(wrap - (wrap >> 3));
-
         /* calculate the ns resolution of this counter */
-        res = cyc_to_ns(1ULL, cd.mult, cd.shift);
+        res = cyc_to_ns(1ULL, new_mult, new_shift);
+
         pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lluns\n",
                 bits, r, r_unit, res, wrap);
 
-        update_sched_clock();
-
-        /*
-         * Ensure that sched_clock() starts off at 0ns
-         */
-        cd.epoch_ns = 0;
-
         /* Enable IRQ time accounting if we have a fast enough sched_clock */
         if (irqtime > 0 || (irqtime == -1 && rate >= 1000000))
                 enable_sched_clock_irqtime();
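The other half of the change is visible in the added line ns = cd.epoch_ns + cyc_to_ns((cyc - cd.epoch_cyc) & sched_clock_mask, cd.mult, cd.shift): time accumulated on the old counter is folded into the new epoch_ns, so the clock keeps counting instead of snapping back to 0 after registration. A small stand-alone sketch of that arithmetic follows; the mask, mult/shift and cycle values are hypothetical, and the helper merely mirrors the fixed-point conversion the kernel's cyc_to_ns() performs.

/*
 * Stand-alone sketch of the epoch carry-forward arithmetic, with made-up
 * numbers; not kernel code.
 */
#include <stdint.h>
#include <stdio.h>

/* same (cyc * mult) >> shift conversion as the kernel's cyc_to_ns() helper */
static inline uint64_t cyc_to_ns(uint64_t cyc, uint32_t mult, uint32_t shift)
{
        return (cyc * mult) >> shift;
}

int main(void)
{
        /* hypothetical old counter: 32 bit, roughly 1 ns per cycle */
        uint64_t old_mask = 0xffffffffULL;
        uint64_t old_epoch_cyc = 100000, old_epoch_ns = 5000000;
        uint32_t old_mult = 1 << 10, old_shift = 10;
        uint64_t old_cyc_now = 300000;   /* value read just before switching */

        /*
         * Accumulated time is folded into the new epoch_ns instead of being
         * reset to 0, so sched_clock() stays monotonic across the
         * registration of a new source.
         */
        uint64_t new_epoch_ns = old_epoch_ns +
                cyc_to_ns((old_cyc_now - old_epoch_cyc) & old_mask,
                          old_mult, old_shift);

        printf("new epoch_ns = %llu\n", (unsigned long long)new_epoch_ns);
        return 0;
}

With these made-up numbers the new epoch_ns comes out to 5,200,000 ns: the 5 ms already accounted for plus the 200,000 cycles (about 200 us at 1 ns per cycle) that elapsed since the last update, rather than the 0 ns the old registration path would have forced.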