aboutsummaryrefslogtreecommitdiffstats
path: root/kernel
diff options
context:
space:
mode:
authorJohn Stultz <john.stultz@linaro.org>2015-03-12 00:16:33 -0400
committerIngo Molnar <mingo@kernel.org>2015-03-13 03:07:04 -0400
commita558cd021d83b65c47ee5b9bec1fcfe5298a769f (patch)
treec18d435983b2de9480aaa7c253a0e21a3fb14976 /kernel
parent3c17ad19f0697ffe5ef7438cdafc2d2b7757d8a5 (diff)
timekeeping: Add checks to cap clocksource reads to the 'max_cycles' value
When calculating the current delta since the last tick, we currently have no hard protections to prevent a multiplication overflow from occurring. This patch introduces infrastructure to allow a cap that limits the clocksource read delta value to the 'max_cycles' value, which is where an overflow would occur. Since this is in the hotpath, it adds the extra checking under CONFIG_DEBUG_TIMEKEEPING=y. There was some concern that capping time like this could cause problems as we may stop expiring timers, which could go circular if the timer that triggers time accumulation were mis-scheduled too far in the future, which would cause time to stop. However, since the mult overflow would result in a smaller time value, we would effectively have the same problem there. Signed-off-by: John Stultz <john.stultz@linaro.org> Cc: Dave Jones <davej@codemonkey.org.uk> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Prarit Bhargava <prarit@redhat.com> Cc: Richard Cochran <richardcochran@gmail.com> Cc: Stephen Boyd <sboyd@codeaurora.org> Cc: Thomas Gleixner <tglx@linutronix.de> Link: http://lkml.kernel.org/r/1426133800-29329-6-git-send-email-john.stultz@linaro.org Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel')
-rw-r--r--kernel/time/timekeeping.c49
1 files changed, 35 insertions, 14 deletions
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index acf049144cf6..657414cf2e46 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -126,9 +126,9 @@ static void timekeeping_check_update(struct timekeeper *tk, cycle_t offset)
126 const char *name = tk->tkr.clock->name; 126 const char *name = tk->tkr.clock->name;
127 127
128 if (offset > max_cycles) { 128 if (offset > max_cycles) {
129 printk_deferred("WARNING: timekeeping: Cycle offset (%lld) is larger than allowed by the '%s' clock's max_cycles value (%lld): time overflow\n", 129 printk_deferred("WARNING: timekeeping: Cycle offset (%lld) is larger than allowed by the '%s' clock's max_cycles value (%lld): time overflow danger\n",
130 offset, name, max_cycles); 130 offset, name, max_cycles);
131 printk_deferred(" timekeeping: Your kernel is sick, but tries to cope\n"); 131 printk_deferred(" timekeeping: Your kernel is sick, but tries to cope by capping time updates\n");
132 } else { 132 } else {
133 if (offset > (max_cycles >> 1)) { 133 if (offset > (max_cycles >> 1)) {
134 printk_deferred("INFO: timekeeping: Cycle offset (%lld) is larger than the the '%s' clock's 50%% safety margin (%lld)\n", 134 printk_deferred("INFO: timekeeping: Cycle offset (%lld) is larger than the the '%s' clock's 50%% safety margin (%lld)\n",
@@ -137,10 +137,39 @@ static void timekeeping_check_update(struct timekeeper *tk, cycle_t offset)
137 } 137 }
138 } 138 }
139} 139}
140
141static inline cycle_t timekeeping_get_delta(struct tk_read_base *tkr)
142{
143 cycle_t cycle_now, delta;
144
145 /* read clocksource */
146 cycle_now = tkr->read(tkr->clock);
147
148 /* calculate the delta since the last update_wall_time */
149 delta = clocksource_delta(cycle_now, tkr->cycle_last, tkr->mask);
150
151 /* Cap delta value to the max_cycles values to avoid mult overflows */
152 if (unlikely(delta > tkr->clock->max_cycles))
153 delta = tkr->clock->max_cycles;
154
155 return delta;
156}
140#else 157#else
141static inline void timekeeping_check_update(struct timekeeper *tk, cycle_t offset) 158static inline void timekeeping_check_update(struct timekeeper *tk, cycle_t offset)
142{ 159{
143} 160}
161static inline cycle_t timekeeping_get_delta(struct tk_read_base *tkr)
162{
163 cycle_t cycle_now, delta;
164
165 /* read clocksource */
166 cycle_now = tkr->read(tkr->clock);
167
168 /* calculate the delta since the last update_wall_time */
169 delta = clocksource_delta(cycle_now, tkr->cycle_last, tkr->mask);
170
171 return delta;
172}
144#endif 173#endif
145 174
146/** 175/**
@@ -218,14 +247,10 @@ static inline u32 arch_gettimeoffset(void) { return 0; }
218 247
219static inline s64 timekeeping_get_ns(struct tk_read_base *tkr) 248static inline s64 timekeeping_get_ns(struct tk_read_base *tkr)
220{ 249{
221 cycle_t cycle_now, delta; 250 cycle_t delta;
222 s64 nsec; 251 s64 nsec;
223 252
224 /* read clocksource: */ 253 delta = timekeeping_get_delta(tkr);
225 cycle_now = tkr->read(tkr->clock);
226
227 /* calculate the delta since the last update_wall_time: */
228 delta = clocksource_delta(cycle_now, tkr->cycle_last, tkr->mask);
229 254
230 nsec = delta * tkr->mult + tkr->xtime_nsec; 255 nsec = delta * tkr->mult + tkr->xtime_nsec;
231 nsec >>= tkr->shift; 256 nsec >>= tkr->shift;
@@ -237,14 +262,10 @@ static inline s64 timekeeping_get_ns(struct tk_read_base *tkr)
237static inline s64 timekeeping_get_ns_raw(struct timekeeper *tk) 262static inline s64 timekeeping_get_ns_raw(struct timekeeper *tk)
238{ 263{
239 struct clocksource *clock = tk->tkr.clock; 264 struct clocksource *clock = tk->tkr.clock;
240 cycle_t cycle_now, delta; 265 cycle_t delta;
241 s64 nsec; 266 s64 nsec;
242 267
243 /* read clocksource: */ 268 delta = timekeeping_get_delta(&tk->tkr);
244 cycle_now = tk->tkr.read(clock);
245
246 /* calculate the delta since the last update_wall_time: */
247 delta = clocksource_delta(cycle_now, tk->tkr.cycle_last, tk->tkr.mask);
248 269
249 /* convert delta to nanoseconds. */ 270 /* convert delta to nanoseconds. */
250 nsec = clocksource_cyc2ns(delta, clock->mult, clock->shift); 271 nsec = clocksource_cyc2ns(delta, clock->mult, clock->shift);