-rw-r--r--	include/linux/clocksource.h	5
-rw-r--r--	kernel/time/clocksource.c	28
-rw-r--r--	kernel/time/sched_clock.c	2
3 files changed, 20 insertions(+), 15 deletions(-)
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index 9c78d15d33e4..16d048cadebb 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -56,6 +56,7 @@ struct module;
  * @shift: cycle to nanosecond divisor (power of two)
  * @max_idle_ns: max idle time permitted by the clocksource (nsecs)
  * @maxadj: maximum adjustment value to mult (~11%)
+ * @max_cycles: maximum safe cycle value which won't overflow on multiplication
  * @flags: flags describing special properties
  * @archdata: arch-specific data
  * @suspend: suspend function for the clocksource, if necessary
@@ -76,7 +77,7 @@ struct clocksource {
 #ifdef CONFIG_ARCH_CLOCKSOURCE_DATA
 	struct arch_clocksource_data archdata;
 #endif
-
+	u64 max_cycles;
 	const char *name;
 	struct list_head list;
 	int rating;
@@ -189,7 +190,7 @@ extern struct clocksource * __init clocksource_default_clock(void);
 extern void clocksource_mark_unstable(struct clocksource *cs);
 
 extern u64
-clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask);
+clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask, u64 *max_cycles);
 extern void
 clocks_calc_mult_shift(u32 *mult, u32 *shift, u32 from, u32 to, u32 minsec);
 
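Note (not part of the patch): the overflow that the new @max_cycles field bounds comes from the cycle-to-nanosecond conversion, which multiplies the raw cycle count by @mult before shifting. A minimal userspace sketch of that arithmetic, assuming the usual (cycles * mult) >> shift form; the helper names here are hypothetical:

/*
 * Illustrative sketch only: the 64-bit product wraps silently once the
 * cycle count exceeds roughly UINT64_MAX / mult, which is the limit the
 * new max_cycles field records.
 */
#include <stdint.h>

static uint64_t demo_cyc2ns(uint64_t cycles, uint32_t mult, uint32_t shift)
{
	return (cycles * mult) >> shift;	/* wraps past the limit */
}

static uint64_t demo_max_cycles(uint32_t mult, uint32_t maxadj)
{
	/* largest cycle count whose product with (mult + maxadj) still fits in 64 bits */
	return UINT64_MAX / (mult + maxadj);
}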
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index ace95763b3a6..fc2a9de43ca1 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -469,11 +469,13 @@ static u32 clocksource_max_adjustment(struct clocksource *cs)
  * @shift:	cycle to nanosecond divisor (power of two)
  * @maxadj:	maximum adjustment value to mult (~11%)
  * @mask:	bitmask for two's complement subtraction of non 64 bit counters
+ * @max_cyc:	maximum cycle value before potential overflow (does not include
+ *		any safety margin)
  *
  * NOTE: This function includes a safety margin of 50%, so that bad clock values
  * can be detected.
  */
-u64 clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask)
+u64 clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask, u64 *max_cyc)
 {
 	u64 max_nsecs, max_cycles;
 
@@ -493,6 +495,10 @@ u64 clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask)
 	max_cycles = min(max_cycles, mask);
 	max_nsecs = clocksource_cyc2ns(max_cycles, mult - maxadj, shift);
 
+	/* return the max_cycles value as well if requested */
+	if (max_cyc)
+		*max_cyc = max_cycles;
+
 	/* Return 50% of the actual maximum, so we can detect bad values */
 	max_nsecs >>= 1;
 
@@ -500,17 +506,15 @@ u64 clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask)
 }
 
 /**
- * clocksource_max_deferment - Returns max time the clocksource should be deferred
- * @cs:         Pointer to clocksource
+ * clocksource_update_max_deferment - Updates the clocksource max_idle_ns & max_cycles
+ * @cs:         Pointer to clocksource to be updated
  *
  */
-static u64 clocksource_max_deferment(struct clocksource *cs)
+static inline void clocksource_update_max_deferment(struct clocksource *cs)
 {
-	u64 max_nsecs;
-
-	max_nsecs = clocks_calc_max_nsecs(cs->mult, cs->shift, cs->maxadj,
-					  cs->mask);
-	return max_nsecs;
+	cs->max_idle_ns = clocks_calc_max_nsecs(cs->mult, cs->shift,
+						cs->maxadj, cs->mask,
+						&cs->max_cycles);
 }
 
 #ifndef CONFIG_ARCH_USES_GETTIMEOFFSET
@@ -684,7 +688,7 @@ void __clocksource_updatefreq_scale(struct clocksource *cs, u32 scale, u32 freq)
 		cs->maxadj = clocksource_max_adjustment(cs);
 	}
 
-	cs->max_idle_ns = clocksource_max_deferment(cs);
+	clocksource_update_max_deferment(cs);
 }
 EXPORT_SYMBOL_GPL(__clocksource_updatefreq_scale);
 
@@ -730,8 +734,8 @@ int clocksource_register(struct clocksource *cs)
730 "Clocksource %s might overflow on 11%% adjustment\n", 734 "Clocksource %s might overflow on 11%% adjustment\n",
731 cs->name); 735 cs->name);
732 736
733 /* calculate max idle time permitted for this clocksource */ 737 /* Update max idle time permitted for this clocksource */
734 cs->max_idle_ns = clocksource_max_deferment(cs); 738 clocksource_update_max_deferment(cs);
735 739
736 mutex_lock(&clocksource_mutex); 740 mutex_lock(&clocksource_mutex);
737 clocksource_enqueue(cs); 741 clocksource_enqueue(cs);
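For clarity, the updated calculation can be summarized outside the kernel as follows. This is a hedged userspace sketch of the same steps (divide, clamp to the counter mask, convert conservatively, report the raw cycle limit, then halve for the 50% safety margin), not the kernel implementation itself; the function name is hypothetical:

#include <stdint.h>

/* Hypothetical standalone rendering of the clocks_calc_max_nsecs() steps. */
static uint64_t sketch_calc_max_nsecs(uint32_t mult, uint32_t shift,
				      uint32_t maxadj, uint64_t mask,
				      uint64_t *max_cyc)
{
	/* largest cycle value that cannot overflow cycles * (mult + maxadj) */
	uint64_t max_cycles = UINT64_MAX / (mult + maxadj);

	/* the counter can never report more than its mask */
	if (max_cycles > mask)
		max_cycles = mask;

	/* convert with the smallest plausible mult so the bound is conservative */
	uint64_t max_nsecs = (max_cycles * (mult - maxadj)) >> shift;

	/* new in this patch: hand the unmargined cycle limit back if requested */
	if (max_cyc)
		*max_cyc = max_cycles;

	/* keep the existing 50% safety margin on the nanosecond bound */
	return max_nsecs >> 1;
}

Making the out-parameter optional keeps callers that only need the nanosecond bound unchanged apart from passing NULL, as the sched_clock.c hunk below shows.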
diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c
index 3b8ae45020c1..ca3bc5c7027c 100644
--- a/kernel/time/sched_clock.c
+++ b/kernel/time/sched_clock.c
@@ -126,7 +126,7 @@ void __init sched_clock_register(u64 (*read)(void), int bits,
 	new_mask = CLOCKSOURCE_MASK(bits);
 
 	/* calculate how many nanosecs until we risk wrapping */
-	wrap = clocks_calc_max_nsecs(new_mult, new_shift, 0, new_mask);
+	wrap = clocks_calc_max_nsecs(new_mult, new_shift, 0, new_mask, NULL);
 	new_wrap_kt = ns_to_ktime(wrap);
 
 	/* update epoch for new counter and update epoch_ns from old counter*/
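Usage note (illustrative only, building on the sketch above): a caller that does not need the cycle limit simply passes NULL, mirroring the sched_clock_register() change; the counter width and mult/shift values here are assumed for the example:

static uint64_t demo_wrap_ns(void)
{
	/* assumed 56-bit counter with mult/shift chosen for 1 ns per cycle */
	uint64_t mask = (1ULL << 56) - 1;

	/* passing NULL skips the max_cycles write-back added by this patch */
	return sketch_calc_max_nsecs(1 << 20, 20, 0, mask, NULL);
}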