| author | Chris Metcalf <cmetcalf@tilera.com> | 2010-08-13 08:24:22 -0400 |
|---|---|---|
| committer | Chris Metcalf <cmetcalf@tilera.com> | 2010-08-13 08:24:22 -0400 |
| commit | 749dc6f252b57d5cb9c1f4c1c4aafe4c92a28207 (patch) | |
| tree | 1728f62b00935ecf9ac16bd9e3871f9d0e7ad75c /arch/tile | |
| parent | bc63de7c5bcc44b0098d09931f69a19e93d8a7ba (diff) | |
arch/tile: Use separate, better minsec values for clocksource and sched_clock.
We were using the same 5-sec minsec for the clocksource and sched_clock
that we were using for the clock_event_device. For the clock_event_device
that's exactly right since it has a short maximum countdown time.
But for sched_clock we want to avoid wraparound when converting from
ticks to nsec over a much longer window, so we force a shift of 10.
And for clocksource it seems dodgy to use a 5-sec minsec as well, so we
copy some other platforms and force a shift of 22.
Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
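
To make the wraparound argument concrete, here is a minimal user-space sketch (not part of the patch) of the mult/shift conversion that the kernel's clocksource_hz2mult() and clocksource_cyc2ns() perform; the helper names and the 1 GHz cycle rate below are illustrative assumptions. With a shift of 10 the 64-bit product cycles * mult takes months of cycles to overflow, which is what sched_clock() needs, while a shift of 22 gives the clocksource finer resolution at the cost of a much shorter overflow horizon.

```c
/*
 * Illustrative sketch (not kernel code): how a mult/shift pair converts
 * cycles to nanoseconds, and why a smaller shift delays 64-bit overflow.
 * The 1 GHz frequency is an assumed example, not a Tile-specific value.
 */
#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

/* Mirrors the idea of clocksource_hz2mult(). */
static uint32_t hz2mult(uint32_t hz, uint32_t shift)
{
	uint64_t tmp = ((uint64_t)NSEC_PER_SEC << shift) + hz / 2;
	return (uint32_t)(tmp / hz);
}

/* Mirrors the idea of clocksource_cyc2ns(): ns = (cycles * mult) >> shift. */
static uint64_t cyc2ns(uint64_t cycles, uint32_t mult, uint32_t shift)
{
	return (cycles * mult) >> shift;
}

int main(void)
{
	uint32_t hz = 1000000000;		/* assumed 1 GHz cycle counter */
	uint32_t shifts[] = { 10, 22 };

	for (int i = 0; i < 2; i++) {
		uint32_t shift = shifts[i];
		uint32_t mult = hz2mult(hz, shift);
		/* cycles * mult must stay below 2^64 */
		uint64_t max_cycles = UINT64_MAX / mult;
		double seconds = (double)max_cycles / hz;

		printf("shift %u: mult=%u, 64-bit math overflows after ~%.0f s of cycles\n",
		       shift, mult, seconds);
		printf("  1 ms of cycles -> %llu ns\n",
		       (unsigned long long)cyc2ns(hz / 1000, mult, shift));
	}
	return 0;
}
```

Under these assumptions, shift 10 gives mult = 1024 and roughly 2^54 cycles (about 208 days at 1 GHz) before the multiplication overflows, whereas shift 22 gives mult ≈ 4.19 million and only about 73 minutes of headroom, which is why the patch keeps the larger shift for the clocksource but not for sched_clock().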
Diffstat (limited to 'arch/tile')
-rw-r--r-- arch/tile/kernel/time.c | 33
1 file changed, 19 insertions(+), 14 deletions(-)
```diff
diff --git a/arch/tile/kernel/time.c b/arch/tile/kernel/time.c
index b9ab25a889b5..6bed820e1421 100644
--- a/arch/tile/kernel/time.c
+++ b/arch/tile/kernel/time.c
@@ -36,16 +36,6 @@
 /* How many cycles per second we are running at. */
 static cycles_t cycles_per_sec __write_once;
 
-/*
- * We set up shift and multiply values with a minsec of five seconds,
- * since our timer counter counts down 31 bits at a frequency of
- * no less than 500 MHz.  See @minsec for clocks_calc_mult_shift().
- * We could use a different value for the 64-bit free-running
- * cycle counter, but we use the same one for consistency, and since
- * we will be reasonably precise with this value anyway.
- */
-#define TILE_MINSEC 5
-
 cycles_t get_clock_rate(void)
 {
 	return cycles_per_sec;
@@ -68,6 +58,14 @@ cycles_t get_cycles(void)
 }
 #endif
 
+/*
+ * We use a relatively small shift value so that sched_clock()
+ * won't wrap around very often.
+ */
+#define SCHED_CLOCK_SHIFT 10
+
+static unsigned long sched_clock_mult __write_once;
+
 static cycles_t clocksource_get_cycles(struct clocksource *cs)
 {
 	return get_cycles();
@@ -78,6 +76,7 @@ static struct clocksource cycle_counter_cs = {
 	.rating = 300,
 	.read = clocksource_get_cycles,
 	.mask = CLOCKSOURCE_MASK(64),
+	.shift = 22,   /* typical value, e.g. x86 tsc uses this */
 	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
@@ -88,8 +87,10 @@ static struct clocksource cycle_counter_cs = {
 void __init setup_clock(void)
 {
 	cycles_per_sec = hv_sysconf(HV_SYSCONF_CPU_SPEED);
-	clocksource_calc_mult_shift(&cycle_counter_cs, cycles_per_sec,
-				    TILE_MINSEC);
+	sched_clock_mult =
+		clocksource_hz2mult(cycles_per_sec, SCHED_CLOCK_SHIFT);
+	cycle_counter_cs.mult =
+		clocksource_hz2mult(cycles_per_sec, cycle_counter_cs.shift);
 }
 
 void __init calibrate_delay(void)
@@ -117,9 +118,14 @@ void __init time_init(void)
  * counter, plus bit 31, which signifies that the counter has wrapped
  * from zero to (2**31) - 1.  The INT_TILE_TIMER interrupt will be
  * raised as long as bit 31 is set.
+ *
+ * The TILE_MINSEC value represents the largest range of real-time
+ * we can possibly cover with the timer, based on MAX_TICK combined
+ * with the slowest reasonable clock rate we might run at.
  */
 
 #define MAX_TICK 0x7fffffff   /* we have 31 bits of countdown timer */
+#define TILE_MINSEC 5         /* timer covers no more than 5 seconds */
 
 static int tile_timer_set_next_event(unsigned long ticks,
 				     struct clock_event_device *evt)
@@ -211,8 +217,7 @@ void do_timer_interrupt(struct pt_regs *regs, int fault_num)
 unsigned long long sched_clock(void)
 {
 	return clocksource_cyc2ns(get_cycles(),
-				  cycle_counter_cs.mult,
-				  cycle_counter_cs.shift);
+				  sched_clock_mult, SCHED_CLOCK_SHIFT);
 }
 
 int setup_profiling_timer(unsigned int multiplier)
```
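
As a quick sanity check on the comment added in the last hunk above (an illustrative calculation, assuming the 500 MHz floor cited in the removed comment), the 31-bit countdown covers at most about 4.3 seconds, so a 5-second TILE_MINSEC comfortably bounds the clock_event_device conversion range:

```c
/* Illustrative check, not part of the patch: longest possible countdown
 * at the assumed slowest clock rate versus the 5-second TILE_MINSEC. */
#include <stdio.h>

int main(void)
{
	const unsigned long max_tick = 0x7fffffff; /* 31 bits of countdown */
	const double min_hz = 500e6;               /* assumed slowest clock rate */

	/* 2147483647 / 500 MHz ~= 4.29 s, under the 5 s minsec. */
	printf("longest countdown = %.2f s\n", max_tick / min_hz);
	return 0;
}
```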