author     Grant Grundler <grundler@gsyprf11.external.hp.com>    2006-09-09 02:29:22 -0400
committer  Matthew Wilcox <willy@parisc-linux.org>               2006-10-04 08:48:28 -0400
commit     bed583f76e1d5fbb5a6fdf27a0f7b2ae235f7e99 (patch)
tree       a5c6b964cb2379406b9f1c4efc04fa3c093c28e9
parent     65ee8f0a7fc2f2267b983f1f0349acb8f19db6e6 (diff)

[PARISC] Rewrite timer_interrupt() and gettimeoffset() using "unsigned" math.

It's just a bit easier to follow, and timer code is complex enough. So far,
only tested on A500-5x (64-bit SMP), i.e. the gettimeoffset() code hasn't been
tested at all.

Signed-off-by: Grant Grundler <grundler@parisc-linux.org>
Signed-off-by: Kyle McMartin <kyle@parisc-linux.org>

-rw-r--r--   arch/parisc/kernel/time.c   140
1 file changed, 96 insertions(+), 44 deletions(-)
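
The central idea of the rewrite is that deltas of the free-running CR16 interval
timer are taken with unsigned arithmetic, which is defined modulo 2^32 (or 2^64
in wide mode) and therefore stays meaningful across a counter wrap. Below is a
minimal standalone sketch of that property; it is not part of the patch, and the
counter values and clocktick are made up for illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Pretend CR16 is a free-running 32-bit counter (narrow mode). */
	uint32_t clocktick = 2500000;       /* made-up cycles per tick      */
	uint32_t last_tick = 0xFFFFFF00u;   /* programmed just before wrap  */
	uint32_t now       = 0x00264000u;   /* read after the counter wraps */

	/* Unsigned subtraction is modulo 2^32, so the delta is correct
	 * even though "now" is numerically smaller than "last_tick".
	 */
	uint32_t elapsed = now - last_tick;

	printf("elapsed cycles: %u (%u whole ticks)\n",
	       elapsed, elapsed / clocktick);
	return 0;
}

With signed types the equivalent subtraction can overflow (undefined behaviour
in C), and the old `(next_tick - now) < halftick` test is harder to reason
about near a wrap, which is what the patch is getting rid of.
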
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
index 47831c2cd093..fd425e1abe66 100644
--- a/arch/parisc/kernel/time.c
+++ b/arch/parisc/kernel/time.c
@@ -32,8 +32,8 @@
 
 #include <linux/timex.h>
 
-static long clocktick __read_mostly;	/* timer cycles per tick */
-static long halftick __read_mostly;
+static unsigned long clocktick __read_mostly;	/* timer cycles per tick */
+static unsigned long halftick __read_mostly;
 
 #ifdef CONFIG_SMP
 extern void smp_do_timer(struct pt_regs *regs);
@@ -41,34 +41,77 @@ extern void smp_do_timer(struct pt_regs *regs);
 
 irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 {
-	long now;
-	long next_tick;
-	int nticks;
+	unsigned long now;
+	unsigned long next_tick;
+	unsigned long cycles_elapsed;
+	unsigned long cycles_remainder;
+	unsigned long ticks_elapsed = 1;	/* at least one elapsed */
 	int cpu = smp_processor_id();
 
 	profile_tick(CPU_PROFILING, regs);
 
-	now = mfctl(16);
-	/* initialize next_tick to time at last clocktick */
+	/* Initialize next_tick to the expected tick time. */
 	next_tick = cpu_data[cpu].it_value;
 
-	/* since time passes between the interrupt and the mfctl()
-	 * above, it is never true that last_tick + clocktick == now.  If we
-	 * never miss a clocktick, we could set next_tick = last_tick + clocktick
-	 * but maybe we'll miss ticks, hence the loop.
-	 *
-	 * Variables are *signed*.
+	/* Get the current interval timer.
+	 * CR16 reads as 64 bits in CPU wide mode.
+	 * CR16 reads as 32 bits in CPU narrow mode.
 	 */
+	now = mfctl(16);
 
-	nticks = 0;
-	while((next_tick - now) < halftick) {
-		next_tick += clocktick;
-		nticks++;
+	cycles_elapsed = now - next_tick;
+
+	/* Determine how much time elapsed. */
+	if (now < next_tick) {
+		/* Scenario 2: CR16 wrapped after the clock tick.
+		 * 1's complement will give us the "elapsed cycles".
+		 *
+		 * This "cr16 wrapped" cruft is primarily for 32-bit kernels.
+		 * So think "unsigned long is u32" when reading the code.
+		 * And yes, of course 64-bit will someday wrap, but only
+		 * every 198841 days on a 1GHz machine.
+		 */
+		cycles_elapsed = ~cycles_elapsed;	/* off by one cycle - don't care */
 	}
+
+	ticks_elapsed += cycles_elapsed / clocktick;
+	cycles_remainder = cycles_elapsed % clocktick;
+
+	/* Can we differentiate between "early CR16" (aka Scenario 1) and
+	 * "long delay" (aka Scenario 3)? I don't think so.
+	 *
+	 * We expect timer_interrupt to be delivered at least a few hundred
+	 * cycles after the IT fires. But it's arbitrary how much time passes
+	 * before we call it "late". I've picked one second.
+	 */
+	if (ticks_elapsed > HZ) {
+		/* Scenario 3: very long delay? Bad in any case. */
+		printk(KERN_CRIT "timer_interrupt(CPU %d): delayed! run ntpdate"
+			" ticks %ld cycles %lX rem %lX"
+			" next/now %lX/%lX\n",
+			cpu,
+			ticks_elapsed, cycles_elapsed, cycles_remainder,
+			next_tick, now);
+
+		ticks_elapsed = 1;	/* hack to limit damage in loop below */
+	}
+
+
+	/* Determine when (in CR16 cycles) the next IT interrupt will fire.
+	 * We want IT to fire modulo clocktick even if we miss/skip some.
+	 * But those interrupts don't in fact get delivered that regularly.
+	 */
+	next_tick = now + (clocktick - cycles_remainder);
+
+	/* Program the IT when to deliver the next interrupt. */
+	/* Only the bottom 32 bits of next_tick are written to cr16. */
 	mtctl(next_tick, 16);
 	cpu_data[cpu].it_value = next_tick;
 
-	while (nticks--) {
+	/* Now that we are done mucking with unreliable delivery of interrupts,
+	 * go do the system housekeeping.
+	 */
+	while (ticks_elapsed--) {
 #ifdef CONFIG_SMP
 		smp_do_timer(regs);
 #else
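
For readers skimming the hunk above, the scheduling arithmetic reduces to:
take the unsigned cycle delta since the expected tick, split it into whole
ticks plus a remainder, and program the next interrupt one clocktick boundary
past "now". Here is a compilable userspace restatement of just that arithmetic;
it deliberately omits the CR16-wrap and >HZ special cases, and all names and
numbers are illustrative, not taken from the kernel:

#include <stdint.h>
#include <stdio.h>

/* Keep the interval timer firing on clocktick boundaries even when one
 * or more interrupts were delivered late or skipped entirely.
 */
static uint32_t schedule_next(uint32_t now, uint32_t expected_tick,
			      uint32_t clocktick, uint32_t *ticks_elapsed)
{
	uint32_t cycles_elapsed   = now - expected_tick;	/* modulo 2^32 */
	uint32_t cycles_remainder = cycles_elapsed % clocktick;

	/* At least one tick elapsed, plus however many whole ticks we
	 * slept through.
	 */
	*ticks_elapsed = 1 + cycles_elapsed / clocktick;

	/* The next interrupt lands on the next clocktick boundary after "now". */
	return now + (clocktick - cycles_remainder);
}

int main(void)
{
	uint32_t ticks;
	/* Made-up numbers: the interrupt arrives 2.5 ticks late. */
	uint32_t next = schedule_next(/* now */ 1025000,
				      /* expected_tick */ 1000000,
				      /* clocktick */ 10000, &ticks);
	printf("ticks elapsed: %u, next tick at: %u\n", ticks, next);
	return 0;
}

With those inputs it reports 3 elapsed ticks and reprograms the timer for
1030000, which is still an exact multiple of clocktick past the original
expected tick.
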
@@ -121,21 +164,41 @@ gettimeoffset (void)
 	 * Once parisc-linux learns the cr16 difference between processors,
 	 * this could be made to work.
 	 */
-	long last_tick;
-	long elapsed_cycles;
+	unsigned long now;
+	unsigned long prev_tick;
+	unsigned long next_tick;
+	unsigned long elapsed_cycles;
+	unsigned long usec;
 
-	/* it_value is the intended time of the next tick */
-	last_tick = cpu_data[smp_processor_id()].it_value;
+	next_tick = cpu_data[smp_processor_id()].it_value;
+	now = mfctl(16);	/* Read the hardware interval timer. */
 
-	/* Subtract one tick and account for possible difference between
-	 * when we expected the tick and when it actually arrived.
-	 * (aka wall vs real)
-	 */
-	last_tick -= clocktick * (jiffies - wall_jiffies + 1);
-	elapsed_cycles = mfctl(16) - last_tick;
+	prev_tick = next_tick - clocktick;
+
+	/* Assume Scenario 1: "now" is later than prev_tick. */
+	elapsed_cycles = now - prev_tick;
+
+	if (now < prev_tick) {
+		/* Scenario 2: CR16 wrapped!
+		 * 1's complement is close enough.
+		 */
+		elapsed_cycles = ~elapsed_cycles;
+	}
 
-	/* the precision of this math could be improved */
-	return elapsed_cycles / (PAGE0->mem_10msec / 10000);
+	if (elapsed_cycles > (HZ * clocktick)) {
+		/* Scenario 3: clock ticks are missing. */
+		printk(KERN_CRIT "gettimeoffset(CPU %d): missing ticks!"
+			" cycles %lX prev/now/next %lX/%lX/%lX clock %lX\n",
+			cpuid,
+			elapsed_cycles, prev_tick, now, next_tick, clocktick);
+	}
+
+	/* FIXME: Can we improve the precision? Not with PAGE0. */
+	usec = (elapsed_cycles * 10000) / PAGE0->mem_10msec;
+
+	/* add in "lost" jiffies */
+	usec += clocktick * (jiffies - wall_jiffies);
+	return usec;
 #else
 	return 0;
 #endif
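
The usec conversion in the new gettimeoffset() relies on PAGE0->mem_10msec
being the number of CR16 cycles in 10 ms, so usec = cycles * 10000 / mem_10msec.
A quick sanity check of that formula with a made-up 250 MHz CR16 (2,500,000
cycles per 10 ms), done outside the kernel:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Hypothetical 250 MHz CR16: 2,500,000 cycles every 10 ms. */
	uint64_t mem_10msec     = 2500000;
	uint64_t elapsed_cycles = 1250000;	/* made up: half a 10 ms period */

	/* Same formula as the patch: usec = cycles * 10000 / cycles-per-10ms. */
	uint64_t usec = (elapsed_cycles * 10000) / mem_10msec;

	printf("%llu usec\n", (unsigned long long)usec);	/* prints 5000 */
	return 0;
}

The sketch does the multiply in 64 bits; in the patch the same expression is
evaluated in unsigned long, which on a 32-bit kernel wraps once elapsed_cycles
exceeds roughly 429,000 cycles, one more reason behind the FIXME about
precision.
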
@@ -146,6 +209,7 @@ do_gettimeofday (struct timeval *tv)
 {
 	unsigned long flags, seq, usec, sec;
 
+	/* Hold xtime_lock and adjust timeval. */
 	do {
 		seq = read_seqbegin_irqsave(&xtime_lock, flags);
 		usec = gettimeoffset();
@@ -153,25 +217,13 @@ do_gettimeofday (struct timeval *tv)
 		usec += (xtime.tv_nsec / 1000);
 	} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
 
-	if (unlikely(usec > LONG_MAX)) {
-		/* This can happen if the gettimeoffset adjustment is
-		 * negative and xtime.tv_nsec is smaller than the
-		 * adjustment */
-		printk(KERN_ERR "do_gettimeofday() spurious xtime.tv_nsec of %ld\n", usec);
-		usec += USEC_PER_SEC;
-		--sec;
-		/* This should never happen, it means the negative
-		 * time adjustment was more than a second, so there's
-		 * something seriously wrong */
-		BUG_ON(usec > LONG_MAX);
-	}
-
-
+	/* Move adjusted usec's into sec's. */
 	while (usec >= USEC_PER_SEC) {
 		usec -= USEC_PER_SEC;
 		++sec;
 	}
 
+	/* Return adjusted result. */
 	tv->tv_sec = sec;
 	tv->tv_usec = usec;
 }
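
The do_gettimeofday() hunk drops the old negative-adjustment recovery path and
keeps only the loop that folds excess microseconds into whole seconds; the
surrounding read_seqbegin_irqsave()/read_seqretry_irqrestore() loop simply
retries the snapshot if xtime was updated concurrently. A trivial standalone
illustration of the normalization step, with made-up snapshot values:

#include <stdio.h>

#define USEC_PER_SEC 1000000UL

int main(void)
{
	/* Made-up snapshot: the offset pushed usec past two whole seconds. */
	unsigned long sec  = 100;
	unsigned long usec = 2345678;

	/* Fold whole seconds out of the microsecond field, exactly as the
	 * simplified do_gettimeofday() now does.
	 */
	while (usec >= USEC_PER_SEC) {
		usec -= USEC_PER_SEC;
		++sec;
	}

	printf("sec=%lu usec=%lu\n", sec, usec);	/* sec=102 usec=345678 */
	return 0;
}
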