author    Grant Grundler <grundler@gsyprf11.external.hp.com>  2006-09-04 16:56:11 -0400
committer Matthew Wilcox <willy@parisc-linux.org>  2006-10-04 08:48:38 -0400
commit    6b799d9222fef265802b0b6dcc4fb982cc8f55ca
tree      f6d276b49949f2bbf7de1213d5424eb13edf458e
parent    bed583f76e1d5fbb5a6fdf27a0f7b2ae235f7e99
[PARISC] remove halftick and copy clocktick to local var (gcc can optimize usage)
Signed-off-by: Grant Grundler <grundler@parisc-linux.org>
Signed-off-by: Kyle McMartin <kyle@parisc-linux.org>
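The subject line's claim is worth one concrete illustration. Below is a minimal standalone sketch (hypothetical names, not from this commit): gcc must assume an opaque call such as profile_tick() may modify a file-scope global like clocktick and reload it from memory afterwards, while a local copy whose address is never taken can stay in a register.

    #include <stdio.h>

    /* Illustrative only, not kernel code.  Caching the global in a local
     * pins the value loaded at function entry; the call in the middle --
     * which, like any opaque call, may write the global -- can neither
     * force a reload nor change the computed result.
     */
    static unsigned long clocktick = 250000;

    static void do_other_work(void)
    {
            clocktick++;    /* an opaque callee could do this, so gcc
                             * must otherwise reload the global after it */
    }

    static unsigned long next_deadline(unsigned long now)
    {
            unsigned long local_ct = clocktick; /* one load, then a register */

            do_other_work();
            return now + local_ct;  /* cached copy: 250000, not 250001 */
    }

    int main(void)
    {
            printf("%lu\n", next_deadline(0));  /* prints 250000 */
            return 0;
    }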
Diffstat (limited to 'arch/parisc/kernel/time.c')
 -rw-r--r--  arch/parisc/kernel/time.c | 46 +++++++++++++++++++++++++++++++---------------
 1 file changed, 31 insertions(+), 15 deletions(-)
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
index fd425e1abe66..c43e847a4b8f 100644
--- a/arch/parisc/kernel/time.c
+++ b/arch/parisc/kernel/time.c
@@ -33,7 +33,6 @@
 #include <linux/timex.h>
 
 static unsigned long clocktick __read_mostly;   /* timer cycles per tick */
-static unsigned long halftick __read_mostly;
 
 #ifdef CONFIG_SMP
 extern void smp_do_timer(struct pt_regs *regs);
@@ -48,6 +47,9 @@ irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
        unsigned long ticks_elapsed = 1;        /* at least one elapsed */
        int cpu = smp_processor_id();
 
+       /* gcc can optimize for "read-only" case with a local clocktick */
+       unsigned long local_ct = clocktick;
+
        profile_tick(CPU_PROFILING, regs);
 
        /* Initialize next_tick to the expected tick time. */
@@ -74,8 +76,16 @@ irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
                cycles_elapsed = ~cycles_elapsed;       /* off by one cycle - don't care */
        }
 
-       ticks_elapsed += cycles_elapsed / clocktick;
-       cycles_remainder = cycles_elapsed % clocktick;
+       if (likely(cycles_elapsed < local_ct)) {
+               /* ticks_elapsed = 1 -- We already assumed one tick elapsed. */
+               cycles_remainder = cycles_elapsed;
+       } else {
+               /* more than one tick elapsed. Do "expensive" math. */
+               ticks_elapsed += cycles_elapsed / local_ct;
+
+               /* Faster version of "remainder = elapsed % clocktick" */
+               cycles_remainder = cycles_elapsed - (ticks_elapsed * local_ct);
+       }
 
        /* Can we differentiate between "early CR16" (aka Scenario 1) and
         * "long delay" (aka Scenario 3)? I don't think so.
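A standalone sketch of the fast-path split this hunk introduces (illustrative values, not kernel code): most interrupts arrive within one clocktick, so the common case needs no division at all, and the slow path divides once and recovers the remainder by multiply-subtract instead of paying for a second '%'. Note that for the subtraction to yield the true remainder, the multiply must use only the quotient of the divide, not any tick assumed up front.

    #include <stdio.h>

    static void split_ticks(unsigned long cycles, unsigned long ct,
                            unsigned long *ticks, unsigned long *rem)
    {
            if (cycles < ct) {          /* likely(): under one full tick */
                    *ticks = 1;         /* the one tick already assumed */
                    *rem = cycles;
            } else {
                    unsigned long q = cycles / ct;  /* single expensive divide */

                    *ticks = 1 + q;
                    *rem = cycles - q * ct; /* == cycles % ct, no divide */
            }
    }

    int main(void)
    {
            unsigned long t, r;

            split_ticks(25000, 10000, &t, &r);
            printf("ticks=%lu rem=%lu\n", t, r);    /* ticks=3 rem=5000 */
            return 0;
    }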
@@ -86,14 +96,12 @@ irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
         */
        if (ticks_elapsed > HZ) {
                /* Scenario 3: very long delay?  bad in any case */
-               printk (KERN_CRIT "timer_interrupt(CPU %d): delayed! run ntpdate"
+               printk (KERN_CRIT "timer_interrupt(CPU %d): delayed!"
                        " ticks %ld cycles %lX rem %lX"
                        " next/now %lX/%lX\n",
                        cpu,
                        ticks_elapsed, cycles_elapsed, cycles_remainder,
                        next_tick, now );
-
-               ticks_elapsed = 1; /* hack to limit damage in loop below */
        }
 
 
@@ -101,12 +109,19 @@ irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
         * We want IT to fire modulo clocktick even if we miss/skip some.
         * But those interrupts don't in fact get delivered that regularly.
         */
-       next_tick = now + (clocktick - cycles_remainder);
+       next_tick = now + (local_ct - cycles_remainder);
+
+       /* Skip one clocktick on purpose if we are likely to miss next_tick.
+        * We'll catch what we missed on the tick after that.
+        * We should never need 0x1000 cycles to read CR16, calc the
+        * new next_tick, then write CR16 back. */
+       if (!((local_ct - cycles_remainder) >> 12))
+               next_tick += local_ct;
 
        /* Program the IT when to deliver the next interrupt. */
        /* Only bottom 32-bits of next_tick are written to cr16. */
-       mtctl(next_tick, 16);
        cpu_data[cpu].it_value = next_tick;
+       mtctl(next_tick, 16);
 
        /* Now that we are done mucking with unreliable delivery of interrupts,
         * go do system house keeping.
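The skip-one-tick guard in this hunk is plain margin arithmetic: if fewer than 0x1000 cycles remain before next_tick, the mtctl() write might land after the comparator value has already passed, so the code aims one tick later. A standalone restatement (not kernel code):

    #include <stdio.h>

    static unsigned long pick_next_tick(unsigned long now, unsigned long ct,
                                        unsigned long rem)
    {
            unsigned long margin = ct - rem;    /* cycles until next_tick */
            unsigned long next = now + margin;

            if (margin < 0x1000)    /* same test as !(margin >> 12) */
                    next += ct;     /* skip a tick; catch up on the next one */
            return next;
    }

    int main(void)
    {
            /* margin = 1000 cycles < 0x1000, so one extra tick is added */
            printf("%lu\n", pick_next_tick(0, 250000, 249000)); /* 251000 */
            return 0;
    }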
@@ -169,35 +184,37 @@ gettimeoffset (void)
        unsigned long next_tick;
        unsigned long elapsed_cycles;
        unsigned long usec;
+       unsigned long cpuid = smp_processor_id();
+       unsigned long local_ct = clocktick;
 
-       next_tick = cpu_data[smp_processor_id()].it_value;
+       next_tick = cpu_data[cpuid].it_value;
        now = mfctl(16);        /* Read the hardware interval timer. */
 
-       prev_tick = next_tick - clocktick;
+       prev_tick = next_tick - local_ct;
 
        /* Assume Scenario 1: "now" is later than prev_tick. */
        elapsed_cycles = now - prev_tick;
 
        if (now < prev_tick) {
                /* Scenario 2: CR16 wrapped!
-                * 1's complement is close enough.
+                * ones complement is off-by-one. Don't care.
                 */
                elapsed_cycles = ~elapsed_cycles;
        }
 
-       if (elapsed_cycles > (HZ * clocktick)) {
+       if (elapsed_cycles > (HZ * local_ct)) {
                /* Scenario 3: clock ticks are missing. */
                printk (KERN_CRIT "gettimeoffset(CPU %d): missing ticks!"
                        "cycles %lX prev/now/next %lX/%lX/%lX clock %lX\n",
                        cpuid,
-                       elapsed_cycles, prev_tick, now, next_tick, clocktick);
+                       elapsed_cycles, prev_tick, now, next_tick, local_ct);
        }
 
        /* FIXME: Can we improve the precision? Not with PAGE0. */
        usec = (elapsed_cycles * 10000) / PAGE0->mem_10msec;
 
        /* add in "lost" jiffies */
-       usec += clocktick * (jiffies - wall_jiffies);
+       usec += local_ct * (jiffies - wall_jiffies);
        return usec;
 #else
        return 0;
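The cycles-to-microseconds step in this hunk is pure unit conversion: PAGE0->mem_10msec is the number of CR16 cycles in 10 ms, i.e. in 10000 us, so usec = cycles * 10000 / mem_10msec. A standalone check with a made-up clock rate (the 250000 below is illustrative, not a real machine's value):

    #include <stdio.h>

    int main(void)
    {
            unsigned long mem_10msec = 250000;      /* hypothetical 25 MHz CR16 */
            unsigned long elapsed_cycles = 125000;  /* half of a 10 ms window */

            unsigned long usec = (elapsed_cycles * 10000) / mem_10msec;
            printf("%lu usec\n", usec);             /* prints: 5000 usec */
            return 0;
    }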
@@ -290,7 +307,6 @@ void __init time_init(void)
        static struct pdc_tod tod_data;
 
        clocktick = (100 * PAGE0->mem_10msec) / HZ;
-       halftick = clocktick / 2;
 
        start_cpu_itimer();     /* get CPU 0 started */
 
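For completeness, the clocktick seeding above is the inverse unit conversion: 100 ten-millisecond windows make one second, so 100 * mem_10msec is the CR16 rate in cycles per second, and dividing by HZ (ticks per second) gives cycles per tick. A standalone check with hypothetical numbers:

    #include <stdio.h>

    int main(void)
    {
            unsigned long mem_10msec = 250000;      /* hypothetical 25 MHz CR16 */
            unsigned long hz = 100;                 /* stand-in for HZ */
            unsigned long ct = (100 * mem_10msec) / hz;

            printf("clocktick = %lu cycles/tick\n", ct);    /* 250000 */
            return 0;
    }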