 arch/parisc/kernel/time.c | 127 ++++++++++++++++++++++++-----------------
 1 file changed, 65 insertions(+), 62 deletions(-)
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
index c43e847a4b8f..9d642d820fe9 100644
--- a/arch/parisc/kernel/time.c
+++ b/arch/parisc/kernel/time.c
@@ -43,12 +43,11 @@ irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 	unsigned long now;
 	unsigned long next_tick;
 	unsigned long cycles_elapsed;
 	unsigned long cycles_remainder;
-	unsigned long ticks_elapsed = 1;	/* at least one elapsed */
-	int cpu = smp_processor_id();
 
 	/* gcc can optimize for "read-only" case with a local clocktick */
-	unsigned long local_ct = clocktick;
+	unsigned int cpu = smp_processor_id();
+
+	/* gcc can optimize for "read-only" case with a local clocktick */
+	unsigned long cpt = clocktick;
 
 	profile_tick(CPU_PROFILING, regs);
 
@@ -63,28 +62,16 @@ irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 
 	cycles_elapsed = now - next_tick;
 
-	/* Determine how much time elapsed. */
-	if (now < next_tick) {
-		/* Scenario 2: CR16 wrapped after clock tick.
-		 * 1's complement will give us the "elapse cycles".
-		 *
-		 * This "cr16 wrapped" cruft is primarily for 32-bit kernels.
-		 * So think "unsigned long is u32" when reading the code.
-		 * And yes, of course 64-bit will someday wrap, but only
-		 * every 198841 days on a 1GHz machine.
-		 */
-		cycles_elapsed = ~cycles_elapsed;	/* off by one cycle - don't care */
-	}
-
-	if (likely(cycles_elapsed < local_ct)) {
-		/* ticks_elapsed = 1 -- We already assumed one tick elapsed. */
+	if ((cycles_elapsed >> 5) < cpt) {
+		/* use "cheap" math (add/subtract) instead
+		 * of the more expensive div/mul method
+		 */
 		cycles_remainder = cycles_elapsed;
+		while (cycles_remainder > cpt) {
+			cycles_remainder -= cpt;
+		}
 	} else {
-		/* more than one tick elapsed. Do "expensive" math. */
-		ticks_elapsed += cycles_elapsed / local_ct;
-
-		/* Faster version of "remainder = elapsed % clocktick" */
-		cycles_remainder = cycles_elapsed - (ticks_elapsed * local_ct);
+		cycles_remainder = cycles_elapsed % cpt;
 	}
 
 	/* Can we differentiate between "early CR16" (aka Scenario 1) and
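
The fast path in this hunk is the interesting bit: (cycles_elapsed >> 5) < cpt is exactly cycles_elapsed < 32 * cpt, so the subtract loop runs at most 31 times before the value drops to a clocktick or less, which is typically cheaper than the div/mod on the slow path. A minimal standalone sketch of the technique (the helper name is invented here, not part of the patch):

	/* Remainder by repeated subtraction when the dividend is small.
	 * (elapsed >> 5) < cpt  <=>  elapsed < 32 * cpt, bounding the
	 * loop at 31 iterations. Note the loop tests '>' as the patch
	 * does, so an exact multiple of cpt yields cpt rather than 0;
	 * the later "cpt - cycles_remainder" step maps that back to 0.
	 */
	static unsigned long cheap_remainder(unsigned long elapsed,
					     unsigned long cpt)
	{
		unsigned long rem = elapsed;

		if ((elapsed >> 5) < cpt) {
			while (rem > cpt)
				rem -= cpt;
		} else {
			rem = elapsed % cpt;
		}
		return rem;
	}
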
@@ -94,51 +81,65 @@ irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 	 * cycles after the IT fires. But it's arbitrary how much time passes
 	 * before we call it "late". I've picked one second.
 	 */
-	if (ticks_elapsed > HZ) {
+/* approximate HZ with shifts. Intended math is "(elapsed/clocktick) > HZ" */
+#if HZ == 1000
+	if (cycles_elapsed > (cpt << 10))
+#elif HZ == 250
+	if (cycles_elapsed > (cpt << 8))
+#elif HZ == 100
+	if (cycles_elapsed > (cpt << 7))
+#else
+#warning WTF is HZ set to anyway?
+	if (cycles_elapsed > (HZ * cpt))
+#endif
+	{
 		/* Scenario 3: very long delay? bad in any case */
 		printk (KERN_CRIT "timer_interrupt(CPU %d): delayed!"
-			" ticks %ld cycles %lX rem %lX"
+			" cycles %lX rem %lX "
 			" next/now %lX/%lX\n",
 			cpu,
-			ticks_elapsed, cycles_elapsed, cycles_remainder,
+			cycles_elapsed, cycles_remainder,
 			next_tick, now );
 	}
 
+	/* convert from "division remainder" to "remainder of clock tick" */
+	cycles_remainder = cpt - cycles_remainder;
 
 	/* Determine when (in CR16 cycles) next IT interrupt will fire.
 	 * We want IT to fire modulo clocktick even if we miss/skip some.
 	 * But those interrupts don't in fact get delivered that regularly.
 	 */
-	next_tick = now + (local_ct - cycles_remainder);
+	next_tick = now + cycles_remainder;
+
+	cpu_data[cpu].it_value = next_tick;
 
 	/* Skip one clocktick on purpose if we are likely to miss next_tick.
-	 * We'll catch what we missed on the tick after that.
-	 * We should never need 0x1000 cycles to read CR16, calc the
-	 * new next_tick, then write CR16 back. */
-	if (!((local_ct - cycles_remainder) >> 12))
-		next_tick += local_ct;
+	 * We want to avoid the new next_tick being less than CR16.
+	 * If that happened, itimer wouldn't fire until CR16 wrapped.
+	 * We'll catch the tick we missed on the tick after that.
+	 */
+	if (!(cycles_remainder >> 13))
+		next_tick += cpt;
 
 	/* Program the IT when to deliver the next interrupt. */
 	/* Only bottom 32-bits of next_tick are written to cr16. */
-	cpu_data[cpu].it_value = next_tick;
 	mtctl(next_tick, 16);
 
-	/* Now that we are done mucking with unreliable delivery of interrupts,
-	 * go do system house keeping.
+
+	/* Done mucking with unreliable delivery of interrupts.
+	 * Go do system house keeping.
 	 */
-	while (ticks_elapsed--) {
 #ifdef CONFIG_SMP
 	smp_do_timer(regs);
 #else
 	update_process_times(user_mode(regs));
 #endif
 	if (cpu == 0) {
 		write_seqlock(&xtime_lock);
-		do_timer(1);
+		do_timer(regs);
 		write_sequnlock(&xtime_lock);
-	}
 	}
 
 	/* check soft power switch status */
 	if (cpu == 0 && !atomic_read(&power_tasklet.count))
 		tasklet_schedule(&power_tasklet);
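
The #if ladder above (and its twin in gettimeoffset() below) approximates "more than one second of cycles", i.e. cycles_elapsed > HZ * cpt, by rounding HZ up to a power of two so the multiply becomes a shift: 1000 -> 1024 (<< 10), 250 -> 256 (<< 8), 100 -> 128 (<< 7). The threshold is at most 28% looser than a true second (the HZ == 100 case), which is harmless for a "badly delayed" diagnostic. Since the ladder appears twice, it could be hoisted into one helper; a sketch under that assumption (the macro is hypothetical, not in the patch):

	/* Hypothetical helper: cycles in ~1 second, multiply-free. */
	#if HZ == 1000
	# define ONE_SEC_CYCLES(cpt)	((cpt) << 10)	/* 1024 >= HZ */
	#elif HZ == 250
	# define ONE_SEC_CYCLES(cpt)	((cpt) << 8)	/* 256 >= HZ */
	#elif HZ == 100
	# define ONE_SEC_CYCLES(cpt)	((cpt) << 7)	/* 128 >= HZ */
	#else
	# define ONE_SEC_CYCLES(cpt)	(HZ * (cpt))	/* exact fallback */
	#endif

Both sites would then read "if (cycles_elapsed > ONE_SEC_CYCLES(cpt))".
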
@@ -164,14 +165,12 @@ unsigned long profile_pc(struct pt_regs *regs)
 EXPORT_SYMBOL(profile_pc);
 
 
-/*** converted from ia64 ***/
 /*
  * Return the number of micro-seconds that elapsed since the last
  * update to wall time (aka xtime). The xtime_lock
  * must be at least read-locked when calling this routine.
  */
-static inline unsigned long
-gettimeoffset (void)
+static inline unsigned long gettimeoffset (void)
 {
 #ifndef CONFIG_SMP
 	/*
@@ -185,36 +184,40 @@ gettimeoffset (void)
 	unsigned long elapsed_cycles;
 	unsigned long usec;
 	unsigned long cpuid = smp_processor_id();
-	unsigned long local_ct = clocktick;
+	unsigned long cpt = clocktick;
 
 	next_tick = cpu_data[cpuid].it_value;
 	now = mfctl(16);	/* Read the hardware interval timer. */
 
-	prev_tick = next_tick - local_ct;
+	prev_tick = next_tick - cpt;
 
 	/* Assume Scenario 1: "now" is later than prev_tick. */
 	elapsed_cycles = now - prev_tick;
 
-	if (now < prev_tick) {
-		/* Scenario 2: CR16 wrapped!
-		 * ones complement is off-by-one. Don't care.
-		 */
-		elapsed_cycles = ~elapsed_cycles;
-	}
-
-	if (elapsed_cycles > (HZ * local_ct)) {
+/* approximate HZ with shifts. Intended math is "(elapsed/clocktick) > HZ" */
+#if HZ == 1000
+	if (elapsed_cycles > (cpt << 10))
+#elif HZ == 250
+	if (elapsed_cycles > (cpt << 8))
+#elif HZ == 100
+	if (elapsed_cycles > (cpt << 7))
+#else
+#warning WTF is HZ set to anyway?
+	if (elapsed_cycles > (HZ * cpt))
+#endif
+	{
 		/* Scenario 3: clock ticks are missing. */
-		printk (KERN_CRIT "gettimeoffset(CPU %d): missing ticks!"
-			"cycles %lX prev/now/next %lX/%lX/%lX clock %lX\n",
-			cpuid,
-			elapsed_cycles, prev_tick, now, next_tick, local_ct);
+		printk (KERN_CRIT "gettimeoffset(CPU %ld): missing %ld ticks!"
+			" cycles %lX prev/now/next %lX/%lX/%lX clock %lX\n",
+			cpuid, elapsed_cycles / cpt,
+			elapsed_cycles, prev_tick, now, next_tick, cpt);
 	}
 
 	/* FIXME: Can we improve the precision? Not with PAGE0. */
 	usec = (elapsed_cycles * 10000) / PAGE0->mem_10msec;
 
 	/* add in "lost" jiffies */
-	usec += local_ct * (jiffies - wall_jiffies);
+	usec += cpt * (jiffies - wall_jiffies);
 	return usec;
 #else
 	return 0;
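
For reference, the closing conversion works because PAGE0->mem_10msec holds the number of CR16 cycles in 10 ms, i.e. in 10000 us, so "usec = elapsed_cycles * 10000 / PAGE0->mem_10msec" rescales cycles to microseconds. A worked check with made-up numbers (a 250 MHz CR16 is an assumption, not from the patch):

	unsigned long mem_10msec = 2500000UL;		/* 250 MHz: cycles per 10 ms */
	unsigned long elapsed_cycles = 1250000UL;	/* half a 10 ms window */
	unsigned long usec = (elapsed_cycles * 10000) / mem_10msec;
	/* usec == 5000, i.e. 5 ms. The intermediate product (1.25e10)
	 * only fits a 64-bit unsigned long; with 32-bit longs a product
	 * this size would wrap, which may be part of the FIXME above.
	 */
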