author		David S. Miller <davem@sunset.davemloft.net>	2007-03-05 18:28:37 -0500
committer	David S. Miller <davem@sunset.davemloft.net>	2007-04-26 04:54:15 -0400
commit		112f48716d9f292c92a033cff9e3ce7405ed4280
tree		5be2952ca83adb519df5995c10e0686447fea88f
parent		038cb01ea69cb24ecf30e3ec882e429c84badbeb
[SPARC64]: Add clocksource/clockevents support.
I'd like to thank John Stul and others for helping me along the way.

A lot of cleanups fell out of this. For example, the get_compare() tick_op was totally unused, so it was deleted. And the most often used tick_op members were grouped together for cache-friendliness.

The sparc64 TSC is given to the kernel as a one-shot timer.

tick_ops->init_timer() simply turns off the privileged bit in the tick register (when possible), and disables the interrupt by setting bit 63 in the compare register. The ->disable_irq() op also sets this bit.

tick_ops->add_compare() is changed to:

1) Add the given delta to "tick", not to "compare".

2) Return a boolean which, if true, means that the tick value read after writing the compare value was found to have incremented past the initial tick value. This mirrors logic used in the HPET driver's ->next_event() method.

Each tick_ops implementation also now provides a name string, and we feed this into the clocksource and clockevents layers.

Signed-off-by: David S. Miller <davem@davemloft.net>
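[Editor's note] The ->add_compare() return convention in point 2 is easiest to see in isolation. Below is a minimal, self-contained C sketch of the pattern; it is illustrative only, and read_tick()/write_compare() are hypothetical stand-ins for the privileged rd %tick / wr %tick_cmpr register accesses that the real tick_ops implementations in the diff use.

	#include <stdio.h>

	static unsigned long fake_tick;	/* simulated free-running counter */

	static unsigned long read_tick(void)
	{
		return fake_tick += 3;	/* pretend the counter keeps advancing */
	}

	static void write_compare(unsigned long val)
	{
		(void)val;		/* real hardware would arm an interrupt here */
	}

	static int add_compare(unsigned long delta)
	{
		unsigned long orig = read_tick();

		/* Arm the one-shot: fire when the counter reaches orig + delta. */
		write_compare(orig + delta);

		/* If the counter has already moved past the value we just
		 * programmed, the event may have been missed; report that
		 * so the caller can re-arm with a larger delta.
		 */
		return (long)(read_tick() - (orig + delta)) > 0L;
	}

	int main(void)
	{
		printf("delta 1000 missed? %d\n", add_compare(1000));	/* prints 0 */
		printf("delta 1    missed? %d\n", add_compare(1));	/* prints 1 */
		return 0;
	}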
-rw-r--r--	arch/sparc64/Kconfig		14
-rw-r--r--	arch/sparc64/kernel/smp.c	29
-rw-r--r--	arch/sparc64/kernel/time.c	408
-rw-r--r--	include/asm-sparc64/timer.h	17
4 files changed, 238 insertions(+), 230 deletions(-)
diff --git a/arch/sparc64/Kconfig b/arch/sparc64/Kconfig
index 1a6348b565fb..51c87fdd998c 100644
--- a/arch/sparc64/Kconfig
+++ b/arch/sparc64/Kconfig
@@ -19,6 +19,14 @@ config SPARC64
 	  SPARC64 ports; its web page is available at
 	  <http://www.ultralinux.org/>.
 
+config GENERIC_TIME
+	bool
+	default y
+
+config GENERIC_CLOCKEVENTS
+	bool
+	default y
+
 config 64BIT
 	def_bool y
 
@@ -34,10 +42,6 @@ config LOCKDEP_SUPPORT
 	bool
 	default y
 
-config TIME_INTERPOLATION
-	bool
-	default y
-
 config ARCH_MAY_HAVE_PC_FDC
 	bool
 	default y
@@ -113,6 +117,8 @@ config GENERIC_HARDIRQS
 
 menu "General machine setup"
 
+source "kernel/time/Kconfig"
+
 config SMP
 	bool "Symmetric multi-processing support"
 	---help---
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index 39deb0346eb5..d4f0a70f4845 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -123,7 +123,7 @@ void __init smp_store_cpu_info(int id)
 	       cpu_data(id).ecache_size, cpu_data(id).ecache_line_size);
 }
 
-static void smp_setup_percpu_timer(void);
+extern void setup_sparc64_timer(void);
 
 static volatile unsigned long callin_flag = 0;
 
@@ -138,7 +138,7 @@ void __init smp_callin(void)
 
 	__flush_tlb_all();
 
-	smp_setup_percpu_timer();
+	setup_sparc64_timer();
 
 	if (cheetah_pcache_forced_on)
 		cheetah_enable_pcache();
@@ -175,8 +175,6 @@ void cpu_panic(void)
 	panic("SMP bolixed\n");
 }
 
-static unsigned long current_tick_offset __read_mostly;
-
 /* This tick register synchronization scheme is taken entirely from
  * the ia64 port, see arch/ia64/kernel/smpboot.c for details and credit.
  *
@@ -259,7 +257,7 @@ void smp_synchronize_tick_client(void)
 		} else
 			adj = -delta;
 
-		tick_ops->add_tick(adj, current_tick_offset);
+		tick_ops->add_tick(adj);
 	}
 #if DEBUG_TICK_SYNC
 	t[i].rt = rt;
@@ -1178,30 +1176,9 @@ void smp_penguin_jailcell(int irq, struct pt_regs *regs)
 	preempt_enable();
 }
 
-static void __init smp_setup_percpu_timer(void)
-{
-	unsigned long pstate;
-
-	/* Guarantee that the following sequences execute
-	 * uninterrupted.
-	 */
-	__asm__ __volatile__("rdpr %%pstate, %0\n\t"
-			     "wrpr %0, %1, %%pstate"
-			     : "=r" (pstate)
-			     : "i" (PSTATE_IE));
-
-	tick_ops->init_tick(current_tick_offset);
-
-	/* Restore PSTATE_IE. */
-	__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
-			     : /* no outputs */
-			     : "r" (pstate));
-}
-
 void __init smp_tick_init(void)
 {
 	boot_cpu_id = hard_smp_processor_id();
-	current_tick_offset = timer_tick_offset;
 }
 
 /* /proc/profile writes can call this, don't __init it please. */
diff --git a/arch/sparc64/kernel/time.c b/arch/sparc64/kernel/time.c
index 48e1217c1e42..21e3b0b9d9ce 100644
--- a/arch/sparc64/kernel/time.c
+++ b/arch/sparc64/kernel/time.c
@@ -32,6 +32,8 @@
 #include <linux/miscdevice.h>
 #include <linux/rtc.h>
 #include <linux/kernel_stat.h>
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
 
 #include <asm/oplib.h>
 #include <asm/mostek.h>
@@ -61,6 +63,7 @@ static void __iomem *mstk48t59_regs;
 static int set_rtc_mmss(unsigned long);
 
 #define TICK_PRIV_BIT	(1UL << 63)
+#define TICKCMP_IRQ_BIT	(1UL << 63)
 
 #ifdef CONFIG_SMP
 unsigned long profile_pc(struct pt_regs *regs)
@@ -94,21 +97,22 @@ static void tick_disable_protection(void)
 	: "g2");
 }
 
-static void tick_init_tick(unsigned long offset)
+static void tick_disable_irq(void)
 {
-	tick_disable_protection();
-
 	__asm__ __volatile__(
-	"	rd	%%tick, %%g1\n"
-	"	andn	%%g1, %1, %%g1\n"
 	"	ba,pt	%%xcc, 1f\n"
-	"	 add	%%g1, %0, %%g1\n"
+	"	 nop\n"
 	"	.align	64\n"
-	"1:	wr	%%g1, 0x0, %%tick_cmpr\n"
+	"1:	wr	%0, 0x0, %%tick_cmpr\n"
 	"	rd	%%tick_cmpr, %%g0"
 	: /* no outputs */
-	: "r" (offset), "r" (TICK_PRIV_BIT)
-	: "g1");
+	: "r" (TICKCMP_IRQ_BIT));
+}
+
+static void tick_init_tick(void)
+{
+	tick_disable_protection();
+	tick_disable_irq();
 }
 
 static unsigned long tick_get_tick(void)
@@ -122,20 +126,14 @@ static unsigned long tick_get_tick(void)
 	return ret & ~TICK_PRIV_BIT;
 }
 
-static unsigned long tick_get_compare(void)
-{
-	unsigned long ret;
-
-	__asm__ __volatile__("rd %%tick_cmpr, %0\n\t"
-			     "mov %0, %0"
-			     : "=r" (ret));
-
-	return ret;
-}
-
-static unsigned long tick_add_compare(unsigned long adj)
-{
-	unsigned long new_compare;
+static int tick_add_compare(unsigned long adj)
+{
+	unsigned long orig_tick, new_tick, new_compare;
+
+	__asm__ __volatile__("rd %%tick, %0"
+			     : "=r" (orig_tick));
+
+	orig_tick &= ~TICKCMP_IRQ_BIT;
 
 	/* Workaround for Spitfire Errata (#54 I think??), I discovered
 	 * this via Sun BugID 4008234, mentioned in Solaris-2.5.1 patch
@@ -146,44 +144,41 @@ static unsigned long tick_add_compare(unsigned long adj)
 	 * at the start of an I-cache line, and perform a dummy
 	 * read back from %tick_cmpr right after writing to it. -DaveM
 	 */
-	__asm__ __volatile__("rd %%tick_cmpr, %0\n\t"
-			     "ba,pt %%xcc, 1f\n\t"
-			     " add %0, %1, %0\n\t"
+	__asm__ __volatile__("ba,pt %%xcc, 1f\n\t"
+			     " add %1, %2, %0\n\t"
 			     ".align 64\n"
 			     "1:\n\t"
 			     "wr %0, 0, %%tick_cmpr\n\t"
-			     "rd %%tick_cmpr, %%g0"
-			     : "=&r" (new_compare)
-			     : "r" (adj));
+			     "rd %%tick_cmpr, %%g0\n\t"
+			     : "=r" (new_compare)
+			     : "r" (orig_tick), "r" (adj));
+
+	__asm__ __volatile__("rd %%tick, %0"
+			     : "=r" (new_tick));
+	new_tick &= ~TICKCMP_IRQ_BIT;
 
-	return new_compare;
+	return ((long)(new_tick - (orig_tick+adj))) > 0L;
 }
 
-static unsigned long tick_add_tick(unsigned long adj, unsigned long offset)
+static unsigned long tick_add_tick(unsigned long adj)
 {
-	unsigned long new_tick, tmp;
+	unsigned long new_tick;
 
 	/* Also need to handle Blackbird bug here too. */
 	__asm__ __volatile__("rd %%tick, %0\n\t"
-			     "add %0, %2, %0\n\t"
+			     "add %0, %1, %0\n\t"
 			     "wrpr %0, 0, %%tick\n\t"
-			     "andn %0, %4, %1\n\t"
-			     "ba,pt %%xcc, 1f\n\t"
-			     " add %1, %3, %1\n\t"
-			     ".align 64\n"
-			     "1:\n\t"
-			     "wr %1, 0, %%tick_cmpr\n\t"
-			     "rd %%tick_cmpr, %%g0"
-			     : "=&r" (new_tick), "=&r" (tmp)
-			     : "r" (adj), "r" (offset), "r" (TICK_PRIV_BIT));
+			     : "=&r" (new_tick)
+			     : "r" (adj));
 
 	return new_tick;
 }
 
 static struct sparc64_tick_ops tick_operations __read_mostly = {
+	.name		= "tick",
 	.init_tick	= tick_init_tick,
+	.disable_irq	= tick_disable_irq,
 	.get_tick	= tick_get_tick,
-	.get_compare	= tick_get_compare,
 	.add_tick	= tick_add_tick,
 	.add_compare	= tick_add_compare,
 	.softint_mask	= 1UL << 0,
@@ -191,7 +186,15 @@ static struct sparc64_tick_ops tick_operations __read_mostly = {
 
 struct sparc64_tick_ops *tick_ops __read_mostly = &tick_operations;
 
-static void stick_init_tick(unsigned long offset)
+static void stick_disable_irq(void)
+{
+	__asm__ __volatile__(
+	"wr	%0, 0x0, %%asr25"
+	: /* no outputs */
+	: "r" (TICKCMP_IRQ_BIT));
+}
+
+static void stick_init_tick(void)
 {
 	/* Writes to the %tick and %stick register are not
 	 * allowed on sun4v.  The Hypervisor controls that
@@ -199,6 +202,7 @@ static void stick_init_tick(unsigned long offset)
 	 */
 	if (tlb_type != hypervisor) {
 		tick_disable_protection();
+		tick_disable_irq();
 
 		/* Let the user get at STICK too. */
 		__asm__ __volatile__(
@@ -210,14 +214,7 @@ static void stick_init_tick(unsigned long offset)
 		: "g1", "g2");
 	}
 
-	__asm__ __volatile__(
-	"	rd	%%asr24, %%g1\n"
-	"	andn	%%g1, %1, %%g1\n"
-	"	add	%%g1, %0, %%g1\n"
-	"	wr	%%g1, 0x0, %%asr25"
-	: /* no outputs */
-	: "r" (offset), "r" (TICK_PRIV_BIT)
-	: "g1");
+	stick_disable_irq();
 }
 
 static unsigned long stick_get_tick(void)
@@ -230,49 +227,43 @@ static unsigned long stick_get_tick(void)
 	return ret & ~TICK_PRIV_BIT;
 }
 
-static unsigned long stick_get_compare(void)
-{
-	unsigned long ret;
-
-	__asm__ __volatile__("rd %%asr25, %0"
-			     : "=r" (ret));
-
-	return ret;
-}
-
-static unsigned long stick_add_tick(unsigned long adj, unsigned long offset)
+static unsigned long stick_add_tick(unsigned long adj)
 {
-	unsigned long new_tick, tmp;
+	unsigned long new_tick;
 
 	__asm__ __volatile__("rd %%asr24, %0\n\t"
-			     "add %0, %2, %0\n\t"
+			     "add %0, %1, %0\n\t"
 			     "wr %0, 0, %%asr24\n\t"
-			     "andn %0, %4, %1\n\t"
-			     "add %1, %3, %1\n\t"
-			     "wr %1, 0, %%asr25"
-			     : "=&r" (new_tick), "=&r" (tmp)
-			     : "r" (adj), "r" (offset), "r" (TICK_PRIV_BIT));
+			     : "=&r" (new_tick)
+			     : "r" (adj));
 
 	return new_tick;
 }
 
-static unsigned long stick_add_compare(unsigned long adj)
+static int stick_add_compare(unsigned long adj)
 {
-	unsigned long new_compare;
+	unsigned long orig_tick, new_tick;
 
-	__asm__ __volatile__("rd %%asr25, %0\n\t"
-			     "add %0, %1, %0\n\t"
-			     "wr %0, 0, %%asr25"
-			     : "=&r" (new_compare)
-			     : "r" (adj));
+	__asm__ __volatile__("rd %%asr24, %0"
+			     : "=r" (orig_tick));
+	orig_tick &= ~TICKCMP_IRQ_BIT;
+
+	__asm__ __volatile__("wr %0, 0, %%asr25"
+			     : /* no outputs */
+			     : "r" (orig_tick + adj));
+
+	__asm__ __volatile__("rd %%asr24, %0"
+			     : "=r" (new_tick));
+	new_tick &= ~TICKCMP_IRQ_BIT;
 
-	return new_compare;
+	return ((long)(new_tick - (orig_tick+adj))) > 0L;
 }
 
 static struct sparc64_tick_ops stick_operations __read_mostly = {
+	.name		= "stick",
 	.init_tick	= stick_init_tick,
+	.disable_irq	= stick_disable_irq,
 	.get_tick	= stick_get_tick,
-	.get_compare	= stick_get_compare,
 	.add_tick	= stick_add_tick,
 	.add_compare	= stick_add_compare,
 	.softint_mask	= 1UL << 16,
@@ -321,20 +312,6 @@ static unsigned long __hbird_read_stick(void)
 	return ret;
 }
 
-static unsigned long __hbird_read_compare(void)
-{
-	unsigned long low, high;
-	unsigned long addr = HBIRD_STICKCMP_ADDR;
-
-	__asm__ __volatile__("ldxa	[%2] %3, %0\n\t"
-			     "add	%2, 0x8, %2\n\t"
-			     "ldxa	[%2] %3, %1"
-			     : "=&r" (low), "=&r" (high), "=&r" (addr)
-			     : "i" (ASI_PHYS_BYPASS_EC_E), "2" (addr));
-
-	return (high << 32UL) | low;
-}
-
 static void __hbird_write_stick(unsigned long val)
 {
 	unsigned long low = (val & 0xffffffffUL);
@@ -365,10 +342,13 @@ static void __hbird_write_compare(unsigned long val)
365 "i" (ASI_PHYS_BYPASS_EC_E)); 342 "i" (ASI_PHYS_BYPASS_EC_E));
366} 343}
367 344
368static void hbtick_init_tick(unsigned long offset) 345static void hbtick_disable_irq(void)
369{ 346{
370 unsigned long val; 347 __hbird_write_compare(TICKCMP_IRQ_BIT);
348}
371 349
350static void hbtick_init_tick(void)
351{
372 tick_disable_protection(); 352 tick_disable_protection();
373 353
374 /* XXX This seems to be necessary to 'jumpstart' Hummingbird 354 /* XXX This seems to be necessary to 'jumpstart' Hummingbird
@@ -378,8 +358,7 @@ static void hbtick_init_tick(unsigned long offset)
 	 */
 	__hbird_write_stick(__hbird_read_stick());
 
-	val = __hbird_read_stick() & ~TICK_PRIV_BIT;
-	__hbird_write_compare(val + offset);
+	hbtick_disable_irq();
 }
 
 static unsigned long hbtick_get_tick(void)
@@ -387,45 +366,40 @@ static unsigned long hbtick_get_tick(void)
 	return __hbird_read_stick() & ~TICK_PRIV_BIT;
 }
 
-static unsigned long hbtick_get_compare(void)
-{
-	return __hbird_read_compare();
-}
-
-static unsigned long hbtick_add_tick(unsigned long adj, unsigned long offset)
+static unsigned long hbtick_add_tick(unsigned long adj)
 {
 	unsigned long val;
 
 	val = __hbird_read_stick() + adj;
 	__hbird_write_stick(val);
 
-	val &= ~TICK_PRIV_BIT;
-	__hbird_write_compare(val + offset);
-
 	return val;
 }
 
-static unsigned long hbtick_add_compare(unsigned long adj)
+static int hbtick_add_compare(unsigned long adj)
 {
-	unsigned long val = __hbird_read_compare() + adj;
+	unsigned long val = __hbird_read_stick();
+	unsigned long val2;
 
-	val &= ~TICK_PRIV_BIT;
+	val &= ~TICKCMP_IRQ_BIT;
+	val += adj;
 	__hbird_write_compare(val);
 
-	return val;
+	val2 = __hbird_read_stick() & ~TICKCMP_IRQ_BIT;
+
+	return ((long)(val2 - val)) > 0L;
 }
 
 static struct sparc64_tick_ops hbtick_operations __read_mostly = {
+	.name		= "hbtick",
 	.init_tick	= hbtick_init_tick,
+	.disable_irq	= hbtick_disable_irq,
 	.get_tick	= hbtick_get_tick,
-	.get_compare	= hbtick_get_compare,
 	.add_tick	= hbtick_add_tick,
 	.add_compare	= hbtick_add_compare,
 	.softint_mask	= 1UL << 0,
 };
 
-unsigned long timer_tick_offset __read_mostly;
-
 static unsigned long timer_ticks_per_nsec_quotient __read_mostly;
 
 #define TICK_SIZE (tick_nsec / 1000)
@@ -482,50 +456,6 @@ void notify_arch_cmos_timer(void)
 	mod_timer(&sync_cmos_timer, jiffies + 1);
 }
 
-void timer_interrupt(int irq, struct pt_regs *regs)
-{
-	struct pt_regs *old_regs = set_irq_regs(regs);
-	unsigned long ticks, compare, pstate;
-	unsigned long tick_mask = tick_ops->softint_mask;
-
-	clear_softint(tick_mask);
-
-	irq_enter();
-
-	kstat_this_cpu.irqs[0]++;
-
-	do {
-		profile_tick(CPU_PROFILING);
-		update_process_times(user_mode(get_irq_regs()));
-
-		if (smp_processor_id() == boot_cpu_id) {
-			write_seqlock(&xtime_lock);
-			do_timer(1);
-			write_sequnlock(&xtime_lock);
-		}
-
-		/* Guarantee that the following sequences execute
-		 * uninterrupted.
-		 */
-		__asm__ __volatile__("rdpr %%pstate, %0\n\t"
-				     "wrpr %0, %1, %%pstate"
-				     : "=r" (pstate)
-				     : "i" (PSTATE_IE));
-
-		compare = tick_ops->add_compare(timer_tick_offset);
-		ticks = tick_ops->get_tick();
-
-		/* Restore PSTATE_IE. */
-		__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
-				     : /* no outputs */
-				     : "r" (pstate));
-	} while (unlikely(time_after_eq(ticks, compare)));
-
-	irq_exit();
-
-	set_irq_regs(old_regs);
-}
-
 /* Kick start a stopped clock (procedure from the Sun NVRAM/hostid FAQ). */
 static void __init kick_start_clock(void)
 {
@@ -923,7 +853,6 @@ static unsigned long sparc64_init_timers(void)
 		prop = of_find_property(dp, "stick-frequency", NULL);
 	}
 	clock = *(unsigned int *) prop->value;
-	timer_tick_offset = clock / HZ;
 
 #ifdef CONFIG_SMP
 	smp_tick_init();
@@ -932,26 +861,6 @@ static unsigned long sparc64_init_timers(void)
 	return clock;
 }
 
-static void sparc64_start_timers(void)
-{
-	unsigned long pstate;
-
-	/* Guarantee that the following sequences execute
-	 * uninterrupted.
-	 */
-	__asm__ __volatile__("rdpr %%pstate, %0\n\t"
-			     "wrpr %0, %1, %%pstate"
-			     : "=r" (pstate)
-			     : "i" (PSTATE_IE));
-
-	tick_ops->init_tick(timer_tick_offset);
-
-	/* Restore PSTATE_IE. */
-	__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
-			     : /* no outputs */
-			     : "r" (pstate));
-}
-
 struct freq_table {
 	unsigned long clock_tick_ref;
 	unsigned int ref_freq;
@@ -998,29 +907,148 @@ static struct notifier_block sparc64_cpufreq_notifier_block = {
 
 #endif /* CONFIG_CPU_FREQ */
 
-static struct time_interpolator sparc64_cpu_interpolator = {
-	.source		= TIME_SOURCE_CPU,
-	.shift		= 16,
-	.mask		= 0xffffffffffffffffLL
+static int sparc64_next_event(unsigned long delta,
+			      struct clock_event_device *evt)
+{
+	return tick_ops->add_compare(delta);
+}
+
+static void sparc64_timer_setup(enum clock_event_mode mode,
+				struct clock_event_device *evt)
+{
+	switch (mode) {
+	case CLOCK_EVT_MODE_ONESHOT:
+		break;
+
+	case CLOCK_EVT_MODE_SHUTDOWN:
+		tick_ops->disable_irq();
+		break;
+
+	case CLOCK_EVT_MODE_PERIODIC:
+	case CLOCK_EVT_MODE_UNUSED:
+		WARN_ON(1);
+		break;
+	};
+}
+
+static struct clock_event_device sparc64_clockevent = {
+	.features	= CLOCK_EVT_FEAT_ONESHOT,
+	.set_mode	= sparc64_timer_setup,
+	.set_next_event	= sparc64_next_event,
+	.rating		= 100,
+	.shift		= 30,
+	.irq		= -1,
 };
+static DEFINE_PER_CPU(struct clock_event_device, sparc64_events);
 
-/* The quotient formula is taken from the IA64 port. */
-#define SPARC64_NSEC_PER_CYC_SHIFT	10UL
-void __init time_init(void)
+void timer_interrupt(int irq, struct pt_regs *regs)
 {
-	unsigned long clock = sparc64_init_timers();
+	struct pt_regs *old_regs = set_irq_regs(regs);
+	unsigned long tick_mask = tick_ops->softint_mask;
+	int cpu = smp_processor_id();
+	struct clock_event_device *evt = &per_cpu(sparc64_events, cpu);
+
+	clear_softint(tick_mask);
+
+	irq_enter();
+
+	kstat_this_cpu.irqs[0]++;
+
+	if (unlikely(!evt->event_handler)) {
+		printk(KERN_WARNING
+		       "Spurious SPARC64 timer interrupt on cpu %d\n", cpu);
+	} else
+		evt->event_handler(evt);
+
+	irq_exit();
+
+	set_irq_regs(old_regs);
+}
 
-	sparc64_cpu_interpolator.frequency = clock;
-	register_time_interpolator(&sparc64_cpu_interpolator);
+void __devinit setup_sparc64_timer(void)
+{
+	struct clock_event_device *sevt;
+	unsigned long pstate;
 
-	/* Now that the interpolator is registered, it is
-	 * safe to start the timer ticking.
+	/* Guarantee that the following sequences execute
+	 * uninterrupted.
 	 */
-	sparc64_start_timers();
+	__asm__ __volatile__("rdpr %%pstate, %0\n\t"
+			     "wrpr %0, %1, %%pstate"
+			     : "=r" (pstate)
+			     : "i" (PSTATE_IE));
+
+	tick_ops->init_tick();
+
+	/* Restore PSTATE_IE. */
+	__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
+			     : /* no outputs */
+			     : "r" (pstate));
+
+	sevt = &__get_cpu_var(sparc64_events);
+
+	memcpy(sevt, &sparc64_clockevent, sizeof(*sevt));
+	sevt->cpumask = cpumask_of_cpu(smp_processor_id());
+
+	clockevents_register_device(sevt);
+}
+
+#define SPARC64_NSEC_PER_CYC_SHIFT	32UL
+
+static struct clocksource clocksource_tick = {
+	.rating		= 100,
+	.mask		= CLOCKSOURCE_MASK(64),
+	.shift		= 16,
+	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static void __init setup_clockevent_multiplier(unsigned long hz)
+{
+	unsigned long mult, shift = 32;
+
+	while (1) {
+		mult = div_sc(hz, NSEC_PER_SEC, shift);
+		if (mult && (mult >> 32UL) == 0UL)
+			break;
+
+		shift--;
+	}
+
+	sparc64_clockevent.shift = shift;
+	sparc64_clockevent.mult = mult;
+}
+
+void __init time_init(void)
+{
+	unsigned long clock = sparc64_init_timers();
 
 	timer_ticks_per_nsec_quotient =
-		(((NSEC_PER_SEC << SPARC64_NSEC_PER_CYC_SHIFT) +
-		  (clock / 2)) / clock);
+		clocksource_hz2mult(clock, SPARC64_NSEC_PER_CYC_SHIFT);
+
+	clocksource_tick.name = tick_ops->name;
+	clocksource_tick.mult =
+		clocksource_hz2mult(clock,
+				    clocksource_tick.shift);
+	clocksource_tick.read = tick_ops->get_tick;
+
+	printk("clocksource: mult[%x] shift[%d]\n",
+	       clocksource_tick.mult, clocksource_tick.shift);
+
+	clocksource_register(&clocksource_tick);
+
+	sparc64_clockevent.name = tick_ops->name;
+
+	setup_clockevent_multiplier(clock);
+
+	sparc64_clockevent.max_delta_ns =
+		clockevent_delta2ns(0x7fffffffffffffff, &sparc64_clockevent);
+	sparc64_clockevent.min_delta_ns =
+		clockevent_delta2ns(0xF, &sparc64_clockevent);
+
+	printk("clockevent: mult[%lx] shift[%d]\n",
+	       sparc64_clockevent.mult, sparc64_clockevent.shift);
+
+	setup_sparc64_timer();
 
 #ifdef CONFIG_CPU_FREQ
 	cpufreq_register_notifier(&sparc64_cpufreq_notifier_block,
diff --git a/include/asm-sparc64/timer.h b/include/asm-sparc64/timer.h
index d435594df786..ccbd69448866 100644
--- a/include/asm-sparc64/timer.h
+++ b/include/asm-sparc64/timer.h
@@ -11,22 +11,19 @@
 
 
 struct sparc64_tick_ops {
-	void (*init_tick)(unsigned long);
 	unsigned long (*get_tick)(void);
-	unsigned long (*get_compare)(void);
-	unsigned long (*add_tick)(unsigned long, unsigned long);
-	unsigned long (*add_compare)(unsigned long);
+	int (*add_compare)(unsigned long);
 	unsigned long softint_mask;
+	void (*disable_irq)(void);
+
+	void (*init_tick)(void);
+	unsigned long (*add_tick)(unsigned long);
+
+	char *name;
 };
 
 extern struct sparc64_tick_ops *tick_ops;
 
-#ifdef CONFIG_SMP
-extern unsigned long timer_tick_offset;
-struct pt_regs;
-extern void timer_tick_interrupt(struct pt_regs *);
-#endif
-
 extern unsigned long sparc64_get_clock_tick(unsigned int cpu);
 
 #endif /* _SPARC64_TIMER_H */