Diffstat (limited to 'arch/sparc64/kernel/time.c')
 arch/sparc64/kernel/time.c | 484 ++++++++++++++++++++++++-----------------------
 1 file changed, 265 insertions(+), 219 deletions(-)
diff --git a/arch/sparc64/kernel/time.c b/arch/sparc64/kernel/time.c
index f84da4f1b706..259063f41f95 100644
--- a/arch/sparc64/kernel/time.c
+++ b/arch/sparc64/kernel/time.c
@@ -31,6 +31,9 @@
 #include <linux/profile.h>
 #include <linux/miscdevice.h>
 #include <linux/rtc.h>
+#include <linux/kernel_stat.h>
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
 
 #include <asm/oplib.h>
 #include <asm/mostek.h>
@@ -60,6 +63,7 @@ static void __iomem *mstk48t59_regs;
 static int set_rtc_mmss(unsigned long);
 
 #define TICK_PRIV_BIT	(1UL << 63)
+#define TICKCMP_IRQ_BIT	(1UL << 63)
 
 #ifdef CONFIG_SMP
 unsigned long profile_pc(struct pt_regs *regs)
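Note on the new constant: both masks select bit 63, but of different registers. In %tick, bit 63 is the privileged (NPT) bit that tick_disable_protection() clears; in %tick_cmpr it is the interrupt-disable bit, so a compare value written with bit 63 set leaves the tick-compare interrupt masked. A minimal sketch of that convention (hypothetical helper, not part of the patch):

        /* Sketch: a compare value with TICKCMP_IRQ_BIT set never fires. */
        static inline unsigned long tickcmp_disarmed(unsigned long cycles)
        {
                return cycles | TICKCMP_IRQ_BIT;
        }

This is why the tick_disable_irq() added below simply writes TICKCMP_IRQ_BIT into %tick_cmpr.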
@@ -93,21 +97,22 @@ static void tick_disable_protection(void)
93 : "g2"); 97 : "g2");
94} 98}
95 99
96static void tick_init_tick(unsigned long offset) 100static void tick_disable_irq(void)
97{ 101{
98 tick_disable_protection();
99
100 __asm__ __volatile__( 102 __asm__ __volatile__(
101 " rd %%tick, %%g1\n"
102 " andn %%g1, %1, %%g1\n"
103 " ba,pt %%xcc, 1f\n" 103 " ba,pt %%xcc, 1f\n"
104 " add %%g1, %0, %%g1\n" 104 " nop\n"
105 " .align 64\n" 105 " .align 64\n"
106 "1: wr %%g1, 0x0, %%tick_cmpr\n" 106 "1: wr %0, 0x0, %%tick_cmpr\n"
107 " rd %%tick_cmpr, %%g0" 107 " rd %%tick_cmpr, %%g0"
108 : /* no outputs */ 108 : /* no outputs */
109 : "r" (offset), "r" (TICK_PRIV_BIT) 109 : "r" (TICKCMP_IRQ_BIT));
110 : "g1"); 110}
111
112static void tick_init_tick(void)
113{
114 tick_disable_protection();
115 tick_disable_irq();
111} 116}
112 117
113static unsigned long tick_get_tick(void) 118static unsigned long tick_get_tick(void)
@@ -121,20 +126,14 @@ static unsigned long tick_get_tick(void)
 	return ret & ~TICK_PRIV_BIT;
 }
 
-static unsigned long tick_get_compare(void)
+static int tick_add_compare(unsigned long adj)
 {
-	unsigned long ret;
+	unsigned long orig_tick, new_tick, new_compare;
 
-	__asm__ __volatile__("rd %%tick_cmpr, %0\n\t"
-			     "mov %0, %0"
-			     : "=r" (ret));
+	__asm__ __volatile__("rd %%tick, %0"
+			     : "=r" (orig_tick));
 
-	return ret;
-}
-
-static unsigned long tick_add_compare(unsigned long adj)
-{
-	unsigned long new_compare;
+	orig_tick &= ~TICKCMP_IRQ_BIT;
 
 	/* Workaround for Spitfire Errata (#54 I think??), I discovered
 	 * this via Sun BugID 4008234, mentioned in Solaris-2.5.1 patch
@@ -145,44 +144,41 @@ static unsigned long tick_add_compare(unsigned long adj)
 	 * at the start of an I-cache line, and perform a dummy
 	 * read back from %tick_cmpr right after writing to it. -DaveM
 	 */
-	__asm__ __volatile__("rd %%tick_cmpr, %0\n\t"
-			     "ba,pt %%xcc, 1f\n\t"
-			     " add %0, %1, %0\n\t"
+	__asm__ __volatile__("ba,pt %%xcc, 1f\n\t"
+			     " add %1, %2, %0\n\t"
 			     ".align 64\n"
 			     "1:\n\t"
 			     "wr %0, 0, %%tick_cmpr\n\t"
-			     "rd %%tick_cmpr, %%g0"
-			     : "=&r" (new_compare)
-			     : "r" (adj));
+			     "rd %%tick_cmpr, %%g0\n\t"
+			     : "=r" (new_compare)
+			     : "r" (orig_tick), "r" (adj));
 
-	return new_compare;
+	__asm__ __volatile__("rd %%tick, %0"
+			     : "=r" (new_tick));
+	new_tick &= ~TICKCMP_IRQ_BIT;
+
+	return ((long)(new_tick - (orig_tick+adj))) > 0L;
 }
 
-static unsigned long tick_add_tick(unsigned long adj, unsigned long offset)
+static unsigned long tick_add_tick(unsigned long adj)
 {
-	unsigned long new_tick, tmp;
+	unsigned long new_tick;
 
 	/* Also need to handle Blackbird bug here too. */
 	__asm__ __volatile__("rd %%tick, %0\n\t"
-			     "add %0, %2, %0\n\t"
+			     "add %0, %1, %0\n\t"
 			     "wrpr %0, 0, %%tick\n\t"
-			     "andn %0, %4, %1\n\t"
-			     "ba,pt %%xcc, 1f\n\t"
-			     " add %1, %3, %1\n\t"
-			     ".align 64\n"
-			     "1:\n\t"
-			     "wr %1, 0, %%tick_cmpr\n\t"
-			     "rd %%tick_cmpr, %%g0"
-			     : "=&r" (new_tick), "=&r" (tmp)
-			     : "r" (adj), "r" (offset), "r" (TICK_PRIV_BIT));
+			     : "=&r" (new_tick)
+			     : "r" (adj));
 
 	return new_tick;
 }
 
 static struct sparc64_tick_ops tick_operations __read_mostly = {
+	.name		= "tick",
 	.init_tick	= tick_init_tick,
+	.disable_irq	= tick_disable_irq,
 	.get_tick	= tick_get_tick,
-	.get_compare	= tick_get_compare,
 	.add_tick	= tick_add_tick,
 	.add_compare	= tick_add_compare,
 	.softint_mask	= 1UL << 0,
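The ops conversion above also changes the add_compare() contract: instead of returning the new compare value, it now reports whether the freshly programmed deadline had already passed by the time %tick was re-read, which the clockevents layer turns into -ETIME (see sparc64_next_event() later in this patch). The check is plain signed wraparound arithmetic; a standalone sketch of the same expression:

        /* Sketch: nonzero means the deadline orig_tick+adj was already
         * behind new_tick when the compare register got written.
         */
        static int deadline_missed(unsigned long orig_tick, unsigned long adj,
                                   unsigned long new_tick)
        {
                return ((long)(new_tick - (orig_tick + adj))) > 0L;
        }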
@@ -190,7 +186,15 @@ static struct sparc64_tick_ops tick_operations __read_mostly = {
 
 struct sparc64_tick_ops *tick_ops __read_mostly = &tick_operations;
 
-static void stick_init_tick(unsigned long offset)
+static void stick_disable_irq(void)
+{
+	__asm__ __volatile__(
+	"	wr	%0, 0x0, %%asr25"
+	: /* no outputs */
+	: "r" (TICKCMP_IRQ_BIT));
+}
+
+static void stick_init_tick(void)
 {
 	/* Writes to the %tick and %stick register are not
 	 * allowed on sun4v.  The Hypervisor controls that
@@ -198,6 +202,7 @@ static void stick_init_tick(unsigned long offset)
 	 */
 	if (tlb_type != hypervisor) {
 		tick_disable_protection();
+		tick_disable_irq();
 
 		/* Let the user get at STICK too. */
 		__asm__ __volatile__(
@@ -209,14 +214,7 @@ static void stick_init_tick(unsigned long offset)
209 : "g1", "g2"); 214 : "g1", "g2");
210 } 215 }
211 216
212 __asm__ __volatile__( 217 stick_disable_irq();
213 " rd %%asr24, %%g1\n"
214 " andn %%g1, %1, %%g1\n"
215 " add %%g1, %0, %%g1\n"
216 " wr %%g1, 0x0, %%asr25"
217 : /* no outputs */
218 : "r" (offset), "r" (TICK_PRIV_BIT)
219 : "g1");
220} 218}
221 219
222static unsigned long stick_get_tick(void) 220static unsigned long stick_get_tick(void)
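For the STICK variant the same pattern applies, with %asr24 and %asr25 standing in for %tick and %tick_cmpr (the STICK and STICK_CMPR ancillary state registers). The inline asm used throughout these functions amounts to accessors like this sketch (not part of the patch):

        static inline unsigned long stick_read(void)
        {
                unsigned long t;

                __asm__ __volatile__("rd %%asr24, %0" : "=r" (t));
                return t;
        }

        static inline void stick_cmpr_write(unsigned long val)
        {
                __asm__ __volatile__("wr %0, 0, %%asr25" : : "r" (val));
        }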
@@ -229,49 +227,43 @@ static unsigned long stick_get_tick(void)
 	return ret & ~TICK_PRIV_BIT;
 }
 
-static unsigned long stick_get_compare(void)
+static unsigned long stick_add_tick(unsigned long adj)
 {
-	unsigned long ret;
-
-	__asm__ __volatile__("rd %%asr25, %0"
-			     : "=r" (ret));
-
-	return ret;
-}
-
-static unsigned long stick_add_tick(unsigned long adj, unsigned long offset)
-{
-	unsigned long new_tick, tmp;
+	unsigned long new_tick;
 
 	__asm__ __volatile__("rd %%asr24, %0\n\t"
-			     "add %0, %2, %0\n\t"
+			     "add %0, %1, %0\n\t"
 			     "wr %0, 0, %%asr24\n\t"
-			     "andn %0, %4, %1\n\t"
-			     "add %1, %3, %1\n\t"
-			     "wr %1, 0, %%asr25"
-			     : "=&r" (new_tick), "=&r" (tmp)
-			     : "r" (adj), "r" (offset), "r" (TICK_PRIV_BIT));
+			     : "=&r" (new_tick)
+			     : "r" (adj));
 
 	return new_tick;
 }
 
-static unsigned long stick_add_compare(unsigned long adj)
+static int stick_add_compare(unsigned long adj)
 {
-	unsigned long new_compare;
+	unsigned long orig_tick, new_tick;
 
-	__asm__ __volatile__("rd %%asr25, %0\n\t"
-			     "add %0, %1, %0\n\t"
-			     "wr %0, 0, %%asr25"
-			     : "=&r" (new_compare)
-			     : "r" (adj));
+	__asm__ __volatile__("rd %%asr24, %0"
+			     : "=r" (orig_tick));
+	orig_tick &= ~TICKCMP_IRQ_BIT;
+
+	__asm__ __volatile__("wr %0, 0, %%asr25"
+			     : /* no outputs */
+			     : "r" (orig_tick + adj));
+
+	__asm__ __volatile__("rd %%asr24, %0"
+			     : "=r" (new_tick));
+	new_tick &= ~TICKCMP_IRQ_BIT;
 
-	return new_compare;
+	return ((long)(new_tick - (orig_tick+adj))) > 0L;
 }
 
 static struct sparc64_tick_ops stick_operations __read_mostly = {
+	.name		= "stick",
 	.init_tick	= stick_init_tick,
+	.disable_irq	= stick_disable_irq,
 	.get_tick	= stick_get_tick,
-	.get_compare	= stick_get_compare,
 	.add_tick	= stick_add_tick,
 	.add_compare	= stick_add_compare,
 	.softint_mask	= 1UL << 16,
@@ -320,20 +312,6 @@ static unsigned long __hbird_read_stick(void)
 	return ret;
 }
 
-static unsigned long __hbird_read_compare(void)
-{
-	unsigned long low, high;
-	unsigned long addr = HBIRD_STICKCMP_ADDR;
-
-	__asm__ __volatile__("ldxa [%2] %3, %0\n\t"
-			     "add %2, 0x8, %2\n\t"
-			     "ldxa [%2] %3, %1"
-			     : "=&r" (low), "=&r" (high), "=&r" (addr)
-			     : "i" (ASI_PHYS_BYPASS_EC_E), "2" (addr));
-
-	return (high << 32UL) | low;
-}
-
 static void __hbird_write_stick(unsigned long val)
 {
 	unsigned long low = (val & 0xffffffffUL);
@@ -364,10 +342,13 @@ static void __hbird_write_compare(unsigned long val)
364 "i" (ASI_PHYS_BYPASS_EC_E)); 342 "i" (ASI_PHYS_BYPASS_EC_E));
365} 343}
366 344
367static void hbtick_init_tick(unsigned long offset) 345static void hbtick_disable_irq(void)
368{ 346{
369 unsigned long val; 347 __hbird_write_compare(TICKCMP_IRQ_BIT);
348}
370 349
350static void hbtick_init_tick(void)
351{
371 tick_disable_protection(); 352 tick_disable_protection();
372 353
373 /* XXX This seems to be necessary to 'jumpstart' Hummingbird 354 /* XXX This seems to be necessary to 'jumpstart' Hummingbird
@@ -377,8 +358,7 @@ static void hbtick_init_tick(unsigned long offset)
 	 */
 	__hbird_write_stick(__hbird_read_stick());
 
-	val = __hbird_read_stick() & ~TICK_PRIV_BIT;
-	__hbird_write_compare(val + offset);
+	hbtick_disable_irq();
 }
 
 static unsigned long hbtick_get_tick(void)
@@ -386,122 +366,95 @@ static unsigned long hbtick_get_tick(void)
 	return __hbird_read_stick() & ~TICK_PRIV_BIT;
 }
 
-static unsigned long hbtick_get_compare(void)
-{
-	return __hbird_read_compare();
-}
-
-static unsigned long hbtick_add_tick(unsigned long adj, unsigned long offset)
+static unsigned long hbtick_add_tick(unsigned long adj)
 {
 	unsigned long val;
 
 	val = __hbird_read_stick() + adj;
 	__hbird_write_stick(val);
 
-	val &= ~TICK_PRIV_BIT;
-	__hbird_write_compare(val + offset);
-
 	return val;
 }
 
-static unsigned long hbtick_add_compare(unsigned long adj)
+static int hbtick_add_compare(unsigned long adj)
 {
-	unsigned long val = __hbird_read_compare() + adj;
+	unsigned long val = __hbird_read_stick();
+	unsigned long val2;
 
-	val &= ~TICK_PRIV_BIT;
+	val &= ~TICKCMP_IRQ_BIT;
+	val += adj;
 	__hbird_write_compare(val);
 
-	return val;
+	val2 = __hbird_read_stick() & ~TICKCMP_IRQ_BIT;
+
+	return ((long)(val2 - val)) > 0L;
 }
 
 static struct sparc64_tick_ops hbtick_operations __read_mostly = {
+	.name		= "hbtick",
 	.init_tick	= hbtick_init_tick,
+	.disable_irq	= hbtick_disable_irq,
 	.get_tick	= hbtick_get_tick,
-	.get_compare	= hbtick_get_compare,
 	.add_tick	= hbtick_add_tick,
 	.add_compare	= hbtick_add_compare,
 	.softint_mask	= 1UL << 0,
 };
 
-/* timer_interrupt() needs to keep up the real-time clock,
- * as well as call the "do_timer()" routine every clocktick
- *
- * NOTE: On SUN5 systems the ticker interrupt comes in using 2
- *       interrupts, one at level14 and one with softint bit 0.
- */
-unsigned long timer_tick_offset __read_mostly;
-
 static unsigned long timer_ticks_per_nsec_quotient __read_mostly;
 
 #define TICK_SIZE (tick_nsec / 1000)
 
-static inline void timer_check_rtc(void)
-{
-	/* last time the cmos clock got updated */
-	static long last_rtc_update;
-
-	/* Determine when to update the Mostek clock. */
-	if (ntp_synced() &&
-	    xtime.tv_sec > last_rtc_update + 660 &&
-	    (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 &&
-	    (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) {
-		if (set_rtc_mmss(xtime.tv_sec) == 0)
-			last_rtc_update = xtime.tv_sec;
-		else
-			last_rtc_update = xtime.tv_sec - 600;
-			/* do it again in 60 s */
-	}
-}
+#define USEC_AFTER	500000
+#define USEC_BEFORE	500000
 
-irqreturn_t timer_interrupt(int irq, void *dev_id)
-{
-	unsigned long ticks, compare, pstate;
+static void sync_cmos_clock(unsigned long dummy);
 
-	write_seqlock(&xtime_lock);
+static DEFINE_TIMER(sync_cmos_timer, sync_cmos_clock, 0, 0);
 
-	do {
-#ifndef CONFIG_SMP
-		profile_tick(CPU_PROFILING);
-		update_process_times(user_mode(get_irq_regs()));
-#endif
-		do_timer(1);
+static void sync_cmos_clock(unsigned long dummy)
+{
+	struct timeval now, next;
+	int fail = 1;
 
-		/* Guarantee that the following sequences execute
-		 * uninterrupted.
+	/*
+	 * If we have an externally synchronized Linux clock, then update
+	 * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
+	 * called as close as possible to 500 ms before the new second starts.
+	 * This code is run on a timer.  If the clock is set, that timer
+	 * may not expire at the correct time.  Thus, we adjust...
+	 */
+	if (!ntp_synced())
+		/*
+		 * Not synced, exit, do not restart a timer (if one is
+		 * running, let it run out).
 		 */
-		__asm__ __volatile__("rdpr %%pstate, %0\n\t"
-				     "wrpr %0, %1, %%pstate"
-				     : "=r" (pstate)
-				     : "i" (PSTATE_IE));
+		return;
 
-		compare = tick_ops->add_compare(timer_tick_offset);
-		ticks = tick_ops->get_tick();
+	do_gettimeofday(&now);
+	if (now.tv_usec >= USEC_AFTER - ((unsigned) TICK_SIZE) / 2 &&
+	    now.tv_usec <= USEC_BEFORE + ((unsigned) TICK_SIZE) / 2)
+		fail = set_rtc_mmss(now.tv_sec);
 
-		/* Restore PSTATE_IE. */
-		__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
-				     : /* no outputs */
-				     : "r" (pstate));
-	} while (time_after_eq(ticks, compare));
+	next.tv_usec = USEC_AFTER - now.tv_usec;
+	if (next.tv_usec <= 0)
+		next.tv_usec += USEC_PER_SEC;
 
-	timer_check_rtc();
+	if (!fail)
+		next.tv_sec = 659;
+	else
+		next.tv_sec = 0;
 
-	write_sequnlock(&xtime_lock);
-
-	return IRQ_HANDLED;
+	if (next.tv_usec >= USEC_PER_SEC) {
+		next.tv_sec++;
+		next.tv_usec -= USEC_PER_SEC;
+	}
+	mod_timer(&sync_cmos_timer, jiffies + timeval_to_jiffies(&next));
 }
 
-#ifdef CONFIG_SMP
-void timer_tick_interrupt(struct pt_regs *regs)
+void notify_arch_cmos_timer(void)
 {
-	write_seqlock(&xtime_lock);
-
-	do_timer(1);
-
-	timer_check_rtc();
-
-	write_sequnlock(&xtime_lock);
+	mod_timer(&sync_cmos_timer, jiffies + 1);
 }
-#endif
 
 /* Kick start a stopped clock (procedure from the Sun NVRAM/hostid FAQ). */
 static void __init kick_start_clock(void)
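The hunk above replaces the per-tick timer_check_rtc() with a self-rearming kernel timer: when the write succeeds, sync_cmos_clock() waits ~11 minutes (659 s plus the sub-second alignment) before syncing again, and each run re-aims itself at roughly 500 ms before a second boundary, where set_rtc_mmss() wants to be called. A worked example of the rescheduling arithmetic, assuming the timer fired at now.tv_usec == 700000:

        next.tv_usec = USEC_AFTER - 700000;     /* -200000: past this second's window */
        if (next.tv_usec <= 0)
                next.tv_usec += USEC_PER_SEC;   /* 800000: lands 0.5 s into the next second */
        next.tv_sec = fail ? 0 : 659;           /* retry soon, or wait ~11 minutes */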
@@ -751,7 +704,7 @@ retry:
 	return -EOPNOTSUPP;
 }
 
-static int __init clock_model_matches(char *model)
+static int __init clock_model_matches(const char *model)
 {
 	if (strcmp(model, "mk48t02") &&
 	    strcmp(model, "mk48t08") &&
@@ -768,7 +721,7 @@ static int __init clock_model_matches(char *model)
 static int __devinit clock_probe(struct of_device *op, const struct of_device_id *match)
 {
 	struct device_node *dp = op->node;
-	char *model = of_get_property(dp, "model", NULL);
+	const char *model = of_get_property(dp, "model", NULL);
 	unsigned long size, flags;
 	void __iomem *regs;
 
@@ -900,7 +853,6 @@ static unsigned long sparc64_init_timers(void)
 		prop = of_find_property(dp, "stick-frequency", NULL);
 	}
 	clock = *(unsigned int *) prop->value;
-	timer_tick_offset = clock / HZ;
 
 #ifdef CONFIG_SMP
 	smp_tick_init();
@@ -909,26 +861,6 @@ static unsigned long sparc64_init_timers(void)
 	return clock;
 }
 
-static void sparc64_start_timers(void)
-{
-	unsigned long pstate;
-
-	/* Guarantee that the following sequences execute
-	 * uninterrupted.
-	 */
-	__asm__ __volatile__("rdpr %%pstate, %0\n\t"
-			     "wrpr %0, %1, %%pstate"
-			     : "=r" (pstate)
-			     : "i" (PSTATE_IE));
-
-	tick_ops->init_tick(timer_tick_offset);
-
-	/* Restore PSTATE_IE. */
-	__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
-			     : /* no outputs */
-			     : "r" (pstate));
-}
-
 struct freq_table {
 	unsigned long clock_tick_ref;
 	unsigned int ref_freq;
@@ -975,29 +907,148 @@ static struct notifier_block sparc64_cpufreq_notifier_block = {
 
 #endif /* CONFIG_CPU_FREQ */
 
-static struct time_interpolator sparc64_cpu_interpolator = {
-	.source		= TIME_SOURCE_CPU,
-	.shift		= 16,
-	.mask		= 0xffffffffffffffffLL
+static int sparc64_next_event(unsigned long delta,
+			      struct clock_event_device *evt)
+{
+	return tick_ops->add_compare(delta) ? -ETIME : 0;
+}
+
+static void sparc64_timer_setup(enum clock_event_mode mode,
+				struct clock_event_device *evt)
+{
+	switch (mode) {
+	case CLOCK_EVT_MODE_ONESHOT:
+		break;
+
+	case CLOCK_EVT_MODE_SHUTDOWN:
+		tick_ops->disable_irq();
+		break;
+
+	case CLOCK_EVT_MODE_PERIODIC:
+	case CLOCK_EVT_MODE_UNUSED:
+		WARN_ON(1);
+		break;
+	};
+}
+
+static struct clock_event_device sparc64_clockevent = {
+	.features	= CLOCK_EVT_FEAT_ONESHOT,
+	.set_mode	= sparc64_timer_setup,
+	.set_next_event	= sparc64_next_event,
+	.rating		= 100,
+	.shift		= 30,
+	.irq		= -1,
 };
+static DEFINE_PER_CPU(struct clock_event_device, sparc64_events);
 
-/* The quotient formula is taken from the IA64 port. */
-#define SPARC64_NSEC_PER_CYC_SHIFT	10UL
-void __init time_init(void)
+void timer_interrupt(int irq, struct pt_regs *regs)
 {
-	unsigned long clock = sparc64_init_timers();
+	struct pt_regs *old_regs = set_irq_regs(regs);
+	unsigned long tick_mask = tick_ops->softint_mask;
+	int cpu = smp_processor_id();
+	struct clock_event_device *evt = &per_cpu(sparc64_events, cpu);
+
+	clear_softint(tick_mask);
+
+	irq_enter();
 
-	sparc64_cpu_interpolator.frequency = clock;
-	register_time_interpolator(&sparc64_cpu_interpolator);
+	kstat_this_cpu.irqs[0]++;
 
-	/* Now that the interpolator is registered, it is
-	 * safe to start the timer ticking.
+	if (unlikely(!evt->event_handler)) {
+		printk(KERN_WARNING
+		       "Spurious SPARC64 timer interrupt on cpu %d\n", cpu);
+	} else
+		evt->event_handler(evt);
+
+	irq_exit();
+
+	set_irq_regs(old_regs);
+}
+
+void __devinit setup_sparc64_timer(void)
+{
+	struct clock_event_device *sevt;
+	unsigned long pstate;
+
+	/* Guarantee that the following sequences execute
+	 * uninterrupted.
 	 */
-	sparc64_start_timers();
+	__asm__ __volatile__("rdpr %%pstate, %0\n\t"
+			     "wrpr %0, %1, %%pstate"
+			     : "=r" (pstate)
+			     : "i" (PSTATE_IE));
+
+	tick_ops->init_tick();
+
+	/* Restore PSTATE_IE. */
+	__asm__ __volatile__("wrpr %0, 0x0, %%pstate"
+			     : /* no outputs */
+			     : "r" (pstate));
+
+	sevt = &__get_cpu_var(sparc64_events);
+
+	memcpy(sevt, &sparc64_clockevent, sizeof(*sevt));
+	sevt->cpumask = cpumask_of_cpu(smp_processor_id());
+
+	clockevents_register_device(sevt);
+}
+
+#define SPARC64_NSEC_PER_CYC_SHIFT	32UL
+
+static struct clocksource clocksource_tick = {
+	.rating		= 100,
+	.mask		= CLOCKSOURCE_MASK(64),
+	.shift		= 16,
+	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static void __init setup_clockevent_multiplier(unsigned long hz)
+{
+	unsigned long mult, shift = 32;
+
+	while (1) {
+		mult = div_sc(hz, NSEC_PER_SEC, shift);
+		if (mult && (mult >> 32UL) == 0UL)
+			break;
+
+		shift--;
+	}
+
+	sparc64_clockevent.shift = shift;
+	sparc64_clockevent.mult = mult;
+}
+
+void __init time_init(void)
+{
+	unsigned long clock = sparc64_init_timers();
 
 	timer_ticks_per_nsec_quotient =
-		(((NSEC_PER_SEC << SPARC64_NSEC_PER_CYC_SHIFT) +
-		  (clock / 2)) / clock);
+		clocksource_hz2mult(clock, SPARC64_NSEC_PER_CYC_SHIFT);
+
+	clocksource_tick.name = tick_ops->name;
+	clocksource_tick.mult =
+		clocksource_hz2mult(clock,
+				    clocksource_tick.shift);
+	clocksource_tick.read = tick_ops->get_tick;
+
+	printk("clocksource: mult[%x] shift[%d]\n",
+	       clocksource_tick.mult, clocksource_tick.shift);
+
+	clocksource_register(&clocksource_tick);
+
+	sparc64_clockevent.name = tick_ops->name;
+
+	setup_clockevent_multiplier(clock);
+
+	sparc64_clockevent.max_delta_ns =
+		clockevent_delta2ns(0x7fffffffffffffff, &sparc64_clockevent);
+	sparc64_clockevent.min_delta_ns =
+		clockevent_delta2ns(0xF, &sparc64_clockevent);
+
+	printk("clockevent: mult[%lx] shift[%d]\n",
+	       sparc64_clockevent.mult, sparc64_clockevent.shift);
+
+	setup_sparc64_timer();
 
 #ifdef CONFIG_CPU_FREQ
 	cpufreq_register_notifier(&sparc64_cpufreq_notifier_block,
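Both new conversion paths use the standard fixed-point mult/shift scheme: the clocksource maps cycles to nanoseconds as (cycles * mult) >> shift, and the clockevent maps a nanosecond delta to cycles the same way with the pair chosen by setup_clockevent_multiplier(), which picks the largest shift whose mult still fits in 32 bits. A small sketch of the event-side conversion; with a hypothetical 1 GHz clock the loop settles on shift = 31 and mult = 1UL << 31, so delta_cycles == delta_ns exactly:

        /* Sketch of the ns -> cycles step that clockevent_delta2ns() inverts. */
        static unsigned long ns_to_cycles(unsigned long ns,
                                          struct clock_event_device *evt)
        {
                return ((unsigned long long)ns * evt->mult) >> evt->shift;
        }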
@@ -1126,10 +1177,6 @@ static int set_rtc_mmss(unsigned long nowtime)
 #define RTC_IS_OPEN	0x01	/* means /dev/rtc is in use */
 static unsigned char mini_rtc_status;	/* bitmapped status byte. */
 
-/* months start at 0 now */
-static unsigned char days_in_mo[] =
-{31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31};
-
 #define FEBRUARY	2
 #define STARTOFTIME	1970
 #define SECDAY		86400L
@@ -1278,8 +1325,7 @@ static int mini_rtc_ioctl(struct inode *inode, struct file *file,
 
 	case RTC_SET_TIME:	/* Set the RTC */
 	{
-		int year;
-		unsigned char leap_yr;
+		int year, days;
 
 		if (!capable(CAP_SYS_TIME))
 			return -EACCES;
@@ -1288,14 +1334,14 @@ static int mini_rtc_ioctl(struct inode *inode, struct file *file,
 			return -EFAULT;
 
 		year = wtime.tm_year + 1900;
-		leap_yr = ((!(year % 4) && (year % 100)) ||
-			   !(year % 400));
+		days = month_days[wtime.tm_mon] +
+			((wtime.tm_mon == 1) && leapyear(year));
 
-		if ((wtime.tm_mon < 0 || wtime.tm_mon > 11) || (wtime.tm_mday < 1))
+		if ((wtime.tm_mon < 0 || wtime.tm_mon > 11) ||
+		    (wtime.tm_mday < 1))
 			return -EINVAL;
 
-		if (wtime.tm_mday < 0 || wtime.tm_mday >
-		    (days_in_mo[wtime.tm_mon] + ((wtime.tm_mon == 1) && leap_yr)))
+		if (wtime.tm_mday < 0 || wtime.tm_mday > days)
 			return -EINVAL;
 
 		if (wtime.tm_hour < 0 || wtime.tm_hour >= 24 ||