Diffstat (limited to 'arch/mips/kernel/time.c')
 -rw-r--r--  arch/mips/kernel/time.c | 416
 1 file changed, 222 insertions(+), 194 deletions(-)
diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c
index 9a5596bf8571..5892491b40eb 100644
--- a/arch/mips/kernel/time.c
+++ b/arch/mips/kernel/time.c
@@ -11,6 +11,7 @@
  * Free Software Foundation; either version 2 of the License, or (at your
  * option) any later version.
  */
+#include <linux/clockchips.h>
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
@@ -24,6 +25,7 @@
 #include <linux/spinlock.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
+#include <linux/kallsyms.h>
 
 #include <asm/bootinfo.h>
 #include <asm/cache.h>
@@ -32,8 +34,11 @@
 #include <asm/cpu-features.h>
 #include <asm/div64.h>
 #include <asm/sections.h>
+#include <asm/smtc_ipi.h>
 #include <asm/time.h>
 
+#include <irq.h>
+
 /*
  * The integer part of the number of usecs per jiffy is taken from tick,
  * but the fractional part is not recorded, so we calculate it using the
@@ -49,32 +54,27 @@
  * forward reference
  */
 DEFINE_SPINLOCK(rtc_lock);
+EXPORT_SYMBOL(rtc_lock);
 
-/*
- * By default we provide the null RTC ops
- */
-static unsigned long null_rtc_get_time(void)
+int __weak rtc_mips_set_time(unsigned long sec)
 {
-	return mktime(2000, 1, 1, 0, 0, 0);
+	return 0;
 }
+EXPORT_SYMBOL(rtc_mips_set_time);
 
-static int null_rtc_set_time(unsigned long sec)
+int __weak rtc_mips_set_mmss(unsigned long nowtime)
 {
-	return 0;
+	return rtc_mips_set_time(nowtime);
 }
 
-unsigned long (*rtc_mips_get_time)(void) = null_rtc_get_time;
-int (*rtc_mips_set_time)(unsigned long) = null_rtc_set_time;
-int (*rtc_mips_set_mmss)(unsigned long);
-
+int update_persistent_clock(struct timespec now)
+{
+	return rtc_mips_set_mmss(now.tv_sec);
+}
 
 /* how many counter cycles in a jiffy */
 static unsigned long cycles_per_jiffy __read_mostly;
 
-/* expirelo is the count value for next CPU timer interrupt */
-static unsigned int expirelo;
-
-
 /*
  * Null timer ack for systems not needing one (e.g. i8254).
  */
@@ -93,18 +93,7 @@ static cycle_t null_hpt_read(void)
  */
 static void c0_timer_ack(void)
 {
-	unsigned int count;
-
-	/* Ack this timer interrupt and set the next one. */
-	expirelo += cycles_per_jiffy;
-	write_c0_compare(expirelo);
-
-	/* Check to see if we have missed any timer interrupts. */
-	while (((count = read_c0_count()) - expirelo) < 0x7fffffff) {
-		/* missed_timer_count++; */
-		expirelo = count + cycles_per_jiffy;
-		write_c0_compare(expirelo);
-	}
+	write_c0_compare(read_c0_compare());
 }
 
 /*
@@ -115,19 +104,9 @@ static cycle_t c0_hpt_read(void)
 	return read_c0_count();
 }
 
-/* For use both as a high precision timer and an interrupt source. */
-static void __init c0_hpt_timer_init(void)
-{
-	expirelo = read_c0_count() + cycles_per_jiffy;
-	write_c0_compare(expirelo);
-}
-
 int (*mips_timer_state)(void);
 void (*mips_timer_ack)(void);
 
-/* last time when xtime and rtc are sync'ed up */
-static long last_rtc_update;
-
 /*
  * local_timer_interrupt() does profiling and process accounting
  * on a per-CPU basis.
@@ -144,60 +123,15 @@ void local_timer_interrupt(int irq, void *dev_id)
 	update_process_times(user_mode(get_irq_regs()));
 }
 
-/*
- * High-level timer interrupt service routines. This function
- * is set as irqaction->handler and is invoked through do_IRQ.
- */
-irqreturn_t timer_interrupt(int irq, void *dev_id)
-{
-	write_seqlock(&xtime_lock);
-
-	mips_timer_ack();
-
-	/*
-	 * call the generic timer interrupt handling
-	 */
-	do_timer(1);
-
-	/*
-	 * If we have an externally synchronized Linux clock, then update
-	 * CMOS clock accordingly every ~11 minutes. rtc_mips_set_time() has to be
-	 * called as close as possible to 500 ms before the new second starts.
-	 */
-	if (ntp_synced() &&
-	    xtime.tv_sec > last_rtc_update + 660 &&
-	    (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 &&
-	    (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) {
-		if (rtc_mips_set_mmss(xtime.tv_sec) == 0) {
-			last_rtc_update = xtime.tv_sec;
-		} else {
-			/* do it again in 60 s */
-			last_rtc_update = xtime.tv_sec - 600;
-		}
-	}
-
-	write_sequnlock(&xtime_lock);
-
-	/*
-	 * In UP mode, we call local_timer_interrupt() to do profiling
-	 * and process accouting.
-	 *
-	 * In SMP mode, local_timer_interrupt() is invoked by appropriate
-	 * low-level local timer interrupt handler.
-	 */
-	local_timer_interrupt(irq, dev_id);
-
-	return IRQ_HANDLED;
-}
-
 int null_perf_irq(void)
 {
 	return 0;
 }
 
+EXPORT_SYMBOL(null_perf_irq);
+
 int (*perf_irq)(void) = null_perf_irq;
 
-EXPORT_SYMBOL(null_perf_irq);
 EXPORT_SYMBOL(perf_irq);
 
 /*
@@ -215,7 +149,7 @@ EXPORT_SYMBOL_GPL(cp0_perfcount_irq);
  * Possibly handle a performance counter interrupt.
  * Return true if the timer interrupt should not be checked
  */
-static inline int handle_perf_irq (int r2)
+static inline int handle_perf_irq(int r2)
 {
 	/*
 	 * The performance counter overflow interrupt may be shared with the
@@ -229,63 +163,23 @@ static inline int handle_perf_irq (int r2)
 		!r2;
 }
 
-asmlinkage void ll_timer_interrupt(int irq)
-{
-	int r2 = cpu_has_mips_r2;
-
-	irq_enter();
-	kstat_this_cpu.irqs[irq]++;
-
-	if (handle_perf_irq(r2))
-		goto out;
-
-	if (r2 && ((read_c0_cause() & (1 << 30)) == 0))
-		goto out;
-
-	timer_interrupt(irq, NULL);
-
-out:
-	irq_exit();
-}
-
-asmlinkage void ll_local_timer_interrupt(int irq)
-{
-	irq_enter();
-	if (smp_processor_id() != 0)
-		kstat_this_cpu.irqs[irq]++;
-
-	/* we keep interrupt disabled all the time */
-	local_timer_interrupt(irq, NULL);
-
-	irq_exit();
-}
-
 /*
  * time_init() - it does the following things.
  *
- * 1) board_time_init() -
+ * 1) plat_time_init() -
  *  a) (optional) set up RTC routines,
  *  b) (optional) calibrate and set the mips_hpt_frequency
  *	(only needed if you intended to use cpu counter as timer interrupt
  *	source)
- * 2) setup xtime based on rtc_mips_get_time().
- * 3) calculate a couple of cached variables for later usage
- * 4) plat_timer_setup() -
+ * 2) calculate a couple of cached variables for later usage
+ * 3) plat_timer_setup() -
  *  a) (optional) over-write any choices made above by time_init().
  *  b) machine specific code should setup the timer irqaction.
  *  c) enable the timer interrupt
  */
 
-void (*board_time_init)(void);
-
 unsigned int mips_hpt_frequency;
 
-static struct irqaction timer_irqaction = {
-	.handler = timer_interrupt,
-	.flags = IRQF_DISABLED | IRQF_PERCPU,
-	.name = "timer",
-};
-
 static unsigned int __init calibrate_hpt(void)
 {
 	cycle_t frequency, hpt_start, hpt_end, hpt_count, hz;
@@ -334,6 +228,84 @@ struct clocksource clocksource_mips = {
 	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
+static int mips_next_event(unsigned long delta,
+			   struct clock_event_device *evt)
+{
+	unsigned int cnt;
+	int res;
+
+#ifdef CONFIG_MIPS_MT_SMTC
+	{
+	unsigned long flags, vpflags;
+	local_irq_save(flags);
+	vpflags = dvpe();
+#endif
+	cnt = read_c0_count();
+	cnt += delta;
+	write_c0_compare(cnt);
+	res = ((long)(read_c0_count() - cnt ) > 0) ? -ETIME : 0;
+#ifdef CONFIG_MIPS_MT_SMTC
+	evpe(vpflags);
+	local_irq_restore(flags);
+	}
+#endif
+	return res;
+}
+
+static void mips_set_mode(enum clock_event_mode mode,
+			  struct clock_event_device *evt)
+{
+	/* Nothing to do ... */
+}
+
+static DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device);
+static int cp0_timer_irq_installed;
+
+static irqreturn_t timer_interrupt(int irq, void *dev_id)
+{
+	const int r2 = cpu_has_mips_r2;
+	struct clock_event_device *cd;
+	int cpu = smp_processor_id();
+
+	/*
+	 * Suckage alert:
+	 * Before R2 of the architecture there was no way to see if a
+	 * performance counter interrupt was pending, so we have to run
+	 * the performance counter interrupt handler anyway.
+	 */
+	if (handle_perf_irq(r2))
+		goto out;
+
+	/*
+	 * The same applies to performance counter interrupts. But with the
+	 * above we now know that the reason we got here must be a timer
+	 * interrupt. Being the paranoiacs we are we check anyway.
+	 */
+	if (!r2 || (read_c0_cause() & (1 << 30))) {
+		c0_timer_ack();
+#ifdef CONFIG_MIPS_MT_SMTC
+		if (cpu_data[cpu].vpe_id)
+			goto out;
+		cpu = 0;
+#endif
+		cd = &per_cpu(mips_clockevent_device, cpu);
+		cd->event_handler(cd);
+	}
+
+out:
+	return IRQ_HANDLED;
+}
+
+static struct irqaction timer_irqaction = {
+	.handler = timer_interrupt,
+#ifdef CONFIG_MIPS_MT_SMTC
+	.flags = IRQF_DISABLED,
+#else
+	.flags = IRQF_DISABLED | IRQF_PERCPU,
+#endif
+	.name = "timer",
+};
+
 static void __init init_mips_clocksource(void)
 {
 	u64 temp;
@@ -357,19 +329,127 @@ static void __init init_mips_clocksource(void)
 	clocksource_register(&clocksource_mips);
 }
 
-void __init time_init(void)
+void __init __weak plat_time_init(void)
+{
+}
+
+void __init __weak plat_timer_setup(struct irqaction *irq)
+{
+}
+
+#ifdef CONFIG_MIPS_MT_SMTC
+DEFINE_PER_CPU(struct clock_event_device, smtc_dummy_clockevent_device);
+
+static void smtc_set_mode(enum clock_event_mode mode,
+			  struct clock_event_device *evt)
+{
+}
+
+int dummycnt[NR_CPUS];
+
+static void mips_broadcast(cpumask_t mask)
+{
+	unsigned int cpu;
+
+	for_each_cpu_mask(cpu, mask)
+		smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0);
+}
+
+static void setup_smtc_dummy_clockevent_device(void)
+{
+	//uint64_t mips_freq = mips_hpt_frequency;
+	unsigned int cpu = smp_processor_id();
+	struct clock_event_device *cd;
+
+	cd = &per_cpu(smtc_dummy_clockevent_device, cpu);
+
+	cd->name = "SMTC";
+	cd->features = CLOCK_EVT_FEAT_DUMMY;
+
+	/* Calculate the min / max delta */
+	cd->mult = 0; //div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32);
+	cd->shift = 0; //32;
+	cd->max_delta_ns = 0; //clockevent_delta2ns(0x7fffffff, cd);
+	cd->min_delta_ns = 0; //clockevent_delta2ns(0x30, cd);
+
+	cd->rating = 200;
+	cd->irq = 17; //-1;
+//	if (cpu)
+//		cd->cpumask = CPU_MASK_ALL; // cpumask_of_cpu(cpu);
+//	else
+		cd->cpumask = cpumask_of_cpu(cpu);
+
+	cd->set_mode = smtc_set_mode;
+
+	cd->broadcast = mips_broadcast;
+
+	clockevents_register_device(cd);
+}
+#endif
+
+static void mips_event_handler(struct clock_event_device *dev)
 {
-	if (board_time_init)
-		board_time_init();
+}
 
-	if (!rtc_mips_set_mmss)
-		rtc_mips_set_mmss = rtc_mips_set_time;
+void __cpuinit mips_clockevent_init(void)
+{
+	uint64_t mips_freq = mips_hpt_frequency;
+	unsigned int cpu = smp_processor_id();
+	struct clock_event_device *cd;
+	unsigned int irq = MIPS_CPU_IRQ_BASE + 7;
 
-	xtime.tv_sec = rtc_mips_get_time();
-	xtime.tv_nsec = 0;
+	if (!cpu_has_counter)
+		return;
 
-	set_normalized_timespec(&wall_to_monotonic,
-				-xtime.tv_sec, -xtime.tv_nsec);
+#ifdef CONFIG_MIPS_MT_SMTC
+	setup_smtc_dummy_clockevent_device();
+
+	/*
+	 * On SMTC we only register VPE0's compare interrupt as clockevent
+	 * device.
+	 */
+	if (cpu)
+		return;
+#endif
+
+	cd = &per_cpu(mips_clockevent_device, cpu);
+
+	cd->name = "MIPS";
+	cd->features = CLOCK_EVT_FEAT_ONESHOT;
+
+	/* Calculate the min / max delta */
+	cd->mult = div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32);
+	cd->shift = 32;
+	cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
+	cd->min_delta_ns = clockevent_delta2ns(0x30, cd);
+
+	cd->rating = 300;
+	cd->irq = irq;
+#ifdef CONFIG_MIPS_MT_SMTC
+	cd->cpumask = CPU_MASK_ALL;
+#else
+	cd->cpumask = cpumask_of_cpu(cpu);
+#endif
+	cd->set_next_event = mips_next_event;
+	cd->set_mode = mips_set_mode;
+	cd->event_handler = mips_event_handler;
+
+	clockevents_register_device(cd);
+
+	if (!cp0_timer_irq_installed) {
+#ifdef CONFIG_MIPS_MT_SMTC
+#define CPUCTR_IMASKBIT (0x100 << cp0_compare_irq)
+		setup_irq_smtc(irq, &timer_irqaction, CPUCTR_IMASKBIT);
+#else
+		setup_irq(irq, &timer_irqaction);
+#endif /* CONFIG_MIPS_MT_SMTC */
+		cp0_timer_irq_installed = 1;
+	}
+}
+
+void __init time_init(void)
+{
+	plat_time_init();
 
 	/* Choose appropriate high precision timer routines. */
 	if (!cpu_has_counter && !clocksource_mips.read)
@@ -392,11 +472,6 @@ void __init time_init(void)
 			/* Calculate cache parameters. */
 			cycles_per_jiffy =
 				(mips_hpt_frequency + HZ / 2) / HZ;
-			/*
-			 * This sets up the high precision
-			 * timer for the first interrupt.
-			 */
-			c0_hpt_timer_init();
 		}
 	}
 	if (!mips_hpt_frequency)
@@ -406,6 +481,10 @@ void __init time_init(void)
 		printk("Using %u.%03u MHz high precision timer.\n",
 		       ((mips_hpt_frequency + 500) / 1000) / 1000,
 		       ((mips_hpt_frequency + 500) / 1000) % 1000);
+
+#ifdef CONFIG_IRQ_CPU
+		setup_irq(MIPS_CPU_IRQ_BASE + 7, &timer_irqaction);
+#endif
 	}
 
 	if (!mips_timer_ack)
@@ -426,56 +505,5 @@ void __init time_init(void)
 	plat_timer_setup(&timer_irqaction);
 
 	init_mips_clocksource();
+	mips_clockevent_init();
 }
-
-#define FEBRUARY		2
-#define STARTOFTIME		1970
-#define SECDAY			86400L
-#define SECYR			(SECDAY * 365)
-#define leapyear(y)		((!((y) % 4) && ((y) % 100)) || !((y) % 400))
-#define days_in_year(y)		(leapyear(y) ? 366 : 365)
-#define days_in_month(m)	(month_days[(m) - 1])
-
-static int month_days[12] = {
-	31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
-};
-
-void to_tm(unsigned long tim, struct rtc_time *tm)
-{
-	long hms, day, gday;
-	int i;
-
-	gday = day = tim / SECDAY;
-	hms = tim % SECDAY;
-
-	/* Hours, minutes, seconds are easy */
-	tm->tm_hour = hms / 3600;
-	tm->tm_min = (hms % 3600) / 60;
-	tm->tm_sec = (hms % 3600) % 60;
-
-	/* Number of years in days */
-	for (i = STARTOFTIME; day >= days_in_year(i); i++)
-		day -= days_in_year(i);
-	tm->tm_year = i;
-
-	/* Number of months in days left */
-	if (leapyear(tm->tm_year))
-		days_in_month(FEBRUARY) = 29;
-	for (i = 1; day >= days_in_month(i); i++)
-		day -= days_in_month(i);
-	days_in_month(FEBRUARY) = 28;
-	tm->tm_mon = i - 1;		/* tm_mon starts from 0 to 11 */
-
-	/* Days are what is left over (+1) from all that. */
-	tm->tm_mday = day + 1;
-
-	/*
-	 * Determine the day of week
-	 */
-	tm->tm_wday = (gday + 4) % 7;	/* 1970/1/1 was Thursday */
-}
-
-EXPORT_SYMBOL(rtc_lock);
-EXPORT_SYMBOL(to_tm);
-EXPORT_SYMBOL(rtc_mips_set_time);
-EXPORT_SYMBOL(rtc_mips_get_time);