Diffstat (limited to 'arch/sh/kernel/time_64.c')
-rw-r--r--  arch/sh/kernel/time_64.c | 363
1 file changed, 0 insertions, 363 deletions
diff --git a/arch/sh/kernel/time_64.c b/arch/sh/kernel/time_64.c
deleted file mode 100644
index 988c77c37231..000000000000
--- a/arch/sh/kernel/time_64.c
+++ /dev/null
@@ -1,363 +0,0 @@
/*
 * arch/sh/kernel/time_64.c
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2003 - 2007  Paul Mundt
 * Copyright (C) 2003  Richard Curnow
 *
 * Original TMU/RTC code taken from sh version.
 * Copyright (C) 1999  Tetsuya Okada & Niibe Yutaka
 * Some code taken from i386 version.
 * Copyright (C) 1991, 1992, 1995  Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/errno.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/time.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/profile.h>
#include <linux/smp.h>
#include <linux/module.h>
#include <linux/bcd.h>
#include <linux/timex.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <cpu/registers.h>	/* required by inline __asm__ stmt. */
#include <cpu/irq.h>
#include <asm/addrspace.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/delay.h>
#include <asm/clock.h>

#define TMU_TOCR_INIT	0x00
#define TMU0_TCR_INIT	0x0020
#define TMU_TSTR_INIT	1
#define TMU_TSTR_OFF	0

/* Real Time Clock */
#define RTC_BLOCK_OFF	0x01040000
#define RTC_BASE	PHYS_PERIPHERAL_BLOCK + RTC_BLOCK_OFF
#define RTC_RCR1_CIE	0x10	/* Carry Interrupt Enable */
#define RTC_RCR1	(rtc_base + 0x38)

/* Time Management Unit */
#define TMU_BLOCK_OFF	0x01020000
#define TMU_BASE	PHYS_PERIPHERAL_BLOCK + TMU_BLOCK_OFF
#define TMU0_BASE	tmu_base + 0x8 + (0xc * 0x0)
#define TMU1_BASE	tmu_base + 0x8 + (0xc * 0x1)
#define TMU2_BASE	tmu_base + 0x8 + (0xc * 0x2)

#define TMU_TOCR	tmu_base+0x0	/* Byte access */
#define TMU_TSTR	tmu_base+0x4	/* Byte access */

#define TMU0_TCOR	TMU0_BASE+0x0	/* Long access */
#define TMU0_TCNT	TMU0_BASE+0x4	/* Long access */
#define TMU0_TCR	TMU0_BASE+0x8	/* Word access */

#define TICK_SIZE (tick_nsec / 1000)

static unsigned long tmu_base, rtc_base;
unsigned long cprc_base;

/* Variables to allow interpolation of time of day to resolution better than a
 * jiffy. */

/* This is effectively protected by xtime_lock */
static unsigned long ctc_last_interrupt;
static unsigned long long usecs_per_jiffy = 1000000/HZ;	/* Approximation */

#define CTC_JIFFY_SCALE_SHIFT 40

/* 2**CTC_JIFFY_SCALE_SHIFT / ctc_ticks_per_jiffy */
static unsigned long long scaled_recip_ctc_ticks_per_jiffy;

/* Estimate number of microseconds that have elapsed since the last timer tick,
   by scaling the delta that has occurred in the CTC register.

   WARNING WARNING WARNING : This algorithm relies on the CTC decrementing at
   the CPU clock rate.  If the CPU sleeps, the CTC stops counting.  Bear this
   in mind if enabling SLEEP_WORKS in process.c.  In that case, this algorithm
   probably needs to use TMU.TCNT0 instead.  This will work even if the CPU is
   sleeping, though will be coarser.

   FIXME : What if usecs_per_tick is moving around too much, e.g. if an adjtime
   is running or if the freq or tick arguments of adjtimex are modified after
   we have calibrated the scaling factor?  This will result in either a jump at
   the end of a tick period, or a wrap backwards at the start of the next one,
   if the application is reading the time of day often enough.  I think we
   ought to do better than this.  For this reason, usecs_per_jiffy is left
   separated out in the calculation below.  This allows some future hook into
   the adjtime-related stuff in kernel/timer.c to remove this hazard.

*/

static unsigned long usecs_since_tick(void)
{
	unsigned long long current_ctc;
	long ctc_ticks_since_interrupt;
	unsigned long long ull_ctc_ticks_since_interrupt;
	unsigned long result;

	unsigned long long mul1_out;
	unsigned long long mul1_out_high;
	unsigned long long mul2_out_low, mul2_out_high;

	/* Read CTC register */
	asm ("getcon cr62, %0" : "=r" (current_ctc));
	/* Note, the CTC counts down on each CPU clock, not up.
	   Note(2), use long type to get correct wraparound arithmetic when
	   the counter crosses zero. */
	ctc_ticks_since_interrupt = (long) ctc_last_interrupt - (long) current_ctc;
	ull_ctc_ticks_since_interrupt = (unsigned long long) ctc_ticks_since_interrupt;

	/* Inline assembly to do 32x32x32->64 multiplier */
	asm volatile ("mulu.l %1, %2, %0" :
		      "=r" (mul1_out) :
		      "r" (ull_ctc_ticks_since_interrupt), "r" (usecs_per_jiffy));

	mul1_out_high = mul1_out >> 32;

	asm volatile ("mulu.l %1, %2, %0" :
		      "=r" (mul2_out_low) :
		      "r" (mul1_out), "r" (scaled_recip_ctc_ticks_per_jiffy));

#if 1
	asm volatile ("mulu.l %1, %2, %0" :
		      "=r" (mul2_out_high) :
		      "r" (mul1_out_high), "r" (scaled_recip_ctc_ticks_per_jiffy));
#endif

	result = (unsigned long) (((mul2_out_high << 32) + mul2_out_low) >> CTC_JIFFY_SCALE_SHIFT);

	return result;
}
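
/* Worked example of the scaling above, using assumed numbers that are not
 * taken from this file: with a 400 MHz CPU clock and HZ=100,
 * ctc_ticks_per_jiffy is 4,000,000 and scaled_recip_ctc_ticks_per_jiffy is
 * 2^40 / 4,000,000 ~= 274,878.  A CTC delta of 2,000,000 ticks (half a jiffy)
 * with usecs_per_jiffy = 10,000 then gives
 * (2,000,000 * 10,000 * 274,878) >> 40 ~= 5,000 us, i.e. half a jiffy.  The
 * two mulu.l steps merely split the 64-bit intermediate product into 32-bit
 * halves before the final shift. */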

void do_gettimeofday(struct timeval *tv)
{
	unsigned long flags;
	unsigned long seq;
	unsigned long usec, sec;

	do {
		seq = read_seqbegin_irqsave(&xtime_lock, flags);
		usec = usecs_since_tick();
		sec = xtime.tv_sec;
		usec += xtime.tv_nsec / 1000;
	} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));

	while (usec >= 1000000) {
		usec -= 1000000;
		sec++;
	}

	tv->tv_sec = sec;
	tv->tv_usec = usec;
}
EXPORT_SYMBOL(do_gettimeofday);

int do_settimeofday(struct timespec *tv)
{
	time_t wtm_sec, sec = tv->tv_sec;
	long wtm_nsec, nsec = tv->tv_nsec;

	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
		return -EINVAL;

	write_seqlock_irq(&xtime_lock);
	/*
	 * This is revolting. We need to set "xtime" correctly. However, the
	 * value in this location is the value at the most recent update of
	 * wall time. Discover what correction gettimeofday() would have
	 * made, and then undo it!
	 */
	nsec -= 1000 * usecs_since_tick();

	wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);

	set_normalized_timespec(&xtime, sec, nsec);
	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);

	ntp_clear();
	write_sequnlock_irq(&xtime_lock);
	clock_was_set();

	return 0;
}
EXPORT_SYMBOL(do_settimeofday);

/* Dummy RTC ops */
static void null_rtc_get_time(struct timespec *tv)
{
	tv->tv_sec = mktime(2000, 1, 1, 0, 0, 0);
	tv->tv_nsec = 0;
}

static int null_rtc_set_time(const time_t secs)
{
	return 0;
}

void (*rtc_sh_get_time)(struct timespec *) = null_rtc_get_time;
int (*rtc_sh_set_time)(const time_t) = null_rtc_set_time;

/* last time the RTC clock got updated */
static long last_rtc_update;

/*
 * timer_interrupt() needs to keep up the real-time clock,
 * as well as call the "do_timer()" routine every clocktick
 */
static inline void do_timer_interrupt(void)
{
	unsigned long long current_ctc;

	if (current->pid)
		profile_tick(CPU_PROFILING);

	/*
	 * Here we are in the timer irq handler.  We just have irqs locally
	 * disabled, but we don't know if the timer_bh is running on the other
	 * CPU, so we need to avoid an SMP race with it.  NOTE: we don't need
	 * the irq version of write_lock because, as just said, we have irqs
	 * locally disabled. -arca
	 */
	write_seqlock(&xtime_lock);
	asm ("getcon cr62, %0" : "=r" (current_ctc));
	ctc_last_interrupt = (unsigned long) current_ctc;

	do_timer(1);

	/*
	 * If we have an externally synchronized Linux clock, then update
	 * RTC clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
	 * called as close as possible to 500 ms before the new second starts.
	 */
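	/* Reading of the constants in the check below: 660 s is roughly 11
	 * minutes, the tv_nsec window keeps the RTC write within +/- half a
	 * tick of the 500 ms point, and pulling last_rtc_update back by 600 s
	 * on failure makes the next attempt eligible about 60 s later. */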
	if (ntp_synced() &&
	    xtime.tv_sec > last_rtc_update + 660 &&
	    (xtime.tv_nsec / 1000) >= 500000 - ((unsigned) TICK_SIZE) / 2 &&
	    (xtime.tv_nsec / 1000) <= 500000 + ((unsigned) TICK_SIZE) / 2) {
		if (rtc_sh_set_time(xtime.tv_sec) == 0)
			last_rtc_update = xtime.tv_sec;
		else
			/* do it again in 60 s */
			last_rtc_update = xtime.tv_sec - 600;
	}
	write_sequnlock(&xtime_lock);

#ifndef CONFIG_SMP
	update_process_times(user_mode(get_irq_regs()));
#endif
}

/*
 * This is the same as the above, except we _also_ save the current
 * Time Stamp Counter value at the time of the timer interrupt, so that
 * we later on can estimate the time of day more exactly.
 */
static irqreturn_t timer_interrupt(int irq, void *dev_id)
{
	unsigned long timer_status;

	/* Clear UNF bit */
	timer_status = ctrl_inw(TMU0_TCR);
	timer_status &= ~0x100;
	ctrl_outw(timer_status, TMU0_TCR);

	do_timer_interrupt();

	return IRQ_HANDLED;
}

static struct irqaction irq0 = {
	.handler = timer_interrupt,
	.flags = IRQF_DISABLED,
	.name = "timer",
};

void __init time_init(void)
{
	unsigned long interval;
	struct clk *clk;

	tmu_base = onchip_remap(TMU_BASE, 1024, "TMU");
	if (!tmu_base) {
		panic("Unable to remap TMU\n");
	}

	rtc_base = onchip_remap(RTC_BASE, 1024, "RTC");
	if (!rtc_base) {
		panic("Unable to remap RTC\n");
	}

	clk = clk_get(NULL, "cpu_clk");
	scaled_recip_ctc_ticks_per_jiffy = ((1ULL << CTC_JIFFY_SCALE_SHIFT) /
			(unsigned long long)(clk_get_rate(clk) / HZ));

	rtc_sh_get_time(&xtime);

	setup_irq(TIMER_IRQ, &irq0);

	clk = clk_get(NULL, "module_clk");
	interval = (clk_get_rate(clk)/(HZ*4));
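	/* Note (an assumption about the TMU TCR encoding, not stated in this
	 * file): TMU0_TCR_INIT (0x0020) enables the underflow interrupt and
	 * leaves the TPSC prescaler field at 0, i.e. the counter runs at the
	 * module clock divided by 4, which is why one tick corresponds to
	 * clk_get_rate(clk) / (HZ * 4) counts. */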

	printk("Interval = %ld\n", interval);

	/* Start TMU0 */
	ctrl_outb(TMU_TSTR_OFF, TMU_TSTR);
	ctrl_outb(TMU_TOCR_INIT, TMU_TOCR);
	ctrl_outw(TMU0_TCR_INIT, TMU0_TCR);
	ctrl_outl(interval, TMU0_TCOR);
	ctrl_outl(interval, TMU0_TCNT);
	ctrl_outb(TMU_TSTR_INIT, TMU_TSTR);
}

static struct resource rtc_resources[] = {
	[0] = {
		/* RTC base, filled in by rtc_init */
		.flags	= IORESOURCE_IO,
	},
	[1] = {
		/* Period IRQ */
		.start	= IRQ_PRI,
		.flags	= IORESOURCE_IRQ,
	},
	[2] = {
		/* Carry IRQ */
		.start	= IRQ_CUI,
		.flags	= IORESOURCE_IRQ,
	},
	[3] = {
		/* Alarm IRQ */
		.start	= IRQ_ATI,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device rtc_device = {
	.name		= "sh-rtc",
	.id		= -1,
	.num_resources	= ARRAY_SIZE(rtc_resources),
	.resource	= rtc_resources,
};

static int __init rtc_init(void)
{
	rtc_resources[0].start	= rtc_base;
	rtc_resources[0].end	= rtc_resources[0].start + 0x58 - 1;

	return platform_device_register(&rtc_device);
}
device_initcall(rtc_init);