author	Paul Mundt <lethal@linux-sh.org>	2007-05-09 04:33:24 -0400
committer	Paul Mundt <lethal@linux-sh.org>	2007-05-09 04:33:24 -0400
commit	57be2b484a417bffae66359b9b89e7239480b729 (patch)
tree	d517f5b449b4f6b629790476082a4a7c478112bb /arch/sh
parent	1ce7ddd5f4cc754b6afe9eec5cee89ede75348ea (diff)
sh: clockevent/clocksource/hrtimers/nohz TMU support.
This adds basic support for clockevents and clocksources, presently only
implemented for TMU-based systems (which are the majority of SH-3 and SH-4
systems).

The old NO_IDLE_HZ implementation is also dropped completely; the only users
of this were on TMU-based systems anyway.

More work needs to be done to generalize the TMU handling, in that the
current implementation is rather tied to the notion of TMU0 and TMU1
utilization.

Additionally, as more SH timers switch over to this scheme, we'll be able to
gut most of the remaining system timer infrastructure that existed before.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
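For orientation, the core of the change is pairing a clock_event_device (TMU0,
providing periodic and one-shot ticks) with a clocksource fed by the free-running
TMU1 counter. Below is a condensed sketch of that registration flow; the struct
fields and helper calls are taken from the diff that follows, but sh_timer_register()
and its single rate argument are hypothetical stand-ins (in the patch, TMU0 and TMU1
each get their rate from clk_get_rate(), and the register-level callbacks are filled in).

#include <linux/clockchips.h>
#include <linux/clocksource.h>

static struct clock_event_device tmu0_clockevent = {
	.name		= "tmu0",
	.shift		= 32,
	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
	/* .set_mode and .set_next_event program the TMU0 registers */
};

static struct clocksource clocksource_sh = {
	.name		= "SuperH",
	.rating		= 200,
	.mask		= CLOCKSOURCE_MASK(32),
	.shift		= 16,
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
	/* .read returns the inverted free-running TMU1 count */
};

static void __init sh_timer_register(unsigned long rate)
{
	/* Scale the timer rate into the fixed-point mult the generic cores
	 * expect, then derive the programmable one-shot range from it. */
	tmu0_clockevent.mult = div_sc(rate, NSEC_PER_SEC, tmu0_clockevent.shift);
	tmu0_clockevent.max_delta_ns = clockevent_delta2ns(-1, &tmu0_clockevent);
	tmu0_clockevent.min_delta_ns = clockevent_delta2ns(1, &tmu0_clockevent);
	tmu0_clockevent.cpumask = cpumask_of_cpu(0);
	clockevents_register_device(&tmu0_clockevent);

	/* The clocksource side only needs a matching mult for its shift. */
	clocksource_sh.mult = clocksource_hz2mult(rate, clocksource_sh.shift);
	clocksource_register(&clocksource_sh);
}

With both registered, the generic timekeeping (GENERIC_TIME) and tick layers sourced
from kernel/time/Kconfig take over the roles previously filled by the hand-rolled
NO_IDLE_HZ and get_offset code removed in this patch.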
Diffstat (limited to 'arch/sh')
-rw-r--r--	arch/sh/Kconfig	29
-rw-r--r--	arch/sh/kernel/process.c	3
-rw-r--r--	arch/sh/kernel/time.c	172
-rw-r--r--	arch/sh/kernel/timers/timer-tmu.c	182
4 files changed, 174 insertions(+), 212 deletions(-)
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index d74eb120a9c6..038179ecf6a9 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -52,6 +52,9 @@ config GENERIC_IOMAP
 config GENERIC_TIME
 	def_bool n
 
+config GENERIC_CLOCKEVENTS
+	def_bool n
+
 config SYS_SUPPORTS_APM_EMULATION
 	bool
 
@@ -436,11 +439,11 @@ endmenu
 
 menu "Timer and clock configuration"
 
-if !GENERIC_TIME
-
 config SH_TMU
 	bool "TMU timer support"
 	depends on CPU_SH3 || CPU_SH4
+	select GENERIC_TIME
+	select GENERIC_CLOCKEVENTS
 	default y
 	help
 	  This enables the use of the TMU as the system timer.
@@ -459,8 +462,6 @@ config SH_MTU2
 	help
 	  This enables the use of the MTU2 as the system timer.
 
-endif
-
 config SH_TIMER_IRQ
 	int
 	default "28" if CPU_SUBTYPE_SH7780 || CPU_SUBTYPE_SH7785
@@ -468,24 +469,6 @@ config SH_TIMER_IRQ
 	default "140" if CPU_SUBTYPE_SH7206
 	default "16"
 
-config NO_IDLE_HZ
-	bool "Dynamic tick timer"
-	help
-	  Select this option if you want to disable continuous timer ticks
-	  and have them programmed to occur as required. This option saves
-	  power as the system can remain in idle state for longer.
-
-	  By default dynamic tick is disabled during the boot, and can be
-	  manually enabled with:
-
-	    echo 1 > /sys/devices/system/timer/timer0/dyn_tick
-
-	  Alternatively, if you want dynamic tick automatically enabled
-	  during boot, pass "dyntick=enable" via the kernel command string.
-
-	  Please note that dynamic tick may affect the accuracy of
-	  timekeeping on some platforms depending on the implementation.
-
 config SH_PCLK_FREQ
 	int "Peripheral clock frequency (in Hz)"
 	default "27000000" if CPU_SUBTYPE_SH73180 || CPU_SUBTYPE_SH7343
@@ -509,6 +492,8 @@ config SH_CLK_MD
 	help
 	  MD2 - MD0 pin setting.
 
+source "kernel/time/Kconfig"
+
 endmenu
 
 menu "CPU Frequency scaling"
diff --git a/arch/sh/kernel/process.c b/arch/sh/kernel/process.c
index d755589ba8b1..6b4f5748d0be 100644
--- a/arch/sh/kernel/process.c
+++ b/arch/sh/kernel/process.c
@@ -16,6 +16,7 @@
 #include <linux/kallsyms.h>
 #include <linux/kexec.h>
 #include <linux/kdebug.h>
+#include <linux/tick.h>
 #include <asm/uaccess.h>
 #include <asm/mmu_context.h>
 #include <asm/pgalloc.h>
@@ -60,8 +61,10 @@ void cpu_idle(void)
 		if (!idle)
 			idle = default_idle;
 
+		tick_nohz_stop_sched_tick();
 		while (!need_resched())
 			idle();
+		tick_nohz_restart_sched_tick();
 
 		preempt_enable_no_resched();
 		schedule();
diff --git a/arch/sh/kernel/time.c b/arch/sh/kernel/time.c
index d47e775962e9..a3a67d151e52 100644
--- a/arch/sh/kernel/time.c
+++ b/arch/sh/kernel/time.c
@@ -3,7 +3,7 @@
  *
  * Copyright (C) 1999  Tetsuya Okada & Niibe Yutaka
  * Copyright (C) 2000  Philipp Rumpf <prumpf@tux.org>
- * Copyright (C) 2002 - 2006  Paul Mundt
+ * Copyright (C) 2002 - 2007  Paul Mundt
  * Copyright (C) 2002  M. R. Brown  <mrbrown@linux-sh.org>
  *
  *  Some code taken from i386 version.
@@ -15,6 +15,7 @@
 #include <linux/profile.h>
 #include <linux/timex.h>
 #include <linux/sched.h>
+#include <linux/clockchips.h>
 #include <asm/clock.h>
 #include <asm/rtc.h>
 #include <asm/timer.h>
@@ -38,6 +39,14 @@ static int null_rtc_set_time(const time_t secs)
 	return 0;
 }
 
+/*
+ * Null high precision timer functions for systems lacking one.
+ */
+static cycle_t null_hpt_read(void)
+{
+	return 0;
+}
+
 void (*rtc_sh_get_time)(struct timespec *) = null_rtc_get_time;
 int (*rtc_sh_set_time)(const time_t) = null_rtc_set_time;
 
@@ -101,6 +110,7 @@ int do_settimeofday(struct timespec *tv)
 EXPORT_SYMBOL(do_settimeofday);
 #endif /* !CONFIG_GENERIC_TIME */
 
+#ifndef CONFIG_GENERIC_CLOCKEVENTS
 /* last time the RTC clock got updated */
 static long last_rtc_update;
 
@@ -138,6 +148,7 @@ void handle_timer_tick(void)
 		last_rtc_update = xtime.tv_sec - 600;
 	}
 }
+#endif /* !CONFIG_GENERIC_CLOCKEVENTS */
 
 #ifdef CONFIG_PM
 int timer_suspend(struct sys_device *dev, pm_message_t state)
@@ -168,136 +179,58 @@ static struct sysdev_class timer_sysclass = {
 	.resume	= timer_resume,
 };
 
-#ifdef CONFIG_NO_IDLE_HZ
-static int timer_dyn_tick_enable(void)
-{
-	struct dyn_tick_timer *dyn_tick = sys_timer->dyn_tick;
-	unsigned long flags;
-	int ret = -ENODEV;
-
-	if (dyn_tick) {
-		spin_lock_irqsave(&dyn_tick->lock, flags);
-		ret = 0;
-		if (!(dyn_tick->state & DYN_TICK_ENABLED)) {
-			ret = dyn_tick->enable();
-
-			if (ret == 0)
-				dyn_tick->state |= DYN_TICK_ENABLED;
-		}
-		spin_unlock_irqrestore(&dyn_tick->lock, flags);
-	}
-
-	return ret;
-}
-
-static int timer_dyn_tick_disable(void)
-{
-	struct dyn_tick_timer *dyn_tick = sys_timer->dyn_tick;
-	unsigned long flags;
-	int ret = -ENODEV;
-
-	if (dyn_tick) {
-		spin_lock_irqsave(&dyn_tick->lock, flags);
-		ret = 0;
-		if (dyn_tick->state & DYN_TICK_ENABLED) {
-			ret = dyn_tick->disable();
-
-			if (ret == 0)
-				dyn_tick->state &= ~DYN_TICK_ENABLED;
-		}
-		spin_unlock_irqrestore(&dyn_tick->lock, flags);
-	}
-
-	return ret;
-}
-
-/*
- * Reprogram the system timer for at least the calculated time interval.
- * This function should be called from the idle thread with IRQs disabled,
- * immediately before sleeping.
- */
-void timer_dyn_reprogram(void)
-{
-	struct dyn_tick_timer *dyn_tick = sys_timer->dyn_tick;
-	unsigned long next, seq, flags;
-
-	if (!dyn_tick)
-		return;
-
-	spin_lock_irqsave(&dyn_tick->lock, flags);
-	if (dyn_tick->state & DYN_TICK_ENABLED) {
-		next = next_timer_interrupt();
-		do {
-			seq = read_seqbegin(&xtime_lock);
-			dyn_tick->reprogram(next - jiffies);
-		} while (read_seqretry(&xtime_lock, seq));
-	}
-	spin_unlock_irqrestore(&dyn_tick->lock, flags);
-}
-
-static ssize_t timer_show_dyn_tick(struct sys_device *dev, char *buf)
-{
-	return sprintf(buf, "%i\n",
-		       (sys_timer->dyn_tick->state & DYN_TICK_ENABLED) >> 1);
-}
-
-static ssize_t timer_set_dyn_tick(struct sys_device *dev, const char *buf,
-				  size_t count)
-{
-	unsigned int enable = simple_strtoul(buf, NULL, 2);
-
-	if (enable)
-		timer_dyn_tick_enable();
-	else
-		timer_dyn_tick_disable();
-
-	return count;
-}
-static SYSDEV_ATTR(dyn_tick, 0644, timer_show_dyn_tick, timer_set_dyn_tick);
-
-/*
- * dyntick=enable|disable
- */
-static char dyntick_str[4] __initdata = "";
-
-static int __init dyntick_setup(char *str)
-{
-	if (str)
-		strlcpy(dyntick_str, str, sizeof(dyntick_str));
-	return 1;
-}
-
-__setup("dyntick=", dyntick_setup);
-#endif
-
 static int __init timer_init_sysfs(void)
 {
 	int ret = sysdev_class_register(&timer_sysclass);
 	if (ret != 0)
 		return ret;
 
 	sys_timer->dev.cls = &timer_sysclass;
-	ret = sysdev_register(&sys_timer->dev);
-
-#ifdef CONFIG_NO_IDLE_HZ
-	if (ret == 0 && sys_timer->dyn_tick) {
-		ret = sysdev_create_file(&sys_timer->dev, &attr_dyn_tick);
-
-		/*
-		 * Turn on dynamic tick after calibrate delay
-		 * for correct bogomips
-		 */
-		if (ret == 0 && dyntick_str[0] == 'e')
-			ret = timer_dyn_tick_enable();
-	}
-#endif
-
-	return ret;
+	return sysdev_register(&sys_timer->dev);
 }
 device_initcall(timer_init_sysfs);
 
 void (*board_time_init)(void);
 
+/*
+ * Shamelessly based on the MIPS and Sparc64 work.
+ */
+static unsigned long timer_ticks_per_nsec_quotient __read_mostly;
+unsigned long sh_hpt_frequency = 0;
+
+#define NSEC_PER_CYC_SHIFT	10
+
+struct clocksource clocksource_sh = {
+	.name		= "SuperH",
+	.rating		= 200,
+	.mask		= CLOCKSOURCE_MASK(32),
+	.read		= null_hpt_read,
+	.shift		= 16,
+	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static void __init init_sh_clocksource(void)
+{
+	if (!sh_hpt_frequency || clocksource_sh.read == null_hpt_read)
+		return;
+
+	clocksource_sh.mult = clocksource_hz2mult(sh_hpt_frequency,
+						  clocksource_sh.shift);
+
+	timer_ticks_per_nsec_quotient =
+		clocksource_hz2mult(sh_hpt_frequency, NSEC_PER_CYC_SHIFT);
+
+	clocksource_register(&clocksource_sh);
+}
+
+#ifdef CONFIG_GENERIC_TIME
+unsigned long long sched_clock(void)
+{
+	unsigned long long ticks = clocksource_sh.read();
+	return (ticks * timer_ticks_per_nsec_quotient) >> NSEC_PER_CYC_SHIFT;
+}
+#endif
+
 void __init time_init(void)
 {
 	if (board_time_init)
@@ -316,10 +249,15 @@ void __init time_init(void)
 	sys_timer = get_sys_timer();
 	printk(KERN_INFO "Using %s for system timer\n", sys_timer->name);
 
-#ifdef CONFIG_NO_IDLE_HZ
-	if (sys_timer->dyn_tick)
-		spin_lock_init(&sys_timer->dyn_tick->lock);
-#endif
+	if (sys_timer->ops->read)
+		clocksource_sh.read = sys_timer->ops->read;
+
+	init_sh_clocksource();
+
+	if (sh_hpt_frequency)
+		printk("Using %lu.%03lu MHz high precision timer.\n",
+		       ((sh_hpt_frequency + 500) / 1000) / 1000,
+		       ((sh_hpt_frequency + 500) / 1000) % 1000);
 
 #if defined(CONFIG_SH_KGDB)
 	/*
diff --git a/arch/sh/kernel/timers/timer-tmu.c b/arch/sh/kernel/timers/timer-tmu.c
index d9e3151c891e..2d997e2a5b6c 100644
--- a/arch/sh/kernel/timers/timer-tmu.c
+++ b/arch/sh/kernel/timers/timer-tmu.c
@@ -1,7 +1,7 @@
 /*
  * arch/sh/kernel/timers/timer-tmu.c - TMU Timer Support
  *
- *  Copyright (C) 2005  Paul Mundt
+ *  Copyright (C) 2005 - 2007  Paul Mundt
  *
  *  TMU handling code hacked out of arch/sh/kernel/time.c
  *
@@ -18,6 +18,7 @@
 #include <linux/kernel.h>
 #include <linux/interrupt.h>
 #include <linux/seqlock.h>
+#include <linux/clockchips.h>
 #include <asm/timer.h>
 #include <asm/rtc.h>
 #include <asm/io.h>
@@ -25,56 +26,75 @@
 #include <asm/clock.h>
 
 #define TMU_TOCR_INIT	0x00
-#define TMU0_TCR_INIT	0x0020
-#define TMU_TSTR_INIT	1
-
-#define TMU0_TCR_CALIB	0x0000
-
-static unsigned long tmu_timer_get_offset(void)
-{
-	int count;
-	static int count_p = 0x7fffffff;    /* for the first call after boot */
-	static unsigned long jiffies_p = 0;
-
-	/*
-	 * cache volatile jiffies temporarily; we have IRQs turned off.
-	 */
-	unsigned long jiffies_t;
-
-	/* timer count may underflow right here */
-	count = ctrl_inl(TMU0_TCNT);	/* read the latched count */
-
-	jiffies_t = jiffies;
-
-	/*
-	 * avoiding timer inconsistencies (they are rare, but they happen)...
-	 * there is one kind of problem that must be avoided here:
-	 *  1. the timer counter underflows
-	 */
-
-	if (jiffies_t == jiffies_p) {
-		if (count > count_p) {
-			/* the nutcase */
-			if (ctrl_inw(TMU0_TCR) & 0x100) { /* Check UNF bit */
-				count -= LATCH;
-			} else {
-				printk("%s (): hardware timer problem?\n",
-				       __FUNCTION__);
-			}
-		}
-	} else
-		jiffies_p = jiffies_t;
-
-	count_p = count;
-
-	count = ((LATCH-1) - count) * TICK_SIZE;
-	count = (count + LATCH/2) / LATCH;
-
-	return count;
-}
-
+#define TMU_TCR_INIT	0x0020
+
+static int tmu_timer_start(void)
+{
+	ctrl_outb(ctrl_inb(TMU_TSTR) | 0x3, TMU_TSTR);
+	return 0;
+}
+
+static void tmu0_timer_set_interval(unsigned long interval, unsigned int reload)
+{
+	ctrl_outl(interval, TMU0_TCNT);
+
+	/*
+	 * TCNT reloads from TCOR on underflow, clear it if we don't
+	 * intend to auto-reload
+	 */
+	if (reload)
+		ctrl_outl(interval, TMU0_TCOR);
+	else
+		ctrl_outl(0, TMU0_TCOR);
+
+	tmu_timer_start();
+}
+
+static int tmu_timer_stop(void)
+{
+	ctrl_outb(ctrl_inb(TMU_TSTR) & ~0x3, TMU_TSTR);
+	return 0;
+}
+
+static cycle_t tmu_timer_read(void)
+{
+	return ~ctrl_inl(TMU1_TCNT);
+}
+
+static int tmu_set_next_event(unsigned long cycles,
+			      struct clock_event_device *evt)
+{
+	tmu0_timer_set_interval(cycles, 1);
+	return 0;
+}
+
+static void tmu_set_mode(enum clock_event_mode mode,
+			 struct clock_event_device *evt)
+{
+	switch (mode) {
+	case CLOCK_EVT_MODE_PERIODIC:
+		ctrl_outl(ctrl_inl(TMU0_TCNT), TMU0_TCOR);
+		break;
+	case CLOCK_EVT_MODE_ONESHOT:
+		ctrl_outl(0, TMU0_TCOR);
+		break;
+	case CLOCK_EVT_MODE_UNUSED:
+	case CLOCK_EVT_MODE_SHUTDOWN:
+		break;
+	}
+}
+
+static struct clock_event_device tmu0_clockevent = {
+	.name		= "tmu0",
+	.shift		= 32,
+	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
+	.set_mode	= tmu_set_mode,
+	.set_next_event	= tmu_set_next_event,
+};
+
 static irqreturn_t tmu_timer_interrupt(int irq, void *dummy)
 {
+	struct clock_event_device *evt = &tmu0_clockevent;
 	unsigned long timer_status;
 
 	/* Clear UNF bit */
@@ -82,72 +102,76 @@ static irqreturn_t tmu_timer_interrupt(int irq, void *dummy)
 	timer_status &= ~0x100;
 	ctrl_outw(timer_status, TMU0_TCR);
 
-	/*
-	 * Here we are in the timer irq handler. We just have irqs locally
-	 * disabled but we don't know if the timer_bh is running on the other
-	 * CPU. We need to avoid to SMP race with it. NOTE: we don' t need
-	 * the irq version of write_lock because as just said we have irq
-	 * locally disabled. -arca
-	 */
-	write_seqlock(&xtime_lock);
-	handle_timer_tick();
-	write_sequnlock(&xtime_lock);
+	evt->event_handler(evt);
 
 	return IRQ_HANDLED;
 }
 
-static struct irqaction tmu_irq = {
-	.name		= "timer",
+static struct irqaction tmu0_irq = {
+	.name		= "periodic timer",
 	.handler	= tmu_timer_interrupt,
 	.flags		= IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
 	.mask		= CPU_MASK_NONE,
 };
 
-static void tmu_clk_init(struct clk *clk)
+static void tmu0_clk_init(struct clk *clk)
 {
-	u8 divisor = TMU0_TCR_INIT & 0x7;
-	ctrl_outw(TMU0_TCR_INIT, TMU0_TCR);
+	u8 divisor = TMU_TCR_INIT & 0x7;
+	ctrl_outw(TMU_TCR_INIT, TMU0_TCR);
 	clk->rate = clk->parent->rate / (4 << (divisor << 1));
 }
 
-static void tmu_clk_recalc(struct clk *clk)
+static void tmu0_clk_recalc(struct clk *clk)
 {
 	u8 divisor = ctrl_inw(TMU0_TCR) & 0x7;
 	clk->rate = clk->parent->rate / (4 << (divisor << 1));
 }
 
-static struct clk_ops tmu_clk_ops = {
-	.init		= tmu_clk_init,
-	.recalc		= tmu_clk_recalc,
+static struct clk_ops tmu0_clk_ops = {
+	.init		= tmu0_clk_init,
+	.recalc		= tmu0_clk_recalc,
 };
 
 static struct clk tmu0_clk = {
 	.name		= "tmu0_clk",
-	.ops		= &tmu_clk_ops,
+	.ops		= &tmu0_clk_ops,
 };
 
-static int tmu_timer_start(void)
-{
-	ctrl_outb(TMU_TSTR_INIT, TMU_TSTR);
-	return 0;
-}
-
-static int tmu_timer_stop(void)
-{
-	ctrl_outb(0, TMU_TSTR);
-	return 0;
-}
-
+static void tmu1_clk_init(struct clk *clk)
+{
+	u8 divisor = TMU_TCR_INIT & 0x7;
+	ctrl_outw(divisor, TMU1_TCR);
+	clk->rate = clk->parent->rate / (4 << (divisor << 1));
+}
+
+static void tmu1_clk_recalc(struct clk *clk)
+{
+	u8 divisor = ctrl_inw(TMU1_TCR) & 0x7;
+	clk->rate = clk->parent->rate / (4 << (divisor << 1));
+}
+
+static struct clk_ops tmu1_clk_ops = {
+	.init		= tmu1_clk_init,
+	.recalc		= tmu1_clk_recalc,
+};
+
+static struct clk tmu1_clk = {
+	.name		= "tmu1_clk",
+	.ops		= &tmu1_clk_ops,
+};
+
 static int tmu_timer_init(void)
 {
 	unsigned long interval;
+	unsigned long frequency;
 
-	setup_irq(CONFIG_SH_TIMER_IRQ, &tmu_irq);
+	setup_irq(CONFIG_SH_TIMER_IRQ, &tmu0_irq);
 
 	tmu0_clk.parent = clk_get(NULL, "module_clk");
+	tmu1_clk.parent = clk_get(NULL, "module_clk");
 
-	/* Start TMU0 */
 	tmu_timer_stop();
+
 #if !defined(CONFIG_CPU_SUBTYPE_SH7300) && \
 	!defined(CONFIG_CPU_SUBTYPE_SH7760) && \
 	!defined(CONFIG_CPU_SUBTYPE_SH7785)
@@ -155,15 +179,29 @@ static int tmu_timer_init(void)
 #endif
 
 	clk_register(&tmu0_clk);
+	clk_register(&tmu1_clk);
 	clk_enable(&tmu0_clk);
+	clk_enable(&tmu1_clk);
 
-	interval = (clk_get_rate(&tmu0_clk) + HZ / 2) / HZ;
-	printk(KERN_INFO "Interval = %ld\n", interval);
+	frequency = clk_get_rate(&tmu0_clk);
+	interval = (frequency + HZ / 2) / HZ;
 
-	ctrl_outl(interval, TMU0_TCOR);
-	ctrl_outl(interval, TMU0_TCNT);
+	sh_hpt_frequency = clk_get_rate(&tmu1_clk);
+	ctrl_outl(~0, TMU1_TCNT);
+	ctrl_outl(~0, TMU1_TCOR);
 
-	tmu_timer_start();
+	tmu0_timer_set_interval(interval, 1);
+
+	tmu0_clockevent.mult = div_sc(frequency, NSEC_PER_SEC,
+				      tmu0_clockevent.shift);
+	tmu0_clockevent.max_delta_ns =
+			clockevent_delta2ns(-1, &tmu0_clockevent);
+	tmu0_clockevent.min_delta_ns =
+			clockevent_delta2ns(1, &tmu0_clockevent);
+
+	tmu0_clockevent.cpumask = cpumask_of_cpu(0);
+
+	clockevents_register_device(&tmu0_clockevent);
 
 	return 0;
 }
@@ -172,9 +210,7 @@ struct sys_timer_ops tmu_timer_ops = {
 	.init		= tmu_timer_init,
 	.start		= tmu_timer_start,
 	.stop		= tmu_timer_stop,
-#ifndef CONFIG_GENERIC_TIME
-	.get_offset	= tmu_timer_get_offset,
-#endif
+	.read		= tmu_timer_read,
 };
 
 struct sys_timer tmu_timer = {