Diffstat (limited to 'arch/sh/kernel/timers')
 arch/sh/kernel/timers/timer-tmu.c | 182
 1 file changed, 109 insertions(+), 73 deletions(-)
diff --git a/arch/sh/kernel/timers/timer-tmu.c b/arch/sh/kernel/timers/timer-tmu.c
index d9e3151c891e..2d997e2a5b6c 100644
--- a/arch/sh/kernel/timers/timer-tmu.c
+++ b/arch/sh/kernel/timers/timer-tmu.c
@@ -1,7 +1,7 @@
 /*
  * arch/sh/kernel/timers/timer-tmu.c - TMU Timer Support
  *
- * Copyright (C) 2005 Paul Mundt
+ * Copyright (C) 2005 - 2007 Paul Mundt
  *
  * TMU handling code hacked out of arch/sh/kernel/time.c
  *
@@ -18,6 +18,7 @@
 #include <linux/kernel.h>
 #include <linux/interrupt.h>
 #include <linux/seqlock.h>
+#include <linux/clockchips.h>
 #include <asm/timer.h>
 #include <asm/rtc.h>
 #include <asm/io.h>
@@ -25,56 +26,75 @@
 #include <asm/clock.h>
 
 #define TMU_TOCR_INIT	0x00
-#define TMU0_TCR_INIT	0x0020
-#define TMU_TSTR_INIT	1
+#define TMU_TCR_INIT	0x0020
 
-#define TMU0_TCR_CALIB	0x0000
+static int tmu_timer_start(void)
+{
+	ctrl_outb(ctrl_inb(TMU_TSTR) | 0x3, TMU_TSTR);
+	return 0;
+}
 
-static unsigned long tmu_timer_get_offset(void)
+static void tmu0_timer_set_interval(unsigned long interval, unsigned int reload)
 {
-	int count;
-	static int count_p = 0x7fffffff; /* for the first call after boot */
-	static unsigned long jiffies_p = 0;
+	ctrl_outl(interval, TMU0_TCNT);
 
 	/*
-	 * cache volatile jiffies temporarily; we have IRQs turned off.
+	 * TCNT reloads from TCOR on underflow, clear it if we don't
+	 * intend to auto-reload
 	 */
-	unsigned long jiffies_t;
+	if (reload)
+		ctrl_outl(interval, TMU0_TCOR);
+	else
+		ctrl_outl(0, TMU0_TCOR);
 
-	/* timer count may underflow right here */
-	count = ctrl_inl(TMU0_TCNT);	/* read the latched count */
+	tmu_timer_start();
+}
 
-	jiffies_t = jiffies;
+static int tmu_timer_stop(void)
+{
+	ctrl_outb(ctrl_inb(TMU_TSTR) & ~0x3, TMU_TSTR);
+	return 0;
+}
 
-	/*
-	 * avoiding timer inconsistencies (they are rare, but they happen)...
-	 * there is one kind of problem that must be avoided here:
-	 *  1. the timer counter underflows
-	 */
+static cycle_t tmu_timer_read(void)
+{
+	return ~ctrl_inl(TMU1_TCNT);
+}
+
+static int tmu_set_next_event(unsigned long cycles,
+			      struct clock_event_device *evt)
+{
+	tmu0_timer_set_interval(cycles, 1);
+	return 0;
+}
 
-	if (jiffies_t == jiffies_p) {
-		if (count > count_p) {
-			/* the nutcase */
-			if (ctrl_inw(TMU0_TCR) & 0x100) { /* Check UNF bit */
-				count -= LATCH;
-			} else {
-				printk("%s (): hardware timer problem?\n",
-				       __FUNCTION__);
-			}
-		}
-	} else
-		jiffies_p = jiffies_t;
-
-	count_p = count;
-
-	count = ((LATCH-1) - count) * TICK_SIZE;
-	count = (count + LATCH/2) / LATCH;
-
-	return count;
+static void tmu_set_mode(enum clock_event_mode mode,
+			 struct clock_event_device *evt)
+{
+	switch (mode) {
+	case CLOCK_EVT_MODE_PERIODIC:
+		ctrl_outl(ctrl_inl(TMU0_TCNT), TMU0_TCOR);
+		break;
+	case CLOCK_EVT_MODE_ONESHOT:
+		ctrl_outl(0, TMU0_TCOR);
+		break;
+	case CLOCK_EVT_MODE_UNUSED:
+	case CLOCK_EVT_MODE_SHUTDOWN:
+		break;
+	}
 }
 
+static struct clock_event_device tmu0_clockevent = {
+	.name		= "tmu0",
+	.shift		= 32,
+	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
+	.set_mode	= tmu_set_mode,
+	.set_next_event	= tmu_set_next_event,
+};
+
 static irqreturn_t tmu_timer_interrupt(int irq, void *dummy)
 {
+	struct clock_event_device *evt = &tmu0_clockevent;
 	unsigned long timer_status;
 
 	/* Clear UNF bit */
@@ -82,72 +102,76 @@ static irqreturn_t tmu_timer_interrupt(int irq, void *dummy)
 	timer_status &= ~0x100;
 	ctrl_outw(timer_status, TMU0_TCR);
 
-	/*
-	 * Here we are in the timer irq handler. We just have irqs locally
-	 * disabled but we don't know if the timer_bh is running on the other
-	 * CPU. We need to avoid to SMP race with it. NOTE: we don' t need
-	 * the irq version of write_lock because as just said we have irq
-	 * locally disabled. -arca
-	 */
-	write_seqlock(&xtime_lock);
-	handle_timer_tick();
-	write_sequnlock(&xtime_lock);
+	evt->event_handler(evt);
 
 	return IRQ_HANDLED;
 }
 
-static struct irqaction tmu_irq = {
-	.name		= "timer",
+static struct irqaction tmu0_irq = {
+	.name		= "periodic timer",
 	.handler	= tmu_timer_interrupt,
 	.flags		= IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
 	.mask		= CPU_MASK_NONE,
 };
 
-static void tmu_clk_init(struct clk *clk)
+static void tmu0_clk_init(struct clk *clk)
 {
-	u8 divisor = TMU0_TCR_INIT & 0x7;
-	ctrl_outw(TMU0_TCR_INIT, TMU0_TCR);
+	u8 divisor = TMU_TCR_INIT & 0x7;
+	ctrl_outw(TMU_TCR_INIT, TMU0_TCR);
 	clk->rate = clk->parent->rate / (4 << (divisor << 1));
 }
 
-static void tmu_clk_recalc(struct clk *clk)
+static void tmu0_clk_recalc(struct clk *clk)
 {
 	u8 divisor = ctrl_inw(TMU0_TCR) & 0x7;
 	clk->rate = clk->parent->rate / (4 << (divisor << 1));
 }
 
-static struct clk_ops tmu_clk_ops = {
-	.init		= tmu_clk_init,
-	.recalc		= tmu_clk_recalc,
+static struct clk_ops tmu0_clk_ops = {
+	.init		= tmu0_clk_init,
+	.recalc		= tmu0_clk_recalc,
 };
 
 static struct clk tmu0_clk = {
 	.name		= "tmu0_clk",
-	.ops		= &tmu_clk_ops,
+	.ops		= &tmu0_clk_ops,
 };
 
-static int tmu_timer_start(void)
+static void tmu1_clk_init(struct clk *clk)
 {
-	ctrl_outb(TMU_TSTR_INIT, TMU_TSTR);
-	return 0;
+	u8 divisor = TMU_TCR_INIT & 0x7;
+	ctrl_outw(divisor, TMU1_TCR);
+	clk->rate = clk->parent->rate / (4 << (divisor << 1));
 }
 
-static int tmu_timer_stop(void)
+static void tmu1_clk_recalc(struct clk *clk)
 {
-	ctrl_outb(0, TMU_TSTR);
-	return 0;
+	u8 divisor = ctrl_inw(TMU1_TCR) & 0x7;
+	clk->rate = clk->parent->rate / (4 << (divisor << 1));
 }
 
+static struct clk_ops tmu1_clk_ops = {
+	.init		= tmu1_clk_init,
+	.recalc		= tmu1_clk_recalc,
+};
+
+static struct clk tmu1_clk = {
+	.name		= "tmu1_clk",
+	.ops		= &tmu1_clk_ops,
+};
+
 static int tmu_timer_init(void)
 {
 	unsigned long interval;
+	unsigned long frequency;
 
-	setup_irq(CONFIG_SH_TIMER_IRQ, &tmu_irq);
+	setup_irq(CONFIG_SH_TIMER_IRQ, &tmu0_irq);
 
 	tmu0_clk.parent = clk_get(NULL, "module_clk");
+	tmu1_clk.parent = clk_get(NULL, "module_clk");
 
-	/* Start TMU0 */
 	tmu_timer_stop();
+
 #if !defined(CONFIG_CPU_SUBTYPE_SH7300) && \
     !defined(CONFIG_CPU_SUBTYPE_SH7760) && \
     !defined(CONFIG_CPU_SUBTYPE_SH7785)
@@ -155,15 +179,29 @@ static int tmu_timer_init(void)
 #endif
 
 	clk_register(&tmu0_clk);
+	clk_register(&tmu1_clk);
 	clk_enable(&tmu0_clk);
+	clk_enable(&tmu1_clk);
 
-	interval = (clk_get_rate(&tmu0_clk) + HZ / 2) / HZ;
-	printk(KERN_INFO "Interval = %ld\n", interval);
+	frequency = clk_get_rate(&tmu0_clk);
+	interval = (frequency + HZ / 2) / HZ;
 
-	ctrl_outl(interval, TMU0_TCOR);
-	ctrl_outl(interval, TMU0_TCNT);
+	sh_hpt_frequency = clk_get_rate(&tmu1_clk);
+	ctrl_outl(~0, TMU1_TCNT);
+	ctrl_outl(~0, TMU1_TCOR);
 
-	tmu_timer_start();
+	tmu0_timer_set_interval(interval, 1);
+
+	tmu0_clockevent.mult = div_sc(frequency, NSEC_PER_SEC,
+				      tmu0_clockevent.shift);
+	tmu0_clockevent.max_delta_ns =
+			clockevent_delta2ns(-1, &tmu0_clockevent);
+	tmu0_clockevent.min_delta_ns =
+			clockevent_delta2ns(1, &tmu0_clockevent);
+
+	tmu0_clockevent.cpumask = cpumask_of_cpu(0);
+
+	clockevents_register_device(&tmu0_clockevent);
 
 	return 0;
 }
@@ -172,9 +210,7 @@ struct sys_timer_ops tmu_timer_ops = {
 	.init		= tmu_timer_init,
 	.start		= tmu_timer_start,
 	.stop		= tmu_timer_stop,
-#ifndef CONFIG_GENERIC_TIME
-	.get_offset	= tmu_timer_get_offset,
-#endif
+	.read		= tmu_timer_read,
 };
 
 struct sys_timer tmu_timer = {
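
The .mult/.shift setup at the end of tmu_timer_init() is the standard clockevents fixed-point conversion between timer cycles and nanoseconds. A minimal standalone sketch of that arithmetic, assuming a hypothetical 8.33 MHz TMU0 input clock (the real rate comes from the board's module clock via the clk framework); div_sc() and delta2ns() here are simplified stand-ins for the kernel's div_sc() and clockevent_delta2ns():

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

/* Simplified div_sc(): mult = (freq << shift) / NSEC_PER_SEC */
static uint64_t div_sc(uint64_t ticks, uint64_t nsec, int shift)
{
	return (ticks << shift) / nsec;
}

/* Simplified clockevent_delta2ns(): timer cycles back to nanoseconds */
static uint64_t delta2ns(uint64_t cycles, uint64_t mult, int shift)
{
	return (cycles << shift) / mult;
}

int main(void)
{
	uint64_t freq = 8333333;	/* hypothetical TMU0 input clock, Hz */
	int shift = 32;			/* matches tmu0_clockevent.shift */
	uint64_t mult = div_sc(freq, NSEC_PER_SEC, shift);

	printf("mult = %llu\n", (unsigned long long)mult);
	printf("1000 cycles ~= %llu ns\n",
	       (unsigned long long)delta2ns(1000, mult, shift));
	return 0;
}

At 8.33 MHz, 1000 cycles converts back to roughly 120000 ns, which matches 1000/8333333 seconds, so the programmed TCNT value and the nanosecond deadline stay in agreement.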
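
tmu_timer_read() returns ~ctrl_inl(TMU1_TCNT) because the TMU is a down-counter (TCNT is loaded with ~0 and decrements on each input clock), while a clocksource must appear to count up. A small sketch, not from the patch, of why the bitwise complement does the job:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t tcnt = 0xffffffff;	/* TMU1_TCNT after ctrl_outl(~0, TMU1_TCNT) */
	int i;

	for (i = 0; i <= 3; i++) {
		/* ~TCNT rises by exactly 1 for every hardware decrement */
		printf("TCNT=%08x  ~TCNT=%u\n", tcnt, ~tcnt);
		tcnt--;			/* the hardware decrements TCNT */
	}
	return 0;
}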
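
The clk rate computation shared by the tmu*_clk_init()/tmu*_clk_recalc() routines decodes the TPSC field in the low three bits of TCR, which selects a peripheral-clock prescaler of /4, /16, /64, and so on; that is exactly parent_rate / (4 << (divisor << 1)). A quick check, assuming a hypothetical 33.33 MHz module clock:

#include <stdio.h>

int main(void)
{
	unsigned long parent_rate = 33333333;	/* hypothetical module clock, Hz */
	unsigned int divisor;

	/* TPSC=0 -> /4, TPSC=1 -> /16, TPSC=2 -> /64 */
	for (divisor = 0; divisor <= 2; divisor++)
		printf("TPSC=%u -> Pphi/%u -> %lu Hz\n", divisor,
		       4u << (divisor << 1),
		       parent_rate / (4 << (divisor << 1)));
	return 0;
}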