-rw-r--r--	arch/sh/kernel/timers/timer-tmu.c	177
1 file changed, 119 insertions, 58 deletions
diff --git a/arch/sh/kernel/timers/timer-tmu.c b/arch/sh/kernel/timers/timer-tmu.c
index 1ca9ad49b541..aaaf90d06b85 100644
--- a/arch/sh/kernel/timers/timer-tmu.c
+++ b/arch/sh/kernel/timers/timer-tmu.c
@@ -28,43 +28,90 @@
 #define TMU_TOCR_INIT	0x00
 #define TMU_TCR_INIT	0x0020
 
-static int tmu_timer_start(void)
+#define TMU0		(0)
+#define TMU1		(1)
+
+static inline void _tmu_start(int tmu_num)
 {
-	ctrl_outb(ctrl_inb(TMU_012_TSTR) | 0x3, TMU_012_TSTR);
-	return 0;
+	ctrl_outb(ctrl_inb(TMU_012_TSTR) | (0x1<<tmu_num), TMU_012_TSTR);
 }
 
-static void tmu0_timer_set_interval(unsigned long interval, unsigned int reload)
+static inline void _tmu_set_irq(int tmu_num, int enabled)
 {
-	ctrl_outl(interval, TMU0_TCNT);
+	register unsigned long tmu_tcr = TMU0_TCR + (0xc*tmu_num);
+	ctrl_outw( (enabled ? ctrl_inw(tmu_tcr) | (1<<5) : ctrl_inw(tmu_tcr) & ~(1<<5)), tmu_tcr);
+}
 
-	/*
-	 * TCNT reloads from TCOR on underflow, clear it if we don't
-	 * intend to auto-reload
-	 */
-	if (reload)
-		ctrl_outl(interval, TMU0_TCOR);
-	else
-		ctrl_outl(0, TMU0_TCOR);
+static inline void _tmu_stop(int tmu_num)
+{
+	ctrl_outb(ctrl_inb(TMU_012_TSTR) & ~(0x1<<tmu_num), TMU_012_TSTR);
+}
+
+static inline void _tmu_clear_status(int tmu_num)
+{
+	register unsigned long tmu_tcr = TMU0_TCR + (0xc*tmu_num);
+	/* Clear UNF bit */
+	ctrl_outw(ctrl_inw(tmu_tcr) & ~0x100, tmu_tcr);
+}
 
-	tmu_timer_start();
+static inline unsigned long _tmu_read(int tmu_num)
+{
+	return ctrl_inl(TMU0_TCNT+0xC*tmu_num);
+}
+
+static int tmu_timer_start(void)
+{
+	_tmu_start(TMU0);
+	_tmu_start(TMU1);
+	_tmu_set_irq(TMU0,1);
+	return 0;
 }
 
 static int tmu_timer_stop(void)
 {
-	ctrl_outb(ctrl_inb(TMU_012_TSTR) & ~0x3, TMU_012_TSTR);
+	_tmu_stop(TMU0);
+	_tmu_stop(TMU1);
+	_tmu_clear_status(TMU0);
 	return 0;
 }
 
+/*
+ * also when the module_clk is scaled the TMU1
+ * will show the same frequency
+ */
+static int tmus_are_scaled;
+
 static cycle_t tmu_timer_read(void)
 {
-	return ~ctrl_inl(TMU1_TCNT);
+	return ((cycle_t)(~_tmu_read(TMU1)))<<tmus_are_scaled;
+}
+
+
+static unsigned long tmu_latest_interval[3];
+static void tmu_timer_set_interval(int tmu_num, unsigned long interval, unsigned int reload)
+{
+	unsigned long tmu_tcnt = TMU0_TCNT + tmu_num*0xC;
+	unsigned long tmu_tcor = TMU0_TCOR + tmu_num*0xC;
+
+	_tmu_stop(tmu_num);
+
+	ctrl_outl(interval, tmu_tcnt);
+	tmu_latest_interval[tmu_num] = interval;
+
+	/*
+	 * TCNT reloads from TCOR on underflow, clear it if we don't
+	 * intend to auto-reload
+	 */
+	ctrl_outl( reload ? interval : 0 , tmu_tcor);
+
+	_tmu_start(tmu_num);
 }
 
 static int tmu_set_next_event(unsigned long cycles,
 			      struct clock_event_device *evt)
 {
-	tmu0_timer_set_interval(cycles, 1);
+	tmu_timer_set_interval(TMU0,cycles, evt->mode == CLOCK_EVT_MODE_PERIODIC);
+	_tmu_set_irq(TMU0,1);
 	return 0;
 }
 
@@ -96,12 +143,8 @@ static struct clock_event_device tmu0_clockevent = {
 static irqreturn_t tmu_timer_interrupt(int irq, void *dummy)
 {
 	struct clock_event_device *evt = &tmu0_clockevent;
-	unsigned long timer_status;
-
-	/* Clear UNF bit */
-	timer_status = ctrl_inw(TMU0_TCR);
-	timer_status &= ~0x100;
-	ctrl_outw(timer_status, TMU0_TCR);
+	_tmu_clear_status(TMU0);
+	_tmu_set_irq(TMU0,tmu0_clockevent.mode != CLOCK_EVT_MODE_ONESHOT);
 
 	evt->event_handler(evt);
 
@@ -109,56 +152,73 @@ static irqreturn_t tmu_timer_interrupt(int irq, void *dummy)
 }
 
 static struct irqaction tmu0_irq = {
-	.name		= "periodic timer",
+	.name		= "periodic/oneshot timer",
 	.handler	= tmu_timer_interrupt,
 	.flags		= IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
 	.mask		= CPU_MASK_NONE,
 };
 
-static void tmu0_clk_init(struct clk *clk)
+static void __init tmu_clk_init(struct clk *clk)
 {
 	u8 divisor = TMU_TCR_INIT & 0x7;
-	ctrl_outw(TMU_TCR_INIT, TMU0_TCR);
-	clk->rate = clk->parent->rate / (4 << (divisor << 1));
+	int tmu_num = clk->name[3]-'0';
+	ctrl_outw(TMU_TCR_INIT, TMU0_TCR+(tmu_num*0xC));
+	clk->rate = clk_get_rate(clk->parent) / (4 << (divisor << 1));
 }
 
-static void tmu0_clk_recalc(struct clk *clk)
+static void tmu_clk_recalc(struct clk *clk)
 {
-	u8 divisor = ctrl_inw(TMU0_TCR) & 0x7;
-	clk->rate = clk->parent->rate / (4 << (divisor << 1));
-}
+	int tmu_num = clk->name[3]-'0';
+	unsigned long prev_rate = clk_get_rate(clk);
+	unsigned long flags;
+	u8 divisor = ctrl_inw(TMU0_TCR+tmu_num*0xC) & 0x7;
+	clk->rate = clk_get_rate(clk->parent) / (4 << (divisor << 1));
 
-static struct clk_ops tmu0_clk_ops = {
-	.init		= tmu0_clk_init,
-	.recalc		= tmu0_clk_recalc,
-};
+	if(prev_rate==clk_get_rate(clk))
+		return;
 
-static struct clk tmu0_clk = {
-	.name		= "tmu0_clk",
-	.ops		= &tmu0_clk_ops,
-};
+	if(tmu_num)
+		return; /* No more work on TMU1 */
 
-static void tmu1_clk_init(struct clk *clk)
-{
-	u8 divisor = TMU_TCR_INIT & 0x7;
-	ctrl_outw(divisor, TMU1_TCR);
-	clk->rate = clk->parent->rate / (4 << (divisor << 1));
-}
+	local_irq_save(flags);
+	tmus_are_scaled = (prev_rate > clk->rate);
 
-static void tmu1_clk_recalc(struct clk *clk)
-{
-	u8 divisor = ctrl_inw(TMU1_TCR) & 0x7;
-	clk->rate = clk->parent->rate / (4 << (divisor << 1));
+	_tmu_stop(TMU0);
+
+	tmu0_clockevent.mult = div_sc(clk->rate, NSEC_PER_SEC,
+				tmu0_clockevent.shift);
+	tmu0_clockevent.max_delta_ns =
+			clockevent_delta2ns(-1, &tmu0_clockevent);
+	tmu0_clockevent.min_delta_ns =
+			clockevent_delta2ns(1, &tmu0_clockevent);
+
+	if (tmus_are_scaled)
+		tmu_latest_interval[TMU0] >>= 1;
+	else
+		tmu_latest_interval[TMU0] <<= 1;
+
+	tmu_timer_set_interval(TMU0,
+		tmu_latest_interval[TMU0],
+		tmu0_clockevent.mode == CLOCK_EVT_MODE_PERIODIC);
+
+	_tmu_start(TMU0);
+
+	local_irq_restore(flags);
 }
 
-static struct clk_ops tmu1_clk_ops = {
-	.init		= tmu1_clk_init,
-	.recalc		= tmu1_clk_recalc,
+static struct clk_ops tmu_clk_ops = {
+	.init		= tmu_clk_init,
+	.recalc		= tmu_clk_recalc,
+};
+
+static struct clk tmu0_clk = {
+	.name		= "tmu0_clk",
+	.ops		= &tmu_clk_ops,
 };
 
 static struct clk tmu1_clk = {
 	.name		= "tmu1_clk",
-	.ops		= &tmu1_clk_ops,
+	.ops		= &tmu_clk_ops,
 };
 
 static int tmu_timer_init(void)
@@ -189,11 +249,12 @@ static int tmu_timer_init(void)
 	frequency = clk_get_rate(&tmu0_clk);
 	interval = (frequency + HZ / 2) / HZ;
 
-	sh_hpt_frequency = clk_get_rate(&tmu1_clk);
-	ctrl_outl(~0, TMU1_TCNT);
-	ctrl_outl(~0, TMU1_TCOR);
+	tmu_timer_set_interval(TMU0,interval, 1);
+	tmu_timer_set_interval(TMU1,~0,1);
 
-	tmu0_timer_set_interval(interval, 1);
+	_tmu_start(TMU1);
+
+	sh_hpt_frequency = clk_get_rate(&tmu1_clk);
 
 	tmu0_clockevent.mult = div_sc(frequency, NSEC_PER_SEC,
 					tmu0_clockevent.shift);
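
The helpers introduced above all rely on the same pattern: each channel's TCOR/TCNT/TCR block sits 0xC bytes past TMU0's, TSTR holds one start bit per channel, TCR bit 5 gates the underflow interrupt, and bit 8 (0x100) is the UNF flag cleared in _tmu_clear_status(). Below is a minimal host-side sketch of that addressing and bit manipulation using a fake register array; the offsets and read/write helpers are illustrative stand-ins, not the kernel's ctrl_inw()/ctrl_outw() MMIO accessors.

/*
 * Stand-alone sketch (assumed offsets, fake register file) of the per-channel
 * TMU register addressing used by the _tmu_* helpers in the patch above.
 */
#include <stdio.h>
#include <stdint.h>

#define TMU0_TCR   0x10   /* stand-in offset of TMU0's TCR in the fake register file */
#define TMU_STRIDE 0x0c   /* per-channel register block stride, as in the patch */

static uint16_t regs[0x40];   /* fake TMU register space, indexed by offset */

static uint16_t read16(unsigned long off)              { return regs[off]; }
static void     write16(unsigned long off, uint16_t v) { regs[off] = v; }

/* mirrors _tmu_set_irq(): set or clear the underflow interrupt enable (bit 5) */
static void tmu_set_irq(int tmu_num, int enabled)
{
	unsigned long tcr = TMU0_TCR + TMU_STRIDE * tmu_num;
	write16(tcr, enabled ? read16(tcr) | (1 << 5)
			     : read16(tcr) & ~(1 << 5));
}

/* mirrors _tmu_clear_status(): clear the UNF underflow flag (bit 8) */
static void tmu_clear_status(int tmu_num)
{
	unsigned long tcr = TMU0_TCR + TMU_STRIDE * tmu_num;
	write16(tcr, read16(tcr) & ~0x100);
}

int main(void)
{
	unsigned long tmu1_tcr = TMU0_TCR + TMU_STRIDE;  /* TMU1's TCR, one stride up */

	regs[tmu1_tcr] = 0x0100;   /* pretend TMU1 underflowed: UNF set, IRQ disabled */
	tmu_set_irq(1, 1);         /* enable the underflow interrupt  -> 0x0120 */
	tmu_clear_status(1);       /* acknowledge the underflow       -> 0x0020 */
	printf("TMU1 TCR = 0x%04x\n", regs[tmu1_tcr]);
	return 0;
}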