author		Magnus Damm <damm@opensource.se>	2012-12-14 00:54:00 -0500
committer	Simon Horman <horms+renesas@verge.net.au>	2013-03-12 13:24:36 -0400
commit		1b56b96b663d135305c3c47755fbdde3dc0ef720 (patch)
tree		d5a7dfdb3f4bdf8f284065509dab3510834d6a48
parent		44a10f943f59339f1206d599d4269a35995e397e (diff)
clocksource: sh_cmt: Introduce per-register functions
Introduce sh_cmt_read_cmstr/cmcsr/cmcnt() and sh_cmt_write_cmstr/cmcsr/cmcnt/cmcor() so that, in the future, counter registers can be split from control registers and code complexity reduced by removing sh_cmt_read() and sh_cmt_write().

Signed-off-by: Magnus Damm <damm@opensource.se>
Acked-by: John Stultz <john.stultz@linaro.org>
Tested-by: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
Signed-off-by: Simon Horman <horms+renesas@verge.net.au>
-rw-r--r--	drivers/clocksource/sh_cmt.c	71
1 file changed, 55 insertions(+), 16 deletions(-)
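For context (not part of the patch): a minimal, hypothetical userspace sketch of the accessor-wrapper pattern this patch introduces. All names below (cmt_mock, cmt_read, cmt_read_cmcnt, cmt_write_cmcsr, ...) are stand-ins for the driver's sh_cmt_* helpers, not kernel code. The point it illustrates is the one stated in the commit message: once every caller goes through a per-register wrapper, the counter-register accessors (CMCNT/CMCOR) can later change base address or access width without touching any call site.

/* Hypothetical sketch only -- mock register file standing in for a CMT
 * channel, to show how per-register accessors decouple callers from the
 * raw offset-based read/write helpers. */
#include <stdio.h>

enum { CMSTR = 0, CMCSR = 1, CMCNT = 2, CMCOR = 3 };

struct cmt_mock {
	unsigned long regs[4];	/* stand-in for the memory-mapped block */
};

/* Offset-based helpers, analogous to sh_cmt_read()/sh_cmt_write(). */
static unsigned long cmt_read(struct cmt_mock *p, int reg_nr)
{
	return p->regs[reg_nr];
}

static void cmt_write(struct cmt_mock *p, int reg_nr, unsigned long value)
{
	p->regs[reg_nr] = value;
}

/* Per-register wrappers, mirroring sh_cmt_read_cmcnt() and friends.
 * Because callers name the register instead of an offset, the CMCNT/CMCOR
 * pair could later be routed to a different base or access width here,
 * in one place, without changing the callers. */
static unsigned long cmt_read_cmcnt(struct cmt_mock *p)
{
	return cmt_read(p, CMCNT);
}

static void cmt_write_cmcsr(struct cmt_mock *p, unsigned long value)
{
	cmt_write(p, CMCSR, value);
}

int main(void)
{
	struct cmt_mock ch = { { 0 } };

	cmt_write_cmcsr(&ch, 0x01a4);	/* caller no longer names an offset */
	printf("CMCSR=%#lx CMCNT=%lu\n",
	       cmt_read(&ch, CMCSR), cmt_read_cmcnt(&ch));
	return 0;
}

The patch below applies exactly this mechanical substitution to the driver: the wrappers are introduced first, then every sh_cmt_read()/sh_cmt_write() call site is converted.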
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index 2e496841b167..94fd3abd6434 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -86,6 +86,21 @@ static inline unsigned long sh_cmt_read(struct sh_cmt_priv *p, int reg_nr)
 	return ioread16(base + offs);
 }
 
+static inline unsigned long sh_cmt_read_cmstr(struct sh_cmt_priv *p)
+{
+	return sh_cmt_read(p, CMSTR);
+}
+
+static inline unsigned long sh_cmt_read_cmcsr(struct sh_cmt_priv *p)
+{
+	return sh_cmt_read(p, CMCSR);
+}
+
+static inline unsigned long sh_cmt_read_cmcnt(struct sh_cmt_priv *p)
+{
+	return sh_cmt_read(p, CMCNT);
+}
+
 static inline void sh_cmt_write(struct sh_cmt_priv *p, int reg_nr,
 				unsigned long value)
 {
@@ -112,21 +127,45 @@ static inline void sh_cmt_write(struct sh_cmt_priv *p, int reg_nr,
 	iowrite16(value, base + offs);
 }
 
+static inline void sh_cmt_write_cmstr(struct sh_cmt_priv *p,
+				       unsigned long value)
+{
+	sh_cmt_write(p, CMSTR, value);
+}
+
+static inline void sh_cmt_write_cmcsr(struct sh_cmt_priv *p,
+				       unsigned long value)
+{
+	sh_cmt_write(p, CMCSR, value);
+}
+
+static inline void sh_cmt_write_cmcnt(struct sh_cmt_priv *p,
+				       unsigned long value)
+{
+	sh_cmt_write(p, CMCNT, value);
+}
+
+static inline void sh_cmt_write_cmcor(struct sh_cmt_priv *p,
+				       unsigned long value)
+{
+	sh_cmt_write(p, CMCOR, value);
+}
+
 static unsigned long sh_cmt_get_counter(struct sh_cmt_priv *p,
 					int *has_wrapped)
 {
 	unsigned long v1, v2, v3;
 	int o1, o2;
 
-	o1 = sh_cmt_read(p, CMCSR) & p->overflow_bit;
+	o1 = sh_cmt_read_cmcsr(p) & p->overflow_bit;
 
 	/* Make sure the timer value is stable. Stolen from acpi_pm.c */
 	do {
 		o2 = o1;
-		v1 = sh_cmt_read(p, CMCNT);
-		v2 = sh_cmt_read(p, CMCNT);
-		v3 = sh_cmt_read(p, CMCNT);
-		o1 = sh_cmt_read(p, CMCSR) & p->overflow_bit;
+		v1 = sh_cmt_read_cmcnt(p);
+		v2 = sh_cmt_read_cmcnt(p);
+		v3 = sh_cmt_read_cmcnt(p);
+		o1 = sh_cmt_read_cmcsr(p) & p->overflow_bit;
 	} while (unlikely((o1 != o2) || (v1 > v2 && v1 < v3)
 			  || (v2 > v3 && v2 < v1) || (v3 > v1 && v3 < v2)));
 
@@ -142,14 +181,14 @@ static void sh_cmt_start_stop_ch(struct sh_cmt_priv *p, int start)
 
 	/* start stop register shared by multiple timer channels */
 	raw_spin_lock_irqsave(&sh_cmt_lock, flags);
-	value = sh_cmt_read(p, CMSTR);
+	value = sh_cmt_read_cmstr(p);
 
 	if (start)
 		value |= 1 << cfg->timer_bit;
 	else
 		value &= ~(1 << cfg->timer_bit);
 
-	sh_cmt_write(p, CMSTR, value);
+	sh_cmt_write_cmstr(p, value);
 	raw_spin_unlock_irqrestore(&sh_cmt_lock, flags);
 }
 
@@ -173,14 +212,14 @@ static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate)
 	/* configure channel, periodic mode and maximum timeout */
 	if (p->width == 16) {
 		*rate = clk_get_rate(p->clk) / 512;
-		sh_cmt_write(p, CMCSR, 0x43);
+		sh_cmt_write_cmcsr(p, 0x43);
 	} else {
 		*rate = clk_get_rate(p->clk) / 8;
-		sh_cmt_write(p, CMCSR, 0x01a4);
+		sh_cmt_write_cmcsr(p, 0x01a4);
 	}
 
-	sh_cmt_write(p, CMCOR, 0xffffffff);
-	sh_cmt_write(p, CMCNT, 0);
+	sh_cmt_write_cmcor(p, 0xffffffff);
+	sh_cmt_write_cmcnt(p, 0);
 
 	/*
 	 * According to the sh73a0 user's manual, as CMCNT can be operated
@@ -194,12 +233,12 @@ static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate)
 	 * take RCLKx2 at maximum.
 	 */
 	for (k = 0; k < 100; k++) {
-		if (!sh_cmt_read(p, CMCNT))
+		if (!sh_cmt_read_cmcnt(p))
 			break;
 		udelay(1);
 	}
 
-	if (sh_cmt_read(p, CMCNT)) {
+	if (sh_cmt_read_cmcnt(p)) {
 		dev_err(&p->pdev->dev, "cannot clear CMCNT\n");
 		ret = -ETIMEDOUT;
 		goto err1;
@@ -222,7 +261,7 @@ static void sh_cmt_disable(struct sh_cmt_priv *p)
 	sh_cmt_start_stop_ch(p, 0);
 
 	/* disable interrupts in CMT block */
-	sh_cmt_write(p, CMCSR, 0);
+	sh_cmt_write_cmcsr(p, 0);
 
 	/* stop clock */
 	clk_disable(p->clk);
@@ -270,7 +309,7 @@ static void sh_cmt_clock_event_program_verify(struct sh_cmt_priv *p,
 	if (new_match > p->max_match_value)
 		new_match = p->max_match_value;
 
-	sh_cmt_write(p, CMCOR, new_match);
+	sh_cmt_write_cmcor(p, new_match);
 
 	now = sh_cmt_get_counter(p, &has_wrapped);
 	if (has_wrapped && (new_match > p->match_value)) {
@@ -346,7 +385,7 @@ static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id)
 	struct sh_cmt_priv *p = dev_id;
 
 	/* clear flags */
-	sh_cmt_write(p, CMCSR, sh_cmt_read(p, CMCSR) & p->clear_bits);
+	sh_cmt_write_cmcsr(p, sh_cmt_read_cmcsr(p) & p->clear_bits);
 
 	/* update clock source counter to begin with if enabled
 	 * the wrap flag should be cleared by the timer specific