diff options
author | Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com> | 2014-03-04 06:58:30 -0500 |
---|---|---|
committer | Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com> | 2014-04-16 06:03:26 -0400 |
commit | 42752cc619c0ee619b56f86932ce42b00adb5052 (patch) | |
tree | 3ccd31ad5a56ffb12ae344725ff0c75838ced50e /drivers/clocksource/sh_mtu2.c | |
parent | f92d62f53973466cccb25900c2597ff6df950d74 (diff) |
clocksource: sh_mtu2: Split channel fields from sh_mtu2_priv
Create a new sh_mtu2_channel structure to hold the channel-specific
fields in preparation for multiple channels per device support.
Signed-off-by: Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>
Tested-by: Wolfram Sang <wsa@sang-engineering.com>
Diffstat (limited to 'drivers/clocksource/sh_mtu2.c')
-rw-r--r-- | drivers/clocksource/sh_mtu2.c | 125 |
1 file changed, 69 insertions, 56 deletions
diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c index 66684552fcc9..e509f417ef64 100644 --- a/drivers/clocksource/sh_mtu2.c +++ b/drivers/clocksource/sh_mtu2.c | |||
@@ -34,12 +34,21 @@ | |||
34 | #include <linux/pm_domain.h> | 34 | #include <linux/pm_domain.h> |
35 | #include <linux/pm_runtime.h> | 35 | #include <linux/pm_runtime.h> |
36 | 36 | ||
37 | struct sh_mtu2_priv; | ||
38 | |||
39 | struct sh_mtu2_channel { | ||
40 | struct sh_mtu2_priv *mtu; | ||
41 | int irq; | ||
42 | struct clock_event_device ced; | ||
43 | }; | ||
44 | |||
37 | struct sh_mtu2_priv { | 45 | struct sh_mtu2_priv { |
46 | struct platform_device *pdev; | ||
47 | |||
38 | void __iomem *mapbase; | 48 | void __iomem *mapbase; |
39 | struct clk *clk; | 49 | struct clk *clk; |
40 | int irq; | 50 | |
41 | struct platform_device *pdev; | 51 | struct sh_mtu2_channel channel; |
42 | struct clock_event_device ced; | ||
43 | }; | 52 | }; |
44 | 53 | ||
45 | static DEFINE_RAW_SPINLOCK(sh_mtu2_lock); | 54 | static DEFINE_RAW_SPINLOCK(sh_mtu2_lock); |
@@ -63,10 +72,10 @@ static unsigned long mtu2_reg_offs[] = { | |||
63 | [TGR] = 8, | 72 | [TGR] = 8, |
64 | }; | 73 | }; |
65 | 74 | ||
66 | static inline unsigned long sh_mtu2_read(struct sh_mtu2_priv *p, int reg_nr) | 75 | static inline unsigned long sh_mtu2_read(struct sh_mtu2_channel *ch, int reg_nr) |
67 | { | 76 | { |
68 | struct sh_timer_config *cfg = p->pdev->dev.platform_data; | 77 | struct sh_timer_config *cfg = ch->mtu->pdev->dev.platform_data; |
69 | void __iomem *base = p->mapbase; | 78 | void __iomem *base = ch->mtu->mapbase; |
70 | unsigned long offs; | 79 | unsigned long offs; |
71 | 80 | ||
72 | if (reg_nr == TSTR) | 81 | if (reg_nr == TSTR) |
@@ -80,11 +89,11 @@ static inline unsigned long sh_mtu2_read(struct sh_mtu2_priv *p, int reg_nr) | |||
80 | return ioread8(base + offs); | 89 | return ioread8(base + offs); |
81 | } | 90 | } |
82 | 91 | ||
83 | static inline void sh_mtu2_write(struct sh_mtu2_priv *p, int reg_nr, | 92 | static inline void sh_mtu2_write(struct sh_mtu2_channel *ch, int reg_nr, |
84 | unsigned long value) | 93 | unsigned long value) |
85 | { | 94 | { |
86 | struct sh_timer_config *cfg = p->pdev->dev.platform_data; | 95 | struct sh_timer_config *cfg = ch->mtu->pdev->dev.platform_data; |
87 | void __iomem *base = p->mapbase; | 96 | void __iomem *base = ch->mtu->mapbase; |
88 | unsigned long offs; | 97 | unsigned long offs; |
89 | 98 | ||
90 | if (reg_nr == TSTR) { | 99 | if (reg_nr == TSTR) { |
@@ -100,100 +109,100 @@ static inline void sh_mtu2_write(struct sh_mtu2_priv *p, int reg_nr, | |||
100 | iowrite8(value, base + offs); | 109 | iowrite8(value, base + offs); |
101 | } | 110 | } |
102 | 111 | ||
103 | static void sh_mtu2_start_stop_ch(struct sh_mtu2_priv *p, int start) | 112 | static void sh_mtu2_start_stop_ch(struct sh_mtu2_channel *ch, int start) |
104 | { | 113 | { |
105 | struct sh_timer_config *cfg = p->pdev->dev.platform_data; | 114 | struct sh_timer_config *cfg = ch->mtu->pdev->dev.platform_data; |
106 | unsigned long flags, value; | 115 | unsigned long flags, value; |
107 | 116 | ||
108 | /* start stop register shared by multiple timer channels */ | 117 | /* start stop register shared by multiple timer channels */ |
109 | raw_spin_lock_irqsave(&sh_mtu2_lock, flags); | 118 | raw_spin_lock_irqsave(&sh_mtu2_lock, flags); |
110 | value = sh_mtu2_read(p, TSTR); | 119 | value = sh_mtu2_read(ch, TSTR); |
111 | 120 | ||
112 | if (start) | 121 | if (start) |
113 | value |= 1 << cfg->timer_bit; | 122 | value |= 1 << cfg->timer_bit; |
114 | else | 123 | else |
115 | value &= ~(1 << cfg->timer_bit); | 124 | value &= ~(1 << cfg->timer_bit); |
116 | 125 | ||
117 | sh_mtu2_write(p, TSTR, value); | 126 | sh_mtu2_write(ch, TSTR, value); |
118 | raw_spin_unlock_irqrestore(&sh_mtu2_lock, flags); | 127 | raw_spin_unlock_irqrestore(&sh_mtu2_lock, flags); |
119 | } | 128 | } |
120 | 129 | ||
121 | static int sh_mtu2_enable(struct sh_mtu2_priv *p) | 130 | static int sh_mtu2_enable(struct sh_mtu2_channel *ch) |
122 | { | 131 | { |
123 | unsigned long periodic; | 132 | unsigned long periodic; |
124 | unsigned long rate; | 133 | unsigned long rate; |
125 | int ret; | 134 | int ret; |
126 | 135 | ||
127 | pm_runtime_get_sync(&p->pdev->dev); | 136 | pm_runtime_get_sync(&ch->mtu->pdev->dev); |
128 | dev_pm_syscore_device(&p->pdev->dev, true); | 137 | dev_pm_syscore_device(&ch->mtu->pdev->dev, true); |
129 | 138 | ||
130 | /* enable clock */ | 139 | /* enable clock */ |
131 | ret = clk_enable(p->clk); | 140 | ret = clk_enable(ch->mtu->clk); |
132 | if (ret) { | 141 | if (ret) { |
133 | dev_err(&p->pdev->dev, "cannot enable clock\n"); | 142 | dev_err(&ch->mtu->pdev->dev, "cannot enable clock\n"); |
134 | return ret; | 143 | return ret; |
135 | } | 144 | } |
136 | 145 | ||
137 | /* make sure channel is disabled */ | 146 | /* make sure channel is disabled */ |
138 | sh_mtu2_start_stop_ch(p, 0); | 147 | sh_mtu2_start_stop_ch(ch, 0); |
139 | 148 | ||
140 | rate = clk_get_rate(p->clk) / 64; | 149 | rate = clk_get_rate(ch->mtu->clk) / 64; |
141 | periodic = (rate + HZ/2) / HZ; | 150 | periodic = (rate + HZ/2) / HZ; |
142 | 151 | ||
143 | /* "Periodic Counter Operation" */ | 152 | /* "Periodic Counter Operation" */ |
144 | sh_mtu2_write(p, TCR, 0x23); /* TGRA clear, divide clock by 64 */ | 153 | sh_mtu2_write(ch, TCR, 0x23); /* TGRA clear, divide clock by 64 */ |
145 | sh_mtu2_write(p, TIOR, 0); | 154 | sh_mtu2_write(ch, TIOR, 0); |
146 | sh_mtu2_write(p, TGR, periodic); | 155 | sh_mtu2_write(ch, TGR, periodic); |
147 | sh_mtu2_write(p, TCNT, 0); | 156 | sh_mtu2_write(ch, TCNT, 0); |
148 | sh_mtu2_write(p, TMDR, 0); | 157 | sh_mtu2_write(ch, TMDR, 0); |
149 | sh_mtu2_write(p, TIER, 0x01); | 158 | sh_mtu2_write(ch, TIER, 0x01); |
150 | 159 | ||
151 | /* enable channel */ | 160 | /* enable channel */ |
152 | sh_mtu2_start_stop_ch(p, 1); | 161 | sh_mtu2_start_stop_ch(ch, 1); |
153 | 162 | ||
154 | return 0; | 163 | return 0; |
155 | } | 164 | } |
156 | 165 | ||
157 | static void sh_mtu2_disable(struct sh_mtu2_priv *p) | 166 | static void sh_mtu2_disable(struct sh_mtu2_channel *ch) |
158 | { | 167 | { |
159 | /* disable channel */ | 168 | /* disable channel */ |
160 | sh_mtu2_start_stop_ch(p, 0); | 169 | sh_mtu2_start_stop_ch(ch, 0); |
161 | 170 | ||
162 | /* stop clock */ | 171 | /* stop clock */ |
163 | clk_disable(p->clk); | 172 | clk_disable(ch->mtu->clk); |
164 | 173 | ||
165 | dev_pm_syscore_device(&p->pdev->dev, false); | 174 | dev_pm_syscore_device(&ch->mtu->pdev->dev, false); |
166 | pm_runtime_put(&p->pdev->dev); | 175 | pm_runtime_put(&ch->mtu->pdev->dev); |
167 | } | 176 | } |
168 | 177 | ||
169 | static irqreturn_t sh_mtu2_interrupt(int irq, void *dev_id) | 178 | static irqreturn_t sh_mtu2_interrupt(int irq, void *dev_id) |
170 | { | 179 | { |
171 | struct sh_mtu2_priv *p = dev_id; | 180 | struct sh_mtu2_channel *ch = dev_id; |
172 | 181 | ||
173 | /* acknowledge interrupt */ | 182 | /* acknowledge interrupt */ |
174 | sh_mtu2_read(p, TSR); | 183 | sh_mtu2_read(ch, TSR); |
175 | sh_mtu2_write(p, TSR, 0xfe); | 184 | sh_mtu2_write(ch, TSR, 0xfe); |
176 | 185 | ||
177 | /* notify clockevent layer */ | 186 | /* notify clockevent layer */ |
178 | p->ced.event_handler(&p->ced); | 187 | ch->ced.event_handler(&ch->ced); |
179 | return IRQ_HANDLED; | 188 | return IRQ_HANDLED; |
180 | } | 189 | } |
181 | 190 | ||
182 | static struct sh_mtu2_priv *ced_to_sh_mtu2(struct clock_event_device *ced) | 191 | static struct sh_mtu2_channel *ced_to_sh_mtu2(struct clock_event_device *ced) |
183 | { | 192 | { |
184 | return container_of(ced, struct sh_mtu2_priv, ced); | 193 | return container_of(ced, struct sh_mtu2_channel, ced); |
185 | } | 194 | } |
186 | 195 | ||
187 | static void sh_mtu2_clock_event_mode(enum clock_event_mode mode, | 196 | static void sh_mtu2_clock_event_mode(enum clock_event_mode mode, |
188 | struct clock_event_device *ced) | 197 | struct clock_event_device *ced) |
189 | { | 198 | { |
190 | struct sh_mtu2_priv *p = ced_to_sh_mtu2(ced); | 199 | struct sh_mtu2_channel *ch = ced_to_sh_mtu2(ced); |
191 | int disabled = 0; | 200 | int disabled = 0; |
192 | 201 | ||
193 | /* deal with old setting first */ | 202 | /* deal with old setting first */ |
194 | switch (ced->mode) { | 203 | switch (ced->mode) { |
195 | case CLOCK_EVT_MODE_PERIODIC: | 204 | case CLOCK_EVT_MODE_PERIODIC: |
196 | sh_mtu2_disable(p); | 205 | sh_mtu2_disable(ch); |
197 | disabled = 1; | 206 | disabled = 1; |
198 | break; | 207 | break; |
199 | default: | 208 | default: |
@@ -202,12 +211,13 @@ static void sh_mtu2_clock_event_mode(enum clock_event_mode mode, | |||
202 | 211 | ||
203 | switch (mode) { | 212 | switch (mode) { |
204 | case CLOCK_EVT_MODE_PERIODIC: | 213 | case CLOCK_EVT_MODE_PERIODIC: |
205 | dev_info(&p->pdev->dev, "used for periodic clock events\n"); | 214 | dev_info(&ch->mtu->pdev->dev, |
206 | sh_mtu2_enable(p); | 215 | "used for periodic clock events\n"); |
216 | sh_mtu2_enable(ch); | ||
207 | break; | 217 | break; |
208 | case CLOCK_EVT_MODE_UNUSED: | 218 | case CLOCK_EVT_MODE_UNUSED: |
209 | if (!disabled) | 219 | if (!disabled) |
210 | sh_mtu2_disable(p); | 220 | sh_mtu2_disable(ch); |
211 | break; | 221 | break; |
212 | case CLOCK_EVT_MODE_SHUTDOWN: | 222 | case CLOCK_EVT_MODE_SHUTDOWN: |
213 | default: | 223 | default: |
@@ -217,18 +227,18 @@ static void sh_mtu2_clock_event_mode(enum clock_event_mode mode, | |||
217 | 227 | ||
218 | static void sh_mtu2_clock_event_suspend(struct clock_event_device *ced) | 228 | static void sh_mtu2_clock_event_suspend(struct clock_event_device *ced) |
219 | { | 229 | { |
220 | pm_genpd_syscore_poweroff(&ced_to_sh_mtu2(ced)->pdev->dev); | 230 | pm_genpd_syscore_poweroff(&ced_to_sh_mtu2(ced)->mtu->pdev->dev); |
221 | } | 231 | } |
222 | 232 | ||
223 | static void sh_mtu2_clock_event_resume(struct clock_event_device *ced) | 233 | static void sh_mtu2_clock_event_resume(struct clock_event_device *ced) |
224 | { | 234 | { |
225 | pm_genpd_syscore_poweron(&ced_to_sh_mtu2(ced)->pdev->dev); | 235 | pm_genpd_syscore_poweron(&ced_to_sh_mtu2(ced)->mtu->pdev->dev); |
226 | } | 236 | } |
227 | 237 | ||
228 | static void sh_mtu2_register_clockevent(struct sh_mtu2_priv *p, | 238 | static void sh_mtu2_register_clockevent(struct sh_mtu2_channel *ch, |
229 | char *name, unsigned long rating) | 239 | char *name, unsigned long rating) |
230 | { | 240 | { |
231 | struct clock_event_device *ced = &p->ced; | 241 | struct clock_event_device *ced = &ch->ced; |
232 | int ret; | 242 | int ret; |
233 | 243 | ||
234 | memset(ced, 0, sizeof(*ced)); | 244 | memset(ced, 0, sizeof(*ced)); |
@@ -241,23 +251,24 @@ static void sh_mtu2_register_clockevent(struct sh_mtu2_priv *p, | |||
241 | ced->suspend = sh_mtu2_clock_event_suspend; | 251 | ced->suspend = sh_mtu2_clock_event_suspend; |
242 | ced->resume = sh_mtu2_clock_event_resume; | 252 | ced->resume = sh_mtu2_clock_event_resume; |
243 | 253 | ||
244 | dev_info(&p->pdev->dev, "used for clock events\n"); | 254 | dev_info(&ch->mtu->pdev->dev, "used for clock events\n"); |
245 | clockevents_register_device(ced); | 255 | clockevents_register_device(ced); |
246 | 256 | ||
247 | ret = request_irq(p->irq, sh_mtu2_interrupt, | 257 | ret = request_irq(ch->irq, sh_mtu2_interrupt, |
248 | IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING, | 258 | IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING, |
249 | dev_name(&p->pdev->dev), p); | 259 | dev_name(&ch->mtu->pdev->dev), ch); |
250 | if (ret) { | 260 | if (ret) { |
251 | dev_err(&p->pdev->dev, "failed to request irq %d\n", p->irq); | 261 | dev_err(&ch->mtu->pdev->dev, "failed to request irq %d\n", |
262 | ch->irq); | ||
252 | return; | 263 | return; |
253 | } | 264 | } |
254 | } | 265 | } |
255 | 266 | ||
256 | static int sh_mtu2_register(struct sh_mtu2_priv *p, char *name, | 267 | static int sh_mtu2_register(struct sh_mtu2_channel *ch, char *name, |
257 | unsigned long clockevent_rating) | 268 | unsigned long clockevent_rating) |
258 | { | 269 | { |
259 | if (clockevent_rating) | 270 | if (clockevent_rating) |
260 | sh_mtu2_register_clockevent(p, name, clockevent_rating); | 271 | sh_mtu2_register_clockevent(ch, name, clockevent_rating); |
261 | 272 | ||
262 | return 0; | 273 | return 0; |
263 | } | 274 | } |
@@ -285,8 +296,8 @@ static int sh_mtu2_setup(struct sh_mtu2_priv *p, struct platform_device *pdev) | |||
285 | goto err0; | 296 | goto err0; |
286 | } | 297 | } |
287 | 298 | ||
288 | p->irq = platform_get_irq(p->pdev, 0); | 299 | p->channel.irq = platform_get_irq(p->pdev, 0); |
289 | if (p->irq < 0) { | 300 | if (p->channel.irq < 0) { |
290 | dev_err(&p->pdev->dev, "failed to get irq\n"); | 301 | dev_err(&p->pdev->dev, "failed to get irq\n"); |
291 | goto err0; | 302 | goto err0; |
292 | } | 303 | } |
@@ -310,7 +321,9 @@ static int sh_mtu2_setup(struct sh_mtu2_priv *p, struct platform_device *pdev) | |||
310 | if (ret < 0) | 321 | if (ret < 0) |
311 | goto err2; | 322 | goto err2; |
312 | 323 | ||
313 | ret = sh_mtu2_register(p, (char *)dev_name(&p->pdev->dev), | 324 | p->channel.mtu = p; |
325 | |||
326 | ret = sh_mtu2_register(&p->channel, (char *)dev_name(&p->pdev->dev), | ||
314 | cfg->clockevent_rating); | 327 | cfg->clockevent_rating); |
315 | if (ret < 0) | 328 | if (ret < 0) |
316 | goto err3; | 329 | goto err3; |