author    Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>  2014-02-17 05:27:49 -0500
committer Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>  2014-04-16 06:03:25 -0400
commit    276bee05d8b72e98d530b55161e0a2131da99f58 (patch)
tree      aa0e17853a543a9df4cafebcda4aa93d1f51afaf
parent    13931f8065fabff117828999e007f62a5cabea34 (diff)
clocksource: sh_mtu2: Use request_irq() instead of setup_irq()
The driver claims it needs to register an interrupt handler too early for
request_irq(). This might have been true in the past, but the only meaningful
difference between request_irq() and setup_irq() today is an additional
kzalloc() call in request_irq(). As the driver calls kmalloc() itself, we know
that the slab allocator is available, so we can switch to request_irq().

Signed-off-by: Laurent Pinchart <laurent.pinchart+renesas@ideasonboard.com>
Tested-by: Wolfram Sang <wsa@sang-engineering.com>
 drivers/clocksource/sh_mtu2.c | 22 ++++++++--------------
 1 file changed, 8 insertions(+), 14 deletions(-)
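To make the reasoning in the commit message concrete: request_irq() is essentially a kzalloc() of a struct irqaction followed by the same registration path that setup_irq() uses. The hypothetical request_irq_sketch() below is a simplified illustration of that shape, not the actual kernel implementation (the real code lives in kernel/irq/manage.c and also handles threaded handlers, shared IRQs and error paths):

#include <linux/interrupt.h>
#include <linux/slab.h>

/* Simplified sketch of what request_irq() boils down to. */
static int request_irq_sketch(unsigned int irq, irq_handler_t handler,
			      unsigned long flags, const char *name, void *dev)
{
	struct irqaction *action;

	/* The extra allocation that a direct setup_irq() call avoids. */
	action = kzalloc(sizeof(*action), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->flags = flags;
	action->name = name;
	action->dev_id = dev;

	/* Same core registration path as setup_irq(). */
	return setup_irq(irq, action);
}

Since the allocation is the only meaningful difference, a driver that already relies on the slab allocator (as sh_mtu2 does via kmalloc()) gains nothing from open-coding a static struct irqaction.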
diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c
index e30d76e0a6fa..77992e081205 100644
--- a/drivers/clocksource/sh_mtu2.c
+++ b/drivers/clocksource/sh_mtu2.c
@@ -37,7 +37,7 @@
 struct sh_mtu2_priv {
 	void __iomem *mapbase;
 	struct clk *clk;
-	struct irqaction irqaction;
+	int irq;
 	struct platform_device *pdev;
 	unsigned long rate;
 	unsigned long periodic;
@@ -244,10 +244,11 @@ static void sh_mtu2_register_clockevent(struct sh_mtu2_priv *p,
 	dev_info(&p->pdev->dev, "used for clock events\n");
 	clockevents_register_device(ced);
 
-	ret = setup_irq(p->irqaction.irq, &p->irqaction);
+	ret = request_irq(p->irq, sh_mtu2_interrupt,
+			  IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
+			  dev_name(&p->pdev->dev), p);
 	if (ret) {
-		dev_err(&p->pdev->dev, "failed to request irq %d\n",
-			p->irqaction.irq);
+		dev_err(&p->pdev->dev, "failed to request irq %d\n", p->irq);
 		return;
 	}
 }
@@ -265,7 +266,7 @@ static int sh_mtu2_setup(struct sh_mtu2_priv *p, struct platform_device *pdev)
 {
 	struct sh_timer_config *cfg = pdev->dev.platform_data;
 	struct resource *res;
-	int irq, ret;
+	int ret;
 	ret = -ENXIO;
 
 	memset(p, 0, sizeof(*p));
@@ -284,8 +285,8 @@ static int sh_mtu2_setup(struct sh_mtu2_priv *p, struct platform_device *pdev)
 		goto err0;
 	}
 
-	irq = platform_get_irq(p->pdev, 0);
-	if (irq < 0) {
+	p->irq = platform_get_irq(p->pdev, 0);
+	if (p->irq < 0) {
 		dev_err(&p->pdev->dev, "failed to get irq\n");
 		goto err0;
 	}
@@ -297,13 +298,6 @@ static int sh_mtu2_setup(struct sh_mtu2_priv *p, struct platform_device *pdev)
 		goto err0;
 	}
 
-	/* setup data for setup_irq() (too early for request_irq()) */
-	p->irqaction.name = dev_name(&p->pdev->dev);
-	p->irqaction.handler = sh_mtu2_interrupt;
-	p->irqaction.dev_id = p;
-	p->irqaction.irq = irq;
-	p->irqaction.flags = IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING;
-
 	/* get hold of clock */
 	p->clk = clk_get(&p->pdev->dev, "mtu2_fck");
 	if (IS_ERR(p->clk)) {