author     Paul Mundt <lethal@linux-sh.org>  2010-03-10 02:26:25 -0500
committer  Paul Mundt <lethal@linux-sh.org>  2010-03-10 02:26:25 -0500
commit     214a607a4f5bf5a14dab0304ba350cdaf3916795 (patch)
tree       b277d47ffff520d368024c4a5c539fafcea88c3e /drivers/clocksource
parent     a636ee7fb35b731ba2b331f6294e809bb6be09c8 (diff)
clocksource: Use dev_name() universally across the SH drivers.
There is no need to copy in the name from the sh timer config now that
dev_name() is available early. We prefer the dev_name() variant for
consistent naming.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
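In outline, the conversion follows the sketch below (a hypothetical example_priv structure and example_announce() function standing in for the driver-private types in the patch; not code from the commit itself): the irqaction name and all messages are derived from the device via dev_name() and the dev_*() helpers instead of a name string copied out of struct sh_timer_config.

/* Minimal sketch of the pattern, assuming a driver-private struct that
 * carries its platform device as "pdev", as the SH timer drivers do.
 */
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>

struct example_priv {
	struct platform_device *pdev;
	struct irqaction irqaction;
};

static void example_announce(struct example_priv *p)
{
	/* was: p->irqaction.name = cfg->name; (copied from platform data) */
	p->irqaction.name = dev_name(&p->pdev->dev);

	/* was: pr_info("sh_cmt: %s used for clock events\n", ced->name); */
	/* dev_info() prefixes the device name, so no %s argument is needed. */
	dev_info(&p->pdev->dev, "used for clock events\n");
}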
Diffstat (limited to 'drivers/clocksource')
-rw-r--r--  drivers/clocksource/sh_cmt.c   32
-rw-r--r--  drivers/clocksource/sh_mtu2.c  24
-rw-r--r--  drivers/clocksource/sh_tmu.c   28
3 files changed, 37 insertions(+), 47 deletions(-)
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index 578595c4425d..a1d381171388 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -149,13 +149,12 @@ static void sh_cmt_start_stop_ch(struct sh_cmt_priv *p, int start)
 
 static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate)
 {
-	struct sh_timer_config *cfg = p->pdev->dev.platform_data;
 	int ret;
 
 	/* enable clock */
 	ret = clk_enable(p->clk);
 	if (ret) {
-		pr_err("sh_cmt: cannot enable clock \"%s\"\n", cfg->clk);
+		dev_err(&p->pdev->dev, "cannot enable clock\n");
 		return ret;
 	}
 
@@ -278,7 +277,7 @@ static void sh_cmt_clock_event_program_verify(struct sh_cmt_priv *p,
 			delay = 1;
 
 		if (!delay)
-			pr_warning("sh_cmt: too long delay\n");
+			dev_warn(&p->pdev->dev, "too long delay\n");
 
 	} while (delay);
 }
@@ -288,7 +287,7 @@ static void sh_cmt_set_next(struct sh_cmt_priv *p, unsigned long delta)
 	unsigned long flags;
 
 	if (delta > p->max_match_value)
-		pr_warning("sh_cmt: delta out of range\n");
+		dev_warn(&p->pdev->dev, "delta out of range\n");
 
 	spin_lock_irqsave(&p->lock, flags);
 	p->next_match_value = delta;
@@ -450,7 +449,7 @@ static int sh_cmt_register_clocksource(struct sh_cmt_priv *p,
 	cs->resume = sh_cmt_clocksource_resume;
 	cs->mask = CLOCKSOURCE_MASK(sizeof(unsigned long) * 8);
 	cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
-	pr_info("sh_cmt: %s used as clock source\n", cs->name);
+	dev_info(&p->pdev->dev, "used as clock source\n");
 	clocksource_register(cs);
 	return 0;
 }
@@ -496,13 +495,11 @@ static void sh_cmt_clock_event_mode(enum clock_event_mode mode,
 
 	switch (mode) {
 	case CLOCK_EVT_MODE_PERIODIC:
-		pr_info("sh_cmt: %s used for periodic clock events\n",
-			ced->name);
+		dev_info(&p->pdev->dev, "used for periodic clock events\n");
 		sh_cmt_clock_event_start(p, 1);
 		break;
 	case CLOCK_EVT_MODE_ONESHOT:
-		pr_info("sh_cmt: %s used for oneshot clock events\n",
-			ced->name);
+		dev_info(&p->pdev->dev, "used for oneshot clock events\n");
 		sh_cmt_clock_event_start(p, 0);
 		break;
 	case CLOCK_EVT_MODE_SHUTDOWN:
@@ -543,7 +540,7 @@ static void sh_cmt_register_clockevent(struct sh_cmt_priv *p,
 	ced->set_next_event = sh_cmt_clock_event_next;
 	ced->set_mode = sh_cmt_clock_event_mode;
 
-	pr_info("sh_cmt: %s used for clock events\n", ced->name);
+	dev_info(&p->pdev->dev, "used for clock events\n");
 	clockevents_register_device(ced);
 }
 
@@ -600,12 +597,12 @@ static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
 	/* map memory, let mapbase point to our channel */
 	p->mapbase = ioremap_nocache(res->start, resource_size(res));
 	if (p->mapbase == NULL) {
-		pr_err("sh_cmt: failed to remap I/O memory\n");
+		dev_err(&p->pdev->dev, "failed to remap I/O memory\n");
 		goto err0;
 	}
 
 	/* request irq using setup_irq() (too early for request_irq()) */
-	p->irqaction.name = cfg->name;
+	p->irqaction.name = dev_name(&p->pdev->dev);
 	p->irqaction.handler = sh_cmt_interrupt;
 	p->irqaction.dev_id = p;
 	p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL;
@@ -613,7 +610,7 @@ static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
 	/* get hold of clock */
 	p->clk = clk_get(&p->pdev->dev, cfg->clk);
 	if (IS_ERR(p->clk)) {
-		pr_err("sh_cmt: cannot get clock \"%s\"\n", cfg->clk);
+		dev_err(&p->pdev->dev, "cannot get clock\n");
 		ret = PTR_ERR(p->clk);
 		goto err1;
 	}
@@ -628,17 +625,17 @@ static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
 		p->clear_bits = ~0xc000;
 	}
 
-	ret = sh_cmt_register(p, cfg->name,
+	ret = sh_cmt_register(p, (char *)dev_name(&p->pdev->dev),
 			      cfg->clockevent_rating,
 			      cfg->clocksource_rating);
 	if (ret) {
-		pr_err("sh_cmt: registration failed\n");
+		dev_err(&p->pdev->dev, "registration failed\n");
 		goto err1;
 	}
 
 	ret = setup_irq(irq, &p->irqaction);
 	if (ret) {
-		pr_err("sh_cmt: failed to request irq %d\n", irq);
+		dev_err(&p->pdev->dev, "failed to request irq %d\n", irq);
 		goto err1;
 	}
 
@@ -653,11 +650,10 @@ err0:
 static int __devinit sh_cmt_probe(struct platform_device *pdev)
 {
 	struct sh_cmt_priv *p = platform_get_drvdata(pdev);
-	struct sh_timer_config *cfg = pdev->dev.platform_data;
 	int ret;
 
 	if (p) {
-		pr_info("sh_cmt: %s kept as earlytimer\n", cfg->name);
+		dev_info(&pdev->dev, "kept as earlytimer\n");
 		return 0;
 	}
 
diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c
index 4c8a759e60cd..8ce355572d16 100644
--- a/drivers/clocksource/sh_mtu2.c
+++ b/drivers/clocksource/sh_mtu2.c
@@ -118,13 +118,12 @@ static void sh_mtu2_start_stop_ch(struct sh_mtu2_priv *p, int start)
 
 static int sh_mtu2_enable(struct sh_mtu2_priv *p)
 {
-	struct sh_timer_config *cfg = p->pdev->dev.platform_data;
 	int ret;
 
 	/* enable clock */
 	ret = clk_enable(p->clk);
 	if (ret) {
-		pr_err("sh_mtu2: cannot enable clock \"%s\"\n", cfg->clk);
+		dev_err(&p->pdev->dev, "cannot enable clock\n");
 		return ret;
 	}
 
@@ -193,8 +192,7 @@ static void sh_mtu2_clock_event_mode(enum clock_event_mode mode,
 
 	switch (mode) {
 	case CLOCK_EVT_MODE_PERIODIC:
-		pr_info("sh_mtu2: %s used for periodic clock events\n",
-			ced->name);
+		dev_info(&p->pdev->dev, "used for periodic clock events\n");
 		sh_mtu2_enable(p);
 		break;
 	case CLOCK_EVT_MODE_UNUSED:
@@ -221,13 +219,13 @@ static void sh_mtu2_register_clockevent(struct sh_mtu2_priv *p,
 	ced->cpumask = cpumask_of(0);
 	ced->set_mode = sh_mtu2_clock_event_mode;
 
-	pr_info("sh_mtu2: %s used for clock events\n", ced->name);
+	dev_info(&p->pdev->dev, "used for clock events\n");
 	clockevents_register_device(ced);
 
 	ret = setup_irq(p->irqaction.irq, &p->irqaction);
 	if (ret) {
-		pr_err("sh_mtu2: failed to request irq %d\n",
-		       p->irqaction.irq);
+		dev_err(&p->pdev->dev, "failed to request irq %d\n",
+			p->irqaction.irq);
 		return;
 	}
 }
@@ -273,12 +271,12 @@ static int sh_mtu2_setup(struct sh_mtu2_priv *p, struct platform_device *pdev)
 	/* map memory, let mapbase point to our channel */
 	p->mapbase = ioremap_nocache(res->start, resource_size(res));
 	if (p->mapbase == NULL) {
-		pr_err("sh_mtu2: failed to remap I/O memory\n");
+		dev_err(&p->pdev->dev, "failed to remap I/O memory\n");
 		goto err0;
 	}
 
 	/* setup data for setup_irq() (too early for request_irq()) */
-	p->irqaction.name = cfg->name;
+	p->irqaction.name = dev_name(&p->pdev->dev);
 	p->irqaction.handler = sh_mtu2_interrupt;
 	p->irqaction.dev_id = p;
 	p->irqaction.irq = irq;
@@ -287,12 +285,13 @@ static int sh_mtu2_setup(struct sh_mtu2_priv *p, struct platform_device *pdev)
 	/* get hold of clock */
 	p->clk = clk_get(&p->pdev->dev, cfg->clk);
 	if (IS_ERR(p->clk)) {
-		pr_err("sh_mtu2: cannot get clock \"%s\"\n", cfg->clk);
+		dev_err(&p->pdev->dev, "cannot get clock\n");
 		ret = PTR_ERR(p->clk);
 		goto err1;
 	}
 
-	return sh_mtu2_register(p, cfg->name, cfg->clockevent_rating);
+	return sh_mtu2_register(p, (char *)dev_name(&p->pdev->dev),
+				cfg->clockevent_rating);
  err1:
 	iounmap(p->mapbase);
  err0:
@@ -302,11 +301,10 @@ static int sh_mtu2_setup(struct sh_mtu2_priv *p, struct platform_device *pdev)
 static int __devinit sh_mtu2_probe(struct platform_device *pdev)
 {
 	struct sh_mtu2_priv *p = platform_get_drvdata(pdev);
-	struct sh_timer_config *cfg = pdev->dev.platform_data;
 	int ret;
 
 	if (p) {
-		pr_info("sh_mtu2: %s kept as earlytimer\n", cfg->name);
+		dev_info(&pdev->dev, "kept as earlytimer\n");
 		return 0;
 	}
 
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
index 961f5b5ef6a3..34239453ebaf 100644
--- a/drivers/clocksource/sh_tmu.c
+++ b/drivers/clocksource/sh_tmu.c
@@ -106,13 +106,12 @@ static void sh_tmu_start_stop_ch(struct sh_tmu_priv *p, int start)
 
 static int sh_tmu_enable(struct sh_tmu_priv *p)
 {
-	struct sh_timer_config *cfg = p->pdev->dev.platform_data;
 	int ret;
 
 	/* enable clock */
 	ret = clk_enable(p->clk);
 	if (ret) {
-		pr_err("sh_tmu: cannot enable clock \"%s\"\n", cfg->clk);
+		dev_err(&p->pdev->dev, "cannot enable clock\n");
 		return ret;
 	}
 
@@ -228,7 +227,7 @@ static int sh_tmu_register_clocksource(struct sh_tmu_priv *p,
 	cs->disable = sh_tmu_clocksource_disable;
 	cs->mask = CLOCKSOURCE_MASK(32);
 	cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
-	pr_info("sh_tmu: %s used as clock source\n", cs->name);
+	dev_info(&p->pdev->dev, "used as clock source\n");
 	clocksource_register(cs);
 	return 0;
 }
@@ -276,13 +275,11 @@ static void sh_tmu_clock_event_mode(enum clock_event_mode mode,
 
 	switch (mode) {
 	case CLOCK_EVT_MODE_PERIODIC:
-		pr_info("sh_tmu: %s used for periodic clock events\n",
-			ced->name);
+		dev_info(&p->pdev->dev, "used for periodic clock events\n");
 		sh_tmu_clock_event_start(p, 1);
 		break;
 	case CLOCK_EVT_MODE_ONESHOT:
-		pr_info("sh_tmu: %s used for oneshot clock events\n",
-			ced->name);
+		dev_info(&p->pdev->dev, "used for oneshot clock events\n");
 		sh_tmu_clock_event_start(p, 0);
 		break;
 	case CLOCK_EVT_MODE_UNUSED:
@@ -323,13 +320,13 @@ static void sh_tmu_register_clockevent(struct sh_tmu_priv *p,
 	ced->set_next_event = sh_tmu_clock_event_next;
 	ced->set_mode = sh_tmu_clock_event_mode;
 
-	pr_info("sh_tmu: %s used for clock events\n", ced->name);
+	dev_info(&p->pdev->dev, "used for clock events\n");
 	clockevents_register_device(ced);
 
 	ret = setup_irq(p->irqaction.irq, &p->irqaction);
 	if (ret) {
-		pr_err("sh_tmu: failed to request irq %d\n",
-		       p->irqaction.irq);
+		dev_err(&p->pdev->dev, "failed to request irq %d\n",
+			p->irqaction.irq);
 		return;
 	}
 }
@@ -378,12 +375,12 @@ static int sh_tmu_setup(struct sh_tmu_priv *p, struct platform_device *pdev)
 	/* map memory, let mapbase point to our channel */
 	p->mapbase = ioremap_nocache(res->start, resource_size(res));
 	if (p->mapbase == NULL) {
-		pr_err("sh_tmu: failed to remap I/O memory\n");
+		dev_err(&p->pdev->dev, "failed to remap I/O memory\n");
 		goto err0;
 	}
 
 	/* setup data for setup_irq() (too early for request_irq()) */
-	p->irqaction.name = cfg->name;
+	p->irqaction.name = dev_name(&p->pdev->dev);
 	p->irqaction.handler = sh_tmu_interrupt;
 	p->irqaction.dev_id = p;
 	p->irqaction.irq = irq;
@@ -392,12 +389,12 @@ static int sh_tmu_setup(struct sh_tmu_priv *p, struct platform_device *pdev)
 	/* get hold of clock */
 	p->clk = clk_get(&p->pdev->dev, cfg->clk);
 	if (IS_ERR(p->clk)) {
-		pr_err("sh_tmu: cannot get clock \"%s\"\n", cfg->clk);
+		dev_err(&p->pdev->dev, "cannot get clock\n");
 		ret = PTR_ERR(p->clk);
 		goto err1;
 	}
 
-	return sh_tmu_register(p, cfg->name,
+	return sh_tmu_register(p, (char *)dev_name(&p->pdev->dev),
 			       cfg->clockevent_rating,
 			       cfg->clocksource_rating);
  err1:
@@ -409,11 +406,10 @@ static int sh_tmu_setup(struct sh_tmu_priv *p, struct platform_device *pdev)
 static int __devinit sh_tmu_probe(struct platform_device *pdev)
 {
 	struct sh_tmu_priv *p = platform_get_drvdata(pdev);
-	struct sh_timer_config *cfg = pdev->dev.platform_data;
 	int ret;
 
 	if (p) {
-		pr_info("sh_tmu: %s kept as earlytimer\n", cfg->name);
+		dev_info(&pdev->dev, "kept as earlytimer\n");
 		return 0;
 	}
 