author    Linus Torvalds <torvalds@linux-foundation.org>  2010-05-19 14:36:03 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2010-05-19 14:36:03 -0400
commit    7c7cbaf5b82c418cd3b1dcf718f71d0e6057e639 (patch)
tree      da9aaa5a246af464b1e10d88618c1cad07b76314 /drivers
parent    ba0234ec35127fe21d373db53cbaf9fe20620cb6 (diff)
parent    4d0956b8f597aac10208ca763f8fe641fde16aab (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/lethal/sh-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/lethal/sh-2.6: (127 commits)
  sh: update defconfigs.
  sh: Fix up the NUMA build for recent LMB changes.
  sh64: provide a stub per_cpu_trap_init() definition.
  sh: fix up CONFIG_KEXEC=n build.
  sh: fixup the docbook paths for clock framework shuffling.
  driver core: Early dev_name() depends on slab_is_available().
  sh: simplify WARN usage in SH clock driver
  sh: Check return value of clk_get on ms7724
  sh: Check return value of clk_get on ecovec24
  sh: move sh clock-cpg.c contents to drivers/sh/clk-cpg.c
  sh: move sh clock.c contents to drivers/sh/clk.
  sh: move sh asm/clock.h contents to linux/sh_clk.h V2
  sh: remove unused clock lookup
  sh: switch boards to clkdev
  sh: switch sh4-202 to clkdev
  sh: switch shx3 to clkdev
  sh: switch sh7757 to clkdev
  sh: switch sh7763 to clkdev
  sh: switch sh7780 to clkdev
  sh: switch sh7786 to clkdev
  ...
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/base/platform.c       |  20
-rw-r--r--  drivers/clocksource/sh_cmt.c  |  45
-rw-r--r--  drivers/clocksource/sh_mtu2.c |  37
-rw-r--r--  drivers/clocksource/sh_tmu.c  |  41
-rw-r--r--  drivers/dma/shdma.c           |  25
-rw-r--r--  drivers/dma/shdma.h           |   4
-rw-r--r--  drivers/serial/sh-sci.c       | 189
-rw-r--r--  drivers/sh/Kconfig            |  24
-rw-r--r--  drivers/sh/Makefile           |   2
-rw-r--r--  drivers/sh/clk-cpg.c          | 298
-rw-r--r--  drivers/sh/clk.c              | 545
-rw-r--r--  drivers/sh/intc.c             | 333
12 files changed, 1396 insertions(+), 167 deletions(-)
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index c5fbe198fbd..765bcf0df3b 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -1254,6 +1254,26 @@ static int __init early_platform_driver_probe_id(char *class_str,
 		}
 
 		if (match) {
+			/*
+			 * Set up a sensible init_name to enable
+			 * dev_name() and others to be used before the
+			 * rest of the driver core is initialized.
+			 */
+			if (!match->dev.init_name && slab_is_available()) {
+				if (match->id != -1)
+					match->dev.init_name =
+						kasprintf(GFP_KERNEL, "%s.%d",
+							  match->name,
+							  match->id);
+				else
+					match->dev.init_name =
+						kasprintf(GFP_KERNEL, "%s",
+							  match->name);
+
+				if (!match->dev.init_name)
+					return -ENOMEM;
+			}
+
 			if (epdrv->pdrv->probe(match))
 				pr_warning("%s: unable to probe %s early.\n",
 					   class_str, match->name);
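
The hunk above is what lets the sh drivers below use dev_name() before the
driver core is fully initialized. A rough illustration of the naming rule it
implements (plain C with a hypothetical stand-in struct; not kernel code):

	#include <stdio.h>

	/* Hypothetical stand-in for the two platform-device fields used
	 * by the hunk above. */
	struct early_pdev {
		const char *name;	/* e.g. "sh_cmt" */
		int id;			/* -1 means "no instance number" */
	};

	/* Mirrors the kasprintf() logic: "name.id" if id != -1, else "name". */
	static void print_init_name(const struct early_pdev *p)
	{
		if (p->id != -1)
			printf("%s.%d\n", p->name, p->id);	/* "sh_cmt.0" */
		else
			printf("%s\n", p->name);		/* "sh_cmt" */
	}
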
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index 744f748cc84..f6677cb1978 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -150,13 +150,12 @@ static void sh_cmt_start_stop_ch(struct sh_cmt_priv *p, int start)
 
 static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate)
 {
-	struct sh_timer_config *cfg = p->pdev->dev.platform_data;
 	int ret;
 
 	/* enable clock */
 	ret = clk_enable(p->clk);
 	if (ret) {
-		pr_err("sh_cmt: cannot enable clock \"%s\"\n", cfg->clk);
+		dev_err(&p->pdev->dev, "cannot enable clock\n");
 		return ret;
 	}
 
@@ -279,7 +278,7 @@ static void sh_cmt_clock_event_program_verify(struct sh_cmt_priv *p,
 			delay = 1;
 
 		if (!delay)
-			pr_warning("sh_cmt: too long delay\n");
+			dev_warn(&p->pdev->dev, "too long delay\n");
 
 	} while (delay);
 }
@@ -289,7 +288,7 @@ static void sh_cmt_set_next(struct sh_cmt_priv *p, unsigned long delta)
 	unsigned long flags;
 
 	if (delta > p->max_match_value)
-		pr_warning("sh_cmt: delta out of range\n");
+		dev_warn(&p->pdev->dev, "delta out of range\n");
 
 	spin_lock_irqsave(&p->lock, flags);
 	p->next_match_value = delta;
@@ -451,7 +450,7 @@ static int sh_cmt_register_clocksource(struct sh_cmt_priv *p,
 	cs->resume = sh_cmt_clocksource_resume;
 	cs->mask = CLOCKSOURCE_MASK(sizeof(unsigned long) * 8);
 	cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
-	pr_info("sh_cmt: %s used as clock source\n", cs->name);
+	dev_info(&p->pdev->dev, "used as clock source\n");
 	clocksource_register(cs);
 	return 0;
 }
@@ -497,13 +496,11 @@ static void sh_cmt_clock_event_mode(enum clock_event_mode mode,
 
 	switch (mode) {
 	case CLOCK_EVT_MODE_PERIODIC:
-		pr_info("sh_cmt: %s used for periodic clock events\n",
-			ced->name);
+		dev_info(&p->pdev->dev, "used for periodic clock events\n");
 		sh_cmt_clock_event_start(p, 1);
 		break;
 	case CLOCK_EVT_MODE_ONESHOT:
-		pr_info("sh_cmt: %s used for oneshot clock events\n",
-			ced->name);
+		dev_info(&p->pdev->dev, "used for oneshot clock events\n");
 		sh_cmt_clock_event_start(p, 0);
 		break;
 	case CLOCK_EVT_MODE_SHUTDOWN:
@@ -544,7 +541,7 @@ static void sh_cmt_register_clockevent(struct sh_cmt_priv *p,
 	ced->set_next_event = sh_cmt_clock_event_next;
 	ced->set_mode = sh_cmt_clock_event_mode;
 
-	pr_info("sh_cmt: %s used for clock events\n", ced->name);
+	dev_info(&p->pdev->dev, "used for clock events\n");
 	clockevents_register_device(ced);
 }
 
@@ -601,22 +598,27 @@ static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
 	/* map memory, let mapbase point to our channel */
 	p->mapbase = ioremap_nocache(res->start, resource_size(res));
 	if (p->mapbase == NULL) {
-		pr_err("sh_cmt: failed to remap I/O memory\n");
+		dev_err(&p->pdev->dev, "failed to remap I/O memory\n");
 		goto err0;
 	}
 
 	/* request irq using setup_irq() (too early for request_irq()) */
-	p->irqaction.name = cfg->name;
+	p->irqaction.name = dev_name(&p->pdev->dev);
 	p->irqaction.handler = sh_cmt_interrupt;
 	p->irqaction.dev_id = p;
-	p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL;
+	p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | \
+			     IRQF_IRQPOLL | IRQF_NOBALANCING;
 
 	/* get hold of clock */
-	p->clk = clk_get(&p->pdev->dev, cfg->clk);
+	p->clk = clk_get(&p->pdev->dev, "cmt_fck");
 	if (IS_ERR(p->clk)) {
-		pr_err("sh_cmt: cannot get clock \"%s\"\n", cfg->clk);
-		ret = PTR_ERR(p->clk);
-		goto err1;
+		dev_warn(&p->pdev->dev, "using deprecated clock lookup\n");
+		p->clk = clk_get(&p->pdev->dev, cfg->clk);
+		if (IS_ERR(p->clk)) {
+			dev_err(&p->pdev->dev, "cannot get clock\n");
+			ret = PTR_ERR(p->clk);
+			goto err1;
+		}
 	}
 
 	if (resource_size(res) == 6) {
@@ -629,17 +631,17 @@ static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev)
 		p->clear_bits = ~0xc000;
 	}
 
-	ret = sh_cmt_register(p, cfg->name,
+	ret = sh_cmt_register(p, (char *)dev_name(&p->pdev->dev),
 			      cfg->clockevent_rating,
 			      cfg->clocksource_rating);
 	if (ret) {
-		pr_err("sh_cmt: registration failed\n");
+		dev_err(&p->pdev->dev, "registration failed\n");
 		goto err1;
 	}
 
 	ret = setup_irq(irq, &p->irqaction);
 	if (ret) {
-		pr_err("sh_cmt: failed to request irq %d\n", irq);
+		dev_err(&p->pdev->dev, "failed to request irq %d\n", irq);
 		goto err1;
 	}
 
@@ -654,11 +656,10 @@ err0:
 static int __devinit sh_cmt_probe(struct platform_device *pdev)
 {
 	struct sh_cmt_priv *p = platform_get_drvdata(pdev);
-	struct sh_timer_config *cfg = pdev->dev.platform_data;
 	int ret;
 
 	if (p) {
-		pr_info("sh_cmt: %s kept as earlytimer\n", cfg->name);
+		dev_info(&pdev->dev, "kept as earlytimer\n");
 		return 0;
 	}
 
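
All three timer drivers in this series (sh_cmt above, sh_mtu2 and sh_tmu
below) adopt the same clock lookup: try the new conventional clkdev name
first, then fall back to the deprecated platform-data name with a warning.
A condensed sketch of that pattern (helper name invented; not kernel code):

	static struct clk *get_timer_clock(struct platform_device *pdev,
					   const char *new_id,
					   const char *legacy_id)
	{
		struct clk *clk = clk_get(&pdev->dev, new_id);

		if (IS_ERR(clk)) {
			dev_warn(&pdev->dev, "using deprecated clock lookup\n");
			clk = clk_get(&pdev->dev, legacy_id);
		}

		return clk;	/* may still be an error; the caller checks */
	}
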
diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c
index 5fb78bfd73b..ef7a5be8a09 100644
--- a/drivers/clocksource/sh_mtu2.c
+++ b/drivers/clocksource/sh_mtu2.c
@@ -119,13 +119,12 @@ static void sh_mtu2_start_stop_ch(struct sh_mtu2_priv *p, int start)
 
 static int sh_mtu2_enable(struct sh_mtu2_priv *p)
 {
-	struct sh_timer_config *cfg = p->pdev->dev.platform_data;
 	int ret;
 
 	/* enable clock */
 	ret = clk_enable(p->clk);
 	if (ret) {
-		pr_err("sh_mtu2: cannot enable clock \"%s\"\n", cfg->clk);
+		dev_err(&p->pdev->dev, "cannot enable clock\n");
 		return ret;
 	}
 
@@ -194,8 +193,7 @@ static void sh_mtu2_clock_event_mode(enum clock_event_mode mode,
 
 	switch (mode) {
 	case CLOCK_EVT_MODE_PERIODIC:
-		pr_info("sh_mtu2: %s used for periodic clock events\n",
-			ced->name);
+		dev_info(&p->pdev->dev, "used for periodic clock events\n");
 		sh_mtu2_enable(p);
 		break;
 	case CLOCK_EVT_MODE_UNUSED:
@@ -222,13 +220,13 @@ static void sh_mtu2_register_clockevent(struct sh_mtu2_priv *p,
 	ced->cpumask = cpumask_of(0);
 	ced->set_mode = sh_mtu2_clock_event_mode;
 
-	pr_info("sh_mtu2: %s used for clock events\n", ced->name);
+	dev_info(&p->pdev->dev, "used for clock events\n");
 	clockevents_register_device(ced);
 
 	ret = setup_irq(p->irqaction.irq, &p->irqaction);
 	if (ret) {
-		pr_err("sh_mtu2: failed to request irq %d\n",
-		       p->irqaction.irq);
+		dev_err(&p->pdev->dev, "failed to request irq %d\n",
+			p->irqaction.irq);
 		return;
 	}
 }
@@ -274,26 +272,32 @@ static int sh_mtu2_setup(struct sh_mtu2_priv *p, struct platform_device *pdev)
 	/* map memory, let mapbase point to our channel */
 	p->mapbase = ioremap_nocache(res->start, resource_size(res));
 	if (p->mapbase == NULL) {
-		pr_err("sh_mtu2: failed to remap I/O memory\n");
+		dev_err(&p->pdev->dev, "failed to remap I/O memory\n");
 		goto err0;
 	}
 
 	/* setup data for setup_irq() (too early for request_irq()) */
-	p->irqaction.name = cfg->name;
+	p->irqaction.name = dev_name(&p->pdev->dev);
 	p->irqaction.handler = sh_mtu2_interrupt;
 	p->irqaction.dev_id = p;
 	p->irqaction.irq = irq;
-	p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL;
+	p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | \
+			     IRQF_IRQPOLL | IRQF_NOBALANCING;
 
 	/* get hold of clock */
-	p->clk = clk_get(&p->pdev->dev, cfg->clk);
+	p->clk = clk_get(&p->pdev->dev, "mtu2_fck");
 	if (IS_ERR(p->clk)) {
-		pr_err("sh_mtu2: cannot get clock \"%s\"\n", cfg->clk);
-		ret = PTR_ERR(p->clk);
-		goto err1;
+		dev_warn(&p->pdev->dev, "using deprecated clock lookup\n");
+		p->clk = clk_get(&p->pdev->dev, cfg->clk);
+		if (IS_ERR(p->clk)) {
+			dev_err(&p->pdev->dev, "cannot get clock\n");
+			ret = PTR_ERR(p->clk);
+			goto err1;
+		}
 	}
 
-	return sh_mtu2_register(p, cfg->name, cfg->clockevent_rating);
+	return sh_mtu2_register(p, (char *)dev_name(&p->pdev->dev),
+				cfg->clockevent_rating);
 err1:
 	iounmap(p->mapbase);
 err0:
@@ -303,11 +307,10 @@ static int sh_mtu2_setup(struct sh_mtu2_priv *p, struct platform_device *pdev)
 static int __devinit sh_mtu2_probe(struct platform_device *pdev)
 {
 	struct sh_mtu2_priv *p = platform_get_drvdata(pdev);
-	struct sh_timer_config *cfg = pdev->dev.platform_data;
 	int ret;
 
 	if (p) {
-		pr_info("sh_mtu2: %s kept as earlytimer\n", cfg->name);
+		dev_info(&pdev->dev, "kept as earlytimer\n");
 		return 0;
 	}
 
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
index fc9ff1e5b77..8e44e14ec4c 100644
--- a/drivers/clocksource/sh_tmu.c
+++ b/drivers/clocksource/sh_tmu.c
@@ -107,13 +107,12 @@ static void sh_tmu_start_stop_ch(struct sh_tmu_priv *p, int start)
 
 static int sh_tmu_enable(struct sh_tmu_priv *p)
 {
-	struct sh_timer_config *cfg = p->pdev->dev.platform_data;
 	int ret;
 
 	/* enable clock */
 	ret = clk_enable(p->clk);
 	if (ret) {
-		pr_err("sh_tmu: cannot enable clock \"%s\"\n", cfg->clk);
+		dev_err(&p->pdev->dev, "cannot enable clock\n");
 		return ret;
 	}
 
@@ -229,7 +228,7 @@ static int sh_tmu_register_clocksource(struct sh_tmu_priv *p,
 	cs->disable = sh_tmu_clocksource_disable;
 	cs->mask = CLOCKSOURCE_MASK(32);
 	cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
-	pr_info("sh_tmu: %s used as clock source\n", cs->name);
+	dev_info(&p->pdev->dev, "used as clock source\n");
 	clocksource_register(cs);
 	return 0;
 }
@@ -277,13 +276,11 @@ static void sh_tmu_clock_event_mode(enum clock_event_mode mode,
 
 	switch (mode) {
 	case CLOCK_EVT_MODE_PERIODIC:
-		pr_info("sh_tmu: %s used for periodic clock events\n",
-			ced->name);
+		dev_info(&p->pdev->dev, "used for periodic clock events\n");
 		sh_tmu_clock_event_start(p, 1);
 		break;
 	case CLOCK_EVT_MODE_ONESHOT:
-		pr_info("sh_tmu: %s used for oneshot clock events\n",
-			ced->name);
+		dev_info(&p->pdev->dev, "used for oneshot clock events\n");
 		sh_tmu_clock_event_start(p, 0);
 		break;
 	case CLOCK_EVT_MODE_UNUSED:
@@ -324,13 +321,13 @@ static void sh_tmu_register_clockevent(struct sh_tmu_priv *p,
 	ced->set_next_event = sh_tmu_clock_event_next;
 	ced->set_mode = sh_tmu_clock_event_mode;
 
-	pr_info("sh_tmu: %s used for clock events\n", ced->name);
+	dev_info(&p->pdev->dev, "used for clock events\n");
 	clockevents_register_device(ced);
 
 	ret = setup_irq(p->irqaction.irq, &p->irqaction);
 	if (ret) {
-		pr_err("sh_tmu: failed to request irq %d\n",
-		       p->irqaction.irq);
+		dev_err(&p->pdev->dev, "failed to request irq %d\n",
+			p->irqaction.irq);
 		return;
 	}
 }
@@ -379,26 +376,31 @@ static int sh_tmu_setup(struct sh_tmu_priv *p, struct platform_device *pdev)
 	/* map memory, let mapbase point to our channel */
 	p->mapbase = ioremap_nocache(res->start, resource_size(res));
 	if (p->mapbase == NULL) {
-		pr_err("sh_tmu: failed to remap I/O memory\n");
+		dev_err(&p->pdev->dev, "failed to remap I/O memory\n");
 		goto err0;
 	}
 
 	/* setup data for setup_irq() (too early for request_irq()) */
-	p->irqaction.name = cfg->name;
+	p->irqaction.name = dev_name(&p->pdev->dev);
 	p->irqaction.handler = sh_tmu_interrupt;
 	p->irqaction.dev_id = p;
 	p->irqaction.irq = irq;
-	p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL;
+	p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | \
+			     IRQF_IRQPOLL | IRQF_NOBALANCING;
 
 	/* get hold of clock */
-	p->clk = clk_get(&p->pdev->dev, cfg->clk);
+	p->clk = clk_get(&p->pdev->dev, "tmu_fck");
 	if (IS_ERR(p->clk)) {
-		pr_err("sh_tmu: cannot get clock \"%s\"\n", cfg->clk);
-		ret = PTR_ERR(p->clk);
-		goto err1;
+		dev_warn(&p->pdev->dev, "using deprecated clock lookup\n");
+		p->clk = clk_get(&p->pdev->dev, cfg->clk);
+		if (IS_ERR(p->clk)) {
+			dev_err(&p->pdev->dev, "cannot get clock\n");
+			ret = PTR_ERR(p->clk);
+			goto err1;
+		}
 	}
 
-	return sh_tmu_register(p, cfg->name,
+	return sh_tmu_register(p, (char *)dev_name(&p->pdev->dev),
 			       cfg->clockevent_rating,
 			       cfg->clocksource_rating);
 err1:
@@ -410,11 +412,10 @@ static int sh_tmu_setup(struct sh_tmu_priv *p, struct platform_device *pdev)
 static int __devinit sh_tmu_probe(struct platform_device *pdev)
 {
 	struct sh_tmu_priv *p = platform_get_drvdata(pdev);
-	struct sh_timer_config *cfg = pdev->dev.platform_data;
 	int ret;
 
 	if (p) {
-		pr_info("sh_tmu: %s kept as earlytimer\n", cfg->name);
+		dev_info(&pdev->dev, "kept as earlytimer\n");
 		return 0;
 	}
 
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index 6f25a20de99..323afef7780 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -26,8 +26,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
-
-#include <asm/dmaengine.h>
+#include <linux/sh_dma.h>
 
 #include "shdma.h"
 
@@ -45,7 +44,7 @@ enum sh_dmae_desc_status {
 #define LOG2_DEFAULT_XFER_SIZE 2
 
 /* A bitmask with bits enough for enum sh_dmae_slave_chan_id */
-static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SHDMA_SLAVE_NUMBER)];
+static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SH_DMA_SLAVE_NUMBER)];
 
 static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all);
 
@@ -190,7 +189,7 @@ static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
 	struct sh_dmae_device *shdev = container_of(sh_chan->common.device,
 						struct sh_dmae_device, common);
 	struct sh_dmae_pdata *pdata = shdev->pdata;
-	struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->id];
+	const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->id];
 	u16 __iomem *addr = shdev->dmars + chan_pdata->dmars / sizeof(u16);
 	int shift = chan_pdata->dmars_bit;
 
@@ -266,8 +265,8 @@ static struct sh_desc *sh_dmae_get_desc(struct sh_dmae_chan *sh_chan)
 	return NULL;
 }
 
-static struct sh_dmae_slave_config *sh_dmae_find_slave(
-	struct sh_dmae_chan *sh_chan, enum sh_dmae_slave_chan_id slave_id)
+static const struct sh_dmae_slave_config *sh_dmae_find_slave(
+	struct sh_dmae_chan *sh_chan, struct sh_dmae_slave *param)
 {
 	struct dma_device *dma_dev = sh_chan->common.device;
 	struct sh_dmae_device *shdev = container_of(dma_dev,
@@ -275,11 +274,11 @@ static struct sh_dmae_slave_config *sh_dmae_find_slave(
 	struct sh_dmae_pdata *pdata = shdev->pdata;
 	int i;
 
-	if ((unsigned)slave_id >= SHDMA_SLAVE_NUMBER)
+	if (param->slave_id >= SH_DMA_SLAVE_NUMBER)
 		return NULL;
 
 	for (i = 0; i < pdata->slave_num; i++)
-		if (pdata->slave[i].slave_id == slave_id)
+		if (pdata->slave[i].slave_id == param->slave_id)
 			return pdata->slave + i;
 
 	return NULL;
@@ -299,9 +298,9 @@ static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
 	 * never runs concurrently with itself or free_chan_resources.
 	 */
 	if (param) {
-		struct sh_dmae_slave_config *cfg;
+		const struct sh_dmae_slave_config *cfg;
 
-		cfg = sh_dmae_find_slave(sh_chan, param->slave_id);
+		cfg = sh_dmae_find_slave(sh_chan, param);
 		if (!cfg) {
 			ret = -EINVAL;
 			goto efindslave;
@@ -574,12 +573,14 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
 {
 	struct sh_dmae_slave *param;
 	struct sh_dmae_chan *sh_chan;
+	dma_addr_t slave_addr;
 
 	if (!chan)
 		return NULL;
 
 	sh_chan = to_sh_chan(chan);
 	param = chan->private;
+	slave_addr = param->config->addr;
 
 	/* Someone calling slave DMA on a public channel? */
 	if (!param || !sg_len) {
@@ -592,7 +593,7 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
 	 * if (param != NULL), this is a successfully requested slave channel,
 	 * therefore param->config != NULL too.
 	 */
-	return sh_dmae_prep_sg(sh_chan, sgl, sg_len, &param->config->addr,
+	return sh_dmae_prep_sg(sh_chan, sgl, sg_len, &slave_addr,
 			       direction, flags);
 }
 
@@ -873,7 +874,7 @@ static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
 			       int irq, unsigned long flags)
 {
 	int err;
-	struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id];
+	const struct sh_dmae_channel *chan_pdata = &shdev->pdata->channel[id];
 	struct platform_device *pdev = to_platform_device(shdev->common.dev);
 	struct sh_dmae_chan *new_sh_chan;
 
diff --git a/drivers/dma/shdma.h b/drivers/dma/shdma.h
index 153609a1e96..4021275a0a4 100644
--- a/drivers/dma/shdma.h
+++ b/drivers/dma/shdma.h
@@ -17,8 +17,8 @@
 #include <linux/interrupt.h>
 #include <linux/list.h>
 
-#include <asm/dmaengine.h>
-
+#define SH_DMAC_MAX_CHANNELS 6
+#define SH_DMA_SLAVE_NUMBER 256
 #define SH_DMA_TCR_MAX 0x00FFFFFF	/* 16MB */
 
 struct device;
diff --git a/drivers/serial/sh-sci.c b/drivers/serial/sh-sci.c
index 8eb094c1f61..8d993c4ccea 100644
--- a/drivers/serial/sh-sci.c
+++ b/drivers/serial/sh-sci.c
@@ -83,16 +83,16 @@ struct sci_port {
 
 	/* Interface clock */
 	struct clk *iclk;
-	/* Data clock */
-	struct clk *dclk;
+	/* Function clock */
+	struct clk *fclk;
 
 	struct list_head node;
 	struct dma_chan *chan_tx;
 	struct dma_chan *chan_rx;
 #ifdef CONFIG_SERIAL_SH_SCI_DMA
 	struct device *dma_dev;
-	enum sh_dmae_slave_chan_id slave_tx;
-	enum sh_dmae_slave_chan_id slave_rx;
+	unsigned int slave_tx;
+	unsigned int slave_rx;
 	struct dma_async_tx_descriptor *desc_tx;
 	struct dma_async_tx_descriptor *desc_rx[2];
 	dma_cookie_t cookie_tx;
@@ -107,6 +107,7 @@ struct sci_port {
 	struct work_struct work_tx;
 	struct work_struct work_rx;
 	struct timer_list rx_timer;
+	unsigned int rx_timeout;
 #endif
 };
 
@@ -674,22 +675,22 @@ static irqreturn_t sci_rx_interrupt(int irq, void *ptr)
 	struct sci_port *s = to_sci_port(port);
 
 	if (s->chan_rx) {
-		unsigned long tout;
 		u16 scr = sci_in(port, SCSCR);
 		u16 ssr = sci_in(port, SCxSR);
 
 		/* Disable future Rx interrupts */
-		sci_out(port, SCSCR, scr & ~SCI_CTRL_FLAGS_RIE);
+		if (port->type == PORT_SCIFA) {
+			disable_irq_nosync(irq);
+			scr |= 0x4000;
+		} else {
+			scr &= ~SCI_CTRL_FLAGS_RIE;
+		}
+		sci_out(port, SCSCR, scr);
 		/* Clear current interrupt */
 		sci_out(port, SCxSR, ssr & ~(1 | SCxSR_RDxF(port)));
-		/* Calculate delay for 1.5 DMA buffers */
-		tout = (port->timeout - HZ / 50) * s->buf_len_rx * 3 /
-		       port->fifosize / 2;
-		dev_dbg(port->dev, "Rx IRQ: setup timeout in %lu ms\n",
-			tout * 1000 / HZ);
-		if (tout < 2)
-			tout = 2;
-		mod_timer(&s->rx_timer, jiffies + tout);
+		dev_dbg(port->dev, "Rx IRQ %lu: setup t-out in %u jiffies\n",
+			jiffies, s->rx_timeout);
+		mod_timer(&s->rx_timer, jiffies + s->rx_timeout);
 
 		return IRQ_HANDLED;
 	}
@@ -799,7 +800,7 @@ static int sci_notifier(struct notifier_block *self,
 	    (phase == CPUFREQ_RESUMECHANGE)) {
 		spin_lock_irqsave(&priv->lock, flags);
 		list_for_each_entry(sci_port, &priv->ports, node)
-			sci_port->port.uartclk = clk_get_rate(sci_port->dclk);
+			sci_port->port.uartclk = clk_get_rate(sci_port->iclk);
 		spin_unlock_irqrestore(&priv->lock, flags);
 	}
 
@@ -810,21 +811,17 @@ static void sci_clk_enable(struct uart_port *port)
 {
 	struct sci_port *sci_port = to_sci_port(port);
 
-	clk_enable(sci_port->dclk);
-	sci_port->port.uartclk = clk_get_rate(sci_port->dclk);
-
-	if (sci_port->iclk)
-		clk_enable(sci_port->iclk);
+	clk_enable(sci_port->iclk);
+	sci_port->port.uartclk = clk_get_rate(sci_port->iclk);
+	clk_enable(sci_port->fclk);
 }
 
 static void sci_clk_disable(struct uart_port *port)
 {
 	struct sci_port *sci_port = to_sci_port(port);
 
-	if (sci_port->iclk)
-		clk_disable(sci_port->iclk);
-
-	clk_disable(sci_port->dclk);
+	clk_disable(sci_port->fclk);
+	clk_disable(sci_port->iclk);
 }
 
 static int sci_request_irq(struct sci_port *port)
@@ -913,22 +910,26 @@ static void sci_dma_tx_complete(void *arg)
 
 	spin_lock_irqsave(&port->lock, flags);
 
-	xmit->tail += s->sg_tx.length;
+	xmit->tail += sg_dma_len(&s->sg_tx);
 	xmit->tail &= UART_XMIT_SIZE - 1;
 
-	port->icount.tx += s->sg_tx.length;
+	port->icount.tx += sg_dma_len(&s->sg_tx);
 
 	async_tx_ack(s->desc_tx);
 	s->cookie_tx = -EINVAL;
 	s->desc_tx = NULL;
 
-	spin_unlock_irqrestore(&port->lock, flags);
-
 	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
 		uart_write_wakeup(port);
 
-	if (uart_circ_chars_pending(xmit))
+	if (!uart_circ_empty(xmit)) {
 		schedule_work(&s->work_tx);
+	} else if (port->type == PORT_SCIFA) {
+		u16 ctrl = sci_in(port, SCSCR);
+		sci_out(port, SCSCR, ctrl & ~SCI_CTRL_FLAGS_TIE);
+	}
+
+	spin_unlock_irqrestore(&port->lock, flags);
 }
 
 /* Locking: called with port lock held */
@@ -972,13 +973,13 @@ static void sci_dma_rx_complete(void *arg)
 	unsigned long flags;
 	int count;
 
-	dev_dbg(port->dev, "%s(%d)\n", __func__, port->line);
+	dev_dbg(port->dev, "%s(%d) active #%d\n", __func__, port->line, s->active_rx);
 
 	spin_lock_irqsave(&port->lock, flags);
 
 	count = sci_dma_rx_push(s, tty, s->buf_len_rx);
 
-	mod_timer(&s->rx_timer, jiffies + msecs_to_jiffies(5));
+	mod_timer(&s->rx_timer, jiffies + s->rx_timeout);
 
 	spin_unlock_irqrestore(&port->lock, flags);
 
@@ -1050,6 +1051,8 @@ static void sci_submit_rx(struct sci_port *s)
 			sci_rx_dma_release(s, true);
 			return;
 		}
+		dev_dbg(s->port.dev, "%s(): cookie %d to #%d\n", __func__,
+			s->cookie_rx[i], i);
 	}
 
 	s->active_rx = s->cookie_rx[0];
@@ -1107,10 +1110,10 @@ static void work_fn_rx(struct work_struct *work)
 		return;
 	}
 
-	dev_dbg(port->dev, "%s: cookie %d #%d\n", __func__,
-		s->cookie_rx[new], new);
-
 	s->active_rx = s->cookie_rx[!new];
+
+	dev_dbg(port->dev, "%s: cookie %d #%d, new active #%d\n", __func__,
+		s->cookie_rx[new], new, s->active_rx);
 }
 
 static void work_fn_tx(struct work_struct *work)
@@ -1131,14 +1134,13 @@ static void work_fn_tx(struct work_struct *work)
 	 */
 	spin_lock_irq(&port->lock);
 	sg->offset = xmit->tail & (UART_XMIT_SIZE - 1);
-	sg->dma_address = (sg_dma_address(sg) & ~(UART_XMIT_SIZE - 1)) +
+	sg_dma_address(sg) = (sg_dma_address(sg) & ~(UART_XMIT_SIZE - 1)) +
 		sg->offset;
-	sg->length = min((int)CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE),
+	sg_dma_len(sg) = min((int)CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE),
 		CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE));
-	sg->dma_length = sg->length;
 	spin_unlock_irq(&port->lock);
 
-	BUG_ON(!sg->length);
+	BUG_ON(!sg_dma_len(sg));
 
 	desc = chan->device->device_prep_slave_sg(chan,
 			sg, s->sg_len_tx, DMA_TO_DEVICE,
@@ -1173,23 +1175,28 @@ static void work_fn_tx(struct work_struct *work)
 
 static void sci_start_tx(struct uart_port *port)
 {
+	struct sci_port *s = to_sci_port(port);
 	unsigned short ctrl;
 
 #ifdef CONFIG_SERIAL_SH_SCI_DMA
-	struct sci_port *s = to_sci_port(port);
-
-	if (s->chan_tx) {
-		if (!uart_circ_empty(&s->port.state->xmit) && s->cookie_tx < 0)
-			schedule_work(&s->work_tx);
-
-		return;
-	}
+	if (port->type == PORT_SCIFA) {
+		u16 new, scr = sci_in(port, SCSCR);
+		if (s->chan_tx)
+			new = scr | 0x8000;
+		else
+			new = scr & ~0x8000;
+		if (new != scr)
+			sci_out(port, SCSCR, new);
+	}
+	if (s->chan_tx && !uart_circ_empty(&s->port.state->xmit) &&
+	    s->cookie_tx < 0)
+		schedule_work(&s->work_tx);
 #endif
-
-	/* Set TIE (Transmit Interrupt Enable) bit in SCSCR */
-	ctrl = sci_in(port, SCSCR);
-	ctrl |= SCI_CTRL_FLAGS_TIE;
-	sci_out(port, SCSCR, ctrl);
+	if (!s->chan_tx || port->type == PORT_SCIFA) {
+		/* Set TIE (Transmit Interrupt Enable) bit in SCSCR */
+		ctrl = sci_in(port, SCSCR);
+		sci_out(port, SCSCR, ctrl | SCI_CTRL_FLAGS_TIE);
+	}
 }
 
 static void sci_stop_tx(struct uart_port *port)
@@ -1198,6 +1205,8 @@ static void sci_stop_tx(struct uart_port *port)
 
 	/* Clear TIE (Transmit Interrupt Enable) bit in SCSCR */
 	ctrl = sci_in(port, SCSCR);
+	if (port->type == PORT_SCIFA)
+		ctrl &= ~0x8000;
 	ctrl &= ~SCI_CTRL_FLAGS_TIE;
 	sci_out(port, SCSCR, ctrl);
 }
@@ -1208,6 +1217,8 @@ static void sci_start_rx(struct uart_port *port)
 
 	/* Set RIE (Receive Interrupt Enable) bit in SCSCR */
 	ctrl |= sci_in(port, SCSCR);
+	if (port->type == PORT_SCIFA)
+		ctrl &= ~0x4000;
 	sci_out(port, SCSCR, ctrl);
 }
 
@@ -1217,6 +1228,8 @@ static void sci_stop_rx(struct uart_port *port)
 
 	/* Clear RIE (Receive Interrupt Enable) bit in SCSCR */
 	ctrl = sci_in(port, SCSCR);
+	if (port->type == PORT_SCIFA)
+		ctrl &= ~0x4000;
 	ctrl &= ~(SCI_CTRL_FLAGS_RIE | SCI_CTRL_FLAGS_REIE);
 	sci_out(port, SCSCR, ctrl);
 }
@@ -1251,8 +1264,12 @@ static void rx_timer_fn(unsigned long arg)
 {
 	struct sci_port *s = (struct sci_port *)arg;
 	struct uart_port *port = &s->port;
-
 	u16 scr = sci_in(port, SCSCR);
+
+	if (port->type == PORT_SCIFA) {
+		scr &= ~0x4000;
+		enable_irq(s->irqs[1]);
+	}
 	sci_out(port, SCSCR, scr | SCI_CTRL_FLAGS_RIE);
 	dev_dbg(port->dev, "DMA Rx timed out\n");
 	schedule_work(&s->work_rx);
@@ -1339,8 +1356,7 @@ static void sci_request_dma(struct uart_port *port)
 		sg_init_table(sg, 1);
 		sg_set_page(sg, virt_to_page(buf[i]), s->buf_len_rx,
 			    (int)buf[i] & ~PAGE_MASK);
-		sg->dma_address = dma[i];
-		sg->dma_length = sg->length;
+		sg_dma_address(sg) = dma[i];
 	}
 
 	INIT_WORK(&s->work_rx, work_fn_rx);
@@ -1403,8 +1419,12 @@ static void sci_shutdown(struct uart_port *port)
 static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
 			    struct ktermios *old)
 {
+#ifdef CONFIG_SERIAL_SH_SCI_DMA
+	struct sci_port *s = to_sci_port(port);
+#endif
 	unsigned int status, baud, smr_val, max_baud;
 	int t = -1;
+	u16 scfcr = 0;
 
 	/*
 	 * earlyprintk comes here early on with port->uartclk set to zero.
@@ -1427,7 +1447,7 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
 	sci_out(port, SCSCR, 0x00);	/* TE=0, RE=0, CKE1=0 */
 
 	if (port->type != PORT_SCI)
-		sci_out(port, SCFCR, SCFCR_RFRST | SCFCR_TFRST);
+		sci_out(port, SCFCR, scfcr | SCFCR_RFRST | SCFCR_TFRST);
 
 	smr_val = sci_in(port, SCSMR) & 3;
 	if ((termios->c_cflag & CSIZE) == CS7)
@@ -1458,10 +1478,32 @@ static void sci_set_termios(struct uart_port *port, struct ktermios *termios,
 	}
 
 	sci_init_pins(port, termios->c_cflag);
-	sci_out(port, SCFCR, (termios->c_cflag & CRTSCTS) ? SCFCR_MCE : 0);
+	sci_out(port, SCFCR, scfcr | ((termios->c_cflag & CRTSCTS) ? SCFCR_MCE : 0));
 
 	sci_out(port, SCSCR, SCSCR_INIT(port));
 
+#ifdef CONFIG_SERIAL_SH_SCI_DMA
+	/*
+	 * Calculate delay for 1.5 DMA buffers: see
+	 * drivers/serial/serial_core.c::uart_update_timeout(). With 10 bits
+	 * (CS8), 250Hz, 115200 baud and 64 bytes FIFO, the above function
+	 * calculates 1 jiffie for the data plus 5 jiffies for the "slop(e)."
+	 * Then below we calculate 3 jiffies (12ms) for 1.5 DMA buffers (3 FIFO
+	 * sizes), but it has been found out experimentally, that this is not
+	 * enough: the driver too often needlessly runs on a DMA timeout. 20ms
+	 * as a minimum seem to work perfectly.
+	 */
+	if (s->chan_rx) {
+		s->rx_timeout = (port->timeout - HZ / 50) * s->buf_len_rx * 3 /
+			port->fifosize / 2;
+		dev_dbg(port->dev,
+			"DMA Rx t-out %ums, tty t-out %u jiffies\n",
+			s->rx_timeout * 1000 / HZ, port->timeout);
+		if (s->rx_timeout < msecs_to_jiffies(20))
+			s->rx_timeout = msecs_to_jiffies(20);
+	}
+#endif
+
 	if ((termios->c_cflag & CREAD) != 0)
 		sci_start_rx(port);
 }
@@ -1553,10 +1595,10 @@ static struct uart_ops sci_uart_ops = {
 #endif
 };
 
-static void __devinit sci_init_single(struct platform_device *dev,
-				      struct sci_port *sci_port,
-				      unsigned int index,
-				      struct plat_sci_port *p)
+static int __devinit sci_init_single(struct platform_device *dev,
+				     struct sci_port *sci_port,
+				     unsigned int index,
+				     struct plat_sci_port *p)
 {
 	struct uart_port *port = &sci_port->port;
 
@@ -1577,8 +1619,23 @@ static void __devinit sci_init_single(struct platform_device *dev,
 	}
 
 	if (dev) {
-		sci_port->iclk = p->clk ? clk_get(&dev->dev, p->clk) : NULL;
-		sci_port->dclk = clk_get(&dev->dev, "peripheral_clk");
+		sci_port->iclk = clk_get(&dev->dev, "sci_ick");
+		if (IS_ERR(sci_port->iclk)) {
+			sci_port->iclk = clk_get(&dev->dev, "peripheral_clk");
+			if (IS_ERR(sci_port->iclk)) {
+				dev_err(&dev->dev, "can't get iclk\n");
+				return PTR_ERR(sci_port->iclk);
+			}
+		}
+
+		/*
+		 * The function clock is optional, ignore it if we can't
+		 * find it.
+		 */
+		sci_port->fclk = clk_get(&dev->dev, "sci_fck");
+		if (IS_ERR(sci_port->fclk))
+			sci_port->fclk = NULL;
+
 		sci_port->enable = sci_clk_enable;
 		sci_port->disable = sci_clk_disable;
 		port->dev = &dev->dev;
@@ -1605,6 +1662,7 @@ static void __devinit sci_init_single(struct platform_device *dev,
 #endif
 
 	memcpy(&sci_port->irqs, &p->irqs, sizeof(p->irqs));
+	return 0;
 }
 
 #ifdef CONFIG_SERIAL_SH_SCI_CONSOLE
@@ -1754,8 +1812,11 @@ static int sci_remove(struct platform_device *dev)
 	cpufreq_unregister_notifier(&priv->clk_nb, CPUFREQ_TRANSITION_NOTIFIER);
 
 	spin_lock_irqsave(&priv->lock, flags);
-	list_for_each_entry(p, &priv->ports, node)
+	list_for_each_entry(p, &priv->ports, node) {
 		uart_remove_one_port(&sci_uart_driver, &p->port);
+		clk_put(p->iclk);
+		clk_put(p->fclk);
+	}
 	spin_unlock_irqrestore(&priv->lock, flags);
 
 	kfree(priv);
@@ -1781,7 +1842,9 @@ static int __devinit sci_probe_single(struct platform_device *dev,
 		return 0;
 	}
 
-	sci_init_single(dev, sciport, index, p);
+	ret = sci_init_single(dev, sciport, index, p);
+	if (ret)
+		return ret;
 
 	ret = uart_add_one_port(&sci_uart_driver, &sciport->port);
 	if (ret)
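
A worked check of the rx_timeout comment added in sci_set_termios() above,
using the numbers the comment itself gives (HZ = 250, 115200 baud, CS8,
64-byte FIFO); buf_len_rx = 128 is an assumption, consistent with "1.5 DMA
buffers" equalling "3 FIFO sizes":

	/* uart_update_timeout() yields port->timeout = 6 jiffies
	 * (1 jiffy of data + 5 jiffies of slop), and HZ / 50 = 5. */
	unsigned int timeout = 6, slop = 5;		/* jiffies */
	unsigned int buf_len_rx = 128, fifosize = 64;	/* bytes */

	unsigned int rx_timeout = (timeout - slop) * buf_len_rx * 3
				/ fifosize / 2;		/* = 3 jiffies = 12 ms */

	/* The driver then raises this to the experimentally chosen 20 ms
	 * floor: rx_timeout = max(rx_timeout, msecs_to_jiffies(20)). */
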
diff --git a/drivers/sh/Kconfig b/drivers/sh/Kconfig
new file mode 100644
index 00000000000..a54de0b9b3d
--- /dev/null
+++ b/drivers/sh/Kconfig
@@ -0,0 +1,24 @@
+config INTC_USERIMASK
+	bool "Userspace interrupt masking support"
+	depends on ARCH_SHMOBILE || (SUPERH && CPU_SH4A)
+	help
+	  This enables support for hardware-assisted userspace hardirq
+	  masking.
+
+	  SH-4A and newer interrupt blocks all support a special shadowed
+	  page with all non-masking registers obscured when mapped in to
+	  userspace. This is primarily for use by userspace device
+	  drivers that are using special priority levels.
+
+	  If in doubt, say N.
+
+config INTC_BALANCING
+	bool "Hardware IRQ balancing support"
+	depends on SMP && SUPERH && CPU_SUBTYPE_SH7786
+	help
+	  This enables support for IRQ auto-distribution mode on SH-X3
+	  SMP parts. All of the balancing and CPU wakeup decisions are
+	  taken care of automatically by hardware for distributed
+	  vectors.
+
+	  If in doubt, say N.
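
Both new options land as bool symbols, so a kernel configuration that wants
them (on hardware meeting the stated depends) would simply set the following
(a hypothetical .config fragment, not part of the patch):

	CONFIG_INTC_USERIMASK=y
	# requires SMP && SUPERH && CPU_SUBTYPE_SH7786
	CONFIG_INTC_BALANCING=y
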
diff --git a/drivers/sh/Makefile b/drivers/sh/Makefile
index 4956bf1f213..78bb5127abd 100644
--- a/drivers/sh/Makefile
+++ b/drivers/sh/Makefile
@@ -4,4 +4,6 @@
 obj-$(CONFIG_SUPERHYWAY)	+= superhyway/
 obj-$(CONFIG_MAPLE)		+= maple/
 obj-$(CONFIG_GENERIC_GPIO)	+= pfc.o
+obj-$(CONFIG_SUPERH)		+= clk.o
+obj-$(CONFIG_SH_CLK_CPG)	+= clk-cpg.o
 obj-y				+= intc.o
diff --git a/drivers/sh/clk-cpg.c b/drivers/sh/clk-cpg.c
new file mode 100644
index 00000000000..f5c80ba9ab1
--- /dev/null
+++ b/drivers/sh/clk-cpg.c
@@ -0,0 +1,298 @@
+#include <linux/clk.h>
+#include <linux/compiler.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/sh_clk.h>
+
+static int sh_clk_mstp32_enable(struct clk *clk)
+{
+	__raw_writel(__raw_readl(clk->enable_reg) & ~(1 << clk->enable_bit),
+		     clk->enable_reg);
+	return 0;
+}
+
+static void sh_clk_mstp32_disable(struct clk *clk)
+{
+	__raw_writel(__raw_readl(clk->enable_reg) | (1 << clk->enable_bit),
+		     clk->enable_reg);
+}
+
+static struct clk_ops sh_clk_mstp32_clk_ops = {
+	.enable = sh_clk_mstp32_enable,
+	.disable = sh_clk_mstp32_disable,
+	.recalc = followparent_recalc,
+};
+
+int __init sh_clk_mstp32_register(struct clk *clks, int nr)
+{
+	struct clk *clkp;
+	int ret = 0;
+	int k;
+
+	for (k = 0; !ret && (k < nr); k++) {
+		clkp = clks + k;
+		clkp->ops = &sh_clk_mstp32_clk_ops;
+		ret |= clk_register(clkp);
+	}
+
+	return ret;
+}
+
+static long sh_clk_div_round_rate(struct clk *clk, unsigned long rate)
+{
+	return clk_rate_table_round(clk, clk->freq_table, rate);
+}
+
+static int sh_clk_div6_divisors[64] = {
+	1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
+	17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
+	33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
+	49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64
+};
+
+static struct clk_div_mult_table sh_clk_div6_table = {
+	.divisors = sh_clk_div6_divisors,
+	.nr_divisors = ARRAY_SIZE(sh_clk_div6_divisors),
+};
+
+static unsigned long sh_clk_div6_recalc(struct clk *clk)
+{
+	struct clk_div_mult_table *table = &sh_clk_div6_table;
+	unsigned int idx;
+
+	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
+			     table, NULL);
+
+	idx = __raw_readl(clk->enable_reg) & 0x003f;
+
+	return clk->freq_table[idx].frequency;
+}
+
+static int sh_clk_div6_set_rate(struct clk *clk,
+				unsigned long rate, int algo_id)
+{
+	unsigned long value;
+	int idx;
+
+	idx = clk_rate_table_find(clk, clk->freq_table, rate);
+	if (idx < 0)
+		return idx;
+
+	value = __raw_readl(clk->enable_reg);
+	value &= ~0x3f;
+	value |= idx;
+	__raw_writel(value, clk->enable_reg);
+	return 0;
+}
+
+static int sh_clk_div6_enable(struct clk *clk)
+{
+	unsigned long value;
+	int ret;
+
+	ret = sh_clk_div6_set_rate(clk, clk->rate, 0);
+	if (ret == 0) {
+		value = __raw_readl(clk->enable_reg);
+		value &= ~0x100; /* clear stop bit to enable clock */
+		__raw_writel(value, clk->enable_reg);
+	}
+	return ret;
+}
+
+static void sh_clk_div6_disable(struct clk *clk)
+{
+	unsigned long value;
+
+	value = __raw_readl(clk->enable_reg);
+	value |= 0x100; /* stop clock */
+	value |= 0x3f; /* VDIV bits must be non-zero, overwrite divider */
+	__raw_writel(value, clk->enable_reg);
+}
+
+static struct clk_ops sh_clk_div6_clk_ops = {
+	.recalc = sh_clk_div6_recalc,
+	.round_rate = sh_clk_div_round_rate,
+	.set_rate = sh_clk_div6_set_rate,
+	.enable = sh_clk_div6_enable,
+	.disable = sh_clk_div6_disable,
+};
+
+int __init sh_clk_div6_register(struct clk *clks, int nr)
+{
+	struct clk *clkp;
+	void *freq_table;
+	int nr_divs = sh_clk_div6_table.nr_divisors;
+	int freq_table_size = sizeof(struct cpufreq_frequency_table);
+	int ret = 0;
+	int k;
+
+	freq_table_size *= (nr_divs + 1);
+	freq_table = kzalloc(freq_table_size * nr, GFP_KERNEL);
+	if (!freq_table) {
+		pr_err("sh_clk_div6_register: unable to alloc memory\n");
+		return -ENOMEM;
+	}
+
+	for (k = 0; !ret && (k < nr); k++) {
+		clkp = clks + k;
+
+		clkp->ops = &sh_clk_div6_clk_ops;
+		clkp->id = -1;
+		clkp->freq_table = freq_table + (k * freq_table_size);
+		clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;
+
+		ret = clk_register(clkp);
+	}
+
+	return ret;
+}
+
+static unsigned long sh_clk_div4_recalc(struct clk *clk)
+{
+	struct clk_div4_table *d4t = clk->priv;
+	struct clk_div_mult_table *table = d4t->div_mult_table;
+	unsigned int idx;
+
+	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
+			     table, &clk->arch_flags);
+
+	idx = (__raw_readl(clk->enable_reg) >> clk->enable_bit) & 0x000f;
+
+	return clk->freq_table[idx].frequency;
+}
+
+static int sh_clk_div4_set_parent(struct clk *clk, struct clk *parent)
+{
+	struct clk_div4_table *d4t = clk->priv;
+	struct clk_div_mult_table *table = d4t->div_mult_table;
+	u32 value;
+	int ret;
+
+	/* we really need a better way to determine parent index, but for
+	 * now assume internal parent comes with CLK_ENABLE_ON_INIT set,
+	 * no CLK_ENABLE_ON_INIT means external clock...
+	 */
+
+	if (parent->flags & CLK_ENABLE_ON_INIT)
+		value = __raw_readl(clk->enable_reg) & ~(1 << 7);
+	else
+		value = __raw_readl(clk->enable_reg) | (1 << 7);
+
+	ret = clk_reparent(clk, parent);
+	if (ret < 0)
+		return ret;
+
+	__raw_writel(value, clk->enable_reg);
+
+	/* Rebuild the frequency table */
+	clk_rate_table_build(clk, clk->freq_table, table->nr_divisors,
+			     table, &clk->arch_flags);
+
+	return 0;
+}
+
+static int sh_clk_div4_set_rate(struct clk *clk, unsigned long rate, int algo_id)
+{
+	struct clk_div4_table *d4t = clk->priv;
+	unsigned long value;
+	int idx = clk_rate_table_find(clk, clk->freq_table, rate);
+	if (idx < 0)
+		return idx;
+
+	value = __raw_readl(clk->enable_reg);
+	value &= ~(0xf << clk->enable_bit);
+	value |= (idx << clk->enable_bit);
+	__raw_writel(value, clk->enable_reg);
+
+	if (d4t->kick)
+		d4t->kick(clk);
+
+	return 0;
+}
+
+static int sh_clk_div4_enable(struct clk *clk)
+{
+	__raw_writel(__raw_readl(clk->enable_reg) & ~(1 << 8), clk->enable_reg);
+	return 0;
+}
+
+static void sh_clk_div4_disable(struct clk *clk)
+{
+	__raw_writel(__raw_readl(clk->enable_reg) | (1 << 8), clk->enable_reg);
+}
+
+static struct clk_ops sh_clk_div4_clk_ops = {
+	.recalc = sh_clk_div4_recalc,
+	.set_rate = sh_clk_div4_set_rate,
+	.round_rate = sh_clk_div_round_rate,
+};
+
+static struct clk_ops sh_clk_div4_enable_clk_ops = {
+	.recalc = sh_clk_div4_recalc,
+	.set_rate = sh_clk_div4_set_rate,
+	.round_rate = sh_clk_div_round_rate,
+	.enable = sh_clk_div4_enable,
+	.disable = sh_clk_div4_disable,
+};
+
+static struct clk_ops sh_clk_div4_reparent_clk_ops = {
+	.recalc = sh_clk_div4_recalc,
+	.set_rate = sh_clk_div4_set_rate,
+	.round_rate = sh_clk_div_round_rate,
+	.enable = sh_clk_div4_enable,
+	.disable = sh_clk_div4_disable,
+	.set_parent = sh_clk_div4_set_parent,
+};
+
+static int __init sh_clk_div4_register_ops(struct clk *clks, int nr,
+			struct clk_div4_table *table, struct clk_ops *ops)
+{
+	struct clk *clkp;
+	void *freq_table;
+	int nr_divs = table->div_mult_table->nr_divisors;
+	int freq_table_size = sizeof(struct cpufreq_frequency_table);
+	int ret = 0;
+	int k;
+
+	freq_table_size *= (nr_divs + 1);
+	freq_table = kzalloc(freq_table_size * nr, GFP_KERNEL);
+	if (!freq_table) {
+		pr_err("sh_clk_div4_register: unable to alloc memory\n");
+		return -ENOMEM;
+	}
+
+	for (k = 0; !ret && (k < nr); k++) {
+		clkp = clks + k;
+
+		clkp->ops = ops;
+		clkp->id = -1;
+		clkp->priv = table;
+
+		clkp->freq_table = freq_table + (k * freq_table_size);
+		clkp->freq_table[nr_divs].frequency = CPUFREQ_TABLE_END;
+
+		ret = clk_register(clkp);
+	}
+
+	return ret;
+}
+
+int __init sh_clk_div4_register(struct clk *clks, int nr,
+				struct clk_div4_table *table)
+{
+	return sh_clk_div4_register_ops(clks, nr, table, &sh_clk_div4_clk_ops);
+}
+
+int __init sh_clk_div4_enable_register(struct clk *clks, int nr,
+				       struct clk_div4_table *table)
+{
+	return sh_clk_div4_register_ops(clks, nr, table,
+					&sh_clk_div4_enable_clk_ops);
+}
+
+int __init sh_clk_div4_reparent_register(struct clk *clks, int nr,
+					 struct clk_div4_table *table)
+{
+	return sh_clk_div4_register_ops(clks, nr, table,
+					&sh_clk_div4_reparent_clk_ops);
+}
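
MSTP bits in the CPG have inverted sense (a set bit stops the module), which
is why sh_clk_mstp32_enable() above clears the bit and the disable hook sets
it. A hypothetical sketch of how an SoC might feed its module-stop clocks to
the helper (register address, name and bit number invented for illustration):

	static struct clk mstp_clks[] = {
		{
			.name	    = "tmu0",			/* illustrative */
			.enable_reg = (void __iomem *)0xa4150038,
			.enable_bit = 15,
		},
	};

	/* typically called from the SoC's clock setup code */
	ret = sh_clk_mstp32_register(mstp_clks, ARRAY_SIZE(mstp_clks));
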
diff --git a/drivers/sh/clk.c b/drivers/sh/clk.c
new file mode 100644
index 00000000000..5d84adac9ec
--- /dev/null
+++ b/drivers/sh/clk.c
@@ -0,0 +1,545 @@
1/*
2 * drivers/sh/clk.c - SuperH clock framework
3 *
4 * Copyright (C) 2005 - 2009 Paul Mundt
5 *
6 * This clock framework is derived from the OMAP version by:
7 *
8 * Copyright (C) 2004 - 2008 Nokia Corporation
9 * Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
10 *
11 * Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com>
12 *
13 * This file is subject to the terms and conditions of the GNU General Public
14 * License. See the file "COPYING" in the main directory of this archive
15 * for more details.
16 */
17#include <linux/kernel.h>
18#include <linux/init.h>
19#include <linux/module.h>
20#include <linux/mutex.h>
21#include <linux/list.h>
22#include <linux/kobject.h>
23#include <linux/sysdev.h>
24#include <linux/seq_file.h>
25#include <linux/err.h>
26#include <linux/platform_device.h>
27#include <linux/debugfs.h>
28#include <linux/cpufreq.h>
29#include <linux/clk.h>
30#include <linux/sh_clk.h>
31
32static LIST_HEAD(clock_list);
33static DEFINE_SPINLOCK(clock_lock);
34static DEFINE_MUTEX(clock_list_sem);
35
36void clk_rate_table_build(struct clk *clk,
37 struct cpufreq_frequency_table *freq_table,
38 int nr_freqs,
39 struct clk_div_mult_table *src_table,
40 unsigned long *bitmap)
41{
42 unsigned long mult, div;
43 unsigned long freq;
44 int i;
45
46 for (i = 0; i < nr_freqs; i++) {
47 div = 1;
48 mult = 1;
49
50 if (src_table->divisors && i < src_table->nr_divisors)
51 div = src_table->divisors[i];
52
53 if (src_table->multipliers && i < src_table->nr_multipliers)
54 mult = src_table->multipliers[i];
55
56 if (!div || !mult || (bitmap && !test_bit(i, bitmap)))
57 freq = CPUFREQ_ENTRY_INVALID;
58 else
59 freq = clk->parent->rate * mult / div;
60
61 freq_table[i].index = i;
62 freq_table[i].frequency = freq;
63 }
64
65 /* Termination entry */
66 freq_table[i].index = i;
67 freq_table[i].frequency = CPUFREQ_TABLE_END;
68}
69
70long clk_rate_table_round(struct clk *clk,
71 struct cpufreq_frequency_table *freq_table,
72 unsigned long rate)
73{
74 unsigned long rate_error, rate_error_prev = ~0UL;
75 unsigned long rate_best_fit = rate;
76 unsigned long highest, lowest;
77 int i;
78
79 highest = lowest = 0;
80
81 for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
82 unsigned long freq = freq_table[i].frequency;
83
84 if (freq == CPUFREQ_ENTRY_INVALID)
85 continue;
86
87 if (freq > highest)
88 highest = freq;
89 if (freq < lowest)
90 lowest = freq;
91
92 rate_error = abs(freq - rate);
93 if (rate_error < rate_error_prev) {
94 rate_best_fit = freq;
95 rate_error_prev = rate_error;
96 }
97
98 if (rate_error == 0)
99 break;
100 }
101
102 if (rate >= highest)
103 rate_best_fit = highest;
104 if (rate <= lowest)
105 rate_best_fit = lowest;
106
107 return rate_best_fit;
108}
109
110int clk_rate_table_find(struct clk *clk,
111 struct cpufreq_frequency_table *freq_table,
112 unsigned long rate)
113{
114 int i;
115
116 for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
117 unsigned long freq = freq_table[i].frequency;
118
119 if (freq == CPUFREQ_ENTRY_INVALID)
120 continue;
121
122 if (freq == rate)
123 return i;
124 }
125
126 return -ENOENT;
127}
128
 129/* Used for clocks that always have the same value as the parent clock */
130unsigned long followparent_recalc(struct clk *clk)
131{
132 return clk->parent ? clk->parent->rate : 0;
133}
134
135int clk_reparent(struct clk *child, struct clk *parent)
136{
137 list_del_init(&child->sibling);
138 if (parent)
139 list_add(&child->sibling, &parent->children);
140 child->parent = parent;
141
142 /* now do the debugfs renaming to reattach the child
143 to the proper parent */
144
145 return 0;
146}
147
148/* Propagate rate to children */
149void propagate_rate(struct clk *tclk)
150{
151 struct clk *clkp;
152
153 list_for_each_entry(clkp, &tclk->children, sibling) {
154 if (clkp->ops && clkp->ops->recalc)
155 clkp->rate = clkp->ops->recalc(clkp);
156
157 propagate_rate(clkp);
158 }
159}
160
161static void __clk_disable(struct clk *clk)
162{
163 if (WARN(!clk->usecount, "Trying to disable clock %s with 0 usecount\n",
164 clk->name))
165 return;
166
167 if (!(--clk->usecount)) {
168 if (likely(clk->ops && clk->ops->disable))
169 clk->ops->disable(clk);
170 if (likely(clk->parent))
171 __clk_disable(clk->parent);
172 }
173}
174
175void clk_disable(struct clk *clk)
176{
177 unsigned long flags;
178
179 if (!clk)
180 return;
181
182 spin_lock_irqsave(&clock_lock, flags);
183 __clk_disable(clk);
184 spin_unlock_irqrestore(&clock_lock, flags);
185}
186EXPORT_SYMBOL_GPL(clk_disable);
187
188static int __clk_enable(struct clk *clk)
189{
190 int ret = 0;
191
192 if (clk->usecount++ == 0) {
193 if (clk->parent) {
194 ret = __clk_enable(clk->parent);
195 if (unlikely(ret))
196 goto err;
197 }
198
199 if (clk->ops && clk->ops->enable) {
200 ret = clk->ops->enable(clk);
201 if (ret) {
202 if (clk->parent)
203 __clk_disable(clk->parent);
204 goto err;
205 }
206 }
207 }
208
209 return ret;
210err:
211 clk->usecount--;
212 return ret;
213}
214
215int clk_enable(struct clk *clk)
216{
217 unsigned long flags;
218 int ret;
219
220 if (!clk)
221 return -EINVAL;
222
223 spin_lock_irqsave(&clock_lock, flags);
224 ret = __clk_enable(clk);
225 spin_unlock_irqrestore(&clock_lock, flags);
226
227 return ret;
228}
229EXPORT_SYMBOL_GPL(clk_enable);
230
231static LIST_HEAD(root_clks);
232
233/**
234 * recalculate_root_clocks - recalculate and propagate all root clocks
235 *
 236 * Recalculates all root clocks (clocks with no parent) and, provided
 237 * each clock's .recalc is set correctly, propagates their new rates.
238 * Called at init.
239 */
240void recalculate_root_clocks(void)
241{
242 struct clk *clkp;
243
244 list_for_each_entry(clkp, &root_clks, sibling) {
245 if (clkp->ops && clkp->ops->recalc)
246 clkp->rate = clkp->ops->recalc(clkp);
247 propagate_rate(clkp);
248 }
249}
250
251int clk_register(struct clk *clk)
252{
253 if (clk == NULL || IS_ERR(clk))
254 return -EINVAL;
255
256 /*
257 * trap out already registered clocks
258 */
259 if (clk->node.next || clk->node.prev)
260 return 0;
261
262 mutex_lock(&clock_list_sem);
263
264 INIT_LIST_HEAD(&clk->children);
265 clk->usecount = 0;
266
267 if (clk->parent)
268 list_add(&clk->sibling, &clk->parent->children);
269 else
270 list_add(&clk->sibling, &root_clks);
271
272 list_add(&clk->node, &clock_list);
273 if (clk->ops && clk->ops->init)
274 clk->ops->init(clk);
275 mutex_unlock(&clock_list_sem);
276
277 return 0;
278}
279EXPORT_SYMBOL_GPL(clk_register);
280
281void clk_unregister(struct clk *clk)
282{
283 mutex_lock(&clock_list_sem);
284 list_del(&clk->sibling);
285 list_del(&clk->node);
286 mutex_unlock(&clock_list_sem);
287}
288EXPORT_SYMBOL_GPL(clk_unregister);
289
290void clk_enable_init_clocks(void)
291{
292 struct clk *clkp;
293
294 list_for_each_entry(clkp, &clock_list, node)
295 if (clkp->flags & CLK_ENABLE_ON_INIT)
296 clk_enable(clkp);
297}
298
299unsigned long clk_get_rate(struct clk *clk)
300{
301 return clk->rate;
302}
303EXPORT_SYMBOL_GPL(clk_get_rate);
304
305int clk_set_rate(struct clk *clk, unsigned long rate)
306{
307 return clk_set_rate_ex(clk, rate, 0);
308}
309EXPORT_SYMBOL_GPL(clk_set_rate);
310
311int clk_set_rate_ex(struct clk *clk, unsigned long rate, int algo_id)
312{
313 int ret = -EOPNOTSUPP;
314 unsigned long flags;
315
316 spin_lock_irqsave(&clock_lock, flags);
317
318 if (likely(clk->ops && clk->ops->set_rate)) {
319 ret = clk->ops->set_rate(clk, rate, algo_id);
320 if (ret != 0)
321 goto out_unlock;
322 } else {
323 clk->rate = rate;
324 ret = 0;
325 }
326
327 if (clk->ops && clk->ops->recalc)
328 clk->rate = clk->ops->recalc(clk);
329
330 propagate_rate(clk);
331
332out_unlock:
333 spin_unlock_irqrestore(&clock_lock, flags);
334
335 return ret;
336}
337EXPORT_SYMBOL_GPL(clk_set_rate_ex);
338
339int clk_set_parent(struct clk *clk, struct clk *parent)
340{
341 unsigned long flags;
342 int ret = -EINVAL;
343
344 if (!parent || !clk)
345 return ret;
346 if (clk->parent == parent)
347 return 0;
348
349 spin_lock_irqsave(&clock_lock, flags);
350 if (clk->usecount == 0) {
 351 if (clk->ops && clk->ops->set_parent)
352 ret = clk->ops->set_parent(clk, parent);
353 else
354 ret = clk_reparent(clk, parent);
355
356 if (ret == 0) {
357 pr_debug("clock: set parent of %s to %s (new rate %ld)\n",
358 clk->name, clk->parent->name, clk->rate);
 360 if (clk->ops && clk->ops->recalc)
360 clk->rate = clk->ops->recalc(clk);
361 propagate_rate(clk);
362 }
363 } else
364 ret = -EBUSY;
365 spin_unlock_irqrestore(&clock_lock, flags);
366
367 return ret;
368}
369EXPORT_SYMBOL_GPL(clk_set_parent);
370
371struct clk *clk_get_parent(struct clk *clk)
372{
373 return clk->parent;
374}
375EXPORT_SYMBOL_GPL(clk_get_parent);
376
377long clk_round_rate(struct clk *clk, unsigned long rate)
378{
379 if (likely(clk->ops && clk->ops->round_rate)) {
380 unsigned long flags, rounded;
381
382 spin_lock_irqsave(&clock_lock, flags);
383 rounded = clk->ops->round_rate(clk, rate);
384 spin_unlock_irqrestore(&clock_lock, flags);
385
386 return rounded;
387 }
388
389 return clk_get_rate(clk);
390}
391EXPORT_SYMBOL_GPL(clk_round_rate);
392
393#ifdef CONFIG_PM
394static int clks_sysdev_suspend(struct sys_device *dev, pm_message_t state)
395{
396 static pm_message_t prev_state;
397 struct clk *clkp;
398
399 switch (state.event) {
400 case PM_EVENT_ON:
 401 /* Resuming from hibernation */
402 if (prev_state.event != PM_EVENT_FREEZE)
403 break;
404
405 list_for_each_entry(clkp, &clock_list, node) {
406 if (likely(clkp->ops)) {
407 unsigned long rate = clkp->rate;
408
409 if (likely(clkp->ops->set_parent))
410 clkp->ops->set_parent(clkp,
411 clkp->parent);
412 if (likely(clkp->ops->set_rate))
413 clkp->ops->set_rate(clkp,
414 rate, NO_CHANGE);
415 else if (likely(clkp->ops->recalc))
416 clkp->rate = clkp->ops->recalc(clkp);
417 }
418 }
419 break;
420 case PM_EVENT_FREEZE:
421 break;
422 case PM_EVENT_SUSPEND:
423 break;
424 }
425
426 prev_state = state;
427 return 0;
428}
429
430static int clks_sysdev_resume(struct sys_device *dev)
431{
432 return clks_sysdev_suspend(dev, PMSG_ON);
433}
434
435static struct sysdev_class clks_sysdev_class = {
436 .name = "clks",
437};
438
439static struct sysdev_driver clks_sysdev_driver = {
440 .suspend = clks_sysdev_suspend,
441 .resume = clks_sysdev_resume,
442};
443
444static struct sys_device clks_sysdev_dev = {
445 .cls = &clks_sysdev_class,
446};
447
448static int __init clk_sysdev_init(void)
449{
450 sysdev_class_register(&clks_sysdev_class);
451 sysdev_driver_register(&clks_sysdev_class, &clks_sysdev_driver);
452 sysdev_register(&clks_sysdev_dev);
453
454 return 0;
455}
456subsys_initcall(clk_sysdev_init);
457#endif
458
459/*
460 * debugfs support to trace clock tree hierarchy and attributes
461 */
462static struct dentry *clk_debugfs_root;
463
464static int clk_debugfs_register_one(struct clk *c)
465{
466 int err;
467 struct dentry *d, *child, *child_tmp;
468 struct clk *pa = c->parent;
469 char s[255];
470 char *p = s;
471
472 p += sprintf(p, "%s", c->name);
473 if (c->id >= 0)
474 sprintf(p, ":%d", c->id);
475 d = debugfs_create_dir(s, pa ? pa->dentry : clk_debugfs_root);
476 if (!d)
477 return -ENOMEM;
478 c->dentry = d;
479
480 d = debugfs_create_u8("usecount", S_IRUGO, c->dentry, (u8 *)&c->usecount);
481 if (!d) {
482 err = -ENOMEM;
483 goto err_out;
484 }
485 d = debugfs_create_u32("rate", S_IRUGO, c->dentry, (u32 *)&c->rate);
486 if (!d) {
487 err = -ENOMEM;
488 goto err_out;
489 }
490 d = debugfs_create_x32("flags", S_IRUGO, c->dentry, (u32 *)&c->flags);
491 if (!d) {
492 err = -ENOMEM;
493 goto err_out;
494 }
495 return 0;
496
497err_out:
498 d = c->dentry;
499 list_for_each_entry_safe(child, child_tmp, &d->d_subdirs, d_u.d_child)
500 debugfs_remove(child);
501 debugfs_remove(c->dentry);
502 return err;
503}
504
505static int clk_debugfs_register(struct clk *c)
506{
507 int err;
508 struct clk *pa = c->parent;
509
510 if (pa && !pa->dentry) {
511 err = clk_debugfs_register(pa);
512 if (err)
513 return err;
514 }
515
516 if (!c->dentry && c->name) {
517 err = clk_debugfs_register_one(c);
518 if (err)
519 return err;
520 }
521 return 0;
522}
523
524static int __init clk_debugfs_init(void)
525{
526 struct clk *c;
527 struct dentry *d;
528 int err;
529
530 d = debugfs_create_dir("clock", NULL);
531 if (!d)
532 return -ENOMEM;
533 clk_debugfs_root = d;
534
535 list_for_each_entry(c, &clock_list, node) {
536 err = clk_debugfs_register(c);
537 if (err)
538 goto err_out;
539 }
540 return 0;
541err_out:
542 debugfs_remove_recursive(clk_debugfs_root);
543 return err;
544}
545late_initcall(clk_debugfs_init);
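
Combined with the clkdev conversions elsewhere in this series, a driver
consumes this framework roughly as sketched below; the "peripheral_clk"
lookup string and the target rate are assumptions for illustration:

	struct clk *clk;
	int ret;

	clk = clk_get(&pdev->dev, "peripheral_clk");
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_enable(clk);		/* refcounted; parents enable first */
	if (ret) {
		clk_put(clk);
		return ret;
	}

	/* round first so the request maps to a supported rate */
	ret = clk_set_rate(clk, clk_round_rate(clk, 33333333));

	/* ... use the device ... */

	clk_disable(clk);		/* disables hardware at usecount 0 */
	clk_put(clk);
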
diff --git a/drivers/sh/intc.c b/drivers/sh/intc.c
index 94ad6bd86a0..c585574b9ae 100644
--- a/drivers/sh/intc.c
+++ b/drivers/sh/intc.c
@@ -28,6 +28,7 @@
28#include <linux/topology.h> 28#include <linux/topology.h>
29#include <linux/bitmap.h> 29#include <linux/bitmap.h>
30#include <linux/cpumask.h> 30#include <linux/cpumask.h>
31#include <asm/sizes.h>
31 32
32#define _INTC_MK(fn, mode, addr_e, addr_d, width, shift) \ 33#define _INTC_MK(fn, mode, addr_e, addr_d, width, shift) \
33 ((shift) | ((width) << 5) | ((fn) << 9) | ((mode) << 13) | \ 34 ((shift) | ((width) << 5) | ((fn) << 9) | ((mode) << 13) | \
@@ -45,6 +46,12 @@ struct intc_handle_int {
45 unsigned long handle; 46 unsigned long handle;
46}; 47};
47 48
49struct intc_window {
50 phys_addr_t phys;
51 void __iomem *virt;
52 unsigned long size;
53};
54
48struct intc_desc_int { 55struct intc_desc_int {
49 struct list_head list; 56 struct list_head list;
50 struct sys_device sysdev; 57 struct sys_device sysdev;
@@ -58,6 +65,8 @@ struct intc_desc_int {
58 unsigned int nr_prio; 65 unsigned int nr_prio;
59 struct intc_handle_int *sense; 66 struct intc_handle_int *sense;
60 unsigned int nr_sense; 67 unsigned int nr_sense;
68 struct intc_window *window;
69 unsigned int nr_windows;
61 struct irq_chip chip; 70 struct irq_chip chip;
62}; 71};
63 72
@@ -87,8 +96,12 @@ static DEFINE_SPINLOCK(vector_lock);
87#define SMP_NR(d, x) 1 96#define SMP_NR(d, x) 1
88#endif 97#endif
89 98
90static unsigned int intc_prio_level[NR_IRQS]; /* for now */ 99static unsigned int intc_prio_level[NR_IRQS]; /* for now */
100static unsigned int default_prio_level = 2; /* 2 - 16 */
91static unsigned long ack_handle[NR_IRQS]; 101static unsigned long ack_handle[NR_IRQS];
102#ifdef CONFIG_INTC_BALANCING
103static unsigned long dist_handle[NR_IRQS];
104#endif
92 105
93static inline struct intc_desc_int *get_intc_desc(unsigned int irq) 106static inline struct intc_desc_int *get_intc_desc(unsigned int irq)
94{ 107{
@@ -96,6 +109,47 @@ static inline struct intc_desc_int *get_intc_desc(unsigned int irq)
96 return container_of(chip, struct intc_desc_int, chip); 109 return container_of(chip, struct intc_desc_int, chip);
97} 110}
98 111
112static unsigned long intc_phys_to_virt(struct intc_desc_int *d,
113 unsigned long address)
114{
115 struct intc_window *window;
116 int k;
117
118 /* scan through physical windows and convert address */
119 for (k = 0; k < d->nr_windows; k++) {
120 window = d->window + k;
121
122 if (address < window->phys)
123 continue;
124
125 if (address >= (window->phys + window->size))
126 continue;
127
128 address -= window->phys;
129 address += (unsigned long)window->virt;
130
131 return address;
132 }
133
134 /* no windows defined, register must be 1:1 mapped virt:phys */
135 return address;
136}
137
138static unsigned int intc_get_reg(struct intc_desc_int *d, unsigned long address)
139{
140 unsigned int k;
141
142 address = intc_phys_to_virt(d, address);
143
144 for (k = 0; k < d->nr_reg; k++) {
145 if (d->reg[k] == address)
146 return k;
147 }
148
149 BUG();
150 return 0;
151}
152
99static inline unsigned int set_field(unsigned int value, 153static inline unsigned int set_field(unsigned int value,
100 unsigned int field_value, 154 unsigned int field_value,
101 unsigned int handle) 155 unsigned int handle)
@@ -229,6 +283,85 @@ static void (*intc_disable_fns[])(unsigned long addr,
229 [MODE_PCLR_REG] = intc_mode_field, 283 [MODE_PCLR_REG] = intc_mode_field,
230}; 284};
231 285
286#ifdef CONFIG_INTC_BALANCING
287static inline void intc_balancing_enable(unsigned int irq)
288{
289 struct intc_desc_int *d = get_intc_desc(irq);
290 unsigned long handle = dist_handle[irq];
291 unsigned long addr;
292
293 if (irq_balancing_disabled(irq) || !handle)
294 return;
295
296 addr = INTC_REG(d, _INTC_ADDR_D(handle), 0);
297 intc_reg_fns[_INTC_FN(handle)](addr, handle, 1);
298}
299
300static inline void intc_balancing_disable(unsigned int irq)
301{
302 struct intc_desc_int *d = get_intc_desc(irq);
303 unsigned long handle = dist_handle[irq];
304 unsigned long addr;
305
306 if (irq_balancing_disabled(irq) || !handle)
307 return;
308
309 addr = INTC_REG(d, _INTC_ADDR_D(handle), 0);
310 intc_reg_fns[_INTC_FN(handle)](addr, handle, 0);
311}
312
313static unsigned int intc_dist_data(struct intc_desc *desc,
314 struct intc_desc_int *d,
315 intc_enum enum_id)
316{
317 struct intc_mask_reg *mr = desc->hw.mask_regs;
318 unsigned int i, j, fn, mode;
319 unsigned long reg_e, reg_d;
320
321 for (i = 0; mr && enum_id && i < desc->hw.nr_mask_regs; i++) {
322 mr = desc->hw.mask_regs + i;
323
324 /*
325 * Skip this entry if there's no auto-distribution
326 * register associated with it.
327 */
328 if (!mr->dist_reg)
329 continue;
330
331 for (j = 0; j < ARRAY_SIZE(mr->enum_ids); j++) {
332 if (mr->enum_ids[j] != enum_id)
333 continue;
334
335 fn = REG_FN_MODIFY_BASE;
336 mode = MODE_ENABLE_REG;
337 reg_e = mr->dist_reg;
338 reg_d = mr->dist_reg;
339
340 fn += (mr->reg_width >> 3) - 1;
341 return _INTC_MK(fn, mode,
342 intc_get_reg(d, reg_e),
343 intc_get_reg(d, reg_d),
344 1,
345 (mr->reg_width - 1) - j);
346 }
347 }
348
349 /*
350 * It's possible we've gotten here with no distribution options
351 * available for the IRQ in question, so we just skip over those.
352 */
353 return 0;
354}
355#else
356static inline void intc_balancing_enable(unsigned int irq)
357{
358}
359
360static inline void intc_balancing_disable(unsigned int irq)
361{
362}
363#endif
364
232static inline void _intc_enable(unsigned int irq, unsigned long handle) 365static inline void _intc_enable(unsigned int irq, unsigned long handle)
233{ 366{
234 struct intc_desc_int *d = get_intc_desc(irq); 367 struct intc_desc_int *d = get_intc_desc(irq);
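/*
 * Sketch of how a CPU's interrupt tables could opt in to the balancing
 * support above: a mask register grows a distribution register, which
 * intc_dist_data() converts into a one-bit enable handle per IRQ.
 * Addresses and enum ids here are placeholders, not from a real SoC.
 */
static struct intc_mask_reg mask_registers[] = {
	{ 0xfe410030, 0xfe410050, 32,	/* set_reg, clr_reg, reg_width */
	  { IRQ0, IRQ1, IRQ2, IRQ3 },
#ifdef CONFIG_INTC_BALANCING
	  .dist_reg = 0xfe410070,
#endif
	},
};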
@@ -244,6 +377,8 @@ static inline void _intc_enable(unsigned int irq, unsigned long handle)
244 intc_enable_fns[_INTC_MODE(handle)](addr, handle, intc_reg_fns\ 377 intc_enable_fns[_INTC_MODE(handle)](addr, handle, intc_reg_fns\
245 [_INTC_FN(handle)], irq); 378 [_INTC_FN(handle)], irq);
246 } 379 }
380
381 intc_balancing_enable(irq);
247} 382}
248 383
249static void intc_enable(unsigned int irq) 384static void intc_enable(unsigned int irq)
@@ -254,10 +389,12 @@ static void intc_enable(unsigned int irq)
254static void intc_disable(unsigned int irq) 389static void intc_disable(unsigned int irq)
255{ 390{
256 struct intc_desc_int *d = get_intc_desc(irq); 391 struct intc_desc_int *d = get_intc_desc(irq);
257 unsigned long handle = (unsigned long) get_irq_chip_data(irq); 392 unsigned long handle = (unsigned long)get_irq_chip_data(irq);
258 unsigned long addr; 393 unsigned long addr;
259 unsigned int cpu; 394 unsigned int cpu;
260 395
396 intc_balancing_disable(irq);
397
261 for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_D(handle)); cpu++) { 398 for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_D(handle)); cpu++) {
262#ifdef CONFIG_SMP 399#ifdef CONFIG_SMP
263 if (!cpumask_test_cpu(cpu, irq_to_desc(irq)->affinity)) 400 if (!cpumask_test_cpu(cpu, irq_to_desc(irq)->affinity))
@@ -336,8 +473,7 @@ static void intc_mask_ack(unsigned int irq)
336 473
337 intc_disable(irq); 474 intc_disable(irq);
338 475
339 /* read register and write zero only to the assocaited bit */ 476 /* read register and write zero only to the associated bit */
340
341 if (handle) { 477 if (handle) {
342 addr = INTC_REG(d, _INTC_ADDR_D(handle), 0); 478 addr = INTC_REG(d, _INTC_ADDR_D(handle), 0);
343 switch (_INTC_FN(handle)) { 479 switch (_INTC_FN(handle)) {
@@ -366,7 +502,8 @@ static struct intc_handle_int *intc_find_irq(struct intc_handle_int *hp,
366{ 502{
367 int i; 503 int i;
368 504
369 /* this doesn't scale well, but... 505 /*
506 * this doesn't scale well, but...
370 * 507 *
 371 * this function should only be used for certain uncommon 508
372 * operations such as intc_set_priority() and intc_set_sense() 509 * operations such as intc_set_priority() and intc_set_sense()
@@ -377,7 +514,6 @@ static struct intc_handle_int *intc_find_irq(struct intc_handle_int *hp,
377 * memory footprint down is to make sure the array is sorted 514 * memory footprint down is to make sure the array is sorted
378 * and then perform a bisect to lookup the irq. 515 * and then perform a bisect to lookup the irq.
379 */ 516 */
380
381 for (i = 0; i < nr_hp; i++) { 517 for (i = 0; i < nr_hp; i++) {
382 if ((hp + i)->irq != irq) 518 if ((hp + i)->irq != irq)
383 continue; 519 continue;
@@ -408,7 +544,6 @@ int intc_set_priority(unsigned int irq, unsigned int prio)
408 * primary masking method is using intc_prio_level[irq] 544 * primary masking method is using intc_prio_level[irq]
409 * priority level will be set during next enable() 545 * priority level will be set during next enable()
410 */ 546 */
411
412 if (_INTC_FN(ihp->handle) != REG_FN_ERR) 547 if (_INTC_FN(ihp->handle) != REG_FN_ERR)
413 _intc_enable(irq, ihp->handle); 548 _intc_enable(irq, ihp->handle);
414 } 549 }
@@ -447,20 +582,6 @@ static int intc_set_sense(unsigned int irq, unsigned int type)
447 return 0; 582 return 0;
448} 583}
449 584
450static unsigned int __init intc_get_reg(struct intc_desc_int *d,
451 unsigned long address)
452{
453 unsigned int k;
454
455 for (k = 0; k < d->nr_reg; k++) {
456 if (d->reg[k] == address)
457 return k;
458 }
459
460 BUG();
461 return 0;
462}
463
464static intc_enum __init intc_grp_id(struct intc_desc *desc, 585static intc_enum __init intc_grp_id(struct intc_desc *desc,
465 intc_enum enum_id) 586 intc_enum enum_id)
466{ 587{
@@ -718,13 +839,14 @@ static void __init intc_register_irq(struct intc_desc *desc,
718 */ 839 */
719 set_bit(irq, intc_irq_map); 840 set_bit(irq, intc_irq_map);
720 841
721 /* Prefer single interrupt source bitmap over other combinations: 842 /*
843 * Prefer single interrupt source bitmap over other combinations:
844 *
722 * 1. bitmap, single interrupt source 845 * 1. bitmap, single interrupt source
723 * 2. priority, single interrupt source 846 * 2. priority, single interrupt source
724 * 3. bitmap, multiple interrupt sources (groups) 847 * 3. bitmap, multiple interrupt sources (groups)
725 * 4. priority, multiple interrupt sources (groups) 848 * 4. priority, multiple interrupt sources (groups)
726 */ 849 */
727
728 data[0] = intc_mask_data(desc, d, enum_id, 0); 850 data[0] = intc_mask_data(desc, d, enum_id, 0);
729 data[1] = intc_prio_data(desc, d, enum_id, 0); 851 data[1] = intc_prio_data(desc, d, enum_id, 0);
730 852
@@ -749,10 +871,11 @@ static void __init intc_register_irq(struct intc_desc *desc,
749 handle_level_irq, "level"); 871 handle_level_irq, "level");
750 set_irq_chip_data(irq, (void *)data[primary]); 872 set_irq_chip_data(irq, (void *)data[primary]);
751 873
752 /* set priority level 874 /*
875 * set priority level
753 * - this needs to be at least 2 for 5-bit priorities on 7780 876 * - this needs to be at least 2 for 5-bit priorities on 7780
754 */ 877 */
755 intc_prio_level[irq] = 2; 878 intc_prio_level[irq] = default_prio_level;
756 879
757 /* enable secondary masking method if present */ 880 /* enable secondary masking method if present */
758 if (data[!primary]) 881 if (data[!primary])
@@ -769,7 +892,6 @@ static void __init intc_register_irq(struct intc_desc *desc,
769 * only secondary priority should access registers, so 892 * only secondary priority should access registers, so
770 * set _INTC_FN(h) = REG_FN_ERR for intc_set_priority() 893 * set _INTC_FN(h) = REG_FN_ERR for intc_set_priority()
771 */ 894 */
772
773 hp->handle &= ~_INTC_MK(0x0f, 0, 0, 0, 0, 0); 895 hp->handle &= ~_INTC_MK(0x0f, 0, 0, 0, 0, 0);
774 hp->handle |= _INTC_MK(REG_FN_ERR, 0, 0, 0, 0, 0); 896 hp->handle |= _INTC_MK(REG_FN_ERR, 0, 0, 0, 0, 0);
775 } 897 }
@@ -790,6 +912,11 @@ static void __init intc_register_irq(struct intc_desc *desc,
790 if (desc->hw.ack_regs) 912 if (desc->hw.ack_regs)
791 ack_handle[irq] = intc_ack_data(desc, d, enum_id); 913 ack_handle[irq] = intc_ack_data(desc, d, enum_id);
792 914
915#ifdef CONFIG_INTC_BALANCING
916 if (desc->hw.mask_regs)
917 dist_handle[irq] = intc_dist_data(desc, d, enum_id);
918#endif
919
793#ifdef CONFIG_ARM 920#ifdef CONFIG_ARM
794 set_irq_flags(irq, IRQF_VALID); /* Enable IRQ on ARM systems */ 921 set_irq_flags(irq, IRQF_VALID); /* Enable IRQ on ARM systems */
795#endif 922#endif
@@ -801,6 +928,8 @@ static unsigned int __init save_reg(struct intc_desc_int *d,
801 unsigned int smp) 928 unsigned int smp)
802{ 929{
803 if (value) { 930 if (value) {
931 value = intc_phys_to_virt(d, value);
932
804 d->reg[cnt] = value; 933 d->reg[cnt] = value;
805#ifdef CONFIG_SMP 934#ifdef CONFIG_SMP
806 d->smp[cnt] = smp; 935 d->smp[cnt] = smp;
@@ -816,25 +945,59 @@ static void intc_redirect_irq(unsigned int irq, struct irq_desc *desc)
816 generic_handle_irq((unsigned int)get_irq_data(irq)); 945 generic_handle_irq((unsigned int)get_irq_data(irq));
817} 946}
818 947
819void __init register_intc_controller(struct intc_desc *desc) 948int __init register_intc_controller(struct intc_desc *desc)
820{ 949{
821 unsigned int i, k, smp; 950 unsigned int i, k, smp;
822 struct intc_hw_desc *hw = &desc->hw; 951 struct intc_hw_desc *hw = &desc->hw;
823 struct intc_desc_int *d; 952 struct intc_desc_int *d;
953 struct resource *res;
954
955 pr_info("intc: Registered controller '%s' with %u IRQs\n",
956 desc->name, hw->nr_vectors);
824 957
825 d = kzalloc(sizeof(*d), GFP_NOWAIT); 958 d = kzalloc(sizeof(*d), GFP_NOWAIT);
959 if (!d)
960 goto err0;
826 961
827 INIT_LIST_HEAD(&d->list); 962 INIT_LIST_HEAD(&d->list);
828 list_add(&d->list, &intc_list); 963 list_add(&d->list, &intc_list);
829 964
965 if (desc->num_resources) {
966 d->nr_windows = desc->num_resources;
967 d->window = kzalloc(d->nr_windows * sizeof(*d->window),
968 GFP_NOWAIT);
969 if (!d->window)
970 goto err1;
971
972 for (k = 0; k < d->nr_windows; k++) {
973 res = desc->resource + k;
974 WARN_ON(resource_type(res) != IORESOURCE_MEM);
975 d->window[k].phys = res->start;
976 d->window[k].size = resource_size(res);
977 d->window[k].virt = ioremap_nocache(res->start,
978 resource_size(res));
979 if (!d->window[k].virt)
980 goto err2;
981 }
982 }
983
830 d->nr_reg = hw->mask_regs ? hw->nr_mask_regs * 2 : 0; 984 d->nr_reg = hw->mask_regs ? hw->nr_mask_regs * 2 : 0;
985#ifdef CONFIG_INTC_BALANCING
986 if (d->nr_reg)
987 d->nr_reg += hw->nr_mask_regs;
988#endif
831 d->nr_reg += hw->prio_regs ? hw->nr_prio_regs * 2 : 0; 989 d->nr_reg += hw->prio_regs ? hw->nr_prio_regs * 2 : 0;
832 d->nr_reg += hw->sense_regs ? hw->nr_sense_regs : 0; 990 d->nr_reg += hw->sense_regs ? hw->nr_sense_regs : 0;
833 d->nr_reg += hw->ack_regs ? hw->nr_ack_regs : 0; 991 d->nr_reg += hw->ack_regs ? hw->nr_ack_regs : 0;
834 992
835 d->reg = kzalloc(d->nr_reg * sizeof(*d->reg), GFP_NOWAIT); 993 d->reg = kzalloc(d->nr_reg * sizeof(*d->reg), GFP_NOWAIT);
994 if (!d->reg)
995 goto err2;
996
836#ifdef CONFIG_SMP 997#ifdef CONFIG_SMP
837 d->smp = kzalloc(d->nr_reg * sizeof(*d->smp), GFP_NOWAIT); 998 d->smp = kzalloc(d->nr_reg * sizeof(*d->smp), GFP_NOWAIT);
999 if (!d->smp)
1000 goto err3;
838#endif 1001#endif
839 k = 0; 1002 k = 0;
840 1003
@@ -843,12 +1006,17 @@ void __init register_intc_controller(struct intc_desc *desc)
843 smp = IS_SMP(hw->mask_regs[i]); 1006 smp = IS_SMP(hw->mask_regs[i]);
844 k += save_reg(d, k, hw->mask_regs[i].set_reg, smp); 1007 k += save_reg(d, k, hw->mask_regs[i].set_reg, smp);
845 k += save_reg(d, k, hw->mask_regs[i].clr_reg, smp); 1008 k += save_reg(d, k, hw->mask_regs[i].clr_reg, smp);
1009#ifdef CONFIG_INTC_BALANCING
1010 k += save_reg(d, k, hw->mask_regs[i].dist_reg, 0);
1011#endif
846 } 1012 }
847 } 1013 }
848 1014
849 if (hw->prio_regs) { 1015 if (hw->prio_regs) {
850 d->prio = kzalloc(hw->nr_vectors * sizeof(*d->prio), 1016 d->prio = kzalloc(hw->nr_vectors * sizeof(*d->prio),
851 GFP_NOWAIT); 1017 GFP_NOWAIT);
1018 if (!d->prio)
1019 goto err4;
852 1020
853 for (i = 0; i < hw->nr_prio_regs; i++) { 1021 for (i = 0; i < hw->nr_prio_regs; i++) {
854 smp = IS_SMP(hw->prio_regs[i]); 1022 smp = IS_SMP(hw->prio_regs[i]);
@@ -860,6 +1028,8 @@ void __init register_intc_controller(struct intc_desc *desc)
860 if (hw->sense_regs) { 1028 if (hw->sense_regs) {
861 d->sense = kzalloc(hw->nr_vectors * sizeof(*d->sense), 1029 d->sense = kzalloc(hw->nr_vectors * sizeof(*d->sense),
862 GFP_NOWAIT); 1030 GFP_NOWAIT);
1031 if (!d->sense)
1032 goto err5;
863 1033
864 for (i = 0; i < hw->nr_sense_regs; i++) 1034 for (i = 0; i < hw->nr_sense_regs; i++)
865 k += save_reg(d, k, hw->sense_regs[i].reg, 0); 1035 k += save_reg(d, k, hw->sense_regs[i].reg, 0);
@@ -906,7 +1076,7 @@ void __init register_intc_controller(struct intc_desc *desc)
906 1076
907 irq_desc = irq_to_desc_alloc_node(irq, numa_node_id()); 1077 irq_desc = irq_to_desc_alloc_node(irq, numa_node_id());
908 if (unlikely(!irq_desc)) { 1078 if (unlikely(!irq_desc)) {
909 pr_info("can't get irq_desc for %d\n", irq); 1079 pr_err("can't get irq_desc for %d\n", irq);
910 continue; 1080 continue;
911 } 1081 }
912 1082
@@ -926,7 +1096,7 @@ void __init register_intc_controller(struct intc_desc *desc)
926 */ 1096 */
927 irq_desc = irq_to_desc_alloc_node(irq2, numa_node_id()); 1097 irq_desc = irq_to_desc_alloc_node(irq2, numa_node_id());
928 if (unlikely(!irq_desc)) { 1098 if (unlikely(!irq_desc)) {
929 pr_info("can't get irq_desc for %d\n", irq2); 1099 pr_err("can't get irq_desc for %d\n", irq2);
930 continue; 1100 continue;
931 } 1101 }
932 1102
@@ -942,8 +1112,100 @@ void __init register_intc_controller(struct intc_desc *desc)
942 /* enable bits matching force_enable after registering irqs */ 1112 /* enable bits matching force_enable after registering irqs */
943 if (desc->force_enable) 1113 if (desc->force_enable)
944 intc_enable_disable_enum(desc, d, desc->force_enable, 1); 1114 intc_enable_disable_enum(desc, d, desc->force_enable, 1);
1115
1116 return 0;
1117err5:
1118 kfree(d->prio);
1119err4:
1120#ifdef CONFIG_SMP
1121 kfree(d->smp);
1122err3:
1123#endif
1124 kfree(d->reg);
1125err2:
1126 for (k = 0; k < d->nr_windows; k++)
1127 if (d->window[k].virt)
1128 iounmap(d->window[k].virt);
1129
1130 kfree(d->window);
1131err1:
1132 kfree(d);
1133err0:
1134 pr_err("unable to allocate INTC memory\n");
1135
1136 return -ENOMEM;
1137}
1138
1139#ifdef CONFIG_INTC_USERIMASK
1140static void __iomem *uimask;
1141
1142int register_intc_userimask(unsigned long addr)
1143{
1144 if (unlikely(uimask))
1145 return -EBUSY;
1146
1147 uimask = ioremap_nocache(addr, SZ_4K);
1148 if (unlikely(!uimask))
1149 return -ENOMEM;
1150
1151 pr_info("intc: userimask support registered for levels 0 -> %d\n",
1152 default_prio_level - 1);
1153
1154 return 0;
1155}
1156
1157static ssize_t
1158show_intc_userimask(struct sysdev_class *cls,
1159 struct sysdev_class_attribute *attr, char *buf)
1160{
1161 return sprintf(buf, "%d\n", (__raw_readl(uimask) >> 4) & 0xf);
1162}
1163
1164static ssize_t
1165store_intc_userimask(struct sysdev_class *cls,
1166 struct sysdev_class_attribute *attr,
1167 const char *buf, size_t count)
1168{
1169 unsigned long level;
1170
1171 level = simple_strtoul(buf, NULL, 10);
1172
1173 /*
1174 * Minimal acceptable IRQ levels are in the 2 - 16 range, but
1175 * these are chomped so as to not interfere with normal IRQs.
1176 *
1177 * Level 1 is a special case on some CPUs in that it's not
1178 * directly settable, but given that USERIMASK cuts off below a
1179 * certain level, we don't care about this limitation here.
1180 * Level 0 on the other hand equates to user masking disabled.
1181 *
1182 * We use default_prio_level as a cut off so that only special
1183 * case opt-in IRQs can be mangled.
1184 */
1185 if (level >= default_prio_level)
1186 return -EINVAL;
1187
1188 __raw_writel(0xa5 << 24 | level << 4, uimask);
1189
1190 return count;
945} 1191}
946 1192
1193static SYSDEV_CLASS_ATTR(userimask, S_IRUSR | S_IWUSR,
1194 show_intc_userimask, store_intc_userimask);
1195#endif
1196
1197static ssize_t
1198show_intc_name(struct sys_device *dev, struct sysdev_attribute *attr, char *buf)
1199{
1200 struct intc_desc_int *d;
1201
1202 d = container_of(dev, struct intc_desc_int, sysdev);
1203
1204 return sprintf(buf, "%s\n", d->chip.name);
1205}
1206
1207static SYSDEV_ATTR(name, S_IRUGO, show_intc_name, NULL);
1208
947static int intc_suspend(struct sys_device *dev, pm_message_t state) 1209static int intc_suspend(struct sys_device *dev, pm_message_t state)
948{ 1210{
949 struct intc_desc_int *d; 1211 struct intc_desc_int *d;
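/*
 * Sketch: after register_intc_controller(), a CPU setup path can opt in
 * to userspace masking by mapping the USERIMASK register (the address
 * below is a placeholder). The cut-off level is then exposed through
 * the "userimask" class attribute, e.g.
 * /sys/devices/system/intc/userimask; writes at or above
 * default_prio_level are rejected with -EINVAL.
 */
#ifdef CONFIG_INTC_USERIMASK
	register_intc_userimask(0xfe410000);
#endif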
@@ -1003,19 +1265,28 @@ static int __init register_intc_sysdevs(void)
1003 int id = 0; 1265 int id = 0;
1004 1266
1005 error = sysdev_class_register(&intc_sysdev_class); 1267 error = sysdev_class_register(&intc_sysdev_class);
1268#ifdef CONFIG_INTC_USERIMASK
1269 if (!error && uimask)
1270 error = sysdev_class_create_file(&intc_sysdev_class,
1271 &attr_userimask);
1272#endif
1006 if (!error) { 1273 if (!error) {
1007 list_for_each_entry(d, &intc_list, list) { 1274 list_for_each_entry(d, &intc_list, list) {
1008 d->sysdev.id = id; 1275 d->sysdev.id = id;
1009 d->sysdev.cls = &intc_sysdev_class; 1276 d->sysdev.cls = &intc_sysdev_class;
1010 error = sysdev_register(&d->sysdev); 1277 error = sysdev_register(&d->sysdev);
1278 if (error == 0)
1279 error = sysdev_create_file(&d->sysdev,
1280 &attr_name);
1011 if (error) 1281 if (error)
1012 break; 1282 break;
1283
1013 id++; 1284 id++;
1014 } 1285 }
1015 } 1286 }
1016 1287
1017 if (error) 1288 if (error)
1018 pr_warning("intc: sysdev registration error\n"); 1289 pr_err("intc: sysdev registration error\n");
1019 1290
1020 return error; 1291 return error;
1021} 1292}
@@ -1048,7 +1319,7 @@ unsigned int create_irq_nr(unsigned int irq_want, int node)
1048 1319
1049 desc = irq_to_desc_alloc_node(new, node); 1320 desc = irq_to_desc_alloc_node(new, node);
1050 if (unlikely(!desc)) { 1321 if (unlikely(!desc)) {
1051 pr_info("can't get irq_desc for %d\n", new); 1322 pr_err("can't get irq_desc for %d\n", new);
1052 goto out_unlock; 1323 goto out_unlock;
1053 } 1324 }
1054 1325
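
Finally, the new resource handling in register_intc_controller() lets a
controller describe register windows that are not identity-mapped. A hedged
sketch follows, with every name and address assumed for illustration; the
vectors/groups/mask/prio/sense tables are the usual per-SoC
DECLARE_INTC_DESC() inputs:

	static struct resource intc_resources[] = {
		[0] = {
			.start	= 0xfe410000,
			.end	= 0xfe410fff,
			.flags	= IORESOURCE_MEM,
		},
	};

	static DECLARE_INTC_DESC(intc_desc, "soc-intc", vectors, groups,
				 mask_registers, prio_registers,
				 sense_registers);

	void __init plat_irq_setup(void)
	{
		intc_desc.resource = intc_resources;
		intc_desc.num_resources = ARRAY_SIZE(intc_resources);

		/*
		 * Each IORESOURCE_MEM entry is ioremapped at registration,
		 * and save_reg() rewrites physical register addresses via
		 * intc_phys_to_virt() before they are cached in d->reg[].
		 */
		if (register_intc_controller(&intc_desc) != 0)
			panic("could not register INTC controller");
	}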