aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/sh
diff options
context:
space:
mode:
authorAndrea Bastoni <bastoni@cs.unc.edu>2010-05-30 19:16:45 -0400
committerAndrea Bastoni <bastoni@cs.unc.edu>2010-05-30 19:16:45 -0400
commitada47b5fe13d89735805b566185f4885f5a3f750 (patch)
tree644b88f8a71896307d71438e9b3af49126ffb22b /drivers/sh
parent43e98717ad40a4ae64545b5ba047c7b86aa44f4f (diff)
parent3280f21d43ee541f97f8cda5792150d2dbec20d5 (diff)
Merge branch 'wip-2.6.34' into old-private-masterarchived-private-master
Diffstat (limited to 'drivers/sh')
-rw-r--r--drivers/sh/Makefile1
-rw-r--r--drivers/sh/intc.c401
-rw-r--r--drivers/sh/maple/maple.c4
-rw-r--r--drivers/sh/pfc.c604
4 files changed, 936 insertions, 74 deletions
diff --git a/drivers/sh/Makefile b/drivers/sh/Makefile
index 6a025cefe6dc..4956bf1f2134 100644
--- a/drivers/sh/Makefile
+++ b/drivers/sh/Makefile
@@ -3,4 +3,5 @@
3# 3#
4obj-$(CONFIG_SUPERHYWAY) += superhyway/ 4obj-$(CONFIG_SUPERHYWAY) += superhyway/
5obj-$(CONFIG_MAPLE) += maple/ 5obj-$(CONFIG_MAPLE) += maple/
6obj-$(CONFIG_GENERIC_GPIO) += pfc.o
6obj-y += intc.o 7obj-y += intc.o
diff --git a/drivers/sh/intc.c b/drivers/sh/intc.c
index 559b5fe9dc0f..94ad6bd86a00 100644
--- a/drivers/sh/intc.c
+++ b/drivers/sh/intc.c
@@ -2,6 +2,7 @@
2 * Shared interrupt handling code for IPR and INTC2 types of IRQs. 2 * Shared interrupt handling code for IPR and INTC2 types of IRQs.
3 * 3 *
4 * Copyright (C) 2007, 2008 Magnus Damm 4 * Copyright (C) 2007, 2008 Magnus Damm
5 * Copyright (C) 2009, 2010 Paul Mundt
5 * 6 *
6 * Based on intc2.c and ipr.c 7 * Based on intc2.c and ipr.c
7 * 8 *
@@ -19,11 +20,14 @@
19#include <linux/irq.h> 20#include <linux/irq.h>
20#include <linux/module.h> 21#include <linux/module.h>
21#include <linux/io.h> 22#include <linux/io.h>
23#include <linux/slab.h>
22#include <linux/interrupt.h> 24#include <linux/interrupt.h>
23#include <linux/sh_intc.h> 25#include <linux/sh_intc.h>
24#include <linux/sysdev.h> 26#include <linux/sysdev.h>
25#include <linux/list.h> 27#include <linux/list.h>
26#include <linux/topology.h> 28#include <linux/topology.h>
29#include <linux/bitmap.h>
30#include <linux/cpumask.h>
27 31
28#define _INTC_MK(fn, mode, addr_e, addr_d, width, shift) \ 32#define _INTC_MK(fn, mode, addr_e, addr_d, width, shift) \
29 ((shift) | ((width) << 5) | ((fn) << 9) | ((mode) << 13) | \ 33 ((shift) | ((width) << 5) | ((fn) << 9) | ((mode) << 13) | \
@@ -59,6 +63,20 @@ struct intc_desc_int {
59 63
60static LIST_HEAD(intc_list); 64static LIST_HEAD(intc_list);
61 65
66/*
67 * The intc_irq_map provides a global map of bound IRQ vectors for a
68 * given platform. Allocation of IRQs are either static through the CPU
69 * vector map, or dynamic in the case of board mux vectors or MSI.
70 *
71 * As this is a central point for all IRQ controllers on the system,
72 * each of the available sources are mapped out here. This combined with
73 * sparseirq makes it quite trivial to keep the vector map tightly packed
74 * when dynamically creating IRQs, as well as tying in to otherwise
75 * unused irq_desc positions in the sparse array.
76 */
77static DECLARE_BITMAP(intc_irq_map, NR_IRQS);
78static DEFINE_SPINLOCK(vector_lock);
79
62#ifdef CONFIG_SMP 80#ifdef CONFIG_SMP
63#define IS_SMP(x) x.smp 81#define IS_SMP(x) x.smp
64#define INTC_REG(d, x, c) (d->reg[(x)] + ((d->smp[(x)] & 0xff) * c)) 82#define INTC_REG(d, x, c) (d->reg[(x)] + ((d->smp[(x)] & 0xff) * c))
@@ -70,9 +88,7 @@ static LIST_HEAD(intc_list);
70#endif 88#endif
71 89
72static unsigned int intc_prio_level[NR_IRQS]; /* for now */ 90static unsigned int intc_prio_level[NR_IRQS]; /* for now */
73#if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4A)
74static unsigned long ack_handle[NR_IRQS]; 91static unsigned long ack_handle[NR_IRQS];
75#endif
76 92
77static inline struct intc_desc_int *get_intc_desc(unsigned int irq) 93static inline struct intc_desc_int *get_intc_desc(unsigned int irq)
78{ 94{
@@ -220,6 +236,10 @@ static inline void _intc_enable(unsigned int irq, unsigned long handle)
220 unsigned int cpu; 236 unsigned int cpu;
221 237
222 for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_E(handle)); cpu++) { 238 for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_E(handle)); cpu++) {
239#ifdef CONFIG_SMP
240 if (!cpumask_test_cpu(cpu, irq_to_desc(irq)->affinity))
241 continue;
242#endif
223 addr = INTC_REG(d, _INTC_ADDR_E(handle), cpu); 243 addr = INTC_REG(d, _INTC_ADDR_E(handle), cpu);
224 intc_enable_fns[_INTC_MODE(handle)](addr, handle, intc_reg_fns\ 244 intc_enable_fns[_INTC_MODE(handle)](addr, handle, intc_reg_fns\
225 [_INTC_FN(handle)], irq); 245 [_INTC_FN(handle)], irq);
@@ -239,18 +259,75 @@ static void intc_disable(unsigned int irq)
239 unsigned int cpu; 259 unsigned int cpu;
240 260
241 for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_D(handle)); cpu++) { 261 for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_D(handle)); cpu++) {
262#ifdef CONFIG_SMP
263 if (!cpumask_test_cpu(cpu, irq_to_desc(irq)->affinity))
264 continue;
265#endif
242 addr = INTC_REG(d, _INTC_ADDR_D(handle), cpu); 266 addr = INTC_REG(d, _INTC_ADDR_D(handle), cpu);
243 intc_disable_fns[_INTC_MODE(handle)](addr, handle,intc_reg_fns\ 267 intc_disable_fns[_INTC_MODE(handle)](addr, handle,intc_reg_fns\
244 [_INTC_FN(handle)], irq); 268 [_INTC_FN(handle)], irq);
245 } 269 }
246} 270}
247 271
272static void (*intc_enable_noprio_fns[])(unsigned long addr,
273 unsigned long handle,
274 void (*fn)(unsigned long,
275 unsigned long,
276 unsigned long),
277 unsigned int irq) = {
278 [MODE_ENABLE_REG] = intc_mode_field,
279 [MODE_MASK_REG] = intc_mode_zero,
280 [MODE_DUAL_REG] = intc_mode_field,
281 [MODE_PRIO_REG] = intc_mode_field,
282 [MODE_PCLR_REG] = intc_mode_field,
283};
284
285static void intc_enable_disable(struct intc_desc_int *d,
286 unsigned long handle, int do_enable)
287{
288 unsigned long addr;
289 unsigned int cpu;
290 void (*fn)(unsigned long, unsigned long,
291 void (*)(unsigned long, unsigned long, unsigned long),
292 unsigned int);
293
294 if (do_enable) {
295 for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_E(handle)); cpu++) {
296 addr = INTC_REG(d, _INTC_ADDR_E(handle), cpu);
297 fn = intc_enable_noprio_fns[_INTC_MODE(handle)];
298 fn(addr, handle, intc_reg_fns[_INTC_FN(handle)], 0);
299 }
300 } else {
301 for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_D(handle)); cpu++) {
302 addr = INTC_REG(d, _INTC_ADDR_D(handle), cpu);
303 fn = intc_disable_fns[_INTC_MODE(handle)];
304 fn(addr, handle, intc_reg_fns[_INTC_FN(handle)], 0);
305 }
306 }
307}
308
248static int intc_set_wake(unsigned int irq, unsigned int on) 309static int intc_set_wake(unsigned int irq, unsigned int on)
249{ 310{
250 return 0; /* allow wakeup, but setup hardware in intc_suspend() */ 311 return 0; /* allow wakeup, but setup hardware in intc_suspend() */
251} 312}
252 313
253#if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4A) 314#ifdef CONFIG_SMP
315/*
316 * This is held with the irq desc lock held, so we don't require any
317 * additional locking here at the intc desc level. The affinity mask is
318 * later tested in the enable/disable paths.
319 */
320static int intc_set_affinity(unsigned int irq, const struct cpumask *cpumask)
321{
322 if (!cpumask_intersects(cpumask, cpu_online_mask))
323 return -1;
324
325 cpumask_copy(irq_to_desc(irq)->affinity, cpumask);
326
327 return 0;
328}
329#endif
330
254static void intc_mask_ack(unsigned int irq) 331static void intc_mask_ack(unsigned int irq)
255{ 332{
256 struct intc_desc_int *d = get_intc_desc(irq); 333 struct intc_desc_int *d = get_intc_desc(irq);
@@ -282,7 +359,6 @@ static void intc_mask_ack(unsigned int irq)
282 } 359 }
283 } 360 }
284} 361}
285#endif
286 362
287static struct intc_handle_int *intc_find_irq(struct intc_handle_int *hp, 363static struct intc_handle_int *intc_find_irq(struct intc_handle_int *hp,
288 unsigned int nr_hp, 364 unsigned int nr_hp,
@@ -388,11 +464,11 @@ static unsigned int __init intc_get_reg(struct intc_desc_int *d,
388static intc_enum __init intc_grp_id(struct intc_desc *desc, 464static intc_enum __init intc_grp_id(struct intc_desc *desc,
389 intc_enum enum_id) 465 intc_enum enum_id)
390{ 466{
391 struct intc_group *g = desc->groups; 467 struct intc_group *g = desc->hw.groups;
392 unsigned int i, j; 468 unsigned int i, j;
393 469
394 for (i = 0; g && enum_id && i < desc->nr_groups; i++) { 470 for (i = 0; g && enum_id && i < desc->hw.nr_groups; i++) {
395 g = desc->groups + i; 471 g = desc->hw.groups + i;
396 472
397 for (j = 0; g->enum_ids[j]; j++) { 473 for (j = 0; g->enum_ids[j]; j++) {
398 if (g->enum_ids[j] != enum_id) 474 if (g->enum_ids[j] != enum_id)
@@ -405,19 +481,21 @@ static intc_enum __init intc_grp_id(struct intc_desc *desc,
405 return 0; 481 return 0;
406} 482}
407 483
408static unsigned int __init intc_mask_data(struct intc_desc *desc, 484static unsigned int __init _intc_mask_data(struct intc_desc *desc,
409 struct intc_desc_int *d, 485 struct intc_desc_int *d,
410 intc_enum enum_id, int do_grps) 486 intc_enum enum_id,
487 unsigned int *reg_idx,
488 unsigned int *fld_idx)
411{ 489{
412 struct intc_mask_reg *mr = desc->mask_regs; 490 struct intc_mask_reg *mr = desc->hw.mask_regs;
413 unsigned int i, j, fn, mode; 491 unsigned int fn, mode;
414 unsigned long reg_e, reg_d; 492 unsigned long reg_e, reg_d;
415 493
416 for (i = 0; mr && enum_id && i < desc->nr_mask_regs; i++) { 494 while (mr && enum_id && *reg_idx < desc->hw.nr_mask_regs) {
417 mr = desc->mask_regs + i; 495 mr = desc->hw.mask_regs + *reg_idx;
418 496
419 for (j = 0; j < ARRAY_SIZE(mr->enum_ids); j++) { 497 for (; *fld_idx < ARRAY_SIZE(mr->enum_ids); (*fld_idx)++) {
420 if (mr->enum_ids[j] != enum_id) 498 if (mr->enum_ids[*fld_idx] != enum_id)
421 continue; 499 continue;
422 500
423 if (mr->set_reg && mr->clr_reg) { 501 if (mr->set_reg && mr->clr_reg) {
@@ -443,29 +521,49 @@ static unsigned int __init intc_mask_data(struct intc_desc *desc,
443 intc_get_reg(d, reg_e), 521 intc_get_reg(d, reg_e),
444 intc_get_reg(d, reg_d), 522 intc_get_reg(d, reg_d),
445 1, 523 1,
446 (mr->reg_width - 1) - j); 524 (mr->reg_width - 1) - *fld_idx);
447 } 525 }
526
527 *fld_idx = 0;
528 (*reg_idx)++;
448 } 529 }
449 530
531 return 0;
532}
533
534static unsigned int __init intc_mask_data(struct intc_desc *desc,
535 struct intc_desc_int *d,
536 intc_enum enum_id, int do_grps)
537{
538 unsigned int i = 0;
539 unsigned int j = 0;
540 unsigned int ret;
541
542 ret = _intc_mask_data(desc, d, enum_id, &i, &j);
543 if (ret)
544 return ret;
545
450 if (do_grps) 546 if (do_grps)
451 return intc_mask_data(desc, d, intc_grp_id(desc, enum_id), 0); 547 return intc_mask_data(desc, d, intc_grp_id(desc, enum_id), 0);
452 548
453 return 0; 549 return 0;
454} 550}
455 551
456static unsigned int __init intc_prio_data(struct intc_desc *desc, 552static unsigned int __init _intc_prio_data(struct intc_desc *desc,
457 struct intc_desc_int *d, 553 struct intc_desc_int *d,
458 intc_enum enum_id, int do_grps) 554 intc_enum enum_id,
555 unsigned int *reg_idx,
556 unsigned int *fld_idx)
459{ 557{
460 struct intc_prio_reg *pr = desc->prio_regs; 558 struct intc_prio_reg *pr = desc->hw.prio_regs;
461 unsigned int i, j, fn, mode, bit; 559 unsigned int fn, n, mode, bit;
462 unsigned long reg_e, reg_d; 560 unsigned long reg_e, reg_d;
463 561
464 for (i = 0; pr && enum_id && i < desc->nr_prio_regs; i++) { 562 while (pr && enum_id && *reg_idx < desc->hw.nr_prio_regs) {
465 pr = desc->prio_regs + i; 563 pr = desc->hw.prio_regs + *reg_idx;
466 564
467 for (j = 0; j < ARRAY_SIZE(pr->enum_ids); j++) { 565 for (; *fld_idx < ARRAY_SIZE(pr->enum_ids); (*fld_idx)++) {
468 if (pr->enum_ids[j] != enum_id) 566 if (pr->enum_ids[*fld_idx] != enum_id)
469 continue; 567 continue;
470 568
471 if (pr->set_reg && pr->clr_reg) { 569 if (pr->set_reg && pr->clr_reg) {
@@ -483,35 +581,79 @@ static unsigned int __init intc_prio_data(struct intc_desc *desc,
483 } 581 }
484 582
485 fn += (pr->reg_width >> 3) - 1; 583 fn += (pr->reg_width >> 3) - 1;
584 n = *fld_idx + 1;
486 585
487 BUG_ON((j + 1) * pr->field_width > pr->reg_width); 586 BUG_ON(n * pr->field_width > pr->reg_width);
488 587
489 bit = pr->reg_width - ((j + 1) * pr->field_width); 588 bit = pr->reg_width - (n * pr->field_width);
490 589
491 return _INTC_MK(fn, mode, 590 return _INTC_MK(fn, mode,
492 intc_get_reg(d, reg_e), 591 intc_get_reg(d, reg_e),
493 intc_get_reg(d, reg_d), 592 intc_get_reg(d, reg_d),
494 pr->field_width, bit); 593 pr->field_width, bit);
495 } 594 }
595
596 *fld_idx = 0;
597 (*reg_idx)++;
496 } 598 }
497 599
600 return 0;
601}
602
603static unsigned int __init intc_prio_data(struct intc_desc *desc,
604 struct intc_desc_int *d,
605 intc_enum enum_id, int do_grps)
606{
607 unsigned int i = 0;
608 unsigned int j = 0;
609 unsigned int ret;
610
611 ret = _intc_prio_data(desc, d, enum_id, &i, &j);
612 if (ret)
613 return ret;
614
498 if (do_grps) 615 if (do_grps)
499 return intc_prio_data(desc, d, intc_grp_id(desc, enum_id), 0); 616 return intc_prio_data(desc, d, intc_grp_id(desc, enum_id), 0);
500 617
501 return 0; 618 return 0;
502} 619}
503 620
504#if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4A) 621static void __init intc_enable_disable_enum(struct intc_desc *desc,
622 struct intc_desc_int *d,
623 intc_enum enum_id, int enable)
624{
625 unsigned int i, j, data;
626
627 /* go through and enable/disable all mask bits */
628 i = j = 0;
629 do {
630 data = _intc_mask_data(desc, d, enum_id, &i, &j);
631 if (data)
632 intc_enable_disable(d, data, enable);
633 j++;
634 } while (data);
635
636 /* go through and enable/disable all priority fields */
637 i = j = 0;
638 do {
639 data = _intc_prio_data(desc, d, enum_id, &i, &j);
640 if (data)
641 intc_enable_disable(d, data, enable);
642
643 j++;
644 } while (data);
645}
646
505static unsigned int __init intc_ack_data(struct intc_desc *desc, 647static unsigned int __init intc_ack_data(struct intc_desc *desc,
506 struct intc_desc_int *d, 648 struct intc_desc_int *d,
507 intc_enum enum_id) 649 intc_enum enum_id)
508{ 650{
509 struct intc_mask_reg *mr = desc->ack_regs; 651 struct intc_mask_reg *mr = desc->hw.ack_regs;
510 unsigned int i, j, fn, mode; 652 unsigned int i, j, fn, mode;
511 unsigned long reg_e, reg_d; 653 unsigned long reg_e, reg_d;
512 654
513 for (i = 0; mr && enum_id && i < desc->nr_ack_regs; i++) { 655 for (i = 0; mr && enum_id && i < desc->hw.nr_ack_regs; i++) {
514 mr = desc->ack_regs + i; 656 mr = desc->hw.ack_regs + i;
515 657
516 for (j = 0; j < ARRAY_SIZE(mr->enum_ids); j++) { 658 for (j = 0; j < ARRAY_SIZE(mr->enum_ids); j++) {
517 if (mr->enum_ids[j] != enum_id) 659 if (mr->enum_ids[j] != enum_id)
@@ -533,17 +675,16 @@ static unsigned int __init intc_ack_data(struct intc_desc *desc,
533 675
534 return 0; 676 return 0;
535} 677}
536#endif
537 678
538static unsigned int __init intc_sense_data(struct intc_desc *desc, 679static unsigned int __init intc_sense_data(struct intc_desc *desc,
539 struct intc_desc_int *d, 680 struct intc_desc_int *d,
540 intc_enum enum_id) 681 intc_enum enum_id)
541{ 682{
542 struct intc_sense_reg *sr = desc->sense_regs; 683 struct intc_sense_reg *sr = desc->hw.sense_regs;
543 unsigned int i, j, fn, bit; 684 unsigned int i, j, fn, bit;
544 685
545 for (i = 0; sr && enum_id && i < desc->nr_sense_regs; i++) { 686 for (i = 0; sr && enum_id && i < desc->hw.nr_sense_regs; i++) {
546 sr = desc->sense_regs + i; 687 sr = desc->hw.sense_regs + i;
547 688
548 for (j = 0; j < ARRAY_SIZE(sr->enum_ids); j++) { 689 for (j = 0; j < ARRAY_SIZE(sr->enum_ids); j++) {
549 if (sr->enum_ids[j] != enum_id) 690 if (sr->enum_ids[j] != enum_id)
@@ -572,6 +713,11 @@ static void __init intc_register_irq(struct intc_desc *desc,
572 struct intc_handle_int *hp; 713 struct intc_handle_int *hp;
573 unsigned int data[2], primary; 714 unsigned int data[2], primary;
574 715
716 /*
717 * Register the IRQ position with the global IRQ map
718 */
719 set_bit(irq, intc_irq_map);
720
575 /* Prefer single interrupt source bitmap over other combinations: 721 /* Prefer single interrupt source bitmap over other combinations:
576 * 1. bitmap, single interrupt source 722 * 1. bitmap, single interrupt source
577 * 2. priority, single interrupt source 723 * 2. priority, single interrupt source
@@ -641,9 +787,11 @@ static void __init intc_register_irq(struct intc_desc *desc,
641 /* irq should be disabled by default */ 787 /* irq should be disabled by default */
642 d->chip.mask(irq); 788 d->chip.mask(irq);
643 789
644#if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4A) 790 if (desc->hw.ack_regs)
645 if (desc->ack_regs)
646 ack_handle[irq] = intc_ack_data(desc, d, enum_id); 791 ack_handle[irq] = intc_ack_data(desc, d, enum_id);
792
793#ifdef CONFIG_ARM
794 set_irq_flags(irq, IRQF_VALID); /* Enable IRQ on ARM systems */
647#endif 795#endif
648} 796}
649 797
@@ -671,6 +819,7 @@ static void intc_redirect_irq(unsigned int irq, struct irq_desc *desc)
671void __init register_intc_controller(struct intc_desc *desc) 819void __init register_intc_controller(struct intc_desc *desc)
672{ 820{
673 unsigned int i, k, smp; 821 unsigned int i, k, smp;
822 struct intc_hw_desc *hw = &desc->hw;
674 struct intc_desc_int *d; 823 struct intc_desc_int *d;
675 824
676 d = kzalloc(sizeof(*d), GFP_NOWAIT); 825 d = kzalloc(sizeof(*d), GFP_NOWAIT);
@@ -678,43 +827,42 @@ void __init register_intc_controller(struct intc_desc *desc)
678 INIT_LIST_HEAD(&d->list); 827 INIT_LIST_HEAD(&d->list);
679 list_add(&d->list, &intc_list); 828 list_add(&d->list, &intc_list);
680 829
681 d->nr_reg = desc->mask_regs ? desc->nr_mask_regs * 2 : 0; 830 d->nr_reg = hw->mask_regs ? hw->nr_mask_regs * 2 : 0;
682 d->nr_reg += desc->prio_regs ? desc->nr_prio_regs * 2 : 0; 831 d->nr_reg += hw->prio_regs ? hw->nr_prio_regs * 2 : 0;
683 d->nr_reg += desc->sense_regs ? desc->nr_sense_regs : 0; 832 d->nr_reg += hw->sense_regs ? hw->nr_sense_regs : 0;
833 d->nr_reg += hw->ack_regs ? hw->nr_ack_regs : 0;
684 834
685#if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4A)
686 d->nr_reg += desc->ack_regs ? desc->nr_ack_regs : 0;
687#endif
688 d->reg = kzalloc(d->nr_reg * sizeof(*d->reg), GFP_NOWAIT); 835 d->reg = kzalloc(d->nr_reg * sizeof(*d->reg), GFP_NOWAIT);
689#ifdef CONFIG_SMP 836#ifdef CONFIG_SMP
690 d->smp = kzalloc(d->nr_reg * sizeof(*d->smp), GFP_NOWAIT); 837 d->smp = kzalloc(d->nr_reg * sizeof(*d->smp), GFP_NOWAIT);
691#endif 838#endif
692 k = 0; 839 k = 0;
693 840
694 if (desc->mask_regs) { 841 if (hw->mask_regs) {
695 for (i = 0; i < desc->nr_mask_regs; i++) { 842 for (i = 0; i < hw->nr_mask_regs; i++) {
696 smp = IS_SMP(desc->mask_regs[i]); 843 smp = IS_SMP(hw->mask_regs[i]);
697 k += save_reg(d, k, desc->mask_regs[i].set_reg, smp); 844 k += save_reg(d, k, hw->mask_regs[i].set_reg, smp);
698 k += save_reg(d, k, desc->mask_regs[i].clr_reg, smp); 845 k += save_reg(d, k, hw->mask_regs[i].clr_reg, smp);
699 } 846 }
700 } 847 }
701 848
702 if (desc->prio_regs) { 849 if (hw->prio_regs) {
703 d->prio = kzalloc(desc->nr_vectors * sizeof(*d->prio), GFP_NOWAIT); 850 d->prio = kzalloc(hw->nr_vectors * sizeof(*d->prio),
851 GFP_NOWAIT);
704 852
705 for (i = 0; i < desc->nr_prio_regs; i++) { 853 for (i = 0; i < hw->nr_prio_regs; i++) {
706 smp = IS_SMP(desc->prio_regs[i]); 854 smp = IS_SMP(hw->prio_regs[i]);
707 k += save_reg(d, k, desc->prio_regs[i].set_reg, smp); 855 k += save_reg(d, k, hw->prio_regs[i].set_reg, smp);
708 k += save_reg(d, k, desc->prio_regs[i].clr_reg, smp); 856 k += save_reg(d, k, hw->prio_regs[i].clr_reg, smp);
709 } 857 }
710 } 858 }
711 859
712 if (desc->sense_regs) { 860 if (hw->sense_regs) {
713 d->sense = kzalloc(desc->nr_vectors * sizeof(*d->sense), GFP_NOWAIT); 861 d->sense = kzalloc(hw->nr_vectors * sizeof(*d->sense),
862 GFP_NOWAIT);
714 863
715 for (i = 0; i < desc->nr_sense_regs; i++) { 864 for (i = 0; i < hw->nr_sense_regs; i++)
716 k += save_reg(d, k, desc->sense_regs[i].reg, 0); 865 k += save_reg(d, k, hw->sense_regs[i].reg, 0);
717 }
718 } 866 }
719 867
720 d->chip.name = desc->name; 868 d->chip.name = desc->name;
@@ -726,21 +874,30 @@ void __init register_intc_controller(struct intc_desc *desc)
726 d->chip.shutdown = intc_disable; 874 d->chip.shutdown = intc_disable;
727 d->chip.set_type = intc_set_sense; 875 d->chip.set_type = intc_set_sense;
728 d->chip.set_wake = intc_set_wake; 876 d->chip.set_wake = intc_set_wake;
877#ifdef CONFIG_SMP
878 d->chip.set_affinity = intc_set_affinity;
879#endif
729 880
730#if defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4A) 881 if (hw->ack_regs) {
731 if (desc->ack_regs) { 882 for (i = 0; i < hw->nr_ack_regs; i++)
732 for (i = 0; i < desc->nr_ack_regs; i++) 883 k += save_reg(d, k, hw->ack_regs[i].set_reg, 0);
733 k += save_reg(d, k, desc->ack_regs[i].set_reg, 0);
734 884
735 d->chip.mask_ack = intc_mask_ack; 885 d->chip.mask_ack = intc_mask_ack;
736 } 886 }
737#endif 887
888 /* disable bits matching force_disable before registering irqs */
889 if (desc->force_disable)
890 intc_enable_disable_enum(desc, d, desc->force_disable, 0);
891
892 /* disable bits matching force_enable before registering irqs */
893 if (desc->force_enable)
894 intc_enable_disable_enum(desc, d, desc->force_enable, 0);
738 895
739 BUG_ON(k > 256); /* _INTC_ADDR_E() and _INTC_ADDR_D() are 8 bits */ 896 BUG_ON(k > 256); /* _INTC_ADDR_E() and _INTC_ADDR_D() are 8 bits */
740 897
741 /* register the vectors one by one */ 898 /* register the vectors one by one */
742 for (i = 0; i < desc->nr_vectors; i++) { 899 for (i = 0; i < hw->nr_vectors; i++) {
743 struct intc_vect *vect = desc->vectors + i; 900 struct intc_vect *vect = hw->vectors + i;
744 unsigned int irq = evt2irq(vect->vect); 901 unsigned int irq = evt2irq(vect->vect);
745 struct irq_desc *irq_desc; 902 struct irq_desc *irq_desc;
746 903
@@ -755,8 +912,8 @@ void __init register_intc_controller(struct intc_desc *desc)
755 912
756 intc_register_irq(desc, d, vect->enum_id, irq); 913 intc_register_irq(desc, d, vect->enum_id, irq);
757 914
758 for (k = i + 1; k < desc->nr_vectors; k++) { 915 for (k = i + 1; k < hw->nr_vectors; k++) {
759 struct intc_vect *vect2 = desc->vectors + k; 916 struct intc_vect *vect2 = hw->vectors + k;
760 unsigned int irq2 = evt2irq(vect2->vect); 917 unsigned int irq2 = evt2irq(vect2->vect);
761 918
762 if (vect->enum_id != vect2->enum_id) 919 if (vect->enum_id != vect2->enum_id)
@@ -776,11 +933,15 @@ void __init register_intc_controller(struct intc_desc *desc)
776 vect2->enum_id = 0; 933 vect2->enum_id = 0;
777 934
778 /* redirect this interrupts to the first one */ 935 /* redirect this interrupts to the first one */
779 set_irq_chip_and_handler_name(irq2, &d->chip, 936 set_irq_chip(irq2, &dummy_irq_chip);
780 intc_redirect_irq, "redirect"); 937 set_irq_chained_handler(irq2, intc_redirect_irq);
781 set_irq_data(irq2, (void *)irq); 938 set_irq_data(irq2, (void *)irq);
782 } 939 }
783 } 940 }
941
942 /* enable bits matching force_enable after registering irqs */
943 if (desc->force_enable)
944 intc_enable_disable_enum(desc, d, desc->force_enable, 1);
784} 945}
785 946
786static int intc_suspend(struct sys_device *dev, pm_message_t state) 947static int intc_suspend(struct sys_device *dev, pm_message_t state)
@@ -797,6 +958,8 @@ static int intc_suspend(struct sys_device *dev, pm_message_t state)
797 if (d->state.event != PM_EVENT_FREEZE) 958 if (d->state.event != PM_EVENT_FREEZE)
798 break; 959 break;
799 for_each_irq_desc(irq, desc) { 960 for_each_irq_desc(irq, desc) {
961 if (desc->handle_irq == intc_redirect_irq)
962 continue;
800 if (desc->chip != &d->chip) 963 if (desc->chip != &d->chip)
801 continue; 964 continue;
802 if (desc->status & IRQ_DISABLED) 965 if (desc->status & IRQ_DISABLED)
@@ -856,5 +1019,99 @@ static int __init register_intc_sysdevs(void)
856 1019
857 return error; 1020 return error;
858} 1021}
859
860device_initcall(register_intc_sysdevs); 1022device_initcall(register_intc_sysdevs);
1023
1024/*
1025 * Dynamic IRQ allocation and deallocation
1026 */
1027unsigned int create_irq_nr(unsigned int irq_want, int node)
1028{
1029 unsigned int irq = 0, new;
1030 unsigned long flags;
1031 struct irq_desc *desc;
1032
1033 spin_lock_irqsave(&vector_lock, flags);
1034
1035 /*
1036 * First try the wanted IRQ
1037 */
1038 if (test_and_set_bit(irq_want, intc_irq_map) == 0) {
1039 new = irq_want;
1040 } else {
1041 /* .. then fall back to scanning. */
1042 new = find_first_zero_bit(intc_irq_map, nr_irqs);
1043 if (unlikely(new == nr_irqs))
1044 goto out_unlock;
1045
1046 __set_bit(new, intc_irq_map);
1047 }
1048
1049 desc = irq_to_desc_alloc_node(new, node);
1050 if (unlikely(!desc)) {
1051 pr_info("can't get irq_desc for %d\n", new);
1052 goto out_unlock;
1053 }
1054
1055 desc = move_irq_desc(desc, node);
1056 irq = new;
1057
1058out_unlock:
1059 spin_unlock_irqrestore(&vector_lock, flags);
1060
1061 if (irq > 0) {
1062 dynamic_irq_init(irq);
1063#ifdef CONFIG_ARM
1064 set_irq_flags(irq, IRQF_VALID); /* Enable IRQ on ARM systems */
1065#endif
1066 }
1067
1068 return irq;
1069}
1070
1071int create_irq(void)
1072{
1073 int nid = cpu_to_node(smp_processor_id());
1074 int irq;
1075
1076 irq = create_irq_nr(NR_IRQS_LEGACY, nid);
1077 if (irq == 0)
1078 irq = -1;
1079
1080 return irq;
1081}
1082
1083void destroy_irq(unsigned int irq)
1084{
1085 unsigned long flags;
1086
1087 dynamic_irq_cleanup(irq);
1088
1089 spin_lock_irqsave(&vector_lock, flags);
1090 __clear_bit(irq, intc_irq_map);
1091 spin_unlock_irqrestore(&vector_lock, flags);
1092}
1093
1094int reserve_irq_vector(unsigned int irq)
1095{
1096 unsigned long flags;
1097 int ret = 0;
1098
1099 spin_lock_irqsave(&vector_lock, flags);
1100 if (test_and_set_bit(irq, intc_irq_map))
1101 ret = -EBUSY;
1102 spin_unlock_irqrestore(&vector_lock, flags);
1103
1104 return ret;
1105}
1106
1107void reserve_irq_legacy(void)
1108{
1109 unsigned long flags;
1110 int i, j;
1111
1112 spin_lock_irqsave(&vector_lock, flags);
1113 j = find_first_bit(intc_irq_map, nr_irqs);
1114 for (i = 0; i < j; i++)
1115 __set_bit(i, intc_irq_map);
1116 spin_unlock_irqrestore(&vector_lock, flags);
1117}
diff --git a/drivers/sh/maple/maple.c b/drivers/sh/maple/maple.c
index 93c20e135ee1..4e8f57d4131f 100644
--- a/drivers/sh/maple/maple.c
+++ b/drivers/sh/maple/maple.c
@@ -106,7 +106,7 @@ static void maple_dma_reset(void)
106 * max delay is 11 106 * max delay is 11
107 */ 107 */
108 ctrl_outl(MAPLE_2MBPS | MAPLE_TIMEOUT(0xFFFF), MAPLE_SPEED); 108 ctrl_outl(MAPLE_2MBPS | MAPLE_TIMEOUT(0xFFFF), MAPLE_SPEED);
109 ctrl_outl(PHYSADDR(maple_sendbuf), MAPLE_DMAADDR); 109 ctrl_outl(virt_to_phys(maple_sendbuf), MAPLE_DMAADDR);
110 ctrl_outl(1, MAPLE_ENABLE); 110 ctrl_outl(1, MAPLE_ENABLE);
111} 111}
112 112
@@ -258,7 +258,7 @@ static void maple_build_block(struct mapleq *mq)
258 maple_lastptr = maple_sendptr; 258 maple_lastptr = maple_sendptr;
259 259
260 *maple_sendptr++ = (port << 16) | len | 0x80000000; 260 *maple_sendptr++ = (port << 16) | len | 0x80000000;
261 *maple_sendptr++ = PHYSADDR(mq->recvbuf->buf); 261 *maple_sendptr++ = virt_to_phys(mq->recvbuf->buf);
262 *maple_sendptr++ = 262 *maple_sendptr++ =
263 mq->command | (to << 8) | (from << 16) | (len << 24); 263 mq->command | (to << 8) | (from << 16) | (len << 24);
264 while (len-- > 0) 264 while (len-- > 0)
diff --git a/drivers/sh/pfc.c b/drivers/sh/pfc.c
new file mode 100644
index 000000000000..cf0303acab8e
--- /dev/null
+++ b/drivers/sh/pfc.c
@@ -0,0 +1,604 @@
1/*
2 * Pinmuxed GPIO support for SuperH.
3 *
4 * Copyright (C) 2008 Magnus Damm
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 */
10#include <linux/errno.h>
11#include <linux/kernel.h>
12#include <linux/list.h>
13#include <linux/module.h>
14#include <linux/clk.h>
15#include <linux/err.h>
16#include <linux/io.h>
17#include <linux/irq.h>
18#include <linux/bitops.h>
19#include <linux/gpio.h>
20
21static int enum_in_range(pinmux_enum_t enum_id, struct pinmux_range *r)
22{
23 if (enum_id < r->begin)
24 return 0;
25
26 if (enum_id > r->end)
27 return 0;
28
29 return 1;
30}
31
32static unsigned long gpio_read_raw_reg(unsigned long reg,
33 unsigned long reg_width)
34{
35 switch (reg_width) {
36 case 8:
37 return __raw_readb(reg);
38 case 16:
39 return __raw_readw(reg);
40 case 32:
41 return __raw_readl(reg);
42 }
43
44 BUG();
45 return 0;
46}
47
48static void gpio_write_raw_reg(unsigned long reg,
49 unsigned long reg_width,
50 unsigned long data)
51{
52 switch (reg_width) {
53 case 8:
54 __raw_writeb(data, reg);
55 return;
56 case 16:
57 __raw_writew(data, reg);
58 return;
59 case 32:
60 __raw_writel(data, reg);
61 return;
62 }
63
64 BUG();
65}
66
67static void gpio_write_bit(struct pinmux_data_reg *dr,
68 unsigned long in_pos, unsigned long value)
69{
70 unsigned long pos;
71
72 pos = dr->reg_width - (in_pos + 1);
73
74 pr_debug("write_bit addr = %lx, value = %d, pos = %ld, "
75 "r_width = %ld\n",
76 dr->reg, !!value, pos, dr->reg_width);
77
78 if (value)
79 set_bit(pos, &dr->reg_shadow);
80 else
81 clear_bit(pos, &dr->reg_shadow);
82
83 gpio_write_raw_reg(dr->reg, dr->reg_width, dr->reg_shadow);
84}
85
86static int gpio_read_reg(unsigned long reg, unsigned long reg_width,
87 unsigned long field_width, unsigned long in_pos)
88{
89 unsigned long data, mask, pos;
90
91 data = 0;
92 mask = (1 << field_width) - 1;
93 pos = reg_width - ((in_pos + 1) * field_width);
94
95 pr_debug("read_reg: addr = %lx, pos = %ld, "
96 "r_width = %ld, f_width = %ld\n",
97 reg, pos, reg_width, field_width);
98
99 data = gpio_read_raw_reg(reg, reg_width);
100 return (data >> pos) & mask;
101}
102
103static void gpio_write_reg(unsigned long reg, unsigned long reg_width,
104 unsigned long field_width, unsigned long in_pos,
105 unsigned long value)
106{
107 unsigned long mask, pos;
108
109 mask = (1 << field_width) - 1;
110 pos = reg_width - ((in_pos + 1) * field_width);
111
112 pr_debug("write_reg addr = %lx, value = %ld, pos = %ld, "
113 "r_width = %ld, f_width = %ld\n",
114 reg, value, pos, reg_width, field_width);
115
116 mask = ~(mask << pos);
117 value = value << pos;
118
119 switch (reg_width) {
120 case 8:
121 __raw_writeb((__raw_readb(reg) & mask) | value, reg);
122 break;
123 case 16:
124 __raw_writew((__raw_readw(reg) & mask) | value, reg);
125 break;
126 case 32:
127 __raw_writel((__raw_readl(reg) & mask) | value, reg);
128 break;
129 }
130}
131
132static int setup_data_reg(struct pinmux_info *gpioc, unsigned gpio)
133{
134 struct pinmux_gpio *gpiop = &gpioc->gpios[gpio];
135 struct pinmux_data_reg *data_reg;
136 int k, n;
137
138 if (!enum_in_range(gpiop->enum_id, &gpioc->data))
139 return -1;
140
141 k = 0;
142 while (1) {
143 data_reg = gpioc->data_regs + k;
144
145 if (!data_reg->reg_width)
146 break;
147
148 for (n = 0; n < data_reg->reg_width; n++) {
149 if (data_reg->enum_ids[n] == gpiop->enum_id) {
150 gpiop->flags &= ~PINMUX_FLAG_DREG;
151 gpiop->flags |= (k << PINMUX_FLAG_DREG_SHIFT);
152 gpiop->flags &= ~PINMUX_FLAG_DBIT;
153 gpiop->flags |= (n << PINMUX_FLAG_DBIT_SHIFT);
154 return 0;
155 }
156 }
157 k++;
158 }
159
160 BUG();
161
162 return -1;
163}
164
165static void setup_data_regs(struct pinmux_info *gpioc)
166{
167 struct pinmux_data_reg *drp;
168 int k;
169
170 for (k = gpioc->first_gpio; k <= gpioc->last_gpio; k++)
171 setup_data_reg(gpioc, k);
172
173 k = 0;
174 while (1) {
175 drp = gpioc->data_regs + k;
176
177 if (!drp->reg_width)
178 break;
179
180 drp->reg_shadow = gpio_read_raw_reg(drp->reg, drp->reg_width);
181 k++;
182 }
183}
184
185static int get_data_reg(struct pinmux_info *gpioc, unsigned gpio,
186 struct pinmux_data_reg **drp, int *bitp)
187{
188 struct pinmux_gpio *gpiop = &gpioc->gpios[gpio];
189 int k, n;
190
191 if (!enum_in_range(gpiop->enum_id, &gpioc->data))
192 return -1;
193
194 k = (gpiop->flags & PINMUX_FLAG_DREG) >> PINMUX_FLAG_DREG_SHIFT;
195 n = (gpiop->flags & PINMUX_FLAG_DBIT) >> PINMUX_FLAG_DBIT_SHIFT;
196 *drp = gpioc->data_regs + k;
197 *bitp = n;
198 return 0;
199}
200
/*
 * Locate the config register field that selects the given enum_id.
 *
 * Scans the (reg_width == 0 terminated) config register table.  Each
 * register holds (r_width / f_width) fields of f_width bits, and each
 * field can take (1 << f_width) values, so the enum_ids[] array per
 * register has (r_width / f_width) * (1 << f_width) entries.
 *
 * On success returns 0 and fills:
 *   *crp    - the matching config register descriptor
 *   *indexp - the flat index into enum_ids[] (encodes field + value)
 *   *cntp   - the per-field use counter for that field
 * Returns -1 if no register selects enum_id.
 */
static int get_config_reg(struct pinmux_info *gpioc, pinmux_enum_t enum_id,
			  struct pinmux_cfg_reg **crp, int *indexp,
			  unsigned long **cntp)
{
	struct pinmux_cfg_reg *config_reg;
	unsigned long r_width, f_width;
	int k, n;

	k = 0;
	while (1) {
		config_reg = gpioc->cfg_regs + k;

		r_width = config_reg->reg_width;
		f_width = config_reg->field_width;

		if (!r_width)
			break;
		/*
		 * Note: '*' binds tighter than '<<', so this bound is
		 * (r_width / f_width) << f_width, i.e. number of fields
		 * times values per field.
		 */
		for (n = 0; n < (r_width / f_width) * 1 << f_width; n++) {
			if (config_reg->enum_ids[n] == enum_id) {
				*crp = config_reg;
				*indexp = n;
				/* One counter per field, not per value. */
				*cntp = &config_reg->cnt[n / (1 << f_width)];
				return 0;
			}
		}
		k++;
	}

	return -1;
}
231
/*
 * Iterate over the data/mark enum pairs associated with a pin.
 *
 * gpio_data[] stores pairs laid out side by side: a data or mark
 * enum_id followed by its paired enum_id.  On the first call pass
 * pos == 0; the pin's own enum_id is located by linear search and the
 * *following* entry is returned through *enum_idp.  On subsequent
 * calls pass the previously returned position to continue walking the
 * pin's MARK list (data[pos + 1] each time).
 *
 * Returns the position to pass to the next call (> 0), or -1 on error.
 * A returned *enum_idp of 0 terminates the walk (caller's convention).
 */
static int get_gpio_enum_id(struct pinmux_info *gpioc, unsigned gpio,
			    int pos, pinmux_enum_t *enum_idp)
{
	pinmux_enum_t enum_id = gpioc->gpios[gpio].enum_id;
	pinmux_enum_t *data = gpioc->gpio_data;
	int k;

	/* The pin must belong to either the data or the mark range. */
	if (!enum_in_range(enum_id, &gpioc->data)) {
		if (!enum_in_range(enum_id, &gpioc->mark)) {
			pr_err("non data/mark enum_id for gpio %d\n", gpio);
			return -1;
		}
	}

	/* Resuming a walk: return the entry after the last position. */
	if (pos) {
		*enum_idp = data[pos + 1];
		return pos + 1;
	}

	/* First call: locate the pin's enum_id, return its pair. */
	for (k = 0; k < gpioc->gpio_data_size; k++) {
		if (data[k] == enum_id) {
			*enum_idp = data[k + 1];
			return k + 1;
		}
	}

	pr_err("cannot locate data/mark enum_id for gpio %d\n", gpio);
	return -1;
}
261
262static void write_config_reg(struct pinmux_info *gpioc,
263 struct pinmux_cfg_reg *crp,
264 int index)
265{
266 unsigned long ncomb, pos, value;
267
268 ncomb = 1 << crp->field_width;
269 pos = index / ncomb;
270 value = index % ncomb;
271
272 gpio_write_reg(crp->reg, crp->reg_width, crp->field_width, pos, value);
273}
274
275static int check_config_reg(struct pinmux_info *gpioc,
276 struct pinmux_cfg_reg *crp,
277 int index)
278{
279 unsigned long ncomb, pos, value;
280
281 ncomb = 1 << crp->field_width;
282 pos = index / ncomb;
283 value = index % ncomb;
284
285 if (gpio_read_reg(crp->reg, crp->reg_width,
286 crp->field_width, pos) == value)
287 return 0;
288
289 return -1;
290}
291
/*
 * cfg_mode values for pinmux_config_gpio():
 *   GPIO_CFG_DRYRUN - verify the hardware state without writing
 *   GPIO_CFG_REQ    - program the registers and bump use counters
 *   GPIO_CFG_FREE   - drop use counters (no register writes)
 */
enum { GPIO_CFG_DRYRUN, GPIO_CFG_REQ, GPIO_CFG_FREE };

/*
 * Configure (or release) a pin as the requested pinmux_type.
 *
 * Walks the pin's enum list via get_gpio_enum_id(), filters entries to
 * function enums plus those matching the range implied by pinmux_type,
 * and applies cfg_mode to each matching config register field.
 *
 * Returns 0 on success, -1 on error.  Caller must hold gpio_lock
 * (NOTE(review): inferred from all call sites in this file taking it).
 */
static int pinmux_config_gpio(struct pinmux_info *gpioc, unsigned gpio,
			      int pinmux_type, int cfg_mode)
{
	struct pinmux_cfg_reg *cr = NULL;
	pinmux_enum_t enum_id;
	struct pinmux_range *range;
	int in_range, pos, index;
	unsigned long *cntp;

	/* Map the requested type to the enum range it must match. */
	switch (pinmux_type) {

	case PINMUX_TYPE_FUNCTION:
		range = NULL;
		break;

	case PINMUX_TYPE_OUTPUT:
		range = &gpioc->output;
		break;

	case PINMUX_TYPE_INPUT:
		range = &gpioc->input;
		break;

	case PINMUX_TYPE_INPUT_PULLUP:
		range = &gpioc->input_pu;
		break;

	case PINMUX_TYPE_INPUT_PULLDOWN:
		range = &gpioc->input_pd;
		break;

	default:
		goto out_err;
	}

	pos = 0;
	enum_id = 0;
	index = 0;
	while (1) {
		pos = get_gpio_enum_id(gpioc, gpio, pos, &enum_id);
		if (pos <= 0)
			goto out_err;

		/* enum_id == 0 terminates the pin's enum list. */
		if (!enum_id)
			break;

		/* first check if this is a function enum */
		in_range = enum_in_range(enum_id, &gpioc->function);
		if (!in_range) {
			/* not a function enum */
			if (range) {
				/*
				 * other range exists, so this pin is
				 * a regular GPIO pin that now is being
				 * bound to a specific direction.
				 *
				 * for this case we only allow function enums
				 * and the enums that match the other range.
				 */
				in_range = enum_in_range(enum_id, range);

				/*
				 * special case pass through for fixed
				 * input-only or output-only pins without
				 * function enum register association.
				 */
				if (in_range && enum_id == range->force)
					continue;
			} else {
				/*
				 * no other range exists, so this pin
				 * must then be of the function type.
				 *
				 * allow function type pins to select
				 * any combination of function/in/out
				 * in their MARK lists.
				 */
				in_range = 1;
			}
		}

		if (!in_range)
			continue;

		if (get_config_reg(gpioc, enum_id, &cr, &index, &cntp) != 0)
			goto out_err;

		switch (cfg_mode) {
		case GPIO_CFG_DRYRUN:
			/*
			 * Only verify fields that are in use; a zero
			 * counter or a matching hardware value is fine.
			 */
			if (!*cntp || !check_config_reg(gpioc, cr, index))
				continue;
			break;

		case GPIO_CFG_REQ:
			write_config_reg(gpioc, cr, index);
			*cntp = *cntp + 1;
			break;

		case GPIO_CFG_FREE:
			*cntp = *cntp - 1;
			break;
		}
	}

	return 0;
 out_err:
	return -1;
}
402
403static DEFINE_SPINLOCK(gpio_lock);
404
405static struct pinmux_info *chip_to_pinmux(struct gpio_chip *chip)
406{
407 return container_of(chip, struct pinmux_info, chip);
408}
409
/*
 * gpio_chip .request hook: claim a pin for use.
 *
 * Under gpio_lock, rejects pins that are already claimed (type is not
 * PINMUX_TYPE_NONE).  Pins without an associated data register are
 * function pins and are configured immediately; a dry run validates
 * the hardware state before the real write (a dry-run pass followed
 * by a failed write indicates an inconsistency and is a BUG).  Pins
 * with a data register are just marked PINMUX_TYPE_GPIO; direction is
 * set later via the direction hooks.
 *
 * Returns 0 on success, -EINVAL on failure.
 */
static int sh_gpio_request(struct gpio_chip *chip, unsigned offset)
{
	struct pinmux_info *gpioc = chip_to_pinmux(chip);
	struct pinmux_data_reg *dummy;
	unsigned long flags;
	int i, ret, pinmux_type;

	ret = -EINVAL;

	if (!gpioc)
		goto err_out;

	spin_lock_irqsave(&gpio_lock, flags);

	/* Already claimed? */
	if ((gpioc->gpios[offset].flags & PINMUX_FLAG_TYPE) != PINMUX_TYPE_NONE)
		goto err_unlock;

	/* setup pin function here if no data is associated with pin */

	if (get_data_reg(gpioc, offset, &dummy, &i) != 0)
		pinmux_type = PINMUX_TYPE_FUNCTION;
	else
		pinmux_type = PINMUX_TYPE_GPIO;

	if (pinmux_type == PINMUX_TYPE_FUNCTION) {
		/* Validate before touching hardware... */
		if (pinmux_config_gpio(gpioc, offset,
				       pinmux_type,
				       GPIO_CFG_DRYRUN) != 0)
			goto err_unlock;

		/* ...then the real write must not fail. */
		if (pinmux_config_gpio(gpioc, offset,
				       pinmux_type,
				       GPIO_CFG_REQ) != 0)
			BUG();
	}

	gpioc->gpios[offset].flags &= ~PINMUX_FLAG_TYPE;
	gpioc->gpios[offset].flags |= pinmux_type;

	ret = 0;
 err_unlock:
	spin_unlock_irqrestore(&gpio_lock, flags);
 err_out:
	return ret;
}
455
456static void sh_gpio_free(struct gpio_chip *chip, unsigned offset)
457{
458 struct pinmux_info *gpioc = chip_to_pinmux(chip);
459 unsigned long flags;
460 int pinmux_type;
461
462 if (!gpioc)
463 return;
464
465 spin_lock_irqsave(&gpio_lock, flags);
466
467 pinmux_type = gpioc->gpios[offset].flags & PINMUX_FLAG_TYPE;
468 pinmux_config_gpio(gpioc, offset, pinmux_type, GPIO_CFG_FREE);
469 gpioc->gpios[offset].flags &= ~PINMUX_FLAG_TYPE;
470 gpioc->gpios[offset].flags |= PINMUX_TYPE_NONE;
471
472 spin_unlock_irqrestore(&gpio_lock, flags);
473}
474
/*
 * Switch a pin to a new direction type (input/output/pull variants).
 *
 * Only pins currently typed GPIO or one of the direction types may
 * transition; an existing direction configuration is freed first.
 * The new configuration is dry-run validated before being written;
 * a write failure after a successful dry run is a BUG.
 *
 * Returns 0 on success, -EINVAL on failure.  Caller must hold
 * gpio_lock (NOTE(review): inferred from the callers in this file).
 */
static int pinmux_direction(struct pinmux_info *gpioc,
			    unsigned gpio, int new_pinmux_type)
{
	int pinmux_type;
	int ret = -EINVAL;

	if (!gpioc)
		goto err_out;

	pinmux_type = gpioc->gpios[gpio].flags & PINMUX_FLAG_TYPE;

	switch (pinmux_type) {
	case PINMUX_TYPE_GPIO:
		break;
	case PINMUX_TYPE_OUTPUT:
	case PINMUX_TYPE_INPUT:
	case PINMUX_TYPE_INPUT_PULLUP:
	case PINMUX_TYPE_INPUT_PULLDOWN:
		/* Release the old direction before reconfiguring. */
		pinmux_config_gpio(gpioc, gpio, pinmux_type, GPIO_CFG_FREE);
		break;
	default:
		goto err_out;
	}

	if (pinmux_config_gpio(gpioc, gpio,
			       new_pinmux_type,
			       GPIO_CFG_DRYRUN) != 0)
		goto err_out;

	if (pinmux_config_gpio(gpioc, gpio,
			       new_pinmux_type,
			       GPIO_CFG_REQ) != 0)
		BUG();

	gpioc->gpios[gpio].flags &= ~PINMUX_FLAG_TYPE;
	gpioc->gpios[gpio].flags |= new_pinmux_type;

	ret = 0;
 err_out:
	return ret;
}
516
517static int sh_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
518{
519 struct pinmux_info *gpioc = chip_to_pinmux(chip);
520 unsigned long flags;
521 int ret;
522
523 spin_lock_irqsave(&gpio_lock, flags);
524 ret = pinmux_direction(gpioc, offset, PINMUX_TYPE_INPUT);
525 spin_unlock_irqrestore(&gpio_lock, flags);
526
527 return ret;
528}
529
530static void sh_gpio_set_value(struct pinmux_info *gpioc,
531 unsigned gpio, int value)
532{
533 struct pinmux_data_reg *dr = NULL;
534 int bit = 0;
535
536 if (!gpioc || get_data_reg(gpioc, gpio, &dr, &bit) != 0)
537 BUG();
538 else
539 gpio_write_bit(dr, bit, value);
540}
541
542static int sh_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
543 int value)
544{
545 struct pinmux_info *gpioc = chip_to_pinmux(chip);
546 unsigned long flags;
547 int ret;
548
549 sh_gpio_set_value(gpioc, offset, value);
550 spin_lock_irqsave(&gpio_lock, flags);
551 ret = pinmux_direction(gpioc, offset, PINMUX_TYPE_OUTPUT);
552 spin_unlock_irqrestore(&gpio_lock, flags);
553
554 return ret;
555}
556
557static int sh_gpio_get_value(struct pinmux_info *gpioc, unsigned gpio)
558{
559 struct pinmux_data_reg *dr = NULL;
560 int bit = 0;
561
562 if (!gpioc || get_data_reg(gpioc, gpio, &dr, &bit) != 0) {
563 BUG();
564 return 0;
565 }
566
567 return gpio_read_reg(dr->reg, dr->reg_width, 1, bit);
568}
569
/* gpio_chip .get hook: thin wrapper around sh_gpio_get_value(). */
static int sh_gpio_get(struct gpio_chip *chip, unsigned offset)
{
	struct pinmux_info *gpioc = chip_to_pinmux(chip);

	return sh_gpio_get_value(gpioc, offset);
}
574
/* gpio_chip .set hook: thin wrapper around sh_gpio_set_value(). */
static void sh_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
	struct pinmux_info *gpioc = chip_to_pinmux(chip);

	sh_gpio_set_value(gpioc, offset, value);
}
579
580int register_pinmux(struct pinmux_info *pip)
581{
582 struct gpio_chip *chip = &pip->chip;
583
584 pr_info("sh pinmux: %s handling gpio %d -> %d\n",
585 pip->name, pip->first_gpio, pip->last_gpio);
586
587 setup_data_regs(pip);
588
589 chip->request = sh_gpio_request;
590 chip->free = sh_gpio_free;
591 chip->direction_input = sh_gpio_direction_input;
592 chip->get = sh_gpio_get;
593 chip->direction_output = sh_gpio_direction_output;
594 chip->set = sh_gpio_set;
595
596 WARN_ON(pip->first_gpio != 0); /* needs testing */
597
598 chip->label = pip->name;
599 chip->owner = THIS_MODULE;
600 chip->base = pip->first_gpio;
601 chip->ngpio = (pip->last_gpio - pip->first_gpio) + 1;
602
603 return gpiochip_add(chip);
604}