about | summary | refs | log | tree | commit | diff | stats
path: root/arch/arm/common
diff options
context:
space:
mode:
Diffstat (limited to 'arch/arm/common')
-rw-r--r--arch/arm/common/gic.c102
-rw-r--r--arch/arm/common/it8152.c7
-rw-r--r--arch/arm/common/pl330.c3
-rw-r--r--arch/arm/common/vic.c16
4 files changed, 51 insertions, 77 deletions
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c
index b2dc2dd7f1df..f0783be17352 100644
--- a/arch/arm/common/gic.c
+++ b/arch/arm/common/gic.c
@@ -41,6 +41,7 @@
41 41
42#include <asm/irq.h> 42#include <asm/irq.h>
43#include <asm/exception.h> 43#include <asm/exception.h>
44#include <asm/smp_plat.h>
44#include <asm/mach/irq.h> 45#include <asm/mach/irq.h>
45#include <asm/hardware/gic.h> 46#include <asm/hardware/gic.h>
46 47
@@ -50,7 +51,6 @@ union gic_base {
50}; 51};
51 52
52struct gic_chip_data { 53struct gic_chip_data {
53 unsigned int irq_offset;
54 union gic_base dist_base; 54 union gic_base dist_base;
55 union gic_base cpu_base; 55 union gic_base cpu_base;
56#ifdef CONFIG_CPU_PM 56#ifdef CONFIG_CPU_PM
@@ -60,9 +60,7 @@ struct gic_chip_data {
60 u32 __percpu *saved_ppi_enable; 60 u32 __percpu *saved_ppi_enable;
61 u32 __percpu *saved_ppi_conf; 61 u32 __percpu *saved_ppi_conf;
62#endif 62#endif
63#ifdef CONFIG_IRQ_DOMAIN 63 struct irq_domain *domain;
64 struct irq_domain domain;
65#endif
66 unsigned int gic_irqs; 64 unsigned int gic_irqs;
67#ifdef CONFIG_GIC_NON_BANKED 65#ifdef CONFIG_GIC_NON_BANKED
68 void __iomem *(*get_base)(union gic_base *); 66 void __iomem *(*get_base)(union gic_base *);
@@ -281,7 +279,7 @@ asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
281 irqnr = irqstat & ~0x1c00; 279 irqnr = irqstat & ~0x1c00;
282 280
283 if (likely(irqnr > 15 && irqnr < 1021)) { 281 if (likely(irqnr > 15 && irqnr < 1021)) {
284 irqnr = irq_domain_to_irq(&gic->domain, irqnr); 282 irqnr = irq_find_mapping(gic->domain, irqnr);
285 handle_IRQ(irqnr, regs); 283 handle_IRQ(irqnr, regs);
286 continue; 284 continue;
287 } 285 }
@@ -313,8 +311,8 @@ static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
313 if (gic_irq == 1023) 311 if (gic_irq == 1023)
314 goto out; 312 goto out;
315 313
316 cascade_irq = irq_domain_to_irq(&chip_data->domain, gic_irq); 314 cascade_irq = irq_find_mapping(chip_data->domain, gic_irq);
317 if (unlikely(gic_irq < 32 || gic_irq > 1020 || cascade_irq >= NR_IRQS)) 315 if (unlikely(gic_irq < 32 || gic_irq > 1020))
318 do_bad_IRQ(cascade_irq, desc); 316 do_bad_IRQ(cascade_irq, desc);
319 else 317 else
320 generic_handle_irq(cascade_irq); 318 generic_handle_irq(cascade_irq);
@@ -347,16 +345,11 @@ void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
347 345
348static void __init gic_dist_init(struct gic_chip_data *gic) 346static void __init gic_dist_init(struct gic_chip_data *gic)
349{ 347{
350 unsigned int i, irq; 348 unsigned int i;
351 u32 cpumask; 349 u32 cpumask;
352 unsigned int gic_irqs = gic->gic_irqs; 350 unsigned int gic_irqs = gic->gic_irqs;
353 struct irq_domain *domain = &gic->domain;
354 void __iomem *base = gic_data_dist_base(gic); 351 void __iomem *base = gic_data_dist_base(gic);
355 u32 cpu = 0; 352 u32 cpu = cpu_logical_map(smp_processor_id());
356
357#ifdef CONFIG_SMP
358 cpu = cpu_logical_map(smp_processor_id());
359#endif
360 353
361 cpumask = 1 << cpu; 354 cpumask = 1 << cpu;
362 cpumask |= cpumask << 8; 355 cpumask |= cpumask << 8;
@@ -389,23 +382,6 @@ static void __init gic_dist_init(struct gic_chip_data *gic)
389 for (i = 32; i < gic_irqs; i += 32) 382 for (i = 32; i < gic_irqs; i += 32)
390 writel_relaxed(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32); 383 writel_relaxed(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32);
391 384
392 /*
393 * Setup the Linux IRQ subsystem.
394 */
395 irq_domain_for_each_irq(domain, i, irq) {
396 if (i < 32) {
397 irq_set_percpu_devid(irq);
398 irq_set_chip_and_handler(irq, &gic_chip,
399 handle_percpu_devid_irq);
400 set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN);
401 } else {
402 irq_set_chip_and_handler(irq, &gic_chip,
403 handle_fasteoi_irq);
404 set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
405 }
406 irq_set_chip_data(irq, gic);
407 }
408
409 writel_relaxed(1, base + GIC_DIST_CTRL); 385 writel_relaxed(1, base + GIC_DIST_CTRL);
410} 386}
411 387
@@ -621,11 +597,27 @@ static void __init gic_pm_init(struct gic_chip_data *gic)
621} 597}
622#endif 598#endif
623 599
624#ifdef CONFIG_OF 600static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
625static int gic_irq_domain_dt_translate(struct irq_domain *d, 601 irq_hw_number_t hw)
626 struct device_node *controller, 602{
627 const u32 *intspec, unsigned int intsize, 603 if (hw < 32) {
628 unsigned long *out_hwirq, unsigned int *out_type) 604 irq_set_percpu_devid(irq);
605 irq_set_chip_and_handler(irq, &gic_chip,
606 handle_percpu_devid_irq);
607 set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN);
608 } else {
609 irq_set_chip_and_handler(irq, &gic_chip,
610 handle_fasteoi_irq);
611 set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
612 }
613 irq_set_chip_data(irq, d->host_data);
614 return 0;
615}
616
617static int gic_irq_domain_xlate(struct irq_domain *d,
618 struct device_node *controller,
619 const u32 *intspec, unsigned int intsize,
620 unsigned long *out_hwirq, unsigned int *out_type)
629{ 621{
630 if (d->of_node != controller) 622 if (d->of_node != controller)
631 return -EINVAL; 623 return -EINVAL;
@@ -642,26 +634,23 @@ static int gic_irq_domain_dt_translate(struct irq_domain *d,
642 *out_type = intspec[2] & IRQ_TYPE_SENSE_MASK; 634 *out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;
643 return 0; 635 return 0;
644} 636}
645#endif
646 637
647const struct irq_domain_ops gic_irq_domain_ops = { 638const struct irq_domain_ops gic_irq_domain_ops = {
648#ifdef CONFIG_OF 639 .map = gic_irq_domain_map,
649 .dt_translate = gic_irq_domain_dt_translate, 640 .xlate = gic_irq_domain_xlate,
650#endif
651}; 641};
652 642
653void __init gic_init_bases(unsigned int gic_nr, int irq_start, 643void __init gic_init_bases(unsigned int gic_nr, int irq_start,
654 void __iomem *dist_base, void __iomem *cpu_base, 644 void __iomem *dist_base, void __iomem *cpu_base,
655 u32 percpu_offset) 645 u32 percpu_offset, struct device_node *node)
656{ 646{
647 irq_hw_number_t hwirq_base;
657 struct gic_chip_data *gic; 648 struct gic_chip_data *gic;
658 struct irq_domain *domain; 649 int gic_irqs, irq_base;
659 int gic_irqs;
660 650
661 BUG_ON(gic_nr >= MAX_GIC_NR); 651 BUG_ON(gic_nr >= MAX_GIC_NR);
662 652
663 gic = &gic_data[gic_nr]; 653 gic = &gic_data[gic_nr];
664 domain = &gic->domain;
665#ifdef CONFIG_GIC_NON_BANKED 654#ifdef CONFIG_GIC_NON_BANKED
666 if (percpu_offset) { /* Frankein-GIC without banked registers... */ 655 if (percpu_offset) { /* Frankein-GIC without banked registers... */
667 unsigned int cpu; 656 unsigned int cpu;
@@ -697,10 +686,10 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start,
697 * For primary GICs, skip over SGIs. 686 * For primary GICs, skip over SGIs.
698 * For secondary GICs, skip over PPIs, too. 687 * For secondary GICs, skip over PPIs, too.
699 */ 688 */
700 domain->hwirq_base = 32; 689 hwirq_base = 32;
701 if (gic_nr == 0) { 690 if (gic_nr == 0) {
702 if ((irq_start & 31) > 0) { 691 if ((irq_start & 31) > 0) {
703 domain->hwirq_base = 16; 692 hwirq_base = 16;
704 if (irq_start != -1) 693 if (irq_start != -1)
705 irq_start = (irq_start & ~31) + 16; 694 irq_start = (irq_start & ~31) + 16;
706 } 695 }
@@ -716,17 +705,17 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start,
716 gic_irqs = 1020; 705 gic_irqs = 1020;
717 gic->gic_irqs = gic_irqs; 706 gic->gic_irqs = gic_irqs;
718 707
719 domain->nr_irq = gic_irqs - domain->hwirq_base; 708 gic_irqs -= hwirq_base; /* calculate # of irqs to allocate */
720 domain->irq_base = irq_alloc_descs(irq_start, 16, domain->nr_irq, 709 irq_base = irq_alloc_descs(irq_start, 16, gic_irqs, numa_node_id());
721 numa_node_id()); 710 if (IS_ERR_VALUE(irq_base)) {
722 if (IS_ERR_VALUE(domain->irq_base)) {
723 WARN(1, "Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n", 711 WARN(1, "Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
724 irq_start); 712 irq_start);
725 domain->irq_base = irq_start; 713 irq_base = irq_start;
726 } 714 }
727 domain->priv = gic; 715 gic->domain = irq_domain_add_legacy(node, gic_irqs, irq_base,
728 domain->ops = &gic_irq_domain_ops; 716 hwirq_base, &gic_irq_domain_ops, gic);
729 irq_domain_add(domain); 717 if (WARN_ON(!gic->domain))
718 return;
730 719
731 gic_chip.flags |= gic_arch_extn.flags; 720 gic_chip.flags |= gic_arch_extn.flags;
732 gic_dist_init(gic); 721 gic_dist_init(gic);
@@ -771,7 +760,6 @@ int __init gic_of_init(struct device_node *node, struct device_node *parent)
771 void __iomem *dist_base; 760 void __iomem *dist_base;
772 u32 percpu_offset; 761 u32 percpu_offset;
773 int irq; 762 int irq;
774 struct irq_domain *domain = &gic_data[gic_cnt].domain;
775 763
776 if (WARN_ON(!node)) 764 if (WARN_ON(!node))
777 return -ENODEV; 765 return -ENODEV;
@@ -785,9 +773,7 @@ int __init gic_of_init(struct device_node *node, struct device_node *parent)
785 if (of_property_read_u32(node, "cpu-offset", &percpu_offset)) 773 if (of_property_read_u32(node, "cpu-offset", &percpu_offset))
786 percpu_offset = 0; 774 percpu_offset = 0;
787 775
788 domain->of_node = of_node_get(node); 776 gic_init_bases(gic_cnt, -1, dist_base, cpu_base, percpu_offset, node);
789
790 gic_init_bases(gic_cnt, -1, dist_base, cpu_base, percpu_offset);
791 777
792 if (parent) { 778 if (parent) {
793 irq = irq_of_parse_and_map(node, 0); 779 irq = irq_of_parse_and_map(node, 0);
diff --git a/arch/arm/common/it8152.c b/arch/arm/common/it8152.c
index 9384c2d02baa..dcb13494ca0d 100644
--- a/arch/arm/common/it8152.c
+++ b/arch/arm/common/it8152.c
@@ -320,13 +320,6 @@ err0:
320 return -EBUSY; 320 return -EBUSY;
321} 321}
322 322
323/*
324 * If we set up a device for bus mastering, we need to check the latency
325 * timer as we don't have even crappy BIOSes to set it properly.
326 * The implementation is from arch/i386/pci/i386.c
327 */
328unsigned int pcibios_max_latency = 255;
329
330/* ITE bridge requires setting latency timer to avoid early bus access 323/* ITE bridge requires setting latency timer to avoid early bus access
331 termination by PCI bus master devices 324 termination by PCI bus master devices
332*/ 325*/
diff --git a/arch/arm/common/pl330.c b/arch/arm/common/pl330.c
index d8e44a43047c..ff3ad2244824 100644
--- a/arch/arm/common/pl330.c
+++ b/arch/arm/common/pl330.c
@@ -1502,12 +1502,13 @@ int pl330_chan_ctrl(void *ch_id, enum pl330_chan_op op)
1502 struct pl330_thread *thrd = ch_id; 1502 struct pl330_thread *thrd = ch_id;
1503 struct pl330_dmac *pl330; 1503 struct pl330_dmac *pl330;
1504 unsigned long flags; 1504 unsigned long flags;
1505 int ret = 0, active = thrd->req_running; 1505 int ret = 0, active;
1506 1506
1507 if (!thrd || thrd->free || thrd->dmac->state == DYING) 1507 if (!thrd || thrd->free || thrd->dmac->state == DYING)
1508 return -EINVAL; 1508 return -EINVAL;
1509 1509
1510 pl330 = thrd->dmac; 1510 pl330 = thrd->dmac;
1511 active = thrd->req_running;
1511 1512
1512 spin_lock_irqsave(&pl330->lock, flags); 1513 spin_lock_irqsave(&pl330->lock, flags);
1513 1514
diff --git a/arch/arm/common/vic.c b/arch/arm/common/vic.c
index dcb004a804c7..7a66311f3066 100644
--- a/arch/arm/common/vic.c
+++ b/arch/arm/common/vic.c
@@ -56,7 +56,7 @@ struct vic_device {
56 u32 int_enable; 56 u32 int_enable;
57 u32 soft_int; 57 u32 soft_int;
58 u32 protect; 58 u32 protect;
59 struct irq_domain domain; 59 struct irq_domain *domain;
60}; 60};
61 61
62/* we cannot allocate memory when VICs are initially registered */ 62/* we cannot allocate memory when VICs are initially registered */
@@ -192,14 +192,8 @@ static void __init vic_register(void __iomem *base, unsigned int irq,
192 v->resume_sources = resume_sources; 192 v->resume_sources = resume_sources;
193 v->irq = irq; 193 v->irq = irq;
194 vic_id++; 194 vic_id++;
195 195 v->domain = irq_domain_add_legacy(node, 32, irq, 0,
196 v->domain.irq_base = irq; 196 &irq_domain_simple_ops, v);
197 v->domain.nr_irq = 32;
198#ifdef CONFIG_OF_IRQ
199 v->domain.of_node = of_node_get(node);
200#endif /* CONFIG_OF */
201 v->domain.ops = &irq_domain_simple_ops;
202 irq_domain_add(&v->domain);
203} 197}
204 198
205static void vic_ack_irq(struct irq_data *d) 199static void vic_ack_irq(struct irq_data *d)
@@ -348,7 +342,7 @@ static void __init vic_init_st(void __iomem *base, unsigned int irq_start,
348 vic_register(base, irq_start, 0, node); 342 vic_register(base, irq_start, 0, node);
349} 343}
350 344
351static void __init __vic_init(void __iomem *base, unsigned int irq_start, 345void __init __vic_init(void __iomem *base, unsigned int irq_start,
352 u32 vic_sources, u32 resume_sources, 346 u32 vic_sources, u32 resume_sources,
353 struct device_node *node) 347 struct device_node *node)
354{ 348{
@@ -444,7 +438,7 @@ static int handle_one_vic(struct vic_device *vic, struct pt_regs *regs)
444 stat = readl_relaxed(vic->base + VIC_IRQ_STATUS); 438 stat = readl_relaxed(vic->base + VIC_IRQ_STATUS);
445 while (stat) { 439 while (stat) {
446 irq = ffs(stat) - 1; 440 irq = ffs(stat) - 1;
447 handle_IRQ(irq_domain_to_irq(&vic->domain, irq), regs); 441 handle_IRQ(irq_find_mapping(vic->domain, irq), regs);
448 stat &= ~(1 << irq); 442 stat &= ~(1 << irq);
449 handled = 1; 443 handled = 1;
450 } 444 }