-rw-r--r--  Documentation/IRQ-domain.txt | 117
-rw-r--r--  MAINTAINERS | 9
-rw-r--r--  arch/arm/common/gic.c | 95
-rw-r--r--  arch/arm/common/vic.c | 16
-rw-r--r--  arch/arm/include/asm/hardware/gic.h | 4
-rw-r--r--  arch/arm/include/asm/hardware/vic.h | 2
-rw-r--r--  arch/arm/mach-exynos/common.c | 2
-rw-r--r--  arch/arm/mach-imx/imx51-dt.c | 4
-rw-r--r--  arch/arm/mach-imx/imx53-dt.c | 4
-rw-r--r--  arch/arm/mach-imx/mach-imx6q.c | 3
-rw-r--r--  arch/arm/mach-msm/board-msm8x60.c | 8
-rw-r--r--  arch/arm/mach-omap2/board-generic.c | 2
-rw-r--r--  arch/arm/mach-prima2/irq.c | 2
-rw-r--r--  arch/arm/mach-versatile/core.c | 7
-rw-r--r--  arch/c6x/Kconfig | 1
-rw-r--r--  arch/c6x/include/asm/irq.h | 245
-rw-r--r--  arch/c6x/kernel/irq.c | 612
-rw-r--r--  arch/c6x/platforms/megamod-pic.c | 25
-rw-r--r--  arch/microblaze/Kconfig | 1
-rw-r--r--  arch/microblaze/include/asm/hardirq.h | 16
-rw-r--r--  arch/microblaze/include/asm/irq.h | 42
-rw-r--r--  arch/microblaze/kernel/intc.c | 61
-rw-r--r--  arch/microblaze/kernel/irq.c | 24
-rw-r--r--  arch/microblaze/kernel/setup.c | 2
-rw-r--r--  arch/mips/Kconfig | 1
-rw-r--r--  arch/mips/include/asm/irq.h | 5
-rw-r--r--  arch/mips/kernel/prom.c | 14
-rw-r--r--  arch/openrisc/include/asm/prom.h | 10
-rw-r--r--  arch/powerpc/Kconfig | 1
-rw-r--r--  arch/powerpc/include/asm/ehv_pic.h | 2
-rw-r--r--  arch/powerpc/include/asm/i8259.h | 2
-rw-r--r--  arch/powerpc/include/asm/irq.h | 247
-rw-r--r--  arch/powerpc/include/asm/mpic.h | 2
-rw-r--r--  arch/powerpc/include/asm/xics.h | 2
-rw-r--r--  arch/powerpc/kernel/irq.c | 617
-rw-r--r--  arch/powerpc/platforms/512x/mpc5121_ads_cpld.c | 12
-rw-r--r--  arch/powerpc/platforms/52xx/media5200.c | 15
-rw-r--r--  arch/powerpc/platforms/52xx/mpc52xx_gpt.c | 16
-rw-r--r--  arch/powerpc/platforms/52xx/mpc52xx_pic.c | 12
-rw-r--r--  arch/powerpc/platforms/82xx/pq2ads-pci-pic.c | 14
-rw-r--r--  arch/powerpc/platforms/85xx/socrates_fpga_pic.c | 15
-rw-r--r--  arch/powerpc/platforms/86xx/gef_pic.c | 15
-rw-r--r--  arch/powerpc/platforms/cell/axon_msi.c | 29
-rw-r--r--  arch/powerpc/platforms/cell/beat_interrupt.c | 16
-rw-r--r--  arch/powerpc/platforms/cell/interrupt.c | 16
-rw-r--r--  arch/powerpc/platforms/cell/spider-pic.c | 14
-rw-r--r--  arch/powerpc/platforms/embedded6xx/flipper-pic.c | 24
-rw-r--r--  arch/powerpc/platforms/embedded6xx/hlwd-pic.c | 29
-rw-r--r--  arch/powerpc/platforms/iseries/irq.c | 11
-rw-r--r--  arch/powerpc/platforms/powermac/pic.c | 26
-rw-r--r--  arch/powerpc/platforms/powermac/smp.c | 9
-rw-r--r--  arch/powerpc/platforms/ps3/interrupt.c | 11
-rw-r--r--  arch/powerpc/platforms/wsp/opb_pic.c | 26
-rw-r--r--  arch/powerpc/sysdev/cpm1.c | 9
-rw-r--r--  arch/powerpc/sysdev/cpm2_pic.c | 23
-rw-r--r--  arch/powerpc/sysdev/ehv_pic.c | 14
-rw-r--r--  arch/powerpc/sysdev/fsl_msi.c | 10
-rw-r--r--  arch/powerpc/sysdev/fsl_msi.h | 2
-rw-r--r--  arch/powerpc/sysdev/i8259.c | 15
-rw-r--r--  arch/powerpc/sysdev/ipic.c | 31
-rw-r--r--  arch/powerpc/sysdev/ipic.h | 2
-rw-r--r--  arch/powerpc/sysdev/mpc8xx_pic.c | 11
-rw-r--r--  arch/powerpc/sysdev/mpic.c | 17
-rw-r--r--  arch/powerpc/sysdev/mpic_msi.c | 2
-rw-r--r--  arch/powerpc/sysdev/mv64x60_pic.c | 11
-rw-r--r--  arch/powerpc/sysdev/qe_lib/qe_ic.c | 26
-rw-r--r--  arch/powerpc/sysdev/qe_lib/qe_ic.h | 2
-rw-r--r--  arch/powerpc/sysdev/tsi108_pci.c | 13
-rw-r--r--  arch/powerpc/sysdev/uic.c | 26
-rw-r--r--  arch/powerpc/sysdev/xics/xics-common.c | 25
-rw-r--r--  arch/powerpc/sysdev/xilinx_intc.c | 19
-rw-r--r--  arch/sparc/include/asm/prom.h | 10
-rw-r--r--  arch/x86/Kconfig | 2
-rw-r--r--  arch/x86/include/asm/irq_controller.h | 12
-rw-r--r--  arch/x86/include/asm/prom.h | 10
-rw-r--r--  arch/x86/kernel/devicetree.c | 101
-rw-r--r--  drivers/gpio/gpio-mpc8xxx.c | 30
-rw-r--r--  drivers/mfd/Kconfig | 1
-rw-r--r--  drivers/mfd/twl-core.c | 21
-rw-r--r--  drivers/net/phy/mdio-gpio.c | 4
-rw-r--r--  drivers/of/platform.c | 4
-rw-r--r--  include/linux/irqdomain.h | 191
-rw-r--r--  include/linux/of_address.h | 33
-rw-r--r--  include/linux/of_irq.h | 4
-rw-r--r--  include/linux/of_platform.h | 12
-rw-r--r--  kernel/irq/irqdomain.c | 828
86 files changed, 1435 insertions, 2603 deletions
diff --git a/Documentation/IRQ-domain.txt b/Documentation/IRQ-domain.txt
new file mode 100644
index 000000000000..27dcaabfb4db
--- /dev/null
+++ b/Documentation/IRQ-domain.txt
@@ -0,0 +1,117 @@
1irq_domain interrupt number mapping library
2
3The current design of the Linux kernel uses a single large number
4space where each separate IRQ source is assigned a different number.
5This is simple when there is only one interrupt controller, but in
6systems with multiple interrupt controllers the kernel must ensure
7that each one gets assigned non-overlapping allocations of Linux
8IRQ numbers.
9
10The irq_alloc_desc*() and irq_free_desc*() APIs provide allocation of
11irq numbers, but they don't provide any support for reverse mapping of
12the controller-local IRQ (hwirq) number into the Linux IRQ number
13space.
14
15The irq_domain library adds a mapping between hwirq and IRQ numbers on
16top of the irq_alloc_desc*() API. An irq_domain to manage the mapping is
17preferred over interrupt controller drivers open coding their own
18reverse mapping scheme.
19
20irq_domain also implements translation from Device Tree interrupt
21specifiers to hwirq numbers, and can be easily extended to support
22other IRQ topology data sources.
23
24=== irq_domain usage ===
25An interrupt controller driver creates and registers an irq_domain by
26calling one of the irq_domain_add_*() functions (each mapping method
27has a different allocator function, more on that later). The function
28will return a pointer to the irq_domain on success. The caller must
29provide the allocator function with an irq_domain_ops structure with
30the .map callback populated as a minimum.
31
32In most cases, the irq_domain will begin empty without any mappings
33between hwirq and IRQ numbers. Mappings are added to the irq_domain
34by calling irq_create_mapping() which accepts the irq_domain and a
35hwirq number as arguments. If a mapping for the hwirq doesn't already
36exist then it will allocate a new Linux irq_desc, associate it with
37the hwirq, and call the .map() callback so the driver can perform any
38required hardware setup.
39
40When an interrupt is received, the irq_find_mapping() function should
41be used to find the Linux IRQ number from the hwirq number.
42
43If the driver has the Linux IRQ number or the irq_data pointer, and
44needs to know the associated hwirq number (such as in the irq_chip
45callbacks) then it can be directly obtained from irq_data->hwirq.
46
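As a concrete illustration of the flow above, here is a minimal sketch of a
hypothetical driver (the foo_* names, FOO_NR_IRQS and the register layout are
invented for this example; only the irq_domain calls are the real API):

    #include <linux/irq.h>
    #include <linux/irqdomain.h>
    #include <linux/io.h>

    struct foo_pic {
            struct irq_domain *domain;
            void __iomem *regs;
    };

    static struct irq_chip foo_irq_chip = {
            .name = "FOO",
            /* .irq_mask/.irq_unmask/.irq_ack would poke foo_pic->regs */
    };

    /* Called once per hwirq, when irq_create_mapping() adds the mapping */
    static int foo_irq_map(struct irq_domain *d, unsigned int irq,
                           irq_hw_number_t hw)
    {
            irq_set_chip_and_handler(irq, &foo_irq_chip, handle_level_irq);
            irq_set_chip_data(irq, d->host_data);
            return 0;
    }

    static const struct irq_domain_ops foo_irq_domain_ops = {
            .map = foo_irq_map,
            .xlate = irq_domain_xlate_onecell,
    };

    /* In the probe path: register the domain, then map the hwirqs in use */
    pic->domain = irq_domain_add_linear(node, FOO_NR_IRQS,
                                        &foo_irq_domain_ops, pic);
    virq = irq_create_mapping(pic->domain, hwirq);

    /* When an interrupt is received: translate hwirq to the Linux IRQ */
    generic_handle_irq(irq_find_mapping(pic->domain, hwirq));
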
47=== Types of irq_domain mappings ===
48There are several mechanisms available for reverse mapping from hwirq
49to Linux irq, and each mechanism uses a different allocation function.
50Which reverse map type should be used depends on the use case. Each
51of the reverse map types is described below:
52
53==== Linear ====
54irq_domain_add_linear()
55
56The linear reverse map maintains a fixed size table indexed by the
57hwirq number. When a hwirq is mapped, an irq_desc is allocated for
58the hwirq, and the IRQ number is stored in the table.
59
60The Linear map is a good choice when the maximum number of hwirqs is
61fixed and a relatively small number (~ < 256). The advantages of this
62map are fixed time lookup for IRQ numbers, and irq_descs are only
63allocated for in-use IRQs. The disadvantage is that the table must be
64as large as the largest possible hwirq number.
65
66The majority of drivers should use the linear map.
67
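For the interrupt-receive fast path of a linear domain, a chained handler
might look like the sketch below (FOO_PENDING is a hypothetical status
register; irq_linear_revmap() is assumed to be available as the table-indexed
lookup, otherwise irq_find_mapping() can be used in its place):

    static void foo_irq_cascade(unsigned int irq, struct irq_desc *desc)
    {
            struct foo_pic *pic = irq_desc_get_handler_data(desc);
            unsigned long pending = readl(pic->regs + FOO_PENDING);

            while (pending) {
                    irq_hw_number_t hw = __ffs(pending);

                    /* constant-time lookup in the linear revmap table */
                    generic_handle_irq(irq_linear_revmap(pic->domain, hw));
                    pending &= ~(1UL << hw);
            }
    }
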
68==== Tree ====
69irq_domain_add_tree()
70
71The irq_domain maintains a radix tree map from hwirq numbers to Linux
72IRQs. When a hwirq is mapped, an irq_desc is allocated and the
73hwirq is used as the lookup key for the radix tree.
74
75The tree map is a good choice if the hwirq number can be very large
76since it doesn't need to allocate a table as large as the largest
77hwirq number. The disadvantage is that hwirq to IRQ number lookup is
78dependent on how many entries are in the tree.
79
80Very few drivers should need this mapping. At the moment, powerpc
81iseries is the only user.
82
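Registering a tree-based domain only changes the allocator call; no table
size is passed up front. A sketch, reusing the hypothetical
foo_irq_domain_ops from the usage example above (msi is an invented
driver-private structure):

    /* the radix tree grows on demand as hwirqs are mapped */
    msi->domain = irq_domain_add_tree(node, &foo_irq_domain_ops, msi);
    if (!msi->domain)
            return -ENOMEM;

    /* lookups still go through irq_find_mapping() */
    virq = irq_find_mapping(msi->domain, hwirq);
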
83==== No Map ====
84irq_domain_add_nomap()
85
86The No Map mapping is to be used when the hwirq number is
87programmable in the hardware. In this case it is best to program the
88Linux IRQ number into the hardware itself so that no mapping is
89required. Calling irq_create_direct_mapping() will allocate a Linux
90IRQ number and call the .map() callback so that the driver can program the
91Linux IRQ number into the hardware.
92
93Most drivers cannot use this mapping.
94
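A sketch of the nomap case, assuming irq_domain_add_nomap() takes the same
(node, ops, host_data) arguments as the tree allocator; FOO_IRQ_SELECT is a
hypothetical routing register:

    static int foo_nomap_map(struct irq_domain *d, unsigned int irq,
                             irq_hw_number_t hw)
    {
            struct foo_pic *pic = d->host_data;

            /* hw == irq here, so program the Linux IRQ number directly */
            writel(irq, pic->regs + FOO_IRQ_SELECT);
            irq_set_chip_and_handler(irq, &foo_irq_chip, handle_level_irq);
            irq_set_chip_data(irq, pic);
            return 0;
    }

    static const struct irq_domain_ops foo_nomap_ops = {
            .map = foo_nomap_map,
    };

    pic->domain = irq_domain_add_nomap(node, &foo_nomap_ops, pic);
    virq = irq_create_direct_mapping(pic->domain);  /* invokes .map() */
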
95==== Legacy ====
96irq_domain_add_legacy()
97irq_domain_add_legacy_isa()
98
99The Legacy mapping is a special case for drivers that already have a
100range of irq_descs allocated for the hwirqs. It is used when the
101driver cannot be immediately converted to use the linear mapping. For
102example, many embedded system board support files use a set of #defines
103for IRQ numbers that are passed to struct device registrations. In that
104case the Linux IRQ numbers cannot be dynamically assigned and the legacy
105mapping should be used.
106
107The legacy map assumes a contiguous range of IRQ numbers has already
108been allocated for the controller and that the IRQ number can be
109calculated by adding a fixed offset to the hwirq number, and
110vice versa. The disadvantage is that it requires the interrupt
111controller to manage IRQ allocations and it requires an irq_desc to be
112allocated for every hwirq, even if it is unused.
113
114The legacy map should only be used if fixed IRQ mappings must be
115supported. For example, ISA controllers would use the legacy map for
116mapping Linux IRQs 0-15 so that existing ISA drivers get the correct IRQ
117numbers.
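
A sketch of the legacy case (IRQ_BASE is a hypothetical board-file constant;
the GIC and VIC conversions later in this patch call irq_domain_add_legacy()
in exactly this way):

    /* hwirqs 0..15 are bound to pre-allocated IRQs IRQ_BASE..IRQ_BASE+15 */
    d = irq_domain_add_legacy(node, 16, IRQ_BASE, 0,
                              &irq_domain_simple_ops, NULL);

    /* with a fixed offset the reverse map is just an addition */
    virq = irq_find_mapping(d, hw);         /* == IRQ_BASE + hw */
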
diff --git a/MAINTAINERS b/MAINTAINERS
index 9a648eb8e213..57dd0f56cd37 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3640,6 +3640,15 @@ S: Maintained
3640T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core 3640T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core
3641F: kernel/irq/ 3641F: kernel/irq/
3642 3642
3643IRQ DOMAINS (IRQ NUMBER MAPPING LIBRARY)
3644M: Benjamin Herrenschmidt <benh@kernel.crashing.org>
3645M: Grant Likely <grant.likely@secretlab.ca>
3646T: git git://git.secretlab.ca/git/linux-2.6.git irqdomain/next
3647S: Maintained
3648F: Documentation/IRQ-domain.txt
3649F: include/linux/irqdomain.h
3650F: kernel/irq/irqdomain.c
3651
3643ISAPNP 3652ISAPNP
3644M: Jaroslav Kysela <perex@perex.cz> 3653M: Jaroslav Kysela <perex@perex.cz>
3645S: Maintained 3654S: Maintained
diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c
index c47d6199b784..f0783be17352 100644
--- a/arch/arm/common/gic.c
+++ b/arch/arm/common/gic.c
@@ -51,7 +51,6 @@ union gic_base {
51}; 51};
52 52
53struct gic_chip_data { 53struct gic_chip_data {
54 unsigned int irq_offset;
55 union gic_base dist_base; 54 union gic_base dist_base;
56 union gic_base cpu_base; 55 union gic_base cpu_base;
57#ifdef CONFIG_CPU_PM 56#ifdef CONFIG_CPU_PM
@@ -61,9 +60,7 @@ struct gic_chip_data {
61 u32 __percpu *saved_ppi_enable; 60 u32 __percpu *saved_ppi_enable;
62 u32 __percpu *saved_ppi_conf; 61 u32 __percpu *saved_ppi_conf;
63#endif 62#endif
64#ifdef CONFIG_IRQ_DOMAIN 63 struct irq_domain *domain;
65 struct irq_domain domain;
66#endif
67 unsigned int gic_irqs; 64 unsigned int gic_irqs;
68#ifdef CONFIG_GIC_NON_BANKED 65#ifdef CONFIG_GIC_NON_BANKED
69 void __iomem *(*get_base)(union gic_base *); 66 void __iomem *(*get_base)(union gic_base *);
@@ -282,7 +279,7 @@ asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
282 irqnr = irqstat & ~0x1c00; 279 irqnr = irqstat & ~0x1c00;
283 280
284 if (likely(irqnr > 15 && irqnr < 1021)) { 281 if (likely(irqnr > 15 && irqnr < 1021)) {
285 irqnr = irq_domain_to_irq(&gic->domain, irqnr); 282 irqnr = irq_find_mapping(gic->domain, irqnr);
286 handle_IRQ(irqnr, regs); 283 handle_IRQ(irqnr, regs);
287 continue; 284 continue;
288 } 285 }
@@ -314,8 +311,8 @@ static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
314 if (gic_irq == 1023) 311 if (gic_irq == 1023)
315 goto out; 312 goto out;
316 313
317 cascade_irq = irq_domain_to_irq(&chip_data->domain, gic_irq); 314 cascade_irq = irq_find_mapping(chip_data->domain, gic_irq);
318 if (unlikely(gic_irq < 32 || gic_irq > 1020 || cascade_irq >= NR_IRQS)) 315 if (unlikely(gic_irq < 32 || gic_irq > 1020))
319 do_bad_IRQ(cascade_irq, desc); 316 do_bad_IRQ(cascade_irq, desc);
320 else 317 else
321 generic_handle_irq(cascade_irq); 318 generic_handle_irq(cascade_irq);
@@ -348,10 +345,9 @@ void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
348 345
349static void __init gic_dist_init(struct gic_chip_data *gic) 346static void __init gic_dist_init(struct gic_chip_data *gic)
350{ 347{
351 unsigned int i, irq; 348 unsigned int i;
352 u32 cpumask; 349 u32 cpumask;
353 unsigned int gic_irqs = gic->gic_irqs; 350 unsigned int gic_irqs = gic->gic_irqs;
354 struct irq_domain *domain = &gic->domain;
355 void __iomem *base = gic_data_dist_base(gic); 351 void __iomem *base = gic_data_dist_base(gic);
356 u32 cpu = cpu_logical_map(smp_processor_id()); 352 u32 cpu = cpu_logical_map(smp_processor_id());
357 353
@@ -386,23 +382,6 @@ static void __init gic_dist_init(struct gic_chip_data *gic)
386 for (i = 32; i < gic_irqs; i += 32) 382 for (i = 32; i < gic_irqs; i += 32)
387 writel_relaxed(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32); 383 writel_relaxed(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32);
388 384
389 /*
390 * Setup the Linux IRQ subsystem.
391 */
392 irq_domain_for_each_irq(domain, i, irq) {
393 if (i < 32) {
394 irq_set_percpu_devid(irq);
395 irq_set_chip_and_handler(irq, &gic_chip,
396 handle_percpu_devid_irq);
397 set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN);
398 } else {
399 irq_set_chip_and_handler(irq, &gic_chip,
400 handle_fasteoi_irq);
401 set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
402 }
403 irq_set_chip_data(irq, gic);
404 }
405
406 writel_relaxed(1, base + GIC_DIST_CTRL); 385 writel_relaxed(1, base + GIC_DIST_CTRL);
407} 386}
408 387
@@ -618,11 +597,27 @@ static void __init gic_pm_init(struct gic_chip_data *gic)
618} 597}
619#endif 598#endif
620 599
621#ifdef CONFIG_OF 600static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
622static int gic_irq_domain_dt_translate(struct irq_domain *d, 601 irq_hw_number_t hw)
623 struct device_node *controller, 602{
624 const u32 *intspec, unsigned int intsize, 603 if (hw < 32) {
625 unsigned long *out_hwirq, unsigned int *out_type) 604 irq_set_percpu_devid(irq);
605 irq_set_chip_and_handler(irq, &gic_chip,
606 handle_percpu_devid_irq);
607 set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN);
608 } else {
609 irq_set_chip_and_handler(irq, &gic_chip,
610 handle_fasteoi_irq);
611 set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
612 }
613 irq_set_chip_data(irq, d->host_data);
614 return 0;
615}
616
617static int gic_irq_domain_xlate(struct irq_domain *d,
618 struct device_node *controller,
619 const u32 *intspec, unsigned int intsize,
620 unsigned long *out_hwirq, unsigned int *out_type)
626{ 621{
627 if (d->of_node != controller) 622 if (d->of_node != controller)
628 return -EINVAL; 623 return -EINVAL;
@@ -639,26 +634,23 @@ static int gic_irq_domain_dt_translate(struct irq_domain *d,
639 *out_type = intspec[2] & IRQ_TYPE_SENSE_MASK; 634 *out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;
640 return 0; 635 return 0;
641} 636}
642#endif
643 637
644const struct irq_domain_ops gic_irq_domain_ops = { 638const struct irq_domain_ops gic_irq_domain_ops = {
645#ifdef CONFIG_OF 639 .map = gic_irq_domain_map,
646 .dt_translate = gic_irq_domain_dt_translate, 640 .xlate = gic_irq_domain_xlate,
647#endif
648}; 641};
649 642
650void __init gic_init_bases(unsigned int gic_nr, int irq_start, 643void __init gic_init_bases(unsigned int gic_nr, int irq_start,
651 void __iomem *dist_base, void __iomem *cpu_base, 644 void __iomem *dist_base, void __iomem *cpu_base,
652 u32 percpu_offset) 645 u32 percpu_offset, struct device_node *node)
653{ 646{
647 irq_hw_number_t hwirq_base;
654 struct gic_chip_data *gic; 648 struct gic_chip_data *gic;
655 struct irq_domain *domain; 649 int gic_irqs, irq_base;
656 int gic_irqs;
657 650
658 BUG_ON(gic_nr >= MAX_GIC_NR); 651 BUG_ON(gic_nr >= MAX_GIC_NR);
659 652
660 gic = &gic_data[gic_nr]; 653 gic = &gic_data[gic_nr];
661 domain = &gic->domain;
662#ifdef CONFIG_GIC_NON_BANKED 654#ifdef CONFIG_GIC_NON_BANKED
663 if (percpu_offset) { /* Frankein-GIC without banked registers... */ 655 if (percpu_offset) { /* Frankein-GIC without banked registers... */
664 unsigned int cpu; 656 unsigned int cpu;
@@ -694,10 +686,10 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start,
694 * For primary GICs, skip over SGIs. 686 * For primary GICs, skip over SGIs.
695 * For secondary GICs, skip over PPIs, too. 687 * For secondary GICs, skip over PPIs, too.
696 */ 688 */
697 domain->hwirq_base = 32; 689 hwirq_base = 32;
698 if (gic_nr == 0) { 690 if (gic_nr == 0) {
699 if ((irq_start & 31) > 0) { 691 if ((irq_start & 31) > 0) {
700 domain->hwirq_base = 16; 692 hwirq_base = 16;
701 if (irq_start != -1) 693 if (irq_start != -1)
702 irq_start = (irq_start & ~31) + 16; 694 irq_start = (irq_start & ~31) + 16;
703 } 695 }
@@ -713,17 +705,17 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start,
713 gic_irqs = 1020; 705 gic_irqs = 1020;
714 gic->gic_irqs = gic_irqs; 706 gic->gic_irqs = gic_irqs;
715 707
716 domain->nr_irq = gic_irqs - domain->hwirq_base; 708 gic_irqs -= hwirq_base; /* calculate # of irqs to allocate */
717 domain->irq_base = irq_alloc_descs(irq_start, 16, domain->nr_irq, 709 irq_base = irq_alloc_descs(irq_start, 16, gic_irqs, numa_node_id());
718 numa_node_id()); 710 if (IS_ERR_VALUE(irq_base)) {
719 if (IS_ERR_VALUE(domain->irq_base)) {
720 WARN(1, "Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n", 711 WARN(1, "Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
721 irq_start); 712 irq_start);
722 domain->irq_base = irq_start; 713 irq_base = irq_start;
723 } 714 }
724 domain->priv = gic; 715 gic->domain = irq_domain_add_legacy(node, gic_irqs, irq_base,
725 domain->ops = &gic_irq_domain_ops; 716 hwirq_base, &gic_irq_domain_ops, gic);
726 irq_domain_add(domain); 717 if (WARN_ON(!gic->domain))
718 return;
727 719
728 gic_chip.flags |= gic_arch_extn.flags; 720 gic_chip.flags |= gic_arch_extn.flags;
729 gic_dist_init(gic); 721 gic_dist_init(gic);
@@ -768,7 +760,6 @@ int __init gic_of_init(struct device_node *node, struct device_node *parent)
768 void __iomem *dist_base; 760 void __iomem *dist_base;
769 u32 percpu_offset; 761 u32 percpu_offset;
770 int irq; 762 int irq;
771 struct irq_domain *domain = &gic_data[gic_cnt].domain;
772 763
773 if (WARN_ON(!node)) 764 if (WARN_ON(!node))
774 return -ENODEV; 765 return -ENODEV;
@@ -782,9 +773,7 @@ int __init gic_of_init(struct device_node *node, struct device_node *parent)
782 if (of_property_read_u32(node, "cpu-offset", &percpu_offset)) 773 if (of_property_read_u32(node, "cpu-offset", &percpu_offset))
783 percpu_offset = 0; 774 percpu_offset = 0;
784 775
785 domain->of_node = of_node_get(node); 776 gic_init_bases(gic_cnt, -1, dist_base, cpu_base, percpu_offset, node);
786
787 gic_init_bases(gic_cnt, -1, dist_base, cpu_base, percpu_offset);
788 777
789 if (parent) { 778 if (parent) {
790 irq = irq_of_parse_and_map(node, 0); 779 irq = irq_of_parse_and_map(node, 0);
diff --git a/arch/arm/common/vic.c b/arch/arm/common/vic.c
index dcb004a804c7..7a66311f3066 100644
--- a/arch/arm/common/vic.c
+++ b/arch/arm/common/vic.c
@@ -56,7 +56,7 @@ struct vic_device {
56 u32 int_enable; 56 u32 int_enable;
57 u32 soft_int; 57 u32 soft_int;
58 u32 protect; 58 u32 protect;
59 struct irq_domain domain; 59 struct irq_domain *domain;
60}; 60};
61 61
62/* we cannot allocate memory when VICs are initially registered */ 62/* we cannot allocate memory when VICs are initially registered */
@@ -192,14 +192,8 @@ static void __init vic_register(void __iomem *base, unsigned int irq,
192 v->resume_sources = resume_sources; 192 v->resume_sources = resume_sources;
193 v->irq = irq; 193 v->irq = irq;
194 vic_id++; 194 vic_id++;
195 195 v->domain = irq_domain_add_legacy(node, 32, irq, 0,
196 v->domain.irq_base = irq; 196 &irq_domain_simple_ops, v);
197 v->domain.nr_irq = 32;
198#ifdef CONFIG_OF_IRQ
199 v->domain.of_node = of_node_get(node);
200#endif /* CONFIG_OF */
201 v->domain.ops = &irq_domain_simple_ops;
202 irq_domain_add(&v->domain);
203} 197}
204 198
205static void vic_ack_irq(struct irq_data *d) 199static void vic_ack_irq(struct irq_data *d)
@@ -348,7 +342,7 @@ static void __init vic_init_st(void __iomem *base, unsigned int irq_start,
348 vic_register(base, irq_start, 0, node); 342 vic_register(base, irq_start, 0, node);
349} 343}
350 344
351static void __init __vic_init(void __iomem *base, unsigned int irq_start, 345void __init __vic_init(void __iomem *base, unsigned int irq_start,
352 u32 vic_sources, u32 resume_sources, 346 u32 vic_sources, u32 resume_sources,
353 struct device_node *node) 347 struct device_node *node)
354{ 348{
@@ -444,7 +438,7 @@ static int handle_one_vic(struct vic_device *vic, struct pt_regs *regs)
444 stat = readl_relaxed(vic->base + VIC_IRQ_STATUS); 438 stat = readl_relaxed(vic->base + VIC_IRQ_STATUS);
445 while (stat) { 439 while (stat) {
446 irq = ffs(stat) - 1; 440 irq = ffs(stat) - 1;
447 handle_IRQ(irq_domain_to_irq(&vic->domain, irq), regs); 441 handle_IRQ(irq_find_mapping(vic->domain, irq), regs);
448 stat &= ~(1 << irq); 442 stat &= ~(1 << irq);
449 handled = 1; 443 handled = 1;
450 } 444 }
diff --git a/arch/arm/include/asm/hardware/gic.h b/arch/arm/include/asm/hardware/gic.h
index 4bdfe0018696..4b1ce6cd477f 100644
--- a/arch/arm/include/asm/hardware/gic.h
+++ b/arch/arm/include/asm/hardware/gic.h
@@ -39,7 +39,7 @@ struct device_node;
39extern struct irq_chip gic_arch_extn; 39extern struct irq_chip gic_arch_extn;
40 40
41void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *, 41void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *,
42 u32 offset); 42 u32 offset, struct device_node *);
43int gic_of_init(struct device_node *node, struct device_node *parent); 43int gic_of_init(struct device_node *node, struct device_node *parent);
44void gic_secondary_init(unsigned int); 44void gic_secondary_init(unsigned int);
45void gic_handle_irq(struct pt_regs *regs); 45void gic_handle_irq(struct pt_regs *regs);
@@ -49,7 +49,7 @@ void gic_raise_softirq(const struct cpumask *mask, unsigned int irq);
49static inline void gic_init(unsigned int nr, int start, 49static inline void gic_init(unsigned int nr, int start,
50 void __iomem *dist , void __iomem *cpu) 50 void __iomem *dist , void __iomem *cpu)
51{ 51{
52 gic_init_bases(nr, start, dist, cpu, 0); 52 gic_init_bases(nr, start, dist, cpu, 0, NULL);
53} 53}
54 54
55#endif 55#endif
diff --git a/arch/arm/include/asm/hardware/vic.h b/arch/arm/include/asm/hardware/vic.h
index f42ebd619590..e14af1a1a320 100644
--- a/arch/arm/include/asm/hardware/vic.h
+++ b/arch/arm/include/asm/hardware/vic.h
@@ -47,6 +47,8 @@
47struct device_node; 47struct device_node;
48struct pt_regs; 48struct pt_regs;
49 49
50void __vic_init(void __iomem *base, unsigned int irq_start, u32 vic_sources,
51 u32 resume_sources, struct device_node *node);
50void vic_init(void __iomem *base, unsigned int irq_start, u32 vic_sources, u32 resume_sources); 52void vic_init(void __iomem *base, unsigned int irq_start, u32 vic_sources, u32 resume_sources);
51int vic_of_init(struct device_node *node, struct device_node *parent); 53int vic_of_init(struct device_node *node, struct device_node *parent);
52void vic_handle_irq(struct pt_regs *regs); 54void vic_handle_irq(struct pt_regs *regs);
diff --git a/arch/arm/mach-exynos/common.c b/arch/arm/mach-exynos/common.c
index c59e18871006..6de298c5d2d3 100644
--- a/arch/arm/mach-exynos/common.c
+++ b/arch/arm/mach-exynos/common.c
@@ -402,7 +402,7 @@ void __init exynos4_init_irq(void)
402 gic_bank_offset = soc_is_exynos4412() ? 0x4000 : 0x8000; 402 gic_bank_offset = soc_is_exynos4412() ? 0x4000 : 0x8000;
403 403
404 if (!of_have_populated_dt()) 404 if (!of_have_populated_dt())
405 gic_init_bases(0, IRQ_PPI(0), S5P_VA_GIC_DIST, S5P_VA_GIC_CPU, gic_bank_offset); 405 gic_init_bases(0, IRQ_PPI(0), S5P_VA_GIC_DIST, S5P_VA_GIC_CPU, gic_bank_offset, NULL);
406#ifdef CONFIG_OF 406#ifdef CONFIG_OF
407 else 407 else
408 of_irq_init(exynos4_dt_irq_match); 408 of_irq_init(exynos4_dt_irq_match);
diff --git a/arch/arm/mach-imx/imx51-dt.c b/arch/arm/mach-imx/imx51-dt.c
index e6bad17b908c..1e03ef42faa0 100644
--- a/arch/arm/mach-imx/imx51-dt.c
+++ b/arch/arm/mach-imx/imx51-dt.c
@@ -47,7 +47,7 @@ static const struct of_dev_auxdata imx51_auxdata_lookup[] __initconst = {
47static int __init imx51_tzic_add_irq_domain(struct device_node *np, 47static int __init imx51_tzic_add_irq_domain(struct device_node *np,
48 struct device_node *interrupt_parent) 48 struct device_node *interrupt_parent)
49{ 49{
50 irq_domain_add_simple(np, 0); 50 irq_domain_add_legacy(np, 128, 0, 0, &irq_domain_simple_ops, NULL);
51 return 0; 51 return 0;
52} 52}
53 53
@@ -57,7 +57,7 @@ static int __init imx51_gpio_add_irq_domain(struct device_node *np,
57 static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS; 57 static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS;
58 58
59 gpio_irq_base -= 32; 59 gpio_irq_base -= 32;
60 irq_domain_add_simple(np, gpio_irq_base); 60 irq_domain_add_legacy(np, 32, gpio_irq_base, 0, &irq_domain_simple_ops, NULL);
61 61
62 return 0; 62 return 0;
63} 63}
diff --git a/arch/arm/mach-imx/imx53-dt.c b/arch/arm/mach-imx/imx53-dt.c
index 05ebb3e68679..fd5be0f20fbb 100644
--- a/arch/arm/mach-imx/imx53-dt.c
+++ b/arch/arm/mach-imx/imx53-dt.c
@@ -51,7 +51,7 @@ static const struct of_dev_auxdata imx53_auxdata_lookup[] __initconst = {
51static int __init imx53_tzic_add_irq_domain(struct device_node *np, 51static int __init imx53_tzic_add_irq_domain(struct device_node *np,
52 struct device_node *interrupt_parent) 52 struct device_node *interrupt_parent)
53{ 53{
54 irq_domain_add_simple(np, 0); 54 irq_domain_add_legacy(np, 128, 0, 0, &irq_domain_simple_ops, NULL);
55 return 0; 55 return 0;
56} 56}
57 57
@@ -61,7 +61,7 @@ static int __init imx53_gpio_add_irq_domain(struct device_node *np,
61 static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS; 61 static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS;
62 62
63 gpio_irq_base -= 32; 63 gpio_irq_base -= 32;
64 irq_domain_add_simple(np, gpio_irq_base); 64 irq_domain_add_legacy(np, 32, gpio_irq_base, 0, &irq_domain_simple_ops, NULL);
65 65
66 return 0; 66 return 0;
67} 67}
diff --git a/arch/arm/mach-imx/mach-imx6q.c b/arch/arm/mach-imx/mach-imx6q.c
index c25728106917..6075d4d62dd6 100644
--- a/arch/arm/mach-imx/mach-imx6q.c
+++ b/arch/arm/mach-imx/mach-imx6q.c
@@ -97,7 +97,8 @@ static int __init imx6q_gpio_add_irq_domain(struct device_node *np,
97 static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS; 97 static int gpio_irq_base = MXC_GPIO_IRQ_START + ARCH_NR_GPIOS;
98 98
99 gpio_irq_base -= 32; 99 gpio_irq_base -= 32;
100 irq_domain_add_simple(np, gpio_irq_base); 100 irq_domain_add_legacy(np, 32, gpio_irq_base, 0, &irq_domain_simple_ops,
101 NULL);
101 102
102 return 0; 103 return 0;
103} 104}
diff --git a/arch/arm/mach-msm/board-msm8x60.c b/arch/arm/mach-msm/board-msm8x60.c
index 0a113424632c..962e71169750 100644
--- a/arch/arm/mach-msm/board-msm8x60.c
+++ b/arch/arm/mach-msm/board-msm8x60.c
@@ -80,12 +80,8 @@ static struct of_device_id msm_dt_gic_match[] __initdata = {
80 80
81static void __init msm8x60_dt_init(void) 81static void __init msm8x60_dt_init(void)
82{ 82{
83 struct device_node *node; 83 irq_domain_generate_simple(msm_dt_gic_match, MSM8X60_QGIC_DIST_PHYS,
84 84 GIC_SPI_START);
85 node = of_find_matching_node_by_address(NULL, msm_dt_gic_match,
86 MSM8X60_QGIC_DIST_PHYS);
87 if (node)
88 irq_domain_add_simple(node, GIC_SPI_START);
89 85
90 if (of_machine_is_compatible("qcom,msm8660-surf")) { 86 if (of_machine_is_compatible("qcom,msm8660-surf")) {
91 printk(KERN_INFO "Init surf UART registers\n"); 87 printk(KERN_INFO "Init surf UART registers\n");
diff --git a/arch/arm/mach-omap2/board-generic.c b/arch/arm/mach-omap2/board-generic.c
index d32b5935233f..02d7e828a14b 100644
--- a/arch/arm/mach-omap2/board-generic.c
+++ b/arch/arm/mach-omap2/board-generic.c
@@ -68,7 +68,7 @@ static void __init omap_generic_init(void)
68{ 68{
69 struct device_node *node = of_find_matching_node(NULL, intc_match); 69 struct device_node *node = of_find_matching_node(NULL, intc_match);
70 if (node) 70 if (node)
71 irq_domain_add_simple(node, 0); 71 irq_domain_add_legacy(node, 32, 0, 0, &irq_domain_simple_ops, NULL);
72 72
73 omap_sdrc_init(NULL, NULL); 73 omap_sdrc_init(NULL, NULL);
74 74
diff --git a/arch/arm/mach-prima2/irq.c b/arch/arm/mach-prima2/irq.c
index d93ceef4a50a..37c2de9b6f26 100644
--- a/arch/arm/mach-prima2/irq.c
+++ b/arch/arm/mach-prima2/irq.c
@@ -68,7 +68,7 @@ void __init sirfsoc_of_irq_init(void)
68 if (!sirfsoc_intc_base) 68 if (!sirfsoc_intc_base)
69 panic("unable to map intc cpu registers\n"); 69 panic("unable to map intc cpu registers\n");
70 70
71 irq_domain_add_simple(np, 0); 71 irq_domain_add_legacy(np, 32, 0, 0, &irq_domain_simple_ops, NULL);
72 72
73 of_node_put(np); 73 of_node_put(np);
74 74
diff --git a/arch/arm/mach-versatile/core.c b/arch/arm/mach-versatile/core.c
index 02b7b9303f3b..008ce22b9a06 100644
--- a/arch/arm/mach-versatile/core.c
+++ b/arch/arm/mach-versatile/core.c
@@ -98,8 +98,11 @@ static const struct of_device_id sic_of_match[] __initconst = {
98 98
99void __init versatile_init_irq(void) 99void __init versatile_init_irq(void)
100{ 100{
101 vic_init(VA_VIC_BASE, IRQ_VIC_START, ~0, 0); 101 struct device_node *np;
102 irq_domain_generate_simple(vic_of_match, VERSATILE_VIC_BASE, IRQ_VIC_START); 102
103 np = of_find_matching_node_by_address(NULL, vic_of_match,
104 VERSATILE_VIC_BASE);
105 __vic_init(VA_VIC_BASE, IRQ_VIC_START, ~0, 0, np);
103 106
104 writel(~0, VA_SIC_BASE + SIC_IRQ_ENABLE_CLEAR); 107 writel(~0, VA_SIC_BASE + SIC_IRQ_ENABLE_CLEAR);
105 108
diff --git a/arch/c6x/Kconfig b/arch/c6x/Kconfig
index 26e67f0f0051..3c64b2894c13 100644
--- a/arch/c6x/Kconfig
+++ b/arch/c6x/Kconfig
@@ -12,6 +12,7 @@ config TMS320C6X
12 select HAVE_GENERIC_HARDIRQS 12 select HAVE_GENERIC_HARDIRQS
13 select HAVE_MEMBLOCK 13 select HAVE_MEMBLOCK
14 select HAVE_SPARSE_IRQ 14 select HAVE_SPARSE_IRQ
15 select IRQ_DOMAIN
15 select OF 16 select OF
16 select OF_EARLY_FLATTREE 17 select OF_EARLY_FLATTREE
17 18
diff --git a/arch/c6x/include/asm/irq.h b/arch/c6x/include/asm/irq.h
index a6ae3c9d9c40..f13b78d5e1ca 100644
--- a/arch/c6x/include/asm/irq.h
+++ b/arch/c6x/include/asm/irq.h
@@ -13,6 +13,7 @@
13#ifndef _ASM_C6X_IRQ_H 13#ifndef _ASM_C6X_IRQ_H
14#define _ASM_C6X_IRQ_H 14#define _ASM_C6X_IRQ_H
15 15
16#include <linux/irqdomain.h>
16#include <linux/threads.h> 17#include <linux/threads.h>
17#include <linux/list.h> 18#include <linux/list.h>
18#include <linux/radix-tree.h> 19#include <linux/radix-tree.h>
@@ -41,253 +42,9 @@
41/* This number is used when no interrupt has been assigned */ 42/* This number is used when no interrupt has been assigned */
42#define NO_IRQ 0 43#define NO_IRQ 0
43 44
44/* This type is the placeholder for a hardware interrupt number. It has to
45 * be big enough to enclose whatever representation is used by a given
46 * platform.
47 */
48typedef unsigned long irq_hw_number_t;
49
50/* Interrupt controller "host" data structure. This could be defined as a
51 * irq domain controller. That is, it handles the mapping between hardware
52 * and virtual interrupt numbers for a given interrupt domain. The host
53 * structure is generally created by the PIC code for a given PIC instance
54 * (though a host can cover more than one PIC if they have a flat number
55 * model). It's the host callbacks that are responsible for setting the
56 * irq_chip on a given irq_desc after it's been mapped.
57 *
58 * The host code and data structures are fairly agnostic to the fact that
59 * we use an open firmware device-tree. We do have references to struct
60 * device_node in two places: in irq_find_host() to find the host matching
61 * a given interrupt controller node, and of course as an argument to its
62 * counterpart host->ops->match() callback. However, those are treated as
63 * generic pointers by the core and the fact that it's actually a device-node
64 * pointer is purely a convention between callers and implementation. This
65 * code could thus be used on other architectures by replacing those two
66 * by some sort of arch-specific void * "token" used to identify interrupt
67 * controllers.
68 */
69struct irq_host;
70struct radix_tree_root;
71struct device_node;
72
73/* Functions below are provided by the host and called whenever a new mapping
74 * is created or an old mapping is disposed. The host can then proceed to
75 * whatever internal data structures management is required. It also needs
76 * to setup the irq_desc when returning from map().
77 */
78struct irq_host_ops {
79 /* Match an interrupt controller device node to a host, returns
80 * 1 on a match
81 */
82 int (*match)(struct irq_host *h, struct device_node *node);
83
84 /* Create or update a mapping between a virtual irq number and a hw
85 * irq number. This is called only once for a given mapping.
86 */
87 int (*map)(struct irq_host *h, unsigned int virq, irq_hw_number_t hw);
88
89 /* Dispose of such a mapping */
90 void (*unmap)(struct irq_host *h, unsigned int virq);
91
92 /* Translate device-tree interrupt specifier from raw format coming
93 * from the firmware to a irq_hw_number_t (interrupt line number) and
94 * type (sense) that can be passed to set_irq_type(). In the absence
95 * of this callback, irq_create_of_mapping() and irq_of_parse_and_map()
96 * will return the hw number in the first cell and IRQ_TYPE_NONE for
97 * the type (which amount to keeping whatever default value the
98 * interrupt controller has for that line)
99 */
100 int (*xlate)(struct irq_host *h, struct device_node *ctrler,
101 const u32 *intspec, unsigned int intsize,
102 irq_hw_number_t *out_hwirq, unsigned int *out_type);
103};
104
105struct irq_host {
106 struct list_head link;
107
108 /* type of reverse mapping technique */
109 unsigned int revmap_type;
110#define IRQ_HOST_MAP_PRIORITY 0 /* core priority irqs, get irqs 1..15 */
111#define IRQ_HOST_MAP_NOMAP 1 /* no fast reverse mapping */
112#define IRQ_HOST_MAP_LINEAR 2 /* linear map of interrupts */
113#define IRQ_HOST_MAP_TREE 3 /* radix tree */
114 union {
115 struct {
116 unsigned int size;
117 unsigned int *revmap;
118 } linear;
119 struct radix_tree_root tree;
120 } revmap_data;
121 struct irq_host_ops *ops;
122 void *host_data;
123 irq_hw_number_t inval_irq;
124
125 /* Optional device node pointer */
126 struct device_node *of_node;
127};
128
129struct irq_data; 45struct irq_data;
130extern irq_hw_number_t irqd_to_hwirq(struct irq_data *d); 46extern irq_hw_number_t irqd_to_hwirq(struct irq_data *d);
131extern irq_hw_number_t virq_to_hw(unsigned int virq); 47extern irq_hw_number_t virq_to_hw(unsigned int virq);
132extern bool virq_is_host(unsigned int virq, struct irq_host *host);
133
134/**
135 * irq_alloc_host - Allocate a new irq_host data structure
136 * @of_node: optional device-tree node of the interrupt controller
137 * @revmap_type: type of reverse mapping to use
138 * @revmap_arg: for IRQ_HOST_MAP_LINEAR linear only: size of the map
139 * @ops: map/unmap host callbacks
140 * @inval_irq: provide a hw number in that host space that is always invalid
141 *
142 * Allocates and initialize and irq_host structure. Note that in the case of
143 * IRQ_HOST_MAP_LEGACY, the map() callback will be called before this returns
144 * for all legacy interrupts except 0 (which is always the invalid irq for
145 * a legacy controller). For a IRQ_HOST_MAP_LINEAR, the map is allocated by
146 * this call as well. For a IRQ_HOST_MAP_TREE, the radix tree will be allocated
147 * later during boot automatically (the reverse mapping will use the slow path
148 * until that happens).
149 */
150extern struct irq_host *irq_alloc_host(struct device_node *of_node,
151 unsigned int revmap_type,
152 unsigned int revmap_arg,
153 struct irq_host_ops *ops,
154 irq_hw_number_t inval_irq);
155
156
157/**
158 * irq_find_host - Locates a host for a given device node
159 * @node: device-tree node of the interrupt controller
160 */
161extern struct irq_host *irq_find_host(struct device_node *node);
162
163
164/**
165 * irq_set_default_host - Set a "default" host
166 * @host: default host pointer
167 *
168 * For convenience, it's possible to set a "default" host that will be used
169 * whenever NULL is passed to irq_create_mapping(). It makes life easier for
170 * platforms that want to manipulate a few hard coded interrupt numbers that
171 * aren't properly represented in the device-tree.
172 */
173extern void irq_set_default_host(struct irq_host *host);
174
175
176/**
177 * irq_set_virq_count - Set the maximum number of virt irqs
178 * @count: number of linux virtual irqs, capped with NR_IRQS
179 *
180 * This is mainly for use by platforms like iSeries who want to program
181 * the virtual irq number in the controller to avoid the reverse mapping
182 */
183extern void irq_set_virq_count(unsigned int count);
184
185
186/**
187 * irq_create_mapping - Map a hardware interrupt into linux virq space
188 * @host: host owning this hardware interrupt or NULL for default host
189 * @hwirq: hardware irq number in that host space
190 *
191 * Only one mapping per hardware interrupt is permitted. Returns a linux
192 * virq number.
193 * If the sense/trigger is to be specified, set_irq_type() should be called
194 * on the number returned from that call.
195 */
196extern unsigned int irq_create_mapping(struct irq_host *host,
197 irq_hw_number_t hwirq);
198
199
200/**
201 * irq_dispose_mapping - Unmap an interrupt
202 * @virq: linux virq number of the interrupt to unmap
203 */
204extern void irq_dispose_mapping(unsigned int virq);
205
206/**
207 * irq_find_mapping - Find a linux virq from an hw irq number.
208 * @host: host owning this hardware interrupt
209 * @hwirq: hardware irq number in that host space
210 *
211 * This is a slow path, for use by generic code. It's expected that an
212 * irq controller implementation directly calls the appropriate low level
213 * mapping function.
214 */
215extern unsigned int irq_find_mapping(struct irq_host *host,
216 irq_hw_number_t hwirq);
217
218/**
219 * irq_create_direct_mapping - Allocate a virq for direct mapping
220 * @host: host to allocate the virq for or NULL for default host
221 *
222 * This routine is used for irq controllers which can choose the hardware
223 * interrupt numbers they generate. In such a case it's simplest to use
224 * the linux virq as the hardware interrupt number.
225 */
226extern unsigned int irq_create_direct_mapping(struct irq_host *host);
227
228/**
229 * irq_radix_revmap_insert - Insert a hw irq to linux virq number mapping.
230 * @host: host owning this hardware interrupt
231 * @virq: linux irq number
232 * @hwirq: hardware irq number in that host space
233 *
234 * This is for use by irq controllers that use a radix tree reverse
235 * mapping for fast lookup.
236 */
237extern void irq_radix_revmap_insert(struct irq_host *host, unsigned int virq,
238 irq_hw_number_t hwirq);
239
240/**
241 * irq_radix_revmap_lookup - Find a linux virq from a hw irq number.
242 * @host: host owning this hardware interrupt
243 * @hwirq: hardware irq number in that host space
244 *
245 * This is a fast path, for use by irq controller code that uses radix tree
246 * revmaps
247 */
248extern unsigned int irq_radix_revmap_lookup(struct irq_host *host,
249 irq_hw_number_t hwirq);
250
251/**
252 * irq_linear_revmap - Find a linux virq from a hw irq number.
253 * @host: host owning this hardware interrupt
254 * @hwirq: hardware irq number in that host space
255 *
256 * This is a fast path, for use by irq controller code that uses linear
257 * revmaps. It does fallback to the slow path if the revmap doesn't exist
258 * yet and will create the revmap entry with appropriate locking
259 */
260
261extern unsigned int irq_linear_revmap(struct irq_host *host,
262 irq_hw_number_t hwirq);
263
264
265
266/**
267 * irq_alloc_virt - Allocate virtual irq numbers
268 * @host: host owning these new virtual irqs
269 * @count: number of consecutive numbers to allocate
270 * @hint: pass a hint number, the allocator will try to use a 1:1 mapping
271 *
272 * This is a low level function that is used internally by irq_create_mapping()
273 * and that can be used by some irq controllers implementations for things
274 * like allocating ranges of numbers for MSIs. The revmaps are left untouched.
275 */
276extern unsigned int irq_alloc_virt(struct irq_host *host,
277 unsigned int count,
278 unsigned int hint);
279
280/**
281 * irq_free_virt - Free virtual irq numbers
282 * @virq: virtual irq number of the first interrupt to free
283 * @count: number of interrupts to free
284 *
285 * This function is the opposite of irq_alloc_virt. It will not clear reverse
286 * maps, this should be done previously by unmap'ing the interrupt. In fact,
287 * all interrupts covered by the range being freed should have been unmapped
288 * prior to calling this.
289 */
290extern void irq_free_virt(unsigned int virq, unsigned int count);
291 48
292extern void __init init_pic_c64xplus(void); 49extern void __init init_pic_c64xplus(void);
293 50
diff --git a/arch/c6x/kernel/irq.c b/arch/c6x/kernel/irq.c
index 0929e4b2b244..d77bcfdf0d8e 100644
--- a/arch/c6x/kernel/irq.c
+++ b/arch/c6x/kernel/irq.c
@@ -73,10 +73,10 @@ asmlinkage void c6x_do_IRQ(unsigned int prio, struct pt_regs *regs)
73 set_irq_regs(old_regs); 73 set_irq_regs(old_regs);
74} 74}
75 75
76static struct irq_host *core_host; 76static struct irq_domain *core_domain;
77 77
78static int core_host_map(struct irq_host *h, unsigned int virq, 78static int core_domain_map(struct irq_domain *h, unsigned int virq,
79 irq_hw_number_t hw) 79 irq_hw_number_t hw)
80{ 80{
81 if (hw < 4 || hw >= NR_PRIORITY_IRQS) 81 if (hw < 4 || hw >= NR_PRIORITY_IRQS)
82 return -EINVAL; 82 return -EINVAL;
@@ -86,8 +86,9 @@ static int core_host_map(struct irq_host *h, unsigned int virq,
86 return 0; 86 return 0;
87} 87}
88 88
89static struct irq_host_ops core_host_ops = { 89static const struct irq_domain_ops core_domain_ops = {
90 .map = core_host_map, 90 .map = core_domain_map,
91 .xlate = irq_domain_xlate_onecell,
91}; 92};
92 93
93void __init init_IRQ(void) 94void __init init_IRQ(void)
@@ -100,10 +101,11 @@ void __init init_IRQ(void)
100 np = of_find_compatible_node(NULL, NULL, "ti,c64x+core-pic"); 101 np = of_find_compatible_node(NULL, NULL, "ti,c64x+core-pic");
101 if (np != NULL) { 102 if (np != NULL) {
102 /* create the core host */ 103 /* create the core host */
103 core_host = irq_alloc_host(np, IRQ_HOST_MAP_PRIORITY, 0, 104 core_domain = irq_domain_add_legacy(np, NR_PRIORITY_IRQS,
104 &core_host_ops, 0); 105 0, 0, &core_domain_ops,
105 if (core_host) 106 NULL);
106 irq_set_default_host(core_host); 107 if (core_domain)
108 irq_set_default_host(core_domain);
107 of_node_put(np); 109 of_node_put(np);
108 } 110 }
109 111
@@ -128,601 +130,15 @@ int arch_show_interrupts(struct seq_file *p, int prec)
128 return 0; 130 return 0;
129} 131}
130 132
131/*
132 * IRQ controller and virtual interrupts
133 */
134
135/* The main irq map itself is an array of NR_IRQ entries containing the
136 * associate host and irq number. An entry with a host of NULL is free.
137 * An entry can be allocated if it's free, the allocator always then sets
138 * hwirq first to the host's invalid irq number and then fills ops.
139 */
140struct irq_map_entry {
141 irq_hw_number_t hwirq;
142 struct irq_host *host;
143};
144
145static LIST_HEAD(irq_hosts);
146static DEFINE_RAW_SPINLOCK(irq_big_lock);
147static DEFINE_MUTEX(revmap_trees_mutex);
148static struct irq_map_entry irq_map[NR_IRQS];
149static unsigned int irq_virq_count = NR_IRQS;
150static struct irq_host *irq_default_host;
151
152irq_hw_number_t irqd_to_hwirq(struct irq_data *d) 133irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
153{ 134{
154 return irq_map[d->irq].hwirq; 135 return d->hwirq;
155} 136}
156EXPORT_SYMBOL_GPL(irqd_to_hwirq); 137EXPORT_SYMBOL_GPL(irqd_to_hwirq);
157 138
158irq_hw_number_t virq_to_hw(unsigned int virq) 139irq_hw_number_t virq_to_hw(unsigned int virq)
159{ 140{
160 return irq_map[virq].hwirq; 141 struct irq_data *irq_data = irq_get_irq_data(virq);
142 return WARN_ON(!irq_data) ? 0 : irq_data->hwirq;
161} 143}
162EXPORT_SYMBOL_GPL(virq_to_hw); 144EXPORT_SYMBOL_GPL(virq_to_hw);
163
164bool virq_is_host(unsigned int virq, struct irq_host *host)
165{
166 return irq_map[virq].host == host;
167}
168EXPORT_SYMBOL_GPL(virq_is_host);
169
170static int default_irq_host_match(struct irq_host *h, struct device_node *np)
171{
172 return h->of_node != NULL && h->of_node == np;
173}
174
175struct irq_host *irq_alloc_host(struct device_node *of_node,
176 unsigned int revmap_type,
177 unsigned int revmap_arg,
178 struct irq_host_ops *ops,
179 irq_hw_number_t inval_irq)
180{
181 struct irq_host *host;
182 unsigned int size = sizeof(struct irq_host);
183 unsigned int i;
184 unsigned int *rmap;
185 unsigned long flags;
186
187 /* Allocate structure and revmap table if using linear mapping */
188 if (revmap_type == IRQ_HOST_MAP_LINEAR)
189 size += revmap_arg * sizeof(unsigned int);
190 host = kzalloc(size, GFP_KERNEL);
191 if (host == NULL)
192 return NULL;
193
194 /* Fill structure */
195 host->revmap_type = revmap_type;
196 host->inval_irq = inval_irq;
197 host->ops = ops;
198 host->of_node = of_node_get(of_node);
199
200 if (host->ops->match == NULL)
201 host->ops->match = default_irq_host_match;
202
203 raw_spin_lock_irqsave(&irq_big_lock, flags);
204
205 /* Check for the priority controller. */
206 if (revmap_type == IRQ_HOST_MAP_PRIORITY) {
207 if (irq_map[0].host != NULL) {
208 raw_spin_unlock_irqrestore(&irq_big_lock, flags);
209 of_node_put(host->of_node);
210 kfree(host);
211 return NULL;
212 }
213 irq_map[0].host = host;
214 }
215
216 list_add(&host->link, &irq_hosts);
217 raw_spin_unlock_irqrestore(&irq_big_lock, flags);
218
219 /* Additional setups per revmap type */
220 switch (revmap_type) {
221 case IRQ_HOST_MAP_PRIORITY:
222 /* 0 is always the invalid number for priority */
223 host->inval_irq = 0;
224 /* setup us as the host for all priority interrupts */
225 for (i = 1; i < NR_PRIORITY_IRQS; i++) {
226 irq_map[i].hwirq = i;
227 smp_wmb();
228 irq_map[i].host = host;
229 smp_wmb();
230
231 ops->map(host, i, i);
232 }
233 break;
234 case IRQ_HOST_MAP_LINEAR:
235 rmap = (unsigned int *)(host + 1);
236 for (i = 0; i < revmap_arg; i++)
237 rmap[i] = NO_IRQ;
238 host->revmap_data.linear.size = revmap_arg;
239 smp_wmb();
240 host->revmap_data.linear.revmap = rmap;
241 break;
242 case IRQ_HOST_MAP_TREE:
243 INIT_RADIX_TREE(&host->revmap_data.tree, GFP_KERNEL);
244 break;
245 default:
246 break;
247 }
248
249 pr_debug("irq: Allocated host of type %d @0x%p\n", revmap_type, host);
250
251 return host;
252}
253
254struct irq_host *irq_find_host(struct device_node *node)
255{
256 struct irq_host *h, *found = NULL;
257 unsigned long flags;
258
259 /* We might want to match the legacy controller last since
260 * it might potentially be set to match all interrupts in
261 * the absence of a device node. This isn't a problem so far
262 * yet though...
263 */
264 raw_spin_lock_irqsave(&irq_big_lock, flags);
265 list_for_each_entry(h, &irq_hosts, link)
266 if (h->ops->match(h, node)) {
267 found = h;
268 break;
269 }
270 raw_spin_unlock_irqrestore(&irq_big_lock, flags);
271 return found;
272}
273EXPORT_SYMBOL_GPL(irq_find_host);
274
275void irq_set_default_host(struct irq_host *host)
276{
277 pr_debug("irq: Default host set to @0x%p\n", host);
278
279 irq_default_host = host;
280}
281
282void irq_set_virq_count(unsigned int count)
283{
284 pr_debug("irq: Trying to set virq count to %d\n", count);
285
286 BUG_ON(count < NR_PRIORITY_IRQS);
287 if (count < NR_IRQS)
288 irq_virq_count = count;
289}
290
291static int irq_setup_virq(struct irq_host *host, unsigned int virq,
292 irq_hw_number_t hwirq)
293{
294 int res;
295
296 res = irq_alloc_desc_at(virq, 0);
297 if (res != virq) {
298 pr_debug("irq: -> allocating desc failed\n");
299 goto error;
300 }
301
302 /* map it */
303 smp_wmb();
304 irq_map[virq].hwirq = hwirq;
305 smp_mb();
306
307 if (host->ops->map(host, virq, hwirq)) {
308 pr_debug("irq: -> mapping failed, freeing\n");
309 goto errdesc;
310 }
311
312 irq_clear_status_flags(virq, IRQ_NOREQUEST);
313
314 return 0;
315
316errdesc:
317 irq_free_descs(virq, 1);
318error:
319 irq_free_virt(virq, 1);
320 return -1;
321}
322
323unsigned int irq_create_direct_mapping(struct irq_host *host)
324{
325 unsigned int virq;
326
327 if (host == NULL)
328 host = irq_default_host;
329
330 BUG_ON(host == NULL);
331 WARN_ON(host->revmap_type != IRQ_HOST_MAP_NOMAP);
332
333 virq = irq_alloc_virt(host, 1, 0);
334 if (virq == NO_IRQ) {
335 pr_debug("irq: create_direct virq allocation failed\n");
336 return NO_IRQ;
337 }
338
339 pr_debug("irq: create_direct obtained virq %d\n", virq);
340
341 if (irq_setup_virq(host, virq, virq))
342 return NO_IRQ;
343
344 return virq;
345}
346
347unsigned int irq_create_mapping(struct irq_host *host,
348 irq_hw_number_t hwirq)
349{
350 unsigned int virq, hint;
351
352 pr_debug("irq: irq_create_mapping(0x%p, 0x%lx)\n", host, hwirq);
353
354 /* Look for default host if nececssary */
355 if (host == NULL)
356 host = irq_default_host;
357 if (host == NULL) {
358 printk(KERN_WARNING "irq_create_mapping called for"
359 " NULL host, hwirq=%lx\n", hwirq);
360 WARN_ON(1);
361 return NO_IRQ;
362 }
363 pr_debug("irq: -> using host @%p\n", host);
364
365 /* Check if mapping already exists */
366 virq = irq_find_mapping(host, hwirq);
367 if (virq != NO_IRQ) {
368 pr_debug("irq: -> existing mapping on virq %d\n", virq);
369 return virq;
370 }
371
372 /* Allocate a virtual interrupt number */
373 hint = hwirq % irq_virq_count;
374 virq = irq_alloc_virt(host, 1, hint);
375 if (virq == NO_IRQ) {
376 pr_debug("irq: -> virq allocation failed\n");
377 return NO_IRQ;
378 }
379
380 if (irq_setup_virq(host, virq, hwirq))
381 return NO_IRQ;
382
383 pr_debug("irq: irq %lu on host %s mapped to virtual irq %u\n",
384 hwirq, host->of_node ? host->of_node->full_name : "null", virq);
385
386 return virq;
387}
388EXPORT_SYMBOL_GPL(irq_create_mapping);
389
390unsigned int irq_create_of_mapping(struct device_node *controller,
391 const u32 *intspec, unsigned int intsize)
392{
393 struct irq_host *host;
394 irq_hw_number_t hwirq;
395 unsigned int type = IRQ_TYPE_NONE;
396 unsigned int virq;
397
398 if (controller == NULL)
399 host = irq_default_host;
400 else
401 host = irq_find_host(controller);
402 if (host == NULL) {
403 printk(KERN_WARNING "irq: no irq host found for %s !\n",
404 controller->full_name);
405 return NO_IRQ;
406 }
407
408 /* If host has no translation, then we assume interrupt line */
409 if (host->ops->xlate == NULL)
410 hwirq = intspec[0];
411 else {
412 if (host->ops->xlate(host, controller, intspec, intsize,
413 &hwirq, &type))
414 return NO_IRQ;
415 }
416
417 /* Create mapping */
418 virq = irq_create_mapping(host, hwirq);
419 if (virq == NO_IRQ)
420 return virq;
421
422 /* Set type if specified and different than the current one */
423 if (type != IRQ_TYPE_NONE &&
424 type != (irqd_get_trigger_type(irq_get_irq_data(virq))))
425 irq_set_irq_type(virq, type);
426 return virq;
427}
428EXPORT_SYMBOL_GPL(irq_create_of_mapping);
429
430void irq_dispose_mapping(unsigned int virq)
431{
432 struct irq_host *host;
433 irq_hw_number_t hwirq;
434
435 if (virq == NO_IRQ)
436 return;
437
438 /* Never unmap priority interrupts */
439 if (virq < NR_PRIORITY_IRQS)
440 return;
441
442 host = irq_map[virq].host;
443 if (WARN_ON(host == NULL))
444 return;
445
446 irq_set_status_flags(virq, IRQ_NOREQUEST);
447
448 /* remove chip and handler */
449 irq_set_chip_and_handler(virq, NULL, NULL);
450
451 /* Make sure it's completed */
452 synchronize_irq(virq);
453
454 /* Tell the PIC about it */
455 if (host->ops->unmap)
456 host->ops->unmap(host, virq);
457 smp_mb();
458
459 /* Clear reverse map */
460 hwirq = irq_map[virq].hwirq;
461 switch (host->revmap_type) {
462 case IRQ_HOST_MAP_LINEAR:
463 if (hwirq < host->revmap_data.linear.size)
464 host->revmap_data.linear.revmap[hwirq] = NO_IRQ;
465 break;
466 case IRQ_HOST_MAP_TREE:
467 mutex_lock(&revmap_trees_mutex);
468 radix_tree_delete(&host->revmap_data.tree, hwirq);
469 mutex_unlock(&revmap_trees_mutex);
470 break;
471 }
472
473 /* Destroy map */
474 smp_mb();
475 irq_map[virq].hwirq = host->inval_irq;
476
477 irq_free_descs(virq, 1);
478 /* Free it */
479 irq_free_virt(virq, 1);
480}
481EXPORT_SYMBOL_GPL(irq_dispose_mapping);
482
483unsigned int irq_find_mapping(struct irq_host *host,
484 irq_hw_number_t hwirq)
485{
486 unsigned int i;
487 unsigned int hint = hwirq % irq_virq_count;
488
489 /* Look for default host if nececssary */
490 if (host == NULL)
491 host = irq_default_host;
492 if (host == NULL)
493 return NO_IRQ;
494
495 /* Slow path does a linear search of the map */
496 i = hint;
497 do {
498 if (irq_map[i].host == host &&
499 irq_map[i].hwirq == hwirq)
500 return i;
501 i++;
502 if (i >= irq_virq_count)
503 i = 4;
504 } while (i != hint);
505 return NO_IRQ;
506}
507EXPORT_SYMBOL_GPL(irq_find_mapping);
508
509unsigned int irq_radix_revmap_lookup(struct irq_host *host,
510 irq_hw_number_t hwirq)
511{
512 struct irq_map_entry *ptr;
513 unsigned int virq;
514
515 if (WARN_ON_ONCE(host->revmap_type != IRQ_HOST_MAP_TREE))
516 return irq_find_mapping(host, hwirq);
517
518 /*
519 * The ptr returned references the static global irq_map.
520 * but freeing an irq can delete nodes along the path to
521 * do the lookup via call_rcu.
522 */
523 rcu_read_lock();
524 ptr = radix_tree_lookup(&host->revmap_data.tree, hwirq);
525 rcu_read_unlock();
526
527 /*
528 * If found in radix tree, then fine.
529 * Else fallback to linear lookup - this should not happen in practice
530 * as it means that we failed to insert the node in the radix tree.
531 */
532 if (ptr)
533 virq = ptr - irq_map;
534 else
535 virq = irq_find_mapping(host, hwirq);
536
537 return virq;
538}
539
540void irq_radix_revmap_insert(struct irq_host *host, unsigned int virq,
541 irq_hw_number_t hwirq)
542{
543 if (WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE))
544 return;
545
546 if (virq != NO_IRQ) {
547 mutex_lock(&revmap_trees_mutex);
548 radix_tree_insert(&host->revmap_data.tree, hwirq,
549 &irq_map[virq]);
550 mutex_unlock(&revmap_trees_mutex);
551 }
552}
553
554unsigned int irq_linear_revmap(struct irq_host *host,
555 irq_hw_number_t hwirq)
556{
557 unsigned int *revmap;
558
559 if (WARN_ON_ONCE(host->revmap_type != IRQ_HOST_MAP_LINEAR))
560 return irq_find_mapping(host, hwirq);
561
562 /* Check revmap bounds */
563 if (unlikely(hwirq >= host->revmap_data.linear.size))
564 return irq_find_mapping(host, hwirq);
565
566 /* Check if revmap was allocated */
567 revmap = host->revmap_data.linear.revmap;
568 if (unlikely(revmap == NULL))
569 return irq_find_mapping(host, hwirq);
570
571 /* Fill up revmap with slow path if no mapping found */
572 if (unlikely(revmap[hwirq] == NO_IRQ))
573 revmap[hwirq] = irq_find_mapping(host, hwirq);
574
575 return revmap[hwirq];
576}
577
578unsigned int irq_alloc_virt(struct irq_host *host,
579 unsigned int count,
580 unsigned int hint)
581{
582 unsigned long flags;
583 unsigned int i, j, found = NO_IRQ;
584
585 if (count == 0 || count > (irq_virq_count - NR_PRIORITY_IRQS))
586 return NO_IRQ;
587
588 raw_spin_lock_irqsave(&irq_big_lock, flags);
589
590 /* Use hint for 1 interrupt if any */
591 if (count == 1 && hint >= NR_PRIORITY_IRQS &&
592 hint < irq_virq_count && irq_map[hint].host == NULL) {
593 found = hint;
594 goto hint_found;
595 }
596
597 /* Look for count consecutive numbers in the allocatable
598 * (non-legacy) space
599 */
600 for (i = NR_PRIORITY_IRQS, j = 0; i < irq_virq_count; i++) {
601 if (irq_map[i].host != NULL)
602 j = 0;
603 else
604 j++;
605
606 if (j == count) {
607 found = i - count + 1;
608 break;
609 }
610 }
611 if (found == NO_IRQ) {
612 raw_spin_unlock_irqrestore(&irq_big_lock, flags);
613 return NO_IRQ;
614 }
615 hint_found:
616 for (i = found; i < (found + count); i++) {
617 irq_map[i].hwirq = host->inval_irq;
618 smp_wmb();
619 irq_map[i].host = host;
620 }
621 raw_spin_unlock_irqrestore(&irq_big_lock, flags);
622 return found;
623}
624
625void irq_free_virt(unsigned int virq, unsigned int count)
626{
627 unsigned long flags;
628 unsigned int i;
629
630 WARN_ON(virq < NR_PRIORITY_IRQS);
631 WARN_ON(count == 0 || (virq + count) > irq_virq_count);
632
633 if (virq < NR_PRIORITY_IRQS) {
634 if (virq + count < NR_PRIORITY_IRQS)
635 return;
636 count -= NR_PRIORITY_IRQS - virq;
637 virq = NR_PRIORITY_IRQS;
638 }
639
640 if (count > irq_virq_count || virq > irq_virq_count - count) {
641 if (virq > irq_virq_count)
642 return;
643 count = irq_virq_count - virq;
644 }
645
646 raw_spin_lock_irqsave(&irq_big_lock, flags);
647 for (i = virq; i < (virq + count); i++) {
648 struct irq_host *host;
649
650 host = irq_map[i].host;
651 irq_map[i].hwirq = host->inval_irq;
652 smp_wmb();
653 irq_map[i].host = NULL;
654 }
655 raw_spin_unlock_irqrestore(&irq_big_lock, flags);
656}
657
658#ifdef CONFIG_VIRQ_DEBUG
659static int virq_debug_show(struct seq_file *m, void *private)
660{
661 unsigned long flags;
662 struct irq_desc *desc;
663 const char *p;
664 static const char none[] = "none";
665 void *data;
666 int i;
667
668 seq_printf(m, "%-5s %-7s %-15s %-18s %s\n", "virq", "hwirq",
669 "chip name", "chip data", "host name");
670
671 for (i = 1; i < nr_irqs; i++) {
672 desc = irq_to_desc(i);
673 if (!desc)
674 continue;
675
676 raw_spin_lock_irqsave(&desc->lock, flags);
677
678 if (desc->action && desc->action->handler) {
679 struct irq_chip *chip;
680
681 seq_printf(m, "%5d ", i);
682 seq_printf(m, "0x%05lx ", irq_map[i].hwirq);
683
684 chip = irq_desc_get_chip(desc);
685 if (chip && chip->name)
686 p = chip->name;
687 else
688 p = none;
689 seq_printf(m, "%-15s ", p);
690
691 data = irq_desc_get_chip_data(desc);
692 seq_printf(m, "0x%16p ", data);
693
694 if (irq_map[i].host && irq_map[i].host->of_node)
695 p = irq_map[i].host->of_node->full_name;
696 else
697 p = none;
698 seq_printf(m, "%s\n", p);
699 }
700
701 raw_spin_unlock_irqrestore(&desc->lock, flags);
702 }
703
704 return 0;
705}
706
707static int virq_debug_open(struct inode *inode, struct file *file)
708{
709 return single_open(file, virq_debug_show, inode->i_private);
710}
711
712static const struct file_operations virq_debug_fops = {
713 .open = virq_debug_open,
714 .read = seq_read,
715 .llseek = seq_lseek,
716 .release = single_release,
717};
718
719static int __init irq_debugfs_init(void)
720{
721 if (debugfs_create_file("virq_mapping", S_IRUGO, powerpc_debugfs_root,
722 NULL, &virq_debug_fops) == NULL)
723 return -ENOMEM;
724
725 return 0;
726}
727device_initcall(irq_debugfs_init);
728#endif /* CONFIG_VIRQ_DEBUG */
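The helpers removed above (irq_radix_revmap_lookup(), irq_linear_revmap(), irq_alloc_virt(), irq_free_virt() and the virq debugfs view) all have generic equivalents in the irqdomain core once IRQ_DOMAIN is selected. As a rough sketch of what a cascaded controller looks like on top of the generic API, assuming a hypothetical linear-mapped PIC; my_pic, my_pic_pending() and the 0x04 pending-register offset are illustrative names, not part of this patch:

#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/irqdomain.h>

struct my_pic {
	struct irq_domain *domain;	/* created with irq_domain_add_linear() */
	void __iomem *regs;
};

/* hypothetical "pending" register holding the active hardware line */
static irq_hw_number_t my_pic_pending(struct my_pic *pic)
{
	return readl(pic->regs + 0x04);
}

static void my_pic_cascade(unsigned int irq, struct irq_desc *desc)
{
	struct my_pic *pic = irq_desc_get_handler_data(desc);
	irq_hw_number_t hwirq = my_pic_pending(pic);
	unsigned int virq;

	/* fast reverse map for linear domains; the core falls back to a
	 * slow search if the entry has not been populated yet */
	virq = irq_linear_revmap(pic->domain, hwirq);
	if (virq)
		generic_handle_irq(virq);
}

Such a handler would be attached with irq_set_handler_data()/irq_set_chained_handler() on the upstream interrupt, as the converted drivers further down do.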
diff --git a/arch/c6x/platforms/megamod-pic.c b/arch/c6x/platforms/megamod-pic.c
index 7c37a947fb1c..c1c4e2ae3f85 100644
--- a/arch/c6x/platforms/megamod-pic.c
+++ b/arch/c6x/platforms/megamod-pic.c
@@ -48,7 +48,7 @@ struct megamod_regs {
48}; 48};
49 49
50struct megamod_pic { 50struct megamod_pic {
51 struct irq_host *irqhost; 51 struct irq_domain *irqhost;
52 struct megamod_regs __iomem *regs; 52 struct megamod_regs __iomem *regs;
53 raw_spinlock_t lock; 53 raw_spinlock_t lock;
54 54
@@ -116,7 +116,7 @@ static void megamod_irq_cascade(unsigned int irq, struct irq_desc *desc)
116 } 116 }
117} 117}
118 118
119static int megamod_map(struct irq_host *h, unsigned int virq, 119static int megamod_map(struct irq_domain *h, unsigned int virq,
120 irq_hw_number_t hw) 120 irq_hw_number_t hw)
121{ 121{
122 struct megamod_pic *pic = h->host_data; 122 struct megamod_pic *pic = h->host_data;
@@ -136,21 +136,9 @@ static int megamod_map(struct irq_host *h, unsigned int virq,
136 return 0; 136 return 0;
137} 137}
138 138
139static int megamod_xlate(struct irq_host *h, struct device_node *ct, 139static const struct irq_domain_ops megamod_domain_ops = {
140 const u32 *intspec, unsigned int intsize,
141 irq_hw_number_t *out_hwirq, unsigned int *out_type)
142
143{
144 /* megamod intspecs must have 1 cell */
145 BUG_ON(intsize != 1);
146 *out_hwirq = intspec[0];
147 *out_type = IRQ_TYPE_NONE;
148 return 0;
149}
150
151static struct irq_host_ops megamod_host_ops = {
152 .map = megamod_map, 140 .map = megamod_map,
153 .xlate = megamod_xlate, 141 .xlate = irq_domain_xlate_onecell,
154}; 142};
155 143
156static void __init set_megamod_mux(struct megamod_pic *pic, int src, int output) 144static void __init set_megamod_mux(struct megamod_pic *pic, int src, int output)
@@ -223,9 +211,8 @@ static struct megamod_pic * __init init_megamod_pic(struct device_node *np)
223 return NULL; 211 return NULL;
224 } 212 }
225 213
226 pic->irqhost = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, 214 pic->irqhost = irq_domain_add_linear(np, NR_COMBINERS * 32,
227 NR_COMBINERS * 32, &megamod_host_ops, 215 &megamod_domain_ops, pic);
228 IRQ_UNMAPPED);
229 if (!pic->irqhost) { 216 if (!pic->irqhost) {
230 pr_err("%s: Could not alloc host.\n", np->full_name); 217 pr_err("%s: Could not alloc host.\n", np->full_name);
231 goto error_free; 218 goto error_free;
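The conversion pattern used here, spelled out for a hypothetical controller (my_pic_chip, MY_PIC_NR_IRQS and the handler choice are illustrative, not from this patch): the hand-rolled one-cell xlate becomes irq_domain_xlate_onecell(), and irq_alloc_host(..., IRQ_HOST_MAP_LINEAR, ...) becomes irq_domain_add_linear() with the driver state passed as host_data.

#include <linux/init.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/of.h>

#define MY_PIC_NR_IRQS	32

static struct irq_chip my_pic_chip = {
	.name = "MY-PIC",
	/* .irq_mask/.irq_unmask/.irq_ack would poke the controller here */
};

static int my_pic_map(struct irq_domain *d, unsigned int virq,
		      irq_hw_number_t hw)
{
	/* d->host_data is whatever was handed to irq_domain_add_linear() */
	irq_set_chip_data(virq, d->host_data);
	irq_set_chip_and_handler(virq, &my_pic_chip, handle_level_irq);
	return 0;
}

static const struct irq_domain_ops my_pic_domain_ops = {
	.map	= my_pic_map,
	.xlate	= irq_domain_xlate_onecell,	/* one-cell interrupt specifier */
};

static struct irq_domain * __init my_pic_add_domain(struct device_node *np,
						    void *pic)
{
	return irq_domain_add_linear(np, MY_PIC_NR_IRQS,
				     &my_pic_domain_ops, pic);
}

Note that the old inval_irq argument has no equivalent; an unmapped hwirq simply reverse-maps to irq 0.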
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index c8d6efb99dbf..11060fa87da3 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -14,6 +14,7 @@ config MICROBLAZE
14 select TRACING_SUPPORT 14 select TRACING_SUPPORT
15 select OF 15 select OF
16 select OF_EARLY_FLATTREE 16 select OF_EARLY_FLATTREE
17 select IRQ_DOMAIN
17 select HAVE_GENERIC_HARDIRQS 18 select HAVE_GENERIC_HARDIRQS
18 select GENERIC_IRQ_PROBE 19 select GENERIC_IRQ_PROBE
19 select GENERIC_IRQ_SHOW 20 select GENERIC_IRQ_SHOW
diff --git a/arch/microblaze/include/asm/hardirq.h b/arch/microblaze/include/asm/hardirq.h
index cd1ac9aad56c..fb3c05a0cbbf 100644
--- a/arch/microblaze/include/asm/hardirq.h
+++ b/arch/microblaze/include/asm/hardirq.h
@@ -1,17 +1 @@
1/*
2 * Copyright (C) 2006 Atmark Techno, Inc.
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 */
8
9#ifndef _ASM_MICROBLAZE_HARDIRQ_H
10#define _ASM_MICROBLAZE_HARDIRQ_H
11
12/* should be defined in each interrupt controller driver */
13extern unsigned int get_irq(struct pt_regs *regs);
14
15#include <asm-generic/hardirq.h> 1#include <asm-generic/hardirq.h>
16
17#endif /* _ASM_MICROBLAZE_HARDIRQ_H */
diff --git a/arch/microblaze/include/asm/irq.h b/arch/microblaze/include/asm/irq.h
index a175132e4496..bab3b1393ad4 100644
--- a/arch/microblaze/include/asm/irq.h
+++ b/arch/microblaze/include/asm/irq.h
@@ -9,49 +9,13 @@
9#ifndef _ASM_MICROBLAZE_IRQ_H 9#ifndef _ASM_MICROBLAZE_IRQ_H
10#define _ASM_MICROBLAZE_IRQ_H 10#define _ASM_MICROBLAZE_IRQ_H
11 11
12 12#define NR_IRQS (32 + 1)
13/*
14 * Linux IRQ# is currently offset by one to map to the hardware
15 * irq number. So hardware IRQ0 maps to Linux irq 1.
16 */
17#define NO_IRQ_OFFSET 1
18#define IRQ_OFFSET NO_IRQ_OFFSET
19#define NR_IRQS (32 + IRQ_OFFSET)
20#include <asm-generic/irq.h> 13#include <asm-generic/irq.h>
21 14
22/* This type is the placeholder for a hardware interrupt number. It has to
23 * be big enough to enclose whatever representation is used by a given
24 * platform.
25 */
26typedef unsigned long irq_hw_number_t;
27
28extern unsigned int nr_irq;
29
30struct pt_regs; 15struct pt_regs;
31extern void do_IRQ(struct pt_regs *regs); 16extern void do_IRQ(struct pt_regs *regs);
32 17
33/** FIXME - not implement 18/* should be defined in each interrupt controller driver */
34 * irq_dispose_mapping - Unmap an interrupt 19extern unsigned int get_irq(void);
35 * @virq: linux virq number of the interrupt to unmap
36 */
37static inline void irq_dispose_mapping(unsigned int virq)
38{
39 return;
40}
41
42struct irq_host;
43
44/**
45 * irq_create_mapping - Map a hardware interrupt into linux virq space
46 * @host: host owning this hardware interrupt or NULL for default host
47 * @hwirq: hardware irq number in that host space
48 *
49 * Only one mapping per hardware interrupt is permitted. Returns a linux
50 * virq number.
51 * If the sense/trigger is to be specified, set_irq_type() should be called
52 * on the number returned from that call.
53 */
54extern unsigned int irq_create_mapping(struct irq_host *host,
55 irq_hw_number_t hwirq);
56 20
57#endif /* _ASM_MICROBLAZE_IRQ_H */ 21#endif /* _ASM_MICROBLAZE_IRQ_H */
diff --git a/arch/microblaze/kernel/intc.c b/arch/microblaze/kernel/intc.c
index 44b177e2ab12..ad120672cee5 100644
--- a/arch/microblaze/kernel/intc.c
+++ b/arch/microblaze/kernel/intc.c
@@ -9,6 +9,7 @@
9 */ 9 */
10 10
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/irqdomain.h>
12#include <linux/irq.h> 13#include <linux/irq.h>
13#include <asm/page.h> 14#include <asm/page.h>
14#include <linux/io.h> 15#include <linux/io.h>
@@ -25,8 +26,6 @@ static unsigned int intc_baseaddr;
25#define INTC_BASE intc_baseaddr 26#define INTC_BASE intc_baseaddr
26#endif 27#endif
27 28
28unsigned int nr_irq;
29
30/* No one else should require these constants, so define them locally here. */ 29/* No one else should require these constants, so define them locally here. */
31#define ISR 0x00 /* Interrupt Status Register */ 30#define ISR 0x00 /* Interrupt Status Register */
32#define IPR 0x04 /* Interrupt Pending Register */ 31#define IPR 0x04 /* Interrupt Pending Register */
@@ -84,24 +83,45 @@ static struct irq_chip intc_dev = {
84 .irq_mask_ack = intc_mask_ack, 83 .irq_mask_ack = intc_mask_ack,
85}; 84};
86 85
87unsigned int get_irq(struct pt_regs *regs) 86static struct irq_domain *root_domain;
87
88unsigned int get_irq(void)
88{ 89{
89 int irq; 90 unsigned int hwirq, irq = -1;
90 91
91 /* 92 hwirq = in_be32(INTC_BASE + IVR);
92 * NOTE: This function is the one that needs to be improved in 93 if (hwirq != -1U)
93 * order to handle multiple interrupt controllers. It currently 94 irq = irq_find_mapping(root_domain, hwirq);
94 * is hardcoded to check for interrupts only on the first INTC. 95
95 */ 96 pr_debug("get_irq: hwirq=%d, irq=%d\n", hwirq, irq);
96 irq = in_be32(INTC_BASE + IVR) + NO_IRQ_OFFSET;
97 pr_debug("get_irq: %d\n", irq);
98 97
99 return irq; 98 return irq;
100} 99}
101 100
101int xintc_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
102{
103 u32 intr_mask = (u32)d->host_data;
104
105 if (intr_mask & (1 << hw)) {
106 irq_set_chip_and_handler_name(irq, &intc_dev,
107 handle_edge_irq, "edge");
108 irq_clear_status_flags(irq, IRQ_LEVEL);
109 } else {
110 irq_set_chip_and_handler_name(irq, &intc_dev,
111 handle_level_irq, "level");
112 irq_set_status_flags(irq, IRQ_LEVEL);
113 }
114 return 0;
115}
116
117static const struct irq_domain_ops xintc_irq_domain_ops = {
118 .xlate = irq_domain_xlate_onetwocell,
119 .map = xintc_map,
120};
121
102void __init init_IRQ(void) 122void __init init_IRQ(void)
103{ 123{
104 u32 i, intr_mask; 124 u32 nr_irq, intr_mask;
105 struct device_node *intc = NULL; 125 struct device_node *intc = NULL;
106#ifdef CONFIG_SELFMOD_INTC 126#ifdef CONFIG_SELFMOD_INTC
107 unsigned int intc_baseaddr = 0; 127 unsigned int intc_baseaddr = 0;
@@ -146,16 +166,9 @@ void __init init_IRQ(void)
146 /* Turn on the Master Enable. */ 166 /* Turn on the Master Enable. */
147 out_be32(intc_baseaddr + MER, MER_HIE | MER_ME); 167 out_be32(intc_baseaddr + MER, MER_HIE | MER_ME);
148 168
149 for (i = IRQ_OFFSET; i < (nr_irq + IRQ_OFFSET); ++i) { 169 /* Yeah, okay, casting the intr_mask to a void* is butt-ugly, but I'm
150 if (intr_mask & (0x00000001 << (i - IRQ_OFFSET))) { 170 * lazy and Michal can clean it up to something nicer when he tests
151 irq_set_chip_and_handler_name(i, &intc_dev, 171 * and commits this patch. ~~gcl */
152 handle_edge_irq, "edge"); 172 root_domain = irq_domain_add_linear(intc, nr_irq, &xintc_irq_domain_ops,
153 irq_clear_status_flags(i, IRQ_LEVEL); 173 (void *)intr_mask);
154 } else {
155 irq_set_chip_and_handler_name(i, &intc_dev,
156 handle_level_irq, "level");
157 irq_set_status_flags(i, IRQ_LEVEL);
158 }
159 irq_get_irq_data(i)->hwirq = i - IRQ_OFFSET;
160 }
161} 174}
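Consumers are unchanged by the conversion: irq_of_parse_and_map() runs the device-tree specifier through the domain's xlate op, irq_create_mapping() allocates the virq, and the map op (xintc_map() above) installs the chip and the edge/level flow handler before the driver ever sees the number. A sketch of that consumer path for a hypothetical device node; my_dev_isr() and the "my-dev" name are illustrative only:

#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_irq.h>

static irqreturn_t my_dev_isr(int irq, void *dev_id)
{
	/* acknowledge and handle the device here */
	return IRQ_HANDLED;
}

static int my_dev_setup_irq(struct device_node *np, void *priv)
{
	unsigned int virq;

	/* walks "interrupts"/"interrupt-parent", ends up in irq_create_mapping() */
	virq = irq_of_parse_and_map(np, 0);
	if (!virq)
		return -EINVAL;

	return request_irq(virq, my_dev_isr, 0, "my-dev", priv);
}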
diff --git a/arch/microblaze/kernel/irq.c b/arch/microblaze/kernel/irq.c
index bbebcae72c02..ace700afbfdf 100644
--- a/arch/microblaze/kernel/irq.c
+++ b/arch/microblaze/kernel/irq.c
@@ -31,14 +31,13 @@ void __irq_entry do_IRQ(struct pt_regs *regs)
31 trace_hardirqs_off(); 31 trace_hardirqs_off();
32 32
33 irq_enter(); 33 irq_enter();
34 irq = get_irq(regs); 34 irq = get_irq();
35next_irq: 35next_irq:
36 BUG_ON(!irq); 36 BUG_ON(!irq);
37 /* Substract 1 because of get_irq */ 37 generic_handle_irq(irq);
38 generic_handle_irq(irq + IRQ_OFFSET - NO_IRQ_OFFSET);
39 38
40 irq = get_irq(regs); 39 irq = get_irq();
41 if (irq) { 40 if (irq != -1U) {
42 pr_debug("next irq: %d\n", irq); 41 pr_debug("next irq: %d\n", irq);
43 ++concurrent_irq; 42 ++concurrent_irq;
44 goto next_irq; 43 goto next_irq;
@@ -48,18 +47,3 @@ next_irq:
48 set_irq_regs(old_regs); 47 set_irq_regs(old_regs);
49 trace_hardirqs_on(); 48 trace_hardirqs_on();
50} 49}
51
52/* MS: There is no any advance mapping mechanism. We are using simple 32bit
53 intc without any cascades or any connection that's why mapping is 1:1 */
54unsigned int irq_create_mapping(struct irq_host *host, irq_hw_number_t hwirq)
55{
56 return hwirq + IRQ_OFFSET;
57}
58EXPORT_SYMBOL_GPL(irq_create_mapping);
59
60unsigned int irq_create_of_mapping(struct device_node *controller,
61 const u32 *intspec, unsigned int intsize)
62{
63 return intspec[0] + IRQ_OFFSET;
64}
65EXPORT_SYMBOL_GPL(irq_create_of_mapping);
diff --git a/arch/microblaze/kernel/setup.c b/arch/microblaze/kernel/setup.c
index 604cd9dd1333..70e6d0b41ab4 100644
--- a/arch/microblaze/kernel/setup.c
+++ b/arch/microblaze/kernel/setup.c
@@ -51,8 +51,6 @@ void __init setup_arch(char **cmdline_p)
51 51
52 unflatten_device_tree(); 52 unflatten_device_tree();
53 53
54 /* NOTE I think that this function is not necessary to call */
55 /* irq_early_init(); */
56 setup_cpuinfo(); 54 setup_cpuinfo();
57 55
58 microblaze_cache_init(); 56 microblaze_cache_init();
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 5ab6e89603c5..edbbae17e820 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -2327,6 +2327,7 @@ config USE_OF
2327 bool "Flattened Device Tree support" 2327 bool "Flattened Device Tree support"
2328 select OF 2328 select OF
2329 select OF_EARLY_FLATTREE 2329 select OF_EARLY_FLATTREE
2330 select IRQ_DOMAIN
2330 help 2331 help
2331 Include support for flattened device tree machine descriptions. 2332 Include support for flattened device tree machine descriptions.
2332 2333
diff --git a/arch/mips/include/asm/irq.h b/arch/mips/include/asm/irq.h
index 2354c870a63a..fb698dc09bc9 100644
--- a/arch/mips/include/asm/irq.h
+++ b/arch/mips/include/asm/irq.h
@@ -11,15 +11,12 @@
11 11
12#include <linux/linkage.h> 12#include <linux/linkage.h>
13#include <linux/smp.h> 13#include <linux/smp.h>
14#include <linux/irqdomain.h>
14 15
15#include <asm/mipsmtregs.h> 16#include <asm/mipsmtregs.h>
16 17
17#include <irq.h> 18#include <irq.h>
18 19
19static inline void irq_dispose_mapping(unsigned int virq)
20{
21}
22
23#ifdef CONFIG_I8259 20#ifdef CONFIG_I8259
24static inline int irq_canonicalize(int irq) 21static inline int irq_canonicalize(int irq)
25{ 22{
diff --git a/arch/mips/kernel/prom.c b/arch/mips/kernel/prom.c
index 6b8b4208481e..558b5395795d 100644
--- a/arch/mips/kernel/prom.c
+++ b/arch/mips/kernel/prom.c
@@ -60,20 +60,6 @@ void __init early_init_dt_setup_initrd_arch(unsigned long start,
60} 60}
61#endif 61#endif
62 62
63/*
64 * irq_create_of_mapping - Hook to resolve OF irq specifier into a Linux irq#
65 *
66 * Currently the mapping mechanism is trivial; simple flat hwirq numbers are
67 * mapped 1:1 onto Linux irq numbers. Cascaded irq controllers are not
68 * supported.
69 */
70unsigned int irq_create_of_mapping(struct device_node *controller,
71 const u32 *intspec, unsigned int intsize)
72{
73 return intspec[0];
74}
75EXPORT_SYMBOL_GPL(irq_create_of_mapping);
76
77void __init early_init_devtree(void *params) 63void __init early_init_devtree(void *params)
78{ 64{
79 /* Setup flat device-tree pointer */ 65 /* Setup flat device-tree pointer */
diff --git a/arch/openrisc/include/asm/prom.h b/arch/openrisc/include/asm/prom.h
index e1f3fe26606c..bbb34e5343a2 100644
--- a/arch/openrisc/include/asm/prom.h
+++ b/arch/openrisc/include/asm/prom.h
@@ -24,6 +24,7 @@
24 24
25#include <linux/types.h> 25#include <linux/types.h>
26#include <asm/irq.h> 26#include <asm/irq.h>
27#include <linux/irqdomain.h>
27#include <linux/atomic.h> 28#include <linux/atomic.h>
28#include <linux/of_irq.h> 29#include <linux/of_irq.h>
29#include <linux/of_fdt.h> 30#include <linux/of_fdt.h>
@@ -63,15 +64,6 @@ extern const void *of_get_mac_address(struct device_node *np);
63struct pci_dev; 64struct pci_dev;
64extern int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq); 65extern int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq);
65 66
66/* This routine is here to provide compatibility with how powerpc
67 * handles IRQ mapping for OF device nodes. We precompute and permanently
68 * register them in the platform_device objects, whereas powerpc computes them
69 * on request.
70 */
71static inline void irq_dispose_mapping(unsigned int virq)
72{
73}
74
75#endif /* __ASSEMBLY__ */ 67#endif /* __ASSEMBLY__ */
76#endif /* __KERNEL__ */ 68#endif /* __KERNEL__ */
77#endif /* _ASM_OPENRISC_PROM_H */ 69#endif /* _ASM_OPENRISC_PROM_H */
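The empty irq_dispose_mapping() stubs (here and in the mips and microblaze headers above) can go because the irqdomain core provides a real implementation on every architecture that selects IRQ_DOMAIN. Teardown mirrors the mapping path; a minimal sketch, assuming virq came from irq_of_parse_and_map():

#include <linux/interrupt.h>
#include <linux/irqdomain.h>

static void my_dev_teardown_irq(unsigned int virq, void *priv)
{
	free_irq(virq, priv);
	/* drops the hwirq<->virq mapping created by irq_of_parse_and_map() */
	irq_dispose_mapping(virq);
}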
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 1919634a9b32..303703d716fe 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -135,6 +135,7 @@ config PPC
135 select HAVE_GENERIC_HARDIRQS 135 select HAVE_GENERIC_HARDIRQS
136 select HAVE_SPARSE_IRQ 136 select HAVE_SPARSE_IRQ
137 select IRQ_PER_CPU 137 select IRQ_PER_CPU
138 select IRQ_DOMAIN
138 select GENERIC_IRQ_SHOW 139 select GENERIC_IRQ_SHOW
139 select GENERIC_IRQ_SHOW_LEVEL 140 select GENERIC_IRQ_SHOW_LEVEL
140 select IRQ_FORCED_THREADING 141 select IRQ_FORCED_THREADING
diff --git a/arch/powerpc/include/asm/ehv_pic.h b/arch/powerpc/include/asm/ehv_pic.h
index a9e1f4f796f6..dc7d48e3ea90 100644
--- a/arch/powerpc/include/asm/ehv_pic.h
+++ b/arch/powerpc/include/asm/ehv_pic.h
@@ -25,7 +25,7 @@
25 25
26struct ehv_pic { 26struct ehv_pic {
27 /* The remapper for this EHV_PIC */ 27 /* The remapper for this EHV_PIC */
28 struct irq_host *irqhost; 28 struct irq_domain *irqhost;
29 29
30 /* The "linux" controller struct */ 30 /* The "linux" controller struct */
31 struct irq_chip hc_irq; 31 struct irq_chip hc_irq;
diff --git a/arch/powerpc/include/asm/i8259.h b/arch/powerpc/include/asm/i8259.h
index 105ade297aad..c3fdfbd5a673 100644
--- a/arch/powerpc/include/asm/i8259.h
+++ b/arch/powerpc/include/asm/i8259.h
@@ -6,7 +6,7 @@
6 6
7extern void i8259_init(struct device_node *node, unsigned long intack_addr); 7extern void i8259_init(struct device_node *node, unsigned long intack_addr);
8extern unsigned int i8259_irq(void); 8extern unsigned int i8259_irq(void);
9extern struct irq_host *i8259_get_host(void); 9extern struct irq_domain *i8259_get_host(void);
10 10
11#endif /* __KERNEL__ */ 11#endif /* __KERNEL__ */
12#endif /* _ASM_POWERPC_I8259_H */ 12#endif /* _ASM_POWERPC_I8259_H */
diff --git a/arch/powerpc/include/asm/irq.h b/arch/powerpc/include/asm/irq.h
index c0e1bc319e35..fe0b09dceb7d 100644
--- a/arch/powerpc/include/asm/irq.h
+++ b/arch/powerpc/include/asm/irq.h
@@ -9,6 +9,7 @@
9 * 2 of the License, or (at your option) any later version. 9 * 2 of the License, or (at your option) any later version.
10 */ 10 */
11 11
12#include <linux/irqdomain.h>
12#include <linux/threads.h> 13#include <linux/threads.h>
13#include <linux/list.h> 14#include <linux/list.h>
14#include <linux/radix-tree.h> 15#include <linux/radix-tree.h>
@@ -35,258 +36,12 @@ extern atomic_t ppc_n_lost_interrupts;
35/* Total number of virq in the platform */ 36/* Total number of virq in the platform */
36#define NR_IRQS CONFIG_NR_IRQS 37#define NR_IRQS CONFIG_NR_IRQS
37 38
38/* Number of irqs reserved for the legacy controller */
39#define NUM_ISA_INTERRUPTS 16
40
41/* Same thing, used by the generic IRQ code */ 39/* Same thing, used by the generic IRQ code */
42#define NR_IRQS_LEGACY NUM_ISA_INTERRUPTS 40#define NR_IRQS_LEGACY NUM_ISA_INTERRUPTS
43 41
44/* This type is the placeholder for a hardware interrupt number. It has to
45 * be big enough to enclose whatever representation is used by a given
46 * platform.
47 */
48typedef unsigned long irq_hw_number_t;
49
50/* Interrupt controller "host" data structure. This could be defined as a
51 * irq domain controller. That is, it handles the mapping between hardware
52 * and virtual interrupt numbers for a given interrupt domain. The host
53 * structure is generally created by the PIC code for a given PIC instance
54 * (though a host can cover more than one PIC if they have a flat number
55 * model). It's the host callbacks that are responsible for setting the
56 * irq_chip on a given irq_desc after it's been mapped.
57 *
58 * The host code and data structures are fairly agnostic to the fact that
59 * we use an open firmware device-tree. We do have references to struct
60 * device_node in two places: in irq_find_host() to find the host matching
61 * a given interrupt controller node, and of course as an argument to its
62 * counterpart host->ops->match() callback. However, those are treated as
63 * generic pointers by the core and the fact that it's actually a device-node
64 * pointer is purely a convention between callers and implementation. This
65 * code could thus be used on other architectures by replacing those two
66 * by some sort of arch-specific void * "token" used to identify interrupt
67 * controllers.
68 */
69struct irq_host;
70struct radix_tree_root;
71
72/* Functions below are provided by the host and called whenever a new mapping
73 * is created or an old mapping is disposed. The host can then proceed to
74 * whatever internal data structures management is required. It also needs
75 * to setup the irq_desc when returning from map().
76 */
77struct irq_host_ops {
78 /* Match an interrupt controller device node to a host, returns
79 * 1 on a match
80 */
81 int (*match)(struct irq_host *h, struct device_node *node);
82
83 /* Create or update a mapping between a virtual irq number and a hw
84 * irq number. This is called only once for a given mapping.
85 */
86 int (*map)(struct irq_host *h, unsigned int virq, irq_hw_number_t hw);
87
88 /* Dispose of such a mapping */
89 void (*unmap)(struct irq_host *h, unsigned int virq);
90
91 /* Translate device-tree interrupt specifier from raw format coming
92 * from the firmware to a irq_hw_number_t (interrupt line number) and
93 * type (sense) that can be passed to set_irq_type(). In the absence
94 * of this callback, irq_create_of_mapping() and irq_of_parse_and_map()
95 * will return the hw number in the first cell and IRQ_TYPE_NONE for
96 * the type (which amount to keeping whatever default value the
97 * interrupt controller has for that line)
98 */
99 int (*xlate)(struct irq_host *h, struct device_node *ctrler,
100 const u32 *intspec, unsigned int intsize,
101 irq_hw_number_t *out_hwirq, unsigned int *out_type);
102};
103
104struct irq_host {
105 struct list_head link;
106
107 /* type of reverse mapping technique */
108 unsigned int revmap_type;
109#define IRQ_HOST_MAP_LEGACY 0 /* legacy 8259, gets irqs 1..15 */
110#define IRQ_HOST_MAP_NOMAP 1 /* no fast reverse mapping */
111#define IRQ_HOST_MAP_LINEAR 2 /* linear map of interrupts */
112#define IRQ_HOST_MAP_TREE 3 /* radix tree */
113 union {
114 struct {
115 unsigned int size;
116 unsigned int *revmap;
117 } linear;
118 struct radix_tree_root tree;
119 } revmap_data;
120 struct irq_host_ops *ops;
121 void *host_data;
122 irq_hw_number_t inval_irq;
123
124 /* Optional device node pointer */
125 struct device_node *of_node;
126};
127
128struct irq_data; 42struct irq_data;
129extern irq_hw_number_t irqd_to_hwirq(struct irq_data *d); 43extern irq_hw_number_t irqd_to_hwirq(struct irq_data *d);
130extern irq_hw_number_t virq_to_hw(unsigned int virq); 44extern irq_hw_number_t virq_to_hw(unsigned int virq);
131extern bool virq_is_host(unsigned int virq, struct irq_host *host);
132
133/**
134 * irq_alloc_host - Allocate a new irq_host data structure
135 * @of_node: optional device-tree node of the interrupt controller
136 * @revmap_type: type of reverse mapping to use
137 * @revmap_arg: for IRQ_HOST_MAP_LINEAR linear only: size of the map
138 * @ops: map/unmap host callbacks
139 * @inval_irq: provide a hw number in that host space that is always invalid
140 *
141 * Allocates and initialize and irq_host structure. Note that in the case of
142 * IRQ_HOST_MAP_LEGACY, the map() callback will be called before this returns
143 * for all legacy interrupts except 0 (which is always the invalid irq for
144 * a legacy controller). For a IRQ_HOST_MAP_LINEAR, the map is allocated by
145 * this call as well. For a IRQ_HOST_MAP_TREE, the radix tree will be allocated
146 * later during boot automatically (the reverse mapping will use the slow path
147 * until that happens).
148 */
149extern struct irq_host *irq_alloc_host(struct device_node *of_node,
150 unsigned int revmap_type,
151 unsigned int revmap_arg,
152 struct irq_host_ops *ops,
153 irq_hw_number_t inval_irq);
154
155
156/**
157 * irq_find_host - Locates a host for a given device node
158 * @node: device-tree node of the interrupt controller
159 */
160extern struct irq_host *irq_find_host(struct device_node *node);
161
162
163/**
164 * irq_set_default_host - Set a "default" host
165 * @host: default host pointer
166 *
167 * For convenience, it's possible to set a "default" host that will be used
168 * whenever NULL is passed to irq_create_mapping(). It makes life easier for
169 * platforms that want to manipulate a few hard coded interrupt numbers that
170 * aren't properly represented in the device-tree.
171 */
172extern void irq_set_default_host(struct irq_host *host);
173
174
175/**
176 * irq_set_virq_count - Set the maximum number of virt irqs
177 * @count: number of linux virtual irqs, capped with NR_IRQS
178 *
179 * This is mainly for use by platforms like iSeries who want to program
180 * the virtual irq number in the controller to avoid the reverse mapping
181 */
182extern void irq_set_virq_count(unsigned int count);
183
184
185/**
186 * irq_create_mapping - Map a hardware interrupt into linux virq space
187 * @host: host owning this hardware interrupt or NULL for default host
188 * @hwirq: hardware irq number in that host space
189 *
190 * Only one mapping per hardware interrupt is permitted. Returns a linux
191 * virq number.
192 * If the sense/trigger is to be specified, set_irq_type() should be called
193 * on the number returned from that call.
194 */
195extern unsigned int irq_create_mapping(struct irq_host *host,
196 irq_hw_number_t hwirq);
197
198
199/**
200 * irq_dispose_mapping - Unmap an interrupt
201 * @virq: linux virq number of the interrupt to unmap
202 */
203extern void irq_dispose_mapping(unsigned int virq);
204
205/**
206 * irq_find_mapping - Find a linux virq from an hw irq number.
207 * @host: host owning this hardware interrupt
208 * @hwirq: hardware irq number in that host space
209 *
210 * This is a slow path, for use by generic code. It's expected that an
211 * irq controller implementation directly calls the appropriate low level
212 * mapping function.
213 */
214extern unsigned int irq_find_mapping(struct irq_host *host,
215 irq_hw_number_t hwirq);
216
217/**
218 * irq_create_direct_mapping - Allocate a virq for direct mapping
219 * @host: host to allocate the virq for or NULL for default host
220 *
221 * This routine is used for irq controllers which can choose the hardware
222 * interrupt numbers they generate. In such a case it's simplest to use
223 * the linux virq as the hardware interrupt number.
224 */
225extern unsigned int irq_create_direct_mapping(struct irq_host *host);
226
227/**
228 * irq_radix_revmap_insert - Insert a hw irq to linux virq number mapping.
229 * @host: host owning this hardware interrupt
230 * @virq: linux irq number
231 * @hwirq: hardware irq number in that host space
232 *
233 * This is for use by irq controllers that use a radix tree reverse
234 * mapping for fast lookup.
235 */
236extern void irq_radix_revmap_insert(struct irq_host *host, unsigned int virq,
237 irq_hw_number_t hwirq);
238
239/**
240 * irq_radix_revmap_lookup - Find a linux virq from a hw irq number.
241 * @host: host owning this hardware interrupt
242 * @hwirq: hardware irq number in that host space
243 *
244 * This is a fast path, for use by irq controller code that uses radix tree
245 * revmaps
246 */
247extern unsigned int irq_radix_revmap_lookup(struct irq_host *host,
248 irq_hw_number_t hwirq);
249
250/**
251 * irq_linear_revmap - Find a linux virq from a hw irq number.
252 * @host: host owning this hardware interrupt
253 * @hwirq: hardware irq number in that host space
254 *
255 * This is a fast path, for use by irq controller code that uses linear
256 * revmaps. It does fallback to the slow path if the revmap doesn't exist
257 * yet and will create the revmap entry with appropriate locking
258 */
259
260extern unsigned int irq_linear_revmap(struct irq_host *host,
261 irq_hw_number_t hwirq);
262
263
264
265/**
266 * irq_alloc_virt - Allocate virtual irq numbers
267 * @host: host owning these new virtual irqs
268 * @count: number of consecutive numbers to allocate
269 * @hint: pass a hint number, the allocator will try to use a 1:1 mapping
270 *
271 * This is a low level function that is used internally by irq_create_mapping()
272 * and that can be used by some irq controllers implementations for things
273 * like allocating ranges of numbers for MSIs. The revmaps are left untouched.
274 */
275extern unsigned int irq_alloc_virt(struct irq_host *host,
276 unsigned int count,
277 unsigned int hint);
278
279/**
280 * irq_free_virt - Free virtual irq numbers
281 * @virq: virtual irq number of the first interrupt to free
282 * @count: number of interrupts to free
283 *
284 * This function is the opposite of irq_alloc_virt. It will not clear reverse
285 * maps, this should be done previously by unmap'ing the interrupt. In fact,
286 * all interrupts covered by the range being freed should have been unmapped
287 * prior to calling this.
288 */
289extern void irq_free_virt(unsigned int virq, unsigned int count);
290 45
291/** 46/**
292 * irq_early_init - Init irq remapping subsystem 47 * irq_early_init - Init irq remapping subsystem
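The four reverse-map flavours described in the removed irq_host comment block (legacy, nomap, linear, tree) survive in the generic code, but the flavour is now chosen by the constructor instead of a revmap_type argument. A sketch of the common choices; the ops, the sizes and the idea of calling them side by side are purely illustrative (a real controller registers exactly one domain):

#include <linux/init.h>
#include <linux/irqdomain.h>
#include <linux/of.h>

static const struct irq_domain_ops my_ops = {
	.xlate = irq_domain_xlate_onecell,	/* .map as in the earlier sketch */
};

static void __init my_pick_domain(struct device_node *np)
{
	struct irq_domain *d;

	/* fixed 1:1 block of pre-allocated irqs, e.g. the 16 ISA lines
	 * behind a cascaded i8259: hwirq 0..15 keep virq 0..15 */
	d = irq_domain_add_legacy(np, 16, 0, 0, &my_ops, NULL);

	/* dense hwirq space: array-based reverse map */
	d = irq_domain_add_linear(np, 128, &my_ops, NULL);

	/* sparse hwirq space (e.g. MSIs): radix-tree reverse map */
	d = irq_domain_add_tree(np, &my_ops, NULL);

	(void)d;
}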
diff --git a/arch/powerpc/include/asm/mpic.h b/arch/powerpc/include/asm/mpic.h
index 67b4d9837236..a5b7c56237f9 100644
--- a/arch/powerpc/include/asm/mpic.h
+++ b/arch/powerpc/include/asm/mpic.h
@@ -255,7 +255,7 @@ struct mpic
255 struct device_node *node; 255 struct device_node *node;
256 256
257 /* The remapper for this MPIC */ 257 /* The remapper for this MPIC */
258 struct irq_host *irqhost; 258 struct irq_domain *irqhost;
259 259
260 /* The "linux" controller struct */ 260 /* The "linux" controller struct */
261 struct irq_chip hc_irq; 261 struct irq_chip hc_irq;
diff --git a/arch/powerpc/include/asm/xics.h b/arch/powerpc/include/asm/xics.h
index c48de98ba94e..4ae9a09c3b89 100644
--- a/arch/powerpc/include/asm/xics.h
+++ b/arch/powerpc/include/asm/xics.h
@@ -86,7 +86,7 @@ struct ics {
86extern unsigned int xics_default_server; 86extern unsigned int xics_default_server;
87extern unsigned int xics_default_distrib_server; 87extern unsigned int xics_default_distrib_server;
88extern unsigned int xics_interrupt_server_size; 88extern unsigned int xics_interrupt_server_size;
89extern struct irq_host *xics_host; 89extern struct irq_domain *xics_host;
90 90
91struct xics_cppr { 91struct xics_cppr {
92 unsigned char stack[MAX_NUM_PRIORITIES]; 92 unsigned char stack[MAX_NUM_PRIORITIES];
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 01e2877e8e04..bdfb3eee3e6f 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -490,409 +490,19 @@ void do_softirq(void)
490 local_irq_restore(flags); 490 local_irq_restore(flags);
491} 491}
492 492
493
494/*
495 * IRQ controller and virtual interrupts
496 */
497
498/* The main irq map itself is an array of NR_IRQ entries containing the
499 * associate host and irq number. An entry with a host of NULL is free.
500 * An entry can be allocated if it's free, the allocator always then sets
501 * hwirq first to the host's invalid irq number and then fills ops.
502 */
503struct irq_map_entry {
504 irq_hw_number_t hwirq;
505 struct irq_host *host;
506};
507
508static LIST_HEAD(irq_hosts);
509static DEFINE_RAW_SPINLOCK(irq_big_lock);
510static DEFINE_MUTEX(revmap_trees_mutex);
511static struct irq_map_entry irq_map[NR_IRQS];
512static unsigned int irq_virq_count = NR_IRQS;
513static struct irq_host *irq_default_host;
514
515irq_hw_number_t irqd_to_hwirq(struct irq_data *d) 493irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
516{ 494{
517 return irq_map[d->irq].hwirq; 495 return d->hwirq;
518} 496}
519EXPORT_SYMBOL_GPL(irqd_to_hwirq); 497EXPORT_SYMBOL_GPL(irqd_to_hwirq);
520 498
521irq_hw_number_t virq_to_hw(unsigned int virq) 499irq_hw_number_t virq_to_hw(unsigned int virq)
522{ 500{
523 return irq_map[virq].hwirq; 501 struct irq_data *irq_data = irq_get_irq_data(virq);
502 return WARN_ON(!irq_data) ? 0 : irq_data->hwirq;
524} 503}
525EXPORT_SYMBOL_GPL(virq_to_hw); 504EXPORT_SYMBOL_GPL(virq_to_hw);
526 505
527bool virq_is_host(unsigned int virq, struct irq_host *host)
528{
529 return irq_map[virq].host == host;
530}
531EXPORT_SYMBOL_GPL(virq_is_host);
532
533static int default_irq_host_match(struct irq_host *h, struct device_node *np)
534{
535 return h->of_node != NULL && h->of_node == np;
536}
537
538struct irq_host *irq_alloc_host(struct device_node *of_node,
539 unsigned int revmap_type,
540 unsigned int revmap_arg,
541 struct irq_host_ops *ops,
542 irq_hw_number_t inval_irq)
543{
544 struct irq_host *host;
545 unsigned int size = sizeof(struct irq_host);
546 unsigned int i;
547 unsigned int *rmap;
548 unsigned long flags;
549
550 /* Allocate structure and revmap table if using linear mapping */
551 if (revmap_type == IRQ_HOST_MAP_LINEAR)
552 size += revmap_arg * sizeof(unsigned int);
553 host = kzalloc(size, GFP_KERNEL);
554 if (host == NULL)
555 return NULL;
556
557 /* Fill structure */
558 host->revmap_type = revmap_type;
559 host->inval_irq = inval_irq;
560 host->ops = ops;
561 host->of_node = of_node_get(of_node);
562
563 if (host->ops->match == NULL)
564 host->ops->match = default_irq_host_match;
565
566 raw_spin_lock_irqsave(&irq_big_lock, flags);
567
568 /* If it's a legacy controller, check for duplicates and
569 * mark it as allocated (we use irq 0 host pointer for that
570 */
571 if (revmap_type == IRQ_HOST_MAP_LEGACY) {
572 if (irq_map[0].host != NULL) {
573 raw_spin_unlock_irqrestore(&irq_big_lock, flags);
574 of_node_put(host->of_node);
575 kfree(host);
576 return NULL;
577 }
578 irq_map[0].host = host;
579 }
580
581 list_add(&host->link, &irq_hosts);
582 raw_spin_unlock_irqrestore(&irq_big_lock, flags);
583
584 /* Additional setups per revmap type */
585 switch(revmap_type) {
586 case IRQ_HOST_MAP_LEGACY:
587 /* 0 is always the invalid number for legacy */
588 host->inval_irq = 0;
589 /* setup us as the host for all legacy interrupts */
590 for (i = 1; i < NUM_ISA_INTERRUPTS; i++) {
591 irq_map[i].hwirq = i;
592 smp_wmb();
593 irq_map[i].host = host;
594 smp_wmb();
595
596 /* Legacy flags are left to default at this point,
597 * one can then use irq_create_mapping() to
598 * explicitly change them
599 */
600 ops->map(host, i, i);
601
602 /* Clear norequest flags */
603 irq_clear_status_flags(i, IRQ_NOREQUEST);
604 }
605 break;
606 case IRQ_HOST_MAP_LINEAR:
607 rmap = (unsigned int *)(host + 1);
608 for (i = 0; i < revmap_arg; i++)
609 rmap[i] = NO_IRQ;
610 host->revmap_data.linear.size = revmap_arg;
611 smp_wmb();
612 host->revmap_data.linear.revmap = rmap;
613 break;
614 case IRQ_HOST_MAP_TREE:
615 INIT_RADIX_TREE(&host->revmap_data.tree, GFP_KERNEL);
616 break;
617 default:
618 break;
619 }
620
621 pr_debug("irq: Allocated host of type %d @0x%p\n", revmap_type, host);
622
623 return host;
624}
625
626struct irq_host *irq_find_host(struct device_node *node)
627{
628 struct irq_host *h, *found = NULL;
629 unsigned long flags;
630
631 /* We might want to match the legacy controller last since
632 * it might potentially be set to match all interrupts in
633 * the absence of a device node. This isn't a problem so far
634 * yet though...
635 */
636 raw_spin_lock_irqsave(&irq_big_lock, flags);
637 list_for_each_entry(h, &irq_hosts, link)
638 if (h->ops->match(h, node)) {
639 found = h;
640 break;
641 }
642 raw_spin_unlock_irqrestore(&irq_big_lock, flags);
643 return found;
644}
645EXPORT_SYMBOL_GPL(irq_find_host);
646
647void irq_set_default_host(struct irq_host *host)
648{
649 pr_debug("irq: Default host set to @0x%p\n", host);
650
651 irq_default_host = host;
652}
653
654void irq_set_virq_count(unsigned int count)
655{
656 pr_debug("irq: Trying to set virq count to %d\n", count);
657
658 BUG_ON(count < NUM_ISA_INTERRUPTS);
659 if (count < NR_IRQS)
660 irq_virq_count = count;
661}
662
663static int irq_setup_virq(struct irq_host *host, unsigned int virq,
664 irq_hw_number_t hwirq)
665{
666 int res;
667
668 res = irq_alloc_desc_at(virq, 0);
669 if (res != virq) {
670 pr_debug("irq: -> allocating desc failed\n");
671 goto error;
672 }
673
674 /* map it */
675 smp_wmb();
676 irq_map[virq].hwirq = hwirq;
677 smp_mb();
678
679 if (host->ops->map(host, virq, hwirq)) {
680 pr_debug("irq: -> mapping failed, freeing\n");
681 goto errdesc;
682 }
683
684 irq_clear_status_flags(virq, IRQ_NOREQUEST);
685
686 return 0;
687
688errdesc:
689 irq_free_descs(virq, 1);
690error:
691 irq_free_virt(virq, 1);
692 return -1;
693}
694
695unsigned int irq_create_direct_mapping(struct irq_host *host)
696{
697 unsigned int virq;
698
699 if (host == NULL)
700 host = irq_default_host;
701
702 BUG_ON(host == NULL);
703 WARN_ON(host->revmap_type != IRQ_HOST_MAP_NOMAP);
704
705 virq = irq_alloc_virt(host, 1, 0);
706 if (virq == NO_IRQ) {
707 pr_debug("irq: create_direct virq allocation failed\n");
708 return NO_IRQ;
709 }
710
711 pr_debug("irq: create_direct obtained virq %d\n", virq);
712
713 if (irq_setup_virq(host, virq, virq))
714 return NO_IRQ;
715
716 return virq;
717}
718
719unsigned int irq_create_mapping(struct irq_host *host,
720 irq_hw_number_t hwirq)
721{
722 unsigned int virq, hint;
723
724 pr_debug("irq: irq_create_mapping(0x%p, 0x%lx)\n", host, hwirq);
725
726 /* Look for default host if nececssary */
727 if (host == NULL)
728 host = irq_default_host;
729 if (host == NULL) {
730 printk(KERN_WARNING "irq_create_mapping called for"
731 " NULL host, hwirq=%lx\n", hwirq);
732 WARN_ON(1);
733 return NO_IRQ;
734 }
735 pr_debug("irq: -> using host @%p\n", host);
736
737 /* Check if mapping already exists */
738 virq = irq_find_mapping(host, hwirq);
739 if (virq != NO_IRQ) {
740 pr_debug("irq: -> existing mapping on virq %d\n", virq);
741 return virq;
742 }
743
744 /* Get a virtual interrupt number */
745 if (host->revmap_type == IRQ_HOST_MAP_LEGACY) {
746 /* Handle legacy */
747 virq = (unsigned int)hwirq;
748 if (virq == 0 || virq >= NUM_ISA_INTERRUPTS)
749 return NO_IRQ;
750 return virq;
751 } else {
752 /* Allocate a virtual interrupt number */
753 hint = hwirq % irq_virq_count;
754 virq = irq_alloc_virt(host, 1, hint);
755 if (virq == NO_IRQ) {
756 pr_debug("irq: -> virq allocation failed\n");
757 return NO_IRQ;
758 }
759 }
760
761 if (irq_setup_virq(host, virq, hwirq))
762 return NO_IRQ;
763
764 pr_debug("irq: irq %lu on host %s mapped to virtual irq %u\n",
765 hwirq, host->of_node ? host->of_node->full_name : "null", virq);
766
767 return virq;
768}
769EXPORT_SYMBOL_GPL(irq_create_mapping);
770
771unsigned int irq_create_of_mapping(struct device_node *controller,
772 const u32 *intspec, unsigned int intsize)
773{
774 struct irq_host *host;
775 irq_hw_number_t hwirq;
776 unsigned int type = IRQ_TYPE_NONE;
777 unsigned int virq;
778
779 if (controller == NULL)
780 host = irq_default_host;
781 else
782 host = irq_find_host(controller);
783 if (host == NULL) {
784 printk(KERN_WARNING "irq: no irq host found for %s !\n",
785 controller->full_name);
786 return NO_IRQ;
787 }
788
789 /* If host has no translation, then we assume interrupt line */
790 if (host->ops->xlate == NULL)
791 hwirq = intspec[0];
792 else {
793 if (host->ops->xlate(host, controller, intspec, intsize,
794 &hwirq, &type))
795 return NO_IRQ;
796 }
797
798 /* Create mapping */
799 virq = irq_create_mapping(host, hwirq);
800 if (virq == NO_IRQ)
801 return virq;
802
803 /* Set type if specified and different than the current one */
804 if (type != IRQ_TYPE_NONE &&
805 type != (irqd_get_trigger_type(irq_get_irq_data(virq))))
806 irq_set_irq_type(virq, type);
807 return virq;
808}
809EXPORT_SYMBOL_GPL(irq_create_of_mapping);
810
811void irq_dispose_mapping(unsigned int virq)
812{
813 struct irq_host *host;
814 irq_hw_number_t hwirq;
815
816 if (virq == NO_IRQ)
817 return;
818
819 host = irq_map[virq].host;
820 if (WARN_ON(host == NULL))
821 return;
822
823 /* Never unmap legacy interrupts */
824 if (host->revmap_type == IRQ_HOST_MAP_LEGACY)
825 return;
826
827 irq_set_status_flags(virq, IRQ_NOREQUEST);
828
829 /* remove chip and handler */
830 irq_set_chip_and_handler(virq, NULL, NULL);
831
832 /* Make sure it's completed */
833 synchronize_irq(virq);
834
835 /* Tell the PIC about it */
836 if (host->ops->unmap)
837 host->ops->unmap(host, virq);
838 smp_mb();
839
840 /* Clear reverse map */
841 hwirq = irq_map[virq].hwirq;
842 switch(host->revmap_type) {
843 case IRQ_HOST_MAP_LINEAR:
844 if (hwirq < host->revmap_data.linear.size)
845 host->revmap_data.linear.revmap[hwirq] = NO_IRQ;
846 break;
847 case IRQ_HOST_MAP_TREE:
848 mutex_lock(&revmap_trees_mutex);
849 radix_tree_delete(&host->revmap_data.tree, hwirq);
850 mutex_unlock(&revmap_trees_mutex);
851 break;
852 }
853
854 /* Destroy map */
855 smp_mb();
856 irq_map[virq].hwirq = host->inval_irq;
857
858 irq_free_descs(virq, 1);
859 /* Free it */
860 irq_free_virt(virq, 1);
861}
862EXPORT_SYMBOL_GPL(irq_dispose_mapping);
863
864unsigned int irq_find_mapping(struct irq_host *host,
865 irq_hw_number_t hwirq)
866{
867 unsigned int i;
868 unsigned int hint = hwirq % irq_virq_count;
869
870 /* Look for default host if nececssary */
871 if (host == NULL)
872 host = irq_default_host;
873 if (host == NULL)
874 return NO_IRQ;
875
876 /* legacy -> bail early */
877 if (host->revmap_type == IRQ_HOST_MAP_LEGACY)
878 return hwirq;
879
880 /* Slow path does a linear search of the map */
881 if (hint < NUM_ISA_INTERRUPTS)
882 hint = NUM_ISA_INTERRUPTS;
883 i = hint;
884 do {
885 if (irq_map[i].host == host &&
886 irq_map[i].hwirq == hwirq)
887 return i;
888 i++;
889 if (i >= irq_virq_count)
890 i = NUM_ISA_INTERRUPTS;
891 } while(i != hint);
892 return NO_IRQ;
893}
894EXPORT_SYMBOL_GPL(irq_find_mapping);
895
896#ifdef CONFIG_SMP 506#ifdef CONFIG_SMP
897int irq_choose_cpu(const struct cpumask *mask) 507int irq_choose_cpu(const struct cpumask *mask)
898{ 508{
@@ -929,232 +539,11 @@ int irq_choose_cpu(const struct cpumask *mask)
929} 539}
930#endif 540#endif
931 541
932unsigned int irq_radix_revmap_lookup(struct irq_host *host,
933 irq_hw_number_t hwirq)
934{
935 struct irq_map_entry *ptr;
936 unsigned int virq;
937
938 if (WARN_ON_ONCE(host->revmap_type != IRQ_HOST_MAP_TREE))
939 return irq_find_mapping(host, hwirq);
940
941 /*
942 * The ptr returned references the static global irq_map.
943 * but freeing an irq can delete nodes along the path to
944 * do the lookup via call_rcu.
945 */
946 rcu_read_lock();
947 ptr = radix_tree_lookup(&host->revmap_data.tree, hwirq);
948 rcu_read_unlock();
949
950 /*
951 * If found in radix tree, then fine.
952 * Else fallback to linear lookup - this should not happen in practice
953 * as it means that we failed to insert the node in the radix tree.
954 */
955 if (ptr)
956 virq = ptr - irq_map;
957 else
958 virq = irq_find_mapping(host, hwirq);
959
960 return virq;
961}
962
963void irq_radix_revmap_insert(struct irq_host *host, unsigned int virq,
964 irq_hw_number_t hwirq)
965{
966 if (WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE))
967 return;
968
969 if (virq != NO_IRQ) {
970 mutex_lock(&revmap_trees_mutex);
971 radix_tree_insert(&host->revmap_data.tree, hwirq,
972 &irq_map[virq]);
973 mutex_unlock(&revmap_trees_mutex);
974 }
975}
976
977unsigned int irq_linear_revmap(struct irq_host *host,
978 irq_hw_number_t hwirq)
979{
980 unsigned int *revmap;
981
982 if (WARN_ON_ONCE(host->revmap_type != IRQ_HOST_MAP_LINEAR))
983 return irq_find_mapping(host, hwirq);
984
985 /* Check revmap bounds */
986 if (unlikely(hwirq >= host->revmap_data.linear.size))
987 return irq_find_mapping(host, hwirq);
988
989 /* Check if revmap was allocated */
990 revmap = host->revmap_data.linear.revmap;
991 if (unlikely(revmap == NULL))
992 return irq_find_mapping(host, hwirq);
993
994 /* Fill up revmap with slow path if no mapping found */
995 if (unlikely(revmap[hwirq] == NO_IRQ))
996 revmap[hwirq] = irq_find_mapping(host, hwirq);
997
998 return revmap[hwirq];
999}
1000
1001unsigned int irq_alloc_virt(struct irq_host *host,
1002 unsigned int count,
1003 unsigned int hint)
1004{
1005 unsigned long flags;
1006 unsigned int i, j, found = NO_IRQ;
1007
1008 if (count == 0 || count > (irq_virq_count - NUM_ISA_INTERRUPTS))
1009 return NO_IRQ;
1010
1011 raw_spin_lock_irqsave(&irq_big_lock, flags);
1012
1013 /* Use hint for 1 interrupt if any */
1014 if (count == 1 && hint >= NUM_ISA_INTERRUPTS &&
1015 hint < irq_virq_count && irq_map[hint].host == NULL) {
1016 found = hint;
1017 goto hint_found;
1018 }
1019
1020 /* Look for count consecutive numbers in the allocatable
1021 * (non-legacy) space
1022 */
1023 for (i = NUM_ISA_INTERRUPTS, j = 0; i < irq_virq_count; i++) {
1024 if (irq_map[i].host != NULL)
1025 j = 0;
1026 else
1027 j++;
1028
1029 if (j == count) {
1030 found = i - count + 1;
1031 break;
1032 }
1033 }
1034 if (found == NO_IRQ) {
1035 raw_spin_unlock_irqrestore(&irq_big_lock, flags);
1036 return NO_IRQ;
1037 }
1038 hint_found:
1039 for (i = found; i < (found + count); i++) {
1040 irq_map[i].hwirq = host->inval_irq;
1041 smp_wmb();
1042 irq_map[i].host = host;
1043 }
1044 raw_spin_unlock_irqrestore(&irq_big_lock, flags);
1045 return found;
1046}
1047
1048void irq_free_virt(unsigned int virq, unsigned int count)
1049{
1050 unsigned long flags;
1051 unsigned int i;
1052
1053 WARN_ON (virq < NUM_ISA_INTERRUPTS);
1054 WARN_ON (count == 0 || (virq + count) > irq_virq_count);
1055
1056 if (virq < NUM_ISA_INTERRUPTS) {
1057 if (virq + count < NUM_ISA_INTERRUPTS)
1058 return;
1059 count =- NUM_ISA_INTERRUPTS - virq;
1060 virq = NUM_ISA_INTERRUPTS;
1061 }
1062
1063 if (count > irq_virq_count || virq > irq_virq_count - count) {
1064 if (virq > irq_virq_count)
1065 return;
1066 count = irq_virq_count - virq;
1067 }
1068
1069 raw_spin_lock_irqsave(&irq_big_lock, flags);
1070 for (i = virq; i < (virq + count); i++) {
1071 struct irq_host *host;
1072
1073 host = irq_map[i].host;
1074 irq_map[i].hwirq = host->inval_irq;
1075 smp_wmb();
1076 irq_map[i].host = NULL;
1077 }
1078 raw_spin_unlock_irqrestore(&irq_big_lock, flags);
1079}
1080
1081int arch_early_irq_init(void) 542int arch_early_irq_init(void)
1082{ 543{
1083 return 0; 544 return 0;
1084} 545}
1085 546
1086#ifdef CONFIG_VIRQ_DEBUG
1087static int virq_debug_show(struct seq_file *m, void *private)
1088{
1089 unsigned long flags;
1090 struct irq_desc *desc;
1091 const char *p;
1092 static const char none[] = "none";
1093 void *data;
1094 int i;
1095
1096 seq_printf(m, "%-5s %-7s %-15s %-18s %s\n", "virq", "hwirq",
1097 "chip name", "chip data", "host name");
1098
1099 for (i = 1; i < nr_irqs; i++) {
1100 desc = irq_to_desc(i);
1101 if (!desc)
1102 continue;
1103
1104 raw_spin_lock_irqsave(&desc->lock, flags);
1105
1106 if (desc->action && desc->action->handler) {
1107 struct irq_chip *chip;
1108
1109 seq_printf(m, "%5d ", i);
1110 seq_printf(m, "0x%05lx ", irq_map[i].hwirq);
1111
1112 chip = irq_desc_get_chip(desc);
1113 if (chip && chip->name)
1114 p = chip->name;
1115 else
1116 p = none;
1117 seq_printf(m, "%-15s ", p);
1118
1119 data = irq_desc_get_chip_data(desc);
1120 seq_printf(m, "0x%16p ", data);
1121
1122 if (irq_map[i].host && irq_map[i].host->of_node)
1123 p = irq_map[i].host->of_node->full_name;
1124 else
1125 p = none;
1126 seq_printf(m, "%s\n", p);
1127 }
1128
1129 raw_spin_unlock_irqrestore(&desc->lock, flags);
1130 }
1131
1132 return 0;
1133}
1134
1135static int virq_debug_open(struct inode *inode, struct file *file)
1136{
1137 return single_open(file, virq_debug_show, inode->i_private);
1138}
1139
1140static const struct file_operations virq_debug_fops = {
1141 .open = virq_debug_open,
1142 .read = seq_read,
1143 .llseek = seq_lseek,
1144 .release = single_release,
1145};
1146
1147static int __init irq_debugfs_init(void)
1148{
1149 if (debugfs_create_file("virq_mapping", S_IRUGO, powerpc_debugfs_root,
1150 NULL, &virq_debug_fops) == NULL)
1151 return -ENOMEM;
1152
1153 return 0;
1154}
1155__initcall(irq_debugfs_init);
1156#endif /* CONFIG_VIRQ_DEBUG */
1157
1158#ifdef CONFIG_PPC64 547#ifdef CONFIG_PPC64
1159static int __init setup_noirqdistrib(char *str) 548static int __init setup_noirqdistrib(char *str)
1160{ 549{
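With the mapping now stored in struct irq_data, recovering the hardware number for a Linux irq no longer needs the arch-private irq_map[] array. A minimal sketch of doing it from driver context (virq is assumed to be an already-mapped Linux irq); inside an irq_chip callback the same value is available directly as d->hwirq:

#include <linux/irq.h>
#include <linux/irqdomain.h>

static irq_hw_number_t my_virq_to_hw(unsigned int virq)
{
	struct irq_data *d = irq_get_irq_data(virq);

	/* same lookup the reworked virq_to_hw() above performs */
	return d ? d->hwirq : 0;
}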
diff --git a/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c b/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c
index 9f09319352c0..ca3a062ed1b9 100644
--- a/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c
+++ b/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c
@@ -21,7 +21,7 @@
21#include <asm/prom.h> 21#include <asm/prom.h>
22 22
23static struct device_node *cpld_pic_node; 23static struct device_node *cpld_pic_node;
24static struct irq_host *cpld_pic_host; 24static struct irq_domain *cpld_pic_host;
25 25
26/* 26/*
27 * Bits to ignore in the misc_status register 27 * Bits to ignore in the misc_status register
@@ -123,13 +123,13 @@ cpld_pic_cascade(unsigned int irq, struct irq_desc *desc)
123} 123}
124 124
125static int 125static int
126cpld_pic_host_match(struct irq_host *h, struct device_node *node) 126cpld_pic_host_match(struct irq_domain *h, struct device_node *node)
127{ 127{
128 return cpld_pic_node == node; 128 return cpld_pic_node == node;
129} 129}
130 130
131static int 131static int
132cpld_pic_host_map(struct irq_host *h, unsigned int virq, 132cpld_pic_host_map(struct irq_domain *h, unsigned int virq,
133 irq_hw_number_t hw) 133 irq_hw_number_t hw)
134{ 134{
135 irq_set_status_flags(virq, IRQ_LEVEL); 135 irq_set_status_flags(virq, IRQ_LEVEL);
@@ -137,8 +137,7 @@ cpld_pic_host_map(struct irq_host *h, unsigned int virq,
137 return 0; 137 return 0;
138} 138}
139 139
140static struct 140static const struct irq_domain_ops cpld_pic_host_ops = {
141irq_host_ops cpld_pic_host_ops = {
142 .match = cpld_pic_host_match, 141 .match = cpld_pic_host_match,
143 .map = cpld_pic_host_map, 142 .map = cpld_pic_host_map,
144}; 143};
@@ -191,8 +190,7 @@ mpc5121_ads_cpld_pic_init(void)
191 190
192 cpld_pic_node = of_node_get(np); 191 cpld_pic_node = of_node_get(np);
193 192
194 cpld_pic_host = 193 cpld_pic_host = irq_domain_add_linear(np, 16, &cpld_pic_host_ops, NULL);
195 irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, 16, &cpld_pic_host_ops, 16);
196 if (!cpld_pic_host) { 194 if (!cpld_pic_host) {
197 printk(KERN_ERR "CPLD PIC: failed to allocate irq host!\n"); 195 printk(KERN_ERR "CPLD PIC: failed to allocate irq host!\n");
198 goto end; 196 goto end;
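Most converted hosts drop their match op and rely on the default (compare the domain's of_node against the requesting node); where a driver keeps one, as the CPLD PIC does here, only the first argument changes from struct irq_host to struct irq_domain. A sketch with a hypothetical cached node pointer:

#include <linux/irqdomain.h>
#include <linux/of.h>

static struct device_node *my_pic_node;	/* cached at probe time */

static int my_pic_match(struct irq_domain *d, struct device_node *node)
{
	/* non-zero means "this domain translates interrupts for @node" */
	return my_pic_node != NULL && my_pic_node == node;
}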
diff --git a/arch/powerpc/platforms/52xx/media5200.c b/arch/powerpc/platforms/52xx/media5200.c
index 96f85e5e0cd3..17d91b7da315 100644
--- a/arch/powerpc/platforms/52xx/media5200.c
+++ b/arch/powerpc/platforms/52xx/media5200.c
@@ -45,7 +45,7 @@ static struct of_device_id mpc5200_gpio_ids[] __initdata = {
45struct media5200_irq { 45struct media5200_irq {
46 void __iomem *regs; 46 void __iomem *regs;
47 spinlock_t lock; 47 spinlock_t lock;
48 struct irq_host *irqhost; 48 struct irq_domain *irqhost;
49}; 49};
50struct media5200_irq media5200_irq; 50struct media5200_irq media5200_irq;
51 51
@@ -112,7 +112,7 @@ void media5200_irq_cascade(unsigned int virq, struct irq_desc *desc)
112 raw_spin_unlock(&desc->lock); 112 raw_spin_unlock(&desc->lock);
113} 113}
114 114
115static int media5200_irq_map(struct irq_host *h, unsigned int virq, 115static int media5200_irq_map(struct irq_domain *h, unsigned int virq,
116 irq_hw_number_t hw) 116 irq_hw_number_t hw)
117{ 117{
118 pr_debug("%s: h=%p, virq=%i, hwirq=%i\n", __func__, h, virq, (int)hw); 118 pr_debug("%s: h=%p, virq=%i, hwirq=%i\n", __func__, h, virq, (int)hw);
@@ -122,7 +122,7 @@ static int media5200_irq_map(struct irq_host *h, unsigned int virq,
122 return 0; 122 return 0;
123} 123}
124 124
125static int media5200_irq_xlate(struct irq_host *h, struct device_node *ct, 125static int media5200_irq_xlate(struct irq_domain *h, struct device_node *ct,
126 const u32 *intspec, unsigned int intsize, 126 const u32 *intspec, unsigned int intsize,
127 irq_hw_number_t *out_hwirq, 127 irq_hw_number_t *out_hwirq,
128 unsigned int *out_flags) 128 unsigned int *out_flags)
@@ -136,7 +136,7 @@ static int media5200_irq_xlate(struct irq_host *h, struct device_node *ct,
136 return 0; 136 return 0;
137} 137}
138 138
139static struct irq_host_ops media5200_irq_ops = { 139static const struct irq_domain_ops media5200_irq_ops = {
140 .map = media5200_irq_map, 140 .map = media5200_irq_map,
141 .xlate = media5200_irq_xlate, 141 .xlate = media5200_irq_xlate,
142}; 142};
@@ -173,15 +173,12 @@ static void __init media5200_init_irq(void)
173 173
174 spin_lock_init(&media5200_irq.lock); 174 spin_lock_init(&media5200_irq.lock);
175 175
176 media5200_irq.irqhost = irq_alloc_host(fpga_np, IRQ_HOST_MAP_LINEAR, 176 media5200_irq.irqhost = irq_domain_add_linear(fpga_np,
177 MEDIA5200_NUM_IRQS, 177 MEDIA5200_NUM_IRQS, &media5200_irq_ops, &media5200_irq);
178 &media5200_irq_ops, -1);
179 if (!media5200_irq.irqhost) 178 if (!media5200_irq.irqhost)
180 goto out; 179 goto out;
181 pr_debug("%s: allocated irqhost\n", __func__); 180 pr_debug("%s: allocated irqhost\n", __func__);
182 181
183 media5200_irq.irqhost->host_data = &media5200_irq;
184
185 irq_set_handler_data(cascade_virq, &media5200_irq); 182 irq_set_handler_data(cascade_virq, &media5200_irq);
186 irq_set_chained_handler(cascade_virq, media5200_irq_cascade); 183 irq_set_chained_handler(cascade_virq, media5200_irq_cascade);
187 184
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
index f94f06e52762..028470b95886 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
@@ -81,7 +81,7 @@ MODULE_LICENSE("GPL");
81 * @regs: virtual address of GPT registers 81 * @regs: virtual address of GPT registers
82 * @lock: spinlock to coordinate between different functions. 82 * @lock: spinlock to coordinate between different functions.
83 * @gc: gpio_chip instance structure; used when GPIO is enabled 83 * @gc: gpio_chip instance structure; used when GPIO is enabled
84 * @irqhost: Pointer to irq_host instance; used when IRQ mode is supported 84 * @irqhost: Pointer to irq_domain instance; used when IRQ mode is supported
85 * @wdt_mode: only relevant for gpt0: bit 0 (MPC52xx_GPT_CAN_WDT) indicates 85 * @wdt_mode: only relevant for gpt0: bit 0 (MPC52xx_GPT_CAN_WDT) indicates
86 * if the gpt may be used as wdt, bit 1 (MPC52xx_GPT_IS_WDT) indicates 86 * if the gpt may be used as wdt, bit 1 (MPC52xx_GPT_IS_WDT) indicates
87 * if the timer is actively used as wdt which blocks gpt functions 87 * if the timer is actively used as wdt which blocks gpt functions
@@ -91,7 +91,7 @@ struct mpc52xx_gpt_priv {
91 struct device *dev; 91 struct device *dev;
92 struct mpc52xx_gpt __iomem *regs; 92 struct mpc52xx_gpt __iomem *regs;
93 spinlock_t lock; 93 spinlock_t lock;
94 struct irq_host *irqhost; 94 struct irq_domain *irqhost;
95 u32 ipb_freq; 95 u32 ipb_freq;
96 u8 wdt_mode; 96 u8 wdt_mode;
97 97
@@ -204,7 +204,7 @@ void mpc52xx_gpt_irq_cascade(unsigned int virq, struct irq_desc *desc)
204 } 204 }
205} 205}
206 206
207static int mpc52xx_gpt_irq_map(struct irq_host *h, unsigned int virq, 207static int mpc52xx_gpt_irq_map(struct irq_domain *h, unsigned int virq,
208 irq_hw_number_t hw) 208 irq_hw_number_t hw)
209{ 209{
210 struct mpc52xx_gpt_priv *gpt = h->host_data; 210 struct mpc52xx_gpt_priv *gpt = h->host_data;
@@ -216,7 +216,7 @@ static int mpc52xx_gpt_irq_map(struct irq_host *h, unsigned int virq,
216 return 0; 216 return 0;
217} 217}
218 218
219static int mpc52xx_gpt_irq_xlate(struct irq_host *h, struct device_node *ct, 219static int mpc52xx_gpt_irq_xlate(struct irq_domain *h, struct device_node *ct,
220 const u32 *intspec, unsigned int intsize, 220 const u32 *intspec, unsigned int intsize,
221 irq_hw_number_t *out_hwirq, 221 irq_hw_number_t *out_hwirq,
222 unsigned int *out_flags) 222 unsigned int *out_flags)
@@ -236,7 +236,7 @@ static int mpc52xx_gpt_irq_xlate(struct irq_host *h, struct device_node *ct,
236 return 0; 236 return 0;
237} 237}
238 238
239static struct irq_host_ops mpc52xx_gpt_irq_ops = { 239static const struct irq_domain_ops mpc52xx_gpt_irq_ops = {
240 .map = mpc52xx_gpt_irq_map, 240 .map = mpc52xx_gpt_irq_map,
241 .xlate = mpc52xx_gpt_irq_xlate, 241 .xlate = mpc52xx_gpt_irq_xlate,
242}; 242};
@@ -252,14 +252,12 @@ mpc52xx_gpt_irq_setup(struct mpc52xx_gpt_priv *gpt, struct device_node *node)
252 if (!cascade_virq) 252 if (!cascade_virq)
253 return; 253 return;
254 254
255 gpt->irqhost = irq_alloc_host(node, IRQ_HOST_MAP_LINEAR, 1, 255 gpt->irqhost = irq_domain_add_linear(node, 1, &mpc52xx_gpt_irq_ops, gpt);
256 &mpc52xx_gpt_irq_ops, -1);
257 if (!gpt->irqhost) { 256 if (!gpt->irqhost) {
258 dev_err(gpt->dev, "irq_alloc_host() failed\n"); 257 dev_err(gpt->dev, "irq_domain_add_linear() failed\n");
259 return; 258 return;
260 } 259 }
261 260
262 gpt->irqhost->host_data = gpt;
263 irq_set_handler_data(cascade_virq, gpt); 261 irq_set_handler_data(cascade_virq, gpt);
264 irq_set_chained_handler(cascade_virq, mpc52xx_gpt_irq_cascade); 262 irq_set_chained_handler(cascade_virq, mpc52xx_gpt_irq_cascade);
265 263
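The gpt conversion keeps the existing cascade wiring untouched: the parent interrupt still carries the driver data via irq_set_handler_data() and demultiplexes through irq_set_chained_handler(). A sketch of what such a cascade typically looks like on the new API, assuming a domain created as in the previous sketch (my_gpt and its single-source layout are hypothetical):

#include <linux/irq.h>
#include <linux/irqdomain.h>

struct my_gpt {
        struct irq_domain *irqhost;
        void __iomem *regs;
};

static void my_gpt_irq_cascade(unsigned int virq, struct irq_desc *desc)
{
        struct my_gpt *gpt = irq_get_handler_data(virq);
        struct irq_chip *chip = irq_desc_get_chip(desc);
        unsigned int sub_virq;

        /* This block exposes a single source, so hwirq 0 is the only
         * possible translation; irq_linear_revmap() returns 0 until the
         * mapping has been created (e.g. via irq_create_mapping()). */
        sub_virq = irq_linear_revmap(gpt->irqhost, 0);
        if (sub_virq)
                generic_handle_irq(sub_virq);

        /* Ack/eoi policy depends on the parent controller. */
        if (chip->irq_eoi)
                chip->irq_eoi(&desc->irq_data);
}

static void my_gpt_irq_setup(struct my_gpt *gpt, unsigned int cascade_virq)
{
        irq_set_handler_data(cascade_virq, gpt);
        irq_set_chained_handler(cascade_virq, my_gpt_irq_cascade);
}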
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pic.c b/arch/powerpc/platforms/52xx/mpc52xx_pic.c
index 1a9a49570579..8520b58a5e9a 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_pic.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_pic.c
@@ -132,7 +132,7 @@ static struct of_device_id mpc52xx_sdma_ids[] __initdata = {
132 132
133static struct mpc52xx_intr __iomem *intr; 133static struct mpc52xx_intr __iomem *intr;
134static struct mpc52xx_sdma __iomem *sdma; 134static struct mpc52xx_sdma __iomem *sdma;
135static struct irq_host *mpc52xx_irqhost = NULL; 135static struct irq_domain *mpc52xx_irqhost = NULL;
136 136
137static unsigned char mpc52xx_map_senses[4] = { 137static unsigned char mpc52xx_map_senses[4] = {
138 IRQ_TYPE_LEVEL_HIGH, 138 IRQ_TYPE_LEVEL_HIGH,
@@ -301,7 +301,7 @@ static int mpc52xx_is_extirq(int l1, int l2)
301/** 301/**
302 * mpc52xx_irqhost_xlate - translate virq# from device tree interrupts property 302 * mpc52xx_irqhost_xlate - translate virq# from device tree interrupts property
303 */ 303 */
304static int mpc52xx_irqhost_xlate(struct irq_host *h, struct device_node *ct, 304static int mpc52xx_irqhost_xlate(struct irq_domain *h, struct device_node *ct,
305 const u32 *intspec, unsigned int intsize, 305 const u32 *intspec, unsigned int intsize,
306 irq_hw_number_t *out_hwirq, 306 irq_hw_number_t *out_hwirq,
307 unsigned int *out_flags) 307 unsigned int *out_flags)
@@ -335,7 +335,7 @@ static int mpc52xx_irqhost_xlate(struct irq_host *h, struct device_node *ct,
335/** 335/**
336 * mpc52xx_irqhost_map - Hook to map from virq to an irq_chip structure 336 * mpc52xx_irqhost_map - Hook to map from virq to an irq_chip structure
337 */ 337 */
338static int mpc52xx_irqhost_map(struct irq_host *h, unsigned int virq, 338static int mpc52xx_irqhost_map(struct irq_domain *h, unsigned int virq,
339 irq_hw_number_t irq) 339 irq_hw_number_t irq)
340{ 340{
341 int l1irq; 341 int l1irq;
@@ -384,7 +384,7 @@ static int mpc52xx_irqhost_map(struct irq_host *h, unsigned int virq,
384 return 0; 384 return 0;
385} 385}
386 386
387static struct irq_host_ops mpc52xx_irqhost_ops = { 387static const struct irq_domain_ops mpc52xx_irqhost_ops = {
388 .xlate = mpc52xx_irqhost_xlate, 388 .xlate = mpc52xx_irqhost_xlate,
389 .map = mpc52xx_irqhost_map, 389 .map = mpc52xx_irqhost_map,
390}; 390};
@@ -444,9 +444,9 @@ void __init mpc52xx_init_irq(void)
444 * As last step, add an irq host to translate the real 444 * As last step, add an irq host to translate the real
445 * hw irq information provided by the ofw to linux virq 445 * hw irq information provided by the ofw to linux virq
446 */ 446 */
447 mpc52xx_irqhost = irq_alloc_host(picnode, IRQ_HOST_MAP_LINEAR, 447 mpc52xx_irqhost = irq_domain_add_linear(picnode,
448 MPC52xx_IRQ_HIGHTESTHWIRQ, 448 MPC52xx_IRQ_HIGHTESTHWIRQ,
449 &mpc52xx_irqhost_ops, -1); 449 &mpc52xx_irqhost_ops, NULL);
450 450
451 if (!mpc52xx_irqhost) 451 if (!mpc52xx_irqhost)
452 panic(__FILE__ ": Cannot allocate the IRQ host\n"); 452 panic(__FILE__ ": Cannot allocate the IRQ host\n");
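For mpc52xx_pic only the first parameter of the hooks changes; the hand-written .xlate logic that decodes the three-cell specifier carries over as-is. A sketch of such a translation hook on the new prototype, for a hypothetical two-level controller (MY_L1_SHIFT and the cell layout are invented for illustration):

#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/of.h>

#define MY_L1_SHIFT     4

/* Hypothetical 3-cell specifier: <level1 level2 sense>, packed into one
 * linear hwirq number so a single linear domain covers both banks. */
static int my_pic_xlate(struct irq_domain *d, struct device_node *ct,
                        const u32 *intspec, unsigned int intsize,
                        irq_hw_number_t *out_hwirq, unsigned int *out_flags)
{
        if (intsize != 3)
                return -EINVAL;

        *out_hwirq = (intspec[0] << MY_L1_SHIFT) | intspec[1];
        *out_flags = intspec[2] ? IRQ_TYPE_LEVEL_LOW : IRQ_TYPE_LEVEL_HIGH;
        return 0;
}

Returning -EINVAL for a malformed specifier lets irq_create_of_mapping() fail cleanly instead of mapping a bogus hwirq.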
diff --git a/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c b/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c
index 8ccf9ed62fe2..328d221fd1c0 100644
--- a/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c
+++ b/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c
@@ -29,7 +29,7 @@ static DEFINE_RAW_SPINLOCK(pci_pic_lock);
29 29
30struct pq2ads_pci_pic { 30struct pq2ads_pci_pic {
31 struct device_node *node; 31 struct device_node *node;
32 struct irq_host *host; 32 struct irq_domain *host;
33 33
34 struct { 34 struct {
35 u32 stat; 35 u32 stat;
@@ -103,7 +103,7 @@ static void pq2ads_pci_irq_demux(unsigned int irq, struct irq_desc *desc)
103 } 103 }
104} 104}
105 105
106static int pci_pic_host_map(struct irq_host *h, unsigned int virq, 106static int pci_pic_host_map(struct irq_domain *h, unsigned int virq,
107 irq_hw_number_t hw) 107 irq_hw_number_t hw)
108{ 108{
109 irq_set_status_flags(virq, IRQ_LEVEL); 109 irq_set_status_flags(virq, IRQ_LEVEL);
@@ -112,14 +112,14 @@ static int pci_pic_host_map(struct irq_host *h, unsigned int virq,
112 return 0; 112 return 0;
113} 113}
114 114
115static struct irq_host_ops pci_pic_host_ops = { 115static const struct irq_domain_ops pci_pic_host_ops = {
116 .map = pci_pic_host_map, 116 .map = pci_pic_host_map,
117}; 117};
118 118
119int __init pq2ads_pci_init_irq(void) 119int __init pq2ads_pci_init_irq(void)
120{ 120{
121 struct pq2ads_pci_pic *priv; 121 struct pq2ads_pci_pic *priv;
122 struct irq_host *host; 122 struct irq_domain *host;
123 struct device_node *np; 123 struct device_node *np;
124 int ret = -ENODEV; 124 int ret = -ENODEV;
125 int irq; 125 int irq;
@@ -156,17 +156,13 @@ int __init pq2ads_pci_init_irq(void)
156 out_be32(&priv->regs->mask, ~0); 156 out_be32(&priv->regs->mask, ~0);
157 mb(); 157 mb();
158 158
159 host = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, NUM_IRQS, 159 host = irq_domain_add_linear(np, NUM_IRQS, &pci_pic_host_ops, priv);
160 &pci_pic_host_ops, NUM_IRQS);
161 if (!host) { 160 if (!host) {
162 ret = -ENOMEM; 161 ret = -ENOMEM;
163 goto out_unmap_regs; 162 goto out_unmap_regs;
164 } 163 }
165 164
166 host->host_data = priv;
167
168 priv->host = host; 165 priv->host = host;
169 host->host_data = priv;
170 irq_set_handler_data(irq, priv); 166 irq_set_handler_data(irq, priv);
171 irq_set_chained_handler(irq, pq2ads_pci_irq_demux); 167 irq_set_chained_handler(irq, pq2ads_pci_irq_demux);
172 168
diff --git a/arch/powerpc/platforms/85xx/socrates_fpga_pic.c b/arch/powerpc/platforms/85xx/socrates_fpga_pic.c
index 12cb9bb2cc68..3bbbf7489487 100644
--- a/arch/powerpc/platforms/85xx/socrates_fpga_pic.c
+++ b/arch/powerpc/platforms/85xx/socrates_fpga_pic.c
@@ -51,7 +51,7 @@ static struct socrates_fpga_irq_info fpga_irqs[SOCRATES_FPGA_NUM_IRQS] = {
51static DEFINE_RAW_SPINLOCK(socrates_fpga_pic_lock); 51static DEFINE_RAW_SPINLOCK(socrates_fpga_pic_lock);
52 52
53static void __iomem *socrates_fpga_pic_iobase; 53static void __iomem *socrates_fpga_pic_iobase;
54static struct irq_host *socrates_fpga_pic_irq_host; 54static struct irq_domain *socrates_fpga_pic_irq_host;
55static unsigned int socrates_fpga_irqs[3]; 55static unsigned int socrates_fpga_irqs[3];
56 56
57static inline uint32_t socrates_fpga_pic_read(int reg) 57static inline uint32_t socrates_fpga_pic_read(int reg)
@@ -227,7 +227,7 @@ static struct irq_chip socrates_fpga_pic_chip = {
227 .irq_set_type = socrates_fpga_pic_set_type, 227 .irq_set_type = socrates_fpga_pic_set_type,
228}; 228};
229 229
230static int socrates_fpga_pic_host_map(struct irq_host *h, unsigned int virq, 230static int socrates_fpga_pic_host_map(struct irq_domain *h, unsigned int virq,
231 irq_hw_number_t hwirq) 231 irq_hw_number_t hwirq)
232{ 232{
233 /* All interrupts are LEVEL sensitive */ 233 /* All interrupts are LEVEL sensitive */
@@ -238,7 +238,7 @@ static int socrates_fpga_pic_host_map(struct irq_host *h, unsigned int virq,
238 return 0; 238 return 0;
239} 239}
240 240
241static int socrates_fpga_pic_host_xlate(struct irq_host *h, 241static int socrates_fpga_pic_host_xlate(struct irq_domain *h,
242 struct device_node *ct, const u32 *intspec, unsigned int intsize, 242 struct device_node *ct, const u32 *intspec, unsigned int intsize,
243 irq_hw_number_t *out_hwirq, unsigned int *out_flags) 243 irq_hw_number_t *out_hwirq, unsigned int *out_flags)
244{ 244{
@@ -269,7 +269,7 @@ static int socrates_fpga_pic_host_xlate(struct irq_host *h,
269 return 0; 269 return 0;
270} 270}
271 271
272static struct irq_host_ops socrates_fpga_pic_host_ops = { 272static const struct irq_domain_ops socrates_fpga_pic_host_ops = {
273 .map = socrates_fpga_pic_host_map, 273 .map = socrates_fpga_pic_host_map,
274 .xlate = socrates_fpga_pic_host_xlate, 274 .xlate = socrates_fpga_pic_host_xlate,
275}; 275};
@@ -279,10 +279,9 @@ void socrates_fpga_pic_init(struct device_node *pic)
279 unsigned long flags; 279 unsigned long flags;
280 int i; 280 int i;
281 281
282 /* Setup an irq_host structure */ 282 /* Setup an irq_domain structure */
283 socrates_fpga_pic_irq_host = irq_alloc_host(pic, IRQ_HOST_MAP_LINEAR, 283 socrates_fpga_pic_irq_host = irq_domain_add_linear(pic,
284 SOCRATES_FPGA_NUM_IRQS, &socrates_fpga_pic_host_ops, 284 SOCRATES_FPGA_NUM_IRQS, &socrates_fpga_pic_host_ops, NULL);
285 SOCRATES_FPGA_NUM_IRQS);
286 if (socrates_fpga_pic_irq_host == NULL) { 285 if (socrates_fpga_pic_irq_host == NULL) {
287 pr_err("FPGA PIC: Unable to allocate host\n"); 286 pr_err("FPGA PIC: Unable to allocate host\n");
288 return; 287 return;
diff --git a/arch/powerpc/platforms/86xx/gef_pic.c b/arch/powerpc/platforms/86xx/gef_pic.c
index 94594e58594c..af3fd697de82 100644
--- a/arch/powerpc/platforms/86xx/gef_pic.c
+++ b/arch/powerpc/platforms/86xx/gef_pic.c
@@ -50,7 +50,7 @@
50static DEFINE_RAW_SPINLOCK(gef_pic_lock); 50static DEFINE_RAW_SPINLOCK(gef_pic_lock);
51 51
52static void __iomem *gef_pic_irq_reg_base; 52static void __iomem *gef_pic_irq_reg_base;
53static struct irq_host *gef_pic_irq_host; 53static struct irq_domain *gef_pic_irq_host;
54static int gef_pic_cascade_irq; 54static int gef_pic_cascade_irq;
55 55
56/* 56/*
@@ -153,7 +153,7 @@ static struct irq_chip gef_pic_chip = {
153/* When an interrupt is being configured, this call allows some flexibility 153/* When an interrupt is being configured, this call allows some flexibility
154 * in deciding which irq_chip structure is used 154 * in deciding which irq_chip structure is used
155 */ 155 */
156static int gef_pic_host_map(struct irq_host *h, unsigned int virq, 156static int gef_pic_host_map(struct irq_domain *h, unsigned int virq,
157 irq_hw_number_t hwirq) 157 irq_hw_number_t hwirq)
158{ 158{
159 /* All interrupts are LEVEL sensitive */ 159 /* All interrupts are LEVEL sensitive */
@@ -163,7 +163,7 @@ static int gef_pic_host_map(struct irq_host *h, unsigned int virq,
163 return 0; 163 return 0;
164} 164}
165 165
166static int gef_pic_host_xlate(struct irq_host *h, struct device_node *ct, 166static int gef_pic_host_xlate(struct irq_domain *h, struct device_node *ct,
167 const u32 *intspec, unsigned int intsize, 167 const u32 *intspec, unsigned int intsize,
168 irq_hw_number_t *out_hwirq, unsigned int *out_flags) 168 irq_hw_number_t *out_hwirq, unsigned int *out_flags)
169{ 169{
@@ -177,7 +177,7 @@ static int gef_pic_host_xlate(struct irq_host *h, struct device_node *ct,
177 return 0; 177 return 0;
178} 178}
179 179
180static struct irq_host_ops gef_pic_host_ops = { 180static const struct irq_domain_ops gef_pic_host_ops = {
181 .map = gef_pic_host_map, 181 .map = gef_pic_host_map,
182 .xlate = gef_pic_host_xlate, 182 .xlate = gef_pic_host_xlate,
183}; 183};
@@ -211,10 +211,9 @@ void __init gef_pic_init(struct device_node *np)
211 return; 211 return;
212 } 212 }
213 213
214 /* Setup an irq_host structure */ 214 /* Setup an irq_domain structure */
215 gef_pic_irq_host = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, 215 gef_pic_irq_host = irq_domain_add_linear(np, GEF_PIC_NUM_IRQS,
216 GEF_PIC_NUM_IRQS, 216 &gef_pic_host_ops, NULL);
217 &gef_pic_host_ops, NO_IRQ);
218 if (gef_pic_irq_host == NULL) 217 if (gef_pic_irq_host == NULL)
219 return; 218 return;
220 219
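The pq2ads, socrates and gef hunks share one .map shape: flag the line as level sensitive and install a level flow handler, with the struct irq_domain * parameter being the only textual change. A condensed sketch of that hook (my_board_pic_chip is a placeholder chip, not from this patch):

#include <linux/irq.h>
#include <linux/irqdomain.h>

static struct irq_chip my_board_pic_chip = {
        .name = "board-fpga",
};

static int my_board_pic_map(struct irq_domain *d, unsigned int virq,
                            irq_hw_number_t hw)
{
        /* All sources behind this board PIC are level sensitive. */
        irq_set_status_flags(virq, IRQ_LEVEL);
        irq_set_chip_and_handler(virq, &my_board_pic_chip, handle_level_irq);
        return 0;
}

static const struct irq_domain_ops my_board_pic_ops = {
        .map = my_board_pic_map,
};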
diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c
index 40a6e34793b4..db360fc4cf0e 100644
--- a/arch/powerpc/platforms/cell/axon_msi.c
+++ b/arch/powerpc/platforms/cell/axon_msi.c
@@ -67,7 +67,7 @@
67 67
68 68
69struct axon_msic { 69struct axon_msic {
70 struct irq_host *irq_host; 70 struct irq_domain *irq_domain;
71 __le32 *fifo_virt; 71 __le32 *fifo_virt;
72 dma_addr_t fifo_phys; 72 dma_addr_t fifo_phys;
73 dcr_host_t dcr_host; 73 dcr_host_t dcr_host;
@@ -152,7 +152,7 @@ static void axon_msi_cascade(unsigned int irq, struct irq_desc *desc)
152 152
153static struct axon_msic *find_msi_translator(struct pci_dev *dev) 153static struct axon_msic *find_msi_translator(struct pci_dev *dev)
154{ 154{
155 struct irq_host *irq_host; 155 struct irq_domain *irq_domain;
156 struct device_node *dn, *tmp; 156 struct device_node *dn, *tmp;
157 const phandle *ph; 157 const phandle *ph;
158 struct axon_msic *msic = NULL; 158 struct axon_msic *msic = NULL;
@@ -184,14 +184,14 @@ static struct axon_msic *find_msi_translator(struct pci_dev *dev)
184 goto out_error; 184 goto out_error;
185 } 185 }
186 186
187 irq_host = irq_find_host(dn); 187 irq_domain = irq_find_host(dn);
188 if (!irq_host) { 188 if (!irq_domain) {
189 dev_dbg(&dev->dev, "axon_msi: no irq_host found for node %s\n", 189 dev_dbg(&dev->dev, "axon_msi: no irq_domain found for node %s\n",
190 dn->full_name); 190 dn->full_name);
191 goto out_error; 191 goto out_error;
192 } 192 }
193 193
194 msic = irq_host->host_data; 194 msic = irq_domain->host_data;
195 195
196out_error: 196out_error:
197 of_node_put(dn); 197 of_node_put(dn);
@@ -280,7 +280,7 @@ static int axon_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
280 BUILD_BUG_ON(NR_IRQS > 65536); 280 BUILD_BUG_ON(NR_IRQS > 65536);
281 281
282 list_for_each_entry(entry, &dev->msi_list, list) { 282 list_for_each_entry(entry, &dev->msi_list, list) {
283 virq = irq_create_direct_mapping(msic->irq_host); 283 virq = irq_create_direct_mapping(msic->irq_domain);
284 if (virq == NO_IRQ) { 284 if (virq == NO_IRQ) {
285 dev_warn(&dev->dev, 285 dev_warn(&dev->dev,
286 "axon_msi: virq allocation failed!\n"); 286 "axon_msi: virq allocation failed!\n");
@@ -318,7 +318,7 @@ static struct irq_chip msic_irq_chip = {
318 .name = "AXON-MSI", 318 .name = "AXON-MSI",
319}; 319};
320 320
321static int msic_host_map(struct irq_host *h, unsigned int virq, 321static int msic_host_map(struct irq_domain *h, unsigned int virq,
322 irq_hw_number_t hw) 322 irq_hw_number_t hw)
323{ 323{
324 irq_set_chip_data(virq, h->host_data); 324 irq_set_chip_data(virq, h->host_data);
@@ -327,7 +327,7 @@ static int msic_host_map(struct irq_host *h, unsigned int virq,
327 return 0; 327 return 0;
328} 328}
329 329
330static struct irq_host_ops msic_host_ops = { 330static const struct irq_domain_ops msic_host_ops = {
331 .map = msic_host_map, 331 .map = msic_host_map,
332}; 332};
333 333
@@ -337,7 +337,7 @@ static void axon_msi_shutdown(struct platform_device *device)
337 u32 tmp; 337 u32 tmp;
338 338
339 pr_devel("axon_msi: disabling %s\n", 339 pr_devel("axon_msi: disabling %s\n",
340 msic->irq_host->of_node->full_name); 340 msic->irq_domain->of_node->full_name);
341 tmp = dcr_read(msic->dcr_host, MSIC_CTRL_REG); 341 tmp = dcr_read(msic->dcr_host, MSIC_CTRL_REG);
342 tmp &= ~MSIC_CTRL_ENABLE & ~MSIC_CTRL_IRQ_ENABLE; 342 tmp &= ~MSIC_CTRL_ENABLE & ~MSIC_CTRL_IRQ_ENABLE;
343 msic_dcr_write(msic, MSIC_CTRL_REG, tmp); 343 msic_dcr_write(msic, MSIC_CTRL_REG, tmp);
@@ -392,16 +392,13 @@ static int axon_msi_probe(struct platform_device *device)
392 } 392 }
393 memset(msic->fifo_virt, 0xff, MSIC_FIFO_SIZE_BYTES); 393 memset(msic->fifo_virt, 0xff, MSIC_FIFO_SIZE_BYTES);
394 394
395 msic->irq_host = irq_alloc_host(dn, IRQ_HOST_MAP_NOMAP, 395 msic->irq_domain = irq_domain_add_nomap(dn, &msic_host_ops, msic);
396 NR_IRQS, &msic_host_ops, 0); 396 if (!msic->irq_domain) {
397 if (!msic->irq_host) { 397 printk(KERN_ERR "axon_msi: couldn't allocate irq_domain for %s\n",
398 printk(KERN_ERR "axon_msi: couldn't allocate irq_host for %s\n",
399 dn->full_name); 398 dn->full_name);
400 goto out_free_fifo; 399 goto out_free_fifo;
401 } 400 }
402 401
403 msic->irq_host->host_data = msic;
404
405 irq_set_handler_data(virq, msic); 402 irq_set_handler_data(virq, msic);
406 irq_set_chained_handler(virq, axon_msi_cascade); 403 irq_set_chained_handler(virq, axon_msi_cascade);
407 pr_devel("axon_msi: irq 0x%x setup for axon_msi\n", virq); 404 pr_devel("axon_msi: irq 0x%x setup for axon_msi\n", virq);
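axon_msi is the first NOMAP conversion in this series: irq_alloc_host(..., IRQ_HOST_MAP_NOMAP, NR_IRQS, ...) becomes the three-argument irq_domain_add_nomap(dn, &msic_host_ops, msic) used above, and each MSI still gets its interrupt from irq_create_direct_mapping(), which hands back a fresh virq with no reverse map behind it. A sketch of both halves, with hypothetical names (my_msic and my_msic_alloc_one are illustrative):

#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/of.h>

struct my_msic {
        struct irq_domain *domain;
};

static int my_msic_map(struct irq_domain *d, unsigned int virq,
                       irq_hw_number_t hw)
{
        irq_set_chip_data(virq, d->host_data);
        irq_set_chip_and_handler(virq, &dummy_irq_chip, handle_simple_irq);
        return 0;
}

static const struct irq_domain_ops my_msic_ops = {
        .map = my_msic_map,
};

static int my_msic_probe(struct device_node *dn, struct my_msic *msic)
{
        /* No reverse map: virqs are allocated on demand, one per MSI. */
        msic->domain = irq_domain_add_nomap(dn, &my_msic_ops, msic);
        if (!msic->domain)
                return -ENOMEM;
        return 0;
}

/* Consumer side: recover the translator from a device-tree node and
 * allocate one interrupt for it. */
static unsigned int my_msic_alloc_one(struct device_node *translator_node)
{
        struct irq_domain *d = irq_find_host(translator_node);
        struct my_msic *msic = d ? d->host_data : NULL;

        if (!msic)
                return 0;       /* NO_IRQ */
        return irq_create_direct_mapping(msic->domain);
}

irq_find_host() now returns a struct irq_domain *, so host_data is reachable directly from the translator's device-tree node, as the find_msi_translator() hunk shows.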
diff --git a/arch/powerpc/platforms/cell/beat_interrupt.c b/arch/powerpc/platforms/cell/beat_interrupt.c
index 55015e1f6939..e5c3a2c6090d 100644
--- a/arch/powerpc/platforms/cell/beat_interrupt.c
+++ b/arch/powerpc/platforms/cell/beat_interrupt.c
@@ -34,7 +34,7 @@ static DEFINE_RAW_SPINLOCK(beatic_irq_mask_lock);
34static uint64_t beatic_irq_mask_enable[(MAX_IRQS+255)/64]; 34static uint64_t beatic_irq_mask_enable[(MAX_IRQS+255)/64];
35static uint64_t beatic_irq_mask_ack[(MAX_IRQS+255)/64]; 35static uint64_t beatic_irq_mask_ack[(MAX_IRQS+255)/64];
36 36
37static struct irq_host *beatic_host; 37static struct irq_domain *beatic_host;
38 38
39/* 39/*
40 * In this implementation, "virq" == "IRQ plug number", 40 * In this implementation, "virq" == "IRQ plug number",
@@ -122,7 +122,7 @@ static struct irq_chip beatic_pic = {
122 * 122 *
123 * Note that the number (virq) is already assigned at upper layer. 123 * Note that the number (virq) is already assigned at upper layer.
124 */ 124 */
125static void beatic_pic_host_unmap(struct irq_host *h, unsigned int virq) 125static void beatic_pic_host_unmap(struct irq_domain *h, unsigned int virq)
126{ 126{
127 beat_destruct_irq_plug(virq); 127 beat_destruct_irq_plug(virq);
128} 128}
@@ -133,7 +133,7 @@ static void beatic_pic_host_unmap(struct irq_host *h, unsigned int virq)
133 * 133 *
134 * Note that the number (virq) is already assigned at upper layer. 134 * Note that the number (virq) is already assigned at upper layer.
135 */ 135 */
136static int beatic_pic_host_map(struct irq_host *h, unsigned int virq, 136static int beatic_pic_host_map(struct irq_domain *h, unsigned int virq,
137 irq_hw_number_t hw) 137 irq_hw_number_t hw)
138{ 138{
139 int64_t err; 139 int64_t err;
@@ -154,7 +154,7 @@ static int beatic_pic_host_map(struct irq_host *h, unsigned int virq,
154 * Called from irq_create_of_mapping() only. 154 * Called from irq_create_of_mapping() only.
155 * Note: We have only 1 entry to translate. 155 * Note: We have only 1 entry to translate.
156 */ 156 */
157static int beatic_pic_host_xlate(struct irq_host *h, struct device_node *ct, 157static int beatic_pic_host_xlate(struct irq_domain *h, struct device_node *ct,
158 const u32 *intspec, unsigned int intsize, 158 const u32 *intspec, unsigned int intsize,
159 irq_hw_number_t *out_hwirq, 159 irq_hw_number_t *out_hwirq,
160 unsigned int *out_flags) 160 unsigned int *out_flags)
@@ -166,13 +166,13 @@ static int beatic_pic_host_xlate(struct irq_host *h, struct device_node *ct,
166 return 0; 166 return 0;
167} 167}
168 168
169static int beatic_pic_host_match(struct irq_host *h, struct device_node *np) 169static int beatic_pic_host_match(struct irq_domain *h, struct device_node *np)
170{ 170{
171 /* Match all */ 171 /* Match all */
172 return 1; 172 return 1;
173} 173}
174 174
175static struct irq_host_ops beatic_pic_host_ops = { 175static const struct irq_domain_ops beatic_pic_host_ops = {
176 .map = beatic_pic_host_map, 176 .map = beatic_pic_host_map,
177 .unmap = beatic_pic_host_unmap, 177 .unmap = beatic_pic_host_unmap,
178 .xlate = beatic_pic_host_xlate, 178 .xlate = beatic_pic_host_xlate,
@@ -239,9 +239,7 @@ void __init beatic_init_IRQ(void)
239 ppc_md.get_irq = beatic_get_irq; 239 ppc_md.get_irq = beatic_get_irq;
240 240
241 /* Allocate an irq host */ 241 /* Allocate an irq host */
242 beatic_host = irq_alloc_host(NULL, IRQ_HOST_MAP_NOMAP, 0, 242 beatic_host = irq_domain_add_nomap(NULL, &beatic_pic_host_ops, NULL);
243 &beatic_pic_host_ops,
244 0);
245 BUG_ON(beatic_host == NULL); 243 BUG_ON(beatic_host == NULL);
246 irq_set_default_host(beatic_host); 244 irq_set_default_host(beatic_host);
247} 245}
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c
index 96a433dd2d64..2d42f3bb66d6 100644
--- a/arch/powerpc/platforms/cell/interrupt.c
+++ b/arch/powerpc/platforms/cell/interrupt.c
@@ -56,7 +56,7 @@ struct iic {
56 56
57static DEFINE_PER_CPU(struct iic, cpu_iic); 57static DEFINE_PER_CPU(struct iic, cpu_iic);
58#define IIC_NODE_COUNT 2 58#define IIC_NODE_COUNT 2
59static struct irq_host *iic_host; 59static struct irq_domain *iic_host;
60 60
61/* Convert between "pending" bits and hw irq number */ 61/* Convert between "pending" bits and hw irq number */
62static irq_hw_number_t iic_pending_to_hwnum(struct cbe_iic_pending_bits bits) 62static irq_hw_number_t iic_pending_to_hwnum(struct cbe_iic_pending_bits bits)
@@ -186,7 +186,7 @@ void iic_message_pass(int cpu, int msg)
186 out_be64(&per_cpu(cpu_iic, cpu).regs->generate, (0xf - msg) << 4); 186 out_be64(&per_cpu(cpu_iic, cpu).regs->generate, (0xf - msg) << 4);
187} 187}
188 188
189struct irq_host *iic_get_irq_host(int node) 189struct irq_domain *iic_get_irq_host(int node)
190{ 190{
191 return iic_host; 191 return iic_host;
192} 192}
@@ -222,13 +222,13 @@ void iic_request_IPIs(void)
222#endif /* CONFIG_SMP */ 222#endif /* CONFIG_SMP */
223 223
224 224
225static int iic_host_match(struct irq_host *h, struct device_node *node) 225static int iic_host_match(struct irq_domain *h, struct device_node *node)
226{ 226{
227 return of_device_is_compatible(node, 227 return of_device_is_compatible(node,
228 "IBM,CBEA-Internal-Interrupt-Controller"); 228 "IBM,CBEA-Internal-Interrupt-Controller");
229} 229}
230 230
231static int iic_host_map(struct irq_host *h, unsigned int virq, 231static int iic_host_map(struct irq_domain *h, unsigned int virq,
232 irq_hw_number_t hw) 232 irq_hw_number_t hw)
233{ 233{
234 switch (hw & IIC_IRQ_TYPE_MASK) { 234 switch (hw & IIC_IRQ_TYPE_MASK) {
@@ -245,7 +245,7 @@ static int iic_host_map(struct irq_host *h, unsigned int virq,
245 return 0; 245 return 0;
246} 246}
247 247
248static int iic_host_xlate(struct irq_host *h, struct device_node *ct, 248static int iic_host_xlate(struct irq_domain *h, struct device_node *ct,
249 const u32 *intspec, unsigned int intsize, 249 const u32 *intspec, unsigned int intsize,
250 irq_hw_number_t *out_hwirq, unsigned int *out_flags) 250 irq_hw_number_t *out_hwirq, unsigned int *out_flags)
251 251
@@ -285,7 +285,7 @@ static int iic_host_xlate(struct irq_host *h, struct device_node *ct,
285 return 0; 285 return 0;
286} 286}
287 287
288static struct irq_host_ops iic_host_ops = { 288static const struct irq_domain_ops iic_host_ops = {
289 .match = iic_host_match, 289 .match = iic_host_match,
290 .map = iic_host_map, 290 .map = iic_host_map,
291 .xlate = iic_host_xlate, 291 .xlate = iic_host_xlate,
@@ -378,8 +378,8 @@ static int __init setup_iic(void)
378void __init iic_init_IRQ(void) 378void __init iic_init_IRQ(void)
379{ 379{
380 /* Setup an irq host data structure */ 380 /* Setup an irq host data structure */
381 iic_host = irq_alloc_host(NULL, IRQ_HOST_MAP_LINEAR, IIC_SOURCE_COUNT, 381 iic_host = irq_domain_add_linear(NULL, IIC_SOURCE_COUNT, &iic_host_ops,
382 &iic_host_ops, IIC_IRQ_INVALID); 382 NULL);
383 BUG_ON(iic_host == NULL); 383 BUG_ON(iic_host == NULL);
384 irq_set_default_host(iic_host); 384 irq_set_default_host(iic_host);
385 385
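The cell IIC keeps a .match hook, which irq_find_host() consults when deciding which domain owns a device node, and it still registers itself as the default host so translations that name no interrupt parent land on it. A compact sketch of that combination (the compatible string and the 64-source size are invented):

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/of.h>

static int my_core_pic_match(struct irq_domain *d, struct device_node *node)
{
        /* Claim only nodes that really are this controller. */
        return of_device_is_compatible(node, "myvendor,core-pic");
}

static int my_core_pic_map(struct irq_domain *d, unsigned int virq,
                           irq_hw_number_t hw)
{
        irq_set_chip_and_handler(virq, &dummy_irq_chip, handle_percpu_irq);
        return 0;
}

static const struct irq_domain_ops my_core_pic_ops = {
        .match = my_core_pic_match,
        .map = my_core_pic_map,
};

static void __init my_core_pic_init(void)
{
        struct irq_domain *d;

        d = irq_domain_add_linear(NULL, 64, &my_core_pic_ops, NULL);
        BUG_ON(!d);
        /* Translations with no explicit interrupt parent land here. */
        irq_set_default_host(d);
}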
diff --git a/arch/powerpc/platforms/cell/spider-pic.c b/arch/powerpc/platforms/cell/spider-pic.c
index 442c28c00f88..d8b7cc8a66ca 100644
--- a/arch/powerpc/platforms/cell/spider-pic.c
+++ b/arch/powerpc/platforms/cell/spider-pic.c
@@ -62,7 +62,7 @@ enum {
62#define SPIDER_IRQ_INVALID 63 62#define SPIDER_IRQ_INVALID 63
63 63
64struct spider_pic { 64struct spider_pic {
65 struct irq_host *host; 65 struct irq_domain *host;
66 void __iomem *regs; 66 void __iomem *regs;
67 unsigned int node_id; 67 unsigned int node_id;
68}; 68};
@@ -168,7 +168,7 @@ static struct irq_chip spider_pic = {
168 .irq_set_type = spider_set_irq_type, 168 .irq_set_type = spider_set_irq_type,
169}; 169};
170 170
171static int spider_host_map(struct irq_host *h, unsigned int virq, 171static int spider_host_map(struct irq_domain *h, unsigned int virq,
172 irq_hw_number_t hw) 172 irq_hw_number_t hw)
173{ 173{
174 irq_set_chip_data(virq, h->host_data); 174 irq_set_chip_data(virq, h->host_data);
@@ -180,7 +180,7 @@ static int spider_host_map(struct irq_host *h, unsigned int virq,
180 return 0; 180 return 0;
181} 181}
182 182
183static int spider_host_xlate(struct irq_host *h, struct device_node *ct, 183static int spider_host_xlate(struct irq_domain *h, struct device_node *ct,
184 const u32 *intspec, unsigned int intsize, 184 const u32 *intspec, unsigned int intsize,
185 irq_hw_number_t *out_hwirq, unsigned int *out_flags) 185 irq_hw_number_t *out_hwirq, unsigned int *out_flags)
186 186
@@ -194,7 +194,7 @@ static int spider_host_xlate(struct irq_host *h, struct device_node *ct,
194 return 0; 194 return 0;
195} 195}
196 196
197static struct irq_host_ops spider_host_ops = { 197static const struct irq_domain_ops spider_host_ops = {
198 .map = spider_host_map, 198 .map = spider_host_map,
199 .xlate = spider_host_xlate, 199 .xlate = spider_host_xlate,
200}; 200};
@@ -299,12 +299,10 @@ static void __init spider_init_one(struct device_node *of_node, int chip,
299 panic("spider_pic: can't map registers !"); 299 panic("spider_pic: can't map registers !");
300 300
301 /* Allocate a host */ 301 /* Allocate a host */
302 pic->host = irq_alloc_host(of_node, IRQ_HOST_MAP_LINEAR, 302 pic->host = irq_domain_add_linear(of_node, SPIDER_SRC_COUNT,
303 SPIDER_SRC_COUNT, &spider_host_ops, 303 &spider_host_ops, pic);
304 SPIDER_IRQ_INVALID);
305 if (pic->host == NULL) 304 if (pic->host == NULL)
306 panic("spider_pic: can't allocate irq host !"); 305 panic("spider_pic: can't allocate irq host !");
307 pic->host->host_data = pic;
308 306
309 /* Go through all sources and disable them */ 307 /* Go through all sources and disable them */
310 for (i = 0; i < SPIDER_SRC_COUNT; i++) { 308 for (i = 0; i < SPIDER_SRC_COUNT; i++) {
diff --git a/arch/powerpc/platforms/embedded6xx/flipper-pic.c b/arch/powerpc/platforms/embedded6xx/flipper-pic.c
index f61a2dd96b99..53d6eee01963 100644
--- a/arch/powerpc/platforms/embedded6xx/flipper-pic.c
+++ b/arch/powerpc/platforms/embedded6xx/flipper-pic.c
@@ -96,9 +96,9 @@ static struct irq_chip flipper_pic = {
96 * 96 *
97 */ 97 */
98 98
99static struct irq_host *flipper_irq_host; 99static struct irq_domain *flipper_irq_host;
100 100
101static int flipper_pic_map(struct irq_host *h, unsigned int virq, 101static int flipper_pic_map(struct irq_domain *h, unsigned int virq,
102 irq_hw_number_t hwirq) 102 irq_hw_number_t hwirq)
103{ 103{
104 irq_set_chip_data(virq, h->host_data); 104 irq_set_chip_data(virq, h->host_data);
@@ -107,13 +107,13 @@ static int flipper_pic_map(struct irq_host *h, unsigned int virq,
107 return 0; 107 return 0;
108} 108}
109 109
110static int flipper_pic_match(struct irq_host *h, struct device_node *np) 110static int flipper_pic_match(struct irq_domain *h, struct device_node *np)
111{ 111{
112 return 1; 112 return 1;
113} 113}
114 114
115 115
116static struct irq_host_ops flipper_irq_host_ops = { 116static const struct irq_domain_ops flipper_irq_domain_ops = {
117 .map = flipper_pic_map, 117 .map = flipper_pic_map,
118 .match = flipper_pic_match, 118 .match = flipper_pic_match,
119}; 119};
@@ -130,10 +130,10 @@ static void __flipper_quiesce(void __iomem *io_base)
130 out_be32(io_base + FLIPPER_ICR, 0xffffffff); 130 out_be32(io_base + FLIPPER_ICR, 0xffffffff);
131} 131}
132 132
133struct irq_host * __init flipper_pic_init(struct device_node *np) 133struct irq_domain * __init flipper_pic_init(struct device_node *np)
134{ 134{
135 struct device_node *pi; 135 struct device_node *pi;
136 struct irq_host *irq_host = NULL; 136 struct irq_domain *irq_domain = NULL;
137 struct resource res; 137 struct resource res;
138 void __iomem *io_base; 138 void __iomem *io_base;
139 int retval; 139 int retval;
@@ -159,17 +159,15 @@ struct irq_host * __init flipper_pic_init(struct device_node *np)
159 159
160 __flipper_quiesce(io_base); 160 __flipper_quiesce(io_base);
161 161
162 irq_host = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, FLIPPER_NR_IRQS, 162 irq_domain = irq_domain_add_linear(np, FLIPPER_NR_IRQS,
163 &flipper_irq_host_ops, -1); 163 &flipper_irq_domain_ops, io_base);
164 if (!irq_host) { 164 if (!irq_domain) {
165 pr_err("failed to allocate irq_host\n"); 165 pr_err("failed to allocate irq_domain\n");
166 return NULL; 166 return NULL;
167 } 167 }
168 168
169 irq_host->host_data = io_base;
170
171out: 169out:
172 return irq_host; 170 return irq_domain;
173} 171}
174 172
175unsigned int flipper_pic_get_irq(void) 173unsigned int flipper_pic_get_irq(void)
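flipper-pic illustrates that host_data does not have to be a driver structure: the converted code passes the ioremapped register base straight to irq_domain_add_linear(), so .map and the interrupt fast path can fetch it back without a file-scope global. A sketch under that assumption (register offsets and the 32-source size are made up):

#include <linux/init.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/io.h>
#include <linux/of.h>

#define MY_PIC_ICR      0x00    /* cause, write 1 to clear (hypothetical) */
#define MY_PIC_IMR      0x04    /* mask (hypothetical) */

static int my_mmio_pic_map(struct irq_domain *d, unsigned int virq,
                           irq_hw_number_t hw)
{
        /* Chip functions pull the register base out of chip_data. */
        irq_set_chip_data(virq, d->host_data);
        irq_set_chip_and_handler(virq, &dummy_irq_chip, handle_level_irq);
        return 0;
}

static const struct irq_domain_ops my_mmio_pic_ops = {
        .map = my_mmio_pic_map,
};

static struct irq_domain * __init my_mmio_pic_init(struct device_node *np,
                                                   void __iomem *io_base)
{
        /* Quiesce the block first, then publish the domain. */
        out_be32(io_base + MY_PIC_IMR, 0);
        out_be32(io_base + MY_PIC_ICR, 0xffffffff);

        return irq_domain_add_linear(np, 32, &my_mmio_pic_ops, io_base);
}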
diff --git a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
index e4919170c6bc..3006b5117ec6 100644
--- a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
+++ b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
@@ -89,9 +89,9 @@ static struct irq_chip hlwd_pic = {
89 * 89 *
90 */ 90 */
91 91
92static struct irq_host *hlwd_irq_host; 92static struct irq_domain *hlwd_irq_host;
93 93
94static int hlwd_pic_map(struct irq_host *h, unsigned int virq, 94static int hlwd_pic_map(struct irq_domain *h, unsigned int virq,
95 irq_hw_number_t hwirq) 95 irq_hw_number_t hwirq)
96{ 96{
97 irq_set_chip_data(virq, h->host_data); 97 irq_set_chip_data(virq, h->host_data);
@@ -100,11 +100,11 @@ static int hlwd_pic_map(struct irq_host *h, unsigned int virq,
100 return 0; 100 return 0;
101} 101}
102 102
103static struct irq_host_ops hlwd_irq_host_ops = { 103static const struct irq_domain_ops hlwd_irq_domain_ops = {
104 .map = hlwd_pic_map, 104 .map = hlwd_pic_map,
105}; 105};
106 106
107static unsigned int __hlwd_pic_get_irq(struct irq_host *h) 107static unsigned int __hlwd_pic_get_irq(struct irq_domain *h)
108{ 108{
109 void __iomem *io_base = h->host_data; 109 void __iomem *io_base = h->host_data;
110 int irq; 110 int irq;
@@ -123,14 +123,14 @@ static void hlwd_pic_irq_cascade(unsigned int cascade_virq,
123 struct irq_desc *desc) 123 struct irq_desc *desc)
124{ 124{
125 struct irq_chip *chip = irq_desc_get_chip(desc); 125 struct irq_chip *chip = irq_desc_get_chip(desc);
126 struct irq_host *irq_host = irq_get_handler_data(cascade_virq); 126 struct irq_domain *irq_domain = irq_get_handler_data(cascade_virq);
127 unsigned int virq; 127 unsigned int virq;
128 128
129 raw_spin_lock(&desc->lock); 129 raw_spin_lock(&desc->lock);
130 chip->irq_mask(&desc->irq_data); /* IRQ_LEVEL */ 130 chip->irq_mask(&desc->irq_data); /* IRQ_LEVEL */
131 raw_spin_unlock(&desc->lock); 131 raw_spin_unlock(&desc->lock);
132 132
133 virq = __hlwd_pic_get_irq(irq_host); 133 virq = __hlwd_pic_get_irq(irq_domain);
134 if (virq != NO_IRQ) 134 if (virq != NO_IRQ)
135 generic_handle_irq(virq); 135 generic_handle_irq(virq);
136 else 136 else
@@ -155,9 +155,9 @@ static void __hlwd_quiesce(void __iomem *io_base)
155 out_be32(io_base + HW_BROADWAY_ICR, 0xffffffff); 155 out_be32(io_base + HW_BROADWAY_ICR, 0xffffffff);
156} 156}
157 157
158struct irq_host *hlwd_pic_init(struct device_node *np) 158struct irq_domain *hlwd_pic_init(struct device_node *np)
159{ 159{
160 struct irq_host *irq_host; 160 struct irq_domain *irq_domain;
161 struct resource res; 161 struct resource res;
162 void __iomem *io_base; 162 void __iomem *io_base;
163 int retval; 163 int retval;
@@ -177,15 +177,14 @@ struct irq_host *hlwd_pic_init(struct device_node *np)
177 177
178 __hlwd_quiesce(io_base); 178 __hlwd_quiesce(io_base);
179 179
180 irq_host = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, HLWD_NR_IRQS, 180 irq_domain = irq_domain_add_linear(np, HLWD_NR_IRQS,
181 &hlwd_irq_host_ops, -1); 181 &hlwd_irq_domain_ops, io_base);
182 if (!irq_host) { 182 if (!irq_domain) {
183 pr_err("failed to allocate irq_host\n"); 183 pr_err("failed to allocate irq_domain\n");
184 return NULL; 184 return NULL;
185 } 185 }
186 irq_host->host_data = io_base;
187 186
188 return irq_host; 187 return irq_domain;
189} 188}
190 189
191unsigned int hlwd_pic_get_irq(void) 190unsigned int hlwd_pic_get_irq(void)
@@ -200,7 +199,7 @@ unsigned int hlwd_pic_get_irq(void)
200 199
201void hlwd_pic_probe(void) 200void hlwd_pic_probe(void)
202{ 201{
203 struct irq_host *host; 202 struct irq_domain *host;
204 struct device_node *np; 203 struct device_node *np;
205 const u32 *interrupts; 204 const u32 *interrupts;
206 int cascade_virq; 205 int cascade_virq;
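hlwd-pic goes one step further and uses the irq_domain itself as the cascade's handler data; the scan helper then needs nothing but host_data and irq_linear_revmap() to turn a pending bit into a virq. A sketch of that style of demux, assuming a cause/mask register pair (layout hypothetical) and a parent chip that provides mask/ack/unmask:

#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/io.h>
#include <linux/bitops.h>

#define MY_PIC_ICR      0x00
#define MY_PIC_IMR      0x04

static unsigned int __my_pic_get_irq(struct irq_domain *d)
{
        void __iomem *io_base = d->host_data;
        u32 pending;

        pending = in_be32(io_base + MY_PIC_ICR) & in_be32(io_base + MY_PIC_IMR);
        if (!pending)
                return 0;                       /* NO_IRQ */
        return irq_linear_revmap(d, __ffs(pending));
}

static void my_pic_irq_cascade(unsigned int cascade_virq,
                               struct irq_desc *desc)
{
        struct irq_chip *chip = irq_desc_get_chip(desc);
        struct irq_domain *d = irq_get_handler_data(cascade_virq);
        unsigned int virq;

        raw_spin_lock(&desc->lock);
        chip->irq_mask(&desc->irq_data);        /* parent is level triggered */
        raw_spin_unlock(&desc->lock);

        virq = __my_pic_get_irq(d);
        if (virq)
                generic_handle_irq(virq);

        raw_spin_lock(&desc->lock);
        chip->irq_ack(&desc->irq_data);
        chip->irq_unmask(&desc->irq_data);
        raw_spin_unlock(&desc->lock);
}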
diff --git a/arch/powerpc/platforms/iseries/irq.c b/arch/powerpc/platforms/iseries/irq.c
index b2103453eb01..05ce5164cafc 100644
--- a/arch/powerpc/platforms/iseries/irq.c
+++ b/arch/powerpc/platforms/iseries/irq.c
@@ -342,7 +342,7 @@ unsigned int iSeries_get_irq(void)
342 342
343#ifdef CONFIG_PCI 343#ifdef CONFIG_PCI
344 344
345static int iseries_irq_host_map(struct irq_host *h, unsigned int virq, 345static int iseries_irq_host_map(struct irq_domain *h, unsigned int virq,
346 irq_hw_number_t hw) 346 irq_hw_number_t hw)
347{ 347{
348 irq_set_chip_and_handler(virq, &iseries_pic, handle_fasteoi_irq); 348 irq_set_chip_and_handler(virq, &iseries_pic, handle_fasteoi_irq);
@@ -350,13 +350,13 @@ static int iseries_irq_host_map(struct irq_host *h, unsigned int virq,
350 return 0; 350 return 0;
351} 351}
352 352
353static int iseries_irq_host_match(struct irq_host *h, struct device_node *np) 353static int iseries_irq_host_match(struct irq_domain *h, struct device_node *np)
354{ 354{
355 /* Match all */ 355 /* Match all */
356 return 1; 356 return 1;
357} 357}
358 358
359static struct irq_host_ops iseries_irq_host_ops = { 359static const struct irq_domain_ops iseries_irq_domain_ops = {
360 .map = iseries_irq_host_map, 360 .map = iseries_irq_host_map,
361 .match = iseries_irq_host_match, 361 .match = iseries_irq_host_match,
362}; 362};
@@ -368,7 +368,7 @@ static struct irq_host_ops iseries_irq_host_ops = {
368void __init iSeries_init_IRQ(void) 368void __init iSeries_init_IRQ(void)
369{ 369{
370 /* Register PCI event handler and open an event path */ 370 /* Register PCI event handler and open an event path */
371 struct irq_host *host; 371 struct irq_domain *host;
372 int ret; 372 int ret;
373 373
374 /* 374 /*
@@ -380,8 +380,7 @@ void __init iSeries_init_IRQ(void)
380 /* Create irq host. No need for a revmap since HV will give us 380 /* Create irq host. No need for a revmap since HV will give us
381 * back our virtual irq number 381 * back our virtual irq number
382 */ 382 */
383 host = irq_alloc_host(NULL, IRQ_HOST_MAP_NOMAP, 0, 383 host = irq_domain_add_nomap(NULL, &iseries_irq_domain_ops, NULL);
384 &iseries_irq_host_ops, 0);
385 BUG_ON(host == NULL); 384 BUG_ON(host == NULL);
386 irq_set_default_host(host); 385 irq_set_default_host(host);
387 386
diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c
index 7761aabfc293..92afc382a49e 100644
--- a/arch/powerpc/platforms/powermac/pic.c
+++ b/arch/powerpc/platforms/powermac/pic.c
@@ -61,7 +61,7 @@ static DEFINE_RAW_SPINLOCK(pmac_pic_lock);
61static unsigned long ppc_lost_interrupts[NR_MASK_WORDS]; 61static unsigned long ppc_lost_interrupts[NR_MASK_WORDS];
62static unsigned long ppc_cached_irq_mask[NR_MASK_WORDS]; 62static unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
63static int pmac_irq_cascade = -1; 63static int pmac_irq_cascade = -1;
64static struct irq_host *pmac_pic_host; 64static struct irq_domain *pmac_pic_host;
65 65
66static void __pmac_retrigger(unsigned int irq_nr) 66static void __pmac_retrigger(unsigned int irq_nr)
67{ 67{
@@ -268,13 +268,13 @@ static struct irqaction gatwick_cascade_action = {
268 .name = "cascade", 268 .name = "cascade",
269}; 269};
270 270
271static int pmac_pic_host_match(struct irq_host *h, struct device_node *node) 271static int pmac_pic_host_match(struct irq_domain *h, struct device_node *node)
272{ 272{
273 /* We match all, we don't always have a node anyway */ 273 /* We match all, we don't always have a node anyway */
274 return 1; 274 return 1;
275} 275}
276 276
277static int pmac_pic_host_map(struct irq_host *h, unsigned int virq, 277static int pmac_pic_host_map(struct irq_domain *h, unsigned int virq,
278 irq_hw_number_t hw) 278 irq_hw_number_t hw)
279{ 279{
280 if (hw >= max_irqs) 280 if (hw >= max_irqs)
@@ -288,21 +288,10 @@ static int pmac_pic_host_map(struct irq_host *h, unsigned int virq,
288 return 0; 288 return 0;
289} 289}
290 290
291static int pmac_pic_host_xlate(struct irq_host *h, struct device_node *ct, 291static const struct irq_domain_ops pmac_pic_host_ops = {
292 const u32 *intspec, unsigned int intsize,
293 irq_hw_number_t *out_hwirq,
294 unsigned int *out_flags)
295
296{
297 *out_flags = IRQ_TYPE_NONE;
298 *out_hwirq = *intspec;
299 return 0;
300}
301
302static struct irq_host_ops pmac_pic_host_ops = {
303 .match = pmac_pic_host_match, 292 .match = pmac_pic_host_match,
304 .map = pmac_pic_host_map, 293 .map = pmac_pic_host_map,
305 .xlate = pmac_pic_host_xlate, 294 .xlate = irq_domain_xlate_onecell,
306}; 295};
307 296
308static void __init pmac_pic_probe_oldstyle(void) 297static void __init pmac_pic_probe_oldstyle(void)
@@ -352,9 +341,8 @@ static void __init pmac_pic_probe_oldstyle(void)
352 /* 341 /*
353 * Allocate an irq host 342 * Allocate an irq host
354 */ 343 */
355 pmac_pic_host = irq_alloc_host(master, IRQ_HOST_MAP_LINEAR, max_irqs, 344 pmac_pic_host = irq_domain_add_linear(master, max_irqs,
356 &pmac_pic_host_ops, 345 &pmac_pic_host_ops, NULL);
357 max_irqs);
358 BUG_ON(pmac_pic_host == NULL); 346 BUG_ON(pmac_pic_host == NULL);
359 irq_set_default_host(pmac_pic_host); 347 irq_set_default_host(pmac_pic_host);
360 348
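Where the old pmac xlate merely copied intspec[0] and reported IRQ_TYPE_NONE, the patch deletes it and points .xlate at the generic irq_domain_xlate_onecell() helper. A sketch of how an ops table ends up looking for such a one-cell binding (names are illustrative):

#include <linux/irq.h>
#include <linux/irqdomain.h>

static int my_onecell_map(struct irq_domain *d, unsigned int virq,
                          irq_hw_number_t hw)
{
        irq_set_chip_and_handler(virq, &dummy_irq_chip, handle_level_irq);
        return 0;
}

/* #interrupt-cells = <1>: cell 0 is the hwirq, and the trigger defaults
 * to IRQ_TYPE_NONE, which is exactly what the deleted xlate reported. */
static const struct irq_domain_ops my_onecell_ops = {
        .map = my_onecell_map,
        .xlate = irq_domain_xlate_onecell,
};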
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index 44d769258ebf..a81e5a88fbdf 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -125,7 +125,7 @@ static volatile u32 __iomem *psurge_start;
125static int psurge_type = PSURGE_NONE; 125static int psurge_type = PSURGE_NONE;
126 126
127/* irq for secondary cpus to report */ 127/* irq for secondary cpus to report */
128static struct irq_host *psurge_host; 128static struct irq_domain *psurge_host;
129int psurge_secondary_virq; 129int psurge_secondary_virq;
130 130
131/* 131/*
@@ -176,7 +176,7 @@ static void smp_psurge_cause_ipi(int cpu, unsigned long data)
176 psurge_set_ipi(cpu); 176 psurge_set_ipi(cpu);
177} 177}
178 178
179static int psurge_host_map(struct irq_host *h, unsigned int virq, 179static int psurge_host_map(struct irq_domain *h, unsigned int virq,
180 irq_hw_number_t hw) 180 irq_hw_number_t hw)
181{ 181{
182 irq_set_chip_and_handler(virq, &dummy_irq_chip, handle_percpu_irq); 182 irq_set_chip_and_handler(virq, &dummy_irq_chip, handle_percpu_irq);
@@ -184,7 +184,7 @@ static int psurge_host_map(struct irq_host *h, unsigned int virq,
184 return 0; 184 return 0;
185} 185}
186 186
187struct irq_host_ops psurge_host_ops = { 187static const struct irq_domain_ops psurge_host_ops = {
188 .map = psurge_host_map, 188 .map = psurge_host_map,
189}; 189};
190 190
@@ -192,8 +192,7 @@ static int psurge_secondary_ipi_init(void)
192{ 192{
193 int rc = -ENOMEM; 193 int rc = -ENOMEM;
194 194
195 psurge_host = irq_alloc_host(NULL, IRQ_HOST_MAP_NOMAP, 0, 195 psurge_host = irq_domain_add_nomap(NULL, &psurge_host_ops, NULL);
196 &psurge_host_ops, 0);
197 196
198 if (psurge_host) 197 if (psurge_host)
199 psurge_secondary_virq = irq_create_direct_mapping(psurge_host); 198 psurge_secondary_virq = irq_create_direct_mapping(psurge_host);
diff --git a/arch/powerpc/platforms/ps3/interrupt.c b/arch/powerpc/platforms/ps3/interrupt.c
index 617efa12a3a5..2a4ff86cc21f 100644
--- a/arch/powerpc/platforms/ps3/interrupt.c
+++ b/arch/powerpc/platforms/ps3/interrupt.c
@@ -667,7 +667,7 @@ static void __maybe_unused _dump_mask(struct ps3_private *pd,
667static void dump_bmp(struct ps3_private* pd) {}; 667static void dump_bmp(struct ps3_private* pd) {};
668#endif /* defined(DEBUG) */ 668#endif /* defined(DEBUG) */
669 669
670static int ps3_host_map(struct irq_host *h, unsigned int virq, 670static int ps3_host_map(struct irq_domain *h, unsigned int virq,
671 irq_hw_number_t hwirq) 671 irq_hw_number_t hwirq)
672{ 672{
673 DBG("%s:%d: hwirq %lu, virq %u\n", __func__, __LINE__, hwirq, 673 DBG("%s:%d: hwirq %lu, virq %u\n", __func__, __LINE__, hwirq,
@@ -678,13 +678,13 @@ static int ps3_host_map(struct irq_host *h, unsigned int virq,
678 return 0; 678 return 0;
679} 679}
680 680
681static int ps3_host_match(struct irq_host *h, struct device_node *np) 681static int ps3_host_match(struct irq_domain *h, struct device_node *np)
682{ 682{
683 /* Match all */ 683 /* Match all */
684 return 1; 684 return 1;
685} 685}
686 686
687static struct irq_host_ops ps3_host_ops = { 687static const struct irq_domain_ops ps3_host_ops = {
688 .map = ps3_host_map, 688 .map = ps3_host_map,
689 .match = ps3_host_match, 689 .match = ps3_host_match,
690}; 690};
@@ -751,10 +751,9 @@ void __init ps3_init_IRQ(void)
751{ 751{
752 int result; 752 int result;
753 unsigned cpu; 753 unsigned cpu;
754 struct irq_host *host; 754 struct irq_domain *host;
755 755
756 host = irq_alloc_host(NULL, IRQ_HOST_MAP_NOMAP, 0, &ps3_host_ops, 756 host = irq_domain_add_nomap(NULL, &ps3_host_ops, NULL);
757 PS3_INVALID_OUTLET);
758 irq_set_default_host(host); 757 irq_set_default_host(host);
759 irq_set_virq_count(PS3_PLUG_MAX + 1); 758 irq_set_virq_count(PS3_PLUG_MAX + 1);
760 759
diff --git a/arch/powerpc/platforms/wsp/opb_pic.c b/arch/powerpc/platforms/wsp/opb_pic.c
index 19f353dfcd03..cb565bf93650 100644
--- a/arch/powerpc/platforms/wsp/opb_pic.c
+++ b/arch/powerpc/platforms/wsp/opb_pic.c
@@ -30,7 +30,7 @@
30static int opb_index = 0; 30static int opb_index = 0;
31 31
32struct opb_pic { 32struct opb_pic {
33 struct irq_host *host; 33 struct irq_domain *host;
34 void *regs; 34 void *regs;
35 int index; 35 int index;
36 spinlock_t lock; 36 spinlock_t lock;
@@ -179,7 +179,7 @@ static struct irq_chip opb_irq_chip = {
179 .irq_set_type = opb_set_irq_type 179 .irq_set_type = opb_set_irq_type
180}; 180};
181 181
182static int opb_host_map(struct irq_host *host, unsigned int virq, 182static int opb_host_map(struct irq_domain *host, unsigned int virq,
183 irq_hw_number_t hwirq) 183 irq_hw_number_t hwirq)
184{ 184{
185 struct opb_pic *opb; 185 struct opb_pic *opb;
@@ -196,20 +196,9 @@ static int opb_host_map(struct irq_host *host, unsigned int virq,
196 return 0; 196 return 0;
197} 197}
198 198
199static int opb_host_xlate(struct irq_host *host, struct device_node *dn, 199static const struct irq_domain_ops opb_host_ops = {
200 const u32 *intspec, unsigned int intsize,
201 irq_hw_number_t *out_hwirq, unsigned int *out_type)
202{
203 /* Interrupt size must == 2 */
204 BUG_ON(intsize != 2);
205 *out_hwirq = intspec[0];
206 *out_type = intspec[1];
207 return 0;
208}
209
210static struct irq_host_ops opb_host_ops = {
211 .map = opb_host_map, 200 .map = opb_host_map,
212 .xlate = opb_host_xlate, 201 .xlate = irq_domain_xlate_twocell,
213}; 202};
214 203
215irqreturn_t opb_irq_handler(int irq, void *private) 204irqreturn_t opb_irq_handler(int irq, void *private)
@@ -263,13 +252,11 @@ struct opb_pic *opb_pic_init_one(struct device_node *dn)
263 goto free_opb; 252 goto free_opb;
264 } 253 }
265 254
266 /* Allocate an irq host so that Linux knows that despite only 255 /* Allocate an irq domain so that Linux knows that despite only
267 * having one interrupt to issue, we're the controller for multiple 256 * having one interrupt to issue, we're the controller for multiple
268 * hardware IRQs, so later we can lookup their virtual IRQs. */ 257 * hardware IRQs, so later we can lookup their virtual IRQs. */
269 258
270 opb->host = irq_alloc_host(dn, IRQ_HOST_MAP_LINEAR, 259 opb->host = irq_domain_add_linear(dn, OPB_NR_IRQS, &opb_host_ops, opb);
271 OPB_NR_IRQS, &opb_host_ops, -1);
272
273 if (!opb->host) { 260 if (!opb->host) {
274 printk(KERN_ERR "opb: Failed to allocate IRQ host!\n"); 261 printk(KERN_ERR "opb: Failed to allocate IRQ host!\n");
275 goto free_regs; 262 goto free_regs;
@@ -277,7 +264,6 @@ struct opb_pic *opb_pic_init_one(struct device_node *dn)
277 264
278 opb->index = opb_index++; 265 opb->index = opb_index++;
279 spin_lock_init(&opb->lock); 266 spin_lock_init(&opb->lock);
280 opb->host->host_data = opb;
281 267
282 /* Disable all interrupts by default */ 268 /* Disable all interrupts by default */
283 opb_out(opb, OPB_MLSASIER, 0); 269 opb_out(opb, OPB_MLSASIER, 0);
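opb_pic's removed xlate insisted on a two-cell specifier and copied hwirq and trigger type straight through; irq_domain_xlate_twocell() encodes essentially that convention (cell 0 is the hwirq, cell 1 the IRQ_TYPE_* flags), so the helper replaces it wholesale. A sketch with invented names and an example binding in the comment:

#include <linux/irq.h>
#include <linux/irqdomain.h>

static int my_opb_map(struct irq_domain *d, unsigned int virq,
                      irq_hw_number_t hw)
{
        irq_set_chip_and_handler(virq, &dummy_irq_chip, handle_level_irq);
        return 0;
}

/* Matches a two-cell binding such as
 *         interrupts = <11 2>;    (hwirq 11, IRQ_TYPE_EDGE_FALLING)
 * cell 0 supplies the hwirq, cell 1 the IRQ_TYPE_* trigger flags. */
static const struct irq_domain_ops my_opb_ops = {
        .map = my_opb_map,
        .xlate = irq_domain_xlate_twocell,
};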
diff --git a/arch/powerpc/sysdev/cpm1.c b/arch/powerpc/sysdev/cpm1.c
index 5d7d59a43c4c..d4fa03f2b6ac 100644
--- a/arch/powerpc/sysdev/cpm1.c
+++ b/arch/powerpc/sysdev/cpm1.c
@@ -54,7 +54,7 @@ cpm8xx_t __iomem *cpmp; /* Pointer to comm processor space */
54immap_t __iomem *mpc8xx_immr; 54immap_t __iomem *mpc8xx_immr;
55static cpic8xx_t __iomem *cpic_reg; 55static cpic8xx_t __iomem *cpic_reg;
56 56
57static struct irq_host *cpm_pic_host; 57static struct irq_domain *cpm_pic_host;
58 58
59static void cpm_mask_irq(struct irq_data *d) 59static void cpm_mask_irq(struct irq_data *d)
60{ 60{
@@ -98,7 +98,7 @@ int cpm_get_irq(void)
98 return irq_linear_revmap(cpm_pic_host, cpm_vec); 98 return irq_linear_revmap(cpm_pic_host, cpm_vec);
99} 99}
100 100
101static int cpm_pic_host_map(struct irq_host *h, unsigned int virq, 101static int cpm_pic_host_map(struct irq_domain *h, unsigned int virq,
102 irq_hw_number_t hw) 102 irq_hw_number_t hw)
103{ 103{
104 pr_debug("cpm_pic_host_map(%d, 0x%lx)\n", virq, hw); 104 pr_debug("cpm_pic_host_map(%d, 0x%lx)\n", virq, hw);
@@ -123,7 +123,7 @@ static struct irqaction cpm_error_irqaction = {
123 .name = "error", 123 .name = "error",
124}; 124};
125 125
126static struct irq_host_ops cpm_pic_host_ops = { 126static const struct irq_domain_ops cpm_pic_host_ops = {
127 .map = cpm_pic_host_map, 127 .map = cpm_pic_host_map,
128}; 128};
129 129
@@ -164,8 +164,7 @@ unsigned int cpm_pic_init(void)
164 164
165 out_be32(&cpic_reg->cpic_cimr, 0); 165 out_be32(&cpic_reg->cpic_cimr, 0);
166 166
167 cpm_pic_host = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, 167 cpm_pic_host = irq_domain_add_linear(np, 64, &cpm_pic_host_ops, NULL);
168 64, &cpm_pic_host_ops, 64);
169 if (cpm_pic_host == NULL) { 168 if (cpm_pic_host == NULL) {
170 printk(KERN_ERR "CPM2 PIC: failed to allocate irq host!\n"); 169 printk(KERN_ERR "CPM2 PIC: failed to allocate irq host!\n");
171 sirq = NO_IRQ; 170 sirq = NO_IRQ;
diff --git a/arch/powerpc/sysdev/cpm2_pic.c b/arch/powerpc/sysdev/cpm2_pic.c
index bcab50e2a9eb..d3be961e2ae7 100644
--- a/arch/powerpc/sysdev/cpm2_pic.c
+++ b/arch/powerpc/sysdev/cpm2_pic.c
@@ -50,7 +50,7 @@
50 50
51static intctl_cpm2_t __iomem *cpm2_intctl; 51static intctl_cpm2_t __iomem *cpm2_intctl;
52 52
53static struct irq_host *cpm2_pic_host; 53static struct irq_domain *cpm2_pic_host;
54#define NR_MASK_WORDS ((NR_IRQS + 31) / 32) 54#define NR_MASK_WORDS ((NR_IRQS + 31) / 32)
55static unsigned long ppc_cached_irq_mask[NR_MASK_WORDS]; 55static unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
56 56
@@ -214,7 +214,7 @@ unsigned int cpm2_get_irq(void)
214 return irq_linear_revmap(cpm2_pic_host, irq); 214 return irq_linear_revmap(cpm2_pic_host, irq);
215} 215}
216 216
217static int cpm2_pic_host_map(struct irq_host *h, unsigned int virq, 217static int cpm2_pic_host_map(struct irq_domain *h, unsigned int virq,
218 irq_hw_number_t hw) 218 irq_hw_number_t hw)
219{ 219{
220 pr_debug("cpm2_pic_host_map(%d, 0x%lx)\n", virq, hw); 220 pr_debug("cpm2_pic_host_map(%d, 0x%lx)\n", virq, hw);
@@ -224,21 +224,9 @@ static int cpm2_pic_host_map(struct irq_host *h, unsigned int virq,
224 return 0; 224 return 0;
225} 225}
226 226
227static int cpm2_pic_host_xlate(struct irq_host *h, struct device_node *ct, 227static const struct irq_domain_ops cpm2_pic_host_ops = {
228 const u32 *intspec, unsigned int intsize,
229 irq_hw_number_t *out_hwirq, unsigned int *out_flags)
230{
231 *out_hwirq = intspec[0];
232 if (intsize > 1)
233 *out_flags = intspec[1];
234 else
235 *out_flags = IRQ_TYPE_NONE;
236 return 0;
237}
238
239static struct irq_host_ops cpm2_pic_host_ops = {
240 .map = cpm2_pic_host_map, 228 .map = cpm2_pic_host_map,
241 .xlate = cpm2_pic_host_xlate, 229 .xlate = irq_domain_xlate_onetwocell,
242}; 230};
243 231
244void cpm2_pic_init(struct device_node *node) 232void cpm2_pic_init(struct device_node *node)
@@ -275,8 +263,7 @@ void cpm2_pic_init(struct device_node *node)
275 out_be32(&cpm2_intctl->ic_scprrl, 0x05309770); 263 out_be32(&cpm2_intctl->ic_scprrl, 0x05309770);
276 264
277 /* create a legacy host */ 265 /* create a legacy host */
278 cpm2_pic_host = irq_alloc_host(node, IRQ_HOST_MAP_LINEAR, 266 cpm2_pic_host = irq_domain_add_linear(node, 64, &cpm2_pic_host_ops, NULL);
279 64, &cpm2_pic_host_ops, 64);
280 if (cpm2_pic_host == NULL) { 267 if (cpm2_pic_host == NULL) {
281 printk(KERN_ERR "CPM2 PIC: failed to allocate irq host!\n"); 268 printk(KERN_ERR "CPM2 PIC: failed to allocate irq host!\n");
282 return; 269 return;
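cpm2 (and ipic later in this section) had the "optional sense cell" variant: one cell gives just the hwirq, and a second cell, if present, gives the trigger. irq_domain_xlate_onetwocell() provides the same behaviour; restated below for illustration only (this is not the kernel's implementation):

#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/of.h>

/* Behaviourally what irq_domain_xlate_onetwocell() gives a driver. */
static int my_onetwocell_xlate(struct irq_domain *d, struct device_node *ct,
                               const u32 *intspec, unsigned int intsize,
                               irq_hw_number_t *out_hwirq,
                               unsigned int *out_flags)
{
        if (WARN_ON(intsize < 1))
                return -EINVAL;
        *out_hwirq = intspec[0];
        *out_flags = intsize > 1 ? intspec[1] : IRQ_TYPE_NONE;
        return 0;
}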
diff --git a/arch/powerpc/sysdev/ehv_pic.c b/arch/powerpc/sysdev/ehv_pic.c
index b6731e4a6646..6e0e1005227f 100644
--- a/arch/powerpc/sysdev/ehv_pic.c
+++ b/arch/powerpc/sysdev/ehv_pic.c
@@ -182,13 +182,13 @@ unsigned int ehv_pic_get_irq(void)
182 return irq_linear_revmap(global_ehv_pic->irqhost, irq); 182 return irq_linear_revmap(global_ehv_pic->irqhost, irq);
183} 183}
184 184
185static int ehv_pic_host_match(struct irq_host *h, struct device_node *node) 185static int ehv_pic_host_match(struct irq_domain *h, struct device_node *node)
186{ 186{
187 /* Exact match, unless ehv_pic node is NULL */ 187 /* Exact match, unless ehv_pic node is NULL */
188 return h->of_node == NULL || h->of_node == node; 188 return h->of_node == NULL || h->of_node == node;
189} 189}
190 190
191static int ehv_pic_host_map(struct irq_host *h, unsigned int virq, 191static int ehv_pic_host_map(struct irq_domain *h, unsigned int virq,
192 irq_hw_number_t hw) 192 irq_hw_number_t hw)
193{ 193{
194 struct ehv_pic *ehv_pic = h->host_data; 194 struct ehv_pic *ehv_pic = h->host_data;
@@ -217,7 +217,7 @@ static int ehv_pic_host_map(struct irq_host *h, unsigned int virq,
217 return 0; 217 return 0;
218} 218}
219 219
220static int ehv_pic_host_xlate(struct irq_host *h, struct device_node *ct, 220static int ehv_pic_host_xlate(struct irq_domain *h, struct device_node *ct,
221 const u32 *intspec, unsigned int intsize, 221 const u32 *intspec, unsigned int intsize,
222 irq_hw_number_t *out_hwirq, unsigned int *out_flags) 222 irq_hw_number_t *out_hwirq, unsigned int *out_flags)
223 223
@@ -248,7 +248,7 @@ static int ehv_pic_host_xlate(struct irq_host *h, struct device_node *ct,
248 return 0; 248 return 0;
249} 249}
250 250
251static struct irq_host_ops ehv_pic_host_ops = { 251static const struct irq_domain_ops ehv_pic_host_ops = {
252 .match = ehv_pic_host_match, 252 .match = ehv_pic_host_match,
253 .map = ehv_pic_host_map, 253 .map = ehv_pic_host_map,
254 .xlate = ehv_pic_host_xlate, 254 .xlate = ehv_pic_host_xlate,
@@ -275,9 +275,8 @@ void __init ehv_pic_init(void)
275 return; 275 return;
276 } 276 }
277 277
278 ehv_pic->irqhost = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, 278 ehv_pic->irqhost = irq_domain_add_linear(np, NR_EHV_PIC_INTS,
279 NR_EHV_PIC_INTS, &ehv_pic_host_ops, 0); 279 &ehv_pic_host_ops, ehv_pic);
280
281 if (!ehv_pic->irqhost) { 280 if (!ehv_pic->irqhost) {
282 of_node_put(np); 281 of_node_put(np);
283 kfree(ehv_pic); 282 kfree(ehv_pic);
@@ -293,7 +292,6 @@ void __init ehv_pic_init(void)
293 of_node_put(np2); 292 of_node_put(np2);
294 } 293 }
295 294
296 ehv_pic->irqhost->host_data = ehv_pic;
297 ehv_pic->hc_irq = ehv_pic_irq_chip; 295 ehv_pic->hc_irq = ehv_pic_irq_chip;
298 ehv_pic->hc_irq.irq_set_affinity = ehv_pic_set_affinity; 296 ehv_pic->hc_irq.irq_set_affinity = ehv_pic_set_affinity;
299 ehv_pic->coreint_flag = coreint_flag; 297 ehv_pic->coreint_flag = coreint_flag;
diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c
index ecb5c1946d22..0c01debe963b 100644
--- a/arch/powerpc/sysdev/fsl_msi.c
+++ b/arch/powerpc/sysdev/fsl_msi.c
@@ -60,7 +60,7 @@ static struct irq_chip fsl_msi_chip = {
60 .name = "FSL-MSI", 60 .name = "FSL-MSI",
61}; 61};
62 62
63static int fsl_msi_host_map(struct irq_host *h, unsigned int virq, 63static int fsl_msi_host_map(struct irq_domain *h, unsigned int virq,
64 irq_hw_number_t hw) 64 irq_hw_number_t hw)
65{ 65{
66 struct fsl_msi *msi_data = h->host_data; 66 struct fsl_msi *msi_data = h->host_data;
@@ -74,7 +74,7 @@ static int fsl_msi_host_map(struct irq_host *h, unsigned int virq,
74 return 0; 74 return 0;
75} 75}
76 76
77static struct irq_host_ops fsl_msi_host_ops = { 77static const struct irq_domain_ops fsl_msi_host_ops = {
78 .map = fsl_msi_host_map, 78 .map = fsl_msi_host_map,
79}; 79};
80 80
@@ -387,8 +387,8 @@ static int __devinit fsl_of_msi_probe(struct platform_device *dev)
387 } 387 }
388 platform_set_drvdata(dev, msi); 388 platform_set_drvdata(dev, msi);
389 389
390 msi->irqhost = irq_alloc_host(dev->dev.of_node, IRQ_HOST_MAP_LINEAR, 390 msi->irqhost = irq_domain_add_linear(dev->dev.of_node,
391 NR_MSI_IRQS, &fsl_msi_host_ops, 0); 391 NR_MSI_IRQS, &fsl_msi_host_ops, msi);
392 392
393 if (msi->irqhost == NULL) { 393 if (msi->irqhost == NULL) {
394 dev_err(&dev->dev, "No memory for MSI irqhost\n"); 394 dev_err(&dev->dev, "No memory for MSI irqhost\n");
@@ -420,8 +420,6 @@ static int __devinit fsl_of_msi_probe(struct platform_device *dev)
420 420
421 msi->feature = features->fsl_pic_ip; 421 msi->feature = features->fsl_pic_ip;
422 422
423 msi->irqhost->host_data = msi;
424
425 /* 423 /*
426 * Remember the phandle, so that we can match with any PCI nodes 424 * Remember the phandle, so that we can match with any PCI nodes
427 * that have an "fsl,msi" property. 425 * that have an "fsl,msi" property.
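fsl_msi keeps a linear domain even though MSI hardware slots are handed out at run time: the driver tracks free slots itself and then asks the domain for a mapping explicitly. The bitmap below is a stand-in for that bookkeeping (the structure and names are illustrative, not the driver's own):

#include <linux/bitmap.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>

#define MY_NR_MSI_IRQS  256

struct my_msi {
        struct irq_domain *irqhost;
        DECLARE_BITMAP(used, MY_NR_MSI_IRQS);
};

/* Pick a free hardware slot, then create (or look up) its virq. */
static unsigned int my_msi_alloc(struct my_msi *msi)
{
        int hwirq = bitmap_find_free_region(msi->used, MY_NR_MSI_IRQS, 0);

        if (hwirq < 0)
                return 0;                       /* NO_IRQ */
        return irq_create_mapping(msi->irqhost, hwirq);
}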
diff --git a/arch/powerpc/sysdev/fsl_msi.h b/arch/powerpc/sysdev/fsl_msi.h
index f6c646a52541..8225f8653f78 100644
--- a/arch/powerpc/sysdev/fsl_msi.h
+++ b/arch/powerpc/sysdev/fsl_msi.h
@@ -26,7 +26,7 @@
26#define FSL_PIC_IP_VMPIC 0x00000003 26#define FSL_PIC_IP_VMPIC 0x00000003
27 27
28struct fsl_msi { 28struct fsl_msi {
29 struct irq_host *irqhost; 29 struct irq_domain *irqhost;
30 30
31 unsigned long cascade_irq; 31 unsigned long cascade_irq;
32 32
diff --git a/arch/powerpc/sysdev/i8259.c b/arch/powerpc/sysdev/i8259.c
index d18bb27e4df9..997df6a7ab5d 100644
--- a/arch/powerpc/sysdev/i8259.c
+++ b/arch/powerpc/sysdev/i8259.c
@@ -25,7 +25,7 @@ static unsigned char cached_8259[2] = { 0xff, 0xff };
25 25
26static DEFINE_RAW_SPINLOCK(i8259_lock); 26static DEFINE_RAW_SPINLOCK(i8259_lock);
27 27
28static struct irq_host *i8259_host; 28static struct irq_domain *i8259_host;
29 29
30/* 30/*
31 * Acknowledge the IRQ using either the PCI host bridge's interrupt 31 * Acknowledge the IRQ using either the PCI host bridge's interrupt
@@ -163,12 +163,12 @@ static struct resource pic_edgectrl_iores = {
163 .flags = IORESOURCE_BUSY, 163 .flags = IORESOURCE_BUSY,
164}; 164};
165 165
166static int i8259_host_match(struct irq_host *h, struct device_node *node) 166static int i8259_host_match(struct irq_domain *h, struct device_node *node)
167{ 167{
168 return h->of_node == NULL || h->of_node == node; 168 return h->of_node == NULL || h->of_node == node;
169} 169}
170 170
171static int i8259_host_map(struct irq_host *h, unsigned int virq, 171static int i8259_host_map(struct irq_domain *h, unsigned int virq,
172 irq_hw_number_t hw) 172 irq_hw_number_t hw)
173{ 173{
174 pr_debug("i8259_host_map(%d, 0x%lx)\n", virq, hw); 174 pr_debug("i8259_host_map(%d, 0x%lx)\n", virq, hw);
@@ -185,7 +185,7 @@ static int i8259_host_map(struct irq_host *h, unsigned int virq,
185 return 0; 185 return 0;
186} 186}
187 187
188static int i8259_host_xlate(struct irq_host *h, struct device_node *ct, 188static int i8259_host_xlate(struct irq_domain *h, struct device_node *ct,
189 const u32 *intspec, unsigned int intsize, 189 const u32 *intspec, unsigned int intsize,
190 irq_hw_number_t *out_hwirq, unsigned int *out_flags) 190 irq_hw_number_t *out_hwirq, unsigned int *out_flags)
191{ 191{
@@ -205,13 +205,13 @@ static int i8259_host_xlate(struct irq_host *h, struct device_node *ct,
205 return 0; 205 return 0;
206} 206}
207 207
208static struct irq_host_ops i8259_host_ops = { 208static struct irq_domain_ops i8259_host_ops = {
209 .match = i8259_host_match, 209 .match = i8259_host_match,
210 .map = i8259_host_map, 210 .map = i8259_host_map,
211 .xlate = i8259_host_xlate, 211 .xlate = i8259_host_xlate,
212}; 212};
213 213
214struct irq_host *i8259_get_host(void) 214struct irq_domain *i8259_get_host(void)
215{ 215{
216 return i8259_host; 216 return i8259_host;
217} 217}
@@ -263,8 +263,7 @@ void i8259_init(struct device_node *node, unsigned long intack_addr)
263 raw_spin_unlock_irqrestore(&i8259_lock, flags); 263 raw_spin_unlock_irqrestore(&i8259_lock, flags);
264 264
265 /* create a legacy host */ 265 /* create a legacy host */
266 i8259_host = irq_alloc_host(node, IRQ_HOST_MAP_LEGACY, 266 i8259_host = irq_domain_add_legacy_isa(node, &i8259_host_ops, NULL);
267 0, &i8259_host_ops, 0);
268 if (i8259_host == NULL) { 267 if (i8259_host == NULL) {
269 printk(KERN_ERR "i8259: failed to allocate irq host !\n"); 268 printk(KERN_ERR "i8259: failed to allocate irq host !\n");
270 return; 269 return;
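
The i8259 hunk above (and the tsi108 one further down) replaces irq_alloc_host(..., IRQ_HOST_MAP_LEGACY, ...) with irq_domain_add_legacy_isa(), which binds the 16 ISA hardware interrupts to the already-reserved Linux irqs 0..15. A minimal sketch of that registration pattern for a hypothetical 8259-style cascade; the foo_pic_* names are illustrative, not taken from the patch:

#include <linux/init.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/of.h>

static struct irq_domain *foo_pic_domain;

static int foo_pic_map(struct irq_domain *d, unsigned int virq,
		       irq_hw_number_t hw)
{
	/* Every source on this PIC is treated as a level interrupt. */
	irq_set_chip_and_handler(virq, &dummy_irq_chip, handle_level_irq);
	return 0;
}

static const struct irq_domain_ops foo_pic_ops = {
	.map	= foo_pic_map,
	.xlate	= irq_domain_xlate_onecell,	/* one cell: the hwirq number */
};

static void __init foo_pic_init(struct device_node *node)
{
	/* hwirqs 0..15 are bound up front to virqs 0..15. */
	foo_pic_domain = irq_domain_add_legacy_isa(node, &foo_pic_ops, NULL);
	if (!foo_pic_domain)
		pr_err("foo_pic: failed to allocate irq domain\n");
}

Because legacy domains pre-associate every hwirq at registration time, the map() callback has already run for all 16 sources before any consumer asks for a translation.
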
diff --git a/arch/powerpc/sysdev/ipic.c b/arch/powerpc/sysdev/ipic.c
index 95da897f05a7..b50f97811c25 100644
--- a/arch/powerpc/sysdev/ipic.c
+++ b/arch/powerpc/sysdev/ipic.c
@@ -672,13 +672,13 @@ static struct irq_chip ipic_edge_irq_chip = {
672 .irq_set_type = ipic_set_irq_type, 672 .irq_set_type = ipic_set_irq_type,
673}; 673};
674 674
675static int ipic_host_match(struct irq_host *h, struct device_node *node) 675static int ipic_host_match(struct irq_domain *h, struct device_node *node)
676{ 676{
677 /* Exact match, unless ipic node is NULL */ 677 /* Exact match, unless ipic node is NULL */
678 return h->of_node == NULL || h->of_node == node; 678 return h->of_node == NULL || h->of_node == node;
679} 679}
680 680
681static int ipic_host_map(struct irq_host *h, unsigned int virq, 681static int ipic_host_map(struct irq_domain *h, unsigned int virq,
682 irq_hw_number_t hw) 682 irq_hw_number_t hw)
683{ 683{
684 struct ipic *ipic = h->host_data; 684 struct ipic *ipic = h->host_data;
@@ -692,26 +692,10 @@ static int ipic_host_map(struct irq_host *h, unsigned int virq,
692 return 0; 692 return 0;
693} 693}
694 694
695static int ipic_host_xlate(struct irq_host *h, struct device_node *ct, 695static struct irq_domain_ops ipic_host_ops = {
696 const u32 *intspec, unsigned int intsize,
697 irq_hw_number_t *out_hwirq, unsigned int *out_flags)
698
699{
700 /* interrupt sense values coming from the device tree equal either
701 * LEVEL_LOW (low assertion) or EDGE_FALLING (high-to-low change)
702 */
703 *out_hwirq = intspec[0];
704 if (intsize > 1)
705 *out_flags = intspec[1];
706 else
707 *out_flags = IRQ_TYPE_NONE;
708 return 0;
709}
710
711static struct irq_host_ops ipic_host_ops = {
712 .match = ipic_host_match, 696 .match = ipic_host_match,
713 .map = ipic_host_map, 697 .map = ipic_host_map,
714 .xlate = ipic_host_xlate, 698 .xlate = irq_domain_xlate_onetwocell,
715}; 699};
716 700
717struct ipic * __init ipic_init(struct device_node *node, unsigned int flags) 701struct ipic * __init ipic_init(struct device_node *node, unsigned int flags)
@@ -728,9 +712,8 @@ struct ipic * __init ipic_init(struct device_node *node, unsigned int flags)
728 if (ipic == NULL) 712 if (ipic == NULL)
729 return NULL; 713 return NULL;
730 714
731 ipic->irqhost = irq_alloc_host(node, IRQ_HOST_MAP_LINEAR, 715 ipic->irqhost = irq_domain_add_linear(node, NR_IPIC_INTS,
732 NR_IPIC_INTS, 716 &ipic_host_ops, ipic);
733 &ipic_host_ops, 0);
734 if (ipic->irqhost == NULL) { 717 if (ipic->irqhost == NULL) {
735 kfree(ipic); 718 kfree(ipic);
736 return NULL; 719 return NULL;
@@ -738,8 +721,6 @@ struct ipic * __init ipic_init(struct device_node *node, unsigned int flags)
738 721
739 ipic->regs = ioremap(res.start, resource_size(&res)); 722 ipic->regs = ioremap(res.start, resource_size(&res));
740 723
741 ipic->irqhost->host_data = ipic;
742
743 /* init hw */ 724 /* init hw */
744 ipic_write(ipic->regs, IPIC_SICNR, 0x0); 725 ipic_write(ipic->regs, IPIC_SICNR, 0x0);
745 726
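
Two simplifications recur throughout the series and both show up in the ipic hunks: the hand-written one-or-two-cell translate callback is dropped in favour of the stock irq_domain_xlate_onetwocell() helper, and the controller's private pointer is passed to irq_domain_add_linear() at creation instead of being written into host_data afterwards. A condensed sketch of the resulting driver shape, using an invented controller called bar_ic:

#include <linux/init.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/slab.h>

struct bar_ic {
	void __iomem		*regs;
	struct irq_domain	*domain;
};

static int bar_ic_map(struct irq_domain *d, unsigned int virq,
		      irq_hw_number_t hw)
{
	struct bar_ic *ic = d->host_data;	/* set at domain creation */

	irq_set_chip_data(virq, ic);
	irq_set_chip_and_handler(virq, &dummy_irq_chip, handle_level_irq);
	return 0;
}

static const struct irq_domain_ops bar_ic_ops = {
	.map	= bar_ic_map,
	/* cell 0 is the hwirq; an optional cell 1 carries IRQ_TYPE_* flags */
	.xlate	= irq_domain_xlate_onetwocell,
};

static struct bar_ic * __init bar_ic_init(struct device_node *node,
					  unsigned int nr_hwirqs)
{
	struct bar_ic *ic = kzalloc(sizeof(*ic), GFP_KERNEL);

	if (!ic)
		return NULL;

	ic->regs = of_iomap(node, 0);
	if (!ic->regs)
		goto err_free;

	ic->domain = irq_domain_add_linear(node, nr_hwirqs, &bar_ic_ops, ic);
	if (!ic->domain)
		goto err_unmap;

	return ic;

err_unmap:
	iounmap(ic->regs);
err_free:
	kfree(ic);
	return NULL;
}
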
diff --git a/arch/powerpc/sysdev/ipic.h b/arch/powerpc/sysdev/ipic.h
index 9391c57b0c51..90031d1282e1 100644
--- a/arch/powerpc/sysdev/ipic.h
+++ b/arch/powerpc/sysdev/ipic.h
@@ -43,7 +43,7 @@ struct ipic {
43 volatile u32 __iomem *regs; 43 volatile u32 __iomem *regs;
44 44
45 /* The remapper for this IPIC */ 45 /* The remapper for this IPIC */
46 struct irq_host *irqhost; 46 struct irq_domain *irqhost;
47}; 47};
48 48
49struct ipic_info { 49struct ipic_info {
diff --git a/arch/powerpc/sysdev/mpc8xx_pic.c b/arch/powerpc/sysdev/mpc8xx_pic.c
index 2ca0a85fcce9..d5f5416be310 100644
--- a/arch/powerpc/sysdev/mpc8xx_pic.c
+++ b/arch/powerpc/sysdev/mpc8xx_pic.c
@@ -17,7 +17,7 @@
17 17
18extern int cpm_get_irq(struct pt_regs *regs); 18extern int cpm_get_irq(struct pt_regs *regs);
19 19
20static struct irq_host *mpc8xx_pic_host; 20static struct irq_domain *mpc8xx_pic_host;
21#define NR_MASK_WORDS ((NR_IRQS + 31) / 32) 21#define NR_MASK_WORDS ((NR_IRQS + 31) / 32)
22static unsigned long ppc_cached_irq_mask[NR_MASK_WORDS]; 22static unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
23static sysconf8xx_t __iomem *siu_reg; 23static sysconf8xx_t __iomem *siu_reg;
@@ -110,7 +110,7 @@ unsigned int mpc8xx_get_irq(void)
110 110
111} 111}
112 112
113static int mpc8xx_pic_host_map(struct irq_host *h, unsigned int virq, 113static int mpc8xx_pic_host_map(struct irq_domain *h, unsigned int virq,
114 irq_hw_number_t hw) 114 irq_hw_number_t hw)
115{ 115{
116 pr_debug("mpc8xx_pic_host_map(%d, 0x%lx)\n", virq, hw); 116 pr_debug("mpc8xx_pic_host_map(%d, 0x%lx)\n", virq, hw);
@@ -121,7 +121,7 @@ static int mpc8xx_pic_host_map(struct irq_host *h, unsigned int virq,
121} 121}
122 122
123 123
124static int mpc8xx_pic_host_xlate(struct irq_host *h, struct device_node *ct, 124static int mpc8xx_pic_host_xlate(struct irq_domain *h, struct device_node *ct,
125 const u32 *intspec, unsigned int intsize, 125 const u32 *intspec, unsigned int intsize,
126 irq_hw_number_t *out_hwirq, unsigned int *out_flags) 126 irq_hw_number_t *out_hwirq, unsigned int *out_flags)
127{ 127{
@@ -142,7 +142,7 @@ static int mpc8xx_pic_host_xlate(struct irq_host *h, struct device_node *ct,
142} 142}
143 143
144 144
145static struct irq_host_ops mpc8xx_pic_host_ops = { 145static struct irq_domain_ops mpc8xx_pic_host_ops = {
146 .map = mpc8xx_pic_host_map, 146 .map = mpc8xx_pic_host_map,
147 .xlate = mpc8xx_pic_host_xlate, 147 .xlate = mpc8xx_pic_host_xlate,
148}; 148};
@@ -171,8 +171,7 @@ int mpc8xx_pic_init(void)
171 goto out; 171 goto out;
172 } 172 }
173 173
174 mpc8xx_pic_host = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, 174 mpc8xx_pic_host = irq_domain_add_linear(np, 64, &mpc8xx_pic_host_ops, NULL);
175 64, &mpc8xx_pic_host_ops, 64);
176 if (mpc8xx_pic_host == NULL) { 175 if (mpc8xx_pic_host == NULL) {
177 printk(KERN_ERR "MPC8xx PIC: failed to allocate irq host!\n"); 176 printk(KERN_ERR "MPC8xx PIC: failed to allocate irq host!\n");
178 ret = -ENOMEM; 177 ret = -ENOMEM;
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index 4e9ccb1015de..c83a512fa175 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -965,13 +965,13 @@ static struct irq_chip mpic_irq_ht_chip = {
965#endif /* CONFIG_MPIC_U3_HT_IRQS */ 965#endif /* CONFIG_MPIC_U3_HT_IRQS */
966 966
967 967
968static int mpic_host_match(struct irq_host *h, struct device_node *node) 968static int mpic_host_match(struct irq_domain *h, struct device_node *node)
969{ 969{
970 /* Exact match, unless mpic node is NULL */ 970 /* Exact match, unless mpic node is NULL */
971 return h->of_node == NULL || h->of_node == node; 971 return h->of_node == NULL || h->of_node == node;
972} 972}
973 973
974static int mpic_host_map(struct irq_host *h, unsigned int virq, 974static int mpic_host_map(struct irq_domain *h, unsigned int virq,
975 irq_hw_number_t hw) 975 irq_hw_number_t hw)
976{ 976{
977 struct mpic *mpic = h->host_data; 977 struct mpic *mpic = h->host_data;
@@ -1041,7 +1041,7 @@ static int mpic_host_map(struct irq_host *h, unsigned int virq,
1041 return 0; 1041 return 0;
1042} 1042}
1043 1043
1044static int mpic_host_xlate(struct irq_host *h, struct device_node *ct, 1044static int mpic_host_xlate(struct irq_domain *h, struct device_node *ct,
1045 const u32 *intspec, unsigned int intsize, 1045 const u32 *intspec, unsigned int intsize,
1046 irq_hw_number_t *out_hwirq, unsigned int *out_flags) 1046 irq_hw_number_t *out_hwirq, unsigned int *out_flags)
1047 1047
@@ -1121,13 +1121,13 @@ static void mpic_cascade(unsigned int irq, struct irq_desc *desc)
1121 BUG_ON(!(mpic->flags & MPIC_SECONDARY)); 1121 BUG_ON(!(mpic->flags & MPIC_SECONDARY));
1122 1122
1123 virq = mpic_get_one_irq(mpic); 1123 virq = mpic_get_one_irq(mpic);
1124 if (virq != NO_IRQ) 1124 if (virq)
1125 generic_handle_irq(virq); 1125 generic_handle_irq(virq);
1126 1126
1127 chip->irq_eoi(&desc->irq_data); 1127 chip->irq_eoi(&desc->irq_data);
1128} 1128}
1129 1129
1130static struct irq_host_ops mpic_host_ops = { 1130static struct irq_domain_ops mpic_host_ops = {
1131 .match = mpic_host_match, 1131 .match = mpic_host_match,
1132 .map = mpic_host_map, 1132 .map = mpic_host_map,
1133 .xlate = mpic_host_xlate, 1133 .xlate = mpic_host_xlate,
@@ -1345,10 +1345,9 @@ struct mpic * __init mpic_alloc(struct device_node *node,
1345 mpic->isu_shift = 1 + __ilog2(mpic->isu_size - 1); 1345 mpic->isu_shift = 1 + __ilog2(mpic->isu_size - 1);
1346 mpic->isu_mask = (1 << mpic->isu_shift) - 1; 1346 mpic->isu_mask = (1 << mpic->isu_shift) - 1;
1347 1347
1348 mpic->irqhost = irq_alloc_host(mpic->node, IRQ_HOST_MAP_LINEAR, 1348 mpic->irqhost = irq_domain_add_linear(mpic->node,
1349 isu_size ? isu_size : mpic->num_sources, 1349 isu_size ? isu_size : mpic->num_sources,
1350 &mpic_host_ops, 1350 &mpic_host_ops, mpic);
1351 flags & MPIC_LARGE_VECTORS ? 2048 : 256);
1352 1351
1353 /* 1352 /*
1354 * FIXME: The code leaks the MPIC object and mappings here; this 1353 * FIXME: The code leaks the MPIC object and mappings here; this
@@ -1357,8 +1356,6 @@ struct mpic * __init mpic_alloc(struct device_node *node,
1357 if (mpic->irqhost == NULL) 1356 if (mpic->irqhost == NULL)
1358 return NULL; 1357 return NULL;
1359 1358
1360 mpic->irqhost->host_data = mpic;
1361
1362 /* Display version */ 1359 /* Display version */
1363 switch (greg_feature & MPIC_GREG_FEATURE_VERSION_MASK) { 1360 switch (greg_feature & MPIC_GREG_FEATURE_VERSION_MASK) {
1364 case 1: 1361 case 1:
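
With the mpic on irq_domain_add_linear(), the old per-host virq allocation hint (flags & MPIC_LARGE_VECTORS ? 2048 : 256) has no replacement: Linux irq numbers are now handed out lazily, the first time a mapping for a given hwirq is requested. A hedged sketch of how a caller obtains such a mapping for a fixed hardware source; the domain pointer and FOO_TIMER_HWIRQ value are assumptions, not taken from the mpic code:

#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>

#define FOO_TIMER_HWIRQ	3	/* hypothetical hardware irq number */

static int foo_request_timer_irq(struct irq_domain *domain,
				 irq_handler_t handler, void *dev)
{
	/* Creates the hwirq->virq mapping on demand, or reuses it. */
	unsigned int virq = irq_create_mapping(domain, FOO_TIMER_HWIRQ);

	if (!virq)		/* 0 means no mapping could be created */
		return -ENXIO;

	return request_irq(virq, handler, 0, "foo-timer", dev);
}
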
diff --git a/arch/powerpc/sysdev/mpic_msi.c b/arch/powerpc/sysdev/mpic_msi.c
index 0f67cd79d481..0622aa91b18a 100644
--- a/arch/powerpc/sysdev/mpic_msi.c
+++ b/arch/powerpc/sysdev/mpic_msi.c
@@ -32,7 +32,7 @@ void mpic_msi_reserve_hwirq(struct mpic *mpic, irq_hw_number_t hwirq)
32static int mpic_msi_reserve_u3_hwirqs(struct mpic *mpic) 32static int mpic_msi_reserve_u3_hwirqs(struct mpic *mpic)
33{ 33{
34 irq_hw_number_t hwirq; 34 irq_hw_number_t hwirq;
35 struct irq_host_ops *ops = mpic->irqhost->ops; 35 const struct irq_domain_ops *ops = mpic->irqhost->ops;
36 struct device_node *np; 36 struct device_node *np;
37 int flags, index, i; 37 int flags, index, i;
38 struct of_irq oirq; 38 struct of_irq oirq;
diff --git a/arch/powerpc/sysdev/mv64x60_pic.c b/arch/powerpc/sysdev/mv64x60_pic.c
index 14d130268e7a..8848e99a83f2 100644
--- a/arch/powerpc/sysdev/mv64x60_pic.c
+++ b/arch/powerpc/sysdev/mv64x60_pic.c
@@ -70,7 +70,7 @@ static u32 mv64x60_cached_low_mask;
70static u32 mv64x60_cached_high_mask = MV64X60_HIGH_GPP_GROUPS; 70static u32 mv64x60_cached_high_mask = MV64X60_HIGH_GPP_GROUPS;
71static u32 mv64x60_cached_gpp_mask; 71static u32 mv64x60_cached_gpp_mask;
72 72
73static struct irq_host *mv64x60_irq_host; 73static struct irq_domain *mv64x60_irq_host;
74 74
75/* 75/*
76 * mv64x60_chip_low functions 76 * mv64x60_chip_low functions
@@ -208,7 +208,7 @@ static struct irq_chip *mv64x60_chips[] = {
208 [MV64x60_LEVEL1_GPP] = &mv64x60_chip_gpp, 208 [MV64x60_LEVEL1_GPP] = &mv64x60_chip_gpp,
209}; 209};
210 210
211static int mv64x60_host_map(struct irq_host *h, unsigned int virq, 211static int mv64x60_host_map(struct irq_domain *h, unsigned int virq,
212 irq_hw_number_t hwirq) 212 irq_hw_number_t hwirq)
213{ 213{
214 int level1; 214 int level1;
@@ -223,7 +223,7 @@ static int mv64x60_host_map(struct irq_host *h, unsigned int virq,
223 return 0; 223 return 0;
224} 224}
225 225
226static struct irq_host_ops mv64x60_host_ops = { 226static struct irq_domain_ops mv64x60_host_ops = {
227 .map = mv64x60_host_map, 227 .map = mv64x60_host_map,
228}; 228};
229 229
@@ -250,9 +250,8 @@ void __init mv64x60_init_irq(void)
250 paddr = of_translate_address(np, reg); 250 paddr = of_translate_address(np, reg);
251 mv64x60_irq_reg_base = ioremap(paddr, reg[1]); 251 mv64x60_irq_reg_base = ioremap(paddr, reg[1]);
252 252
253 mv64x60_irq_host = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, 253 mv64x60_irq_host = irq_domain_add_linear(np, MV64x60_NUM_IRQS,
254 MV64x60_NUM_IRQS, 254 &mv64x60_host_ops, NULL);
255 &mv64x60_host_ops, MV64x60_NUM_IRQS);
256 255
257 spin_lock_irqsave(&mv64x60_lock, flags); 256 spin_lock_irqsave(&mv64x60_lock, flags);
258 out_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_MASK, 257 out_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_MASK,
diff --git a/arch/powerpc/sysdev/qe_lib/qe_ic.c b/arch/powerpc/sysdev/qe_lib/qe_ic.c
index 73034bd203c4..2fba6ef2f95e 100644
--- a/arch/powerpc/sysdev/qe_lib/qe_ic.c
+++ b/arch/powerpc/sysdev/qe_lib/qe_ic.c
@@ -245,13 +245,13 @@ static struct irq_chip qe_ic_irq_chip = {
245 .irq_mask_ack = qe_ic_mask_irq, 245 .irq_mask_ack = qe_ic_mask_irq,
246}; 246};
247 247
248static int qe_ic_host_match(struct irq_host *h, struct device_node *node) 248static int qe_ic_host_match(struct irq_domain *h, struct device_node *node)
249{ 249{
250 /* Exact match, unless qe_ic node is NULL */ 250 /* Exact match, unless qe_ic node is NULL */
251 return h->of_node == NULL || h->of_node == node; 251 return h->of_node == NULL || h->of_node == node;
252} 252}
253 253
254static int qe_ic_host_map(struct irq_host *h, unsigned int virq, 254static int qe_ic_host_map(struct irq_domain *h, unsigned int virq,
255 irq_hw_number_t hw) 255 irq_hw_number_t hw)
256{ 256{
257 struct qe_ic *qe_ic = h->host_data; 257 struct qe_ic *qe_ic = h->host_data;
@@ -272,23 +272,10 @@ static int qe_ic_host_map(struct irq_host *h, unsigned int virq,
272 return 0; 272 return 0;
273} 273}
274 274
275static int qe_ic_host_xlate(struct irq_host *h, struct device_node *ct, 275static struct irq_domain_ops qe_ic_host_ops = {
276 const u32 * intspec, unsigned int intsize,
277 irq_hw_number_t * out_hwirq,
278 unsigned int *out_flags)
279{
280 *out_hwirq = intspec[0];
281 if (intsize > 1)
282 *out_flags = intspec[1];
283 else
284 *out_flags = IRQ_TYPE_NONE;
285 return 0;
286}
287
288static struct irq_host_ops qe_ic_host_ops = {
289 .match = qe_ic_host_match, 276 .match = qe_ic_host_match,
290 .map = qe_ic_host_map, 277 .map = qe_ic_host_map,
291 .xlate = qe_ic_host_xlate, 278 .xlate = irq_domain_xlate_onetwocell,
292}; 279};
293 280
294/* Return an interrupt vector or NO_IRQ if no interrupt is pending. */ 281/* Return an interrupt vector or NO_IRQ if no interrupt is pending. */
@@ -339,8 +326,8 @@ void __init qe_ic_init(struct device_node *node, unsigned int flags,
339 if (qe_ic == NULL) 326 if (qe_ic == NULL)
340 return; 327 return;
341 328
342 qe_ic->irqhost = irq_alloc_host(node, IRQ_HOST_MAP_LINEAR, 329 qe_ic->irqhost = irq_domain_add_linear(node, NR_QE_IC_INTS,
343 NR_QE_IC_INTS, &qe_ic_host_ops, 0); 330 &qe_ic_host_ops, qe_ic);
344 if (qe_ic->irqhost == NULL) { 331 if (qe_ic->irqhost == NULL) {
345 kfree(qe_ic); 332 kfree(qe_ic);
346 return; 333 return;
@@ -348,7 +335,6 @@ void __init qe_ic_init(struct device_node *node, unsigned int flags,
348 335
349 qe_ic->regs = ioremap(res.start, resource_size(&res)); 336 qe_ic->regs = ioremap(res.start, resource_size(&res));
350 337
351 qe_ic->irqhost->host_data = qe_ic;
352 qe_ic->hc_irq = qe_ic_irq_chip; 338 qe_ic->hc_irq = qe_ic_irq_chip;
353 339
354 qe_ic->virq_high = irq_of_parse_and_map(node, 0); 340 qe_ic->virq_high = irq_of_parse_and_map(node, 0);
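
qe_ic is also a good place to see the consumer path that all of these conversions feed: irq_of_parse_and_map() locates the parent domain from the device tree, runs its xlate() callback (irq_domain_xlate_onetwocell here) to recover the hwirq and trigger type, then calls irq_create_mapping(), which invokes map() for a new hwirq. A sketch of a driver on the consuming end; the foo_dev names and handler body are placeholders:

#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/of_irq.h>

static irqreturn_t foo_dev_interrupt(int irq, void *data)
{
	/* device-specific handling would go here */
	return IRQ_HANDLED;
}

static int foo_dev_setup_irq(struct device_node *np, void *dev)
{
	/* index 0 = first entry in the node's "interrupts" property */
	unsigned int virq = irq_of_parse_and_map(np, 0);

	if (!virq)		/* 0 (NO_IRQ) means the parse or map failed */
		return -ENXIO;

	return request_irq(virq, foo_dev_interrupt, 0, "foo-dev", dev);
}
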
diff --git a/arch/powerpc/sysdev/qe_lib/qe_ic.h b/arch/powerpc/sysdev/qe_lib/qe_ic.h
index c1361d005a8a..c327872ed35c 100644
--- a/arch/powerpc/sysdev/qe_lib/qe_ic.h
+++ b/arch/powerpc/sysdev/qe_lib/qe_ic.h
@@ -79,7 +79,7 @@ struct qe_ic {
79 volatile u32 __iomem *regs; 79 volatile u32 __iomem *regs;
80 80
81 /* The remapper for this QEIC */ 81 /* The remapper for this QEIC */
82 struct irq_host *irqhost; 82 struct irq_domain *irqhost;
83 83
84 /* The "linux" controller struct */ 84 /* The "linux" controller struct */
85 struct irq_chip hc_irq; 85 struct irq_chip hc_irq;
diff --git a/arch/powerpc/sysdev/tsi108_pci.c b/arch/powerpc/sysdev/tsi108_pci.c
index 4d18658116e5..188012c58f7f 100644
--- a/arch/powerpc/sysdev/tsi108_pci.c
+++ b/arch/powerpc/sysdev/tsi108_pci.c
@@ -51,7 +51,7 @@
51u32 tsi108_pci_cfg_base; 51u32 tsi108_pci_cfg_base;
52static u32 tsi108_pci_cfg_phys; 52static u32 tsi108_pci_cfg_phys;
53u32 tsi108_csr_vir_base; 53u32 tsi108_csr_vir_base;
54static struct irq_host *pci_irq_host; 54static struct irq_domain *pci_irq_host;
55 55
56extern u32 get_vir_csrbase(void); 56extern u32 get_vir_csrbase(void);
57extern u32 tsi108_read_reg(u32 reg_offset); 57extern u32 tsi108_read_reg(u32 reg_offset);
@@ -376,7 +376,7 @@ static struct irq_chip tsi108_pci_irq = {
376 .irq_unmask = tsi108_pci_irq_unmask, 376 .irq_unmask = tsi108_pci_irq_unmask,
377}; 377};
378 378
379static int pci_irq_host_xlate(struct irq_host *h, struct device_node *ct, 379static int pci_irq_host_xlate(struct irq_domain *h, struct device_node *ct,
380 const u32 *intspec, unsigned int intsize, 380 const u32 *intspec, unsigned int intsize,
381 irq_hw_number_t *out_hwirq, unsigned int *out_flags) 381 irq_hw_number_t *out_hwirq, unsigned int *out_flags)
382{ 382{
@@ -385,7 +385,7 @@ static int pci_irq_host_xlate(struct irq_host *h, struct device_node *ct,
385 return 0; 385 return 0;
386} 386}
387 387
388static int pci_irq_host_map(struct irq_host *h, unsigned int virq, 388static int pci_irq_host_map(struct irq_domain *h, unsigned int virq,
389 irq_hw_number_t hw) 389 irq_hw_number_t hw)
390{ unsigned int irq; 390{ unsigned int irq;
391 DBG("%s(%d, 0x%lx)\n", __func__, virq, hw); 391 DBG("%s(%d, 0x%lx)\n", __func__, virq, hw);
@@ -397,7 +397,7 @@ static int pci_irq_host_map(struct irq_host *h, unsigned int virq,
397 return 0; 397 return 0;
398} 398}
399 399
400static struct irq_host_ops pci_irq_host_ops = { 400static struct irq_domain_ops pci_irq_domain_ops = {
401 .map = pci_irq_host_map, 401 .map = pci_irq_host_map,
402 .xlate = pci_irq_host_xlate, 402 .xlate = pci_irq_host_xlate,
403}; 403};
@@ -419,10 +419,9 @@ void __init tsi108_pci_int_init(struct device_node *node)
419{ 419{
420 DBG("Tsi108_pci_int_init: initializing PCI interrupts\n"); 420 DBG("Tsi108_pci_int_init: initializing PCI interrupts\n");
421 421
422 pci_irq_host = irq_alloc_host(node, IRQ_HOST_MAP_LEGACY, 422 pci_irq_host = irq_domain_add_legacy_isa(node, &pci_irq_domain_ops, NULL);
423 0, &pci_irq_host_ops, 0);
424 if (pci_irq_host == NULL) { 423 if (pci_irq_host == NULL) {
425 printk(KERN_ERR "pci_irq_host: failed to allocate irq host !\n"); 424 printk(KERN_ERR "pci_irq_host: failed to allocate irq domain!\n");
426 return; 425 return;
427 } 426 }
428 427
diff --git a/arch/powerpc/sysdev/uic.c b/arch/powerpc/sysdev/uic.c
index 063c901b1265..92033936a8f7 100644
--- a/arch/powerpc/sysdev/uic.c
+++ b/arch/powerpc/sysdev/uic.c
@@ -49,7 +49,7 @@ struct uic {
49 raw_spinlock_t lock; 49 raw_spinlock_t lock;
50 50
51 /* The remapper for this UIC */ 51 /* The remapper for this UIC */
52 struct irq_host *irqhost; 52 struct irq_domain *irqhost;
53}; 53};
54 54
55static void uic_unmask_irq(struct irq_data *d) 55static void uic_unmask_irq(struct irq_data *d)
@@ -174,7 +174,7 @@ static struct irq_chip uic_irq_chip = {
174 .irq_set_type = uic_set_irq_type, 174 .irq_set_type = uic_set_irq_type,
175}; 175};
176 176
177static int uic_host_map(struct irq_host *h, unsigned int virq, 177static int uic_host_map(struct irq_domain *h, unsigned int virq,
178 irq_hw_number_t hw) 178 irq_hw_number_t hw)
179{ 179{
180 struct uic *uic = h->host_data; 180 struct uic *uic = h->host_data;
@@ -190,21 +190,9 @@ static int uic_host_map(struct irq_host *h, unsigned int virq,
190 return 0; 190 return 0;
191} 191}
192 192
193static int uic_host_xlate(struct irq_host *h, struct device_node *ct, 193static struct irq_domain_ops uic_host_ops = {
194 const u32 *intspec, unsigned int intsize,
195 irq_hw_number_t *out_hwirq, unsigned int *out_type)
196
197{
198 /* UIC intspecs must have 2 cells */
199 BUG_ON(intsize != 2);
200 *out_hwirq = intspec[0];
201 *out_type = intspec[1];
202 return 0;
203}
204
205static struct irq_host_ops uic_host_ops = {
206 .map = uic_host_map, 194 .map = uic_host_map,
207 .xlate = uic_host_xlate, 195 .xlate = irq_domain_xlate_twocell,
208}; 196};
209 197
210void uic_irq_cascade(unsigned int virq, struct irq_desc *desc) 198void uic_irq_cascade(unsigned int virq, struct irq_desc *desc)
@@ -270,13 +258,11 @@ static struct uic * __init uic_init_one(struct device_node *node)
270 } 258 }
271 uic->dcrbase = *dcrreg; 259 uic->dcrbase = *dcrreg;
272 260
273 uic->irqhost = irq_alloc_host(node, IRQ_HOST_MAP_LINEAR, 261 uic->irqhost = irq_domain_add_linear(node, NR_UIC_INTS, &uic_host_ops,
274 NR_UIC_INTS, &uic_host_ops, -1); 262 uic);
275 if (! uic->irqhost) 263 if (! uic->irqhost)
276 return NULL; /* FIXME: panic? */ 264 return NULL; /* FIXME: panic? */
277 265
278 uic->irqhost->host_data = uic;
279
280 /* Start with all interrupts disabled, level and non-critical */ 266 /* Start with all interrupts disabled, level and non-critical */
281 mtdcr(uic->dcrbase + UIC_ER, 0); 267 mtdcr(uic->dcrbase + UIC_ER, 0);
282 mtdcr(uic->dcrbase + UIC_CR, 0); 268 mtdcr(uic->dcrbase + UIC_CR, 0);
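
The UIC previously insisted on exactly two cells with a BUG_ON(intsize != 2); the stock irq_domain_xlate_twocell() helper gives the same decoding with a WARN_ON instead. For reference, an open-coded routine that is roughly equivalent to what the generic helper does (a sketch of the helper's contract, not UIC code):

#include <linux/bug.h>
#include <linux/errno.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/of.h>

/* cell 0: hardware irq number, cell 1: IRQ_TYPE_* sense value */
static int uic_like_xlate(struct irq_domain *d, struct device_node *ctrlr,
			  const u32 *intspec, unsigned int intsize,
			  irq_hw_number_t *out_hwirq, unsigned int *out_type)
{
	if (WARN_ON(intsize < 2))
		return -EINVAL;
	*out_hwirq = intspec[0];
	*out_type  = intspec[1] & IRQ_TYPE_SENSE_MASK;
	return 0;
}
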
diff --git a/arch/powerpc/sysdev/xics/xics-common.c b/arch/powerpc/sysdev/xics/xics-common.c
index d72eda6a4c05..ea5e204e3450 100644
--- a/arch/powerpc/sysdev/xics/xics-common.c
+++ b/arch/powerpc/sysdev/xics/xics-common.c
@@ -40,7 +40,7 @@ unsigned int xics_interrupt_server_size = 8;
40 40
41DEFINE_PER_CPU(struct xics_cppr, xics_cppr); 41DEFINE_PER_CPU(struct xics_cppr, xics_cppr);
42 42
43struct irq_host *xics_host; 43struct irq_domain *xics_host;
44 44
45static LIST_HEAD(ics_list); 45static LIST_HEAD(ics_list);
46 46
@@ -212,16 +212,16 @@ void xics_migrate_irqs_away(void)
212 /* We can't set affinity on ISA interrupts */ 212 /* We can't set affinity on ISA interrupts */
213 if (virq < NUM_ISA_INTERRUPTS) 213 if (virq < NUM_ISA_INTERRUPTS)
214 continue; 214 continue;
215 if (!virq_is_host(virq, xics_host))
216 continue;
217 irq = (unsigned int)virq_to_hw(virq);
218 /* We need to get IPIs still. */
219 if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
220 continue;
221 desc = irq_to_desc(virq); 215 desc = irq_to_desc(virq);
222 /* We only need to migrate enabled IRQS */ 216 /* We only need to migrate enabled IRQS */
223 if (!desc || !desc->action) 217 if (!desc || !desc->action)
224 continue; 218 continue;
219 if (desc->irq_data.domain != xics_host)
220 continue;
221 irq = desc->irq_data.hwirq;
222 /* We need to get IPIs still. */
223 if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
224 continue;
225 chip = irq_desc_get_chip(desc); 225 chip = irq_desc_get_chip(desc);
226 if (!chip || !chip->irq_set_affinity) 226 if (!chip || !chip->irq_set_affinity)
227 continue; 227 continue;
@@ -301,7 +301,7 @@ int xics_get_irq_server(unsigned int virq, const struct cpumask *cpumask,
301} 301}
302#endif /* CONFIG_SMP */ 302#endif /* CONFIG_SMP */
303 303
304static int xics_host_match(struct irq_host *h, struct device_node *node) 304static int xics_host_match(struct irq_domain *h, struct device_node *node)
305{ 305{
306 struct ics *ics; 306 struct ics *ics;
307 307
@@ -323,7 +323,7 @@ static struct irq_chip xics_ipi_chip = {
323 .irq_unmask = xics_ipi_unmask, 323 .irq_unmask = xics_ipi_unmask,
324}; 324};
325 325
326static int xics_host_map(struct irq_host *h, unsigned int virq, 326static int xics_host_map(struct irq_domain *h, unsigned int virq,
327 irq_hw_number_t hw) 327 irq_hw_number_t hw)
328{ 328{
329 struct ics *ics; 329 struct ics *ics;
@@ -351,7 +351,7 @@ static int xics_host_map(struct irq_host *h, unsigned int virq,
351 return -EINVAL; 351 return -EINVAL;
352} 352}
353 353
354static int xics_host_xlate(struct irq_host *h, struct device_node *ct, 354static int xics_host_xlate(struct irq_domain *h, struct device_node *ct,
355 const u32 *intspec, unsigned int intsize, 355 const u32 *intspec, unsigned int intsize,
356 irq_hw_number_t *out_hwirq, unsigned int *out_flags) 356 irq_hw_number_t *out_hwirq, unsigned int *out_flags)
357 357
@@ -366,7 +366,7 @@ static int xics_host_xlate(struct irq_host *h, struct device_node *ct,
366 return 0; 366 return 0;
367} 367}
368 368
369static struct irq_host_ops xics_host_ops = { 369static struct irq_domain_ops xics_host_ops = {
370 .match = xics_host_match, 370 .match = xics_host_match,
371 .map = xics_host_map, 371 .map = xics_host_map,
372 .xlate = xics_host_xlate, 372 .xlate = xics_host_xlate,
@@ -374,8 +374,7 @@ static struct irq_host_ops xics_host_ops = {
374 374
375static void __init xics_init_host(void) 375static void __init xics_init_host(void)
376{ 376{
377 xics_host = irq_alloc_host(NULL, IRQ_HOST_MAP_TREE, 0, &xics_host_ops, 377 xics_host = irq_domain_add_tree(NULL, &xics_host_ops, NULL);
378 XICS_IRQ_SPURIOUS);
379 BUG_ON(xics_host == NULL); 378 BUG_ON(xics_host == NULL);
380 irq_set_default_host(xics_host); 379 irq_set_default_host(xics_host);
381} 380}
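
xics has no linear revmap at all: its interrupt numbers come from firmware and are large and sparse, so the host becomes a radix-tree domain via irq_domain_add_tree(), and the migration loop now recognises xics interrupts through desc->irq_data.domain rather than the old virq_is_host() helper. A sketch of the tree-domain registration pattern; foo_xive_* is a hypothetical controller, not the xics code:

#include <linux/bug.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>

static struct irq_domain *foo_xive_domain;

static int foo_xive_map(struct irq_domain *d, unsigned int virq,
			irq_hw_number_t hw)
{
	irq_set_chip_and_handler(virq, &dummy_irq_chip, handle_fasteoi_irq);
	return 0;
}

static const struct irq_domain_ops foo_xive_ops = {
	.map	= foo_xive_map,
	.xlate	= irq_domain_xlate_onecell,
};

static void __init foo_xive_init(void)
{
	/*
	 * A tree (radix) domain suits sparse, very large hwirq spaces where
	 * a linear revmap array would waste memory.  A NULL of_node plus
	 * irq_set_default_host() makes this the fallback domain when an
	 * interrupt specifier's controller cannot be matched.
	 */
	foo_xive_domain = irq_domain_add_tree(NULL, &foo_xive_ops, NULL);
	BUG_ON(!foo_xive_domain);
	irq_set_default_host(foo_xive_domain);
}
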
diff --git a/arch/powerpc/sysdev/xilinx_intc.c b/arch/powerpc/sysdev/xilinx_intc.c
index 6183799754af..8d73c3c0bee6 100644
--- a/arch/powerpc/sysdev/xilinx_intc.c
+++ b/arch/powerpc/sysdev/xilinx_intc.c
@@ -40,7 +40,7 @@
40#define XINTC_IVR 24 /* Interrupt Vector */ 40#define XINTC_IVR 24 /* Interrupt Vector */
41#define XINTC_MER 28 /* Master Enable */ 41#define XINTC_MER 28 /* Master Enable */
42 42
43static struct irq_host *master_irqhost; 43static struct irq_domain *master_irqhost;
44 44
45#define XILINX_INTC_MAXIRQS (32) 45#define XILINX_INTC_MAXIRQS (32)
46 46
@@ -141,7 +141,7 @@ static struct irq_chip xilinx_intc_edge_irqchip = {
141/** 141/**
142 * xilinx_intc_xlate - translate virq# from device tree interrupts property 142 * xilinx_intc_xlate - translate virq# from device tree interrupts property
143 */ 143 */
144static int xilinx_intc_xlate(struct irq_host *h, struct device_node *ct, 144static int xilinx_intc_xlate(struct irq_domain *h, struct device_node *ct,
145 const u32 *intspec, unsigned int intsize, 145 const u32 *intspec, unsigned int intsize,
146 irq_hw_number_t *out_hwirq, 146 irq_hw_number_t *out_hwirq,
147 unsigned int *out_flags) 147 unsigned int *out_flags)
@@ -161,7 +161,7 @@ static int xilinx_intc_xlate(struct irq_host *h, struct device_node *ct,
161 161
162 return 0; 162 return 0;
163} 163}
164static int xilinx_intc_map(struct irq_host *h, unsigned int virq, 164static int xilinx_intc_map(struct irq_domain *h, unsigned int virq,
165 irq_hw_number_t irq) 165 irq_hw_number_t irq)
166{ 166{
167 irq_set_chip_data(virq, h->host_data); 167 irq_set_chip_data(virq, h->host_data);
@@ -177,15 +177,15 @@ static int xilinx_intc_map(struct irq_host *h, unsigned int virq,
177 return 0; 177 return 0;
178} 178}
179 179
180static struct irq_host_ops xilinx_intc_ops = { 180static struct irq_domain_ops xilinx_intc_ops = {
181 .map = xilinx_intc_map, 181 .map = xilinx_intc_map,
182 .xlate = xilinx_intc_xlate, 182 .xlate = xilinx_intc_xlate,
183}; 183};
184 184
185struct irq_host * __init 185struct irq_domain * __init
186xilinx_intc_init(struct device_node *np) 186xilinx_intc_init(struct device_node *np)
187{ 187{
188 struct irq_host * irq; 188 struct irq_domain * irq;
189 void * regs; 189 void * regs;
190 190
191 /* Find and map the intc registers */ 191 /* Find and map the intc registers */
@@ -200,12 +200,11 @@ xilinx_intc_init(struct device_node *np)
200 out_be32(regs + XINTC_IAR, ~(u32) 0); /* Acknowledge pending irqs */ 200 out_be32(regs + XINTC_IAR, ~(u32) 0); /* Acknowledge pending irqs */
201 out_be32(regs + XINTC_MER, 0x3UL); /* Turn on the Master Enable. */ 201 out_be32(regs + XINTC_MER, 0x3UL); /* Turn on the Master Enable. */
202 202
203 /* Allocate and initialize an irq_host structure. */ 203 /* Allocate and initialize an irq_domain structure. */
204 irq = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, XILINX_INTC_MAXIRQS, 204 irq = irq_domain_add_linear(np, XILINX_INTC_MAXIRQS, &xilinx_intc_ops,
205 &xilinx_intc_ops, -1); 205 regs);
206 if (!irq) 206 if (!irq)
207 panic(__FILE__ ": Cannot allocate IRQ host\n"); 207 panic(__FILE__ ": Cannot allocate IRQ host\n");
208 irq->host_data = regs;
209 208
210 return irq; 209 return irq;
211} 210}
diff --git a/arch/sparc/include/asm/prom.h b/arch/sparc/include/asm/prom.h
index edd3d3cde460..c28765110706 100644
--- a/arch/sparc/include/asm/prom.h
+++ b/arch/sparc/include/asm/prom.h
@@ -22,6 +22,7 @@
22#include <linux/proc_fs.h> 22#include <linux/proc_fs.h>
23#include <linux/mutex.h> 23#include <linux/mutex.h>
24#include <linux/atomic.h> 24#include <linux/atomic.h>
25#include <linux/irqdomain.h>
25 26
26#define OF_ROOT_NODE_ADDR_CELLS_DEFAULT 2 27#define OF_ROOT_NODE_ADDR_CELLS_DEFAULT 2
27#define OF_ROOT_NODE_SIZE_CELLS_DEFAULT 1 28#define OF_ROOT_NODE_SIZE_CELLS_DEFAULT 1
@@ -55,15 +56,6 @@ struct resource;
55extern void __iomem *of_ioremap(struct resource *res, unsigned long offset, unsigned long size, char *name); 56extern void __iomem *of_ioremap(struct resource *res, unsigned long offset, unsigned long size, char *name);
56extern void of_iounmap(struct resource *res, void __iomem *base, unsigned long size); 57extern void of_iounmap(struct resource *res, void __iomem *base, unsigned long size);
57 58
58/* These routines are here to provide compatibility with how powerpc
59 * handles IRQ mapping for OF device nodes. We precompute and permanently
60 * register them in the platform_device objects, whereas powerpc computes them
61 * on request.
62 */
63static inline void irq_dispose_mapping(unsigned int virq)
64{
65}
66
67extern struct device_node *of_console_device; 59extern struct device_node *of_console_device;
68extern char *of_console_path; 60extern char *of_console_path;
69extern char *of_console_options; 61extern char *of_console_options;
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 5bed94e189fa..e0829a6a4660 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -398,6 +398,7 @@ config X86_INTEL_CE
398 select X86_REBOOTFIXUPS 398 select X86_REBOOTFIXUPS
399 select OF 399 select OF
400 select OF_EARLY_FLATTREE 400 select OF_EARLY_FLATTREE
401 select IRQ_DOMAIN
401 ---help--- 402 ---help---
402 Select for the Intel CE media processor (CE4100) SOC. 403 Select for the Intel CE media processor (CE4100) SOC.
403 This option compiles in support for the CE4100 SOC for settop 404 This option compiles in support for the CE4100 SOC for settop
@@ -2076,6 +2077,7 @@ config OLPC
2076 select GPIOLIB 2077 select GPIOLIB
2077 select OF 2078 select OF
2078 select OF_PROMTREE 2079 select OF_PROMTREE
2080 select IRQ_DOMAIN
2079 ---help--- 2081 ---help---
2080 Add support for detecting the unique features of the OLPC 2082 Add support for detecting the unique features of the OLPC
2081 XO hardware. 2083 XO hardware.
diff --git a/arch/x86/include/asm/irq_controller.h b/arch/x86/include/asm/irq_controller.h
deleted file mode 100644
index 423bbbddf36d..000000000000
--- a/arch/x86/include/asm/irq_controller.h
+++ /dev/null
@@ -1,12 +0,0 @@
1#ifndef __IRQ_CONTROLLER__
2#define __IRQ_CONTROLLER__
3
4struct irq_domain {
5 int (*xlate)(struct irq_domain *h, const u32 *intspec, u32 intsize,
6 u32 *out_hwirq, u32 *out_type);
7 void *priv;
8 struct device_node *controller;
9 struct list_head l;
10};
11
12#endif
diff --git a/arch/x86/include/asm/prom.h b/arch/x86/include/asm/prom.h
index 644dd885f05a..60bef663609a 100644
--- a/arch/x86/include/asm/prom.h
+++ b/arch/x86/include/asm/prom.h
@@ -21,7 +21,6 @@
21#include <asm/irq.h> 21#include <asm/irq.h>
22#include <linux/atomic.h> 22#include <linux/atomic.h>
23#include <asm/setup.h> 23#include <asm/setup.h>
24#include <asm/irq_controller.h>
25 24
26#ifdef CONFIG_OF 25#ifdef CONFIG_OF
27extern int of_ioapic; 26extern int of_ioapic;
@@ -43,15 +42,6 @@ extern char cmd_line[COMMAND_LINE_SIZE];
43#define pci_address_to_pio pci_address_to_pio 42#define pci_address_to_pio pci_address_to_pio
44unsigned long pci_address_to_pio(phys_addr_t addr); 43unsigned long pci_address_to_pio(phys_addr_t addr);
45 44
46/**
47 * irq_dispose_mapping - Unmap an interrupt
48 * @virq: linux virq number of the interrupt to unmap
49 *
50 * FIXME: We really should implement proper virq handling like power,
51 * but that's going to be major surgery.
52 */
53static inline void irq_dispose_mapping(unsigned int virq) { }
54
55#define HAVE_ARCH_DEVTREE_FIXUPS 45#define HAVE_ARCH_DEVTREE_FIXUPS
56 46
57#endif /* __ASSEMBLY__ */ 47#endif /* __ASSEMBLY__ */
diff --git a/arch/x86/kernel/devicetree.c b/arch/x86/kernel/devicetree.c
index 52821799a702..3ae2ced4a874 100644
--- a/arch/x86/kernel/devicetree.c
+++ b/arch/x86/kernel/devicetree.c
@@ -4,6 +4,7 @@
4#include <linux/bootmem.h> 4#include <linux/bootmem.h>
5#include <linux/export.h> 5#include <linux/export.h>
6#include <linux/io.h> 6#include <linux/io.h>
7#include <linux/irqdomain.h>
7#include <linux/interrupt.h> 8#include <linux/interrupt.h>
8#include <linux/list.h> 9#include <linux/list.h>
9#include <linux/of.h> 10#include <linux/of.h>
@@ -17,64 +18,14 @@
17#include <linux/initrd.h> 18#include <linux/initrd.h>
18 19
19#include <asm/hpet.h> 20#include <asm/hpet.h>
20#include <asm/irq_controller.h>
21#include <asm/apic.h> 21#include <asm/apic.h>
22#include <asm/pci_x86.h> 22#include <asm/pci_x86.h>
23 23
24__initdata u64 initial_dtb; 24__initdata u64 initial_dtb;
25char __initdata cmd_line[COMMAND_LINE_SIZE]; 25char __initdata cmd_line[COMMAND_LINE_SIZE];
26static LIST_HEAD(irq_domains);
27static DEFINE_RAW_SPINLOCK(big_irq_lock);
28 26
29int __initdata of_ioapic; 27int __initdata of_ioapic;
30 28
31#ifdef CONFIG_X86_IO_APIC
32static void add_interrupt_host(struct irq_domain *ih)
33{
34 unsigned long flags;
35
36 raw_spin_lock_irqsave(&big_irq_lock, flags);
37 list_add(&ih->l, &irq_domains);
38 raw_spin_unlock_irqrestore(&big_irq_lock, flags);
39}
40#endif
41
42static struct irq_domain *get_ih_from_node(struct device_node *controller)
43{
44 struct irq_domain *ih, *found = NULL;
45 unsigned long flags;
46
47 raw_spin_lock_irqsave(&big_irq_lock, flags);
48 list_for_each_entry(ih, &irq_domains, l) {
49 if (ih->controller == controller) {
50 found = ih;
51 break;
52 }
53 }
54 raw_spin_unlock_irqrestore(&big_irq_lock, flags);
55 return found;
56}
57
58unsigned int irq_create_of_mapping(struct device_node *controller,
59 const u32 *intspec, unsigned int intsize)
60{
61 struct irq_domain *ih;
62 u32 virq, type;
63 int ret;
64
65 ih = get_ih_from_node(controller);
66 if (!ih)
67 return 0;
68 ret = ih->xlate(ih, intspec, intsize, &virq, &type);
69 if (ret)
70 return 0;
71 if (type == IRQ_TYPE_NONE)
72 return virq;
73 irq_set_irq_type(virq, type);
74 return virq;
75}
76EXPORT_SYMBOL_GPL(irq_create_of_mapping);
77
78unsigned long pci_address_to_pio(phys_addr_t address) 29unsigned long pci_address_to_pio(phys_addr_t address)
79{ 30{
80 /* 31 /*
@@ -354,36 +305,43 @@ static struct of_ioapic_type of_ioapic_type[] =
354 }, 305 },
355}; 306};
356 307
357static int ioapic_xlate(struct irq_domain *id, const u32 *intspec, u32 intsize, 308static int ioapic_xlate(struct irq_domain *domain,
358 u32 *out_hwirq, u32 *out_type) 309 struct device_node *controller,
310 const u32 *intspec, u32 intsize,
311 irq_hw_number_t *out_hwirq, u32 *out_type)
359{ 312{
360 struct mp_ioapic_gsi *gsi_cfg;
361 struct io_apic_irq_attr attr; 313 struct io_apic_irq_attr attr;
362 struct of_ioapic_type *it; 314 struct of_ioapic_type *it;
363 u32 line, idx, type; 315 u32 line, idx;
316 int rc;
364 317
365 if (intsize < 2) 318 if (WARN_ON(intsize < 2))
366 return -EINVAL; 319 return -EINVAL;
367 320
368 line = *intspec; 321 line = intspec[0];
369 idx = (u32) id->priv;
370 gsi_cfg = mp_ioapic_gsi_routing(idx);
371 *out_hwirq = line + gsi_cfg->gsi_base;
372
373 intspec++;
374 type = *intspec;
375 322
376 if (type >= ARRAY_SIZE(of_ioapic_type)) 323 if (intspec[1] >= ARRAY_SIZE(of_ioapic_type))
377 return -EINVAL; 324 return -EINVAL;
378 325
379 it = of_ioapic_type + type; 326 it = &of_ioapic_type[intspec[1]];
380 *out_type = it->out_type;
381 327
328 idx = (u32) domain->host_data;
382 set_io_apic_irq_attr(&attr, idx, line, it->trigger, it->polarity); 329 set_io_apic_irq_attr(&attr, idx, line, it->trigger, it->polarity);
383 330
384 return io_apic_setup_irq_pin_once(*out_hwirq, cpu_to_node(0), &attr); 331 rc = io_apic_setup_irq_pin_once(irq_find_mapping(domain, line),
332 cpu_to_node(0), &attr);
333 if (rc)
334 return rc;
335
336 *out_hwirq = line;
337 *out_type = it->out_type;
338 return 0;
385} 339}
386 340
341const struct irq_domain_ops ioapic_irq_domain_ops = {
342 .xlate = ioapic_xlate,
343};
344
387static void __init ioapic_add_ofnode(struct device_node *np) 345static void __init ioapic_add_ofnode(struct device_node *np)
388{ 346{
389 struct resource r; 347 struct resource r;
@@ -399,13 +357,14 @@ static void __init ioapic_add_ofnode(struct device_node *np)
399 for (i = 0; i < nr_ioapics; i++) { 357 for (i = 0; i < nr_ioapics; i++) {
400 if (r.start == mpc_ioapic_addr(i)) { 358 if (r.start == mpc_ioapic_addr(i)) {
401 struct irq_domain *id; 359 struct irq_domain *id;
360 struct mp_ioapic_gsi *gsi_cfg;
361
362 gsi_cfg = mp_ioapic_gsi_routing(i);
402 363
403 id = kzalloc(sizeof(*id), GFP_KERNEL); 364 id = irq_domain_add_legacy(np, 32, gsi_cfg->gsi_base, 0,
365 &ioapic_irq_domain_ops,
366 (void*)i);
404 BUG_ON(!id); 367 BUG_ON(!id);
405 id->controller = np;
406 id->xlate = ioapic_xlate;
407 id->priv = (void *)i;
408 add_interrupt_host(id);
409 return; 368 return;
410 } 369 }
411 } 370 }
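
The x86 side drops its private irq_domain lookalike entirely and registers each I/O APIC as a legacy domain whose first_irq is the APIC's GSI base, so hwirq N lands on the fixed virq gsi_base + N and ioapic_xlate() can recover it with irq_find_mapping(). A sketch of just that registration call, with the parameter names treated as assumptions:

#include <linux/init.h>
#include <linux/irqdomain.h>
#include <linux/of.h>

static struct irq_domain * __init
foo_register_ioapic_domain(struct device_node *np, unsigned int gsi_base,
			   const struct irq_domain_ops *ops, void *priv)
{
	/* 32 pins; hwirq 0 maps to virq gsi_base, hwirq 1 to gsi_base + 1... */
	return irq_domain_add_legacy(np, 32, gsi_base /* first_irq */,
				     0 /* first_hwirq */, ops, priv);
}
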
diff --git a/drivers/gpio/gpio-mpc8xxx.c b/drivers/gpio/gpio-mpc8xxx.c
index 5cd04b65c556..e6568c19c939 100644
--- a/drivers/gpio/gpio-mpc8xxx.c
+++ b/drivers/gpio/gpio-mpc8xxx.c
@@ -37,7 +37,7 @@ struct mpc8xxx_gpio_chip {
37 * open drain mode safely 37 * open drain mode safely
38 */ 38 */
39 u32 data; 39 u32 data;
40 struct irq_host *irq; 40 struct irq_domain *irq;
41 void *of_dev_id_data; 41 void *of_dev_id_data;
42}; 42};
43 43
@@ -281,7 +281,7 @@ static struct irq_chip mpc8xxx_irq_chip = {
281 .irq_set_type = mpc8xxx_irq_set_type, 281 .irq_set_type = mpc8xxx_irq_set_type,
282}; 282};
283 283
284static int mpc8xxx_gpio_irq_map(struct irq_host *h, unsigned int virq, 284static int mpc8xxx_gpio_irq_map(struct irq_domain *h, unsigned int virq,
285 irq_hw_number_t hw) 285 irq_hw_number_t hw)
286{ 286{
287 struct mpc8xxx_gpio_chip *mpc8xxx_gc = h->host_data; 287 struct mpc8xxx_gpio_chip *mpc8xxx_gc = h->host_data;
@@ -296,24 +296,9 @@ static int mpc8xxx_gpio_irq_map(struct irq_host *h, unsigned int virq,
296 return 0; 296 return 0;
297} 297}
298 298
299static int mpc8xxx_gpio_irq_xlate(struct irq_host *h, struct device_node *ct, 299static struct irq_domain_ops mpc8xxx_gpio_irq_ops = {
300 const u32 *intspec, unsigned int intsize,
301 irq_hw_number_t *out_hwirq,
302 unsigned int *out_flags)
303
304{
305 /* interrupt sense values coming from the device tree equal either
306 * EDGE_FALLING or EDGE_BOTH
307 */
308 *out_hwirq = intspec[0];
309 *out_flags = intspec[1];
310
311 return 0;
312}
313
314static struct irq_host_ops mpc8xxx_gpio_irq_ops = {
315 .map = mpc8xxx_gpio_irq_map, 300 .map = mpc8xxx_gpio_irq_map,
316 .xlate = mpc8xxx_gpio_irq_xlate, 301 .xlate = irq_domain_xlate_twocell,
317}; 302};
318 303
319static struct of_device_id mpc8xxx_gpio_ids[] __initdata = { 304static struct of_device_id mpc8xxx_gpio_ids[] __initdata = {
@@ -364,9 +349,8 @@ static void __init mpc8xxx_add_controller(struct device_node *np)
364 if (hwirq == NO_IRQ) 349 if (hwirq == NO_IRQ)
365 goto skip_irq; 350 goto skip_irq;
366 351
367 mpc8xxx_gc->irq = 352 mpc8xxx_gc->irq = irq_domain_add_linear(np, MPC8XXX_GPIO_PINS,
368 irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, MPC8XXX_GPIO_PINS, 353 &mpc8xxx_gpio_irq_ops, mpc8xxx_gc);
369 &mpc8xxx_gpio_irq_ops, MPC8XXX_GPIO_PINS);
370 if (!mpc8xxx_gc->irq) 354 if (!mpc8xxx_gc->irq)
371 goto skip_irq; 355 goto skip_irq;
372 356
@@ -374,8 +358,6 @@ static void __init mpc8xxx_add_controller(struct device_node *np)
374 if (id) 358 if (id)
375 mpc8xxx_gc->of_dev_id_data = id->data; 359 mpc8xxx_gc->of_dev_id_data = id->data;
376 360
377 mpc8xxx_gc->irq->host_data = mpc8xxx_gc;
378
379 /* ack and mask all irqs */ 361 /* ack and mask all irqs */
380 out_be32(mm_gc->regs + GPIO_IER, 0xffffffff); 362 out_be32(mm_gc->regs + GPIO_IER, 0xffffffff);
381 out_be32(mm_gc->regs + GPIO_IMR, 0); 363 out_be32(mm_gc->regs + GPIO_IMR, 0);
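
The gpio-mpc8xxx hunks cover registration; the payoff of the linear domain is in the bank's cascade handler, where each pending pin is translated back to its virq with irq_linear_revmap() before being dispatched. A hedged sketch of that demux loop; the register offsets and foo_gpio naming are invented, and ack/eoi of the upstream interrupt is omitted:

#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/irqdomain.h>

#define FOO_GPIO_IER	0x08	/* hypothetical interrupt enable register  */
#define FOO_GPIO_IPR	0x0c	/* hypothetical interrupt pending register */

struct foo_gpio {
	void __iomem		*regs;
	struct irq_domain	*domain;	/* 32-pin linear domain */
};

/* Chained handler installed on the bank's upstream interrupt. */
static void foo_gpio_irq_cascade(unsigned int irq, struct irq_desc *desc)
{
	struct foo_gpio *gc = irq_desc_get_handler_data(desc);
	u32 pending = in_be32(gc->regs + FOO_GPIO_IPR) &
		      in_be32(gc->regs + FOO_GPIO_IER);

	while (pending) {
		int pin = __ffs(pending);
		/* On a linear domain this lookup is a simple table read. */
		unsigned int virq = irq_linear_revmap(gc->domain, pin);

		if (virq)
			generic_handle_irq(virq);
		pending &= ~(1u << pin);
	}
}
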
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index f147395bac9a..1489c3540f96 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -201,6 +201,7 @@ config MENELAUS
201config TWL4030_CORE 201config TWL4030_CORE
202 bool "Texas Instruments TWL4030/TWL5030/TWL6030/TPS659x0 Support" 202 bool "Texas Instruments TWL4030/TWL5030/TWL6030/TPS659x0 Support"
203 depends on I2C=y && GENERIC_HARDIRQS 203 depends on I2C=y && GENERIC_HARDIRQS
204 select IRQ_DOMAIN
204 help 205 help
205 Say yes here if you have TWL4030 / TWL6030 family chip on your board. 206 Say yes here if you have TWL4030 / TWL6030 family chip on your board.
206 This core driver provides register access and IRQ handling 207 This core driver provides register access and IRQ handling
diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c
index 8ce3959c6919..4970d43952db 100644
--- a/drivers/mfd/twl-core.c
+++ b/drivers/mfd/twl-core.c
@@ -149,7 +149,7 @@
149 149
150#define TWL_MODULE_LAST TWL4030_MODULE_LAST 150#define TWL_MODULE_LAST TWL4030_MODULE_LAST
151 151
152#define TWL4030_NR_IRQS 8 152#define TWL4030_NR_IRQS 34 /* core:8, power:8, gpio: 18 */
153#define TWL6030_NR_IRQS 20 153#define TWL6030_NR_IRQS 20
154 154
155/* Base Address defns for twl4030_map[] */ 155/* Base Address defns for twl4030_map[] */
@@ -263,10 +263,6 @@ struct twl_client {
263 263
264static struct twl_client twl_modules[TWL_NUM_SLAVES]; 264static struct twl_client twl_modules[TWL_NUM_SLAVES];
265 265
266#ifdef CONFIG_IRQ_DOMAIN
267static struct irq_domain domain;
268#endif
269
270/* mapping the module id to slave id and base address */ 266/* mapping the module id to slave id and base address */
271struct twl_mapping { 267struct twl_mapping {
272 unsigned char sid; /* Slave ID */ 268 unsigned char sid; /* Slave ID */
@@ -1227,14 +1223,8 @@ twl_probe(struct i2c_client *client, const struct i2c_device_id *id)
1227 1223
1228 pdata->irq_base = status; 1224 pdata->irq_base = status;
1229 pdata->irq_end = pdata->irq_base + nr_irqs; 1225 pdata->irq_end = pdata->irq_base + nr_irqs;
1230 1226 irq_domain_add_legacy(node, nr_irqs, pdata->irq_base, 0,
1231#ifdef CONFIG_IRQ_DOMAIN 1227 &irq_domain_simple_ops, NULL);
1232 domain.irq_base = pdata->irq_base;
1233 domain.nr_irq = nr_irqs;
1234 domain.of_node = of_node_get(node);
1235 domain.ops = &irq_domain_simple_ops;
1236 irq_domain_add(&domain);
1237#endif
1238 1228
1239 if (i2c_check_functionality(client->adapter, I2C_FUNC_I2C) == 0) { 1229 if (i2c_check_functionality(client->adapter, I2C_FUNC_I2C) == 0) {
1240 dev_dbg(&client->dev, "can't talk I2C?\n"); 1230 dev_dbg(&client->dev, "can't talk I2C?\n");
@@ -1315,11 +1305,10 @@ twl_probe(struct i2c_client *client, const struct i2c_device_id *id)
1315 twl_i2c_write_u8(TWL4030_MODULE_INTBR, temp, REG_GPPUPDCTR1); 1305 twl_i2c_write_u8(TWL4030_MODULE_INTBR, temp, REG_GPPUPDCTR1);
1316 } 1306 }
1317 1307
1318#ifdef CONFIG_OF_DEVICE 1308 status = -ENODEV;
1319 if (node) 1309 if (node)
1320 status = of_platform_populate(node, NULL, NULL, &client->dev); 1310 status = of_platform_populate(node, NULL, NULL, &client->dev);
1321 else 1311 if (status)
1322#endif
1323 status = add_children(pdata, id->driver_data); 1312 status = add_children(pdata, id->driver_data);
1324 1313
1325fail: 1314fail:
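
twl-core shows the lightest-weight use of the new code: the driver already owns a contiguous block of nr_irqs Linux irqs starting at pdata->irq_base, so a legacy domain with the stock irq_domain_simple_ops should be enough to let child device-tree nodes translate their interrupt specifiers into irq_base + hwirq. The generic shape of that call, with names made up for the sketch:

#include <linux/irqdomain.h>
#include <linux/of.h>

static struct irq_domain *foo_mfd_register_irq_domain(struct device_node *node,
						       unsigned int irq_base,
						       unsigned int nr_irqs)
{
	/*
	 * hwirqs 0..nr_irqs-1 map onto the pre-allocated virqs
	 * irq_base..irq_base+nr_irqs-1; irq_domain_simple_ops supplies a
	 * default xlate callback.
	 */
	return irq_domain_add_legacy(node, nr_irqs, irq_base, 0,
				     &irq_domain_simple_ops, NULL);
}
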
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
index 50e8e5e74465..7189adf54bd1 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/phy/mdio-gpio.c
@@ -255,13 +255,13 @@ static inline int __init mdio_ofgpio_init(void)
255 return platform_driver_register(&mdio_ofgpio_driver); 255 return platform_driver_register(&mdio_ofgpio_driver);
256} 256}
257 257
258static inline void __exit mdio_ofgpio_exit(void) 258static inline void mdio_ofgpio_exit(void)
259{ 259{
260 platform_driver_unregister(&mdio_ofgpio_driver); 260 platform_driver_unregister(&mdio_ofgpio_driver);
261} 261}
262#else 262#else
263static inline int __init mdio_ofgpio_init(void) { return 0; } 263static inline int __init mdio_ofgpio_init(void) { return 0; }
264static inline void __exit mdio_ofgpio_exit(void) { } 264static inline void mdio_ofgpio_exit(void) { }
265#endif /* CONFIG_OF_GPIO */ 265#endif /* CONFIG_OF_GPIO */
266 266
267static struct platform_driver mdio_gpio_driver = { 267static struct platform_driver mdio_gpio_driver = {
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index 63b3ec48c203..20fbebd49db3 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -55,7 +55,7 @@ EXPORT_SYMBOL(of_find_device_by_node);
55#include <asm/dcr.h> 55#include <asm/dcr.h>
56#endif 56#endif
57 57
58#if !defined(CONFIG_SPARC) 58#ifdef CONFIG_OF_ADDRESS
59/* 59/*
60 * The following routines scan a subtree and registers a device for 60 * The following routines scan a subtree and registers a device for
61 * each applicable node. 61 * each applicable node.
@@ -462,4 +462,4 @@ int of_platform_populate(struct device_node *root,
462 of_node_put(root); 462 of_node_put(root);
463 return rc; 463 return rc;
464} 464}
465#endif /* !CONFIG_SPARC */ 465#endif /* CONFIG_OF_ADDRESS */
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index bd4272b61a14..ead4a4215797 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -9,99 +9,182 @@
9 * representation into a hardware irq number that can be mapped back to a 9 * representation into a hardware irq number that can be mapped back to a
10 * Linux irq number without any extra platform support code. 10 * Linux irq number without any extra platform support code.
11 * 11 *
12 * irq_domain is expected to be embedded in an interrupt controller's private 12 * Interrupt controller "domain" data structure. This could be defined as a
13 * data structure. 13 * irq domain controller. That is, it handles the mapping between hardware
14 * and virtual interrupt numbers for a given interrupt domain. The domain
15 * structure is generally created by the PIC code for a given PIC instance
16 * (though a domain can cover more than one PIC if they have a flat number
17 * model). It's the domain callbacks that are responsible for setting the
18 * irq_chip on a given irq_desc after it's been mapped.
19 *
20 * The host code and data structures are agnostic to whether or not
21 * we use an open firmware device-tree. We do have references to struct
22 * device_node in two places: in irq_find_host() to find the host matching
23 * a given interrupt controller node, and of course as an argument to its
24 * counterpart domain->ops->match() callback. However, those are treated as
25 * generic pointers by the core and the fact that it's actually a device-node
26 * pointer is purely a convention between callers and implementation. This
27 * code could thus be used on other architectures by replacing those two
28 * by some sort of arch-specific void * "token" used to identify interrupt
29 * controllers.
14 */ 30 */
31
15#ifndef _LINUX_IRQDOMAIN_H 32#ifndef _LINUX_IRQDOMAIN_H
16#define _LINUX_IRQDOMAIN_H 33#define _LINUX_IRQDOMAIN_H
17 34
18#include <linux/irq.h> 35#include <linux/types.h>
19#include <linux/mod_devicetable.h> 36#include <linux/radix-tree.h>
20 37
21#ifdef CONFIG_IRQ_DOMAIN
22struct device_node; 38struct device_node;
23struct irq_domain; 39struct irq_domain;
40struct of_device_id;
41
42/* Number of irqs reserved for a legacy isa controller */
43#define NUM_ISA_INTERRUPTS 16
44
45/* This type is the placeholder for a hardware interrupt number. It has to
46 * be big enough to enclose whatever representation is used by a given
47 * platform.
48 */
49typedef unsigned long irq_hw_number_t;
24 50
25/** 51/**
26 * struct irq_domain_ops - Methods for irq_domain objects 52 * struct irq_domain_ops - Methods for irq_domain objects
27 * @to_irq: (optional) given a local hardware irq number, return the linux 53 * @match: Match an interrupt controller device node to a host, returns
28 * irq number. If to_irq is not implemented, then the irq_domain 54 * 1 on a match
29 * will use this translation: irq = (domain->irq_base + hwirq) 55 * @map: Create or update a mapping between a virtual irq number and a hw
30 * @dt_translate: Given a device tree node and interrupt specifier, decode 56 * irq number. This is called only once for a given mapping.
31 * the hardware irq number and linux irq type value. 57 * @unmap: Dispose of such a mapping
58 * @xlate: Given a device tree node and interrupt specifier, decode
59 * the hardware irq number and linux irq type value.
60 *
61 * Functions below are provided by the driver and called whenever a new mapping
62 * is created or an old mapping is disposed. The driver can then proceed to
63 * whatever internal data structures management is required. It also needs
64 * to setup the irq_desc when returning from map().
32 */ 65 */
33struct irq_domain_ops { 66struct irq_domain_ops {
34 unsigned int (*to_irq)(struct irq_domain *d, unsigned long hwirq); 67 int (*match)(struct irq_domain *d, struct device_node *node);
35 68 int (*map)(struct irq_domain *d, unsigned int virq, irq_hw_number_t hw);
36#ifdef CONFIG_OF 69 void (*unmap)(struct irq_domain *d, unsigned int virq);
37 int (*dt_translate)(struct irq_domain *d, struct device_node *node, 70 int (*xlate)(struct irq_domain *d, struct device_node *node,
38 const u32 *intspec, unsigned int intsize, 71 const u32 *intspec, unsigned int intsize,
39 unsigned long *out_hwirq, unsigned int *out_type); 72 unsigned long *out_hwirq, unsigned int *out_type);
40#endif /* CONFIG_OF */
41}; 73};
42 74
43/** 75/**
44 * struct irq_domain - Hardware interrupt number translation object 76 * struct irq_domain - Hardware interrupt number translation object
45 * @list: Element in global irq_domain list. 77 * @link: Element in global irq_domain list.
78 * @revmap_type: Method used for reverse mapping hwirq numbers to linux irq. This
79 * will be one of the IRQ_DOMAIN_MAP_* values.
80 * @revmap_data: Revmap method specific data.
81 * @ops: pointer to irq_domain methods
82 * @host_data: private data pointer for use by owner. Not touched by irq_domain
83 * core code.
46 * @irq_base: Start of irq_desc range assigned to the irq_domain. The creator 84 * @irq_base: Start of irq_desc range assigned to the irq_domain. The creator
47 * of the irq_domain is responsible for allocating the array of 85 * of the irq_domain is responsible for allocating the array of
48 * irq_desc structures. 86 * irq_desc structures.
49 * @nr_irq: Number of irqs managed by the irq domain 87 * @nr_irq: Number of irqs managed by the irq domain
50 * @hwirq_base: Starting number for hwirqs managed by the irq domain 88 * @hwirq_base: Starting number for hwirqs managed by the irq domain
51 * @ops: pointer to irq_domain methods
52 * @priv: private data pointer for use by owner. Not touched by irq_domain
53 * core code.
54 * @of_node: (optional) Pointer to device tree nodes associated with the 89 * @of_node: (optional) Pointer to device tree nodes associated with the
55 * irq_domain. Used when decoding device tree interrupt specifiers. 90 * irq_domain. Used when decoding device tree interrupt specifiers.
56 */ 91 */
57struct irq_domain { 92struct irq_domain {
58 struct list_head list; 93 struct list_head link;
59 unsigned int irq_base; 94
60 unsigned int nr_irq; 95 /* type of reverse mapping_technique */
61 unsigned int hwirq_base; 96 unsigned int revmap_type;
97 union {
98 struct {
99 unsigned int size;
100 unsigned int first_irq;
101 irq_hw_number_t first_hwirq;
102 } legacy;
103 struct {
104 unsigned int size;
105 unsigned int *revmap;
106 } linear;
107 struct radix_tree_root tree;
108 } revmap_data;
62 const struct irq_domain_ops *ops; 109 const struct irq_domain_ops *ops;
63 void *priv; 110 void *host_data;
111 irq_hw_number_t inval_irq;
112
113 /* Optional device node pointer */
64 struct device_node *of_node; 114 struct device_node *of_node;
65}; 115};
66 116
67/** 117#ifdef CONFIG_IRQ_DOMAIN
68 * irq_domain_to_irq() - Translate from a hardware irq to a linux irq number 118struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
69 * 119 unsigned int size,
70 * Returns the linux irq number associated with a hardware irq. By default, 120 unsigned int first_irq,
71 * the mapping is irq == domain->irq_base + hwirq, but this mapping can 121 irq_hw_number_t first_hwirq,
72 * be overridden if the irq_domain implements a .to_irq() hook. 122 const struct irq_domain_ops *ops,
73 */ 123 void *host_data);
74static inline unsigned int irq_domain_to_irq(struct irq_domain *d, 124struct irq_domain *irq_domain_add_linear(struct device_node *of_node,
75 unsigned long hwirq) 125 unsigned int size,
126 const struct irq_domain_ops *ops,
127 void *host_data);
128struct irq_domain *irq_domain_add_nomap(struct device_node *of_node,
129 const struct irq_domain_ops *ops,
130 void *host_data);
131struct irq_domain *irq_domain_add_tree(struct device_node *of_node,
132 const struct irq_domain_ops *ops,
133 void *host_data);
134
135extern struct irq_domain *irq_find_host(struct device_node *node);
136extern void irq_set_default_host(struct irq_domain *host);
137extern void irq_set_virq_count(unsigned int count);
138
139static inline struct irq_domain *irq_domain_add_legacy_isa(
140 struct device_node *of_node,
141 const struct irq_domain_ops *ops,
142 void *host_data)
76{ 143{
77 if (d->ops->to_irq) 144 return irq_domain_add_legacy(of_node, NUM_ISA_INTERRUPTS, 0, 0, ops,
78 return d->ops->to_irq(d, hwirq); 145 host_data);
79 if (WARN_ON(hwirq < d->hwirq_base))
80 return 0;
81 return d->irq_base + hwirq - d->hwirq_base;
82} 146}
147extern struct irq_domain *irq_find_host(struct device_node *node);
148extern void irq_set_default_host(struct irq_domain *host);
149extern void irq_set_virq_count(unsigned int count);
83 150
84#define irq_domain_for_each_hwirq(d, hw) \
85 for (hw = d->hwirq_base; hw < d->hwirq_base + d->nr_irq; hw++)
86 151
87#define irq_domain_for_each_irq(d, hw, irq) \ 152extern unsigned int irq_create_mapping(struct irq_domain *host,
88 for (hw = d->hwirq_base, irq = irq_domain_to_irq(d, hw); \ 153 irq_hw_number_t hwirq);
89 hw < d->hwirq_base + d->nr_irq; \ 154extern void irq_dispose_mapping(unsigned int virq);
90 hw++, irq = irq_domain_to_irq(d, hw)) 155extern unsigned int irq_find_mapping(struct irq_domain *host,
156 irq_hw_number_t hwirq);
157extern unsigned int irq_create_direct_mapping(struct irq_domain *host);
158extern void irq_radix_revmap_insert(struct irq_domain *host, unsigned int virq,
159 irq_hw_number_t hwirq);
160extern unsigned int irq_radix_revmap_lookup(struct irq_domain *host,
161 irq_hw_number_t hwirq);
162extern unsigned int irq_linear_revmap(struct irq_domain *host,
163 irq_hw_number_t hwirq);
91 164
92extern void irq_domain_add(struct irq_domain *domain); 165extern const struct irq_domain_ops irq_domain_simple_ops;
93extern void irq_domain_del(struct irq_domain *domain);
94 166
95extern struct irq_domain_ops irq_domain_simple_ops; 167/* stock xlate functions */
96#endif /* CONFIG_IRQ_DOMAIN */ 168int irq_domain_xlate_onecell(struct irq_domain *d, struct device_node *ctrlr,
169 const u32 *intspec, unsigned int intsize,
170 irq_hw_number_t *out_hwirq, unsigned int *out_type);
171int irq_domain_xlate_twocell(struct irq_domain *d, struct device_node *ctrlr,
172 const u32 *intspec, unsigned int intsize,
173 irq_hw_number_t *out_hwirq, unsigned int *out_type);
174int irq_domain_xlate_onetwocell(struct irq_domain *d, struct device_node *ctrlr,
175 const u32 *intspec, unsigned int intsize,
176 irq_hw_number_t *out_hwirq, unsigned int *out_type);
97 177
98#if defined(CONFIG_IRQ_DOMAIN) && defined(CONFIG_OF_IRQ) 178#if defined(CONFIG_OF_IRQ)
99extern void irq_domain_add_simple(struct device_node *controller, int irq_base);
100extern void irq_domain_generate_simple(const struct of_device_id *match, 179extern void irq_domain_generate_simple(const struct of_device_id *match,
101 u64 phys_base, unsigned int irq_start); 180 u64 phys_base, unsigned int irq_start);
102#else /* CONFIG_IRQ_DOMAIN && CONFIG_OF_IRQ */ 181#else /* CONFIG_OF_IRQ */
103static inline void irq_domain_generate_simple(const struct of_device_id *match, 182static inline void irq_domain_generate_simple(const struct of_device_id *match,
104 u64 phys_base, unsigned int irq_start) { } 183 u64 phys_base, unsigned int irq_start) { }
105#endif /* CONFIG_IRQ_DOMAIN && CONFIG_OF_IRQ */ 184#endif /* !CONFIG_OF_IRQ */
185
186#else /* CONFIG_IRQ_DOMAIN */
187static inline void irq_dispose_mapping(unsigned int virq) { }
188#endif /* !CONFIG_IRQ_DOMAIN */
106 189
107#endif /* _LINUX_IRQDOMAIN_H */ 190#endif /* _LINUX_IRQDOMAIN_H */
diff --git a/include/linux/of_address.h b/include/linux/of_address.h
index 3118623c2c1f..01b925ad8d78 100644
--- a/include/linux/of_address.h
+++ b/include/linux/of_address.h
@@ -4,6 +4,7 @@
4#include <linux/errno.h> 4#include <linux/errno.h>
5#include <linux/of.h> 5#include <linux/of.h>
6 6
7#ifdef CONFIG_OF_ADDRESS
7extern u64 of_translate_address(struct device_node *np, const __be32 *addr); 8extern u64 of_translate_address(struct device_node *np, const __be32 *addr);
8extern int of_address_to_resource(struct device_node *dev, int index, 9extern int of_address_to_resource(struct device_node *dev, int index,
9 struct resource *r); 10 struct resource *r);
@@ -25,12 +26,37 @@ static inline unsigned long pci_address_to_pio(phys_addr_t addr) { return -1; }
25#define pci_address_to_pio pci_address_to_pio 26#define pci_address_to_pio pci_address_to_pio
26#endif 27#endif
27 28
28#ifdef CONFIG_PCI 29#else /* CONFIG_OF_ADDRESS */
30static inline int of_address_to_resource(struct device_node *dev, int index,
31 struct resource *r)
32{
33 return -EINVAL;
34}
35static inline struct device_node *of_find_matching_node_by_address(
36 struct device_node *from,
37 const struct of_device_id *matches,
38 u64 base_address)
39{
40 return NULL;
41}
42static inline void __iomem *of_iomap(struct device_node *device, int index)
43{
44 return NULL;
45}
46static inline const u32 *of_get_address(struct device_node *dev, int index,
47 u64 *size, unsigned int *flags)
48{
49 return NULL;
50}
51#endif /* CONFIG_OF_ADDRESS */
52
53
54#if defined(CONFIG_OF_ADDRESS) && defined(CONFIG_PCI)
29extern const __be32 *of_get_pci_address(struct device_node *dev, int bar_no, 55extern const __be32 *of_get_pci_address(struct device_node *dev, int bar_no,
30 u64 *size, unsigned int *flags); 56 u64 *size, unsigned int *flags);
31extern int of_pci_address_to_resource(struct device_node *dev, int bar, 57extern int of_pci_address_to_resource(struct device_node *dev, int bar,
32 struct resource *r); 58 struct resource *r);
33#else /* CONFIG_PCI */ 59#else /* CONFIG_OF_ADDRESS && CONFIG_PCI */
34static inline int of_pci_address_to_resource(struct device_node *dev, int bar, 60static inline int of_pci_address_to_resource(struct device_node *dev, int bar,
35 struct resource *r) 61 struct resource *r)
36{ 62{
@@ -42,8 +68,7 @@ static inline const __be32 *of_get_pci_address(struct device_node *dev,
42{ 68{
43 return NULL; 69 return NULL;
44} 70}
45#endif /* CONFIG_PCI */ 71#endif /* CONFIG_OF_ADDRESS && CONFIG_PCI */
46
47 72
48#endif /* __OF_ADDRESS_H */ 73#endif /* __OF_ADDRESS_H */
49 74
diff --git a/include/linux/of_irq.h b/include/linux/of_irq.h
index d0307eed20c9..d229ad3edee0 100644
--- a/include/linux/of_irq.h
+++ b/include/linux/of_irq.h
@@ -6,6 +6,7 @@ struct of_irq;
6#include <linux/types.h> 6#include <linux/types.h>
7#include <linux/errno.h> 7#include <linux/errno.h>
8#include <linux/irq.h> 8#include <linux/irq.h>
9#include <linux/irqdomain.h>
9#include <linux/ioport.h> 10#include <linux/ioport.h>
10#include <linux/of.h> 11#include <linux/of.h>
11 12
@@ -65,9 +66,6 @@ extern int of_irq_map_one(struct device_node *device, int index,
65extern unsigned int irq_create_of_mapping(struct device_node *controller, 66extern unsigned int irq_create_of_mapping(struct device_node *controller,
66 const u32 *intspec, 67 const u32 *intspec,
67 unsigned int intsize); 68 unsigned int intsize);
68#ifdef CONFIG_IRQ_DOMAIN
69extern void irq_dispose_mapping(unsigned int irq);
70#endif
71extern int of_irq_to_resource(struct device_node *dev, int index, 69extern int of_irq_to_resource(struct device_node *dev, int index,
72 struct resource *r); 70 struct resource *r);
73extern int of_irq_count(struct device_node *dev); 71extern int of_irq_count(struct device_node *dev);
diff --git a/include/linux/of_platform.h b/include/linux/of_platform.h
index 040ce2f6e8de..242fa3563e2e 100644
--- a/include/linux/of_platform.h
+++ b/include/linux/of_platform.h
@@ -81,7 +81,7 @@ extern struct platform_device *of_device_alloc(struct device_node *np,
81 struct device *parent); 81 struct device *parent);
82extern struct platform_device *of_find_device_by_node(struct device_node *np); 82extern struct platform_device *of_find_device_by_node(struct device_node *np);
83 83
84#if !defined(CONFIG_SPARC) /* SPARC has its own device registration method */ 84#ifdef CONFIG_OF_ADDRESS /* device reg helpers depend on OF_ADDRESS */
85/* Platform devices and busses creation */ 85/* Platform devices and busses creation */
86extern struct platform_device *of_platform_device_create(struct device_node *np, 86extern struct platform_device *of_platform_device_create(struct device_node *np,
87 const char *bus_id, 87 const char *bus_id,
@@ -94,7 +94,15 @@ extern int of_platform_populate(struct device_node *root,
94 const struct of_device_id *matches, 94 const struct of_device_id *matches,
95 const struct of_dev_auxdata *lookup, 95 const struct of_dev_auxdata *lookup,
96 struct device *parent); 96 struct device *parent);
97#endif /* !CONFIG_SPARC */ 97#else
98static inline int of_platform_populate(struct device_node *root,
99 const struct of_device_id *matches,
100 const struct of_dev_auxdata *lookup,
101 struct device *parent)
102{
103 return -ENODEV;
104}
105#endif /* !CONFIG_OF_ADDRESS */
98 106
99#endif /* CONFIG_OF_DEVICE */ 107#endif /* CONFIG_OF_DEVICE */
100 108
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index 1f9e26526b69..af48e59bc2ff 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -1,189 +1,793 @@
1#include <linux/debugfs.h>
2#include <linux/hardirq.h>
3#include <linux/interrupt.h>
1#include <linux/irq.h> 4#include <linux/irq.h>
5#include <linux/irqdesc.h>
2#include <linux/irqdomain.h> 6#include <linux/irqdomain.h>
3#include <linux/module.h> 7#include <linux/module.h>
4#include <linux/mutex.h> 8#include <linux/mutex.h>
5#include <linux/of.h> 9#include <linux/of.h>
6#include <linux/of_address.h> 10#include <linux/of_address.h>
11#include <linux/seq_file.h>
7#include <linux/slab.h> 12#include <linux/slab.h>
13#include <linux/smp.h>
14#include <linux/fs.h>
15
16#define IRQ_DOMAIN_MAP_LEGACY 0 /* driver allocated fixed range of irqs.
17 * ie. legacy 8259, gets irqs 1..15 */
18#define IRQ_DOMAIN_MAP_NOMAP 1 /* no fast reverse mapping */
19#define IRQ_DOMAIN_MAP_LINEAR 2 /* linear map of interrupts */
20#define IRQ_DOMAIN_MAP_TREE 3 /* radix tree */
8 21
9static LIST_HEAD(irq_domain_list); 22static LIST_HEAD(irq_domain_list);
10static DEFINE_MUTEX(irq_domain_mutex); 23static DEFINE_MUTEX(irq_domain_mutex);
11 24
25static DEFINE_MUTEX(revmap_trees_mutex);
26static unsigned int irq_virq_count = NR_IRQS;
27static struct irq_domain *irq_default_domain;
28
12/** 29/**
13 * irq_domain_add() - Register an irq_domain 30 * irq_domain_alloc() - Allocate a new irq_domain data structure
14 * @domain: ptr to initialized irq_domain structure 31 * @of_node: optional device-tree node of the interrupt controller
32 * @revmap_type: type of reverse mapping to use
33 * @ops: map/unmap domain callbacks
34 * @host_data: Controller private data pointer
15 * 35 *
16 * Registers an irq_domain structure. The irq_domain must at a minimum be 36 * Allocates and initializes an irq_domain structure. The caller is expected to
17 * initialized with an ops structure pointer, and either a ->to_irq hook or 37 * register the allocated irq_domain with irq_domain_add(). Returns a pointer
18 * a valid irq_base value. Everything else is optional. 38 * to the IRQ domain, or NULL on failure.
19 */ 39 */
20void irq_domain_add(struct irq_domain *domain) 40static struct irq_domain *irq_domain_alloc(struct device_node *of_node,
41 unsigned int revmap_type,
42 const struct irq_domain_ops *ops,
43 void *host_data)
21{ 44{
22 struct irq_data *d; 45 struct irq_domain *domain;
23 int hwirq, irq;
24 46
25 /* 47 domain = kzalloc(sizeof(*domain), GFP_KERNEL);
26 * This assumes that the irq_domain owner has already allocated 48 if (WARN_ON(!domain))
27 * the irq_descs. This block will be removed when support for dynamic 49 return NULL;
28 * allocation of irq_descs is added to irq_domain. 50
29 */ 51 /* Fill structure */
30 irq_domain_for_each_irq(domain, hwirq, irq) { 52 domain->revmap_type = revmap_type;
31 d = irq_get_irq_data(irq); 53 domain->ops = ops;
32 if (!d) { 54 domain->host_data = host_data;
33 WARN(1, "error: assigning domain to non existant irq_desc"); 55 domain->of_node = of_node_get(of_node);
34 return; 56
35 } 57 return domain;
36 if (d->domain) { 58}
37 /* things are broken; just report, don't clean up */ 59
38 WARN(1, "error: irq_desc already assigned to a domain"); 60static void irq_domain_add(struct irq_domain *domain)
39 return; 61{
62 mutex_lock(&irq_domain_mutex);
63 list_add(&domain->link, &irq_domain_list);
64 mutex_unlock(&irq_domain_mutex);
65 pr_debug("irq: Allocated domain of type %d @0x%p\n",
66 domain->revmap_type, domain);
67}
68
69static unsigned int irq_domain_legacy_revmap(struct irq_domain *domain,
70 irq_hw_number_t hwirq)
71{
72 irq_hw_number_t first_hwirq = domain->revmap_data.legacy.first_hwirq;
73 int size = domain->revmap_data.legacy.size;
74
75 if (WARN_ON(hwirq < first_hwirq || hwirq >= first_hwirq + size))
76 return 0;
77 return hwirq - first_hwirq + domain->revmap_data.legacy.first_irq;
78}
79
80/**
81 * irq_domain_add_legacy() - Allocate and register a legacy revmap irq_domain.
82 * @of_node: pointer to interrupt controller's device tree node.
83 * @size: total number of irqs in legacy mapping
84 * @first_irq: first Linux irq number of the irq block assigned to the domain
85 * @first_hwirq: first hwirq number to use for the translation. Should normally
86 * be '0', but a positive integer can be used if the effective
87 * hwirq numbering does not begin at zero.
88 * @ops: map/unmap domain callbacks
89 * @host_data: Controller private data pointer
90 *
91 * Note: the map() callback will be called before this function returns
92 * for all legacy interrupts except 0 (which is always the invalid irq for
93 * a legacy controller).
94 */
95struct irq_domain *irq_domain_add_legacy(struct device_node *of_node,
96 unsigned int size,
97 unsigned int first_irq,
98 irq_hw_number_t first_hwirq,
99 const struct irq_domain_ops *ops,
100 void *host_data)
101{
102 struct irq_domain *domain;
103 unsigned int i;
104
105 domain = irq_domain_alloc(of_node, IRQ_DOMAIN_MAP_LEGACY, ops, host_data);
106 if (!domain)
107 return NULL;
108
109 domain->revmap_data.legacy.first_irq = first_irq;
110 domain->revmap_data.legacy.first_hwirq = first_hwirq;
111 domain->revmap_data.legacy.size = size;
112
113 mutex_lock(&irq_domain_mutex);
114 /* Verify that all the irqs are available */
115 for (i = 0; i < size; i++) {
116 int irq = first_irq + i;
117 struct irq_data *irq_data = irq_get_irq_data(irq);
118
119 if (WARN_ON(!irq_data || irq_data->domain)) {
120 mutex_unlock(&irq_domain_mutex);
121 of_node_put(domain->of_node);
122 kfree(domain);
123 return NULL;
40 } 124 }
41 d->domain = domain;
42 d->hwirq = hwirq;
43 } 125 }
44 126
45 mutex_lock(&irq_domain_mutex); 127 /* Claim all of the irqs before registering a legacy domain */
46 list_add(&domain->list, &irq_domain_list); 128 for (i = 0; i < size; i++) {
129 struct irq_data *irq_data = irq_get_irq_data(first_irq + i);
130 irq_data->hwirq = first_hwirq + i;
131 irq_data->domain = domain;
132 }
47 mutex_unlock(&irq_domain_mutex); 133 mutex_unlock(&irq_domain_mutex);
134
135 for (i = 0; i < size; i++) {
136 int irq = first_irq + i;
137 int hwirq = first_hwirq + i;
138
139 /* IRQ0 gets ignored */
140 if (!irq)
141 continue;
142
143 /* Legacy flags are left to default at this point,
144 * one can then use irq_create_mapping() to
145 * explicitly change them
146 */
147 ops->map(domain, irq, hwirq);
148
149 /* Clear norequest flags */
150 irq_clear_status_flags(irq, IRQ_NOREQUEST);
151 }
152
153 irq_domain_add(domain);
154 return domain;
155}
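As a hedged illustration (the foo_pic names are hypothetical, and foo_irq_domain_ops refers to the ops sketched in the irqdomain.h excerpt above), an i8259-style controller whose sixteen Linux irqs were allocated at early boot could register a legacy domain like this; the 1:1 irq/hwirq layout matches what irq_domain_legacy_revmap() expects.

#include <linux/init.h>
#include <linux/irqdomain.h>
#include <linux/of.h>

static struct irq_domain *foo_pic_domain;

static void __init foo_pic_init(struct device_node *node)
{
        /* 16 hwirqs wired to pre-allocated Linux irqs 0..15; Linux irq 0
         * is skipped by irq_domain_add_legacy() itself. */
        foo_pic_domain = irq_domain_add_legacy(node, 16,
                                               0,      /* first_irq   */
                                               0,      /* first_hwirq */
                                               &foo_irq_domain_ops, NULL);
        WARN_ON(!foo_pic_domain);
}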
156
157/**
158 * irq_domain_add_linear() - Allocate and register a linear revmap irq_domain.
159 * @of_node: pointer to interrupt controller's device tree node.
160 * @ops: map/unmap domain callbacks
161 * @host_data: Controller private data pointer
162 */
163struct irq_domain *irq_domain_add_linear(struct device_node *of_node,
164 unsigned int size,
165 const struct irq_domain_ops *ops,
166 void *host_data)
167{
168 struct irq_domain *domain;
169 unsigned int *revmap;
170
171 revmap = kzalloc(sizeof(*revmap) * size, GFP_KERNEL);
172 if (WARN_ON(!revmap))
173 return NULL;
174
175 domain = irq_domain_alloc(of_node, IRQ_DOMAIN_MAP_LINEAR, ops, host_data);
176 if (!domain) {
177 kfree(revmap);
178 return NULL;
179 }
180 domain->revmap_data.linear.size = size;
181 domain->revmap_data.linear.revmap = revmap;
182 irq_domain_add(domain);
183 return domain;
184}
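For the common device-tree case, a hedged sketch of the linear variant (the foo_intc names and the 32-line count are assumptions, and foo_irq_domain_ops again refers to the earlier ops sketch): the revmap array is sized up front and filled lazily as mappings are created.

#include <linux/init.h>
#include <linux/irqdomain.h>
#include <linux/of.h>

#define FOO_INTC_NR_IRQS        32      /* hypothetical number of input lines */

static struct irq_domain *foo_intc_domain;

static int __init foo_intc_of_init(struct device_node *node,
                                   struct device_node *parent)
{
        foo_intc_domain = irq_domain_add_linear(node, FOO_INTC_NR_IRQS,
                                                &foo_irq_domain_ops, NULL);
        return foo_intc_domain ? 0 : -ENOMEM;
}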
185
186struct irq_domain *irq_domain_add_nomap(struct device_node *of_node,
187 const struct irq_domain_ops *ops,
188 void *host_data)
189{
190 struct irq_domain *domain = irq_domain_alloc(of_node,
191 IRQ_DOMAIN_MAP_NOMAP, ops, host_data);
192 if (domain)
193 irq_domain_add(domain);
194 return domain;
195}
196
197/**
198 * irq_domain_add_tree() - Allocate and register a radix tree revmap irq_domain.
199 * @of_node: pointer to interrupt controller's device tree node.
200 * @ops: map/unmap domain callbacks
201 *
202 * Note: The radix tree will be allocated later during boot automatically
203 * (the reverse mapping will use the slow path until that happens).
204 */
205struct irq_domain *irq_domain_add_tree(struct device_node *of_node,
206 const struct irq_domain_ops *ops,
207 void *host_data)
208{
209 struct irq_domain *domain = irq_domain_alloc(of_node,
210 IRQ_DOMAIN_MAP_TREE, ops, host_data);
211 if (domain) {
212 INIT_RADIX_TREE(&domain->revmap_data.tree, GFP_KERNEL);
213 irq_domain_add(domain);
214 }
215 return domain;
48} 216}
49 217
50/** 218/**
51 * irq_domain_del() - Unregister an irq_domain 219 * irq_find_host() - Locates a domain for a given device node
52 * @domain: ptr to registered irq_domain. 220 * @node: device-tree node of the interrupt controller
53 */ 221 */
54void irq_domain_del(struct irq_domain *domain) 222struct irq_domain *irq_find_host(struct device_node *node)
55{ 223{
56 struct irq_data *d; 224 struct irq_domain *h, *found = NULL;
57 int hwirq, irq; 225 int rc;
58 226
227 /* We might want to match the legacy controller last since
228 * it might potentially be set to match all interrupts in
229 * the absence of a device node. This hasn't been a problem
230 * so far though...
231 */
59 mutex_lock(&irq_domain_mutex); 232 mutex_lock(&irq_domain_mutex);
60 list_del(&domain->list); 233 list_for_each_entry(h, &irq_domain_list, link) {
234 if (h->ops->match)
235 rc = h->ops->match(h, node);
236 else
237 rc = (h->of_node != NULL) && (h->of_node == node);
238
239 if (rc) {
240 found = h;
241 break;
242 }
243 }
61 mutex_unlock(&irq_domain_mutex); 244 mutex_unlock(&irq_domain_mutex);
245 return found;
246}
247EXPORT_SYMBOL_GPL(irq_find_host);
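A hedged sketch of the lookup path (foo_map_through_parent is hypothetical): given a device node, the interrupt parent's domain can be located with irq_find_host() and then used to create mappings.

#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_irq.h>

static unsigned int foo_map_through_parent(struct device_node *child,
                                           irq_hw_number_t hwirq)
{
        struct device_node *parent = of_irq_find_parent(child);
        struct irq_domain *domain = parent ? irq_find_host(parent) : NULL;

        of_node_put(parent);
        if (!domain)
                return 0;
        return irq_create_mapping(domain, hwirq);
}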
248
249/**
250 * irq_set_default_host() - Set a "default" irq domain
251 * @domain: default domain pointer
252 *
253 * For convenience, it's possible to set a "default" domain that will be used
254 * whenever NULL is passed to irq_create_mapping(). It makes life easier for
255 * platforms that want to manipulate a few hard coded interrupt numbers that
256 * aren't properly represented in the device-tree.
257 */
258void irq_set_default_host(struct irq_domain *domain)
259{
260 pr_debug("irq: Default domain set to @0x%p\n", domain);
261
262 irq_default_domain = domain;
263}
264
265/**
266 * irq_set_virq_count() - Set the maximum number of linux irqs
267 * @count: number of linux irqs, capped with NR_IRQS
268 *
269 * This is mainly for use by platforms like iSeries that want to program
270 * the virtual irq number in the controller to avoid the reverse mapping
271 */
272void irq_set_virq_count(unsigned int count)
273{
274 pr_debug("irq: Trying to set virq count to %d\n", count);
62 275
63 /* Clear the irq_domain assignments */ 276 BUG_ON(count < NUM_ISA_INTERRUPTS);
64 irq_domain_for_each_irq(domain, hwirq, irq) { 277 if (count < NR_IRQS)
65 d = irq_get_irq_data(irq); 278 irq_virq_count = count;
66 d->domain = NULL; 279}
280
281static int irq_setup_virq(struct irq_domain *domain, unsigned int virq,
282 irq_hw_number_t hwirq)
283{
284 struct irq_data *irq_data = irq_get_irq_data(virq);
285
286 irq_data->hwirq = hwirq;
287 irq_data->domain = domain;
288 if (domain->ops->map(domain, virq, hwirq)) {
289 pr_debug("irq: -> mapping failed, freeing\n");
290 irq_data->domain = NULL;
291 irq_data->hwirq = 0;
292 return -1;
67 } 293 }
294
295 irq_clear_status_flags(virq, IRQ_NOREQUEST);
296
297 return 0;
68} 298}
69 299
70#if defined(CONFIG_OF_IRQ)
71/** 300/**
72 * irq_create_of_mapping() - Map a linux irq number from a DT interrupt spec 301 * irq_create_direct_mapping() - Allocate an irq for direct mapping
302 * @domain: domain to allocate the irq for or NULL for default domain
73 * 303 *
74 * Used by the device tree interrupt mapping code to translate a device tree 304 * This routine is used for irq controllers which can choose the hardware
75 * interrupt specifier to a valid linux irq number. Returns either a valid 305 * interrupt numbers they generate. In such a case it's simplest to use
76 * linux IRQ number or 0. 306 * the linux irq as the hardware interrupt number.
307 */
308unsigned int irq_create_direct_mapping(struct irq_domain *domain)
309{
310 unsigned int virq;
311
312 if (domain == NULL)
313 domain = irq_default_domain;
314
315 BUG_ON(domain == NULL);
316 WARN_ON(domain->revmap_type != IRQ_DOMAIN_MAP_NOMAP);
317
318 virq = irq_alloc_desc_from(1, 0);
319 if (!virq) {
320 pr_debug("irq: create_direct virq allocation failed\n");
321 return 0;
322 }
323 if (virq >= irq_virq_count) {
324 pr_err("ERROR: no free irqs available below %i maximum\n",
325 irq_virq_count);
326 irq_free_desc(virq);
327 return 0;
328 }
329
330 pr_debug("irq: create_direct obtained virq %d\n", virq);
331
332 if (irq_setup_virq(domain, virq, virq)) {
333 irq_free_desc(virq);
334 return 0;
335 }
336
337 return virq;
338}
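A hedged sketch of the nomap case (the foo_msi names and foo_msi_program_hw() are hypothetical): controllers that can program an arbitrary interrupt number into the hardware, such as MSI-style or hypervisor-virtualised sources, simply reuse the Linux irq number as the hwirq.

#include <linux/irqdomain.h>

/* Assumed to have been created elsewhere with irq_domain_add_nomap(). */
static struct irq_domain *foo_msi_domain;

static void foo_msi_program_hw(unsigned int virq);      /* hypothetical hw hook */

static int foo_msi_alloc_irq(void)
{
        unsigned int virq = irq_create_direct_mapping(foo_msi_domain);

        if (!virq)
                return -ENOSPC;
        foo_msi_program_hw(virq);       /* tell the hardware which number to raise */
        return virq;
}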
339
340/**
341 * irq_create_mapping() - Map a hardware interrupt into linux irq space
342 * @domain: domain owning this hardware interrupt or NULL for default domain
343 * @hwirq: hardware irq number in that domain space
77 * 344 *
78 * When the caller no longer need the irq number returned by this function it 345 * Only one mapping per hardware interrupt is permitted. Returns a linux
79 * should arrange to call irq_dispose_mapping(). 346 * irq number.
347 * If the sense/trigger is to be specified, irq_set_irq_type() should be called
348 * on the number returned from that call.
80 */ 349 */
350unsigned int irq_create_mapping(struct irq_domain *domain,
351 irq_hw_number_t hwirq)
352{
353 unsigned int virq, hint;
354
355 pr_debug("irq: irq_create_mapping(0x%p, 0x%lx)\n", domain, hwirq);
356
357 /* Look for default domain if necessary */
358 if (domain == NULL)
359 domain = irq_default_domain;
360 if (domain == NULL) {
361 printk(KERN_WARNING "irq_create_mapping called for"
362 " NULL domain, hwirq=%lx\n", hwirq);
363 WARN_ON(1);
364 return 0;
365 }
366 pr_debug("irq: -> using domain @%p\n", domain);
367
368 /* Check if mapping already exists */
369 virq = irq_find_mapping(domain, hwirq);
370 if (virq) {
371 pr_debug("irq: -> existing mapping on virq %d\n", virq);
372 return virq;
373 }
374
375 /* Get a virtual interrupt number */
376 if (domain->revmap_type == IRQ_DOMAIN_MAP_LEGACY)
377 return irq_domain_legacy_revmap(domain, hwirq);
378
379 /* Allocate a virtual interrupt number */
380 hint = hwirq % irq_virq_count;
381 if (hint == 0)
382 hint++;
383 virq = irq_alloc_desc_from(hint, 0);
384 if (!virq)
385 virq = irq_alloc_desc_from(1, 0);
386 if (!virq) {
387 pr_debug("irq: -> virq allocation failed\n");
388 return 0;
389 }
390
391 if (irq_setup_virq(domain, virq, hwirq)) {
392 if (domain->revmap_type != IRQ_DOMAIN_MAP_LEGACY)
393 irq_free_desc(virq);
394 return 0;
395 }
396
397 pr_debug("irq: irq %lu on domain %s mapped to virtual irq %u\n",
398 hwirq, domain->of_node ? domain->of_node->full_name : "null", virq);
399
400 return virq;
401}
402EXPORT_SYMBOL_GPL(irq_create_mapping);
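Because mappings are created lazily, a common consumer-side pattern is to call irq_create_mapping() from a gpio_chip's to_irq hook so the Linux irq only exists once somebody asks for it. A hedged sketch with hypothetical foo_gpio types, assuming one hwirq per GPIO line:

#include <linux/gpio.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>

struct foo_gpio_chip {
        struct gpio_chip        gc;
        struct irq_domain       *irq_domain;    /* one hwirq per GPIO line */
};

static int foo_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
{
        struct foo_gpio_chip *foo = container_of(gc, struct foo_gpio_chip, gc);
        unsigned int virq;

        /* Returns the existing mapping, or creates one on first use. */
        virq = irq_create_mapping(foo->irq_domain, offset);
        return virq ? virq : -ENXIO;
}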
403
81unsigned int irq_create_of_mapping(struct device_node *controller, 404unsigned int irq_create_of_mapping(struct device_node *controller,
82 const u32 *intspec, unsigned int intsize) 405 const u32 *intspec, unsigned int intsize)
83{ 406{
84 struct irq_domain *domain; 407 struct irq_domain *domain;
85 unsigned long hwirq; 408 irq_hw_number_t hwirq;
86 unsigned int irq, type; 409 unsigned int type = IRQ_TYPE_NONE;
87 int rc = -EINVAL; 410 unsigned int virq;
88 411
89 /* Find a domain which can translate the irq spec */ 412 domain = controller ? irq_find_host(controller) : irq_default_domain;
90 mutex_lock(&irq_domain_mutex); 413 if (!domain) {
91 list_for_each_entry(domain, &irq_domain_list, list) { 414#ifdef CONFIG_MIPS
92 if (!domain->ops->dt_translate) 415 /*
93 continue; 416 * Workaround to avoid breaking interrupt controller drivers
94 rc = domain->ops->dt_translate(domain, controller, 417 * that don't yet register an irq_domain. This is temporary
95 intspec, intsize, &hwirq, &type); 418 * code. ~~~gcl, Feb 24, 2012
96 if (rc == 0) 419 *
97 break; 420 * Scheduled for removal in Linux v3.6. That should be enough
421 * time.
422 */
423 if (intsize > 0)
424 return intspec[0];
425#endif
426 printk(KERN_WARNING "irq: no irq domain found for %s !\n",
427 controller->full_name);
428 return 0;
98 } 429 }
99 mutex_unlock(&irq_domain_mutex);
100 430
101 if (rc != 0) 431 /* If domain has no translation, then we assume interrupt line */
102 return 0; 432 if (domain->ops->xlate == NULL)
433 hwirq = intspec[0];
434 else {
435 if (domain->ops->xlate(domain, controller, intspec, intsize,
436 &hwirq, &type))
437 return 0;
438 }
439
440 /* Create mapping */
441 virq = irq_create_mapping(domain, hwirq);
442 if (!virq)
443 return virq;
103 444
104 irq = irq_domain_to_irq(domain, hwirq); 445 /* Set type if specified and different than the current one */
105 if (type != IRQ_TYPE_NONE) 446 if (type != IRQ_TYPE_NONE &&
106 irq_set_irq_type(irq, type); 447 type != (irqd_get_trigger_type(irq_get_irq_data(virq))))
107 pr_debug("%s: mapped hwirq=%i to irq=%i, flags=%x\n", 448 irq_set_irq_type(virq, type);
108 controller->full_name, (int)hwirq, irq, type); 449 return virq;
109 return irq;
110} 450}
111EXPORT_SYMBOL_GPL(irq_create_of_mapping); 451EXPORT_SYMBOL_GPL(irq_create_of_mapping);
112 452
113/** 453/**
114 * irq_dispose_mapping() - Discard a mapping created by irq_create_of_mapping() 454 * irq_dispose_mapping() - Unmap an interrupt
115 * @irq: linux irq number to be discarded 455 * @virq: linux irq number of the interrupt to unmap
456 */
457void irq_dispose_mapping(unsigned int virq)
458{
459 struct irq_data *irq_data = irq_get_irq_data(virq);
460 struct irq_domain *domain;
461 irq_hw_number_t hwirq;
462
463 if (!virq || !irq_data)
464 return;
465
466 domain = irq_data->domain;
467 if (WARN_ON(domain == NULL))
468 return;
469
470 /* Never unmap legacy interrupts */
471 if (domain->revmap_type == IRQ_DOMAIN_MAP_LEGACY)
472 return;
473
474 irq_set_status_flags(virq, IRQ_NOREQUEST);
475
476 /* remove chip and handler */
477 irq_set_chip_and_handler(virq, NULL, NULL);
478
479 /* Make sure it's completed */
480 synchronize_irq(virq);
481
482 /* Tell the PIC about it */
483 if (domain->ops->unmap)
484 domain->ops->unmap(domain, virq);
485 smp_mb();
486
487 /* Clear reverse map */
488 hwirq = irq_data->hwirq;
489 switch(domain->revmap_type) {
490 case IRQ_DOMAIN_MAP_LINEAR:
491 if (hwirq < domain->revmap_data.linear.size)
492 domain->revmap_data.linear.revmap[hwirq] = 0;
493 break;
494 case IRQ_DOMAIN_MAP_TREE:
495 mutex_lock(&revmap_trees_mutex);
496 radix_tree_delete(&domain->revmap_data.tree, hwirq);
497 mutex_unlock(&revmap_trees_mutex);
498 break;
499 }
500
501 irq_free_desc(virq);
502}
503EXPORT_SYMBOL_GPL(irq_dispose_mapping);
504
505/**
506 * irq_find_mapping() - Find a linux irq from an hw irq number.
507 * @domain: domain owning this hardware interrupt
508 * @hwirq: hardware irq number in that domain space
509 *
510 * This is a slow path, for use by generic code. It's expected that an
511 * irq controller implementation directly calls the appropriate low level
512 * mapping function.
513 */
514unsigned int irq_find_mapping(struct irq_domain *domain,
515 irq_hw_number_t hwirq)
516{
517 unsigned int i;
518 unsigned int hint = hwirq % irq_virq_count;
519
520 /* Look for default domain if necessary */
521 if (domain == NULL)
522 domain = irq_default_domain;
523 if (domain == NULL)
524 return 0;
525
526 /* legacy -> bail early */
527 if (domain->revmap_type == IRQ_DOMAIN_MAP_LEGACY)
528 return irq_domain_legacy_revmap(domain, hwirq);
529
530 /* Slow path does a linear search of the map */
531 if (hint == 0)
532 hint = 1;
533 i = hint;
534 do {
535 struct irq_data *data = irq_get_irq_data(i);
536 if (data && (data->domain == domain) && (data->hwirq == hwirq))
537 return i;
538 i++;
539 if (i >= irq_virq_count)
540 i = 1;
541 } while(i != hint);
542 return 0;
543}
544EXPORT_SYMBOL_GPL(irq_find_mapping);
545
546/**
547 * irq_radix_revmap_lookup() - Find a linux irq from a hw irq number.
548 * @domain: domain owning this hardware interrupt
549 * @hwirq: hardware irq number in that domain space
116 * 550 *
117 * Calling this function indicates the caller no longer needs a reference to 551 * This is a fast path, for use by irq controller code that uses radix tree
118 * the linux irq number returned by a prior call to irq_create_of_mapping(). 552 * revmaps
119 */ 553 */
120void irq_dispose_mapping(unsigned int irq) 554unsigned int irq_radix_revmap_lookup(struct irq_domain *domain,
555 irq_hw_number_t hwirq)
121{ 556{
557 struct irq_data *irq_data;
558
559 if (WARN_ON_ONCE(domain->revmap_type != IRQ_DOMAIN_MAP_TREE))
560 return irq_find_mapping(domain, hwirq);
561
562 /*
563 * Freeing an irq can delete nodes along the path to
564 * do the lookup via call_rcu.
565 */
566 rcu_read_lock();
567 irq_data = radix_tree_lookup(&domain->revmap_data.tree, hwirq);
568 rcu_read_unlock();
569
122 /* 570 /*
123 * nothing yet; will be filled when support for dynamic allocation of 571 * If found in radix tree, then fine.
124 * irq_descs is added to irq_domain 572 * Else fallback to linear lookup - this should not happen in practice
573 * as it means that we failed to insert the node in the radix tree.
125 */ 574 */
575 return irq_data ? irq_data->irq : irq_find_mapping(domain, hwirq);
126} 576}
127EXPORT_SYMBOL_GPL(irq_dispose_mapping);
128 577
129int irq_domain_simple_dt_translate(struct irq_domain *d, 578/**
130 struct device_node *controller, 579 * irq_radix_revmap_insert() - Insert a hw irq to linux irq number mapping.
131 const u32 *intspec, unsigned int intsize, 580 * @domain: domain owning this hardware interrupt
132 unsigned long *out_hwirq, unsigned int *out_type) 581 * @virq: linux irq number
582 * @hwirq: hardware irq number in that domain space
583 *
584 * This is for use by irq controllers that use a radix tree reverse
585 * mapping for fast lookup.
586 */
587void irq_radix_revmap_insert(struct irq_domain *domain, unsigned int virq,
588 irq_hw_number_t hwirq)
133{ 589{
134 if (d->of_node != controller) 590 struct irq_data *irq_data = irq_get_irq_data(virq);
135 return -EINVAL; 591
136 if (intsize < 1) 592 if (WARN_ON(domain->revmap_type != IRQ_DOMAIN_MAP_TREE))
137 return -EINVAL; 593 return;
138 if (d->nr_irq && ((intspec[0] < d->hwirq_base) || 594
139 (intspec[0] >= d->hwirq_base + d->nr_irq))) 595 if (virq) {
140 return -EINVAL; 596 mutex_lock(&revmap_trees_mutex);
597 radix_tree_insert(&domain->revmap_data.tree, hwirq, irq_data);
598 mutex_unlock(&revmap_trees_mutex);
599 }
600}
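Drivers that choose the tree revmap are expected to populate it themselves, typically from their map() callback, so that the fast lookup above actually finds an entry. A hedged sketch with hypothetical foo_tree names:

#include <linux/irq.h>
#include <linux/irqdomain.h>

static int foo_tree_irq_map(struct irq_domain *d, unsigned int virq,
                            irq_hw_number_t hwirq)
{
        irq_set_chip_and_handler(virq, &dummy_irq_chip, handle_level_irq);
        /* Make the hwirq reachable via irq_radix_revmap_lookup(). */
        irq_radix_revmap_insert(d, virq, hwirq);
        return 0;
}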
601
602/**
603 * irq_linear_revmap() - Find a linux irq from a hw irq number.
604 * @domain: domain owning this hardware interrupt
605 * @hwirq: hardware irq number in that domain space
606 *
607 * This is a fast path, for use by irq controller code that uses linear
608 * revmaps. It falls back to the slow path if the revmap doesn't exist
609 * yet, and will create the revmap entry with appropriate locking.
610 */
611unsigned int irq_linear_revmap(struct irq_domain *domain,
612 irq_hw_number_t hwirq)
613{
614 unsigned int *revmap;
615
616 if (WARN_ON_ONCE(domain->revmap_type != IRQ_DOMAIN_MAP_LINEAR))
617 return irq_find_mapping(domain, hwirq);
618
619 /* Check revmap bounds */
620 if (unlikely(hwirq >= domain->revmap_data.linear.size))
621 return irq_find_mapping(domain, hwirq);
622
623 /* Check if revmap was allocated */
624 revmap = domain->revmap_data.linear.revmap;
625 if (unlikely(revmap == NULL))
626 return irq_find_mapping(domain, hwirq);
627
628 /* Fill up revmap with slow path if no mapping found */
629 if (unlikely(!revmap[hwirq]))
630 revmap[hwirq] = irq_find_mapping(domain, hwirq);
631
632 return revmap[hwirq];
633}
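The linear revmap is the one intended for demux hot paths. A hedged sketch of a cascaded flow handler (foo_read_pending() is a hypothetical register accessor) that translates each pending hwirq and hands it to the core:

#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/irqdesc.h>
#include <linux/irqdomain.h>

static unsigned long foo_read_pending(void);    /* hypothetical hw accessor */

static void foo_cascade_handler(unsigned int irq, struct irq_desc *desc)
{
        struct irq_domain *domain = irq_desc_get_handler_data(desc);
        unsigned long pending = foo_read_pending();
        unsigned int virq;
        int hwirq;

        for_each_set_bit(hwirq, &pending, BITS_PER_LONG) {
                virq = irq_linear_revmap(domain, hwirq);
                if (virq)
                        generic_handle_irq(virq);
        }
}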
634
635#ifdef CONFIG_VIRQ_DEBUG
636static int virq_debug_show(struct seq_file *m, void *private)
637{
638 unsigned long flags;
639 struct irq_desc *desc;
640 const char *p;
641 static const char none[] = "none";
642 void *data;
643 int i;
644
645 seq_printf(m, "%-5s %-7s %-15s %-18s %s\n", "virq", "hwirq",
646 "chip name", "chip data", "domain name");
647
648 for (i = 1; i < nr_irqs; i++) {
649 desc = irq_to_desc(i);
650 if (!desc)
651 continue;
652
653 raw_spin_lock_irqsave(&desc->lock, flags);
654
655 if (desc->action && desc->action->handler) {
656 struct irq_chip *chip;
657
658 seq_printf(m, "%5d ", i);
659 seq_printf(m, "0x%05lx ", desc->irq_data.hwirq);
660
661 chip = irq_desc_get_chip(desc);
662 if (chip && chip->name)
663 p = chip->name;
664 else
665 p = none;
666 seq_printf(m, "%-15s ", p);
667
668 data = irq_desc_get_chip_data(desc);
669 seq_printf(m, "0x%16p ", data);
670
671 if (desc->irq_data.domain->of_node)
672 p = desc->irq_data.domain->of_node->full_name;
673 else
674 p = none;
675 seq_printf(m, "%s\n", p);
676 }
677
678 raw_spin_unlock_irqrestore(&desc->lock, flags);
679 }
680
681 return 0;
682}
141 683
684static int virq_debug_open(struct inode *inode, struct file *file)
685{
686 return single_open(file, virq_debug_show, inode->i_private);
687}
688
689static const struct file_operations virq_debug_fops = {
690 .open = virq_debug_open,
691 .read = seq_read,
692 .llseek = seq_lseek,
693 .release = single_release,
694};
695
696static int __init irq_debugfs_init(void)
697{
698 if (debugfs_create_file("virq_mapping", S_IRUGO, powerpc_debugfs_root,
699 NULL, &virq_debug_fops) == NULL)
700 return -ENOMEM;
701
702 return 0;
703}
704__initcall(irq_debugfs_init);
705#endif /* CONFIG_VIRQ_DEBUG */
706
707int irq_domain_simple_map(struct irq_domain *d, unsigned int irq,
708 irq_hw_number_t hwirq)
709{
710 return 0;
711}
712
713/**
714 * irq_domain_xlate_onecell() - Generic xlate for direct one cell bindings
715 *
716 * Device Tree IRQ specifier translation function which works with one cell
717 * bindings where the cell value maps directly to the hwirq number.
718 */
719int irq_domain_xlate_onecell(struct irq_domain *d, struct device_node *ctrlr,
720 const u32 *intspec, unsigned int intsize,
721 unsigned long *out_hwirq, unsigned int *out_type)
722{
723 if (WARN_ON(intsize < 1))
724 return -EINVAL;
142 *out_hwirq = intspec[0]; 725 *out_hwirq = intspec[0];
143 *out_type = IRQ_TYPE_NONE; 726 *out_type = IRQ_TYPE_NONE;
144 if (intsize > 1)
145 *out_type = intspec[1] & IRQ_TYPE_SENSE_MASK;
146 return 0; 727 return 0;
147} 728}
729EXPORT_SYMBOL_GPL(irq_domain_xlate_onecell);
148 730
149/** 731/**
150 * irq_domain_create_simple() - Set up a 'simple' translation range 732 * irq_domain_xlate_twocell() - Generic xlate for direct two cell bindings
733 *
734 * Device Tree IRQ specifier translation function which works with two cell
735 * bindings where the cell values map directly to the hwirq number
736 * and linux irq flags.
151 */ 737 */
152void irq_domain_add_simple(struct device_node *controller, int irq_base) 738int irq_domain_xlate_twocell(struct irq_domain *d, struct device_node *ctrlr,
739 const u32 *intspec, unsigned int intsize,
740 irq_hw_number_t *out_hwirq, unsigned int *out_type)
153{ 741{
154 struct irq_domain *domain; 742 if (WARN_ON(intsize < 2))
155 743 return -EINVAL;
156 domain = kzalloc(sizeof(*domain), GFP_KERNEL); 744 *out_hwirq = intspec[0];
157 if (!domain) { 745 *out_type = intspec[1] & IRQ_TYPE_SENSE_MASK;
158 WARN_ON(1); 746 return 0;
159 return; 747}
160 } 748EXPORT_SYMBOL_GPL(irq_domain_xlate_twocell);
161 749
162 domain->irq_base = irq_base; 750/**
163 domain->of_node = of_node_get(controller); 751 * irq_domain_xlate_onetwocell() - Generic xlate for one or two cell bindings
164 domain->ops = &irq_domain_simple_ops; 752 *
165 irq_domain_add(domain); 753 * Device Tree IRQ specifier translation function which works with either one
754 * or two cell bindings where the cell values map directly to the hwirq number
755 * and linux irq flags.
756 *
757 * Note: don't use this function unless your interrupt controller explicitly
758 * supports both one and two cell bindings. For the majority of controllers
759 * the _onecell() or _twocell() variants above should be used.
760 */
761int irq_domain_xlate_onetwocell(struct irq_domain *d,
762 struct device_node *ctrlr,
763 const u32 *intspec, unsigned int intsize,
764 unsigned long *out_hwirq, unsigned int *out_type)
765{
766 if (WARN_ON(intsize < 1))
767 return -EINVAL;
768 *out_hwirq = intspec[0];
769 *out_type = (intsize > 1) ? intspec[1] : IRQ_TYPE_NONE;
770 return 0;
166} 771}
167EXPORT_SYMBOL_GPL(irq_domain_add_simple); 772EXPORT_SYMBOL_GPL(irq_domain_xlate_onetwocell);
168 773
774const struct irq_domain_ops irq_domain_simple_ops = {
775 .map = irq_domain_simple_map,
776 .xlate = irq_domain_xlate_onetwocell,
777};
778EXPORT_SYMBOL_GPL(irq_domain_simple_ops);
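When a controller needs no per-irq setup at map time and uses plain one- or two-cell bindings, irq_domain_simple_ops can be passed directly. A hedged one-liner of that case (the foo_simple names and the 64-line count are hypothetical):

#include <linux/init.h>
#include <linux/irqdomain.h>
#include <linux/of.h>

static struct irq_domain *foo_simple_domain;

static void __init foo_simple_init(struct device_node *node)
{
        foo_simple_domain = irq_domain_add_linear(node, 64,
                                                  &irq_domain_simple_ops, NULL);
}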
779
780#ifdef CONFIG_OF_IRQ
169void irq_domain_generate_simple(const struct of_device_id *match, 781void irq_domain_generate_simple(const struct of_device_id *match,
170 u64 phys_base, unsigned int irq_start) 782 u64 phys_base, unsigned int irq_start)
171{ 783{
172 struct device_node *node; 784 struct device_node *node;
173 pr_info("looking for phys_base=%llx, irq_start=%i\n", 785 pr_debug("looking for phys_base=%llx, irq_start=%i\n",
174 (unsigned long long) phys_base, (int) irq_start); 786 (unsigned long long) phys_base, (int) irq_start);
175 node = of_find_matching_node_by_address(NULL, match, phys_base); 787 node = of_find_matching_node_by_address(NULL, match, phys_base);
176 if (node) 788 if (node)
177 irq_domain_add_simple(node, irq_start); 789 irq_domain_add_legacy(node, 32, irq_start, 0,
178 else 790 &irq_domain_simple_ops, NULL);
179 pr_info("no node found\n");
180} 791}
181EXPORT_SYMBOL_GPL(irq_domain_generate_simple); 792EXPORT_SYMBOL_GPL(irq_domain_generate_simple);
182#endif /* CONFIG_OF_IRQ */ 793#endif
183
184struct irq_domain_ops irq_domain_simple_ops = {
185#ifdef CONFIG_OF_IRQ
186 .dt_translate = irq_domain_simple_dt_translate,
187#endif /* CONFIG_OF_IRQ */
188};
189EXPORT_SYMBOL_GPL(irq_domain_simple_ops);