aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2016-10-03 22:10:15 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2016-10-03 22:10:15 -0400
commit999dcbe2414e15e19cdc1f91497d01f262c6e1cf (patch)
tree5ad7cfa7d337f4a38be8045130c7f1a213bbe925
parent5e1b834b27fb2c27cde33a0752425f11d10c0b2d (diff)
parent4cd13c21b207e80ddb1144c576500098f2d5f882 (diff)
Merge branch 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull irq updates from Thomas Gleixner: "The irq departement proudly presents: - A rework of the core infrastructure to optimally spread interrupt for multiqueue devices. The first version was a bit naive and failed to take thread siblings and other details into account. Developed in cooperation with Christoph and Keith. - Proper delegation of softirqs to ksoftirqd, so if ksoftirqd is active then no further softirq processsing on interrupt return happens. Otherwise we try to delegate and still run another batch of network packets in the irq return path, which then tries to delegate to ksoftirqd ..... - A proper machine parseable sysfs based alternative for /proc/interrupts. - ACPI support for the GICV3-ITS and ARM interrupt remapping - Two new irq chips from the ARM SoC zoo: STM32-EXTI and MVEBU-PIC - A new irq chip for the JCore (SuperH) - The usual pile of small fixlets in core and irqchip drivers" * 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (42 commits) softirq: Let ksoftirqd do its job genirq: Make function __irq_do_set_handler() static ARM/dts: Add EXTI controller node to stm32f429 ARM/STM32: Select external interrupts controller drivers/irqchip: Add STM32 external interrupts support Documentation/dt-bindings: Document STM32 EXTI controller bindings irqchip/mips-gic: Use for_each_set_bit to iterate over local IRQs pci/msi: Retrieve affinity for a vector genirq/affinity: Remove old irq spread infrastructure genirq/msi: Switch to new irq spreading infrastructure genirq/affinity: Provide smarter irq spreading infrastructure genirq/msi: Add cpumask allocation to alloc_msi_entry genirq: Expose interrupt information through sysfs irqchip/gicv3-its: Use MADT ITS subtable to do PCI/MSI domain initialization irqchip/gicv3-its: Factor out PCI-MSI part that might be reused for ACPI irqchip/gicv3-its: Probe ITS in the ACPI way irqchip/gicv3-its: Refactor ITS DT init code to prepare for ACPI irqchip/gicv3-its: Cleanup for ITS domain 
initialization PCI/MSI: Setup MSI domain on a per-device basis using IORT ACPI table ACPI: Add new IORT functions to support MSI domain handling ...
-rw-r--r--Documentation/ABI/testing/sysfs-kernel-irq53
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/jcore,aic.txt26
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/marvell,armada-8k-pic.txt25
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/marvell,odmi-controller.txt2
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/st,stm32-exti.txt20
-rw-r--r--arch/arm/Kconfig1
-rw-r--r--arch/arm/boot/dts/stm32f429.dtsi8
-rw-r--r--arch/arm/include/asm/arch_gicv3.h6
-rw-r--r--arch/arm64/Kconfig.platforms1
-rw-r--r--arch/arm64/include/asm/arch_gicv3.h6
-rw-r--r--drivers/acpi/Kconfig4
-rw-r--r--drivers/acpi/Makefile2
-rw-r--r--drivers/acpi/arm64/Kconfig6
-rw-r--r--drivers/acpi/arm64/Makefile1
-rw-r--r--drivers/acpi/arm64/iort.c368
-rw-r--r--drivers/acpi/bus.c2
-rw-r--r--drivers/base/platform-msi.c3
-rw-r--r--drivers/irqchip/Kconfig15
-rw-r--r--drivers/irqchip/Makefile3
-rw-r--r--drivers/irqchip/irq-gic-pm.c23
-rw-r--r--drivers/irqchip/irq-gic-v3-its-pci-msi.c88
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c171
-rw-r--r--drivers/irqchip/irq-gic-v3.c15
-rw-r--r--drivers/irqchip/irq-gic.c38
-rw-r--r--drivers/irqchip/irq-jcore-aic.c95
-rw-r--r--drivers/irqchip/irq-keystone.c2
-rw-r--r--drivers/irqchip/irq-mips-gic.c14
-rw-r--r--drivers/irqchip/irq-mvebu-pic.c197
-rw-r--r--drivers/irqchip/irq-stm32-exti.c201
-rw-r--r--drivers/pci/msi.c172
-rw-r--r--drivers/staging/fsl-mc/bus/mc-msi.c3
-rw-r--r--include/linux/acpi_iort.h42
-rw-r--r--include/linux/interrupt.h14
-rw-r--r--include/linux/irq.h18
-rw-r--r--include/linux/irqchip/arm-gic-v3.h4
-rw-r--r--include/linux/irqdesc.h3
-rw-r--r--include/linux/msi.h5
-rw-r--r--include/linux/pci.h6
-rw-r--r--kernel/irq/affinity.c167
-rw-r--r--kernel/irq/chip.c21
-rw-r--r--kernel/irq/generic-chip.c72
-rw-r--r--kernel/irq/irqdesc.c224
-rw-r--r--kernel/irq/irqdomain.c11
-rw-r--r--kernel/irq/manage.c5
-rw-r--r--kernel/irq/msi.c26
-rw-r--r--kernel/softirq.c16
46 files changed, 1924 insertions, 281 deletions
diff --git a/Documentation/ABI/testing/sysfs-kernel-irq b/Documentation/ABI/testing/sysfs-kernel-irq
new file mode 100644
index 000000000000..eb074b100986
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-kernel-irq
@@ -0,0 +1,53 @@
1What: /sys/kernel/irq
2Date: September 2016
3KernelVersion: 4.9
4Contact: Craig Gallek <kraig@google.com>
5Description: Directory containing information about the system's IRQs.
6 Specifically, data from the associated struct irq_desc.
7 The information here is similar to that in /proc/interrupts
8 but in a more machine-friendly format. This directory contains
9 one subdirectory for each Linux IRQ number.
10
11What: /sys/kernel/irq/<irq>/actions
12Date: September 2016
13KernelVersion: 4.9
14Contact: Craig Gallek <kraig@google.com>
15Description: The IRQ action chain. A comma-separated list of zero or more
16 device names associated with this interrupt.
17
18What: /sys/kernel/irq/<irq>/chip_name
19Date: September 2016
20KernelVersion: 4.9
21Contact: Craig Gallek <kraig@google.com>
22Description: Human-readable chip name supplied by the associated device
23 driver.
24
25What: /sys/kernel/irq/<irq>/hwirq
26Date: September 2016
27KernelVersion: 4.9
28Contact: Craig Gallek <kraig@google.com>
29Description: When interrupt translation domains are used, this file contains
30 the underlying hardware IRQ number used for this Linux IRQ.
31
32What: /sys/kernel/irq/<irq>/name
33Date: September 2016
34KernelVersion: 4.9
35Contact: Craig Gallek <kraig@google.com>
36Description: Human-readable flow handler name as defined by the irq chip
37 driver.
38
39What: /sys/kernel/irq/<irq>/per_cpu_count
40Date: September 2016
41KernelVersion: 4.9
42Contact: Craig Gallek <kraig@google.com>
43Description: The number of times the interrupt has fired since boot. This
44 is a comma-separated list of counters; one per CPU in CPU id
45 order. NOTE: This file consistently shows counters for all
46 CPU ids. This differs from the behavior of /proc/interrupts
47 which only shows counters for online CPUs.
48
49What: /sys/kernel/irq/<irq>/type
50Date: September 2016
51KernelVersion: 4.9
52Contact: Craig Gallek <kraig@google.com>
53Description: The type of the interrupt. Either the string 'level' or 'edge'.
diff --git a/Documentation/devicetree/bindings/interrupt-controller/jcore,aic.txt b/Documentation/devicetree/bindings/interrupt-controller/jcore,aic.txt
new file mode 100644
index 000000000000..ee2ad36f8df8
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/jcore,aic.txt
@@ -0,0 +1,26 @@
1J-Core Advanced Interrupt Controller
2
3Required properties:
4
5- compatible: Should be "jcore,aic1" for the (obsolete) first-generation aic
6 with 8 interrupt lines with programmable priorities, or "jcore,aic2" for
7 the "aic2" core with 64 interrupts.
8
9- reg: Memory region(s) for configuration. For SMP, there should be one
10 region per cpu, indexed by the sequential, zero-based hardware cpu
11 number.
12
13- interrupt-controller: Identifies the node as an interrupt controller
14
15- #interrupt-cells: Specifies the number of cells needed to encode an
16 interrupt source. The value shall be 1.
17
18
19Example:
20
21aic: interrupt-controller@200 {
22 compatible = "jcore,aic2";
23 reg = < 0x200 0x30 0x500 0x30 >;
24 interrupt-controller;
25 #interrupt-cells = <1>;
26};
diff --git a/Documentation/devicetree/bindings/interrupt-controller/marvell,armada-8k-pic.txt b/Documentation/devicetree/bindings/interrupt-controller/marvell,armada-8k-pic.txt
new file mode 100644
index 000000000000..86a7b4cd03f5
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/marvell,armada-8k-pic.txt
@@ -0,0 +1,25 @@
1Marvell Armada 7K/8K PIC Interrupt controller
2---------------------------------------------
3
4This is the Device Tree binding for the PIC, a secondary interrupt
5controller available on the Marvell Armada 7K/8K ARM64 SoCs, and
6typically connected to the GIC as the primary interrupt controller.
7
8Required properties:
9- compatible: should be "marvell,armada-8k-pic"
10- interrupt-controller: identifies the node as an interrupt controller
11- #interrupt-cells: the number of cells to define interrupts on this
12 controller. Should be 1
13- reg: the register area for the PIC interrupt controller
14- interrupts: the interrupt to the primary interrupt controller,
15 typically the GIC
16
17Example:
18
19 pic: interrupt-controller@3f0100 {
20 compatible = "marvell,armada-8k-pic";
21 reg = <0x3f0100 0x10>;
22 #interrupt-cells = <1>;
23 interrupt-controller;
24 interrupts = <GIC_PPI 15 IRQ_TYPE_LEVEL_HIGH>;
25 };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/marvell,odmi-controller.txt b/Documentation/devicetree/bindings/interrupt-controller/marvell,odmi-controller.txt
index 8af0a8e613ab..3f6442c7f867 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/marvell,odmi-controller.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/marvell,odmi-controller.txt
@@ -31,7 +31,7 @@ Required properties:
31Example: 31Example:
32 32
33 odmi: odmi@300000 { 33 odmi: odmi@300000 {
34 compatible = "marvell,ap806-odm-controller", 34 compatible = "marvell,ap806-odmi-controller",
35 "marvell,odmi-controller"; 35 "marvell,odmi-controller";
36 interrupt-controller; 36 interrupt-controller;
37 msi-controller; 37 msi-controller;
diff --git a/Documentation/devicetree/bindings/interrupt-controller/st,stm32-exti.txt b/Documentation/devicetree/bindings/interrupt-controller/st,stm32-exti.txt
new file mode 100644
index 000000000000..6e7703d4ff5b
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/st,stm32-exti.txt
@@ -0,0 +1,20 @@
1STM32 External Interrupt Controller
2
3Required properties:
4
5- compatible: Should be "st,stm32-exti"
6- reg: Specifies base physical address and size of the registers
7- interrupt-controller: Indentifies the node as an interrupt controller
8- #interrupt-cells: Specifies the number of cells to encode an interrupt
9 specifier, shall be 2
10- interrupts: interrupts references to primary interrupt controller
11
12Example:
13
14exti: interrupt-controller@40013c00 {
15 compatible = "st,stm32-exti";
16 interrupt-controller;
17 #interrupt-cells = <2>;
18 reg = <0x40013C00 0x400>;
19 interrupts = <1>, <2>, <3>, <6>, <7>, <8>, <9>, <10>, <23>, <40>, <41>, <42>, <62>, <76>;
20};
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index b2113c24850c..3cd9042fbb62 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -879,6 +879,7 @@ config ARCH_STM32
879 select CLKSRC_STM32 879 select CLKSRC_STM32
880 select PINCTRL 880 select PINCTRL
881 select RESET_CONTROLLER 881 select RESET_CONTROLLER
882 select STM32_EXTI
882 help 883 help
883 Support for STMicroelectronics STM32 processors. 884 Support for STMicroelectronics STM32 processors.
884 885
diff --git a/arch/arm/boot/dts/stm32f429.dtsi b/arch/arm/boot/dts/stm32f429.dtsi
index 35df462559ca..1a189d44ad38 100644
--- a/arch/arm/boot/dts/stm32f429.dtsi
+++ b/arch/arm/boot/dts/stm32f429.dtsi
@@ -176,6 +176,14 @@
176 reg = <0x40013800 0x400>; 176 reg = <0x40013800 0x400>;
177 }; 177 };
178 178
179 exti: interrupt-controller@40013c00 {
180 compatible = "st,stm32-exti";
181 interrupt-controller;
182 #interrupt-cells = <2>;
183 reg = <0x40013C00 0x400>;
184 interrupts = <1>, <2>, <3>, <6>, <7>, <8>, <9>, <10>, <23>, <40>, <41>, <42>, <62>, <76>;
185 };
186
179 pin-controller { 187 pin-controller {
180 #address-cells = <1>; 188 #address-cells = <1>;
181 #size-cells = <1>; 189 #size-cells = <1>;
diff --git a/arch/arm/include/asm/arch_gicv3.h b/arch/arm/include/asm/arch_gicv3.h
index e08d15184056..dfe4002812da 100644
--- a/arch/arm/include/asm/arch_gicv3.h
+++ b/arch/arm/include/asm/arch_gicv3.h
@@ -34,6 +34,7 @@
34#define ICC_CTLR __ACCESS_CP15(c12, 0, c12, 4) 34#define ICC_CTLR __ACCESS_CP15(c12, 0, c12, 4)
35#define ICC_SRE __ACCESS_CP15(c12, 0, c12, 5) 35#define ICC_SRE __ACCESS_CP15(c12, 0, c12, 5)
36#define ICC_IGRPEN1 __ACCESS_CP15(c12, 0, c12, 7) 36#define ICC_IGRPEN1 __ACCESS_CP15(c12, 0, c12, 7)
37#define ICC_BPR1 __ACCESS_CP15(c12, 0, c12, 3)
37 38
38#define ICC_HSRE __ACCESS_CP15(c12, 4, c9, 5) 39#define ICC_HSRE __ACCESS_CP15(c12, 4, c9, 5)
39 40
@@ -157,6 +158,11 @@ static inline void gic_write_sre(u32 val)
157 isb(); 158 isb();
158} 159}
159 160
161static inline void gic_write_bpr1(u32 val)
162{
163 asm volatile("mcr " __stringify(ICC_BPR1) : : "r" (val));
164}
165
160/* 166/*
161 * Even in 32bit systems that use LPAE, there is no guarantee that the I/O 167 * Even in 32bit systems that use LPAE, there is no guarantee that the I/O
162 * interface provides true 64bit atomic accesses, so using strd/ldrd doesn't 168 * interface provides true 64bit atomic accesses, so using strd/ldrd doesn't
diff --git a/arch/arm64/Kconfig.platforms b/arch/arm64/Kconfig.platforms
index 4a4318bc219a..a8c77c72a831 100644
--- a/arch/arm64/Kconfig.platforms
+++ b/arch/arm64/Kconfig.platforms
@@ -93,6 +93,7 @@ config ARCH_MVEBU
93 select ARMADA_CP110_SYSCON 93 select ARMADA_CP110_SYSCON
94 select ARMADA_37XX_CLK 94 select ARMADA_37XX_CLK
95 select MVEBU_ODMI 95 select MVEBU_ODMI
96 select MVEBU_PIC
96 help 97 help
97 This enables support for Marvell EBU familly, including: 98 This enables support for Marvell EBU familly, including:
98 - Armada 3700 SoC Family 99 - Armada 3700 SoC Family
diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h
index 8ec88e5b290f..fc2a0cb47b2c 100644
--- a/arch/arm64/include/asm/arch_gicv3.h
+++ b/arch/arm64/include/asm/arch_gicv3.h
@@ -28,6 +28,7 @@
28#define ICC_CTLR_EL1 sys_reg(3, 0, 12, 12, 4) 28#define ICC_CTLR_EL1 sys_reg(3, 0, 12, 12, 4)
29#define ICC_SRE_EL1 sys_reg(3, 0, 12, 12, 5) 29#define ICC_SRE_EL1 sys_reg(3, 0, 12, 12, 5)
30#define ICC_GRPEN1_EL1 sys_reg(3, 0, 12, 12, 7) 30#define ICC_GRPEN1_EL1 sys_reg(3, 0, 12, 12, 7)
31#define ICC_BPR1_EL1 sys_reg(3, 0, 12, 12, 3)
31 32
32#define ICC_SRE_EL2 sys_reg(3, 4, 12, 9, 5) 33#define ICC_SRE_EL2 sys_reg(3, 4, 12, 9, 5)
33 34
@@ -165,6 +166,11 @@ static inline void gic_write_sre(u32 val)
165 isb(); 166 isb();
166} 167}
167 168
169static inline void gic_write_bpr1(u32 val)
170{
171 asm volatile("msr_s " __stringify(ICC_BPR1_EL1) ", %0" : : "r" (val));
172}
173
168#define gic_read_typer(c) readq_relaxed(c) 174#define gic_read_typer(c) readq_relaxed(c)
169#define gic_write_irouter(v, c) writeq_relaxed(v, c) 175#define gic_write_irouter(v, c) writeq_relaxed(v, c)
170 176
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 696c6f74a9c7..33201d4de633 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -523,4 +523,8 @@ config ACPI_CONFIGFS
523 userspace. The configurable ACPI groups will be visible under 523 userspace. The configurable ACPI groups will be visible under
524 /config/acpi, assuming configfs is mounted under /config. 524 /config/acpi, assuming configfs is mounted under /config.
525 525
526if ARM64
527source "drivers/acpi/arm64/Kconfig"
528endif
529
526endif # ACPI 530endif # ACPI
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 3a1fa8f03749..313f970888e4 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -106,3 +106,5 @@ obj-$(CONFIG_ACPI_CONFIGFS) += acpi_configfs.o
106 106
107video-objs += acpi_video.o video_detect.o 107video-objs += acpi_video.o video_detect.o
108obj-y += dptf/ 108obj-y += dptf/
109
110obj-$(CONFIG_ARM64) += arm64/
diff --git a/drivers/acpi/arm64/Kconfig b/drivers/acpi/arm64/Kconfig
new file mode 100644
index 000000000000..4616da4c15be
--- /dev/null
+++ b/drivers/acpi/arm64/Kconfig
@@ -0,0 +1,6 @@
1#
2# ACPI Configuration for ARM64
3#
4
5config ACPI_IORT
6 bool
diff --git a/drivers/acpi/arm64/Makefile b/drivers/acpi/arm64/Makefile
new file mode 100644
index 000000000000..72331f2ce0e9
--- /dev/null
+++ b/drivers/acpi/arm64/Makefile
@@ -0,0 +1 @@
obj-$(CONFIG_ACPI_IORT) += iort.o
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
new file mode 100644
index 000000000000..6b81746cd13c
--- /dev/null
+++ b/drivers/acpi/arm64/iort.c
@@ -0,0 +1,368 @@
1/*
2 * Copyright (C) 2016, Semihalf
3 * Author: Tomasz Nowicki <tn@semihalf.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * This file implements early detection/parsing of I/O mapping
15 * reported to OS through firmware via I/O Remapping Table (IORT)
16 * IORT document number: ARM DEN 0049A
17 */
18
19#define pr_fmt(fmt) "ACPI: IORT: " fmt
20
21#include <linux/acpi_iort.h>
22#include <linux/kernel.h>
23#include <linux/pci.h>
24
25struct iort_its_msi_chip {
26 struct list_head list;
27 struct fwnode_handle *fw_node;
28 u32 translation_id;
29};
30
31typedef acpi_status (*iort_find_node_callback)
32 (struct acpi_iort_node *node, void *context);
33
34/* Root pointer to the mapped IORT table */
35static struct acpi_table_header *iort_table;
36
37static LIST_HEAD(iort_msi_chip_list);
38static DEFINE_SPINLOCK(iort_msi_chip_lock);
39
40/**
41 * iort_register_domain_token() - register domain token and related ITS ID
42 * to the list from where we can get it back later on.
43 * @trans_id: ITS ID.
44 * @fw_node: Domain token.
45 *
46 * Returns: 0 on success, -ENOMEM if no memory when allocating list element
47 */
48int iort_register_domain_token(int trans_id, struct fwnode_handle *fw_node)
49{
50 struct iort_its_msi_chip *its_msi_chip;
51
52 its_msi_chip = kzalloc(sizeof(*its_msi_chip), GFP_KERNEL);
53 if (!its_msi_chip)
54 return -ENOMEM;
55
56 its_msi_chip->fw_node = fw_node;
57 its_msi_chip->translation_id = trans_id;
58
59 spin_lock(&iort_msi_chip_lock);
60 list_add(&its_msi_chip->list, &iort_msi_chip_list);
61 spin_unlock(&iort_msi_chip_lock);
62
63 return 0;
64}
65
66/**
67 * iort_deregister_domain_token() - Deregister domain token based on ITS ID
68 * @trans_id: ITS ID.
69 *
70 * Returns: none.
71 */
72void iort_deregister_domain_token(int trans_id)
73{
74 struct iort_its_msi_chip *its_msi_chip, *t;
75
76 spin_lock(&iort_msi_chip_lock);
77 list_for_each_entry_safe(its_msi_chip, t, &iort_msi_chip_list, list) {
78 if (its_msi_chip->translation_id == trans_id) {
79 list_del(&its_msi_chip->list);
80 kfree(its_msi_chip);
81 break;
82 }
83 }
84 spin_unlock(&iort_msi_chip_lock);
85}
86
87/**
88 * iort_find_domain_token() - Find domain token based on given ITS ID
89 * @trans_id: ITS ID.
90 *
91 * Returns: domain token when find on the list, NULL otherwise
92 */
93struct fwnode_handle *iort_find_domain_token(int trans_id)
94{
95 struct fwnode_handle *fw_node = NULL;
96 struct iort_its_msi_chip *its_msi_chip;
97
98 spin_lock(&iort_msi_chip_lock);
99 list_for_each_entry(its_msi_chip, &iort_msi_chip_list, list) {
100 if (its_msi_chip->translation_id == trans_id) {
101 fw_node = its_msi_chip->fw_node;
102 break;
103 }
104 }
105 spin_unlock(&iort_msi_chip_lock);
106
107 return fw_node;
108}
109
110static struct acpi_iort_node *iort_scan_node(enum acpi_iort_node_type type,
111 iort_find_node_callback callback,
112 void *context)
113{
114 struct acpi_iort_node *iort_node, *iort_end;
115 struct acpi_table_iort *iort;
116 int i;
117
118 if (!iort_table)
119 return NULL;
120
121 /* Get the first IORT node */
122 iort = (struct acpi_table_iort *)iort_table;
123 iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort,
124 iort->node_offset);
125 iort_end = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
126 iort_table->length);
127
128 for (i = 0; i < iort->node_count; i++) {
129 if (WARN_TAINT(iort_node >= iort_end, TAINT_FIRMWARE_WORKAROUND,
130 "IORT node pointer overflows, bad table!\n"))
131 return NULL;
132
133 if (iort_node->type == type &&
134 ACPI_SUCCESS(callback(iort_node, context)))
135 return iort_node;
136
137 iort_node = ACPI_ADD_PTR(struct acpi_iort_node, iort_node,
138 iort_node->length);
139 }
140
141 return NULL;
142}
143
144static acpi_status iort_match_node_callback(struct acpi_iort_node *node,
145 void *context)
146{
147 struct device *dev = context;
148 acpi_status status;
149
150 if (node->type == ACPI_IORT_NODE_NAMED_COMPONENT) {
151 struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
152 struct acpi_device *adev = to_acpi_device_node(dev->fwnode);
153 struct acpi_iort_named_component *ncomp;
154
155 if (!adev) {
156 status = AE_NOT_FOUND;
157 goto out;
158 }
159
160 status = acpi_get_name(adev->handle, ACPI_FULL_PATHNAME, &buf);
161 if (ACPI_FAILURE(status)) {
162 dev_warn(dev, "Can't get device full path name\n");
163 goto out;
164 }
165
166 ncomp = (struct acpi_iort_named_component *)node->node_data;
167 status = !strcmp(ncomp->device_name, buf.pointer) ?
168 AE_OK : AE_NOT_FOUND;
169 acpi_os_free(buf.pointer);
170 } else if (node->type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
171 struct acpi_iort_root_complex *pci_rc;
172 struct pci_bus *bus;
173
174 bus = to_pci_bus(dev);
175 pci_rc = (struct acpi_iort_root_complex *)node->node_data;
176
177 /*
178 * It is assumed that PCI segment numbers maps one-to-one
179 * with root complexes. Each segment number can represent only
180 * one root complex.
181 */
182 status = pci_rc->pci_segment_number == pci_domain_nr(bus) ?
183 AE_OK : AE_NOT_FOUND;
184 } else {
185 status = AE_NOT_FOUND;
186 }
187out:
188 return status;
189}
190
191static int iort_id_map(struct acpi_iort_id_mapping *map, u8 type, u32 rid_in,
192 u32 *rid_out)
193{
194 /* Single mapping does not care for input id */
195 if (map->flags & ACPI_IORT_ID_SINGLE_MAPPING) {
196 if (type == ACPI_IORT_NODE_NAMED_COMPONENT ||
197 type == ACPI_IORT_NODE_PCI_ROOT_COMPLEX) {
198 *rid_out = map->output_base;
199 return 0;
200 }
201
202 pr_warn(FW_BUG "[map %p] SINGLE MAPPING flag not allowed for node type %d, skipping ID map\n",
203 map, type);
204 return -ENXIO;
205 }
206
207 if (rid_in < map->input_base ||
208 (rid_in >= map->input_base + map->id_count))
209 return -ENXIO;
210
211 *rid_out = map->output_base + (rid_in - map->input_base);
212 return 0;
213}
214
215static struct acpi_iort_node *iort_node_map_rid(struct acpi_iort_node *node,
216 u32 rid_in, u32 *rid_out,
217 u8 type)
218{
219 u32 rid = rid_in;
220
221 /* Parse the ID mapping tree to find specified node type */
222 while (node) {
223 struct acpi_iort_id_mapping *map;
224 int i;
225
226 if (node->type == type) {
227 if (rid_out)
228 *rid_out = rid;
229 return node;
230 }
231
232 if (!node->mapping_offset || !node->mapping_count)
233 goto fail_map;
234
235 map = ACPI_ADD_PTR(struct acpi_iort_id_mapping, node,
236 node->mapping_offset);
237
238 /* Firmware bug! */
239 if (!map->output_reference) {
240 pr_err(FW_BUG "[node %p type %d] ID map has NULL parent reference\n",
241 node, node->type);
242 goto fail_map;
243 }
244
245 /* Do the RID translation */
246 for (i = 0; i < node->mapping_count; i++, map++) {
247 if (!iort_id_map(map, node->type, rid, &rid))
248 break;
249 }
250
251 if (i == node->mapping_count)
252 goto fail_map;
253
254 node = ACPI_ADD_PTR(struct acpi_iort_node, iort_table,
255 map->output_reference);
256 }
257
258fail_map:
259 /* Map input RID to output RID unchanged on mapping failure*/
260 if (rid_out)
261 *rid_out = rid_in;
262
263 return NULL;
264}
265
266static struct acpi_iort_node *iort_find_dev_node(struct device *dev)
267{
268 struct pci_bus *pbus;
269
270 if (!dev_is_pci(dev))
271 return iort_scan_node(ACPI_IORT_NODE_NAMED_COMPONENT,
272 iort_match_node_callback, dev);
273
274 /* Find a PCI root bus */
275 pbus = to_pci_dev(dev)->bus;
276 while (!pci_is_root_bus(pbus))
277 pbus = pbus->parent;
278
279 return iort_scan_node(ACPI_IORT_NODE_PCI_ROOT_COMPLEX,
280 iort_match_node_callback, &pbus->dev);
281}
282
283/**
284 * iort_msi_map_rid() - Map a MSI requester ID for a device
285 * @dev: The device for which the mapping is to be done.
286 * @req_id: The device requester ID.
287 *
288 * Returns: mapped MSI RID on success, input requester ID otherwise
289 */
290u32 iort_msi_map_rid(struct device *dev, u32 req_id)
291{
292 struct acpi_iort_node *node;
293 u32 dev_id;
294
295 node = iort_find_dev_node(dev);
296 if (!node)
297 return req_id;
298
299 iort_node_map_rid(node, req_id, &dev_id, ACPI_IORT_NODE_ITS_GROUP);
300 return dev_id;
301}
302
303/**
304 * iort_dev_find_its_id() - Find the ITS identifier for a device
305 * @dev: The device.
306 * @idx: Index of the ITS identifier list.
307 * @its_id: ITS identifier.
308 *
309 * Returns: 0 on success, appropriate error value otherwise
310 */
311static int iort_dev_find_its_id(struct device *dev, u32 req_id,
312 unsigned int idx, int *its_id)
313{
314 struct acpi_iort_its_group *its;
315 struct acpi_iort_node *node;
316
317 node = iort_find_dev_node(dev);
318 if (!node)
319 return -ENXIO;
320
321 node = iort_node_map_rid(node, req_id, NULL, ACPI_IORT_NODE_ITS_GROUP);
322 if (!node)
323 return -ENXIO;
324
325 /* Move to ITS specific data */
326 its = (struct acpi_iort_its_group *)node->node_data;
327 if (idx > its->its_count) {
328 dev_err(dev, "requested ITS ID index [%d] is greater than available [%d]\n",
329 idx, its->its_count);
330 return -ENXIO;
331 }
332
333 *its_id = its->identifiers[idx];
334 return 0;
335}
336
337/**
338 * iort_get_device_domain() - Find MSI domain related to a device
339 * @dev: The device.
340 * @req_id: Requester ID for the device.
341 *
342 * Returns: the MSI domain for this device, NULL otherwise
343 */
344struct irq_domain *iort_get_device_domain(struct device *dev, u32 req_id)
345{
346 struct fwnode_handle *handle;
347 int its_id;
348
349 if (iort_dev_find_its_id(dev, req_id, 0, &its_id))
350 return NULL;
351
352 handle = iort_find_domain_token(its_id);
353 if (!handle)
354 return NULL;
355
356 return irq_find_matching_fwnode(handle, DOMAIN_BUS_PCI_MSI);
357}
358
359void __init acpi_iort_init(void)
360{
361 acpi_status status;
362
363 status = acpi_get_table(ACPI_SIG_IORT, 0, &iort_table);
364 if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
365 const char *msg = acpi_format_exception(status);
366 pr_err("Failed to get table, %s\n", msg);
367 }
368}
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 8df0afad35a9..56190d00fd87 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -36,6 +36,7 @@
36#ifdef CONFIG_X86 36#ifdef CONFIG_X86
37#include <asm/mpspec.h> 37#include <asm/mpspec.h>
38#endif 38#endif
39#include <linux/acpi_iort.h>
39#include <linux/pci.h> 40#include <linux/pci.h>
40#include <acpi/apei.h> 41#include <acpi/apei.h>
41#include <linux/dmi.h> 42#include <linux/dmi.h>
@@ -1188,6 +1189,7 @@ static int __init acpi_init(void)
1188 } 1189 }
1189 1190
1190 pci_mmcfg_late_init(); 1191 pci_mmcfg_late_init();
1192 acpi_iort_init();
1191 acpi_scan_init(); 1193 acpi_scan_init();
1192 acpi_ec_init(); 1194 acpi_ec_init();
1193 acpi_debugfs_init(); 1195 acpi_debugfs_init();
diff --git a/drivers/base/platform-msi.c b/drivers/base/platform-msi.c
index 279e53989374..be6a599bc0c1 100644
--- a/drivers/base/platform-msi.c
+++ b/drivers/base/platform-msi.c
@@ -142,13 +142,12 @@ static int platform_msi_alloc_descs_with_irq(struct device *dev, int virq,
142 } 142 }
143 143
144 for (i = 0; i < nvec; i++) { 144 for (i = 0; i < nvec; i++) {
145 desc = alloc_msi_entry(dev); 145 desc = alloc_msi_entry(dev, 1, NULL);
146 if (!desc) 146 if (!desc)
147 break; 147 break;
148 148
149 desc->platform.msi_priv_data = data; 149 desc->platform.msi_priv_data = data;
150 desc->platform.msi_index = base + i; 150 desc->platform.msi_index = base + i;
151 desc->nvec_used = 1;
152 desc->irq = virq ? virq + i : 0; 151 desc->irq = virq ? virq + i : 0;
153 152
154 list_add_tail(&desc->list, dev_to_msi_list(dev)); 153 list_add_tail(&desc->list, dev_to_msi_list(dev));
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 7f8728984f44..82b0b5daf3f5 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -39,6 +39,7 @@ config ARM_GIC_V3_ITS
39 bool 39 bool
40 depends on PCI 40 depends on PCI
41 depends on PCI_MSI 41 depends on PCI_MSI
42 select ACPI_IORT if ACPI
42 43
43config ARM_NVIC 44config ARM_NVIC
44 bool 45 bool
@@ -156,6 +157,13 @@ config PIC32_EVIC
156 select GENERIC_IRQ_CHIP 157 select GENERIC_IRQ_CHIP
157 select IRQ_DOMAIN 158 select IRQ_DOMAIN
158 159
160config JCORE_AIC
161 bool "J-Core integrated AIC"
162 depends on OF && (SUPERH || COMPILE_TEST)
163 select IRQ_DOMAIN
164 help
165 Support for the J-Core integrated AIC.
166
159config RENESAS_INTC_IRQPIN 167config RENESAS_INTC_IRQPIN
160 bool 168 bool
161 select IRQ_DOMAIN 169 select IRQ_DOMAIN
@@ -251,6 +259,9 @@ config IRQ_MXS
251config MVEBU_ODMI 259config MVEBU_ODMI
252 bool 260 bool
253 261
262config MVEBU_PIC
263 bool
264
254config LS_SCFG_MSI 265config LS_SCFG_MSI
255 def_bool y if SOC_LS1021A || ARCH_LAYERSCAPE 266 def_bool y if SOC_LS1021A || ARCH_LAYERSCAPE
256 depends on PCI && PCI_MSI 267 depends on PCI && PCI_MSI
@@ -264,3 +275,7 @@ config EZNPS_GIC
264 select IRQ_DOMAIN 275 select IRQ_DOMAIN
265 help 276 help
266 Support the EZchip NPS400 global interrupt controller 277 Support the EZchip NPS400 global interrupt controller
278
279config STM32_EXTI
280 bool
281 select IRQ_DOMAIN
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index 4c203b6b8163..b372e792adc2 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -40,6 +40,7 @@ obj-$(CONFIG_I8259) += irq-i8259.o
40obj-$(CONFIG_IMGPDC_IRQ) += irq-imgpdc.o 40obj-$(CONFIG_IMGPDC_IRQ) += irq-imgpdc.o
41obj-$(CONFIG_IRQ_MIPS_CPU) += irq-mips-cpu.o 41obj-$(CONFIG_IRQ_MIPS_CPU) += irq-mips-cpu.o
42obj-$(CONFIG_SIRF_IRQ) += irq-sirfsoc.o 42obj-$(CONFIG_SIRF_IRQ) += irq-sirfsoc.o
43obj-$(CONFIG_JCORE_AIC) += irq-jcore-aic.o
43obj-$(CONFIG_RENESAS_INTC_IRQPIN) += irq-renesas-intc-irqpin.o 44obj-$(CONFIG_RENESAS_INTC_IRQPIN) += irq-renesas-intc-irqpin.o
44obj-$(CONFIG_RENESAS_IRQC) += irq-renesas-irqc.o 45obj-$(CONFIG_RENESAS_IRQC) += irq-renesas-irqc.o
45obj-$(CONFIG_VERSATILE_FPGA_IRQ) += irq-versatile-fpga.o 46obj-$(CONFIG_VERSATILE_FPGA_IRQ) += irq-versatile-fpga.o
@@ -68,6 +69,8 @@ obj-$(CONFIG_INGENIC_IRQ) += irq-ingenic.o
68obj-$(CONFIG_IMX_GPCV2) += irq-imx-gpcv2.o 69obj-$(CONFIG_IMX_GPCV2) += irq-imx-gpcv2.o
69obj-$(CONFIG_PIC32_EVIC) += irq-pic32-evic.o 70obj-$(CONFIG_PIC32_EVIC) += irq-pic32-evic.o
70obj-$(CONFIG_MVEBU_ODMI) += irq-mvebu-odmi.o 71obj-$(CONFIG_MVEBU_ODMI) += irq-mvebu-odmi.o
72obj-$(CONFIG_MVEBU_PIC) += irq-mvebu-pic.o
71obj-$(CONFIG_LS_SCFG_MSI) += irq-ls-scfg-msi.o 73obj-$(CONFIG_LS_SCFG_MSI) += irq-ls-scfg-msi.o
72obj-$(CONFIG_EZNPS_GIC) += irq-eznps.o 74obj-$(CONFIG_EZNPS_GIC) += irq-eznps.o
73obj-$(CONFIG_ARCH_ASPEED) += irq-aspeed-vic.o 75obj-$(CONFIG_ARCH_ASPEED) += irq-aspeed-vic.o
76obj-$(CONFIG_STM32_EXTI) += irq-stm32-exti.o
diff --git a/drivers/irqchip/irq-gic-pm.c b/drivers/irqchip/irq-gic-pm.c
index 4cbffba3ff13..ecafd295c31c 100644
--- a/drivers/irqchip/irq-gic-pm.c
+++ b/drivers/irqchip/irq-gic-pm.c
@@ -64,7 +64,6 @@ static int gic_runtime_suspend(struct device *dev)
64 64
65static int gic_get_clocks(struct device *dev, const struct gic_clk_data *data) 65static int gic_get_clocks(struct device *dev, const struct gic_clk_data *data)
66{ 66{
67 struct clk *clk;
68 unsigned int i; 67 unsigned int i;
69 int ret; 68 int ret;
70 69
@@ -76,28 +75,16 @@ static int gic_get_clocks(struct device *dev, const struct gic_clk_data *data)
76 return ret; 75 return ret;
77 76
78 for (i = 0; i < data->num_clocks; i++) { 77 for (i = 0; i < data->num_clocks; i++) {
79 clk = of_clk_get_by_name(dev->of_node, data->clocks[i]); 78 ret = of_pm_clk_add_clk(dev, data->clocks[i]);
80 if (IS_ERR(clk)) {
81 dev_err(dev, "failed to get clock %s\n",
82 data->clocks[i]);
83 ret = PTR_ERR(clk);
84 goto error;
85 }
86
87 ret = pm_clk_add_clk(dev, clk);
88 if (ret) { 79 if (ret) {
89 dev_err(dev, "failed to add clock at index %d\n", i); 80 dev_err(dev, "failed to add clock %s\n",
90 clk_put(clk); 81 data->clocks[i]);
91 goto error; 82 pm_clk_destroy(dev);
83 return ret;
92 } 84 }
93 } 85 }
94 86
95 return 0; 87 return 0;
96
97error:
98 pm_clk_destroy(dev);
99
100 return ret;
101} 88}
102 89
103static int gic_probe(struct platform_device *pdev) 90static int gic_probe(struct platform_device *pdev)
diff --git a/drivers/irqchip/irq-gic-v3-its-pci-msi.c b/drivers/irqchip/irq-gic-v3-its-pci-msi.c
index aee60ed025dc..aee1c60d7ab5 100644
--- a/drivers/irqchip/irq-gic-v3-its-pci-msi.c
+++ b/drivers/irqchip/irq-gic-v3-its-pci-msi.c
@@ -15,6 +15,7 @@
15 * along with this program. If not, see <http://www.gnu.org/licenses/>. 15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 */ 16 */
17 17
18#include <linux/acpi_iort.h>
18#include <linux/msi.h> 19#include <linux/msi.h>
19#include <linux/of.h> 20#include <linux/of.h>
20#include <linux/of_irq.h> 21#include <linux/of_irq.h>
@@ -106,34 +107,91 @@ static struct of_device_id its_device_id[] = {
106 {}, 107 {},
107}; 108};
108 109
109static int __init its_pci_msi_init(void) 110static int __init its_pci_msi_init_one(struct fwnode_handle *handle,
111 const char *name)
110{ 112{
111 struct device_node *np;
112 struct irq_domain *parent; 113 struct irq_domain *parent;
113 114
115 parent = irq_find_matching_fwnode(handle, DOMAIN_BUS_NEXUS);
116 if (!parent || !msi_get_domain_info(parent)) {
117 pr_err("%s: Unable to locate ITS domain\n", name);
118 return -ENXIO;
119 }
120
121 if (!pci_msi_create_irq_domain(handle, &its_pci_msi_domain_info,
122 parent)) {
123 pr_err("%s: Unable to create PCI domain\n", name);
124 return -ENOMEM;
125 }
126
127 return 0;
128}
129
130static int __init its_pci_of_msi_init(void)
131{
132 struct device_node *np;
133
114 for (np = of_find_matching_node(NULL, its_device_id); np; 134 for (np = of_find_matching_node(NULL, its_device_id); np;
115 np = of_find_matching_node(np, its_device_id)) { 135 np = of_find_matching_node(np, its_device_id)) {
116 if (!of_property_read_bool(np, "msi-controller")) 136 if (!of_property_read_bool(np, "msi-controller"))
117 continue; 137 continue;
118 138
119 parent = irq_find_matching_host(np, DOMAIN_BUS_NEXUS); 139 if (its_pci_msi_init_one(of_node_to_fwnode(np), np->full_name))
120 if (!parent || !msi_get_domain_info(parent)) {
121 pr_err("%s: unable to locate ITS domain\n",
122 np->full_name);
123 continue;
124 }
125
126 if (!pci_msi_create_irq_domain(of_node_to_fwnode(np),
127 &its_pci_msi_domain_info,
128 parent)) {
129 pr_err("%s: unable to create PCI domain\n",
130 np->full_name);
131 continue; 140 continue;
132 }
133 141
134 pr_info("PCI/MSI: %s domain created\n", np->full_name); 142 pr_info("PCI/MSI: %s domain created\n", np->full_name);
135 } 143 }
136 144
137 return 0; 145 return 0;
138} 146}
147
148#ifdef CONFIG_ACPI
149
150static int __init
151its_pci_msi_parse_madt(struct acpi_subtable_header *header,
152 const unsigned long end)
153{
154 struct acpi_madt_generic_translator *its_entry;
155 struct fwnode_handle *dom_handle;
156 const char *node_name;
157 int err = -ENXIO;
158
159 its_entry = (struct acpi_madt_generic_translator *)header;
160 node_name = kasprintf(GFP_KERNEL, "ITS@0x%lx",
161 (long)its_entry->base_address);
162 dom_handle = iort_find_domain_token(its_entry->translation_id);
163 if (!dom_handle) {
164 pr_err("%s: Unable to locate ITS domain handle\n", node_name);
165 goto out;
166 }
167
168 err = its_pci_msi_init_one(dom_handle, node_name);
169 if (!err)
170 pr_info("PCI/MSI: %s domain created\n", node_name);
171
172out:
173 kfree(node_name);
174 return err;
175}
176
177static int __init its_pci_acpi_msi_init(void)
178{
179 acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR,
180 its_pci_msi_parse_madt, 0);
181 return 0;
182}
183#else
184static int __init its_pci_acpi_msi_init(void)
185{
186 return 0;
187}
188#endif
189
190static int __init its_pci_msi_init(void)
191{
192 its_pci_of_msi_init();
193 its_pci_acpi_msi_init();
194
195 return 0;
196}
139early_initcall(its_pci_msi_init); 197early_initcall(its_pci_msi_init);
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 36b9c28a5c91..35c851c14e49 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -15,10 +15,13 @@
15 * along with this program. If not, see <http://www.gnu.org/licenses/>. 15 * along with this program. If not, see <http://www.gnu.org/licenses/>.
16 */ 16 */
17 17
18#include <linux/acpi.h>
18#include <linux/bitmap.h> 19#include <linux/bitmap.h>
19#include <linux/cpu.h> 20#include <linux/cpu.h>
20#include <linux/delay.h> 21#include <linux/delay.h>
21#include <linux/interrupt.h> 22#include <linux/interrupt.h>
23#include <linux/irqdomain.h>
24#include <linux/acpi_iort.h>
22#include <linux/log2.h> 25#include <linux/log2.h>
23#include <linux/mm.h> 26#include <linux/mm.h>
24#include <linux/msi.h> 27#include <linux/msi.h>
@@ -75,7 +78,7 @@ struct its_node {
75 raw_spinlock_t lock; 78 raw_spinlock_t lock;
76 struct list_head entry; 79 struct list_head entry;
77 void __iomem *base; 80 void __iomem *base;
78 unsigned long phys_base; 81 phys_addr_t phys_base;
79 struct its_cmd_block *cmd_base; 82 struct its_cmd_block *cmd_base;
80 struct its_cmd_block *cmd_write; 83 struct its_cmd_block *cmd_write;
81 struct its_baser tables[GITS_BASER_NR_REGS]; 84 struct its_baser tables[GITS_BASER_NR_REGS];
@@ -115,6 +118,7 @@ struct its_device {
115static LIST_HEAD(its_nodes); 118static LIST_HEAD(its_nodes);
116static DEFINE_SPINLOCK(its_lock); 119static DEFINE_SPINLOCK(its_lock);
117static struct rdists *gic_rdists; 120static struct rdists *gic_rdists;
121static struct irq_domain *its_parent;
118 122
119#define gic_data_rdist() (raw_cpu_ptr(gic_rdists->rdist)) 123#define gic_data_rdist() (raw_cpu_ptr(gic_rdists->rdist))
120#define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base) 124#define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base)
@@ -1437,6 +1441,11 @@ static int its_irq_gic_domain_alloc(struct irq_domain *domain,
1437 fwspec.param[0] = GIC_IRQ_TYPE_LPI; 1441 fwspec.param[0] = GIC_IRQ_TYPE_LPI;
1438 fwspec.param[1] = hwirq; 1442 fwspec.param[1] = hwirq;
1439 fwspec.param[2] = IRQ_TYPE_EDGE_RISING; 1443 fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
1444 } else if (is_fwnode_irqchip(domain->parent->fwnode)) {
1445 fwspec.fwnode = domain->parent->fwnode;
1446 fwspec.param_count = 2;
1447 fwspec.param[0] = hwirq;
1448 fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
1440 } else { 1449 } else {
1441 return -EINVAL; 1450 return -EINVAL;
1442 } 1451 }
@@ -1614,44 +1623,59 @@ static void its_enable_quirks(struct its_node *its)
1614 gic_enable_quirks(iidr, its_quirks, its); 1623 gic_enable_quirks(iidr, its_quirks, its);
1615} 1624}
1616 1625
1617static int __init its_probe(struct device_node *node, 1626static int its_init_domain(struct fwnode_handle *handle, struct its_node *its)
1618 struct irq_domain *parent) 1627{
1628 struct irq_domain *inner_domain;
1629 struct msi_domain_info *info;
1630
1631 info = kzalloc(sizeof(*info), GFP_KERNEL);
1632 if (!info)
1633 return -ENOMEM;
1634
1635 inner_domain = irq_domain_create_tree(handle, &its_domain_ops, its);
1636 if (!inner_domain) {
1637 kfree(info);
1638 return -ENOMEM;
1639 }
1640
1641 inner_domain->parent = its_parent;
1642 inner_domain->bus_token = DOMAIN_BUS_NEXUS;
1643 info->ops = &its_msi_domain_ops;
1644 info->data = its;
1645 inner_domain->host_data = info;
1646
1647 return 0;
1648}
1649
1650static int __init its_probe_one(struct resource *res,
1651 struct fwnode_handle *handle, int numa_node)
1619{ 1652{
1620 struct resource res;
1621 struct its_node *its; 1653 struct its_node *its;
1622 void __iomem *its_base; 1654 void __iomem *its_base;
1623 struct irq_domain *inner_domain;
1624 u32 val; 1655 u32 val;
1625 u64 baser, tmp; 1656 u64 baser, tmp;
1626 int err; 1657 int err;
1627 1658
1628 err = of_address_to_resource(node, 0, &res); 1659 its_base = ioremap(res->start, resource_size(res));
1629 if (err) {
1630 pr_warn("%s: no regs?\n", node->full_name);
1631 return -ENXIO;
1632 }
1633
1634 its_base = ioremap(res.start, resource_size(&res));
1635 if (!its_base) { 1660 if (!its_base) {
1636 pr_warn("%s: unable to map registers\n", node->full_name); 1661 pr_warn("ITS@%pa: Unable to map ITS registers\n", &res->start);
1637 return -ENOMEM; 1662 return -ENOMEM;
1638 } 1663 }
1639 1664
1640 val = readl_relaxed(its_base + GITS_PIDR2) & GIC_PIDR2_ARCH_MASK; 1665 val = readl_relaxed(its_base + GITS_PIDR2) & GIC_PIDR2_ARCH_MASK;
1641 if (val != 0x30 && val != 0x40) { 1666 if (val != 0x30 && val != 0x40) {
1642 pr_warn("%s: no ITS detected, giving up\n", node->full_name); 1667 pr_warn("ITS@%pa: No ITS detected, giving up\n", &res->start);
1643 err = -ENODEV; 1668 err = -ENODEV;
1644 goto out_unmap; 1669 goto out_unmap;
1645 } 1670 }
1646 1671
1647 err = its_force_quiescent(its_base); 1672 err = its_force_quiescent(its_base);
1648 if (err) { 1673 if (err) {
1649 pr_warn("%s: failed to quiesce, giving up\n", 1674 pr_warn("ITS@%pa: Failed to quiesce, giving up\n", &res->start);
1650 node->full_name);
1651 goto out_unmap; 1675 goto out_unmap;
1652 } 1676 }
1653 1677
1654 pr_info("ITS: %s\n", node->full_name); 1678 pr_info("ITS %pR\n", res);
1655 1679
1656 its = kzalloc(sizeof(*its), GFP_KERNEL); 1680 its = kzalloc(sizeof(*its), GFP_KERNEL);
1657 if (!its) { 1681 if (!its) {
@@ -1663,9 +1687,9 @@ static int __init its_probe(struct device_node *node,
1663 INIT_LIST_HEAD(&its->entry); 1687 INIT_LIST_HEAD(&its->entry);
1664 INIT_LIST_HEAD(&its->its_device_list); 1688 INIT_LIST_HEAD(&its->its_device_list);
1665 its->base = its_base; 1689 its->base = its_base;
1666 its->phys_base = res.start; 1690 its->phys_base = res->start;
1667 its->ite_size = ((readl_relaxed(its_base + GITS_TYPER) >> 4) & 0xf) + 1; 1691 its->ite_size = ((readl_relaxed(its_base + GITS_TYPER) >> 4) & 0xf) + 1;
1668 its->numa_node = of_node_to_nid(node); 1692 its->numa_node = numa_node;
1669 1693
1670 its->cmd_base = kzalloc(ITS_CMD_QUEUE_SZ, GFP_KERNEL); 1694 its->cmd_base = kzalloc(ITS_CMD_QUEUE_SZ, GFP_KERNEL);
1671 if (!its->cmd_base) { 1695 if (!its->cmd_base) {
@@ -1712,28 +1736,9 @@ static int __init its_probe(struct device_node *node,
1712 writeq_relaxed(0, its->base + GITS_CWRITER); 1736 writeq_relaxed(0, its->base + GITS_CWRITER);
1713 writel_relaxed(GITS_CTLR_ENABLE, its->base + GITS_CTLR); 1737 writel_relaxed(GITS_CTLR_ENABLE, its->base + GITS_CTLR);
1714 1738
1715 if (of_property_read_bool(node, "msi-controller")) { 1739 err = its_init_domain(handle, its);
1716 struct msi_domain_info *info; 1740 if (err)
1717 1741 goto out_free_tables;
1718 info = kzalloc(sizeof(*info), GFP_KERNEL);
1719 if (!info) {
1720 err = -ENOMEM;
1721 goto out_free_tables;
1722 }
1723
1724 inner_domain = irq_domain_add_tree(node, &its_domain_ops, its);
1725 if (!inner_domain) {
1726 err = -ENOMEM;
1727 kfree(info);
1728 goto out_free_tables;
1729 }
1730
1731 inner_domain->parent = parent;
1732 inner_domain->bus_token = DOMAIN_BUS_NEXUS;
1733 info->ops = &its_msi_domain_ops;
1734 info->data = its;
1735 inner_domain->host_data = info;
1736 }
1737 1742
1738 spin_lock(&its_lock); 1743 spin_lock(&its_lock);
1739 list_add(&its->entry, &its_nodes); 1744 list_add(&its->entry, &its_nodes);
@@ -1749,7 +1754,7 @@ out_free_its:
1749 kfree(its); 1754 kfree(its);
1750out_unmap: 1755out_unmap:
1751 iounmap(its_base); 1756 iounmap(its_base);
1752 pr_err("ITS: failed probing %s (%d)\n", node->full_name, err); 1757 pr_err("ITS@%pa: failed probing (%d)\n", &res->start, err);
1753 return err; 1758 return err;
1754} 1759}
1755 1760
@@ -1777,16 +1782,92 @@ static struct of_device_id its_device_id[] = {
1777 {}, 1782 {},
1778}; 1783};
1779 1784
1780int __init its_init(struct device_node *node, struct rdists *rdists, 1785static int __init its_of_probe(struct device_node *node)
1781 struct irq_domain *parent_domain)
1782{ 1786{
1783 struct device_node *np; 1787 struct device_node *np;
1788 struct resource res;
1784 1789
1785 for (np = of_find_matching_node(node, its_device_id); np; 1790 for (np = of_find_matching_node(node, its_device_id); np;
1786 np = of_find_matching_node(np, its_device_id)) { 1791 np = of_find_matching_node(np, its_device_id)) {
1787 its_probe(np, parent_domain); 1792 if (!of_property_read_bool(np, "msi-controller")) {
1793 pr_warn("%s: no msi-controller property, ITS ignored\n",
1794 np->full_name);
1795 continue;
1796 }
1797
1798 if (of_address_to_resource(np, 0, &res)) {
1799 pr_warn("%s: no regs?\n", np->full_name);
1800 continue;
1801 }
1802
1803 its_probe_one(&res, &np->fwnode, of_node_to_nid(np));
1804 }
1805 return 0;
1806}
1807
1808#ifdef CONFIG_ACPI
1809
1810#define ACPI_GICV3_ITS_MEM_SIZE (SZ_128K)
1811
1812static int __init gic_acpi_parse_madt_its(struct acpi_subtable_header *header,
1813 const unsigned long end)
1814{
1815 struct acpi_madt_generic_translator *its_entry;
1816 struct fwnode_handle *dom_handle;
1817 struct resource res;
1818 int err;
1819
1820 its_entry = (struct acpi_madt_generic_translator *)header;
1821 memset(&res, 0, sizeof(res));
1822 res.start = its_entry->base_address;
1823 res.end = its_entry->base_address + ACPI_GICV3_ITS_MEM_SIZE - 1;
1824 res.flags = IORESOURCE_MEM;
1825
1826 dom_handle = irq_domain_alloc_fwnode((void *)its_entry->base_address);
1827 if (!dom_handle) {
1828 pr_err("ITS@%pa: Unable to allocate GICv3 ITS domain token\n",
1829 &res.start);
1830 return -ENOMEM;
1788 } 1831 }
1789 1832
1833 err = iort_register_domain_token(its_entry->translation_id, dom_handle);
1834 if (err) {
1835 pr_err("ITS@%pa: Unable to register GICv3 ITS domain token (ITS ID %d) to IORT\n",
1836 &res.start, its_entry->translation_id);
1837 goto dom_err;
1838 }
1839
1840 err = its_probe_one(&res, dom_handle, NUMA_NO_NODE);
1841 if (!err)
1842 return 0;
1843
1844 iort_deregister_domain_token(its_entry->translation_id);
1845dom_err:
1846 irq_domain_free_fwnode(dom_handle);
1847 return err;
1848}
1849
1850static void __init its_acpi_probe(void)
1851{
1852 acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR,
1853 gic_acpi_parse_madt_its, 0);
1854}
1855#else
1856static void __init its_acpi_probe(void) { }
1857#endif
1858
1859int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
1860 struct irq_domain *parent_domain)
1861{
1862 struct device_node *of_node;
1863
1864 its_parent = parent_domain;
1865 of_node = to_of_node(handle);
1866 if (of_node)
1867 its_of_probe(of_node);
1868 else
1869 its_acpi_probe();
1870
1790 if (list_empty(&its_nodes)) { 1871 if (list_empty(&its_nodes)) {
1791 pr_warn("ITS: No ITS available, not enabling LPIs\n"); 1872 pr_warn("ITS: No ITS available, not enabling LPIs\n");
1792 return -ENXIO; 1873 return -ENXIO;
diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
index da6c0ba61d4f..9b81bd8b929c 100644
--- a/drivers/irqchip/irq-gic-v3.c
+++ b/drivers/irqchip/irq-gic-v3.c
@@ -495,6 +495,14 @@ static void gic_cpu_sys_reg_init(void)
495 /* Set priority mask register */ 495 /* Set priority mask register */
496 gic_write_pmr(DEFAULT_PMR_VALUE); 496 gic_write_pmr(DEFAULT_PMR_VALUE);
497 497
498 /*
499 * Some firmwares hand over to the kernel with the BPR changed from
500 * its reset value (and with a value large enough to prevent
501 * any pre-emptive interrupts from working at all). Writing a zero
502 * to BPR restores is reset value.
503 */
504 gic_write_bpr1(0);
505
498 if (static_key_true(&supports_deactivate)) { 506 if (static_key_true(&supports_deactivate)) {
499 /* EOI drops priority only (mode 1) */ 507 /* EOI drops priority only (mode 1) */
500 gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop); 508 gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop);
@@ -911,7 +919,6 @@ static int __init gic_init_bases(void __iomem *dist_base,
911 u64 redist_stride, 919 u64 redist_stride,
912 struct fwnode_handle *handle) 920 struct fwnode_handle *handle)
913{ 921{
914 struct device_node *node;
915 u32 typer; 922 u32 typer;
916 int gic_irqs; 923 int gic_irqs;
917 int err; 924 int err;
@@ -952,10 +959,8 @@ static int __init gic_init_bases(void __iomem *dist_base,
952 959
953 set_handle_irq(gic_handle_irq); 960 set_handle_irq(gic_handle_irq);
954 961
955 node = to_of_node(handle); 962 if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis())
956 if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis() && 963 its_init(handle, &gic_data.rdists, gic_data.domain);
957 node) /* Temp hack to prevent ITS init for ACPI */
958 its_init(node, &gic_data.rdists, gic_data.domain);
959 964
960 gic_smp_init(); 965 gic_smp_init();
961 gic_dist_init(); 966 gic_dist_init();
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 390fac59c6bc..58e5b4e87056 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -91,7 +91,27 @@ struct gic_chip_data {
91#endif 91#endif
92}; 92};
93 93
94static DEFINE_RAW_SPINLOCK(irq_controller_lock); 94#ifdef CONFIG_BL_SWITCHER
95
96static DEFINE_RAW_SPINLOCK(cpu_map_lock);
97
98#define gic_lock_irqsave(f) \
99 raw_spin_lock_irqsave(&cpu_map_lock, (f))
100#define gic_unlock_irqrestore(f) \
101 raw_spin_unlock_irqrestore(&cpu_map_lock, (f))
102
103#define gic_lock() raw_spin_lock(&cpu_map_lock)
104#define gic_unlock() raw_spin_unlock(&cpu_map_lock)
105
106#else
107
108#define gic_lock_irqsave(f) do { (void)(f); } while(0)
109#define gic_unlock_irqrestore(f) do { (void)(f); } while(0)
110
111#define gic_lock() do { } while(0)
112#define gic_unlock() do { } while(0)
113
114#endif
95 115
96/* 116/*
97 * The GIC mapping of CPU interfaces does not necessarily match 117 * The GIC mapping of CPU interfaces does not necessarily match
@@ -317,12 +337,12 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
317 if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids) 337 if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
318 return -EINVAL; 338 return -EINVAL;
319 339
320 raw_spin_lock_irqsave(&irq_controller_lock, flags); 340 gic_lock_irqsave(flags);
321 mask = 0xff << shift; 341 mask = 0xff << shift;
322 bit = gic_cpu_map[cpu] << shift; 342 bit = gic_cpu_map[cpu] << shift;
323 val = readl_relaxed(reg) & ~mask; 343 val = readl_relaxed(reg) & ~mask;
324 writel_relaxed(val | bit, reg); 344 writel_relaxed(val | bit, reg);
325 raw_spin_unlock_irqrestore(&irq_controller_lock, flags); 345 gic_unlock_irqrestore(flags);
326 346
327 return IRQ_SET_MASK_OK_DONE; 347 return IRQ_SET_MASK_OK_DONE;
328} 348}
@@ -374,9 +394,7 @@ static void gic_handle_cascade_irq(struct irq_desc *desc)
374 394
375 chained_irq_enter(chip, desc); 395 chained_irq_enter(chip, desc);
376 396
377 raw_spin_lock(&irq_controller_lock);
378 status = readl_relaxed(gic_data_cpu_base(chip_data) + GIC_CPU_INTACK); 397 status = readl_relaxed(gic_data_cpu_base(chip_data) + GIC_CPU_INTACK);
379 raw_spin_unlock(&irq_controller_lock);
380 398
381 gic_irq = (status & GICC_IAR_INT_ID_MASK); 399 gic_irq = (status & GICC_IAR_INT_ID_MASK);
382 if (gic_irq == GICC_INT_SPURIOUS) 400 if (gic_irq == GICC_INT_SPURIOUS)
@@ -776,7 +794,7 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
776 return; 794 return;
777 } 795 }
778 796
779 raw_spin_lock_irqsave(&irq_controller_lock, flags); 797 gic_lock_irqsave(flags);
780 798
781 /* Convert our logical CPU mask into a physical one. */ 799 /* Convert our logical CPU mask into a physical one. */
782 for_each_cpu(cpu, mask) 800 for_each_cpu(cpu, mask)
@@ -791,7 +809,7 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
791 /* this always happens on GIC0 */ 809 /* this always happens on GIC0 */
792 writel_relaxed(map << 16 | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT); 810 writel_relaxed(map << 16 | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
793 811
794 raw_spin_unlock_irqrestore(&irq_controller_lock, flags); 812 gic_unlock_irqrestore(flags);
795} 813}
796#endif 814#endif
797 815
@@ -859,7 +877,7 @@ void gic_migrate_target(unsigned int new_cpu_id)
859 cur_target_mask = 0x01010101 << cur_cpu_id; 877 cur_target_mask = 0x01010101 << cur_cpu_id;
860 ror_val = (cur_cpu_id - new_cpu_id) & 31; 878 ror_val = (cur_cpu_id - new_cpu_id) & 31;
861 879
862 raw_spin_lock(&irq_controller_lock); 880 gic_lock();
863 881
864 /* Update the target interface for this logical CPU */ 882 /* Update the target interface for this logical CPU */
865 gic_cpu_map[cpu] = 1 << new_cpu_id; 883 gic_cpu_map[cpu] = 1 << new_cpu_id;
@@ -879,7 +897,7 @@ void gic_migrate_target(unsigned int new_cpu_id)
879 } 897 }
880 } 898 }
881 899
882 raw_spin_unlock(&irq_controller_lock); 900 gic_unlock();
883 901
884 /* 902 /*
885 * Now let's migrate and clear any potential SGIs that might be 903 * Now let's migrate and clear any potential SGIs that might be
@@ -921,7 +939,7 @@ unsigned long gic_get_sgir_physaddr(void)
921 return gic_dist_physaddr + GIC_DIST_SOFTINT; 939 return gic_dist_physaddr + GIC_DIST_SOFTINT;
922} 940}
923 941
924void __init gic_init_physaddr(struct device_node *node) 942static void __init gic_init_physaddr(struct device_node *node)
925{ 943{
926 struct resource res; 944 struct resource res;
927 if (of_address_to_resource(node, 0, &res) == 0) { 945 if (of_address_to_resource(node, 0, &res) == 0) {
diff --git a/drivers/irqchip/irq-jcore-aic.c b/drivers/irqchip/irq-jcore-aic.c
new file mode 100644
index 000000000000..84b01dec277d
--- /dev/null
+++ b/drivers/irqchip/irq-jcore-aic.c
@@ -0,0 +1,95 @@
1/*
2 * J-Core SoC AIC driver
3 *
4 * Copyright (C) 2015-2016 Smart Energy Instruments, Inc.
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 */
10
11#include <linux/irq.h>
12#include <linux/io.h>
13#include <linux/irqchip.h>
14#include <linux/irqdomain.h>
15#include <linux/cpu.h>
16#include <linux/of.h>
17#include <linux/of_address.h>
18#include <linux/of_irq.h>
19
20#define JCORE_AIC_MAX_HWIRQ 127
21#define JCORE_AIC1_MIN_HWIRQ 16
22#define JCORE_AIC2_MIN_HWIRQ 64
23
24#define JCORE_AIC1_INTPRI_REG 8
25
26static struct irq_chip jcore_aic;
27
28static int jcore_aic_irqdomain_map(struct irq_domain *d, unsigned int irq,
29 irq_hw_number_t hwirq)
30{
31 struct irq_chip *aic = d->host_data;
32
33 irq_set_chip_and_handler(irq, aic, handle_simple_irq);
34
35 return 0;
36}
37
38static const struct irq_domain_ops jcore_aic_irqdomain_ops = {
39 .map = jcore_aic_irqdomain_map,
40 .xlate = irq_domain_xlate_onecell,
41};
42
43static void noop(struct irq_data *data)
44{
45}
46
47static int __init aic_irq_of_init(struct device_node *node,
48 struct device_node *parent)
49{
50 unsigned min_irq = JCORE_AIC2_MIN_HWIRQ;
51 unsigned dom_sz = JCORE_AIC_MAX_HWIRQ+1;
52 struct irq_domain *domain;
53
54 pr_info("Initializing J-Core AIC\n");
55
56 /* AIC1 needs priority initialization to receive interrupts. */
57 if (of_device_is_compatible(node, "jcore,aic1")) {
58 unsigned cpu;
59
60 for_each_present_cpu(cpu) {
61 void __iomem *base = of_iomap(node, cpu);
62
63 if (!base) {
64 pr_err("Unable to map AIC for cpu %u\n", cpu);
65 return -ENOMEM;
66 }
67 __raw_writel(0xffffffff, base + JCORE_AIC1_INTPRI_REG);
68 iounmap(base);
69 }
70 min_irq = JCORE_AIC1_MIN_HWIRQ;
71 }
72
73 /*
74 * The irq chip framework requires either mask/unmask or enable/disable
75 * function pointers to be provided, but the hardware does not have any
76 * such mechanism; the only interrupt masking is at the cpu level and
77 * it affects all interrupts. We provide dummy mask/unmask. The hardware
78 * handles all interrupt control and clears pending status when the cpu
79 * accepts the interrupt.
80 */
81 jcore_aic.irq_mask = noop;
82 jcore_aic.irq_unmask = noop;
83 jcore_aic.name = "AIC";
84
85 domain = irq_domain_add_linear(node, dom_sz, &jcore_aic_irqdomain_ops,
86 &jcore_aic);
87 if (!domain)
88 return -ENOMEM;
89 irq_create_strict_mappings(domain, min_irq, min_irq, dom_sz - min_irq);
90
91 return 0;
92}
93
94IRQCHIP_DECLARE(jcore_aic2, "jcore,aic2", aic_irq_of_init);
95IRQCHIP_DECLARE(jcore_aic1, "jcore,aic1", aic_irq_of_init);
diff --git a/drivers/irqchip/irq-keystone.c b/drivers/irqchip/irq-keystone.c
index deb89d63a728..54a5e870a8f5 100644
--- a/drivers/irqchip/irq-keystone.c
+++ b/drivers/irqchip/irq-keystone.c
@@ -109,7 +109,7 @@ static void keystone_irq_handler(struct irq_desc *desc)
109 dev_dbg(kirq->dev, "dispatch bit %d, virq %d\n", 109 dev_dbg(kirq->dev, "dispatch bit %d, virq %d\n",
110 src, virq); 110 src, virq);
111 if (!virq) 111 if (!virq)
112 dev_warn(kirq->dev, "sporious irq detected hwirq %d, virq %d\n", 112 dev_warn(kirq->dev, "spurious irq detected hwirq %d, virq %d\n",
113 src, virq); 113 src, virq);
114 generic_handle_irq(virq); 114 generic_handle_irq(virq);
115 } 115 }
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index 6185696405d5..c0178a122940 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -371,18 +371,13 @@ static void gic_handle_shared_int(bool chained)
371 bitmap_and(pending, pending, intrmask, gic_shared_intrs); 371 bitmap_and(pending, pending, intrmask, gic_shared_intrs);
372 bitmap_and(pending, pending, pcpu_mask, gic_shared_intrs); 372 bitmap_and(pending, pending, pcpu_mask, gic_shared_intrs);
373 373
374 intr = find_first_bit(pending, gic_shared_intrs); 374 for_each_set_bit(intr, pending, gic_shared_intrs) {
375 while (intr != gic_shared_intrs) {
376 virq = irq_linear_revmap(gic_irq_domain, 375 virq = irq_linear_revmap(gic_irq_domain,
377 GIC_SHARED_TO_HWIRQ(intr)); 376 GIC_SHARED_TO_HWIRQ(intr));
378 if (chained) 377 if (chained)
379 generic_handle_irq(virq); 378 generic_handle_irq(virq);
380 else 379 else
381 do_IRQ(virq); 380 do_IRQ(virq);
382
383 /* go to next pending bit */
384 bitmap_clear(pending, intr, 1);
385 intr = find_first_bit(pending, gic_shared_intrs);
386 } 381 }
387} 382}
388 383
@@ -518,18 +513,13 @@ static void gic_handle_local_int(bool chained)
518 513
519 bitmap_and(&pending, &pending, &masked, GIC_NUM_LOCAL_INTRS); 514 bitmap_and(&pending, &pending, &masked, GIC_NUM_LOCAL_INTRS);
520 515
521 intr = find_first_bit(&pending, GIC_NUM_LOCAL_INTRS); 516 for_each_set_bit(intr, &pending, GIC_NUM_LOCAL_INTRS) {
522 while (intr != GIC_NUM_LOCAL_INTRS) {
523 virq = irq_linear_revmap(gic_irq_domain, 517 virq = irq_linear_revmap(gic_irq_domain,
524 GIC_LOCAL_TO_HWIRQ(intr)); 518 GIC_LOCAL_TO_HWIRQ(intr));
525 if (chained) 519 if (chained)
526 generic_handle_irq(virq); 520 generic_handle_irq(virq);
527 else 521 else
528 do_IRQ(virq); 522 do_IRQ(virq);
529
530 /* go to next pending bit */
531 bitmap_clear(&pending, intr, 1);
532 intr = find_first_bit(&pending, GIC_NUM_LOCAL_INTRS);
533 } 523 }
534} 524}
535 525
diff --git a/drivers/irqchip/irq-mvebu-pic.c b/drivers/irqchip/irq-mvebu-pic.c
new file mode 100644
index 000000000000..eec63951129a
--- /dev/null
+++ b/drivers/irqchip/irq-mvebu-pic.c
@@ -0,0 +1,197 @@
1/*
2 * Copyright (C) 2016 Marvell
3 *
4 * Yehuda Yitschak <yehuday@marvell.com>
5 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
6 *
7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any
9 * warranty of any kind, whether express or implied.
10 */
11
12#include <linux/interrupt.h>
13#include <linux/io.h>
14#include <linux/irq.h>
15#include <linux/irqchip.h>
16#include <linux/irqchip/chained_irq.h>
17#include <linux/irqdomain.h>
18#include <linux/module.h>
19#include <linux/of_irq.h>
20#include <linux/platform_device.h>
21
22#define PIC_CAUSE 0x0
23#define PIC_MASK 0x4
24
25#define PIC_MAX_IRQS 32
26#define PIC_MAX_IRQ_MASK ((1UL << PIC_MAX_IRQS) - 1)
27
28struct mvebu_pic {
29 void __iomem *base;
30 u32 parent_irq;
31 struct irq_domain *domain;
32 struct irq_chip irq_chip;
33};
34
35static void mvebu_pic_reset(struct mvebu_pic *pic)
36{
37 /* ACK and mask all interrupts */
38 writel(0, pic->base + PIC_MASK);
39 writel(PIC_MAX_IRQ_MASK, pic->base + PIC_CAUSE);
40}
41
42static void mvebu_pic_eoi_irq(struct irq_data *d)
43{
44 struct mvebu_pic *pic = irq_data_get_irq_chip_data(d);
45
46 writel(1 << d->hwirq, pic->base + PIC_CAUSE);
47}
48
49static void mvebu_pic_mask_irq(struct irq_data *d)
50{
51 struct mvebu_pic *pic = irq_data_get_irq_chip_data(d);
52 u32 reg;
53
54 reg = readl(pic->base + PIC_MASK);
55 reg |= (1 << d->hwirq);
56 writel(reg, pic->base + PIC_MASK);
57}
58
59static void mvebu_pic_unmask_irq(struct irq_data *d)
60{
61 struct mvebu_pic *pic = irq_data_get_irq_chip_data(d);
62 u32 reg;
63
64 reg = readl(pic->base + PIC_MASK);
65 reg &= ~(1 << d->hwirq);
66 writel(reg, pic->base + PIC_MASK);
67}
68
69static int mvebu_pic_irq_map(struct irq_domain *domain, unsigned int virq,
70 irq_hw_number_t hwirq)
71{
72 struct mvebu_pic *pic = domain->host_data;
73
74 irq_set_percpu_devid(virq);
75 irq_set_chip_data(virq, pic);
76 irq_set_chip_and_handler(virq, &pic->irq_chip,
77 handle_percpu_devid_irq);
78 irq_set_status_flags(virq, IRQ_LEVEL);
79 irq_set_probe(virq);
80
81 return 0;
82}
83
84static const struct irq_domain_ops mvebu_pic_domain_ops = {
85 .map = mvebu_pic_irq_map,
86 .xlate = irq_domain_xlate_onecell,
87};
88
89static void mvebu_pic_handle_cascade_irq(struct irq_desc *desc)
90{
91 struct mvebu_pic *pic = irq_desc_get_handler_data(desc);
92 struct irq_chip *chip = irq_desc_get_chip(desc);
93 unsigned long irqmap, irqn;
94 unsigned int cascade_irq;
95
96 irqmap = readl_relaxed(pic->base + PIC_CAUSE);
97 chained_irq_enter(chip, desc);
98
99 for_each_set_bit(irqn, &irqmap, BITS_PER_LONG) {
100 cascade_irq = irq_find_mapping(pic->domain, irqn);
101 generic_handle_irq(cascade_irq);
102 }
103
104 chained_irq_exit(chip, desc);
105}
106
107static void mvebu_pic_enable_percpu_irq(void *data)
108{
109 struct mvebu_pic *pic = data;
110
111 mvebu_pic_reset(pic);
112 enable_percpu_irq(pic->parent_irq, IRQ_TYPE_NONE);
113}
114
115static void mvebu_pic_disable_percpu_irq(void *data)
116{
117 struct mvebu_pic *pic = data;
118
119 disable_percpu_irq(pic->parent_irq);
120}
121
122static int mvebu_pic_probe(struct platform_device *pdev)
123{
124 struct device_node *node = pdev->dev.of_node;
125 struct mvebu_pic *pic;
126 struct irq_chip *irq_chip;
127 struct resource *res;
128
129 pic = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_pic), GFP_KERNEL);
130 if (!pic)
131 return -ENOMEM;
132
133 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
134 pic->base = devm_ioremap_resource(&pdev->dev, res);
135 if (IS_ERR(pic->base))
136 return PTR_ERR(pic->base);
137
138 irq_chip = &pic->irq_chip;
139 irq_chip->name = dev_name(&pdev->dev);
140 irq_chip->irq_mask = mvebu_pic_mask_irq;
141 irq_chip->irq_unmask = mvebu_pic_unmask_irq;
142 irq_chip->irq_eoi = mvebu_pic_eoi_irq;
143
144 pic->parent_irq = irq_of_parse_and_map(node, 0);
145 if (pic->parent_irq <= 0) {
146 dev_err(&pdev->dev, "Failed to parse parent interrupt\n");
147 return -EINVAL;
148 }
149
150 pic->domain = irq_domain_add_linear(node, PIC_MAX_IRQS,
151 &mvebu_pic_domain_ops, pic);
152 if (!pic->domain) {
153 dev_err(&pdev->dev, "Failed to allocate irq domain\n");
154 return -ENOMEM;
155 }
156
157 irq_set_chained_handler(pic->parent_irq, mvebu_pic_handle_cascade_irq);
158 irq_set_handler_data(pic->parent_irq, pic);
159
160 on_each_cpu(mvebu_pic_enable_percpu_irq, pic, 1);
161
162 platform_set_drvdata(pdev, pic);
163
164 return 0;
165}
166
167static int mvebu_pic_remove(struct platform_device *pdev)
168{
169 struct mvebu_pic *pic = platform_get_drvdata(pdev);
170
171 on_each_cpu(mvebu_pic_disable_percpu_irq, pic, 1);
172 irq_domain_remove(pic->domain);
173
174 return 0;
175}
176
177static const struct of_device_id mvebu_pic_of_match[] = {
178 { .compatible = "marvell,armada-8k-pic", },
179 {},
180};
181MODULE_DEVICE_TABLE(of, mvebu_pic_of_match);
182
183static struct platform_driver mvebu_pic_driver = {
184 .probe = mvebu_pic_probe,
185 .remove = mvebu_pic_remove,
186 .driver = {
187 .name = "mvebu-pic",
188 .of_match_table = mvebu_pic_of_match,
189 },
190};
191module_platform_driver(mvebu_pic_driver);
192
193MODULE_AUTHOR("Yehuda Yitschak <yehuday@marvell.com>");
194MODULE_AUTHOR("Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
195MODULE_LICENSE("GPL v2");
196MODULE_ALIAS("platform:mvebu_pic");
197
diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c
new file mode 100644
index 000000000000..491568c95aa5
--- /dev/null
+++ b/drivers/irqchip/irq-stm32-exti.c
@@ -0,0 +1,201 @@
1/*
2 * Copyright (C) Maxime Coquelin 2015
3 * Author: Maxime Coquelin <mcoquelin.stm32@gmail.com>
4 * License terms: GNU General Public License (GPL), version 2
5 */
6
7#include <linux/bitops.h>
8#include <linux/interrupt.h>
9#include <linux/io.h>
10#include <linux/irq.h>
11#include <linux/irqchip.h>
12#include <linux/irqchip/chained_irq.h>
13#include <linux/irqdomain.h>
14#include <linux/of_address.h>
15#include <linux/of_irq.h>
16
17#define EXTI_IMR 0x0
18#define EXTI_EMR 0x4
19#define EXTI_RTSR 0x8
20#define EXTI_FTSR 0xc
21#define EXTI_SWIER 0x10
22#define EXTI_PR 0x14
23
/*
 * Chained flow handler for the EXTI parent interrupts: demultiplex the
 * pending EXTI lines and invoke the mapped child handlers.
 */
static void stm32_irq_handler(struct irq_desc *desc)
{
	struct irq_domain *domain = irq_desc_get_handler_data(desc);
	struct irq_chip_generic *gc = domain->gc->gc[0];
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned long pending;
	int n;

	chained_irq_enter(chip, desc);

	/* Re-read PR until no line is pending, so late arrivals are caught */
	while ((pending = irq_reg_readl(gc, EXTI_PR))) {
		for_each_set_bit(n, &pending, BITS_PER_LONG) {
			generic_handle_irq(irq_find_mapping(domain, n));
			/*
			 * Ack the line only after it was handled; PR is
			 * cleared by writing the bit back (matches the
			 * irq_gc_ack_set_bit/regs.ack = EXTI_PR setup below).
			 */
			irq_reg_writel(gc, BIT(n), EXTI_PR);
		}
	}

	chained_irq_exit(chip, desc);
}
43
44static int stm32_irq_set_type(struct irq_data *data, unsigned int type)
45{
46 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
47 int pin = data->hwirq;
48 u32 rtsr, ftsr;
49
50 irq_gc_lock(gc);
51
52 rtsr = irq_reg_readl(gc, EXTI_RTSR);
53 ftsr = irq_reg_readl(gc, EXTI_FTSR);
54
55 switch (type) {
56 case IRQ_TYPE_EDGE_RISING:
57 rtsr |= BIT(pin);
58 ftsr &= ~BIT(pin);
59 break;
60 case IRQ_TYPE_EDGE_FALLING:
61 rtsr &= ~BIT(pin);
62 ftsr |= BIT(pin);
63 break;
64 case IRQ_TYPE_EDGE_BOTH:
65 rtsr |= BIT(pin);
66 ftsr |= BIT(pin);
67 break;
68 default:
69 irq_gc_unlock(gc);
70 return -EINVAL;
71 }
72
73 irq_reg_writel(gc, rtsr, EXTI_RTSR);
74 irq_reg_writel(gc, ftsr, EXTI_FTSR);
75
76 irq_gc_unlock(gc);
77
78 return 0;
79}
80
81static int stm32_irq_set_wake(struct irq_data *data, unsigned int on)
82{
83 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
84 int pin = data->hwirq;
85 u32 emr;
86
87 irq_gc_lock(gc);
88
89 emr = irq_reg_readl(gc, EXTI_EMR);
90 if (on)
91 emr |= BIT(pin);
92 else
93 emr &= ~BIT(pin);
94 irq_reg_writel(gc, emr, EXTI_EMR);
95
96 irq_gc_unlock(gc);
97
98 return 0;
99}
100
/*
 * Hierarchical-domain .alloc callback: map a DT specifier onto the
 * generic chip and install the irq_data for the new virq.
 * NOTE(review): only the first vector is mapped here — nr_irqs > 1 is
 * not handled; presumably callers always allocate one at a time.
 */
static int stm32_exti_alloc(struct irq_domain *d, unsigned int virq,
			    unsigned int nr_irqs, void *data)
{
	struct irq_chip_generic *gc = d->gc->gc[0];
	struct irq_fwspec *fwspec = data;
	irq_hw_number_t hwirq;

	/* First cell of the DT specifier is the EXTI line number */
	hwirq = fwspec->param[0];

	irq_map_generic_chip(d, virq, hwirq);
	irq_domain_set_info(d, virq, hwirq, &gc->chip_types->chip, gc,
			    handle_simple_irq, NULL, NULL);

	return 0;
}
116
/* Hierarchical-domain .free callback: tear down the mapping for @virq */
static void stm32_exti_free(struct irq_domain *d, unsigned int virq,
			    unsigned int nr_irqs)
{
	irq_domain_reset_irq_data(irq_domain_get_irq_data(d, virq));
}
124
125struct irq_domain_ops irq_exti_domain_ops = {
126 .map = irq_map_generic_chip,
127 .xlate = irq_domain_xlate_onetwocell,
128 .alloc = stm32_exti_alloc,
129 .free = stm32_exti_free,
130};
131
132static int __init stm32_exti_init(struct device_node *node,
133 struct device_node *parent)
134{
135 unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
136 int nr_irqs, nr_exti, ret, i;
137 struct irq_chip_generic *gc;
138 struct irq_domain *domain;
139 void *base;
140
141 base = of_iomap(node, 0);
142 if (!base) {
143 pr_err("%s: Unable to map registers\n", node->full_name);
144 return -ENOMEM;
145 }
146
147 /* Determine number of irqs supported */
148 writel_relaxed(~0UL, base + EXTI_RTSR);
149 nr_exti = fls(readl_relaxed(base + EXTI_RTSR));
150 writel_relaxed(0, base + EXTI_RTSR);
151
152 pr_info("%s: %d External IRQs detected\n", node->full_name, nr_exti);
153
154 domain = irq_domain_add_linear(node, nr_exti,
155 &irq_exti_domain_ops, NULL);
156 if (!domain) {
157 pr_err("%s: Could not register interrupt domain.\n",
158 node->name);
159 ret = -ENOMEM;
160 goto out_unmap;
161 }
162
163 ret = irq_alloc_domain_generic_chips(domain, nr_exti, 1, "exti",
164 handle_edge_irq, clr, 0, 0);
165 if (ret) {
166 pr_err("%s: Could not allocate generic interrupt chip.\n",
167 node->full_name);
168 goto out_free_domain;
169 }
170
171 gc = domain->gc->gc[0];
172 gc->reg_base = base;
173 gc->chip_types->type = IRQ_TYPE_EDGE_BOTH;
174 gc->chip_types->chip.name = gc->chip_types[0].chip.name;
175 gc->chip_types->chip.irq_ack = irq_gc_ack_set_bit;
176 gc->chip_types->chip.irq_mask = irq_gc_mask_clr_bit;
177 gc->chip_types->chip.irq_unmask = irq_gc_mask_set_bit;
178 gc->chip_types->chip.irq_set_type = stm32_irq_set_type;
179 gc->chip_types->chip.irq_set_wake = stm32_irq_set_wake;
180 gc->chip_types->regs.ack = EXTI_PR;
181 gc->chip_types->regs.mask = EXTI_IMR;
182 gc->chip_types->handler = handle_edge_irq;
183
184 nr_irqs = of_irq_count(node);
185 for (i = 0; i < nr_irqs; i++) {
186 unsigned int irq = irq_of_parse_and_map(node, i);
187
188 irq_set_handler_data(irq, domain);
189 irq_set_chained_handler(irq, stm32_irq_handler);
190 }
191
192 return 0;
193
194out_free_domain:
195 irq_domain_remove(domain);
196out_unmap:
197 iounmap(base);
198 return ret;
199}
200
201IRQCHIP_DECLARE(stm32_exti, "st,stm32-exti", stm32_exti_init);
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 98f12223c734..bfdd0744b686 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -19,6 +19,7 @@
19#include <linux/smp.h> 19#include <linux/smp.h>
20#include <linux/errno.h> 20#include <linux/errno.h>
21#include <linux/io.h> 21#include <linux/io.h>
22#include <linux/acpi_iort.h>
22#include <linux/slab.h> 23#include <linux/slab.h>
23#include <linux/irqdomain.h> 24#include <linux/irqdomain.h>
24#include <linux/of_irq.h> 25#include <linux/of_irq.h>
@@ -549,15 +550,23 @@ error_attrs:
549 return ret; 550 return ret;
550} 551}
551 552
552static struct msi_desc *msi_setup_entry(struct pci_dev *dev, int nvec) 553static struct msi_desc *
554msi_setup_entry(struct pci_dev *dev, int nvec, bool affinity)
553{ 555{
554 u16 control; 556 struct cpumask *masks = NULL;
555 struct msi_desc *entry; 557 struct msi_desc *entry;
558 u16 control;
559
560 if (affinity) {
561 masks = irq_create_affinity_masks(dev->irq_affinity, nvec);
562 if (!masks)
563 pr_err("Unable to allocate affinity masks, ignoring\n");
564 }
556 565
557 /* MSI Entry Initialization */ 566 /* MSI Entry Initialization */
558 entry = alloc_msi_entry(&dev->dev); 567 entry = alloc_msi_entry(&dev->dev, nvec, masks);
559 if (!entry) 568 if (!entry)
560 return NULL; 569 goto out;
561 570
562 pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control); 571 pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
563 572
@@ -568,8 +577,6 @@ static struct msi_desc *msi_setup_entry(struct pci_dev *dev, int nvec)
568 entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */ 577 entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */
569 entry->msi_attrib.multi_cap = (control & PCI_MSI_FLAGS_QMASK) >> 1; 578 entry->msi_attrib.multi_cap = (control & PCI_MSI_FLAGS_QMASK) >> 1;
570 entry->msi_attrib.multiple = ilog2(__roundup_pow_of_two(nvec)); 579 entry->msi_attrib.multiple = ilog2(__roundup_pow_of_two(nvec));
571 entry->nvec_used = nvec;
572 entry->affinity = dev->irq_affinity;
573 580
574 if (control & PCI_MSI_FLAGS_64BIT) 581 if (control & PCI_MSI_FLAGS_64BIT)
575 entry->mask_pos = dev->msi_cap + PCI_MSI_MASK_64; 582 entry->mask_pos = dev->msi_cap + PCI_MSI_MASK_64;
@@ -580,6 +587,8 @@ static struct msi_desc *msi_setup_entry(struct pci_dev *dev, int nvec)
580 if (entry->msi_attrib.maskbit) 587 if (entry->msi_attrib.maskbit)
581 pci_read_config_dword(dev, entry->mask_pos, &entry->masked); 588 pci_read_config_dword(dev, entry->mask_pos, &entry->masked);
582 589
590out:
591 kfree(masks);
583 return entry; 592 return entry;
584} 593}
585 594
@@ -608,7 +617,7 @@ static int msi_verify_entries(struct pci_dev *dev)
608 * an error, and a positive return value indicates the number of interrupts 617 * an error, and a positive return value indicates the number of interrupts
609 * which could have been allocated. 618 * which could have been allocated.
610 */ 619 */
611static int msi_capability_init(struct pci_dev *dev, int nvec) 620static int msi_capability_init(struct pci_dev *dev, int nvec, bool affinity)
612{ 621{
613 struct msi_desc *entry; 622 struct msi_desc *entry;
614 int ret; 623 int ret;
@@ -616,7 +625,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec)
616 625
617 pci_msi_set_enable(dev, 0); /* Disable MSI during set up */ 626 pci_msi_set_enable(dev, 0); /* Disable MSI during set up */
618 627
619 entry = msi_setup_entry(dev, nvec); 628 entry = msi_setup_entry(dev, nvec, affinity);
620 if (!entry) 629 if (!entry)
621 return -ENOMEM; 630 return -ENOMEM;
622 631
@@ -679,28 +688,29 @@ static void __iomem *msix_map_region(struct pci_dev *dev, unsigned nr_entries)
679} 688}
680 689
681static int msix_setup_entries(struct pci_dev *dev, void __iomem *base, 690static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
682 struct msix_entry *entries, int nvec) 691 struct msix_entry *entries, int nvec,
692 bool affinity)
683{ 693{
684 const struct cpumask *mask = NULL; 694 struct cpumask *curmsk, *masks = NULL;
685 struct msi_desc *entry; 695 struct msi_desc *entry;
686 int cpu = -1, i; 696 int ret, i;
687 697
688 for (i = 0; i < nvec; i++) { 698 if (affinity) {
689 if (dev->irq_affinity) { 699 masks = irq_create_affinity_masks(dev->irq_affinity, nvec);
690 cpu = cpumask_next(cpu, dev->irq_affinity); 700 if (!masks)
691 if (cpu >= nr_cpu_ids) 701 pr_err("Unable to allocate affinity masks, ignoring\n");
692 cpu = cpumask_first(dev->irq_affinity); 702 }
693 mask = cpumask_of(cpu);
694 }
695 703
696 entry = alloc_msi_entry(&dev->dev); 704 for (i = 0, curmsk = masks; i < nvec; i++) {
705 entry = alloc_msi_entry(&dev->dev, 1, curmsk);
697 if (!entry) { 706 if (!entry) {
698 if (!i) 707 if (!i)
699 iounmap(base); 708 iounmap(base);
700 else 709 else
701 free_msi_irqs(dev); 710 free_msi_irqs(dev);
702 /* No enough memory. Don't try again */ 711 /* No enough memory. Don't try again */
703 return -ENOMEM; 712 ret = -ENOMEM;
713 goto out;
704 } 714 }
705 715
706 entry->msi_attrib.is_msix = 1; 716 entry->msi_attrib.is_msix = 1;
@@ -711,12 +721,14 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
711 entry->msi_attrib.entry_nr = i; 721 entry->msi_attrib.entry_nr = i;
712 entry->msi_attrib.default_irq = dev->irq; 722 entry->msi_attrib.default_irq = dev->irq;
713 entry->mask_base = base; 723 entry->mask_base = base;
714 entry->nvec_used = 1;
715 entry->affinity = mask;
716 724
717 list_add_tail(&entry->list, dev_to_msi_list(&dev->dev)); 725 list_add_tail(&entry->list, dev_to_msi_list(&dev->dev));
726 if (masks)
727 curmsk++;
718 } 728 }
719 729 ret = 0;
730out:
731 kfree(masks);
720 return 0; 732 return 0;
721} 733}
722 734
@@ -745,8 +757,8 @@ static void msix_program_entries(struct pci_dev *dev,
745 * single MSI-X irq. A return of zero indicates the successful setup of 757 * single MSI-X irq. A return of zero indicates the successful setup of
746 * requested MSI-X entries with allocated irqs or non-zero for otherwise. 758 * requested MSI-X entries with allocated irqs or non-zero for otherwise.
747 **/ 759 **/
748static int msix_capability_init(struct pci_dev *dev, 760static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries,
749 struct msix_entry *entries, int nvec) 761 int nvec, bool affinity)
750{ 762{
751 int ret; 763 int ret;
752 u16 control; 764 u16 control;
@@ -761,7 +773,7 @@ static int msix_capability_init(struct pci_dev *dev,
761 if (!base) 773 if (!base)
762 return -ENOMEM; 774 return -ENOMEM;
763 775
764 ret = msix_setup_entries(dev, base, entries, nvec); 776 ret = msix_setup_entries(dev, base, entries, nvec, affinity);
765 if (ret) 777 if (ret)
766 return ret; 778 return ret;
767 779
@@ -941,22 +953,8 @@ int pci_msix_vec_count(struct pci_dev *dev)
941} 953}
942EXPORT_SYMBOL(pci_msix_vec_count); 954EXPORT_SYMBOL(pci_msix_vec_count);
943 955
944/** 956static int __pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries,
945 * pci_enable_msix - configure device's MSI-X capability structure 957 int nvec, bool affinity)
946 * @dev: pointer to the pci_dev data structure of MSI-X device function
947 * @entries: pointer to an array of MSI-X entries (optional)
948 * @nvec: number of MSI-X irqs requested for allocation by device driver
949 *
950 * Setup the MSI-X capability structure of device function with the number
951 * of requested irqs upon its software driver call to request for
952 * MSI-X mode enabled on its hardware device function. A return of zero
953 * indicates the successful configuration of MSI-X capability structure
954 * with new allocated MSI-X irqs. A return of < 0 indicates a failure.
955 * Or a return of > 0 indicates that driver request is exceeding the number
956 * of irqs or MSI-X vectors available. Driver should use the returned value to
957 * re-send its request.
958 **/
959int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec)
960{ 958{
961 int nr_entries; 959 int nr_entries;
962 int i, j; 960 int i, j;
@@ -988,7 +986,27 @@ int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec)
988 dev_info(&dev->dev, "can't enable MSI-X (MSI IRQ already assigned)\n"); 986 dev_info(&dev->dev, "can't enable MSI-X (MSI IRQ already assigned)\n");
989 return -EINVAL; 987 return -EINVAL;
990 } 988 }
991 return msix_capability_init(dev, entries, nvec); 989 return msix_capability_init(dev, entries, nvec, affinity);
990}
991
992/**
993 * pci_enable_msix - configure device's MSI-X capability structure
994 * @dev: pointer to the pci_dev data structure of MSI-X device function
995 * @entries: pointer to an array of MSI-X entries (optional)
996 * @nvec: number of MSI-X irqs requested for allocation by device driver
997 *
998 * Setup the MSI-X capability structure of device function with the number
999 * of requested irqs upon its software driver call to request for
1000 * MSI-X mode enabled on its hardware device function. A return of zero
1001 * indicates the successful configuration of MSI-X capability structure
1002 * with new allocated MSI-X irqs. A return of < 0 indicates a failure.
1003 * Or a return of > 0 indicates that driver request is exceeding the number
1004 * of irqs or MSI-X vectors available. Driver should use the returned value to
1005 * re-send its request.
1006 **/
1007int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec)
1008{
1009 return __pci_enable_msix(dev, entries, nvec, false);
992} 1010}
993EXPORT_SYMBOL(pci_enable_msix); 1011EXPORT_SYMBOL(pci_enable_msix);
994 1012
@@ -1041,6 +1059,7 @@ EXPORT_SYMBOL(pci_msi_enabled);
1041static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec, 1059static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
1042 unsigned int flags) 1060 unsigned int flags)
1043{ 1061{
1062 bool affinity = flags & PCI_IRQ_AFFINITY;
1044 int nvec; 1063 int nvec;
1045 int rc; 1064 int rc;
1046 1065
@@ -1069,19 +1088,17 @@ static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec,
1069 nvec = maxvec; 1088 nvec = maxvec;
1070 1089
1071 for (;;) { 1090 for (;;) {
1072 if (flags & PCI_IRQ_AFFINITY) { 1091 if (affinity) {
1073 dev->irq_affinity = irq_create_affinity_mask(&nvec); 1092 nvec = irq_calc_affinity_vectors(dev->irq_affinity,
1093 nvec);
1074 if (nvec < minvec) 1094 if (nvec < minvec)
1075 return -ENOSPC; 1095 return -ENOSPC;
1076 } 1096 }
1077 1097
1078 rc = msi_capability_init(dev, nvec); 1098 rc = msi_capability_init(dev, nvec, affinity);
1079 if (rc == 0) 1099 if (rc == 0)
1080 return nvec; 1100 return nvec;
1081 1101
1082 kfree(dev->irq_affinity);
1083 dev->irq_affinity = NULL;
1084
1085 if (rc < 0) 1102 if (rc < 0)
1086 return rc; 1103 return rc;
1087 if (rc < minvec) 1104 if (rc < minvec)
@@ -1113,26 +1130,24 @@ static int __pci_enable_msix_range(struct pci_dev *dev,
1113 struct msix_entry *entries, int minvec, int maxvec, 1130 struct msix_entry *entries, int minvec, int maxvec,
1114 unsigned int flags) 1131 unsigned int flags)
1115{ 1132{
1116 int nvec = maxvec; 1133 bool affinity = flags & PCI_IRQ_AFFINITY;
1117 int rc; 1134 int rc, nvec = maxvec;
1118 1135
1119 if (maxvec < minvec) 1136 if (maxvec < minvec)
1120 return -ERANGE; 1137 return -ERANGE;
1121 1138
1122 for (;;) { 1139 for (;;) {
1123 if (flags & PCI_IRQ_AFFINITY) { 1140 if (affinity) {
1124 dev->irq_affinity = irq_create_affinity_mask(&nvec); 1141 nvec = irq_calc_affinity_vectors(dev->irq_affinity,
1142 nvec);
1125 if (nvec < minvec) 1143 if (nvec < minvec)
1126 return -ENOSPC; 1144 return -ENOSPC;
1127 } 1145 }
1128 1146
1129 rc = pci_enable_msix(dev, entries, nvec); 1147 rc = __pci_enable_msix(dev, entries, nvec, affinity);
1130 if (rc == 0) 1148 if (rc == 0)
1131 return nvec; 1149 return nvec;
1132 1150
1133 kfree(dev->irq_affinity);
1134 dev->irq_affinity = NULL;
1135
1136 if (rc < 0) 1151 if (rc < 0)
1137 return rc; 1152 return rc;
1138 if (rc < minvec) 1153 if (rc < minvec)
@@ -1256,6 +1271,37 @@ int pci_irq_vector(struct pci_dev *dev, unsigned int nr)
1256} 1271}
1257EXPORT_SYMBOL(pci_irq_vector); 1272EXPORT_SYMBOL(pci_irq_vector);
1258 1273
1274/**
1275 * pci_irq_get_affinity - return the affinity of a particular msi vector
1276 * @dev: PCI device to operate on
1277 * @nr: device-relative interrupt vector index (0-based).
1278 */
1279const struct cpumask *pci_irq_get_affinity(struct pci_dev *dev, int nr)
1280{
1281 if (dev->msix_enabled) {
1282 struct msi_desc *entry;
1283 int i = 0;
1284
1285 for_each_pci_msi_entry(entry, dev) {
1286 if (i == nr)
1287 return entry->affinity;
1288 i++;
1289 }
1290 WARN_ON_ONCE(1);
1291 return NULL;
1292 } else if (dev->msi_enabled) {
1293 struct msi_desc *entry = first_pci_msi_entry(dev);
1294
1295 if (WARN_ON_ONCE(!entry || nr >= entry->nvec_used))
1296 return NULL;
1297
1298 return &entry->affinity[nr];
1299 } else {
1300 return cpu_possible_mask;
1301 }
1302}
1303EXPORT_SYMBOL(pci_irq_get_affinity);
1304
1259struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc) 1305struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc)
1260{ 1306{
1261 return to_pci_dev(desc->dev); 1307 return to_pci_dev(desc->dev);
@@ -1502,8 +1548,8 @@ u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev)
1502 pci_for_each_dma_alias(pdev, get_msi_id_cb, &rid); 1548 pci_for_each_dma_alias(pdev, get_msi_id_cb, &rid);
1503 1549
1504 of_node = irq_domain_get_of_node(domain); 1550 of_node = irq_domain_get_of_node(domain);
1505 if (of_node) 1551 rid = of_node ? of_msi_map_rid(&pdev->dev, of_node, rid) :
1506 rid = of_msi_map_rid(&pdev->dev, of_node, rid); 1552 iort_msi_map_rid(&pdev->dev, rid);
1507 1553
1508 return rid; 1554 return rid;
1509} 1555}
@@ -1519,9 +1565,13 @@ u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev)
1519 */ 1565 */
1520struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev) 1566struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev)
1521{ 1567{
1568 struct irq_domain *dom;
1522 u32 rid = 0; 1569 u32 rid = 0;
1523 1570
1524 pci_for_each_dma_alias(pdev, get_msi_id_cb, &rid); 1571 pci_for_each_dma_alias(pdev, get_msi_id_cb, &rid);
1525 return of_msi_map_get_device_domain(&pdev->dev, rid); 1572 dom = of_msi_map_get_device_domain(&pdev->dev, rid);
1573 if (!dom)
1574 dom = iort_get_device_domain(&pdev->dev, rid);
1575 return dom;
1526} 1576}
1527#endif /* CONFIG_PCI_MSI_IRQ_DOMAIN */ 1577#endif /* CONFIG_PCI_MSI_IRQ_DOMAIN */
diff --git a/drivers/staging/fsl-mc/bus/mc-msi.c b/drivers/staging/fsl-mc/bus/mc-msi.c
index c7be156ae5e0..4fd8e41ef468 100644
--- a/drivers/staging/fsl-mc/bus/mc-msi.c
+++ b/drivers/staging/fsl-mc/bus/mc-msi.c
@@ -213,7 +213,7 @@ static int fsl_mc_msi_alloc_descs(struct device *dev, unsigned int irq_count)
213 struct msi_desc *msi_desc; 213 struct msi_desc *msi_desc;
214 214
215 for (i = 0; i < irq_count; i++) { 215 for (i = 0; i < irq_count; i++) {
216 msi_desc = alloc_msi_entry(dev); 216 msi_desc = alloc_msi_entry(dev, 1, NULL);
217 if (!msi_desc) { 217 if (!msi_desc) {
218 dev_err(dev, "Failed to allocate msi entry\n"); 218 dev_err(dev, "Failed to allocate msi entry\n");
219 error = -ENOMEM; 219 error = -ENOMEM;
@@ -221,7 +221,6 @@ static int fsl_mc_msi_alloc_descs(struct device *dev, unsigned int irq_count)
221 } 221 }
222 222
223 msi_desc->fsl_mc.msi_index = i; 223 msi_desc->fsl_mc.msi_index = i;
224 msi_desc->nvec_used = 1;
225 INIT_LIST_HEAD(&msi_desc->list); 224 INIT_LIST_HEAD(&msi_desc->list);
226 list_add_tail(&msi_desc->list, dev_to_msi_list(dev)); 225 list_add_tail(&msi_desc->list, dev_to_msi_list(dev));
227 } 226 }
diff --git a/include/linux/acpi_iort.h b/include/linux/acpi_iort.h
new file mode 100644
index 000000000000..0e32dac8fd03
--- /dev/null
+++ b/include/linux/acpi_iort.h
@@ -0,0 +1,42 @@
1/*
2 * Copyright (C) 2016, Semihalf
3 * Author: Tomasz Nowicki <tn@semihalf.com>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
16 * Place - Suite 330, Boston, MA 02111-1307 USA.
17 */
18
19#ifndef __ACPI_IORT_H__
20#define __ACPI_IORT_H__
21
22#include <linux/acpi.h>
23#include <linux/fwnode.h>
24#include <linux/irqdomain.h>
25
26int iort_register_domain_token(int trans_id, struct fwnode_handle *fw_node);
27void iort_deregister_domain_token(int trans_id);
28struct fwnode_handle *iort_find_domain_token(int trans_id);
29#ifdef CONFIG_ACPI_IORT
30void acpi_iort_init(void);
31u32 iort_msi_map_rid(struct device *dev, u32 req_id);
32struct irq_domain *iort_get_device_domain(struct device *dev, u32 req_id);
33#else
34static inline void acpi_iort_init(void) { }
35static inline u32 iort_msi_map_rid(struct device *dev, u32 req_id)
36{ return req_id; }
37static inline struct irq_domain *iort_get_device_domain(struct device *dev,
38 u32 req_id)
39{ return NULL; }
40#endif
41
42#endif /* __ACPI_IORT_H__ */
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index b6683f0ffc9f..72f0721f75e7 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -278,7 +278,8 @@ extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);
278extern int 278extern int
279irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify); 279irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);
280 280
281struct cpumask *irq_create_affinity_mask(unsigned int *nr_vecs); 281struct cpumask *irq_create_affinity_masks(const struct cpumask *affinity, int nvec);
282int irq_calc_affinity_vectors(const struct cpumask *affinity, int maxvec);
282 283
283#else /* CONFIG_SMP */ 284#else /* CONFIG_SMP */
284 285
@@ -311,11 +312,18 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
311 return 0; 312 return 0;
312} 313}
313 314
314static inline struct cpumask *irq_create_affinity_mask(unsigned int *nr_vecs) 315static inline struct cpumask *
316irq_create_affinity_masks(const struct cpumask *affinity, int nvec)
315{ 317{
316 *nr_vecs = 1;
317 return NULL; 318 return NULL;
318} 319}
320
321static inline int
322irq_calc_affinity_vectors(const struct cpumask *affinity, int maxvec)
323{
324 return maxvec;
325}
326
319#endif /* CONFIG_SMP */ 327#endif /* CONFIG_SMP */
320 328
321/* 329/*
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 0ac26c892fe2..e79875574b39 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -916,12 +916,20 @@ void irq_remove_generic_chip(struct irq_chip_generic *gc, u32 msk,
916 unsigned int clr, unsigned int set); 916 unsigned int clr, unsigned int set);
917 917
918struct irq_chip_generic *irq_get_domain_generic_chip(struct irq_domain *d, unsigned int hw_irq); 918struct irq_chip_generic *irq_get_domain_generic_chip(struct irq_domain *d, unsigned int hw_irq);
919int irq_alloc_domain_generic_chips(struct irq_domain *d, int irqs_per_chip,
920 int num_ct, const char *name,
921 irq_flow_handler_t handler,
922 unsigned int clr, unsigned int set,
923 enum irq_gc_flags flags);
924 919
920int __irq_alloc_domain_generic_chips(struct irq_domain *d, int irqs_per_chip,
921 int num_ct, const char *name,
922 irq_flow_handler_t handler,
923 unsigned int clr, unsigned int set,
924 enum irq_gc_flags flags);
925
926#define irq_alloc_domain_generic_chips(d, irqs_per_chip, num_ct, name, \
927 handler, clr, set, flags) \
928({ \
929 MAYBE_BUILD_BUG_ON(irqs_per_chip > 32); \
930 __irq_alloc_domain_generic_chips(d, irqs_per_chip, num_ct, name,\
931 handler, clr, set, flags); \
932})
925 933
926static inline struct irq_chip_type *irq_data_get_chip_type(struct irq_data *d) 934static inline struct irq_chip_type *irq_data_get_chip_type(struct irq_data *d)
927{ 935{
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index 99ac022edc60..8361c8d3edd1 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -430,9 +430,9 @@ struct rdists {
430}; 430};
431 431
432struct irq_domain; 432struct irq_domain;
433struct device_node; 433struct fwnode_handle;
434int its_cpu_init(void); 434int its_cpu_init(void);
435int its_init(struct device_node *node, struct rdists *rdists, 435int its_init(struct fwnode_handle *handle, struct rdists *rdists,
436 struct irq_domain *domain); 436 struct irq_domain *domain);
437 437
438static inline bool gic_enable_sre(void) 438static inline bool gic_enable_sre(void)
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index b51beebf9804..c9be57931b58 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -2,6 +2,7 @@
2#define _LINUX_IRQDESC_H 2#define _LINUX_IRQDESC_H
3 3
4#include <linux/rcupdate.h> 4#include <linux/rcupdate.h>
5#include <linux/kobject.h>
5 6
6/* 7/*
7 * Core internal functions to deal with irq descriptors 8 * Core internal functions to deal with irq descriptors
@@ -43,6 +44,7 @@ struct pt_regs;
43 * @force_resume_depth: number of irqactions on a irq descriptor with 44 * @force_resume_depth: number of irqactions on a irq descriptor with
44 * IRQF_FORCE_RESUME set 45 * IRQF_FORCE_RESUME set
45 * @rcu: rcu head for delayed free 46 * @rcu: rcu head for delayed free
47 * @kobj: kobject used to represent this struct in sysfs
46 * @dir: /proc/irq/ procfs entry 48 * @dir: /proc/irq/ procfs entry
47 * @name: flow handler name for /proc/interrupts output 49 * @name: flow handler name for /proc/interrupts output
48 */ 50 */
@@ -88,6 +90,7 @@ struct irq_desc {
88#endif 90#endif
89#ifdef CONFIG_SPARSE_IRQ 91#ifdef CONFIG_SPARSE_IRQ
90 struct rcu_head rcu; 92 struct rcu_head rcu;
93 struct kobject kobj;
91#endif 94#endif
92 int parent_irq; 95 int parent_irq;
93 struct module *owner; 96 struct module *owner;
diff --git a/include/linux/msi.h b/include/linux/msi.h
index e8c81fbd5f9c..0db320b7bb15 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -68,7 +68,7 @@ struct msi_desc {
68 unsigned int nvec_used; 68 unsigned int nvec_used;
69 struct device *dev; 69 struct device *dev;
70 struct msi_msg msg; 70 struct msi_msg msg;
71 const struct cpumask *affinity; 71 struct cpumask *affinity;
72 72
73 union { 73 union {
74 /* PCI MSI/X specific data */ 74 /* PCI MSI/X specific data */
@@ -123,7 +123,8 @@ static inline void *msi_desc_to_pci_sysdata(struct msi_desc *desc)
123} 123}
124#endif /* CONFIG_PCI_MSI */ 124#endif /* CONFIG_PCI_MSI */
125 125
126struct msi_desc *alloc_msi_entry(struct device *dev); 126struct msi_desc *alloc_msi_entry(struct device *dev, int nvec,
127 const struct cpumask *affinity);
127void free_msi_entry(struct msi_desc *entry); 128void free_msi_entry(struct msi_desc *entry);
128void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg); 129void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
129void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg); 130void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
diff --git a/include/linux/pci.h b/include/linux/pci.h
index a917d4b20554..7cc0acba8939 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1301,6 +1301,7 @@ int pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
1301 unsigned int max_vecs, unsigned int flags); 1301 unsigned int max_vecs, unsigned int flags);
1302void pci_free_irq_vectors(struct pci_dev *dev); 1302void pci_free_irq_vectors(struct pci_dev *dev);
1303int pci_irq_vector(struct pci_dev *dev, unsigned int nr); 1303int pci_irq_vector(struct pci_dev *dev, unsigned int nr);
1304const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev, int vec);
1304 1305
1305#else 1306#else
1306static inline int pci_msi_vec_count(struct pci_dev *dev) { return -ENOSYS; } 1307static inline int pci_msi_vec_count(struct pci_dev *dev) { return -ENOSYS; }
@@ -1343,6 +1344,11 @@ static inline int pci_irq_vector(struct pci_dev *dev, unsigned int nr)
1343 return -EINVAL; 1344 return -EINVAL;
1344 return dev->irq; 1345 return dev->irq;
1345} 1346}
1347static inline const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev,
1348 int vec)
1349{
1350 return cpu_possible_mask;
1351}
1346#endif 1352#endif
1347 1353
1348#ifdef CONFIG_PCIEPORTBUS 1354#ifdef CONFIG_PCIEPORTBUS
diff --git a/kernel/irq/affinity.c b/kernel/irq/affinity.c
index 32f6cfcff212..17f51d63da56 100644
--- a/kernel/irq/affinity.c
+++ b/kernel/irq/affinity.c
@@ -4,60 +4,151 @@
4#include <linux/slab.h> 4#include <linux/slab.h>
5#include <linux/cpu.h> 5#include <linux/cpu.h>
6 6
7static int get_first_sibling(unsigned int cpu) 7static void irq_spread_init_one(struct cpumask *irqmsk, struct cpumask *nmsk,
8 int cpus_per_vec)
8{ 9{
9 unsigned int ret; 10 const struct cpumask *siblmsk;
11 int cpu, sibl;
10 12
11 ret = cpumask_first(topology_sibling_cpumask(cpu)); 13 for ( ; cpus_per_vec > 0; ) {
12 if (ret < nr_cpu_ids) 14 cpu = cpumask_first(nmsk);
13 return ret; 15
14 return cpu; 16 /* Should not happen, but I'm too lazy to think about it */
17 if (cpu >= nr_cpu_ids)
18 return;
19
20 cpumask_clear_cpu(cpu, nmsk);
21 cpumask_set_cpu(cpu, irqmsk);
22 cpus_per_vec--;
23
24 /* If the cpu has siblings, use them first */
25 siblmsk = topology_sibling_cpumask(cpu);
26 for (sibl = -1; cpus_per_vec > 0; ) {
27 sibl = cpumask_next(sibl, siblmsk);
28 if (sibl >= nr_cpu_ids)
29 break;
30 if (!cpumask_test_and_clear_cpu(sibl, nmsk))
31 continue;
32 cpumask_set_cpu(sibl, irqmsk);
33 cpus_per_vec--;
34 }
35 }
36}
37
38static int get_nodes_in_cpumask(const struct cpumask *mask, nodemask_t *nodemsk)
39{
40 int n, nodes;
41
42 /* Calculate the number of nodes in the supplied affinity mask */
43 for (n = 0, nodes = 0; n < num_online_nodes(); n++) {
44 if (cpumask_intersects(mask, cpumask_of_node(n))) {
45 node_set(n, *nodemsk);
46 nodes++;
47 }
48 }
49 return nodes;
15} 50}
16 51
17/* 52/**
18 * Take a map of online CPUs and the number of available interrupt vectors 53 * irq_create_affinity_masks - Create affinity masks for multiqueue spreading
19 * and generate an output cpumask suitable for spreading MSI/MSI-X vectors 54 * @affinity: The affinity mask to spread. If NULL cpu_online_mask
20 * so that they are distributed as good as possible around the CPUs. If 55 * is used
21 * more vectors than CPUs are available we'll map one to each CPU, 56 * @nvecs: The number of vectors
22 * otherwise we map one to the first sibling of each socket.
23 * 57 *
24 * If there are more vectors than CPUs we will still only have one bit 58 * Returns the masks pointer or NULL if allocation failed.
25 * set per CPU, but interrupt code will keep on assigning the vectors from
26 * the start of the bitmap until we run out of vectors.
27 */ 59 */
28struct cpumask *irq_create_affinity_mask(unsigned int *nr_vecs) 60struct cpumask *irq_create_affinity_masks(const struct cpumask *affinity,
61 int nvec)
29{ 62{
30 struct cpumask *affinity_mask; 63 int n, nodes, vecs_per_node, cpus_per_vec, extra_vecs, curvec = 0;
31 unsigned int max_vecs = *nr_vecs; 64 nodemask_t nodemsk = NODE_MASK_NONE;
65 struct cpumask *masks;
66 cpumask_var_t nmsk;
32 67
33 if (max_vecs == 1) 68 if (!zalloc_cpumask_var(&nmsk, GFP_KERNEL))
34 return NULL; 69 return NULL;
35 70
36 affinity_mask = kzalloc(cpumask_size(), GFP_KERNEL); 71 masks = kzalloc(nvec * sizeof(*masks), GFP_KERNEL);
37 if (!affinity_mask) { 72 if (!masks)
38 *nr_vecs = 1; 73 goto out;
39 return NULL;
40 }
41 74
75 /* Stabilize the cpumasks */
42 get_online_cpus(); 76 get_online_cpus();
43 if (max_vecs >= num_online_cpus()) { 77 /* If the supplied affinity mask is NULL, use cpu online mask */
44 cpumask_copy(affinity_mask, cpu_online_mask); 78 if (!affinity)
45 *nr_vecs = num_online_cpus(); 79 affinity = cpu_online_mask;
46 } else { 80
47 unsigned int vecs = 0, cpu; 81 nodes = get_nodes_in_cpumask(affinity, &nodemsk);
48
49 for_each_online_cpu(cpu) {
50 if (cpu == get_first_sibling(cpu)) {
51 cpumask_set_cpu(cpu, affinity_mask);
52 vecs++;
53 }
54 82
55 if (--max_vecs == 0) 83 /*
84 * If the number of nodes in the mask is less than or equal the
85 * number of vectors we just spread the vectors across the nodes.
86 */
87 if (nvec <= nodes) {
88 for_each_node_mask(n, nodemsk) {
89 cpumask_copy(masks + curvec, cpumask_of_node(n));
90 if (++curvec == nvec)
56 break; 91 break;
57 } 92 }
58 *nr_vecs = vecs; 93 goto outonl;
59 } 94 }
95
96 /* Spread the vectors per node */
97 vecs_per_node = nvec / nodes;
98 /* Account for rounding errors */
99 extra_vecs = nvec - (nodes * vecs_per_node);
100
101 for_each_node_mask(n, nodemsk) {
102 int ncpus, v, vecs_to_assign = vecs_per_node;
103
104 /* Get the cpus on this node which are in the mask */
105 cpumask_and(nmsk, affinity, cpumask_of_node(n));
106
107 /* Calculate the number of cpus per vector */
108 ncpus = cpumask_weight(nmsk);
109
110 for (v = 0; curvec < nvec && v < vecs_to_assign; curvec++, v++) {
111 cpus_per_vec = ncpus / vecs_to_assign;
112
113 /* Account for extra vectors to compensate rounding errors */
114 if (extra_vecs) {
115 cpus_per_vec++;
116 if (!--extra_vecs)
117 vecs_per_node++;
118 }
119 irq_spread_init_one(masks + curvec, nmsk, cpus_per_vec);
120 }
121
122 if (curvec >= nvec)
123 break;
124 }
125
126outonl:
60 put_online_cpus(); 127 put_online_cpus();
128out:
129 free_cpumask_var(nmsk);
130 return masks;
131}
132
133/**
134 * irq_calc_affinity_vectors - Calculate to optimal number of vectors for a given affinity mask
135 * @affinity: The affinity mask to spread. If NULL cpu_online_mask
136 * is used
137 * @maxvec: The maximum number of vectors available
138 */
139int irq_calc_affinity_vectors(const struct cpumask *affinity, int maxvec)
140{
141 int cpus, ret;
61 142
62 return affinity_mask; 143 /* Stabilize the cpumasks */
144 get_online_cpus();
145 /* If the supplied affinity mask is NULL, use cpu online mask */
146 if (!affinity)
147 affinity = cpu_online_mask;
148
149 cpus = cpumask_weight(affinity);
150 ret = (cpus < maxvec) ? cpus : maxvec;
151
152 put_online_cpus();
153 return ret;
63} 154}
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 26ba5654d9d5..be3c34e4f2ac 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -76,7 +76,6 @@ int irq_set_irq_type(unsigned int irq, unsigned int type)
76 if (!desc) 76 if (!desc)
77 return -EINVAL; 77 return -EINVAL;
78 78
79 type &= IRQ_TYPE_SENSE_MASK;
80 ret = __irq_set_trigger(desc, type); 79 ret = __irq_set_trigger(desc, type);
81 irq_put_desc_busunlock(desc, flags); 80 irq_put_desc_busunlock(desc, flags);
82 return ret; 81 return ret;
@@ -756,7 +755,6 @@ void handle_percpu_devid_irq(struct irq_desc *desc)
756{ 755{
757 struct irq_chip *chip = irq_desc_get_chip(desc); 756 struct irq_chip *chip = irq_desc_get_chip(desc);
758 struct irqaction *action = desc->action; 757 struct irqaction *action = desc->action;
759 void *dev_id = raw_cpu_ptr(action->percpu_dev_id);
760 unsigned int irq = irq_desc_get_irq(desc); 758 unsigned int irq = irq_desc_get_irq(desc);
761 irqreturn_t res; 759 irqreturn_t res;
762 760
@@ -765,15 +763,26 @@ void handle_percpu_devid_irq(struct irq_desc *desc)
765 if (chip->irq_ack) 763 if (chip->irq_ack)
766 chip->irq_ack(&desc->irq_data); 764 chip->irq_ack(&desc->irq_data);
767 765
768 trace_irq_handler_entry(irq, action); 766 if (likely(action)) {
769 res = action->handler(irq, dev_id); 767 trace_irq_handler_entry(irq, action);
770 trace_irq_handler_exit(irq, action, res); 768 res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id));
769 trace_irq_handler_exit(irq, action, res);
770 } else {
771 unsigned int cpu = smp_processor_id();
772 bool enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);
773
774 if (enabled)
775 irq_percpu_disable(desc, cpu);
776
777 pr_err_once("Spurious%s percpu IRQ%u on CPU%u\n",
778 enabled ? " and unmasked" : "", irq, cpu);
779 }
771 780
772 if (chip->irq_eoi) 781 if (chip->irq_eoi)
773 chip->irq_eoi(&desc->irq_data); 782 chip->irq_eoi(&desc->irq_data);
774} 783}
775 784
776void 785static void
777__irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle, 786__irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
778 int is_chained, const char *name) 787 int is_chained, const char *name)
779{ 788{
diff --git a/kernel/irq/generic-chip.c b/kernel/irq/generic-chip.c
index abd286afbd27..ee32870079c9 100644
--- a/kernel/irq/generic-chip.c
+++ b/kernel/irq/generic-chip.c
@@ -260,9 +260,9 @@ irq_gc_init_mask_cache(struct irq_chip_generic *gc, enum irq_gc_flags flags)
260} 260}
261 261
262/** 262/**
263 * irq_alloc_domain_generic_chip - Allocate generic chips for an irq domain 263 * __irq_alloc_domain_generic_chip - Allocate generic chips for an irq domain
264 * @d: irq domain for which to allocate chips 264 * @d: irq domain for which to allocate chips
265 * @irqs_per_chip: Number of interrupts each chip handles 265 * @irqs_per_chip: Number of interrupts each chip handles (max 32)
266 * @num_ct: Number of irq_chip_type instances associated with this 266 * @num_ct: Number of irq_chip_type instances associated with this
267 * @name: Name of the irq chip 267 * @name: Name of the irq chip
268 * @handler: Default flow handler associated with these chips 268 * @handler: Default flow handler associated with these chips
@@ -270,11 +270,11 @@ irq_gc_init_mask_cache(struct irq_chip_generic *gc, enum irq_gc_flags flags)
270 * @set: IRQ_* bits to set in the mapping function 270 * @set: IRQ_* bits to set in the mapping function
271 * @gcflags: Generic chip specific setup flags 271 * @gcflags: Generic chip specific setup flags
272 */ 272 */
273int irq_alloc_domain_generic_chips(struct irq_domain *d, int irqs_per_chip, 273int __irq_alloc_domain_generic_chips(struct irq_domain *d, int irqs_per_chip,
274 int num_ct, const char *name, 274 int num_ct, const char *name,
275 irq_flow_handler_t handler, 275 irq_flow_handler_t handler,
276 unsigned int clr, unsigned int set, 276 unsigned int clr, unsigned int set,
277 enum irq_gc_flags gcflags) 277 enum irq_gc_flags gcflags)
278{ 278{
279 struct irq_domain_chip_generic *dgc; 279 struct irq_domain_chip_generic *dgc;
280 struct irq_chip_generic *gc; 280 struct irq_chip_generic *gc;
@@ -326,7 +326,21 @@ int irq_alloc_domain_generic_chips(struct irq_domain *d, int irqs_per_chip,
326 d->name = name; 326 d->name = name;
327 return 0; 327 return 0;
328} 328}
329EXPORT_SYMBOL_GPL(irq_alloc_domain_generic_chips); 329EXPORT_SYMBOL_GPL(__irq_alloc_domain_generic_chips);
330
331static struct irq_chip_generic *
332__irq_get_domain_generic_chip(struct irq_domain *d, unsigned int hw_irq)
333{
334 struct irq_domain_chip_generic *dgc = d->gc;
335 int idx;
336
337 if (!dgc)
338 return ERR_PTR(-ENODEV);
339 idx = hw_irq / dgc->irqs_per_chip;
340 if (idx >= dgc->num_chips)
341 return ERR_PTR(-EINVAL);
342 return dgc->gc[idx];
343}
330 344
331/** 345/**
332 * irq_get_domain_generic_chip - Get a pointer to the generic chip of a hw_irq 346 * irq_get_domain_generic_chip - Get a pointer to the generic chip of a hw_irq
@@ -336,15 +350,9 @@ EXPORT_SYMBOL_GPL(irq_alloc_domain_generic_chips);
336struct irq_chip_generic * 350struct irq_chip_generic *
337irq_get_domain_generic_chip(struct irq_domain *d, unsigned int hw_irq) 351irq_get_domain_generic_chip(struct irq_domain *d, unsigned int hw_irq)
338{ 352{
339 struct irq_domain_chip_generic *dgc = d->gc; 353 struct irq_chip_generic *gc = __irq_get_domain_generic_chip(d, hw_irq);
340 int idx;
341 354
342 if (!dgc) 355 return !IS_ERR(gc) ? gc : NULL;
343 return NULL;
344 idx = hw_irq / dgc->irqs_per_chip;
345 if (idx >= dgc->num_chips)
346 return NULL;
347 return dgc->gc[idx];
348} 356}
349EXPORT_SYMBOL_GPL(irq_get_domain_generic_chip); 357EXPORT_SYMBOL_GPL(irq_get_domain_generic_chip);
350 358
@@ -368,13 +376,9 @@ int irq_map_generic_chip(struct irq_domain *d, unsigned int virq,
368 unsigned long flags; 376 unsigned long flags;
369 int idx; 377 int idx;
370 378
371 if (!d->gc) 379 gc = __irq_get_domain_generic_chip(d, hw_irq);
372 return -ENODEV; 380 if (IS_ERR(gc))
373 381 return PTR_ERR(gc);
374 idx = hw_irq / dgc->irqs_per_chip;
375 if (idx >= dgc->num_chips)
376 return -EINVAL;
377 gc = dgc->gc[idx];
378 382
379 idx = hw_irq % dgc->irqs_per_chip; 383 idx = hw_irq % dgc->irqs_per_chip;
380 384
@@ -409,10 +413,30 @@ int irq_map_generic_chip(struct irq_domain *d, unsigned int virq,
409 irq_modify_status(virq, dgc->irq_flags_to_clear, dgc->irq_flags_to_set); 413 irq_modify_status(virq, dgc->irq_flags_to_clear, dgc->irq_flags_to_set);
410 return 0; 414 return 0;
411} 415}
412EXPORT_SYMBOL_GPL(irq_map_generic_chip); 416
417static void irq_unmap_generic_chip(struct irq_domain *d, unsigned int virq)
418{
419 struct irq_data *data = irq_domain_get_irq_data(d, virq);
420 struct irq_domain_chip_generic *dgc = d->gc;
421 unsigned int hw_irq = data->hwirq;
422 struct irq_chip_generic *gc;
423 int irq_idx;
424
425 gc = irq_get_domain_generic_chip(d, hw_irq);
426 if (!gc)
427 return;
428
429 irq_idx = hw_irq % dgc->irqs_per_chip;
430
431 clear_bit(irq_idx, &gc->installed);
432 irq_domain_set_info(d, virq, hw_irq, &no_irq_chip, NULL, NULL, NULL,
433 NULL);
434
435}
413 436
414struct irq_domain_ops irq_generic_chip_ops = { 437struct irq_domain_ops irq_generic_chip_ops = {
415 .map = irq_map_generic_chip, 438 .map = irq_map_generic_chip,
439 .unmap = irq_unmap_generic_chip,
416 .xlate = irq_domain_xlate_onetwocell, 440 .xlate = irq_domain_xlate_onetwocell,
417}; 441};
418EXPORT_SYMBOL_GPL(irq_generic_chip_ops); 442EXPORT_SYMBOL_GPL(irq_generic_chip_ops);
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index a623b44f2d4b..00bb0aeea1d0 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -15,6 +15,7 @@
15#include <linux/radix-tree.h> 15#include <linux/radix-tree.h>
16#include <linux/bitmap.h> 16#include <linux/bitmap.h>
17#include <linux/irqdomain.h> 17#include <linux/irqdomain.h>
18#include <linux/sysfs.h>
18 19
19#include "internals.h" 20#include "internals.h"
20 21
@@ -123,6 +124,181 @@ static DECLARE_BITMAP(allocated_irqs, IRQ_BITMAP_BITS);
123 124
124#ifdef CONFIG_SPARSE_IRQ 125#ifdef CONFIG_SPARSE_IRQ
125 126
127static void irq_kobj_release(struct kobject *kobj);
128
129#ifdef CONFIG_SYSFS
130static struct kobject *irq_kobj_base;
131
132#define IRQ_ATTR_RO(_name) \
133static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
134
135static ssize_t per_cpu_count_show(struct kobject *kobj,
136 struct kobj_attribute *attr, char *buf)
137{
138 struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
139 int cpu, irq = desc->irq_data.irq;
140 ssize_t ret = 0;
141 char *p = "";
142
143 for_each_possible_cpu(cpu) {
144 unsigned int c = kstat_irqs_cpu(irq, cpu);
145
146 ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s%u", p, c);
147 p = ",";
148 }
149
150 ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
151 return ret;
152}
153IRQ_ATTR_RO(per_cpu_count);
154
155static ssize_t chip_name_show(struct kobject *kobj,
156 struct kobj_attribute *attr, char *buf)
157{
158 struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
159 ssize_t ret = 0;
160
161 raw_spin_lock_irq(&desc->lock);
162 if (desc->irq_data.chip && desc->irq_data.chip->name) {
163 ret = scnprintf(buf, PAGE_SIZE, "%s\n",
164 desc->irq_data.chip->name);
165 }
166 raw_spin_unlock_irq(&desc->lock);
167
168 return ret;
169}
170IRQ_ATTR_RO(chip_name);
171
172static ssize_t hwirq_show(struct kobject *kobj,
173 struct kobj_attribute *attr, char *buf)
174{
175 struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
176 ssize_t ret = 0;
177
178 raw_spin_lock_irq(&desc->lock);
179 if (desc->irq_data.domain)
180 ret = sprintf(buf, "%d\n", (int)desc->irq_data.hwirq);
181 raw_spin_unlock_irq(&desc->lock);
182
183 return ret;
184}
185IRQ_ATTR_RO(hwirq);
186
187static ssize_t type_show(struct kobject *kobj,
188 struct kobj_attribute *attr, char *buf)
189{
190 struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
191 ssize_t ret = 0;
192
193 raw_spin_lock_irq(&desc->lock);
194 ret = sprintf(buf, "%s\n",
195 irqd_is_level_type(&desc->irq_data) ? "level" : "edge");
196 raw_spin_unlock_irq(&desc->lock);
197
198 return ret;
199
200}
201IRQ_ATTR_RO(type);
202
203static ssize_t name_show(struct kobject *kobj,
204 struct kobj_attribute *attr, char *buf)
205{
206 struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
207 ssize_t ret = 0;
208
209 raw_spin_lock_irq(&desc->lock);
210 if (desc->name)
211 ret = scnprintf(buf, PAGE_SIZE, "%s\n", desc->name);
212 raw_spin_unlock_irq(&desc->lock);
213
214 return ret;
215}
216IRQ_ATTR_RO(name);
217
218static ssize_t actions_show(struct kobject *kobj,
219 struct kobj_attribute *attr, char *buf)
220{
221 struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
222 struct irqaction *action;
223 ssize_t ret = 0;
224 char *p = "";
225
226 raw_spin_lock_irq(&desc->lock);
227 for (action = desc->action; action != NULL; action = action->next) {
228 ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s%s",
229 p, action->name);
230 p = ",";
231 }
232 raw_spin_unlock_irq(&desc->lock);
233
234 if (ret)
235 ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
236
237 return ret;
238}
239IRQ_ATTR_RO(actions);
240
241static struct attribute *irq_attrs[] = {
242 &per_cpu_count_attr.attr,
243 &chip_name_attr.attr,
244 &hwirq_attr.attr,
245 &type_attr.attr,
246 &name_attr.attr,
247 &actions_attr.attr,
248 NULL
249};
250
251static struct kobj_type irq_kobj_type = {
252 .release = irq_kobj_release,
253 .sysfs_ops = &kobj_sysfs_ops,
254 .default_attrs = irq_attrs,
255};
256
257static void irq_sysfs_add(int irq, struct irq_desc *desc)
258{
259 if (irq_kobj_base) {
260 /*
261 * Continue even in case of failure as this is nothing
262 * crucial.
263 */
264 if (kobject_add(&desc->kobj, irq_kobj_base, "%d", irq))
265 pr_warn("Failed to add kobject for irq %d\n", irq);
266 }
267}
268
269static int __init irq_sysfs_init(void)
270{
271 struct irq_desc *desc;
272 int irq;
273
274 /* Prevent concurrent irq alloc/free */
275 irq_lock_sparse();
276
277 irq_kobj_base = kobject_create_and_add("irq", kernel_kobj);
278 if (!irq_kobj_base) {
279 irq_unlock_sparse();
280 return -ENOMEM;
281 }
282
283 /* Add the already allocated interrupts */
284 for_each_irq_desc(irq, desc)
285 irq_sysfs_add(irq, desc);
286 irq_unlock_sparse();
287
288 return 0;
289}
290postcore_initcall(irq_sysfs_init);
291
292#else /* !CONFIG_SYSFS */
293
294static struct kobj_type irq_kobj_type = {
295 .release = irq_kobj_release,
296};
297
298static void irq_sysfs_add(int irq, struct irq_desc *desc) {}
299
300#endif /* CONFIG_SYSFS */
301
126static RADIX_TREE(irq_desc_tree, GFP_KERNEL); 302static RADIX_TREE(irq_desc_tree, GFP_KERNEL);
127 303
128static void irq_insert_desc(unsigned int irq, struct irq_desc *desc) 304static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
@@ -187,6 +363,7 @@ static struct irq_desc *alloc_desc(int irq, int node, unsigned int flags,
187 363
188 desc_set_defaults(irq, desc, node, affinity, owner); 364 desc_set_defaults(irq, desc, node, affinity, owner);
189 irqd_set(&desc->irq_data, flags); 365 irqd_set(&desc->irq_data, flags);
366 kobject_init(&desc->kobj, &irq_kobj_type);
190 367
191 return desc; 368 return desc;
192 369
@@ -197,15 +374,22 @@ err_desc:
197 return NULL; 374 return NULL;
198} 375}
199 376
200static void delayed_free_desc(struct rcu_head *rhp) 377static void irq_kobj_release(struct kobject *kobj)
201{ 378{
202 struct irq_desc *desc = container_of(rhp, struct irq_desc, rcu); 379 struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
203 380
204 free_masks(desc); 381 free_masks(desc);
205 free_percpu(desc->kstat_irqs); 382 free_percpu(desc->kstat_irqs);
206 kfree(desc); 383 kfree(desc);
207} 384}
208 385
386static void delayed_free_desc(struct rcu_head *rhp)
387{
388 struct irq_desc *desc = container_of(rhp, struct irq_desc, rcu);
389
390 kobject_put(&desc->kobj);
391}
392
209static void free_desc(unsigned int irq) 393static void free_desc(unsigned int irq)
210{ 394{
211 struct irq_desc *desc = irq_to_desc(irq); 395 struct irq_desc *desc = irq_to_desc(irq);
@@ -217,8 +401,12 @@ static void free_desc(unsigned int irq)
217 * kstat_irq_usr(). Once we deleted the descriptor from the 401 * kstat_irq_usr(). Once we deleted the descriptor from the
218 * sparse tree we can free it. Access in proc will fail to 402 * sparse tree we can free it. Access in proc will fail to
219 * lookup the descriptor. 403 * lookup the descriptor.
404 *
405 * The sysfs entry must be serialized against a concurrent
406 * irq_sysfs_init() as well.
220 */ 407 */
221 mutex_lock(&sparse_irq_lock); 408 mutex_lock(&sparse_irq_lock);
409 kobject_del(&desc->kobj);
222 delete_irq_desc(irq); 410 delete_irq_desc(irq);
223 mutex_unlock(&sparse_irq_lock); 411 mutex_unlock(&sparse_irq_lock);
224 412
@@ -236,31 +424,31 @@ static int alloc_descs(unsigned int start, unsigned int cnt, int node,
236 const struct cpumask *mask = NULL; 424 const struct cpumask *mask = NULL;
237 struct irq_desc *desc; 425 struct irq_desc *desc;
238 unsigned int flags; 426 unsigned int flags;
239 int i, cpu = -1; 427 int i;
240 428
241 if (affinity && cpumask_empty(affinity)) 429 /* Validate affinity mask(s) */
242 return -EINVAL; 430 if (affinity) {
431 for (i = 0, mask = affinity; i < cnt; i++, mask++) {
432 if (cpumask_empty(mask))
433 return -EINVAL;
434 }
435 }
243 436
244 flags = affinity ? IRQD_AFFINITY_MANAGED : 0; 437 flags = affinity ? IRQD_AFFINITY_MANAGED : 0;
438 mask = NULL;
245 439
246 for (i = 0; i < cnt; i++) { 440 for (i = 0; i < cnt; i++) {
247 if (affinity) { 441 if (affinity) {
248 cpu = cpumask_next(cpu, affinity); 442 node = cpu_to_node(cpumask_first(affinity));
249 if (cpu >= nr_cpu_ids) 443 mask = affinity;
250 cpu = cpumask_first(affinity); 444 affinity++;
251 node = cpu_to_node(cpu);
252
253 /*
254 * For single allocations we use the caller provided
255 * mask otherwise we use the mask of the target cpu
256 */
257 mask = cnt == 1 ? affinity : cpumask_of(cpu);
258 } 445 }
259 desc = alloc_desc(start + i, node, flags, mask, owner); 446 desc = alloc_desc(start + i, node, flags, mask, owner);
260 if (!desc) 447 if (!desc)
261 goto err; 448 goto err;
262 mutex_lock(&sparse_irq_lock); 449 mutex_lock(&sparse_irq_lock);
263 irq_insert_desc(start + i, desc); 450 irq_insert_desc(start + i, desc);
451 irq_sysfs_add(start + i, desc);
264 mutex_unlock(&sparse_irq_lock); 452 mutex_unlock(&sparse_irq_lock);
265 } 453 }
266 return start; 454 return start;
@@ -481,9 +669,9 @@ EXPORT_SYMBOL_GPL(irq_free_descs);
481 * @cnt: Number of consecutive irqs to allocate. 669 * @cnt: Number of consecutive irqs to allocate.
482 * @node: Preferred node on which the irq descriptor should be allocated 670 * @node: Preferred node on which the irq descriptor should be allocated
483 * @owner: Owning module (can be NULL) 671 * @owner: Owning module (can be NULL)
484 * @affinity: Optional pointer to an affinity mask which hints where the 672 * @affinity: Optional pointer to an affinity mask array of size @cnt which
485 * irq descriptors should be allocated and which default 673 * hints where the irq descriptors should be allocated and which
486 * affinities to use 674 * default affinities to use
487 * 675 *
488 * Returns the first irq number or error code 676 * Returns the first irq number or error code
489 */ 677 */
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index 4752b43662e0..8c0a0ae43521 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -80,7 +80,7 @@ EXPORT_SYMBOL_GPL(irq_domain_free_fwnode);
80 80
81/** 81/**
82 * __irq_domain_add() - Allocate a new irq_domain data structure 82 * __irq_domain_add() - Allocate a new irq_domain data structure
83 * @of_node: optional device-tree node of the interrupt controller 83 * @fwnode: firmware node for the interrupt controller
84 * @size: Size of linear map; 0 for radix mapping only 84 * @size: Size of linear map; 0 for radix mapping only
85 * @hwirq_max: Maximum number of interrupts supported by controller 85 * @hwirq_max: Maximum number of interrupts supported by controller
86 * @direct_max: Maximum value of direct maps; Use ~0 for no limit; 0 for no 86 * @direct_max: Maximum value of direct maps; Use ~0 for no limit; 0 for no
@@ -96,10 +96,8 @@ struct irq_domain *__irq_domain_add(struct fwnode_handle *fwnode, int size,
96 const struct irq_domain_ops *ops, 96 const struct irq_domain_ops *ops,
97 void *host_data) 97 void *host_data)
98{ 98{
99 struct device_node *of_node = to_of_node(fwnode);
99 struct irq_domain *domain; 100 struct irq_domain *domain;
100 struct device_node *of_node;
101
102 of_node = to_of_node(fwnode);
103 101
104 domain = kzalloc_node(sizeof(*domain) + (sizeof(unsigned int) * size), 102 domain = kzalloc_node(sizeof(*domain) + (sizeof(unsigned int) * size),
105 GFP_KERNEL, of_node_to_nid(of_node)); 103 GFP_KERNEL, of_node_to_nid(of_node));
@@ -868,7 +866,10 @@ int irq_domain_xlate_onetwocell(struct irq_domain *d,
868 if (WARN_ON(intsize < 1)) 866 if (WARN_ON(intsize < 1))
869 return -EINVAL; 867 return -EINVAL;
870 *out_hwirq = intspec[0]; 868 *out_hwirq = intspec[0];
871 *out_type = (intsize > 1) ? intspec[1] : IRQ_TYPE_NONE; 869 if (intsize > 1)
870 *out_type = intspec[1] & IRQ_TYPE_SENSE_MASK;
871 else
872 *out_type = IRQ_TYPE_NONE;
872 return 0; 873 return 0;
873} 874}
874EXPORT_SYMBOL_GPL(irq_domain_xlate_onetwocell); 875EXPORT_SYMBOL_GPL(irq_domain_xlate_onetwocell);
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 9530fcd27704..0c5f1a5db654 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -669,8 +669,6 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned long flags)
669 return 0; 669 return 0;
670 } 670 }
671 671
672 flags &= IRQ_TYPE_SENSE_MASK;
673
674 if (chip->flags & IRQCHIP_SET_TYPE_MASKED) { 672 if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
675 if (!irqd_irq_masked(&desc->irq_data)) 673 if (!irqd_irq_masked(&desc->irq_data))
676 mask_irq(desc); 674 mask_irq(desc);
@@ -678,7 +676,8 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned long flags)
678 unmask = 1; 676 unmask = 1;
679 } 677 }
680 678
681 /* caller masked out all except trigger mode flags */ 679 /* Mask all flags except trigger mode */
680 flags &= IRQ_TYPE_SENSE_MASK;
682 ret = chip->irq_set_type(&desc->irq_data, flags); 681 ret = chip->irq_set_type(&desc->irq_data, flags);
683 682
684 switch (ret) { 683 switch (ret) {
diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c
index 19e9dfbe97fa..8a3e872798f3 100644
--- a/kernel/irq/msi.c
+++ b/kernel/irq/msi.c
@@ -18,20 +18,42 @@
18/* Temparory solution for building, will be removed later */ 18/* Temparory solution for building, will be removed later */
19#include <linux/pci.h> 19#include <linux/pci.h>
20 20
21struct msi_desc *alloc_msi_entry(struct device *dev) 21/**
22 * alloc_msi_entry - Allocate an initialize msi_entry
23 * @dev: Pointer to the device for which this is allocated
24 * @nvec: The number of vectors used in this entry
25 * @affinity: Optional pointer to an affinity mask array size of @nvec
26 *
27 * If @affinity is not NULL then a an affinity array[@nvec] is allocated
28 * and the affinity masks from @affinity are copied.
29 */
30struct msi_desc *
31alloc_msi_entry(struct device *dev, int nvec, const struct cpumask *affinity)
22{ 32{
23 struct msi_desc *desc = kzalloc(sizeof(*desc), GFP_KERNEL); 33 struct msi_desc *desc;
34
35 desc = kzalloc(sizeof(*desc), GFP_KERNEL);
24 if (!desc) 36 if (!desc)
25 return NULL; 37 return NULL;
26 38
27 INIT_LIST_HEAD(&desc->list); 39 INIT_LIST_HEAD(&desc->list);
28 desc->dev = dev; 40 desc->dev = dev;
41 desc->nvec_used = nvec;
42 if (affinity) {
43 desc->affinity = kmemdup(affinity,
44 nvec * sizeof(*desc->affinity), GFP_KERNEL);
45 if (!desc->affinity) {
46 kfree(desc);
47 return NULL;
48 }
49 }
29 50
30 return desc; 51 return desc;
31} 52}
32 53
33void free_msi_entry(struct msi_desc *entry) 54void free_msi_entry(struct msi_desc *entry)
34{ 55{
56 kfree(entry->affinity);
35 kfree(entry); 57 kfree(entry);
36} 58}
37 59
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 17caf4b63342..8ed90e3a88d6 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -78,6 +78,17 @@ static void wakeup_softirqd(void)
78} 78}
79 79
80/* 80/*
81 * If ksoftirqd is scheduled, we do not want to process pending softirqs
82 * right now. Let ksoftirqd handle this at its own rate, to get fairness.
83 */
84static bool ksoftirqd_running(void)
85{
86 struct task_struct *tsk = __this_cpu_read(ksoftirqd);
87
88 return tsk && (tsk->state == TASK_RUNNING);
89}
90
91/*
81 * preempt_count and SOFTIRQ_OFFSET usage: 92 * preempt_count and SOFTIRQ_OFFSET usage:
82 * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving 93 * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving
83 * softirq processing. 94 * softirq processing.
@@ -313,7 +324,7 @@ asmlinkage __visible void do_softirq(void)
313 324
314 pending = local_softirq_pending(); 325 pending = local_softirq_pending();
315 326
316 if (pending) 327 if (pending && !ksoftirqd_running())
317 do_softirq_own_stack(); 328 do_softirq_own_stack();
318 329
319 local_irq_restore(flags); 330 local_irq_restore(flags);
@@ -340,6 +351,9 @@ void irq_enter(void)
340 351
341static inline void invoke_softirq(void) 352static inline void invoke_softirq(void)
342{ 353{
354 if (ksoftirqd_running())
355 return;
356
343 if (!force_irqthreads) { 357 if (!force_irqthreads) {
344#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK 358#ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK
345 /* 359 /*