 Documentation/devicetree/bindings/interrupt-controller/fsl,irqsteer.txt       |  11
 Documentation/devicetree/bindings/interrupt-controller/loongson,ls1x-intc.txt |  24
 drivers/irqchip/Kconfig                                                       |   9
 drivers/irqchip/Makefile                                                      |   1
 drivers/irqchip/irq-brcmstb-l2.c                                              |  10
 drivers/irqchip/irq-gic-v3-its.c                                              |  28
 drivers/irqchip/irq-i8259.c                                                   |   9
 drivers/irqchip/irq-imx-irqsteer.c                                            | 115
 drivers/irqchip/irq-ls1x.c                                                    | 192
 drivers/irqchip/irq-sifive-plic.c                                             | 116
 include/linux/interrupt.h                                                     |  18
 include/linux/irq.h                                                           |  10
 include/linux/irqdesc.h                                                       |   5
 include/linux/irqdomain.h                                                     |   1
 kernel/irq/chip.c                                                             |  54
 kernel/irq/debugfs.c                                                          |   6
 kernel/irq/internals.h                                                        |   2
 kernel/irq/irqdesc.c                                                          |  35
 kernel/irq/irqdomain.c                                                        |  14
 kernel/irq/manage.c                                                           | 405
 20 files changed, 964 insertions(+), 101 deletions(-)
diff --git a/Documentation/devicetree/bindings/interrupt-controller/fsl,irqsteer.txt b/Documentation/devicetree/bindings/interrupt-controller/fsl,irqsteer.txt
index 45790ce6f5b9..582991c426ee 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/fsl,irqsteer.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/fsl,irqsteer.txt
@@ -6,8 +6,9 @@ Required properties:
 	- "fsl,imx8m-irqsteer"
 	- "fsl,imx-irqsteer"
 - reg: Physical base address and size of registers.
-- interrupts: Should contain the parent interrupt line used to multiplex the
-  input interrupts.
+- interrupts: Should contain up to 8 parent interrupt lines used to
+  multiplex the input interrupts. They should be specified sequentially
+  from output 0 to 7.
 - clocks: Should contain one clock for each entry in clock-names,
   see Documentation/devicetree/bindings/clock/clock-bindings.txt
 - clock-names:
@@ -16,8 +17,8 @@ Required properties:
 - #interrupt-cells: Specifies the number of cells needed to encode an
   interrupt source. The value must be 1.
 - fsl,channel: The output channel that all input IRQs should be steered into.
-- fsl,irq-groups: Number of IRQ groups managed by this controller instance.
-  Each group manages 64 input interrupts.
+- fsl,num-irqs: Number of input interrupts of this channel.
+  Should be a multiple of 32, up to 512 input interrupts.
 
 Example:
 
@@ -28,7 +29,7 @@ Example:
 	clocks = <&clk IMX8MQ_CLK_DISP_APB_ROOT>;
 	clock-names = "ipg";
 	fsl,channel = <0>;
-	fsl,irq-groups = <1>;
+	fsl,num-irqs = <64>;
 	interrupt-controller;
 	#interrupt-cells = <1>;
 };
diff --git a/Documentation/devicetree/bindings/interrupt-controller/loongson,ls1x-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/loongson,ls1x-intc.txt
new file mode 100644
index 000000000000..a63ed9fcb535
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/loongson,ls1x-intc.txt
@@ -0,0 +1,24 @@
+Loongson ls1x Interrupt Controller
+
+Required properties:
+
+- compatible : should be "loongson,ls1x-intc".
+
+- reg : Specifies base physical address and size of the registers.
+- interrupt-controller : Identifies the node as an interrupt controller
+- #interrupt-cells : Specifies the number of cells needed to encode an
+  interrupt source. The value shall be 2.
+- interrupts : Specifies the CPU interrupt the controller is connected to.
+
+Example:
+
+intc: interrupt-controller@1fd01040 {
+	compatible = "loongson,ls1x-intc";
+	reg = <0x1fd01040 0x18>;
+
+	interrupt-controller;
+	#interrupt-cells = <2>;
+
+	interrupt-parent = <&cpu_intc>;
+	interrupts = <2>;
+};
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 3d1e60779078..5dcb5456cd14 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -406,6 +406,15 @@ config IMX_IRQSTEER
 	help
 	  Support for the i.MX IRQSTEER interrupt multiplexer/remapper.
 
+config LS1X_IRQ
+	bool "Loongson-1 Interrupt Controller"
+	depends on MACH_LOONGSON32
+	default y
+	select IRQ_DOMAIN
+	select GENERIC_IRQ_CHIP
+	help
+	  Support for the Loongson-1 platform Interrupt Controller.
+
 endmenu
 
 config SIFIVE_PLIC
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index c93713d24b86..7acd0e36d0b4 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -94,3 +94,4 @@ obj-$(CONFIG_CSKY_APB_INTC)	+= irq-csky-apb-intc.o
 obj-$(CONFIG_SIFIVE_PLIC)	+= irq-sifive-plic.o
 obj-$(CONFIG_IMX_IRQSTEER)	+= irq-imx-irqsteer.o
 obj-$(CONFIG_MADERA_IRQ)	+= irq-madera.o
+obj-$(CONFIG_LS1X_IRQ)		+= irq-ls1x.o
diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c
index 0e65f609352e..83364fedbf0a 100644
--- a/drivers/irqchip/irq-brcmstb-l2.c
+++ b/drivers/irqchip/irq-brcmstb-l2.c
@@ -129,8 +129,9 @@ static void brcmstb_l2_intc_suspend(struct irq_data *d)
 	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
 	struct irq_chip_type *ct = irq_data_get_chip_type(d);
 	struct brcmstb_l2_intc_data *b = gc->private;
+	unsigned long flags;
 
-	irq_gc_lock(gc);
+	irq_gc_lock_irqsave(gc, flags);
 	/* Save the current mask */
 	b->saved_mask = irq_reg_readl(gc, ct->regs.mask);
 
@@ -139,7 +140,7 @@ static void brcmstb_l2_intc_suspend(struct irq_data *d)
 		irq_reg_writel(gc, ~gc->wake_active, ct->regs.disable);
 		irq_reg_writel(gc, gc->wake_active, ct->regs.enable);
 	}
-	irq_gc_unlock(gc);
+	irq_gc_unlock_irqrestore(gc, flags);
 }
 
 static void brcmstb_l2_intc_resume(struct irq_data *d)
@@ -147,8 +148,9 @@ static void brcmstb_l2_intc_resume(struct irq_data *d)
 	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
 	struct irq_chip_type *ct = irq_data_get_chip_type(d);
 	struct brcmstb_l2_intc_data *b = gc->private;
+	unsigned long flags;
 
-	irq_gc_lock(gc);
+	irq_gc_lock_irqsave(gc, flags);
 	if (ct->chip.irq_ack) {
 		/* Clear unmasked non-wakeup interrupts */
 		irq_reg_writel(gc, ~b->saved_mask & ~gc->wake_active,
@@ -158,7 +160,7 @@ static void brcmstb_l2_intc_resume(struct irq_data *d)
 	/* Restore the saved mask */
 	irq_reg_writel(gc, b->saved_mask, ct->regs.disable);
 	irq_reg_writel(gc, ~b->saved_mask, ct->regs.enable);
-	irq_gc_unlock(gc);
+	irq_gc_unlock_irqrestore(gc, flags);
 }
 
 static int __init brcmstb_l2_intc_of_init(struct device_node *np,
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 7f2a45445b00..666f5986b0ce 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -1737,6 +1737,7 @@ static int its_setup_baser(struct its_node *its, struct its_baser *baser,
 	u64 type = GITS_BASER_TYPE(val);
 	u64 baser_phys, tmp;
 	u32 alloc_pages;
+	struct page *page;
 	void *base;
 
 retry_alloc_baser:
@@ -1749,10 +1750,11 @@ retry_alloc_baser:
 		order = get_order(GITS_BASER_PAGES_MAX * psz);
 	}
 
-	base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
-	if (!base)
+	page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, order);
+	if (!page)
 		return -ENOMEM;
 
+	base = (void *)page_address(page);
 	baser_phys = virt_to_phys(base);
 
 	/* Check if the physical address of the memory is above 48bits */
@@ -1946,6 +1948,8 @@ static int its_alloc_tables(struct its_node *its)
 			indirect = its_parse_indirect_baser(its, baser,
 							    psz, &order,
 							    its->device_ids);
+			break;
+
 		case GITS_BASER_TYPE_VCPU:
 			indirect = its_parse_indirect_baser(its, baser,
 							    psz, &order,
@@ -2236,7 +2240,8 @@ static struct its_baser *its_get_baser(struct its_node *its, u32 type)
 	return NULL;
 }
 
-static bool its_alloc_table_entry(struct its_baser *baser, u32 id)
+static bool its_alloc_table_entry(struct its_node *its,
+				  struct its_baser *baser, u32 id)
 {
 	struct page *page;
 	u32 esz, idx;
@@ -2256,7 +2261,8 @@ static bool its_alloc_table_entry(struct its_baser *baser, u32 id)
 
 	/* Allocate memory for 2nd level table */
 	if (!table[idx]) {
-		page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(baser->psz));
+		page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
+					get_order(baser->psz));
 		if (!page)
 			return false;
 
@@ -2287,7 +2293,7 @@ static bool its_alloc_device_table(struct its_node *its, u32 dev_id)
 	if (!baser)
 		return (ilog2(dev_id) < its->device_ids);
 
-	return its_alloc_table_entry(baser, dev_id);
+	return its_alloc_table_entry(its, baser, dev_id);
 }
 
 static bool its_alloc_vpe_table(u32 vpe_id)
@@ -2311,7 +2317,7 @@ static bool its_alloc_vpe_table(u32 vpe_id)
 		if (!baser)
 			return false;
 
-		if (!its_alloc_table_entry(baser, vpe_id))
+		if (!its_alloc_table_entry(its, baser, vpe_id))
 			return false;
 	}
 
@@ -2345,7 +2351,7 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
 	nr_ites = max(2, nvecs);
 	sz = nr_ites * its->ite_size;
 	sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
-	itt = kzalloc(sz, GFP_KERNEL);
+	itt = kzalloc_node(sz, GFP_KERNEL, its->numa_node);
 	if (alloc_lpis) {
 		lpi_map = its_lpi_alloc(nvecs, &lpi_base, &nr_lpis);
 		if (lpi_map)
@@ -3487,6 +3493,7 @@ static int __init its_probe_one(struct resource *res,
 	void __iomem *its_base;
 	u32 val, ctlr;
 	u64 baser, tmp, typer;
+	struct page *page;
 	int err;
 
 	its_base = ioremap(res->start, resource_size(res));
@@ -3542,12 +3549,13 @@ static int __init its_probe_one(struct resource *res,
 
 	its->numa_node = numa_node;
 
-	its->cmd_base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
-						 get_order(ITS_CMD_QUEUE_SZ));
-	if (!its->cmd_base) {
+	page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
+				get_order(ITS_CMD_QUEUE_SZ));
+	if (!page) {
 		err = -ENOMEM;
 		goto out_free_its;
 	}
+	its->cmd_base = (void *)page_address(page);
 	its->cmd_write = its->cmd_base;
 	its->fwnode_handle = handle;
 	its->get_msi_base = its_irq_get_msi_base;
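
The recurring pattern in the ITS changes above is worth isolating: allocate the backing pages on the ITS's own NUMA node, then derive the linear mapping address from the page. A minimal sketch of that idiom, assuming a caller that already knows its node and allocation order (the function name below is illustrative, not from the patch):

#include <linux/gfp.h>
#include <linux/mm.h>

/* Allocate 'order' zeroed pages local to 'node' and return their kernel
 * virtual address; pair the result with free_pages() on teardown. */
static void *demo_alloc_local_table(int node, unsigned int order)
{
	struct page *page;

	page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, order);
	if (!page)
		return NULL;

	return page_address(page);
}
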
diff --git a/drivers/irqchip/irq-i8259.c b/drivers/irqchip/irq-i8259.c
index b0d4aab1a58c..d000870d9b6b 100644
--- a/drivers/irqchip/irq-i8259.c
+++ b/drivers/irqchip/irq-i8259.c
@@ -225,14 +225,6 @@ static struct syscore_ops i8259_syscore_ops = {
 	.shutdown = i8259A_shutdown,
 };
 
-static int __init i8259A_init_sysfs(void)
-{
-	register_syscore_ops(&i8259_syscore_ops);
-	return 0;
-}
-
-device_initcall(i8259A_init_sysfs);
-
 static void init_8259A(int auto_eoi)
 {
 	unsigned long flags;
@@ -332,6 +324,7 @@ struct irq_domain * __init __init_i8259_irqs(struct device_node *node)
 		panic("Failed to add i8259 IRQ domain");
 
 	setup_irq(I8259A_IRQ_BASE + PIC_CASCADE_IR, &irq2);
+	register_syscore_ops(&i8259_syscore_ops);
 	return domain;
 }
 
diff --git a/drivers/irqchip/irq-imx-irqsteer.c b/drivers/irqchip/irq-imx-irqsteer.c
index 5b3f1d735685..d1098f4da6a4 100644
--- a/drivers/irqchip/irq-imx-irqsteer.c
+++ b/drivers/irqchip/irq-imx-irqsteer.c
@@ -10,10 +10,11 @@
 #include <linux/irqchip/chained_irq.h>
 #include <linux/irqdomain.h>
 #include <linux/kernel.h>
+#include <linux/of_irq.h>
 #include <linux/of_platform.h>
 #include <linux/spinlock.h>
 
-#define CTRL_STRIDE_OFF(_t, _r)	(_t * 8 * _r)
+#define CTRL_STRIDE_OFF(_t, _r)	(_t * 4 * _r)
 #define CHANCTRL		0x0
 #define CHANMASK(n, t)		(CTRL_STRIDE_OFF(t, 0) + 0x4 * (n) + 0x4)
 #define CHANSET(n, t)		(CTRL_STRIDE_OFF(t, 1) + 0x4 * (n) + 0x4)
@@ -21,12 +22,15 @@
 #define CHAN_MINTDIS(t)		(CTRL_STRIDE_OFF(t, 3) + 0x4)
 #define CHAN_MASTRSTAT(t)	(CTRL_STRIDE_OFF(t, 3) + 0x8)
 
+#define CHAN_MAX_OUTPUT_INT	0x8
+
 struct irqsteer_data {
 	void __iomem		*regs;
 	struct clk		*ipg_clk;
-	int			irq;
+	int			irq[CHAN_MAX_OUTPUT_INT];
+	int			irq_count;
 	raw_spinlock_t		lock;
-	int			irq_groups;
+	int			reg_num;
 	int			channel;
 	struct irq_domain	*domain;
 	u32			*saved_reg;
@@ -35,7 +39,7 @@ struct irqsteer_data {
 static int imx_irqsteer_get_reg_index(struct irqsteer_data *data,
 				      unsigned long irqnum)
 {
-	return (data->irq_groups * 2 - irqnum / 32 - 1);
+	return (data->reg_num - irqnum / 32 - 1);
 }
 
 static void imx_irqsteer_irq_unmask(struct irq_data *d)
@@ -46,9 +50,9 @@ static void imx_irqsteer_irq_unmask(struct irq_data *d)
 	u32 val;
 
 	raw_spin_lock_irqsave(&data->lock, flags);
-	val = readl_relaxed(data->regs + CHANMASK(idx, data->irq_groups));
+	val = readl_relaxed(data->regs + CHANMASK(idx, data->reg_num));
 	val |= BIT(d->hwirq % 32);
-	writel_relaxed(val, data->regs + CHANMASK(idx, data->irq_groups));
+	writel_relaxed(val, data->regs + CHANMASK(idx, data->reg_num));
 	raw_spin_unlock_irqrestore(&data->lock, flags);
 }
 
@@ -60,9 +64,9 @@ static void imx_irqsteer_irq_mask(struct irq_data *d)
 	u32 val;
 
 	raw_spin_lock_irqsave(&data->lock, flags);
-	val = readl_relaxed(data->regs + CHANMASK(idx, data->irq_groups));
+	val = readl_relaxed(data->regs + CHANMASK(idx, data->reg_num));
 	val &= ~BIT(d->hwirq % 32);
-	writel_relaxed(val, data->regs + CHANMASK(idx, data->irq_groups));
+	writel_relaxed(val, data->regs + CHANMASK(idx, data->reg_num));
 	raw_spin_unlock_irqrestore(&data->lock, flags);
 }
 
@@ -87,23 +91,47 @@ static const struct irq_domain_ops imx_irqsteer_domain_ops = {
 	.xlate = irq_domain_xlate_onecell,
 };
 
+static int imx_irqsteer_get_hwirq_base(struct irqsteer_data *data, u32 irq)
+{
+	int i;
+
+	for (i = 0; i < data->irq_count; i++) {
+		if (data->irq[i] == irq)
+			return i * 64;
+	}
+
+	return -EINVAL;
+}
+
 static void imx_irqsteer_irq_handler(struct irq_desc *desc)
 {
 	struct irqsteer_data *data = irq_desc_get_handler_data(desc);
-	int i;
+	int hwirq;
+	int irq, i;
 
 	chained_irq_enter(irq_desc_get_chip(desc), desc);
 
-	for (i = 0; i < data->irq_groups * 64; i += 32) {
-		int idx = imx_irqsteer_get_reg_index(data, i);
+	irq = irq_desc_get_irq(desc);
+	hwirq = imx_irqsteer_get_hwirq_base(data, irq);
+	if (hwirq < 0) {
+		pr_warn("%s: unable to get hwirq base for irq %d\n",
+			__func__, irq);
+		return;
+	}
+
+	for (i = 0; i < 2; i++, hwirq += 32) {
+		int idx = imx_irqsteer_get_reg_index(data, hwirq);
 		unsigned long irqmap;
 		int pos, virq;
 
+		if (hwirq >= data->reg_num * 32)
+			break;
+
 		irqmap = readl_relaxed(data->regs +
-				       CHANSTATUS(idx, data->irq_groups));
+				       CHANSTATUS(idx, data->reg_num));
 
 		for_each_set_bit(pos, &irqmap, 32) {
-			virq = irq_find_mapping(data->domain, pos + i);
+			virq = irq_find_mapping(data->domain, pos + hwirq);
 			if (virq)
 				generic_handle_irq(virq);
 		}
@@ -117,7 +145,8 @@ static int imx_irqsteer_probe(struct platform_device *pdev)
 	struct device_node *np = pdev->dev.of_node;
 	struct irqsteer_data *data;
 	struct resource *res;
-	int ret;
+	u32 irqs_num;
+	int i, ret;
 
 	data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
 	if (!data)
@@ -130,12 +159,6 @@ static int imx_irqsteer_probe(struct platform_device *pdev)
 		return PTR_ERR(data->regs);
 	}
 
-	data->irq = platform_get_irq(pdev, 0);
-	if (data->irq <= 0) {
-		dev_err(&pdev->dev, "failed to get irq\n");
-		return -ENODEV;
-	}
-
 	data->ipg_clk = devm_clk_get(&pdev->dev, "ipg");
 	if (IS_ERR(data->ipg_clk)) {
 		ret = PTR_ERR(data->ipg_clk);
@@ -146,12 +169,19 @@ static int imx_irqsteer_probe(struct platform_device *pdev)
 
 	raw_spin_lock_init(&data->lock);
 
-	of_property_read_u32(np, "fsl,irq-groups", &data->irq_groups);
+	of_property_read_u32(np, "fsl,num-irqs", &irqs_num);
 	of_property_read_u32(np, "fsl,channel", &data->channel);
 
+	/*
+	 * There is one output irq for each group of 64 inputs.
+	 * One register bit map can represent 32 input interrupts.
+	 */
+	data->irq_count = DIV_ROUND_UP(irqs_num, 64);
+	data->reg_num = irqs_num / 32;
+
 	if (IS_ENABLED(CONFIG_PM_SLEEP)) {
 		data->saved_reg = devm_kzalloc(&pdev->dev,
-					       sizeof(u32) * data->irq_groups * 2,
+					       sizeof(u32) * data->reg_num,
 					       GFP_KERNEL);
 		if (!data->saved_reg)
 			return -ENOMEM;
@@ -166,27 +196,48 @@ static int imx_irqsteer_probe(struct platform_device *pdev)
 	/* steer all IRQs into configured channel */
 	writel_relaxed(BIT(data->channel), data->regs + CHANCTRL);
 
-	data->domain = irq_domain_add_linear(np, data->irq_groups * 64,
+	data->domain = irq_domain_add_linear(np, data->reg_num * 32,
 					     &imx_irqsteer_domain_ops, data);
 	if (!data->domain) {
 		dev_err(&pdev->dev, "failed to create IRQ domain\n");
-		clk_disable_unprepare(data->ipg_clk);
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	if (!data->irq_count || data->irq_count > CHAN_MAX_OUTPUT_INT) {
+		ret = -EINVAL;
+		goto out;
 	}
 
-	irq_set_chained_handler_and_data(data->irq, imx_irqsteer_irq_handler,
-					 data);
+	for (i = 0; i < data->irq_count; i++) {
+		data->irq[i] = irq_of_parse_and_map(np, i);
+		if (!data->irq[i]) {
+			ret = -EINVAL;
+			goto out;
+		}
+
+		irq_set_chained_handler_and_data(data->irq[i],
+						 imx_irqsteer_irq_handler,
+						 data);
+	}
 
 	platform_set_drvdata(pdev, data);
 
 	return 0;
+out:
+	clk_disable_unprepare(data->ipg_clk);
+	return ret;
 }
 
 static int imx_irqsteer_remove(struct platform_device *pdev)
 {
 	struct irqsteer_data *irqsteer_data = platform_get_drvdata(pdev);
+	int i;
+
+	for (i = 0; i < irqsteer_data->irq_count; i++)
+		irq_set_chained_handler_and_data(irqsteer_data->irq[i],
+						 NULL, NULL);
 
-	irq_set_chained_handler_and_data(irqsteer_data->irq, NULL, NULL);
 	irq_domain_remove(irqsteer_data->domain);
 
 	clk_disable_unprepare(irqsteer_data->ipg_clk);
@@ -199,9 +250,9 @@ static void imx_irqsteer_save_regs(struct irqsteer_data *data)
 {
 	int i;
 
-	for (i = 0; i < data->irq_groups * 2; i++)
+	for (i = 0; i < data->reg_num; i++)
 		data->saved_reg[i] = readl_relaxed(data->regs +
-						   CHANMASK(i, data->irq_groups));
+						   CHANMASK(i, data->reg_num));
 }
 
 static void imx_irqsteer_restore_regs(struct irqsteer_data *data)
@@ -209,9 +260,9 @@ static void imx_irqsteer_restore_regs(struct irqsteer_data *data)
 	int i;
 
 	writel_relaxed(BIT(data->channel), data->regs + CHANCTRL);
-	for (i = 0; i < data->irq_groups * 2; i++)
+	for (i = 0; i < data->reg_num; i++)
 		writel_relaxed(data->saved_reg[i],
-			       data->regs + CHANMASK(i, data->irq_groups));
+			       data->regs + CHANMASK(i, data->reg_num));
 }
 
 static int imx_irqsteer_suspend(struct device *dev)
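
To make the probe-time arithmetic concrete, here is a standalone worked example under an assumed fsl,num-irqs value of 96: two parent outputs (one per group of 64 inputs) and three 32-bit mask/status registers (one per 32 inputs).

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int irqs_num = 96;	/* assumed fsl,num-irqs value */

	/* one output irq per group of 64 inputs */
	unsigned int irq_count = DIV_ROUND_UP(irqs_num, 64);	/* 2 */
	/* one 32-bit register per 32 inputs */
	unsigned int reg_num = irqs_num / 32;			/* 3 */

	printf("outputs=%u regs=%u domain size=%u\n",
	       irq_count, reg_num, reg_num * 32);
	return 0;
}
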
diff --git a/drivers/irqchip/irq-ls1x.c b/drivers/irqchip/irq-ls1x.c
new file mode 100644
index 000000000000..86b72fbd3b45
--- /dev/null
+++ b/drivers/irqchip/irq-ls1x.c
@@ -0,0 +1,192 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019, Jiaxun Yang <jiaxun.yang@flygoat.com>
+ *  Loongson-1 platform IRQ support
+ */
+
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/irqchip.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/io.h>
+#include <linux/irqchip/chained_irq.h>
+
+#define LS_REG_INTC_STATUS	0x00
+#define LS_REG_INTC_EN		0x04
+#define LS_REG_INTC_SET		0x08
+#define LS_REG_INTC_CLR		0x0c
+#define LS_REG_INTC_POL		0x10
+#define LS_REG_INTC_EDGE	0x14
+
+/**
+ * struct ls1x_intc_priv - private ls1x-intc data.
+ * @domain:	IRQ domain.
+ * @intc_base:	IO Base of intc registers.
+ */
+
+struct ls1x_intc_priv {
+	struct irq_domain	*domain;
+	void __iomem		*intc_base;
+};
+
+
+static void ls1x_chained_handle_irq(struct irq_desc *desc)
+{
+	struct ls1x_intc_priv *priv = irq_desc_get_handler_data(desc);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	u32 pending;
+
+	chained_irq_enter(chip, desc);
+	pending = readl(priv->intc_base + LS_REG_INTC_STATUS) &
+			readl(priv->intc_base + LS_REG_INTC_EN);
+
+	if (!pending)
+		spurious_interrupt();
+
+	while (pending) {
+		int bit = __ffs(pending);
+
+		generic_handle_irq(irq_find_mapping(priv->domain, bit));
+		pending &= ~BIT(bit);
+	}
+
+	chained_irq_exit(chip, desc);
+}
+
+static void ls_intc_set_bit(struct irq_chip_generic *gc,
+			    unsigned int offset,
+			    u32 mask, bool set)
+{
+	if (set)
+		writel(readl(gc->reg_base + offset) | mask,
+		       gc->reg_base + offset);
+	else
+		writel(readl(gc->reg_base + offset) & ~mask,
+		       gc->reg_base + offset);
+}
+
+static int ls_intc_set_type(struct irq_data *data, unsigned int type)
+{
+	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
+	u32 mask = data->mask;
+
+	switch (type) {
+	case IRQ_TYPE_LEVEL_HIGH:
+		ls_intc_set_bit(gc, LS_REG_INTC_EDGE, mask, false);
+		ls_intc_set_bit(gc, LS_REG_INTC_POL, mask, true);
+		break;
+	case IRQ_TYPE_LEVEL_LOW:
+		ls_intc_set_bit(gc, LS_REG_INTC_EDGE, mask, false);
+		ls_intc_set_bit(gc, LS_REG_INTC_POL, mask, false);
+		break;
+	case IRQ_TYPE_EDGE_RISING:
+		ls_intc_set_bit(gc, LS_REG_INTC_EDGE, mask, true);
+		ls_intc_set_bit(gc, LS_REG_INTC_POL, mask, true);
+		break;
+	case IRQ_TYPE_EDGE_FALLING:
+		ls_intc_set_bit(gc, LS_REG_INTC_EDGE, mask, true);
+		ls_intc_set_bit(gc, LS_REG_INTC_POL, mask, false);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	irqd_set_trigger_type(data, type);
+	return irq_setup_alt_chip(data, type);
+}
+
+
+static int __init ls1x_intc_of_init(struct device_node *node,
+				    struct device_node *parent)
+{
+	struct irq_chip_generic *gc;
+	struct irq_chip_type *ct;
+	struct ls1x_intc_priv *priv;
+	int parent_irq, err = 0;
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->intc_base = of_iomap(node, 0);
+	if (!priv->intc_base) {
+		err = -ENODEV;
+		goto out_free_priv;
+	}
+
+	parent_irq = irq_of_parse_and_map(node, 0);
+	if (!parent_irq) {
+		pr_err("ls1x-irq: unable to get parent irq\n");
+		err = -ENODEV;
+		goto out_iounmap;
+	}
+
+	/* Set up an IRQ domain */
+	priv->domain = irq_domain_add_linear(node, 32, &irq_generic_chip_ops,
+					     NULL);
+	if (!priv->domain) {
+		pr_err("ls1x-irq: cannot add IRQ domain\n");
+		goto out_iounmap;
+	}
+
+	err = irq_alloc_domain_generic_chips(priv->domain, 32, 2,
+		node->full_name, handle_level_irq,
+		IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN, 0,
+		IRQ_GC_INIT_MASK_CACHE);
+	if (err) {
+		pr_err("ls1x-irq: unable to register IRQ domain\n");
+		goto out_free_domain;
+	}
+
+	/* Mask all irqs */
+	writel(0x0, priv->intc_base + LS_REG_INTC_EN);
+
+	/* Ack all irqs */
+	writel(0xffffffff, priv->intc_base + LS_REG_INTC_CLR);
+
+	/* Set all irqs to high level triggered */
+	writel(0xffffffff, priv->intc_base + LS_REG_INTC_POL);
+
+	gc = irq_get_domain_generic_chip(priv->domain, 0);
+
+	gc->reg_base = priv->intc_base;
+
+	ct = gc->chip_types;
+	ct[0].type = IRQ_TYPE_LEVEL_MASK;
+	ct[0].regs.mask = LS_REG_INTC_EN;
+	ct[0].regs.ack = LS_REG_INTC_CLR;
+	ct[0].chip.irq_unmask = irq_gc_mask_set_bit;
+	ct[0].chip.irq_mask = irq_gc_mask_clr_bit;
+	ct[0].chip.irq_ack = irq_gc_ack_set_bit;
+	ct[0].chip.irq_set_type = ls_intc_set_type;
+	ct[0].handler = handle_level_irq;
+
+	ct[1].type = IRQ_TYPE_EDGE_BOTH;
+	ct[1].regs.mask = LS_REG_INTC_EN;
+	ct[1].regs.ack = LS_REG_INTC_CLR;
+	ct[1].chip.irq_unmask = irq_gc_mask_set_bit;
+	ct[1].chip.irq_mask = irq_gc_mask_clr_bit;
+	ct[1].chip.irq_ack = irq_gc_ack_set_bit;
+	ct[1].chip.irq_set_type = ls_intc_set_type;
+	ct[1].handler = handle_edge_irq;
+
+	irq_set_chained_handler_and_data(parent_irq,
+					 ls1x_chained_handle_irq, priv);
+
+	return 0;
+
+out_free_domain:
+	irq_domain_remove(priv->domain);
+out_iounmap:
+	iounmap(priv->intc_base);
+out_free_priv:
+	kfree(priv);
+
+	return err;
+}
+
+IRQCHIP_DECLARE(ls1x_intc, "loongson,ls1x-intc", ls1x_intc_of_init);
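
The chained handler above drains the pending word lowest-bit-first. A standalone illustration of that dispatch loop in plain C, with an assumed bitmap value and __builtin_ffs() standing in for the kernel's __ffs():

#include <stdio.h>

int main(void)
{
	unsigned int pending = 0x10011;	/* bits 0, 4 and 16 pending (assumed) */

	while (pending) {
		int bit = __builtin_ffs(pending) - 1;	/* lowest set bit */

		printf("dispatch hwirq %d\n", bit);
		pending &= ~(1u << bit);
	}
	return 0;
}
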
diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
index 357e9daf94ae..cf755964f2f8 100644
--- a/drivers/irqchip/irq-sifive-plic.c
+++ b/drivers/irqchip/irq-sifive-plic.c
@@ -59,62 +59,83 @@ static void __iomem *plic_regs;
 
 struct plic_handler {
 	bool			present;
-	int			ctxid;
+	void __iomem		*hart_base;
+	/*
+	 * Protect mask operations on the registers given that we can't
+	 * assume atomic memory operations work on them.
+	 */
+	raw_spinlock_t		enable_lock;
+	void __iomem		*enable_base;
 };
 static DEFINE_PER_CPU(struct plic_handler, plic_handlers);
 
-static inline void __iomem *plic_hart_offset(int ctxid)
-{
-	return plic_regs + CONTEXT_BASE + ctxid * CONTEXT_PER_HART;
-}
-
-static inline u32 __iomem *plic_enable_base(int ctxid)
-{
-	return plic_regs + ENABLE_BASE + ctxid * ENABLE_PER_HART;
-}
-
-/*
- * Protect mask operations on the registers given that we can't assume that
- * atomic memory operations work on them.
- */
-static DEFINE_RAW_SPINLOCK(plic_toggle_lock);
-
-static inline void plic_toggle(int ctxid, int hwirq, int enable)
+static inline void plic_toggle(struct plic_handler *handler,
+			       int hwirq, int enable)
 {
-	u32 __iomem *reg = plic_enable_base(ctxid) + (hwirq / 32);
+	u32 __iomem *reg = handler->enable_base + (hwirq / 32) * sizeof(u32);
 	u32 hwirq_mask = 1 << (hwirq % 32);
 
-	raw_spin_lock(&plic_toggle_lock);
+	raw_spin_lock(&handler->enable_lock);
 	if (enable)
 		writel(readl(reg) | hwirq_mask, reg);
 	else
 		writel(readl(reg) & ~hwirq_mask, reg);
-	raw_spin_unlock(&plic_toggle_lock);
+	raw_spin_unlock(&handler->enable_lock);
 }
 
-static inline void plic_irq_toggle(struct irq_data *d, int enable)
+static inline void plic_irq_toggle(const struct cpumask *mask,
+				   int hwirq, int enable)
 {
 	int cpu;
 
-	writel(enable, plic_regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID);
-	for_each_cpu(cpu, irq_data_get_affinity_mask(d)) {
+	writel(enable, plic_regs + PRIORITY_BASE + hwirq * PRIORITY_PER_ID);
+	for_each_cpu(cpu, mask) {
 		struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);
 
 		if (handler->present)
-			plic_toggle(handler->ctxid, d->hwirq, enable);
+			plic_toggle(handler, hwirq, enable);
 	}
 }
 
 static void plic_irq_enable(struct irq_data *d)
 {
-	plic_irq_toggle(d, 1);
+	unsigned int cpu = cpumask_any_and(irq_data_get_affinity_mask(d),
+					   cpu_online_mask);
+	if (WARN_ON_ONCE(cpu >= nr_cpu_ids))
+		return;
+	plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);
 }
 
 static void plic_irq_disable(struct irq_data *d)
 {
-	plic_irq_toggle(d, 0);
+	plic_irq_toggle(cpu_possible_mask, d->hwirq, 0);
 }
 
+#ifdef CONFIG_SMP
+static int plic_set_affinity(struct irq_data *d,
+			     const struct cpumask *mask_val, bool force)
+{
+	unsigned int cpu;
+
+	if (force)
+		cpu = cpumask_first(mask_val);
+	else
+		cpu = cpumask_any_and(mask_val, cpu_online_mask);
+
+	if (cpu >= nr_cpu_ids)
+		return -EINVAL;
+
+	if (!irqd_irq_disabled(d)) {
+		plic_irq_toggle(cpu_possible_mask, d->hwirq, 0);
+		plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);
+	}
+
+	irq_data_update_effective_affinity(d, cpumask_of(cpu));
+
+	return IRQ_SET_MASK_OK_DONE;
+}
+#endif
+
 static struct irq_chip plic_chip = {
 	.name		= "SiFive PLIC",
 	/*
@@ -123,6 +144,9 @@ static struct irq_chip plic_chip = {
 	 */
 	.irq_enable	= plic_irq_enable,
 	.irq_disable	= plic_irq_disable,
+#ifdef CONFIG_SMP
+	.irq_set_affinity = plic_set_affinity,
+#endif
 };
 
 static int plic_irqdomain_map(struct irq_domain *d, unsigned int irq,
@@ -150,7 +174,7 @@ static struct irq_domain *plic_irqdomain;
 static void plic_handle_irq(struct pt_regs *regs)
 {
 	struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
-	void __iomem *claim = plic_hart_offset(handler->ctxid) + CONTEXT_CLAIM;
+	void __iomem *claim = handler->hart_base + CONTEXT_CLAIM;
 	irq_hw_number_t hwirq;
 
 	WARN_ON_ONCE(!handler->present);
@@ -186,7 +210,7 @@ static int plic_find_hart_id(struct device_node *node)
 static int __init plic_init(struct device_node *node,
 		struct device_node *parent)
 {
-	int error = 0, nr_handlers, nr_mapped = 0, i;
+	int error = 0, nr_contexts, nr_handlers = 0, i;
 	u32 nr_irqs;
 
 	if (plic_regs) {
@@ -203,10 +227,10 @@ static int __init plic_init(struct device_node *node,
 	if (WARN_ON(!nr_irqs))
 		goto out_iounmap;
 
-	nr_handlers = of_irq_count(node);
-	if (WARN_ON(!nr_handlers))
+	nr_contexts = of_irq_count(node);
+	if (WARN_ON(!nr_contexts))
 		goto out_iounmap;
-	if (WARN_ON(nr_handlers < num_possible_cpus()))
+	if (WARN_ON(nr_contexts < num_possible_cpus()))
 		goto out_iounmap;
 
 	error = -ENOMEM;
@@ -215,7 +239,7 @@ static int __init plic_init(struct device_node *node,
 	if (WARN_ON(!plic_irqdomain))
 		goto out_iounmap;
 
-	for (i = 0; i < nr_handlers; i++) {
+	for (i = 0; i < nr_contexts; i++) {
 		struct of_phandle_args parent;
 		struct plic_handler *handler;
 		irq_hw_number_t hwirq;
@@ -237,19 +261,33 @@ static int __init plic_init(struct device_node *node,
 		}
 
 		cpu = riscv_hartid_to_cpuid(hartid);
+		if (cpu < 0) {
+			pr_warn("Invalid cpuid for context %d\n", i);
+			continue;
+		}
+
 		handler = per_cpu_ptr(&plic_handlers, cpu);
+		if (handler->present) {
+			pr_warn("handler already present for context %d.\n", i);
+			continue;
+		}
+
 		handler->present = true;
-		handler->ctxid = i;
+		handler->hart_base =
+			plic_regs + CONTEXT_BASE + i * CONTEXT_PER_HART;
+		raw_spin_lock_init(&handler->enable_lock);
+		handler->enable_base =
+			plic_regs + ENABLE_BASE + i * ENABLE_PER_HART;
 
 		/* priority must be > threshold to trigger an interrupt */
-		writel(0, plic_hart_offset(i) + CONTEXT_THRESHOLD);
+		writel(0, handler->hart_base + CONTEXT_THRESHOLD);
 		for (hwirq = 1; hwirq <= nr_irqs; hwirq++)
-			plic_toggle(i, hwirq, 0);
-		nr_mapped++;
+			plic_toggle(handler, hwirq, 0);
+		nr_handlers++;
 	}
 
-	pr_info("mapped %d interrupts to %d (out of %d) handlers.\n",
-		nr_irqs, nr_mapped, nr_handlers);
+	pr_info("mapped %d interrupts with %d handlers for %d contexts.\n",
+		nr_irqs, nr_handlers, nr_contexts);
 	set_handle_irq(plic_handle_irq);
 	return 0;
 
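
The reworked plic_toggle() treats each context's enable area as an array of 32-bit words: word index hwirq / 32, bit position hwirq % 32. A standalone worked example with an assumed hwirq:

#include <stdio.h>

int main(void)
{
	unsigned int hwirq = 70;		/* assumed interrupt source */
	unsigned int word = hwirq / 32;		/* u32 index: 2 */
	unsigned int bit = hwirq % 32;		/* bit within the word: 6 */

	printf("hwirq %u -> enable word %u, bit %u (byte offset 0x%x)\n",
	       hwirq, word, bit, word * 4);
	return 0;
}
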
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index dcdddf4fa76b..690b238a44d5 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -156,6 +156,10 @@ __request_percpu_irq(unsigned int irq, irq_handler_t handler,
 		     unsigned long flags, const char *devname,
 		     void __percpu *percpu_dev_id);
 
+extern int __must_check
+request_nmi(unsigned int irq, irq_handler_t handler, unsigned long flags,
+	    const char *name, void *dev);
+
 static inline int __must_check
 request_percpu_irq(unsigned int irq, irq_handler_t handler,
 		   const char *devname, void __percpu *percpu_dev_id)
@@ -164,9 +168,16 @@ request_percpu_irq(unsigned int irq, irq_handler_t handler,
 			devname, percpu_dev_id);
 }
 
+extern int __must_check
+request_percpu_nmi(unsigned int irq, irq_handler_t handler,
+		   const char *devname, void __percpu *dev);
+
 extern const void *free_irq(unsigned int, void *);
 extern void free_percpu_irq(unsigned int, void __percpu *);
 
+extern const void *free_nmi(unsigned int irq, void *dev_id);
+extern void free_percpu_nmi(unsigned int irq, void __percpu *percpu_dev_id);
+
 struct device;
 
 extern int __must_check
@@ -217,6 +228,13 @@ extern void enable_percpu_irq(unsigned int irq, unsigned int type);
 extern bool irq_percpu_is_enabled(unsigned int irq);
 extern void irq_wake_thread(unsigned int irq, void *dev_id);
 
+extern void disable_nmi_nosync(unsigned int irq);
+extern void disable_percpu_nmi(unsigned int irq);
+extern void enable_nmi(unsigned int irq);
+extern void enable_percpu_nmi(unsigned int irq, unsigned int type);
+extern int prepare_percpu_nmi(unsigned int irq);
+extern void teardown_percpu_nmi(unsigned int irq);
+
 /* The following three functions are for the core kernel use only. */
 extern void suspend_device_irqs(void);
 extern void resume_device_irqs(void);
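
For orientation, a minimal usage sketch of the request_nmi()/free_nmi() pair declared above, for a hypothetical driver (the handler, name and cookie are assumptions; the line must sit on a root irqchip that advertises IRQCHIP_SUPPORTS_NMI, and it cannot be shared):

#include <linux/interrupt.h>

static irqreturn_t demo_nmi_handler(int irq, void *dev_id)
{
	/* Runs in NMI context: no sleeping, no locks shared with IRQs. */
	return IRQ_HANDLED;
}

static int demo_setup_nmi(unsigned int irq, void *cookie)
{
	int err;

	err = request_nmi(irq, demo_nmi_handler, 0, "demo-nmi", cookie);
	if (err)
		return err;

	/* teardown mirrors the setup: free_nmi(irq, cookie); */
	return 0;
}
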
diff --git a/include/linux/irq.h b/include/linux/irq.h
index def2b2aac8b1..5e91f6bcaacd 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -442,6 +442,8 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
  * @irq_set_vcpu_affinity:	optional to target a vCPU in a virtual machine
  * @ipi_send_single:	send a single IPI to destination cpus
  * @ipi_send_mask:	send an IPI to destination cpus in cpumask
+ * @irq_nmi_setup:	function called from core code before enabling an NMI
+ * @irq_nmi_teardown:	function called from core code after disabling an NMI
  * @flags:		chip specific flags
  */
 struct irq_chip {
@@ -490,6 +492,9 @@ struct irq_chip {
 	void		(*ipi_send_single)(struct irq_data *data, unsigned int cpu);
 	void		(*ipi_send_mask)(struct irq_data *data, const struct cpumask *dest);
 
+	int		(*irq_nmi_setup)(struct irq_data *data);
+	void		(*irq_nmi_teardown)(struct irq_data *data);
+
 	unsigned long	flags;
 };
 
@@ -505,6 +510,7 @@ struct irq_chip {
  * IRQCHIP_ONESHOT_SAFE:	One shot does not require mask/unmask
 * IRQCHIP_EOI_THREADED:	Chip requires eoi() on unmask in threaded mode
 * IRQCHIP_SUPPORTS_LEVEL_MSI	Chip can provide two doorbells for Level MSIs
+ * IRQCHIP_SUPPORTS_NMI:	Chip can deliver NMIs, only for root irqchips
 */
 enum {
 	IRQCHIP_SET_TYPE_MASKED		= (1 << 0),
@@ -515,6 +521,7 @@ enum {
 	IRQCHIP_ONESHOT_SAFE		= (1 << 5),
 	IRQCHIP_EOI_THREADED		= (1 << 6),
 	IRQCHIP_SUPPORTS_LEVEL_MSI	= (1 << 7),
+	IRQCHIP_SUPPORTS_NMI		= (1 << 8),
 };
 
 #include <linux/irqdesc.h>
@@ -594,6 +601,9 @@ extern void handle_percpu_devid_irq(struct irq_desc *desc);
 extern void handle_bad_irq(struct irq_desc *desc);
 extern void handle_nested_irq(unsigned int irq);
 
+extern void handle_fasteoi_nmi(struct irq_desc *desc);
+extern void handle_percpu_devid_fasteoi_nmi(struct irq_desc *desc);
+
 extern int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg);
 extern int irq_chip_pm_get(struct irq_data *data);
 extern int irq_chip_pm_put(struct irq_data *data);
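
The chip-side counterpart, sketched for a hypothetical root irqchip: provide the two new callbacks and set IRQCHIP_SUPPORTS_NMI so the core's NMI setup path can succeed (all demo_* bodies are assumptions; a real chip would reprogram priorities or routing here):

static void demo_mask(struct irq_data *d) { /* mask in hardware (assumed) */ }
static void demo_unmask(struct irq_data *d) { /* unmask in hardware (assumed) */ }
static void demo_eoi(struct irq_data *d) { /* EOI in hardware (assumed) */ }

static int demo_irq_nmi_setup(struct irq_data *d)
{
	/* e.g. raise this line above the normal interrupt mask level */
	return 0;
}

static void demo_irq_nmi_teardown(struct irq_data *d)
{
	/* undo whatever demo_irq_nmi_setup() did */
}

static struct irq_chip demo_chip = {
	.name			= "demo-nmi-chip",
	.irq_mask		= demo_mask,
	.irq_unmask		= demo_unmask,
	.irq_eoi		= demo_eoi,
	.irq_nmi_setup		= demo_irq_nmi_setup,
	.irq_nmi_teardown	= demo_irq_nmi_teardown,
	.flags			= IRQCHIP_SUPPORTS_NMI,
};
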
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index 1d679feff3f6..d6e2ab538ef2 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -173,6 +173,11 @@ static inline int handle_domain_irq(struct irq_domain *domain,
 {
 	return __handle_domain_irq(domain, hwirq, true, regs);
 }
+
+#ifdef CONFIG_IRQ_DOMAIN
+int handle_domain_nmi(struct irq_domain *domain, unsigned int hwirq,
+		      struct pt_regs *regs);
+#endif
 #endif
 
 /* Test to see if a driver has successfully requested an irq */
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index 35965f41d7be..d2130dc7c0e6 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -265,6 +265,7 @@ extern struct irq_domain *irq_find_matching_fwspec(struct irq_fwspec *fwspec,
 					    enum irq_domain_bus_token bus_token);
 extern bool irq_domain_check_msi_remap(void);
 extern void irq_set_default_host(struct irq_domain *host);
+extern struct irq_domain *irq_get_default_host(void);
 extern int irq_domain_alloc_descs(int virq, unsigned int nr_irqs,
 				  irq_hw_number_t hwirq, int node,
 				  const struct irq_affinity_desc *affinity);
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index e960c4f46ee0..99b7dd6982a4 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -730,6 +730,37 @@ out:
 EXPORT_SYMBOL_GPL(handle_fasteoi_irq);
 
 /**
+ * handle_fasteoi_nmi - irq handler for NMI interrupt lines
+ * @desc:	the interrupt description structure for this irq
+ *
+ * A simple NMI-safe handler, considering the restrictions
+ * from request_nmi.
+ *
+ * Only a single callback will be issued to the chip: an ->eoi()
+ * call when the interrupt has been serviced. This enables support
+ * for modern forms of interrupt handlers, which handle the flow
+ * details in hardware, transparently.
+ */
+void handle_fasteoi_nmi(struct irq_desc *desc)
+{
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	struct irqaction *action = desc->action;
+	unsigned int irq = irq_desc_get_irq(desc);
+	irqreturn_t res;
+
+	trace_irq_handler_entry(irq, action);
+	/*
+	 * NMIs cannot be shared, there is only one action.
+	 */
+	res = action->handler(irq, action->dev_id);
+	trace_irq_handler_exit(irq, action, res);
+
+	if (chip->irq_eoi)
+		chip->irq_eoi(&desc->irq_data);
+}
+EXPORT_SYMBOL_GPL(handle_fasteoi_nmi);
+
+/**
  * handle_edge_irq - edge type IRQ handler
  * @desc:	the interrupt description structure for this irq
  *
@@ -916,6 +947,29 @@ void handle_percpu_devid_irq(struct irq_desc *desc)
 		chip->irq_eoi(&desc->irq_data);
 }
 
+/**
+ * handle_percpu_devid_fasteoi_nmi - Per CPU local NMI handler with per cpu
+ *				     dev ids
+ * @desc:	the interrupt description structure for this irq
+ *
+ * Similar to handle_fasteoi_nmi, but handling the dev_id cookie
+ * as a percpu pointer.
+ */
+void handle_percpu_devid_fasteoi_nmi(struct irq_desc *desc)
+{
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	struct irqaction *action = desc->action;
+	unsigned int irq = irq_desc_get_irq(desc);
+	irqreturn_t res;
+
+	trace_irq_handler_entry(irq, action);
+	res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id));
+	trace_irq_handler_exit(irq, action, res);
+
+	if (chip->irq_eoi)
+		chip->irq_eoi(&desc->irq_data);
+}
+
 static void
 __irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
 		     int is_chained, const char *name)
diff --git a/kernel/irq/debugfs.c b/kernel/irq/debugfs.c
index bbd783a83409..516c00a5e867 100644
--- a/kernel/irq/debugfs.c
+++ b/kernel/irq/debugfs.c
@@ -56,6 +56,7 @@ static const struct irq_bit_descr irqchip_flags[] = {
 	BIT_MASK_DESCR(IRQCHIP_ONESHOT_SAFE),
 	BIT_MASK_DESCR(IRQCHIP_EOI_THREADED),
 	BIT_MASK_DESCR(IRQCHIP_SUPPORTS_LEVEL_MSI),
+	BIT_MASK_DESCR(IRQCHIP_SUPPORTS_NMI),
 };
 
 static void
@@ -140,6 +141,7 @@ static const struct irq_bit_descr irqdesc_istates[] = {
 	BIT_MASK_DESCR(IRQS_WAITING),
 	BIT_MASK_DESCR(IRQS_PENDING),
 	BIT_MASK_DESCR(IRQS_SUSPENDED),
+	BIT_MASK_DESCR(IRQS_NMI),
 };
 
 
@@ -203,8 +205,8 @@ static ssize_t irq_debug_write(struct file *file, const char __user *user_buf,
 		chip_bus_lock(desc);
 		raw_spin_lock_irqsave(&desc->lock, flags);
 
-		if (irq_settings_is_level(desc)) {
-			/* Can't do level, sorry */
+		if (irq_settings_is_level(desc) || desc->istate & IRQS_NMI) {
+			/* Can't do level nor NMIs, sorry */
 			err = -EINVAL;
 		} else {
 			desc->istate |= IRQS_PENDING;
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index e74e7eea76cf..70c3053bc1f6 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -49,6 +49,7 @@ enum {
  * IRQS_WAITING			- irq is waiting
  * IRQS_PENDING			- irq is pending and replayed later
  * IRQS_SUSPENDED		- irq is suspended
+ * IRQS_NMI			- irq line is used to deliver NMIs
  */
 enum {
 	IRQS_AUTODETECT		= 0x00000001,
@@ -60,6 +61,7 @@ enum {
 	IRQS_PENDING		= 0x00000200,
 	IRQS_SUSPENDED		= 0x00000800,
 	IRQS_TIMINGS		= 0x00001000,
+	IRQS_NMI		= 0x00002000,
 };
 
 #include "debug.h"
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 84fa255d0329..13539e12cd80 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -670,6 +670,41 @@ int __handle_domain_irq(struct irq_domain *domain, unsigned int hwirq,
 	set_irq_regs(old_regs);
 	return ret;
 }
+
+#ifdef CONFIG_IRQ_DOMAIN
+/**
+ * handle_domain_nmi - Invoke the handler for a HW irq belonging to a domain
+ * @domain:	The domain where to perform the lookup
+ * @hwirq:	The HW irq number to convert to a logical one
+ * @regs:	Register file coming from the low-level handling code
+ *
+ * Returns:	0 on success, or -EINVAL if conversion has failed
+ */
+int handle_domain_nmi(struct irq_domain *domain, unsigned int hwirq,
+		      struct pt_regs *regs)
+{
+	struct pt_regs *old_regs = set_irq_regs(regs);
+	unsigned int irq;
+	int ret = 0;
+
+	nmi_enter();
+
+	irq = irq_find_mapping(domain, hwirq);
+
+	/*
+	 * ack_bad_irq is not NMI-safe, just report
+	 * an invalid interrupt.
+	 */
+	if (likely(irq))
+		generic_handle_irq(irq);
+	else
+		ret = -EINVAL;
+
+	nmi_exit();
+	set_irq_regs(old_regs);
+	return ret;
+}
+#endif
 #endif
 
 /* Dynamic interrupt handling */
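
handle_domain_nmi() is intended to be called from an architecture's low-level NMI entry once the pending cause is known. A hedged sketch of such a call site follows; the entry hook, cause read and domain pointer are all assumptions, not part of this patch:

static struct irq_domain *demo_root_domain;	/* set at irqchip init (assumed) */

static unsigned int demo_read_nmi_cause(void)
{
	return 0;	/* read the pending NMI source from hardware (assumed) */
}

void demo_arch_nmi_entry(struct pt_regs *regs)
{
	unsigned int hwirq = demo_read_nmi_cause();

	if (handle_domain_nmi(demo_root_domain, hwirq, regs))
		pr_warn("spurious NMI, hwirq %u\n", hwirq);
}
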
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index 45c74373c7a4..3bf9793d8825 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -458,6 +458,20 @@ void irq_set_default_host(struct irq_domain *domain)
 }
 EXPORT_SYMBOL_GPL(irq_set_default_host);
 
+/**
+ * irq_get_default_host() - Retrieve the "default" irq domain
+ *
+ * Returns: the default domain, if any.
+ *
+ * Modern code should never use this. This should only be used on
+ * systems that cannot implement a firmware->fwnode mapping (which
+ * both DT and ACPI provide).
+ */
+struct irq_domain *irq_get_default_host(void)
+{
+	return irq_default_domain;
+}
+
 static void irq_domain_clear_mapping(struct irq_domain *domain,
 				     irq_hw_number_t hwirq)
 {
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 3f8a8ebda484..9ec34a2a6638 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -341,7 +341,7 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
341 /* The release function is promised process context */ 341 /* The release function is promised process context */
342 might_sleep(); 342 might_sleep();
343 343
344 if (!desc) 344 if (!desc || desc->istate & IRQS_NMI)
345 return -EINVAL; 345 return -EINVAL;
346 346
347 /* Complete initialisation of *notify */ 347 /* Complete initialisation of *notify */
@@ -553,6 +553,21 @@ bool disable_hardirq(unsigned int irq)
553} 553}
554EXPORT_SYMBOL_GPL(disable_hardirq); 554EXPORT_SYMBOL_GPL(disable_hardirq);
555 555
556/**
557 * disable_nmi_nosync - disable an nmi without waiting
558 * @irq: Interrupt to disable
559 *
560 * Disable the selected interrupt line. Disables and enables are
561 * nested.
562 * The interrupt to disable must have been requested through request_nmi.
563 * Unlike disable_nmi(), this function does not ensure existing
564 * instances of the IRQ handler have completed before returning.
565 */
566void disable_nmi_nosync(unsigned int irq)
567{
568 disable_irq_nosync(irq);
569}
570
556void __enable_irq(struct irq_desc *desc) 571void __enable_irq(struct irq_desc *desc)
557{ 572{
558 switch (desc->depth) { 573 switch (desc->depth) {
@@ -609,6 +624,20 @@ out:
609} 624}
610EXPORT_SYMBOL(enable_irq); 625EXPORT_SYMBOL(enable_irq);
611 626
627/**
628 * enable_nmi - enable handling of an nmi
629 * @irq: Interrupt to enable
630 *
631 * The interrupt to enable must have been requested through request_nmi.
632 * Undoes the effect of one call to disable_nmi(). If this
633 * matches the last disable, processing of interrupts on this
634 * IRQ line is re-enabled.
635 */
636void enable_nmi(unsigned int irq)
637{
638 enable_irq(irq);
639}
640
612static int set_irq_wake_real(unsigned int irq, unsigned int on) 641static int set_irq_wake_real(unsigned int irq, unsigned int on)
613{ 642{
614 struct irq_desc *desc = irq_to_desc(irq); 643 struct irq_desc *desc = irq_to_desc(irq);
@@ -644,6 +673,12 @@ int irq_set_irq_wake(unsigned int irq, unsigned int on)
644 if (!desc) 673 if (!desc)
645 return -EINVAL; 674 return -EINVAL;
646 675
676 /* Don't use NMIs as wake up interrupts please */
677 if (desc->istate & IRQS_NMI) {
678 ret = -EINVAL;
679 goto out_unlock;
680 }
681
647 /* wakeup-capable irqs can be shared between drivers that 682 /* wakeup-capable irqs can be shared between drivers that
648 * don't need to have the same sleep mode behaviors. 683 * don't need to have the same sleep mode behaviors.
649 */ 684 */
@@ -666,6 +701,8 @@ int irq_set_irq_wake(unsigned int irq, unsigned int on)
666 irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE); 701 irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
667 } 702 }
668 } 703 }
704
705out_unlock:
669 irq_put_desc_busunlock(desc, flags); 706 irq_put_desc_busunlock(desc, flags);
670 return ret; 707 return ret;
671} 708}
@@ -1129,6 +1166,39 @@ static void irq_release_resources(struct irq_desc *desc)
1129 c->irq_release_resources(d); 1166 c->irq_release_resources(d);
1130} 1167}
1131 1168
1169static bool irq_supports_nmi(struct irq_desc *desc)
1170{
1171 struct irq_data *d = irq_desc_get_irq_data(desc);
1172
1173#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
1174 /* Only IRQs directly managed by the root irqchip can be set as NMI */
1175 if (d->parent_data)
1176 return false;
1177#endif
1178 /* Don't support NMIs for chips behind a slow bus */
1179 if (d->chip->irq_bus_lock || d->chip->irq_bus_sync_unlock)
1180 return false;
1181
1182 return d->chip->flags & IRQCHIP_SUPPORTS_NMI;
1183}
1184
1185static int irq_nmi_setup(struct irq_desc *desc)
1186{
1187 struct irq_data *d = irq_desc_get_irq_data(desc);
1188 struct irq_chip *c = d->chip;
1189
1190 return c->irq_nmi_setup ? c->irq_nmi_setup(d) : -EINVAL;
1191}
1192
1193static void irq_nmi_teardown(struct irq_desc *desc)
1194{
1195 struct irq_data *d = irq_desc_get_irq_data(desc);
1196 struct irq_chip *c = d->chip;
1197
1198 if (c->irq_nmi_teardown)
1199 c->irq_nmi_teardown(d);
1200}
1201
1132static int 1202static int
1133setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary) 1203setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
1134{ 1204{
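
irq_supports_nmi() encodes three requirements on the irqchip side: the line must be handled by the root chip (no parent_data in a hierarchical domain), the chip must not sit behind a slow bus (no irq_bus_lock/irq_bus_sync_unlock), and it must advertise IRQCHIP_SUPPORTS_NMI along with the irq_nmi_setup()/irq_nmi_teardown() callbacks that the two helpers above invoke. A sketch of a chip that would qualify; the chip, its callbacks and the priority trick are hypothetical:

#include <linux/irq.h>

static void my_chip_mask(struct irq_data *d) { /* mask in hardware */ }
static void my_chip_unmask(struct irq_data *d) { /* unmask in hardware */ }

static int my_chip_nmi_setup(struct irq_data *d)
{
	/* Chip-specific: e.g. raise the line's priority so it is
	 * delivered as a pseudo-NMI. Return a negative errno on failure. */
	return 0;
}

static void my_chip_nmi_teardown(struct irq_data *d)
{
	/* Undo my_chip_nmi_setup(): restore normal IRQ priority. */
}

static struct irq_chip my_root_chip = {
	.name			= "my-root-chip",
	.irq_mask		= my_chip_mask,
	.irq_unmask		= my_chip_unmask,
	.irq_nmi_setup		= my_chip_nmi_setup,
	.irq_nmi_teardown	= my_chip_nmi_teardown,
	.flags			= IRQCHIP_SUPPORTS_NMI,
};

The bus restriction is deliberate: NMI setup and teardown run under desc->lock with interrupts off, so a chip whose registers need a sleeping bus transaction can never be NMI-capable.
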
@@ -1303,9 +1373,17 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
1303 * fields must have IRQF_SHARED set and the bits which 1373 * fields must have IRQF_SHARED set and the bits which
1304 * set the trigger type must match. Also all must 1374 * set the trigger type must match. Also all must
1305 * agree on ONESHOT. 1375 * agree on ONESHOT.
1376 * Interrupt lines used for NMIs cannot be shared.
1306 */ 1377 */
1307 unsigned int oldtype; 1378 unsigned int oldtype;
1308 1379
1380 if (desc->istate & IRQS_NMI) {
1381 pr_err("Invalid attempt to share NMI for %s (irq %d) on irqchip %s.\n",
1382 new->name, irq, desc->irq_data.chip->name);
1383 ret = -EINVAL;
1384 goto out_unlock;
1385 }
1386
1309 /* 1387 /*
1310 * If nobody did set the configuration before, inherit 1388 * If nobody did set the configuration before, inherit
1311 * the one provided by the requester. 1389 * the one provided by the requester.
@@ -1757,6 +1835,59 @@ const void *free_irq(unsigned int irq, void *dev_id)
1757} 1835}
1758EXPORT_SYMBOL(free_irq); 1836EXPORT_SYMBOL(free_irq);
1759 1837
1838/* This function must be called with desc->lock held */
1839static const void *__cleanup_nmi(unsigned int irq, struct irq_desc *desc)
1840{
1841 const char *devname = NULL;
1842
1843 desc->istate &= ~IRQS_NMI;
1844
1845 if (!WARN_ON(desc->action == NULL)) {
1846 irq_pm_remove_action(desc, desc->action);
1847 devname = desc->action->name;
1848 unregister_handler_proc(irq, desc->action);
1849
1850 kfree(desc->action);
1851 desc->action = NULL;
1852 }
1853
1854 irq_settings_clr_disable_unlazy(desc);
1855 irq_shutdown(desc);
1856
1857 irq_release_resources(desc);
1858
1859 irq_chip_pm_put(&desc->irq_data);
1860 module_put(desc->owner);
1861
1862 return devname;
1863}
1864
1865const void *free_nmi(unsigned int irq, void *dev_id)
1866{
1867 struct irq_desc *desc = irq_to_desc(irq);
1868 unsigned long flags;
1869 const void *devname;
1870
1871 if (!desc || WARN_ON(!(desc->istate & IRQS_NMI)))
1872 return NULL;
1873
1874 if (WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1875 return NULL;
1876
1877 /* NMI still enabled */
1878 if (WARN_ON(desc->depth == 0))
1879 disable_nmi_nosync(irq);
1880
1881 raw_spin_lock_irqsave(&desc->lock, flags);
1882
1883 irq_nmi_teardown(desc);
1884 devname = __cleanup_nmi(irq, desc);
1885
1886 raw_spin_unlock_irqrestore(&desc->lock, flags);
1887
1888 return devname;
1889}
1890
1760/** 1891/**
1761 * request_threaded_irq - allocate an interrupt line 1892 * request_threaded_irq - allocate an interrupt line
1762 * @irq: Interrupt line to allocate 1893 * @irq: Interrupt line to allocate
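
free_nmi() is the release counterpart of request_nmi() further down: it insists the line is already disabled (the depth check warns and disables it otherwise), then tears down the chip-level NMI state and cleans up under desc->lock. The orderly sequence from a driver's perspective, with hypothetical names:

	disable_nmi_nosync(my_nmi_irq);		/* quiesce delivery first */
	free_nmi(my_nmi_irq, my_dev_cookie);	/* teardown + cleanup, returns devname */
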
@@ -1926,6 +2057,101 @@ int request_any_context_irq(unsigned int irq, irq_handler_t handler,
1926} 2057}
1927EXPORT_SYMBOL_GPL(request_any_context_irq); 2058EXPORT_SYMBOL_GPL(request_any_context_irq);
1928 2059
2060/**
2061 * request_nmi - allocate an interrupt line for NMI delivery
2062 * @irq: Interrupt line to allocate
2063 * @handler: Function to be called when the IRQ occurs.
 2064 * Runs in NMI context; it cannot be threaded.
2065 * @irqflags: Interrupt type flags
2066 * @name: An ascii name for the claiming device
2067 * @dev_id: A cookie passed back to the handler function
2068 *
 2069 * This call allocates interrupt resources and sets up the IRQ
 2070 * line to be handled as an NMI. The line is not enabled by this
 2071 * call; use enable_nmi() afterwards to start delivery.
2072 *
2073 * An interrupt line delivering NMIs cannot be shared and IRQ handling
2074 * cannot be threaded.
2075 *
 2076 * Interrupt lines requested for NMI delivery must produce per-CPU
 2077 * interrupts and must have auto-enabling disabled.
2078 *
2079 * Dev_id must be globally unique. Normally the address of the
2080 * device data structure is used as the cookie. Since the handler
 2081 * receives this value, it makes sense to use it.
2082 *
 2083 * If the interrupt line cannot be used to deliver NMIs, the function
 2084 * will fail and return a negative value.
2085 */
2086int request_nmi(unsigned int irq, irq_handler_t handler,
2087 unsigned long irqflags, const char *name, void *dev_id)
2088{
2089 struct irqaction *action;
2090 struct irq_desc *desc;
2091 unsigned long flags;
2092 int retval;
2093
2094 if (irq == IRQ_NOTCONNECTED)
2095 return -ENOTCONN;
2096
 2097 /* NMIs cannot be shared or used for polling */
2098 if (irqflags & (IRQF_SHARED | IRQF_COND_SUSPEND | IRQF_IRQPOLL))
2099 return -EINVAL;
2100
2101 if (!(irqflags & IRQF_PERCPU))
2102 return -EINVAL;
2103
2104 if (!handler)
2105 return -EINVAL;
2106
2107 desc = irq_to_desc(irq);
2108
2109 if (!desc || irq_settings_can_autoenable(desc) ||
2110 !irq_settings_can_request(desc) ||
2111 WARN_ON(irq_settings_is_per_cpu_devid(desc)) ||
2112 !irq_supports_nmi(desc))
2113 return -EINVAL;
2114
2115 action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
2116 if (!action)
2117 return -ENOMEM;
2118
2119 action->handler = handler;
2120 action->flags = irqflags | IRQF_NO_THREAD | IRQF_NOBALANCING;
2121 action->name = name;
2122 action->dev_id = dev_id;
2123
2124 retval = irq_chip_pm_get(&desc->irq_data);
2125 if (retval < 0)
2126 goto err_out;
2127
2128 retval = __setup_irq(irq, desc, action);
2129 if (retval)
2130 goto err_irq_setup;
2131
2132 raw_spin_lock_irqsave(&desc->lock, flags);
2133
2134 /* Setup NMI state */
2135 desc->istate |= IRQS_NMI;
2136 retval = irq_nmi_setup(desc);
2137 if (retval) {
2138 __cleanup_nmi(irq, desc);
2139 raw_spin_unlock_irqrestore(&desc->lock, flags);
2140 return -EINVAL;
2141 }
2142
2143 raw_spin_unlock_irqrestore(&desc->lock, flags);
2144
2145 return 0;
2146
2147err_irq_setup:
2148 irq_chip_pm_put(&desc->irq_data);
2149err_out:
2150 kfree(action);
2151
2152 return retval;
2153}
2154
1929void enable_percpu_irq(unsigned int irq, unsigned int type) 2155void enable_percpu_irq(unsigned int irq, unsigned int type)
1930{ 2156{
1931 unsigned int cpu = smp_processor_id(); 2157 unsigned int cpu = smp_processor_id();
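
Putting the request side together: the line must not auto-enable (request_nmi() rejects it otherwise), IRQF_PERCPU is mandatory, and the handler runs in NMI context, so it may only use NMI-safe primitives. A hedged end-to-end sketch; the device structure, irq source and names are invented for illustration:

#include <linux/interrupt.h>
#include <linux/irq.h>

struct my_dev {
	void __iomem *regs;
};

static irqreturn_t my_dev_nmi(int irq, void *dev_id)
{
	struct my_dev *dev = dev_id;

	/* NMI context: no sleeping, no regular spinlocks. */
	/* ... acknowledge the interrupt in dev->regs ... */
	return IRQ_HANDLED;
}

static int my_dev_setup_nmi(struct my_dev *dev, unsigned int irq)
{
	int ret;

	/* request_nmi() refuses lines that would auto-enable. */
	irq_set_status_flags(irq, IRQ_NOAUTOEN);

	ret = request_nmi(irq, my_dev_nmi, IRQF_PERCPU, "my-dev-nmi", dev);
	if (ret)
		return ret;

	enable_nmi(irq);	/* bring the line up explicitly */
	return 0;
}

If the chip-level setup fails, request_nmi() cleans up through __cleanup_nmi() and reports -EINVAL, so callers only ever see a fully working NMI or no registration at all.
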
@@ -1960,6 +2186,11 @@ out:
1960} 2186}
1961EXPORT_SYMBOL_GPL(enable_percpu_irq); 2187EXPORT_SYMBOL_GPL(enable_percpu_irq);
1962 2188
2189void enable_percpu_nmi(unsigned int irq, unsigned int type)
2190{
2191 enable_percpu_irq(irq, type);
2192}
2193
1963/** 2194/**
1964 * irq_percpu_is_enabled - Check whether the per cpu irq is enabled 2195 * irq_percpu_is_enabled - Check whether the per cpu irq is enabled
1965 * @irq: Linux irq number to check for 2196 * @irq: Linux irq number to check for
@@ -1999,6 +2230,11 @@ void disable_percpu_irq(unsigned int irq)
1999} 2230}
2000EXPORT_SYMBOL_GPL(disable_percpu_irq); 2231EXPORT_SYMBOL_GPL(disable_percpu_irq);
2001 2232
2233void disable_percpu_nmi(unsigned int irq)
2234{
2235 disable_percpu_irq(irq);
2236}
2237
2002/* 2238/*
2003 * Internal function to unregister a percpu irqaction. 2239 * Internal function to unregister a percpu irqaction.
2004 */ 2240 */
@@ -2030,6 +2266,8 @@ static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_
2030 /* Found it - now remove it from the list of entries: */ 2266 /* Found it - now remove it from the list of entries: */
2031 desc->action = NULL; 2267 desc->action = NULL;
2032 2268
2269 desc->istate &= ~IRQS_NMI;
2270
2033 raw_spin_unlock_irqrestore(&desc->lock, flags); 2271 raw_spin_unlock_irqrestore(&desc->lock, flags);
2034 2272
2035 unregister_handler_proc(irq, action); 2273 unregister_handler_proc(irq, action);
@@ -2083,6 +2321,19 @@ void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
2083} 2321}
2084EXPORT_SYMBOL_GPL(free_percpu_irq); 2322EXPORT_SYMBOL_GPL(free_percpu_irq);
2085 2323
2324void free_percpu_nmi(unsigned int irq, void __percpu *dev_id)
2325{
2326 struct irq_desc *desc = irq_to_desc(irq);
2327
2328 if (!desc || !irq_settings_is_per_cpu_devid(desc))
2329 return;
2330
2331 if (WARN_ON(!(desc->istate & IRQS_NMI)))
2332 return;
2333
2334 kfree(__free_percpu_irq(irq, dev_id));
2335}
2336
2086/** 2337/**
2087 * setup_percpu_irq - setup a per-cpu interrupt 2338 * setup_percpu_irq - setup a per-cpu interrupt
2088 * @irq: Interrupt line to setup 2339 * @irq: Interrupt line to setup
@@ -2173,6 +2424,158 @@ int __request_percpu_irq(unsigned int irq, irq_handler_t handler,
2173EXPORT_SYMBOL_GPL(__request_percpu_irq); 2424EXPORT_SYMBOL_GPL(__request_percpu_irq);
2174 2425
2175/** 2426/**
2427 * request_percpu_nmi - allocate a percpu interrupt line for NMI delivery
2428 * @irq: Interrupt line to allocate
2429 * @handler: Function to be called when the IRQ occurs.
2430 * @name: An ascii name for the claiming device
2431 * @dev_id: A percpu cookie passed back to the handler function
2432 *
 2433 * This call allocates interrupt resources for a per-CPU NMI. Per-CPU NMIs
 2434 * have to be set up on each CPU by calling prepare_percpu_nmi() before
 2435 * being enabled on the same CPU with enable_percpu_nmi().
 2436 *
 2437 * Dev_id must be globally unique. It is a per-cpu variable, and
 2438 * the handler gets called with the interrupted CPU's instance of
 2439 * that variable.
 2440 *
 2441 * Interrupt lines requested for NMI delivery must have auto-enabling
 2442 * disabled.
 2443 *
 2444 * If the interrupt line cannot be used to deliver NMIs, the function
 2445 * will fail, returning a negative value.
2446 */
2447int request_percpu_nmi(unsigned int irq, irq_handler_t handler,
2448 const char *name, void __percpu *dev_id)
2449{
2450 struct irqaction *action;
2451 struct irq_desc *desc;
2452 unsigned long flags;
2453 int retval;
2454
2455 if (!handler)
2456 return -EINVAL;
2457
2458 desc = irq_to_desc(irq);
2459
2460 if (!desc || !irq_settings_can_request(desc) ||
2461 !irq_settings_is_per_cpu_devid(desc) ||
2462 irq_settings_can_autoenable(desc) ||
2463 !irq_supports_nmi(desc))
2464 return -EINVAL;
2465
2466 /* The line cannot already be NMI */
2467 if (desc->istate & IRQS_NMI)
2468 return -EINVAL;
2469
2470 action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
2471 if (!action)
2472 return -ENOMEM;
2473
2474 action->handler = handler;
2475 action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND | IRQF_NO_THREAD
2476 | IRQF_NOBALANCING;
2477 action->name = name;
2478 action->percpu_dev_id = dev_id;
2479
2480 retval = irq_chip_pm_get(&desc->irq_data);
2481 if (retval < 0)
2482 goto err_out;
2483
2484 retval = __setup_irq(irq, desc, action);
2485 if (retval)
2486 goto err_irq_setup;
2487
2488 raw_spin_lock_irqsave(&desc->lock, flags);
2489 desc->istate |= IRQS_NMI;
2490 raw_spin_unlock_irqrestore(&desc->lock, flags);
2491
2492 return 0;
2493
2494err_irq_setup:
2495 irq_chip_pm_put(&desc->irq_data);
2496err_out:
2497 kfree(action);
2498
2499 return retval;
2500}
2501
2502/**
2503 * prepare_percpu_nmi - performs CPU local setup for NMI delivery
2504 * @irq: Interrupt line to prepare for NMI delivery
2505 *
 2506 * This call prepares an interrupt line to deliver NMIs on the current CPU,
 2507 * before that interrupt line gets enabled with enable_percpu_nmi().
 2508 *
 2509 * As a CPU-local operation, this should be called from non-preemptible
 2510 * context.
 2511 *
 2512 * If the interrupt line cannot be used to deliver NMIs, the function
 2513 * will fail, returning a negative value.
2514 */
2515int prepare_percpu_nmi(unsigned int irq)
2516{
2517 unsigned long flags;
2518 struct irq_desc *desc;
2519 int ret = 0;
2520
2521 WARN_ON(preemptible());
2522
2523 desc = irq_get_desc_lock(irq, &flags,
2524 IRQ_GET_DESC_CHECK_PERCPU);
2525 if (!desc)
2526 return -EINVAL;
2527
2528 if (WARN(!(desc->istate & IRQS_NMI),
2529 KERN_ERR "prepare_percpu_nmi called for a non-NMI interrupt: irq %u\n",
2530 irq)) {
2531 ret = -EINVAL;
2532 goto out;
2533 }
2534
2535 ret = irq_nmi_setup(desc);
2536 if (ret) {
2537 pr_err("Failed to setup NMI delivery: irq %u\n", irq);
2538 goto out;
2539 }
2540
2541out:
2542 irq_put_desc_unlock(desc, flags);
2543 return ret;
2544}
2545
2546/**
2547 * teardown_percpu_nmi - undoes NMI setup of IRQ line
2548 * @irq: Interrupt line from which CPU local NMI configuration should be
2549 * removed
2550 *
2551 * This call undoes the setup done by prepare_percpu_nmi().
2552 *
 2553 * The IRQ line should not be enabled on the current CPU when this is called.
 2554 *
 2555 * As a CPU-local operation, this should be called from non-preemptible
 2556 * context.
2557 */
2558void teardown_percpu_nmi(unsigned int irq)
2559{
2560 unsigned long flags;
2561 struct irq_desc *desc;
2562
2563 WARN_ON(preemptible());
2564
2565 desc = irq_get_desc_lock(irq, &flags,
2566 IRQ_GET_DESC_CHECK_PERCPU);
2567 if (!desc)
2568 return;
2569
2570 if (WARN_ON(!(desc->istate & IRQS_NMI)))
2571 goto out;
2572
2573 irq_nmi_teardown(desc);
2574out:
2575 irq_put_desc_unlock(desc, flags);
2576}
2577
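
The per-CPU flavour splits the lifecycle across two scopes: request_percpu_nmi()/free_percpu_nmi() are system-wide, while prepare_percpu_nmi(), enable_percpu_nmi() and their teardown counterparts are strictly CPU-local and must run with preemption disabled on the CPU they target. A hedged sketch of the whole flow; the PMU-style device, its context and the irq number are hypothetical:

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/smp.h>

struct my_pmu_ctx {
	unsigned long overflows;
};

static DEFINE_PER_CPU(struct my_pmu_ctx, my_pmu_ctx);

static irqreturn_t my_pmu_nmi(int irq, void *dev_id)
{
	struct my_pmu_ctx *ctx = dev_id;	/* this CPU's instance */

	ctx->overflows++;	/* keep the NMI path short and NMI-safe */
	return IRQ_HANDLED;
}

/* Runs on every CPU via on_each_cpu(), i.e. non-preemptible context. */
static void my_pmu_enable_on_cpu(void *info)
{
	unsigned int irq = *(unsigned int *)info;

	if (!prepare_percpu_nmi(irq))
		enable_percpu_nmi(irq, IRQ_TYPE_NONE);
}

static int my_pmu_init(unsigned int irq)
{
	int ret;

	ret = request_percpu_nmi(irq, my_pmu_nmi, "my-pmu", &my_pmu_ctx);
	if (ret)
		return ret;

	on_each_cpu(my_pmu_enable_on_cpu, &irq, 1);
	return 0;
}

Teardown mirrors this: on each CPU call disable_percpu_nmi() and then teardown_percpu_nmi(), and finally free_percpu_nmi(irq, &my_pmu_ctx) once no CPU has the line enabled.
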
2578/**
2176 * irq_get_irqchip_state - returns the irqchip state of a interrupt. 2579 * irq_get_irqchip_state - returns the irqchip state of a interrupt.
2177 * @irq: Interrupt line that is forwarded to a VM 2580 * @irq: Interrupt line that is forwarded to a VM
2178 * @which: One of IRQCHIP_STATE_* the caller wants to know about 2581 * @which: One of IRQCHIP_STATE_* the caller wants to know about