30 files changed, 1177 insertions, 281 deletions
diff --git a/Documentation/devicetree/bindings/interrupt-controller/fsl,irqsteer.txt b/Documentation/devicetree/bindings/interrupt-controller/fsl,irqsteer.txt
index 45790ce6f5b9..582991c426ee 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/fsl,irqsteer.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/fsl,irqsteer.txt
@@ -6,8 +6,9 @@ Required properties:
     - "fsl,imx8m-irqsteer"
     - "fsl,imx-irqsteer"
 - reg: Physical base address and size of registers.
-- interrupts: Should contain the parent interrupt line used to multiplex the
-  input interrupts.
+- interrupts: Should contain the up to 8 parent interrupt lines used to
+  multiplex the input interrupts. They should be specified sequentially
+  from output 0 to 7.
 - clocks: Should contain one clock for entry in clock-names
   see Documentation/devicetree/bindings/clock/clock-bindings.txt
 - clock-names:
@@ -16,8 +17,8 @@ Required properties:
 - #interrupt-cells: Specifies the number of cells needed to encode an
   interrupt source. The value must be 1.
 - fsl,channel: The output channel that all input IRQs should be steered into.
-- fsl,irq-groups: Number of IRQ groups managed by this controller instance.
-  Each group manages 64 input interrupts.
+- fsl,num-irqs: Number of input interrupts of this channel.
+  Should be multiple of 32 input interrupts and up to 512 interrupts.
 
 Example:
 
@@ -28,7 +29,7 @@ Example:
         clocks = <&clk IMX8MQ_CLK_DISP_APB_ROOT>;
         clock-names = "ipg";
         fsl,channel = <0>;
-        fsl,irq-groups = <1>;
+        fsl,num-irqs = <64>;
         interrupt-controller;
         #interrupt-cells = <1>;
     };
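
A node using more than 64 inputs under the new binding would look roughly like the sketch below; the unit address, parent interrupt numbers and the fsl,num-irqs value are invented for illustration, only the property layout follows the binding text above.

    irqsteer: interrupt-controller@32e2d000 {
        compatible = "fsl,imx8m-irqsteer", "fsl,imx-irqsteer";
        reg = <0x32e2d000 0x1000>;
        /* two parent outputs, listed sequentially from output 0 */
        interrupts = <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>,
                     <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>;
        clocks = <&clk IMX8MQ_CLK_DISP_APB_ROOT>;
        clock-names = "ipg";
        fsl,channel = <0>;
        /* 128 inputs: a multiple of 32, served by 2 outputs of 64 each */
        fsl,num-irqs = <128>;
        interrupt-controller;
        #interrupt-cells = <1>;
    };
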
diff --git a/Documentation/devicetree/bindings/interrupt-controller/loongson,ls1x-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/loongson,ls1x-intc.txt
new file mode 100644
index 000000000000..a63ed9fcb535
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/loongson,ls1x-intc.txt
@@ -0,0 +1,24 @@
+Loongson ls1x Interrupt Controller
+
+Required properties:
+
+- compatible : should be "loongson,ls1x-intc". Valid strings are:
+
+- reg : Specifies base physical address and size of the registers.
+- interrupt-controller : Identifies the node as an interrupt controller
+- #interrupt-cells : Specifies the number of cells needed to encode an
+  interrupt source. The value shall be 2.
+- interrupts : Specifies the CPU interrupt the controller is connected to.
+
+Example:
+
+intc: interrupt-controller@1fd01040 {
+    compatible = "loongson,ls1x-intc";
+    reg = <0x1fd01040 0x18>;
+
+    interrupt-controller;
+    #interrupt-cells = <2>;
+
+    interrupt-parent = <&cpu_intc>;
+    interrupts = <2>;
+};
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 3d1e60779078..5dcb5456cd14 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -406,6 +406,15 @@ config IMX_IRQSTEER
 	help
 	  Support for the i.MX IRQSTEER interrupt multiplexer/remapper.
 
+config LS1X_IRQ
+	bool "Loongson-1 Interrupt Controller"
+	depends on MACH_LOONGSON32
+	default y
+	select IRQ_DOMAIN
+	select GENERIC_IRQ_CHIP
+	help
+	  Support for the Loongson-1 platform Interrupt Controller.
+
 endmenu
 
 config SIFIVE_PLIC
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index c93713d24b86..7acd0e36d0b4 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -94,3 +94,4 @@ obj-$(CONFIG_CSKY_APB_INTC)	+= irq-csky-apb-intc.o
 obj-$(CONFIG_SIFIVE_PLIC)	+= irq-sifive-plic.o
 obj-$(CONFIG_IMX_IRQSTEER)	+= irq-imx-irqsteer.o
 obj-$(CONFIG_MADERA_IRQ)	+= irq-madera.o
+obj-$(CONFIG_LS1X_IRQ)		+= irq-ls1x.o
diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c
index 0e65f609352e..83364fedbf0a 100644
--- a/drivers/irqchip/irq-brcmstb-l2.c
+++ b/drivers/irqchip/irq-brcmstb-l2.c
@@ -129,8 +129,9 @@ static void brcmstb_l2_intc_suspend(struct irq_data *d)
 	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
 	struct irq_chip_type *ct = irq_data_get_chip_type(d);
 	struct brcmstb_l2_intc_data *b = gc->private;
+	unsigned long flags;
 
-	irq_gc_lock(gc);
+	irq_gc_lock_irqsave(gc, flags);
 	/* Save the current mask */
 	b->saved_mask = irq_reg_readl(gc, ct->regs.mask);
 
@@ -139,7 +140,7 @@ static void brcmstb_l2_intc_suspend(struct irq_data *d)
 		irq_reg_writel(gc, ~gc->wake_active, ct->regs.disable);
 		irq_reg_writel(gc, gc->wake_active, ct->regs.enable);
 	}
-	irq_gc_unlock(gc);
+	irq_gc_unlock_irqrestore(gc, flags);
 }
 
 static void brcmstb_l2_intc_resume(struct irq_data *d)
@@ -147,8 +148,9 @@ static void brcmstb_l2_intc_resume(struct irq_data *d)
 	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
 	struct irq_chip_type *ct = irq_data_get_chip_type(d);
 	struct brcmstb_l2_intc_data *b = gc->private;
+	unsigned long flags;
 
-	irq_gc_lock(gc);
+	irq_gc_lock_irqsave(gc, flags);
 	if (ct->chip.irq_ack) {
 		/* Clear unmasked non-wakeup interrupts */
 		irq_reg_writel(gc, ~b->saved_mask & ~gc->wake_active,
@@ -158,7 +160,7 @@ static void brcmstb_l2_intc_resume(struct irq_data *d)
 	/* Restore the saved mask */
 	irq_reg_writel(gc, b->saved_mask, ct->regs.disable);
 	irq_reg_writel(gc, ~b->saved_mask, ct->regs.enable);
-	irq_gc_unlock(gc);
+	irq_gc_unlock_irqrestore(gc, flags);
 }
 
 static int __init brcmstb_l2_intc_of_init(struct device_node *np,
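
The brcmstb-l2 change switches the suspend/resume paths from irq_gc_lock()/irq_gc_unlock() to the irqsave variants so the generic-chip lock stays safe when these callbacks run with interrupts enabled. A minimal sketch of the pattern, detached from this driver and using a hypothetical irq_data argument:

    struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
    unsigned long flags;

    irq_gc_lock_irqsave(gc, flags);        /* take gc->lock, save local irq state */
    /* ... read/write the chip's mask registers ... */
    irq_gc_unlock_irqrestore(gc, flags);   /* release and restore the saved state */
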
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index c3aba3fc818d..2dd1ff0cf558 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -1746,6 +1746,7 @@ static int its_setup_baser(struct its_node *its, struct its_baser *baser,
 	u64 type = GITS_BASER_TYPE(val);
 	u64 baser_phys, tmp;
 	u32 alloc_pages;
+	struct page *page;
 	void *base;
 
 retry_alloc_baser:
@@ -1758,10 +1759,11 @@ retry_alloc_baser:
 		order = get_order(GITS_BASER_PAGES_MAX * psz);
 	}
 
-	base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
-	if (!base)
+	page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, order);
+	if (!page)
 		return -ENOMEM;
 
+	base = (void *)page_address(page);
 	baser_phys = virt_to_phys(base);
 
 	/* Check if the physical address of the memory is above 48bits */
@@ -1955,6 +1957,8 @@ static int its_alloc_tables(struct its_node *its)
 			indirect = its_parse_indirect_baser(its, baser,
 							    psz, &order,
 							    its->device_ids);
+			break;
+
 		case GITS_BASER_TYPE_VCPU:
 			indirect = its_parse_indirect_baser(its, baser,
 							    psz, &order,
@@ -2292,7 +2296,8 @@ static struct its_baser *its_get_baser(struct its_node *its, u32 type)
 	return NULL;
 }
 
-static bool its_alloc_table_entry(struct its_baser *baser, u32 id)
+static bool its_alloc_table_entry(struct its_node *its,
+				  struct its_baser *baser, u32 id)
 {
 	struct page *page;
 	u32 esz, idx;
@@ -2312,7 +2317,8 @@ static bool its_alloc_table_entry(struct its_baser *baser, u32 id)
 
 	/* Allocate memory for 2nd level table */
 	if (!table[idx]) {
-		page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(baser->psz));
+		page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
+					get_order(baser->psz));
 		if (!page)
 			return false;
 
@@ -2343,7 +2349,7 @@ static bool its_alloc_device_table(struct its_node *its, u32 dev_id)
 	if (!baser)
 		return (ilog2(dev_id) < its->device_ids);
 
-	return its_alloc_table_entry(baser, dev_id);
+	return its_alloc_table_entry(its, baser, dev_id);
 }
 
 static bool its_alloc_vpe_table(u32 vpe_id)
@@ -2367,7 +2373,7 @@ static bool its_alloc_vpe_table(u32 vpe_id)
 		if (!baser)
 			return false;
 
-		if (!its_alloc_table_entry(baser, vpe_id))
+		if (!its_alloc_table_entry(its, baser, vpe_id))
 			return false;
 	}
 
@@ -2401,7 +2407,7 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
 	nr_ites = max(2, nvecs);
 	sz = nr_ites * its->ite_size;
 	sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
-	itt = kzalloc(sz, GFP_KERNEL);
+	itt = kzalloc_node(sz, GFP_KERNEL, its->numa_node);
 	if (alloc_lpis) {
 		lpi_map = its_lpi_alloc(nvecs, &lpi_base, &nr_lpis);
 		if (lpi_map)
@@ -3543,6 +3549,7 @@ static int __init its_probe_one(struct resource *res,
 	void __iomem *its_base;
 	u32 val, ctlr;
 	u64 baser, tmp, typer;
+	struct page *page;
 	int err;
 
 	its_base = ioremap(res->start, resource_size(res));
@@ -3599,12 +3606,13 @@ static int __init its_probe_one(struct resource *res,
 
 	its->numa_node = numa_node;
 
-	its->cmd_base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
-						 get_order(ITS_CMD_QUEUE_SZ));
-	if (!its->cmd_base) {
+	page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
+				get_order(ITS_CMD_QUEUE_SZ));
+	if (!page) {
 		err = -ENOMEM;
 		goto out_free_its;
 	}
+	its->cmd_base = (void *)page_address(page);
 	its->cmd_write = its->cmd_base;
 	its->fwnode_handle = handle;
 	its->get_msi_base = its_irq_get_msi_base;
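
The ITS changes replace node-agnostic allocations with NUMA-aware ones so the command queue and tables land on the node the ITS itself lives on. A minimal sketch of the substitution, not tied to the ITS code and with a caller-supplied numa_node:

    /* before: memory may come from any node */
    base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order);

    /* after: ask the allocator for pages on the device's node */
    page = alloc_pages_node(numa_node, GFP_KERNEL | __GFP_ZERO, order);
    if (!page)
        return -ENOMEM;
    base = page_address(page);    /* still yields a kernel virtual address */
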
diff --git a/drivers/irqchip/irq-i8259.c b/drivers/irqchip/irq-i8259.c
index b0d4aab1a58c..d000870d9b6b 100644
--- a/drivers/irqchip/irq-i8259.c
+++ b/drivers/irqchip/irq-i8259.c
@@ -225,14 +225,6 @@ static struct syscore_ops i8259_syscore_ops = {
 	.shutdown = i8259A_shutdown,
 };
 
-static int __init i8259A_init_sysfs(void)
-{
-	register_syscore_ops(&i8259_syscore_ops);
-	return 0;
-}
-
-device_initcall(i8259A_init_sysfs);
-
 static void init_8259A(int auto_eoi)
 {
 	unsigned long flags;
@@ -332,6 +324,7 @@ struct irq_domain * __init __init_i8259_irqs(struct device_node *node)
 		panic("Failed to add i8259 IRQ domain");
 
 	setup_irq(I8259A_IRQ_BASE + PIC_CASCADE_IR, &irq2);
+	register_syscore_ops(&i8259_syscore_ops);
 	return domain;
 }
 
diff --git a/drivers/irqchip/irq-imx-irqsteer.c b/drivers/irqchip/irq-imx-irqsteer.c
index 5b3f1d735685..d1098f4da6a4 100644
--- a/drivers/irqchip/irq-imx-irqsteer.c
+++ b/drivers/irqchip/irq-imx-irqsteer.c
@@ -10,10 +10,11 @@
 #include <linux/irqchip/chained_irq.h>
 #include <linux/irqdomain.h>
 #include <linux/kernel.h>
+#include <linux/of_irq.h>
 #include <linux/of_platform.h>
 #include <linux/spinlock.h>
 
-#define CTRL_STRIDE_OFF(_t, _r)	(_t * 8 * _r)
+#define CTRL_STRIDE_OFF(_t, _r)	(_t * 4 * _r)
 #define CHANCTRL		0x0
 #define CHANMASK(n, t)		(CTRL_STRIDE_OFF(t, 0) + 0x4 * (n) + 0x4)
 #define CHANSET(n, t)		(CTRL_STRIDE_OFF(t, 1) + 0x4 * (n) + 0x4)
@@ -21,12 +22,15 @@
 #define CHAN_MINTDIS(t)		(CTRL_STRIDE_OFF(t, 3) + 0x4)
 #define CHAN_MASTRSTAT(t)	(CTRL_STRIDE_OFF(t, 3) + 0x8)
 
+#define CHAN_MAX_OUTPUT_INT	0x8
+
 struct irqsteer_data {
 	void __iomem		*regs;
 	struct clk		*ipg_clk;
-	int			irq;
+	int			irq[CHAN_MAX_OUTPUT_INT];
+	int			irq_count;
 	raw_spinlock_t		lock;
-	int			irq_groups;
+	int			reg_num;
 	int			channel;
 	struct irq_domain	*domain;
 	u32			*saved_reg;
@@ -35,7 +39,7 @@ struct irqsteer_data {
 static int imx_irqsteer_get_reg_index(struct irqsteer_data *data,
 				      unsigned long irqnum)
 {
-	return (data->irq_groups * 2 - irqnum / 32 - 1);
+	return (data->reg_num - irqnum / 32 - 1);
 }
 
 static void imx_irqsteer_irq_unmask(struct irq_data *d)
@@ -46,9 +50,9 @@ static void imx_irqsteer_irq_unmask(struct irq_data *d)
 	u32 val;
 
 	raw_spin_lock_irqsave(&data->lock, flags);
-	val = readl_relaxed(data->regs + CHANMASK(idx, data->irq_groups));
+	val = readl_relaxed(data->regs + CHANMASK(idx, data->reg_num));
 	val |= BIT(d->hwirq % 32);
-	writel_relaxed(val, data->regs + CHANMASK(idx, data->irq_groups));
+	writel_relaxed(val, data->regs + CHANMASK(idx, data->reg_num));
 	raw_spin_unlock_irqrestore(&data->lock, flags);
 }
 
@@ -60,9 +64,9 @@ static void imx_irqsteer_irq_mask(struct irq_data *d)
 	u32 val;
 
 	raw_spin_lock_irqsave(&data->lock, flags);
-	val = readl_relaxed(data->regs + CHANMASK(idx, data->irq_groups));
+	val = readl_relaxed(data->regs + CHANMASK(idx, data->reg_num));
 	val &= ~BIT(d->hwirq % 32);
-	writel_relaxed(val, data->regs + CHANMASK(idx, data->irq_groups));
+	writel_relaxed(val, data->regs + CHANMASK(idx, data->reg_num));
 	raw_spin_unlock_irqrestore(&data->lock, flags);
 }
 
@@ -87,23 +91,47 @@ static const struct irq_domain_ops imx_irqsteer_domain_ops = {
 	.xlate		= irq_domain_xlate_onecell,
 };
 
+static int imx_irqsteer_get_hwirq_base(struct irqsteer_data *data, u32 irq)
+{
+	int i;
+
+	for (i = 0; i < data->irq_count; i++) {
+		if (data->irq[i] == irq)
+			return i * 64;
+	}
+
+	return -EINVAL;
+}
+
 static void imx_irqsteer_irq_handler(struct irq_desc *desc)
 {
 	struct irqsteer_data *data = irq_desc_get_handler_data(desc);
-	int i;
+	int hwirq;
+	int irq, i;
 
 	chained_irq_enter(irq_desc_get_chip(desc), desc);
 
-	for (i = 0; i < data->irq_groups * 64; i += 32) {
-		int idx = imx_irqsteer_get_reg_index(data, i);
+	irq = irq_desc_get_irq(desc);
+	hwirq = imx_irqsteer_get_hwirq_base(data, irq);
+	if (hwirq < 0) {
+		pr_warn("%s: unable to get hwirq base for irq %d\n",
+			__func__, irq);
+		return;
+	}
+
+	for (i = 0; i < 2; i++, hwirq += 32) {
+		int idx = imx_irqsteer_get_reg_index(data, hwirq);
 		unsigned long irqmap;
 		int pos, virq;
 
+		if (hwirq >= data->reg_num * 32)
+			break;
+
 		irqmap = readl_relaxed(data->regs +
-				       CHANSTATUS(idx, data->irq_groups));
+				       CHANSTATUS(idx, data->reg_num));
 
 		for_each_set_bit(pos, &irqmap, 32) {
-			virq = irq_find_mapping(data->domain, pos + i);
+			virq = irq_find_mapping(data->domain, pos + hwirq);
 			if (virq)
 				generic_handle_irq(virq);
 		}
@@ -117,7 +145,8 @@ static int imx_irqsteer_probe(struct platform_device *pdev)
 	struct device_node *np = pdev->dev.of_node;
 	struct irqsteer_data *data;
 	struct resource *res;
-	int ret;
+	u32 irqs_num;
+	int i, ret;
 
 	data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
 	if (!data)
@@ -130,12 +159,6 @@ static int imx_irqsteer_probe(struct platform_device *pdev)
 		return PTR_ERR(data->regs);
 	}
 
-	data->irq = platform_get_irq(pdev, 0);
-	if (data->irq <= 0) {
-		dev_err(&pdev->dev, "failed to get irq\n");
-		return -ENODEV;
-	}
-
 	data->ipg_clk = devm_clk_get(&pdev->dev, "ipg");
 	if (IS_ERR(data->ipg_clk)) {
 		ret = PTR_ERR(data->ipg_clk);
@@ -146,12 +169,19 @@ static int imx_irqsteer_probe(struct platform_device *pdev)
 
 	raw_spin_lock_init(&data->lock);
 
-	of_property_read_u32(np, "fsl,irq-groups", &data->irq_groups);
+	of_property_read_u32(np, "fsl,num-irqs", &irqs_num);
 	of_property_read_u32(np, "fsl,channel", &data->channel);
 
+	/*
+	 * There is one output irq for each group of 64 inputs.
+	 * One register bit map can represent 32 input interrupts.
+	 */
+	data->irq_count = DIV_ROUND_UP(irqs_num, 64);
+	data->reg_num = irqs_num / 32;
+
 	if (IS_ENABLED(CONFIG_PM_SLEEP)) {
 		data->saved_reg = devm_kzalloc(&pdev->dev,
-					       sizeof(u32) * data->irq_groups * 2,
+					       sizeof(u32) * data->reg_num,
 					       GFP_KERNEL);
 		if (!data->saved_reg)
 			return -ENOMEM;
@@ -166,27 +196,48 @@ static int imx_irqsteer_probe(struct platform_device *pdev)
 	/* steer all IRQs into configured channel */
 	writel_relaxed(BIT(data->channel), data->regs + CHANCTRL);
 
-	data->domain = irq_domain_add_linear(np, data->irq_groups * 64,
+	data->domain = irq_domain_add_linear(np, data->reg_num * 32,
 					     &imx_irqsteer_domain_ops, data);
 	if (!data->domain) {
 		dev_err(&pdev->dev, "failed to create IRQ domain\n");
-		clk_disable_unprepare(data->ipg_clk);
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	if (!data->irq_count || data->irq_count > CHAN_MAX_OUTPUT_INT) {
+		ret = -EINVAL;
+		goto out;
 	}
 
-	irq_set_chained_handler_and_data(data->irq, imx_irqsteer_irq_handler,
-					 data);
+	for (i = 0; i < data->irq_count; i++) {
+		data->irq[i] = irq_of_parse_and_map(np, i);
+		if (!data->irq[i]) {
+			ret = -EINVAL;
+			goto out;
+		}
+
+		irq_set_chained_handler_and_data(data->irq[i],
+						 imx_irqsteer_irq_handler,
+						 data);
+	}
 
 	platform_set_drvdata(pdev, data);
 
 	return 0;
+out:
+	clk_disable_unprepare(data->ipg_clk);
+	return ret;
 }
 
 static int imx_irqsteer_remove(struct platform_device *pdev)
 {
 	struct irqsteer_data *irqsteer_data = platform_get_drvdata(pdev);
+	int i;
+
+	for (i = 0; i < irqsteer_data->irq_count; i++)
+		irq_set_chained_handler_and_data(irqsteer_data->irq[i],
+						 NULL, NULL);
 
-	irq_set_chained_handler_and_data(irqsteer_data->irq, NULL, NULL);
 	irq_domain_remove(irqsteer_data->domain);
 
 	clk_disable_unprepare(irqsteer_data->ipg_clk);
@@ -199,9 +250,9 @@ static void imx_irqsteer_save_regs(struct irqsteer_data *data)
 {
 	int i;
 
-	for (i = 0; i < data->irq_groups * 2; i++)
+	for (i = 0; i < data->reg_num; i++)
 		data->saved_reg[i] = readl_relaxed(data->regs +
-						   CHANMASK(i, data->irq_groups));
+						   CHANMASK(i, data->reg_num));
 }
 
 static void imx_irqsteer_restore_regs(struct irqsteer_data *data)
@@ -209,9 +260,9 @@ static void imx_irqsteer_restore_regs(struct irqsteer_data *data)
 	int i;
 
 	writel_relaxed(BIT(data->channel), data->regs + CHANCTRL);
-	for (i = 0; i < data->irq_groups * 2; i++)
+	for (i = 0; i < data->reg_num; i++)
 		writel_relaxed(data->saved_reg[i],
-			       data->regs + CHANMASK(i, data->irq_groups));
+			       data->regs + CHANMASK(i, data->reg_num));
 }
 
 static int imx_irqsteer_suspend(struct device *dev)
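
The driver now derives both internal counts from the new fsl,num-irqs property: one chained parent output per 64 inputs, one 32-bit mask/status register per 32 inputs. A worked example, assuming a hypothetical 256-input instance:

    irqs_num  = 256;                         /* from fsl,num-irqs           */
    irq_count = DIV_ROUND_UP(irqs_num, 64);  /* = 4 parent output lines     */
    reg_num   = irqs_num / 32;               /* = 8 mask/status registers   */
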
diff --git a/drivers/irqchip/irq-ls1x.c b/drivers/irqchip/irq-ls1x.c
new file mode 100644
index 000000000000..86b72fbd3b45
--- /dev/null
+++ b/drivers/irqchip/irq-ls1x.c
@@ -0,0 +1,192 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019, Jiaxun Yang <jiaxun.yang@flygoat.com>
+ *  Loongson-1 platform IRQ support
+ */
+
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/ioport.h>
+#include <linux/irqchip.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/io.h>
+#include <linux/irqchip/chained_irq.h>
+
+#define LS_REG_INTC_STATUS	0x00
+#define LS_REG_INTC_EN		0x04
+#define LS_REG_INTC_SET		0x08
+#define LS_REG_INTC_CLR		0x0c
+#define LS_REG_INTC_POL		0x10
+#define LS_REG_INTC_EDGE	0x14
+
+/**
+ * struct ls1x_intc_priv - private ls1x-intc data.
+ * @domain:	IRQ domain.
+ * @intc_base:	IO Base of intc registers.
+ */
+
+struct ls1x_intc_priv {
+	struct irq_domain	*domain;
+	void __iomem		*intc_base;
+};
+
+
+static void ls1x_chained_handle_irq(struct irq_desc *desc)
+{
+	struct ls1x_intc_priv *priv = irq_desc_get_handler_data(desc);
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	u32 pending;
+
+	chained_irq_enter(chip, desc);
+	pending = readl(priv->intc_base + LS_REG_INTC_STATUS) &
+			readl(priv->intc_base + LS_REG_INTC_EN);
+
+	if (!pending)
+		spurious_interrupt();
+
+	while (pending) {
+		int bit = __ffs(pending);
+
+		generic_handle_irq(irq_find_mapping(priv->domain, bit));
+		pending &= ~BIT(bit);
+	}
+
+	chained_irq_exit(chip, desc);
+}
+
+static void ls_intc_set_bit(struct irq_chip_generic *gc,
+			    unsigned int offset,
+			    u32 mask, bool set)
+{
+	if (set)
+		writel(readl(gc->reg_base + offset) | mask,
+		       gc->reg_base + offset);
+	else
+		writel(readl(gc->reg_base + offset) & ~mask,
+		       gc->reg_base + offset);
+}
+
+static int ls_intc_set_type(struct irq_data *data, unsigned int type)
+{
+	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
+	u32 mask = data->mask;
+
+	switch (type) {
+	case IRQ_TYPE_LEVEL_HIGH:
+		ls_intc_set_bit(gc, LS_REG_INTC_EDGE, mask, false);
+		ls_intc_set_bit(gc, LS_REG_INTC_POL, mask, true);
+		break;
+	case IRQ_TYPE_LEVEL_LOW:
+		ls_intc_set_bit(gc, LS_REG_INTC_EDGE, mask, false);
+		ls_intc_set_bit(gc, LS_REG_INTC_POL, mask, false);
+		break;
+	case IRQ_TYPE_EDGE_RISING:
+		ls_intc_set_bit(gc, LS_REG_INTC_EDGE, mask, true);
+		ls_intc_set_bit(gc, LS_REG_INTC_POL, mask, true);
+		break;
+	case IRQ_TYPE_EDGE_FALLING:
+		ls_intc_set_bit(gc, LS_REG_INTC_EDGE, mask, true);
+		ls_intc_set_bit(gc, LS_REG_INTC_POL, mask, false);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	irqd_set_trigger_type(data, type);
+	return irq_setup_alt_chip(data, type);
+}
+
+
+static int __init ls1x_intc_of_init(struct device_node *node,
+				    struct device_node *parent)
+{
+	struct irq_chip_generic *gc;
+	struct irq_chip_type *ct;
+	struct ls1x_intc_priv *priv;
+	int parent_irq, err = 0;
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	priv->intc_base = of_iomap(node, 0);
+	if (!priv->intc_base) {
+		err = -ENODEV;
+		goto out_free_priv;
+	}
+
+	parent_irq = irq_of_parse_and_map(node, 0);
+	if (!parent_irq) {
+		pr_err("ls1x-irq: unable to get parent irq\n");
+		err = -ENODEV;
+		goto out_iounmap;
+	}
+
+	/* Set up an IRQ domain */
+	priv->domain = irq_domain_add_linear(node, 32, &irq_generic_chip_ops,
+					     NULL);
+	if (!priv->domain) {
+		pr_err("ls1x-irq: cannot add IRQ domain\n");
+		goto out_iounmap;
+	}
+
+	err = irq_alloc_domain_generic_chips(priv->domain, 32, 2,
+		node->full_name, handle_level_irq,
+		IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN, 0,
+		IRQ_GC_INIT_MASK_CACHE);
+	if (err) {
+		pr_err("ls1x-irq: unable to register IRQ domain\n");
+		goto out_free_domain;
+	}
+
+	/* Mask all irqs */
+	writel(0x0, priv->intc_base + LS_REG_INTC_EN);
+
+	/* Ack all irqs */
+	writel(0xffffffff, priv->intc_base + LS_REG_INTC_CLR);
+
+	/* Set all irqs to high level triggered */
+	writel(0xffffffff, priv->intc_base + LS_REG_INTC_POL);
+
+	gc = irq_get_domain_generic_chip(priv->domain, 0);
+
+	gc->reg_base = priv->intc_base;
+
+	ct = gc->chip_types;
+	ct[0].type = IRQ_TYPE_LEVEL_MASK;
+	ct[0].regs.mask = LS_REG_INTC_EN;
+	ct[0].regs.ack = LS_REG_INTC_CLR;
+	ct[0].chip.irq_unmask = irq_gc_mask_set_bit;
+	ct[0].chip.irq_mask = irq_gc_mask_clr_bit;
+	ct[0].chip.irq_ack = irq_gc_ack_set_bit;
+	ct[0].chip.irq_set_type = ls_intc_set_type;
+	ct[0].handler = handle_level_irq;
+
+	ct[1].type = IRQ_TYPE_EDGE_BOTH;
+	ct[1].regs.mask = LS_REG_INTC_EN;
+	ct[1].regs.ack = LS_REG_INTC_CLR;
+	ct[1].chip.irq_unmask = irq_gc_mask_set_bit;
+	ct[1].chip.irq_mask = irq_gc_mask_clr_bit;
+	ct[1].chip.irq_ack = irq_gc_ack_set_bit;
+	ct[1].chip.irq_set_type = ls_intc_set_type;
+	ct[1].handler = handle_edge_irq;
+
+	irq_set_chained_handler_and_data(parent_irq,
+		ls1x_chained_handle_irq, priv);
+
+	return 0;
+
+out_free_domain:
+	irq_domain_remove(priv->domain);
+out_iounmap:
+	iounmap(priv->intc_base);
+out_free_priv:
+	kfree(priv);
+
+	return err;
+}
+
+IRQCHIP_DECLARE(ls1x_intc, "loongson,ls1x-intc", ls1x_intc_of_init);
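
A peripheral hanging off this controller would reference it with the two-cell specifier described in the binding (hardware interrupt number plus trigger type). The node below is only an illustration; the device, unit address and interrupt number are invented:

    uart0: serial@1fe40000 {
        compatible = "ns16550a";
        reg = <0x1fe40000 0x8>;
        interrupt-parent = <&intc>;
        /* input 2 of the intc, high-level triggered */
        interrupts = <2 IRQ_TYPE_LEVEL_HIGH>;
    };
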
diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c
index 357e9daf94ae..cf755964f2f8 100644
--- a/drivers/irqchip/irq-sifive-plic.c
+++ b/drivers/irqchip/irq-sifive-plic.c
@@ -59,62 +59,83 @@ static void __iomem *plic_regs;
 
 struct plic_handler {
 	bool			present;
-	int			ctxid;
+	void __iomem		*hart_base;
+	/*
+	 * Protect mask operations on the registers given that we can't
+	 * assume atomic memory operations work on them.
+	 */
+	raw_spinlock_t		enable_lock;
+	void __iomem		*enable_base;
 };
 static DEFINE_PER_CPU(struct plic_handler, plic_handlers);
 
-static inline void __iomem *plic_hart_offset(int ctxid)
-{
-	return plic_regs + CONTEXT_BASE + ctxid * CONTEXT_PER_HART;
-}
-
-static inline u32 __iomem *plic_enable_base(int ctxid)
-{
-	return plic_regs + ENABLE_BASE + ctxid * ENABLE_PER_HART;
-}
-
-/*
- * Protect mask operations on the registers given that we can't assume that
- * atomic memory operations work on them.
- */
-static DEFINE_RAW_SPINLOCK(plic_toggle_lock);
-
-static inline void plic_toggle(int ctxid, int hwirq, int enable)
+static inline void plic_toggle(struct plic_handler *handler,
+				int hwirq, int enable)
 {
-	u32 __iomem *reg = plic_enable_base(ctxid) + (hwirq / 32);
+	u32 __iomem *reg = handler->enable_base + (hwirq / 32) * sizeof(u32);
 	u32 hwirq_mask = 1 << (hwirq % 32);
 
-	raw_spin_lock(&plic_toggle_lock);
+	raw_spin_lock(&handler->enable_lock);
 	if (enable)
 		writel(readl(reg) | hwirq_mask, reg);
 	else
 		writel(readl(reg) & ~hwirq_mask, reg);
-	raw_spin_unlock(&plic_toggle_lock);
+	raw_spin_unlock(&handler->enable_lock);
 }
 
-static inline void plic_irq_toggle(struct irq_data *d, int enable)
+static inline void plic_irq_toggle(const struct cpumask *mask,
+				   int hwirq, int enable)
 {
 	int cpu;
 
-	writel(enable, plic_regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID);
-	for_each_cpu(cpu, irq_data_get_affinity_mask(d)) {
+	writel(enable, plic_regs + PRIORITY_BASE + hwirq * PRIORITY_PER_ID);
+	for_each_cpu(cpu, mask) {
 		struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);
 
 		if (handler->present)
-			plic_toggle(handler->ctxid, d->hwirq, enable);
+			plic_toggle(handler, hwirq, enable);
 	}
 }
 
 static void plic_irq_enable(struct irq_data *d)
 {
-	plic_irq_toggle(d, 1);
+	unsigned int cpu = cpumask_any_and(irq_data_get_affinity_mask(d),
+					   cpu_online_mask);
+	if (WARN_ON_ONCE(cpu >= nr_cpu_ids))
+		return;
+	plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);
 }
 
 static void plic_irq_disable(struct irq_data *d)
 {
-	plic_irq_toggle(d, 0);
+	plic_irq_toggle(cpu_possible_mask, d->hwirq, 0);
 }
 
+#ifdef CONFIG_SMP
+static int plic_set_affinity(struct irq_data *d,
+			     const struct cpumask *mask_val, bool force)
+{
+	unsigned int cpu;
+
+	if (force)
+		cpu = cpumask_first(mask_val);
+	else
+		cpu = cpumask_any_and(mask_val, cpu_online_mask);
+
+	if (cpu >= nr_cpu_ids)
+		return -EINVAL;
+
+	if (!irqd_irq_disabled(d)) {
+		plic_irq_toggle(cpu_possible_mask, d->hwirq, 0);
+		plic_irq_toggle(cpumask_of(cpu), d->hwirq, 1);
+	}
+
+	irq_data_update_effective_affinity(d, cpumask_of(cpu));
+
+	return IRQ_SET_MASK_OK_DONE;
+}
+#endif
+
 static struct irq_chip plic_chip = {
 	.name		= "SiFive PLIC",
 	/*
@@ -123,6 +144,9 @@ static struct irq_chip plic_chip = {
 	 */
 	.irq_enable	= plic_irq_enable,
 	.irq_disable	= plic_irq_disable,
+#ifdef CONFIG_SMP
+	.irq_set_affinity = plic_set_affinity,
+#endif
 };
 
 static int plic_irqdomain_map(struct irq_domain *d, unsigned int irq,
@@ -150,7 +174,7 @@ static struct irq_domain *plic_irqdomain;
 static void plic_handle_irq(struct pt_regs *regs)
 {
 	struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
-	void __iomem *claim = plic_hart_offset(handler->ctxid) + CONTEXT_CLAIM;
+	void __iomem *claim = handler->hart_base + CONTEXT_CLAIM;
 	irq_hw_number_t hwirq;
 
 	WARN_ON_ONCE(!handler->present);
@@ -186,7 +210,7 @@ static int plic_find_hart_id(struct device_node *node)
 static int __init plic_init(struct device_node *node,
 		struct device_node *parent)
 {
-	int error = 0, nr_handlers, nr_mapped = 0, i;
+	int error = 0, nr_contexts, nr_handlers = 0, i;
 	u32 nr_irqs;
 
 	if (plic_regs) {
@@ -203,10 +227,10 @@ static int __init plic_init(struct device_node *node,
 	if (WARN_ON(!nr_irqs))
 		goto out_iounmap;
 
-	nr_handlers = of_irq_count(node);
-	if (WARN_ON(!nr_handlers))
+	nr_contexts = of_irq_count(node);
+	if (WARN_ON(!nr_contexts))
 		goto out_iounmap;
-	if (WARN_ON(nr_handlers < num_possible_cpus()))
+	if (WARN_ON(nr_contexts < num_possible_cpus()))
 		goto out_iounmap;
 
 	error = -ENOMEM;
@@ -215,7 +239,7 @@ static int __init plic_init(struct device_node *node,
 	if (WARN_ON(!plic_irqdomain))
 		goto out_iounmap;
 
-	for (i = 0; i < nr_handlers; i++) {
+	for (i = 0; i < nr_contexts; i++) {
 		struct of_phandle_args parent;
 		struct plic_handler *handler;
 		irq_hw_number_t hwirq;
@@ -237,19 +261,33 @@ static int __init plic_init(struct device_node *node,
 		}
 
 		cpu = riscv_hartid_to_cpuid(hartid);
+		if (cpu < 0) {
+			pr_warn("Invalid cpuid for context %d\n", i);
+			continue;
+		}
+
 		handler = per_cpu_ptr(&plic_handlers, cpu);
+		if (handler->present) {
+			pr_warn("handler already present for context %d.\n", i);
+			continue;
+		}
+
 		handler->present = true;
-		handler->ctxid = i;
+		handler->hart_base =
+			plic_regs + CONTEXT_BASE + i * CONTEXT_PER_HART;
+		raw_spin_lock_init(&handler->enable_lock);
+		handler->enable_base =
+			plic_regs + ENABLE_BASE + i * ENABLE_PER_HART;
 
 		/* priority must be > threshold to trigger an interrupt */
-		writel(0, plic_hart_offset(i) + CONTEXT_THRESHOLD);
+		writel(0, handler->hart_base + CONTEXT_THRESHOLD);
 		for (hwirq = 1; hwirq <= nr_irqs; hwirq++)
-			plic_toggle(i, hwirq, 0);
-		nr_mapped++;
+			plic_toggle(handler, hwirq, 0);
+		nr_handlers++;
 	}
 
-	pr_info("mapped %d interrupts to %d (out of %d) handlers.\n",
-		nr_irqs, nr_mapped, nr_handlers);
+	pr_info("mapped %d interrupts with %d handlers for %d contexts.\n",
+		nr_irqs, nr_handlers, nr_contexts);
 	set_handle_irq(plic_handle_irq);
 	return 0;
 
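
With the per-context handler data, the enable bit for a given source is found purely from the hart's own enable window. A small worked sketch of the address arithmetic used by plic_toggle(), with a hypothetical hwirq value:

    /* enable bit for hwirq lives in the per-context enable window */
    u32 __iomem *reg = handler->enable_base + (hwirq / 32) * sizeof(u32);
    u32 bit = 1U << (hwirq % 32);    /* e.g. hwirq 40 -> word 1, bit 8 */
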
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 7fee665ec45e..e905861186e3 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -2041,53 +2041,52 @@ static int nvme_setup_host_mem(struct nvme_dev *dev)
 	return ret;
 }
 
-/* irq_queues covers admin queue */
-static void nvme_calc_io_queues(struct nvme_dev *dev, unsigned int irq_queues)
+/*
+ * nirqs is the number of interrupts available for write and read
+ * queues. The core already reserved an interrupt for the admin queue.
+ */
+static void nvme_calc_irq_sets(struct irq_affinity *affd, unsigned int nrirqs)
 {
-	unsigned int this_w_queues = write_queues;
-
-	WARN_ON(!irq_queues);
-
-	/*
-	 * Setup read/write queue split, assign admin queue one independent
-	 * irq vector if irq_queues is > 1.
-	 */
-	if (irq_queues <= 2) {
-		dev->io_queues[HCTX_TYPE_DEFAULT] = 1;
-		dev->io_queues[HCTX_TYPE_READ] = 0;
-		return;
-	}
-
-	/*
-	 * If 'write_queues' is set, ensure it leaves room for at least
-	 * one read queue and one admin queue
-	 */
-	if (this_w_queues >= irq_queues)
-		this_w_queues = irq_queues - 2;
+	struct nvme_dev *dev = affd->priv;
+	unsigned int nr_read_queues;
 
 	/*
-	 * If 'write_queues' is set to zero, reads and writes will share
-	 * a queue set.
+	 * If there is no interupt available for queues, ensure that
+	 * the default queue is set to 1. The affinity set size is
+	 * also set to one, but the irq core ignores it for this case.
+	 *
+	 * If only one interrupt is available or 'write_queue' == 0, combine
+	 * write and read queues.
+	 *
+	 * If 'write_queues' > 0, ensure it leaves room for at least one read
+	 * queue.
 	 */
-	if (!this_w_queues) {
-		dev->io_queues[HCTX_TYPE_DEFAULT] = irq_queues - 1;
-		dev->io_queues[HCTX_TYPE_READ] = 0;
+	if (!nrirqs) {
+		nrirqs = 1;
+		nr_read_queues = 0;
+	} else if (nrirqs == 1 || !write_queues) {
+		nr_read_queues = 0;
+	} else if (write_queues >= nrirqs) {
+		nr_read_queues = 1;
 	} else {
-		dev->io_queues[HCTX_TYPE_DEFAULT] = this_w_queues;
-		dev->io_queues[HCTX_TYPE_READ] = irq_queues - this_w_queues - 1;
+		nr_read_queues = nrirqs - write_queues;
 	}
+
+	dev->io_queues[HCTX_TYPE_DEFAULT] = nrirqs - nr_read_queues;
+	affd->set_size[HCTX_TYPE_DEFAULT] = nrirqs - nr_read_queues;
+	dev->io_queues[HCTX_TYPE_READ] = nr_read_queues;
+	affd->set_size[HCTX_TYPE_READ] = nr_read_queues;
+	affd->nr_sets = nr_read_queues ? 2 : 1;
 }
 
 static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
 {
 	struct pci_dev *pdev = to_pci_dev(dev->dev);
-	int irq_sets[2];
 	struct irq_affinity affd = {
 		.pre_vectors	= 1,
-		.nr_sets	= ARRAY_SIZE(irq_sets),
-		.sets		= irq_sets,
+		.calc_sets	= nvme_calc_irq_sets,
+		.priv		= dev,
 	};
-	int result = 0;
 	unsigned int irq_queues, this_p_queues;
 
 	/*
@@ -2103,51 +2102,12 @@ static int nvme_setup_irqs(struct nvme_dev *dev, unsigned int nr_io_queues)
 	}
 	dev->io_queues[HCTX_TYPE_POLL] = this_p_queues;
 
-	/*
-	 * For irq sets, we have to ask for minvec == maxvec. This passes
-	 * any reduction back to us, so we can adjust our queue counts and
-	 * IRQ vector needs.
-	 */
-	do {
-		nvme_calc_io_queues(dev, irq_queues);
-		irq_sets[0] = dev->io_queues[HCTX_TYPE_DEFAULT];
-		irq_sets[1] = dev->io_queues[HCTX_TYPE_READ];
-		if (!irq_sets[1])
-			affd.nr_sets = 1;
-
-		/*
-		 * If we got a failure and we're down to asking for just
-		 * 1 + 1 queues, just ask for a single vector. We'll share
-		 * that between the single IO queue and the admin queue.
-		 * Otherwise, we assign one independent vector to admin queue.
-		 */
-		if (irq_queues > 1)
-			irq_queues = irq_sets[0] + irq_sets[1] + 1;
+	/* Initialize for the single interrupt case */
+	dev->io_queues[HCTX_TYPE_DEFAULT] = 1;
+	dev->io_queues[HCTX_TYPE_READ] = 0;
 
-		result = pci_alloc_irq_vectors_affinity(pdev, irq_queues,
-				irq_queues,
-				PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY, &affd);
-
-		/*
-		 * Need to reduce our vec counts. If we get ENOSPC, the
-		 * platform should support mulitple vecs, we just need
-		 * to decrease our ask. If we get EINVAL, the platform
-		 * likely does not. Back down to ask for just one vector.
-		 */
-		if (result == -ENOSPC) {
-			irq_queues--;
-			if (!irq_queues)
-				return result;
-			continue;
-		} else if (result == -EINVAL) {
-			irq_queues = 1;
-			continue;
-		} else if (result <= 0)
-			return -EIO;
-		break;
-	} while (1);
-
-	return result;
+	return pci_alloc_irq_vectors_affinity(pdev, 1, irq_queues,
+			PCI_IRQ_ALL_TYPES | PCI_IRQ_AFFINITY, &affd);
 }
 
 static void nvme_disable_io_queues(struct nvme_dev *dev)
@@ -3024,6 +2984,7 @@ static struct pci_driver nvme_driver = {
 
 static int __init nvme_init(void)
 {
+	BUILD_BUG_ON(IRQ_AFFINITY_MAX_SETS < 2);
 	return pci_register_driver(&nvme_driver);
 }
 
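
The NVMe side now hands the read/write split to the IRQ core through the .calc_sets callback: the core calls back with the number of vectors it can actually spread, and the callback fills in nr_sets and set_size[] before the masks are built. A reduced sketch of such a descriptor, with a hypothetical callback that assumes at least two spreadable vectors:

    static void example_calc_sets(struct irq_affinity *affd, unsigned int nvecs)
    {
        /* split whatever is available into two sets, reads keep one vector */
        affd->nr_sets = 2;
        affd->set_size[0] = nvecs - 1;
        affd->set_size[1] = 1;
    }

    struct irq_affinity affd = {
        .pre_vectors = 1,                  /* admin queue keeps its own vector */
        .calc_sets   = example_calc_sets,
    };
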
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index 4c0b47867258..73986825d221 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c | |||
| @@ -532,7 +532,7 @@ error_attrs: | |||
| 532 | } | 532 | } |
| 533 | 533 | ||
| 534 | static struct msi_desc * | 534 | static struct msi_desc * |
| 535 | msi_setup_entry(struct pci_dev *dev, int nvec, const struct irq_affinity *affd) | 535 | msi_setup_entry(struct pci_dev *dev, int nvec, struct irq_affinity *affd) |
| 536 | { | 536 | { |
| 537 | struct irq_affinity_desc *masks = NULL; | 537 | struct irq_affinity_desc *masks = NULL; |
| 538 | struct msi_desc *entry; | 538 | struct msi_desc *entry; |
| @@ -597,7 +597,7 @@ static int msi_verify_entries(struct pci_dev *dev) | |||
| 597 | * which could have been allocated. | 597 | * which could have been allocated. |
| 598 | */ | 598 | */ |
| 599 | static int msi_capability_init(struct pci_dev *dev, int nvec, | 599 | static int msi_capability_init(struct pci_dev *dev, int nvec, |
| 600 | const struct irq_affinity *affd) | 600 | struct irq_affinity *affd) |
| 601 | { | 601 | { |
| 602 | struct msi_desc *entry; | 602 | struct msi_desc *entry; |
| 603 | int ret; | 603 | int ret; |
| @@ -669,7 +669,7 @@ static void __iomem *msix_map_region(struct pci_dev *dev, unsigned nr_entries) | |||
| 669 | 669 | ||
| 670 | static int msix_setup_entries(struct pci_dev *dev, void __iomem *base, | 670 | static int msix_setup_entries(struct pci_dev *dev, void __iomem *base, |
| 671 | struct msix_entry *entries, int nvec, | 671 | struct msix_entry *entries, int nvec, |
| 672 | const struct irq_affinity *affd) | 672 | struct irq_affinity *affd) |
| 673 | { | 673 | { |
| 674 | struct irq_affinity_desc *curmsk, *masks = NULL; | 674 | struct irq_affinity_desc *curmsk, *masks = NULL; |
| 675 | struct msi_desc *entry; | 675 | struct msi_desc *entry; |
| @@ -736,7 +736,7 @@ static void msix_program_entries(struct pci_dev *dev, | |||
| 736 | * requested MSI-X entries with allocated irqs or non-zero for otherwise. | 736 | * requested MSI-X entries with allocated irqs or non-zero for otherwise. |
| 737 | **/ | 737 | **/ |
| 738 | static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries, | 738 | static int msix_capability_init(struct pci_dev *dev, struct msix_entry *entries, |
| 739 | int nvec, const struct irq_affinity *affd) | 739 | int nvec, struct irq_affinity *affd) |
| 740 | { | 740 | { |
| 741 | int ret; | 741 | int ret; |
| 742 | u16 control; | 742 | u16 control; |
| @@ -932,7 +932,7 @@ int pci_msix_vec_count(struct pci_dev *dev) | |||
| 932 | EXPORT_SYMBOL(pci_msix_vec_count); | 932 | EXPORT_SYMBOL(pci_msix_vec_count); |
| 933 | 933 | ||
| 934 | static int __pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, | 934 | static int __pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, |
| 935 | int nvec, const struct irq_affinity *affd) | 935 | int nvec, struct irq_affinity *affd) |
| 936 | { | 936 | { |
| 937 | int nr_entries; | 937 | int nr_entries; |
| 938 | int i, j; | 938 | int i, j; |
| @@ -1018,7 +1018,7 @@ int pci_msi_enabled(void) | |||
| 1018 | EXPORT_SYMBOL(pci_msi_enabled); | 1018 | EXPORT_SYMBOL(pci_msi_enabled); |
| 1019 | 1019 | ||
| 1020 | static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec, | 1020 | static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec, |
| 1021 | const struct irq_affinity *affd) | 1021 | struct irq_affinity *affd) |
| 1022 | { | 1022 | { |
| 1023 | int nvec; | 1023 | int nvec; |
| 1024 | int rc; | 1024 | int rc; |
| @@ -1035,13 +1035,6 @@ static int __pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec, | |||
| 1035 | if (maxvec < minvec) | 1035 | if (maxvec < minvec) |
| 1036 | return -ERANGE; | 1036 | return -ERANGE; |
| 1037 | 1037 | ||
| 1038 | /* | ||
| 1039 | * If the caller is passing in sets, we can't support a range of | ||
| 1040 | * vectors. The caller needs to handle that. | ||
| 1041 | */ | ||
| 1042 | if (affd && affd->nr_sets && minvec != maxvec) | ||
| 1043 | return -EINVAL; | ||
| 1044 | |||
| 1045 | if (WARN_ON_ONCE(dev->msi_enabled)) | 1038 | if (WARN_ON_ONCE(dev->msi_enabled)) |
| 1046 | return -EINVAL; | 1039 | return -EINVAL; |
| 1047 | 1040 | ||
| @@ -1086,20 +1079,13 @@ EXPORT_SYMBOL(pci_enable_msi); | |||
| 1086 | 1079 | ||
| 1087 | static int __pci_enable_msix_range(struct pci_dev *dev, | 1080 | static int __pci_enable_msix_range(struct pci_dev *dev, |
| 1088 | struct msix_entry *entries, int minvec, | 1081 | struct msix_entry *entries, int minvec, |
| 1089 | int maxvec, const struct irq_affinity *affd) | 1082 | int maxvec, struct irq_affinity *affd) |
| 1090 | { | 1083 | { |
| 1091 | int rc, nvec = maxvec; | 1084 | int rc, nvec = maxvec; |
| 1092 | 1085 | ||
| 1093 | if (maxvec < minvec) | 1086 | if (maxvec < minvec) |
| 1094 | return -ERANGE; | 1087 | return -ERANGE; |
| 1095 | 1088 | ||
| 1096 | /* | ||
| 1097 | * If the caller is passing in sets, we can't support a range of | ||
| 1098 | * supported vectors. The caller needs to handle that. | ||
| 1099 | */ | ||
| 1100 | if (affd && affd->nr_sets && minvec != maxvec) | ||
| 1101 | return -EINVAL; | ||
| 1102 | |||
| 1103 | if (WARN_ON_ONCE(dev->msix_enabled)) | 1089 | if (WARN_ON_ONCE(dev->msix_enabled)) |
| 1104 | return -EINVAL; | 1090 | return -EINVAL; |
| 1105 | 1091 | ||
| @@ -1165,9 +1151,9 @@ EXPORT_SYMBOL(pci_enable_msix_range); | |||
| 1165 | */ | 1151 | */ |
| 1166 | int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs, | 1152 | int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs, |
| 1167 | unsigned int max_vecs, unsigned int flags, | 1153 | unsigned int max_vecs, unsigned int flags, |
| 1168 | const struct irq_affinity *affd) | 1154 | struct irq_affinity *affd) |
| 1169 | { | 1155 | { |
| 1170 | static const struct irq_affinity msi_default_affd; | 1156 | struct irq_affinity msi_default_affd = {0}; |
| 1171 | int msix_vecs = -ENOSPC; | 1157 | int msix_vecs = -ENOSPC; |
| 1172 | int msi_vecs = -ENOSPC; | 1158 | int msi_vecs = -ENOSPC; |
| 1173 | 1159 | ||
| @@ -1196,6 +1182,13 @@ int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs, | |||
| 1196 | /* use legacy irq if allowed */ | 1182 | /* use legacy irq if allowed */ |
| 1197 | if (flags & PCI_IRQ_LEGACY) { | 1183 | if (flags & PCI_IRQ_LEGACY) { |
| 1198 | if (min_vecs == 1 && dev->irq) { | 1184 | if (min_vecs == 1 && dev->irq) { |
| 1185 | /* | ||
| 1186 | * Invoke the affinity spreading logic to ensure that | ||
| 1187 | * the device driver can adjust queue configuration | ||
| 1188 | * for the single interrupt case. | ||
| 1189 | */ | ||
| 1190 | if (affd) | ||
| 1191 | irq_create_affinity_masks(1, affd); | ||
| 1199 | pci_intx(dev, 1); | 1192 | pci_intx(dev, 1); |
| 1200 | return 1; | 1193 | return 1; |
| 1201 | } | 1194 | } |
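A note on the API change above: because irq_create_affinity_masks() may now write back into the passed descriptor (filling in nr_sets and set_size[]), and because the legacy-IRQ fallback now runs the spreading logic once so the driver's queue calculation stays consistent, callers have to hand in a writable struct irq_affinity. A minimal caller-side sketch, assuming a hypothetical device; the "foo" name and the vector counts are illustrative, not from this series:

  #include <linux/pci.h>
  #include <linux/interrupt.h>

  static int foo_setup_irqs(struct pci_dev *pdev)
  {
      /* Writable, not const: the core may update nr_sets/set_size[]. */
      struct irq_affinity affd = {
          .pre_vectors = 1,       /* vector 0 is the admin/config interrupt */
      };
      int nvecs;

      nvecs = pci_alloc_irq_vectors_affinity(pdev, 1, 32,
                      PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY |
                      PCI_IRQ_AFFINITY, &affd);
      if (nvecs < 0)
          return nvecs;
      return 0;
  }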
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c index 74e260027c7d..76e49d902609 100644 --- a/drivers/scsi/be2iscsi/be_main.c +++ b/drivers/scsi/be2iscsi/be_main.c | |||
| @@ -3566,7 +3566,7 @@ static void be2iscsi_enable_msix(struct beiscsi_hba *phba) | |||
| 3566 | 3566 | ||
| 3567 | /* if eqid_count == 1 fall back to INTX */ | 3567 | /* if eqid_count == 1 fall back to INTX */ |
| 3568 | if (enable_msix && nvec > 1) { | 3568 | if (enable_msix && nvec > 1) { |
| 3569 | const struct irq_affinity desc = { .post_vectors = 1 }; | 3569 | struct irq_affinity desc = { .post_vectors = 1 }; |
| 3570 | 3570 | ||
| 3571 | if (pci_alloc_irq_vectors_affinity(phba->pcidev, 2, nvec, | 3571 | if (pci_alloc_irq_vectors_affinity(phba->pcidev, 2, nvec, |
| 3572 | PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, &desc) < 0) { | 3572 | PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, &desc) < 0) { |
diff --git a/fs/proc/stat.c b/fs/proc/stat.c index 535eda7857cf..76175211b304 100644 --- a/fs/proc/stat.c +++ b/fs/proc/stat.c | |||
| @@ -79,6 +79,31 @@ static u64 get_iowait_time(int cpu) | |||
| 79 | 79 | ||
| 80 | #endif | 80 | #endif |
| 81 | 81 | ||
| 82 | static void show_irq_gap(struct seq_file *p, unsigned int gap) | ||
| 83 | { | ||
| 84 | static const char zeros[] = " 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0"; | ||
| 85 | |||
| 86 | while (gap > 0) { | ||
| 87 | unsigned int inc; | ||
| 88 | |||
| 89 | inc = min_t(unsigned int, gap, ARRAY_SIZE(zeros) / 2); | ||
| 90 | seq_write(p, zeros, 2 * inc); | ||
| 91 | gap -= inc; | ||
| 92 | } | ||
| 93 | } | ||
| 94 | |||
| 95 | static void show_all_irqs(struct seq_file *p) | ||
| 96 | { | ||
| 97 | unsigned int i, next = 0; | ||
| 98 | |||
| 99 | for_each_active_irq(i) { | ||
| 100 | show_irq_gap(p, i - next); | ||
| 101 | seq_put_decimal_ull(p, " ", kstat_irqs_usr(i)); | ||
| 102 | next = i + 1; | ||
| 103 | } | ||
| 104 | show_irq_gap(p, nr_irqs - next); | ||
| 105 | } | ||
| 106 | |||
| 82 | static int show_stat(struct seq_file *p, void *v) | 107 | static int show_stat(struct seq_file *p, void *v) |
| 83 | { | 108 | { |
| 84 | int i, j; | 109 | int i, j; |
| @@ -156,9 +181,7 @@ static int show_stat(struct seq_file *p, void *v) | |||
| 156 | } | 181 | } |
| 157 | seq_put_decimal_ull(p, "intr ", (unsigned long long)sum); | 182 | seq_put_decimal_ull(p, "intr ", (unsigned long long)sum); |
| 158 | 183 | ||
| 159 | /* sum again ? it could be updated? */ | 184 | show_all_irqs(p); |
| 160 | for_each_irq_nr(j) | ||
| 161 | seq_put_decimal_ull(p, " ", kstat_irqs_usr(j)); | ||
| 162 | 185 | ||
| 163 | seq_printf(p, | 186 | seq_printf(p, |
| 164 | "\nctxt %llu\n" | 187 | "\nctxt %llu\n" |
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 4a728dba02e2..690b238a44d5 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h | |||
| @@ -156,6 +156,10 @@ __request_percpu_irq(unsigned int irq, irq_handler_t handler, | |||
| 156 | unsigned long flags, const char *devname, | 156 | unsigned long flags, const char *devname, |
| 157 | void __percpu *percpu_dev_id); | 157 | void __percpu *percpu_dev_id); |
| 158 | 158 | ||
| 159 | extern int __must_check | ||
| 160 | request_nmi(unsigned int irq, irq_handler_t handler, unsigned long flags, | ||
| 161 | const char *name, void *dev); | ||
| 162 | |||
| 159 | static inline int __must_check | 163 | static inline int __must_check |
| 160 | request_percpu_irq(unsigned int irq, irq_handler_t handler, | 164 | request_percpu_irq(unsigned int irq, irq_handler_t handler, |
| 161 | const char *devname, void __percpu *percpu_dev_id) | 165 | const char *devname, void __percpu *percpu_dev_id) |
| @@ -164,9 +168,16 @@ request_percpu_irq(unsigned int irq, irq_handler_t handler, | |||
| 164 | devname, percpu_dev_id); | 168 | devname, percpu_dev_id); |
| 165 | } | 169 | } |
| 166 | 170 | ||
| 171 | extern int __must_check | ||
| 172 | request_percpu_nmi(unsigned int irq, irq_handler_t handler, | ||
| 173 | const char *devname, void __percpu *dev); | ||
| 174 | |||
| 167 | extern const void *free_irq(unsigned int, void *); | 175 | extern const void *free_irq(unsigned int, void *); |
| 168 | extern void free_percpu_irq(unsigned int, void __percpu *); | 176 | extern void free_percpu_irq(unsigned int, void __percpu *); |
| 169 | 177 | ||
| 178 | extern const void *free_nmi(unsigned int irq, void *dev_id); | ||
| 179 | extern void free_percpu_nmi(unsigned int irq, void __percpu *percpu_dev_id); | ||
| 180 | |||
| 170 | struct device; | 181 | struct device; |
| 171 | 182 | ||
| 172 | extern int __must_check | 183 | extern int __must_check |
| @@ -217,6 +228,13 @@ extern void enable_percpu_irq(unsigned int irq, unsigned int type); | |||
| 217 | extern bool irq_percpu_is_enabled(unsigned int irq); | 228 | extern bool irq_percpu_is_enabled(unsigned int irq); |
| 218 | extern void irq_wake_thread(unsigned int irq, void *dev_id); | 229 | extern void irq_wake_thread(unsigned int irq, void *dev_id); |
| 219 | 230 | ||
| 231 | extern void disable_nmi_nosync(unsigned int irq); | ||
| 232 | extern void disable_percpu_nmi(unsigned int irq); | ||
| 233 | extern void enable_nmi(unsigned int irq); | ||
| 234 | extern void enable_percpu_nmi(unsigned int irq, unsigned int type); | ||
| 235 | extern int prepare_percpu_nmi(unsigned int irq); | ||
| 236 | extern void teardown_percpu_nmi(unsigned int irq); | ||
| 237 | |||
| 220 | /* The following three functions are for the core kernel use only. */ | 238 | /* The following three functions are for the core kernel use only. */ |
| 221 | extern void suspend_device_irqs(void); | 239 | extern void suspend_device_irqs(void); |
| 222 | extern void resume_device_irqs(void); | 240 | extern void resume_device_irqs(void); |
| @@ -241,20 +259,29 @@ struct irq_affinity_notify { | |||
| 241 | void (*release)(struct kref *ref); | 259 | void (*release)(struct kref *ref); |
| 242 | }; | 260 | }; |
| 243 | 261 | ||
| 262 | #define IRQ_AFFINITY_MAX_SETS 4 | ||
| 263 | |||
| 244 | /** | 264 | /** |
| 245 | * struct irq_affinity - Description for automatic irq affinity assignments | 265 | * struct irq_affinity - Description for automatic irq affinity assignments |
| 246 | * @pre_vectors: Don't apply affinity to @pre_vectors at beginning of | 266 | * @pre_vectors: Don't apply affinity to @pre_vectors at beginning of |
| 247 | * the MSI(-X) vector space | 267 | * the MSI(-X) vector space |
| 248 | * @post_vectors: Don't apply affinity to @post_vectors at end of | 268 | * @post_vectors: Don't apply affinity to @post_vectors at end of |
| 249 | * the MSI(-X) vector space | 269 | * the MSI(-X) vector space |
| 250 | * @nr_sets: Length of passed in *sets array | 270 | * @nr_sets: The number of interrupt sets for which affinity |
| 251 | * @sets: Number of affinitized sets | 271 | * spreading is required |
| 272 | * @set_size: Array holding the size of each interrupt set | ||
| 273 | * @calc_sets: Callback for calculating the number and size | ||
| 274 | * of interrupt sets | ||
| 275 | * @priv: Private data for usage by @calc_sets, usually a | ||
| 276 | * pointer to driver/device specific data. | ||
| 252 | */ | 277 | */ |
| 253 | struct irq_affinity { | 278 | struct irq_affinity { |
| 254 | int pre_vectors; | 279 | unsigned int pre_vectors; |
| 255 | int post_vectors; | 280 | unsigned int post_vectors; |
| 256 | int nr_sets; | 281 | unsigned int nr_sets; |
| 257 | int *sets; | 282 | unsigned int set_size[IRQ_AFFINITY_MAX_SETS]; |
| 283 | void (*calc_sets)(struct irq_affinity *, unsigned int nvecs); | ||
| 284 | void *priv; | ||
| 258 | }; | 285 | }; |
| 259 | 286 | ||
| 260 | /** | 287 | /** |
| @@ -314,9 +341,10 @@ extern int | |||
| 314 | irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify); | 341 | irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify); |
| 315 | 342 | ||
| 316 | struct irq_affinity_desc * | 343 | struct irq_affinity_desc * |
| 317 | irq_create_affinity_masks(int nvec, const struct irq_affinity *affd); | 344 | irq_create_affinity_masks(unsigned int nvec, struct irq_affinity *affd); |
| 318 | 345 | ||
| 319 | int irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity *affd); | 346 | unsigned int irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec, |
| 347 | const struct irq_affinity *affd); | ||
| 320 | 348 | ||
| 321 | #else /* CONFIG_SMP */ | 349 | #else /* CONFIG_SMP */ |
| 322 | 350 | ||
| @@ -350,13 +378,14 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify) | |||
| 350 | } | 378 | } |
| 351 | 379 | ||
| 352 | static inline struct irq_affinity_desc * | 380 | static inline struct irq_affinity_desc * |
| 353 | irq_create_affinity_masks(int nvec, const struct irq_affinity *affd) | 381 | irq_create_affinity_masks(unsigned int nvec, struct irq_affinity *affd) |
| 354 | { | 382 | { |
| 355 | return NULL; | 383 | return NULL; |
| 356 | } | 384 | } |
| 357 | 385 | ||
| 358 | static inline int | 386 | static inline unsigned int |
| 359 | irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity *affd) | 387 | irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec, |
| 388 | const struct irq_affinity *affd) | ||
| 360 | { | 389 | { |
| 361 | return maxvec; | 390 | return maxvec; |
| 362 | } | 391 | } |
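The fixed set_size[IRQ_AFFINITY_MAX_SETS] array plus the calc_sets() callback replace the old caller-owned sets pointer: the core re-invokes calc_sets() with the number of vectors that are actually spreadable (possibly zero once pre/post vectors are subtracted), so a driver can adapt its queue split whenever the allocation shrinks. A sketch of a driver splitting the spread vectors into two queue sets; the "foo" names and the half/half policy are purely illustrative:

  /*
   * Invoked by the core with the number of spreadable vectors, which
   * may be 0 if the pre/post vectors consume everything.
   */
  static void foo_calc_sets(struct irq_affinity *affd, unsigned int nvecs)
  {
      affd->nr_sets     = 2;
      affd->set_size[0] = (nvecs + 1) / 2;    /* e.g. read queues  */
      affd->set_size[1] = nvecs / 2;          /* e.g. write queues */
  }

  /* Passed (writable) to pci_alloc_irq_vectors_affinity() and friends. */
  static struct irq_affinity foo_affd = {
      .pre_vectors = 1,             /* admin vector, excluded from the spread */
      .calc_sets   = foo_calc_sets,
      .priv        = NULL,          /* could carry driver data for calc_sets() */
  };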
diff --git a/include/linux/irq.h b/include/linux/irq.h index def2b2aac8b1..5e91f6bcaacd 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h | |||
| @@ -442,6 +442,8 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d) | |||
| 442 | * @irq_set_vcpu_affinity: optional to target a vCPU in a virtual machine | 442 | * @irq_set_vcpu_affinity: optional to target a vCPU in a virtual machine |
| 443 | * @ipi_send_single: send a single IPI to destination cpus | 443 | * @ipi_send_single: send a single IPI to destination cpus |
| 444 | * @ipi_send_mask: send an IPI to destination cpus in cpumask | 444 | * @ipi_send_mask: send an IPI to destination cpus in cpumask |
| 445 | * @irq_nmi_setup: function called from core code before enabling an NMI | ||
| 446 | * @irq_nmi_teardown: function called from core code after disabling an NMI | ||
| 445 | * @flags: chip specific flags | 447 | * @flags: chip specific flags |
| 446 | */ | 448 | */ |
| 447 | struct irq_chip { | 449 | struct irq_chip { |
| @@ -490,6 +492,9 @@ struct irq_chip { | |||
| 490 | void (*ipi_send_single)(struct irq_data *data, unsigned int cpu); | 492 | void (*ipi_send_single)(struct irq_data *data, unsigned int cpu); |
| 491 | void (*ipi_send_mask)(struct irq_data *data, const struct cpumask *dest); | 493 | void (*ipi_send_mask)(struct irq_data *data, const struct cpumask *dest); |
| 492 | 494 | ||
| 495 | int (*irq_nmi_setup)(struct irq_data *data); | ||
| 496 | void (*irq_nmi_teardown)(struct irq_data *data); | ||
| 497 | |||
| 493 | unsigned long flags; | 498 | unsigned long flags; |
| 494 | }; | 499 | }; |
| 495 | 500 | ||
| @@ -505,6 +510,7 @@ struct irq_chip { | |||
| 505 | * IRQCHIP_ONESHOT_SAFE: One shot does not require mask/unmask | 510 | * IRQCHIP_ONESHOT_SAFE: One shot does not require mask/unmask |
| 506 | * IRQCHIP_EOI_THREADED: Chip requires eoi() on unmask in threaded mode | 511 | * IRQCHIP_EOI_THREADED: Chip requires eoi() on unmask in threaded mode |
| 507 | * IRQCHIP_SUPPORTS_LEVEL_MSI Chip can provide two doorbells for Level MSIs | 512 | * IRQCHIP_SUPPORTS_LEVEL_MSI Chip can provide two doorbells for Level MSIs |
| 513 | * IRQCHIP_SUPPORTS_NMI: Chip can deliver NMIs, only for root irqchips | ||
| 508 | */ | 514 | */ |
| 509 | enum { | 515 | enum { |
| 510 | IRQCHIP_SET_TYPE_MASKED = (1 << 0), | 516 | IRQCHIP_SET_TYPE_MASKED = (1 << 0), |
| @@ -515,6 +521,7 @@ enum { | |||
| 515 | IRQCHIP_ONESHOT_SAFE = (1 << 5), | 521 | IRQCHIP_ONESHOT_SAFE = (1 << 5), |
| 516 | IRQCHIP_EOI_THREADED = (1 << 6), | 522 | IRQCHIP_EOI_THREADED = (1 << 6), |
| 517 | IRQCHIP_SUPPORTS_LEVEL_MSI = (1 << 7), | 523 | IRQCHIP_SUPPORTS_LEVEL_MSI = (1 << 7), |
| 524 | IRQCHIP_SUPPORTS_NMI = (1 << 8), | ||
| 518 | }; | 525 | }; |
| 519 | 526 | ||
| 520 | #include <linux/irqdesc.h> | 527 | #include <linux/irqdesc.h> |
| @@ -594,6 +601,9 @@ extern void handle_percpu_devid_irq(struct irq_desc *desc); | |||
| 594 | extern void handle_bad_irq(struct irq_desc *desc); | 601 | extern void handle_bad_irq(struct irq_desc *desc); |
| 595 | extern void handle_nested_irq(unsigned int irq); | 602 | extern void handle_nested_irq(unsigned int irq); |
| 596 | 603 | ||
| 604 | extern void handle_fasteoi_nmi(struct irq_desc *desc); | ||
| 605 | extern void handle_percpu_devid_fasteoi_nmi(struct irq_desc *desc); | ||
| 606 | |||
| 597 | extern int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg); | 607 | extern int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg); |
| 598 | extern int irq_chip_pm_get(struct irq_data *data); | 608 | extern int irq_chip_pm_get(struct irq_data *data); |
| 599 | extern int irq_chip_pm_put(struct irq_data *data); | 609 | extern int irq_chip_pm_put(struct irq_data *data); |
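Only a root irqchip (one without parent_data) that is not sitting behind a slow bus may set IRQCHIP_SUPPORTS_NMI, and the two new callbacks give it a hook to reconfigure a line when the core switches it to or from NMI delivery. A chip-side sketch, assuming a hypothetical controller that distinguishes NMIs by priority; every "foo" helper and constant is made up:

  static int foo_irq_nmi_setup(struct irq_data *d)
  {
      /* e.g. raise the line above the normal interrupt priority mask */
      return foo_set_line_priority(d->hwirq, FOO_NMI_PRIO);
  }

  static void foo_irq_nmi_teardown(struct irq_data *d)
  {
      foo_set_line_priority(d->hwirq, FOO_NORMAL_PRIO);
  }

  static struct irq_chip foo_irq_chip = {
      .name             = "foo",
      .irq_mask         = foo_irq_mask,
      .irq_unmask       = foo_irq_unmask,
      .irq_eoi          = foo_irq_eoi,
      .irq_nmi_setup    = foo_irq_nmi_setup,
      .irq_nmi_teardown = foo_irq_nmi_teardown,
      .flags            = IRQCHIP_SUPPORTS_NMI,
  };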
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h index dd1e40ddac7d..d6e2ab538ef2 100644 --- a/include/linux/irqdesc.h +++ b/include/linux/irqdesc.h | |||
| @@ -28,6 +28,7 @@ struct pt_regs; | |||
| 28 | * @core_internal_state__do_not_mess_with_it: core internal status information | 28 | * @core_internal_state__do_not_mess_with_it: core internal status information |
| 29 | * @depth: disable-depth, for nested irq_disable() calls | 29 | * @depth: disable-depth, for nested irq_disable() calls |
| 30 | * @wake_depth: enable depth, for multiple irq_set_irq_wake() callers | 30 | * @wake_depth: enable depth, for multiple irq_set_irq_wake() callers |
| 31 | * @tot_count: stats field for non-percpu irqs | ||
| 31 | * @irq_count: stats field to detect stalled irqs | 32 | * @irq_count: stats field to detect stalled irqs |
| 32 | * @last_unhandled: aging timer for unhandled count | 33 | * @last_unhandled: aging timer for unhandled count |
| 33 | * @irqs_unhandled: stats field for spurious unhandled interrupts | 34 | * @irqs_unhandled: stats field for spurious unhandled interrupts |
| @@ -65,6 +66,7 @@ struct irq_desc { | |||
| 65 | unsigned int core_internal_state__do_not_mess_with_it; | 66 | unsigned int core_internal_state__do_not_mess_with_it; |
| 66 | unsigned int depth; /* nested irq disables */ | 67 | unsigned int depth; /* nested irq disables */ |
| 67 | unsigned int wake_depth; /* nested wake enables */ | 68 | unsigned int wake_depth; /* nested wake enables */ |
| 69 | unsigned int tot_count; | ||
| 68 | unsigned int irq_count; /* For detecting broken IRQs */ | 70 | unsigned int irq_count; /* For detecting broken IRQs */ |
| 69 | unsigned long last_unhandled; /* Aging timer for unhandled count */ | 71 | unsigned long last_unhandled; /* Aging timer for unhandled count */ |
| 70 | unsigned int irqs_unhandled; | 72 | unsigned int irqs_unhandled; |
| @@ -171,6 +173,11 @@ static inline int handle_domain_irq(struct irq_domain *domain, | |||
| 171 | { | 173 | { |
| 172 | return __handle_domain_irq(domain, hwirq, true, regs); | 174 | return __handle_domain_irq(domain, hwirq, true, regs); |
| 173 | } | 175 | } |
| 176 | |||
| 177 | #ifdef CONFIG_IRQ_DOMAIN | ||
| 178 | int handle_domain_nmi(struct irq_domain *domain, unsigned int hwirq, | ||
| 179 | struct pt_regs *regs); | ||
| 180 | #endif | ||
| 174 | #endif | 181 | #endif |
| 175 | 182 | ||
| 176 | /* Test to see if a driver has successfully requested an irq */ | 183 | /* Test to see if a driver has successfully requested an irq */ |
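handle_domain_nmi() is the NMI-context counterpart of handle_domain_irq(): it brackets the hwirq lookup and handler invocation with nmi_enter()/nmi_exit() and returns -EINVAL on a failed lookup, since ack_bad_irq() is not NMI-safe. A sketch of a low-level entry point dispatching between the two paths; the "foo" helpers and the foo_domain pointer are hypothetical:

  static void foo_handle_irq(struct pt_regs *regs)
  {
      u32 hwirq = foo_ack_pending();          /* hypothetical register read */

      if (foo_irq_is_nmi(hwirq))              /* hypothetical classification */
          handle_domain_nmi(foo_domain, hwirq, regs);
      else
          handle_domain_irq(foo_domain, hwirq, regs);
  }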
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h index 35965f41d7be..d2130dc7c0e6 100644 --- a/include/linux/irqdomain.h +++ b/include/linux/irqdomain.h | |||
| @@ -265,6 +265,7 @@ extern struct irq_domain *irq_find_matching_fwspec(struct irq_fwspec *fwspec, | |||
| 265 | enum irq_domain_bus_token bus_token); | 265 | enum irq_domain_bus_token bus_token); |
| 266 | extern bool irq_domain_check_msi_remap(void); | 266 | extern bool irq_domain_check_msi_remap(void); |
| 267 | extern void irq_set_default_host(struct irq_domain *host); | 267 | extern void irq_set_default_host(struct irq_domain *host); |
| 268 | extern struct irq_domain *irq_get_default_host(void); | ||
| 268 | extern int irq_domain_alloc_descs(int virq, unsigned int nr_irqs, | 269 | extern int irq_domain_alloc_descs(int virq, unsigned int nr_irqs, |
| 269 | irq_hw_number_t hwirq, int node, | 270 | irq_hw_number_t hwirq, int node, |
| 270 | const struct irq_affinity_desc *affinity); | 271 | const struct irq_affinity_desc *affinity); |
diff --git a/include/linux/kthread.h b/include/linux/kthread.h index c1961761311d..1577a2d56e9d 100644 --- a/include/linux/kthread.h +++ b/include/linux/kthread.h | |||
| @@ -56,6 +56,7 @@ void kthread_bind_mask(struct task_struct *k, const struct cpumask *mask); | |||
| 56 | int kthread_stop(struct task_struct *k); | 56 | int kthread_stop(struct task_struct *k); |
| 57 | bool kthread_should_stop(void); | 57 | bool kthread_should_stop(void); |
| 58 | bool kthread_should_park(void); | 58 | bool kthread_should_park(void); |
| 59 | bool __kthread_should_park(struct task_struct *k); | ||
| 59 | bool kthread_freezable_should_stop(bool *was_frozen); | 60 | bool kthread_freezable_should_stop(bool *was_frozen); |
| 60 | void *kthread_data(struct task_struct *k); | 61 | void *kthread_data(struct task_struct *k); |
| 61 | void *kthread_probe_data(struct task_struct *k); | 62 | void *kthread_probe_data(struct task_struct *k); |
diff --git a/include/linux/pci.h b/include/linux/pci.h index 65f1d8c2f082..e7c51b00cdfe 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h | |||
| @@ -1393,7 +1393,7 @@ static inline int pci_enable_msix_exact(struct pci_dev *dev, | |||
| 1393 | } | 1393 | } |
| 1394 | int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs, | 1394 | int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs, |
| 1395 | unsigned int max_vecs, unsigned int flags, | 1395 | unsigned int max_vecs, unsigned int flags, |
| 1396 | const struct irq_affinity *affd); | 1396 | struct irq_affinity *affd); |
| 1397 | 1397 | ||
| 1398 | void pci_free_irq_vectors(struct pci_dev *dev); | 1398 | void pci_free_irq_vectors(struct pci_dev *dev); |
| 1399 | int pci_irq_vector(struct pci_dev *dev, unsigned int nr); | 1399 | int pci_irq_vector(struct pci_dev *dev, unsigned int nr); |
| @@ -1419,7 +1419,7 @@ static inline int pci_enable_msix_exact(struct pci_dev *dev, | |||
| 1419 | static inline int | 1419 | static inline int |
| 1420 | pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs, | 1420 | pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs, |
| 1421 | unsigned int max_vecs, unsigned int flags, | 1421 | unsigned int max_vecs, unsigned int flags, |
| 1422 | const struct irq_affinity *aff_desc) | 1422 | struct irq_affinity *aff_desc) |
| 1423 | { | 1423 | { |
| 1424 | if ((flags & PCI_IRQ_LEGACY) && min_vecs == 1 && dev->irq) | 1424 | if ((flags & PCI_IRQ_LEGACY) && min_vecs == 1 && dev->irq) |
| 1425 | return 1; | 1425 | return 1; |
diff --git a/kernel/irq/affinity.c b/kernel/irq/affinity.c index 45b68b4ea48b..f18cd5aa33e8 100644 --- a/kernel/irq/affinity.c +++ b/kernel/irq/affinity.c | |||
| @@ -9,7 +9,7 @@ | |||
| 9 | #include <linux/cpu.h> | 9 | #include <linux/cpu.h> |
| 10 | 10 | ||
| 11 | static void irq_spread_init_one(struct cpumask *irqmsk, struct cpumask *nmsk, | 11 | static void irq_spread_init_one(struct cpumask *irqmsk, struct cpumask *nmsk, |
| 12 | int cpus_per_vec) | 12 | unsigned int cpus_per_vec) |
| 13 | { | 13 | { |
| 14 | const struct cpumask *siblmsk; | 14 | const struct cpumask *siblmsk; |
| 15 | int cpu, sibl; | 15 | int cpu, sibl; |
| @@ -95,15 +95,17 @@ static int get_nodes_in_cpumask(cpumask_var_t *node_to_cpumask, | |||
| 95 | } | 95 | } |
| 96 | 96 | ||
| 97 | static int __irq_build_affinity_masks(const struct irq_affinity *affd, | 97 | static int __irq_build_affinity_masks(const struct irq_affinity *affd, |
| 98 | int startvec, int numvecs, int firstvec, | 98 | unsigned int startvec, |
| 99 | unsigned int numvecs, | ||
| 100 | unsigned int firstvec, | ||
| 99 | cpumask_var_t *node_to_cpumask, | 101 | cpumask_var_t *node_to_cpumask, |
| 100 | const struct cpumask *cpu_mask, | 102 | const struct cpumask *cpu_mask, |
| 101 | struct cpumask *nmsk, | 103 | struct cpumask *nmsk, |
| 102 | struct irq_affinity_desc *masks) | 104 | struct irq_affinity_desc *masks) |
| 103 | { | 105 | { |
| 104 | int n, nodes, cpus_per_vec, extra_vecs, done = 0; | 106 | unsigned int n, nodes, cpus_per_vec, extra_vecs, done = 0; |
| 105 | int last_affv = firstvec + numvecs; | 107 | unsigned int last_affv = firstvec + numvecs; |
| 106 | int curvec = startvec; | 108 | unsigned int curvec = startvec; |
| 107 | nodemask_t nodemsk = NODE_MASK_NONE; | 109 | nodemask_t nodemsk = NODE_MASK_NONE; |
| 108 | 110 | ||
| 109 | if (!cpumask_weight(cpu_mask)) | 111 | if (!cpumask_weight(cpu_mask)) |
| @@ -117,18 +119,16 @@ static int __irq_build_affinity_masks(const struct irq_affinity *affd, | |||
| 117 | */ | 119 | */ |
| 118 | if (numvecs <= nodes) { | 120 | if (numvecs <= nodes) { |
| 119 | for_each_node_mask(n, nodemsk) { | 121 | for_each_node_mask(n, nodemsk) { |
| 120 | cpumask_or(&masks[curvec].mask, | 122 | cpumask_or(&masks[curvec].mask, &masks[curvec].mask, |
| 121 | &masks[curvec].mask, | 123 | node_to_cpumask[n]); |
| 122 | node_to_cpumask[n]); | ||
| 123 | if (++curvec == last_affv) | 124 | if (++curvec == last_affv) |
| 124 | curvec = firstvec; | 125 | curvec = firstvec; |
| 125 | } | 126 | } |
| 126 | done = numvecs; | 127 | return numvecs; |
| 127 | goto out; | ||
| 128 | } | 128 | } |
| 129 | 129 | ||
| 130 | for_each_node_mask(n, nodemsk) { | 130 | for_each_node_mask(n, nodemsk) { |
| 131 | int ncpus, v, vecs_to_assign, vecs_per_node; | 131 | unsigned int ncpus, v, vecs_to_assign, vecs_per_node; |
| 132 | 132 | ||
| 133 | /* Spread the vectors per node */ | 133 | /* Spread the vectors per node */ |
| 134 | vecs_per_node = (numvecs - (curvec - firstvec)) / nodes; | 134 | vecs_per_node = (numvecs - (curvec - firstvec)) / nodes; |
| @@ -163,8 +163,6 @@ static int __irq_build_affinity_masks(const struct irq_affinity *affd, | |||
| 163 | curvec = firstvec; | 163 | curvec = firstvec; |
| 164 | --nodes; | 164 | --nodes; |
| 165 | } | 165 | } |
| 166 | |||
| 167 | out: | ||
| 168 | return done; | 166 | return done; |
| 169 | } | 167 | } |
| 170 | 168 | ||
| @@ -174,19 +172,24 @@ out: | |||
| 174 | * 2) spread other possible CPUs on these vectors | 172 | * 2) spread other possible CPUs on these vectors |
| 175 | */ | 173 | */ |
| 176 | static int irq_build_affinity_masks(const struct irq_affinity *affd, | 174 | static int irq_build_affinity_masks(const struct irq_affinity *affd, |
| 177 | int startvec, int numvecs, int firstvec, | 175 | unsigned int startvec, unsigned int numvecs, |
| 178 | cpumask_var_t *node_to_cpumask, | 176 | unsigned int firstvec, |
| 179 | struct irq_affinity_desc *masks) | 177 | struct irq_affinity_desc *masks) |
| 180 | { | 178 | { |
| 181 | int curvec = startvec, nr_present, nr_others; | 179 | unsigned int curvec = startvec, nr_present, nr_others; |
| 182 | int ret = -ENOMEM; | 180 | cpumask_var_t *node_to_cpumask; |
| 183 | cpumask_var_t nmsk, npresmsk; | 181 | cpumask_var_t nmsk, npresmsk; |
| 182 | int ret = -ENOMEM; | ||
| 184 | 183 | ||
| 185 | if (!zalloc_cpumask_var(&nmsk, GFP_KERNEL)) | 184 | if (!zalloc_cpumask_var(&nmsk, GFP_KERNEL)) |
| 186 | return ret; | 185 | return ret; |
| 187 | 186 | ||
| 188 | if (!zalloc_cpumask_var(&npresmsk, GFP_KERNEL)) | 187 | if (!zalloc_cpumask_var(&npresmsk, GFP_KERNEL)) |
| 189 | goto fail; | 188 | goto fail_nmsk; |
| 189 | |||
| 190 | node_to_cpumask = alloc_node_to_cpumask(); | ||
| 191 | if (!node_to_cpumask) | ||
| 192 | goto fail_npresmsk; | ||
| 190 | 193 | ||
| 191 | ret = 0; | 194 | ret = 0; |
| 192 | /* Stabilize the cpumasks */ | 195 | /* Stabilize the cpumasks */ |
| @@ -217,13 +220,22 @@ static int irq_build_affinity_masks(const struct irq_affinity *affd, | |||
| 217 | if (nr_present < numvecs) | 220 | if (nr_present < numvecs) |
| 218 | WARN_ON(nr_present + nr_others < numvecs); | 221 | WARN_ON(nr_present + nr_others < numvecs); |
| 219 | 222 | ||
| 223 | free_node_to_cpumask(node_to_cpumask); | ||
| 224 | |||
| 225 | fail_npresmsk: | ||
| 220 | free_cpumask_var(npresmsk); | 226 | free_cpumask_var(npresmsk); |
| 221 | 227 | ||
| 222 | fail: | 228 | fail_nmsk: |
| 223 | free_cpumask_var(nmsk); | 229 | free_cpumask_var(nmsk); |
| 224 | return ret; | 230 | return ret; |
| 225 | } | 231 | } |
| 226 | 232 | ||
| 233 | static void default_calc_sets(struct irq_affinity *affd, unsigned int affvecs) | ||
| 234 | { | ||
| 235 | affd->nr_sets = 1; | ||
| 236 | affd->set_size[0] = affvecs; | ||
| 237 | } | ||
| 238 | |||
| 227 | /** | 239 | /** |
| 228 | * irq_create_affinity_masks - Create affinity masks for multiqueue spreading | 240 | * irq_create_affinity_masks - Create affinity masks for multiqueue spreading |
| 229 | * @nvecs: The total number of vectors | 241 | * @nvecs: The total number of vectors |
| @@ -232,50 +244,62 @@ static int irq_build_affinity_masks(const struct irq_affinity *affd, | |||
| 232 | * Returns the irq_affinity_desc pointer or NULL if allocation failed. | 244 | * Returns the irq_affinity_desc pointer or NULL if allocation failed. |
| 233 | */ | 245 | */ |
| 234 | struct irq_affinity_desc * | 246 | struct irq_affinity_desc * |
| 235 | irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd) | 247 | irq_create_affinity_masks(unsigned int nvecs, struct irq_affinity *affd) |
| 236 | { | 248 | { |
| 237 | int affvecs = nvecs - affd->pre_vectors - affd->post_vectors; | 249 | unsigned int affvecs, curvec, usedvecs, i; |
| 238 | int curvec, usedvecs; | ||
| 239 | cpumask_var_t *node_to_cpumask; | ||
| 240 | struct irq_affinity_desc *masks = NULL; | 250 | struct irq_affinity_desc *masks = NULL; |
| 241 | int i, nr_sets; | ||
| 242 | 251 | ||
| 243 | /* | 252 | /* |
| 244 | * If there aren't any vectors left after applying the pre/post | 253 | * Determine the number of vectors which need interrupt affinities |
| 245 | * vectors don't bother with assigning affinity. | 254 | * assigned. If the pre/post request exhausts the available vectors |
| 255 | * then nothing to do here except for invoking the calc_sets() | ||
| 256 | * callback so the device driver can adjust to the situation. If there | ||
| 257 | * is only a single vector, then managing the queue is pointless as | ||
| 258 | * well. | ||
| 246 | */ | 259 | */ |
| 247 | if (nvecs == affd->pre_vectors + affd->post_vectors) | 260 | if (nvecs > 1 && nvecs > affd->pre_vectors + affd->post_vectors) |
| 261 | affvecs = nvecs - affd->pre_vectors - affd->post_vectors; | ||
| 262 | else | ||
| 263 | affvecs = 0; | ||
| 264 | |||
| 265 | /* | ||
| 266 | * Simple invocations do not provide a calc_sets() callback. Install | ||
| 267 | * the generic one. | ||
| 268 | */ | ||
| 269 | if (!affd->calc_sets) | ||
| 270 | affd->calc_sets = default_calc_sets; | ||
| 271 | |||
| 272 | /* Recalculate the sets */ | ||
| 273 | affd->calc_sets(affd, affvecs); | ||
| 274 | |||
| 275 | if (WARN_ON_ONCE(affd->nr_sets > IRQ_AFFINITY_MAX_SETS)) | ||
| 248 | return NULL; | 276 | return NULL; |
| 249 | 277 | ||
| 250 | node_to_cpumask = alloc_node_to_cpumask(); | 278 | /* Nothing to assign? */ |
| 251 | if (!node_to_cpumask) | 279 | if (!affvecs) |
| 252 | return NULL; | 280 | return NULL; |
| 253 | 281 | ||
| 254 | masks = kcalloc(nvecs, sizeof(*masks), GFP_KERNEL); | 282 | masks = kcalloc(nvecs, sizeof(*masks), GFP_KERNEL); |
| 255 | if (!masks) | 283 | if (!masks) |
| 256 | goto outnodemsk; | 284 | return NULL; |
| 257 | 285 | ||
| 258 | /* Fill out vectors at the beginning that don't need affinity */ | 286 | /* Fill out vectors at the beginning that don't need affinity */ |
| 259 | for (curvec = 0; curvec < affd->pre_vectors; curvec++) | 287 | for (curvec = 0; curvec < affd->pre_vectors; curvec++) |
| 260 | cpumask_copy(&masks[curvec].mask, irq_default_affinity); | 288 | cpumask_copy(&masks[curvec].mask, irq_default_affinity); |
| 289 | |||
| 261 | /* | 290 | /* |
| 262 | * Spread on present CPUs starting from affd->pre_vectors. If we | 291 | * Spread on present CPUs starting from affd->pre_vectors. If we |
| 263 | * have multiple sets, build each sets affinity mask separately. | 292 | * have multiple sets, build each sets affinity mask separately. |
| 264 | */ | 293 | */ |
| 265 | nr_sets = affd->nr_sets; | 294 | for (i = 0, usedvecs = 0; i < affd->nr_sets; i++) { |
| 266 | if (!nr_sets) | 295 | unsigned int this_vecs = affd->set_size[i]; |
| 267 | nr_sets = 1; | ||
| 268 | |||
| 269 | for (i = 0, usedvecs = 0; i < nr_sets; i++) { | ||
| 270 | int this_vecs = affd->sets ? affd->sets[i] : affvecs; | ||
| 271 | int ret; | 296 | int ret; |
| 272 | 297 | ||
| 273 | ret = irq_build_affinity_masks(affd, curvec, this_vecs, | 298 | ret = irq_build_affinity_masks(affd, curvec, this_vecs, |
| 274 | curvec, node_to_cpumask, masks); | 299 | curvec, masks); |
| 275 | if (ret) { | 300 | if (ret) { |
| 276 | kfree(masks); | 301 | kfree(masks); |
| 277 | masks = NULL; | 302 | return NULL; |
| 278 | goto outnodemsk; | ||
| 279 | } | 303 | } |
| 280 | curvec += this_vecs; | 304 | curvec += this_vecs; |
| 281 | usedvecs += this_vecs; | 305 | usedvecs += this_vecs; |
| @@ -293,8 +317,6 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd) | |||
| 293 | for (i = affd->pre_vectors; i < nvecs - affd->post_vectors; i++) | 317 | for (i = affd->pre_vectors; i < nvecs - affd->post_vectors; i++) |
| 294 | masks[i].is_managed = 1; | 318 | masks[i].is_managed = 1; |
| 295 | 319 | ||
| 296 | outnodemsk: | ||
| 297 | free_node_to_cpumask(node_to_cpumask); | ||
| 298 | return masks; | 320 | return masks; |
| 299 | } | 321 | } |
| 300 | 322 | ||
| @@ -304,25 +326,22 @@ outnodemsk: | |||
| 304 | * @maxvec: The maximum number of vectors available | 326 | * @maxvec: The maximum number of vectors available |
| 305 | * @affd: Description of the affinity requirements | 327 | * @affd: Description of the affinity requirements |
| 306 | */ | 328 | */ |
| 307 | int irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity *affd) | 329 | unsigned int irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec, |
| 330 | const struct irq_affinity *affd) | ||
| 308 | { | 331 | { |
| 309 | int resv = affd->pre_vectors + affd->post_vectors; | 332 | unsigned int resv = affd->pre_vectors + affd->post_vectors; |
| 310 | int vecs = maxvec - resv; | 333 | unsigned int set_vecs; |
| 311 | int set_vecs; | ||
| 312 | 334 | ||
| 313 | if (resv > minvec) | 335 | if (resv > minvec) |
| 314 | return 0; | 336 | return 0; |
| 315 | 337 | ||
| 316 | if (affd->nr_sets) { | 338 | if (affd->calc_sets) { |
| 317 | int i; | 339 | set_vecs = maxvec - resv; |
| 318 | |||
| 319 | for (i = 0, set_vecs = 0; i < affd->nr_sets; i++) | ||
| 320 | set_vecs += affd->sets[i]; | ||
| 321 | } else { | 340 | } else { |
| 322 | get_online_cpus(); | 341 | get_online_cpus(); |
| 323 | set_vecs = cpumask_weight(cpu_possible_mask); | 342 | set_vecs = cpumask_weight(cpu_possible_mask); |
| 324 | put_online_cpus(); | 343 | put_online_cpus(); |
| 325 | } | 344 | } |
| 326 | 345 | ||
| 327 | return resv + min(set_vecs, vecs); | 346 | return resv + min(set_vecs, maxvec - resv); |
| 328 | } | 347 | } |
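With a calc_sets() callback in place the core cannot know the individual set sizes up front, so irq_calc_affinity_vectors() now simply treats every non-reserved vector as spreadable and clamps the result to resv + min(set_vecs, maxvec - resv). As a worked example (values assumed, not from this series): with maxvec = 32, pre_vectors = 1, post_vectors = 0 and a callback present, resv = 1 and set_vecs = 31, so the function returns 1 + min(31, 31) = 32; without a callback on a machine with 16 possible CPUs, set_vecs = cpumask_weight(cpu_possible_mask) = 16 and the result is 1 + min(16, 31) = 17.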
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index 34e969069488..99b7dd6982a4 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c | |||
| @@ -730,6 +730,37 @@ out: | |||
| 730 | EXPORT_SYMBOL_GPL(handle_fasteoi_irq); | 730 | EXPORT_SYMBOL_GPL(handle_fasteoi_irq); |
| 731 | 731 | ||
| 732 | /** | 732 | /** |
| 733 | * handle_fasteoi_nmi - irq handler for NMI interrupt lines | ||
| 734 | * @desc: the interrupt description structure for this irq | ||
| 735 | * | ||
| 736 | * A simple NMI-safe handler, considering the restrictions | ||
| 737 | * from request_nmi. | ||
| 738 | * | ||
| 739 | * Only a single callback will be issued to the chip: an ->eoi() | ||
| 740 | * call when the interrupt has been serviced. This enables support | ||
| 741 | * for modern forms of interrupt handlers, which handle the flow | ||
| 742 | * details in hardware, transparently. | ||
| 743 | */ | ||
| 744 | void handle_fasteoi_nmi(struct irq_desc *desc) | ||
| 745 | { | ||
| 746 | struct irq_chip *chip = irq_desc_get_chip(desc); | ||
| 747 | struct irqaction *action = desc->action; | ||
| 748 | unsigned int irq = irq_desc_get_irq(desc); | ||
| 749 | irqreturn_t res; | ||
| 750 | |||
| 751 | trace_irq_handler_entry(irq, action); | ||
| 752 | /* | ||
| 753 | * NMIs cannot be shared, there is only one action. | ||
| 754 | */ | ||
| 755 | res = action->handler(irq, action->dev_id); | ||
| 756 | trace_irq_handler_exit(irq, action, res); | ||
| 757 | |||
| 758 | if (chip->irq_eoi) | ||
| 759 | chip->irq_eoi(&desc->irq_data); | ||
| 760 | } | ||
| 761 | EXPORT_SYMBOL_GPL(handle_fasteoi_nmi); | ||
| 762 | |||
| 763 | /** | ||
| 733 | * handle_edge_irq - edge type IRQ handler | 764 | * handle_edge_irq - edge type IRQ handler |
| 734 | * @desc: the interrupt description structure for this irq | 765 | * @desc: the interrupt description structure for this irq |
| 735 | * | 766 | * |
| @@ -855,7 +886,11 @@ void handle_percpu_irq(struct irq_desc *desc) | |||
| 855 | { | 886 | { |
| 856 | struct irq_chip *chip = irq_desc_get_chip(desc); | 887 | struct irq_chip *chip = irq_desc_get_chip(desc); |
| 857 | 888 | ||
| 858 | kstat_incr_irqs_this_cpu(desc); | 889 | /* |
| 890 | * PER CPU interrupts are not serialized. Do not touch | ||
| 891 | * desc->tot_count. | ||
| 892 | */ | ||
| 893 | __kstat_incr_irqs_this_cpu(desc); | ||
| 859 | 894 | ||
| 860 | if (chip->irq_ack) | 895 | if (chip->irq_ack) |
| 861 | chip->irq_ack(&desc->irq_data); | 896 | chip->irq_ack(&desc->irq_data); |
| @@ -884,7 +919,11 @@ void handle_percpu_devid_irq(struct irq_desc *desc) | |||
| 884 | unsigned int irq = irq_desc_get_irq(desc); | 919 | unsigned int irq = irq_desc_get_irq(desc); |
| 885 | irqreturn_t res; | 920 | irqreturn_t res; |
| 886 | 921 | ||
| 887 | kstat_incr_irqs_this_cpu(desc); | 922 | /* |
| 923 | * PER CPU interrupts are not serialized. Do not touch | ||
| 924 | * desc->tot_count. | ||
| 925 | */ | ||
| 926 | __kstat_incr_irqs_this_cpu(desc); | ||
| 888 | 927 | ||
| 889 | if (chip->irq_ack) | 928 | if (chip->irq_ack) |
| 890 | chip->irq_ack(&desc->irq_data); | 929 | chip->irq_ack(&desc->irq_data); |
| @@ -908,6 +947,29 @@ void handle_percpu_devid_irq(struct irq_desc *desc) | |||
| 908 | chip->irq_eoi(&desc->irq_data); | 947 | chip->irq_eoi(&desc->irq_data); |
| 909 | } | 948 | } |
| 910 | 949 | ||
| 950 | /** | ||
| 951 | * handle_percpu_devid_fasteoi_nmi - Per CPU local NMI handler with per cpu | ||
| 952 | * dev ids | ||
| 953 | * @desc: the interrupt description structure for this irq | ||
| 954 | * | ||
| 955 | * Similar to handle_fasteoi_nmi, but handling the dev_id cookie | ||
| 956 | * as a percpu pointer. | ||
| 957 | */ | ||
| 958 | void handle_percpu_devid_fasteoi_nmi(struct irq_desc *desc) | ||
| 959 | { | ||
| 960 | struct irq_chip *chip = irq_desc_get_chip(desc); | ||
| 961 | struct irqaction *action = desc->action; | ||
| 962 | unsigned int irq = irq_desc_get_irq(desc); | ||
| 963 | irqreturn_t res; | ||
| 964 | |||
| 965 | trace_irq_handler_entry(irq, action); | ||
| 966 | res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id)); | ||
| 967 | trace_irq_handler_exit(irq, action, res); | ||
| 968 | |||
| 969 | if (chip->irq_eoi) | ||
| 970 | chip->irq_eoi(&desc->irq_data); | ||
| 971 | } | ||
| 972 | |||
| 911 | static void | 973 | static void |
| 912 | __irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle, | 974 | __irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle, |
| 913 | int is_chained, const char *name) | 975 | int is_chained, const char *name) |
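Both NMI flow handlers issue exactly one chip callback, the final ->irq_eoi(), and skip the usual lock/ack/mask handling, which is what keeps them usable from NMI context. They still have to be installed explicitly when a line is switched over; a sketch of doing so from a chip's irq_nmi_setup()/irq_nmi_teardown() hooks (complementing the hypothetical foo chip sketched earlier, using the existing irq_set_handler_locked() helper):

  static int foo_irq_nmi_setup(struct irq_data *d)
  {
      /* lines with per-CPU dev_ids would use handle_percpu_devid_fasteoi_nmi */
      irq_set_handler_locked(d, handle_fasteoi_nmi);
      return 0;
  }

  static void foo_irq_nmi_teardown(struct irq_data *d)
  {
      irq_set_handler_locked(d, handle_fasteoi_irq);
  }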
diff --git a/kernel/irq/debugfs.c b/kernel/irq/debugfs.c index 6f636136cccc..516c00a5e867 100644 --- a/kernel/irq/debugfs.c +++ b/kernel/irq/debugfs.c | |||
| @@ -56,6 +56,7 @@ static const struct irq_bit_descr irqchip_flags[] = { | |||
| 56 | BIT_MASK_DESCR(IRQCHIP_ONESHOT_SAFE), | 56 | BIT_MASK_DESCR(IRQCHIP_ONESHOT_SAFE), |
| 57 | BIT_MASK_DESCR(IRQCHIP_EOI_THREADED), | 57 | BIT_MASK_DESCR(IRQCHIP_EOI_THREADED), |
| 58 | BIT_MASK_DESCR(IRQCHIP_SUPPORTS_LEVEL_MSI), | 58 | BIT_MASK_DESCR(IRQCHIP_SUPPORTS_LEVEL_MSI), |
| 59 | BIT_MASK_DESCR(IRQCHIP_SUPPORTS_NMI), | ||
| 59 | }; | 60 | }; |
| 60 | 61 | ||
| 61 | static void | 62 | static void |
| @@ -140,6 +141,7 @@ static const struct irq_bit_descr irqdesc_istates[] = { | |||
| 140 | BIT_MASK_DESCR(IRQS_WAITING), | 141 | BIT_MASK_DESCR(IRQS_WAITING), |
| 141 | BIT_MASK_DESCR(IRQS_PENDING), | 142 | BIT_MASK_DESCR(IRQS_PENDING), |
| 142 | BIT_MASK_DESCR(IRQS_SUSPENDED), | 143 | BIT_MASK_DESCR(IRQS_SUSPENDED), |
| 144 | BIT_MASK_DESCR(IRQS_NMI), | ||
| 143 | }; | 145 | }; |
| 144 | 146 | ||
| 145 | 147 | ||
| @@ -203,8 +205,8 @@ static ssize_t irq_debug_write(struct file *file, const char __user *user_buf, | |||
| 203 | chip_bus_lock(desc); | 205 | chip_bus_lock(desc); |
| 204 | raw_spin_lock_irqsave(&desc->lock, flags); | 206 | raw_spin_lock_irqsave(&desc->lock, flags); |
| 205 | 207 | ||
| 206 | if (irq_settings_is_level(desc)) { | 208 | if (irq_settings_is_level(desc) || desc->istate & IRQS_NMI) { |
| 207 | /* Can't do level, sorry */ | 209 | /* Can't do level nor NMIs, sorry */ |
| 208 | err = -EINVAL; | 210 | err = -EINVAL; |
| 209 | } else { | 211 | } else { |
| 210 | desc->istate |= IRQS_PENDING; | 212 | desc->istate |= IRQS_PENDING; |
| @@ -256,8 +258,6 @@ static int __init irq_debugfs_init(void) | |||
| 256 | int irq; | 258 | int irq; |
| 257 | 259 | ||
| 258 | root_dir = debugfs_create_dir("irq", NULL); | 260 | root_dir = debugfs_create_dir("irq", NULL); |
| 259 | if (!root_dir) | ||
| 260 | return -ENOMEM; | ||
| 261 | 261 | ||
| 262 | irq_domain_debugfs_init(root_dir); | 262 | irq_domain_debugfs_init(root_dir); |
| 263 | 263 | ||
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c index 38554bc35375..6df5ddfdb0f8 100644 --- a/kernel/irq/handle.c +++ b/kernel/irq/handle.c | |||
| @@ -166,7 +166,7 @@ irqreturn_t __handle_irq_event_percpu(struct irq_desc *desc, unsigned int *flags | |||
| 166 | 166 | ||
| 167 | __irq_wake_thread(desc, action); | 167 | __irq_wake_thread(desc, action); |
| 168 | 168 | ||
| 169 | /* Fall through to add to randomness */ | 169 | /* Fall through - to add to randomness */ |
| 170 | case IRQ_HANDLED: | 170 | case IRQ_HANDLED: |
| 171 | *flags |= action->flags; | 171 | *flags |= action->flags; |
| 172 | break; | 172 | break; |
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h index ca6afa267070..70c3053bc1f6 100644 --- a/kernel/irq/internals.h +++ b/kernel/irq/internals.h | |||
| @@ -49,6 +49,7 @@ enum { | |||
| 49 | * IRQS_WAITING - irq is waiting | 49 | * IRQS_WAITING - irq is waiting |
| 50 | * IRQS_PENDING - irq is pending and replayed later | 50 | * IRQS_PENDING - irq is pending and replayed later |
| 51 | * IRQS_SUSPENDED - irq is suspended | 51 | * IRQS_SUSPENDED - irq is suspended |
| 52 | * IRQS_NMI - irq line is used to deliver NMIs | ||
| 52 | */ | 53 | */ |
| 53 | enum { | 54 | enum { |
| 54 | IRQS_AUTODETECT = 0x00000001, | 55 | IRQS_AUTODETECT = 0x00000001, |
| @@ -60,6 +61,7 @@ enum { | |||
| 60 | IRQS_PENDING = 0x00000200, | 61 | IRQS_PENDING = 0x00000200, |
| 61 | IRQS_SUSPENDED = 0x00000800, | 62 | IRQS_SUSPENDED = 0x00000800, |
| 62 | IRQS_TIMINGS = 0x00001000, | 63 | IRQS_TIMINGS = 0x00001000, |
| 64 | IRQS_NMI = 0x00002000, | ||
| 63 | }; | 65 | }; |
| 64 | 66 | ||
| 65 | #include "debug.h" | 67 | #include "debug.h" |
| @@ -242,12 +244,18 @@ static inline void irq_state_set_masked(struct irq_desc *desc) | |||
| 242 | 244 | ||
| 243 | #undef __irqd_to_state | 245 | #undef __irqd_to_state |
| 244 | 246 | ||
| 245 | static inline void kstat_incr_irqs_this_cpu(struct irq_desc *desc) | 247 | static inline void __kstat_incr_irqs_this_cpu(struct irq_desc *desc) |
| 246 | { | 248 | { |
| 247 | __this_cpu_inc(*desc->kstat_irqs); | 249 | __this_cpu_inc(*desc->kstat_irqs); |
| 248 | __this_cpu_inc(kstat.irqs_sum); | 250 | __this_cpu_inc(kstat.irqs_sum); |
| 249 | } | 251 | } |
| 250 | 252 | ||
| 253 | static inline void kstat_incr_irqs_this_cpu(struct irq_desc *desc) | ||
| 254 | { | ||
| 255 | __kstat_incr_irqs_this_cpu(desc); | ||
| 256 | desc->tot_count++; | ||
| 257 | } | ||
| 258 | |||
| 251 | static inline int irq_desc_get_node(struct irq_desc *desc) | 259 | static inline int irq_desc_get_node(struct irq_desc *desc) |
| 252 | { | 260 | { |
| 253 | return irq_common_data_get_node(&desc->irq_common_data); | 261 | return irq_common_data_get_node(&desc->irq_common_data); |
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c index ef8ad36cadcf..13539e12cd80 100644 --- a/kernel/irq/irqdesc.c +++ b/kernel/irq/irqdesc.c | |||
| @@ -119,6 +119,7 @@ static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node, | |||
| 119 | desc->depth = 1; | 119 | desc->depth = 1; |
| 120 | desc->irq_count = 0; | 120 | desc->irq_count = 0; |
| 121 | desc->irqs_unhandled = 0; | 121 | desc->irqs_unhandled = 0; |
| 122 | desc->tot_count = 0; | ||
| 122 | desc->name = NULL; | 123 | desc->name = NULL; |
| 123 | desc->owner = owner; | 124 | desc->owner = owner; |
| 124 | for_each_possible_cpu(cpu) | 125 | for_each_possible_cpu(cpu) |
| @@ -669,6 +670,41 @@ int __handle_domain_irq(struct irq_domain *domain, unsigned int hwirq, | |||
| 669 | set_irq_regs(old_regs); | 670 | set_irq_regs(old_regs); |
| 670 | return ret; | 671 | return ret; |
| 671 | } | 672 | } |
| 673 | |||
| 674 | #ifdef CONFIG_IRQ_DOMAIN | ||
| 675 | /** | ||
| 676 | * handle_domain_nmi - Invoke the handler for a HW irq belonging to a domain | ||
| 677 | * @domain: The domain where to perform the lookup | ||
| 678 | * @hwirq: The HW irq number to convert to a logical one | ||
| 679 | * @regs: Register file coming from the low-level handling code | ||
| 680 | * | ||
| 681 | * Returns: 0 on success, or -EINVAL if conversion has failed | ||
| 682 | */ | ||
| 683 | int handle_domain_nmi(struct irq_domain *domain, unsigned int hwirq, | ||
| 684 | struct pt_regs *regs) | ||
| 685 | { | ||
| 686 | struct pt_regs *old_regs = set_irq_regs(regs); | ||
| 687 | unsigned int irq; | ||
| 688 | int ret = 0; | ||
| 689 | |||
| 690 | nmi_enter(); | ||
| 691 | |||
| 692 | irq = irq_find_mapping(domain, hwirq); | ||
| 693 | |||
| 694 | /* | ||
| 695 | * ack_bad_irq is not NMI-safe, just report | ||
| 696 | * an invalid interrupt. | ||
| 697 | */ | ||
| 698 | if (likely(irq)) | ||
| 699 | generic_handle_irq(irq); | ||
| 700 | else | ||
| 701 | ret = -EINVAL; | ||
| 702 | |||
| 703 | nmi_exit(); | ||
| 704 | set_irq_regs(old_regs); | ||
| 705 | return ret; | ||
| 706 | } | ||
| 707 | #endif | ||
| 672 | #endif | 708 | #endif |
| 673 | 709 | ||
| 674 | /* Dynamic interrupt handling */ | 710 | /* Dynamic interrupt handling */ |
| @@ -919,11 +955,15 @@ unsigned int kstat_irqs_cpu(unsigned int irq, int cpu) | |||
| 919 | unsigned int kstat_irqs(unsigned int irq) | 955 | unsigned int kstat_irqs(unsigned int irq) |
| 920 | { | 956 | { |
| 921 | struct irq_desc *desc = irq_to_desc(irq); | 957 | struct irq_desc *desc = irq_to_desc(irq); |
| 922 | int cpu; | ||
| 923 | unsigned int sum = 0; | 958 | unsigned int sum = 0; |
| 959 | int cpu; | ||
| 924 | 960 | ||
| 925 | if (!desc || !desc->kstat_irqs) | 961 | if (!desc || !desc->kstat_irqs) |
| 926 | return 0; | 962 | return 0; |
| 963 | if (!irq_settings_is_per_cpu_devid(desc) && | ||
| 964 | !irq_settings_is_per_cpu(desc)) | ||
| 965 | return desc->tot_count; | ||
| 966 | |||
| 927 | for_each_possible_cpu(cpu) | 967 | for_each_possible_cpu(cpu) |
| 928 | sum += *per_cpu_ptr(desc->kstat_irqs, cpu); | 968 | sum += *per_cpu_ptr(desc->kstat_irqs, cpu); |
| 929 | return sum; | 969 | return sum; |
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c index 8b0be4bd6565..3bf9793d8825 100644 --- a/kernel/irq/irqdomain.c +++ b/kernel/irq/irqdomain.c | |||
| @@ -458,6 +458,20 @@ void irq_set_default_host(struct irq_domain *domain) | |||
| 458 | } | 458 | } |
| 459 | EXPORT_SYMBOL_GPL(irq_set_default_host); | 459 | EXPORT_SYMBOL_GPL(irq_set_default_host); |
| 460 | 460 | ||
| 461 | /** | ||
| 462 | * irq_get_default_host() - Retrieve the "default" irq domain | ||
| 463 | * | ||
| 464 | * Returns: the default domain, if any. | ||
| 465 | * | ||
| 466 | * Modern code should never use this. This should only be used on | ||
| 467 | * systems that cannot implement a firmware->fwnode mapping (which | ||
| 468 | * both DT and ACPI provide). | ||
| 469 | */ | ||
| 470 | struct irq_domain *irq_get_default_host(void) | ||
| 471 | { | ||
| 472 | return irq_default_domain; | ||
| 473 | } | ||
| 474 | |||
| 461 | static void irq_domain_clear_mapping(struct irq_domain *domain, | 475 | static void irq_domain_clear_mapping(struct irq_domain *domain, |
| 462 | irq_hw_number_t hwirq) | 476 | irq_hw_number_t hwirq) |
| 463 | { | 477 | { |
| @@ -1749,8 +1763,6 @@ void __init irq_domain_debugfs_init(struct dentry *root) | |||
| 1749 | struct irq_domain *d; | 1763 | struct irq_domain *d; |
| 1750 | 1764 | ||
| 1751 | domain_dir = debugfs_create_dir("domains", root); | 1765 | domain_dir = debugfs_create_dir("domains", root); |
| 1752 | if (!domain_dir) | ||
| 1753 | return; | ||
| 1754 | 1766 | ||
| 1755 | debugfs_create_file("default", 0444, domain_dir, NULL, | 1767 | debugfs_create_file("default", 0444, domain_dir, NULL, |
| 1756 | &irq_domain_debug_fops); | 1768 | &irq_domain_debug_fops); |
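irq_get_default_host() merely exposes the domain registered earlier via irq_set_default_host(); as the kernel-doc stresses, it exists for platforms that cannot provide a firmware-to-fwnode mapping. A sketch of the intended legacy use, with a hypothetical board file mapping a fixed hardware interrupt:

  /* Legacy board code with no usable fwnode for the root controller. */
  static unsigned int foo_board_map_timer_irq(irq_hw_number_t hwirq)
  {
      return irq_create_mapping(irq_get_default_host(), hwirq);
  }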
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 84b54a17b95d..9ec34a2a6638 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c | |||
| @@ -341,7 +341,7 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify) | |||
| 341 | /* The release function is promised process context */ | 341 | /* The release function is promised process context */ |
| 342 | might_sleep(); | 342 | might_sleep(); |
| 343 | 343 | ||
| 344 | if (!desc) | 344 | if (!desc || desc->istate & IRQS_NMI) |
| 345 | return -EINVAL; | 345 | return -EINVAL; |
| 346 | 346 | ||
| 347 | /* Complete initialisation of *notify */ | 347 | /* Complete initialisation of *notify */ |
| @@ -553,6 +553,21 @@ bool disable_hardirq(unsigned int irq) | |||
| 553 | } | 553 | } |
| 554 | EXPORT_SYMBOL_GPL(disable_hardirq); | 554 | EXPORT_SYMBOL_GPL(disable_hardirq); |
| 555 | 555 | ||
| 556 | /** | ||
| 557 | * disable_nmi_nosync - disable an nmi without waiting | ||
| 558 | * @irq: Interrupt to disable | ||
| 559 | * | ||
| 560 | * Disable the selected interrupt line. Disables and enables are | ||
| 561 | * nested. | ||
| 562 | * The interrupt to disable must have been requested through request_nmi. | ||
| 563 | * Unlike disable_nmi(), this function does not ensure existing | ||
| 564 | * instances of the IRQ handler have completed before returning. | ||
| 565 | */ | ||
| 566 | void disable_nmi_nosync(unsigned int irq) | ||
| 567 | { | ||
| 568 | disable_irq_nosync(irq); | ||
| 569 | } | ||
| 570 | |||
| 556 | void __enable_irq(struct irq_desc *desc) | 571 | void __enable_irq(struct irq_desc *desc) |
| 557 | { | 572 | { |
| 558 | switch (desc->depth) { | 573 | switch (desc->depth) { |
| @@ -609,6 +624,20 @@ out: | |||
| 609 | } | 624 | } |
| 610 | EXPORT_SYMBOL(enable_irq); | 625 | EXPORT_SYMBOL(enable_irq); |
| 611 | 626 | ||
| 627 | /** | ||
| 628 | * enable_nmi - enable handling of an nmi | ||
| 629 | * @irq: Interrupt to enable | ||
| 630 | * | ||
| 631 | * The interrupt to enable must have been requested through request_nmi. | ||
| 632 | * Undoes the effect of one call to disable_nmi(). If this | ||
| 633 | * matches the last disable, processing of interrupts on this | ||
| 634 | * IRQ line is re-enabled. | ||
| 635 | */ | ||
| 636 | void enable_nmi(unsigned int irq) | ||
| 637 | { | ||
| 638 | enable_irq(irq); | ||
| 639 | } | ||
| 640 | |||
| 612 | static int set_irq_wake_real(unsigned int irq, unsigned int on) | 641 | static int set_irq_wake_real(unsigned int irq, unsigned int on) |
| 613 | { | 642 | { |
| 614 | struct irq_desc *desc = irq_to_desc(irq); | 643 | struct irq_desc *desc = irq_to_desc(irq); |
| @@ -644,6 +673,12 @@ int irq_set_irq_wake(unsigned int irq, unsigned int on) | |||
| 644 | if (!desc) | 673 | if (!desc) |
| 645 | return -EINVAL; | 674 | return -EINVAL; |
| 646 | 675 | ||
| 676 | /* Don't use NMIs as wake up interrupts please */ | ||
| 677 | if (desc->istate & IRQS_NMI) { | ||
| 678 | ret = -EINVAL; | ||
| 679 | goto out_unlock; | ||
| 680 | } | ||
| 681 | |||
| 647 | /* wakeup-capable irqs can be shared between drivers that | 682 | /* wakeup-capable irqs can be shared between drivers that |
| 648 | * don't need to have the same sleep mode behaviors. | 683 | * don't need to have the same sleep mode behaviors. |
| 649 | */ | 684 | */ |
| @@ -666,6 +701,8 @@ int irq_set_irq_wake(unsigned int irq, unsigned int on) | |||
| 666 | irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE); | 701 | irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE); |
| 667 | } | 702 | } |
| 668 | } | 703 | } |
| 704 | |||
| 705 | out_unlock: | ||
| 669 | irq_put_desc_busunlock(desc, flags); | 706 | irq_put_desc_busunlock(desc, flags); |
| 670 | return ret; | 707 | return ret; |
| 671 | } | 708 | } |
| @@ -726,6 +763,7 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned long flags) | |||
| 726 | case IRQ_SET_MASK_OK_DONE: | 763 | case IRQ_SET_MASK_OK_DONE: |
| 727 | irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK); | 764 | irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK); |
| 728 | irqd_set(&desc->irq_data, flags); | 765 | irqd_set(&desc->irq_data, flags); |
| 766 | /* fall through */ | ||
| 729 | 767 | ||
| 730 | case IRQ_SET_MASK_OK_NOCOPY: | 768 | case IRQ_SET_MASK_OK_NOCOPY: |
| 731 | flags = irqd_get_trigger_type(&desc->irq_data); | 769 | flags = irqd_get_trigger_type(&desc->irq_data); |
| @@ -1128,6 +1166,39 @@ static void irq_release_resources(struct irq_desc *desc) | |||
| 1128 | c->irq_release_resources(d); | 1166 | c->irq_release_resources(d); |
| 1129 | } | 1167 | } |
| 1130 | 1168 | ||
| 1169 | static bool irq_supports_nmi(struct irq_desc *desc) | ||
| 1170 | { | ||
| 1171 | struct irq_data *d = irq_desc_get_irq_data(desc); | ||
| 1172 | |||
| 1173 | #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY | ||
| 1174 | /* Only IRQs directly managed by the root irqchip can be set as NMI */ | ||
| 1175 | if (d->parent_data) | ||
| 1176 | return false; | ||
| 1177 | #endif | ||
| 1178 | /* Don't support NMIs for chips behind a slow bus */ | ||
| 1179 | if (d->chip->irq_bus_lock || d->chip->irq_bus_sync_unlock) | ||
| 1180 | return false; | ||
| 1181 | |||
| 1182 | return d->chip->flags & IRQCHIP_SUPPORTS_NMI; | ||
| 1183 | } | ||
| 1184 | |||
| 1185 | static int irq_nmi_setup(struct irq_desc *desc) | ||
| 1186 | { | ||
| 1187 | struct irq_data *d = irq_desc_get_irq_data(desc); | ||
| 1188 | struct irq_chip *c = d->chip; | ||
| 1189 | |||
| 1190 | return c->irq_nmi_setup ? c->irq_nmi_setup(d) : -EINVAL; | ||
| 1191 | } | ||
| 1192 | |||
| 1193 | static void irq_nmi_teardown(struct irq_desc *desc) | ||
| 1194 | { | ||
| 1195 | struct irq_data *d = irq_desc_get_irq_data(desc); | ||
| 1196 | struct irq_chip *c = d->chip; | ||
| 1197 | |||
| 1198 | if (c->irq_nmi_teardown) | ||
| 1199 | c->irq_nmi_teardown(d); | ||
| 1200 | } | ||
| 1201 | |||
| 1131 | static int | 1202 | static int |
| 1132 | setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary) | 1203 | setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary) |
| 1133 | { | 1204 | { |
| @@ -1302,9 +1373,17 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) | |||
| 1302 | * fields must have IRQF_SHARED set and the bits which | 1373 | * fields must have IRQF_SHARED set and the bits which |
| 1303 | * set the trigger type must match. Also all must | 1374 | * set the trigger type must match. Also all must |
| 1304 | * agree on ONESHOT. | 1375 | * agree on ONESHOT. |
| 1376 | * Interrupt lines used for NMIs cannot be shared. | ||
| 1305 | */ | 1377 | */ |
| 1306 | unsigned int oldtype; | 1378 | unsigned int oldtype; |
| 1307 | 1379 | ||
| 1380 | if (desc->istate & IRQS_NMI) { | ||
| 1381 | pr_err("Invalid attempt to share NMI for %s (irq %d) on irqchip %s.\n", | ||
| 1382 | new->name, irq, desc->irq_data.chip->name); | ||
| 1383 | ret = -EINVAL; | ||
| 1384 | goto out_unlock; | ||
| 1385 | } | ||
| 1386 | |||
| 1308 | /* | 1387 | /* |
| 1309 | * If nobody did set the configuration before, inherit | 1388 | * If nobody did set the configuration before, inherit |
| 1310 | * the one provided by the requester. | 1389 | * the one provided by the requester. |
| @@ -1756,6 +1835,59 @@ const void *free_irq(unsigned int irq, void *dev_id) | |||
| 1756 | } | 1835 | } |
| 1757 | EXPORT_SYMBOL(free_irq); | 1836 | EXPORT_SYMBOL(free_irq); |
| 1758 | 1837 | ||
| 1838 | /* This function must be called with desc->lock held */ | ||
| 1839 | static const void *__cleanup_nmi(unsigned int irq, struct irq_desc *desc) | ||
| 1840 | { | ||
| 1841 | const char *devname = NULL; | ||
| 1842 | |||
| 1843 | desc->istate &= ~IRQS_NMI; | ||
| 1844 | |||
| 1845 | if (!WARN_ON(desc->action == NULL)) { | ||
| 1846 | irq_pm_remove_action(desc, desc->action); | ||
| 1847 | devname = desc->action->name; | ||
| 1848 | unregister_handler_proc(irq, desc->action); | ||
| 1849 | |||
| 1850 | kfree(desc->action); | ||
| 1851 | desc->action = NULL; | ||
| 1852 | } | ||
| 1853 | |||
| 1854 | irq_settings_clr_disable_unlazy(desc); | ||
| 1855 | irq_shutdown(desc); | ||
| 1856 | |||
| 1857 | irq_release_resources(desc); | ||
| 1858 | |||
| 1859 | irq_chip_pm_put(&desc->irq_data); | ||
| 1860 | module_put(desc->owner); | ||
| 1861 | |||
| 1862 | return devname; | ||
| 1863 | } | ||
| 1864 | |||
| 1865 | const void *free_nmi(unsigned int irq, void *dev_id) | ||
| 1866 | { | ||
| 1867 | struct irq_desc *desc = irq_to_desc(irq); | ||
| 1868 | unsigned long flags; | ||
| 1869 | const void *devname; | ||
| 1870 | |||
| 1871 | if (!desc || WARN_ON(!(desc->istate & IRQS_NMI))) | ||
| 1872 | return NULL; | ||
| 1873 | |||
| 1874 | if (WARN_ON(irq_settings_is_per_cpu_devid(desc))) | ||
| 1875 | return NULL; | ||
| 1876 | |||
| 1877 | /* NMI still enabled */ | ||
| 1878 | if (WARN_ON(desc->depth == 0)) | ||
| 1879 | disable_nmi_nosync(irq); | ||
| 1880 | |||
| 1881 | raw_spin_lock_irqsave(&desc->lock, flags); | ||
| 1882 | |||
| 1883 | irq_nmi_teardown(desc); | ||
| 1884 | devname = __cleanup_nmi(irq, desc); | ||
| 1885 | |||
| 1886 | raw_spin_unlock_irqrestore(&desc->lock, flags); | ||
| 1887 | |||
| 1888 | return devname; | ||
| 1889 | } | ||
| 1890 | |||
| 1759 | /** | 1891 | /** |
| 1760 | * request_threaded_irq - allocate an interrupt line | 1892 | * request_threaded_irq - allocate an interrupt line |
| 1761 | * @irq: Interrupt line to allocate | 1893 | * @irq: Interrupt line to allocate |
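
free_nmi() warns (and falls back to disable_nmi_nosync()) if the line is still enabled, so callers are expected to quiesce delivery before releasing it. A hedged sketch of the release path, assuming a hypothetical dev cookie that was passed to request_nmi():

	const void *name;

	disable_nmi_nosync(irq);	/* stop further NMI delivery; does not wait for a running handler */
	name = free_nmi(irq, dev);	/* returns the name string that was passed to request_nmi() */
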
| @@ -1925,6 +2057,101 @@ int request_any_context_irq(unsigned int irq, irq_handler_t handler, | |||
| 1925 | } | 2057 | } |
| 1926 | EXPORT_SYMBOL_GPL(request_any_context_irq); | 2058 | EXPORT_SYMBOL_GPL(request_any_context_irq); |
| 1927 | 2059 | ||
| 2060 | /** | ||
| 2061 | * request_nmi - allocate an interrupt line for NMI delivery | ||
| 2062 | * @irq: Interrupt line to allocate | ||
| 2063 | * @handler: Function to be called when the IRQ occurs. | ||
| 2064 | * Threaded handler for threaded interrupts. | ||
| 2065 | * @irqflags: Interrupt type flags | ||
| 2066 | * @name: An ascii name for the claiming device | ||
| 2067 | * @dev_id: A cookie passed back to the handler function | ||
| 2068 | * | ||
| 2069 | * This call allocates interrupt resources and enables the | ||
| 2070 | * interrupt line and IRQ handling. It sets up the IRQ line | ||
| 2071 | * to be handled as an NMI. | ||
| 2072 | * | ||
| 2073 | * An interrupt line delivering NMIs cannot be shared and IRQ handling | ||
| 2074 | * cannot be threaded. | ||
| 2075 | * | ||
| 2076 | * Interrupt lines requested for NMI delivery must produce per-CPU | ||
| 2077 | * interrupts and must have automatic enabling disabled. | ||
| 2078 | * | ||
| 2079 | * Dev_id must be globally unique. Normally the address of the | ||
| 2080 | * device data structure is used as the cookie. Since the handler | ||
| 2081 | * receives this value it makes sense to use it. | ||
| 2082 | * | ||
| 2083 | * If the interrupt line cannot be used to deliver NMIs, the function | ||
| 2084 | * will fail and return a negative value. | ||
| 2085 | */ | ||
| 2086 | int request_nmi(unsigned int irq, irq_handler_t handler, | ||
| 2087 | unsigned long irqflags, const char *name, void *dev_id) | ||
| 2088 | { | ||
| 2089 | struct irqaction *action; | ||
| 2090 | struct irq_desc *desc; | ||
| 2091 | unsigned long flags; | ||
| 2092 | int retval; | ||
| 2093 | |||
| 2094 | if (irq == IRQ_NOTCONNECTED) | ||
| 2095 | return -ENOTCONN; | ||
| 2096 | |||
| 2097 | /* NMIs cannot be shared or used for polling */ | ||
| 2098 | if (irqflags & (IRQF_SHARED | IRQF_COND_SUSPEND | IRQF_IRQPOLL)) | ||
| 2099 | return -EINVAL; | ||
| 2100 | |||
| 2101 | if (!(irqflags & IRQF_PERCPU)) | ||
| 2102 | return -EINVAL; | ||
| 2103 | |||
| 2104 | if (!handler) | ||
| 2105 | return -EINVAL; | ||
| 2106 | |||
| 2107 | desc = irq_to_desc(irq); | ||
| 2108 | |||
| 2109 | if (!desc || irq_settings_can_autoenable(desc) || | ||
| 2110 | !irq_settings_can_request(desc) || | ||
| 2111 | WARN_ON(irq_settings_is_per_cpu_devid(desc)) || | ||
| 2112 | !irq_supports_nmi(desc)) | ||
| 2113 | return -EINVAL; | ||
| 2114 | |||
| 2115 | action = kzalloc(sizeof(struct irqaction), GFP_KERNEL); | ||
| 2116 | if (!action) | ||
| 2117 | return -ENOMEM; | ||
| 2118 | |||
| 2119 | action->handler = handler; | ||
| 2120 | action->flags = irqflags | IRQF_NO_THREAD | IRQF_NOBALANCING; | ||
| 2121 | action->name = name; | ||
| 2122 | action->dev_id = dev_id; | ||
| 2123 | |||
| 2124 | retval = irq_chip_pm_get(&desc->irq_data); | ||
| 2125 | if (retval < 0) | ||
| 2126 | goto err_out; | ||
| 2127 | |||
| 2128 | retval = __setup_irq(irq, desc, action); | ||
| 2129 | if (retval) | ||
| 2130 | goto err_irq_setup; | ||
| 2131 | |||
| 2132 | raw_spin_lock_irqsave(&desc->lock, flags); | ||
| 2133 | |||
| 2134 | /* Setup NMI state */ | ||
| 2135 | desc->istate |= IRQS_NMI; | ||
| 2136 | retval = irq_nmi_setup(desc); | ||
| 2137 | if (retval) { | ||
| 2138 | __cleanup_nmi(irq, desc); | ||
| 2139 | raw_spin_unlock_irqrestore(&desc->lock, flags); | ||
| 2140 | return -EINVAL; | ||
| 2141 | } | ||
| 2142 | |||
| 2143 | raw_spin_unlock_irqrestore(&desc->lock, flags); | ||
| 2144 | |||
| 2145 | return 0; | ||
| 2146 | |||
| 2147 | err_irq_setup: | ||
| 2148 | irq_chip_pm_put(&desc->irq_data); | ||
| 2149 | err_out: | ||
| 2150 | kfree(action); | ||
| 2151 | |||
| 2152 | return retval; | ||
| 2153 | } | ||
| 2154 | |||
| 1928 | void enable_percpu_irq(unsigned int irq, unsigned int type) | 2155 | void enable_percpu_irq(unsigned int irq, unsigned int type) |
| 1929 | { | 2156 | { |
| 1930 | unsigned int cpu = smp_processor_id(); | 2157 | unsigned int cpu = smp_processor_id(); |
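
A minimal consumer sketch for request_nmi(), under stated assumptions: the foo_wd_* names are hypothetical, the irq number comes from platform or firmware code, and the line satisfies the checks above (NMI-capable chip, per-CPU delivery, not shared). enable_nmi() is added alongside this API elsewhere in the series.

	static int foo_wd_cookie;	/* hypothetical dev_id; normally the device data structure */

	static irqreturn_t foo_wd_nmi_handler(int irq, void *dev_id)
	{
		/* Runs in NMI context: no sleeping, no locks shared with maskable-IRQ code. */
		return IRQ_HANDLED;
	}

	static int foo_wd_setup(unsigned int irq)
	{
		int err;

		/* request_nmi() rejects lines that would be auto-enabled on request. */
		irq_set_status_flags(irq, IRQ_NOAUTOEN);

		err = request_nmi(irq, foo_wd_nmi_handler, IRQF_PERCPU, "foo-wd",
				  &foo_wd_cookie);
		if (err)
			return err;

		enable_nmi(irq);
		return 0;
	}
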
| @@ -1959,6 +2186,11 @@ out: | |||
| 1959 | } | 2186 | } |
| 1960 | EXPORT_SYMBOL_GPL(enable_percpu_irq); | 2187 | EXPORT_SYMBOL_GPL(enable_percpu_irq); |
| 1961 | 2188 | ||
| 2189 | void enable_percpu_nmi(unsigned int irq, unsigned int type) | ||
| 2190 | { | ||
| 2191 | enable_percpu_irq(irq, type); | ||
| 2192 | } | ||
| 2193 | |||
| 1962 | /** | 2194 | /** |
| 1963 | * irq_percpu_is_enabled - Check whether the per cpu irq is enabled | 2195 | * irq_percpu_is_enabled - Check whether the per cpu irq is enabled |
| 1964 | * @irq: Linux irq number to check for | 2196 | * @irq: Linux irq number to check for |
| @@ -1998,6 +2230,11 @@ void disable_percpu_irq(unsigned int irq) | |||
| 1998 | } | 2230 | } |
| 1999 | EXPORT_SYMBOL_GPL(disable_percpu_irq); | 2231 | EXPORT_SYMBOL_GPL(disable_percpu_irq); |
| 2000 | 2232 | ||
| 2233 | void disable_percpu_nmi(unsigned int irq) | ||
| 2234 | { | ||
| 2235 | disable_percpu_irq(irq); | ||
| 2236 | } | ||
| 2237 | |||
| 2001 | /* | 2238 | /* |
| 2002 | * Internal function to unregister a percpu irqaction. | 2239 | * Internal function to unregister a percpu irqaction. |
| 2003 | */ | 2240 | */ |
| @@ -2029,6 +2266,8 @@ static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_ | |||
| 2029 | /* Found it - now remove it from the list of entries: */ | 2266 | /* Found it - now remove it from the list of entries: */ |
| 2030 | desc->action = NULL; | 2267 | desc->action = NULL; |
| 2031 | 2268 | ||
| 2269 | desc->istate &= ~IRQS_NMI; | ||
| 2270 | |||
| 2032 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 2271 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
| 2033 | 2272 | ||
| 2034 | unregister_handler_proc(irq, action); | 2273 | unregister_handler_proc(irq, action); |
| @@ -2082,6 +2321,19 @@ void free_percpu_irq(unsigned int irq, void __percpu *dev_id) | |||
| 2082 | } | 2321 | } |
| 2083 | EXPORT_SYMBOL_GPL(free_percpu_irq); | 2322 | EXPORT_SYMBOL_GPL(free_percpu_irq); |
| 2084 | 2323 | ||
| 2324 | void free_percpu_nmi(unsigned int irq, void __percpu *dev_id) | ||
| 2325 | { | ||
| 2326 | struct irq_desc *desc = irq_to_desc(irq); | ||
| 2327 | |||
| 2328 | if (!desc || !irq_settings_is_per_cpu_devid(desc)) | ||
| 2329 | return; | ||
| 2330 | |||
| 2331 | if (WARN_ON(!(desc->istate & IRQS_NMI))) | ||
| 2332 | return; | ||
| 2333 | |||
| 2334 | kfree(__free_percpu_irq(irq, dev_id)); | ||
| 2335 | } | ||
| 2336 | |||
| 2085 | /** | 2337 | /** |
| 2086 | * setup_percpu_irq - setup a per-cpu interrupt | 2338 | * setup_percpu_irq - setup a per-cpu interrupt |
| 2087 | * @irq: Interrupt line to setup | 2339 | * @irq: Interrupt line to setup |
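
Unlike free_nmi(), free_percpu_nmi() only removes the action and clears the NMI state; it does not undo the per-CPU setup. A hedged sketch of the expected release ordering, with foo_irq and foo_cookie as hypothetical names (teardown_percpu_nmi() is introduced further down in this diff):

	/* On each CPU that prepared and enabled the NMI: */
	disable_percpu_nmi(foo_irq);
	teardown_percpu_nmi(foo_irq);

	/* Then, once no CPU can receive this NMI any more: */
	free_percpu_nmi(foo_irq, &foo_cookie);
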
| @@ -2172,6 +2424,158 @@ int __request_percpu_irq(unsigned int irq, irq_handler_t handler, | |||
| 2172 | EXPORT_SYMBOL_GPL(__request_percpu_irq); | 2424 | EXPORT_SYMBOL_GPL(__request_percpu_irq); |
| 2173 | 2425 | ||
| 2174 | /** | 2426 | /** |
| 2427 | * request_percpu_nmi - allocate a percpu interrupt line for NMI delivery | ||
| 2428 | * @irq: Interrupt line to allocate | ||
| 2429 | * @handler: Function to be called when the IRQ occurs. | ||
| 2430 | * @name: An ascii name for the claiming device | ||
| 2431 | * @dev_id: A percpu cookie passed back to the handler function | ||
| 2432 | * | ||
| 2433 | * This call allocates interrupt resources for a per-CPU NMI. Per-CPU NMIs | ||
| 2434 | * have to be set up on each CPU by calling prepare_percpu_nmi() before | ||
| 2435 | * being enabled on the same CPU by using enable_percpu_nmi(). | ||
| 2436 | * | ||
| 2437 | * Dev_id must be globally unique. It is a per-cpu variable, and | ||
| 2438 | * the handler gets called with the interrupted CPU's instance of | ||
| 2439 | * that variable. | ||
| 2440 | * | ||
| 2441 | * Interrupt lines requested for NMI delivery should have automatic | ||
| 2442 | * enabling disabled. | ||
| 2443 | * | ||
| 2444 | * If the interrupt line cannot be used to deliver NMIs, the function | ||
| 2445 | * will fail and return a negative value. | ||
| 2446 | */ | ||
| 2447 | int request_percpu_nmi(unsigned int irq, irq_handler_t handler, | ||
| 2448 | const char *name, void __percpu *dev_id) | ||
| 2449 | { | ||
| 2450 | struct irqaction *action; | ||
| 2451 | struct irq_desc *desc; | ||
| 2452 | unsigned long flags; | ||
| 2453 | int retval; | ||
| 2454 | |||
| 2455 | if (!handler) | ||
| 2456 | return -EINVAL; | ||
| 2457 | |||
| 2458 | desc = irq_to_desc(irq); | ||
| 2459 | |||
| 2460 | if (!desc || !irq_settings_can_request(desc) || | ||
| 2461 | !irq_settings_is_per_cpu_devid(desc) || | ||
| 2462 | irq_settings_can_autoenable(desc) || | ||
| 2463 | !irq_supports_nmi(desc)) | ||
| 2464 | return -EINVAL; | ||
| 2465 | |||
| 2466 | /* The line cannot already be configured for NMI delivery */ | ||
| 2467 | if (desc->istate & IRQS_NMI) | ||
| 2468 | return -EINVAL; | ||
| 2469 | |||
| 2470 | action = kzalloc(sizeof(struct irqaction), GFP_KERNEL); | ||
| 2471 | if (!action) | ||
| 2472 | return -ENOMEM; | ||
| 2473 | |||
| 2474 | action->handler = handler; | ||
| 2475 | action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND | IRQF_NO_THREAD | ||
| 2476 | | IRQF_NOBALANCING; | ||
| 2477 | action->name = name; | ||
| 2478 | action->percpu_dev_id = dev_id; | ||
| 2479 | |||
| 2480 | retval = irq_chip_pm_get(&desc->irq_data); | ||
| 2481 | if (retval < 0) | ||
| 2482 | goto err_out; | ||
| 2483 | |||
| 2484 | retval = __setup_irq(irq, desc, action); | ||
| 2485 | if (retval) | ||
| 2486 | goto err_irq_setup; | ||
| 2487 | |||
| 2488 | raw_spin_lock_irqsave(&desc->lock, flags); | ||
| 2489 | desc->istate |= IRQS_NMI; | ||
| 2490 | raw_spin_unlock_irqrestore(&desc->lock, flags); | ||
| 2491 | |||
| 2492 | return 0; | ||
| 2493 | |||
| 2494 | err_irq_setup: | ||
| 2495 | irq_chip_pm_put(&desc->irq_data); | ||
| 2496 | err_out: | ||
| 2497 | kfree(action); | ||
| 2498 | |||
| 2499 | return retval; | ||
| 2500 | } | ||
| 2501 | |||
| 2502 | /** | ||
| 2503 | * prepare_percpu_nmi - performs CPU local setup for NMI delivery | ||
| 2504 | * @irq: Interrupt line to prepare for NMI delivery | ||
| 2505 | * | ||
| 2506 | * This call prepares an interrupt line to deliver NMIs on the current CPU, | ||
| 2507 | * before that interrupt line gets enabled with enable_percpu_nmi(). | ||
| 2508 | * | ||
| 2509 | * As a CPU local operation, this should be called from non-preemptible | ||
| 2510 | * context. | ||
| 2511 | * | ||
| 2512 | * If the interrupt line cannot be used to deliver NMIs, the function | ||
| 2513 | * will fail and return a negative value. | ||
| 2514 | */ | ||
| 2515 | int prepare_percpu_nmi(unsigned int irq) | ||
| 2516 | { | ||
| 2517 | unsigned long flags; | ||
| 2518 | struct irq_desc *desc; | ||
| 2519 | int ret = 0; | ||
| 2520 | |||
| 2521 | WARN_ON(preemptible()); | ||
| 2522 | |||
| 2523 | desc = irq_get_desc_lock(irq, &flags, | ||
| 2524 | IRQ_GET_DESC_CHECK_PERCPU); | ||
| 2525 | if (!desc) | ||
| 2526 | return -EINVAL; | ||
| 2527 | |||
| 2528 | if (WARN(!(desc->istate & IRQS_NMI), | ||
| 2529 | KERN_ERR "prepare_percpu_nmi called for a non-NMI interrupt: irq %u\n", | ||
| 2530 | irq)) { | ||
| 2531 | ret = -EINVAL; | ||
| 2532 | goto out; | ||
| 2533 | } | ||
| 2534 | |||
| 2535 | ret = irq_nmi_setup(desc); | ||
| 2536 | if (ret) { | ||
| 2537 | pr_err("Failed to setup NMI delivery: irq %u\n", irq); | ||
| 2538 | goto out; | ||
| 2539 | } | ||
| 2540 | |||
| 2541 | out: | ||
| 2542 | irq_put_desc_unlock(desc, flags); | ||
| 2543 | return ret; | ||
| 2544 | } | ||
| 2545 | |||
| 2546 | /** | ||
| 2547 | * teardown_percpu_nmi - undoes NMI setup of IRQ line | ||
| 2548 | * @irq: Interrupt line from which CPU local NMI configuration should be | ||
| 2549 | * removed | ||
| 2550 | * | ||
| 2551 | * This call undoes the setup done by prepare_percpu_nmi(). | ||
| 2552 | * | ||
| 2553 | * IRQ line should not be enabled for the current CPU. | ||
| 2554 | * | ||
| 2555 | * As a CPU local operation, this should be called from non-preemptible | ||
| 2556 | * context. | ||
| 2557 | */ | ||
| 2558 | void teardown_percpu_nmi(unsigned int irq) | ||
| 2559 | { | ||
| 2560 | unsigned long flags; | ||
| 2561 | struct irq_desc *desc; | ||
| 2562 | |||
| 2563 | WARN_ON(preemptible()); | ||
| 2564 | |||
| 2565 | desc = irq_get_desc_lock(irq, &flags, | ||
| 2566 | IRQ_GET_DESC_CHECK_PERCPU); | ||
| 2567 | if (!desc) | ||
| 2568 | return; | ||
| 2569 | |||
| 2570 | if (WARN_ON(!(desc->istate & IRQS_NMI))) | ||
| 2571 | goto out; | ||
| 2572 | |||
| 2573 | irq_nmi_teardown(desc); | ||
| 2574 | out: | ||
| 2575 | irq_put_desc_unlock(desc, flags); | ||
| 2576 | } | ||
| 2577 | |||
| 2578 | /** | ||
| 2175 | * irq_get_irqchip_state - returns the irqchip state of a interrupt. | 2579 | * irq_get_irqchip_state - returns the irqchip state of a interrupt. |
| 2176 | * @irq: Interrupt line that is forwarded to a VM | 2580 | * @irq: Interrupt line that is forwarded to a VM |
| 2177 | * @which: One of IRQCHIP_STATE_* the caller wants to know about | 2581 | * @which: One of IRQCHIP_STATE_* the caller wants to know about |
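
Putting the per-CPU pieces together, a hedged end-to-end sketch under stated assumptions: the foo_pmu_* names are hypothetical, foo_pmu_irq refers to a per-CPU-devid line marked IRQ_NOAUTOEN, and the prepare/enable pair runs on every CPU from a non-preemptible context such as a CPU-hotplug "starting" callback.

	static DEFINE_PER_CPU(int, foo_pmu_cookie);	/* hypothetical per-CPU dev_id */
	static unsigned int foo_pmu_irq;		/* hypothetical, resolved by platform code */

	static irqreturn_t foo_pmu_nmi_handler(int irq, void *dev_id)
	{
		/* dev_id is the interrupted CPU's instance of foo_pmu_cookie. */
		return IRQ_HANDLED;
	}

	static int foo_pmu_request(void)
	{
		return request_percpu_nmi(foo_pmu_irq, foo_pmu_nmi_handler, "foo-pmu",
					  &foo_pmu_cookie);
	}

	/* Runs on each CPU with preemption disabled, e.g. a cpuhp "starting" callback. */
	static int foo_pmu_starting_cpu(unsigned int cpu)
	{
		int err = prepare_percpu_nmi(foo_pmu_irq);

		if (!err)
			enable_percpu_nmi(foo_pmu_irq, IRQ_TYPE_NONE);
		return err;
	}
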
diff --git a/kernel/kthread.c b/kernel/kthread.c index 087d18d771b5..65234c89d85b 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c | |||
| @@ -101,6 +101,12 @@ bool kthread_should_stop(void) | |||
| 101 | } | 101 | } |
| 102 | EXPORT_SYMBOL(kthread_should_stop); | 102 | EXPORT_SYMBOL(kthread_should_stop); |
| 103 | 103 | ||
| 104 | bool __kthread_should_park(struct task_struct *k) | ||
| 105 | { | ||
| 106 | return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(k)->flags); | ||
| 107 | } | ||
| 108 | EXPORT_SYMBOL_GPL(__kthread_should_park); | ||
| 109 | |||
| 104 | /** | 110 | /** |
| 105 | * kthread_should_park - should this kthread park now? | 111 | * kthread_should_park - should this kthread park now? |
| 106 | * | 112 | * |
| @@ -114,7 +120,7 @@ EXPORT_SYMBOL(kthread_should_stop); | |||
| 114 | */ | 120 | */ |
| 115 | bool kthread_should_park(void) | 121 | bool kthread_should_park(void) |
| 116 | { | 122 | { |
| 117 | return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(current)->flags); | 123 | return __kthread_should_park(current); |
| 118 | } | 124 | } |
| 119 | EXPORT_SYMBOL_GPL(kthread_should_park); | 125 | EXPORT_SYMBOL_GPL(kthread_should_park); |
| 120 | 126 | ||
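
__kthread_should_park() performs the same test as kthread_should_park() but on an arbitrary task instead of current, so code outside the kthread can check whether it has been asked to park. The softirq hunk below is the in-tree user; a hedged sketch of the general pattern, where tsk and do_work_inline() are hypothetical:

	/*
	 * Rely on the helper kthread only if it is runnable and not about to
	 * park (CPU hotplug parks it via kthread_park()); otherwise do the
	 * work inline.
	 */
	if (tsk && tsk->state == TASK_RUNNING && !__kthread_should_park(tsk))
		return;

	do_work_inline();
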
diff --git a/kernel/softirq.c b/kernel/softirq.c index d28813306b2c..10277429ed84 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c | |||
| @@ -89,7 +89,8 @@ static bool ksoftirqd_running(unsigned long pending) | |||
| 89 | 89 | ||
| 90 | if (pending & SOFTIRQ_NOW_MASK) | 90 | if (pending & SOFTIRQ_NOW_MASK) |
| 91 | return false; | 91 | return false; |
| 92 | return tsk && (tsk->state == TASK_RUNNING); | 92 | return tsk && (tsk->state == TASK_RUNNING) && |
| 93 | !__kthread_should_park(tsk); | ||
| 93 | } | 94 | } |
| 94 | 95 | ||
| 95 | /* | 96 | /* |
