summary refs log tree commit diff stats
path: root/kernel/irq/manage.c
diff options
context:
space:
mode:
authorThomas Gleixner <tglx@linutronix.de>2019-02-23 04:53:31 -0500
committerThomas Gleixner <tglx@linutronix.de>2019-02-23 04:53:31 -0500
commita324ca9cad4736252c33c1e28cffe1d87f262d03 (patch)
treeda64e14dd8432602634773b52073928c50dfb85c /kernel/irq/manage.c
parent4e6b26d23dc1faee318796d5c7f91b5692b1e6be (diff)
parent28528fca4908142bd1a3247956cba56c9c667d71 (diff)
Merge tag 'irqchip-5.1' of git://git.kernel.org/pub/scm/linux/kernel/git/maz/arm-platforms into irq/core
Pull irqchip updates from Marc Zyngier - Core pseudo-NMI handling code - Allow the default irq domain to be retrieved - A new interrupt controller for the Loongson LS1X platform - Affinity support for the SiFive PLIC - Better support for the iMX irqsteer driver - NUMA aware memory allocations for GICv3 - A handful of other fixes (i8259, GICv3, PLIC)
Diffstat (limited to 'kernel/irq/manage.c')
-rw-r--r--kernel/irq/manage.c405
1 file changed, 404 insertions(+), 1 deletion(-)
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 3f8a8ebda484..9ec34a2a6638 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -341,7 +341,7 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
341 /* The release function is promised process context */ 341 /* The release function is promised process context */
342 might_sleep(); 342 might_sleep();
343 343
344 if (!desc) 344 if (!desc || desc->istate & IRQS_NMI)
345 return -EINVAL; 345 return -EINVAL;
346 346
347 /* Complete initialisation of *notify */ 347 /* Complete initialisation of *notify */
@@ -553,6 +553,21 @@ bool disable_hardirq(unsigned int irq)
553} 553}
554EXPORT_SYMBOL_GPL(disable_hardirq); 554EXPORT_SYMBOL_GPL(disable_hardirq);
555 555
556/**
557 * disable_nmi_nosync - disable an nmi without waiting
558 * @irq: Interrupt to disable
559 *
560 * Disable the selected interrupt line. Disables and enables are
561 * nested.
562 * The interrupt to disable must have been requested through request_nmi.
563 * Unlike disable_nmi(), this function does not ensure existing
564 * instances of the IRQ handler have completed before returning.
565 */
566void disable_nmi_nosync(unsigned int irq)
567{
568 disable_irq_nosync(irq);
569}
570
556void __enable_irq(struct irq_desc *desc) 571void __enable_irq(struct irq_desc *desc)
557{ 572{
558 switch (desc->depth) { 573 switch (desc->depth) {
@@ -609,6 +624,20 @@ out:
609} 624}
610EXPORT_SYMBOL(enable_irq); 625EXPORT_SYMBOL(enable_irq);
611 626
627/**
628 * enable_nmi - enable handling of an nmi
629 * @irq: Interrupt to enable
630 *
631 * The interrupt to enable must have been requested through request_nmi.
632 * Undoes the effect of one call to disable_nmi(). If this
633 * matches the last disable, processing of interrupts on this
634 * IRQ line is re-enabled.
635 */
636void enable_nmi(unsigned int irq)
637{
638 enable_irq(irq);
639}
640
612static int set_irq_wake_real(unsigned int irq, unsigned int on) 641static int set_irq_wake_real(unsigned int irq, unsigned int on)
613{ 642{
614 struct irq_desc *desc = irq_to_desc(irq); 643 struct irq_desc *desc = irq_to_desc(irq);
@@ -644,6 +673,12 @@ int irq_set_irq_wake(unsigned int irq, unsigned int on)
644 if (!desc) 673 if (!desc)
645 return -EINVAL; 674 return -EINVAL;
646 675
676 /* Don't use NMIs as wake up interrupts please */
677 if (desc->istate & IRQS_NMI) {
678 ret = -EINVAL;
679 goto out_unlock;
680 }
681
647 /* wakeup-capable irqs can be shared between drivers that 682 /* wakeup-capable irqs can be shared between drivers that
648 * don't need to have the same sleep mode behaviors. 683 * don't need to have the same sleep mode behaviors.
649 */ 684 */
@@ -666,6 +701,8 @@ int irq_set_irq_wake(unsigned int irq, unsigned int on)
666 irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE); 701 irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
667 } 702 }
668 } 703 }
704
705out_unlock:
669 irq_put_desc_busunlock(desc, flags); 706 irq_put_desc_busunlock(desc, flags);
670 return ret; 707 return ret;
671} 708}
@@ -1129,6 +1166,39 @@ static void irq_release_resources(struct irq_desc *desc)
1129 c->irq_release_resources(d); 1166 c->irq_release_resources(d);
1130} 1167}
1131 1168
1169static bool irq_supports_nmi(struct irq_desc *desc)
1170{
1171 struct irq_data *d = irq_desc_get_irq_data(desc);
1172
1173#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
1174 /* Only IRQs directly managed by the root irqchip can be set as NMI */
1175 if (d->parent_data)
1176 return false;
1177#endif
1178 /* Don't support NMIs for chips behind a slow bus */
1179 if (d->chip->irq_bus_lock || d->chip->irq_bus_sync_unlock)
1180 return false;
1181
1182 return d->chip->flags & IRQCHIP_SUPPORTS_NMI;
1183}
1184
1185static int irq_nmi_setup(struct irq_desc *desc)
1186{
1187 struct irq_data *d = irq_desc_get_irq_data(desc);
1188 struct irq_chip *c = d->chip;
1189
1190 return c->irq_nmi_setup ? c->irq_nmi_setup(d) : -EINVAL;
1191}
1192
1193static void irq_nmi_teardown(struct irq_desc *desc)
1194{
1195 struct irq_data *d = irq_desc_get_irq_data(desc);
1196 struct irq_chip *c = d->chip;
1197
1198 if (c->irq_nmi_teardown)
1199 c->irq_nmi_teardown(d);
1200}
1201
1132static int 1202static int
1133setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary) 1203setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
1134{ 1204{
@@ -1303,9 +1373,17 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
1303 * fields must have IRQF_SHARED set and the bits which 1373 * fields must have IRQF_SHARED set and the bits which
1304 * set the trigger type must match. Also all must 1374 * set the trigger type must match. Also all must
1305 * agree on ONESHOT. 1375 * agree on ONESHOT.
1376 * Interrupt lines used for NMIs cannot be shared.
1306 */ 1377 */
1307 unsigned int oldtype; 1378 unsigned int oldtype;
1308 1379
1380 if (desc->istate & IRQS_NMI) {
1381 pr_err("Invalid attempt to share NMI for %s (irq %d) on irqchip %s.\n",
1382 new->name, irq, desc->irq_data.chip->name);
1383 ret = -EINVAL;
1384 goto out_unlock;
1385 }
1386
1309 /* 1387 /*
1310 * If nobody did set the configuration before, inherit 1388 * If nobody did set the configuration before, inherit
1311 * the one provided by the requester. 1389 * the one provided by the requester.
@@ -1757,6 +1835,59 @@ const void *free_irq(unsigned int irq, void *dev_id)
1757} 1835}
1758EXPORT_SYMBOL(free_irq); 1836EXPORT_SYMBOL(free_irq);
1759 1837
1838/* This function must be called with desc->lock held */
1839static const void *__cleanup_nmi(unsigned int irq, struct irq_desc *desc)
1840{
1841 const char *devname = NULL;
1842
1843 desc->istate &= ~IRQS_NMI;
1844
1845 if (!WARN_ON(desc->action == NULL)) {
1846 irq_pm_remove_action(desc, desc->action);
1847 devname = desc->action->name;
1848 unregister_handler_proc(irq, desc->action);
1849
1850 kfree(desc->action);
1851 desc->action = NULL;
1852 }
1853
1854 irq_settings_clr_disable_unlazy(desc);
1855 irq_shutdown(desc);
1856
1857 irq_release_resources(desc);
1858
1859 irq_chip_pm_put(&desc->irq_data);
1860 module_put(desc->owner);
1861
1862 return devname;
1863}
1864
1865const void *free_nmi(unsigned int irq, void *dev_id)
1866{
1867 struct irq_desc *desc = irq_to_desc(irq);
1868 unsigned long flags;
1869 const void *devname;
1870
1871 if (!desc || WARN_ON(!(desc->istate & IRQS_NMI)))
1872 return NULL;
1873
1874 if (WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1875 return NULL;
1876
1877 /* NMI still enabled */
1878 if (WARN_ON(desc->depth == 0))
1879 disable_nmi_nosync(irq);
1880
1881 raw_spin_lock_irqsave(&desc->lock, flags);
1882
1883 irq_nmi_teardown(desc);
1884 devname = __cleanup_nmi(irq, desc);
1885
1886 raw_spin_unlock_irqrestore(&desc->lock, flags);
1887
1888 return devname;
1889}
1890
1760/** 1891/**
1761 * request_threaded_irq - allocate an interrupt line 1892 * request_threaded_irq - allocate an interrupt line
1762 * @irq: Interrupt line to allocate 1893 * @irq: Interrupt line to allocate
@@ -1926,6 +2057,101 @@ int request_any_context_irq(unsigned int irq, irq_handler_t handler,
1926} 2057}
1927EXPORT_SYMBOL_GPL(request_any_context_irq); 2058EXPORT_SYMBOL_GPL(request_any_context_irq);
1928 2059
2060/**
2061 * request_nmi - allocate an interrupt line for NMI delivery
2062 * @irq: Interrupt line to allocate
2063 * @handler: Function to be called when the IRQ occurs.
2064 * Threaded handler for threaded interrupts.
2065 * @irqflags: Interrupt type flags
2066 * @name: An ascii name for the claiming device
2067 * @dev_id: A cookie passed back to the handler function
2068 *
2069 * This call allocates interrupt resources and enables the
2070 * interrupt line and IRQ handling. It sets up the IRQ line
2071 * to be handled as an NMI.
2072 *
2073 * An interrupt line delivering NMIs cannot be shared and IRQ handling
2074 * cannot be threaded.
2075 *
2076 * Interrupt lines requested for NMI delivering must produce per cpu
2077 * interrupts and have auto enabling setting disabled.
2078 *
2079 * Dev_id must be globally unique. Normally the address of the
2080 * device data structure is used as the cookie. Since the handler
2081 * receives this value it makes sense to use it.
2082 *
2083 * If the interrupt line cannot be used to deliver NMIs, function
2084 * will fail and return a negative value.
2085 */
2086int request_nmi(unsigned int irq, irq_handler_t handler,
2087 unsigned long irqflags, const char *name, void *dev_id)
2088{
2089 struct irqaction *action;
2090 struct irq_desc *desc;
2091 unsigned long flags;
2092 int retval;
2093
2094 if (irq == IRQ_NOTCONNECTED)
2095 return -ENOTCONN;
2096
2097 /* NMI cannot be shared, used for Polling */
2098 if (irqflags & (IRQF_SHARED | IRQF_COND_SUSPEND | IRQF_IRQPOLL))
2099 return -EINVAL;
2100
2101 if (!(irqflags & IRQF_PERCPU))
2102 return -EINVAL;
2103
2104 if (!handler)
2105 return -EINVAL;
2106
2107 desc = irq_to_desc(irq);
2108
2109 if (!desc || irq_settings_can_autoenable(desc) ||
2110 !irq_settings_can_request(desc) ||
2111 WARN_ON(irq_settings_is_per_cpu_devid(desc)) ||
2112 !irq_supports_nmi(desc))
2113 return -EINVAL;
2114
2115 action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
2116 if (!action)
2117 return -ENOMEM;
2118
2119 action->handler = handler;
2120 action->flags = irqflags | IRQF_NO_THREAD | IRQF_NOBALANCING;
2121 action->name = name;
2122 action->dev_id = dev_id;
2123
2124 retval = irq_chip_pm_get(&desc->irq_data);
2125 if (retval < 0)
2126 goto err_out;
2127
2128 retval = __setup_irq(irq, desc, action);
2129 if (retval)
2130 goto err_irq_setup;
2131
2132 raw_spin_lock_irqsave(&desc->lock, flags);
2133
2134 /* Setup NMI state */
2135 desc->istate |= IRQS_NMI;
2136 retval = irq_nmi_setup(desc);
2137 if (retval) {
2138 __cleanup_nmi(irq, desc);
2139 raw_spin_unlock_irqrestore(&desc->lock, flags);
2140 return -EINVAL;
2141 }
2142
2143 raw_spin_unlock_irqrestore(&desc->lock, flags);
2144
2145 return 0;
2146
2147err_irq_setup:
2148 irq_chip_pm_put(&desc->irq_data);
2149err_out:
2150 kfree(action);
2151
2152 return retval;
2153}
2154
1929void enable_percpu_irq(unsigned int irq, unsigned int type) 2155void enable_percpu_irq(unsigned int irq, unsigned int type)
1930{ 2156{
1931 unsigned int cpu = smp_processor_id(); 2157 unsigned int cpu = smp_processor_id();
@@ -1960,6 +2186,11 @@ out:
1960} 2186}
1961EXPORT_SYMBOL_GPL(enable_percpu_irq); 2187EXPORT_SYMBOL_GPL(enable_percpu_irq);
1962 2188
2189void enable_percpu_nmi(unsigned int irq, unsigned int type)
2190{
2191 enable_percpu_irq(irq, type);
2192}
2193
1963/** 2194/**
1964 * irq_percpu_is_enabled - Check whether the per cpu irq is enabled 2195 * irq_percpu_is_enabled - Check whether the per cpu irq is enabled
1965 * @irq: Linux irq number to check for 2196 * @irq: Linux irq number to check for
@@ -1999,6 +2230,11 @@ void disable_percpu_irq(unsigned int irq)
1999} 2230}
2000EXPORT_SYMBOL_GPL(disable_percpu_irq); 2231EXPORT_SYMBOL_GPL(disable_percpu_irq);
2001 2232
2233void disable_percpu_nmi(unsigned int irq)
2234{
2235 disable_percpu_irq(irq);
2236}
2237
2002/* 2238/*
2003 * Internal function to unregister a percpu irqaction. 2239 * Internal function to unregister a percpu irqaction.
2004 */ 2240 */
@@ -2030,6 +2266,8 @@ static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_
2030 /* Found it - now remove it from the list of entries: */ 2266 /* Found it - now remove it from the list of entries: */
2031 desc->action = NULL; 2267 desc->action = NULL;
2032 2268
2269 desc->istate &= ~IRQS_NMI;
2270
2033 raw_spin_unlock_irqrestore(&desc->lock, flags); 2271 raw_spin_unlock_irqrestore(&desc->lock, flags);
2034 2272
2035 unregister_handler_proc(irq, action); 2273 unregister_handler_proc(irq, action);
@@ -2083,6 +2321,19 @@ void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
2083} 2321}
2084EXPORT_SYMBOL_GPL(free_percpu_irq); 2322EXPORT_SYMBOL_GPL(free_percpu_irq);
2085 2323
2324void free_percpu_nmi(unsigned int irq, void __percpu *dev_id)
2325{
2326 struct irq_desc *desc = irq_to_desc(irq);
2327
2328 if (!desc || !irq_settings_is_per_cpu_devid(desc))
2329 return;
2330
2331 if (WARN_ON(!(desc->istate & IRQS_NMI)))
2332 return;
2333
2334 kfree(__free_percpu_irq(irq, dev_id));
2335}
2336
2086/** 2337/**
2087 * setup_percpu_irq - setup a per-cpu interrupt 2338 * setup_percpu_irq - setup a per-cpu interrupt
2088 * @irq: Interrupt line to setup 2339 * @irq: Interrupt line to setup
@@ -2173,6 +2424,158 @@ int __request_percpu_irq(unsigned int irq, irq_handler_t handler,
2173EXPORT_SYMBOL_GPL(__request_percpu_irq); 2424EXPORT_SYMBOL_GPL(__request_percpu_irq);
2174 2425
2175/** 2426/**
2427 * request_percpu_nmi - allocate a percpu interrupt line for NMI delivery
2428 * @irq: Interrupt line to allocate
2429 * @handler: Function to be called when the IRQ occurs.
2430 * @name: An ascii name for the claiming device
2431 * @dev_id: A percpu cookie passed back to the handler function
2432 *
2433 * This call allocates interrupt resources for a per CPU NMI. Per CPU NMIs
2434 * have to be setup on each CPU by calling prepare_percpu_nmi() before
2435 * being enabled on the same CPU by using enable_percpu_nmi().
2436 *
2437 * Dev_id must be globally unique. It is a per-cpu variable, and
2438 * the handler gets called with the interrupted CPU's instance of
2439 * that variable.
2440 *
2441 * Interrupt lines requested for NMI delivering should have auto enabling
2442 * setting disabled.
2443 *
2444 * If the interrupt line cannot be used to deliver NMIs, function
2445 * will fail returning a negative value.
2446 */
2447int request_percpu_nmi(unsigned int irq, irq_handler_t handler,
2448 const char *name, void __percpu *dev_id)
2449{
2450 struct irqaction *action;
2451 struct irq_desc *desc;
2452 unsigned long flags;
2453 int retval;
2454
2455 if (!handler)
2456 return -EINVAL;
2457
2458 desc = irq_to_desc(irq);
2459
2460 if (!desc || !irq_settings_can_request(desc) ||
2461 !irq_settings_is_per_cpu_devid(desc) ||
2462 irq_settings_can_autoenable(desc) ||
2463 !irq_supports_nmi(desc))
2464 return -EINVAL;
2465
2466 /* The line cannot already be NMI */
2467 if (desc->istate & IRQS_NMI)
2468 return -EINVAL;
2469
2470 action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
2471 if (!action)
2472 return -ENOMEM;
2473
2474 action->handler = handler;
2475 action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND | IRQF_NO_THREAD
2476 | IRQF_NOBALANCING;
2477 action->name = name;
2478 action->percpu_dev_id = dev_id;
2479
2480 retval = irq_chip_pm_get(&desc->irq_data);
2481 if (retval < 0)
2482 goto err_out;
2483
2484 retval = __setup_irq(irq, desc, action);
2485 if (retval)
2486 goto err_irq_setup;
2487
2488 raw_spin_lock_irqsave(&desc->lock, flags);
2489 desc->istate |= IRQS_NMI;
2490 raw_spin_unlock_irqrestore(&desc->lock, flags);
2491
2492 return 0;
2493
2494err_irq_setup:
2495 irq_chip_pm_put(&desc->irq_data);
2496err_out:
2497 kfree(action);
2498
2499 return retval;
2500}
2501
2502/**
2503 * prepare_percpu_nmi - performs CPU local setup for NMI delivery
2504 * @irq: Interrupt line to prepare for NMI delivery
2505 *
2506 * This call prepares an interrupt line to deliver NMI on the current CPU,
2507 * before that interrupt line gets enabled with enable_percpu_nmi().
2508 *
2509 * As a CPU local operation, this should be called from non-preemptible
2510 * context.
2511 *
2512 * If the interrupt line cannot be used to deliver NMIs, function
2513 * will fail returning a negative value.
2514 */
2515int prepare_percpu_nmi(unsigned int irq)
2516{
2517 unsigned long flags;
2518 struct irq_desc *desc;
2519 int ret = 0;
2520
2521 WARN_ON(preemptible());
2522
2523 desc = irq_get_desc_lock(irq, &flags,
2524 IRQ_GET_DESC_CHECK_PERCPU);
2525 if (!desc)
2526 return -EINVAL;
2527
2528 if (WARN(!(desc->istate & IRQS_NMI),
2529 KERN_ERR "prepare_percpu_nmi called for a non-NMI interrupt: irq %u\n",
2530 irq)) {
2531 ret = -EINVAL;
2532 goto out;
2533 }
2534
2535 ret = irq_nmi_setup(desc);
2536 if (ret) {
2537 pr_err("Failed to setup NMI delivery: irq %u\n", irq);
2538 goto out;
2539 }
2540
2541out:
2542 irq_put_desc_unlock(desc, flags);
2543 return ret;
2544}
2545
2546/**
2547 * teardown_percpu_nmi - undoes NMI setup of IRQ line
2548 * @irq: Interrupt line from which CPU local NMI configuration should be
2549 * removed
2550 *
2551 * This call undoes the setup done by prepare_percpu_nmi().
2552 *
2553 * IRQ line should not be enabled for the current CPU.
2554 *
2555 * As a CPU local operation, this should be called from non-preemptible
2556 * context.
2557 */
2558void teardown_percpu_nmi(unsigned int irq)
2559{
2560 unsigned long flags;
2561 struct irq_desc *desc;
2562
2563 WARN_ON(preemptible());
2564
2565 desc = irq_get_desc_lock(irq, &flags,
2566 IRQ_GET_DESC_CHECK_PERCPU);
2567 if (!desc)
2568 return;
2569
2570 if (WARN_ON(!(desc->istate & IRQS_NMI)))
2571 goto out;
2572
2573 irq_nmi_teardown(desc);
2574out:
2575 irq_put_desc_unlock(desc, flags);
2576}
2577
2578/**
2176 * irq_get_irqchip_state - returns the irqchip state of a interrupt. 2579 * irq_get_irqchip_state - returns the irqchip state of a interrupt.
2177 * @irq: Interrupt line that is forwarded to a VM 2580 * @irq: Interrupt line that is forwarded to a VM
2178 * @which: One of IRQCHIP_STATE_* the caller wants to know about 2581 * @which: One of IRQCHIP_STATE_* the caller wants to know about