Diffstat (limited to 'kernel/irq/manage.c')
 kernel/irq/manage.c | 413 ++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 410 insertions(+), 3 deletions(-)
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 84b54a17b95d..78f3ddeb7fe4 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -196,6 +196,7 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
 	case IRQ_SET_MASK_OK:
 	case IRQ_SET_MASK_OK_DONE:
 		cpumask_copy(desc->irq_common_data.affinity, mask);
+		/* fall through */
 	case IRQ_SET_MASK_OK_NOCOPY:
 		irq_validate_effective_affinity(data);
 		irq_set_thread_affinity(desc);
@@ -341,7 +342,7 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
 	/* The release function is promised process context */
 	might_sleep();
 
-	if (!desc)
+	if (!desc || desc->istate & IRQS_NMI)
 		return -EINVAL;
 
 	/* Complete initialisation of *notify */
@@ -356,8 +357,10 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
 	desc->affinity_notify = notify;
 	raw_spin_unlock_irqrestore(&desc->lock, flags);
 
-	if (old_notify)
+	if (old_notify) {
+		cancel_work_sync(&old_notify->work);
 		kref_put(&old_notify->kref, old_notify->release);
+	}
 
 	return 0;
 }
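The cancel_work_sync() added above closes a use-after-free window: the old notifier's work item can still be queued when the final kref_put() runs, so the release callback could free the structure while the work is pending. A minimal sketch of the driver-side object this protects, assuming a hypothetical driver (all foo_* names are illustrative, not from the source):

	struct foo_ctx {
		struct irq_affinity_notify notify;
		/* ... other driver state ... */
	};

	static void foo_affinity_notify(struct irq_affinity_notify *notify,
					const cpumask_t *mask)
	{
		/* Runs from notify->work, in process context. */
	}

	static void foo_affinity_release(struct kref *ref)
	{
		struct irq_affinity_notify *notify =
			container_of(ref, struct irq_affinity_notify, kref);

		/* Without the cancel_work_sync() above, notify->work could
		 * still be pending when this kfree() runs. */
		kfree(container_of(notify, struct foo_ctx, notify));
	}

The hunk also rejects affinity notifiers on NMI lines outright, since NMI affinity is not managed through this path.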
@@ -553,6 +556,21 @@ bool disable_hardirq(unsigned int irq)
 }
 EXPORT_SYMBOL_GPL(disable_hardirq);
 
+/**
+ * disable_nmi_nosync - disable an nmi without waiting
+ * @irq: Interrupt to disable
+ *
+ * Disable the selected interrupt line. Disables and enables are
+ * nested.
+ * The interrupt to disable must have been requested through request_nmi.
+ * Unlike disable_nmi(), this function does not ensure existing
+ * instances of the IRQ handler have completed before returning.
+ */
+void disable_nmi_nosync(unsigned int irq)
+{
+	disable_irq_nosync(irq);
+}
+
 void __enable_irq(struct irq_desc *desc)
 {
 	switch (desc->depth) {
@@ -609,6 +627,20 @@ out:
 }
 EXPORT_SYMBOL(enable_irq);
 
+/**
+ * enable_nmi - enable handling of an nmi
+ * @irq: Interrupt to enable
+ *
+ * The interrupt to enable must have been requested through request_nmi.
+ * Undoes the effect of one call to disable_nmi(). If this
+ * matches the last disable, processing of interrupts on this
+ * IRQ line is re-enabled.
+ */
+void enable_nmi(unsigned int irq)
+{
+	enable_irq(irq);
+}
+
 static int set_irq_wake_real(unsigned int irq, unsigned int on)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
@@ -644,6 +676,12 @@ int irq_set_irq_wake(unsigned int irq, unsigned int on)
 	if (!desc)
 		return -EINVAL;
 
+	/* Don't use NMIs as wake up interrupts please */
+	if (desc->istate & IRQS_NMI) {
+		ret = -EINVAL;
+		goto out_unlock;
+	}
+
 	/* wakeup-capable irqs can be shared between drivers that
 	 * don't need to have the same sleep mode behaviors.
 	 */
@@ -666,6 +704,8 @@ int irq_set_irq_wake(unsigned int irq, unsigned int on)
 			irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
 		}
 	}
+
+out_unlock:
 	irq_put_desc_busunlock(desc, flags);
 	return ret;
 }
@@ -726,6 +766,7 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned long flags)
 	case IRQ_SET_MASK_OK_DONE:
 		irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
 		irqd_set(&desc->irq_data, flags);
+		/* fall through */
 
 	case IRQ_SET_MASK_OK_NOCOPY:
 		flags = irqd_get_trigger_type(&desc->irq_data);
@@ -740,7 +781,7 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned long flags)
 		ret = 0;
 		break;
 	default:
-		pr_err("Setting trigger mode %lu for irq %u failed (%pF)\n",
+		pr_err("Setting trigger mode %lu for irq %u failed (%pS)\n",
 		       flags, irq_desc_get_irq(desc), chip->irq_set_type);
 	}
 	if (unmask)
@@ -1128,6 +1169,39 @@ static void irq_release_resources(struct irq_desc *desc)
 		c->irq_release_resources(d);
 }
 
+static bool irq_supports_nmi(struct irq_desc *desc)
+{
+	struct irq_data *d = irq_desc_get_irq_data(desc);
+
+#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
+	/* Only IRQs directly managed by the root irqchip can be set as NMI */
+	if (d->parent_data)
+		return false;
+#endif
+	/* Don't support NMIs for chips behind a slow bus */
+	if (d->chip->irq_bus_lock || d->chip->irq_bus_sync_unlock)
+		return false;
+
+	return d->chip->flags & IRQCHIP_SUPPORTS_NMI;
+}
+
+static int irq_nmi_setup(struct irq_desc *desc)
+{
+	struct irq_data *d = irq_desc_get_irq_data(desc);
+	struct irq_chip *c = d->chip;
+
+	return c->irq_nmi_setup ? c->irq_nmi_setup(d) : -EINVAL;
+}
+
+static void irq_nmi_teardown(struct irq_desc *desc)
+{
+	struct irq_data *d = irq_desc_get_irq_data(desc);
+	struct irq_chip *c = d->chip;
+
+	if (c->irq_nmi_teardown)
+		c->irq_nmi_teardown(d);
+}
+
 static int
 setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
 {
@@ -1302,9 +1376,17 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 		 * fields must have IRQF_SHARED set and the bits which
 		 * set the trigger type must match. Also all must
 		 * agree on ONESHOT.
+		 * Interrupt lines used for NMIs cannot be shared.
 		 */
 		unsigned int oldtype;
 
+		if (desc->istate & IRQS_NMI) {
+			pr_err("Invalid attempt to share NMI for %s (irq %d) on irqchip %s.\n",
+				new->name, irq, desc->irq_data.chip->name);
+			ret = -EINVAL;
+			goto out_unlock;
+		}
+
 		/*
 		 * If nobody did set the configuration before, inherit
 		 * the one provided by the requester.
@@ -1756,6 +1838,59 @@ const void *free_irq(unsigned int irq, void *dev_id)
 }
 EXPORT_SYMBOL(free_irq);
 
+/* This function must be called with desc->lock held */
+static const void *__cleanup_nmi(unsigned int irq, struct irq_desc *desc)
+{
+	const char *devname = NULL;
+
+	desc->istate &= ~IRQS_NMI;
+
+	if (!WARN_ON(desc->action == NULL)) {
+		irq_pm_remove_action(desc, desc->action);
+		devname = desc->action->name;
+		unregister_handler_proc(irq, desc->action);
+
+		kfree(desc->action);
+		desc->action = NULL;
+	}
+
+	irq_settings_clr_disable_unlazy(desc);
+	irq_shutdown(desc);
+
+	irq_release_resources(desc);
+
+	irq_chip_pm_put(&desc->irq_data);
+	module_put(desc->owner);
+
+	return devname;
+}
+
+const void *free_nmi(unsigned int irq, void *dev_id)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+	unsigned long flags;
+	const void *devname;
+
+	if (!desc || WARN_ON(!(desc->istate & IRQS_NMI)))
+		return NULL;
+
+	if (WARN_ON(irq_settings_is_per_cpu_devid(desc)))
+		return NULL;
+
+	/* NMI still enabled */
+	if (WARN_ON(desc->depth == 0))
+		disable_nmi_nosync(irq);
+
+	raw_spin_lock_irqsave(&desc->lock, flags);
+
+	irq_nmi_teardown(desc);
+	devname = __cleanup_nmi(irq, desc);
+
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
+
+	return devname;
+}
+
 /**
  * request_threaded_irq - allocate an interrupt line
  * @irq: Interrupt line to allocate
@@ -1925,6 +2060,101 @@ int request_any_context_irq(unsigned int irq, irq_handler_t handler,
 }
 EXPORT_SYMBOL_GPL(request_any_context_irq);
 
+/**
+ * request_nmi - allocate an interrupt line for NMI delivery
+ * @irq: Interrupt line to allocate
+ * @handler: Function to be called when the IRQ occurs.
+ *	     Threaded handler for threaded interrupts.
+ * @irqflags: Interrupt type flags
+ * @name: An ascii name for the claiming device
+ * @dev_id: A cookie passed back to the handler function
+ *
+ * This call allocates interrupt resources and enables the
+ * interrupt line and IRQ handling. It sets up the IRQ line
+ * to be handled as an NMI.
+ *
+ * An interrupt line delivering NMIs cannot be shared and IRQ handling
+ * cannot be threaded.
+ *
+ * Interrupt lines requested for NMI delivering must produce per cpu
+ * interrupts and have auto enabling setting disabled.
+ *
+ * Dev_id must be globally unique. Normally the address of the
+ * device data structure is used as the cookie. Since the handler
+ * receives this value it makes sense to use it.
+ *
+ * If the interrupt line cannot be used to deliver NMIs, function
+ * will fail and return a negative value.
+ */
+int request_nmi(unsigned int irq, irq_handler_t handler,
+		unsigned long irqflags, const char *name, void *dev_id)
+{
+	struct irqaction *action;
+	struct irq_desc *desc;
+	unsigned long flags;
+	int retval;
+
+	if (irq == IRQ_NOTCONNECTED)
+		return -ENOTCONN;
+
+	/* NMI cannot be shared, used for Polling */
+	if (irqflags & (IRQF_SHARED | IRQF_COND_SUSPEND | IRQF_IRQPOLL))
+		return -EINVAL;
+
+	if (!(irqflags & IRQF_PERCPU))
+		return -EINVAL;
+
+	if (!handler)
+		return -EINVAL;
+
+	desc = irq_to_desc(irq);
+
+	if (!desc || irq_settings_can_autoenable(desc) ||
+	    !irq_settings_can_request(desc) ||
+	    WARN_ON(irq_settings_is_per_cpu_devid(desc)) ||
+	    !irq_supports_nmi(desc))
+		return -EINVAL;
+
+	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
+	if (!action)
+		return -ENOMEM;
+
+	action->handler = handler;
+	action->flags = irqflags | IRQF_NO_THREAD | IRQF_NOBALANCING;
+	action->name = name;
+	action->dev_id = dev_id;
+
+	retval = irq_chip_pm_get(&desc->irq_data);
+	if (retval < 0)
+		goto err_out;
+
+	retval = __setup_irq(irq, desc, action);
+	if (retval)
+		goto err_irq_setup;
+
+	raw_spin_lock_irqsave(&desc->lock, flags);
+
+	/* Setup NMI state */
+	desc->istate |= IRQS_NMI;
+	retval = irq_nmi_setup(desc);
+	if (retval) {
+		__cleanup_nmi(irq, desc);
+		raw_spin_unlock_irqrestore(&desc->lock, flags);
+		return -EINVAL;
+	}
+
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
+
+	return 0;
+
+err_irq_setup:
+	irq_chip_pm_put(&desc->irq_data);
+err_out:
+	kfree(action);
+
+	return retval;
+}
+
 void enable_percpu_irq(unsigned int irq, unsigned int type)
 {
 	unsigned int cpu = smp_processor_id();
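A caller-side sketch of the request path above. It assumes the line was marked IRQ_NOAUTOEN beforehand (request_nmi() rejects auto-enabled lines), that the irqchip passes irq_supports_nmi(), and that IRQF_PERCPU is set as the code requires; the foo_* names are hypothetical:

	static irqreturn_t foo_nmi_handler(int irq, void *dev_id)
	{
		/* NMI context: no sleeping, no regular spinlocks. */
		return IRQ_HANDLED;
	}

	static int foo_setup_nmi(unsigned int irq, void *dev)
	{
		int ret;

		irq_set_status_flags(irq, IRQ_NOAUTOEN);

		ret = request_nmi(irq, foo_nmi_handler, IRQF_PERCPU,
				  "foo-nmi", dev);
		if (ret)
			return ret;

		enable_nmi(irq);
		return 0;
	}

Teardown is the mirror image: free_nmi(irq, dev) tears the NMI down and releases the line, WARN-ing (and disabling) if the NMI was still enabled.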
@@ -1959,6 +2189,11 @@ out:
 }
 EXPORT_SYMBOL_GPL(enable_percpu_irq);
 
+void enable_percpu_nmi(unsigned int irq, unsigned int type)
+{
+	enable_percpu_irq(irq, type);
+}
+
 /**
  * irq_percpu_is_enabled - Check whether the per cpu irq is enabled
  * @irq: Linux irq number to check for
@@ -1998,6 +2233,11 @@ void disable_percpu_irq(unsigned int irq)
 }
 EXPORT_SYMBOL_GPL(disable_percpu_irq);
 
+void disable_percpu_nmi(unsigned int irq)
+{
+	disable_percpu_irq(irq);
+}
+
 /*
  * Internal function to unregister a percpu irqaction.
  */
@@ -2029,6 +2269,8 @@ static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
 	/* Found it - now remove it from the list of entries: */
 	desc->action = NULL;
 
+	desc->istate &= ~IRQS_NMI;
+
 	raw_spin_unlock_irqrestore(&desc->lock, flags);
 
 	unregister_handler_proc(irq, action);
@@ -2082,6 +2324,19 @@ void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
 }
 EXPORT_SYMBOL_GPL(free_percpu_irq);
 
+void free_percpu_nmi(unsigned int irq, void __percpu *dev_id)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+
+	if (!desc || !irq_settings_is_per_cpu_devid(desc))
+		return;
+
+	if (WARN_ON(!(desc->istate & IRQS_NMI)))
+		return;
+
+	kfree(__free_percpu_irq(irq, dev_id));
+}
+
 /**
  * setup_percpu_irq - setup a per-cpu interrupt
  * @irq: Interrupt line to setup
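free_percpu_nmi() only unregisters the irqaction; each CPU is expected to have disabled and torn down its local NMI first (see the per-CPU lifecycle sketch at the end of the diff). One possible ordering, sketched with hypothetical foo_* names; on_each_cpu() runs the callback on every CPU with interrupts off, which satisfies the non-preemptible requirement of teardown_percpu_nmi():

	static void foo_cpu_nmi_teardown(void *info)
	{
		unsigned int irq = *(unsigned int *)info;

		disable_percpu_nmi(irq);
		teardown_percpu_nmi(irq);
	}

	static void foo_remove_nmi(unsigned int irq, void __percpu *pcpu_state)
	{
		on_each_cpu(foo_cpu_nmi_teardown, &irq, 1);
		free_percpu_nmi(irq, pcpu_state);
	}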
@@ -2172,6 +2427,158 @@ int __request_percpu_irq(unsigned int irq, irq_handler_t handler,
 EXPORT_SYMBOL_GPL(__request_percpu_irq);
 
 /**
+ * request_percpu_nmi - allocate a percpu interrupt line for NMI delivery
+ * @irq: Interrupt line to allocate
+ * @handler: Function to be called when the IRQ occurs.
+ * @name: An ascii name for the claiming device
+ * @dev_id: A percpu cookie passed back to the handler function
+ *
+ * This call allocates interrupt resources for a per CPU NMI. Per CPU NMIs
+ * have to be setup on each CPU by calling prepare_percpu_nmi() before
+ * being enabled on the same CPU by using enable_percpu_nmi().
+ *
+ * Dev_id must be globally unique. It is a per-cpu variable, and
+ * the handler gets called with the interrupted CPU's instance of
+ * that variable.
+ *
+ * Interrupt lines requested for NMI delivering should have auto enabling
+ * setting disabled.
+ *
+ * If the interrupt line cannot be used to deliver NMIs, function
+ * will fail returning a negative value.
+ */
+int request_percpu_nmi(unsigned int irq, irq_handler_t handler,
+		       const char *name, void __percpu *dev_id)
+{
+	struct irqaction *action;
+	struct irq_desc *desc;
+	unsigned long flags;
+	int retval;
+
+	if (!handler)
+		return -EINVAL;
+
+	desc = irq_to_desc(irq);
+
+	if (!desc || !irq_settings_can_request(desc) ||
+	    !irq_settings_is_per_cpu_devid(desc) ||
+	    irq_settings_can_autoenable(desc) ||
+	    !irq_supports_nmi(desc))
+		return -EINVAL;
+
+	/* The line cannot already be NMI */
+	if (desc->istate & IRQS_NMI)
+		return -EINVAL;
+
+	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
+	if (!action)
+		return -ENOMEM;
+
+	action->handler = handler;
+	action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND | IRQF_NO_THREAD
+		| IRQF_NOBALANCING;
+	action->name = name;
+	action->percpu_dev_id = dev_id;
+
+	retval = irq_chip_pm_get(&desc->irq_data);
+	if (retval < 0)
+		goto err_out;
+
+	retval = __setup_irq(irq, desc, action);
+	if (retval)
+		goto err_irq_setup;
+
+	raw_spin_lock_irqsave(&desc->lock, flags);
+	desc->istate |= IRQS_NMI;
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
+
+	return 0;
+
+err_irq_setup:
+	irq_chip_pm_put(&desc->irq_data);
+err_out:
+	kfree(action);
+
+	return retval;
+}
+
+/**
+ * prepare_percpu_nmi - performs CPU local setup for NMI delivery
+ * @irq: Interrupt line to prepare for NMI delivery
+ *
+ * This call prepares an interrupt line to deliver NMI on the current CPU,
+ * before that interrupt line gets enabled with enable_percpu_nmi().
+ *
+ * As a CPU local operation, this should be called from non-preemptible
+ * context.
+ *
+ * If the interrupt line cannot be used to deliver NMIs, function
+ * will fail returning a negative value.
+ */
+int prepare_percpu_nmi(unsigned int irq)
+{
+	unsigned long flags;
+	struct irq_desc *desc;
+	int ret = 0;
+
+	WARN_ON(preemptible());
+
+	desc = irq_get_desc_lock(irq, &flags,
+				 IRQ_GET_DESC_CHECK_PERCPU);
+	if (!desc)
+		return -EINVAL;
+
+	if (WARN(!(desc->istate & IRQS_NMI),
+		 KERN_ERR "prepare_percpu_nmi called for a non-NMI interrupt: irq %u\n",
+		 irq)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ret = irq_nmi_setup(desc);
+	if (ret) {
+		pr_err("Failed to setup NMI delivery: irq %u\n", irq);
+		goto out;
+	}
+
+out:
+	irq_put_desc_unlock(desc, flags);
+	return ret;
+}
+
+/**
+ * teardown_percpu_nmi - undoes NMI setup of IRQ line
+ * @irq: Interrupt line from which CPU local NMI configuration should be
+ *	 removed
+ *
+ * This call undoes the setup done by prepare_percpu_nmi().
+ *
+ * IRQ line should not be enabled for the current CPU.
+ *
+ * As a CPU local operation, this should be called from non-preemptible
+ * context.
+ */
+void teardown_percpu_nmi(unsigned int irq)
+{
+	unsigned long flags;
+	struct irq_desc *desc;
+
+	WARN_ON(preemptible());
+
+	desc = irq_get_desc_lock(irq, &flags,
+				 IRQ_GET_DESC_CHECK_PERCPU);
+	if (!desc)
+		return;
+
+	if (WARN_ON(!(desc->istate & IRQS_NMI)))
+		goto out;
+
+	irq_nmi_teardown(desc);
+out:
+	irq_put_desc_unlock(desc, flags);
+}
+
+/**
  * irq_get_irqchip_state - returns the irqchip state of a interrupt.
  * @irq: Interrupt line that is forwarded to a VM
  * @which: One of IRQCHIP_STATE_* the caller wants to know about
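Putting the per-CPU pieces together: the line is requested once, then prepared and enabled on every CPU. A minimal sketch under the same assumptions as the earlier examples (IRQ_NOAUTOEN set on a per-cpu-devid line, NMI-capable irqchip); the foo_* names and the use of on_each_cpu() for the per-CPU steps are illustrative choices, not mandated by the patch:

	static irqreturn_t foo_percpu_nmi_handler(int irq, void *dev_id)
	{
		/* dev_id is this CPU's instance of the percpu cookie. */
		return IRQ_HANDLED;
	}

	static void foo_cpu_nmi_setup(void *info)
	{
		unsigned int irq = *(unsigned int *)info;

		/* Runs with IRQs off on each CPU, so non-preemptible. */
		if (!prepare_percpu_nmi(irq))
			enable_percpu_nmi(irq, IRQ_TYPE_NONE);
	}

	static int foo_init_nmi(unsigned int irq, void __percpu *state)
	{
		int ret;

		ret = request_percpu_nmi(irq, foo_percpu_nmi_handler,
					 "foo-percpu-nmi", state);
		if (ret)
			return ret;

		on_each_cpu(foo_cpu_nmi_setup, &irq, 1);
		return 0;
	}

The split between request_percpu_nmi() (line-global, sleepable context) and prepare/enable (CPU-local, non-preemptible) mirrors how irqchips such as the GIC need a per-CPU hardware priority reconfiguration before NMI delivery can be switched on.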