Diffstat (limited to 'kernel')
-rw-r--r--  kernel/irq/affinity.c  | 121
-rw-r--r--  kernel/irq/chip.c      |  66
-rw-r--r--  kernel/irq/debugfs.c   |   8
-rw-r--r--  kernel/irq/handle.c    |   2
-rw-r--r--  kernel/irq/internals.h |  10
-rw-r--r--  kernel/irq/irqdesc.c   |  42
-rw-r--r--  kernel/irq/irqdomain.c |  16
-rw-r--r--  kernel/irq/manage.c    | 406
-rw-r--r--  kernel/kthread.c       |   8
-rw-r--r--  kernel/softirq.c       |   3
10 files changed, 617 insertions(+), 65 deletions(-)
diff --git a/kernel/irq/affinity.c b/kernel/irq/affinity.c
index 45b68b4ea48b..f18cd5aa33e8 100644
--- a/kernel/irq/affinity.c
+++ b/kernel/irq/affinity.c
@@ -9,7 +9,7 @@
9#include <linux/cpu.h> 9#include <linux/cpu.h>
10 10
11static void irq_spread_init_one(struct cpumask *irqmsk, struct cpumask *nmsk, 11static void irq_spread_init_one(struct cpumask *irqmsk, struct cpumask *nmsk,
12 int cpus_per_vec) 12 unsigned int cpus_per_vec)
13{ 13{
14 const struct cpumask *siblmsk; 14 const struct cpumask *siblmsk;
15 int cpu, sibl; 15 int cpu, sibl;
@@ -95,15 +95,17 @@ static int get_nodes_in_cpumask(cpumask_var_t *node_to_cpumask,
95} 95}
96 96
97static int __irq_build_affinity_masks(const struct irq_affinity *affd, 97static int __irq_build_affinity_masks(const struct irq_affinity *affd,
98 int startvec, int numvecs, int firstvec, 98 unsigned int startvec,
99 unsigned int numvecs,
100 unsigned int firstvec,
99 cpumask_var_t *node_to_cpumask, 101 cpumask_var_t *node_to_cpumask,
100 const struct cpumask *cpu_mask, 102 const struct cpumask *cpu_mask,
101 struct cpumask *nmsk, 103 struct cpumask *nmsk,
102 struct irq_affinity_desc *masks) 104 struct irq_affinity_desc *masks)
103{ 105{
104 int n, nodes, cpus_per_vec, extra_vecs, done = 0; 106 unsigned int n, nodes, cpus_per_vec, extra_vecs, done = 0;
105 int last_affv = firstvec + numvecs; 107 unsigned int last_affv = firstvec + numvecs;
106 int curvec = startvec; 108 unsigned int curvec = startvec;
107 nodemask_t nodemsk = NODE_MASK_NONE; 109 nodemask_t nodemsk = NODE_MASK_NONE;
108 110
109 if (!cpumask_weight(cpu_mask)) 111 if (!cpumask_weight(cpu_mask))
@@ -117,18 +119,16 @@ static int __irq_build_affinity_masks(const struct irq_affinity *affd,
117 */ 119 */
118 if (numvecs <= nodes) { 120 if (numvecs <= nodes) {
119 for_each_node_mask(n, nodemsk) { 121 for_each_node_mask(n, nodemsk) {
120 cpumask_or(&masks[curvec].mask, 122 cpumask_or(&masks[curvec].mask, &masks[curvec].mask,
121 &masks[curvec].mask, 123 node_to_cpumask[n]);
122 node_to_cpumask[n]);
123 if (++curvec == last_affv) 124 if (++curvec == last_affv)
124 curvec = firstvec; 125 curvec = firstvec;
125 } 126 }
126 done = numvecs; 127 return numvecs;
127 goto out;
128 } 128 }
129 129
130 for_each_node_mask(n, nodemsk) { 130 for_each_node_mask(n, nodemsk) {
131 int ncpus, v, vecs_to_assign, vecs_per_node; 131 unsigned int ncpus, v, vecs_to_assign, vecs_per_node;
132 132
133 /* Spread the vectors per node */ 133 /* Spread the vectors per node */
134 vecs_per_node = (numvecs - (curvec - firstvec)) / nodes; 134 vecs_per_node = (numvecs - (curvec - firstvec)) / nodes;
@@ -163,8 +163,6 @@ static int __irq_build_affinity_masks(const struct irq_affinity *affd,
163 curvec = firstvec; 163 curvec = firstvec;
164 --nodes; 164 --nodes;
165 } 165 }
166
167out:
168 return done; 166 return done;
169} 167}
170 168
@@ -174,19 +172,24 @@ out:
174 * 2) spread other possible CPUs on these vectors 172 * 2) spread other possible CPUs on these vectors
175 */ 173 */
176static int irq_build_affinity_masks(const struct irq_affinity *affd, 174static int irq_build_affinity_masks(const struct irq_affinity *affd,
177 int startvec, int numvecs, int firstvec, 175 unsigned int startvec, unsigned int numvecs,
178 cpumask_var_t *node_to_cpumask, 176 unsigned int firstvec,
179 struct irq_affinity_desc *masks) 177 struct irq_affinity_desc *masks)
180{ 178{
181 int curvec = startvec, nr_present, nr_others; 179 unsigned int curvec = startvec, nr_present, nr_others;
182 int ret = -ENOMEM; 180 cpumask_var_t *node_to_cpumask;
183 cpumask_var_t nmsk, npresmsk; 181 cpumask_var_t nmsk, npresmsk;
182 int ret = -ENOMEM;
184 183
185 if (!zalloc_cpumask_var(&nmsk, GFP_KERNEL)) 184 if (!zalloc_cpumask_var(&nmsk, GFP_KERNEL))
186 return ret; 185 return ret;
187 186
188 if (!zalloc_cpumask_var(&npresmsk, GFP_KERNEL)) 187 if (!zalloc_cpumask_var(&npresmsk, GFP_KERNEL))
189 goto fail; 188 goto fail_nmsk;
189
190 node_to_cpumask = alloc_node_to_cpumask();
191 if (!node_to_cpumask)
192 goto fail_npresmsk;
190 193
191 ret = 0; 194 ret = 0;
192 /* Stabilize the cpumasks */ 195 /* Stabilize the cpumasks */
@@ -217,13 +220,22 @@ static int irq_build_affinity_masks(const struct irq_affinity *affd,
217 if (nr_present < numvecs) 220 if (nr_present < numvecs)
218 WARN_ON(nr_present + nr_others < numvecs); 221 WARN_ON(nr_present + nr_others < numvecs);
219 222
223 free_node_to_cpumask(node_to_cpumask);
224
225 fail_npresmsk:
220 free_cpumask_var(npresmsk); 226 free_cpumask_var(npresmsk);
221 227
222 fail: 228 fail_nmsk:
223 free_cpumask_var(nmsk); 229 free_cpumask_var(nmsk);
224 return ret; 230 return ret;
225} 231}
226 232
233static void default_calc_sets(struct irq_affinity *affd, unsigned int affvecs)
234{
235 affd->nr_sets = 1;
236 affd->set_size[0] = affvecs;
237}
238
227/** 239/**
228 * irq_create_affinity_masks - Create affinity masks for multiqueue spreading 240 * irq_create_affinity_masks - Create affinity masks for multiqueue spreading
229 * @nvecs: The total number of vectors 241 * @nvecs: The total number of vectors
@@ -232,50 +244,62 @@ static int irq_build_affinity_masks(const struct irq_affinity *affd,
232 * Returns the irq_affinity_desc pointer or NULL if allocation failed. 244 * Returns the irq_affinity_desc pointer or NULL if allocation failed.
233 */ 245 */
234struct irq_affinity_desc * 246struct irq_affinity_desc *
235irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd) 247irq_create_affinity_masks(unsigned int nvecs, struct irq_affinity *affd)
236{ 248{
237 int affvecs = nvecs - affd->pre_vectors - affd->post_vectors; 249 unsigned int affvecs, curvec, usedvecs, i;
238 int curvec, usedvecs;
239 cpumask_var_t *node_to_cpumask;
240 struct irq_affinity_desc *masks = NULL; 250 struct irq_affinity_desc *masks = NULL;
241 int i, nr_sets;
242 251
243 /* 252 /*
244 * If there aren't any vectors left after applying the pre/post 253 * Determine the number of vectors which need interrupt affinities
245 * vectors don't bother with assigning affinity. 254 * assigned. If the pre/post request exhausts the available vectors
255 * then nothing to do here except for invoking the calc_sets()
256 * callback so the device driver can adjust to the situation. If there
257 * is only a single vector, then managing the queue is pointless as
258 * well.
246 */ 259 */
247 if (nvecs == affd->pre_vectors + affd->post_vectors) 260 if (nvecs > 1 && nvecs > affd->pre_vectors + affd->post_vectors)
261 affvecs = nvecs - affd->pre_vectors - affd->post_vectors;
262 else
263 affvecs = 0;
264
265 /*
266 * Simple invocations do not provide a calc_sets() callback. Install
267 * the generic one.
268 */
269 if (!affd->calc_sets)
270 affd->calc_sets = default_calc_sets;
271
272 /* Recalculate the sets */
273 affd->calc_sets(affd, affvecs);
274
275 if (WARN_ON_ONCE(affd->nr_sets > IRQ_AFFINITY_MAX_SETS))
248 return NULL; 276 return NULL;
249 277
250 node_to_cpumask = alloc_node_to_cpumask(); 278 /* Nothing to assign? */
251 if (!node_to_cpumask) 279 if (!affvecs)
252 return NULL; 280 return NULL;
253 281
254 masks = kcalloc(nvecs, sizeof(*masks), GFP_KERNEL); 282 masks = kcalloc(nvecs, sizeof(*masks), GFP_KERNEL);
255 if (!masks) 283 if (!masks)
256 goto outnodemsk; 284 return NULL;
257 285
258 /* Fill out vectors at the beginning that don't need affinity */ 286 /* Fill out vectors at the beginning that don't need affinity */
259 for (curvec = 0; curvec < affd->pre_vectors; curvec++) 287 for (curvec = 0; curvec < affd->pre_vectors; curvec++)
260 cpumask_copy(&masks[curvec].mask, irq_default_affinity); 288 cpumask_copy(&masks[curvec].mask, irq_default_affinity);
289
261 /* 290 /*
262 * Spread on present CPUs starting from affd->pre_vectors. If we 291 * Spread on present CPUs starting from affd->pre_vectors. If we
263 * have multiple sets, build each sets affinity mask separately. 292 * have multiple sets, build each sets affinity mask separately.
264 */ 293 */
265 nr_sets = affd->nr_sets; 294 for (i = 0, usedvecs = 0; i < affd->nr_sets; i++) {
266 if (!nr_sets) 295 unsigned int this_vecs = affd->set_size[i];
267 nr_sets = 1;
268
269 for (i = 0, usedvecs = 0; i < nr_sets; i++) {
270 int this_vecs = affd->sets ? affd->sets[i] : affvecs;
271 int ret; 296 int ret;
272 297
273 ret = irq_build_affinity_masks(affd, curvec, this_vecs, 298 ret = irq_build_affinity_masks(affd, curvec, this_vecs,
274 curvec, node_to_cpumask, masks); 299 curvec, masks);
275 if (ret) { 300 if (ret) {
276 kfree(masks); 301 kfree(masks);
277 masks = NULL; 302 return NULL;
278 goto outnodemsk;
279 } 303 }
280 curvec += this_vecs; 304 curvec += this_vecs;
281 usedvecs += this_vecs; 305 usedvecs += this_vecs;
@@ -293,8 +317,6 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
293 for (i = affd->pre_vectors; i < nvecs - affd->post_vectors; i++) 317 for (i = affd->pre_vectors; i < nvecs - affd->post_vectors; i++)
294 masks[i].is_managed = 1; 318 masks[i].is_managed = 1;
295 319
296outnodemsk:
297 free_node_to_cpumask(node_to_cpumask);
298 return masks; 320 return masks;
299} 321}
300 322
@@ -304,25 +326,22 @@ outnodemsk:
304 * @maxvec: The maximum number of vectors available 326 * @maxvec: The maximum number of vectors available
305 * @affd: Description of the affinity requirements 327 * @affd: Description of the affinity requirements
306 */ 328 */
307int irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity *affd) 329unsigned int irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
330 const struct irq_affinity *affd)
308{ 331{
309 int resv = affd->pre_vectors + affd->post_vectors; 332 unsigned int resv = affd->pre_vectors + affd->post_vectors;
310 int vecs = maxvec - resv; 333 unsigned int set_vecs;
311 int set_vecs;
312 334
313 if (resv > minvec) 335 if (resv > minvec)
314 return 0; 336 return 0;
315 337
316 if (affd->nr_sets) { 338 if (affd->calc_sets) {
317 int i; 339 set_vecs = maxvec - resv;
318
319 for (i = 0, set_vecs = 0; i < affd->nr_sets; i++)
320 set_vecs += affd->sets[i];
321 } else { 340 } else {
322 get_online_cpus(); 341 get_online_cpus();
323 set_vecs = cpumask_weight(cpu_possible_mask); 342 set_vecs = cpumask_weight(cpu_possible_mask);
324 put_online_cpus(); 343 put_online_cpus();
325 } 344 }
326 345
327 return resv + min(set_vecs, vecs); 346 return resv + min(set_vecs, maxvec - resv);
328} 347}
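
The affinity changes above replace the fixed sets[] array with a driver callback: irq_create_affinity_masks() now takes a non-const struct irq_affinity so it can install default_calc_sets() and re-run calc_sets() against the vector count that was actually granted. A minimal driver-side sketch of that interface follows; the foo_* names, the 50/50 read/write split and the vector counts are illustrative assumptions, not part of this patch.

#include <linux/interrupt.h>
#include <linux/pci.h>

/*
 * Driver-side calc_sets() callback: recompute the queue-set sizes for
 * whatever number of affinity vectors is actually available.
 */
static void foo_calc_irq_sets(struct irq_affinity *affd, unsigned int nvecs)
{
        affd->nr_sets = 2;
        affd->set_size[0] = nvecs / 2;          /* "read" queues  */
        affd->set_size[1] = nvecs - nvecs / 2;  /* "write" queues */
}

static int foo_setup_irqs(struct pci_dev *pdev)
{
        struct irq_affinity affd = {
                .pre_vectors = 1,               /* one non-queue (admin) vector */
                .calc_sets   = foo_calc_irq_sets,
        };

        /* Returns the number of vectors allocated or a negative error. */
        return pci_alloc_irq_vectors_affinity(pdev, 2, 64,
                                               PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
                                               &affd);
}

Because allocation may succeed with fewer vectors than requested, the core can invoke the callback again with a smaller count, which is why set sizes are recomputed rather than fixed up front.
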
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 34e969069488..99b7dd6982a4 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -730,6 +730,37 @@ out:
730EXPORT_SYMBOL_GPL(handle_fasteoi_irq); 730EXPORT_SYMBOL_GPL(handle_fasteoi_irq);
731 731
732/** 732/**
733 * handle_fasteoi_nmi - irq handler for NMI interrupt lines
734 * @desc: the interrupt description structure for this irq
735 *
736 * A simple NMI-safe handler, considering the restrictions
737 * from request_nmi.
738 *
739 * Only a single callback will be issued to the chip: an ->eoi()
740 * call when the interrupt has been serviced. This enables support
741 * for modern forms of interrupt handlers, which handle the flow
742 * details in hardware, transparently.
743 */
744void handle_fasteoi_nmi(struct irq_desc *desc)
745{
746 struct irq_chip *chip = irq_desc_get_chip(desc);
747 struct irqaction *action = desc->action;
748 unsigned int irq = irq_desc_get_irq(desc);
749 irqreturn_t res;
750
751 trace_irq_handler_entry(irq, action);
752 /*
753 * NMIs cannot be shared, there is only one action.
754 */
755 res = action->handler(irq, action->dev_id);
756 trace_irq_handler_exit(irq, action, res);
757
758 if (chip->irq_eoi)
759 chip->irq_eoi(&desc->irq_data);
760}
761EXPORT_SYMBOL_GPL(handle_fasteoi_nmi);
762
763/**
733 * handle_edge_irq - edge type IRQ handler 764 * handle_edge_irq - edge type IRQ handler
734 * @desc: the interrupt description structure for this irq 765 * @desc: the interrupt description structure for this irq
735 * 766 *
@@ -855,7 +886,11 @@ void handle_percpu_irq(struct irq_desc *desc)
855{ 886{
856 struct irq_chip *chip = irq_desc_get_chip(desc); 887 struct irq_chip *chip = irq_desc_get_chip(desc);
857 888
858 kstat_incr_irqs_this_cpu(desc); 889 /*
890 * PER CPU interrupts are not serialized. Do not touch
891 * desc->tot_count.
892 */
893 __kstat_incr_irqs_this_cpu(desc);
859 894
860 if (chip->irq_ack) 895 if (chip->irq_ack)
861 chip->irq_ack(&desc->irq_data); 896 chip->irq_ack(&desc->irq_data);
@@ -884,7 +919,11 @@ void handle_percpu_devid_irq(struct irq_desc *desc)
884 unsigned int irq = irq_desc_get_irq(desc); 919 unsigned int irq = irq_desc_get_irq(desc);
885 irqreturn_t res; 920 irqreturn_t res;
886 921
887 kstat_incr_irqs_this_cpu(desc); 922 /*
923 * PER CPU interrupts are not serialized. Do not touch
924 * desc->tot_count.
925 */
926 __kstat_incr_irqs_this_cpu(desc);
888 927
889 if (chip->irq_ack) 928 if (chip->irq_ack)
890 chip->irq_ack(&desc->irq_data); 929 chip->irq_ack(&desc->irq_data);
@@ -908,6 +947,29 @@ void handle_percpu_devid_irq(struct irq_desc *desc)
908 chip->irq_eoi(&desc->irq_data); 947 chip->irq_eoi(&desc->irq_data);
909} 948}
910 949
950/**
951 * handle_percpu_devid_fasteoi_nmi - Per CPU local NMI handler with per cpu
952 * dev ids
953 * @desc: the interrupt description structure for this irq
954 *
955 * Similar to handle_fasteoi_nmi, but handling the dev_id cookie
956 * as a percpu pointer.
957 */
958void handle_percpu_devid_fasteoi_nmi(struct irq_desc *desc)
959{
960 struct irq_chip *chip = irq_desc_get_chip(desc);
961 struct irqaction *action = desc->action;
962 unsigned int irq = irq_desc_get_irq(desc);
963 irqreturn_t res;
964
965 trace_irq_handler_entry(irq, action);
966 res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id));
967 trace_irq_handler_exit(irq, action, res);
968
969 if (chip->irq_eoi)
970 chip->irq_eoi(&desc->irq_data);
971}
972
911static void 973static void
912__irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle, 974__irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
913 int is_chained, const char *name) 975 int is_chained, const char *name)
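
handle_fasteoi_nmi() and handle_percpu_devid_fasteoi_nmi() are flow handlers that an NMI-capable root irqchip installs when a line is switched into NMI mode. A minimal sketch of the chip side follows, assuming the chip's irq_nmi_setup()/irq_nmi_teardown() callbacks (invoked under desc->lock by the request/free paths in manage.c below) simply flip the flow handler; the foo_* names are made up and the hardware priority/routing programming a real driver needs is omitted.

#include <linux/irq.h>

static void foo_eoi(struct irq_data *d)
{
        /* A real driver would write the hardware EOI register here. */
}

static int foo_irq_nmi_setup(struct irq_data *d)
{
        /* Hardware configuration omitted; just switch the flow handler. */
        if (irqd_is_per_cpu(d))
                irq_set_handler_locked(d, handle_percpu_devid_fasteoi_nmi);
        else
                irq_set_handler_locked(d, handle_fasteoi_nmi);
        return 0;
}

static void foo_irq_nmi_teardown(struct irq_data *d)
{
        if (irqd_is_per_cpu(d))
                irq_set_handler_locked(d, handle_percpu_devid_irq);
        else
                irq_set_handler_locked(d, handle_fasteoi_irq);
}

static struct irq_chip foo_chip = {
        .name             = "FOO",
        .irq_eoi          = foo_eoi,    /* mask/unmask etc. omitted */
        .irq_nmi_setup    = foo_irq_nmi_setup,
        .irq_nmi_teardown = foo_irq_nmi_teardown,
        .flags            = IRQCHIP_SUPPORTS_NMI,
};
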
diff --git a/kernel/irq/debugfs.c b/kernel/irq/debugfs.c
index 6f636136cccc..516c00a5e867 100644
--- a/kernel/irq/debugfs.c
+++ b/kernel/irq/debugfs.c
@@ -56,6 +56,7 @@ static const struct irq_bit_descr irqchip_flags[] = {
56 BIT_MASK_DESCR(IRQCHIP_ONESHOT_SAFE), 56 BIT_MASK_DESCR(IRQCHIP_ONESHOT_SAFE),
57 BIT_MASK_DESCR(IRQCHIP_EOI_THREADED), 57 BIT_MASK_DESCR(IRQCHIP_EOI_THREADED),
58 BIT_MASK_DESCR(IRQCHIP_SUPPORTS_LEVEL_MSI), 58 BIT_MASK_DESCR(IRQCHIP_SUPPORTS_LEVEL_MSI),
59 BIT_MASK_DESCR(IRQCHIP_SUPPORTS_NMI),
59}; 60};
60 61
61static void 62static void
@@ -140,6 +141,7 @@ static const struct irq_bit_descr irqdesc_istates[] = {
140 BIT_MASK_DESCR(IRQS_WAITING), 141 BIT_MASK_DESCR(IRQS_WAITING),
141 BIT_MASK_DESCR(IRQS_PENDING), 142 BIT_MASK_DESCR(IRQS_PENDING),
142 BIT_MASK_DESCR(IRQS_SUSPENDED), 143 BIT_MASK_DESCR(IRQS_SUSPENDED),
144 BIT_MASK_DESCR(IRQS_NMI),
143}; 145};
144 146
145 147
@@ -203,8 +205,8 @@ static ssize_t irq_debug_write(struct file *file, const char __user *user_buf,
203 chip_bus_lock(desc); 205 chip_bus_lock(desc);
204 raw_spin_lock_irqsave(&desc->lock, flags); 206 raw_spin_lock_irqsave(&desc->lock, flags);
205 207
206 if (irq_settings_is_level(desc)) { 208 if (irq_settings_is_level(desc) || desc->istate & IRQS_NMI) {
207 /* Can't do level, sorry */ 209 /* Can't do level nor NMIs, sorry */
208 err = -EINVAL; 210 err = -EINVAL;
209 } else { 211 } else {
210 desc->istate |= IRQS_PENDING; 212 desc->istate |= IRQS_PENDING;
@@ -256,8 +258,6 @@ static int __init irq_debugfs_init(void)
256 int irq; 258 int irq;
257 259
258 root_dir = debugfs_create_dir("irq", NULL); 260 root_dir = debugfs_create_dir("irq", NULL);
259 if (!root_dir)
260 return -ENOMEM;
261 261
262 irq_domain_debugfs_init(root_dir); 262 irq_domain_debugfs_init(root_dir);
263 263
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 38554bc35375..6df5ddfdb0f8 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -166,7 +166,7 @@ irqreturn_t __handle_irq_event_percpu(struct irq_desc *desc, unsigned int *flags
166 166
167 __irq_wake_thread(desc, action); 167 __irq_wake_thread(desc, action);
168 168
169 /* Fall through to add to randomness */ 169 /* Fall through - to add to randomness */
170 case IRQ_HANDLED: 170 case IRQ_HANDLED:
171 *flags |= action->flags; 171 *flags |= action->flags;
172 break; 172 break;
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index ca6afa267070..70c3053bc1f6 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -49,6 +49,7 @@ enum {
49 * IRQS_WAITING - irq is waiting 49 * IRQS_WAITING - irq is waiting
50 * IRQS_PENDING - irq is pending and replayed later 50 * IRQS_PENDING - irq is pending and replayed later
51 * IRQS_SUSPENDED - irq is suspended 51 * IRQS_SUSPENDED - irq is suspended
52 * IRQS_NMI - irq line is used to deliver NMIs
52 */ 53 */
53enum { 54enum {
54 IRQS_AUTODETECT = 0x00000001, 55 IRQS_AUTODETECT = 0x00000001,
@@ -60,6 +61,7 @@ enum {
60 IRQS_PENDING = 0x00000200, 61 IRQS_PENDING = 0x00000200,
61 IRQS_SUSPENDED = 0x00000800, 62 IRQS_SUSPENDED = 0x00000800,
62 IRQS_TIMINGS = 0x00001000, 63 IRQS_TIMINGS = 0x00001000,
64 IRQS_NMI = 0x00002000,
63}; 65};
64 66
65#include "debug.h" 67#include "debug.h"
@@ -242,12 +244,18 @@ static inline void irq_state_set_masked(struct irq_desc *desc)
242 244
243#undef __irqd_to_state 245#undef __irqd_to_state
244 246
245static inline void kstat_incr_irqs_this_cpu(struct irq_desc *desc) 247static inline void __kstat_incr_irqs_this_cpu(struct irq_desc *desc)
246{ 248{
247 __this_cpu_inc(*desc->kstat_irqs); 249 __this_cpu_inc(*desc->kstat_irqs);
248 __this_cpu_inc(kstat.irqs_sum); 250 __this_cpu_inc(kstat.irqs_sum);
249} 251}
250 252
253static inline void kstat_incr_irqs_this_cpu(struct irq_desc *desc)
254{
255 __kstat_incr_irqs_this_cpu(desc);
256 desc->tot_count++;
257}
258
251static inline int irq_desc_get_node(struct irq_desc *desc) 259static inline int irq_desc_get_node(struct irq_desc *desc)
252{ 260{
253 return irq_common_data_get_node(&desc->irq_common_data); 261 return irq_common_data_get_node(&desc->irq_common_data);
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index ef8ad36cadcf..13539e12cd80 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -119,6 +119,7 @@ static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
119 desc->depth = 1; 119 desc->depth = 1;
120 desc->irq_count = 0; 120 desc->irq_count = 0;
121 desc->irqs_unhandled = 0; 121 desc->irqs_unhandled = 0;
122 desc->tot_count = 0;
122 desc->name = NULL; 123 desc->name = NULL;
123 desc->owner = owner; 124 desc->owner = owner;
124 for_each_possible_cpu(cpu) 125 for_each_possible_cpu(cpu)
@@ -669,6 +670,41 @@ int __handle_domain_irq(struct irq_domain *domain, unsigned int hwirq,
669 set_irq_regs(old_regs); 670 set_irq_regs(old_regs);
670 return ret; 671 return ret;
671} 672}
673
674#ifdef CONFIG_IRQ_DOMAIN
675/**
676 * handle_domain_nmi - Invoke the handler for a HW irq belonging to a domain
677 * @domain: The domain where to perform the lookup
678 * @hwirq: The HW irq number to convert to a logical one
679 * @regs: Register file coming from the low-level handling code
680 *
681 * Returns: 0 on success, or -EINVAL if conversion has failed
682 */
683int handle_domain_nmi(struct irq_domain *domain, unsigned int hwirq,
684 struct pt_regs *regs)
685{
686 struct pt_regs *old_regs = set_irq_regs(regs);
687 unsigned int irq;
688 int ret = 0;
689
690 nmi_enter();
691
692 irq = irq_find_mapping(domain, hwirq);
693
694 /*
695 * ack_bad_irq is not NMI-safe, just report
696 * an invalid interrupt.
697 */
698 if (likely(irq))
699 generic_handle_irq(irq);
700 else
701 ret = -EINVAL;
702
703 nmi_exit();
704 set_irq_regs(old_regs);
705 return ret;
706}
707#endif
672#endif 708#endif
673 709
674/* Dynamic interrupt handling */ 710/* Dynamic interrupt handling */
@@ -919,11 +955,15 @@ unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
919unsigned int kstat_irqs(unsigned int irq) 955unsigned int kstat_irqs(unsigned int irq)
920{ 956{
921 struct irq_desc *desc = irq_to_desc(irq); 957 struct irq_desc *desc = irq_to_desc(irq);
922 int cpu;
923 unsigned int sum = 0; 958 unsigned int sum = 0;
959 int cpu;
924 960
925 if (!desc || !desc->kstat_irqs) 961 if (!desc || !desc->kstat_irqs)
926 return 0; 962 return 0;
963 if (!irq_settings_is_per_cpu_devid(desc) &&
964 !irq_settings_is_per_cpu(desc))
965 return desc->tot_count;
966
927 for_each_possible_cpu(cpu) 967 for_each_possible_cpu(cpu)
928 sum += *per_cpu_ptr(desc->kstat_irqs, cpu); 968 sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
929 return sum; 969 return sum;
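
handle_domain_nmi() is the arch-facing entry point: the low-level exception code resolves the hardware irq through the domain while the core takes care of the nmi_enter()/nmi_exit() bracketing. A hedged sketch of such an entry path follows; the foo_* names and the register read are assumptions. (The kstat change in the same file is unrelated: kstat_irqs() can now return the new desc->tot_count for ordinary interrupts instead of walking every possible CPU.)

#include <linux/irqdesc.h>
#include <linux/irqdomain.h>
#include <linux/printk.h>
#include <linux/types.h>
#include <asm/ptrace.h>

static struct irq_domain *foo_domain;   /* set up by the irqchip at probe time */

static u32 foo_read_pending_nmi(void)
{
        return 0;       /* a real handler would read the controller's ack register */
}

void foo_handle_nmi(struct pt_regs *regs)
{
        u32 hwirq = foo_read_pending_nmi();

        /* The core brackets the handler with nmi_enter()/nmi_exit(). */
        if (handle_domain_nmi(foo_domain, hwirq, regs))
                pr_warn_ratelimited("unexpected NMI, hwirq %u\n", hwirq);
}
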
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index 8b0be4bd6565..3bf9793d8825 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -458,6 +458,20 @@ void irq_set_default_host(struct irq_domain *domain)
458} 458}
459EXPORT_SYMBOL_GPL(irq_set_default_host); 459EXPORT_SYMBOL_GPL(irq_set_default_host);
460 460
461/**
462 * irq_get_default_host() - Retrieve the "default" irq domain
463 *
464 * Returns: the default domain, if any.
465 *
466 * Modern code should never use this. This should only be used on
467 * systems that cannot implement a firmware->fwnode mapping (which
468 * both DT and ACPI provide).
469 */
470struct irq_domain *irq_get_default_host(void)
471{
472 return irq_default_domain;
473}
474
461static void irq_domain_clear_mapping(struct irq_domain *domain, 475static void irq_domain_clear_mapping(struct irq_domain *domain,
462 irq_hw_number_t hwirq) 476 irq_hw_number_t hwirq)
463{ 477{
@@ -1749,8 +1763,6 @@ void __init irq_domain_debugfs_init(struct dentry *root)
1749 struct irq_domain *d; 1763 struct irq_domain *d;
1750 1764
1751 domain_dir = debugfs_create_dir("domains", root); 1765 domain_dir = debugfs_create_dir("domains", root);
1752 if (!domain_dir)
1753 return;
1754 1766
1755 debugfs_create_file("default", 0444, domain_dir, NULL, 1767 debugfs_create_file("default", 0444, domain_dir, NULL,
1756 &irq_domain_debug_fops); 1768 &irq_domain_debug_fops);
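
irq_get_default_host() only exposes the domain registered via irq_set_default_host(), for legacy setups that have no firmware node to look up. A small, purely illustrative sketch (the hwirq number and foo_* name are assumed):

#include <linux/irqdomain.h>

static unsigned int foo_get_timer_irq(void)
{
        struct irq_domain *d = irq_get_default_host();

        /* 0 means "no mapping", matching irq_create_mapping()'s failure value. */
        return d ? irq_create_mapping(d, 27) : 0;
}
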
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 84b54a17b95d..9ec34a2a6638 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -341,7 +341,7 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
341 /* The release function is promised process context */ 341 /* The release function is promised process context */
342 might_sleep(); 342 might_sleep();
343 343
344 if (!desc) 344 if (!desc || desc->istate & IRQS_NMI)
345 return -EINVAL; 345 return -EINVAL;
346 346
347 /* Complete initialisation of *notify */ 347 /* Complete initialisation of *notify */
@@ -553,6 +553,21 @@ bool disable_hardirq(unsigned int irq)
553} 553}
554EXPORT_SYMBOL_GPL(disable_hardirq); 554EXPORT_SYMBOL_GPL(disable_hardirq);
555 555
556/**
557 * disable_nmi_nosync - disable an nmi without waiting
558 * @irq: Interrupt to disable
559 *
560 * Disable the selected interrupt line. Disables and enables are
561 * nested.
562 * The interrupt to disable must have been requested through request_nmi.
563 * Unlike disable_nmi(), this function does not ensure existing
564 * instances of the IRQ handler have completed before returning.
565 */
566void disable_nmi_nosync(unsigned int irq)
567{
568 disable_irq_nosync(irq);
569}
570
556void __enable_irq(struct irq_desc *desc) 571void __enable_irq(struct irq_desc *desc)
557{ 572{
558 switch (desc->depth) { 573 switch (desc->depth) {
@@ -609,6 +624,20 @@ out:
609} 624}
610EXPORT_SYMBOL(enable_irq); 625EXPORT_SYMBOL(enable_irq);
611 626
627/**
628 * enable_nmi - enable handling of an nmi
629 * @irq: Interrupt to enable
630 *
631 * The interrupt to enable must have been requested through request_nmi.
632 * Undoes the effect of one call to disable_nmi(). If this
633 * matches the last disable, processing of interrupts on this
634 * IRQ line is re-enabled.
635 */
636void enable_nmi(unsigned int irq)
637{
638 enable_irq(irq);
639}
640
612static int set_irq_wake_real(unsigned int irq, unsigned int on) 641static int set_irq_wake_real(unsigned int irq, unsigned int on)
613{ 642{
614 struct irq_desc *desc = irq_to_desc(irq); 643 struct irq_desc *desc = irq_to_desc(irq);
@@ -644,6 +673,12 @@ int irq_set_irq_wake(unsigned int irq, unsigned int on)
644 if (!desc) 673 if (!desc)
645 return -EINVAL; 674 return -EINVAL;
646 675
676 /* Don't use NMIs as wake up interrupts please */
677 if (desc->istate & IRQS_NMI) {
678 ret = -EINVAL;
679 goto out_unlock;
680 }
681
647 /* wakeup-capable irqs can be shared between drivers that 682 /* wakeup-capable irqs can be shared between drivers that
648 * don't need to have the same sleep mode behaviors. 683 * don't need to have the same sleep mode behaviors.
649 */ 684 */
@@ -666,6 +701,8 @@ int irq_set_irq_wake(unsigned int irq, unsigned int on)
666 irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE); 701 irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
667 } 702 }
668 } 703 }
704
705out_unlock:
669 irq_put_desc_busunlock(desc, flags); 706 irq_put_desc_busunlock(desc, flags);
670 return ret; 707 return ret;
671} 708}
@@ -726,6 +763,7 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned long flags)
726 case IRQ_SET_MASK_OK_DONE: 763 case IRQ_SET_MASK_OK_DONE:
727 irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK); 764 irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
728 irqd_set(&desc->irq_data, flags); 765 irqd_set(&desc->irq_data, flags);
766 /* fall through */
729 767
730 case IRQ_SET_MASK_OK_NOCOPY: 768 case IRQ_SET_MASK_OK_NOCOPY:
731 flags = irqd_get_trigger_type(&desc->irq_data); 769 flags = irqd_get_trigger_type(&desc->irq_data);
@@ -1128,6 +1166,39 @@ static void irq_release_resources(struct irq_desc *desc)
1128 c->irq_release_resources(d); 1166 c->irq_release_resources(d);
1129} 1167}
1130 1168
1169static bool irq_supports_nmi(struct irq_desc *desc)
1170{
1171 struct irq_data *d = irq_desc_get_irq_data(desc);
1172
1173#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
1174 /* Only IRQs directly managed by the root irqchip can be set as NMI */
1175 if (d->parent_data)
1176 return false;
1177#endif
1178 /* Don't support NMIs for chips behind a slow bus */
1179 if (d->chip->irq_bus_lock || d->chip->irq_bus_sync_unlock)
1180 return false;
1181
1182 return d->chip->flags & IRQCHIP_SUPPORTS_NMI;
1183}
1184
1185static int irq_nmi_setup(struct irq_desc *desc)
1186{
1187 struct irq_data *d = irq_desc_get_irq_data(desc);
1188 struct irq_chip *c = d->chip;
1189
1190 return c->irq_nmi_setup ? c->irq_nmi_setup(d) : -EINVAL;
1191}
1192
1193static void irq_nmi_teardown(struct irq_desc *desc)
1194{
1195 struct irq_data *d = irq_desc_get_irq_data(desc);
1196 struct irq_chip *c = d->chip;
1197
1198 if (c->irq_nmi_teardown)
1199 c->irq_nmi_teardown(d);
1200}
1201
1131static int 1202static int
1132setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary) 1203setup_irq_thread(struct irqaction *new, unsigned int irq, bool secondary)
1133{ 1204{
@@ -1302,9 +1373,17 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
1302 * fields must have IRQF_SHARED set and the bits which 1373 * fields must have IRQF_SHARED set and the bits which
1303 * set the trigger type must match. Also all must 1374 * set the trigger type must match. Also all must
1304 * agree on ONESHOT. 1375 * agree on ONESHOT.
1376 * Interrupt lines used for NMIs cannot be shared.
1305 */ 1377 */
1306 unsigned int oldtype; 1378 unsigned int oldtype;
1307 1379
1380 if (desc->istate & IRQS_NMI) {
1381 pr_err("Invalid attempt to share NMI for %s (irq %d) on irqchip %s.\n",
1382 new->name, irq, desc->irq_data.chip->name);
1383 ret = -EINVAL;
1384 goto out_unlock;
1385 }
1386
1308 /* 1387 /*
1309 * If nobody did set the configuration before, inherit 1388 * If nobody did set the configuration before, inherit
1310 * the one provided by the requester. 1389 * the one provided by the requester.
@@ -1756,6 +1835,59 @@ const void *free_irq(unsigned int irq, void *dev_id)
1756} 1835}
1757EXPORT_SYMBOL(free_irq); 1836EXPORT_SYMBOL(free_irq);
1758 1837
1838/* This function must be called with desc->lock held */
1839static const void *__cleanup_nmi(unsigned int irq, struct irq_desc *desc)
1840{
1841 const char *devname = NULL;
1842
1843 desc->istate &= ~IRQS_NMI;
1844
1845 if (!WARN_ON(desc->action == NULL)) {
1846 irq_pm_remove_action(desc, desc->action);
1847 devname = desc->action->name;
1848 unregister_handler_proc(irq, desc->action);
1849
1850 kfree(desc->action);
1851 desc->action = NULL;
1852 }
1853
1854 irq_settings_clr_disable_unlazy(desc);
1855 irq_shutdown(desc);
1856
1857 irq_release_resources(desc);
1858
1859 irq_chip_pm_put(&desc->irq_data);
1860 module_put(desc->owner);
1861
1862 return devname;
1863}
1864
1865const void *free_nmi(unsigned int irq, void *dev_id)
1866{
1867 struct irq_desc *desc = irq_to_desc(irq);
1868 unsigned long flags;
1869 const void *devname;
1870
1871 if (!desc || WARN_ON(!(desc->istate & IRQS_NMI)))
1872 return NULL;
1873
1874 if (WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1875 return NULL;
1876
1877 /* NMI still enabled */
1878 if (WARN_ON(desc->depth == 0))
1879 disable_nmi_nosync(irq);
1880
1881 raw_spin_lock_irqsave(&desc->lock, flags);
1882
1883 irq_nmi_teardown(desc);
1884 devname = __cleanup_nmi(irq, desc);
1885
1886 raw_spin_unlock_irqrestore(&desc->lock, flags);
1887
1888 return devname;
1889}
1890
1759/** 1891/**
1760 * request_threaded_irq - allocate an interrupt line 1892 * request_threaded_irq - allocate an interrupt line
1761 * @irq: Interrupt line to allocate 1893 * @irq: Interrupt line to allocate
@@ -1925,6 +2057,101 @@ int request_any_context_irq(unsigned int irq, irq_handler_t handler,
1925} 2057}
1926EXPORT_SYMBOL_GPL(request_any_context_irq); 2058EXPORT_SYMBOL_GPL(request_any_context_irq);
1927 2059
2060/**
2061 * request_nmi - allocate an interrupt line for NMI delivery
2062 * @irq: Interrupt line to allocate
2063 * @handler: Function to be called when the IRQ occurs.
2064 * Threaded handler for threaded interrupts.
2065 * @irqflags: Interrupt type flags
2066 * @name: An ascii name for the claiming device
2067 * @dev_id: A cookie passed back to the handler function
2068 *
2069 * This call allocates interrupt resources and enables the
2070 * interrupt line and IRQ handling. It sets up the IRQ line
2071 * to be handled as an NMI.
2072 *
2073 * An interrupt line delivering NMIs cannot be shared and IRQ handling
2074 * cannot be threaded.
2075 *
2076 * Interrupt lines requested for NMI delivering must produce per cpu
2077 * interrupts and have auto enabling setting disabled.
2078 *
2079 * Dev_id must be globally unique. Normally the address of the
2080 * device data structure is used as the cookie. Since the handler
2081 * receives this value it makes sense to use it.
2082 *
2083 * If the interrupt line cannot be used to deliver NMIs, function
2084 * will fail and return a negative value.
2085 */
2086int request_nmi(unsigned int irq, irq_handler_t handler,
2087 unsigned long irqflags, const char *name, void *dev_id)
2088{
2089 struct irqaction *action;
2090 struct irq_desc *desc;
2091 unsigned long flags;
2092 int retval;
2093
2094 if (irq == IRQ_NOTCONNECTED)
2095 return -ENOTCONN;
2096
2097 /* NMI cannot be shared, used for Polling */
2098 if (irqflags & (IRQF_SHARED | IRQF_COND_SUSPEND | IRQF_IRQPOLL))
2099 return -EINVAL;
2100
2101 if (!(irqflags & IRQF_PERCPU))
2102 return -EINVAL;
2103
2104 if (!handler)
2105 return -EINVAL;
2106
2107 desc = irq_to_desc(irq);
2108
2109 if (!desc || irq_settings_can_autoenable(desc) ||
2110 !irq_settings_can_request(desc) ||
2111 WARN_ON(irq_settings_is_per_cpu_devid(desc)) ||
2112 !irq_supports_nmi(desc))
2113 return -EINVAL;
2114
2115 action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
2116 if (!action)
2117 return -ENOMEM;
2118
2119 action->handler = handler;
2120 action->flags = irqflags | IRQF_NO_THREAD | IRQF_NOBALANCING;
2121 action->name = name;
2122 action->dev_id = dev_id;
2123
2124 retval = irq_chip_pm_get(&desc->irq_data);
2125 if (retval < 0)
2126 goto err_out;
2127
2128 retval = __setup_irq(irq, desc, action);
2129 if (retval)
2130 goto err_irq_setup;
2131
2132 raw_spin_lock_irqsave(&desc->lock, flags);
2133
2134 /* Setup NMI state */
2135 desc->istate |= IRQS_NMI;
2136 retval = irq_nmi_setup(desc);
2137 if (retval) {
2138 __cleanup_nmi(irq, desc);
2139 raw_spin_unlock_irqrestore(&desc->lock, flags);
2140 return -EINVAL;
2141 }
2142
2143 raw_spin_unlock_irqrestore(&desc->lock, flags);
2144
2145 return 0;
2146
2147err_irq_setup:
2148 irq_chip_pm_put(&desc->irq_data);
2149err_out:
2150 kfree(action);
2151
2152 return retval;
2153}
2154
1928void enable_percpu_irq(unsigned int irq, unsigned int type) 2155void enable_percpu_irq(unsigned int irq, unsigned int type)
1929{ 2156{
1930 unsigned int cpu = smp_processor_id(); 2157 unsigned int cpu = smp_processor_id();
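
A hedged sketch of how a driver might use request_nmi() under the constraints spelled out above: the line cannot be shared, must carry IRQF_PERCPU, and must not auto-enable, so it is enabled explicitly with enable_nmi() once set up. The watchdog scenario and all foo_* names are illustrative assumptions.

#include <linux/interrupt.h>
#include <linux/irq.h>

struct foo_watchdog {
        unsigned long pets;
};

static irqreturn_t foo_watchdog_nmi(int irq, void *dev_id)
{
        struct foo_watchdog *wdt = dev_id;

        /* NMI context: no locking, no sleeping, keep it short. */
        wdt->pets++;
        return IRQ_HANDLED;
}

static int foo_watchdog_init(struct foo_watchdog *wdt, unsigned int irq)
{
        int ret;

        /* request_nmi() rejects lines that are allowed to auto-enable. */
        irq_set_status_flags(irq, IRQ_NOAUTOEN);

        ret = request_nmi(irq, foo_watchdog_nmi, IRQF_PERCPU,
                          "foo-wdt-nmi", wdt);
        if (ret)
                return ret;

        enable_nmi(irq);
        return 0;
}
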
@@ -1959,6 +2186,11 @@ out:
1959} 2186}
1960EXPORT_SYMBOL_GPL(enable_percpu_irq); 2187EXPORT_SYMBOL_GPL(enable_percpu_irq);
1961 2188
2189void enable_percpu_nmi(unsigned int irq, unsigned int type)
2190{
2191 enable_percpu_irq(irq, type);
2192}
2193
1962/** 2194/**
1963 * irq_percpu_is_enabled - Check whether the per cpu irq is enabled 2195 * irq_percpu_is_enabled - Check whether the per cpu irq is enabled
1964 * @irq: Linux irq number to check for 2196 * @irq: Linux irq number to check for
@@ -1998,6 +2230,11 @@ void disable_percpu_irq(unsigned int irq)
1998} 2230}
1999EXPORT_SYMBOL_GPL(disable_percpu_irq); 2231EXPORT_SYMBOL_GPL(disable_percpu_irq);
2000 2232
2233void disable_percpu_nmi(unsigned int irq)
2234{
2235 disable_percpu_irq(irq);
2236}
2237
2001/* 2238/*
2002 * Internal function to unregister a percpu irqaction. 2239 * Internal function to unregister a percpu irqaction.
2003 */ 2240 */
@@ -2029,6 +2266,8 @@ static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_
2029 /* Found it - now remove it from the list of entries: */ 2266 /* Found it - now remove it from the list of entries: */
2030 desc->action = NULL; 2267 desc->action = NULL;
2031 2268
2269 desc->istate &= ~IRQS_NMI;
2270
2032 raw_spin_unlock_irqrestore(&desc->lock, flags); 2271 raw_spin_unlock_irqrestore(&desc->lock, flags);
2033 2272
2034 unregister_handler_proc(irq, action); 2273 unregister_handler_proc(irq, action);
@@ -2082,6 +2321,19 @@ void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
2082} 2321}
2083EXPORT_SYMBOL_GPL(free_percpu_irq); 2322EXPORT_SYMBOL_GPL(free_percpu_irq);
2084 2323
2324void free_percpu_nmi(unsigned int irq, void __percpu *dev_id)
2325{
2326 struct irq_desc *desc = irq_to_desc(irq);
2327
2328 if (!desc || !irq_settings_is_per_cpu_devid(desc))
2329 return;
2330
2331 if (WARN_ON(!(desc->istate & IRQS_NMI)))
2332 return;
2333
2334 kfree(__free_percpu_irq(irq, dev_id));
2335}
2336
2085/** 2337/**
2086 * setup_percpu_irq - setup a per-cpu interrupt 2338 * setup_percpu_irq - setup a per-cpu interrupt
2087 * @irq: Interrupt line to setup 2339 * @irq: Interrupt line to setup
@@ -2172,6 +2424,158 @@ int __request_percpu_irq(unsigned int irq, irq_handler_t handler,
2172EXPORT_SYMBOL_GPL(__request_percpu_irq); 2424EXPORT_SYMBOL_GPL(__request_percpu_irq);
2173 2425
2174/** 2426/**
2427 * request_percpu_nmi - allocate a percpu interrupt line for NMI delivery
2428 * @irq: Interrupt line to allocate
2429 * @handler: Function to be called when the IRQ occurs.
2430 * @name: An ascii name for the claiming device
2431 * @dev_id: A percpu cookie passed back to the handler function
2432 *
2433 * This call allocates interrupt resources for a per CPU NMI. Per CPU NMIs
2434 * have to be setup on each CPU by calling prepare_percpu_nmi() before
2435 * being enabled on the same CPU by using enable_percpu_nmi().
2436 *
2437 * Dev_id must be globally unique. It is a per-cpu variable, and
2438 * the handler gets called with the interrupted CPU's instance of
2439 * that variable.
2440 *
2441 * Interrupt lines requested for NMI delivering should have auto enabling
2442 * setting disabled.
2443 *
2444 * If the interrupt line cannot be used to deliver NMIs, function
2445 * will fail returning a negative value.
2446 */
2447int request_percpu_nmi(unsigned int irq, irq_handler_t handler,
2448 const char *name, void __percpu *dev_id)
2449{
2450 struct irqaction *action;
2451 struct irq_desc *desc;
2452 unsigned long flags;
2453 int retval;
2454
2455 if (!handler)
2456 return -EINVAL;
2457
2458 desc = irq_to_desc(irq);
2459
2460 if (!desc || !irq_settings_can_request(desc) ||
2461 !irq_settings_is_per_cpu_devid(desc) ||
2462 irq_settings_can_autoenable(desc) ||
2463 !irq_supports_nmi(desc))
2464 return -EINVAL;
2465
2466 /* The line cannot already be NMI */
2467 if (desc->istate & IRQS_NMI)
2468 return -EINVAL;
2469
2470 action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
2471 if (!action)
2472 return -ENOMEM;
2473
2474 action->handler = handler;
2475 action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND | IRQF_NO_THREAD
2476 | IRQF_NOBALANCING;
2477 action->name = name;
2478 action->percpu_dev_id = dev_id;
2479
2480 retval = irq_chip_pm_get(&desc->irq_data);
2481 if (retval < 0)
2482 goto err_out;
2483
2484 retval = __setup_irq(irq, desc, action);
2485 if (retval)
2486 goto err_irq_setup;
2487
2488 raw_spin_lock_irqsave(&desc->lock, flags);
2489 desc->istate |= IRQS_NMI;
2490 raw_spin_unlock_irqrestore(&desc->lock, flags);
2491
2492 return 0;
2493
2494err_irq_setup:
2495 irq_chip_pm_put(&desc->irq_data);
2496err_out:
2497 kfree(action);
2498
2499 return retval;
2500}
2501
2502/**
2503 * prepare_percpu_nmi - performs CPU local setup for NMI delivery
2504 * @irq: Interrupt line to prepare for NMI delivery
2505 *
2506 * This call prepares an interrupt line to deliver NMI on the current CPU,
2507 * before that interrupt line gets enabled with enable_percpu_nmi().
2508 *
2509 * As a CPU local operation, this should be called from non-preemptible
2510 * context.
2511 *
2512 * If the interrupt line cannot be used to deliver NMIs, function
2513 * will fail returning a negative value.
2514 */
2515int prepare_percpu_nmi(unsigned int irq)
2516{
2517 unsigned long flags;
2518 struct irq_desc *desc;
2519 int ret = 0;
2520
2521 WARN_ON(preemptible());
2522
2523 desc = irq_get_desc_lock(irq, &flags,
2524 IRQ_GET_DESC_CHECK_PERCPU);
2525 if (!desc)
2526 return -EINVAL;
2527
2528 if (WARN(!(desc->istate & IRQS_NMI),
2529 KERN_ERR "prepare_percpu_nmi called for a non-NMI interrupt: irq %u\n",
2530 irq)) {
2531 ret = -EINVAL;
2532 goto out;
2533 }
2534
2535 ret = irq_nmi_setup(desc);
2536 if (ret) {
2537 pr_err("Failed to setup NMI delivery: irq %u\n", irq);
2538 goto out;
2539 }
2540
2541out:
2542 irq_put_desc_unlock(desc, flags);
2543 return ret;
2544}
2545
2546/**
2547 * teardown_percpu_nmi - undoes NMI setup of IRQ line
2548 * @irq: Interrupt line from which CPU local NMI configuration should be
2549 * removed
2550 *
2551 * This call undoes the setup done by prepare_percpu_nmi().
2552 *
2553 * IRQ line should not be enabled for the current CPU.
2554 *
2555 * As a CPU local operation, this should be called from non-preemptible
2556 * context.
2557 */
2558void teardown_percpu_nmi(unsigned int irq)
2559{
2560 unsigned long flags;
2561 struct irq_desc *desc;
2562
2563 WARN_ON(preemptible());
2564
2565 desc = irq_get_desc_lock(irq, &flags,
2566 IRQ_GET_DESC_CHECK_PERCPU);
2567 if (!desc)
2568 return;
2569
2570 if (WARN_ON(!(desc->istate & IRQS_NMI)))
2571 goto out;
2572
2573 irq_nmi_teardown(desc);
2574out:
2575 irq_put_desc_unlock(desc, flags);
2576}
2577
2578/**
2175 * irq_get_irqchip_state - returns the irqchip state of a interrupt. 2579 * irq_get_irqchip_state - returns the irqchip state of a interrupt.
2176 * @irq: Interrupt line that is forwarded to a VM 2580 * @irq: Interrupt line that is forwarded to a VM
2177 * @which: One of IRQCHIP_STATE_* the caller wants to know about 2581 * @which: One of IRQCHIP_STATE_* the caller wants to know about
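
The per-CPU NMI API added above expects a specific lifecycle: request_percpu_nmi() once for the line, then prepare_percpu_nmi() and enable_percpu_nmi() on each CPU that should receive it. A sketch follows, loosely modelled on a per-CPU profiling unit; the foo_* names and the use of on_each_cpu() to reach every CPU in non-preemptible context are assumptions for illustration.

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/smp.h>

struct foo_pmu_cpu {
        unsigned long overflows;
};

static DEFINE_PER_CPU(struct foo_pmu_cpu, foo_pmu_cpu);
static unsigned int foo_pmu_irq;

static irqreturn_t foo_pmu_nmi_handler(int irq, void *dev_id)
{
        struct foo_pmu_cpu *cpu_data = dev_id; /* this CPU's instance */

        cpu_data->overflows++;
        return IRQ_HANDLED;
}

static void foo_pmu_starting_cpu(void *unused)
{
        /* Runs on each CPU in IPI context, i.e. non-preemptible. */
        if (!prepare_percpu_nmi(foo_pmu_irq))
                enable_percpu_nmi(foo_pmu_irq, IRQ_TYPE_NONE);
}

static int foo_pmu_init(unsigned int irq)
{
        int ret;

        foo_pmu_irq = irq;
        ret = request_percpu_nmi(irq, foo_pmu_nmi_handler, "foo-pmu",
                                 &foo_pmu_cpu);
        if (ret)
                return ret;

        on_each_cpu(foo_pmu_starting_cpu, NULL, 1);
        return 0;
}

Teardown mirrors this: disable_percpu_nmi() and teardown_percpu_nmi() on each CPU, then a single free_percpu_nmi() for the line.
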
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 087d18d771b5..65234c89d85b 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -101,6 +101,12 @@ bool kthread_should_stop(void)
101} 101}
102EXPORT_SYMBOL(kthread_should_stop); 102EXPORT_SYMBOL(kthread_should_stop);
103 103
104bool __kthread_should_park(struct task_struct *k)
105{
106 return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(k)->flags);
107}
108EXPORT_SYMBOL_GPL(__kthread_should_park);
109
104/** 110/**
105 * kthread_should_park - should this kthread park now? 111 * kthread_should_park - should this kthread park now?
106 * 112 *
@@ -114,7 +120,7 @@ EXPORT_SYMBOL(kthread_should_stop);
114 */ 120 */
115bool kthread_should_park(void) 121bool kthread_should_park(void)
116{ 122{
117 return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(current)->flags); 123 return __kthread_should_park(current);
118} 124}
119EXPORT_SYMBOL_GPL(kthread_should_park); 125EXPORT_SYMBOL_GPL(kthread_should_park);
120 126
diff --git a/kernel/softirq.c b/kernel/softirq.c
index d28813306b2c..10277429ed84 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -89,7 +89,8 @@ static bool ksoftirqd_running(unsigned long pending)
89 89
90 if (pending & SOFTIRQ_NOW_MASK) 90 if (pending & SOFTIRQ_NOW_MASK)
91 return false; 91 return false;
92 return tsk && (tsk->state == TASK_RUNNING); 92 return tsk && (tsk->state == TASK_RUNNING) &&
93 !__kthread_should_park(tsk);
93} 94}
94 95
95/* 96/*
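
The kthread/softirq hunks close a small hotplug race: ksoftirqd_running() used to treat a ksoftirqd that was about to park as runnable, so pending softirqs were left to a thread that would not run them. __kthread_should_park() exists so that check can be made against a task other than current from atomic context; a trivial illustrative sketch (the foo_* name is assumed):

#include <linux/kthread.h>
#include <linux/sched.h>

static bool foo_helper_usable(struct task_struct *foo_thread)
{
        /* A parking (or parked) helper cannot be relied on to make progress. */
        return foo_thread && foo_thread->state == TASK_RUNNING &&
               !__kthread_should_park(foo_thread);
}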