Diffstat (limited to 'kernel/irq/manage.c')
-rw-r--r--	kernel/irq/manage.c	593
1 file changed, 403 insertions(+), 190 deletions(-)
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 2782bacdf494..acd599a43bfb 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -17,6 +17,17 @@
 
 #include "internals.h"
 
+#ifdef CONFIG_IRQ_FORCED_THREADING
+__read_mostly bool force_irqthreads;
+
+static int __init setup_forced_irqthreads(char *arg)
+{
+	force_irqthreads = true;
+	return 0;
+}
+early_param("threadirqs", setup_forced_irqthreads);
+#endif
+
 /**
  *	synchronize_irq - wait for pending IRQ handlers (on other CPUs)
  *	@irq: interrupt number to wait for
@@ -30,7 +41,7 @@
 void synchronize_irq(unsigned int irq)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
-	unsigned int status;
+	unsigned int state;
 
 	if (!desc)
 		return;
@@ -42,16 +53,16 @@ void synchronize_irq(unsigned int irq)
 		 * Wait until we're out of the critical section.  This might
 		 * give the wrong answer due to the lack of memory barriers.
 		 */
-		while (desc->status & IRQ_INPROGRESS)
+		while (desc->istate & IRQS_INPROGRESS)
 			cpu_relax();
 
 		/* Ok, that indicated we're done: double-check carefully. */
 		raw_spin_lock_irqsave(&desc->lock, flags);
-		status = desc->status;
+		state = desc->istate;
 		raw_spin_unlock_irqrestore(&desc->lock, flags);
 
 		/* Oops, that failed? */
-	} while (status & IRQ_INPROGRESS);
+	} while (state & IRQS_INPROGRESS);
 
 	/*
 	 * We made sure that no hardirq handler is running. Now verify
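
The busy-wait above only covers the hard handler; a driver normally quiesces the interrupt source first and then calls synchronize_irq() before tearing down data the handler touches. A minimal sketch of such a teardown path follows; the mydev structure and the MYDEV_IRQ_ENABLE register are illustrative assumptions, not part of this file.

#include <linux/interrupt.h>
#include <linux/io.h>

struct mydev {
	unsigned int	irq;
	void __iomem	*regs;
};

#define MYDEV_IRQ_ENABLE	0x04	/* hypothetical device register */

static void mydev_stop(struct mydev *dev)
{
	/* Mask the source at the device, then wait out running handlers. */
	writel(0, dev->regs + MYDEV_IRQ_ENABLE);
	synchronize_irq(dev->irq);
	/* Now it is safe to free data the handler dereferences. */
}
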
@@ -73,8 +84,8 @@ int irq_can_set_affinity(unsigned int irq)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 
-	if (CHECK_IRQ_PER_CPU(desc->status) || !desc->irq_data.chip ||
-	    !desc->irq_data.chip->irq_set_affinity)
+	if (!desc || !irqd_can_balance(&desc->irq_data) ||
+	    !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
 		return 0;
 
 	return 1;
@@ -100,67 +111,169 @@ void irq_set_thread_affinity(struct irq_desc *desc)
 	}
 }
 
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+static inline bool irq_can_move_pcntxt(struct irq_desc *desc)
+{
+	return irq_settings_can_move_pcntxt(desc);
+}
+static inline bool irq_move_pending(struct irq_desc *desc)
+{
+	return irqd_is_setaffinity_pending(&desc->irq_data);
+}
+static inline void
+irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
+{
+	cpumask_copy(desc->pending_mask, mask);
+}
+static inline void
+irq_get_pending(struct cpumask *mask, struct irq_desc *desc)
+{
+	cpumask_copy(mask, desc->pending_mask);
+}
+#else
+static inline bool irq_can_move_pcntxt(struct irq_desc *desc) { return true; }
+static inline bool irq_move_pending(struct irq_desc *desc) { return false; }
+static inline void
+irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { }
+static inline void
+irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
+#endif
+
 /**
  *	irq_set_affinity - Set the irq affinity of a given irq
  *	@irq:		Interrupt to set affinity
  *	@cpumask:	cpumask
  *
  */
-int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
+int irq_set_affinity(unsigned int irq, const struct cpumask *mask)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 	struct irq_chip *chip = desc->irq_data.chip;
 	unsigned long flags;
+	int ret = 0;
 
 	if (!chip->irq_set_affinity)
 		return -EINVAL;
 
 	raw_spin_lock_irqsave(&desc->lock, flags);
 
-#ifdef CONFIG_GENERIC_PENDING_IRQ
-	if (desc->status & IRQ_MOVE_PCNTXT) {
-		if (!chip->irq_set_affinity(&desc->irq_data, cpumask, false)) {
-			cpumask_copy(desc->irq_data.affinity, cpumask);
+	if (irq_can_move_pcntxt(desc)) {
+		ret = chip->irq_set_affinity(&desc->irq_data, mask, false);
+		switch (ret) {
+		case IRQ_SET_MASK_OK:
+			cpumask_copy(desc->irq_data.affinity, mask);
+		case IRQ_SET_MASK_OK_NOCOPY:
 			irq_set_thread_affinity(desc);
+			ret = 0;
 		}
+	} else {
+		irqd_set_move_pending(&desc->irq_data);
+		irq_copy_pending(desc, mask);
 	}
-	else {
-		desc->status |= IRQ_MOVE_PENDING;
-		cpumask_copy(desc->pending_mask, cpumask);
-	}
-#else
-	if (!chip->irq_set_affinity(&desc->irq_data, cpumask, false)) {
-		cpumask_copy(desc->irq_data.affinity, cpumask);
-		irq_set_thread_affinity(desc);
+
+	if (desc->affinity_notify) {
+		kref_get(&desc->affinity_notify->kref);
+		schedule_work(&desc->affinity_notify->work);
 	}
-#endif
-	desc->status |= IRQ_AFFINITY_SET;
+	irq_compat_set_affinity(desc);
+	irqd_set(&desc->irq_data, IRQD_AFFINITY_SET);
 	raw_spin_unlock_irqrestore(&desc->lock, flags);
-	return 0;
+	return ret;
 }
 
 int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
 {
+	unsigned long flags;
+	struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
+
+	if (!desc)
+		return -EINVAL;
+	desc->affinity_hint = m;
+	irq_put_desc_unlock(desc, flags);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
+
+static void irq_affinity_notify(struct work_struct *work)
+{
+	struct irq_affinity_notify *notify =
+		container_of(work, struct irq_affinity_notify, work);
+	struct irq_desc *desc = irq_to_desc(notify->irq);
+	cpumask_var_t cpumask;
+	unsigned long flags;
+
+	if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
+		goto out;
+
+	raw_spin_lock_irqsave(&desc->lock, flags);
+	if (irq_move_pending(desc))
+		irq_get_pending(cpumask, desc);
+	else
+		cpumask_copy(cpumask, desc->irq_data.affinity);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
+
+	notify->notify(notify, cpumask);
+
+	free_cpumask_var(cpumask);
+out:
+	kref_put(&notify->kref, notify->release);
+}
+
+/**
+ *	irq_set_affinity_notifier - control notification of IRQ affinity changes
+ *	@irq:		Interrupt for which to enable/disable notification
+ *	@notify:	Context for notification, or %NULL to disable
+ *			notification.  Function pointers must be initialised;
+ *			the other fields will be initialised by this function.
+ *
+ *	Must be called in process context.  Notification may only be enabled
+ *	after the IRQ is allocated and must be disabled before the IRQ is
+ *	freed using free_irq().
+ */
+int
+irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
+{
 	struct irq_desc *desc = irq_to_desc(irq);
+	struct irq_affinity_notify *old_notify;
 	unsigned long flags;
 
+	/* The release function is promised process context */
+	might_sleep();
+
 	if (!desc)
 		return -EINVAL;
 
+	/* Complete initialisation of *notify */
+	if (notify) {
+		notify->irq = irq;
+		kref_init(&notify->kref);
+		INIT_WORK(&notify->work, irq_affinity_notify);
+	}
+
 	raw_spin_lock_irqsave(&desc->lock, flags);
-	desc->affinity_hint = m;
+	old_notify = desc->affinity_notify;
+	desc->affinity_notify = notify;
 	raw_spin_unlock_irqrestore(&desc->lock, flags);
 
+	if (old_notify)
+		kref_put(&old_notify->kref, old_notify->release);
+
 	return 0;
 }
-EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
+EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
 
 #ifndef CONFIG_AUTO_IRQ_AFFINITY
 /*
  * Generic version of the affinity autoselector.
  */
-static int setup_affinity(unsigned int irq, struct irq_desc *desc)
+static int
+setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
 {
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+	struct cpumask *set = irq_default_affinity;
+	int ret;
+
+	/* Excludes PER_CPU and NO_BALANCE interrupts */
 	if (!irq_can_set_affinity(irq))
 		return 0;
 
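
The notifier added above fires from a workqueue whenever the affinity changes or a change goes pending; a consumer only has to fill in the notify and release hooks and keep the structure alive until release runs. A minimal sketch, with the mydrv_* names invented for illustration:

#include <linux/interrupt.h>
#include <linux/slab.h>

struct mydrv_irq_ctx {
	struct irq_affinity_notify notify;	/* must stay alive until release */
};

static void mydrv_affinity_notify(struct irq_affinity_notify *notify,
				  const cpumask_t *mask)
{
	/* Runs from schedule_work() context with the current/pending mask. */
	pr_info("irq %u affinity now spans %d CPUs\n",
		notify->irq, cpumask_weight(mask));
}

static void mydrv_affinity_release(struct kref *ref)
{
	struct irq_affinity_notify *notify =
		container_of(ref, struct irq_affinity_notify, kref);
	struct mydrv_irq_ctx *ctx =
		container_of(notify, struct mydrv_irq_ctx, notify);

	kfree(ctx);
}

static int mydrv_watch_affinity(unsigned int irq)
{
	struct mydrv_irq_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

	if (!ctx)
		return -ENOMEM;
	ctx->notify.notify = mydrv_affinity_notify;
	ctx->notify.release = mydrv_affinity_release;
	return irq_set_affinity_notifier(irq, &ctx->notify);
}
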
@@ -168,22 +281,29 @@ static int setup_affinity(unsigned int irq, struct irq_desc *desc)
 	 * Preserve an userspace affinity setup, but make sure that
 	 * one of the targets is online.
 	 */
-	if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) {
-		if (cpumask_any_and(desc->irq_data.affinity, cpu_online_mask)
-		    < nr_cpu_ids)
-			goto set_affinity;
-		else
-			desc->status &= ~IRQ_AFFINITY_SET;
+	if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
+		if (cpumask_intersects(desc->irq_data.affinity,
+				       cpu_online_mask))
+			set = desc->irq_data.affinity;
+		else {
+			irq_compat_clr_affinity(desc);
+			irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
+		}
 	}
 
-	cpumask_and(desc->irq_data.affinity, cpu_online_mask, irq_default_affinity);
-set_affinity:
-	desc->irq_data.chip->irq_set_affinity(&desc->irq_data, desc->irq_data.affinity, false);
-
+	cpumask_and(mask, cpu_online_mask, set);
+	ret = chip->irq_set_affinity(&desc->irq_data, mask, false);
+	switch (ret) {
+	case IRQ_SET_MASK_OK:
+		cpumask_copy(desc->irq_data.affinity, mask);
+	case IRQ_SET_MASK_OK_NOCOPY:
+		irq_set_thread_affinity(desc);
+	}
 	return 0;
 }
 #else
-static inline int setup_affinity(unsigned int irq, struct irq_desc *d)
+static inline int
+setup_affinity(unsigned int irq, struct irq_desc *d, struct cpumask *mask)
 {
 	return irq_select_affinity(irq);
 }
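
The switch above relies on the chip callback's return value: IRQ_SET_MASK_OK asks the core to copy the mask into irq_data.affinity, while IRQ_SET_MASK_OK_NOCOPY means the callback already stored the effective mask itself. A sketch of a callback honouring that contract; the myboard register layout and base pointer are invented for illustration:

static void __iomem *myboard_base;				/* hypothetical */
#define MYBOARD_IRQ_ROUTE(hwirq)	(0x100 + 4 * (hwirq))	/* hypothetical */

static int myboard_irq_set_affinity(struct irq_data *d,
				    const struct cpumask *mask, bool force)
{
	unsigned int cpu = cpumask_first_and(mask, cpu_online_mask);

	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	/* Route the line to a single online CPU out of the requested mask. */
	writel(BIT(cpu), myboard_base + MYBOARD_IRQ_ROUTE(d->irq));

	/* Let the core copy *mask into desc->irq_data.affinity for us. */
	return IRQ_SET_MASK_OK;
}
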
@@ -192,23 +312,21 @@ static inline int setup_affinity(unsigned int irq, struct irq_desc *d)
 /*
  * Called when affinity is set via /proc/irq
  */
-int irq_select_affinity_usr(unsigned int irq)
+int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 	unsigned long flags;
 	int ret;
 
 	raw_spin_lock_irqsave(&desc->lock, flags);
-	ret = setup_affinity(irq, desc);
-	if (!ret)
-		irq_set_thread_affinity(desc);
+	ret = setup_affinity(irq, desc, mask);
 	raw_spin_unlock_irqrestore(&desc->lock, flags);
-
 	return ret;
 }
 
 #else
-static inline int setup_affinity(unsigned int irq, struct irq_desc *desc)
+static inline int
+setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
 {
 	return 0;
 }
@@ -219,13 +337,23 @@ void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend)
 	if (suspend) {
 		if (!desc->action || (desc->action->flags & IRQF_NO_SUSPEND))
 			return;
-		desc->status |= IRQ_SUSPENDED;
+		desc->istate |= IRQS_SUSPENDED;
 	}
 
-	if (!desc->depth++) {
-		desc->status |= IRQ_DISABLED;
-		desc->irq_data.chip->irq_disable(&desc->irq_data);
-	}
+	if (!desc->depth++)
+		irq_disable(desc);
+}
+
+static int __disable_irq_nosync(unsigned int irq)
+{
+	unsigned long flags;
+	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
+
+	if (!desc)
+		return -EINVAL;
+	__disable_irq(desc, irq, false);
+	irq_put_desc_busunlock(desc, flags);
+	return 0;
 }
 
 /**
@@ -241,17 +369,7 @@ void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend)
  */
 void disable_irq_nosync(unsigned int irq)
 {
-	struct irq_desc *desc = irq_to_desc(irq);
-	unsigned long flags;
-
-	if (!desc)
-		return;
-
-	chip_bus_lock(desc);
-	raw_spin_lock_irqsave(&desc->lock, flags);
-	__disable_irq(desc, irq, false);
-	raw_spin_unlock_irqrestore(&desc->lock, flags);
-	chip_bus_sync_unlock(desc);
+	__disable_irq_nosync(irq);
 }
 EXPORT_SYMBOL(disable_irq_nosync);
 
@@ -269,13 +387,7 @@ EXPORT_SYMBOL(disable_irq_nosync);
  */
 void disable_irq(unsigned int irq)
 {
-	struct irq_desc *desc = irq_to_desc(irq);
-
-	if (!desc)
-		return;
-
-	disable_irq_nosync(irq);
-	if (desc->action)
+	if (!__disable_irq_nosync(irq))
 		synchronize_irq(irq);
 }
 EXPORT_SYMBOL(disable_irq);
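
disable_irq() and enable_irq() nest through desc->depth, so every disable needs a matching enable before the line is actually unmasked again. A short illustration; mydev_pause() and mydev_resume() are made-up helpers:

static void mydev_pause(unsigned int irq)
{
	disable_irq(irq);	/* depth 0 -> 1: masks the line, waits for handlers */
	disable_irq(irq);	/* depth 1 -> 2: no additional hardware effect */
}

static void mydev_resume(unsigned int irq)
{
	enable_irq(irq);	/* depth 2 -> 1: line stays masked */
	enable_irq(irq);	/* depth 1 -> 0: line is unmasked, resend checked */
}
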
@@ -283,7 +395,7 @@ EXPORT_SYMBOL(disable_irq);
 void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
 {
 	if (resume) {
-		if (!(desc->status & IRQ_SUSPENDED)) {
+		if (!(desc->istate & IRQS_SUSPENDED)) {
 			if (!desc->action)
 				return;
 			if (!(desc->action->flags & IRQF_FORCE_RESUME))
@@ -291,7 +403,7 @@ void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
 			/* Pretend that it got disabled ! */
 			desc->depth++;
 		}
-		desc->status &= ~IRQ_SUSPENDED;
+		desc->istate &= ~IRQS_SUSPENDED;
 	}
 
 	switch (desc->depth) {
@@ -300,12 +412,11 @@ void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
 		WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
 		break;
 	case 1: {
-		unsigned int status = desc->status & ~IRQ_DISABLED;
-
-		if (desc->status & IRQ_SUSPENDED)
+		if (desc->istate & IRQS_SUSPENDED)
 			goto err_out;
 		/* Prevent probing on this irq: */
-		desc->status = status | IRQ_NOPROBE;
+		irq_settings_set_noprobe(desc);
+		irq_enable(desc);
 		check_irq_resend(desc, irq);
 		/* fall-through */
 	}
@@ -327,21 +438,18 @@ void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
  */
 void enable_irq(unsigned int irq)
 {
-	struct irq_desc *desc = irq_to_desc(irq);
 	unsigned long flags;
+	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
 
 	if (!desc)
 		return;
+	if (WARN(!desc->irq_data.chip,
+		 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
+		goto out;
 
-	if (WARN(!desc->irq_data.chip || !desc->irq_data.chip->irq_enable,
-	    KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
-		return;
-
-	chip_bus_lock(desc);
-	raw_spin_lock_irqsave(&desc->lock, flags);
 	__enable_irq(desc, irq, false);
-	raw_spin_unlock_irqrestore(&desc->lock, flags);
-	chip_bus_sync_unlock(desc);
+out:
+	irq_put_desc_busunlock(desc, flags);
 }
 EXPORT_SYMBOL(enable_irq);
 
@@ -357,7 +465,7 @@ static int set_irq_wake_real(unsigned int irq, unsigned int on)
 }
 
 /**
- *	set_irq_wake - control irq power management wakeup
+ *	irq_set_irq_wake - control irq power management wakeup
  *	@irq:	interrupt to control
  *	@on:	enable/disable power management wakeup
  *
@@ -368,23 +476,22 @@ static int set_irq_wake_real(unsigned int irq, unsigned int on)
  *	Wakeup mode lets this IRQ wake the system from sleep
  *	states like "suspend to RAM".
  */
-int set_irq_wake(unsigned int irq, unsigned int on)
+int irq_set_irq_wake(unsigned int irq, unsigned int on)
 {
-	struct irq_desc *desc = irq_to_desc(irq);
 	unsigned long flags;
+	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
 	int ret = 0;
 
 	/* wakeup-capable irqs can be shared between drivers that
 	 * don't need to have the same sleep mode behaviors.
 	 */
-	raw_spin_lock_irqsave(&desc->lock, flags);
 	if (on) {
 		if (desc->wake_depth++ == 0) {
 			ret = set_irq_wake_real(irq, on);
 			if (ret)
 				desc->wake_depth = 0;
 			else
-				desc->status |= IRQ_WAKEUP;
+				irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
 		}
 	} else {
 		if (desc->wake_depth == 0) {
@@ -394,14 +501,13 @@ int set_irq_wake(unsigned int irq, unsigned int on)
 			if (ret)
 				desc->wake_depth = 1;
 			else
-				desc->status &= ~IRQ_WAKEUP;
+				irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
 		}
 	}
-
-	raw_spin_unlock_irqrestore(&desc->lock, flags);
+	irq_put_desc_busunlock(desc, flags);
 	return ret;
 }
-EXPORT_SYMBOL(set_irq_wake);
+EXPORT_SYMBOL(irq_set_irq_wake);
 
 /*
  * Internal function that tells the architecture code whether a
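
Drivers usually reach irq_set_irq_wake() through the enable_irq_wake()/disable_irq_wake() wrappers, and the wake_depth counting above means those calls must balance across suspend/resume. A sketch of the usual pattern; the mywake_dev structure and the dev_pm_ops hookup are illustrative assumptions:

#include <linux/pm.h>
#include <linux/interrupt.h>

struct mywake_dev {
	unsigned int wakeup_irq;
};

static int mywake_suspend(struct device *dev)
{
	struct mywake_dev *md = dev_get_drvdata(dev);

	if (device_may_wakeup(dev))
		enable_irq_wake(md->wakeup_irq);	/* irq_set_irq_wake(irq, 1) */
	return 0;
}

static int mywake_resume(struct device *dev)
{
	struct mywake_dev *md = dev_get_drvdata(dev);

	if (device_may_wakeup(dev))
		disable_irq_wake(md->wakeup_irq);	/* irq_set_irq_wake(irq, 0) */
	return 0;
}

static SIMPLE_DEV_PM_OPS(mywake_pm_ops, mywake_suspend, mywake_resume);
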
@@ -410,43 +516,27 @@ EXPORT_SYMBOL(set_irq_wake);
  */
 int can_request_irq(unsigned int irq, unsigned long irqflags)
 {
-	struct irq_desc *desc = irq_to_desc(irq);
-	struct irqaction *action;
 	unsigned long flags;
+	struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
+	int canrequest = 0;
 
 	if (!desc)
 		return 0;
 
-	if (desc->status & IRQ_NOREQUEST)
-		return 0;
-
-	raw_spin_lock_irqsave(&desc->lock, flags);
-	action = desc->action;
-	if (action)
-		if (irqflags & action->flags & IRQF_SHARED)
-			action = NULL;
-
-	raw_spin_unlock_irqrestore(&desc->lock, flags);
-
-	return !action;
-}
-
-void compat_irq_chip_set_default_handler(struct irq_desc *desc)
-{
-	/*
-	 * If the architecture still has not overriden
-	 * the flow handler then zap the default. This
-	 * should catch incorrect flow-type setting.
-	 */
-	if (desc->handle_irq == &handle_bad_irq)
-		desc->handle_irq = NULL;
+	if (irq_settings_can_request(desc)) {
+		if (desc->action)
+			if (irqflags & desc->action->flags & IRQF_SHARED)
+				canrequest = 1;
+	}
+	irq_put_desc_unlock(desc, flags);
+	return canrequest;
 }
 
 int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
 		      unsigned long flags)
 {
-	int ret;
 	struct irq_chip *chip = desc->irq_data.chip;
+	int ret, unmask = 0;
 
 	if (!chip || !chip->irq_set_type) {
 		/*
@@ -458,23 +548,43 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
 		return 0;
 	}
 
+	flags &= IRQ_TYPE_SENSE_MASK;
+
+	if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
+		if (!(desc->istate & IRQS_MASKED))
+			mask_irq(desc);
+		if (!(desc->istate & IRQS_DISABLED))
+			unmask = 1;
+	}
+
 	/* caller masked out all except trigger mode flags */
 	ret = chip->irq_set_type(&desc->irq_data, flags);
 
-	if (ret)
-		pr_err("setting trigger mode %lu for irq %u failed (%pF)\n",
-		       flags, irq, chip->irq_set_type);
-	else {
-		if (flags & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
-			flags |= IRQ_LEVEL;
-		/* note that IRQF_TRIGGER_MASK == IRQ_TYPE_SENSE_MASK */
-		desc->status &= ~(IRQ_LEVEL | IRQ_TYPE_SENSE_MASK);
-		desc->status |= flags;
+	switch (ret) {
+	case IRQ_SET_MASK_OK:
+		irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
+		irqd_set(&desc->irq_data, flags);
+
+	case IRQ_SET_MASK_OK_NOCOPY:
+		flags = irqd_get_trigger_type(&desc->irq_data);
+		irq_settings_set_trigger_mask(desc, flags);
+		irqd_clear(&desc->irq_data, IRQD_LEVEL);
+		irq_settings_clr_level(desc);
+		if (flags & IRQ_TYPE_LEVEL_MASK) {
+			irq_settings_set_level(desc);
+			irqd_set(&desc->irq_data, IRQD_LEVEL);
+		}
 
 		if (chip != desc->irq_data.chip)
 			irq_chip_set_defaults(desc->irq_data.chip);
+		ret = 0;
+		break;
+	default:
+		pr_err("setting trigger mode %lu for irq %u failed (%pF)\n",
+		       flags, irq, chip->irq_set_type);
 	}
-
+	if (unmask)
+		unmask_irq(desc);
 	return ret;
 }
 
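
A chip opts into the new masking dance by setting IRQCHIP_SET_TYPE_MASKED; the core then masks the line around ->irq_set_type() and unmasks it afterwards unless the interrupt is disabled. A sketch of such a chip; register names and the base pointer are invented, and .irq_mask/.irq_unmask/.irq_ack are omitted for brevity:

static void __iomem *mychip_base;		/* hypothetical */
#define MYCHIP_IRQ_POLARITY	0x10		/* hypothetical */

static int mychip_irq_set_type(struct irq_data *d, unsigned int type)
{
	u32 pol = readl(mychip_base + MYCHIP_IRQ_POLARITY);

	switch (type) {
	case IRQ_TYPE_EDGE_RISING:
		pol |= BIT(d->irq);
		break;
	case IRQ_TYPE_EDGE_FALLING:
		pol &= ~BIT(d->irq);
		break;
	default:
		return -EINVAL;
	}
	writel(pol, mychip_base + MYCHIP_IRQ_POLARITY);

	/* IRQ_SET_MASK_OK lets the core record the trigger bits in irq_data. */
	return IRQ_SET_MASK_OK;
}

static struct irq_chip mychip_irq_chip = {
	.name		= "mychip",
	.irq_set_type	= mychip_irq_set_type,
	.flags		= IRQCHIP_SET_TYPE_MASKED,
};
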
@@ -518,8 +628,11 @@ static int irq_wait_for_interrupt(struct irqaction *action)
  * handler finished. unmask if the interrupt has not been disabled and
  * is marked MASKED.
  */
-static void irq_finalize_oneshot(unsigned int irq, struct irq_desc *desc)
+static void irq_finalize_oneshot(struct irq_desc *desc,
+				 struct irqaction *action, bool force)
 {
+	if (!(desc->istate & IRQS_ONESHOT))
+		return;
 again:
 	chip_bus_lock(desc);
 	raw_spin_lock_irq(&desc->lock);
@@ -531,26 +644,44 @@ again:
 	 * The thread is faster done than the hard interrupt handler
 	 * on the other CPU. If we unmask the irq line then the
 	 * interrupt can come in again and masks the line, leaves due
-	 * to IRQ_INPROGRESS and the irq line is masked forever.
+	 * to IRQS_INPROGRESS and the irq line is masked forever.
+	 *
+	 * This also serializes the state of shared oneshot handlers
+	 * versus "desc->threads_oneshot |= action->thread_mask;" in
+	 * irq_wake_thread(). See the comment there which explains the
+	 * serialization.
 	 */
-	if (unlikely(desc->status & IRQ_INPROGRESS)) {
+	if (unlikely(desc->istate & IRQS_INPROGRESS)) {
 		raw_spin_unlock_irq(&desc->lock);
 		chip_bus_sync_unlock(desc);
 		cpu_relax();
 		goto again;
 	}
 
-	if (!(desc->status & IRQ_DISABLED) && (desc->status & IRQ_MASKED)) {
-		desc->status &= ~IRQ_MASKED;
+	/*
+	 * Now check again, whether the thread should run. Otherwise
+	 * we would clear the threads_oneshot bit of this thread which
+	 * was just set.
+	 */
+	if (!force && test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
+		goto out_unlock;
+
+	desc->threads_oneshot &= ~action->thread_mask;
+
+	if (!desc->threads_oneshot && !(desc->istate & IRQS_DISABLED) &&
+	    (desc->istate & IRQS_MASKED)) {
+		irq_compat_clr_masked(desc);
+		desc->istate &= ~IRQS_MASKED;
 		desc->irq_data.chip->irq_unmask(&desc->irq_data);
 	}
+out_unlock:
 	raw_spin_unlock_irq(&desc->lock);
 	chip_bus_sync_unlock(desc);
 }
 
 #ifdef CONFIG_SMP
 /*
  * Check whether we need to change the affinity of the interrupt thread.
  */
 static void
 irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
@@ -582,6 +713,32 @@ irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
 #endif
 
 /*
+ * Interrupts which are not explicitly requested as threaded
+ * interrupts rely on the implicit bh/preempt disable of the hard irq
+ * context. So we need to disable bh here to avoid deadlocks and other
+ * side effects.
+ */
+static void
+irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
+{
+	local_bh_disable();
+	action->thread_fn(action->irq, action->dev_id);
+	irq_finalize_oneshot(desc, action, false);
+	local_bh_enable();
+}
+
+/*
+ * Interrupts explicitly requested as threaded interrupts want to be
+ * preemptible - many of them need to sleep and wait for slow busses to
+ * complete.
+ */
+static void irq_thread_fn(struct irq_desc *desc, struct irqaction *action)
+{
+	action->thread_fn(action->irq, action->dev_id);
+	irq_finalize_oneshot(desc, action, false);
+}
+
+/*
  * Interrupt handler thread
  */
 static int irq_thread(void *data)
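
For comparison with the forced path above, an explicitly threaded request keeps a small hard-irq primary handler and moves the heavy lifting into the thread. The mydev_* functions below are illustrative only:

static irqreturn_t mydev_quick_check(int irq, void *dev_id)
{
	/* Hard irq context: just ack/inspect the device and defer. */
	return IRQ_WAKE_THREAD;
}

static irqreturn_t mydev_thread_work(int irq, void *dev_id)
{
	/* Thread context: may sleep, e.g. talk to a device behind I2C. */
	return IRQ_HANDLED;
}

static int mydev_request(unsigned int irq, void *dev_id)
{
	return request_threaded_irq(irq, mydev_quick_check, mydev_thread_work,
				    IRQF_ONESHOT, "mydev", dev_id);
}
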
@@ -591,7 +748,14 @@ static int irq_thread(void *data)
 	};
 	struct irqaction *action = data;
 	struct irq_desc *desc = irq_to_desc(action->irq);
-	int wake, oneshot = desc->status & IRQ_ONESHOT;
+	void (*handler_fn)(struct irq_desc *desc, struct irqaction *action);
+	int wake;
+
+	if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
+					&action->thread_flags))
+		handler_fn = irq_forced_thread_fn;
+	else
+		handler_fn = irq_thread_fn;
 
 	sched_setscheduler(current, SCHED_FIFO, &param);
 	current->irqaction = action;
@@ -603,23 +767,20 @@ static int irq_thread(void *data)
 		atomic_inc(&desc->threads_active);
 
 		raw_spin_lock_irq(&desc->lock);
-		if (unlikely(desc->status & IRQ_DISABLED)) {
+		if (unlikely(desc->istate & IRQS_DISABLED)) {
 			/*
 			 * CHECKME: We might need a dedicated
 			 * IRQ_THREAD_PENDING flag here, which
 			 * retriggers the thread in check_irq_resend()
-			 * but AFAICT IRQ_PENDING should be fine as it
+			 * but AFAICT IRQS_PENDING should be fine as it
 			 * retriggers the interrupt itself --- tglx
 			 */
-			desc->status |= IRQ_PENDING;
+			irq_compat_set_pending(desc);
+			desc->istate |= IRQS_PENDING;
 			raw_spin_unlock_irq(&desc->lock);
 		} else {
 			raw_spin_unlock_irq(&desc->lock);
-
-			action->thread_fn(action->irq, action->dev_id);
-
-			if (oneshot)
-				irq_finalize_oneshot(action->irq, desc);
+			handler_fn(desc, action);
 		}
 
 		wake = atomic_dec_and_test(&desc->threads_active);
@@ -628,6 +789,9 @@ static int irq_thread(void *data)
 			wake_up(&desc->wait_for_threads);
 	}
 
+	/* Prevent a stale desc->threads_oneshot */
+	irq_finalize_oneshot(desc, action, true);
+
 	/*
 	 * Clear irqaction. Otherwise exit_irq_thread() would make
 	 * fuzz about an active irq thread going into nirvana.
@@ -642,6 +806,7 @@ static int irq_thread(void *data)
 void exit_irq_thread(void)
 {
 	struct task_struct *tsk = current;
+	struct irq_desc *desc;
 
 	if (!tsk->irqaction)
 		return;
@@ -650,6 +815,14 @@ void exit_irq_thread(void)
 	       "exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
 	       tsk->comm ? tsk->comm : "", tsk->pid, tsk->irqaction->irq);
 
+	desc = irq_to_desc(tsk->irqaction->irq);
+
+	/*
+	 * Prevent a stale desc->threads_oneshot. Must be called
+	 * before setting the IRQTF_DIED flag.
+	 */
+	irq_finalize_oneshot(desc, tsk->irqaction, true);
+
 	/*
 	 * Set the THREAD DIED flag to prevent further wakeups of the
 	 * soon to be gone threaded handler.
@@ -657,6 +830,22 @@ void exit_irq_thread(void)
 	set_bit(IRQTF_DIED, &tsk->irqaction->flags);
 }
 
+static void irq_setup_forced_threading(struct irqaction *new)
+{
+	if (!force_irqthreads)
+		return;
+	if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
+		return;
+
+	new->flags |= IRQF_ONESHOT;
+
+	if (!new->thread_fn) {
+		set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
+		new->thread_fn = new->handler;
+		new->handler = irq_default_primary_handler;
+	}
+}
+
 /*
  * Internal function to register an irqaction - typically used to
  * allocate special interrupts that are part of the architecture.
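
With "threadirqs" on the command line, an ordinary request_irq() caller is converted by irq_setup_forced_threading(): the handler it passed becomes the thread function, irq_default_primary_handler() (which simply returns IRQ_WAKE_THREAD) is substituted as the primary, and IRQF_ONESHOT keeps the line masked until the thread completes. An illustrative caller:

static irqreturn_t mydev_isr(int irq, void *dev_id)
{
	/* Written as a normal hard-irq handler ... */
	return IRQ_HANDLED;
}

static int mydev_attach(unsigned int irq, void *dev_id)
{
	/*
	 * ... but under forced threading this ends up as the thread_fn of a
	 * oneshot threaded interrupt.  Only IRQF_NO_THREAD, IRQF_PERCPU and
	 * IRQF_ONESHOT requests are left untouched by the conversion above.
	 */
	return request_irq(irq, mydev_isr, 0, "mydev", dev_id);
}
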
@@ -666,9 +855,9 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 {
 	struct irqaction *old, **old_ptr;
 	const char *old_name = NULL;
-	unsigned long flags;
-	int nested, shared = 0;
-	int ret;
+	unsigned long flags, thread_mask = 0;
+	int ret, nested, shared = 0;
+	cpumask_var_t mask;
 
 	if (!desc)
 		return -EINVAL;
@@ -692,15 +881,11 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 		rand_initialize_irq(irq);
 	}
 
-	/* Oneshot interrupts are not allowed with shared */
-	if ((new->flags & IRQF_ONESHOT) && (new->flags & IRQF_SHARED))
-		return -EINVAL;
-
 	/*
 	 * Check whether the interrupt nests into another interrupt
 	 * thread.
 	 */
-	nested = desc->status & IRQ_NESTED_THREAD;
+	nested = irq_settings_is_nested_thread(desc);
 	if (nested) {
 		if (!new->thread_fn)
 			return -EINVAL;
@@ -710,6 +895,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 		 * dummy function which warns when called.
 		 */
 		new->handler = irq_nested_primary_handler;
+	} else {
+		irq_setup_forced_threading(new);
 	}
 
 	/*
@@ -733,6 +920,11 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 		new->thread = t;
 	}
 
+	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
+		ret = -ENOMEM;
+		goto out_thread;
+	}
+
 	/*
 	 * The following block of code has to be executed atomically
 	 */
@@ -744,29 +936,40 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 		 * Can't share interrupts unless both agree to and are
 		 * the same type (level, edge, polarity). So both flag
 		 * fields must have IRQF_SHARED set and the bits which
-		 * set the trigger type must match.
+		 * set the trigger type must match. Also all must
+		 * agree on ONESHOT.
 		 */
 		if (!((old->flags & new->flags) & IRQF_SHARED) ||
-		    ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK)) {
+		    ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK) ||
+		    ((old->flags ^ new->flags) & IRQF_ONESHOT)) {
 			old_name = old->name;
 			goto mismatch;
 		}
 
-#if defined(CONFIG_IRQ_PER_CPU)
 		/* All handlers must agree on per-cpuness */
 		if ((old->flags & IRQF_PERCPU) !=
 		    (new->flags & IRQF_PERCPU))
 			goto mismatch;
-#endif
 
 		/* add new interrupt at end of irq queue */
 		do {
+			thread_mask |= old->thread_mask;
 			old_ptr = &old->next;
 			old = *old_ptr;
 		} while (old);
 		shared = 1;
 	}
 
+	/*
+	 * Setup the thread mask for this irqaction. Unlikely to have
+	 * 32 resp 64 irqs sharing one line, but who knows.
+	 */
+	if (new->flags & IRQF_ONESHOT && thread_mask == ~0UL) {
+		ret = -EBUSY;
+		goto out_mask;
+	}
+	new->thread_mask = 1 << ffz(thread_mask);
+
 	if (!shared) {
 		irq_chip_set_defaults(desc->irq_data.chip);
 
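
The thread_mask assignment above hands each action on a shared oneshot line its own bit: ffz() returns the first zero bit of the accumulated mask, and desc->threads_oneshot collects the bits of the threads still pending so the line is only unmasked when it drops back to zero. A worked illustration with example values:

	/* Two oneshot actions already installed on the shared line. */
	unsigned long thread_mask = 0x3;		/* bits 0 and 1 taken */
	unsigned long new_mask = 1UL << ffz(thread_mask);	/* == 0x4 */
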
@@ -778,42 +981,44 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 				new->flags & IRQF_TRIGGER_MASK);
 
 		if (ret)
-			goto out_thread;
-	} else
-		compat_irq_chip_set_default_handler(desc);
-#if defined(CONFIG_IRQ_PER_CPU)
-	if (new->flags & IRQF_PERCPU)
-		desc->status |= IRQ_PER_CPU;
-#endif
+			goto out_mask;
+	}
 
-	desc->status &= ~(IRQ_AUTODETECT | IRQ_WAITING | IRQ_ONESHOT |
-			  IRQ_INPROGRESS | IRQ_SPURIOUS_DISABLED);
+	desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
+			  IRQS_INPROGRESS | IRQS_ONESHOT | \
+			  IRQS_WAITING);
+
+	if (new->flags & IRQF_PERCPU) {
+		irqd_set(&desc->irq_data, IRQD_PER_CPU);
+		irq_settings_set_per_cpu(desc);
+	}
 
 	if (new->flags & IRQF_ONESHOT)
-		desc->status |= IRQ_ONESHOT;
+		desc->istate |= IRQS_ONESHOT;
 
-	if (!(desc->status & IRQ_NOAUTOEN)) {
-		desc->depth = 0;
-		desc->status &= ~IRQ_DISABLED;
-		desc->irq_data.chip->irq_startup(&desc->irq_data);
-	} else
+	if (irq_settings_can_autoenable(desc))
+		irq_startup(desc);
+	else
 		/* Undo nested disables: */
 		desc->depth = 1;
 
 	/* Exclude IRQ from balancing if requested */
-	if (new->flags & IRQF_NOBALANCING)
-		desc->status |= IRQ_NO_BALANCING;
+	if (new->flags & IRQF_NOBALANCING) {
+		irq_settings_set_no_balancing(desc);
+		irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
+	}
 
 	/* Set default affinity mask once everything is setup */
-	setup_affinity(irq, desc);
+	setup_affinity(irq, desc, mask);
 
-	} else if ((new->flags & IRQF_TRIGGER_MASK)
-		   && (new->flags & IRQF_TRIGGER_MASK)
-		   != (desc->status & IRQ_TYPE_SENSE_MASK)) {
-		/* hope the handler works with the actual trigger mode... */
-		pr_warning("IRQ %d uses trigger mode %d; requested %d\n",
-			   irq, (int)(desc->status & IRQ_TYPE_SENSE_MASK),
-			   (int)(new->flags & IRQF_TRIGGER_MASK));
+	} else if (new->flags & IRQF_TRIGGER_MASK) {
+		unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
+		unsigned int omsk = irq_settings_get_trigger_mask(desc);
+
+		if (nmsk != omsk)
+			/* hope the handler works with current trigger mode */
+			pr_warning("IRQ %d uses trigger mode %u; requested %u\n",
+				   irq, nmsk, omsk);
 	}
 
 	new->irq = irq;
@@ -827,8 +1032,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 	 * Check whether we disabled the irq via the spurious handler
 	 * before. Reenable it and give it another chance.
 	 */
-	if (shared && (desc->status & IRQ_SPURIOUS_DISABLED)) {
-		desc->status &= ~IRQ_SPURIOUS_DISABLED;
+	if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
+		desc->istate &= ~IRQS_SPURIOUS_DISABLED;
 		__enable_irq(desc, irq, false);
 	}
 
@@ -858,6 +1063,9 @@ mismatch:
 #endif
 	ret = -EBUSY;
 
+out_mask:
+	free_cpumask_var(mask);
+
 out_thread:
 	raw_spin_unlock_irqrestore(&desc->lock, flags);
 	if (new->thread) {
@@ -880,9 +1088,14 @@ out_thread:
  */
 int setup_irq(unsigned int irq, struct irqaction *act)
 {
+	int retval;
 	struct irq_desc *desc = irq_to_desc(irq);
 
-	return __setup_irq(irq, desc, act);
+	chip_bus_lock(desc);
+	retval = __setup_irq(irq, desc, act);
+	chip_bus_sync_unlock(desc);
+
+	return retval;
 }
 EXPORT_SYMBOL_GPL(setup_irq);
 
@@ -933,13 +1146,8 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
 #endif
 
 	/* If this was the last handler, shut down the IRQ line: */
-	if (!desc->action) {
-		desc->status |= IRQ_DISABLED;
-		if (desc->irq_data.chip->irq_shutdown)
-			desc->irq_data.chip->irq_shutdown(&desc->irq_data);
-		else
-			desc->irq_data.chip->irq_disable(&desc->irq_data);
-	}
+	if (!desc->action)
+		irq_shutdown(desc);
 
 #ifdef CONFIG_SMP
 	/* make sure affinity_hint is cleaned up */
@@ -1013,6 +1221,11 @@ void free_irq(unsigned int irq, void *dev_id)
 	if (!desc)
 		return;
 
+#ifdef CONFIG_SMP
+	if (WARN_ON(desc->affinity_notify))
+		desc->affinity_notify = NULL;
+#endif
+
 	chip_bus_lock(desc);
 	kfree(__free_irq(irq, dev_id));
 	chip_bus_sync_unlock(desc);
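
The WARN_ON added above enforces the ordering documented at irq_set_affinity_notifier(): the notifier (and, for tidiness, any affinity hint) must be torn down before the interrupt is freed. A sketch of the corresponding driver teardown; mydrv_teardown_irq() is illustrative:

static void mydrv_teardown_irq(unsigned int irq, void *dev_id)
{
	irq_set_affinity_notifier(irq, NULL);	/* drop our notifier first */
	irq_set_affinity_hint(irq, NULL);	/* clear the hint as well */
	free_irq(irq, dev_id);
}
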
@@ -1083,7 +1296,7 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler,
 	if (!desc)
 		return -EINVAL;
 
-	if (desc->status & IRQ_NOREQUEST)
+	if (!irq_settings_can_request(desc))
 		return -EINVAL;
 
 	if (!handler) {
@@ -1158,7 +1371,7 @@ int request_any_context_irq(unsigned int irq, irq_handler_t handler,
 	if (!desc)
 		return -EINVAL;
 
-	if (desc->status & IRQ_NESTED_THREAD) {
+	if (irq_settings_is_nested_thread(desc)) {
 		ret = request_threaded_irq(irq, NULL, handler,
 					   flags, name, dev_id);
 		return !ret ? IRQC_IS_NESTED : ret;