Diffstat (limited to 'kernel/irq/chip.c')
-rw-r--r--  kernel/irq/chip.c | 181
1 file changed, 114 insertions, 67 deletions
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index c1660194d115..b7091d5ca2f8 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -18,11 +18,7 @@
 
 #include "internals.h"
 
-/**
- * dynamic_irq_init - initialize a dynamically allocated irq
- * @irq: irq number to initialize
- */
-void dynamic_irq_init(unsigned int irq)
+static void dynamic_irq_init_x(unsigned int irq, bool keep_chip_data)
 {
 	struct irq_desc *desc;
 	unsigned long flags;
@@ -34,14 +30,15 @@ void dynamic_irq_init(unsigned int irq)
 	}
 
 	/* Ensure we don't have left over values from a previous use of this irq */
-	spin_lock_irqsave(&desc->lock, flags);
+	raw_spin_lock_irqsave(&desc->lock, flags);
 	desc->status = IRQ_DISABLED;
 	desc->chip = &no_irq_chip;
 	desc->handle_irq = handle_bad_irq;
 	desc->depth = 1;
 	desc->msi_desc = NULL;
 	desc->handler_data = NULL;
-	desc->chip_data = NULL;
+	if (!keep_chip_data)
+		desc->chip_data = NULL;
 	desc->action = NULL;
 	desc->irq_count = 0;
 	desc->irqs_unhandled = 0;
@@ -51,14 +48,30 @@ void dynamic_irq_init(unsigned int irq)
 	cpumask_clear(desc->pending_mask);
 #endif
 #endif
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
 }
 
 /**
- * dynamic_irq_cleanup - cleanup a dynamically allocated irq
+ * dynamic_irq_init - initialize a dynamically allocated irq
  * @irq: irq number to initialize
  */
-void dynamic_irq_cleanup(unsigned int irq)
+void dynamic_irq_init(unsigned int irq)
+{
+	dynamic_irq_init_x(irq, false);
+}
+
+/**
+ * dynamic_irq_init_keep_chip_data - initialize a dynamically allocated irq
+ * @irq: irq number to initialize
+ *
+ * does not set irq_to_desc(irq)->chip_data to NULL
+ */
+void dynamic_irq_init_keep_chip_data(unsigned int irq)
+{
+	dynamic_irq_init_x(irq, true);
+}
+
+static void dynamic_irq_cleanup_x(unsigned int irq, bool keep_chip_data)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 	unsigned long flags;
@@ -68,21 +81,42 @@ void dynamic_irq_cleanup(unsigned int irq)
 		return;
 	}
 
-	spin_lock_irqsave(&desc->lock, flags);
+	raw_spin_lock_irqsave(&desc->lock, flags);
 	if (desc->action) {
-		spin_unlock_irqrestore(&desc->lock, flags);
+		raw_spin_unlock_irqrestore(&desc->lock, flags);
 		WARN(1, KERN_ERR "Destroying IRQ%d without calling free_irq\n",
 			irq);
 		return;
 	}
 	desc->msi_desc = NULL;
 	desc->handler_data = NULL;
-	desc->chip_data = NULL;
+	if (!keep_chip_data)
+		desc->chip_data = NULL;
 	desc->handle_irq = handle_bad_irq;
 	desc->chip = &no_irq_chip;
 	desc->name = NULL;
 	clear_kstat_irqs(desc);
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
+}
+
+/**
+ * dynamic_irq_cleanup - cleanup a dynamically allocated irq
+ * @irq: irq number to initialize
+ */
+void dynamic_irq_cleanup(unsigned int irq)
+{
+	dynamic_irq_cleanup_x(irq, false);
+}
+
+/**
+ * dynamic_irq_cleanup_keep_chip_data - cleanup a dynamically allocated irq
+ * @irq: irq number to initialize
+ *
+ * does not set irq_to_desc(irq)->chip_data to NULL
+ */
+void dynamic_irq_cleanup_keep_chip_data(unsigned int irq)
+{
+	dynamic_irq_cleanup_x(irq, true);
+}
 }
 
 
@@ -104,10 +138,10 @@ int set_irq_chip(unsigned int irq, struct irq_chip *chip)
 	if (!chip)
 		chip = &no_irq_chip;
 
-	spin_lock_irqsave(&desc->lock, flags);
+	raw_spin_lock_irqsave(&desc->lock, flags);
 	irq_chip_set_defaults(chip);
 	desc->chip = chip;
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
 
 	return 0;
 }
@@ -133,9 +167,9 @@ int set_irq_type(unsigned int irq, unsigned int type)
 	if (type == IRQ_TYPE_NONE)
 		return 0;
 
-	spin_lock_irqsave(&desc->lock, flags);
+	raw_spin_lock_irqsave(&desc->lock, flags);
 	ret = __irq_set_trigger(desc, irq, type);
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
 	return ret;
 }
 EXPORT_SYMBOL(set_irq_type);
@@ -158,19 +192,19 @@ int set_irq_data(unsigned int irq, void *data)
 		return -EINVAL;
 	}
 
-	spin_lock_irqsave(&desc->lock, flags);
+	raw_spin_lock_irqsave(&desc->lock, flags);
 	desc->handler_data = data;
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
 	return 0;
 }
 EXPORT_SYMBOL(set_irq_data);
 
 /**
- * set_irq_data - set irq type data for an irq
+ * set_irq_msi - set MSI descriptor data for an irq
  * @irq: Interrupt number
  * @entry: Pointer to MSI descriptor data
  *
- * Set the hardware irq controller data for an irq
+ * Set the MSI descriptor entry for an irq
  */
 int set_irq_msi(unsigned int irq, struct msi_desc *entry)
 {
@@ -183,11 +217,11 @@ int set_irq_msi(unsigned int irq, struct msi_desc *entry)
 		return -EINVAL;
 	}
 
-	spin_lock_irqsave(&desc->lock, flags);
+	raw_spin_lock_irqsave(&desc->lock, flags);
 	desc->msi_desc = entry;
 	if (entry)
 		entry->irq = irq;
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
 	return 0;
 }
 
@@ -214,9 +248,9 @@ int set_irq_chip_data(unsigned int irq, void *data)
 		return -EINVAL;
 	}
 
-	spin_lock_irqsave(&desc->lock, flags);
+	raw_spin_lock_irqsave(&desc->lock, flags);
 	desc->chip_data = data;
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
 
 	return 0;
 }
@@ -241,12 +275,12 @@ void set_irq_nested_thread(unsigned int irq, int nest)
 	if (!desc)
 		return;
 
-	spin_lock_irqsave(&desc->lock, flags);
+	raw_spin_lock_irqsave(&desc->lock, flags);
 	if (nest)
 		desc->status |= IRQ_NESTED_THREAD;
 	else
 		desc->status &= ~IRQ_NESTED_THREAD;
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
 }
 EXPORT_SYMBOL_GPL(set_irq_nested_thread);
 
@@ -325,6 +359,23 @@ static inline void mask_ack_irq(struct irq_desc *desc, int irq)
 		if (desc->chip->ack)
 			desc->chip->ack(irq);
 	}
+	desc->status |= IRQ_MASKED;
+}
+
+static inline void mask_irq(struct irq_desc *desc, int irq)
+{
+	if (desc->chip->mask) {
+		desc->chip->mask(irq);
+		desc->status |= IRQ_MASKED;
+	}
+}
+
+static inline void unmask_irq(struct irq_desc *desc, int irq)
+{
+	if (desc->chip->unmask) {
+		desc->chip->unmask(irq);
+		desc->status &= ~IRQ_MASKED;
+	}
 }
 
 /*
@@ -343,7 +394,7 @@ void handle_nested_irq(unsigned int irq)
 
 	might_sleep();
 
-	spin_lock_irq(&desc->lock);
+	raw_spin_lock_irq(&desc->lock);
 
 	kstat_incr_irqs_this_cpu(irq, desc);
 
@@ -352,17 +403,17 @@ void handle_nested_irq(unsigned int irq)
 		goto out_unlock;
 
 	desc->status |= IRQ_INPROGRESS;
-	spin_unlock_irq(&desc->lock);
+	raw_spin_unlock_irq(&desc->lock);
 
 	action_ret = action->thread_fn(action->irq, action->dev_id);
 	if (!noirqdebug)
 		note_interrupt(irq, desc, action_ret);
 
-	spin_lock_irq(&desc->lock);
+	raw_spin_lock_irq(&desc->lock);
 	desc->status &= ~IRQ_INPROGRESS;
 
 out_unlock:
-	spin_unlock_irq(&desc->lock);
+	raw_spin_unlock_irq(&desc->lock);
 }
 EXPORT_SYMBOL_GPL(handle_nested_irq);
 
@@ -384,7 +435,7 @@ handle_simple_irq(unsigned int irq, struct irq_desc *desc)
 	struct irqaction *action;
 	irqreturn_t action_ret;
 
-	spin_lock(&desc->lock);
+	raw_spin_lock(&desc->lock);
 
 	if (unlikely(desc->status & IRQ_INPROGRESS))
 		goto out_unlock;
@@ -396,16 +447,16 @@ handle_simple_irq(unsigned int irq, struct irq_desc *desc)
 		goto out_unlock;
 
 	desc->status |= IRQ_INPROGRESS;
-	spin_unlock(&desc->lock);
+	raw_spin_unlock(&desc->lock);
 
 	action_ret = handle_IRQ_event(irq, action);
 	if (!noirqdebug)
 		note_interrupt(irq, desc, action_ret);
 
-	spin_lock(&desc->lock);
+	raw_spin_lock(&desc->lock);
 	desc->status &= ~IRQ_INPROGRESS;
 out_unlock:
-	spin_unlock(&desc->lock);
+	raw_spin_unlock(&desc->lock);
 }
 
 /**
@@ -424,7 +475,7 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
 	struct irqaction *action;
 	irqreturn_t action_ret;
 
-	spin_lock(&desc->lock);
+	raw_spin_lock(&desc->lock);
 	mask_ack_irq(desc, irq);
 
 	if (unlikely(desc->status & IRQ_INPROGRESS))
@@ -441,21 +492,19 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
 		goto out_unlock;
 
 	desc->status |= IRQ_INPROGRESS;
-	spin_unlock(&desc->lock);
+	raw_spin_unlock(&desc->lock);
 
 	action_ret = handle_IRQ_event(irq, action);
 	if (!noirqdebug)
 		note_interrupt(irq, desc, action_ret);
 
-	spin_lock(&desc->lock);
+	raw_spin_lock(&desc->lock);
 	desc->status &= ~IRQ_INPROGRESS;
 
-	if (unlikely(desc->status & IRQ_ONESHOT))
-		desc->status |= IRQ_MASKED;
-	else if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask)
-		desc->chip->unmask(irq);
+	if (!(desc->status & (IRQ_DISABLED | IRQ_ONESHOT)))
+		unmask_irq(desc, irq);
 out_unlock:
-	spin_unlock(&desc->lock);
+	raw_spin_unlock(&desc->lock);
 }
 EXPORT_SYMBOL_GPL(handle_level_irq);
 
@@ -475,7 +524,7 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
 	struct irqaction *action;
 	irqreturn_t action_ret;
 
-	spin_lock(&desc->lock);
+	raw_spin_lock(&desc->lock);
 
 	if (unlikely(desc->status & IRQ_INPROGRESS))
 		goto out;
@@ -490,25 +539,24 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
 	action = desc->action;
 	if (unlikely(!action || (desc->status & IRQ_DISABLED))) {
 		desc->status |= IRQ_PENDING;
-		if (desc->chip->mask)
-			desc->chip->mask(irq);
+		mask_irq(desc, irq);
 		goto out;
 	}
 
 	desc->status |= IRQ_INPROGRESS;
 	desc->status &= ~IRQ_PENDING;
-	spin_unlock(&desc->lock);
+	raw_spin_unlock(&desc->lock);
 
 	action_ret = handle_IRQ_event(irq, action);
 	if (!noirqdebug)
 		note_interrupt(irq, desc, action_ret);
 
-	spin_lock(&desc->lock);
+	raw_spin_lock(&desc->lock);
 	desc->status &= ~IRQ_INPROGRESS;
 out:
 	desc->chip->eoi(irq);
 
-	spin_unlock(&desc->lock);
+	raw_spin_unlock(&desc->lock);
 }
 
 /**
@@ -520,7 +568,7 @@ out:
  * signal. The occurence is latched into the irq controller hardware
  * and must be acked in order to be reenabled. After the ack another
  * interrupt can happen on the same source even before the first one
- * is handled by the assosiacted event handler. If this happens it
+ * is handled by the associated event handler. If this happens it
  * might be necessary to disable (mask) the interrupt depending on the
  * controller hardware. This requires to reenable the interrupt inside
  * of the loop which handles the interrupts which have arrived while
@@ -530,7 +578,7 @@ out:
 void
 handle_edge_irq(unsigned int irq, struct irq_desc *desc)
 {
-	spin_lock(&desc->lock);
+	raw_spin_lock(&desc->lock);
 
 	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
 
@@ -559,7 +607,7 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
 		irqreturn_t action_ret;
 
 		if (unlikely(!action)) {
-			desc->chip->mask(irq);
+			mask_irq(desc, irq);
 			goto out_unlock;
 		}
 
@@ -571,26 +619,25 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
 		if (unlikely((desc->status &
 			       (IRQ_PENDING | IRQ_MASKED | IRQ_DISABLED)) ==
 			      (IRQ_PENDING | IRQ_MASKED))) {
-			desc->chip->unmask(irq);
-			desc->status &= ~IRQ_MASKED;
+			unmask_irq(desc, irq);
 		}
 
 		desc->status &= ~IRQ_PENDING;
-		spin_unlock(&desc->lock);
+		raw_spin_unlock(&desc->lock);
 		action_ret = handle_IRQ_event(irq, action);
 		if (!noirqdebug)
 			note_interrupt(irq, desc, action_ret);
-		spin_lock(&desc->lock);
+		raw_spin_lock(&desc->lock);
 
 	} while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING);
 
 	desc->status &= ~IRQ_INPROGRESS;
 out_unlock:
-	spin_unlock(&desc->lock);
+	raw_spin_unlock(&desc->lock);
 }
 
 /**
- * handle_percpu_IRQ - Per CPU local irq handler
+ * handle_percpu_irq - Per CPU local irq handler
  * @irq: the interrupt number
  * @desc: the interrupt description structure for this irq
 *
@@ -643,7 +690,7 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
 	}
 
 	chip_bus_lock(irq, desc);
-	spin_lock_irqsave(&desc->lock, flags);
+	raw_spin_lock_irqsave(&desc->lock, flags);
 
 	/* Uninstall? */
 	if (handle == handle_bad_irq) {
@@ -661,7 +708,7 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
 		desc->depth = 0;
 		desc->chip->startup(irq);
 	}
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
 	chip_bus_sync_unlock(irq, desc);
 }
 EXPORT_SYMBOL_GPL(__set_irq_handler);
@@ -682,7 +729,7 @@ set_irq_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
 	__set_irq_handler(irq, handle, 0, name);
 }
 
-void __init set_irq_noprobe(unsigned int irq)
+void set_irq_noprobe(unsigned int irq)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 	unsigned long flags;
@@ -692,12 +739,12 @@ void __init set_irq_noprobe(unsigned int irq)
 		return;
 	}
 
-	spin_lock_irqsave(&desc->lock, flags);
+	raw_spin_lock_irqsave(&desc->lock, flags);
 	desc->status |= IRQ_NOPROBE;
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
 }
 
-void __init set_irq_probe(unsigned int irq)
+void set_irq_probe(unsigned int irq)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 	unsigned long flags;
@@ -707,7 +754,7 @@ void __init set_irq_probe(unsigned int irq)
 		return;
 	}
 
-	spin_lock_irqsave(&desc->lock, flags);
+	raw_spin_lock_irqsave(&desc->lock, flags);
 	desc->status &= ~IRQ_NOPROBE;
-	spin_unlock_irqrestore(&desc->lock, flags);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
 }