Diffstat (limited to 'kernel/irq/manage.c')

 kernel/irq/manage.c | 206 +++++++++++++++++++++++++++++++++------------
 1 file changed, 148 insertions(+), 58 deletions(-)
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 60c49e324390..540f6c49f3fa 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -31,10 +31,10 @@ cpumask_t irq_default_affinity = CPU_MASK_ALL;
  */
 void synchronize_irq(unsigned int irq)
 {
-	struct irq_desc *desc = irq_desc + irq;
+	struct irq_desc *desc = irq_to_desc(irq);
 	unsigned int status;
 
-	if (irq >= NR_IRQS)
+	if (!desc)
 		return;
 
 	do {
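
This hunk establishes the pattern repeated throughout the file: the open-coded irq_desc + irq pointer arithmetic and the irq >= NR_IRQS bounds check are replaced by an irq_to_desc() lookup followed by a NULL check, which keeps working once the descriptors are no longer a flat, statically sized array. As a rough sketch (not part of this diff), the non-sparse fallback of irq_to_desc() in this era reduces to a bounds-checked array access:

/*
 * Simplified from kernel/irq/handle.c of this period: the flat-array
 * lookup that the NULL checks above rely on.  The sparse-IRQ variant
 * looks the descriptor up dynamically instead of indexing a table.
 */
struct irq_desc *irq_to_desc(unsigned int irq)
{
	return (irq < nr_irqs) ? irq_desc + irq : NULL;
}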
@@ -64,7 +64,7 @@ EXPORT_SYMBOL(synchronize_irq);
  */
 int irq_can_set_affinity(unsigned int irq)
 {
-	struct irq_desc *desc = irq_desc + irq;
+	struct irq_desc *desc = irq_to_desc(irq);
 
 	if (CHECK_IRQ_PER_CPU(desc->status) || !desc->chip ||
 	    !desc->chip->set_affinity)
@@ -81,26 +81,28 @@ int irq_can_set_affinity(unsigned int irq)
  */
 int irq_set_affinity(unsigned int irq, cpumask_t cpumask)
 {
-	struct irq_desc *desc = irq_desc + irq;
+	struct irq_desc *desc = irq_to_desc(irq);
+	unsigned long flags;
 
 	if (!desc->chip->set_affinity)
 		return -EINVAL;
 
-	set_balance_irq_affinity(irq, cpumask);
+	spin_lock_irqsave(&desc->lock, flags);
 
 #ifdef CONFIG_GENERIC_PENDING_IRQ
-	if (desc->status & IRQ_MOVE_PCNTXT) {
-		unsigned long flags;
-
-		spin_lock_irqsave(&desc->lock, flags);
+	if (desc->status & IRQ_MOVE_PCNTXT || desc->status & IRQ_DISABLED) {
+		desc->affinity = cpumask;
 		desc->chip->set_affinity(irq, cpumask);
-		spin_unlock_irqrestore(&desc->lock, flags);
-	} else
-		set_pending_irq(irq, cpumask);
+	} else {
+		desc->status |= IRQ_MOVE_PENDING;
+		desc->pending_mask = cpumask;
+	}
 #else
 	desc->affinity = cpumask;
 	desc->chip->set_affinity(irq, cpumask);
 #endif
+	desc->status |= IRQ_AFFINITY_SET;
+	spin_unlock_irqrestore(&desc->lock, flags);
 	return 0;
 }
 
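With this change the whole affinity update runs under desc->lock: an immediate move is performed only when it is safe (process-context moves supported, or the line currently disabled); otherwise the request is parked in desc->pending_mask with IRQ_MOVE_PENDING set, to be applied on the next interrupt. IRQ_AFFINITY_SET additionally records that an explicit affinity was chosen. A minimal caller sketch, assuming the cpumask_t-by-value API this tree still uses (pin_to_cpu0 is a hypothetical helper, not part of the patch):

#include <linux/interrupt.h>
#include <linux/cpumask.h>

/* Hypothetical driver helper: pin an IRQ to CPU 0, but only if the
 * line supports affinity changes at all. */
static int pin_to_cpu0(unsigned int irq)
{
	if (!irq_can_set_affinity(irq))
		return -EINVAL;

	return irq_set_affinity(irq, cpumask_of_cpu(0));
}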
@@ -108,7 +110,7 @@ int irq_set_affinity(unsigned int irq, cpumask_t cpumask)
 /*
  * Generic version of the affinity autoselector.
  */
-int irq_select_affinity(unsigned int irq)
+int do_irq_select_affinity(unsigned int irq, struct irq_desc *desc)
 {
 	cpumask_t mask;
 
@@ -117,14 +119,50 @@ int irq_select_affinity(unsigned int irq)
 
 	cpus_and(mask, cpu_online_map, irq_default_affinity);
 
-	irq_desc[irq].affinity = mask;
-	irq_desc[irq].chip->set_affinity(irq, mask);
+	/*
+	 * Preserve an userspace affinity setup, but make sure that
+	 * one of the targets is online.
+	 */
+	if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) {
+		if (cpus_intersects(desc->affinity, cpu_online_map))
+			mask = desc->affinity;
+		else
+			desc->status &= ~IRQ_AFFINITY_SET;
+	}
+
+	desc->affinity = mask;
+	desc->chip->set_affinity(irq, mask);
 
-	set_balance_irq_affinity(irq, mask);
 	return 0;
 }
+#else
+static inline int do_irq_select_affinity(unsigned int irq, struct irq_desc *d)
+{
+	return irq_select_affinity(irq);
+}
 #endif
 
+/*
+ * Called when affinity is set via /proc/irq
+ */
+int irq_select_affinity_usr(unsigned int irq)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&desc->lock, flags);
+	ret = do_irq_select_affinity(irq, desc);
+	spin_unlock_irqrestore(&desc->lock, flags);
+
+	return ret;
+}
+
+#else
+static inline int do_irq_select_affinity(int irq, struct irq_desc *desc)
+{
+	return 0;
+}
 #endif
 
 /**
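The autoselector now respects an affinity that userspace configured (IRQ_AFFINITY_SET) or that was pinned (IRQ_NO_BALANCING), falling back to the default mask only when none of the chosen targets is still online; irq_select_affinity_usr() wraps it with the descriptor lock for the /proc/irq write path. A hedged sketch of such a caller, modeled on the /proc/irq/<n>/smp_affinity write handler (apply_user_mask is a hypothetical name):

/* If the user-supplied mask contains no online CPU, let the
 * autoselector pick a sane default instead of failing outright. */
static int apply_user_mask(unsigned int irq, cpumask_t new_mask)
{
	if (!cpus_intersects(new_mask, cpu_online_map))
		return irq_select_affinity_usr(irq);

	return irq_set_affinity(irq, new_mask);
}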
@@ -140,10 +178,10 @@ int irq_select_affinity(unsigned int irq)
  */
 void disable_irq_nosync(unsigned int irq)
 {
-	struct irq_desc *desc = irq_desc + irq;
+	struct irq_desc *desc = irq_to_desc(irq);
 	unsigned long flags;
 
-	if (irq >= NR_IRQS)
+	if (!desc)
 		return;
 
 	spin_lock_irqsave(&desc->lock, flags);
@@ -169,9 +207,9 @@ EXPORT_SYMBOL(disable_irq_nosync);
  */
 void disable_irq(unsigned int irq)
 {
-	struct irq_desc *desc = irq_desc + irq;
+	struct irq_desc *desc = irq_to_desc(irq);
 
-	if (irq >= NR_IRQS)
+	if (!desc)
 		return;
 
 	disable_irq_nosync(irq);
@@ -211,10 +249,10 @@ static void __enable_irq(struct irq_desc *desc, unsigned int irq)
  */
 void enable_irq(unsigned int irq)
 {
-	struct irq_desc *desc = irq_desc + irq;
+	struct irq_desc *desc = irq_to_desc(irq);
 	unsigned long flags;
 
-	if (irq >= NR_IRQS)
+	if (!desc)
 		return;
 
 	spin_lock_irqsave(&desc->lock, flags);
@@ -223,9 +261,9 @@ void enable_irq(unsigned int irq)
 }
 EXPORT_SYMBOL(enable_irq);
 
-int set_irq_wake_real(unsigned int irq, unsigned int on)
+static int set_irq_wake_real(unsigned int irq, unsigned int on)
 {
-	struct irq_desc *desc = irq_desc + irq;
+	struct irq_desc *desc = irq_to_desc(irq);
 	int ret = -ENXIO;
 
 	if (desc->chip->set_wake)
@@ -248,7 +286,7 @@ int set_irq_wake_real(unsigned int irq, unsigned int on)
  */
 int set_irq_wake(unsigned int irq, unsigned int on)
 {
-	struct irq_desc *desc = irq_desc + irq;
+	struct irq_desc *desc = irq_to_desc(irq);
 	unsigned long flags;
 	int ret = 0;
 
@@ -288,12 +326,16 @@ EXPORT_SYMBOL(set_irq_wake);
  */
 int can_request_irq(unsigned int irq, unsigned long irqflags)
 {
+	struct irq_desc *desc = irq_to_desc(irq);
 	struct irqaction *action;
 
-	if (irq >= NR_IRQS || irq_desc[irq].status & IRQ_NOREQUEST)
+	if (!desc)
+		return 0;
+
+	if (desc->status & IRQ_NOREQUEST)
 		return 0;
 
-	action = irq_desc[irq].action;
+	action = desc->action;
 	if (action)
 		if (irqflags & action->flags & IRQF_SHARED)
 			action = NULL;
@@ -312,27 +354,35 @@ void compat_irq_chip_set_default_handler(struct irq_desc *desc)
 	desc->handle_irq = NULL;
 }
 
-static int __irq_set_trigger(struct irq_chip *chip, unsigned int irq,
+int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
 		unsigned long flags)
 {
 	int ret;
+	struct irq_chip *chip = desc->chip;
 
 	if (!chip || !chip->set_type) {
 		/*
 		 * IRQF_TRIGGER_* but the PIC does not support multiple
 		 * flow-types?
 		 */
-		pr_warning("No set_type function for IRQ %d (%s)\n", irq,
+		pr_debug("No set_type function for IRQ %d (%s)\n", irq,
 				chip ? (chip->name ? : "unknown") : "unknown");
 		return 0;
 	}
 
-	ret = chip->set_type(irq, flags & IRQF_TRIGGER_MASK);
+	/* caller masked out all except trigger mode flags */
+	ret = chip->set_type(irq, flags);
 
 	if (ret)
 		pr_err("setting trigger mode %d for irq %u failed (%pF)\n",
-				(int)(flags & IRQF_TRIGGER_MASK),
-				irq, chip->set_type);
+				(int)flags, irq, chip->set_type);
+	else {
+		if (flags & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
+			flags |= IRQ_LEVEL;
+		/* note that IRQF_TRIGGER_MASK == IRQ_TYPE_SENSE_MASK */
+		desc->status &= ~(IRQ_LEVEL | IRQ_TYPE_SENSE_MASK);
+		desc->status |= flags;
+	}
 
 	return ret;
 }
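__irq_set_trigger() now takes the descriptor rather than the chip, so that on success it can latch the programmed trigger into desc->status (setting IRQ_LEVEL for level-sensitive modes and recording the sense bits); callers mask the flags down to IRQF_TRIGGER_* beforehand. From a driver's point of view nothing changes. A hypothetical request (button_isr, gpio_irq and dev are placeholders, not part of the patch):

#include <linux/interrupt.h>

static irqreturn_t button_isr(int irq, void *dev_id)
{
	/* ... read and debounce the line ... */
	return IRQ_HANDLED;
}

static int button_init(unsigned int gpio_irq, void *dev)
{
	/* The IRQF_TRIGGER_FALLING bit ends up in chip->set_type()
	 * and, after this patch, in desc->status as well. */
	return request_irq(gpio_irq, button_isr, IRQF_TRIGGER_FALLING,
			   "button", dev);
}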
@@ -341,16 +391,16 @@ static int __irq_set_trigger(struct irq_chip *chip, unsigned int irq,
  * Internal function to register an irqaction - typically used to
  * allocate special interrupts that are part of the architecture.
  */
-int setup_irq(unsigned int irq, struct irqaction *new)
+static int
+__setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)
 {
-	struct irq_desc *desc = irq_desc + irq;
 	struct irqaction *old, **p;
 	const char *old_name = NULL;
 	unsigned long flags;
 	int shared = 0;
 	int ret;
 
-	if (irq >= NR_IRQS)
+	if (!desc)
 		return -EINVAL;
 
 	if (desc->chip == &no_irq_chip)
@@ -411,7 +461,8 @@ int setup_irq(unsigned int irq, struct irqaction *new)
 
 	/* Setup the type (level, edge polarity) if configured: */
 	if (new->flags & IRQF_TRIGGER_MASK) {
-		ret = __irq_set_trigger(desc->chip, irq, new->flags);
+		ret = __irq_set_trigger(desc, irq,
+				new->flags & IRQF_TRIGGER_MASK);
 
 		if (ret) {
 			spin_unlock_irqrestore(&desc->lock, flags);
@@ -430,24 +481,29 @@ int setup_irq(unsigned int irq, struct irqaction *new)
 	if (!(desc->status & IRQ_NOAUTOEN)) {
 		desc->depth = 0;
 		desc->status &= ~IRQ_DISABLED;
-		if (desc->chip->startup)
-			desc->chip->startup(irq);
-		else
-			desc->chip->enable(irq);
+		desc->chip->startup(irq);
 	} else
 		/* Undo nested disables: */
 		desc->depth = 1;
 
+	/* Exclude IRQ from balancing if requested */
+	if (new->flags & IRQF_NOBALANCING)
+		desc->status |= IRQ_NO_BALANCING;
+
 	/* Set default affinity mask once everything is setup */
-	irq_select_affinity(irq);
+	do_irq_select_affinity(irq, desc);
+
+	} else if ((new->flags & IRQF_TRIGGER_MASK)
+			&& (new->flags & IRQF_TRIGGER_MASK)
+				!= (desc->status & IRQ_TYPE_SENSE_MASK)) {
+		/* hope the handler works with the actual trigger mode... */
+		pr_warning("IRQ %d uses trigger mode %d; requested %d\n",
+				irq, (int)(desc->status & IRQ_TYPE_SENSE_MASK),
+				(int)(new->flags & IRQF_TRIGGER_MASK));
 	}
 
 	*p = new;
 
-	/* Exclude IRQ from balancing */
-	if (new->flags & IRQF_NOBALANCING)
-		desc->status |= IRQ_NO_BALANCING;
-
 	/* Reset broken irq detection when installing new handler */
 	desc->irq_count = 0;
 	desc->irqs_unhandled = 0;
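Three things happen in this hunk: the NULL check on chip->startup is dropped, the IRQF_NOBALANCING handling moves ahead of the affinity autoselection (so do_irq_select_affinity() sees IRQ_NO_BALANCING), and a later handler on a shared line that asks for a different trigger mode now gets a warning instead of a silent mismatch. The startup check can go because __setup_irq() has already run irq_chip_set_defaults(), which fills in the hook; roughly (simplified from kernel/irq/chip.c of this era, with some of the defaults elided):

void irq_chip_set_defaults(struct irq_chip *chip)
{
	if (!chip->enable)
		chip->enable = default_enable;
	if (!chip->disable)
		chip->disable = default_disable;
	if (!chip->startup)
		chip->startup = default_startup;	/* calls ->enable() */
	/* ... ->shutdown and ->end defaults elided ... */
}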
@@ -464,7 +520,7 @@ int setup_irq(unsigned int irq, struct irqaction *new)
 	spin_unlock_irqrestore(&desc->lock, flags);
 
 	new->irq = irq;
-	register_irq_proc(irq);
+	register_irq_proc(irq, desc);
 	new->dir = NULL;
 	register_handler_proc(irq, new);
 
@@ -484,6 +540,20 @@ mismatch:
 }
 
 /**
+ * setup_irq - setup an interrupt
+ * @irq: Interrupt line to setup
+ * @act: irqaction for the interrupt
+ *
+ * Used to statically setup interrupts in the early boot process.
+ */
+int setup_irq(unsigned int irq, struct irqaction *act)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+
+	return __setup_irq(irq, desc, act);
+}
+
+/**
  * free_irq - free an interrupt
  * @irq: Interrupt line to free
  * @dev_id: Device identity to free
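setup_irq() survives as a thin public wrapper around __setup_irq(), so early-boot code can keep registering statically allocated irqactions before the allocator is up. A hypothetical use, modeled loosely on the x86 timer tick setup (timer_interrupt and the function name are placeholders):

static irqreturn_t timer_interrupt(int irq, void *dev_id);

static struct irqaction irq0 = {
	.handler = timer_interrupt,
	.flags   = IRQF_DISABLED | IRQF_NOBALANCING,
	.name    = "timer",
};

void __init setup_default_timer_irq(void)
{
	/* No kmalloc() needed: the irqaction lives in .data. */
	setup_irq(0, &irq0);
}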
@@ -499,15 +569,15 @@ mismatch:
  */
 void free_irq(unsigned int irq, void *dev_id)
 {
-	struct irq_desc *desc;
+	struct irq_desc *desc = irq_to_desc(irq);
 	struct irqaction **p;
 	unsigned long flags;
 
 	WARN_ON(in_interrupt());
-	if (irq >= NR_IRQS)
+
+	if (!desc)
 		return;
 
-	desc = irq_desc + irq;
 	spin_lock_irqsave(&desc->lock, flags);
 	p = &desc->action;
 	for (;;) {
@@ -596,14 +666,28 @@ EXPORT_SYMBOL(free_irq);
  *	IRQF_SHARED		Interrupt is shared
  *	IRQF_DISABLED	Disable local interrupts while processing
  *	IRQF_SAMPLE_RANDOM	The interrupt can be used for entropy
+ *	IRQF_TRIGGER_*		Specify active edge(s) or level
  *
  */
 int request_irq(unsigned int irq, irq_handler_t handler,
 		unsigned long irqflags, const char *devname, void *dev_id)
 {
 	struct irqaction *action;
+	struct irq_desc *desc;
 	int retval;
 
+	/*
+	 * handle_IRQ_event() always ignores IRQF_DISABLED except for
+	 * the _first_ irqaction (sigh).  That can cause oopsing, but
+	 * the behavior is classified as "will not fix" so we need to
+	 * start nudging drivers away from using that idiom.
+	 */
+	if ((irqflags & (IRQF_SHARED|IRQF_DISABLED))
+			== (IRQF_SHARED|IRQF_DISABLED))
+		pr_warning("IRQ %d/%s: IRQF_DISABLED is not "
+				"guaranteed on shared IRQs\n",
+				irq, devname);
+
 #ifdef CONFIG_LOCKDEP
 	/*
 	 * Lockdep wants atomic interrupt handlers:
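The new warning targets a long-standing trap: handle_IRQ_event() honors IRQF_DISABLED only for the first irqaction on a line, so a shared handler cannot rely on running with local interrupts off. A request that now trips the message (my_isr, mydev_probe_irq and dev are placeholders):

static irqreturn_t my_isr(int irq, void *dev_id);

static int mydev_probe_irq(unsigned int irq, void *dev)
{
	/* Legal, but warns from now on: the IRQF_DISABLED guarantee
	 * does not hold for later sharers of the line. */
	return request_irq(irq, my_isr, IRQF_SHARED | IRQF_DISABLED,
			   "mydev", dev);
}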
@@ -618,9 +702,12 @@ int request_irq(unsigned int irq, irq_handler_t handler,
 	 */
 	if ((irqflags & IRQF_SHARED) && !dev_id)
 		return -EINVAL;
-	if (irq >= NR_IRQS)
+
+	desc = irq_to_desc(irq);
+	if (!desc)
 		return -EINVAL;
-	if (irq_desc[irq].status & IRQ_NOREQUEST)
+
+	if (desc->status & IRQ_NOREQUEST)
 		return -EINVAL;
 	if (!handler)
 		return -EINVAL;
@@ -636,26 +723,29 @@ int request_irq(unsigned int irq, irq_handler_t handler,
 	action->next = NULL;
 	action->dev_id = dev_id;
 
+	retval = __setup_irq(irq, desc, action);
+	if (retval)
+		kfree(action);
+
 #ifdef CONFIG_DEBUG_SHIRQ
 	if (irqflags & IRQF_SHARED) {
 		/*
 		 * It's a shared IRQ -- the driver ought to be prepared for it
 		 * to happen immediately, so let's make sure....
-		 * We do this before actually registering it, to make sure that
-		 * a 'real' IRQ doesn't run in parallel with our fake
+		 * We disable the irq to make sure that a 'real' IRQ doesn't
+		 * run in parallel with our fake.
 		 */
 		unsigned long flags;
 
+		disable_irq(irq);
 		local_irq_save(flags);
+
 		handler(irq, dev_id);
+
 		local_irq_restore(flags);
+		enable_irq(irq);
 	}
 #endif
-
-	retval = setup_irq(irq, action);
-	if (retval)
-		kfree(action);
-
 	return retval;
 }
 EXPORT_SYMBOL(request_irq);
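
Registration now happens before the CONFIG_DEBUG_SHIRQ test fire, and the fake invocation runs with the line disabled via disable_irq()/enable_irq() so a real interrupt cannot race it. The practical requirement on drivers is unchanged: a shared handler must tolerate being called while its device has nothing pending. A hypothetical robust shape (mydev, MYDEV_STATUS and MYDEV_IRQ_PENDING are placeholders):

static irqreturn_t mydev_isr(int irq, void *dev_id)
{
	struct mydev *priv = dev_id;
	u32 status = readl(priv->regs + MYDEV_STATUS);

	/* Not ours (or the DEBUG_SHIRQ probe): tell the core so. */
	if (!(status & MYDEV_IRQ_PENDING))
		return IRQ_NONE;

	/* ... acknowledge and handle the event ... */
	return IRQ_HANDLED;
}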