Diffstat (limited to 'kernel/irq/manage.c')
-rw-r--r--  kernel/irq/manage.c | 130
1 file changed, 87 insertions(+), 43 deletions(-)
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 0314074fa23..c498a1b8c62 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -31,10 +31,10 @@ cpumask_t irq_default_affinity = CPU_MASK_ALL;
  */
 void synchronize_irq(unsigned int irq)
 {
-        struct irq_desc *desc = irq_desc + irq;
+        struct irq_desc *desc = irq_to_desc(irq);
         unsigned int status;
 
-        if (irq >= NR_IRQS)
+        if (!desc)
                 return;
 
         do {
@@ -64,7 +64,7 @@ EXPORT_SYMBOL(synchronize_irq);
  */
 int irq_can_set_affinity(unsigned int irq)
 {
-        struct irq_desc *desc = irq_desc + irq;
+        struct irq_desc *desc = irq_to_desc(irq);
 
         if (CHECK_IRQ_PER_CPU(desc->status) || !desc->chip ||
             !desc->chip->set_affinity)
@@ -81,15 +81,21 @@ int irq_can_set_affinity(unsigned int irq)
  */
 int irq_set_affinity(unsigned int irq, cpumask_t cpumask)
 {
-        struct irq_desc *desc = irq_desc + irq;
+        struct irq_desc *desc = irq_to_desc(irq);
 
         if (!desc->chip->set_affinity)
                 return -EINVAL;
 
-        set_balance_irq_affinity(irq, cpumask);
-
 #ifdef CONFIG_GENERIC_PENDING_IRQ
-        set_pending_irq(irq, cpumask);
+        if (desc->status & IRQ_MOVE_PCNTXT || desc->status & IRQ_DISABLED) {
+                unsigned long flags;
+
+                spin_lock_irqsave(&desc->lock, flags);
+                desc->affinity = cpumask;
+                desc->chip->set_affinity(irq, cpumask);
+                spin_unlock_irqrestore(&desc->lock, flags);
+        } else
+                set_pending_irq(irq, cpumask);
 #else
         desc->affinity = cpumask;
         desc->chip->set_affinity(irq, cpumask);
@@ -104,16 +110,17 @@ int irq_set_affinity(unsigned int irq, cpumask_t cpumask)
 int irq_select_affinity(unsigned int irq)
 {
         cpumask_t mask;
+        struct irq_desc *desc;
 
         if (!irq_can_set_affinity(irq))
                 return 0;
 
         cpus_and(mask, cpu_online_map, irq_default_affinity);
 
-        irq_desc[irq].affinity = mask;
-        irq_desc[irq].chip->set_affinity(irq, mask);
+        desc = irq_to_desc(irq);
+        desc->affinity = mask;
+        desc->chip->set_affinity(irq, mask);
 
-        set_balance_irq_affinity(irq, mask);
         return 0;
 }
 #endif
@@ -133,10 +140,10 @@ int irq_select_affinity(unsigned int irq)
  */
 void disable_irq_nosync(unsigned int irq)
 {
-        struct irq_desc *desc = irq_desc + irq;
+        struct irq_desc *desc = irq_to_desc(irq);
         unsigned long flags;
 
-        if (irq >= NR_IRQS)
+        if (!desc)
                 return;
 
         spin_lock_irqsave(&desc->lock, flags);
@@ -162,9 +169,9 @@ EXPORT_SYMBOL(disable_irq_nosync);
  */
 void disable_irq(unsigned int irq)
 {
-        struct irq_desc *desc = irq_desc + irq;
+        struct irq_desc *desc = irq_to_desc(irq);
 
-        if (irq >= NR_IRQS)
+        if (!desc)
                 return;
 
         disable_irq_nosync(irq);
@@ -204,10 +211,10 @@ static void __enable_irq(struct irq_desc *desc, unsigned int irq)
  */
 void enable_irq(unsigned int irq)
 {
-        struct irq_desc *desc = irq_desc + irq;
+        struct irq_desc *desc = irq_to_desc(irq);
         unsigned long flags;
 
-        if (irq >= NR_IRQS)
+        if (!desc)
                 return;
 
         spin_lock_irqsave(&desc->lock, flags);
@@ -216,9 +223,9 @@ void enable_irq(unsigned int irq)
 }
 EXPORT_SYMBOL(enable_irq);
 
-int set_irq_wake_real(unsigned int irq, unsigned int on)
+static int set_irq_wake_real(unsigned int irq, unsigned int on)
 {
-        struct irq_desc *desc = irq_desc + irq;
+        struct irq_desc *desc = irq_to_desc(irq);
         int ret = -ENXIO;
 
         if (desc->chip->set_wake)
@@ -241,7 +248,7 @@ int set_irq_wake_real(unsigned int irq, unsigned int on)
  */
 int set_irq_wake(unsigned int irq, unsigned int on)
 {
-        struct irq_desc *desc = irq_desc + irq;
+        struct irq_desc *desc = irq_to_desc(irq);
         unsigned long flags;
         int ret = 0;
 
@@ -281,12 +288,16 @@ EXPORT_SYMBOL(set_irq_wake);
  */
 int can_request_irq(unsigned int irq, unsigned long irqflags)
 {
+        struct irq_desc *desc = irq_to_desc(irq);
         struct irqaction *action;
 
-        if (irq >= NR_IRQS || irq_desc[irq].status & IRQ_NOREQUEST)
+        if (!desc)
+                return 0;
+
+        if (desc->status & IRQ_NOREQUEST)
                 return 0;
 
-        action = irq_desc[irq].action;
+        action = desc->action;
         if (action)
                 if (irqflags & action->flags & IRQF_SHARED)
                         action = NULL;
@@ -305,10 +316,11 @@ void compat_irq_chip_set_default_handler(struct irq_desc *desc)
                 desc->handle_irq = NULL;
 }
 
-static int __irq_set_trigger(struct irq_chip *chip, unsigned int irq,
+int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
                 unsigned long flags)
 {
         int ret;
+        struct irq_chip *chip = desc->chip;
 
         if (!chip || !chip->set_type) {
                 /*
@@ -326,6 +338,11 @@ static int __irq_set_trigger(struct irq_chip *chip, unsigned int irq,
                 pr_err("setting trigger mode %d for irq %u failed (%pF)\n",
                                 (int)(flags & IRQF_TRIGGER_MASK),
                                 irq, chip->set_type);
+        else {
+                /* note that IRQF_TRIGGER_MASK == IRQ_TYPE_SENSE_MASK */
+                desc->status &= ~IRQ_TYPE_SENSE_MASK;
+                desc->status |= flags & IRQ_TYPE_SENSE_MASK;
+        }
 
         return ret;
 }
@@ -334,16 +351,16 @@ static int __irq_set_trigger(struct irq_chip *chip, unsigned int irq,
  * Internal function to register an irqaction - typically used to
  * allocate special interrupts that are part of the architecture.
  */
-int setup_irq(unsigned int irq, struct irqaction *new)
+static int
+__setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)
 {
-        struct irq_desc *desc = irq_desc + irq;
         struct irqaction *old, **p;
         const char *old_name = NULL;
         unsigned long flags;
         int shared = 0;
         int ret;
 
-        if (irq >= NR_IRQS)
+        if (!desc)
                 return -EINVAL;
 
         if (desc->chip == &no_irq_chip)
@@ -404,7 +421,7 @@ int setup_irq(unsigned int irq, struct irqaction *new)
 
                 /* Setup the type (level, edge polarity) if configured: */
                 if (new->flags & IRQF_TRIGGER_MASK) {
-                        ret = __irq_set_trigger(desc->chip, irq, new->flags);
+                        ret = __irq_set_trigger(desc, irq, new->flags);
 
                         if (ret) {
                                 spin_unlock_irqrestore(&desc->lock, flags);
@@ -423,16 +440,21 @@ int setup_irq(unsigned int irq, struct irqaction *new)
                 if (!(desc->status & IRQ_NOAUTOEN)) {
                         desc->depth = 0;
                         desc->status &= ~IRQ_DISABLED;
-                        if (desc->chip->startup)
-                                desc->chip->startup(irq);
-                        else
-                                desc->chip->enable(irq);
+                        desc->chip->startup(irq);
                 } else
                         /* Undo nested disables: */
                         desc->depth = 1;
 
                 /* Set default affinity mask once everything is setup */
                 irq_select_affinity(irq);
+
+        } else if ((new->flags & IRQF_TRIGGER_MASK)
+                        && (new->flags & IRQF_TRIGGER_MASK)
+                                != (desc->status & IRQ_TYPE_SENSE_MASK)) {
+                /* hope the handler works with the actual trigger mode... */
+                pr_warning("IRQ %d uses trigger mode %d; requested %d\n",
+                                irq, (int)(desc->status & IRQ_TYPE_SENSE_MASK),
+                                (int)(new->flags & IRQF_TRIGGER_MASK));
         }
 
         *p = new;
@@ -457,7 +479,7 @@ int setup_irq(unsigned int irq, struct irqaction *new)
         spin_unlock_irqrestore(&desc->lock, flags);
 
         new->irq = irq;
-        register_irq_proc(irq);
+        register_irq_proc(irq, desc);
         new->dir = NULL;
         register_handler_proc(irq, new);
 
@@ -477,6 +499,20 @@ mismatch:
 }
 
 /**
+ *      setup_irq - setup an interrupt
+ *      @irq:   Interrupt line to setup
+ *      @act:   irqaction for the interrupt
+ *
+ *      Used to statically setup interrupts in the early boot process.
+ */
+int setup_irq(unsigned int irq, struct irqaction *act)
+{
+        struct irq_desc *desc = irq_to_desc(irq);
+
+        return __setup_irq(irq, desc, act);
+}
+
+/**
  *      free_irq - free an interrupt
  *      @irq: Interrupt line to free
  *      @dev_id: Device identity to free
@@ -492,15 +528,15 @@ mismatch:
  */
 void free_irq(unsigned int irq, void *dev_id)
 {
-        struct irq_desc *desc;
+        struct irq_desc *desc = irq_to_desc(irq);
         struct irqaction **p;
         unsigned long flags;
 
         WARN_ON(in_interrupt());
-        if (irq >= NR_IRQS)
+
+        if (!desc)
                 return;
 
-        desc = irq_desc + irq;
         spin_lock_irqsave(&desc->lock, flags);
         p = &desc->action;
         for (;;) {
@@ -589,12 +625,14 @@ EXPORT_SYMBOL(free_irq);
  *      IRQF_SHARED             Interrupt is shared
  *      IRQF_DISABLED           Disable local interrupts while processing
  *      IRQF_SAMPLE_RANDOM      The interrupt can be used for entropy
+ *      IRQF_TRIGGER_*          Specify active edge(s) or level
  *
  */
 int request_irq(unsigned int irq, irq_handler_t handler,
                 unsigned long irqflags, const char *devname, void *dev_id)
 {
         struct irqaction *action;
+        struct irq_desc *desc;
         int retval;
 
 #ifdef CONFIG_LOCKDEP
@@ -611,9 +649,12 @@ int request_irq(unsigned int irq, irq_handler_t handler,
          */
         if ((irqflags & IRQF_SHARED) && !dev_id)
                 return -EINVAL;
-        if (irq >= NR_IRQS)
+
+        desc = irq_to_desc(irq);
+        if (!desc)
                 return -EINVAL;
-        if (irq_desc[irq].status & IRQ_NOREQUEST)
+
+        if (desc->status & IRQ_NOREQUEST)
                 return -EINVAL;
         if (!handler)
                 return -EINVAL;
@@ -629,26 +670,29 @@ int request_irq(unsigned int irq, irq_handler_t handler,
         action->next = NULL;
         action->dev_id = dev_id;
 
+        retval = __setup_irq(irq, desc, action);
+        if (retval)
+                kfree(action);
+
 #ifdef CONFIG_DEBUG_SHIRQ
         if (irqflags & IRQF_SHARED) {
                 /*
                  * It's a shared IRQ -- the driver ought to be prepared for it
                  * to happen immediately, so let's make sure....
-                 * We do this before actually registering it, to make sure that
-                 * a 'real' IRQ doesn't run in parallel with our fake
+                 * We disable the irq to make sure that a 'real' IRQ doesn't
+                 * run in parallel with our fake.
                  */
                 unsigned long flags;
 
+                disable_irq(irq);
                 local_irq_save(flags);
+
                 handler(irq, dev_id);
+
                 local_irq_restore(flags);
+                enable_irq(irq);
         }
 #endif
-
-        retval = setup_irq(irq, action);
-        if (retval)
-                kfree(action);
-
         return retval;
 }
 EXPORT_SYMBOL(request_irq);
