Diffstat (limited to 'kernel')

 kernel/irq/chip.c         |   7
 kernel/irq/handle.c       |  36
 kernel/irq/internals.h    |   1
 kernel/irq/manage.c       | 192
 kernel/irq/numa_migrate.c |  11
 kernel/irq/spurious.c     |  14
 kernel/module.c           |  26
 7 files changed, 170 insertions(+), 117 deletions(-)
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 122fef4b0bd..c687ba4363f 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -81,6 +81,7 @@ void dynamic_irq_cleanup(unsigned int irq)
 	desc->handle_irq = handle_bad_irq;
 	desc->chip = &no_irq_chip;
 	desc->name = NULL;
+	clear_kstat_irqs(desc);
 	spin_unlock_irqrestore(&desc->lock, flags);
 }
 
@@ -293,7 +294,8 @@ static inline void mask_ack_irq(struct irq_desc *desc, int irq)
 		desc->chip->mask_ack(irq);
 	else {
 		desc->chip->mask(irq);
-		desc->chip->ack(irq);
+		if (desc->chip->ack)
+			desc->chip->ack(irq);
 	}
 }
 
@@ -479,7 +481,8 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
 	kstat_incr_irqs_this_cpu(irq, desc);
 
 	/* Start handling the irq */
-	desc->chip->ack(irq);
+	if (desc->chip->ack)
+		desc->chip->ack(irq);
 	desc = irq_remap_to_desc(irq, desc);
 
 	/* Mark the IRQ currently in progress.*/
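The two chip.c hunks make desc->chip->ack an optional callback by guarding every call site with a NULL check. Below is a minimal userspace sketch of that guarded-callback pattern; the struct and handler names are made up for illustration, not the kernel's real irq_chip definition:

#include <stdio.h>

struct fake_chip {
	void (*mask)(int irq);
	void (*ack)(int irq);	/* optional: may legitimately be NULL */
};

static void my_mask(int irq)
{
	printf("mask %d\n", irq);
}

static void mask_ack(struct fake_chip *chip, int irq)
{
	chip->mask(irq);
	if (chip->ack)		/* guard: some chips provide no ack hook */
		chip->ack(irq);
}

int main(void)
{
	struct fake_chip c = { .mask = my_mask, .ack = NULL };

	mask_ack(&c, 9);	/* no crash even though .ack is NULL */
	return 0;
}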
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index f51eaee921b..9ebf7796887 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -82,19 +82,21 @@ static struct irq_desc irq_desc_init = {
 
 void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr)
 {
-	unsigned long bytes;
-	char *ptr;
 	int node;
-
-	/* Compute how many bytes we need per irq and allocate them */
-	bytes = nr * sizeof(unsigned int);
+	void *ptr;
 
 	node = cpu_to_node(cpu);
-	ptr = kzalloc_node(bytes, GFP_ATOMIC, node);
-	printk(KERN_DEBUG "  alloc kstat_irqs on cpu %d node %d\n", cpu, node);
+	ptr = kzalloc_node(nr * sizeof(*desc->kstat_irqs), GFP_ATOMIC, node);
 
-	if (ptr)
-		desc->kstat_irqs = (unsigned int *)ptr;
+	/*
+	 * don't overwite if can not get new one
+	 * init_copy_kstat_irqs() could still use old one
+	 */
+	if (ptr) {
+		printk(KERN_DEBUG "  alloc kstat_irqs on cpu %d node %d\n",
+			cpu, node);
+		desc->kstat_irqs = ptr;
+	}
 }
 
 static void init_one_irq_desc(int irq, struct irq_desc *desc, int cpu)
@@ -237,6 +239,7 @@ struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
 	}
 };
 
+static unsigned int kstat_irqs_all[NR_IRQS][NR_CPUS];
 int __init early_irq_init(void)
 {
 	struct irq_desc *desc;
@@ -253,6 +256,7 @@ int __init early_irq_init(void)
 	for (i = 0; i < count; i++) {
 		desc[i].irq = i;
 		init_alloc_desc_masks(&desc[i], 0, true);
+		desc[i].kstat_irqs = kstat_irqs_all[i];
 	}
 	return arch_early_irq_init();
 }
@@ -268,6 +272,11 @@ struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu)
 }
 #endif /* !CONFIG_SPARSE_IRQ */
 
+void clear_kstat_irqs(struct irq_desc *desc)
+{
+	memset(desc->kstat_irqs, 0, nr_cpu_ids * sizeof(*(desc->kstat_irqs)));
+}
+
 /*
  * What should we do if we get a hw irq event on an illegal vector?
  * Each architecture has to answer this themself.
@@ -341,6 +350,8 @@ irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
 	irqreturn_t ret, retval = IRQ_NONE;
 	unsigned int status = 0;
 
+	WARN_ONCE(!in_irq(), "BUG: IRQ handler called from non-hardirq context!");
+
 	if (!(action->flags & IRQF_DISABLED))
 		local_irq_enable_in_hardirq();
 
@@ -360,6 +371,11 @@ irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
 }
 
 #ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ
+
+#ifdef CONFIG_ENABLE_WARN_DEPRECATED
+# warning __do_IRQ is deprecated. Please convert to proper flow handlers
+#endif
+
 /**
  * __do_IRQ - original all in one highlevel IRQ handler
  * @irq:	the interrupt number
@@ -480,12 +496,10 @@ void early_init_irq_lock_class(void)
 	}
 }
 
-#ifdef CONFIG_SPARSE_IRQ
 unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 	return desc ? desc->kstat_irqs[cpu] : 0;
 }
-#endif
 EXPORT_SYMBOL(kstat_irqs_cpu);
 
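The init_kstat_irqs() hunk also swaps the hand-computed nr * sizeof(unsigned int) for nr * sizeof(*desc->kstat_irqs), so the allocation size automatically follows the field's type, and it only overwrites the pointer when the allocation succeeded. A small userspace sketch of both points, using calloc and an invented struct:

#include <stdlib.h>

struct counters {
	unsigned int *per_cpu;	/* stand-in for desc->kstat_irqs */
};

static int alloc_counters(struct counters *c, int nr)
{
	/* element size is derived from the pointer being assigned */
	unsigned int *ptr = calloc(nr, sizeof(*c->per_cpu));

	if (!ptr)
		return -1;	/* keep any existing buffer, as the patch does */
	c->per_cpu = ptr;
	return 0;
}

int main(void)
{
	struct counters c = { 0 };

	return alloc_counters(&c, 8);
}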
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index 40416a81a0f..ee1aa9f8e8b 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -15,6 +15,7 @@ extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
 
 extern struct lock_class_key irq_desc_lock_class;
 extern void init_kstat_irqs(struct irq_desc *desc, int cpu, int nr);
+extern void clear_kstat_irqs(struct irq_desc *desc);
 extern spinlock_t sparse_irq_lock;
 
 #ifdef CONFIG_SPARSE_IRQ
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index a3a5dc9ef34..6458e99984c 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -109,7 +109,7 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
 /*
  * Generic version of the affinity autoselector.
  */
-int do_irq_select_affinity(unsigned int irq, struct irq_desc *desc)
+static int setup_affinity(unsigned int irq, struct irq_desc *desc)
 {
 	if (!irq_can_set_affinity(irq))
 		return 0;
@@ -133,7 +133,7 @@ set_affinity:
 	return 0;
 }
 #else
-static inline int do_irq_select_affinity(unsigned int irq, struct irq_desc *d)
+static inline int setup_affinity(unsigned int irq, struct irq_desc *d)
 {
 	return irq_select_affinity(irq);
 }
@@ -149,14 +149,14 @@ int irq_select_affinity_usr(unsigned int irq)
 	int ret;
 
 	spin_lock_irqsave(&desc->lock, flags);
-	ret = do_irq_select_affinity(irq, desc);
+	ret = setup_affinity(irq, desc);
 	spin_unlock_irqrestore(&desc->lock, flags);
 
 	return ret;
 }
 
 #else
-static inline int do_irq_select_affinity(int irq, struct irq_desc *desc)
+static inline int setup_affinity(unsigned int irq, struct irq_desc *desc)
 {
 	return 0;
 }
@@ -389,9 +389,9 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
  * allocate special interrupts that are part of the architecture.
  */
 static int
-__setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)
+__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 {
-	struct irqaction *old, **p;
+	struct irqaction *old, **old_ptr;
 	const char *old_name = NULL;
 	unsigned long flags;
 	int shared = 0;
@@ -423,8 +423,8 @@ __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)
 	 * The following block of code has to be executed atomically
 	 */
 	spin_lock_irqsave(&desc->lock, flags);
-	p = &desc->action;
-	old = *p;
+	old_ptr = &desc->action;
+	old = *old_ptr;
 	if (old) {
 		/*
 		 * Can't share interrupts unless both agree to and are
@@ -447,8 +447,8 @@ __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)
 
 		/* add new interrupt at end of irq queue */
 		do {
-			p = &old->next;
-			old = *p;
+			old_ptr = &old->next;
+			old = *old_ptr;
 		} while (old);
 		shared = 1;
 	}
@@ -488,7 +488,7 @@ __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)
 			desc->status |= IRQ_NO_BALANCING;
 
 		/* Set default affinity mask once everything is setup */
-		do_irq_select_affinity(irq, desc);
+		setup_affinity(irq, desc);
 
 	} else if ((new->flags & IRQF_TRIGGER_MASK)
 			&& (new->flags & IRQF_TRIGGER_MASK)
@@ -499,7 +499,7 @@ __setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)
 			(int)(new->flags & IRQF_TRIGGER_MASK));
 	}
 
-	*p = new;
+	*old_ptr = new;
 
 	/* Reset broken irq detection when installing new handler */
 	desc->irq_count = 0;
@@ -549,90 +549,117 @@ int setup_irq(unsigned int irq, struct irqaction *act)
 
 	return __setup_irq(irq, desc, act);
 }
+EXPORT_SYMBOL_GPL(setup_irq);
 
-/**
- *	free_irq - free an interrupt
- *	@irq: Interrupt line to free
- *	@dev_id: Device identity to free
- *
- *	Remove an interrupt handler. The handler is removed and if the
- *	interrupt line is no longer in use by any driver it is disabled.
- *	On a shared IRQ the caller must ensure the interrupt is disabled
- *	on the card it drives before calling this function. The function
- *	does not return until any executing interrupts for this IRQ
- *	have completed.
- *
- *	This function must not be called from interrupt context.
+/*
+ * Internal function to unregister an irqaction - used to free
+ * regular and special interrupts that are part of the architecture.
  */
-void free_irq(unsigned int irq, void *dev_id)
+static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
-	struct irqaction **p;
+	struct irqaction *action, **action_ptr;
 	unsigned long flags;
 
-	WARN_ON(in_interrupt());
+	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
 
 	if (!desc)
-		return;
+		return NULL;
 
 	spin_lock_irqsave(&desc->lock, flags);
-	p = &desc->action;
+
+	/*
+	 * There can be multiple actions per IRQ descriptor, find the right
+	 * one based on the dev_id:
+	 */
+	action_ptr = &desc->action;
 	for (;;) {
-		struct irqaction *action = *p;
+		action = *action_ptr;
 
-		if (action) {
-			struct irqaction **pp = p;
+		if (!action) {
+			WARN(1, "Trying to free already-free IRQ %d\n", irq);
+			spin_unlock_irqrestore(&desc->lock, flags);
 
-			p = &action->next;
-			if (action->dev_id != dev_id)
-				continue;
+			return NULL;
+		}
 
-			/* Found it - now remove it from the list of entries */
-			*pp = action->next;
+		if (action->dev_id == dev_id)
+			break;
+		action_ptr = &action->next;
+	}
 
-			/* Currently used only by UML, might disappear one day.*/
+	/* Found it - now remove it from the list of entries: */
+	*action_ptr = action->next;
+
+	/* Currently used only by UML, might disappear one day: */
 #ifdef CONFIG_IRQ_RELEASE_METHOD
-			if (desc->chip->release)
-				desc->chip->release(irq, dev_id);
+	if (desc->chip->release)
+		desc->chip->release(irq, dev_id);
 #endif
 
-			if (!desc->action) {
-				desc->status |= IRQ_DISABLED;
-				if (desc->chip->shutdown)
-					desc->chip->shutdown(irq);
-				else
-					desc->chip->disable(irq);
-			}
-			spin_unlock_irqrestore(&desc->lock, flags);
-			unregister_handler_proc(irq, action);
+	/* If this was the last handler, shut down the IRQ line: */
+	if (!desc->action) {
+		desc->status |= IRQ_DISABLED;
+		if (desc->chip->shutdown)
+			desc->chip->shutdown(irq);
+		else
+			desc->chip->disable(irq);
+	}
+	spin_unlock_irqrestore(&desc->lock, flags);
+
+	unregister_handler_proc(irq, action);
+
+	/* Make sure it's not being used on another CPU: */
+	synchronize_irq(irq);
 
-			/* Make sure it's not being used on another CPU */
-			synchronize_irq(irq);
-#ifdef CONFIG_DEBUG_SHIRQ
-			/*
-			 * It's a shared IRQ -- the driver ought to be
-			 * prepared for it to happen even now it's
-			 * being freed, so let's make sure.... We do
-			 * this after actually deregistering it, to
-			 * make sure that a 'real' IRQ doesn't run in
-			 * parallel with our fake
-			 */
-			if (action->flags & IRQF_SHARED) {
-				local_irq_save(flags);
-				action->handler(irq, dev_id);
-				local_irq_restore(flags);
-			}
-#endif
-			kfree(action);
-			return;
-		}
-		printk(KERN_ERR "Trying to free already-free IRQ %d\n", irq);
 #ifdef CONFIG_DEBUG_SHIRQ
-		dump_stack();
-#endif
-		spin_unlock_irqrestore(&desc->lock, flags);
-		return;
+	/*
+	 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
+	 * event to happen even now it's being freed, so let's make sure that
+	 * is so by doing an extra call to the handler ....
+	 *
+	 * ( We do this after actually deregistering it, to make sure that a
+	 *   'real' IRQ doesn't run in * parallel with our fake. )
+	 */
+	if (action->flags & IRQF_SHARED) {
+		local_irq_save(flags);
+		action->handler(irq, dev_id);
+		local_irq_restore(flags);
 	}
+#endif
+	return action;
+}
+
+/**
+ *	remove_irq - free an interrupt
+ *	@irq: Interrupt line to free
+ *	@act: irqaction for the interrupt
+ *
+ *	Used to remove interrupts statically setup by the early boot process.
+ */
+void remove_irq(unsigned int irq, struct irqaction *act)
+{
+	__free_irq(irq, act->dev_id);
+}
+EXPORT_SYMBOL_GPL(remove_irq);
+
+/**
+ *	free_irq - free an interrupt allocated with request_irq
+ *	@irq: Interrupt line to free
+ *	@dev_id: Device identity to free
+ *
+ *	Remove an interrupt handler. The handler is removed and if the
+ *	interrupt line is no longer in use by any driver it is disabled.
+ *	On a shared IRQ the caller must ensure the interrupt is disabled
+ *	on the card it drives before calling this function. The function
+ *	does not return until any executing interrupts for this IRQ
+ *	have completed.
+ *
+ *	This function must not be called from interrupt context.
+ */
+void free_irq(unsigned int irq, void *dev_id)
+{
+	kfree(__free_irq(irq, dev_id));
 }
 EXPORT_SYMBOL(free_irq);
 
@@ -679,11 +706,12 @@ int request_irq(unsigned int irq, irq_handler_t handler,
 	 * the behavior is classified as "will not fix" so we need to
 	 * start nudging drivers away from using that idiom.
 	 */
-	if ((irqflags & (IRQF_SHARED|IRQF_DISABLED))
-					== (IRQF_SHARED|IRQF_DISABLED))
-		pr_warning("IRQ %d/%s: IRQF_DISABLED is not "
-				"guaranteed on shared IRQs\n",
-				irq, devname);
+	if ((irqflags & (IRQF_SHARED|IRQF_DISABLED)) ==
+					(IRQF_SHARED|IRQF_DISABLED)) {
+		pr_warning(
+		  "IRQ %d/%s: IRQF_DISABLED is not guaranteed on shared IRQs\n",
+			irq, devname);
+	}
 
 #ifdef CONFIG_LOCKDEP
 	/*
@@ -709,15 +737,13 @@ int request_irq(unsigned int irq, irq_handler_t handler,
 	if (!handler)
 		return -EINVAL;
 
-	action = kmalloc(sizeof(struct irqaction), GFP_ATOMIC);
+	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
 	if (!action)
 		return -ENOMEM;
 
 	action->handler = handler;
 	action->flags = irqflags;
-	cpus_clear(action->mask);
 	action->name = devname;
-	action->next = NULL;
 	action->dev_id = dev_id;
 
 	retval = __setup_irq(irq, desc, action);
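The largest hunk splits free_irq() into an internal __free_irq() that unlinks and returns the matching irqaction, plus two thin wrappers: free_irq() kfree()s the returned action, while the new remove_irq() only unlinks, for actions set up statically at early boot. A toy model of that ownership split follows; the list type and all names are invented for illustration:

#include <stdio.h>
#include <stdlib.h>

struct action {
	void *dev_id;
	struct action *next;
};

static struct action *head;

/* Unlink the entry matching dev_id and hand ownership to the caller. */
static struct action *__free_action(void *dev_id)
{
	struct action *act, **ptr = &head;

	while ((act = *ptr)) {
		if (act->dev_id == dev_id) {
			*ptr = act->next;	/* found it: unlink */
			return act;
		}
		ptr = &act->next;
	}
	fprintf(stderr, "freeing an unregistered action\n");
	return NULL;
}

/* free_irq() analogue: the action was heap-allocated, so free it too. */
static void free_action(void *dev_id)
{
	free(__free_action(dev_id));
}

/* remove_irq() analogue: the action is static, so only unlink it. */
static void remove_action(void *dev_id)
{
	__free_action(dev_id);
}

int main(void)
{
	static struct action boot_action;	/* static, like setup_irq() users */
	struct action *drv = calloc(1, sizeof(*drv));	/* like request_irq() */
	int boot_id, drv_id;

	if (!drv)
		return 1;
	boot_action.dev_id = &boot_id;
	drv->dev_id = &drv_id;
	boot_action.next = drv;
	head = &boot_action;

	free_action(&drv_id);		/* heap one: unlink and free */
	remove_action(&boot_id);	/* static one: unlink only */
	return 0;
}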
diff --git a/kernel/irq/numa_migrate.c b/kernel/irq/numa_migrate.c
index 7f9b80434e3..243d6121e50 100644
--- a/kernel/irq/numa_migrate.c
+++ b/kernel/irq/numa_migrate.c
@@ -17,16 +17,11 @@ static void init_copy_kstat_irqs(struct irq_desc *old_desc,
 				 struct irq_desc *desc,
 				 int cpu, int nr)
 {
-	unsigned long bytes;
-
 	init_kstat_irqs(desc, cpu, nr);
 
-	if (desc->kstat_irqs != old_desc->kstat_irqs) {
-		/* Compute how many bytes we need per irq and allocate them */
-		bytes = nr * sizeof(unsigned int);
-
-		memcpy(desc->kstat_irqs, old_desc->kstat_irqs, bytes);
-	}
+	if (desc->kstat_irqs != old_desc->kstat_irqs)
+		memcpy(desc->kstat_irqs, old_desc->kstat_irqs,
+		       nr * sizeof(*desc->kstat_irqs));
 }
 
 static void free_kstat_irqs(struct irq_desc *old_desc, struct irq_desc *desc)
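This works with the init_kstat_irqs() change above: on allocation failure the descriptor keeps its old buffer, the two pointers compare equal, and the memcpy is skipped. A compressed userspace sketch of that allocate-with-fallback-then-copy step, with invented names:

#include <stdlib.h>
#include <string.h>

static unsigned int *try_new_buffer(unsigned int *old, int nr)
{
	unsigned int *ptr = calloc(nr, sizeof(*ptr));

	return ptr ? ptr : old;		/* fall back to the old buffer */
}

static unsigned int *migrate_stats(unsigned int *old, int nr)
{
	unsigned int *new = try_new_buffer(old, nr);

	/* copy only when we really got a fresh buffer */
	if (new != old)
		memcpy(new, old, nr * sizeof(*new));
	return new;
}

int main(void)
{
	unsigned int stats[4] = { 1, 2, 3, 4 };
	unsigned int *migrated = migrate_stats(stats, 4);

	if (migrated != stats)
		free(migrated);
	return 0;
}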
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index dd364c11e56..4d568294de3 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -104,7 +104,7 @@ static int misrouted_irq(int irq)
 	return ok;
 }
 
-static void poll_spurious_irqs(unsigned long dummy)
+static void poll_all_shared_irqs(void)
 {
 	struct irq_desc *desc;
 	int i;
@@ -123,11 +123,23 @@ static void poll_spurious_irqs(unsigned long dummy)
 
 		try_one_irq(i, desc);
 	}
+}
+
+static void poll_spurious_irqs(unsigned long dummy)
+{
+	poll_all_shared_irqs();
 
 	mod_timer(&poll_spurious_irq_timer,
 		  jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
 }
 
+#ifdef CONFIG_DEBUG_SHIRQ
+void debug_poll_all_shared_irqs(void)
+{
+	poll_all_shared_irqs();
+}
+#endif
+
 /*
  * If 99,900 of the previous 100,000 interrupts have not been handled
  * then assume that the IRQ is stuck in some manner. Drop a diagnostic
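The spurious.c change extracts the body of the poll_spurious_irqs() timer callback into poll_all_shared_irqs(), so CONFIG_DEBUG_SHIRQ builds gain debug_poll_all_shared_irqs() as a synchronous entry point to the same scan. A toy illustration of the extract-and-share refactor; all names below are made up:

#include <stdio.h>

static void poll_all_lines(void)
{
	/* ... walk every line and retry its handlers ... */
	printf("polling all shared lines\n");
}

/* Timer callback: run the scan, then re-arm (re-arming elided here). */
static void poll_timer_fn(unsigned long dummy)
{
	(void)dummy;
	poll_all_lines();
}

/* Debug hook: the same scan, callable on demand. */
void debug_poll_all_lines(void)
{
	poll_all_lines();
}

int main(void)
{
	poll_timer_fn(0);
	debug_poll_all_lines();
	return 0;
}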
diff --git a/kernel/module.c b/kernel/module.c
index f0e04d6b67d..29f2d7b33dd 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -2049,14 +2049,6 @@ static noinline struct module *load_module(void __user *umod,
 	if (err < 0)
 		goto free_mod;
 
-#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
-	mod->refptr = percpu_modalloc(sizeof(local_t), __alignof__(local_t),
-				      mod->name);
-	if (!mod->refptr) {
-		err = -ENOMEM;
-		goto free_mod;
-	}
-#endif
 	if (pcpuindex) {
 		/* We have a special allocation for this section. */
 		percpu = percpu_modalloc(sechdrs[pcpuindex].sh_size,
@@ -2064,7 +2056,7 @@ static noinline struct module *load_module(void __user *umod,
 					 mod->name);
 		if (!percpu) {
 			err = -ENOMEM;
-			goto free_percpu;
+			goto free_mod;
 		}
 		sechdrs[pcpuindex].sh_flags &= ~(unsigned long)SHF_ALLOC;
 		mod->percpu = percpu;
@@ -2116,6 +2108,14 @@ static noinline struct module *load_module(void __user *umod,
 	/* Module has been moved. */
 	mod = (void *)sechdrs[modindex].sh_addr;
 
+#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
+	mod->refptr = percpu_modalloc(sizeof(local_t), __alignof__(local_t),
+				      mod->name);
+	if (!mod->refptr) {
+		err = -ENOMEM;
+		goto free_init;
+	}
+#endif
 	/* Now we've moved module, initialize linked lists, etc. */
 	module_unload_init(mod);
 
@@ -2322,15 +2322,17 @@ static noinline struct module *load_module(void __user *umod,
 	ftrace_release(mod->module_core, mod->core_size);
  free_unload:
 	module_unload_free(mod);
+ free_init:
+#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
+	percpu_modfree(mod->refptr);
+#endif
 	module_free(mod, mod->module_init);
 free_core:
 	module_free(mod, mod->module_core);
+	/* mod will be freed with core. Don't access it beyond this line! */
 free_percpu:
 	if (percpu)
 		percpu_modfree(percpu);
-#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
-	percpu_modfree(mod->refptr);
-#endif
 free_mod:
 	kfree(args);
 free_hdr:
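The module.c change moves the mod->refptr allocation to after the module image has been moved into its final location, which forces the goto-based error unwinding to grow a new free_init label: refptr must be freed only on paths where it was actually allocated, and before the init region that mod itself lives in goes away. A reduced model of keeping a goto cleanup chain in reverse allocation order; resource names are illustrative:

#include <stdlib.h>

static int final_step(void)
{
	return 0;	/* pretend the remaining setup succeeds */
}

static int load_thing(void)
{
	void *percpu, *core, *refptr;
	int err = -1;

	percpu = malloc(64);	/* early allocation */
	if (!percpu)
		return err;

	core = malloc(128);	/* the "module has been moved" point */
	if (!core)
		goto free_percpu;

	refptr = malloc(16);	/* allocated late, like mod->refptr */
	if (!refptr)
		goto free_core;

	if (final_step() < 0)
		goto free_refptr;	/* later failures free refptr first */

	/* success: a real loader would keep these resources alive */
	free(refptr);
	free(core);
	free(percpu);
	return 0;

free_refptr:
	free(refptr);	/* newest resource, unwound first */
free_core:
	free(core);
free_percpu:
	free(percpu);
	return err;
}

int main(void)
{
	return load_thing();
}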
