diff options
Diffstat (limited to 'kernel/irq/manage.c')
| -rw-r--r-- | kernel/irq/manage.c | 54 |
1 files changed, 27 insertions, 27 deletions
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index c3003e9d91a3..4dfb19521d9f 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c | |||
| @@ -73,8 +73,8 @@ int irq_can_set_affinity(unsigned int irq) | |||
| 73 | { | 73 | { |
| 74 | struct irq_desc *desc = irq_to_desc(irq); | 74 | struct irq_desc *desc = irq_to_desc(irq); |
| 75 | 75 | ||
| 76 | if (CHECK_IRQ_PER_CPU(desc->status) || !desc->chip || | 76 | if (CHECK_IRQ_PER_CPU(desc->status) || !desc->irq_data.chip || |
| 77 | !desc->chip->set_affinity) | 77 | !desc->irq_data.chip->set_affinity) |
| 78 | return 0; | 78 | return 0; |
| 79 | 79 | ||
| 80 | return 1; | 80 | return 1; |
| @@ -111,15 +111,15 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask) | |||
| 111 | struct irq_desc *desc = irq_to_desc(irq); | 111 | struct irq_desc *desc = irq_to_desc(irq); |
| 112 | unsigned long flags; | 112 | unsigned long flags; |
| 113 | 113 | ||
| 114 | if (!desc->chip->set_affinity) | 114 | if (!desc->irq_data.chip->set_affinity) |
| 115 | return -EINVAL; | 115 | return -EINVAL; |
| 116 | 116 | ||
| 117 | raw_spin_lock_irqsave(&desc->lock, flags); | 117 | raw_spin_lock_irqsave(&desc->lock, flags); |
| 118 | 118 | ||
| 119 | #ifdef CONFIG_GENERIC_PENDING_IRQ | 119 | #ifdef CONFIG_GENERIC_PENDING_IRQ |
| 120 | if (desc->status & IRQ_MOVE_PCNTXT) { | 120 | if (desc->status & IRQ_MOVE_PCNTXT) { |
| 121 | if (!desc->chip->set_affinity(irq, cpumask)) { | 121 | if (!desc->irq_data.chip->set_affinity(irq, cpumask)) { |
| 122 | cpumask_copy(desc->affinity, cpumask); | 122 | cpumask_copy(desc->irq_data.affinity, cpumask); |
| 123 | irq_set_thread_affinity(desc); | 123 | irq_set_thread_affinity(desc); |
| 124 | } | 124 | } |
| 125 | } | 125 | } |
| @@ -128,8 +128,8 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask) | |||
| 128 | cpumask_copy(desc->pending_mask, cpumask); | 128 | cpumask_copy(desc->pending_mask, cpumask); |
| 129 | } | 129 | } |
| 130 | #else | 130 | #else |
| 131 | if (!desc->chip->set_affinity(irq, cpumask)) { | 131 | if (!desc->irq_data.chip->set_affinity(irq, cpumask)) { |
| 132 | cpumask_copy(desc->affinity, cpumask); | 132 | cpumask_copy(desc->irq_data.affinity, cpumask); |
| 133 | irq_set_thread_affinity(desc); | 133 | irq_set_thread_affinity(desc); |
| 134 | } | 134 | } |
| 135 | #endif | 135 | #endif |
| @@ -168,16 +168,16 @@ static int setup_affinity(unsigned int irq, struct irq_desc *desc) | |||
| 168 | * one of the targets is online. | 168 | * one of the targets is online. |
| 169 | */ | 169 | */ |
| 170 | if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) { | 170 | if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) { |
| 171 | if (cpumask_any_and(desc->affinity, cpu_online_mask) | 171 | if (cpumask_any_and(desc->irq_data.affinity, cpu_online_mask) |
| 172 | < nr_cpu_ids) | 172 | < nr_cpu_ids) |
| 173 | goto set_affinity; | 173 | goto set_affinity; |
| 174 | else | 174 | else |
| 175 | desc->status &= ~IRQ_AFFINITY_SET; | 175 | desc->status &= ~IRQ_AFFINITY_SET; |
| 176 | } | 176 | } |
| 177 | 177 | ||
| 178 | cpumask_and(desc->affinity, cpu_online_mask, irq_default_affinity); | 178 | cpumask_and(desc->irq_data.affinity, cpu_online_mask, irq_default_affinity); |
| 179 | set_affinity: | 179 | set_affinity: |
| 180 | desc->chip->set_affinity(irq, desc->affinity); | 180 | desc->irq_data.chip->set_affinity(irq, desc->irq_data.affinity); |
| 181 | 181 | ||
| 182 | return 0; | 182 | return 0; |
| 183 | } | 183 | } |
| @@ -223,7 +223,7 @@ void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend) | |||
| 223 | 223 | ||
| 224 | if (!desc->depth++) { | 224 | if (!desc->depth++) { |
| 225 | desc->status |= IRQ_DISABLED; | 225 | desc->status |= IRQ_DISABLED; |
| 226 | desc->chip->disable(irq); | 226 | desc->irq_data.chip->disable(irq); |
| 227 | } | 227 | } |
| 228 | } | 228 | } |
| 229 | 229 | ||
| @@ -313,7 +313,7 @@ void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume) | |||
| 313 | * IRQ line is re-enabled. | 313 | * IRQ line is re-enabled. |
| 314 | * | 314 | * |
| 315 | * This function may be called from IRQ context only when | 315 | * This function may be called from IRQ context only when |
| 316 | * desc->chip->bus_lock and desc->chip->bus_sync_unlock are NULL ! | 316 | * desc->irq_data.chip->bus_lock and desc->irq_data.chip->bus_sync_unlock are NULL ! |
| 317 | */ | 317 | */ |
| 318 | void enable_irq(unsigned int irq) | 318 | void enable_irq(unsigned int irq) |
| 319 | { | 319 | { |
| @@ -336,8 +336,8 @@ static int set_irq_wake_real(unsigned int irq, unsigned int on) | |||
| 336 | struct irq_desc *desc = irq_to_desc(irq); | 336 | struct irq_desc *desc = irq_to_desc(irq); |
| 337 | int ret = -ENXIO; | 337 | int ret = -ENXIO; |
| 338 | 338 | ||
| 339 | if (desc->chip->set_wake) | 339 | if (desc->irq_data.chip->set_wake) |
| 340 | ret = desc->chip->set_wake(irq, on); | 340 | ret = desc->irq_data.chip->set_wake(irq, on); |
| 341 | 341 | ||
| 342 | return ret; | 342 | return ret; |
| 343 | } | 343 | } |
| @@ -432,7 +432,7 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, | |||
| 432 | unsigned long flags) | 432 | unsigned long flags) |
| 433 | { | 433 | { |
| 434 | int ret; | 434 | int ret; |
| 435 | struct irq_chip *chip = desc->chip; | 435 | struct irq_chip *chip = desc->irq_data.chip; |
| 436 | 436 | ||
| 437 | if (!chip || !chip->set_type) { | 437 | if (!chip || !chip->set_type) { |
| 438 | /* | 438 | /* |
| @@ -457,8 +457,8 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, | |||
| 457 | desc->status &= ~(IRQ_LEVEL | IRQ_TYPE_SENSE_MASK); | 457 | desc->status &= ~(IRQ_LEVEL | IRQ_TYPE_SENSE_MASK); |
| 458 | desc->status |= flags; | 458 | desc->status |= flags; |
| 459 | 459 | ||
| 460 | if (chip != desc->chip) | 460 | if (chip != desc->irq_data.chip) |
| 461 | irq_chip_set_defaults(desc->chip); | 461 | irq_chip_set_defaults(desc->irq_data.chip); |
| 462 | } | 462 | } |
| 463 | 463 | ||
| 464 | return ret; | 464 | return ret; |
| @@ -528,7 +528,7 @@ again: | |||
| 528 | 528 | ||
| 529 | if (!(desc->status & IRQ_DISABLED) && (desc->status & IRQ_MASKED)) { | 529 | if (!(desc->status & IRQ_DISABLED) && (desc->status & IRQ_MASKED)) { |
| 530 | desc->status &= ~IRQ_MASKED; | 530 | desc->status &= ~IRQ_MASKED; |
| 531 | desc->chip->unmask(irq); | 531 | desc->irq_data.chip->unmask(irq); |
| 532 | } | 532 | } |
| 533 | raw_spin_unlock_irq(&desc->lock); | 533 | raw_spin_unlock_irq(&desc->lock); |
| 534 | chip_bus_sync_unlock(irq, desc); | 534 | chip_bus_sync_unlock(irq, desc); |
| @@ -556,7 +556,7 @@ irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) | |||
| 556 | } | 556 | } |
| 557 | 557 | ||
| 558 | raw_spin_lock_irq(&desc->lock); | 558 | raw_spin_lock_irq(&desc->lock); |
| 559 | cpumask_copy(mask, desc->affinity); | 559 | cpumask_copy(mask, desc->irq_data.affinity); |
| 560 | raw_spin_unlock_irq(&desc->lock); | 560 | raw_spin_unlock_irq(&desc->lock); |
| 561 | 561 | ||
| 562 | set_cpus_allowed_ptr(current, mask); | 562 | set_cpus_allowed_ptr(current, mask); |
| @@ -657,7 +657,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) | |||
| 657 | if (!desc) | 657 | if (!desc) |
| 658 | return -EINVAL; | 658 | return -EINVAL; |
| 659 | 659 | ||
| 660 | if (desc->chip == &no_irq_chip) | 660 | if (desc->irq_data.chip == &no_irq_chip) |
| 661 | return -ENOSYS; | 661 | return -ENOSYS; |
| 662 | /* | 662 | /* |
| 663 | * Some drivers like serial.c use request_irq() heavily, | 663 | * Some drivers like serial.c use request_irq() heavily, |
| @@ -752,7 +752,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) | |||
| 752 | } | 752 | } |
| 753 | 753 | ||
| 754 | if (!shared) { | 754 | if (!shared) { |
| 755 | irq_chip_set_defaults(desc->chip); | 755 | irq_chip_set_defaults(desc->irq_data.chip); |
| 756 | 756 | ||
| 757 | init_waitqueue_head(&desc->wait_for_threads); | 757 | init_waitqueue_head(&desc->wait_for_threads); |
| 758 | 758 | ||
| @@ -779,7 +779,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) | |||
| 779 | if (!(desc->status & IRQ_NOAUTOEN)) { | 779 | if (!(desc->status & IRQ_NOAUTOEN)) { |
| 780 | desc->depth = 0; | 780 | desc->depth = 0; |
| 781 | desc->status &= ~IRQ_DISABLED; | 781 | desc->status &= ~IRQ_DISABLED; |
| 782 | desc->chip->startup(irq); | 782 | desc->irq_data.chip->startup(irq); |
| 783 | } else | 783 | } else |
| 784 | /* Undo nested disables: */ | 784 | /* Undo nested disables: */ |
| 785 | desc->depth = 1; | 785 | desc->depth = 1; |
| @@ -912,17 +912,17 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id) | |||
| 912 | 912 | ||
| 913 | /* Currently used only by UML, might disappear one day: */ | 913 | /* Currently used only by UML, might disappear one day: */ |
| 914 | #ifdef CONFIG_IRQ_RELEASE_METHOD | 914 | #ifdef CONFIG_IRQ_RELEASE_METHOD |
| 915 | if (desc->chip->release) | 915 | if (desc->irq_data.chip->release) |
| 916 | desc->chip->release(irq, dev_id); | 916 | desc->irq_data.chip->release(irq, dev_id); |
| 917 | #endif | 917 | #endif |
| 918 | 918 | ||
| 919 | /* If this was the last handler, shut down the IRQ line: */ | 919 | /* If this was the last handler, shut down the IRQ line: */ |
| 920 | if (!desc->action) { | 920 | if (!desc->action) { |
| 921 | desc->status |= IRQ_DISABLED; | 921 | desc->status |= IRQ_DISABLED; |
| 922 | if (desc->chip->shutdown) | 922 | if (desc->irq_data.chip->shutdown) |
| 923 | desc->chip->shutdown(irq); | 923 | desc->irq_data.chip->shutdown(irq); |
| 924 | else | 924 | else |
| 925 | desc->chip->disable(irq); | 925 | desc->irq_data.chip->disable(irq); |
| 926 | } | 926 | } |
| 927 | 927 | ||
| 928 | #ifdef CONFIG_SMP | 928 | #ifdef CONFIG_SMP |
