Diffstat (limited to 'kernel/irq/manage.c')
 -rw-r--r--  kernel/irq/manage.c | 31 ++++++++++++++++++++++++-------
 1 files changed, 24 insertions, 7 deletions

diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index a3eb7baf1e46..7e2e7dd4cd2f 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -185,6 +185,20 @@ static inline int setup_affinity(unsigned int irq, struct irq_desc *desc)
 }
 #endif
 
+void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend)
+{
+	if (suspend) {
+		if (!desc->action || (desc->action->flags & IRQF_TIMER))
+			return;
+		desc->status |= IRQ_SUSPENDED;
+	}
+
+	if (!desc->depth++) {
+		desc->status |= IRQ_DISABLED;
+		desc->chip->disable(irq);
+	}
+}
+
 /**
  * disable_irq_nosync - disable an irq without waiting
  * @irq: Interrupt to disable
@@ -205,10 +219,7 @@ void disable_irq_nosync(unsigned int irq)
 		return;
 
 	spin_lock_irqsave(&desc->lock, flags);
-	if (!desc->depth++) {
-		desc->status |= IRQ_DISABLED;
-		desc->chip->disable(irq);
-	}
+	__disable_irq(desc, irq, false);
 	spin_unlock_irqrestore(&desc->lock, flags);
 }
 EXPORT_SYMBOL(disable_irq_nosync);
@@ -238,15 +249,21 @@ void disable_irq(unsigned int irq)
 }
 EXPORT_SYMBOL(disable_irq);
 
-static void __enable_irq(struct irq_desc *desc, unsigned int irq)
+void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
 {
+	if (resume)
+		desc->status &= ~IRQ_SUSPENDED;
+
 	switch (desc->depth) {
 	case 0:
+ err_out:
 		WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
 		break;
 	case 1: {
 		unsigned int status = desc->status & ~IRQ_DISABLED;
 
+		if (desc->status & IRQ_SUSPENDED)
+			goto err_out;
 		/* Prevent probing on this irq: */
 		desc->status = status | IRQ_NOPROBE;
 		check_irq_resend(desc, irq);
@@ -276,7 +293,7 @@ void enable_irq(unsigned int irq)
 		return;
 
 	spin_lock_irqsave(&desc->lock, flags);
-	__enable_irq(desc, irq);
+	__enable_irq(desc, irq, false);
 	spin_unlock_irqrestore(&desc->lock, flags);
 }
 EXPORT_SYMBOL(enable_irq);
@@ -638,7 +655,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
	 */
 	if (shared && (desc->status & IRQ_SPURIOUS_DISABLED)) {
 		desc->status &= ~IRQ_SPURIOUS_DISABLED;
-		__enable_irq(desc, irq);
+		__enable_irq(desc, irq, false);
 	}
 
 	spin_unlock_irqrestore(&desc->lock, flags);
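
Note: the new suspend/resume parameters exist so that a power-management path can disable every interrupt line exactly once before sleep and re-enable it afterwards. Below is a minimal sketch of such callers, assuming the for_each_irq_desc() iterator and the desc->lock rules used elsewhere in kernel/irq/manage.c; the function names sketch_suspend_irqs()/sketch_resume_irqs() are illustrative and are not part of this diff.

/*
 * Illustrative sketch only -- not part of this change.  Assumes the
 * __disable_irq()/__enable_irq() prototypes are visible (e.g. via the
 * kernel's internal IRQ headers).
 */
#include <linux/irq.h>
#include <linux/irqnr.h>
#include <linux/interrupt.h>

static void sketch_suspend_irqs(void)
{
	struct irq_desc *desc;
	unsigned long flags;
	int irq;

	for_each_irq_desc(irq, desc) {
		spin_lock_irqsave(&desc->lock, flags);
		/* suspend == true: skip timer/actionless IRQs, set IRQ_SUSPENDED */
		__disable_irq(desc, irq, true);
		spin_unlock_irqrestore(&desc->lock, flags);
	}
}

static void sketch_resume_irqs(void)
{
	struct irq_desc *desc;
	unsigned long flags;
	int irq;

	for_each_irq_desc(irq, desc) {
		spin_lock_irqsave(&desc->lock, flags);
		if (desc->status & IRQ_SUSPENDED)
			/* resume == true: clear IRQ_SUSPENDED before re-enabling */
			__enable_irq(desc, irq, true);
		spin_unlock_irqrestore(&desc->lock, flags);
	}
}

This also explains the err_out path added to __enable_irq(): while IRQ_SUSPENDED is set, a plain enable_irq() call at depth 1 is treated as unbalanced, and only a resume-style call, which clears the flag first, may actually unmask the line.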
