diff options
| author | Rafael J. Wysocki <rafael.j.wysocki@intel.com> | 2014-09-30 14:46:13 -0400 |
|---|---|---|
| committer | Rafael J. Wysocki <rafael.j.wysocki@intel.com> | 2014-09-30 14:46:13 -0400 |
| commit | e4cb0c9e92f7b16db7a1e892ac6bcf2f736dfd50 (patch) | |
| tree | 35d66a4fc97aa642c14483966f050b5663ff02ca /kernel | |
| parent | 905563ff47db35dcb3f69e69d434207270ad1966 (diff) | |
| parent | 27f3d18630cd7fbb03b62bd78a74303cb8c88069 (diff) | |
Merge branch 'pm-genirq' into acpi-pm
Diffstat (limited to 'kernel')
| -rw-r--r-- | kernel/irq/chip.c | 85 | ||||
| -rw-r--r-- | kernel/irq/internals.h | 16 | ||||
| -rw-r--r-- | kernel/irq/manage.c | 32 | ||||
| -rw-r--r-- | kernel/irq/pm.c | 159 | ||||
| -rw-r--r-- | kernel/power/process.c | 1 |
5 files changed, 196 insertions(+), 97 deletions(-)
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index 6223fab9a9d2..8fb52e9bddc1 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c | |||
| @@ -342,6 +342,31 @@ static bool irq_check_poll(struct irq_desc *desc) | |||
| 342 | return irq_wait_for_poll(desc); | 342 | return irq_wait_for_poll(desc); |
| 343 | } | 343 | } |
| 344 | 344 | ||
| 345 | static bool irq_may_run(struct irq_desc *desc) | ||
| 346 | { | ||
| 347 | unsigned int mask = IRQD_IRQ_INPROGRESS | IRQD_WAKEUP_ARMED; | ||
| 348 | |||
| 349 | /* | ||
| 350 | * If the interrupt is not in progress and is not an armed | ||
| 351 | * wakeup interrupt, proceed. | ||
| 352 | */ | ||
| 353 | if (!irqd_has_set(&desc->irq_data, mask)) | ||
| 354 | return true; | ||
| 355 | |||
| 356 | /* | ||
| 357 | * If the interrupt is an armed wakeup source, mark it pending | ||
| 358 | * and suspended, disable it and notify the pm core about the | ||
| 359 | * event. | ||
| 360 | */ | ||
| 361 | if (irq_pm_check_wakeup(desc)) | ||
| 362 | return false; | ||
| 363 | |||
| 364 | /* | ||
| 365 | * Handle a potential concurrent poll on a different core. | ||
| 366 | */ | ||
| 367 | return irq_check_poll(desc); | ||
| 368 | } | ||
| 369 | |||
| 345 | /** | 370 | /** |
| 346 | * handle_simple_irq - Simple and software-decoded IRQs. | 371 | * handle_simple_irq - Simple and software-decoded IRQs. |
| 347 | * @irq: the interrupt number | 372 | * @irq: the interrupt number |
| @@ -359,9 +384,8 @@ handle_simple_irq(unsigned int irq, struct irq_desc *desc) | |||
| 359 | { | 384 | { |
| 360 | raw_spin_lock(&desc->lock); | 385 | raw_spin_lock(&desc->lock); |
| 361 | 386 | ||
| 362 | if (unlikely(irqd_irq_inprogress(&desc->irq_data))) | 387 | if (!irq_may_run(desc)) |
| 363 | if (!irq_check_poll(desc)) | 388 | goto out_unlock; |
| 364 | goto out_unlock; | ||
| 365 | 389 | ||
| 366 | desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); | 390 | desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); |
| 367 | kstat_incr_irqs_this_cpu(irq, desc); | 391 | kstat_incr_irqs_this_cpu(irq, desc); |
| @@ -412,9 +436,8 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc) | |||
| 412 | raw_spin_lock(&desc->lock); | 436 | raw_spin_lock(&desc->lock); |
| 413 | mask_ack_irq(desc); | 437 | mask_ack_irq(desc); |
| 414 | 438 | ||
| 415 | if (unlikely(irqd_irq_inprogress(&desc->irq_data))) | 439 | if (!irq_may_run(desc)) |
| 416 | if (!irq_check_poll(desc)) | 440 | goto out_unlock; |
| 417 | goto out_unlock; | ||
| 418 | 441 | ||
| 419 | desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); | 442 | desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); |
| 420 | kstat_incr_irqs_this_cpu(irq, desc); | 443 | kstat_incr_irqs_this_cpu(irq, desc); |
| @@ -485,9 +508,8 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc) | |||
| 485 | 508 | ||
| 486 | raw_spin_lock(&desc->lock); | 509 | raw_spin_lock(&desc->lock); |
| 487 | 510 | ||
| 488 | if (unlikely(irqd_irq_inprogress(&desc->irq_data))) | 511 | if (!irq_may_run(desc)) |
| 489 | if (!irq_check_poll(desc)) | 512 | goto out; |
| 490 | goto out; | ||
| 491 | 513 | ||
| 492 | desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); | 514 | desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); |
| 493 | kstat_incr_irqs_this_cpu(irq, desc); | 515 | kstat_incr_irqs_this_cpu(irq, desc); |
| @@ -541,19 +563,23 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc) | |||
| 541 | raw_spin_lock(&desc->lock); | 563 | raw_spin_lock(&desc->lock); |
| 542 | 564 | ||
| 543 | desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); | 565 | desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); |
| 566 | |||
| 567 | if (!irq_may_run(desc)) { | ||
| 568 | desc->istate |= IRQS_PENDING; | ||
| 569 | mask_ack_irq(desc); | ||
| 570 | goto out_unlock; | ||
| 571 | } | ||
| 572 | |||
| 544 | /* | 573 | /* |
| 545 | * If we're currently running this IRQ, or its disabled, | 574 | * If its disabled or no action available then mask it and get |
| 546 | * we shouldn't process the IRQ. Mark it pending, handle | 575 | * out of here. |
| 547 | * the necessary masking and go out | ||
| 548 | */ | 576 | */ |
| 549 | if (unlikely(irqd_irq_disabled(&desc->irq_data) || | 577 | if (irqd_irq_disabled(&desc->irq_data) || !desc->action) { |
| 550 | irqd_irq_inprogress(&desc->irq_data) || !desc->action)) { | 578 | desc->istate |= IRQS_PENDING; |
| 551 | if (!irq_check_poll(desc)) { | 579 | mask_ack_irq(desc); |
| 552 | desc->istate |= IRQS_PENDING; | 580 | goto out_unlock; |
| 553 | mask_ack_irq(desc); | ||
| 554 | goto out_unlock; | ||
| 555 | } | ||
| 556 | } | 581 | } |
| 582 | |||
| 557 | kstat_incr_irqs_this_cpu(irq, desc); | 583 | kstat_incr_irqs_this_cpu(irq, desc); |
| 558 | 584 | ||
| 559 | /* Start handling the irq */ | 585 | /* Start handling the irq */ |
| @@ -602,18 +628,21 @@ void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc) | |||
| 602 | raw_spin_lock(&desc->lock); | 628 | raw_spin_lock(&desc->lock); |
| 603 | 629 | ||
| 604 | desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); | 630 | desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); |
| 631 | |||
| 632 | if (!irq_may_run(desc)) { | ||
| 633 | desc->istate |= IRQS_PENDING; | ||
| 634 | goto out_eoi; | ||
| 635 | } | ||
| 636 | |||
| 605 | /* | 637 | /* |
| 606 | * If we're currently running this IRQ, or its disabled, | 638 | * If its disabled or no action available then mask it and get |
| 607 | * we shouldn't process the IRQ. Mark it pending, handle | 639 | * out of here. |
| 608 | * the necessary masking and go out | ||
| 609 | */ | 640 | */ |
| 610 | if (unlikely(irqd_irq_disabled(&desc->irq_data) || | 641 | if (irqd_irq_disabled(&desc->irq_data) || !desc->action) { |
| 611 | irqd_irq_inprogress(&desc->irq_data) || !desc->action)) { | 642 | desc->istate |= IRQS_PENDING; |
| 612 | if (!irq_check_poll(desc)) { | 643 | goto out_eoi; |
| 613 | desc->istate |= IRQS_PENDING; | ||
| 614 | goto out_eoi; | ||
| 615 | } | ||
| 616 | } | 644 | } |
| 645 | |||
| 617 | kstat_incr_irqs_this_cpu(irq, desc); | 646 | kstat_incr_irqs_this_cpu(irq, desc); |
| 618 | 647 | ||
| 619 | do { | 648 | do { |
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h index 099ea2e0eb88..4332d766619d 100644 --- a/kernel/irq/internals.h +++ b/kernel/irq/internals.h | |||
| @@ -63,8 +63,8 @@ enum { | |||
| 63 | 63 | ||
| 64 | extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, | 64 | extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, |
| 65 | unsigned long flags); | 65 | unsigned long flags); |
| 66 | extern void __disable_irq(struct irq_desc *desc, unsigned int irq, bool susp); | 66 | extern void __disable_irq(struct irq_desc *desc, unsigned int irq); |
| 67 | extern void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume); | 67 | extern void __enable_irq(struct irq_desc *desc, unsigned int irq); |
| 68 | 68 | ||
| 69 | extern int irq_startup(struct irq_desc *desc, bool resend); | 69 | extern int irq_startup(struct irq_desc *desc, bool resend); |
| 70 | extern void irq_shutdown(struct irq_desc *desc); | 70 | extern void irq_shutdown(struct irq_desc *desc); |
| @@ -194,3 +194,15 @@ static inline void kstat_incr_irqs_this_cpu(unsigned int irq, struct irq_desc *d | |||
| 194 | __this_cpu_inc(*desc->kstat_irqs); | 194 | __this_cpu_inc(*desc->kstat_irqs); |
| 195 | __this_cpu_inc(kstat.irqs_sum); | 195 | __this_cpu_inc(kstat.irqs_sum); |
| 196 | } | 196 | } |
| 197 | |||
| 198 | #ifdef CONFIG_PM_SLEEP | ||
| 199 | bool irq_pm_check_wakeup(struct irq_desc *desc); | ||
| 200 | void irq_pm_install_action(struct irq_desc *desc, struct irqaction *action); | ||
| 201 | void irq_pm_remove_action(struct irq_desc *desc, struct irqaction *action); | ||
| 202 | #else | ||
| 203 | static inline bool irq_pm_check_wakeup(struct irq_desc *desc) { return false; } | ||
| 204 | static inline void | ||
| 205 | irq_pm_install_action(struct irq_desc *desc, struct irqaction *action) { } | ||
| 206 | static inline void | ||
| 207 | irq_pm_remove_action(struct irq_desc *desc, struct irqaction *action) { } | ||
| 208 | #endif | ||
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 3dc6a61bf06a..0a9104b4608b 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c | |||
| @@ -382,14 +382,8 @@ setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask) | |||
| 382 | } | 382 | } |
| 383 | #endif | 383 | #endif |
| 384 | 384 | ||
| 385 | void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend) | 385 | void __disable_irq(struct irq_desc *desc, unsigned int irq) |
| 386 | { | 386 | { |
| 387 | if (suspend) { | ||
| 388 | if (!desc->action || (desc->action->flags & IRQF_NO_SUSPEND)) | ||
| 389 | return; | ||
| 390 | desc->istate |= IRQS_SUSPENDED; | ||
| 391 | } | ||
| 392 | |||
| 393 | if (!desc->depth++) | 387 | if (!desc->depth++) |
| 394 | irq_disable(desc); | 388 | irq_disable(desc); |
| 395 | } | 389 | } |
| @@ -401,7 +395,7 @@ static int __disable_irq_nosync(unsigned int irq) | |||
| 401 | 395 | ||
| 402 | if (!desc) | 396 | if (!desc) |
| 403 | return -EINVAL; | 397 | return -EINVAL; |
| 404 | __disable_irq(desc, irq, false); | 398 | __disable_irq(desc, irq); |
| 405 | irq_put_desc_busunlock(desc, flags); | 399 | irq_put_desc_busunlock(desc, flags); |
| 406 | return 0; | 400 | return 0; |
| 407 | } | 401 | } |
| @@ -442,20 +436,8 @@ void disable_irq(unsigned int irq) | |||
| 442 | } | 436 | } |
| 443 | EXPORT_SYMBOL(disable_irq); | 437 | EXPORT_SYMBOL(disable_irq); |
| 444 | 438 | ||
| 445 | void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume) | 439 | void __enable_irq(struct irq_desc *desc, unsigned int irq) |
| 446 | { | 440 | { |
| 447 | if (resume) { | ||
| 448 | if (!(desc->istate & IRQS_SUSPENDED)) { | ||
| 449 | if (!desc->action) | ||
| 450 | return; | ||
| 451 | if (!(desc->action->flags & IRQF_FORCE_RESUME)) | ||
| 452 | return; | ||
| 453 | /* Pretend that it got disabled ! */ | ||
| 454 | desc->depth++; | ||
| 455 | } | ||
| 456 | desc->istate &= ~IRQS_SUSPENDED; | ||
| 457 | } | ||
| 458 | |||
| 459 | switch (desc->depth) { | 441 | switch (desc->depth) { |
| 460 | case 0: | 442 | case 0: |
| 461 | err_out: | 443 | err_out: |
| @@ -497,7 +479,7 @@ void enable_irq(unsigned int irq) | |||
| 497 | KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq)) | 479 | KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq)) |
| 498 | goto out; | 480 | goto out; |
| 499 | 481 | ||
| 500 | __enable_irq(desc, irq, false); | 482 | __enable_irq(desc, irq); |
| 501 | out: | 483 | out: |
| 502 | irq_put_desc_busunlock(desc, flags); | 484 | irq_put_desc_busunlock(desc, flags); |
| 503 | } | 485 | } |
| @@ -1218,6 +1200,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) | |||
| 1218 | new->irq = irq; | 1200 | new->irq = irq; |
| 1219 | *old_ptr = new; | 1201 | *old_ptr = new; |
| 1220 | 1202 | ||
| 1203 | irq_pm_install_action(desc, new); | ||
| 1204 | |||
| 1221 | /* Reset broken irq detection when installing new handler */ | 1205 | /* Reset broken irq detection when installing new handler */ |
| 1222 | desc->irq_count = 0; | 1206 | desc->irq_count = 0; |
| 1223 | desc->irqs_unhandled = 0; | 1207 | desc->irqs_unhandled = 0; |
| @@ -1228,7 +1212,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) | |||
| 1228 | */ | 1212 | */ |
| 1229 | if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) { | 1213 | if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) { |
| 1230 | desc->istate &= ~IRQS_SPURIOUS_DISABLED; | 1214 | desc->istate &= ~IRQS_SPURIOUS_DISABLED; |
| 1231 | __enable_irq(desc, irq, false); | 1215 | __enable_irq(desc, irq); |
| 1232 | } | 1216 | } |
| 1233 | 1217 | ||
| 1234 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 1218 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
| @@ -1336,6 +1320,8 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id) | |||
| 1336 | /* Found it - now remove it from the list of entries: */ | 1320 | /* Found it - now remove it from the list of entries: */ |
| 1337 | *action_ptr = action->next; | 1321 | *action_ptr = action->next; |
| 1338 | 1322 | ||
| 1323 | irq_pm_remove_action(desc, action); | ||
| 1324 | |||
| 1339 | /* If this was the last handler, shut down the IRQ line: */ | 1325 | /* If this was the last handler, shut down the IRQ line: */ |
| 1340 | if (!desc->action) { | 1326 | if (!desc->action) { |
| 1341 | irq_shutdown(desc); | 1327 | irq_shutdown(desc); |
diff --git a/kernel/irq/pm.c b/kernel/irq/pm.c index abcd6ca86cb7..3ca532592704 100644 --- a/kernel/irq/pm.c +++ b/kernel/irq/pm.c | |||
| @@ -9,17 +9,105 @@ | |||
| 9 | #include <linux/irq.h> | 9 | #include <linux/irq.h> |
| 10 | #include <linux/module.h> | 10 | #include <linux/module.h> |
| 11 | #include <linux/interrupt.h> | 11 | #include <linux/interrupt.h> |
| 12 | #include <linux/suspend.h> | ||
| 12 | #include <linux/syscore_ops.h> | 13 | #include <linux/syscore_ops.h> |
| 13 | 14 | ||
| 14 | #include "internals.h" | 15 | #include "internals.h" |
| 15 | 16 | ||
| 17 | bool irq_pm_check_wakeup(struct irq_desc *desc) | ||
| 18 | { | ||
| 19 | if (irqd_is_wakeup_armed(&desc->irq_data)) { | ||
| 20 | irqd_clear(&desc->irq_data, IRQD_WAKEUP_ARMED); | ||
| 21 | desc->istate |= IRQS_SUSPENDED | IRQS_PENDING; | ||
| 22 | desc->depth++; | ||
| 23 | irq_disable(desc); | ||
| 24 | pm_system_wakeup(); | ||
| 25 | return true; | ||
| 26 | } | ||
| 27 | return false; | ||
| 28 | } | ||
| 29 | |||
| 30 | /* | ||
| 31 | * Called from __setup_irq() with desc->lock held after @action has | ||
| 32 | * been installed in the action chain. | ||
| 33 | */ | ||
| 34 | void irq_pm_install_action(struct irq_desc *desc, struct irqaction *action) | ||
| 35 | { | ||
| 36 | desc->nr_actions++; | ||
| 37 | |||
| 38 | if (action->flags & IRQF_FORCE_RESUME) | ||
| 39 | desc->force_resume_depth++; | ||
| 40 | |||
| 41 | WARN_ON_ONCE(desc->force_resume_depth && | ||
| 42 | desc->force_resume_depth != desc->nr_actions); | ||
| 43 | |||
| 44 | if (action->flags & IRQF_NO_SUSPEND) | ||
| 45 | desc->no_suspend_depth++; | ||
| 46 | |||
| 47 | WARN_ON_ONCE(desc->no_suspend_depth && | ||
| 48 | desc->no_suspend_depth != desc->nr_actions); | ||
| 49 | } | ||
| 50 | |||
| 51 | /* | ||
| 52 | * Called from __free_irq() with desc->lock held after @action has | ||
| 53 | * been removed from the action chain. | ||
| 54 | */ | ||
| 55 | void irq_pm_remove_action(struct irq_desc *desc, struct irqaction *action) | ||
| 56 | { | ||
| 57 | desc->nr_actions--; | ||
| 58 | |||
| 59 | if (action->flags & IRQF_FORCE_RESUME) | ||
| 60 | desc->force_resume_depth--; | ||
| 61 | |||
| 62 | if (action->flags & IRQF_NO_SUSPEND) | ||
| 63 | desc->no_suspend_depth--; | ||
| 64 | } | ||
| 65 | |||
| 66 | static bool suspend_device_irq(struct irq_desc *desc, int irq) | ||
| 67 | { | ||
| 68 | if (!desc->action || desc->no_suspend_depth) | ||
| 69 | return false; | ||
| 70 | |||
| 71 | if (irqd_is_wakeup_set(&desc->irq_data)) { | ||
| 72 | irqd_set(&desc->irq_data, IRQD_WAKEUP_ARMED); | ||
| 73 | /* | ||
| 74 | * We return true here to force the caller to issue | ||
| 75 | * synchronize_irq(). We need to make sure that the | ||
| 76 | * IRQD_WAKEUP_ARMED is visible before we return from | ||
| 77 | * suspend_device_irqs(). | ||
| 78 | */ | ||
| 79 | return true; | ||
| 80 | } | ||
| 81 | |||
| 82 | desc->istate |= IRQS_SUSPENDED; | ||
| 83 | __disable_irq(desc, irq); | ||
| 84 | |||
| 85 | /* | ||
| 86 | * Hardware which has no wakeup source configuration facility | ||
| 87 | * requires that the non wakeup interrupts are masked at the | ||
| 88 | * chip level. The chip implementation indicates that with | ||
| 89 | * IRQCHIP_MASK_ON_SUSPEND. | ||
| 90 | */ | ||
| 91 | if (irq_desc_get_chip(desc)->flags & IRQCHIP_MASK_ON_SUSPEND) | ||
| 92 | mask_irq(desc); | ||
| 93 | return true; | ||
| 94 | } | ||
| 95 | |||
| 16 | /** | 96 | /** |
| 17 | * suspend_device_irqs - disable all currently enabled interrupt lines | 97 | * suspend_device_irqs - disable all currently enabled interrupt lines |
| 18 | * | 98 | * |
| 19 | * During system-wide suspend or hibernation device drivers need to be prevented | 99 | * During system-wide suspend or hibernation device drivers need to be |
| 20 | * from receiving interrupts and this function is provided for this purpose. | 100 | * prevented from receiving interrupts and this function is provided |
| 21 | * It marks all interrupt lines in use, except for the timer ones, as disabled | 101 | * for this purpose. |
| 22 | * and sets the IRQS_SUSPENDED flag for each of them. | 102 | * |
| 103 | * So we disable all interrupts and mark them IRQS_SUSPENDED except | ||
| 104 | * for those which are unused, those which are marked as not | ||
| 105 | * suspendable via an interrupt request with the flag IRQF_NO_SUSPEND | ||
| 106 | * set and those which are marked as active wakeup sources. | ||
| 107 | * | ||
| 108 | * The active wakeup sources are handled by the flow handler entry | ||
| 109 | * code which checks for the IRQD_WAKEUP_ARMED flag, suspends the | ||
| 110 | * interrupt and notifies the pm core about the wakeup. | ||
| 23 | */ | 111 | */ |
| 24 | void suspend_device_irqs(void) | 112 | void suspend_device_irqs(void) |
| 25 | { | 113 | { |
| @@ -28,18 +116,36 @@ void suspend_device_irqs(void) | |||
| 28 | 116 | ||
| 29 | for_each_irq_desc(irq, desc) { | 117 | for_each_irq_desc(irq, desc) { |
| 30 | unsigned long flags; | 118 | unsigned long flags; |
| 119 | bool sync; | ||
| 31 | 120 | ||
| 32 | raw_spin_lock_irqsave(&desc->lock, flags); | 121 | raw_spin_lock_irqsave(&desc->lock, flags); |
| 33 | __disable_irq(desc, irq, true); | 122 | sync = suspend_device_irq(desc, irq); |
| 34 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 123 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
| 35 | } | ||
| 36 | 124 | ||
| 37 | for_each_irq_desc(irq, desc) | 125 | if (sync) |
| 38 | if (desc->istate & IRQS_SUSPENDED) | ||
| 39 | synchronize_irq(irq); | 126 | synchronize_irq(irq); |
| 127 | } | ||
| 40 | } | 128 | } |
| 41 | EXPORT_SYMBOL_GPL(suspend_device_irqs); | 129 | EXPORT_SYMBOL_GPL(suspend_device_irqs); |
| 42 | 130 | ||
| 131 | static void resume_irq(struct irq_desc *desc, int irq) | ||
| 132 | { | ||
| 133 | irqd_clear(&desc->irq_data, IRQD_WAKEUP_ARMED); | ||
| 134 | |||
| 135 | if (desc->istate & IRQS_SUSPENDED) | ||
| 136 | goto resume; | ||
| 137 | |||
| 138 | /* Force resume the interrupt? */ | ||
| 139 | if (!desc->force_resume_depth) | ||
| 140 | return; | ||
| 141 | |||
| 142 | /* Pretend that it got disabled ! */ | ||
| 143 | desc->depth++; | ||
| 144 | resume: | ||
| 145 | desc->istate &= ~IRQS_SUSPENDED; | ||
| 146 | __enable_irq(desc, irq); | ||
| 147 | } | ||
| 148 | |||
| 43 | static void resume_irqs(bool want_early) | 149 | static void resume_irqs(bool want_early) |
| 44 | { | 150 | { |
| 45 | struct irq_desc *desc; | 151 | struct irq_desc *desc; |
| @@ -54,7 +160,7 @@ static void resume_irqs(bool want_early) | |||
| 54 | continue; | 160 | continue; |
| 55 | 161 | ||
| 56 | raw_spin_lock_irqsave(&desc->lock, flags); | 162 | raw_spin_lock_irqsave(&desc->lock, flags); |
| 57 | __enable_irq(desc, irq, true); | 163 | resume_irq(desc, irq); |
| 58 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 164 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
| 59 | } | 165 | } |
| 60 | } | 166 | } |
| @@ -93,38 +199,3 @@ void resume_device_irqs(void) | |||
| 93 | resume_irqs(false); | 199 | resume_irqs(false); |
| 94 | } | 200 | } |
| 95 | EXPORT_SYMBOL_GPL(resume_device_irqs); | 201 | EXPORT_SYMBOL_GPL(resume_device_irqs); |
| 96 | |||
| 97 | /** | ||
| 98 | * check_wakeup_irqs - check if any wake-up interrupts are pending | ||
| 99 | */ | ||
| 100 | int check_wakeup_irqs(void) | ||
| 101 | { | ||
| 102 | struct irq_desc *desc; | ||
| 103 | int irq; | ||
| 104 | |||
| 105 | for_each_irq_desc(irq, desc) { | ||
| 106 | /* | ||
| 107 | * Only interrupts which are marked as wakeup source | ||
| 108 | * and have not been disabled before the suspend check | ||
| 109 | * can abort suspend. | ||
| 110 | */ | ||
| 111 | if (irqd_is_wakeup_set(&desc->irq_data)) { | ||
| 112 | if (desc->depth == 1 && desc->istate & IRQS_PENDING) | ||
| 113 | return -EBUSY; | ||
| 114 | continue; | ||
| 115 | } | ||
| 116 | /* | ||
| 117 | * Check the non wakeup interrupts whether they need | ||
| 118 | * to be masked before finally going into suspend | ||
| 119 | * state. That's for hardware which has no wakeup | ||
| 120 | * source configuration facility. The chip | ||
| 121 | * implementation indicates that with | ||
| 122 | * IRQCHIP_MASK_ON_SUSPEND. | ||
| 123 | */ | ||
| 124 | if (desc->istate & IRQS_SUSPENDED && | ||
| 125 | irq_desc_get_chip(desc)->flags & IRQCHIP_MASK_ON_SUSPEND) | ||
| 126 | mask_irq(desc); | ||
| 127 | } | ||
| 128 | |||
| 129 | return 0; | ||
| 130 | } | ||
diff --git a/kernel/power/process.c b/kernel/power/process.c index 4ee194eb524b..7b323221b9ee 100644 --- a/kernel/power/process.c +++ b/kernel/power/process.c | |||
| @@ -129,6 +129,7 @@ int freeze_processes(void) | |||
| 129 | if (!pm_freezing) | 129 | if (!pm_freezing) |
| 130 | atomic_inc(&system_freezing_cnt); | 130 | atomic_inc(&system_freezing_cnt); |
| 131 | 131 | ||
| 132 | pm_wakeup_clear(); | ||
| 132 | printk("Freezing user space processes ... "); | 133 | printk("Freezing user space processes ... "); |
| 133 | pm_freezing = true; | 134 | pm_freezing = true; |
| 134 | error = try_to_freeze_tasks(true); | 135 | error = try_to_freeze_tasks(true); |
