author		Rafael J. Wysocki <rafael.j.wysocki@intel.com>	2014-09-30 14:46:13 -0400
committer	Rafael J. Wysocki <rafael.j.wysocki@intel.com>	2014-09-30 14:46:13 -0400
commit		e4cb0c9e92f7b16db7a1e892ac6bcf2f736dfd50 (patch)
tree		35d66a4fc97aa642c14483966f050b5663ff02ca
parent		905563ff47db35dcb3f69e69d434207270ad1966 (diff)
parent		27f3d18630cd7fbb03b62bd78a74303cb8c88069 (diff)
Merge branch 'pm-genirq' into acpi-pm
Diffstat:
 -rw-r--r--  Documentation/power/suspend-and-interrupts.txt | 123
 -rw-r--r--  arch/x86/kernel/apic/io_apic.c                 |   5
 -rw-r--r--  drivers/base/power/wakeup.c                    |  16
 -rw-r--r--  drivers/base/syscore.c                         |   7
 -rw-r--r--  drivers/pci/pcie/pme.c                         |  61
 -rw-r--r--  include/linux/interrupt.h                      |   5
 -rw-r--r--  include/linux/irq.h                            |   8
 -rw-r--r--  include/linux/irqdesc.h                        |  10
 -rw-r--r--  include/linux/suspend.h                        |   4
 -rw-r--r--  kernel/irq/chip.c                              |  85
 -rw-r--r--  kernel/irq/internals.h                         |  16
 -rw-r--r--  kernel/irq/manage.c                            |  32
 -rw-r--r--  kernel/irq/pm.c                                | 159
 -rw-r--r--  kernel/power/process.c                         |   1
 14 files changed, 415 insertions, 117 deletions
diff --git a/Documentation/power/suspend-and-interrupts.txt b/Documentation/power/suspend-and-interrupts.txt
new file mode 100644
index 000000000000..69663640dea5
--- /dev/null
+++ b/Documentation/power/suspend-and-interrupts.txt
@@ -0,0 +1,123 @@
+System Suspend and Device Interrupts
+
+Copyright (C) 2014 Intel Corp.
+Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+
+
+Suspending and Resuming Device IRQs
+-----------------------------------
+
+Device interrupt request lines (IRQs) are generally disabled during system
+suspend after the "late" phase of suspending devices (that is, after all of the
+->prepare, ->suspend and ->suspend_late callbacks have been executed for all
+devices). That is done by suspend_device_irqs().
+
+The rationale for doing so is that after the "late" phase of device suspend
+there is no legitimate reason why any interrupts from suspended devices should
+trigger and if any devices have not been suspended properly yet, it is better to
+block interrupts from them anyway. Also, in the past there were problems with
+interrupt handlers for shared IRQs whose device drivers were not prepared for
+interrupts triggering after their devices had been suspended. In some cases
+they would attempt to access, for example, memory address spaces of suspended
+devices and cause unpredictable behavior to ensue.
+Unfortunately, such problems are very difficult to debug and the introduction
+of suspend_device_irqs(), along with the "noirq" phase of device suspend and
+resume, was the only practical way to mitigate them.
+
+Device IRQs are re-enabled during system resume, right before the "early" phase
+of resuming devices (that is, before starting to execute ->resume_early
+callbacks for devices). The function doing that is resume_device_irqs().
+
+
+The IRQF_NO_SUSPEND Flag
+------------------------
+
+There are interrupts that can legitimately trigger during the entire system
+suspend-resume cycle, including the "noirq" phases of suspending and resuming
+devices as well as during the time when nonboot CPUs are taken offline and
+brought back online. That applies to timer interrupts in the first place,
+but also to IPIs and to some other special-purpose interrupts.
+
+The IRQF_NO_SUSPEND flag is used to indicate that to the IRQ subsystem when
+requesting a special-purpose interrupt. It causes suspend_device_irqs() to
+leave the corresponding IRQ enabled so as to allow the interrupt to work all
+the time as expected.
+
+Note that the IRQF_NO_SUSPEND flag affects the entire IRQ and not just one
+user of it. Thus, if the IRQ is shared, all of the interrupt handlers installed
+for it will be executed as usual after suspend_device_irqs(), even if the
+IRQF_NO_SUSPEND flag was not passed to request_irq() (or equivalent) by some of
+the IRQ's users. For this reason, using IRQF_NO_SUSPEND and IRQF_SHARED at the
+same time should be avoided.
+
+
+System Wakeup Interrupts, enable_irq_wake() and disable_irq_wake()
+------------------------------------------------------------------
+
+System wakeup interrupts generally need to be configured to wake up the system
+from sleep states, especially if they are used for different purposes (e.g. as
+I/O interrupts) in the working state.
+
+That may involve turning on a special signal handling logic within the platform
+(such as an SoC) so that signals from a given line are routed in a different way
+during system sleep so as to trigger a system wakeup when needed. For example,
+the platform may include a dedicated interrupt controller used specifically for
+handling system wakeup events. Then, if a given interrupt line is supposed to
+wake up the system from sleep states, the corresponding input of that interrupt
+controller needs to be enabled to receive signals from the line in question.
+After wakeup, it generally is better to disable that input to prevent the
+dedicated controller from triggering interrupts unnecessarily.
+
+The IRQ subsystem provides two helper functions to be used by device drivers for
+those purposes. Namely, enable_irq_wake() turns on the platform's logic for
+handling the given IRQ as a system wakeup interrupt line and disable_irq_wake()
+turns that logic off.
+
+Calling enable_irq_wake() causes suspend_device_irqs() to treat the given IRQ
+in a special way. Namely, the IRQ remains enabled, but on the first interrupt
+it will be disabled, marked as pending and "suspended" so that it will be
+re-enabled by resume_device_irqs() during the subsequent system resume. Also
+the PM core is notified about the event which causes the system suspend in
+progress to be aborted (that doesn't have to happen immediately, but at one
+of the points where the suspend thread looks for pending wakeup events).
+
+This way every interrupt from a wakeup interrupt source will either cause the
+system suspend currently in progress to be aborted or wake up the system if
+already suspended. However, after suspend_device_irqs() interrupt handlers are
+not executed for system wakeup IRQs. They are only executed for IRQF_NO_SUSPEND
+IRQs at that time, but those IRQs should not be configured for system wakeup
+using enable_irq_wake().
+
+
+Interrupts and Suspend-to-Idle
+------------------------------
+
+Suspend-to-idle (also known as the "freeze" sleep state) is a relatively new
+system sleep state that works by idling all of the processors and waiting for
+interrupts right after the "noirq" phase of suspending devices.
+
+Of course, this means that all of the interrupts with the IRQF_NO_SUSPEND flag
+set will bring CPUs out of idle while in that state, but they will not cause the
+IRQ subsystem to trigger a system wakeup.
+
+System wakeup interrupts, in turn, will trigger wakeup from suspend-to-idle in
+analogy with what they do in the full system suspend case. The only difference
+is that the wakeup from suspend-to-idle is signaled using the usual working
+state interrupt delivery mechanisms and doesn't require the platform to use
+any special interrupt handling logic for it to work.
+
+
+IRQF_NO_SUSPEND and enable_irq_wake()
+-------------------------------------
+
+There are no valid reasons to use both enable_irq_wake() and the IRQF_NO_SUSPEND
+flag on the same IRQ.
+
+First of all, if the IRQ is not shared, the rules for handling IRQF_NO_SUSPEND
+interrupts (interrupt handlers are invoked after suspend_device_irqs()) are
+directly at odds with the rules for handling system wakeup interrupts (interrupt
+handlers are not invoked after suspend_device_irqs()).
+
+Second, both enable_irq_wake() and IRQF_NO_SUSPEND apply to entire IRQs and not
+to individual interrupt handlers, so sharing an IRQ between a system wakeup
+interrupt source and an IRQF_NO_SUSPEND interrupt source does not make sense.
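The driver-facing pattern described by the new document can be made concrete with a short sketch. This is illustrative only, with hypothetical device, handler and field names; request_irq(), enable_irq_wake(), disable_irq_wake() and device_may_wakeup() are the kernel interfaces the text above refers to:

	#include <linux/device.h>
	#include <linux/interrupt.h>
	#include <linux/pm_wakeup.h>

	/* Hypothetical per-device data. */
	struct my_dev_data {
		int irq;
	};

	/*
	 * A special-purpose IRQ that must keep working across suspend:
	 * IRQF_NO_SUSPEND makes suspend_device_irqs() leave it enabled.
	 * Deliberately not combined with IRQF_SHARED, as advised above.
	 */
	static irqreturn_t my_tick_handler(int irq, void *dev_id)
	{
		return IRQ_HANDLED;
	}

	static int my_request_tick_irq(struct my_dev_data *data)
	{
		return request_irq(data->irq, my_tick_handler,
				   IRQF_NO_SUSPEND, "my-tick", data);
	}

	/*
	 * An ordinary I/O IRQ doubling as a wakeup source: arm the
	 * platform's wakeup logic on suspend, disarm it on resume.
	 */
	static int my_suspend(struct device *dev)
	{
		struct my_dev_data *data = dev_get_drvdata(dev);

		if (device_may_wakeup(dev))
			enable_irq_wake(data->irq);
		return 0;
	}

	static int my_resume(struct device *dev)
	{
		struct my_dev_data *data = dev_get_drvdata(dev);

		if (device_may_wakeup(dev))
			disable_irq_wake(data->irq);
		return 0;
	}

Note that the wakeup arming is conditional on device_may_wakeup(), the user-visible policy knob, whereas IRQF_NO_SUSPEND is a static property of the interrupt request itself.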
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 337ce5a9b15c..1183d545da1e 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -2623,6 +2623,7 @@ static struct irq_chip ioapic_chip __read_mostly = {
 	.irq_eoi = ack_apic_level,
 	.irq_set_affinity = native_ioapic_set_affinity,
 	.irq_retrigger = ioapic_retrigger_irq,
+	.flags = IRQCHIP_SKIP_SET_WAKE,
 };
 
 static inline void init_IO_APIC_traps(void)
@@ -3173,6 +3174,7 @@ static struct irq_chip msi_chip = {
 	.irq_ack = ack_apic_edge,
 	.irq_set_affinity = msi_set_affinity,
 	.irq_retrigger = ioapic_retrigger_irq,
+	.flags = IRQCHIP_SKIP_SET_WAKE,
 };
 
 int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc,
@@ -3271,6 +3273,7 @@ static struct irq_chip dmar_msi_type = {
 	.irq_ack = ack_apic_edge,
 	.irq_set_affinity = dmar_msi_set_affinity,
 	.irq_retrigger = ioapic_retrigger_irq,
+	.flags = IRQCHIP_SKIP_SET_WAKE,
 };
 
 int arch_setup_dmar_msi(unsigned int irq)
@@ -3321,6 +3324,7 @@ static struct irq_chip hpet_msi_type = {
 	.irq_ack = ack_apic_edge,
 	.irq_set_affinity = hpet_msi_set_affinity,
 	.irq_retrigger = ioapic_retrigger_irq,
+	.flags = IRQCHIP_SKIP_SET_WAKE,
 };
 
 int default_setup_hpet_msi(unsigned int irq, unsigned int id)
@@ -3384,6 +3388,7 @@ static struct irq_chip ht_irq_chip = {
 	.irq_ack = ack_apic_edge,
 	.irq_set_affinity = ht_set_affinity,
 	.irq_retrigger = ioapic_retrigger_irq,
+	.flags = IRQCHIP_SKIP_SET_WAKE,
 };
 
 int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
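All five hunks above make the same one-line change: none of these irq_chips implements an .irq_set_wake() callback, so before this change a call to enable_irq_wake() on one of their IRQs would fail with -ENXIO. IRQCHIP_SKIP_SET_WAKE tells the core to report success without calling into the chip. A simplified sketch of the core-side check (modeled on set_irq_wake_real() in kernel/irq/manage.c of this era, not a verbatim copy):

	static int set_irq_wake_real(unsigned int irq, unsigned int on)
	{
		struct irq_desc *desc = irq_to_desc(irq);
		int ret = -ENXIO;

		/* Chip opted out of wake configuration: report success. */
		if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE)
			return 0;

		/* Otherwise the chip must implement .irq_set_wake(). */
		if (desc->irq_data.chip->irq_set_wake)
			ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);

		return ret;
	}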
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index eb1bd2ecad8b..c2744b30d5d9 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -24,6 +24,9 @@
  */
 bool events_check_enabled __read_mostly;
 
+/* If set and the system is suspending, terminate the suspend. */
+static bool pm_abort_suspend __read_mostly;
+
 /*
  * Combined counters of registered wakeup events and wakeup events in progress.
  * They need to be modified together atomically, so it's better to use one
@@ -719,7 +722,18 @@ bool pm_wakeup_pending(void)
 		pm_print_active_wakeup_sources();
 	}
 
-	return ret;
+	return ret || pm_abort_suspend;
+}
+
+void pm_system_wakeup(void)
+{
+	pm_abort_suspend = true;
+	freeze_wake();
+}
+
+void pm_wakeup_clear(void)
+{
+	pm_abort_suspend = false;
 }
 
 /**
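pm_abort_suspend is the glue between the IRQ core and the suspend machinery: pm_system_wakeup() is what irq_pm_check_wakeup() (added to kernel/irq/pm.c below) calls when an armed wakeup interrupt fires, and every subsequent pm_wakeup_pending() check then observes the abort request. A sketch of a late-suspend check site consuming the flag (hypothetical caller name; syscore_suspend() in the next file does exactly this):

	#include <linux/errno.h>
	#include <linux/suspend.h>

	static int my_late_suspend_step(void)
	{
		/*
		 * An armed wakeup IRQ firing after suspend_device_irqs()
		 * ends up in pm_system_wakeup(), which sets
		 * pm_abort_suspend and, for suspend-to-idle, calls
		 * freeze_wake(). Any subsequent pm_wakeup_pending()
		 * check reports that and the suspend unwinds.
		 */
		if (pm_wakeup_pending())
			return -EBUSY;

		return 0;
	}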
diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c
index dbb8350ea8dc..8d98a329f6ea 100644
--- a/drivers/base/syscore.c
+++ b/drivers/base/syscore.c
@@ -9,7 +9,7 @@
 #include <linux/syscore_ops.h>
 #include <linux/mutex.h>
 #include <linux/module.h>
-#include <linux/interrupt.h>
+#include <linux/suspend.h>
 #include <trace/events/power.h>
 
 static LIST_HEAD(syscore_ops_list);
@@ -54,9 +54,8 @@ int syscore_suspend(void)
 	pr_debug("Checking wakeup interrupts\n");
 
 	/* Return error code if there are any wakeup interrupts pending. */
-	ret = check_wakeup_irqs();
-	if (ret)
-		return ret;
+	if (pm_wakeup_pending())
+		return -EBUSY;
 
 	WARN_ONCE(!irqs_disabled(),
 		"Interrupts enabled before system core suspend.\n");
diff --git a/drivers/pci/pcie/pme.c b/drivers/pci/pcie/pme.c
index 82e06a86cd77..a9f9c46e5022 100644
--- a/drivers/pci/pcie/pme.c
+++ b/drivers/pci/pcie/pme.c
@@ -41,11 +41,17 @@ static int __init pcie_pme_setup(char *str)
 }
 __setup("pcie_pme=", pcie_pme_setup);
 
+enum pme_suspend_level {
+	PME_SUSPEND_NONE = 0,
+	PME_SUSPEND_WAKEUP,
+	PME_SUSPEND_NOIRQ,
+};
+
 struct pcie_pme_service_data {
 	spinlock_t lock;
 	struct pcie_device *srv;
 	struct work_struct work;
-	bool noirq; /* Don't enable the PME interrupt used by this service. */
+	enum pme_suspend_level suspend_level;
 };
 
 /**
@@ -223,7 +229,7 @@ static void pcie_pme_work_fn(struct work_struct *work)
 	spin_lock_irq(&data->lock);
 
 	for (;;) {
-		if (data->noirq)
+		if (data->suspend_level != PME_SUSPEND_NONE)
 			break;
 
 		pcie_capability_read_dword(port, PCI_EXP_RTSTA, &rtsta);
@@ -250,7 +256,7 @@ static void pcie_pme_work_fn(struct work_struct *work)
 		spin_lock_irq(&data->lock);
 	}
 
-	if (!data->noirq)
+	if (data->suspend_level == PME_SUSPEND_NONE)
 		pcie_pme_interrupt_enable(port, true);
 
 	spin_unlock_irq(&data->lock);
@@ -367,6 +373,21 @@ static int pcie_pme_probe(struct pcie_device *srv)
 	return ret;
 }
 
+static bool pcie_pme_check_wakeup(struct pci_bus *bus)
+{
+	struct pci_dev *dev;
+
+	if (!bus)
+		return false;
+
+	list_for_each_entry(dev, &bus->devices, bus_list)
+		if (device_may_wakeup(&dev->dev)
+		    || pcie_pme_check_wakeup(dev->subordinate))
+			return true;
+
+	return false;
+}
+
 /**
  * pcie_pme_suspend - Suspend PCIe PME service device.
  * @srv: PCIe service device to suspend.
@@ -375,11 +396,26 @@ static int pcie_pme_suspend(struct pcie_device *srv)
 {
 	struct pcie_pme_service_data *data = get_service_data(srv);
 	struct pci_dev *port = srv->port;
+	bool wakeup;
 
+	if (device_may_wakeup(&port->dev)) {
+		wakeup = true;
+	} else {
+		down_read(&pci_bus_sem);
+		wakeup = pcie_pme_check_wakeup(port->subordinate);
+		up_read(&pci_bus_sem);
+	}
 	spin_lock_irq(&data->lock);
-	pcie_pme_interrupt_enable(port, false);
-	pcie_clear_root_pme_status(port);
-	data->noirq = true;
+	if (wakeup) {
+		enable_irq_wake(srv->irq);
+		data->suspend_level = PME_SUSPEND_WAKEUP;
+	} else {
+		struct pci_dev *port = srv->port;
+
+		pcie_pme_interrupt_enable(port, false);
+		pcie_clear_root_pme_status(port);
+		data->suspend_level = PME_SUSPEND_NOIRQ;
+	}
 	spin_unlock_irq(&data->lock);
 
 	synchronize_irq(srv->irq);
@@ -394,12 +430,17 @@ static int pcie_pme_suspend(struct pcie_device *srv)
 static int pcie_pme_resume(struct pcie_device *srv)
 {
 	struct pcie_pme_service_data *data = get_service_data(srv);
-	struct pci_dev *port = srv->port;
 
 	spin_lock_irq(&data->lock);
-	data->noirq = false;
-	pcie_clear_root_pme_status(port);
-	pcie_pme_interrupt_enable(port, true);
+	if (data->suspend_level == PME_SUSPEND_NOIRQ) {
+		struct pci_dev *port = srv->port;
+
+		pcie_clear_root_pme_status(port);
+		pcie_pme_interrupt_enable(port, true);
+	} else {
+		disable_irq_wake(srv->irq);
+	}
+	data->suspend_level = PME_SUSPEND_NONE;
 	spin_unlock_irq(&data->lock);
 
 	return 0;
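The suspend-time logic above boils down to a single predicate: keep the PME interrupt armed as a wakeup source if the port itself, or any device below it, may generate wakeup events; otherwise silence the interrupt entirely. Restated as a standalone helper (a sketch with a hypothetical name; pcie_pme_check_wakeup() is the recursive walk added by this patch, the rest are existing PCI core APIs):

	static bool pcie_pme_port_needs_wakeup(struct pci_dev *port)
	{
		bool wakeup;

		if (device_may_wakeup(&port->dev))
			return true;

		/* Walk the hierarchy below the port under pci_bus_sem. */
		down_read(&pci_bus_sem);
		wakeup = pcie_pme_check_wakeup(port->subordinate);
		up_read(&pci_bus_sem);

		return wakeup;
	}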
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 698ad053d064..69517a24bc50 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -193,11 +193,6 @@ extern void irq_wake_thread(unsigned int irq, void *dev_id);
 /* The following three functions are for the core kernel use only. */
 extern void suspend_device_irqs(void);
 extern void resume_device_irqs(void);
-#ifdef CONFIG_PM_SLEEP
-extern int check_wakeup_irqs(void);
-#else
-static inline int check_wakeup_irqs(void) { return 0; }
-#endif
 
 /**
  * struct irq_affinity_notify - context for notification of IRQ affinity changes
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 62af59242ddc..03f48d936f66 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -173,6 +173,7 @@ struct irq_data {
 * IRQD_IRQ_DISABLED - Disabled state of the interrupt
 * IRQD_IRQ_MASKED - Masked state of the interrupt
 * IRQD_IRQ_INPROGRESS - In progress state of the interrupt
+ * IRQD_WAKEUP_ARMED - Wakeup mode armed
 */
 enum {
 	IRQD_TRIGGER_MASK = 0xf,
@@ -186,6 +187,7 @@ enum {
 	IRQD_IRQ_DISABLED = (1 << 16),
 	IRQD_IRQ_MASKED = (1 << 17),
 	IRQD_IRQ_INPROGRESS = (1 << 18),
+	IRQD_WAKEUP_ARMED = (1 << 19),
 };
 
 static inline bool irqd_is_setaffinity_pending(struct irq_data *d)
@@ -257,6 +259,12 @@ static inline bool irqd_irq_inprogress(struct irq_data *d)
 	return d->state_use_accessors & IRQD_IRQ_INPROGRESS;
 }
 
+static inline bool irqd_is_wakeup_armed(struct irq_data *d)
+{
+	return d->state_use_accessors & IRQD_WAKEUP_ARMED;
+}
+
+
 /*
 * Functions for chained handlers which can be enabled/disabled by the
 * standard disable_irq/enable_irq calls. Must be called with
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index 472c021a2d4f..cb1a31e448ae 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -36,6 +36,11 @@ struct irq_desc;
 * @threads_oneshot: bitfield to handle shared oneshot threads
 * @threads_active: number of irqaction threads currently running
 * @wait_for_threads: wait queue for sync_irq to wait for threaded handlers
+ * @nr_actions: number of installed actions on this descriptor
+ * @no_suspend_depth: number of irqactions on a irq descriptor with
+ *			IRQF_NO_SUSPEND set
+ * @force_resume_depth: number of irqactions on a irq descriptor with
+ *			IRQF_FORCE_RESUME set
 * @dir: /proc/irq/ procfs entry
 * @name: flow handler name for /proc/interrupts output
 */
@@ -68,6 +73,11 @@ struct irq_desc {
 	unsigned long threads_oneshot;
 	atomic_t threads_active;
 	wait_queue_head_t wait_for_threads;
+#ifdef CONFIG_PM_SLEEP
+	unsigned int nr_actions;
+	unsigned int no_suspend_depth;
+	unsigned int force_resume_depth;
+#endif
 #ifdef CONFIG_PROC_FS
 	struct proc_dir_entry *dir;
 #endif
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 519064e0c943..06a9910827c2 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -371,6 +371,8 @@ extern int unregister_pm_notifier(struct notifier_block *nb);
 extern bool events_check_enabled;
 
 extern bool pm_wakeup_pending(void);
+extern void pm_system_wakeup(void);
+extern void pm_wakeup_clear(void);
 extern bool pm_get_wakeup_count(unsigned int *count, bool block);
 extern bool pm_save_wakeup_count(unsigned int count);
 extern void pm_wakep_autosleep_enabled(bool set);
@@ -418,6 +420,8 @@ static inline int unregister_pm_notifier(struct notifier_block *nb)
 #define pm_notifier(fn, pri) do { (void)(fn); } while (0)
 
 static inline bool pm_wakeup_pending(void) { return false; }
+static inline void pm_system_wakeup(void) {}
+static inline void pm_wakeup_clear(void) {}
 
 static inline void lock_system_sleep(void) {}
 static inline void unlock_system_sleep(void) {}
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 6223fab9a9d2..8fb52e9bddc1 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -342,6 +342,31 @@ static bool irq_check_poll(struct irq_desc *desc)
 	return irq_wait_for_poll(desc);
 }
 
+static bool irq_may_run(struct irq_desc *desc)
+{
+	unsigned int mask = IRQD_IRQ_INPROGRESS | IRQD_WAKEUP_ARMED;
+
+	/*
+	 * If the interrupt is not in progress and is not an armed
+	 * wakeup interrupt, proceed.
+	 */
+	if (!irqd_has_set(&desc->irq_data, mask))
+		return true;
+
+	/*
+	 * If the interrupt is an armed wakeup source, mark it pending
+	 * and suspended, disable it and notify the pm core about the
+	 * event.
+	 */
+	if (irq_pm_check_wakeup(desc))
+		return false;
+
+	/*
+	 * Handle a potential concurrent poll on a different core.
+	 */
+	return irq_check_poll(desc);
+}
+
 /**
 * handle_simple_irq - Simple and software-decoded IRQs.
 * @irq: the interrupt number
@@ -359,9 +384,8 @@ handle_simple_irq(unsigned int irq, struct irq_desc *desc)
 {
 	raw_spin_lock(&desc->lock);
 
-	if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
-		if (!irq_check_poll(desc))
-			goto out_unlock;
+	if (!irq_may_run(desc))
+		goto out_unlock;
 
 	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
 	kstat_incr_irqs_this_cpu(irq, desc);
@@ -412,9 +436,8 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
 	raw_spin_lock(&desc->lock);
 	mask_ack_irq(desc);
 
-	if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
-		if (!irq_check_poll(desc))
-			goto out_unlock;
+	if (!irq_may_run(desc))
+		goto out_unlock;
 
 	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
 	kstat_incr_irqs_this_cpu(irq, desc);
@@ -485,9 +508,8 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
 
 	raw_spin_lock(&desc->lock);
 
-	if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
-		if (!irq_check_poll(desc))
-			goto out;
+	if (!irq_may_run(desc))
+		goto out;
 
 	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
 	kstat_incr_irqs_this_cpu(irq, desc);
@@ -541,19 +563,23 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
 	raw_spin_lock(&desc->lock);
 
 	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
+
+	if (!irq_may_run(desc)) {
+		desc->istate |= IRQS_PENDING;
+		mask_ack_irq(desc);
+		goto out_unlock;
+	}
+
 	/*
-	 * If we're currently running this IRQ, or its disabled,
-	 * we shouldn't process the IRQ. Mark it pending, handle
-	 * the necessary masking and go out
+	 * If its disabled or no action available then mask it and get
+	 * out of here.
 	 */
-	if (unlikely(irqd_irq_disabled(&desc->irq_data) ||
-		     irqd_irq_inprogress(&desc->irq_data) || !desc->action)) {
-		if (!irq_check_poll(desc)) {
-			desc->istate |= IRQS_PENDING;
-			mask_ack_irq(desc);
-			goto out_unlock;
-		}
+	if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
+		desc->istate |= IRQS_PENDING;
+		mask_ack_irq(desc);
+		goto out_unlock;
 	}
+
 	kstat_incr_irqs_this_cpu(irq, desc);
 
 	/* Start handling the irq */
@@ -602,18 +628,21 @@ void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc)
 	raw_spin_lock(&desc->lock);
 
 	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
+
+	if (!irq_may_run(desc)) {
+		desc->istate |= IRQS_PENDING;
+		goto out_eoi;
+	}
+
 	/*
-	 * If we're currently running this IRQ, or its disabled,
-	 * we shouldn't process the IRQ. Mark it pending, handle
-	 * the necessary masking and go out
+	 * If its disabled or no action available then mask it and get
+	 * out of here.
 	 */
-	if (unlikely(irqd_irq_disabled(&desc->irq_data) ||
-		     irqd_irq_inprogress(&desc->irq_data) || !desc->action)) {
-		if (!irq_check_poll(desc)) {
-			desc->istate |= IRQS_PENDING;
-			goto out_eoi;
-		}
+	if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
+		desc->istate |= IRQS_PENDING;
+		goto out_eoi;
 	}
+
 	kstat_incr_irqs_this_cpu(irq, desc);
 
 	do {
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index 099ea2e0eb88..4332d766619d 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -63,8 +63,8 @@ enum {
 
 extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
 		unsigned long flags);
-extern void __disable_irq(struct irq_desc *desc, unsigned int irq, bool susp);
-extern void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume);
+extern void __disable_irq(struct irq_desc *desc, unsigned int irq);
+extern void __enable_irq(struct irq_desc *desc, unsigned int irq);
 
 extern int irq_startup(struct irq_desc *desc, bool resend);
 extern void irq_shutdown(struct irq_desc *desc);
@@ -194,3 +194,15 @@ static inline void kstat_incr_irqs_this_cpu(unsigned int irq, struct irq_desc *d
 	__this_cpu_inc(*desc->kstat_irqs);
 	__this_cpu_inc(kstat.irqs_sum);
 }
+
+#ifdef CONFIG_PM_SLEEP
+bool irq_pm_check_wakeup(struct irq_desc *desc);
+void irq_pm_install_action(struct irq_desc *desc, struct irqaction *action);
+void irq_pm_remove_action(struct irq_desc *desc, struct irqaction *action);
+#else
+static inline bool irq_pm_check_wakeup(struct irq_desc *desc) { return false; }
+static inline void
+irq_pm_install_action(struct irq_desc *desc, struct irqaction *action) { }
+static inline void
+irq_pm_remove_action(struct irq_desc *desc, struct irqaction *action) { }
+#endif
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 3dc6a61bf06a..0a9104b4608b 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -382,14 +382,8 @@ setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
 }
 #endif
 
-void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend)
+void __disable_irq(struct irq_desc *desc, unsigned int irq)
 {
-	if (suspend) {
-		if (!desc->action || (desc->action->flags & IRQF_NO_SUSPEND))
-			return;
-		desc->istate |= IRQS_SUSPENDED;
-	}
-
 	if (!desc->depth++)
 		irq_disable(desc);
 }
@@ -401,7 +395,7 @@ static int __disable_irq_nosync(unsigned int irq)
 
 	if (!desc)
 		return -EINVAL;
-	__disable_irq(desc, irq, false);
+	__disable_irq(desc, irq);
 	irq_put_desc_busunlock(desc, flags);
 	return 0;
 }
@@ -442,20 +436,8 @@ void disable_irq(unsigned int irq)
 }
 EXPORT_SYMBOL(disable_irq);
 
-void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
+void __enable_irq(struct irq_desc *desc, unsigned int irq)
 {
-	if (resume) {
-		if (!(desc->istate & IRQS_SUSPENDED)) {
-			if (!desc->action)
-				return;
-			if (!(desc->action->flags & IRQF_FORCE_RESUME))
-				return;
-			/* Pretend that it got disabled ! */
-			desc->depth++;
-		}
-		desc->istate &= ~IRQS_SUSPENDED;
-	}
-
 	switch (desc->depth) {
 	case 0:
 err_out:
@@ -497,7 +479,7 @@ void enable_irq(unsigned int irq)
 		 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
 		goto out;
 
-	__enable_irq(desc, irq, false);
+	__enable_irq(desc, irq);
 out:
 	irq_put_desc_busunlock(desc, flags);
 }
@@ -1218,6 +1200,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 	new->irq = irq;
 	*old_ptr = new;
 
+	irq_pm_install_action(desc, new);
+
 	/* Reset broken irq detection when installing new handler */
 	desc->irq_count = 0;
 	desc->irqs_unhandled = 0;
@@ -1228,7 +1212,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 	 */
 	if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
 		desc->istate &= ~IRQS_SPURIOUS_DISABLED;
-		__enable_irq(desc, irq, false);
+		__enable_irq(desc, irq);
 	}
 
 	raw_spin_unlock_irqrestore(&desc->lock, flags);
@@ -1336,6 +1320,8 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
 	/* Found it - now remove it from the list of entries: */
 	*action_ptr = action->next;
 
+	irq_pm_remove_action(desc, action);
+
 	/* If this was the last handler, shut down the IRQ line: */
 	if (!desc->action) {
 		irq_shutdown(desc);
diff --git a/kernel/irq/pm.c b/kernel/irq/pm.c
index abcd6ca86cb7..3ca532592704 100644
--- a/kernel/irq/pm.c
+++ b/kernel/irq/pm.c
@@ -9,17 +9,105 @@
 #include <linux/irq.h>
 #include <linux/module.h>
 #include <linux/interrupt.h>
+#include <linux/suspend.h>
 #include <linux/syscore_ops.h>
 
 #include "internals.h"
 
+bool irq_pm_check_wakeup(struct irq_desc *desc)
+{
+	if (irqd_is_wakeup_armed(&desc->irq_data)) {
+		irqd_clear(&desc->irq_data, IRQD_WAKEUP_ARMED);
+		desc->istate |= IRQS_SUSPENDED | IRQS_PENDING;
+		desc->depth++;
+		irq_disable(desc);
+		pm_system_wakeup();
+		return true;
+	}
+	return false;
+}
+
+/*
+ * Called from __setup_irq() with desc->lock held after @action has
+ * been installed in the action chain.
+ */
+void irq_pm_install_action(struct irq_desc *desc, struct irqaction *action)
+{
+	desc->nr_actions++;
+
+	if (action->flags & IRQF_FORCE_RESUME)
+		desc->force_resume_depth++;
+
+	WARN_ON_ONCE(desc->force_resume_depth &&
+		     desc->force_resume_depth != desc->nr_actions);
+
+	if (action->flags & IRQF_NO_SUSPEND)
+		desc->no_suspend_depth++;
+
+	WARN_ON_ONCE(desc->no_suspend_depth &&
+		     desc->no_suspend_depth != desc->nr_actions);
+}
+
+/*
+ * Called from __free_irq() with desc->lock held after @action has
+ * been removed from the action chain.
+ */
+void irq_pm_remove_action(struct irq_desc *desc, struct irqaction *action)
+{
+	desc->nr_actions--;
+
+	if (action->flags & IRQF_FORCE_RESUME)
+		desc->force_resume_depth--;
+
+	if (action->flags & IRQF_NO_SUSPEND)
+		desc->no_suspend_depth--;
+}
+
+static bool suspend_device_irq(struct irq_desc *desc, int irq)
+{
+	if (!desc->action || desc->no_suspend_depth)
+		return false;
+
+	if (irqd_is_wakeup_set(&desc->irq_data)) {
+		irqd_set(&desc->irq_data, IRQD_WAKEUP_ARMED);
+		/*
+		 * We return true here to force the caller to issue
+		 * synchronize_irq(). We need to make sure that the
+		 * IRQD_WAKEUP_ARMED is visible before we return from
+		 * suspend_device_irqs().
+		 */
+		return true;
+	}
+
+	desc->istate |= IRQS_SUSPENDED;
+	__disable_irq(desc, irq);
+
+	/*
+	 * Hardware which has no wakeup source configuration facility
+	 * requires that the non wakeup interrupts are masked at the
+	 * chip level. The chip implementation indicates that with
+	 * IRQCHIP_MASK_ON_SUSPEND.
+	 */
+	if (irq_desc_get_chip(desc)->flags & IRQCHIP_MASK_ON_SUSPEND)
+		mask_irq(desc);
+	return true;
+}
+
 /**
 * suspend_device_irqs - disable all currently enabled interrupt lines
 *
- * During system-wide suspend or hibernation device drivers need to be prevented
- * from receiving interrupts and this function is provided for this purpose.
- * It marks all interrupt lines in use, except for the timer ones, as disabled
- * and sets the IRQS_SUSPENDED flag for each of them.
+ * During system-wide suspend or hibernation device drivers need to be
+ * prevented from receiving interrupts and this function is provided
+ * for this purpose.
+ *
+ * So we disable all interrupts and mark them IRQS_SUSPENDED except
+ * for those which are unused, those which are marked as not
+ * suspendable via an interrupt request with the flag IRQF_NO_SUSPEND
+ * set and those which are marked as active wakeup sources.
+ *
+ * The active wakeup sources are handled by the flow handler entry
+ * code which checks for the IRQD_WAKEUP_ARMED flag, suspends the
+ * interrupt and notifies the pm core about the wakeup.
 */
 void suspend_device_irqs(void)
 {
@@ -28,18 +116,36 @@ void suspend_device_irqs(void)
 
 	for_each_irq_desc(irq, desc) {
 		unsigned long flags;
+		bool sync;
 
 		raw_spin_lock_irqsave(&desc->lock, flags);
-		__disable_irq(desc, irq, true);
+		sync = suspend_device_irq(desc, irq);
 		raw_spin_unlock_irqrestore(&desc->lock, flags);
-	}
 
-	for_each_irq_desc(irq, desc)
-		if (desc->istate & IRQS_SUSPENDED)
+		if (sync)
 			synchronize_irq(irq);
+	}
 }
 EXPORT_SYMBOL_GPL(suspend_device_irqs);
 
+static void resume_irq(struct irq_desc *desc, int irq)
+{
+	irqd_clear(&desc->irq_data, IRQD_WAKEUP_ARMED);
+
+	if (desc->istate & IRQS_SUSPENDED)
+		goto resume;
+
+	/* Force resume the interrupt? */
+	if (!desc->force_resume_depth)
+		return;
+
+	/* Pretend that it got disabled ! */
+	desc->depth++;
+resume:
+	desc->istate &= ~IRQS_SUSPENDED;
+	__enable_irq(desc, irq);
+}
+
 static void resume_irqs(bool want_early)
 {
 	struct irq_desc *desc;
@@ -54,7 +160,7 @@ static void resume_irqs(bool want_early)
 			continue;
 
 		raw_spin_lock_irqsave(&desc->lock, flags);
-		__enable_irq(desc, irq, true);
+		resume_irq(desc, irq);
 		raw_spin_unlock_irqrestore(&desc->lock, flags);
 	}
 }
@@ -93,38 +199,3 @@ void resume_device_irqs(void)
 	resume_irqs(false);
 }
 EXPORT_SYMBOL_GPL(resume_device_irqs);
-
-/**
- * check_wakeup_irqs - check if any wake-up interrupts are pending
- */
-int check_wakeup_irqs(void)
-{
-	struct irq_desc *desc;
-	int irq;
-
-	for_each_irq_desc(irq, desc) {
-		/*
-		 * Only interrupts which are marked as wakeup source
-		 * and have not been disabled before the suspend check
-		 * can abort suspend.
-		 */
-		if (irqd_is_wakeup_set(&desc->irq_data)) {
-			if (desc->depth == 1 && desc->istate & IRQS_PENDING)
-				return -EBUSY;
-			continue;
-		}
-		/*
-		 * Check the non wakeup interrupts whether they need
-		 * to be masked before finally going into suspend
-		 * state. That's for hardware which has no wakeup
-		 * source configuration facility. The chip
-		 * implementation indicates that with
-		 * IRQCHIP_MASK_ON_SUSPEND.
-		 */
-		if (desc->istate & IRQS_SUSPENDED &&
-		    irq_desc_get_chip(desc)->flags & IRQCHIP_MASK_ON_SUSPEND)
-			mask_irq(desc);
-	}
-
-	return 0;
-}
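The nr_actions/no_suspend_depth/force_resume_depth counters introduced above turn the documentation's rule against mixing IRQF_NO_SUSPEND with ordinary sharing into a runtime diagnostic: the WARN_ON_ONCE() calls in irq_pm_install_action() fire whenever some, but not all, actions on a descriptor carry the flag. A sketch of the mismatch they catch (hypothetical handlers and cookies):

	#include <linux/interrupt.h>

	static irqreturn_t tick_handler(int irq, void *dev_id)
	{
		return IRQ_HANDLED;
	}

	static irqreturn_t io_handler(int irq, void *dev_id)
	{
		return IRQ_NONE;
	}

	static void example_mixed_shared_irq(int irq, void *tick, void *io)
	{
		int ret;

		/* First action: nr_actions = 1, no_suspend_depth = 1. */
		ret = request_irq(irq, tick_handler,
				  IRQF_SHARED | IRQF_NO_SUSPEND, "tick", tick);

		/*
		 * Second action without IRQF_NO_SUSPEND: nr_actions = 2,
		 * no_suspend_depth = 1, so irq_pm_install_action() hits
		 * WARN_ON_ONCE(no_suspend_depth != nr_actions).
		 */
		ret = request_irq(irq, io_handler, IRQF_SHARED, "io", io);
		(void)ret;
	}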
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 4ee194eb524b..7b323221b9ee 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -129,6 +129,7 @@ int freeze_processes(void)
 	if (!pm_freezing)
 		atomic_inc(&system_freezing_cnt);
 
+	pm_wakeup_clear();
 	printk("Freezing user space processes ... ");
 	pm_freezing = true;
 	error = try_to_freeze_tasks(true);