path: root/kernel
author     Linus Torvalds <torvalds@linux-foundation.org>  2014-10-09 16:07:43 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-10-09 16:07:43 -0400
commit     b528392669415dc1e53a047215e5ad6c2de879fc (patch)
tree       d19aa6e1464ef7c7d9f399ac8ec9a7707e5ba6b4 /kernel
parent     80213c03c4151d900cf293ef0fc51f8d88495e14 (diff)
parent     9f1a053296953c69d7f23511db9441290cb89e2c (diff)
Merge tag 'pm+acpi-3.18-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
Pull ACPI and power management updates from Rafael Wysocki:
 "Features-wise, to me the most important this time is a rework of
  wakeup interrupts handling in the core that makes them work
  consistently across all of the available sleep states, including
  suspend-to-idle.  Many thanks to Thomas Gleixner for his help with
  this work.

  Second is an update of the generic PM domains code that has been in
  need of some care for quite a while.  Unused code is being removed,
  DT support is being added and domains are now going to be attached
  to devices in bus type code in analogy with the ACPI PM domain.  The
  majority of work here was done by Ulf Hansson who also has been the
  most active developer this time.

  Apart from this we have a traditional ACPICA update, this time to
  upstream version 20140828 and a few ACPI wakeup interrupts handling
  patches on top of the general rework mentioned above.

  There also are several cpufreq commits including renaming the
  cpufreq-cpu0 driver to cpufreq-dt, as this is what implements generic
  DT-based cpufreq support, and a new DT-based idle states
  infrastructure for cpuidle.

  In addition to that, the ACPI LPSS driver is updated, ACPI support
  for Apple machines is improved, a few bugs are fixed and a few
  cleanups are made all over.

  Finally, the Adaptive Voltage Scaling (AVS) subsystem now has a tree
  maintained by Kevin Hilman that will be merged through the PM tree.

  Numbers-wise, the generic PM domains update takes the lead this time
  with 32 non-merge commits, second is cpufreq (15 commits) and the
  3rd place goes to the wakeup interrupts handling rework (13 commits).

  Specifics:

   - Rework the handling of wakeup IRQs by the IRQ core such that all
     of them will be switched over to "wakeup" mode in
     suspend_device_irqs() and in that mode the first interrupt will
     abort system suspend in progress or wake up the system if already
     in suspend-to-idle (or equivalent) without executing any interrupt
     handlers.  Among other things that eliminates the wakeup-related
     motivation to use the IRQF_NO_SUSPEND interrupt flag with
     interrupts which don't really need it and should not use it
     (Thomas Gleixner and Rafael Wysocki)

   - Switch over ACPI to handling wakeup interrupts with the help of
     the new mechanism introduced by the above IRQ core rework (Rafael
     Wysocki)

   - Rework the core generic PM domains code to eliminate code that's
     not used, add DT support and add a generic mechanism by which
     devices can be added to PM domains automatically during
     enumeration (Ulf Hansson, Geert Uytterhoeven and Tomasz Figa).

   - Add debugfs-based mechanics for debugging generic PM domains
     (Maciej Matraszek).

   - ACPICA update to upstream version 20140828.  Included are updates
     related to the SRAT and GTDT tables and the _PSx methods are in
     the METHOD_NAME list now (Bob Moore and Hanjun Guo).

   - Add _OSI("Darwin") support to the ACPI core (unfortunately, that
     can't really be done in a straightforward way) to prevent
     Thunderbolt from being turned off on Apple systems after boot (or
     after resume from system suspend) and rework the ACPI Smart
     Battery Subsystem (SBS) driver to work correctly with Apple
     platforms (Matthew Garrett and Andreas Noever).

   - ACPI LPSS (Low-Power Subsystem) driver update cleaning up the
     code, adding support for 133MHz I2C source clock on Intel Baytrail
     to it and making it avoid using UART RTS override with Auto Flow
     Control (Heikki Krogerus).

   - ACPI backlight updates removing the video_set_use_native_backlight
     quirk which is not necessary any more, making the code check the
     list of output devices returned by the _DOD method to avoid
     creating acpi_video interfaces that won't work and adding a quirk
     for Lenovo Ideapad Z570 (Hans de Goede, Aaron Lu and Stepan Bujnak)

   - New Win8 ACPI OSI quirks for some Dell laptops (Edward Lin)

   - Assorted ACPI code cleanups (Fabian Frederick, Rasmus Villemoes,
     Sudip Mukherjee, Yijing Wang, and Zhang Rui)

   - cpufreq core updates and cleanups (Viresh Kumar, Preeti U Murthy,
     Rasmus Villemoes)

   - cpufreq driver updates: cpufreq-cpu0/cpufreq-dt (driver name
     change among other things), ppc-corenet, powernv (Viresh Kumar,
     Preeti U Murthy, Shilpasri G Bhat, Lucas Stach)

   - cpuidle support for DT-based idle states infrastructure, new ARM64
     cpuidle driver, cpuidle core cleanups (Lorenzo Pieralisi, Rasmus
     Villemoes)

   - ARM big.LITTLE cpuidle driver updates: support for DT-based
     initialization and Exynos5800 compatible string (Lorenzo
     Pieralisi, Kevin Hilman)

   - Rework of the test_suspend kernel command line argument and a new
     trace event for console resume (Srinivas Pandruvada, Todd E
     Brandt)

   - Second attempt to optimize swsusp_free() (hibernation core) to
     make it avoid going through all PFNs which may be way too slow on
     some systems (Joerg Roedel)

   - devfreq updates (Paul Bolle, Punit Agrawal, Örjan Eide).

   - rockchip-io Adaptive Voltage Scaling (AVS) driver and AVS entry
     update in MAINTAINERS (Heiko Stübner, Kevin Hilman)

   - PM core fix related to clock management (Geert Uytterhoeven)

   - PM core's sysfs code cleanup (Johannes Berg)"

* tag 'pm+acpi-3.18-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm: (105 commits)
  ACPI / fan: printk replacement
  PM / clk: Fix crash in clocks management code if !CONFIG_PM_RUNTIME
  PM / Domains: Rename cpu_data to cpuidle_data
  cpufreq: cpufreq-dt: fix potential double put of cpu OF node
  cpufreq: cpu0: rename driver and internals to 'cpufreq_dt'
  PM / hibernate: Iterate over set bits instead of PFNs in swsusp_free()
  cpufreq: ppc-corenet: remove duplicate update of cpu_data
  ACPI / sleep: Rework the handling of ACPI GPE wakeup from suspend-to-idle
  PM / sleep: Rename platform suspend/resume functions in suspend.c
  PM / sleep: Export dpm_suspend_late/noirq() and dpm_resume_early/noirq()
  ACPICA: Introduce acpi_enable_all_wakeup_gpes()
  ACPICA: Clear all non-wakeup GPEs in acpi_hw_enable_wakeup_gpe_block()
  ACPI / video: check _DOD list when creating backlight devices
  PM / Domains: Move dev_pm_domain_attach|detach() to pm_domain.h
  cpufreq: Replace strnicmp with strncasecmp
  cpufreq: powernv: Set the cpus to nominal frequency during reboot/kexec
  cpufreq: powernv: Set the pstate of the last hotplugged out cpu in policy->cpus to minimum
  cpufreq: Allow stop CPU callback to be used by all cpufreq drivers
  PM / devfreq: exynos: Enable building exynos PPMU as module
  PM / devfreq: Export helper functions for drivers
  ...
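The wakeup-IRQ rework summarized above changes the driver-facing convention: an interrupt that should wake the system is left suspendable (no IRQF_NO_SUSPEND) and is instead marked as a wake source, so suspend_device_irqs() arms it and the flow handlers abort suspend via pm_system_wakeup() when it fires. A minimal, illustrative driver-side sketch (the foo_* names and probe flow are hypothetical, not part of this merge):

        /* Hypothetical driver whose IRQ is a wakeup source, not IRQF_NO_SUSPEND. */
        #include <linux/device.h>
        #include <linux/interrupt.h>
        #include <linux/pm_wakeup.h>

        static irqreturn_t foo_irq_handler(int irq, void *dev_id)
        {
                /* Regular handling; with the rework this is not run while the
                 * line is armed for wakeup during system suspend. */
                return IRQ_HANDLED;
        }

        static int foo_setup_irq(struct device *dev, unsigned int irq)
        {
                int ret;

                /* No IRQF_NO_SUSPEND: the line may be suspended like any other. */
                ret = devm_request_irq(dev, irq, foo_irq_handler, 0, "foo", dev);
                if (ret)
                        return ret;

                /* Declare the device and its IRQ wakeup capable; with that,
                 * suspend_device_irqs() arms the line instead of disabling it. */
                device_init_wakeup(dev, true);
                return enable_irq_wake(irq);
        }

Timer-style interrupts that genuinely must keep firing through suspend_device_irqs() still request the line with IRQF_NO_SUSPEND; the new per-descriptor no_suspend_depth/force_resume_depth counters added in kernel/irq/pm.c below warn when such flags are mixed inconsistently on a shared line.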
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/irq/chip.c            85
-rw-r--r--  kernel/irq/internals.h       16
-rw-r--r--  kernel/irq/manage.c          32
-rw-r--r--  kernel/irq/pm.c             159
-rw-r--r--  kernel/power/Kconfig          4
-rw-r--r--  kernel/power/process.c        1
-rw-r--r--  kernel/power/snapshot.c      54
-rw-r--r--  kernel/power/suspend.c       51
-rw-r--r--  kernel/power/suspend_test.c  32
9 files changed, 308 insertions, 126 deletions
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 6223fab9a9d2..8fb52e9bddc1 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -342,6 +342,31 @@ static bool irq_check_poll(struct irq_desc *desc)
 	return irq_wait_for_poll(desc);
 }
 
+static bool irq_may_run(struct irq_desc *desc)
+{
+	unsigned int mask = IRQD_IRQ_INPROGRESS | IRQD_WAKEUP_ARMED;
+
+	/*
+	 * If the interrupt is not in progress and is not an armed
+	 * wakeup interrupt, proceed.
+	 */
+	if (!irqd_has_set(&desc->irq_data, mask))
+		return true;
+
+	/*
+	 * If the interrupt is an armed wakeup source, mark it pending
+	 * and suspended, disable it and notify the pm core about the
+	 * event.
+	 */
+	if (irq_pm_check_wakeup(desc))
+		return false;
+
+	/*
+	 * Handle a potential concurrent poll on a different core.
+	 */
+	return irq_check_poll(desc);
+}
+
 /**
  *	handle_simple_irq - Simple and software-decoded IRQs.
  *	@irq:	the interrupt number
@@ -359,9 +384,8 @@ handle_simple_irq(unsigned int irq, struct irq_desc *desc)
 {
 	raw_spin_lock(&desc->lock);
 
-	if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
-		if (!irq_check_poll(desc))
-			goto out_unlock;
+	if (!irq_may_run(desc))
+		goto out_unlock;
 
 	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
 	kstat_incr_irqs_this_cpu(irq, desc);
@@ -412,9 +436,8 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
 	raw_spin_lock(&desc->lock);
 	mask_ack_irq(desc);
 
-	if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
-		if (!irq_check_poll(desc))
-			goto out_unlock;
+	if (!irq_may_run(desc))
+		goto out_unlock;
 
 	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
 	kstat_incr_irqs_this_cpu(irq, desc);
@@ -485,9 +508,8 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
 
 	raw_spin_lock(&desc->lock);
 
-	if (unlikely(irqd_irq_inprogress(&desc->irq_data)))
-		if (!irq_check_poll(desc))
-			goto out;
+	if (!irq_may_run(desc))
+		goto out;
 
 	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
 	kstat_incr_irqs_this_cpu(irq, desc);
@@ -541,19 +563,23 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
 	raw_spin_lock(&desc->lock);
 
 	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
+
+	if (!irq_may_run(desc)) {
+		desc->istate |= IRQS_PENDING;
+		mask_ack_irq(desc);
+		goto out_unlock;
+	}
+
 	/*
-	 * If we're currently running this IRQ, or its disabled,
-	 * we shouldn't process the IRQ. Mark it pending, handle
-	 * the necessary masking and go out
+	 * If its disabled or no action available then mask it and get
+	 * out of here.
 	 */
-	if (unlikely(irqd_irq_disabled(&desc->irq_data) ||
-		     irqd_irq_inprogress(&desc->irq_data) || !desc->action)) {
-		if (!irq_check_poll(desc)) {
-			desc->istate |= IRQS_PENDING;
-			mask_ack_irq(desc);
-			goto out_unlock;
-		}
+	if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
+		desc->istate |= IRQS_PENDING;
+		mask_ack_irq(desc);
+		goto out_unlock;
 	}
+
 	kstat_incr_irqs_this_cpu(irq, desc);
 
 	/* Start handling the irq */
@@ -602,18 +628,21 @@ void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc)
 	raw_spin_lock(&desc->lock);
 
 	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
+
+	if (!irq_may_run(desc)) {
+		desc->istate |= IRQS_PENDING;
+		goto out_eoi;
+	}
+
 	/*
-	 * If we're currently running this IRQ, or its disabled,
-	 * we shouldn't process the IRQ. Mark it pending, handle
-	 * the necessary masking and go out
+	 * If its disabled or no action available then mask it and get
+	 * out of here.
 	 */
-	if (unlikely(irqd_irq_disabled(&desc->irq_data) ||
-		     irqd_irq_inprogress(&desc->irq_data) || !desc->action)) {
-		if (!irq_check_poll(desc)) {
-			desc->istate |= IRQS_PENDING;
-			goto out_eoi;
-		}
+	if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
+		desc->istate |= IRQS_PENDING;
+		goto out_eoi;
 	}
+
 	kstat_incr_irqs_this_cpu(irq, desc);
 
 	do {
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index 099ea2e0eb88..4332d766619d 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -63,8 +63,8 @@ enum {
 
 extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
 		unsigned long flags);
-extern void __disable_irq(struct irq_desc *desc, unsigned int irq, bool susp);
-extern void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume);
+extern void __disable_irq(struct irq_desc *desc, unsigned int irq);
+extern void __enable_irq(struct irq_desc *desc, unsigned int irq);
 
 extern int irq_startup(struct irq_desc *desc, bool resend);
 extern void irq_shutdown(struct irq_desc *desc);
@@ -194,3 +194,15 @@ static inline void kstat_incr_irqs_this_cpu(unsigned int irq, struct irq_desc *d
 	__this_cpu_inc(*desc->kstat_irqs);
 	__this_cpu_inc(kstat.irqs_sum);
 }
+
+#ifdef CONFIG_PM_SLEEP
+bool irq_pm_check_wakeup(struct irq_desc *desc);
+void irq_pm_install_action(struct irq_desc *desc, struct irqaction *action);
+void irq_pm_remove_action(struct irq_desc *desc, struct irqaction *action);
+#else
+static inline bool irq_pm_check_wakeup(struct irq_desc *desc) { return false; }
+static inline void
+irq_pm_install_action(struct irq_desc *desc, struct irqaction *action) { }
+static inline void
+irq_pm_remove_action(struct irq_desc *desc, struct irqaction *action) { }
+#endif
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 3dc6a61bf06a..0a9104b4608b 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -382,14 +382,8 @@ setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
 }
 #endif
 
-void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend)
+void __disable_irq(struct irq_desc *desc, unsigned int irq)
 {
-	if (suspend) {
-		if (!desc->action || (desc->action->flags & IRQF_NO_SUSPEND))
-			return;
-		desc->istate |= IRQS_SUSPENDED;
-	}
-
 	if (!desc->depth++)
 		irq_disable(desc);
 }
@@ -401,7 +395,7 @@ static int __disable_irq_nosync(unsigned int irq)
 
 	if (!desc)
 		return -EINVAL;
-	__disable_irq(desc, irq, false);
+	__disable_irq(desc, irq);
 	irq_put_desc_busunlock(desc, flags);
 	return 0;
 }
@@ -442,20 +436,8 @@ void disable_irq(unsigned int irq)
 }
 EXPORT_SYMBOL(disable_irq);
 
-void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
+void __enable_irq(struct irq_desc *desc, unsigned int irq)
 {
-	if (resume) {
-		if (!(desc->istate & IRQS_SUSPENDED)) {
-			if (!desc->action)
-				return;
-			if (!(desc->action->flags & IRQF_FORCE_RESUME))
-				return;
-			/* Pretend that it got disabled ! */
-			desc->depth++;
-		}
-		desc->istate &= ~IRQS_SUSPENDED;
-	}
-
 	switch (desc->depth) {
 	case 0:
  err_out:
@@ -497,7 +479,7 @@ void enable_irq(unsigned int irq)
 		 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
 		goto out;
 
-	__enable_irq(desc, irq, false);
+	__enable_irq(desc, irq);
 out:
 	irq_put_desc_busunlock(desc, flags);
 }
@@ -1218,6 +1200,8 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 	new->irq = irq;
 	*old_ptr = new;
 
+	irq_pm_install_action(desc, new);
+
 	/* Reset broken irq detection when installing new handler */
 	desc->irq_count = 0;
 	desc->irqs_unhandled = 0;
@@ -1228,7 +1212,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 	 */
 	if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
 		desc->istate &= ~IRQS_SPURIOUS_DISABLED;
-		__enable_irq(desc, irq, false);
+		__enable_irq(desc, irq);
 	}
 
 	raw_spin_unlock_irqrestore(&desc->lock, flags);
@@ -1336,6 +1320,8 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
 	/* Found it - now remove it from the list of entries: */
 	*action_ptr = action->next;
 
+	irq_pm_remove_action(desc, action);
+
 	/* If this was the last handler, shut down the IRQ line: */
 	if (!desc->action) {
 		irq_shutdown(desc);
diff --git a/kernel/irq/pm.c b/kernel/irq/pm.c
index abcd6ca86cb7..3ca532592704 100644
--- a/kernel/irq/pm.c
+++ b/kernel/irq/pm.c
@@ -9,17 +9,105 @@
 #include <linux/irq.h>
 #include <linux/module.h>
 #include <linux/interrupt.h>
+#include <linux/suspend.h>
 #include <linux/syscore_ops.h>
 
 #include "internals.h"
 
+bool irq_pm_check_wakeup(struct irq_desc *desc)
+{
+	if (irqd_is_wakeup_armed(&desc->irq_data)) {
+		irqd_clear(&desc->irq_data, IRQD_WAKEUP_ARMED);
+		desc->istate |= IRQS_SUSPENDED | IRQS_PENDING;
+		desc->depth++;
+		irq_disable(desc);
+		pm_system_wakeup();
+		return true;
+	}
+	return false;
+}
+
+/*
+ * Called from __setup_irq() with desc->lock held after @action has
+ * been installed in the action chain.
+ */
+void irq_pm_install_action(struct irq_desc *desc, struct irqaction *action)
+{
+	desc->nr_actions++;
+
+	if (action->flags & IRQF_FORCE_RESUME)
+		desc->force_resume_depth++;
+
+	WARN_ON_ONCE(desc->force_resume_depth &&
+		     desc->force_resume_depth != desc->nr_actions);
+
+	if (action->flags & IRQF_NO_SUSPEND)
+		desc->no_suspend_depth++;
+
+	WARN_ON_ONCE(desc->no_suspend_depth &&
+		     desc->no_suspend_depth != desc->nr_actions);
+}
+
+/*
+ * Called from __free_irq() with desc->lock held after @action has
+ * been removed from the action chain.
+ */
+void irq_pm_remove_action(struct irq_desc *desc, struct irqaction *action)
+{
+	desc->nr_actions--;
+
+	if (action->flags & IRQF_FORCE_RESUME)
+		desc->force_resume_depth--;
+
+	if (action->flags & IRQF_NO_SUSPEND)
+		desc->no_suspend_depth--;
+}
+
+static bool suspend_device_irq(struct irq_desc *desc, int irq)
+{
+	if (!desc->action || desc->no_suspend_depth)
+		return false;
+
+	if (irqd_is_wakeup_set(&desc->irq_data)) {
+		irqd_set(&desc->irq_data, IRQD_WAKEUP_ARMED);
+		/*
+		 * We return true here to force the caller to issue
+		 * synchronize_irq(). We need to make sure that the
+		 * IRQD_WAKEUP_ARMED is visible before we return from
+		 * suspend_device_irqs().
+		 */
+		return true;
+	}
+
+	desc->istate |= IRQS_SUSPENDED;
+	__disable_irq(desc, irq);
+
+	/*
+	 * Hardware which has no wakeup source configuration facility
+	 * requires that the non wakeup interrupts are masked at the
+	 * chip level. The chip implementation indicates that with
+	 * IRQCHIP_MASK_ON_SUSPEND.
+	 */
+	if (irq_desc_get_chip(desc)->flags & IRQCHIP_MASK_ON_SUSPEND)
+		mask_irq(desc);
+	return true;
+}
+
 /**
  * suspend_device_irqs - disable all currently enabled interrupt lines
  *
- * During system-wide suspend or hibernation device drivers need to be prevented
- * from receiving interrupts and this function is provided for this purpose.
- * It marks all interrupt lines in use, except for the timer ones, as disabled
- * and sets the IRQS_SUSPENDED flag for each of them.
+ * During system-wide suspend or hibernation device drivers need to be
+ * prevented from receiving interrupts and this function is provided
+ * for this purpose.
+ *
+ * So we disable all interrupts and mark them IRQS_SUSPENDED except
+ * for those which are unused, those which are marked as not
+ * suspendable via an interrupt request with the flag IRQF_NO_SUSPEND
+ * set and those which are marked as active wakeup sources.
+ *
+ * The active wakeup sources are handled by the flow handler entry
+ * code which checks for the IRQD_WAKEUP_ARMED flag, suspends the
+ * interrupt and notifies the pm core about the wakeup.
  */
 void suspend_device_irqs(void)
 {
@@ -28,18 +116,36 @@ void suspend_device_irqs(void)
 
 	for_each_irq_desc(irq, desc) {
 		unsigned long flags;
+		bool sync;
 
 		raw_spin_lock_irqsave(&desc->lock, flags);
-		__disable_irq(desc, irq, true);
+		sync = suspend_device_irq(desc, irq);
 		raw_spin_unlock_irqrestore(&desc->lock, flags);
-	}
 
-	for_each_irq_desc(irq, desc)
-		if (desc->istate & IRQS_SUSPENDED)
+		if (sync)
 			synchronize_irq(irq);
+	}
 }
 EXPORT_SYMBOL_GPL(suspend_device_irqs);
 
+static void resume_irq(struct irq_desc *desc, int irq)
+{
+	irqd_clear(&desc->irq_data, IRQD_WAKEUP_ARMED);
+
+	if (desc->istate & IRQS_SUSPENDED)
+		goto resume;
+
+	/* Force resume the interrupt? */
+	if (!desc->force_resume_depth)
+		return;
+
+	/* Pretend that it got disabled ! */
+	desc->depth++;
+resume:
+	desc->istate &= ~IRQS_SUSPENDED;
+	__enable_irq(desc, irq);
+}
+
 static void resume_irqs(bool want_early)
 {
 	struct irq_desc *desc;
@@ -54,7 +160,7 @@ static void resume_irqs(bool want_early)
 			continue;
 
 		raw_spin_lock_irqsave(&desc->lock, flags);
-		__enable_irq(desc, irq, true);
+		resume_irq(desc, irq);
 		raw_spin_unlock_irqrestore(&desc->lock, flags);
 	}
 }
@@ -93,38 +199,3 @@ void resume_device_irqs(void)
 	resume_irqs(false);
 }
 EXPORT_SYMBOL_GPL(resume_device_irqs);
-
-/**
- * check_wakeup_irqs - check if any wake-up interrupts are pending
- */
-int check_wakeup_irqs(void)
-{
-	struct irq_desc *desc;
-	int irq;
-
-	for_each_irq_desc(irq, desc) {
-		/*
-		 * Only interrupts which are marked as wakeup source
-		 * and have not been disabled before the suspend check
-		 * can abort suspend.
-		 */
-		if (irqd_is_wakeup_set(&desc->irq_data)) {
-			if (desc->depth == 1 && desc->istate & IRQS_PENDING)
-				return -EBUSY;
-			continue;
-		}
-		/*
-		 * Check the non wakeup interrupts whether they need
-		 * to be masked before finally going into suspend
-		 * state. That's for hardware which has no wakeup
-		 * source configuration facility. The chip
-		 * implementation indicates that with
-		 * IRQCHIP_MASK_ON_SUSPEND.
-		 */
-		if (desc->istate & IRQS_SUSPENDED &&
-		    irq_desc_get_chip(desc)->flags & IRQCHIP_MASK_ON_SUSPEND)
-			mask_irq(desc);
-	}
-
-	return 0;
-}
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index e4e4121fa327..bbef57f5bdfd 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -302,6 +302,10 @@ config PM_GENERIC_DOMAINS_RUNTIME
 	def_bool y
 	depends on PM_RUNTIME && PM_GENERIC_DOMAINS
 
+config PM_GENERIC_DOMAINS_OF
+	def_bool y
+	depends on PM_GENERIC_DOMAINS && OF
+
 config CPU_PM
 	bool
 	depends on SUSPEND || CPU_IDLE
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 4ee194eb524b..7b323221b9ee 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -129,6 +129,7 @@ int freeze_processes(void)
 	if (!pm_freezing)
 		atomic_inc(&system_freezing_cnt);
 
+	pm_wakeup_clear();
 	printk("Freezing user space processes ... ");
 	pm_freezing = true;
 	error = try_to_freeze_tasks(true);
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index f1604d8cf489..791a61892bb5 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -725,6 +725,14 @@ static void memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn)
 	clear_bit(bit, addr);
 }
 
+static void memory_bm_clear_current(struct memory_bitmap *bm)
+{
+	int bit;
+
+	bit = max(bm->cur.node_bit - 1, 0);
+	clear_bit(bit, bm->cur.node->data);
+}
+
 static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
 {
 	void *addr;
@@ -1333,23 +1341,39 @@ static struct memory_bitmap copy_bm;
 
 void swsusp_free(void)
 {
-	struct zone *zone;
-	unsigned long pfn, max_zone_pfn;
+	unsigned long fb_pfn, fr_pfn;
 
-	for_each_populated_zone(zone) {
-		max_zone_pfn = zone_end_pfn(zone);
-		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
-			if (pfn_valid(pfn)) {
-				struct page *page = pfn_to_page(pfn);
-
-				if (swsusp_page_is_forbidden(page) &&
-				    swsusp_page_is_free(page)) {
-					swsusp_unset_page_forbidden(page);
-					swsusp_unset_page_free(page);
-					__free_page(page);
-				}
-			}
+	if (!forbidden_pages_map || !free_pages_map)
+		goto out;
+
+	memory_bm_position_reset(forbidden_pages_map);
+	memory_bm_position_reset(free_pages_map);
+
+loop:
+	fr_pfn = memory_bm_next_pfn(free_pages_map);
+	fb_pfn = memory_bm_next_pfn(forbidden_pages_map);
+
+	/*
+	 * Find the next bit set in both bitmaps. This is guaranteed to
+	 * terminate when fb_pfn == fr_pfn == BM_END_OF_MAP.
+	 */
+	do {
+		if (fb_pfn < fr_pfn)
+			fb_pfn = memory_bm_next_pfn(forbidden_pages_map);
+		if (fr_pfn < fb_pfn)
+			fr_pfn = memory_bm_next_pfn(free_pages_map);
+	} while (fb_pfn != fr_pfn);
+
+	if (fr_pfn != BM_END_OF_MAP && pfn_valid(fr_pfn)) {
+		struct page *page = pfn_to_page(fr_pfn);
+
+		memory_bm_clear_current(forbidden_pages_map);
+		memory_bm_clear_current(free_pages_map);
+		__free_page(page);
+		goto loop;
 	}
+
+out:
 	nr_copy_pages = 0;
 	nr_meta_pages = 0;
 	restore_pblist = NULL;
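For reference, the loop added to swsusp_free() above is a plain merge-style intersection of two ascending PFN streams; it terminates because BM_END_OF_MAP compares greater than any valid PFN, so both iterators eventually meet there. A stand-alone, illustrative sketch of the same pattern (the iterator callbacks are hypothetical stand-ins for memory_bm_next_pfn(), not part of this merge):

        /* Illustrative only: return the next value present in both ascending
         * streams, or 'end' once a stream is exhausted.  'end' must compare
         * greater than every real value (as BM_END_OF_MAP does). */
        static unsigned long next_common_pfn(unsigned long (*next_a)(void),
                                             unsigned long (*next_b)(void),
                                             unsigned long end)
        {
                unsigned long a = next_a();
                unsigned long b = next_b();

                while (a != b) {
                        if (a < b)
                                a = next_a();   /* a is behind, advance it */
                        else
                                b = next_b();   /* b is behind, advance it */
                }
                return a;       /* common value, or 'end' for both streams */
        }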
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 18c62195660f..4ca9a33ff620 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -146,17 +146,29 @@ static int platform_suspend_prepare(suspend_state_t state)
 
 static int platform_suspend_prepare_late(suspend_state_t state)
 {
+	return state == PM_SUSPEND_FREEZE && freeze_ops->prepare ?
+		freeze_ops->prepare() : 0;
+}
+
+static int platform_suspend_prepare_noirq(suspend_state_t state)
+{
 	return state != PM_SUSPEND_FREEZE && suspend_ops->prepare_late ?
 		suspend_ops->prepare_late() : 0;
 }
 
-static void platform_suspend_wake(suspend_state_t state)
+static void platform_resume_noirq(suspend_state_t state)
 {
 	if (state != PM_SUSPEND_FREEZE && suspend_ops->wake)
 		suspend_ops->wake();
 }
 
-static void platform_suspend_finish(suspend_state_t state)
+static void platform_resume_early(suspend_state_t state)
+{
+	if (state == PM_SUSPEND_FREEZE && freeze_ops->restore)
+		freeze_ops->restore();
+}
+
+static void platform_resume_finish(suspend_state_t state)
 {
 	if (state != PM_SUSPEND_FREEZE && suspend_ops->finish)
 		suspend_ops->finish();
@@ -172,7 +184,7 @@ static int platform_suspend_begin(suspend_state_t state)
 	return 0;
 }
 
-static void platform_suspend_end(suspend_state_t state)
+static void platform_resume_end(suspend_state_t state)
 {
 	if (state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->end)
 		freeze_ops->end();
@@ -180,7 +192,7 @@ static void platform_suspend_end(suspend_state_t state)
 		suspend_ops->end();
 }
 
-static void platform_suspend_recover(suspend_state_t state)
+static void platform_recover(suspend_state_t state)
 {
 	if (state != PM_SUSPEND_FREEZE && suspend_ops->recover)
 		suspend_ops->recover();
@@ -265,13 +277,22 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
 	if (error)
 		goto Platform_finish;
 
-	error = dpm_suspend_end(PMSG_SUSPEND);
+	error = dpm_suspend_late(PMSG_SUSPEND);
 	if (error) {
-		printk(KERN_ERR "PM: Some devices failed to power down\n");
+		printk(KERN_ERR "PM: late suspend of devices failed\n");
 		goto Platform_finish;
 	}
 	error = platform_suspend_prepare_late(state);
 	if (error)
+		goto Devices_early_resume;
+
+	error = dpm_suspend_noirq(PMSG_SUSPEND);
+	if (error) {
+		printk(KERN_ERR "PM: noirq suspend of devices failed\n");
+		goto Platform_early_resume;
+	}
+	error = platform_suspend_prepare_noirq(state);
+	if (error)
 		goto Platform_wake;
 
 	if (suspend_test(TEST_PLATFORM))
@@ -318,11 +339,17 @@ static int suspend_enter(suspend_state_t state, bool *wakeup)
 	enable_nonboot_cpus();
 
  Platform_wake:
-	platform_suspend_wake(state);
-	dpm_resume_start(PMSG_RESUME);
+	platform_resume_noirq(state);
+	dpm_resume_noirq(PMSG_RESUME);
+
+ Platform_early_resume:
+	platform_resume_early(state);
+
+ Devices_early_resume:
+	dpm_resume_early(PMSG_RESUME);
 
  Platform_finish:
-	platform_suspend_finish(state);
+	platform_resume_finish(state);
 	return error;
 }
 
@@ -361,14 +388,16 @@ int suspend_devices_and_enter(suspend_state_t state)
 	suspend_test_start();
 	dpm_resume_end(PMSG_RESUME);
 	suspend_test_finish("resume devices");
+	trace_suspend_resume(TPS("resume_console"), state, true);
 	resume_console();
+	trace_suspend_resume(TPS("resume_console"), state, false);
 
  Close:
-	platform_suspend_end(state);
+	platform_resume_end(state);
 	return error;
 
  Recover_platform:
-	platform_suspend_recover(state);
+	platform_recover(state);
 	goto Resume_devices;
 }
 
diff --git a/kernel/power/suspend_test.c b/kernel/power/suspend_test.c
index bd91bc177c93..084452e34a12 100644
--- a/kernel/power/suspend_test.c
+++ b/kernel/power/suspend_test.c
@@ -22,6 +22,8 @@
 #define TEST_SUSPEND_SECONDS	10
 
 static unsigned long suspend_test_start_time;
+static u32 test_repeat_count_max = 1;
+static u32 test_repeat_count_current;
 
 void suspend_test_start(void)
 {
@@ -74,6 +76,7 @@ static void __init test_wakealarm(struct rtc_device *rtc, suspend_state_t state)
 	int status;
 
 	/* this may fail if the RTC hasn't been initialized */
+repeat:
 	status = rtc_read_time(rtc, &alm.time);
 	if (status < 0) {
 		printk(err_readtime, dev_name(&rtc->dev), status);
@@ -100,10 +103,21 @@ static void __init test_wakealarm(struct rtc_device *rtc, suspend_state_t state)
 	if (state == PM_SUSPEND_STANDBY) {
 		printk(info_test, pm_states[state]);
 		status = pm_suspend(state);
+		if (status < 0)
+			state = PM_SUSPEND_FREEZE;
 	}
+	if (state == PM_SUSPEND_FREEZE) {
+		printk(info_test, pm_states[state]);
+		status = pm_suspend(state);
+	}
+
 	if (status < 0)
 		printk(err_suspend, status);
 
+	test_repeat_count_current++;
+	if (test_repeat_count_current < test_repeat_count_max)
+		goto repeat;
+
 	/* Some platforms can't detect that the alarm triggered the
 	 * wakeup, or (accordingly) disable it after it afterwards.
 	 * It's supposed to give oneshot behavior; cope.
@@ -137,16 +151,28 @@ static char warn_bad_state[] __initdata =
 static int __init setup_test_suspend(char *value)
 {
 	int i;
+	char *repeat;
+	char *suspend_type;
 
-	/* "=mem" ==> "mem" */
+	/* example : "=mem[,N]" ==> "mem[,N]" */
 	value++;
+	suspend_type = strsep(&value, ",");
+	if (!suspend_type)
+		return 0;
+
+	repeat = strsep(&value, ",");
+	if (repeat) {
+		if (kstrtou32(repeat, 0, &test_repeat_count_max))
+			return 0;
+	}
+
 	for (i = 0; pm_labels[i]; i++)
-		if (!strcmp(pm_labels[i], value)) {
+		if (!strcmp(pm_labels[i], suspend_type)) {
 			test_state_label = pm_labels[i];
 			return 0;
 		}
 
-	printk(warn_bad_state, value);
+	printk(warn_bad_state, suspend_type);
 	return 0;
 }
 __setup("test_suspend", setup_test_suspend);
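With the setup_test_suspend() changes above, the test_suspend= boot parameter accepts an optional repeat count after the state label (parsed with kstrtou32() into test_repeat_count_max), and the RTC-alarm test now falls back to the freeze state when a standby attempt fails. Illustrative kernel command line fragments (the state label must match an entry in pm_labels on the running kernel):

        test_suspend=mem          run the RTC wakealarm suspend test once, as before
        test_suspend=freeze,10    repeat the test ten times using the freeze state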