author     Rafael J. Wysocki <rafael.j.wysocki@intel.com>   2015-02-12 17:33:15 -0500
committer  Rafael J. Wysocki <rafael.j.wysocki@intel.com>   2015-02-13 17:49:36 -0500
commit     3810631332465d967ba5e27ea2c7dff2c9afac6c
tree       9000632cbf95f7d3f1e03640195708b3146c64dc /kernel
parent     18320f2a6871aaf2522f793fee4a67eccf5e131a
PM / sleep: Re-implement suspend-to-idle handling
In preparation for adding support for quiescing timers in the final
stage of suspend-to-idle transitions, rework the freeze_enter()
function (which makes the system wait on a wakeup event), the
freeze_wake() function (which terminates the suspend-to-idle loop),
and the mechanism by which deep idle states are entered during
suspend-to-idle.
First of all, introduce a simple state machine for suspend-to-idle
and make the code in question use it.
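The state machine itself is a three-state enum. The full patch adds it
to include/linux/suspend.h, which falls outside the 'kernel'-only diff
shown below; as a sketch (the comments are editorial):

  enum freeze_state {
          FREEZE_STATE_NONE,      /* Not suspending. */
          FREEZE_STATE_ENTER,     /* Enter suspend-to-idle. */
          FREEZE_STATE_WAKE,      /* Wake up from suspend-to-idle. */
  };

freeze_begin() resets the state to FREEZE_STATE_NONE, freeze_enter()
advances it to FREEZE_STATE_ENTER, and freeze_wake() moves it to
FREEZE_STATE_WAKE, which is the condition the wait_event() below tests.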
Second, prevent freeze_enter() from losing wakeup events due to race
conditions and ensure that the number of online CPUs won't change
while it is being executed. In addition to that, make it force
all of the CPUs to re-enter the idle loop in case they are in idle
states already (so they can enter deeper idle states if possible).
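The forced re-entry relies on the pre-existing wake_up_all_idle_cpus()
helper from kernel/smp.c, not part of this patch; roughly, it kicks
every idle remote CPU so that each one passes through the idle loop
again and can pick a state via the new suspend-to-idle path:

  void wake_up_all_idle_cpus(void)
  {
          int cpu;

          preempt_disable();
          for_each_online_cpu(cpu) {
                  /* The current CPU is not idle; skip it. */
                  if (cpu == smp_processor_id())
                          continue;

                  wake_up_if_idle(cpu);
          }
          preempt_enable();
  }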
Next, drop cpuidle_use_deepest_state() and replace use_deepest_state
checks in cpuidle_select() and cpuidle_reflect() with a single
suspend-to-idle state check in cpuidle_idle_call().
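That single check is a one-line inline helper; a sketch of its likely
shape in include/linux/suspend.h (also outside the diff shown here):

  static inline bool idle_should_freeze(void)
  {
          /* True only while freeze_enter() is waiting for a wakeup. */
          return unlikely(suspend_freeze_state == FREEZE_STATE_ENTER);
  }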
Finally, introduce cpuidle_enter_freeze(), which will simply find the
deepest idle state available to the given CPU and enter it using
cpuidle_enter().
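The new function lands in drivers/cpuidle/cpuidle.c, again outside the
'kernel' diffstat below; a sketch of its shape, assuming a
cpuidle_find_deepest_state() helper that walks the driver's states
without consulting the governor:

  void cpuidle_enter_freeze(void)
  {
          struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
          struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
          int index;

          /* Bypass the governor and pick the deepest state available. */
          index = cpuidle_find_deepest_state(drv, dev);
          if (index >= 0)
                  cpuidle_enter(drv, dev, index);
          else
                  arch_cpu_idle();        /* Fall back to the default idle method. */

          /*
           * Entering the state re-enables interrupts; return with them
           * off, as callers in the idle loop expect.
           */
          local_irq_disable();
  }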
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Diffstat (limited to 'kernel')
 kernel/power/suspend.c | 43 ++++++++++++++++++++++++++++++++++++-------
 kernel/sched/idle.c    | 16 ++++++++++++++++
 2 files changed, 52 insertions(+), 7 deletions(-)
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index c347e3ce3a55..b7d6b3a721b1 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -37,7 +37,9 @@ const char *pm_states[PM_SUSPEND_MAX];
 static const struct platform_suspend_ops *suspend_ops;
 static const struct platform_freeze_ops *freeze_ops;
 static DECLARE_WAIT_QUEUE_HEAD(suspend_freeze_wait_head);
-static bool suspend_freeze_wake;
+
+enum freeze_state __read_mostly suspend_freeze_state;
+static DEFINE_SPINLOCK(suspend_freeze_lock);
 
 void freeze_set_ops(const struct platform_freeze_ops *ops)
 {
@@ -48,22 +50,49 @@ void freeze_set_ops(const struct platform_freeze_ops *ops)
 
 static void freeze_begin(void)
 {
-        suspend_freeze_wake = false;
+        suspend_freeze_state = FREEZE_STATE_NONE;
 }
 
 static void freeze_enter(void)
 {
-        cpuidle_use_deepest_state(true);
+        spin_lock_irq(&suspend_freeze_lock);
+        if (pm_wakeup_pending())
+                goto out;
+
+        suspend_freeze_state = FREEZE_STATE_ENTER;
+        spin_unlock_irq(&suspend_freeze_lock);
+
+        get_online_cpus();
         cpuidle_resume();
-        wait_event(suspend_freeze_wait_head, suspend_freeze_wake);
+
+        /* Push all the CPUs into the idle loop. */
+        wake_up_all_idle_cpus();
+        pr_debug("PM: suspend-to-idle\n");
+        /* Make the current CPU wait so it can enter the idle loop too. */
+        wait_event(suspend_freeze_wait_head,
+                   suspend_freeze_state == FREEZE_STATE_WAKE);
+        pr_debug("PM: resume from suspend-to-idle\n");
+
         cpuidle_pause();
-        cpuidle_use_deepest_state(false);
+        put_online_cpus();
+
+        spin_lock_irq(&suspend_freeze_lock);
+
+ out:
+        suspend_freeze_state = FREEZE_STATE_NONE;
+        spin_unlock_irq(&suspend_freeze_lock);
 }
 
 void freeze_wake(void)
 {
-        suspend_freeze_wake = true;
-        wake_up(&suspend_freeze_wait_head);
+        unsigned long flags;
+
+        spin_lock_irqsave(&suspend_freeze_lock, flags);
+        if (suspend_freeze_state > FREEZE_STATE_NONE) {
+                suspend_freeze_state = FREEZE_STATE_WAKE;
+                wake_up(&suspend_freeze_wait_head);
+        }
+        spin_unlock_irqrestore(&suspend_freeze_lock, flags);
 }
 EXPORT_SYMBOL_GPL(freeze_wake);
 
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index aaf1c1d5cf5d..94b2d7b88a27 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -7,6 +7,7 @@
 #include <linux/tick.h>
 #include <linux/mm.h>
 #include <linux/stackprotector.h>
+#include <linux/suspend.h>
 
 #include <asm/tlb.h>
 
@@ -105,6 +106,21 @@ static void cpuidle_idle_call(void)
         rcu_idle_enter();
 
         /*
+         * Suspend-to-idle ("freeze") is a system state in which all user space
+         * has been frozen, all I/O devices have been suspended and the only
+         * activity happens here and in interrupts (if any). In that case bypass
+         * the cpuidle governor and go straight for the deepest idle state
+         * available. Possibly also suspend the local tick and the entire
+         * timekeeping to prevent timer interrupts from kicking us out of idle
+         * until a proper wakeup interrupt happens.
+         */
+        if (idle_should_freeze()) {
+                cpuidle_enter_freeze();
+                local_irq_enable();
+                goto exit_idle;
+        }
+
+        /*
          * Ask the cpuidle framework to choose a convenient idle state.
          * Fall back to the default arch idle method on errors.
          */