 drivers/cpuidle/cpuidle.c | 49
 include/linux/cpuidle.h   |  4
 include/linux/suspend.h   | 16
 kernel/power/suspend.c    | 43
 kernel/sched/idle.c       | 16
 5 files changed, 96 insertions(+), 32 deletions(-)
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 125150dc6e81..23a8d6cc8d30 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -19,6 +19,7 @@
 #include <linux/ktime.h>
 #include <linux/hrtimer.h>
 #include <linux/module.h>
+#include <linux/suspend.h>
 #include <trace/events/power.h>
 
 #include "cpuidle.h"
@@ -32,7 +33,6 @@ LIST_HEAD(cpuidle_detected_devices);
 static int enabled_devices;
 static int off __read_mostly;
 static int initialized __read_mostly;
-static bool use_deepest_state __read_mostly;
 
 int cpuidle_disabled(void)
 {
@@ -66,24 +66,9 @@ int cpuidle_play_dead(void)
 }
 
 /**
- * cpuidle_use_deepest_state - Enable/disable the "deepest idle" mode.
- * @enable: Whether enable or disable the feature.
- *
- * If the "deepest idle" mode is enabled, cpuidle will ignore the governor and
- * always use the state with the greatest exit latency (out of the states that
- * are not disabled).
- *
- * This function can only be called after cpuidle_pause() to avoid races.
- */
-void cpuidle_use_deepest_state(bool enable)
-{
-        use_deepest_state = enable;
-}
-
-/**
- * cpuidle_find_deepest_state - Find the state of the greatest exit latency.
- * @drv: cpuidle driver for a given CPU.
- * @dev: cpuidle device for a given CPU.
+ * cpuidle_find_deepest_state - Find deepest state meeting specific conditions.
+ * @drv: cpuidle driver for the given CPU.
+ * @dev: cpuidle device for the given CPU.
  */
 static int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
                                       struct cpuidle_device *dev)
@@ -105,6 +90,27 @@ static int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
 }
 
 /**
+ * cpuidle_enter_freeze - Enter an idle state suitable for suspend-to-idle.
+ *
+ * Find the deepest state available and enter it.
+ */
+void cpuidle_enter_freeze(void)
+{
+        struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
+        struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
+        int index;
+
+        index = cpuidle_find_deepest_state(drv, dev);
+        if (index >= 0)
+                cpuidle_enter(drv, dev, index);
+        else
+                arch_cpu_idle();
+
+        /* Interrupts are enabled again here. */
+        local_irq_disable();
+}
+
+/**
  * cpuidle_enter_state - enter the state and update stats
  * @dev: cpuidle device for this cpu
  * @drv: cpuidle driver for this cpu
@@ -166,9 +172,6 @@ int cpuidle_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
 	if (!drv || !dev || !dev->enabled)
 		return -EBUSY;
 
-	if (unlikely(use_deepest_state))
-		return cpuidle_find_deepest_state(drv, dev);
-
 	return cpuidle_curr_governor->select(drv, dev);
 }
 
@@ -200,7 +203,7 @@ int cpuidle_enter(struct cpuidle_driver *drv, struct cpuidle_device *dev,
  */
 void cpuidle_reflect(struct cpuidle_device *dev, int index)
 {
-	if (cpuidle_curr_governor->reflect && !unlikely(use_deepest_state))
+	if (cpuidle_curr_governor->reflect)
 		cpuidle_curr_governor->reflect(dev, index);
 }
 
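
For reference, cpuidle_find_deepest_state() -- whose kerneldoc is reworded above but whose body
is untouched by this patch -- picks the state with the greatest exit latency among the states
that are not disabled. A minimal sketch of that selection, assuming only the drv->states[] and
dev->states_usage[] fields that existed at the time (the function name below is illustrative,
not the kernel's):

/* Illustrative sketch only -- not code from this patch. */
static int find_deepest_state_sketch(struct cpuidle_driver *drv,
                                     struct cpuidle_device *dev)
{
        unsigned int max_latency = 0;
        int i, ret = -ENODEV;

        for (i = 0; i < drv->state_count; i++) {
                struct cpuidle_state *s = &drv->states[i];

                /* Skip states disabled by the driver or through sysfs. */
                if (s->disabled || dev->states_usage[i].disable)
                        continue;

                /* Remember the deepest (highest exit latency) usable state. */
                if (s->exit_latency > max_latency) {
                        max_latency = s->exit_latency;
                        ret = i;
                }
        }

        /* A negative result makes cpuidle_enter_freeze() fall back to arch_cpu_idle(). */
        return ret;
}
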
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index ab70f3bc44ad..f63aabf4ee90 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -141,7 +141,7 @@ extern void cpuidle_resume(void);
 extern int cpuidle_enable_device(struct cpuidle_device *dev);
 extern void cpuidle_disable_device(struct cpuidle_device *dev);
 extern int cpuidle_play_dead(void);
-extern void cpuidle_use_deepest_state(bool enable);
+extern void cpuidle_enter_freeze(void);
 
 extern struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev);
 #else
@@ -174,7 +174,7 @@ static inline int cpuidle_enable_device(struct cpuidle_device *dev)
 {return -ENODEV; }
 static inline void cpuidle_disable_device(struct cpuidle_device *dev) { }
 static inline int cpuidle_play_dead(void) {return -ENODEV; }
-static inline void cpuidle_use_deepest_state(bool enable) {}
+static inline void cpuidle_enter_freeze(void) { }
 static inline struct cpuidle_driver *cpuidle_get_cpu_driver(
         struct cpuidle_device *dev) {return NULL; }
 #endif
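
The CONFIG_CPU_IDLE=n stub keeps callers free of #ifdef CONFIG_CPU_IDLE blocks. A hypothetical
caller (the helper name below is illustrative, not part of this patch) compiles either way:

#include <linux/cpuidle.h>

/* Hypothetical helper, for illustration only: with CONFIG_CPU_IDLE=n the call
 * below resolves to the empty static inline stub declared above. */
static void s2idle_enter_this_cpu(void)
{
        cpuidle_enter_freeze();
}
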
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 3388c1b6f7d8..5efe743ce1e8 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -201,6 +201,21 @@ struct platform_freeze_ops {
  */
 extern void suspend_set_ops(const struct platform_suspend_ops *ops);
 extern int suspend_valid_only_mem(suspend_state_t state);
+
+/* Suspend-to-idle state machine. */
+enum freeze_state {
+        FREEZE_STATE_NONE,      /* Not suspended/suspending. */
+        FREEZE_STATE_ENTER,     /* Enter suspend-to-idle. */
+        FREEZE_STATE_WAKE,      /* Wake up from suspend-to-idle. */
+};
+
+extern enum freeze_state __read_mostly suspend_freeze_state;
+
+static inline bool idle_should_freeze(void)
+{
+        return unlikely(suspend_freeze_state == FREEZE_STATE_ENTER);
+}
+
 extern void freeze_set_ops(const struct platform_freeze_ops *ops);
 extern void freeze_wake(void);
 
@@ -228,6 +243,7 @@ extern int pm_suspend(suspend_state_t state);
 
 static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {}
 static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
+static inline bool idle_should_freeze(void) { return false; }
 static inline void freeze_set_ops(const struct platform_freeze_ops *ops) {}
 static inline void freeze_wake(void) {}
 #endif /* !CONFIG_SUSPEND */
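
Note that freeze_wake() (in kernel/power/suspend.c below) tests
"suspend_freeze_state > FREEZE_STATE_NONE", so the enumerator order NONE < ENTER < WAKE is part
of the contract. The intended transitions, as a sketch derived from the code in this patch:

/*
 * Sketch of the suspend-to-idle state machine driven by the code below:
 *
 *   freeze_begin()               state = FREEZE_STATE_NONE
 *   freeze_enter()               NONE  -> ENTER  (under suspend_freeze_lock)
 *   freeze_wake()                ENTER -> WAKE   (wakes suspend_freeze_wait_head)
 *   freeze_enter(), on return    WAKE  -> NONE
 */
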
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index c347e3ce3a55..b7d6b3a721b1 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -37,7 +37,9 @@ const char *pm_states[PM_SUSPEND_MAX];
 static const struct platform_suspend_ops *suspend_ops;
 static const struct platform_freeze_ops *freeze_ops;
 static DECLARE_WAIT_QUEUE_HEAD(suspend_freeze_wait_head);
-static bool suspend_freeze_wake;
+
+enum freeze_state __read_mostly suspend_freeze_state;
+static DEFINE_SPINLOCK(suspend_freeze_lock);
 
 void freeze_set_ops(const struct platform_freeze_ops *ops)
 {
@@ -48,22 +50,49 @@ void freeze_set_ops(const struct platform_freeze_ops *ops)
 
 static void freeze_begin(void)
 {
-        suspend_freeze_wake = false;
+        suspend_freeze_state = FREEZE_STATE_NONE;
 }
 
 static void freeze_enter(void)
 {
-        cpuidle_use_deepest_state(true);
+        spin_lock_irq(&suspend_freeze_lock);
+        if (pm_wakeup_pending())
+                goto out;
+
+        suspend_freeze_state = FREEZE_STATE_ENTER;
+        spin_unlock_irq(&suspend_freeze_lock);
+
+        get_online_cpus();
         cpuidle_resume();
-        wait_event(suspend_freeze_wait_head, suspend_freeze_wake);
+
+        /* Push all the CPUs into the idle loop. */
+        wake_up_all_idle_cpus();
+        pr_debug("PM: suspend-to-idle\n");
+        /* Make the current CPU wait so it can enter the idle loop too. */
+        wait_event(suspend_freeze_wait_head,
+                   suspend_freeze_state == FREEZE_STATE_WAKE);
+        pr_debug("PM: resume from suspend-to-idle\n");
+
         cpuidle_pause();
-        cpuidle_use_deepest_state(false);
+        put_online_cpus();
+
+        spin_lock_irq(&suspend_freeze_lock);
+
+ out:
+        suspend_freeze_state = FREEZE_STATE_NONE;
+        spin_unlock_irq(&suspend_freeze_lock);
 }
 
 void freeze_wake(void)
 {
-        suspend_freeze_wake = true;
-        wake_up(&suspend_freeze_wait_head);
+        unsigned long flags;
+
+        spin_lock_irqsave(&suspend_freeze_lock, flags);
+        if (suspend_freeze_state > FREEZE_STATE_NONE) {
+                suspend_freeze_state = FREEZE_STATE_WAKE;
+                wake_up(&suspend_freeze_wait_head);
+        }
+        spin_unlock_irqrestore(&suspend_freeze_lock, flags);
 }
 EXPORT_SYMBOL_GPL(freeze_wake);
 
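
The pm_wakeup_pending() check at the top of freeze_enter() closes a race: freeze_wake() only
issues a wakeup once the state machine has left FREEZE_STATE_NONE, so a wakeup that arrives
before that point must abort the transition instead of being lost. On the wakeup side, a
hypothetical interrupt handler for a wakeup-capable device (illustrative only, not part of this
patch) could look like this:

#include <linux/interrupt.h>
#include <linux/suspend.h>

/* Hypothetical wakeup path, for illustration: freeze_wake() is a no-op unless
 * suspend-to-idle is in progress (state past FREEZE_STATE_NONE), so calling it
 * unconditionally from a wakeup interrupt is harmless. */
static irqreturn_t example_wakeup_irq_handler(int irq, void *dev_id)
{
        freeze_wake();
        return IRQ_HANDLED;
}
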
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index aaf1c1d5cf5d..94b2d7b88a27 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -7,6 +7,7 @@
 #include <linux/tick.h>
 #include <linux/mm.h>
 #include <linux/stackprotector.h>
+#include <linux/suspend.h>
 
 #include <asm/tlb.h>
 
@@ -105,6 +106,21 @@ static void cpuidle_idle_call(void)
         rcu_idle_enter();
 
         /*
+         * Suspend-to-idle ("freeze") is a system state in which all user space
+         * has been frozen, all I/O devices have been suspended and the only
+         * activity happens here and in interrupts (if any). In that case bypass
+         * the cpuidle governor and go straight for the deepest idle state
+         * available. Possibly also suspend the local tick and the entire
+         * timekeeping to prevent timer interrupts from kicking us out of idle
+         * until a proper wakeup interrupt happens.
+         */
+        if (idle_should_freeze()) {
+                cpuidle_enter_freeze();
+                local_irq_enable();
+                goto exit_idle;
+        }
+
+        /*
          * Ask the cpuidle framework to choose a convenient idle state.
          * Fall back to the default arch idle method on errors.
          */
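
Putting the pieces together, the decision made by cpuidle_idle_call() after this patch reduces
to roughly the following (simplified sketch; RCU, tick handling and the error fallbacks of the
real function are omitted):

/* Simplified sketch of the idle-loop decision after this patch. */
static void cpuidle_idle_call_sketch(struct cpuidle_driver *drv,
                                     struct cpuidle_device *dev)
{
        int next_state, entered_state;

        if (idle_should_freeze()) {
                /* Suspend-to-idle: bypass the governor, go as deep as possible. */
                cpuidle_enter_freeze();
                local_irq_enable();
                return;
        }

        /* Normal idle: the governor selects a state, we enter it, then reflect. */
        next_state = cpuidle_select(drv, dev);
        entered_state = cpuidle_enter(drv, dev, next_state);
        cpuidle_reflect(dev, entered_state);
}
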