author    Rafael J. Wysocki <rafael.j.wysocki@intel.com>  2014-05-06 19:50:57 -0400
committer Rafael J. Wysocki <rafael.j.wysocki@intel.com>  2014-05-06 19:50:57 -0400
commit    aa9abe2c8eddf426c9b89757129b132c484ee223 (patch)
tree      1950d527609f6236405193495a410526a7eb19ea
parent    2c730785d9532d2a9c46e059bd6a6c9a764c539f (diff)
parent    a6220fc19afc07fe77cfd16f5b8e568615517091 (diff)
Merge branch 'pm-cpuidle' into pm-sleep
-rw-r--r--  drivers/cpuidle/cpuidle.c         | 55
-rw-r--r--  drivers/cpuidle/governors/menu.c  | 17
-rw-r--r--  include/linux/cpuidle.h           |  7
-rw-r--r--  kernel/power/suspend.c            |  2
-rw-r--r--  kernel/sched/idle.c               | 20
5 files changed, 62 insertions(+), 39 deletions(-)
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 8236746e46bb..cb7019977c50 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -32,6 +32,7 @@ LIST_HEAD(cpuidle_detected_devices);
 static int enabled_devices;
 static int off __read_mostly;
 static int initialized __read_mostly;
+static bool use_deepest_state __read_mostly;
 
 int cpuidle_disabled(void)
 {
@@ -65,23 +66,42 @@ int cpuidle_play_dead(void)
 }
 
 /**
- * cpuidle_enabled - check if the cpuidle framework is ready
- * @dev: cpuidle device for this cpu
- * @drv: cpuidle driver for this cpu
+ * cpuidle_use_deepest_state - Enable/disable the "deepest idle" mode.
+ * @enable: Whether enable or disable the feature.
+ *
+ * If the "deepest idle" mode is enabled, cpuidle will ignore the governor and
+ * always use the state with the greatest exit latency (out of the states that
+ * are not disabled).
  *
- * Return 0 on success, otherwise:
- * -NODEV : the cpuidle framework is not available
- * -EBUSY : the cpuidle framework is not initialized
+ * This function can only be called after cpuidle_pause() to avoid races.
  */
-int cpuidle_enabled(struct cpuidle_driver *drv, struct cpuidle_device *dev)
+void cpuidle_use_deepest_state(bool enable)
 {
-        if (off || !initialized)
-                return -ENODEV;
+        use_deepest_state = enable;
+}
 
-        if (!drv || !dev || !dev->enabled)
-                return -EBUSY;
+/**
+ * cpuidle_find_deepest_state - Find the state of the greatest exit latency.
+ * @drv: cpuidle driver for a given CPU.
+ * @dev: cpuidle device for a given CPU.
+ */
+static int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
+                                      struct cpuidle_device *dev)
+{
+        unsigned int latency_req = 0;
+        int i, ret = CPUIDLE_DRIVER_STATE_START - 1;
 
-        return 0;
+        for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) {
+                struct cpuidle_state *s = &drv->states[i];
+                struct cpuidle_state_usage *su = &dev->states_usage[i];
+
+                if (s->disabled || su->disable || s->exit_latency <= latency_req)
+                        continue;
+
+                latency_req = s->exit_latency;
+                ret = i;
+        }
+        return ret;
 }
 
 /**
@@ -138,6 +158,15 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
  */
 int cpuidle_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
 {
+        if (off || !initialized)
+                return -ENODEV;
+
+        if (!drv || !dev || !dev->enabled)
+                return -EBUSY;
+
+        if (unlikely(use_deepest_state))
+                return cpuidle_find_deepest_state(drv, dev);
+
         return cpuidle_curr_governor->select(drv, dev);
 }
 
@@ -169,7 +198,7 @@ int cpuidle_enter(struct cpuidle_driver *drv, struct cpuidle_device *dev,
  */
 void cpuidle_reflect(struct cpuidle_device *dev, int index)
 {
-        if (cpuidle_curr_governor->reflect)
+        if (cpuidle_curr_governor->reflect && !unlikely(use_deepest_state))
                 cpuidle_curr_governor->reflect(dev, index);
 }
 
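As a reading aid, below is a minimal standalone sketch of the selection rule that the new cpuidle_find_deepest_state() implements: walk the states from the first real index, skip anything disabled, and keep the index of the greatest exit latency seen so far. The struct mock_state layout, MOCK_STATE_START, and the state table are invented stand-ins for illustration, not the kernel's definitions.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel structures (not the real layouts). */
struct mock_state {
        const char *name;
        unsigned int exit_latency;      /* worst-case exit latency, in us */
        bool disabled;                  /* disabled by the driver or via sysfs */
};

#define MOCK_STATE_START 1      /* stand-in for CPUIDLE_DRIVER_STATE_START */

/* Same shape as cpuidle_find_deepest_state(): pick the non-disabled state
 * with the greatest exit latency, or MOCK_STATE_START - 1 if none qualify. */
static int find_deepest_state(const struct mock_state *states, int count)
{
        unsigned int latency_req = 0;
        int i, ret = MOCK_STATE_START - 1;

        for (i = MOCK_STATE_START; i < count; i++) {
                if (states[i].disabled || states[i].exit_latency <= latency_req)
                        continue;

                latency_req = states[i].exit_latency;
                ret = i;
        }
        return ret;
}

int main(void)
{
        const struct mock_state table[] = {
                { "POLL", 0,   false },
                { "C1",   2,   false },
                { "C3",   10,  false },
                { "C6",   80,  true  },  /* disabled: must be skipped */
                { "C7",   104, false },
        };
        int idx = find_deepest_state(table, (int)(sizeof(table) / sizeof(table[0])));

        printf("deepest usable state: %d (%s)\n", idx, table[idx].name);  /* 4 (C7) */
        return 0;
}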
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 71b523293354..c4f80c15a48d 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -296,7 +296,7 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
                 data->needs_update = 0;
         }
 
-        data->last_state_idx = 0;
+        data->last_state_idx = CPUIDLE_DRIVER_STATE_START - 1;
 
         /* Special case when user has set very strict latency requirement */
         if (unlikely(latency_req == 0))
@@ -311,13 +311,6 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
         data->bucket = which_bucket(data->next_timer_us);
 
         /*
-         * if the correction factor is 0 (eg first time init or cpu hotplug
-         * etc), we actually want to start out with a unity factor.
-         */
-        if (data->correction_factor[data->bucket] == 0)
-                data->correction_factor[data->bucket] = RESOLUTION * DECAY;
-
-        /*
          * Force the result of multiplication to be 64 bits even if both
          * operands are 32 bits.
          * Make sure to round up for half microseconds.
@@ -466,9 +459,17 @@ static int menu_enable_device(struct cpuidle_driver *drv,
                              struct cpuidle_device *dev)
 {
         struct menu_device *data = &per_cpu(menu_devices, dev->cpu);
+        int i;
 
         memset(data, 0, sizeof(struct menu_device));
 
+        /*
+         * if the correction factor is 0 (eg first time init or cpu hotplug
+         * etc), we actually want to start out with a unity factor.
+         */
+        for(i = 0; i < BUCKETS; i++)
+                data->correction_factor[i] = RESOLUTION * DECAY;
+
         return 0;
 }
 
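For context: the menu governor predicts the idle duration by scaling the next timer expiry with correction_factor / (RESOLUTION * DECAY), so pre-loading every bucket with RESOLUTION * DECAY in menu_enable_device() makes a freshly enabled device start from a unity prediction instead of re-checking for zero on every wakeup. A rough userspace sketch of that scaling, assuming the RESOLUTION (1024) and DECAY (8) values defined in menu.c and approximating div_round64() with plain rounded integer division:

#include <stdint.h>
#include <stdio.h>

#define RESOLUTION      1024
#define DECAY           8

/* Rounded integer division, standing in for the kernel's div_round64(). */
static uint64_t div_round64(uint64_t dividend, uint32_t divisor)
{
        return (dividend + divisor / 2) / divisor;
}

/* Mirror of the menu governor's prediction step: scale the expected timer
 * sleep length by the learned correction factor. */
static uint64_t predict_us(uint64_t next_timer_us, uint64_t correction_factor)
{
        return div_round64(next_timer_us * correction_factor, RESOLUTION * DECAY);
}

int main(void)
{
        uint64_t next_timer_us = 500;
        uint64_t unity = (uint64_t)RESOLUTION * DECAY;  /* what menu_enable_device() now pre-loads */

        /* With a unity factor the prediction equals the timer value. */
        printf("unity factor:   %llu us -> %llu us\n",
               (unsigned long long)next_timer_us,
               (unsigned long long)predict_us(next_timer_us, unity));

        /* A learned factor of half unity halves the prediction. */
        printf("learned factor: %llu us -> %llu us\n",
               (unsigned long long)next_timer_us,
               (unsigned long long)predict_us(next_timer_us, unity / 2));
        return 0;
}

Initializing the factors once at enable time also drops a per-wakeup branch from the hot menu_select() path.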
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index b0238cba440b..c51a436135c4 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -120,8 +120,6 @@ struct cpuidle_driver {
 #ifdef CONFIG_CPU_IDLE
 extern void disable_cpuidle(void);
 
-extern int cpuidle_enabled(struct cpuidle_driver *drv,
-                           struct cpuidle_device *dev);
 extern int cpuidle_select(struct cpuidle_driver *drv,
                           struct cpuidle_device *dev);
 extern int cpuidle_enter(struct cpuidle_driver *drv,
@@ -145,13 +143,11 @@ extern void cpuidle_resume(void);
 extern int cpuidle_enable_device(struct cpuidle_device *dev);
 extern void cpuidle_disable_device(struct cpuidle_device *dev);
 extern int cpuidle_play_dead(void);
+extern void cpuidle_use_deepest_state(bool enable);
 
 extern struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev);
 #else
 static inline void disable_cpuidle(void) { }
-static inline int cpuidle_enabled(struct cpuidle_driver *drv,
-                                  struct cpuidle_device *dev)
-{return -ENODEV; }
 static inline int cpuidle_select(struct cpuidle_driver *drv,
                                  struct cpuidle_device *dev)
 {return -ENODEV; }
@@ -180,6 +176,7 @@ static inline int cpuidle_enable_device(struct cpuidle_device *dev)
 {return -ENODEV; }
 static inline void cpuidle_disable_device(struct cpuidle_device *dev) { }
 static inline int cpuidle_play_dead(void) {return -ENODEV; }
+static inline void cpuidle_use_deepest_state(bool enable) {}
 static inline struct cpuidle_driver *cpuidle_get_cpu_driver(
                                 struct cpuidle_device *dev) {return NULL; }
 #endif
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 8233cd4047d7..155721f7f909 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -54,9 +54,11 @@ static void freeze_begin(void)
 
 static void freeze_enter(void)
 {
+        cpuidle_use_deepest_state(true);
         cpuidle_resume();
         wait_event(suspend_freeze_wait_head, suspend_freeze_wake);
         cpuidle_pause();
+        cpuidle_use_deepest_state(false);
 }
 
 void freeze_wake(void)
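The bracketing in freeze_enter() is what the "can only be called after cpuidle_pause()" note in the new kernel-doc is about: the flag is only flipped while cpuidle is paused on all CPUs, so the idle path never sees it change in the middle of a selection. A toy mock of that bracket, with stub functions invented purely for illustration:

#include <stdbool.h>
#include <stdio.h>

/* Stubs standing in for the real cpuidle/suspend entry points. */
static bool use_deepest_state;

static void cpuidle_use_deepest_state(bool enable) { use_deepest_state = enable; }
static void cpuidle_resume(void) { printf("cpuidle resumed (deepest=%d)\n", use_deepest_state); }
static void cpuidle_pause(void)  { printf("cpuidle paused  (deepest=%d)\n", use_deepest_state); }
static void wait_for_wakeup(void) { printf("CPUs idling in the deepest state...\n"); }

/* Same bracket as freeze_enter(): the flag only changes while cpuidle is
 * paused, so idle-state selection never races with the mode switch. */
static void freeze_enter(void)
{
        cpuidle_use_deepest_state(true);
        cpuidle_resume();
        wait_for_wakeup();
        cpuidle_pause();
        cpuidle_use_deepest_state(false);
}

int main(void)
{
        freeze_enter();
        return 0;
}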
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 8f4390a079c7..a8f12247ce7c 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -101,19 +101,13 @@ static int cpuidle_idle_call(void)
         rcu_idle_enter();
 
         /*
-         * Check if the cpuidle framework is ready, otherwise fallback
-         * to the default arch specific idle method
+         * Ask the cpuidle framework to choose a convenient idle state.
+         * Fall back to the default arch specific idle method on errors.
          */
-        ret = cpuidle_enabled(drv, dev);
-
-        if (!ret) {
-                /*
-                 * Ask the governor to choose an idle state it thinks
-                 * it is convenient to go to. There is *always* a
-                 * convenient idle state
-                 */
-                next_state = cpuidle_select(drv, dev);
+        next_state = cpuidle_select(drv, dev);
 
+        ret = next_state;
+        if (ret >= 0) {
                 /*
                  * The idle task must be scheduled, it is pointless to
                  * go to idle, just update no idle residency and get
@@ -140,7 +134,7 @@ static int cpuidle_idle_call(void)
                                 CLOCK_EVT_NOTIFY_BROADCAST_ENTER,
                                 &dev->cpu);
 
-                if (!ret) {
+                if (ret >= 0) {
                         trace_cpu_idle_rcuidle(next_state, dev->cpu);
 
                         /*
@@ -175,7 +169,7 @@ static int cpuidle_idle_call(void)
          * We can't use the cpuidle framework, let's use the default
          * idle routine
          */
-        if (ret)
+        if (ret < 0)
                 arch_cpu_idle();
 
         __current_set_polling();
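With cpuidle_enabled() gone, cpuidle_select() folds the availability checks into its return value: a negative errno means "use the default arch idle routine", anything >= 0 is a state index. A small userspace sketch of that convention around a mocked select (the mock and its scenarios are invented; only the errno values mirror what cpuidle_select() can now return):

#include <errno.h>
#include <stdio.h>

/* Mocked stand-in for cpuidle_select(): either a state index (>= 0) or a
 * negative errno when the framework cannot be used. */
static int mock_cpuidle_select(int scenario)
{
        switch (scenario) {
        case 0:  return -ENODEV;  /* cpuidle off or not initialized */
        case 1:  return -EBUSY;   /* no driver/device, or device not enabled */
        default: return 2;        /* governor (or deepest-state mode) picked state 2 */
        }
}

static void idle_once(int scenario)
{
        int next_state = mock_cpuidle_select(scenario);
        int ret = next_state;   /* same folding as the reworked cpuidle_idle_call() */

        if (ret >= 0)
                printf("enter cpuidle state %d\n", next_state);
        else
                printf("fall back to arch_cpu_idle() (err %d)\n", ret);
}

int main(void)
{
        for (int s = 0; s < 3; s++)
                idle_once(s);
        return 0;
}

Folding the checks into cpuidle_select() lets cpuidle_idle_call() drop one level of nesting and treat "framework unavailable" and any other failure uniformly as a negative return.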