Diffstat (limited to 'drivers/cpuidle')
-rw-r--r--  drivers/cpuidle/governors/ladder.c |  2
-rw-r--r--  drivers/cpuidle/governors/menu.c   | 62
2 files changed, 61 insertions(+), 3 deletions(-)
diff --git a/drivers/cpuidle/governors/ladder.c b/drivers/cpuidle/governors/ladder.c
index 1c1ceb4f218f..12c98900dcf8 100644
--- a/drivers/cpuidle/governors/ladder.c
+++ b/drivers/cpuidle/governors/ladder.c
@@ -67,7 +67,7 @@ static int ladder_select_state(struct cpuidle_device *dev)
         struct ladder_device *ldev = &__get_cpu_var(ladder_devices);
         struct ladder_device_state *last_state;
         int last_residency, last_idx = ldev->last_state_idx;
-        int latency_req = pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY);
+        int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
 
         /* Special case when user has set very strict latency requirement */
         if (unlikely(latency_req == 0)) {
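Annotation (not part of the patch): the ladder governor change is purely an API rename, pm_qos_requirement() becoming pm_qos_request(). The returned value is still the current PM QoS CPU/DMA latency constraint in microseconds, which the governor treats as an upper bound on acceptable exit latency, with 0 handled as the "very strict" special case visible in the context above. The same rename appears in menu_select() further down.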
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index f8e57c6303f2..52ff8aa63f84 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -21,9 +21,12 @@
 #include <linux/math64.h>
 
 #define BUCKETS 12
+#define INTERVALS 8
 #define RESOLUTION 1024
-#define DECAY 4
+#define DECAY 8
 #define MAX_INTERESTING 50000
+#define STDDEV_THRESH 400
+
 
 /*
  * Concepts and ideas behind the menu governor
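A note on the new constants (annotation, not part of the patch): the stddev variable in the detector added below actually holds the variance in us^2, so STDDEV_THRESH = 400 accepts a pattern whose standard deviation is at most sqrt(400) = 20 us across the last INTERVALS = 8 measured idle periods. DECAY is raised from 4 to 8; it appears in the RESOLUTION * DECAY divisor in menu_select() below and, in the correction-factor running-average update that is not part of this excerpt, controls how quickly old measurements are forgotten.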
@@ -64,6 +67,16 @@
  * indexed based on the magnitude of the expected duration as well as the
  * "is IO outstanding" property.
  *
+ * Repeatable-interval-detector
+ * ----------------------------
+ * There are some cases where "next timer" is a completely unusable predictor:
+ * Those cases where the interval is fixed, for example due to hardware
+ * interrupt mitigation, but also due to fixed transfer rate devices such as
+ * mice.
+ * For this, we use a different predictor: We track the duration of the last 8
+ * intervals and if the standard deviation of these 8 intervals is below a
+ * threshold value, we use the average of these intervals as prediction.
+ *
  * Limiting Performance Impact
  * ---------------------------
  * C states, especially those with large exit latencies, can have a real
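As a rough illustration of the criterion described above (the numbers are made up): if the last eight observed idle intervals all fall within a few microseconds of 8000 us, say a device interrupting at roughly 125 Hz, the average is 8000 us and the variance is far below the 400 us^2 threshold, so 8000 us is used as the prediction even when the next timer is much further away. A standalone sketch of this check follows the detect_repeating_patterns() hunk below.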
@@ -104,6 +117,8 @@ struct menu_device {
         unsigned int exit_us;
         unsigned int bucket;
         u64 correction_factor[BUCKETS];
+        u32 intervals[INTERVALS];
+        int interval_ptr;
 };
 
 
@@ -175,6 +190,42 @@ static u64 div_round64(u64 dividend, u32 divisor)
         return div_u64(dividend + (divisor / 2), divisor);
 }
 
+/*
+ * Try detecting repeating patterns by keeping track of the last 8
+ * intervals, and checking if the standard deviation of that set
+ * of points is below a threshold. If it is... then use the
+ * average of these 8 points as the estimated value.
+ */
+static void detect_repeating_patterns(struct menu_device *data)
+{
+        int i;
+        uint64_t avg = 0;
+        uint64_t stddev = 0; /* contains the square of the std deviation */
+
+        /* first calculate average and standard deviation of the past */
+        for (i = 0; i < INTERVALS; i++)
+                avg += data->intervals[i];
+        avg = avg / INTERVALS;
+
+        /* if the avg is beyond the known next tick, it's worthless */
+        if (avg > data->expected_us)
+                return;
+
+        for (i = 0; i < INTERVALS; i++)
+                stddev += (data->intervals[i] - avg) *
+                          (data->intervals[i] - avg);
+
+        stddev = stddev / INTERVALS;
+
+        /*
+         * now.. if stddev is small.. then assume we have a
+         * repeating pattern and predict we keep doing this.
+         */
+
+        if (avg && stddev < STDDEV_THRESH)
+                data->predicted_us = avg;
+}
+
 /**
  * menu_select - selects the next idle state to enter
  * @dev: the CPU
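To see the arithmetic in isolation, here is a user-space sketch of the same average/variance test; it is not part of the patch, uses a signed difference for readability, and the sample interval values (approximating the 125 Hz example above) and the 250 ms "next timer" are hypothetical.

/*
 * Standalone illustration only -- mirrors the average/variance test
 * of detect_repeating_patterns() on made-up data.
 */
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define INTERVALS     8
#define STDDEV_THRESH 400   /* variance threshold in us^2 (~20 us stddev) */

int main(void)
{
        /* pretend these are the last 8 measured idle residencies, in us */
        uint32_t intervals[INTERVALS] = {
                7990, 8005, 8010, 7995, 8000, 8002, 7998, 8000
        };
        uint64_t expected_us = 250000; /* pretend the next timer is 250 ms away */
        uint64_t avg = 0, variance = 0;
        int i;

        for (i = 0; i < INTERVALS; i++)
                avg += intervals[i];
        avg /= INTERVALS;

        for (i = 0; i < INTERVALS; i++) {
                int64_t diff = (int64_t)intervals[i] - (int64_t)avg;
                variance += (uint64_t)(diff * diff);
        }
        variance /= INTERVALS;

        printf("avg = %" PRIu64 " us, variance = %" PRIu64 " us^2\n",
               avg, variance);

        /* same acceptance conditions as the kernel function */
        if (avg && avg <= expected_us && variance < STDDEV_THRESH)
                printf("stable pattern: predict %" PRIu64 " us\n", avg);
        else
                printf("no stable pattern: keep the timer-based estimate\n");

        return 0;
}

With these samples the average is 8000 us and the integer variance works out to 32 us^2, well under STDDEV_THRESH, so the detector would replace the timer-based predicted_us with 8000 us.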
@@ -182,7 +233,7 @@ static u64 div_round64(u64 dividend, u32 divisor)
 static int menu_select(struct cpuidle_device *dev)
 {
         struct menu_device *data = &__get_cpu_var(menu_devices);
-        int latency_req = pm_qos_requirement(PM_QOS_CPU_DMA_LATENCY);
+        int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
         int i;
         int multiplier;
 
@@ -218,6 +269,8 @@ static int menu_select(struct cpuidle_device *dev)
         data->predicted_us = div_round64(data->expected_us * data->correction_factor[data->bucket],
                                          RESOLUTION * DECAY);
 
+        detect_repeating_patterns(data);
+
         /*
          * We want to default to C1 (hlt), not to busy polling
          * unless the timer is happening really really soon.
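Two things worth noting about the hunk above (annotation): with RESOLUTION = 1024 and DECAY = 8, a neutral correction factor is 1024 * 8 = 8192, in which case the div_round64() line leaves predicted_us equal to expected_us; per the correction-factor scheme described in the comment block earlier, buckets whose past sleeps were consistently cut short carry smaller factors and therefore yield shorter predictions. detect_repeating_patterns() runs after that estimate and overrides predicted_us with the recent-interval average whenever the variance test passes and that average does not exceed expected_us.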
@@ -310,6 +363,11 @@ static void menu_update(struct cpuidle_device *dev)
                 new_factor = 1;
 
         data->correction_factor[data->bucket] = new_factor;
+
+        /* update the repeating-pattern data */
+        data->intervals[data->interval_ptr++] = last_idle_us;
+        if (data->interval_ptr >= INTERVALS)
+                data->interval_ptr = 0;
 }
 
 /**
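Finally, the menu_update() hunk is what feeds the detector: each measured idle residency (last_idle_us) is written into the intervals[] ring buffer and interval_ptr wraps back to zero after INTERVALS (8) entries, so detect_repeating_patterns() always sees the eight most recent observations, and the buffer reflects a fixed-rate pattern appearing or disappearing within at most eight wakeups.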