Diffstat (limited to 'drivers/cpufreq/cpufreq_ondemand.c')

 -rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c  | 117
 1 file changed, 25 insertions(+), 92 deletions(-)
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 0482bd49aba8..8bc38ab96400 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -34,13 +34,9 @@
  */
 
 #define DEF_FREQUENCY_UP_THRESHOLD		(80)
-#define MIN_FREQUENCY_UP_THRESHOLD		(0)
+#define MIN_FREQUENCY_UP_THRESHOLD		(11)
 #define MAX_FREQUENCY_UP_THRESHOLD		(100)
 
-#define DEF_FREQUENCY_DOWN_THRESHOLD		(20)
-#define MIN_FREQUENCY_DOWN_THRESHOLD		(0)
-#define MAX_FREQUENCY_DOWN_THRESHOLD		(100)
-
 /*
  * The polling frequency of this governor depends on the capability of
  * the processor. Default polling frequency is 1000 times the transition
@@ -77,14 +73,11 @@ struct dbs_tuners {
 	unsigned int sampling_rate;
 	unsigned int sampling_down_factor;
 	unsigned int up_threshold;
-	unsigned int down_threshold;
 	unsigned int ignore_nice;
-	unsigned int freq_step;
 };
 
 static struct dbs_tuners dbs_tuners_ins = {
 	.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
-	.down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD,
 	.sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
 };
 
@@ -125,9 +118,7 @@ static ssize_t show_##file_name \
 show_one(sampling_rate, sampling_rate);
 show_one(sampling_down_factor, sampling_down_factor);
 show_one(up_threshold, up_threshold);
-show_one(down_threshold, down_threshold);
 show_one(ignore_nice, ignore_nice);
-show_one(freq_step, freq_step);
 
 static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
 		const char *buf, size_t count)
@@ -173,8 +164,7 @@ static ssize_t store_up_threshold(struct cpufreq_policy *unused,
 
 	down(&dbs_sem);
 	if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
-			input < MIN_FREQUENCY_UP_THRESHOLD ||
-			input <= dbs_tuners_ins.down_threshold) {
+			input < MIN_FREQUENCY_UP_THRESHOLD) {
 		up(&dbs_sem);
 		return -EINVAL;
 	}
@@ -185,27 +175,6 @@ static ssize_t store_up_threshold(struct cpufreq_policy *unused,
 	return count;
 }
 
-static ssize_t store_down_threshold(struct cpufreq_policy *unused,
-		const char *buf, size_t count)
-{
-	unsigned int input;
-	int ret;
-	ret = sscanf (buf, "%u", &input);
-
-	down(&dbs_sem);
-	if (ret != 1 || input > MAX_FREQUENCY_DOWN_THRESHOLD ||
-			input < MIN_FREQUENCY_DOWN_THRESHOLD ||
-			input >= dbs_tuners_ins.up_threshold) {
-		up(&dbs_sem);
-		return -EINVAL;
-	}
-
-	dbs_tuners_ins.down_threshold = input;
-	up(&dbs_sem);
-
-	return count;
-}
-
 static ssize_t store_ignore_nice(struct cpufreq_policy *policy,
 		const char *buf, size_t count)
 {
@@ -240,29 +209,6 @@ static ssize_t store_ignore_nice(struct cpufreq_policy *policy,
 	return count;
 }
 
-static ssize_t store_freq_step(struct cpufreq_policy *policy,
-		const char *buf, size_t count)
-{
-	unsigned int input;
-	int ret;
-
-	ret = sscanf (buf, "%u", &input);
-
-	if ( ret != 1 )
-		return -EINVAL;
-
-	if ( input > 100 )
-		input = 100;
-
-	/* no need to test here if freq_step is zero as the user might actually
-	 * want this, they would be crazy though :) */
-	down(&dbs_sem);
-	dbs_tuners_ins.freq_step = input;
-	up(&dbs_sem);
-
-	return count;
-}
-
 #define define_one_rw(_name) \
 static struct freq_attr _name = \
 __ATTR(_name, 0644, show_##_name, store_##_name)
@@ -270,9 +216,7 @@ __ATTR(_name, 0644, show_##_name, store_##_name)
 define_one_rw(sampling_rate);
 define_one_rw(sampling_down_factor);
 define_one_rw(up_threshold);
-define_one_rw(down_threshold);
 define_one_rw(ignore_nice);
-define_one_rw(freq_step);
 
 static struct attribute * dbs_attributes[] = {
 	&sampling_rate_max.attr,
@@ -280,9 +224,7 @@ static struct attribute * dbs_attributes[] = {
 	&sampling_rate.attr,
 	&sampling_down_factor.attr,
 	&up_threshold.attr,
-	&down_threshold.attr,
 	&ignore_nice.attr,
-	&freq_step.attr,
 	NULL
 };
 
@@ -295,8 +237,8 @@ static struct attribute_group dbs_attr_group = {
 
 static void dbs_check_cpu(int cpu)
 {
-	unsigned int idle_ticks, up_idle_ticks, down_idle_ticks;
-	unsigned int freq_down_step;
+	unsigned int idle_ticks, up_idle_ticks, total_ticks;
+	unsigned int freq_next;
 	unsigned int freq_down_sampling_rate;
 	static int down_skip[NR_CPUS];
 	struct cpu_dbs_info_s *this_dbs_info;
@@ -310,17 +252,15 @@ static void dbs_check_cpu(int cpu)
 
 	policy = this_dbs_info->cur_policy;
 	/*
-	 * The default safe range is 20% to 80%
-	 * Every sampling_rate, we check
-	 *	- If current idle time is less than 20%, then we try to
-	 *	  increase frequency
-	 * Every sampling_rate*sampling_down_factor, we check
-	 *	- If current idle time is more than 80%, then we try to
-	 *	  decrease frequency
+	 * Every sampling_rate, we check: if current idle time is less
+	 * than 20% (default), then we try to increase the frequency.
+	 * Every sampling_rate*sampling_down_factor, we look for the lowest
+	 * frequency which can sustain the load while keeping idle time over
+	 * 30%. If such a frequency exists, we try to decrease to this frequency.
 	 *
 	 * Any frequency increase takes it to the maximum frequency.
 	 * Frequency reduction happens at minimum steps of
-	 * 5% (default) of max_frequency
+	 * 5% (default) of current frequency
 	 */
 
 	/* Check for frequency increase */
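The up-scaling rule referenced in the comment above is left alone by this patch: with the default up_threshold of 80, the governor jumps straight to the maximum frequency as soon as the CPU is busy for more than 80% of a sampling window. A minimal sketch of that rule follows; it is illustrative only, with invented names, and is not the actual dbs_check_cpu() code.

/* illustrative helper, not kernel code: apply the "frequency increase"
 * rule described above to one sampling window */
static unsigned int pick_freq_up(unsigned int idle_ticks,
				 unsigned int total_ticks,
				 unsigned int up_threshold,
				 unsigned int cur_khz,
				 unsigned int max_khz)
{
	/* busy time over the window, in percent */
	unsigned int load = ((total_ticks - idle_ticks) * 100) / total_ticks;

	/* any increase goes all the way to the maximum frequency */
	if (load > up_threshold)
		return max_khz;

	return cur_khz;
}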
@@ -383,33 +323,27 @@ static void dbs_check_cpu(int cpu)
 		idle_ticks = tmp_idle_ticks;
 	}
 
-	/* Scale idle ticks by 100 and compare with up and down ticks */
-	idle_ticks *= 100;
 	down_skip[cpu] = 0;
+	/* if we cannot reduce the frequency anymore, break out early */
+	if (policy->cur == policy->min)
+		return;
 
+	/* Compute how many ticks there are between two measurements */
 	freq_down_sampling_rate = dbs_tuners_ins.sampling_rate *
 		dbs_tuners_ins.sampling_down_factor;
-	down_idle_ticks = (100 - dbs_tuners_ins.down_threshold) *
-		usecs_to_jiffies(freq_down_sampling_rate);
-
-	if (idle_ticks > down_idle_ticks) {
-		/* if we are already at the lowest speed then break out early
-		 * or if we 'cannot' reduce the speed as the user might want
-		 * freq_step to be zero */
-		if (policy->cur == policy->min || dbs_tuners_ins.freq_step == 0)
-			return;
+	total_ticks = usecs_to_jiffies(freq_down_sampling_rate);
 
-		freq_down_step = (dbs_tuners_ins.freq_step * policy->max) / 100;
-
-		/* max freq cannot be less than 100. But who knows.... */
-		if (unlikely(freq_down_step == 0))
-			freq_down_step = 5;
+	/*
+	 * The optimal frequency is the frequency that is the lowest that
+	 * can support the current CPU usage without triggering the up
+	 * policy. To be safe, we focus 10 points under the threshold.
+	 */
+	freq_next = ((total_ticks - idle_ticks) * 100) / total_ticks;
+	freq_next = (freq_next * policy->cur) /
+		(dbs_tuners_ins.up_threshold - 10);
 
-		__cpufreq_driver_target(policy,
-			policy->cur - freq_down_step,
-			CPUFREQ_RELATION_H);
-		return;
-	}
+	if (freq_next <= ((policy->cur * 95) / 100))
+		__cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_L);
 }
 
 static void do_dbs_timer(void *data)
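A worked example of the new scaling-down arithmetic above (a standalone sketch; the tick counts and frequencies are made up): if the CPU was idle for 350 of 500 ticks in the down-sampling window, the load is 30%, and at a current frequency of 2 GHz freq_next comes out to 30 * 2000000 / (80 - 10) = 857142 kHz, which is below 95% of the current frequency, so the governor would request it. Raising MIN_FREQUENCY_UP_THRESHOLD to 11 in the first hunk presumably keeps the (up_threshold - 10) divisor from dropping below 1.

#include <stdio.h>

/* standalone illustration of the freq_next calculation above;
 * the tick counts and frequencies are invented for the example */
int main(void)
{
	unsigned int total_ticks  = 500;      /* jiffies in the down-sampling window */
	unsigned int idle_ticks   = 350;      /* of which the CPU was idle */
	unsigned int cur_khz      = 2000000;  /* current frequency: 2 GHz */
	unsigned int up_threshold = 80;       /* DEF_FREQUENCY_UP_THRESHOLD */

	/* load over the window, in percent: (500 - 350) * 100 / 500 = 30 */
	unsigned int load = ((total_ticks - idle_ticks) * 100) / total_ticks;

	/* lowest frequency that keeps the load 10 points under the up
	 * threshold: 30 * 2000000 / 70 = 857142 kHz */
	unsigned int freq_next = (load * cur_khz) / (up_threshold - 10);

	/* only scale down if the target is at least 5% below the current
	 * frequency, mirroring the (policy->cur * 95) / 100 check */
	if (freq_next <= (cur_khz * 95) / 100)
		printf("would request %u kHz\n", freq_next);

	return 0;
}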
@@ -487,7 +421,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 			DEF_SAMPLING_RATE_LATENCY_MULTIPLIER;
 		dbs_tuners_ins.sampling_rate = def_sampling_rate;
 		dbs_tuners_ins.ignore_nice = 0;
-		dbs_tuners_ins.freq_step = 5;
 
 		dbs_timer_init();
 	}