Diffstat (limited to 'drivers/cpufreq/cpufreq_ondemand.c')
 -rw-r--r--   drivers/cpufreq/cpufreq_ondemand.c   263
1 file changed, 157 insertions, 106 deletions
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index f3eb26cd848f..15e80ee61352 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -20,6 +20,7 @@
 #include <linux/module.h>
 #include <linux/mutex.h>
 #include <linux/percpu-defs.h>
+#include <linux/slab.h>
 #include <linux/sysfs.h>
 #include <linux/tick.h>
 #include <linux/types.h>
@@ -37,22 +38,12 @@
 #define MIN_FREQUENCY_UP_THRESHOLD		(11)
 #define MAX_FREQUENCY_UP_THRESHOLD		(100)
 
-static struct dbs_data od_dbs_data;
 static DEFINE_PER_CPU(struct od_cpu_dbs_info_s, od_cpu_dbs_info);
 
 #ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
 static struct cpufreq_governor cpufreq_gov_ondemand;
 #endif
 
-static struct od_dbs_tuners od_tuners = {
-	.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
-	.sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
-	.adj_up_threshold = DEF_FREQUENCY_UP_THRESHOLD -
-			    DEF_FREQUENCY_DOWN_DIFFERENTIAL,
-	.ignore_nice = 0,
-	.powersave_bias = 0,
-};
-
 static void ondemand_powersave_bias_init_cpu(int cpu)
 {
 	struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
@@ -98,6 +89,8 @@ static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
 	unsigned int jiffies_total, jiffies_hi, jiffies_lo;
 	struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
 						      policy->cpu);
+	struct dbs_data *dbs_data = policy->governor_data;
+	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
 
 	if (!dbs_info->freq_table) {
 		dbs_info->freq_lo = 0;
@@ -108,7 +101,7 @@ static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
 	cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next,
 			relation, &index);
 	freq_req = dbs_info->freq_table[index].frequency;
-	freq_reduc = freq_req * od_tuners.powersave_bias / 1000;
+	freq_reduc = freq_req * od_tuners->powersave_bias / 1000;
 	freq_avg = freq_req - freq_reduc;
 
 	/* Find freq bounds for freq_avg in freq_table */
@@ -127,7 +120,7 @@ static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
 		dbs_info->freq_lo_jiffies = 0;
 		return freq_lo;
 	}
-	jiffies_total = usecs_to_jiffies(od_tuners.sampling_rate);
+	jiffies_total = usecs_to_jiffies(od_tuners->sampling_rate);
 	jiffies_hi = (freq_avg - freq_lo) * jiffies_total;
 	jiffies_hi += ((freq_hi - freq_lo) / 2);
 	jiffies_hi /= (freq_hi - freq_lo);
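The arithmetic in this hunk splits each sampling period between the two table frequencies that bracket the bias-reduced request, so the time-weighted average frequency lands on freq_avg. A rough userspace sketch of the same proportion (values and names are made up for illustration; plain microseconds stand in for jiffies):

    #include <stdio.h>

    int main(void)
    {
            /* assumed example inputs, not taken from any real platform */
            unsigned int freq_req = 2000000;            /* requested table freq, kHz */
            unsigned int powersave_bias = 100;          /* 10.0%, in units of 0.1% */
            unsigned int freq_lo = 1600000, freq_hi = 2000000; /* bracketing entries */
            unsigned int period = 10000;                /* sampling period, us */

            unsigned int freq_avg = freq_req - freq_req * powersave_bias / 1000;
            /* round to nearest, as the jiffies_hi computation above does */
            unsigned int t_hi = ((freq_avg - freq_lo) * period +
                                 (freq_hi - freq_lo) / 2) / (freq_hi - freq_lo);
            unsigned int t_lo = period - t_hi;

            printf("%u us at %u kHz, %u us at %u kHz (avg ~%llu kHz)\n",
                   t_hi, freq_hi, t_lo, freq_lo,
                   ((unsigned long long)t_hi * freq_hi +
                    (unsigned long long)t_lo * freq_lo) / period);
            return 0;
    }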
@@ -148,12 +141,15 @@ static void ondemand_powersave_bias_init(void)
 
 static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq)
 {
-	if (od_tuners.powersave_bias)
+	struct dbs_data *dbs_data = p->governor_data;
+	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
+
+	if (od_tuners->powersave_bias)
 		freq = powersave_bias_target(p, freq, CPUFREQ_RELATION_H);
 	else if (p->cur == p->max)
 		return;
 
-	__cpufreq_driver_target(p, freq, od_tuners.powersave_bias ?
+	__cpufreq_driver_target(p, freq, od_tuners->powersave_bias ?
 			CPUFREQ_RELATION_L : CPUFREQ_RELATION_H);
 }
 
@@ -170,15 +166,17 @@ static void od_check_cpu(int cpu, unsigned int load_freq)
 {
 	struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
 	struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy;
+	struct dbs_data *dbs_data = policy->governor_data;
+	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
 
 	dbs_info->freq_lo = 0;
 
 	/* Check for frequency increase */
-	if (load_freq > od_tuners.up_threshold * policy->cur) {
+	if (load_freq > od_tuners->up_threshold * policy->cur) {
 		/* If switching to max speed, apply sampling_down_factor */
 		if (policy->cur < policy->max)
 			dbs_info->rate_mult =
-				od_tuners.sampling_down_factor;
+				od_tuners->sampling_down_factor;
 		dbs_freq_increase(policy, policy->max);
 		return;
 	}
@@ -193,9 +191,10 @@ static void od_check_cpu(int cpu, unsigned int load_freq)
 	 * support the current CPU usage without triggering the up policy. To be
 	 * safe, we focus 10 points under the threshold.
 	 */
-	if (load_freq < od_tuners.adj_up_threshold * policy->cur) {
+	if (load_freq < od_tuners->adj_up_threshold
+			* policy->cur) {
 		unsigned int freq_next;
-		freq_next = load_freq / od_tuners.adj_up_threshold;
+		freq_next = load_freq / od_tuners->adj_up_threshold;
 
 		/* No longer fully busy, reset rate_mult */
 		dbs_info->rate_mult = 1;
@@ -203,7 +202,7 @@ static void od_check_cpu(int cpu, unsigned int load_freq)
 		if (freq_next < policy->min)
 			freq_next = policy->min;
 
-		if (!od_tuners.powersave_bias) {
+		if (!od_tuners->powersave_bias) {
 			__cpufreq_driver_target(policy, freq_next,
 					CPUFREQ_RELATION_L);
 		} else {
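In the common governor code, load_freq is roughly the measured load (in percent) multiplied by the current frequency, so the two comparisons above form a simple hysteresis: jump straight to the maximum when load exceeds up_threshold, scale down to a frequency that would run at about adj_up_threshold percent when load falls below it, and hold otherwise. A toy model with assumed numbers (illustrative only, not kernel code):

    #include <stdio.h>

    /* illustrative stand-in for od_check_cpu's selection logic */
    static unsigned int pick_freq(unsigned int load, unsigned int cur,
                                  unsigned int min, unsigned int max,
                                  unsigned int up_threshold,
                                  unsigned int adj_up_threshold)
    {
            unsigned int load_freq = load * cur;

            if (load_freq > up_threshold * cur)         /* busy: go to max */
                    return max;
            if (load_freq < adj_up_threshold * cur) {   /* idle enough: scale down */
                    unsigned int next = load_freq / adj_up_threshold;
                    return next < min ? min : next;
            }
            return cur;                                 /* hysteresis band: hold */
    }

    int main(void)
    {
            /* up_threshold 95, down differential 3 -> adj_up_threshold 92 */
            printf("%u\n", pick_freq(97, 1200000, 800000, 2400000, 95, 92));
            printf("%u\n", pick_freq(40, 1200000, 800000, 2400000, 95, 92));
            printf("%u\n", pick_freq(93, 1200000, 800000, 2400000, 95, 92));
            return 0;
    }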
@@ -223,12 +222,14 @@ static void od_dbs_timer(struct work_struct *work)
 	unsigned int cpu = dbs_info->cdbs.cur_policy->cpu;
 	struct od_cpu_dbs_info_s *core_dbs_info = &per_cpu(od_cpu_dbs_info,
 			cpu);
+	struct dbs_data *dbs_data = dbs_info->cdbs.cur_policy->governor_data;
+	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
 	int delay, sample_type = core_dbs_info->sample_type;
 	bool eval_load;
 
 	mutex_lock(&core_dbs_info->cdbs.timer_mutex);
 	eval_load = need_load_eval(&core_dbs_info->cdbs,
-			od_tuners.sampling_rate);
+			od_tuners->sampling_rate);
 
 	/* Common NORMAL_SAMPLE setup */
 	core_dbs_info->sample_type = OD_NORMAL_SAMPLE;
@@ -240,13 +241,13 @@ static void od_dbs_timer(struct work_struct *work)
 				CPUFREQ_RELATION_H);
 	} else {
 		if (eval_load)
-			dbs_check_cpu(&od_dbs_data, cpu);
+			dbs_check_cpu(dbs_data, cpu);
 		if (core_dbs_info->freq_lo) {
 			/* Setup timer for SUB_SAMPLE */
 			core_dbs_info->sample_type = OD_SUB_SAMPLE;
 			delay = core_dbs_info->freq_hi_jiffies;
 		} else {
-			delay = delay_for_sampling_rate(od_tuners.sampling_rate
+			delay = delay_for_sampling_rate(od_tuners->sampling_rate
 					* core_dbs_info->rate_mult);
 		}
 	}
@@ -256,12 +257,7 @@ static void od_dbs_timer(struct work_struct *work)
 }
 
 /************************** sysfs interface ************************/
-
-static ssize_t show_sampling_rate_min(struct kobject *kobj,
-				      struct attribute *attr, char *buf)
-{
-	return sprintf(buf, "%u\n", od_dbs_data.min_sampling_rate);
-}
+static struct common_dbs_data od_dbs_cdata;
 
 /**
  * update_sampling_rate - update sampling rate effective immediately if needed.
@@ -276,12 +272,14 @@ static ssize_t show_sampling_rate_min(struct kobject *kobj,
  * reducing the sampling rate, we need to make the new value effective
  * immediately.
  */
-static void update_sampling_rate(unsigned int new_rate)
+static void update_sampling_rate(struct dbs_data *dbs_data,
+		unsigned int new_rate)
 {
+	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
 	int cpu;
 
-	od_tuners.sampling_rate = new_rate = max(new_rate,
-			od_dbs_data.min_sampling_rate);
+	od_tuners->sampling_rate = new_rate = max(new_rate,
+			dbs_data->min_sampling_rate);
 
 	for_each_online_cpu(cpu) {
 		struct cpufreq_policy *policy;
@@ -322,34 +320,37 @@ static void update_sampling_rate(unsigned int new_rate)
 	}
 }
 
-static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
-		const char *buf, size_t count)
+static ssize_t store_sampling_rate(struct dbs_data *dbs_data, const char *buf,
+		size_t count)
 {
 	unsigned int input;
 	int ret;
 	ret = sscanf(buf, "%u", &input);
 	if (ret != 1)
 		return -EINVAL;
-	update_sampling_rate(input);
+
+	update_sampling_rate(dbs_data, input);
 	return count;
 }
 
-static ssize_t store_io_is_busy(struct kobject *a, struct attribute *b,
-		const char *buf, size_t count)
+static ssize_t store_io_is_busy(struct dbs_data *dbs_data, const char *buf,
+		size_t count)
 {
+	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
 	unsigned int input;
 	int ret;
 
 	ret = sscanf(buf, "%u", &input);
 	if (ret != 1)
 		return -EINVAL;
-	od_tuners.io_is_busy = !!input;
+	od_tuners->io_is_busy = !!input;
 	return count;
 }
 
-static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
-		const char *buf, size_t count)
+static ssize_t store_up_threshold(struct dbs_data *dbs_data, const char *buf,
+		size_t count)
 {
+	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
 	unsigned int input;
 	int ret;
 	ret = sscanf(buf, "%u", &input);
@@ -359,23 +360,24 @@ static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
 		return -EINVAL;
 	}
 	/* Calculate the new adj_up_threshold */
-	od_tuners.adj_up_threshold += input;
-	od_tuners.adj_up_threshold -= od_tuners.up_threshold;
+	od_tuners->adj_up_threshold += input;
+	od_tuners->adj_up_threshold -= od_tuners->up_threshold;
 
-	od_tuners.up_threshold = input;
+	od_tuners->up_threshold = input;
 	return count;
 }
 
-static ssize_t store_sampling_down_factor(struct kobject *a,
-		struct attribute *b, const char *buf, size_t count)
+static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data,
+		const char *buf, size_t count)
 {
+	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
 	unsigned int input, j;
 	int ret;
 	ret = sscanf(buf, "%u", &input);
 
 	if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
 		return -EINVAL;
-	od_tuners.sampling_down_factor = input;
+	od_tuners->sampling_down_factor = input;
 
 	/* Reset down sampling multiplier in case it was active */
 	for_each_online_cpu(j) {
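The two-step update in store_up_threshold keeps the down differential constant when up_threshold changes: adj_up_threshold ends up as new_up minus (old_up minus old_adj). A quick check with assumed values:

    #include <assert.h>

    int main(void)
    {
            /* assumed starting point: up_threshold 95, differential 3 */
            unsigned int up_threshold = 95, adj_up_threshold = 92;
            unsigned int input = 80;           /* value written via sysfs */

            adj_up_threshold += input;         /* 172 */
            adj_up_threshold -= up_threshold;  /* 77: the differential of 3 is kept */
            up_threshold = input;

            assert(up_threshold - adj_up_threshold == 3);
            return 0;
    }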
@@ -386,9 +388,10 @@ static ssize_t store_sampling_down_factor(struct kobject *a,
 	return count;
 }
 
-static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
-		const char *buf, size_t count)
+static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf,
+		size_t count)
 {
+	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
 	unsigned int input;
 	int ret;
 
@@ -401,10 +404,10 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
 	if (input > 1)
 		input = 1;
 
-	if (input == od_tuners.ignore_nice) { /* nothing to do */
+	if (input == od_tuners->ignore_nice) { /* nothing to do */
 		return count;
 	}
-	od_tuners.ignore_nice = input;
+	od_tuners->ignore_nice = input;
 
 	/* we need to re-evaluate prev_cpu_idle */
 	for_each_online_cpu(j) {
@@ -412,7 +415,7 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
 		dbs_info = &per_cpu(od_cpu_dbs_info, j);
 		dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
 				&dbs_info->cdbs.prev_cpu_wall);
-		if (od_tuners.ignore_nice)
+		if (od_tuners->ignore_nice)
 			dbs_info->cdbs.prev_cpu_nice =
 				kcpustat_cpu(j).cpustat[CPUTIME_NICE];
 
@@ -420,9 +423,10 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
 	return count;
 }
 
-static ssize_t store_powersave_bias(struct kobject *a, struct attribute *b,
-		const char *buf, size_t count)
+static ssize_t store_powersave_bias(struct dbs_data *dbs_data, const char *buf,
+		size_t count)
 {
+	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
 	unsigned int input;
 	int ret;
 	ret = sscanf(buf, "%u", &input);
@@ -433,68 +437,138 @@ static ssize_t store_powersave_bias(struct kobject *a, struct attribute *b,
 	if (input > 1000)
 		input = 1000;
 
-	od_tuners.powersave_bias = input;
+	od_tuners->powersave_bias = input;
 	ondemand_powersave_bias_init();
 	return count;
 }
 
-show_one(od, sampling_rate, sampling_rate);
-show_one(od, io_is_busy, io_is_busy);
-show_one(od, up_threshold, up_threshold);
-show_one(od, sampling_down_factor, sampling_down_factor);
-show_one(od, ignore_nice_load, ignore_nice);
-show_one(od, powersave_bias, powersave_bias);
-
-define_one_global_rw(sampling_rate);
-define_one_global_rw(io_is_busy);
-define_one_global_rw(up_threshold);
-define_one_global_rw(sampling_down_factor);
-define_one_global_rw(ignore_nice_load);
-define_one_global_rw(powersave_bias);
-define_one_global_ro(sampling_rate_min);
-
-static struct attribute *dbs_attributes[] = {
-	&sampling_rate_min.attr,
-	&sampling_rate.attr,
-	&up_threshold.attr,
-	&sampling_down_factor.attr,
-	&ignore_nice_load.attr,
-	&powersave_bias.attr,
-	&io_is_busy.attr,
+show_store_one(od, sampling_rate);
+show_store_one(od, io_is_busy);
+show_store_one(od, up_threshold);
+show_store_one(od, sampling_down_factor);
+show_store_one(od, ignore_nice);
+show_store_one(od, powersave_bias);
+declare_show_sampling_rate_min(od);
+
+gov_sys_pol_attr_rw(sampling_rate);
+gov_sys_pol_attr_rw(io_is_busy);
+gov_sys_pol_attr_rw(up_threshold);
+gov_sys_pol_attr_rw(sampling_down_factor);
+gov_sys_pol_attr_rw(ignore_nice);
+gov_sys_pol_attr_rw(powersave_bias);
+gov_sys_pol_attr_ro(sampling_rate_min);
+
+static struct attribute *dbs_attributes_gov_sys[] = {
+	&sampling_rate_min_gov_sys.attr,
+	&sampling_rate_gov_sys.attr,
+	&up_threshold_gov_sys.attr,
+	&sampling_down_factor_gov_sys.attr,
+	&ignore_nice_gov_sys.attr,
+	&powersave_bias_gov_sys.attr,
+	&io_is_busy_gov_sys.attr,
 	NULL
 };
 
-static struct attribute_group od_attr_group = {
-	.attrs = dbs_attributes,
+static struct attribute_group od_attr_group_gov_sys = {
+	.attrs = dbs_attributes_gov_sys,
+	.name = "ondemand",
+};
+
+static struct attribute *dbs_attributes_gov_pol[] = {
+	&sampling_rate_min_gov_pol.attr,
+	&sampling_rate_gov_pol.attr,
+	&up_threshold_gov_pol.attr,
+	&sampling_down_factor_gov_pol.attr,
+	&ignore_nice_gov_pol.attr,
+	&powersave_bias_gov_pol.attr,
+	&io_is_busy_gov_pol.attr,
+	NULL
+};
+
+static struct attribute_group od_attr_group_gov_pol = {
+	.attrs = dbs_attributes_gov_pol,
 	.name = "ondemand",
 };
 
 /************************** sysfs end ************************/
 
+static int od_init(struct dbs_data *dbs_data)
+{
+	struct od_dbs_tuners *tuners;
+	u64 idle_time;
+	int cpu;
+
+	tuners = kzalloc(sizeof(struct od_dbs_tuners), GFP_KERNEL);
+	if (!tuners) {
+		pr_err("%s: kzalloc failed\n", __func__);
+		return -ENOMEM;
+	}
+
+	cpu = get_cpu();
+	idle_time = get_cpu_idle_time_us(cpu, NULL);
+	put_cpu();
+	if (idle_time != -1ULL) {
+		/* Idle micro accounting is supported. Use finer thresholds */
+		tuners->up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
+		tuners->adj_up_threshold = MICRO_FREQUENCY_UP_THRESHOLD -
+			MICRO_FREQUENCY_DOWN_DIFFERENTIAL;
+		/*
+		 * In nohz/micro accounting case we set the minimum frequency
+		 * not depending on HZ, but fixed (very low). The deferred
+		 * timer might skip some samples if idle/sleeping as needed.
+		 */
+		dbs_data->min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
+	} else {
+		tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
+		tuners->adj_up_threshold = DEF_FREQUENCY_UP_THRESHOLD -
+			DEF_FREQUENCY_DOWN_DIFFERENTIAL;
+
+		/* For correct statistics, we need 10 ticks for each measure */
+		dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
+			jiffies_to_usecs(10);
+	}
+
+	tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
+	tuners->ignore_nice = 0;
+	tuners->powersave_bias = 0;
+	tuners->io_is_busy = should_io_be_busy();
+
+	dbs_data->tuners = tuners;
+	pr_info("%s: tuners %p\n", __func__, tuners);
+	mutex_init(&dbs_data->mutex);
+	return 0;
+}
+
+static void od_exit(struct dbs_data *dbs_data)
+{
+	kfree(dbs_data->tuners);
+}
+
 define_get_cpu_dbs_routines(od_cpu_dbs_info);
 
 static struct od_ops od_ops = {
-	.io_busy = should_io_be_busy,
 	.powersave_bias_init_cpu = ondemand_powersave_bias_init_cpu,
 	.powersave_bias_target = powersave_bias_target,
 	.freq_increase = dbs_freq_increase,
 };
 
-static struct dbs_data od_dbs_data = {
+static struct common_dbs_data od_dbs_cdata = {
 	.governor = GOV_ONDEMAND,
-	.attr_group = &od_attr_group,
-	.tuners = &od_tuners,
+	.attr_group_gov_sys = &od_attr_group_gov_sys,
+	.attr_group_gov_pol = &od_attr_group_gov_pol,
 	.get_cpu_cdbs = get_cpu_cdbs,
 	.get_cpu_dbs_info_s = get_cpu_dbs_info_s,
 	.gov_dbs_timer = od_dbs_timer,
 	.gov_check_cpu = od_check_cpu,
 	.gov_ops = &od_ops,
+	.init = od_init,
+	.exit = od_exit,
 };
 
 static int od_cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		unsigned int event)
 {
-	return cpufreq_governor_dbs(&od_dbs_data, policy, event);
+	return cpufreq_governor_dbs(policy, &od_dbs_cdata, event);
 }
 
 #ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
@@ -509,29 +583,6 @@ struct cpufreq_governor cpufreq_gov_ondemand = {
 
 static int __init cpufreq_gov_dbs_init(void)
 {
-	u64 idle_time;
-	int cpu = get_cpu();
-
-	mutex_init(&od_dbs_data.mutex);
-	idle_time = get_cpu_idle_time_us(cpu, NULL);
-	put_cpu();
-	if (idle_time != -1ULL) {
-		/* Idle micro accounting is supported. Use finer thresholds */
-		od_tuners.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
-		od_tuners.adj_up_threshold = MICRO_FREQUENCY_UP_THRESHOLD -
-			MICRO_FREQUENCY_DOWN_DIFFERENTIAL;
-		/*
-		 * In nohz/micro accounting case we set the minimum frequency
-		 * not depending on HZ, but fixed (very low). The deferred
-		 * timer might skip some samples if idle/sleeping as needed.
-		 */
-		od_dbs_data.min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
-	} else {
-		/* For correct statistics, we need 10 ticks for each measure */
-		od_dbs_data.min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
-			jiffies_to_usecs(10);
-	}
-
 	return cpufreq_register_governor(&cpufreq_gov_ondemand);
 }
 
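With this change, od_init() picks the minimum sampling rate per governor instance instead of once at module load: a fixed floor when idle micro accounting is available, otherwise ten tick periods scaled by a ratio. A rough back-of-the-envelope sketch of the two paths; the constant values are assumptions based on the cpufreq_governor.h of this era (MICRO_FREQUENCY_MIN_SAMPLE_RATE of 10000 us, MIN_SAMPLING_RATE_RATIO of 2), so check the headers in your tree:

    #include <stdio.h>

    int main(void)
    {
            unsigned int hz = 250;                         /* assumed CONFIG_HZ */
            unsigned int ten_ticks_us = 10 * (1000000 / hz);

            unsigned int micro_floor_us = 10000;           /* micro-accounting path */
            unsigned int tick_floor_us = 2 * ten_ticks_us; /* fallback path */

            printf("min_sampling_rate: %u us (micro) vs %u us (tick-based)\n",
                   micro_floor_us, tick_floor_us);
            return 0;
    }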