Diffstat (limited to 'drivers/cpufreq/cpufreq_conservative.c')
-rw-r--r--  drivers/cpufreq/cpufreq_conservative.c | 145
1 file changed, 9 insertions, 136 deletions
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 526bfbf69611..33b56e5c5c14 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -76,13 +76,10 @@ static DEFINE_PER_CPU(struct cpu_dbs_info_s, cs_cpu_dbs_info);
 static unsigned int dbs_enable;	/* number of CPUs using this policy */
 
 /*
- * dbs_mutex protects data in dbs_tuners_ins from concurrent changes on
- * different CPUs. It protects dbs_enable in governor start/stop.
+ * dbs_mutex protects dbs_enable in governor start/stop.
  */
 static DEFINE_MUTEX(dbs_mutex);
 
-static struct workqueue_struct *kconservative_wq;
-
 static struct dbs_tuners {
 	unsigned int sampling_rate;
 	unsigned int sampling_down_factor;
@@ -118,7 +115,7 @@ static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
 	if (wall)
 		*wall = (cputime64_t)jiffies_to_usecs(cur_wall_time);
 
-	return (cputime64_t)jiffies_to_usecs(idle_time);;
+	return (cputime64_t)jiffies_to_usecs(idle_time);
 }
 
 static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
@@ -164,21 +161,12 @@ static struct notifier_block dbs_cpufreq_notifier_block = {
 };
 
 /************************** sysfs interface ************************/
-static ssize_t show_sampling_rate_max(struct kobject *kobj,
-				      struct attribute *attr, char *buf)
-{
-	printk_once(KERN_INFO "CPUFREQ: conservative sampling_rate_max "
-		    "sysfs file is deprecated - used by: %s\n", current->comm);
-	return sprintf(buf, "%u\n", -1U);
-}
-
 static ssize_t show_sampling_rate_min(struct kobject *kobj,
 				      struct attribute *attr, char *buf)
 {
 	return sprintf(buf, "%u\n", min_sampling_rate);
 }
 
-define_one_global_ro(sampling_rate_max);
 define_one_global_ro(sampling_rate_min);
 
 /* cpufreq_conservative Governor Tunables */
@@ -195,33 +183,6 @@ show_one(down_threshold, down_threshold);
 show_one(ignore_nice_load, ignore_nice);
 show_one(freq_step, freq_step);
 
-/*** delete after deprecation time ***/
-#define DEPRECATION_MSG(file_name) \
-	printk_once(KERN_INFO "CPUFREQ: Per core conservative sysfs " \
-		"interface is deprecated - " #file_name "\n");
-
-#define show_one_old(file_name) \
-static ssize_t show_##file_name##_old \
-(struct cpufreq_policy *unused, char *buf) \
-{ \
-	printk_once(KERN_INFO "CPUFREQ: Per core conservative sysfs " \
-		"interface is deprecated - " #file_name "\n"); \
-	return show_##file_name(NULL, NULL, buf); \
-}
-show_one_old(sampling_rate);
-show_one_old(sampling_down_factor);
-show_one_old(up_threshold);
-show_one_old(down_threshold);
-show_one_old(ignore_nice_load);
-show_one_old(freq_step);
-show_one_old(sampling_rate_min);
-show_one_old(sampling_rate_max);
-
-cpufreq_freq_attr_ro_old(sampling_rate_min);
-cpufreq_freq_attr_ro_old(sampling_rate_max);
-
-/*** delete after deprecation time ***/
-
 static ssize_t store_sampling_down_factor(struct kobject *a,
 					  struct attribute *b,
 					  const char *buf, size_t count)
@@ -233,10 +194,7 @@ static ssize_t store_sampling_down_factor(struct kobject *a,
 	if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
 		return -EINVAL;
 
-	mutex_lock(&dbs_mutex);
 	dbs_tuners_ins.sampling_down_factor = input;
-	mutex_unlock(&dbs_mutex);
-
 	return count;
 }
 
@@ -250,10 +208,7 @@ static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
 	if (ret != 1)
 		return -EINVAL;
 
-	mutex_lock(&dbs_mutex);
 	dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate);
-	mutex_unlock(&dbs_mutex);
-
 	return count;
 }
 
@@ -264,16 +219,11 @@ static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
 	int ret;
 	ret = sscanf(buf, "%u", &input);
 
-	mutex_lock(&dbs_mutex);
 	if (ret != 1 || input > 100 ||
-			input <= dbs_tuners_ins.down_threshold) {
-		mutex_unlock(&dbs_mutex);
+			input <= dbs_tuners_ins.down_threshold)
 		return -EINVAL;
-	}
 
 	dbs_tuners_ins.up_threshold = input;
-	mutex_unlock(&dbs_mutex);
-
 	return count;
 }
 
@@ -284,17 +234,12 @@ static ssize_t store_down_threshold(struct kobject *a, struct attribute *b,
 	int ret;
 	ret = sscanf(buf, "%u", &input);
 
-	mutex_lock(&dbs_mutex);
 	/* cannot be lower than 11 otherwise freq will not fall */
 	if (ret != 1 || input < 11 || input > 100 ||
-			input >= dbs_tuners_ins.up_threshold) {
-		mutex_unlock(&dbs_mutex);
+			input >= dbs_tuners_ins.up_threshold)
 		return -EINVAL;
-	}
 
 	dbs_tuners_ins.down_threshold = input;
-	mutex_unlock(&dbs_mutex);
-
 	return count;
 }
 
@@ -313,11 +258,9 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
 	if (input > 1)
 		input = 1;
 
-	mutex_lock(&dbs_mutex);
-	if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
-		mutex_unlock(&dbs_mutex);
+	if (input == dbs_tuners_ins.ignore_nice) /* nothing to do */
 		return count;
-	}
+
 	dbs_tuners_ins.ignore_nice = input;
 
 	/* we need to re-evaluate prev_cpu_idle */
@@ -329,8 +272,6 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
 		if (dbs_tuners_ins.ignore_nice)
 			dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
 	}
-	mutex_unlock(&dbs_mutex);
-
 	return count;
 }
 
@@ -349,10 +290,7 @@ static ssize_t store_freq_step(struct kobject *a, struct attribute *b,
 
 	/* no need to test here if freq_step is zero as the user might actually
 	 * want this, they would be crazy though :) */
-	mutex_lock(&dbs_mutex);
 	dbs_tuners_ins.freq_step = input;
-	mutex_unlock(&dbs_mutex);
-
 	return count;
 }
 
@@ -364,7 +302,6 @@ define_one_global_rw(ignore_nice_load);
 define_one_global_rw(freq_step);
 
 static struct attribute *dbs_attributes[] = {
-	&sampling_rate_max.attr,
 	&sampling_rate_min.attr,
 	&sampling_rate.attr,
 	&sampling_down_factor.attr,
@@ -380,49 +317,6 @@ static struct attribute_group dbs_attr_group = {
 	.name = "conservative",
 };
 
-/*** delete after deprecation time ***/
-
-#define write_one_old(file_name) \
-static ssize_t store_##file_name##_old \
-(struct cpufreq_policy *unused, const char *buf, size_t count) \
-{ \
-	printk_once(KERN_INFO "CPUFREQ: Per core conservative sysfs " \
-		"interface is deprecated - " #file_name "\n"); \
-	return store_##file_name(NULL, NULL, buf, count); \
-}
-write_one_old(sampling_rate);
-write_one_old(sampling_down_factor);
-write_one_old(up_threshold);
-write_one_old(down_threshold);
-write_one_old(ignore_nice_load);
-write_one_old(freq_step);
-
-cpufreq_freq_attr_rw_old(sampling_rate);
-cpufreq_freq_attr_rw_old(sampling_down_factor);
-cpufreq_freq_attr_rw_old(up_threshold);
-cpufreq_freq_attr_rw_old(down_threshold);
-cpufreq_freq_attr_rw_old(ignore_nice_load);
-cpufreq_freq_attr_rw_old(freq_step);
-
-static struct attribute *dbs_attributes_old[] = {
-	&sampling_rate_max_old.attr,
-	&sampling_rate_min_old.attr,
-	&sampling_rate_old.attr,
-	&sampling_down_factor_old.attr,
-	&up_threshold_old.attr,
-	&down_threshold_old.attr,
-	&ignore_nice_load_old.attr,
-	&freq_step_old.attr,
-	NULL
-};
-
-static struct attribute_group dbs_attr_group_old = {
-	.attrs = dbs_attributes_old,
-	.name = "conservative",
-};
-
-/*** delete after deprecation time ***/
-
 /************************** sysfs end ************************/
 
 static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
@@ -560,7 +454,7 @@ static void do_dbs_timer(struct work_struct *work)
 
 	dbs_check_cpu(dbs_info);
 
-	queue_delayed_work_on(cpu, kconservative_wq, &dbs_info->work, delay);
+	schedule_delayed_work_on(cpu, &dbs_info->work, delay);
 	mutex_unlock(&dbs_info->timer_mutex);
 }
 
@@ -572,8 +466,7 @@ static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
 
 	dbs_info->enable = 1;
 	INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
-	queue_delayed_work_on(dbs_info->cpu, kconservative_wq, &dbs_info->work,
-			      delay);
+	schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, delay);
 }
 
 static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
@@ -599,12 +492,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 
 		mutex_lock(&dbs_mutex);
 
-		rc = sysfs_create_group(&policy->kobj, &dbs_attr_group_old);
-		if (rc) {
-			mutex_unlock(&dbs_mutex);
-			return rc;
-		}
-
 		for_each_cpu(j, policy->cpus) {
 			struct cpu_dbs_info_s *j_dbs_info;
 			j_dbs_info = &per_cpu(cs_cpu_dbs_info, j);
@@ -667,7 +554,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		dbs_timer_exit(this_dbs_info);
 
 		mutex_lock(&dbs_mutex);
-		sysfs_remove_group(&policy->kobj, &dbs_attr_group_old);
 		dbs_enable--;
 		mutex_destroy(&this_dbs_info->timer_mutex);
 
@@ -716,25 +602,12 @@ struct cpufreq_governor cpufreq_gov_conservative = {
 
 static int __init cpufreq_gov_dbs_init(void)
 {
-	int err;
-
-	kconservative_wq = create_workqueue("kconservative");
-	if (!kconservative_wq) {
-		printk(KERN_ERR "Creation of kconservative failed\n");
-		return -EFAULT;
-	}
-
-	err = cpufreq_register_governor(&cpufreq_gov_conservative);
-	if (err)
-		destroy_workqueue(kconservative_wq);
-
-	return err;
+	return cpufreq_register_governor(&cpufreq_gov_conservative);
 }
 
 static void __exit cpufreq_gov_dbs_exit(void)
 {
 	cpufreq_unregister_governor(&cpufreq_gov_conservative);
-	destroy_workqueue(kconservative_wq);
 }
 
 