about summary refs log tree commit diff stats
path: root/drivers/cpufreq
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/cpufreq')
-rw-r--r--drivers/cpufreq/cpufreq_conservative.c12
-rw-r--r--drivers/cpufreq/cpufreq_ondemand.c15
2 files changed, 14 insertions, 13 deletions
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index bdea7e2f94ba..bc33ddc9c97c 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -71,7 +71,7 @@ struct cpu_dbs_info_s {
  */
 	struct mutex timer_mutex;
 };
-static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
+static DEFINE_PER_CPU(struct cpu_dbs_info_s, cs_cpu_dbs_info);
 
 static unsigned int dbs_enable;	/* number of CPUs using this policy */
 
@@ -137,7 +137,7 @@ dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
 				     void *data)
 {
 	struct cpufreq_freqs *freq = data;
-	struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cpu_dbs_info,
+	struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cs_cpu_dbs_info,
 							freq->cpu);
 
 	struct cpufreq_policy *policy;
@@ -297,7 +297,7 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
 		/* we need to re-evaluate prev_cpu_idle */
 		for_each_online_cpu(j) {
 			struct cpu_dbs_info_s *dbs_info;
-			dbs_info = &per_cpu(cpu_dbs_info, j);
+			dbs_info = &per_cpu(cs_cpu_dbs_info, j);
 			dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
 						&dbs_info->prev_cpu_wall);
 			if (dbs_tuners_ins.ignore_nice)
@@ -387,7 +387,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 		cputime64_t cur_wall_time, cur_idle_time;
 		unsigned int idle_time, wall_time;
 
-		j_dbs_info = &per_cpu(cpu_dbs_info, j);
+		j_dbs_info = &per_cpu(cs_cpu_dbs_info, j);
 
 		cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
 
@@ -521,7 +521,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 	unsigned int j;
 	int rc;
 
-	this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
+	this_dbs_info = &per_cpu(cs_cpu_dbs_info, cpu);
 
 	switch (event) {
 	case CPUFREQ_GOV_START:
@@ -538,7 +538,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 
 		for_each_cpu(j, policy->cpus) {
 			struct cpu_dbs_info_s *j_dbs_info;
-			j_dbs_info = &per_cpu(cpu_dbs_info, j);
+			j_dbs_info = &per_cpu(cs_cpu_dbs_info, j);
 			j_dbs_info->cur_policy = policy;
 
 			j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 1a3e5c7252ff..071699de50ee 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -90,7 +90,7 @@ struct cpu_dbs_info_s {
  */
 	struct mutex timer_mutex;
 };
-static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
+static DEFINE_PER_CPU(struct cpu_dbs_info_s, od_cpu_dbs_info);
 
 static unsigned int dbs_enable;	/* number of CPUs using this policy */
 
@@ -161,7 +161,8 @@ static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
 	unsigned int freq_hi, freq_lo;
 	unsigned int index = 0;
 	unsigned int jiffies_total, jiffies_hi, jiffies_lo;
-	struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, policy->cpu);
+	struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
+						   policy->cpu);
 
 	if (!dbs_info->freq_table) {
 		dbs_info->freq_lo = 0;
@@ -204,7 +205,7 @@ static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
 
 static void ondemand_powersave_bias_init_cpu(int cpu)
 {
-	struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu);
+	struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
 	dbs_info->freq_table = cpufreq_frequency_get_table(cpu);
 	dbs_info->freq_lo = 0;
 }
@@ -342,7 +343,7 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
 		/* we need to re-evaluate prev_cpu_idle */
 		for_each_online_cpu(j) {
 			struct cpu_dbs_info_s *dbs_info;
-			dbs_info = &per_cpu(cpu_dbs_info, j);
+			dbs_info = &per_cpu(od_cpu_dbs_info, j);
 			dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
 						&dbs_info->prev_cpu_wall);
 			if (dbs_tuners_ins.ignore_nice)
@@ -474,7 +475,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 		unsigned int load, load_freq;
 		int freq_avg;
 
-		j_dbs_info = &per_cpu(cpu_dbs_info, j);
+		j_dbs_info = &per_cpu(od_cpu_dbs_info, j);
 
 		cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
 
@@ -621,7 +622,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 	unsigned int j;
 	int rc;
 
-	this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
+	this_dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
 
 	switch (event) {
 	case CPUFREQ_GOV_START:
@@ -639,7 +640,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		dbs_enable++;
 		for_each_cpu(j, policy->cpus) {
 			struct cpu_dbs_info_s *j_dbs_info;
-			j_dbs_info = &per_cpu(cpu_dbs_info, j);
+			j_dbs_info = &per_cpu(od_cpu_dbs_info, j);
 			j_dbs_info->cur_policy = policy;
 
 			j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,