path: root/drivers/cpufreq
Diffstat (limited to 'drivers/cpufreq')
-rw-r--r--  drivers/cpufreq/cpufreq.c               | 14
-rw-r--r--  drivers/cpufreq/cpufreq_conservative.c  |  2
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c      |  4
-rw-r--r--  drivers/cpufreq/cpufreq_userspace.c     | 79
4 files changed, 53 insertions(+), 46 deletions(-)
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index ee1df0d45e81..8d6a3ff02672 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -589,7 +589,7 @@ static ssize_t show_cpus(cpumask_t mask, char *buf)
 	ssize_t i = 0;
 	unsigned int cpu;
 
-	for_each_cpu_mask(cpu, mask) {
+	for_each_cpu_mask_nr(cpu, mask) {
 		if (i)
 			i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), " ");
 		i += scnprintf(&buf[i], (PAGE_SIZE - i - 2), "%u", cpu);
@@ -835,7 +835,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
 	}
 #endif
 
-	for_each_cpu_mask(j, policy->cpus) {
+	for_each_cpu_mask_nr(j, policy->cpus) {
 		if (cpu == j)
 			continue;
 
@@ -898,14 +898,14 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
 	}
 
 	spin_lock_irqsave(&cpufreq_driver_lock, flags);
-	for_each_cpu_mask(j, policy->cpus) {
+	for_each_cpu_mask_nr(j, policy->cpus) {
 		per_cpu(cpufreq_cpu_data, j) = policy;
 		per_cpu(policy_cpu, j) = policy->cpu;
 	}
 	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
 	/* symlink affected CPUs */
-	for_each_cpu_mask(j, policy->cpus) {
+	for_each_cpu_mask_nr(j, policy->cpus) {
 		if (j == cpu)
 			continue;
 		if (!cpu_online(j))
@@ -945,7 +945,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
 
 err_out_unregister:
 	spin_lock_irqsave(&cpufreq_driver_lock, flags);
-	for_each_cpu_mask(j, policy->cpus)
+	for_each_cpu_mask_nr(j, policy->cpus)
 		per_cpu(cpufreq_cpu_data, j) = NULL;
 	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
@@ -1028,7 +1028,7 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
 	 * the sysfs links afterwards.
 	 */
 	if (unlikely(cpus_weight(data->cpus) > 1)) {
		for_each_cpu_mask(j, data->cpus) {
+		for_each_cpu_mask_nr(j, data->cpus) {
 			if (j == cpu)
 				continue;
 			per_cpu(cpufreq_cpu_data, j) = NULL;
@@ -1038,7 +1038,7 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
 	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
 	if (unlikely(cpus_weight(data->cpus) > 1)) {
-		for_each_cpu_mask(j, data->cpus) {
+		for_each_cpu_mask_nr(j, data->cpus) {
 			if (j == cpu)
 				continue;
 			dprintk("removing link for cpu %u\n", j);
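
The cpufreq.c part of this patch is a one-for-one switch from for_each_cpu_mask() to for_each_cpu_mask_nr(), which stops iterating at nr_cpu_ids (the highest possible CPU id on the running system) instead of scanning all NR_CPUS bit positions. A minimal sketch of the new iterator in use follows; the helper name show_policy_cpus() is hypothetical and not part of the patch.

/*
 * Illustrative sketch only, assuming the 2.6.26-era cpumask API:
 * for_each_cpu_mask_nr(cpu, mask) walks the set bits of a cpumask_t
 * value, bounded by nr_cpu_ids rather than the compile-time NR_CPUS.
 */
static void show_policy_cpus(struct cpufreq_policy *policy)
{
	unsigned int j;

	for_each_cpu_mask_nr(j, policy->cpus)
		printk(KERN_DEBUG "cpufreq: policy of cpu %u covers cpu %u\n",
		       policy->cpu, j);
}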
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 5d3a04ba6ad2..fe565ee43757 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -497,7 +497,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 			return rc;
 		}
 
-		for_each_cpu_mask(j, policy->cpus) {
+		for_each_cpu_mask_nr(j, policy->cpus) {
 			struct cpu_dbs_info_s *j_dbs_info;
 			j_dbs_info = &per_cpu(cpu_dbs_info, j);
 			j_dbs_info->cur_policy = policy;
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index d2af20dda382..33855cb3cf16 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -367,7 +367,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 
 	/* Get Idle Time */
 	idle_ticks = UINT_MAX;
-	for_each_cpu_mask(j, policy->cpus) {
+	for_each_cpu_mask_nr(j, policy->cpus) {
 		cputime64_t total_idle_ticks;
 		unsigned int tmp_idle_ticks;
 		struct cpu_dbs_info_s *j_dbs_info;
@@ -521,7 +521,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 			return rc;
 		}
 
-		for_each_cpu_mask(j, policy->cpus) {
+		for_each_cpu_mask_nr(j, policy->cpus) {
 			struct cpu_dbs_info_s *j_dbs_info;
 			j_dbs_info = &per_cpu(cpu_dbs_info, j);
 			j_dbs_info->cur_policy = policy;
diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c
index cb2ac01a41a1..32244aa7cc0c 100644
--- a/drivers/cpufreq/cpufreq_userspace.c
+++ b/drivers/cpufreq/cpufreq_userspace.c
@@ -30,16 +30,18 @@
 /**
  * A few values needed by the userspace governor
  */
-static unsigned int cpu_max_freq[NR_CPUS];
-static unsigned int cpu_min_freq[NR_CPUS];
-static unsigned int cpu_cur_freq[NR_CPUS]; /* current CPU freq */
-static unsigned int cpu_set_freq[NR_CPUS]; /* CPU freq desired by userspace */
-static unsigned int cpu_is_managed[NR_CPUS];
+static DEFINE_PER_CPU(unsigned int, cpu_max_freq);
+static DEFINE_PER_CPU(unsigned int, cpu_min_freq);
+static DEFINE_PER_CPU(unsigned int, cpu_cur_freq); /* current CPU freq */
+static DEFINE_PER_CPU(unsigned int, cpu_set_freq); /* CPU freq desired by
+						      userspace */
+static DEFINE_PER_CPU(unsigned int, cpu_is_managed);
 
 static DEFINE_MUTEX (userspace_mutex);
 static int cpus_using_userspace_governor;
 
-#define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_GOVERNOR, "userspace", msg)
+#define dprintk(msg...) \
+	cpufreq_debug_printk(CPUFREQ_DEBUG_GOVERNOR, "userspace", msg)
 
 /* keep track of frequency transitions */
 static int
@@ -48,12 +50,12 @@ userspace_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
 {
 	struct cpufreq_freqs *freq = data;
 
-	if (!cpu_is_managed[freq->cpu])
+	if (!per_cpu(cpu_is_managed, freq->cpu))
 		return 0;
 
 	dprintk("saving cpu_cur_freq of cpu %u to be %u kHz\n",
 		freq->cpu, freq->new);
-	cpu_cur_freq[freq->cpu] = freq->new;
+	per_cpu(cpu_cur_freq, freq->cpu) = freq->new;
 
 	return 0;
 }
@@ -77,15 +79,15 @@ static int cpufreq_set(struct cpufreq_policy *policy, unsigned int freq)
 	dprintk("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq);
 
 	mutex_lock(&userspace_mutex);
-	if (!cpu_is_managed[policy->cpu])
+	if (!per_cpu(cpu_is_managed, policy->cpu))
 		goto err;
 
-	cpu_set_freq[policy->cpu] = freq;
+	per_cpu(cpu_set_freq, policy->cpu) = freq;
 
-	if (freq < cpu_min_freq[policy->cpu])
-		freq = cpu_min_freq[policy->cpu];
-	if (freq > cpu_max_freq[policy->cpu])
-		freq = cpu_max_freq[policy->cpu];
+	if (freq < per_cpu(cpu_min_freq, policy->cpu))
+		freq = per_cpu(cpu_min_freq, policy->cpu);
+	if (freq > per_cpu(cpu_max_freq, policy->cpu))
+		freq = per_cpu(cpu_max_freq, policy->cpu);
 
 	/*
 	 * We're safe from concurrent calls to ->target() here
@@ -104,7 +106,7 @@ static int cpufreq_set(struct cpufreq_policy *policy, unsigned int freq)
 
 static ssize_t show_speed(struct cpufreq_policy *policy, char *buf)
 {
-	return sprintf(buf, "%u\n", cpu_cur_freq[policy->cpu]);
+	return sprintf(buf, "%u\n", per_cpu(cpu_cur_freq, policy->cpu));
 }
 
 static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
@@ -127,12 +129,17 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
 		}
 		cpus_using_userspace_governor++;
 
-		cpu_is_managed[cpu] = 1;
-		cpu_min_freq[cpu] = policy->min;
-		cpu_max_freq[cpu] = policy->max;
-		cpu_cur_freq[cpu] = policy->cur;
-		cpu_set_freq[cpu] = policy->cur;
-		dprintk("managing cpu %u started (%u - %u kHz, currently %u kHz)\n", cpu, cpu_min_freq[cpu], cpu_max_freq[cpu], cpu_cur_freq[cpu]);
+		per_cpu(cpu_is_managed, cpu) = 1;
+		per_cpu(cpu_min_freq, cpu) = policy->min;
+		per_cpu(cpu_max_freq, cpu) = policy->max;
+		per_cpu(cpu_cur_freq, cpu) = policy->cur;
+		per_cpu(cpu_set_freq, cpu) = policy->cur;
+		dprintk("managing cpu %u started "
+			"(%u - %u kHz, currently %u kHz)\n",
+				cpu,
+				per_cpu(cpu_min_freq, cpu),
+				per_cpu(cpu_max_freq, cpu),
+				per_cpu(cpu_cur_freq, cpu));
 
 		mutex_unlock(&userspace_mutex);
 		break;
@@ -145,34 +152,34 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
 					CPUFREQ_TRANSITION_NOTIFIER);
 		}
 
-		cpu_is_managed[cpu] = 0;
-		cpu_min_freq[cpu] = 0;
-		cpu_max_freq[cpu] = 0;
-		cpu_set_freq[cpu] = 0;
+		per_cpu(cpu_is_managed, cpu) = 0;
+		per_cpu(cpu_min_freq, cpu) = 0;
+		per_cpu(cpu_max_freq, cpu) = 0;
+		per_cpu(cpu_set_freq, cpu) = 0;
 		dprintk("managing cpu %u stopped\n", cpu);
 		mutex_unlock(&userspace_mutex);
 		break;
 	case CPUFREQ_GOV_LIMITS:
 		mutex_lock(&userspace_mutex);
-		dprintk("limit event for cpu %u: %u - %u kHz,"
+		dprintk("limit event for cpu %u: %u - %u kHz, "
 			"currently %u kHz, last set to %u kHz\n",
 			cpu, policy->min, policy->max,
-			cpu_cur_freq[cpu], cpu_set_freq[cpu]);
-		if (policy->max < cpu_set_freq[cpu]) {
+			per_cpu(cpu_cur_freq, cpu),
+			per_cpu(cpu_set_freq, cpu));
+		if (policy->max < per_cpu(cpu_set_freq, cpu)) {
 			__cpufreq_driver_target(policy, policy->max,
 						CPUFREQ_RELATION_H);
-		}
-		else if (policy->min > cpu_set_freq[cpu]) {
+		} else if (policy->min > per_cpu(cpu_set_freq, cpu)) {
 			__cpufreq_driver_target(policy, policy->min,
 						CPUFREQ_RELATION_L);
-		}
-		else {
-			__cpufreq_driver_target(policy, cpu_set_freq[cpu],
+		} else {
+			__cpufreq_driver_target(policy,
+					per_cpu(cpu_set_freq, cpu),
 						CPUFREQ_RELATION_L);
 		}
-		cpu_min_freq[cpu] = policy->min;
-		cpu_max_freq[cpu] = policy->max;
-		cpu_cur_freq[cpu] = policy->cur;
+		per_cpu(cpu_min_freq, cpu) = policy->min;
+		per_cpu(cpu_max_freq, cpu) = policy->max;
+		per_cpu(cpu_cur_freq, cpu) = policy->cur;
 		mutex_unlock(&userspace_mutex);
 		break;
 	}
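
The cpufreq_userspace.c portion is a mechanical conversion from NR_CPUS-sized arrays indexed by CPU number to DEFINE_PER_CPU() variables read and written through per_cpu(). A before/after sketch under hypothetical names (example_freq_array, example_freq, record_freq, none of which appear in the patch):

/* Hypothetical sketch of the storage conversion, not code from the patch. */

/* old style: a compile-time array with one slot per possible CPU id */
static unsigned int example_freq_array[NR_CPUS];

/* new style: per-CPU storage declared with the per_cpu API */
static DEFINE_PER_CPU(unsigned int, example_freq);

static void record_freq(unsigned int cpu, unsigned int khz)
{
	example_freq_array[cpu] = khz;		/* old accessor: plain array indexing */
	per_cpu(example_freq, cpu) = khz;	/* new accessor: per_cpu(var, cpu) */
}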