Diffstat (limited to 'drivers/cpufreq')

-rw-r--r--   drivers/cpufreq/cpufreq.c               |  95
-rw-r--r--   drivers/cpufreq/cpufreq_conservative.c  |  12
-rw-r--r--   drivers/cpufreq/cpufreq_ondemand.c      |  15

3 files changed, 21 insertions(+), 101 deletions(-)
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index fd69086d08d5..2968ed6a9c49 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1250,20 +1250,11 @@ static int cpufreq_suspend(struct sys_device *sysdev, pm_message_t pmsg)
 {
 	int ret = 0;
 
-#ifdef __powerpc__
 	int cpu = sysdev->id;
-	unsigned int cur_freq = 0;
 	struct cpufreq_policy *cpu_policy;
 
 	dprintk("suspending cpu %u\n", cpu);
 
-	/*
-	 * This whole bogosity is here because Powerbooks are made of fail.
-	 * No sane platform should need any of the code below to be run.
-	 * (it's entirely the wrong thing to do, as driver->get may
-	 *  reenable interrupts on some architectures).
-	 */
-
 	if (!cpu_online(cpu))
 		return 0;
 
@@ -1282,47 +1273,13 @@ static int cpufreq_suspend(struct sys_device *sysdev, pm_message_t pmsg)
 
 	if (cpufreq_driver->suspend) {
 		ret = cpufreq_driver->suspend(cpu_policy, pmsg);
-		if (ret) {
+		if (ret)
 			printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
 				"step on CPU %u\n", cpu_policy->cpu);
-			goto out;
-		}
-	}
-
-	if (cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)
-		goto out;
-
-	if (cpufreq_driver->get)
-		cur_freq = cpufreq_driver->get(cpu_policy->cpu);
-
-	if (!cur_freq || !cpu_policy->cur) {
-		printk(KERN_ERR "cpufreq: suspend failed to assert current "
-			"frequency is what timing core thinks it is.\n");
-		goto out;
-	}
-
-	if (unlikely(cur_freq != cpu_policy->cur)) {
-		struct cpufreq_freqs freqs;
-
-		if (!(cpufreq_driver->flags & CPUFREQ_PM_NO_WARN))
-			dprintk("Warning: CPU frequency is %u, "
-				"cpufreq assumed %u kHz.\n",
-				cur_freq, cpu_policy->cur);
-
-		freqs.cpu = cpu;
-		freqs.old = cpu_policy->cur;
-		freqs.new = cur_freq;
-
-		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
-			CPUFREQ_SUSPENDCHANGE, &freqs);
-		adjust_jiffies(CPUFREQ_SUSPENDCHANGE, &freqs);
-
-		cpu_policy->cur = cur_freq;
 	}
 
 out:
 	cpufreq_cpu_put(cpu_policy);
-#endif /* __powerpc__ */
 	return ret;
 }
 
@@ -1330,24 +1287,21 @@ out:
  * cpufreq_resume - restore proper CPU frequency handling after resume
  *
  *	1.) resume CPUfreq hardware support (cpufreq_driver->resume())
- *	2.) if ->target and !CPUFREQ_CONST_LOOPS: verify we're in sync
- *	3.) schedule call cpufreq_update_policy() ASAP as interrupts are
- *	    restored.
+ *	2.) schedule call cpufreq_update_policy() ASAP as interrupts are
+ *	    restored. It will verify that the current freq is in sync with
+ *	    what we believe it to be. This is a bit later than when it
+ *	    should be, but nonetheless it's better than calling
+ *	    cpufreq_driver->get() here which might re-enable interrupts...
  */
 static int cpufreq_resume(struct sys_device *sysdev)
 {
 	int ret = 0;
 
-#ifdef __powerpc__
 	int cpu = sysdev->id;
 	struct cpufreq_policy *cpu_policy;
 
 	dprintk("resuming cpu %u\n", cpu);
 
-	/* As with the ->suspend method, all the code below is
-	 * only necessary because Powerbooks suck.
-	 * See commit 42d4dc3f4e1e for jokes. */
-
 	if (!cpu_online(cpu))
 		return 0;
 
@@ -1373,45 +1327,10 @@ static int cpufreq_resume(struct sys_device *sysdev)
 		}
 	}
 
-	if (!(cpufreq_driver->flags & CPUFREQ_CONST_LOOPS)) {
-		unsigned int cur_freq = 0;
-
-		if (cpufreq_driver->get)
-			cur_freq = cpufreq_driver->get(cpu_policy->cpu);
-
-		if (!cur_freq || !cpu_policy->cur) {
-			printk(KERN_ERR "cpufreq: resume failed to assert "
-				"current frequency is what timing core "
-				"thinks it is.\n");
-			goto out;
-		}
-
-		if (unlikely(cur_freq != cpu_policy->cur)) {
-			struct cpufreq_freqs freqs;
-
-			if (!(cpufreq_driver->flags & CPUFREQ_PM_NO_WARN))
-				dprintk("Warning: CPU frequency "
-					"is %u, cpufreq assumed %u kHz.\n",
-					cur_freq, cpu_policy->cur);
-
-			freqs.cpu = cpu;
-			freqs.old = cpu_policy->cur;
-			freqs.new = cur_freq;
-
-			srcu_notifier_call_chain(
-					&cpufreq_transition_notifier_list,
-					CPUFREQ_RESUMECHANGE, &freqs);
-			adjust_jiffies(CPUFREQ_RESUMECHANGE, &freqs);
-
-			cpu_policy->cur = cur_freq;
-		}
-	}
-
-out:
 	schedule_work(&cpu_policy->update);
+
 fail:
 	cpufreq_cpu_put(cpu_policy);
-#endif /* __powerpc__ */
 	return ret;
 }
 
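The cpufreq.c change drops the PowerBook-only workaround that read the hardware frequency with cpufreq_driver->get() during suspend/resume and fired SUSPENDCHANGE/RESUMECHANGE notifiers to re-sync it. After the patch, suspend only calls the driver's ->suspend hook, and resume defers the sanity check to cpufreq_update_policy(), run from a workqueue once interrupts are restored. A condensed sketch of the resulting resume path follows (not the verbatim kernel source; the !cpu_online() early return, refcount failure handling, and the ->resume error path are trimmed):

static int cpufreq_resume(struct sys_device *sysdev)
{
	unsigned int cpu = sysdev->id;
	struct cpufreq_policy *cpu_policy = cpufreq_cpu_get(cpu);

	/* Let the driver restore its own hardware state first. */
	if (cpufreq_driver->resume)
		cpufreq_driver->resume(cpu_policy);

	/*
	 * Defer "is the hardware frequency still what we think it is?"
	 * to cpufreq_update_policy(), scheduled via the policy's update
	 * work item, instead of calling ->get() with interrupts off.
	 */
	schedule_work(&cpu_policy->update);

	cpufreq_cpu_put(cpu_policy);
	return 0;
}
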
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index bdea7e2f94ba..bc33ddc9c97c 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -71,7 +71,7 @@ struct cpu_dbs_info_s {
 	 */
 	struct mutex timer_mutex;
 };
-static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
+static DEFINE_PER_CPU(struct cpu_dbs_info_s, cs_cpu_dbs_info);
 
 static unsigned int dbs_enable;	/* number of CPUs using this policy */
 
@@ -137,7 +137,7 @@ dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
 			     void *data)
 {
 	struct cpufreq_freqs *freq = data;
-	struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cpu_dbs_info,
+	struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cs_cpu_dbs_info,
 							freq->cpu);
 
 	struct cpufreq_policy *policy;
@@ -297,7 +297,7 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
 	/* we need to re-evaluate prev_cpu_idle */
 	for_each_online_cpu(j) {
 		struct cpu_dbs_info_s *dbs_info;
-		dbs_info = &per_cpu(cpu_dbs_info, j);
+		dbs_info = &per_cpu(cs_cpu_dbs_info, j);
 		dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
 						&dbs_info->prev_cpu_wall);
 		if (dbs_tuners_ins.ignore_nice)
@@ -387,7 +387,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 		cputime64_t cur_wall_time, cur_idle_time;
 		unsigned int idle_time, wall_time;
 
-		j_dbs_info = &per_cpu(cpu_dbs_info, j);
+		j_dbs_info = &per_cpu(cs_cpu_dbs_info, j);
 
 		cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
 
@@ -521,7 +521,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 	unsigned int j;
 	int rc;
 
-	this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
+	this_dbs_info = &per_cpu(cs_cpu_dbs_info, cpu);
 
 	switch (event) {
 	case CPUFREQ_GOV_START:
@@ -538,7 +538,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 
 		for_each_cpu(j, policy->cpus) {
 			struct cpu_dbs_info_s *j_dbs_info;
-			j_dbs_info = &per_cpu(cpu_dbs_info, j);
+			j_dbs_info = &per_cpu(cs_cpu_dbs_info, j);
 			j_dbs_info->cur_policy = policy;
 
 			j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index d6ba14276bb1..d7a528c80de8 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -78,7 +78,7 @@ struct cpu_dbs_info_s {
 	 */
 	struct mutex timer_mutex;
 };
-static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
+static DEFINE_PER_CPU(struct cpu_dbs_info_s, od_cpu_dbs_info);
 
 static unsigned int dbs_enable;	/* number of CPUs using this policy */
 
@@ -149,7 +149,8 @@ static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
 	unsigned int freq_hi, freq_lo;
 	unsigned int index = 0;
 	unsigned int jiffies_total, jiffies_hi, jiffies_lo;
-	struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, policy->cpu);
+	struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info,
+						   policy->cpu);
 
 	if (!dbs_info->freq_table) {
 		dbs_info->freq_lo = 0;
@@ -192,7 +193,7 @@ static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
 
 static void ondemand_powersave_bias_init_cpu(int cpu)
 {
-	struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu);
+	struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
 	dbs_info->freq_table = cpufreq_frequency_get_table(cpu);
 	dbs_info->freq_lo = 0;
 }
@@ -297,7 +298,7 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
 	/* we need to re-evaluate prev_cpu_idle */
 	for_each_online_cpu(j) {
 		struct cpu_dbs_info_s *dbs_info;
-		dbs_info = &per_cpu(cpu_dbs_info, j);
+		dbs_info = &per_cpu(od_cpu_dbs_info, j);
 		dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
 						&dbs_info->prev_cpu_wall);
 		if (dbs_tuners_ins.ignore_nice)
@@ -388,7 +389,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 		unsigned int load, load_freq;
 		int freq_avg;
 
-		j_dbs_info = &per_cpu(cpu_dbs_info, j);
+		j_dbs_info = &per_cpu(od_cpu_dbs_info, j);
 
 		cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
 
@@ -535,7 +536,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 	unsigned int j;
 	int rc;
 
-	this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
+	this_dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
 
 	switch (event) {
 	case CPUFREQ_GOV_START:
@@ -553,7 +554,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		dbs_enable++;
 		for_each_cpu(j, policy->cpus) {
 			struct cpu_dbs_info_s *j_dbs_info;
-			j_dbs_info = &per_cpu(cpu_dbs_info, j);
+			j_dbs_info = &per_cpu(od_cpu_dbs_info, j);
 			j_dbs_info->cur_policy = policy;
 
 			j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
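In both governors the change is purely a rename of the per-CPU state: the conservative governor's DEFINE_PER_CPU variable becomes cs_cpu_dbs_info, ondemand's becomes od_cpu_dbs_info, and every per_cpu() lookup is updated to match. Giving each governor its own prefix reads as keeping the two static per-cpu symbols from sharing one name, since per-cpu symbols effectively live in a common namespace. A minimal kernel-style sketch of the pattern (the od_dbs_info() helper is hypothetical, added only for illustration):

#include <linux/percpu.h>

struct cpu_dbs_info_s {
	struct cpufreq_policy *cur_policy;
	/* ... per-governor bookkeeping ... */
};

/* Prefix the per-cpu variable with the governor name ("od_" here). */
static DEFINE_PER_CPU(struct cpu_dbs_info_s, od_cpu_dbs_info);

static struct cpu_dbs_info_s *od_dbs_info(unsigned int cpu)
{
	/* per_cpu() is keyed by the variable name itself, so that name
	 * must not collide with another governor's per-cpu state. */
	return &per_cpu(od_cpu_dbs_info, cpu);
}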