Diffstat (limited to 'drivers/cpufreq/cpufreq_ondemand.c')
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c | 64
1 file changed, 32 insertions(+), 32 deletions(-)
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index f697449327c6..d60bcb9d14cc 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -52,19 +52,20 @@ static unsigned int def_sampling_rate;
 static void do_dbs_timer(struct work_struct *work);
 
 /* Sampling types */
-enum dbs_sample {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};
+enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};
 
 struct cpu_dbs_info_s {
 	cputime64_t prev_cpu_idle;
 	cputime64_t prev_cpu_wall;
 	struct cpufreq_policy *cur_policy;
 	struct delayed_work work;
-	enum dbs_sample sample_type;
-	unsigned int enable;
 	struct cpufreq_frequency_table *freq_table;
 	unsigned int freq_lo;
 	unsigned int freq_lo_jiffies;
 	unsigned int freq_hi_jiffies;
+	int cpu;
+	unsigned int enable:1,
+		     sample_type:1;
 };
 static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
 
@@ -402,7 +403,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 	if (load < (dbs_tuners_ins.up_threshold - 10)) {
 		unsigned int freq_next, freq_cur;
 
-		freq_cur = cpufreq_driver_getavg(policy);
+		freq_cur = __cpufreq_driver_getavg(policy);
 		if (!freq_cur)
 			freq_cur = policy->cur;
 
@@ -423,9 +424,11 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 
 static void do_dbs_timer(struct work_struct *work)
 {
-	unsigned int cpu = smp_processor_id();
-	struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu);
-	enum dbs_sample sample_type = dbs_info->sample_type;
+	struct cpu_dbs_info_s *dbs_info =
+		container_of(work, struct cpu_dbs_info_s, work.work);
+	unsigned int cpu = dbs_info->cpu;
+	int sample_type = dbs_info->sample_type;
+
 	/* We want all CPUs to do sampling nearly on same jiffy */
 	int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
 
@@ -434,15 +437,19 @@ static void do_dbs_timer(struct work_struct *work)
 
 	delay -= jiffies % delay;
 
-	if (!dbs_info->enable)
+	if (lock_policy_rwsem_write(cpu) < 0)
+		return;
+
+	if (!dbs_info->enable) {
+		unlock_policy_rwsem_write(cpu);
 		return;
+	}
+
 	/* Common NORMAL_SAMPLE setup */
 	dbs_info->sample_type = DBS_NORMAL_SAMPLE;
 	if (!dbs_tuners_ins.powersave_bias ||
 	    sample_type == DBS_NORMAL_SAMPLE) {
-		lock_cpu_hotplug();
 		dbs_check_cpu(dbs_info);
-		unlock_cpu_hotplug();
 		if (dbs_info->freq_lo) {
 			/* Setup timer for SUB_SAMPLE */
 			dbs_info->sample_type = DBS_SUB_SAMPLE;
@@ -454,26 +461,27 @@ static void do_dbs_timer(struct work_struct *work)
 					CPUFREQ_RELATION_H);
 	}
 	queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay);
+	unlock_policy_rwsem_write(cpu);
 }
 
-static inline void dbs_timer_init(unsigned int cpu)
+static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
 {
-	struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu);
 	/* We want all CPUs to do sampling nearly on same jiffy */
 	int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
 	delay -= jiffies % delay;
 
+	dbs_info->enable = 1;
 	ondemand_powersave_bias_init();
-	INIT_DELAYED_WORK_NAR(&dbs_info->work, do_dbs_timer);
 	dbs_info->sample_type = DBS_NORMAL_SAMPLE;
-	queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay);
+	INIT_DELAYED_WORK_NAR(&dbs_info->work, do_dbs_timer);
+	queue_delayed_work_on(dbs_info->cpu, kondemand_wq, &dbs_info->work,
+			      delay);
 }
 
 static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
 {
 	dbs_info->enable = 0;
 	cancel_delayed_work(&dbs_info->work);
-	flush_workqueue(kondemand_wq);
 }
 
 static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
@@ -502,21 +510,9 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 
 		mutex_lock(&dbs_mutex);
 		dbs_enable++;
-		if (dbs_enable == 1) {
-			kondemand_wq = create_workqueue("kondemand");
-			if (!kondemand_wq) {
-				printk(KERN_ERR
-					"Creation of kondemand failed\n");
-				dbs_enable--;
-				mutex_unlock(&dbs_mutex);
-				return -ENOSPC;
-			}
-		}
 
 		rc = sysfs_create_group(&policy->kobj, &dbs_attr_group);
 		if (rc) {
-			if (dbs_enable == 1)
-				destroy_workqueue(kondemand_wq);
 			dbs_enable--;
 			mutex_unlock(&dbs_mutex);
 			return rc;
@@ -530,7 +526,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 			j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j);
 			j_dbs_info->prev_cpu_wall = get_jiffies_64();
 		}
-		this_dbs_info->enable = 1;
+		this_dbs_info->cpu = cpu;
 		/*
 		 * Start the timerschedule work, when this governor
 		 * is used for first time
@@ -550,7 +546,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 
 			dbs_tuners_ins.sampling_rate = def_sampling_rate;
 		}
-		dbs_timer_init(policy->cpu);
+		dbs_timer_init(this_dbs_info);
 
 		mutex_unlock(&dbs_mutex);
 		break;
@@ -560,9 +556,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		dbs_timer_exit(this_dbs_info);
 		sysfs_remove_group(&policy->kobj, &dbs_attr_group);
 		dbs_enable--;
-		if (dbs_enable == 0)
-			destroy_workqueue(kondemand_wq);
-
 		mutex_unlock(&dbs_mutex);
 
 		break;
@@ -591,12 +584,18 @@ static struct cpufreq_governor cpufreq_gov_dbs = {
 
 static int __init cpufreq_gov_dbs_init(void)
 {
+	kondemand_wq = create_workqueue("kondemand");
+	if (!kondemand_wq) {
+		printk(KERN_ERR "Creation of kondemand failed\n");
+		return -EFAULT;
+	}
 	return cpufreq_register_governor(&cpufreq_gov_dbs);
 }
 
 static void __exit cpufreq_gov_dbs_exit(void)
 {
 	cpufreq_unregister_governor(&cpufreq_gov_dbs);
+	destroy_workqueue(kondemand_wq);
 }
 
 
@@ -608,3 +607,4 @@ MODULE_LICENSE("GPL");
 
 module_init(cpufreq_gov_dbs_init);
 module_exit(cpufreq_gov_dbs_exit);
+