author     Ingo Molnar <mingo@elte.hu>    2009-07-18 09:50:22 -0400
committer  Ingo Molnar <mingo@elte.hu>    2009-07-18 09:50:40 -0400
commit     5304d5fc74a269cc6c3e70f9713684ca729abdf0 (patch)
tree       6a5db62915abd260241a2b9aee34086c93293ca6 /drivers/cpufreq
parent     54d35f29f49224d86b994acb6e5969b9ba09022d (diff)
parent     78af08d90b8f745044b1274430bc4bc6b2b27aca (diff)
Merge branch 'linus' into sched/core
Merge reason: branch had an old upstream base (-rc1-ish), but also
merge to avoid a conflict.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'drivers/cpufreq')
-rw-r--r--   drivers/cpufreq/cpufreq.c                |  76
-rw-r--r--   drivers/cpufreq/cpufreq_conservative.c   |  49
-rw-r--r--   drivers/cpufreq/cpufreq_ondemand.c       |  77
3 files changed, 96 insertions, 106 deletions
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 6e2ec0b18948..b90eda8b3440 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -761,6 +761,10 @@ static struct kobj_type ktype_cpufreq = {
  * cpufreq_add_dev - add a CPU device
  *
  * Adds the cpufreq interface for a CPU device.
+ *
+ * The Oracle says: try running cpufreq registration/unregistration concurrently
+ * with with cpu hotplugging and all hell will break loose. Tried to clean this
+ * mess up, but more thorough testing is needed. - Mathieu
  */
 static int cpufreq_add_dev(struct sys_device *sys_dev)
 {
@@ -772,9 +776,6 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
         struct sys_device *cpu_sys_dev;
         unsigned long flags;
         unsigned int j;
-#ifdef CONFIG_SMP
-        struct cpufreq_policy *managed_policy;
-#endif

         if (cpu_is_offline(cpu))
                 return 0;
@@ -804,15 +805,12 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
                 goto nomem_out;
         }
         if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL)) {
-                kfree(policy);
                 ret = -ENOMEM;
-                goto nomem_out;
+                goto err_free_policy;
         }
         if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL)) {
-                free_cpumask_var(policy->cpus);
-                kfree(policy);
                 ret = -ENOMEM;
-                goto nomem_out;
+                goto err_free_cpumask;
         }

         policy->cpu = cpu;
@@ -820,7 +818,8 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)

         /* Initially set CPU itself as the policy_cpu */
         per_cpu(policy_cpu, cpu) = cpu;
-        lock_policy_rwsem_write(cpu);
+        ret = (lock_policy_rwsem_write(cpu) < 0);
+        WARN_ON(ret);

         init_completion(&policy->kobj_unregister);
         INIT_WORK(&policy->update, handle_update);
@@ -833,7 +832,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
         ret = cpufreq_driver->init(policy);
         if (ret) {
                 dprintk("initialization failed\n");
-                goto err_out;
+                goto err_unlock_policy;
         }
         policy->user_policy.min = policy->min;
         policy->user_policy.max = policy->max;
@@ -852,21 +851,29 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
 #endif

         for_each_cpu(j, policy->cpus) {
+                struct cpufreq_policy *managed_policy;
+
                 if (cpu == j)
                         continue;

                 /* Check for existing affected CPUs.
                  * They may not be aware of it due to CPU Hotplug.
                  */
-                managed_policy = cpufreq_cpu_get(j); /* FIXME: Where is this released? What about error paths? */
+                managed_policy = cpufreq_cpu_get(j);
                 if (unlikely(managed_policy)) {

                         /* Set proper policy_cpu */
                         unlock_policy_rwsem_write(cpu);
                         per_cpu(policy_cpu, cpu) = managed_policy->cpu;

-                        if (lock_policy_rwsem_write(cpu) < 0)
-                                goto err_out_driver_exit;
+                        if (lock_policy_rwsem_write(cpu) < 0) {
+                                /* Should not go through policy unlock path */
+                                if (cpufreq_driver->exit)
+                                        cpufreq_driver->exit(policy);
+                                ret = -EBUSY;
+                                cpufreq_cpu_put(managed_policy);
+                                goto err_free_cpumask;
+                        }

                         spin_lock_irqsave(&cpufreq_driver_lock, flags);
                         cpumask_copy(managed_policy->cpus, policy->cpus);
@@ -877,12 +884,14 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
                         ret = sysfs_create_link(&sys_dev->kobj,
                                                 &managed_policy->kobj,
                                                 "cpufreq");
-                        if (ret)
-                                goto err_out_driver_exit;
-
-                        cpufreq_debug_enable_ratelimit();
-                        ret = 0;
-                        goto err_out_driver_exit; /* call driver->exit() */
+                        if (!ret)
+                                cpufreq_cpu_put(managed_policy);
+                        /*
+                         * Success. We only needed to be added to the mask.
+                         * Call driver->exit() because only the cpu parent of
+                         * the kobj needed to call init().
+                         */
+                        goto out_driver_exit; /* call driver->exit() */
                 }
         }
 #endif
@@ -892,25 +901,25 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
         ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, &sys_dev->kobj,
                                    "cpufreq");
         if (ret)
-                goto err_out_driver_exit;
+                goto out_driver_exit;

         /* set up files for this cpu device */
         drv_attr = cpufreq_driver->attr;
         while ((drv_attr) && (*drv_attr)) {
                 ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
                 if (ret)
-                        goto err_out_driver_exit;
+                        goto err_out_kobj_put;
                 drv_attr++;
         }
         if (cpufreq_driver->get) {
                 ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
                 if (ret)
-                        goto err_out_driver_exit;
+                        goto err_out_kobj_put;
         }
         if (cpufreq_driver->target) {
                 ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
                 if (ret)
-                        goto err_out_driver_exit;
+                        goto err_out_kobj_put;
         }

         spin_lock_irqsave(&cpufreq_driver_lock, flags);
@@ -922,18 +931,22 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)

         /* symlink affected CPUs */
         for_each_cpu(j, policy->cpus) {
+                struct cpufreq_policy *managed_policy;
+
                 if (j == cpu)
                         continue;
                 if (!cpu_online(j))
                         continue;

                 dprintk("CPU %u already managed, adding link\n", j);
-                cpufreq_cpu_get(cpu);
+                managed_policy = cpufreq_cpu_get(cpu);
                 cpu_sys_dev = get_cpu_sysdev(j);
                 ret = sysfs_create_link(&cpu_sys_dev->kobj, &policy->kobj,
                                         "cpufreq");
-                if (ret)
+                if (ret) {
+                        cpufreq_cpu_put(managed_policy);
                         goto err_out_unregister;
+                }
         }

         policy->governor = NULL; /* to assure that the starting sequence is
@@ -965,17 +978,20 @@ err_out_unregister:
                 per_cpu(cpufreq_cpu_data, j) = NULL;
         spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

+err_out_kobj_put:
         kobject_put(&policy->kobj);
         wait_for_completion(&policy->kobj_unregister);

-err_out_driver_exit:
+out_driver_exit:
         if (cpufreq_driver->exit)
                 cpufreq_driver->exit(policy);

-err_out:
+err_unlock_policy:
         unlock_policy_rwsem_write(cpu);
+err_free_cpumask:
+        free_cpumask_var(policy->cpus);
+err_free_policy:
         kfree(policy);
-
 nomem_out:
         module_put(cpufreq_driver->owner);
 module_out:
@@ -1070,8 +1086,6 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
         spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 #endif

-        unlock_policy_rwsem_write(cpu);
-
         if (cpufreq_driver->target)
                 __cpufreq_governor(data, CPUFREQ_GOV_STOP);

@@ -1088,6 +1102,8 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
         if (cpufreq_driver->exit)
                 cpufreq_driver->exit(data);

+        unlock_policy_rwsem_write(cpu);
+
         free_cpumask_var(data->related_cpus);
         free_cpumask_var(data->cpus);
         kfree(data);
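
The cpufreq_add_dev() rework above replaces the per-site cleanup (the duplicated kfree()/free_cpumask_var() calls at each failure point) with a ladder of error labels, each undoing exactly one earlier step, plus a separate out_driver_exit label for the path that is not an error. The following userspace sketch illustrates only that unwinding idiom; the names (policy_create, the calloc-backed masks) are invented stand-ins, not the kernel code.

    #include <errno.h>
    #include <stdlib.h>

    struct policy {
            unsigned long *cpus;            /* stand-in for policy->cpus */
            unsigned long *related_cpus;    /* stand-in for policy->related_cpus */
    };

    /* Mirror of the goto ladder: each label frees exactly what was
     * acquired before the failing step, so every failure path releases
     * everything once and nothing twice. */
    static int policy_create(struct policy **out, size_t mask_words)
    {
            struct policy *policy;
            int ret = -ENOMEM;

            policy = calloc(1, sizeof(*policy));
            if (!policy)
                    goto nomem_out;

            policy->cpus = calloc(mask_words, sizeof(unsigned long));
            if (!policy->cpus)
                    goto err_free_policy;

            policy->related_cpus = calloc(mask_words, sizeof(unsigned long));
            if (!policy->related_cpus)
                    goto err_free_cpumask;

            *out = policy;
            return 0;

    err_free_cpumask:
            free(policy->cpus);
    err_free_policy:
            free(policy);
    nomem_out:
            return ret;
    }

Because the labels are ordered inversely to the acquisitions, a failure at any step falls through only the releases that are actually needed, which is why the patch can drop the per-site cleanup without leaking.
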
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 7fc58af748b4..57490502b21c 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -63,22 +63,20 @@ struct cpu_dbs_info_s {
         unsigned int down_skip;
         unsigned int requested_freq;
         int cpu;
-        unsigned int enable:1;
+        /*
+         * percpu mutex that serializes governor limit change with
+         * do_dbs_timer invocation. We do not want do_dbs_timer to run
+         * when user is changing the governor or limits.
+         */
+        struct mutex timer_mutex;
 };
 static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);

 static unsigned int dbs_enable; /* number of CPUs using this policy */

 /*
- * DEADLOCK ALERT! There is a ordering requirement between cpu_hotplug
- * lock and dbs_mutex. cpu_hotplug lock should always be held before
- * dbs_mutex. If any function that can potentially take cpu_hotplug lock
- * (like __cpufreq_driver_target()) is being called with dbs_mutex taken, then
- * cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock
- * is recursive for the same process. -Venki
- * DEADLOCK ALERT! (2) : do_dbs_timer() must not take the dbs_mutex, because it
- * would deadlock with cancel_delayed_work_sync(), which is needed for proper
- * raceless workqueue teardown.
+ * dbs_mutex protects data in dbs_tuners_ins from concurrent changes on
+ * different CPUs. It protects dbs_enable in governor start/stop.
  */
 static DEFINE_MUTEX(dbs_mutex);

@@ -143,9 +141,6 @@ dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,

         struct cpufreq_policy *policy;

-        if (!this_dbs_info->enable)
-                return 0;
-
         policy = this_dbs_info->cur_policy;

         /*
@@ -488,18 +483,12 @@ static void do_dbs_timer(struct work_struct *work)

         delay -= jiffies % delay;

-        if (lock_policy_rwsem_write(cpu) < 0)
-                return;
-
-        if (!dbs_info->enable) {
-                unlock_policy_rwsem_write(cpu);
-                return;
-        }
+        mutex_lock(&dbs_info->timer_mutex);

         dbs_check_cpu(dbs_info);

         queue_delayed_work_on(cpu, kconservative_wq, &dbs_info->work, delay);
-        unlock_policy_rwsem_write(cpu);
+        mutex_unlock(&dbs_info->timer_mutex);
 }

 static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
@@ -508,7 +497,6 @@ static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
         int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
         delay -= jiffies % delay;

-        dbs_info->enable = 1;
         INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
         queue_delayed_work_on(dbs_info->cpu, kconservative_wq, &dbs_info->work,
                               delay);
@@ -516,7 +504,6 @@ static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)

 static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
 {
-        dbs_info->enable = 0;
         cancel_delayed_work_sync(&dbs_info->work);
 }

@@ -535,9 +522,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                 if ((!cpu_online(cpu)) || (!policy->cur))
                         return -EINVAL;

-                if (this_dbs_info->enable) /* Already enabled */
-                        break;
-
                 mutex_lock(&dbs_mutex);

                 rc = sysfs_create_group(&policy->kobj, &dbs_attr_group);
@@ -561,6 +545,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                 this_dbs_info->down_skip = 0;
                 this_dbs_info->requested_freq = policy->cur;

+                mutex_init(&this_dbs_info->timer_mutex);
                 dbs_enable++;
                 /*
                  * Start the timerschedule work, when this governor
@@ -590,17 +575,19 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                                         &dbs_cpufreq_notifier_block,
                                         CPUFREQ_TRANSITION_NOTIFIER);
                 }
-                dbs_timer_init(this_dbs_info);
-
                 mutex_unlock(&dbs_mutex);

+                dbs_timer_init(this_dbs_info);
+
                 break;

         case CPUFREQ_GOV_STOP:
-                mutex_lock(&dbs_mutex);
                 dbs_timer_exit(this_dbs_info);
+
+                mutex_lock(&dbs_mutex);
                 sysfs_remove_group(&policy->kobj, &dbs_attr_group);
                 dbs_enable--;
+                mutex_destroy(&this_dbs_info->timer_mutex);

                 /*
                  * Stop the timerschedule work, when this governor
@@ -616,7 +603,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                 break;

         case CPUFREQ_GOV_LIMITS:
-                mutex_lock(&dbs_mutex);
+                mutex_lock(&this_dbs_info->timer_mutex);
                 if (policy->max < this_dbs_info->cur_policy->cur)
                         __cpufreq_driver_target(
                                         this_dbs_info->cur_policy,
@@ -625,7 +612,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                         __cpufreq_driver_target(
                                         this_dbs_info->cur_policy,
                                         policy->min, CPUFREQ_RELATION_L);
-                mutex_unlock(&dbs_mutex);
+                mutex_unlock(&this_dbs_info->timer_mutex);

                 break;
         }
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 1911d1729353..d6ba14276bb1 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -70,23 +70,21 @@ struct cpu_dbs_info_s {
         unsigned int freq_lo_jiffies;
         unsigned int freq_hi_jiffies;
         int cpu;
-        unsigned int enable:1,
-                sample_type:1;
+        unsigned int sample_type:1;
+        /*
+         * percpu mutex that serializes governor limit change with
+         * do_dbs_timer invocation. We do not want do_dbs_timer to run
+         * when user is changing the governor or limits.
+         */
+        struct mutex timer_mutex;
 };
 static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);

 static unsigned int dbs_enable; /* number of CPUs using this policy */

 /*
- * DEADLOCK ALERT! There is a ordering requirement between cpu_hotplug
- * lock and dbs_mutex. cpu_hotplug lock should always be held before
- * dbs_mutex. If any function that can potentially take cpu_hotplug lock
- * (like __cpufreq_driver_target()) is being called with dbs_mutex taken, then
- * cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock
- * is recursive for the same process. -Venki
- * DEADLOCK ALERT! (2) : do_dbs_timer() must not take the dbs_mutex, because it
- * would deadlock with cancel_delayed_work_sync(), which is needed for proper
- * raceless workqueue teardown.
+ * dbs_mutex protects data in dbs_tuners_ins from concurrent changes on
+ * different CPUs. It protects dbs_enable in governor start/stop.
  */
 static DEFINE_MUTEX(dbs_mutex);

@@ -192,13 +190,18 @@ static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
         return freq_hi;
 }

+static void ondemand_powersave_bias_init_cpu(int cpu)
+{
+        struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu);
+        dbs_info->freq_table = cpufreq_frequency_get_table(cpu);
+        dbs_info->freq_lo = 0;
+}
+
 static void ondemand_powersave_bias_init(void)
 {
         int i;
         for_each_online_cpu(i) {
-                struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, i);
-                dbs_info->freq_table = cpufreq_frequency_get_table(i);
-                dbs_info->freq_lo = 0;
+                ondemand_powersave_bias_init_cpu(i);
         }
 }

@@ -240,12 +243,10 @@ static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
         unsigned int input;
         int ret;
         ret = sscanf(buf, "%u", &input);
+        if (ret != 1)
+                return -EINVAL;

         mutex_lock(&dbs_mutex);
-        if (ret != 1) {
-                mutex_unlock(&dbs_mutex);
-                return -EINVAL;
-        }
         dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate);
         mutex_unlock(&dbs_mutex);

@@ -259,13 +260,12 @@ static ssize_t store_up_threshold(struct cpufreq_policy *unused,
         int ret;
         ret = sscanf(buf, "%u", &input);

-        mutex_lock(&dbs_mutex);
         if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
                         input < MIN_FREQUENCY_UP_THRESHOLD) {
-                mutex_unlock(&dbs_mutex);
                 return -EINVAL;
         }

+        mutex_lock(&dbs_mutex);
         dbs_tuners_ins.up_threshold = input;
         mutex_unlock(&dbs_mutex);

@@ -363,9 +363,6 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
         struct cpufreq_policy *policy;
         unsigned int j;

-        if (!this_dbs_info->enable)
-                return;
-
         this_dbs_info->freq_lo = 0;
         policy = this_dbs_info->cur_policy;

@@ -493,14 +490,7 @@ static void do_dbs_timer(struct work_struct *work)
         int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);

         delay -= jiffies % delay;
-
-        if (lock_policy_rwsem_write(cpu) < 0)
-                return;
-
-        if (!dbs_info->enable) {
-                unlock_policy_rwsem_write(cpu);
-                return;
-        }
+        mutex_lock(&dbs_info->timer_mutex);

         /* Common NORMAL_SAMPLE setup */
         dbs_info->sample_type = DBS_NORMAL_SAMPLE;
@@ -517,7 +507,7 @@ static void do_dbs_timer(struct work_struct *work)
                                 dbs_info->freq_lo, CPUFREQ_RELATION_H);
         }
         queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay);
-        unlock_policy_rwsem_write(cpu);
+        mutex_unlock(&dbs_info->timer_mutex);
 }

 static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
@@ -526,8 +516,6 @@ static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
         int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
         delay -= jiffies % delay;

-        dbs_info->enable = 1;
-        ondemand_powersave_bias_init();
         dbs_info->sample_type = DBS_NORMAL_SAMPLE;
         INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
         queue_delayed_work_on(dbs_info->cpu, kondemand_wq, &dbs_info->work,
@@ -536,7 +524,6 @@ static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)

 static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
 {
-        dbs_info->enable = 0;
         cancel_delayed_work_sync(&dbs_info->work);
 }

@@ -555,19 +542,15 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                 if ((!cpu_online(cpu)) || (!policy->cur))
                         return -EINVAL;

-                if (this_dbs_info->enable) /* Already enabled */
-                        break;
-
                 mutex_lock(&dbs_mutex);
-                dbs_enable++;

                 rc = sysfs_create_group(&policy->kobj, &dbs_attr_group);
                 if (rc) {
-                        dbs_enable--;
                         mutex_unlock(&dbs_mutex);
                         return rc;
                 }

+                dbs_enable++;
                 for_each_cpu(j, policy->cpus) {
                         struct cpu_dbs_info_s *j_dbs_info;
                         j_dbs_info = &per_cpu(cpu_dbs_info, j);
@@ -581,6 +564,8 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                         }
                 }
                 this_dbs_info->cpu = cpu;
+                ondemand_powersave_bias_init_cpu(cpu);
+                mutex_init(&this_dbs_info->timer_mutex);
                 /*
                  * Start the timerschedule work, when this governor
                  * is used for first time
@@ -598,29 +583,31 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                                 max(min_sampling_rate,
                                     latency * LATENCY_MULTIPLIER);
                 }
-                dbs_timer_init(this_dbs_info);
-
                 mutex_unlock(&dbs_mutex);
+
+                dbs_timer_init(this_dbs_info);
                 break;

         case CPUFREQ_GOV_STOP:
-                mutex_lock(&dbs_mutex);
                 dbs_timer_exit(this_dbs_info);
+
+                mutex_lock(&dbs_mutex);
                 sysfs_remove_group(&policy->kobj, &dbs_attr_group);
+                mutex_destroy(&this_dbs_info->timer_mutex);
                 dbs_enable--;
                 mutex_unlock(&dbs_mutex);

                 break;

         case CPUFREQ_GOV_LIMITS:
-                mutex_lock(&dbs_mutex);
+                mutex_lock(&this_dbs_info->timer_mutex);
                 if (policy->max < this_dbs_info->cur_policy->cur)
                         __cpufreq_driver_target(this_dbs_info->cur_policy,
                                 policy->max, CPUFREQ_RELATION_H);
                 else if (policy->min > this_dbs_info->cur_policy->cur)
                         __cpufreq_driver_target(this_dbs_info->cur_policy,
                                 policy->min, CPUFREQ_RELATION_L);
-                mutex_unlock(&dbs_mutex);
+                mutex_unlock(&this_dbs_info->timer_mutex);
                 break;
         }
         return 0;
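
Both governor diffs follow the same locking scheme: dbs_mutex now only guards the shared tunables and dbs_enable, a new per-policy timer_mutex serializes do_dbs_timer() against CPUFREQ_GOV_LIMITS, and GOV_STOP cancels the delayed work with cancel_delayed_work_sync() before taking dbs_mutex, so the cancellation never waits on a worker that is blocked on a lock the stopper holds. The sketch below is only a userspace analogy of that ordering (pthreads standing in for the delayed work item and its cancellation); it is not kernel code and all names are invented.

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <unistd.h>

    struct governor {
            pthread_mutex_t timer_mutex;    /* plays the role of timer_mutex */
            pthread_t sampler;              /* plays the role of the delayed work */
            atomic_bool stop;
            int min, max, cur;
    };

    static void *sampler_fn(void *arg)
    {
            struct governor *g = arg;

            while (!atomic_load(&g->stop)) {
                    /* "do_dbs_timer": sample under the per-policy mutex */
                    pthread_mutex_lock(&g->timer_mutex);
                    if (g->cur > g->max)
                            g->cur = g->max;
                    if (g->cur < g->min)
                            g->cur = g->min;
                    pthread_mutex_unlock(&g->timer_mutex);
                    usleep(10000);          /* sampling period */
            }
            return NULL;
    }

    static void set_limits(struct governor *g, int min, int max)
    {
            /* "CPUFREQ_GOV_LIMITS": never runs concurrently with a sample */
            pthread_mutex_lock(&g->timer_mutex);
            g->min = min;
            g->max = max;
            pthread_mutex_unlock(&g->timer_mutex);
    }

    static void stop_governor(struct governor *g)
    {
            /* "GOV_STOP": stop the worker first, with no governor lock held */
            atomic_store(&g->stop, true);
            pthread_join(g->sampler, NULL);
            pthread_mutex_destroy(&g->timer_mutex);
    }

    int main(void)
    {
            struct governor g = { .min = 800, .max = 2000, .cur = 1500 };

            pthread_mutex_init(&g.timer_mutex, NULL);
            atomic_init(&g.stop, false);
            pthread_create(&g.sampler, NULL, sampler_fn, &g);

            set_limits(&g, 800, 1200);      /* like writing scaling_max_freq */
            usleep(50000);
            stop_governor(&g);
            printf("final frequency: %d\n", g.cur);
            return 0;
    }

The property mirrored here is the GOV_STOP ordering: the worker is stopped before any governor-wide lock is taken, so a worker currently blocked in mutex_lock() can still finish and the synchronous cancel cannot deadlock.
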