author    Tejun Heo <tj@kernel.org>   2009-08-14 01:41:02 -0400
committer Tejun Heo <tj@kernel.org>   2009-08-14 01:45:31 -0400
commit    384be2b18a5f9475eab9ca2bdfa95cc1a04ef59c (patch)
tree      04c93f391a1b65c8bf8d7ba8643c07d26c26590a /drivers/cpufreq
parent    a76761b621bcd8336065c4fe3a74f046858bc34c (diff)
parent    142d44b0dd6741a64a7bdbe029110e7c1dcf1d23 (diff)
Merge branch 'percpu-for-linus' into percpu-for-next
Conflicts:
        arch/sparc/kernel/smp_64.c
        arch/x86/kernel/cpu/perf_counter.c
        arch/x86/kernel/setup_percpu.c
        drivers/cpufreq/cpufreq_ondemand.c
        mm/percpu.c
Conflicts in the core and arch percpu code are mostly from commit
ed78e1e078dd44249f88b1dd8c76dafb39567161, which substituted many uses of
num_possible_cpus() with nr_cpu_ids. As the for-next branch has moved all
the first-chunk allocators into mm/percpu.c, those changes are moved
from arch code to mm/percpu.c.
Signed-off-by: Tejun Heo <tj@kernel.org>
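
For context on the nr_cpu_ids conversion mentioned above: nr_cpu_ids and num_possible_cpus() only agree when the possible-CPU map is dense. nr_cpu_ids is the highest possible CPU id plus one, i.e. the correct bound for indexing arrays sized by CPU id, while num_possible_cpus() merely counts the set bits. A minimal sketch, illustrative only and not part of this commit (cpu_refs is a hypothetical array):

/*
 * With cpu_possible_mask = {0, 1, 4}: num_possible_cpus() == 3 but
 * nr_cpu_ids == 5, and CPU 4 is still a valid index.  Loops indexed
 * by CPU id must therefore use nr_cpu_ids as the bound.
 */
static void *cpu_refs[NR_CPUS];

static void clear_cpu_refs(void)
{
        int cpu;

        for (cpu = 0; cpu < nr_cpu_ids; cpu++) /* not num_possible_cpus() */
                cpu_refs[cpu] = NULL;
}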
Diffstat (limited to 'drivers/cpufreq')
 -rw-r--r--  drivers/cpufreq/cpufreq.c               |  99
 -rw-r--r--  drivers/cpufreq/cpufreq_conservative.c  |  43
 -rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c      |  77
 3 files changed, 118 insertions(+), 101 deletions(-)
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 6e2ec0b18948..fd69086d08d5 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -761,6 +761,10 @@ static struct kobj_type ktype_cpufreq = {
  * cpufreq_add_dev - add a CPU device
  *
  * Adds the cpufreq interface for a CPU device.
+ *
+ * The Oracle says: try running cpufreq registration/unregistration concurrently
+ * with with cpu hotplugging and all hell will break loose. Tried to clean this
+ * mess up, but more thorough testing is needed. - Mathieu
  */
 static int cpufreq_add_dev(struct sys_device *sys_dev)
 {
@@ -772,9 +776,6 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
        struct sys_device *cpu_sys_dev;
        unsigned long flags;
        unsigned int j;
-#ifdef CONFIG_SMP
-       struct cpufreq_policy *managed_policy;
-#endif

        if (cpu_is_offline(cpu))
                return 0;
@@ -804,15 +805,12 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
                goto nomem_out;
        }
        if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL)) {
-               kfree(policy);
                ret = -ENOMEM;
-               goto nomem_out;
+               goto err_free_policy;
        }
        if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL)) {
-               free_cpumask_var(policy->cpus);
-               kfree(policy);
                ret = -ENOMEM;
-               goto nomem_out;
+               goto err_free_cpumask;
        }

        policy->cpu = cpu;
@@ -820,7 +818,8 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)

        /* Initially set CPU itself as the policy_cpu */
        per_cpu(policy_cpu, cpu) = cpu;
-       lock_policy_rwsem_write(cpu);
+       ret = (lock_policy_rwsem_write(cpu) < 0);
+       WARN_ON(ret);

        init_completion(&policy->kobj_unregister);
        INIT_WORK(&policy->update, handle_update);
@@ -833,7 +832,7 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
        ret = cpufreq_driver->init(policy);
        if (ret) {
                dprintk("initialization failed\n");
-               goto err_out;
+               goto err_unlock_policy;
        }
        policy->user_policy.min = policy->min;
        policy->user_policy.max = policy->max;
@@ -852,21 +851,31 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
 #endif

        for_each_cpu(j, policy->cpus) {
+               struct cpufreq_policy *managed_policy;
+
                if (cpu == j)
                        continue;

                /* Check for existing affected CPUs.
                 * They may not be aware of it due to CPU Hotplug.
+                * cpufreq_cpu_put is called when the device is removed
+                * in __cpufreq_remove_dev()
                 */
-               managed_policy = cpufreq_cpu_get(j); /* FIXME: Where is this released? What about error paths? */
+               managed_policy = cpufreq_cpu_get(j);
                if (unlikely(managed_policy)) {

                        /* Set proper policy_cpu */
                        unlock_policy_rwsem_write(cpu);
                        per_cpu(policy_cpu, cpu) = managed_policy->cpu;

-                       if (lock_policy_rwsem_write(cpu) < 0)
-                               goto err_out_driver_exit;
+                       if (lock_policy_rwsem_write(cpu) < 0) {
+                               /* Should not go through policy unlock path */
+                               if (cpufreq_driver->exit)
+                                       cpufreq_driver->exit(policy);
+                               ret = -EBUSY;
+                               cpufreq_cpu_put(managed_policy);
+                               goto err_free_cpumask;
+                       }

                        spin_lock_irqsave(&cpufreq_driver_lock, flags);
                        cpumask_copy(managed_policy->cpus, policy->cpus);
@@ -878,11 +887,13 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
                                                &managed_policy->kobj,
                                                "cpufreq");
                        if (ret)
-                               goto err_out_driver_exit;
-
-                       cpufreq_debug_enable_ratelimit();
-                       ret = 0;
-                       goto err_out_driver_exit; /* call driver->exit() */
+                               cpufreq_cpu_put(managed_policy);
+                       /*
+                        * Success. We only needed to be added to the mask.
+                        * Call driver->exit() because only the cpu parent of
+                        * the kobj needed to call init().
+                        */
+                       goto out_driver_exit; /* call driver->exit() */
                }
        }
 #endif
@@ -892,29 +903,31 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
        ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, &sys_dev->kobj,
                                   "cpufreq");
        if (ret)
-               goto err_out_driver_exit;
+               goto out_driver_exit;

        /* set up files for this cpu device */
        drv_attr = cpufreq_driver->attr;
        while ((drv_attr) && (*drv_attr)) {
                ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
                if (ret)
-                       goto err_out_driver_exit;
+                       goto err_out_kobj_put;
                drv_attr++;
        }
        if (cpufreq_driver->get) {
                ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
                if (ret)
-                       goto err_out_driver_exit;
+                       goto err_out_kobj_put;
        }
        if (cpufreq_driver->target) {
                ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
                if (ret)
-                       goto err_out_driver_exit;
+                       goto err_out_kobj_put;
        }

        spin_lock_irqsave(&cpufreq_driver_lock, flags);
        for_each_cpu(j, policy->cpus) {
+               if (!cpu_online(j))
+                       continue;
                per_cpu(cpufreq_cpu_data, j) = policy;
                per_cpu(policy_cpu, j) = policy->cpu;
        }
@@ -922,18 +935,22 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)

        /* symlink affected CPUs */
        for_each_cpu(j, policy->cpus) {
+               struct cpufreq_policy *managed_policy;
+
                if (j == cpu)
                        continue;
                if (!cpu_online(j))
                        continue;

                dprintk("CPU %u already managed, adding link\n", j);
-               cpufreq_cpu_get(cpu);
+               managed_policy = cpufreq_cpu_get(cpu);
                cpu_sys_dev = get_cpu_sysdev(j);
                ret = sysfs_create_link(&cpu_sys_dev->kobj, &policy->kobj,
                                        "cpufreq");
-               if (ret)
+               if (ret) {
+                       cpufreq_cpu_put(managed_policy);
                        goto err_out_unregister;
+               }
        }

        policy->governor = NULL; /* to assure that the starting sequence is
@@ -965,17 +982,20 @@ err_out_unregister:
                per_cpu(cpufreq_cpu_data, j) = NULL;
        spin_unlock_irqrestore(&cpufreq_driver_lock, flags);

+err_out_kobj_put:
        kobject_put(&policy->kobj);
        wait_for_completion(&policy->kobj_unregister);

-err_out_driver_exit:
+out_driver_exit:
        if (cpufreq_driver->exit)
                cpufreq_driver->exit(policy);

-err_out:
+err_unlock_policy:
        unlock_policy_rwsem_write(cpu);
+err_free_cpumask:
+       free_cpumask_var(policy->cpus);
+err_free_policy:
        kfree(policy);
-
 nomem_out:
        module_put(cpufreq_driver->owner);
 module_out:
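
The relabeled error paths in the hunk above follow the standard kernel cleanup-ladder idiom: each allocation gets a label that frees it and then falls through to the labels for everything allocated before it, so every goto names exactly the state that needs unwinding. A stripped-down sketch of the shape (illustrative only, not the full function):

static int add_dev_sketch(void)
{
        struct cpufreq_policy *policy;
        int ret;

        policy = kzalloc(sizeof(*policy), GFP_KERNEL);
        if (!policy)
                return -ENOMEM;

        if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL)) {
                ret = -ENOMEM;
                goto err_free_policy;
        }
        if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL)) {
                ret = -ENOMEM;
                goto err_free_cpumask;
        }
        return 0;

err_free_cpumask:       /* unwind strictly in reverse order of allocation */
        free_cpumask_var(policy->cpus);
err_free_policy:
        kfree(policy);
        return ret;
}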
@@ -1070,8 +1090,6 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
        spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 #endif

-       unlock_policy_rwsem_write(cpu);
-
        if (cpufreq_driver->target)
                __cpufreq_governor(data, CPUFREQ_GOV_STOP);

@@ -1088,6 +1106,8 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
        if (cpufreq_driver->exit)
                cpufreq_driver->exit(data);

+       unlock_policy_rwsem_write(cpu);
+
        free_cpumask_var(data->related_cpus);
        free_cpumask_var(data->cpus);
        kfree(data);
@@ -1228,13 +1248,22 @@ EXPORT_SYMBOL(cpufreq_get);

 static int cpufreq_suspend(struct sys_device *sysdev, pm_message_t pmsg)
 {
-       int cpu = sysdev->id;
        int ret = 0;
+
+#ifdef __powerpc__
+       int cpu = sysdev->id;
        unsigned int cur_freq = 0;
        struct cpufreq_policy *cpu_policy;

        dprintk("suspending cpu %u\n", cpu);

+       /*
+        * This whole bogosity is here because Powerbooks are made of fail.
+        * No sane platform should need any of the code below to be run.
+        * (it's entirely the wrong thing to do, as driver->get may
+        * reenable interrupts on some architectures).
+        */
+
        if (!cpu_online(cpu))
                return 0;

@@ -1293,6 +1322,7 @@ static int cpufreq_suspend(struct sys_device *sysdev, pm_message_t pmsg)

 out:
        cpufreq_cpu_put(cpu_policy);
+#endif /* __powerpc__ */
        return ret;
 }

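The __powerpc__ guards being added here use a common pattern: the declarations that only the guarded code needs (cpu, cur_freq, cpu_policy) move inside the #ifdef, so non-powerpc builds compile the function down to a bare return ret; with no unused-variable warnings. Roughly (sketch only):

static int suspend_sketch(struct sys_device *sysdev, pm_message_t pmsg)
{
        int ret = 0;

#ifdef __powerpc__
        int cpu = sysdev->id;   /* only the legacy path below uses this */

        /* ... Powerbook-only save/restore of the current frequency ... */
#endif
        return ret;
}
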
@@ -1306,12 +1336,18 @@ out:
  */
 static int cpufreq_resume(struct sys_device *sysdev)
 {
-       int cpu = sysdev->id;
        int ret = 0;
+
+#ifdef __powerpc__
+       int cpu = sysdev->id;
        struct cpufreq_policy *cpu_policy;

        dprintk("resuming cpu %u\n", cpu);

+       /* As with the ->suspend method, all the code below is
+        * only necessary because Powerbooks suck.
+        * See commit 42d4dc3f4e1e for jokes. */
+
        if (!cpu_online(cpu))
                return 0;

@@ -1375,6 +1411,7 @@ out:
        schedule_work(&cpu_policy->update);
 fail:
        cpufreq_cpu_put(cpu_policy);
+#endif /* __powerpc__ */
        return ret;
 }

diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index a7ef465c83b9..bc33ddc9c97c 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -64,21 +64,20 @@ struct cpu_dbs_info_s {
        unsigned int requested_freq;
        int cpu;
        unsigned int enable:1;
+       /*
+        * percpu mutex that serializes governor limit change with
+        * do_dbs_timer invocation. We do not want do_dbs_timer to run
+        * when user is changing the governor or limits.
+        */
+       struct mutex timer_mutex;
 };
 static DEFINE_PER_CPU(struct cpu_dbs_info_s, cs_cpu_dbs_info);

 static unsigned int dbs_enable;        /* number of CPUs using this policy */

 /*
- * DEADLOCK ALERT! There is a ordering requirement between cpu_hotplug
- * lock and dbs_mutex. cpu_hotplug lock should always be held before
- * dbs_mutex. If any function that can potentially take cpu_hotplug lock
- * (like __cpufreq_driver_target()) is being called with dbs_mutex taken, then
- * cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock
- * is recursive for the same process. -Venki
- * DEADLOCK ALERT! (2) : do_dbs_timer() must not take the dbs_mutex, because it
- * would deadlock with cancel_delayed_work_sync(), which is needed for proper
- * raceless workqueue teardown.
+ * dbs_mutex protects data in dbs_tuners_ins from concurrent changes on
+ * different CPUs. It protects dbs_enable in governor start/stop.
  */
 static DEFINE_MUTEX(dbs_mutex);

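The new field splits the locking into two levels: the global dbs_mutex now covers only governor-wide state (the tunables and dbs_enable), while each CPU's timer_mutex serializes its sampling work against a concurrent limit change. A minimal sketch of the split, with illustrative names rather than the full governor:

struct dbs_info_sketch {
        struct delayed_work work;
        struct mutex timer_mutex;       /* work handler vs. GOV_LIMITS */
};

static void gov_limits_sketch(struct dbs_info_sketch *info)
{
        /* Per-CPU serialization suffices; dbs_mutex is never needed here. */
        mutex_lock(&info->timer_mutex);
        /* ... clamp the current frequency to the new policy limits ... */
        mutex_unlock(&info->timer_mutex);
}

Because the work handler now takes only this per-CPU mutex, the dbs_mutex/cancel_delayed_work_sync() interaction described in the deleted comment can no longer deadlock.
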
@@ -488,18 +487,12 @@ static void do_dbs_timer(struct work_struct *work)

        delay -= jiffies % delay;

-       if (lock_policy_rwsem_write(cpu) < 0)
-               return;
-
-       if (!dbs_info->enable) {
-               unlock_policy_rwsem_write(cpu);
-               return;
-       }
+       mutex_lock(&dbs_info->timer_mutex);

        dbs_check_cpu(dbs_info);

        queue_delayed_work_on(cpu, kconservative_wq, &dbs_info->work, delay);
-       unlock_policy_rwsem_write(cpu);
+       mutex_unlock(&dbs_info->timer_mutex);
 }

 static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
@@ -535,9 +528,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                if ((!cpu_online(cpu)) || (!policy->cur))
                        return -EINVAL;

-               if (this_dbs_info->enable) /* Already enabled */
-                       break;
-
                mutex_lock(&dbs_mutex);

                rc = sysfs_create_group(&policy->kobj, &dbs_attr_group);
@@ -561,6 +551,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                this_dbs_info->down_skip = 0;
                this_dbs_info->requested_freq = policy->cur;

+               mutex_init(&this_dbs_info->timer_mutex);
                dbs_enable++;
                /*
                 * Start the timerschedule work, when this governor
@@ -590,17 +581,19 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                                                &dbs_cpufreq_notifier_block,
                                                CPUFREQ_TRANSITION_NOTIFIER);
                }
-               dbs_timer_init(this_dbs_info);
-
                mutex_unlock(&dbs_mutex);

+               dbs_timer_init(this_dbs_info);
+
                break;

        case CPUFREQ_GOV_STOP:
-               mutex_lock(&dbs_mutex);
                dbs_timer_exit(this_dbs_info);
+
+               mutex_lock(&dbs_mutex);
                sysfs_remove_group(&policy->kobj, &dbs_attr_group);
                dbs_enable--;
+               mutex_destroy(&this_dbs_info->timer_mutex);

                /*
                 * Stop the timerschedule work, when this governor
@@ -616,7 +609,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                break;

        case CPUFREQ_GOV_LIMITS:
-               mutex_lock(&dbs_mutex);
+               mutex_lock(&this_dbs_info->timer_mutex);
                if (policy->max < this_dbs_info->cur_policy->cur)
                        __cpufreq_driver_target(
                                        this_dbs_info->cur_policy,
@@ -625,7 +618,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                        __cpufreq_driver_target(
                                        this_dbs_info->cur_policy,
                                        policy->min, CPUFREQ_RELATION_L);
-               mutex_unlock(&dbs_mutex);
+               mutex_unlock(&this_dbs_info->timer_mutex);

                break;
        }
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 36f292a7bd01..d7a528c80de8 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -70,23 +70,21 @@ struct cpu_dbs_info_s {
        unsigned int freq_lo_jiffies;
        unsigned int freq_hi_jiffies;
        int cpu;
-       unsigned int enable:1,
-               sample_type:1;
+       unsigned int sample_type:1;
+       /*
+        * percpu mutex that serializes governor limit change with
+        * do_dbs_timer invocation. We do not want do_dbs_timer to run
+        * when user is changing the governor or limits.
+        */
+       struct mutex timer_mutex;
 };
 static DEFINE_PER_CPU(struct cpu_dbs_info_s, od_cpu_dbs_info);

 static unsigned int dbs_enable;        /* number of CPUs using this policy */

 /*
- * DEADLOCK ALERT! There is a ordering requirement between cpu_hotplug
- * lock and dbs_mutex. cpu_hotplug lock should always be held before
- * dbs_mutex. If any function that can potentially take cpu_hotplug lock
- * (like __cpufreq_driver_target()) is being called with dbs_mutex taken, then
- * cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock
- * is recursive for the same process. -Venki
- * DEADLOCK ALERT! (2) : do_dbs_timer() must not take the dbs_mutex, because it
- * would deadlock with cancel_delayed_work_sync(), which is needed for proper
- * raceless workqueue teardown.
+ * dbs_mutex protects data in dbs_tuners_ins from concurrent changes on
+ * different CPUs. It protects dbs_enable in governor start/stop.
  */
 static DEFINE_MUTEX(dbs_mutex);

@@ -193,13 +191,18 @@ static unsigned int powersave_bias_target(struct cpufreq_policy *policy,
        return freq_hi;
 }

+static void ondemand_powersave_bias_init_cpu(int cpu)
+{
+       struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, cpu);
+       dbs_info->freq_table = cpufreq_frequency_get_table(cpu);
+       dbs_info->freq_lo = 0;
+}
+
 static void ondemand_powersave_bias_init(void)
 {
        int i;
        for_each_online_cpu(i) {
-               struct cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, i);
-               dbs_info->freq_table = cpufreq_frequency_get_table(i);
-               dbs_info->freq_lo = 0;
+               ondemand_powersave_bias_init_cpu(i);
        }
 }

@@ -241,12 +244,10 @@ static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
        unsigned int input;
        int ret;
        ret = sscanf(buf, "%u", &input);
+       if (ret != 1)
+               return -EINVAL;

        mutex_lock(&dbs_mutex);
-       if (ret != 1) {
-               mutex_unlock(&dbs_mutex);
-               return -EINVAL;
-       }
        dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate);
        mutex_unlock(&dbs_mutex);

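The store_sampling_rate() change is the usual validate-then-lock shape: reject malformed input before taking dbs_mutex, so no error path ever has to drop the lock. As a sketch (hypothetical helper, same idea as the hunk above):

static DEFINE_MUTEX(tuners_mutex);      /* stands in for dbs_mutex */

static int store_tunable_sketch(const char *buf, unsigned int *tunable)
{
        unsigned int input;

        if (sscanf(buf, "%u", &input) != 1)     /* validate first */
                return -EINVAL;

        mutex_lock(&tuners_mutex);              /* then publish */
        *tunable = input;
        mutex_unlock(&tuners_mutex);
        return 0;
}
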
@@ -260,13 +261,12 @@ static ssize_t store_up_threshold(struct cpufreq_policy *unused,
        int ret;
        ret = sscanf(buf, "%u", &input);

-       mutex_lock(&dbs_mutex);
        if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
                        input < MIN_FREQUENCY_UP_THRESHOLD) {
-               mutex_unlock(&dbs_mutex);
                return -EINVAL;
        }

+       mutex_lock(&dbs_mutex);
        dbs_tuners_ins.up_threshold = input;
        mutex_unlock(&dbs_mutex);

@@ -364,9 +364,6 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
        struct cpufreq_policy *policy;
        unsigned int j;

-       if (!this_dbs_info->enable)
-               return;
-
        this_dbs_info->freq_lo = 0;
        policy = this_dbs_info->cur_policy;

@@ -494,14 +491,7 @@ static void do_dbs_timer(struct work_struct *work)
        int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);

        delay -= jiffies % delay;
-
-       if (lock_policy_rwsem_write(cpu) < 0)
-               return;
-
-       if (!dbs_info->enable) {
-               unlock_policy_rwsem_write(cpu);
-               return;
-       }
+       mutex_lock(&dbs_info->timer_mutex);

        /* Common NORMAL_SAMPLE setup */
        dbs_info->sample_type = DBS_NORMAL_SAMPLE;
@@ -518,7 +508,7 @@ static void do_dbs_timer(struct work_struct *work)
                                        dbs_info->freq_lo, CPUFREQ_RELATION_H);
        }
        queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay);
-       unlock_policy_rwsem_write(cpu);
+       mutex_unlock(&dbs_info->timer_mutex);
 }

 static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
@@ -527,8 +517,6 @@ static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
        int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
        delay -= jiffies % delay;

-       dbs_info->enable = 1;
-       ondemand_powersave_bias_init();
        dbs_info->sample_type = DBS_NORMAL_SAMPLE;
        INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
        queue_delayed_work_on(dbs_info->cpu, kondemand_wq, &dbs_info->work,
@@ -537,7 +525,6 @@ static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)

 static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
 {
-       dbs_info->enable = 0;
        cancel_delayed_work_sync(&dbs_info->work);
 }

@@ -556,19 +543,15 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                if ((!cpu_online(cpu)) || (!policy->cur))
                        return -EINVAL;

-               if (this_dbs_info->enable) /* Already enabled */
-                       break;
-
                mutex_lock(&dbs_mutex);
-               dbs_enable++;

                rc = sysfs_create_group(&policy->kobj, &dbs_attr_group);
                if (rc) {
-                       dbs_enable--;
                        mutex_unlock(&dbs_mutex);
                        return rc;
                }

+               dbs_enable++;
                for_each_cpu(j, policy->cpus) {
                        struct cpu_dbs_info_s *j_dbs_info;
                        j_dbs_info = &per_cpu(od_cpu_dbs_info, j);
@@ -582,6 +565,8 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                        }
                }
                this_dbs_info->cpu = cpu;
+               ondemand_powersave_bias_init_cpu(cpu);
+               mutex_init(&this_dbs_info->timer_mutex);
                /*
                 * Start the timerschedule work, when this governor
                 * is used for first time
@@ -599,29 +584,31 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                                        max(min_sampling_rate,
                                            latency * LATENCY_MULTIPLIER);
                }
-               dbs_timer_init(this_dbs_info);
-
                mutex_unlock(&dbs_mutex);
+
+               dbs_timer_init(this_dbs_info);
                break;

        case CPUFREQ_GOV_STOP:
-               mutex_lock(&dbs_mutex);
                dbs_timer_exit(this_dbs_info);
+
+               mutex_lock(&dbs_mutex);
                sysfs_remove_group(&policy->kobj, &dbs_attr_group);
+               mutex_destroy(&this_dbs_info->timer_mutex);
                dbs_enable--;
                mutex_unlock(&dbs_mutex);

                break;

        case CPUFREQ_GOV_LIMITS:
-               mutex_lock(&dbs_mutex);
+               mutex_lock(&this_dbs_info->timer_mutex);
                if (policy->max < this_dbs_info->cur_policy->cur)
                        __cpufreq_driver_target(this_dbs_info->cur_policy,
                                policy->max, CPUFREQ_RELATION_H);
                else if (policy->min > this_dbs_info->cur_policy->cur)
                        __cpufreq_driver_target(this_dbs_info->cur_policy,
                                policy->min, CPUFREQ_RELATION_L);
-               mutex_unlock(&dbs_mutex);
+               mutex_unlock(&this_dbs_info->timer_mutex);
                break;
        }
        return 0;
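
The GOV_STOP reordering above is the heart of the conversion: dbs_timer_exit() calls cancel_delayed_work_sync(), which blocks until a running handler finishes, so it now runs before dbs_mutex is taken. The synchronous cancel therefore never executes under a governor lock, removing the old ordering constraint instead of merely documenting it, and mutex_destroy() is safe afterwards because the work is guaranteed gone. A condensed sketch, reusing the illustrative names from the note above:

static DEFINE_MUTEX(dbs_mutex_sketch);
static unsigned int dbs_enable_sketch;

static void gov_stop_sketch(struct dbs_info_sketch *info)
{
        /* 1. Kill the sampling work while holding no governor locks. */
        cancel_delayed_work_sync(&info->work);

        /* 2. Only global bookkeeping remains under the global mutex. */
        mutex_lock(&dbs_mutex_sketch);
        dbs_enable_sketch--;
        mutex_destroy(&info->timer_mutex);      /* no concurrent users left */
        mutex_unlock(&dbs_mutex_sketch);
}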