aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/cpufreq
diff options
context:
space:
mode:
author: Viresh Kumar <viresh.kumar@linaro.org> 2013-02-07 00:25:00 -0500
committer: Rafael J. Wysocki <rafael.j.wysocki@intel.com> 2013-02-08 19:22:57 -0500
commit2eaa3e2df185997e92596ab14a2a67dde3876d2e (patch)
treee345655a2cb6c44d4c1fe9f3727fc4117f4ca1dc /drivers/cpufreq
parentfa1d8af47f42671fa82779582ca60948f876a73e (diff)
cpufreq: Fix locking issues
cpufreq core uses two locks: - cpufreq_driver_lock: General lock for driver and cpufreq_cpu_data array. - cpu_policy_rwsem: per CPU reader-writer semaphore designed to cure all cpufreq/hotplug/workqueue/etc related lock issues. These locks were not used properly and are placed against their principle (present before their definition) at various places. This patch is an attempt to fix their use. Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org> Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Diffstat (limited to 'drivers/cpufreq')
-rw-r--r--drivers/cpufreq/cpufreq.c72
1 file changed, 38 insertions, 34 deletions
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 7cbf1d53804f..e000f3691661 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -59,8 +59,6 @@ static DEFINE_SPINLOCK(cpufreq_driver_lock);
59 * mode before doing so. 59 * mode before doing so.
60 * 60 *
61 * Additional rules: 61 * Additional rules:
62 * - All holders of the lock should check to make sure that the CPU they
63 * are concerned with are online after they get the lock.
64 * - Governor routines that can be called in cpufreq hotplug path should not 62 * - Governor routines that can be called in cpufreq hotplug path should not
65 * take this sem as top level hotplug notifier handler takes this. 63 * take this sem as top level hotplug notifier handler takes this.
66 * - Lock should not be held across 64 * - Lock should not be held across
@@ -257,6 +255,7 @@ static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
257void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state) 255void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
258{ 256{
259 struct cpufreq_policy *policy; 257 struct cpufreq_policy *policy;
258 unsigned long flags;
260 259
261 BUG_ON(irqs_disabled()); 260 BUG_ON(irqs_disabled());
262 261
@@ -267,7 +266,10 @@ void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
267 pr_debug("notification %u of frequency transition to %u kHz\n", 266 pr_debug("notification %u of frequency transition to %u kHz\n",
268 state, freqs->new); 267 state, freqs->new);
269 268
269 spin_lock_irqsave(&cpufreq_driver_lock, flags);
270 policy = per_cpu(cpufreq_cpu_data, freqs->cpu); 270 policy = per_cpu(cpufreq_cpu_data, freqs->cpu);
271 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
272
271 switch (state) { 273 switch (state) {
272 274
273 case CPUFREQ_PRECHANGE: 275 case CPUFREQ_PRECHANGE:
@@ -808,22 +810,22 @@ static int cpufreq_add_policy_cpu(unsigned int cpu, unsigned int sibling,
808 policy = cpufreq_cpu_get(sibling); 810 policy = cpufreq_cpu_get(sibling);
809 WARN_ON(!policy); 811 WARN_ON(!policy);
810 812
811 per_cpu(cpufreq_policy_cpu, cpu) = policy->cpu;
812
813 lock_policy_rwsem_write(cpu);
814
815 __cpufreq_governor(policy, CPUFREQ_GOV_STOP); 813 __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
816 814
815 lock_policy_rwsem_write(sibling);
816
817 spin_lock_irqsave(&cpufreq_driver_lock, flags); 817 spin_lock_irqsave(&cpufreq_driver_lock, flags);
818
818 cpumask_set_cpu(cpu, policy->cpus); 819 cpumask_set_cpu(cpu, policy->cpus);
820 per_cpu(cpufreq_policy_cpu, cpu) = policy->cpu;
819 per_cpu(cpufreq_cpu_data, cpu) = policy; 821 per_cpu(cpufreq_cpu_data, cpu) = policy;
820 spin_unlock_irqrestore(&cpufreq_driver_lock, flags); 822 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
821 823
824 unlock_policy_rwsem_write(sibling);
825
822 __cpufreq_governor(policy, CPUFREQ_GOV_START); 826 __cpufreq_governor(policy, CPUFREQ_GOV_START);
823 __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS); 827 __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
824 828
825 unlock_policy_rwsem_write(cpu);
826
827 ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq"); 829 ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
828 if (ret) { 830 if (ret) {
829 cpufreq_cpu_put(policy); 831 cpufreq_cpu_put(policy);
@@ -870,11 +872,15 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
870 872
871#ifdef CONFIG_HOTPLUG_CPU 873#ifdef CONFIG_HOTPLUG_CPU
872 /* Check if this cpu was hot-unplugged earlier and has siblings */ 874 /* Check if this cpu was hot-unplugged earlier and has siblings */
875 spin_lock_irqsave(&cpufreq_driver_lock, flags);
873 for_each_online_cpu(sibling) { 876 for_each_online_cpu(sibling) {
874 struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling); 877 struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
875 if (cp && cpumask_test_cpu(cpu, cp->related_cpus)) 878 if (cp && cpumask_test_cpu(cpu, cp->related_cpus)) {
879 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
876 return cpufreq_add_policy_cpu(cpu, sibling, dev); 880 return cpufreq_add_policy_cpu(cpu, sibling, dev);
881 }
877 } 882 }
883 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
878#endif 884#endif
879#endif 885#endif
880 886
@@ -899,8 +905,6 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
899 905
900 /* Initially set CPU itself as the policy_cpu */ 906 /* Initially set CPU itself as the policy_cpu */
901 per_cpu(cpufreq_policy_cpu, cpu) = cpu; 907 per_cpu(cpufreq_policy_cpu, cpu) = cpu;
902 ret = (lock_policy_rwsem_write(cpu) < 0);
903 WARN_ON(ret);
904 908
905 init_completion(&policy->kobj_unregister); 909 init_completion(&policy->kobj_unregister);
906 INIT_WORK(&policy->update, handle_update); 910 INIT_WORK(&policy->update, handle_update);
@@ -911,7 +915,7 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
911 ret = cpufreq_driver->init(policy); 915 ret = cpufreq_driver->init(policy);
912 if (ret) { 916 if (ret) {
913 pr_debug("initialization failed\n"); 917 pr_debug("initialization failed\n");
914 goto err_unlock_policy; 918 goto err_set_policy_cpu;
915 } 919 }
916 920
917 /* related cpus should atleast have policy->cpus */ 921 /* related cpus should atleast have policy->cpus */
@@ -942,8 +946,6 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
942 if (ret) 946 if (ret)
943 goto err_out_unregister; 947 goto err_out_unregister;
944 948
945 unlock_policy_rwsem_write(cpu);
946
947 kobject_uevent(&policy->kobj, KOBJ_ADD); 949 kobject_uevent(&policy->kobj, KOBJ_ADD);
948 module_put(cpufreq_driver->owner); 950 module_put(cpufreq_driver->owner);
949 pr_debug("initialization complete\n"); 951 pr_debug("initialization complete\n");
@@ -959,8 +961,8 @@ err_out_unregister:
959 kobject_put(&policy->kobj); 961 kobject_put(&policy->kobj);
960 wait_for_completion(&policy->kobj_unregister); 962 wait_for_completion(&policy->kobj_unregister);
961 963
962err_unlock_policy: 964err_set_policy_cpu:
963 unlock_policy_rwsem_write(cpu); 965 per_cpu(cpufreq_policy_cpu, cpu) = -1;
964 free_cpumask_var(policy->related_cpus); 966 free_cpumask_var(policy->related_cpus);
965err_free_cpumask: 967err_free_cpumask:
966 free_cpumask_var(policy->cpus); 968 free_cpumask_var(policy->cpus);
@@ -1008,12 +1010,14 @@ static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif
1008 pr_debug("%s: unregistering CPU %u\n", __func__, cpu); 1010 pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
1009 1011
1010 spin_lock_irqsave(&cpufreq_driver_lock, flags); 1012 spin_lock_irqsave(&cpufreq_driver_lock, flags);
1013
1011 data = per_cpu(cpufreq_cpu_data, cpu); 1014 data = per_cpu(cpufreq_cpu_data, cpu);
1015 per_cpu(cpufreq_cpu_data, cpu) = NULL;
1016
1017 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1012 1018
1013 if (!data) { 1019 if (!data) {
1014 pr_debug("%s: No cpu_data found\n", __func__); 1020 pr_debug("%s: No cpu_data found\n", __func__);
1015 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1016 unlock_policy_rwsem_write(cpu);
1017 return -EINVAL; 1021 return -EINVAL;
1018 } 1022 }
1019 1023
@@ -1025,9 +1029,10 @@ static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif
1025 CPUFREQ_NAME_LEN); 1029 CPUFREQ_NAME_LEN);
1026#endif 1030#endif
1027 1031
1028 per_cpu(cpufreq_cpu_data, cpu) = NULL; 1032 WARN_ON(lock_policy_rwsem_write(cpu));
1029 cpus = cpumask_weight(data->cpus); 1033 cpus = cpumask_weight(data->cpus);
1030 cpumask_clear_cpu(cpu, data->cpus); 1034 cpumask_clear_cpu(cpu, data->cpus);
1035 unlock_policy_rwsem_write(cpu);
1031 1036
1032 if (cpu != data->cpu) { 1037 if (cpu != data->cpu) {
1033 sysfs_remove_link(&dev->kobj, "cpufreq"); 1038 sysfs_remove_link(&dev->kobj, "cpufreq");
@@ -1038,31 +1043,37 @@ static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif
1038 ret = kobject_move(&data->kobj, &cpu_dev->kobj); 1043 ret = kobject_move(&data->kobj, &cpu_dev->kobj);
1039 if (ret) { 1044 if (ret) {
1040 pr_err("%s: Failed to move kobj: %d", __func__, ret); 1045 pr_err("%s: Failed to move kobj: %d", __func__, ret);
1046
1047 WARN_ON(lock_policy_rwsem_write(cpu));
1041 cpumask_set_cpu(cpu, data->cpus); 1048 cpumask_set_cpu(cpu, data->cpus);
1042 ret = sysfs_create_link(&cpu_dev->kobj, &data->kobj, 1049
1043 "cpufreq"); 1050 spin_lock_irqsave(&cpufreq_driver_lock, flags);
1051 per_cpu(cpufreq_cpu_data, cpu) = data;
1044 spin_unlock_irqrestore(&cpufreq_driver_lock, flags); 1052 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1053
1045 unlock_policy_rwsem_write(cpu); 1054 unlock_policy_rwsem_write(cpu);
1055
1056 ret = sysfs_create_link(&cpu_dev->kobj, &data->kobj,
1057 "cpufreq");
1046 return -EINVAL; 1058 return -EINVAL;
1047 } 1059 }
1048 1060
1061 WARN_ON(lock_policy_rwsem_write(cpu));
1049 update_policy_cpu(data, cpu_dev->id); 1062 update_policy_cpu(data, cpu_dev->id);
1063 unlock_policy_rwsem_write(cpu);
1050 pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n", 1064 pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
1051 __func__, cpu_dev->id, cpu); 1065 __func__, cpu_dev->id, cpu);
1052 } 1066 }
1053 1067
1054 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
1055
1056 pr_debug("%s: removing link, cpu: %d\n", __func__, cpu); 1068 pr_debug("%s: removing link, cpu: %d\n", __func__, cpu);
1057 cpufreq_cpu_put(data); 1069 cpufreq_cpu_put(data);
1058 unlock_policy_rwsem_write(cpu);
1059 1070
1060 /* If cpu is last user of policy, free policy */ 1071 /* If cpu is last user of policy, free policy */
1061 if (cpus == 1) { 1072 if (cpus == 1) {
1062 lock_policy_rwsem_write(cpu); 1073 lock_policy_rwsem_read(cpu);
1063 kobj = &data->kobj; 1074 kobj = &data->kobj;
1064 cmp = &data->kobj_unregister; 1075 cmp = &data->kobj_unregister;
1065 unlock_policy_rwsem_write(cpu); 1076 unlock_policy_rwsem_read(cpu);
1066 kobject_put(kobj); 1077 kobject_put(kobj);
1067 1078
1068 /* we need to make sure that the underlying kobj is actually 1079 /* we need to make sure that the underlying kobj is actually
@@ -1073,10 +1084,8 @@ static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif
1073 wait_for_completion(cmp); 1084 wait_for_completion(cmp);
1074 pr_debug("wait complete\n"); 1085 pr_debug("wait complete\n");
1075 1086
1076 lock_policy_rwsem_write(cpu);
1077 if (cpufreq_driver->exit) 1087 if (cpufreq_driver->exit)
1078 cpufreq_driver->exit(data); 1088 cpufreq_driver->exit(data);
1079 unlock_policy_rwsem_write(cpu);
1080 1089
1081 free_cpumask_var(data->related_cpus); 1090 free_cpumask_var(data->related_cpus);
1082 free_cpumask_var(data->cpus); 1091 free_cpumask_var(data->cpus);
@@ -1086,6 +1095,7 @@ static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif
1086 __cpufreq_governor(data, CPUFREQ_GOV_LIMITS); 1095 __cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
1087 } 1096 }
1088 1097
1098 per_cpu(cpufreq_policy_cpu, cpu) = -1;
1089 return 0; 1099 return 0;
1090} 1100}
1091 1101
@@ -1098,9 +1108,6 @@ static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
1098 if (cpu_is_offline(cpu)) 1108 if (cpu_is_offline(cpu))
1099 return 0; 1109 return 0;
1100 1110
1101 if (unlikely(lock_policy_rwsem_write(cpu)))
1102 BUG();
1103
1104 retval = __cpufreq_remove_dev(dev, sif); 1111 retval = __cpufreq_remove_dev(dev, sif);
1105 return retval; 1112 return retval;
1106} 1113}
@@ -1791,9 +1798,6 @@ static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
1791 break; 1798 break;
1792 case CPU_DOWN_PREPARE: 1799 case CPU_DOWN_PREPARE:
1793 case CPU_DOWN_PREPARE_FROZEN: 1800 case CPU_DOWN_PREPARE_FROZEN:
1794 if (unlikely(lock_policy_rwsem_write(cpu)))
1795 BUG();
1796
1797 __cpufreq_remove_dev(dev, NULL); 1801 __cpufreq_remove_dev(dev, NULL);
1798 break; 1802 break;
1799 case CPU_DOWN_FAILED: 1803 case CPU_DOWN_FAILED: