about summary refs log tree commit diff stats
path: root/drivers/cpufreq/cpufreq.c
diff options
context:
space:
mode:
author: Viresh Kumar <viresh.kumar@linaro.org> 2013-10-18 09:40:15 -0400
committer: Rafael J. Wysocki <rafael.j.wysocki@intel.com> 2013-10-25 17:54:12 -0400
commitad7722dab7292dbc1c4586d701ac226b68122d39 (patch)
tree9d66845a9e2f1fbb6c2641d30f89f164724a5f76 /drivers/cpufreq/cpufreq.c
parent19e77c28dbf1972305da0dfeb92a62f83df3a91d (diff)
cpufreq: create per policy rwsem instead of per CPU cpu_policy_rwsem
We have per-CPU cpu_policy_rwsem for cpufreq core, but we never use all of them. We always use rwsem of policy->cpu and so we can actually make this rwsem per policy instead. This patch does this change. With this change other tricky situations are also avoided now, like which lock to take while we are changing policy->cpu, etc. Suggested-by: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com> Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org> Reviewed-by: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com> Tested-by: Andrew Lunn <andrew@lunn.ch> Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Diffstat (limited to 'drivers/cpufreq/cpufreq.c')
-rw-r--r-- drivers/cpufreq/cpufreq.c | 110
1 file changed, 31 insertions(+), 79 deletions(-)
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 87ed83a6c894..6c9cbb9ebd1f 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -53,47 +53,6 @@ static inline bool has_target(void)
53} 53}
54 54
55/* 55/*
56 * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
57 * all cpufreq/hotplug/workqueue/etc related lock issues.
58 *
59 * The rules for this semaphore:
60 * - Any routine that wants to read from the policy structure will
61 * do a down_read on this semaphore.
62 * - Any routine that will write to the policy structure and/or may take away
63 * the policy altogether (eg. CPU hotplug), will hold this lock in write
64 * mode before doing so.
65 *
66 * Additional rules:
67 * - Governor routines that can be called in cpufreq hotplug path should not
68 * take this sem as top level hotplug notifier handler takes this.
69 * - Lock should not be held across
70 * __cpufreq_governor(data, CPUFREQ_GOV_STOP);
71 */
72static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
73
74#define lock_policy_rwsem(mode, cpu) \
75static void lock_policy_rwsem_##mode(int cpu) \
76{ \
77 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); \
78 BUG_ON(!policy); \
79 down_##mode(&per_cpu(cpu_policy_rwsem, policy->cpu)); \
80}
81
82lock_policy_rwsem(read, cpu);
83lock_policy_rwsem(write, cpu);
84
85#define unlock_policy_rwsem(mode, cpu) \
86static void unlock_policy_rwsem_##mode(int cpu) \
87{ \
88 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); \
89 BUG_ON(!policy); \
90 up_##mode(&per_cpu(cpu_policy_rwsem, policy->cpu)); \
91}
92
93unlock_policy_rwsem(read, cpu);
94unlock_policy_rwsem(write, cpu);
95
96/*
97 * rwsem to guarantee that cpufreq driver module doesn't unload during critical 56 * rwsem to guarantee that cpufreq driver module doesn't unload during critical
98 * sections 57 * sections
99 */ 58 */
@@ -688,14 +647,14 @@ static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
688 if (!down_read_trylock(&cpufreq_rwsem)) 647 if (!down_read_trylock(&cpufreq_rwsem))
689 return -EINVAL; 648 return -EINVAL;
690 649
691 lock_policy_rwsem_read(policy->cpu); 650 down_read(&policy->rwsem);
692 651
693 if (fattr->show) 652 if (fattr->show)
694 ret = fattr->show(policy, buf); 653 ret = fattr->show(policy, buf);
695 else 654 else
696 ret = -EIO; 655 ret = -EIO;
697 656
698 unlock_policy_rwsem_read(policy->cpu); 657 up_read(&policy->rwsem);
699 up_read(&cpufreq_rwsem); 658 up_read(&cpufreq_rwsem);
700 659
701 return ret; 660 return ret;
@@ -716,14 +675,14 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
716 if (!down_read_trylock(&cpufreq_rwsem)) 675 if (!down_read_trylock(&cpufreq_rwsem))
717 goto unlock; 676 goto unlock;
718 677
719 lock_policy_rwsem_write(policy->cpu); 678 down_write(&policy->rwsem);
720 679
721 if (fattr->store) 680 if (fattr->store)
722 ret = fattr->store(policy, buf, count); 681 ret = fattr->store(policy, buf, count);
723 else 682 else
724 ret = -EIO; 683 ret = -EIO;
725 684
726 unlock_policy_rwsem_write(policy->cpu); 685 up_write(&policy->rwsem);
727 686
728 up_read(&cpufreq_rwsem); 687 up_read(&cpufreq_rwsem);
729unlock: 688unlock:
@@ -900,7 +859,7 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
900 } 859 }
901 } 860 }
902 861
903 lock_policy_rwsem_write(policy->cpu); 862 down_write(&policy->rwsem);
904 863
905 write_lock_irqsave(&cpufreq_driver_lock, flags); 864 write_lock_irqsave(&cpufreq_driver_lock, flags);
906 865
@@ -908,7 +867,7 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
908 per_cpu(cpufreq_cpu_data, cpu) = policy; 867 per_cpu(cpufreq_cpu_data, cpu) = policy;
909 write_unlock_irqrestore(&cpufreq_driver_lock, flags); 868 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
910 869
911 unlock_policy_rwsem_write(policy->cpu); 870 up_write(&policy->rwsem);
912 871
913 if (has_target()) { 872 if (has_target()) {
914 if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) || 873 if ((ret = __cpufreq_governor(policy, CPUFREQ_GOV_START)) ||
@@ -955,6 +914,8 @@ static struct cpufreq_policy *cpufreq_policy_alloc(void)
955 goto err_free_cpumask; 914 goto err_free_cpumask;
956 915
957 INIT_LIST_HEAD(&policy->policy_list); 916 INIT_LIST_HEAD(&policy->policy_list);
917 init_rwsem(&policy->rwsem);
918
958 return policy; 919 return policy;
959 920
960err_free_cpumask: 921err_free_cpumask:
@@ -977,19 +938,12 @@ static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
977 if (WARN_ON(cpu == policy->cpu)) 938 if (WARN_ON(cpu == policy->cpu))
978 return; 939 return;
979 940
980 /* 941 down_write(&policy->rwsem);
981 * Take direct locks as lock_policy_rwsem_write wouldn't work here.
982 * Also lock for last cpu is enough here as contention will happen only
983 * after policy->cpu is changed and after it is changed, other threads
984 * will try to acquire lock for new cpu. And policy is already updated
985 * by then.
986 */
987 down_write(&per_cpu(cpu_policy_rwsem, policy->cpu));
988 942
989 policy->last_cpu = policy->cpu; 943 policy->last_cpu = policy->cpu;
990 policy->cpu = cpu; 944 policy->cpu = cpu;
991 945
992 up_write(&per_cpu(cpu_policy_rwsem, policy->last_cpu)); 946 up_write(&policy->rwsem);
993 947
994 cpufreq_frequency_table_update_policy_cpu(policy); 948 cpufreq_frequency_table_update_policy_cpu(policy);
995 blocking_notifier_call_chain(&cpufreq_policy_notifier_list, 949 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
@@ -1181,9 +1135,9 @@ static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
1181 if (ret) { 1135 if (ret) {
1182 pr_err("%s: Failed to move kobj: %d", __func__, ret); 1136 pr_err("%s: Failed to move kobj: %d", __func__, ret);
1183 1137
1184 lock_policy_rwsem_write(old_cpu); 1138 down_write(&policy->rwsem);
1185 cpumask_set_cpu(old_cpu, policy->cpus); 1139 cpumask_set_cpu(old_cpu, policy->cpus);
1186 unlock_policy_rwsem_write(old_cpu); 1140 up_write(&policy->rwsem);
1187 1141
1188 ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj, 1142 ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
1189 "cpufreq"); 1143 "cpufreq");
@@ -1234,9 +1188,9 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
1234 policy->governor->name, CPUFREQ_NAME_LEN); 1188 policy->governor->name, CPUFREQ_NAME_LEN);
1235#endif 1189#endif
1236 1190
1237 lock_policy_rwsem_read(cpu); 1191 down_read(&policy->rwsem);
1238 cpus = cpumask_weight(policy->cpus); 1192 cpus = cpumask_weight(policy->cpus);
1239 unlock_policy_rwsem_read(cpu); 1193 up_read(&policy->rwsem);
1240 1194
1241 if (cpu != policy->cpu) { 1195 if (cpu != policy->cpu) {
1242 if (!frozen) 1196 if (!frozen)
@@ -1276,12 +1230,12 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
1276 return -EINVAL; 1230 return -EINVAL;
1277 } 1231 }
1278 1232
1279 lock_policy_rwsem_write(cpu); 1233 down_write(&policy->rwsem);
1280 cpus = cpumask_weight(policy->cpus); 1234 cpus = cpumask_weight(policy->cpus);
1281 1235
1282 if (cpus > 1) 1236 if (cpus > 1)
1283 cpumask_clear_cpu(cpu, policy->cpus); 1237 cpumask_clear_cpu(cpu, policy->cpus);
1284 unlock_policy_rwsem_write(cpu); 1238 up_write(&policy->rwsem);
1285 1239
1286 /* If cpu is last user of policy, free policy */ 1240 /* If cpu is last user of policy, free policy */
1287 if (cpus == 1) { 1241 if (cpus == 1) {
@@ -1296,10 +1250,10 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
1296 } 1250 }
1297 1251
1298 if (!frozen) { 1252 if (!frozen) {
1299 lock_policy_rwsem_read(cpu); 1253 down_read(&policy->rwsem);
1300 kobj = &policy->kobj; 1254 kobj = &policy->kobj;
1301 cmp = &policy->kobj_unregister; 1255 cmp = &policy->kobj_unregister;
1302 unlock_policy_rwsem_read(cpu); 1256 up_read(&policy->rwsem);
1303 kobject_put(kobj); 1257 kobject_put(kobj);
1304 1258
1305 /* 1259 /*
@@ -1479,19 +1433,22 @@ static unsigned int __cpufreq_get(unsigned int cpu)
1479 */ 1433 */
1480unsigned int cpufreq_get(unsigned int cpu) 1434unsigned int cpufreq_get(unsigned int cpu)
1481{ 1435{
1436 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1482 unsigned int ret_freq = 0; 1437 unsigned int ret_freq = 0;
1483 1438
1484 if (cpufreq_disabled() || !cpufreq_driver) 1439 if (cpufreq_disabled() || !cpufreq_driver)
1485 return -ENOENT; 1440 return -ENOENT;
1486 1441
1442 BUG_ON(!policy);
1443
1487 if (!down_read_trylock(&cpufreq_rwsem)) 1444 if (!down_read_trylock(&cpufreq_rwsem))
1488 return 0; 1445 return 0;
1489 1446
1490 lock_policy_rwsem_read(cpu); 1447 down_read(&policy->rwsem);
1491 1448
1492 ret_freq = __cpufreq_get(cpu); 1449 ret_freq = __cpufreq_get(cpu);
1493 1450
1494 unlock_policy_rwsem_read(cpu); 1451 up_read(&policy->rwsem);
1495 up_read(&cpufreq_rwsem); 1452 up_read(&cpufreq_rwsem);
1496 1453
1497 return ret_freq; 1454 return ret_freq;
@@ -1744,11 +1701,11 @@ int cpufreq_driver_target(struct cpufreq_policy *policy,
1744{ 1701{
1745 int ret = -EINVAL; 1702 int ret = -EINVAL;
1746 1703
1747 lock_policy_rwsem_write(policy->cpu); 1704 down_write(&policy->rwsem);
1748 1705
1749 ret = __cpufreq_driver_target(policy, target_freq, relation); 1706 ret = __cpufreq_driver_target(policy, target_freq, relation);
1750 1707
1751 unlock_policy_rwsem_write(policy->cpu); 1708 up_write(&policy->rwsem);
1752 1709
1753 return ret; 1710 return ret;
1754} 1711}
@@ -1979,10 +1936,10 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
1979 /* end old governor */ 1936 /* end old governor */
1980 if (policy->governor) { 1937 if (policy->governor) {
1981 __cpufreq_governor(policy, CPUFREQ_GOV_STOP); 1938 __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
1982 unlock_policy_rwsem_write(new_policy->cpu); 1939 up_write(&policy->rwsem);
1983 __cpufreq_governor(policy, 1940 __cpufreq_governor(policy,
1984 CPUFREQ_GOV_POLICY_EXIT); 1941 CPUFREQ_GOV_POLICY_EXIT);
1985 lock_policy_rwsem_write(new_policy->cpu); 1942 down_write(&policy->rwsem);
1986 } 1943 }
1987 1944
1988 /* start new governor */ 1945 /* start new governor */
@@ -1991,10 +1948,10 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy,
1991 if (!__cpufreq_governor(policy, CPUFREQ_GOV_START)) { 1948 if (!__cpufreq_governor(policy, CPUFREQ_GOV_START)) {
1992 failed = 0; 1949 failed = 0;
1993 } else { 1950 } else {
1994 unlock_policy_rwsem_write(new_policy->cpu); 1951 up_write(&policy->rwsem);
1995 __cpufreq_governor(policy, 1952 __cpufreq_governor(policy,
1996 CPUFREQ_GOV_POLICY_EXIT); 1953 CPUFREQ_GOV_POLICY_EXIT);
1997 lock_policy_rwsem_write(new_policy->cpu); 1954 down_write(&policy->rwsem);
1998 } 1955 }
1999 } 1956 }
2000 1957
@@ -2040,7 +1997,7 @@ int cpufreq_update_policy(unsigned int cpu)
2040 goto no_policy; 1997 goto no_policy;
2041 } 1998 }
2042 1999
2043 lock_policy_rwsem_write(cpu); 2000 down_write(&policy->rwsem);
2044 2001
2045 pr_debug("updating policy for CPU %u\n", cpu); 2002 pr_debug("updating policy for CPU %u\n", cpu);
2046 memcpy(&new_policy, policy, sizeof(*policy)); 2003 memcpy(&new_policy, policy, sizeof(*policy));
@@ -2067,7 +2024,7 @@ int cpufreq_update_policy(unsigned int cpu)
2067 2024
2068 ret = cpufreq_set_policy(policy, &new_policy); 2025 ret = cpufreq_set_policy(policy, &new_policy);
2069 2026
2070 unlock_policy_rwsem_write(cpu); 2027 up_write(&policy->rwsem);
2071 2028
2072 cpufreq_cpu_put(policy); 2029 cpufreq_cpu_put(policy);
2073no_policy: 2030no_policy:
@@ -2225,14 +2182,9 @@ EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
2225 2182
2226static int __init cpufreq_core_init(void) 2183static int __init cpufreq_core_init(void)
2227{ 2184{
2228 int cpu;
2229
2230 if (cpufreq_disabled()) 2185 if (cpufreq_disabled())
2231 return -ENODEV; 2186 return -ENODEV;
2232 2187
2233 for_each_possible_cpu(cpu)
2234 init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
2235
2236 cpufreq_global_kobject = kobject_create(); 2188 cpufreq_global_kobject = kobject_create();
2237 BUG_ON(!cpufreq_global_kobject); 2189 BUG_ON(!cpufreq_global_kobject);
2238 register_syscore_ops(&cpufreq_syscore_ops); 2190 register_syscore_ops(&cpufreq_syscore_ops);