author     Linus Torvalds <torvalds@g5.osdl.org>  2006-01-31 18:09:20 -0500
committer  Linus Torvalds <torvalds@g5.osdl.org>  2006-01-31 18:09:20 -0500
commit     28e0cf22c1221650b4bfba48808d966160c42320
tree       79e530ac09f62000c1d0ec998a1bfa5404a2577a
parent     9aef3b7c208b216b54a2e6614c6287ca8a09cf6f
parent     c0672860199ac009af7cf198a134ee7a4c3a9bb3
Merge master.kernel.org:/pub/scm/linux/kernel/git/davej/cpufreq
 arch/i386/kernel/cpu/cpufreq/Kconfig       |  1
 arch/i386/kernel/cpu/cpufreq/p4-clockmod.c |  9
 drivers/cpufreq/cpufreq.c                  | 70
 drivers/cpufreq/cpufreq_conservative.c     | 52
 drivers/cpufreq/cpufreq_ondemand.c         | 41
 drivers/cpufreq/cpufreq_userspace.c        | 78
 include/linux/cpufreq.h                    |  3
 7 files changed, 141 insertions(+), 113 deletions(-)
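
The changes being merged share one theme: cpufreq locks that were semaphores used only in a mutex-like way are converted to the mutex API (DECLARE_MUTEX/down/up become DEFINE_MUTEX/mutex_lock/mutex_unlock, and init_MUTEX_LOCKED() on policy->lock becomes mutex_init() plus an explicit mutex_lock()), alongside the p4-clockmod N60 errata handling and the userspace governor cleanup. A minimal sketch of that locking idiom, using a made-up lock name rather than any lock from the files below:

#include <linux/mutex.h>

/* hypothetical example lock; the real ones in this merge are dbs_mutex,
 * userspace_mutex, cpufreq_governor_mutex and policy->lock */
static DEFINE_MUTEX(example_mutex);	/* was: static DECLARE_MUTEX(example_sem); */

static void example_update_tunable(void)
{
	mutex_lock(&example_mutex);	/* was: down(&example_sem); */
	/* ... modify shared governor/policy state ... */
	mutex_unlock(&example_mutex);	/* was: up(&example_sem); */
}
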
diff --git a/arch/i386/kernel/cpu/cpufreq/Kconfig b/arch/i386/kernel/cpu/cpufreq/Kconfig
index 0f1eb507233b..26892d2099b0 100644
--- a/arch/i386/kernel/cpu/cpufreq/Kconfig
+++ b/arch/i386/kernel/cpu/cpufreq/Kconfig
@@ -96,6 +96,7 @@ config X86_POWERNOW_K8_ACPI
 
 config X86_GX_SUSPMOD
 	tristate "Cyrix MediaGX/NatSemi Geode Suspend Modulation"
+	depends on PCI
 	help
 	  This add the CPUFreq driver for NatSemi Geode processors which
 	  support suspend modulation.
diff --git a/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c b/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c
index 270f2188d68b..cc73a7ae34bc 100644
--- a/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c
+++ b/arch/i386/kernel/cpu/cpufreq/p4-clockmod.c
@@ -52,6 +52,7 @@ enum {
 
 
 static int has_N44_O17_errata[NR_CPUS];
+static int has_N60_errata[NR_CPUS];
 static unsigned int stock_freq;
 static struct cpufreq_driver p4clockmod_driver;
 static unsigned int cpufreq_p4_get(unsigned int cpu);
@@ -226,6 +227,12 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy)
 	case 0x0f12:
 		has_N44_O17_errata[policy->cpu] = 1;
 		dprintk("has errata -- disabling low frequencies\n");
+		break;
+
+	case 0x0f29:
+		has_N60_errata[policy->cpu] = 1;
+		dprintk("has errata -- disabling frequencies lower than 2ghz\n");
+		break;
 	}
 
 	/* get max frequency */
@@ -237,6 +244,8 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy)
 	for (i=1; (p4clockmod_table[i].frequency != CPUFREQ_TABLE_END); i++) {
 		if ((i<2) && (has_N44_O17_errata[policy->cpu]))
 			p4clockmod_table[i].frequency = CPUFREQ_ENTRY_INVALID;
+		else if (has_N60_errata[policy->cpu] && p4clockmod_table[i].frequency < 2000000)
+			p4clockmod_table[i].frequency = CPUFREQ_ENTRY_INVALID;
 		else
 			p4clockmod_table[i].frequency = (stock_freq * i)/8;
 	}
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 277a843a87a6..7a511479ae29 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -26,6 +26,7 @@
 #include <linux/slab.h>
 #include <linux/cpu.h>
 #include <linux/completion.h>
+#include <linux/mutex.h>
 
 #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, "cpufreq-core", msg)
 
@@ -55,7 +56,7 @@ static DECLARE_RWSEM (cpufreq_notifier_rwsem);
 
 
 static LIST_HEAD(cpufreq_governor_list);
-static DECLARE_MUTEX (cpufreq_governor_sem);
+static DEFINE_MUTEX (cpufreq_governor_mutex);
 
 struct cpufreq_policy * cpufreq_cpu_get(unsigned int cpu)
 {
@@ -297,18 +298,18 @@ static int cpufreq_parse_governor (char *str_governor, unsigned int *policy,
 		return -EINVAL;
 	} else {
 		struct cpufreq_governor *t;
-		down(&cpufreq_governor_sem);
+		mutex_lock(&cpufreq_governor_mutex);
 		if (!cpufreq_driver || !cpufreq_driver->target)
 			goto out;
 		list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
 			if (!strnicmp(str_governor,t->name,CPUFREQ_NAME_LEN)) {
 				*governor = t;
-				up(&cpufreq_governor_sem);
+				mutex_unlock(&cpufreq_governor_mutex);
 				return 0;
 			}
 		}
 	out:
-		up(&cpufreq_governor_sem);
+		mutex_unlock(&cpufreq_governor_mutex);
 	}
 	return -EINVAL;
 }
@@ -600,7 +601,8 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
 	policy->cpu = cpu;
 	policy->cpus = cpumask_of_cpu(cpu);
 
-	init_MUTEX_LOCKED(&policy->lock);
+	mutex_init(&policy->lock);
+	mutex_lock(&policy->lock);
 	init_completion(&policy->kobj_unregister);
 	INIT_WORK(&policy->update, handle_update, (void *)(long)cpu);
 
@@ -610,6 +612,7 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
 	ret = cpufreq_driver->init(policy);
 	if (ret) {
 		dprintk("initialization failed\n");
+		mutex_unlock(&policy->lock);
 		goto err_out;
 	}
 
@@ -621,9 +624,10 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
 	strlcpy(policy->kobj.name, "cpufreq", KOBJ_NAME_LEN);
 
 	ret = kobject_register(&policy->kobj);
-	if (ret)
+	if (ret) {
+		mutex_unlock(&policy->lock);
 		goto err_out_driver_exit;
-
+	}
 	/* set up files for this cpu device */
 	drv_attr = cpufreq_driver->attr;
 	while ((drv_attr) && (*drv_attr)) {
@@ -641,7 +645,7 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
 	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 	policy->governor = NULL; /* to assure that the starting sequence is
 				  * run in cpufreq_set_policy */
-	up(&policy->lock);
+	mutex_unlock(&policy->lock);
 
 	/* set default policy */
 
@@ -762,10 +766,10 @@ static int cpufreq_remove_dev (struct sys_device * sys_dev)
 	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 #endif
 
-	down(&data->lock);
+	mutex_lock(&data->lock);
 	if (cpufreq_driver->target)
 		__cpufreq_governor(data, CPUFREQ_GOV_STOP);
-	up(&data->lock);
+	mutex_unlock(&data->lock);
 
 	kobject_unregister(&data->kobj);
 
@@ -834,9 +838,9 @@ unsigned int cpufreq_quick_get(unsigned int cpu)
 	unsigned int ret = 0;
 
 	if (policy) {
-		down(&policy->lock);
+		mutex_lock(&policy->lock);
 		ret = policy->cur;
-		up(&policy->lock);
+		mutex_unlock(&policy->lock);
 		cpufreq_cpu_put(policy);
 	}
 
@@ -862,7 +866,7 @@ unsigned int cpufreq_get(unsigned int cpu)
 	if (!cpufreq_driver->get)
 		goto out;
 
-	down(&policy->lock);
+	mutex_lock(&policy->lock);
 
 	ret = cpufreq_driver->get(cpu);
 
@@ -875,7 +879,7 @@ unsigned int cpufreq_get(unsigned int cpu)
 		}
 	}
 
-	up(&policy->lock);
+	mutex_unlock(&policy->lock);
 
 out:
 	cpufreq_cpu_put(policy);
@@ -1158,11 +1162,11 @@ int cpufreq_driver_target(struct cpufreq_policy *policy,
 	if (!policy)
 		return -EINVAL;
 
-	down(&policy->lock);
+	mutex_lock(&policy->lock);
 
 	ret = __cpufreq_driver_target(policy, target_freq, relation);
 
-	up(&policy->lock);
+	mutex_unlock(&policy->lock);
 
 	cpufreq_cpu_put(policy);
 
@@ -1199,9 +1203,9 @@ int cpufreq_governor(unsigned int cpu, unsigned int event)
 	if (!policy)
 		return -EINVAL;
 
-	down(&policy->lock);
+	mutex_lock(&policy->lock);
 	ret = __cpufreq_governor(policy, event);
-	up(&policy->lock);
+	mutex_unlock(&policy->lock);
 
 	cpufreq_cpu_put(policy);
 
@@ -1217,17 +1221,17 @@ int cpufreq_register_governor(struct cpufreq_governor *governor)
 	if (!governor)
 		return -EINVAL;
 
-	down(&cpufreq_governor_sem);
+	mutex_lock(&cpufreq_governor_mutex);
 
 	list_for_each_entry(t, &cpufreq_governor_list, governor_list) {
 		if (!strnicmp(governor->name,t->name,CPUFREQ_NAME_LEN)) {
-			up(&cpufreq_governor_sem);
+			mutex_unlock(&cpufreq_governor_mutex);
 			return -EBUSY;
 		}
 	}
 	list_add(&governor->governor_list, &cpufreq_governor_list);
 
-	up(&cpufreq_governor_sem);
+	mutex_unlock(&cpufreq_governor_mutex);
 
 	return 0;
 }
@@ -1239,9 +1243,9 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor)
 	if (!governor)
 		return;
 
-	down(&cpufreq_governor_sem);
+	mutex_lock(&cpufreq_governor_mutex);
 	list_del(&governor->governor_list);
-	up(&cpufreq_governor_sem);
+	mutex_unlock(&cpufreq_governor_mutex);
 	return;
 }
 EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
@@ -1268,9 +1272,9 @@ int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
 	if (!cpu_policy)
 		return -EINVAL;
 
-	down(&cpu_policy->lock);
+	mutex_lock(&cpu_policy->lock);
 	memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy));
-	up(&cpu_policy->lock);
+	mutex_unlock(&cpu_policy->lock);
 
 	cpufreq_cpu_put(cpu_policy);
 
@@ -1382,7 +1386,7 @@ int cpufreq_set_policy(struct cpufreq_policy *policy)
 		return -EINVAL;
 
 	/* lock this CPU */
-	down(&data->lock);
+	mutex_lock(&data->lock);
 
 	ret = __cpufreq_set_policy(data, policy);
 	data->user_policy.min = data->min;
@@ -1390,7 +1394,7 @@ int cpufreq_set_policy(struct cpufreq_policy *policy)
 	data->user_policy.policy = data->policy;
 	data->user_policy.governor = data->governor;
 
-	up(&data->lock);
+	mutex_unlock(&data->lock);
 	cpufreq_cpu_put(data);
 
 	return ret;
@@ -1414,7 +1418,7 @@ int cpufreq_update_policy(unsigned int cpu)
 	if (!data)
 		return -ENODEV;
 
-	down(&data->lock);
+	mutex_lock(&data->lock);
 
 	dprintk("updating policy for CPU %u\n", cpu);
 	memcpy(&policy,
@@ -1425,9 +1429,17 @@ int cpufreq_update_policy(unsigned int cpu)
 	policy.policy = data->user_policy.policy;
 	policy.governor = data->user_policy.governor;
 
+	/* BIOS might change freq behind our back
+	   -> ask driver for current freq and notify governors about a change */
+	if (cpufreq_driver->get) {
+		policy.cur = cpufreq_driver->get(cpu);
+		if (data->cur != policy.cur)
+			cpufreq_out_of_sync(cpu, data->cur, policy.cur);
+	}
+
 	ret = __cpufreq_set_policy(data, &policy);
 
-	up(&data->lock);
+	mutex_unlock(&data->lock);
 
 	cpufreq_cpu_put(data);
 	return ret;
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 39543a2bed0f..ac38766b2583 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -28,7 +28,7 @@
 #include <linux/jiffies.h>
 #include <linux/kernel_stat.h>
 #include <linux/percpu.h>
-
+#include <linux/mutex.h>
 /*
  * dbs is used in this file as a shortform for demandbased switching
  * It helps to keep variable names smaller, simpler
@@ -71,7 +71,7 @@ static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
 
 static unsigned int dbs_enable;	/* number of CPUs using this policy */
 
-static DECLARE_MUTEX (dbs_sem);
+static DEFINE_MUTEX (dbs_mutex);
 static DECLARE_WORK (dbs_work, do_dbs_timer, NULL);
 
 struct dbs_tuners {
@@ -139,9 +139,9 @@ static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
 	if (ret != 1 )
 		return -EINVAL;
 
-	down(&dbs_sem);
+	mutex_lock(&dbs_mutex);
 	dbs_tuners_ins.sampling_down_factor = input;
-	up(&dbs_sem);
+	mutex_unlock(&dbs_mutex);
 
 	return count;
 }
@@ -153,14 +153,14 @@ static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
 	int ret;
 	ret = sscanf (buf, "%u", &input);
 
-	down(&dbs_sem);
+	mutex_lock(&dbs_mutex);
 	if (ret != 1 || input > MAX_SAMPLING_RATE || input < MIN_SAMPLING_RATE) {
-		up(&dbs_sem);
+		mutex_unlock(&dbs_mutex);
 		return -EINVAL;
 	}
 
 	dbs_tuners_ins.sampling_rate = input;
-	up(&dbs_sem);
+	mutex_unlock(&dbs_mutex);
 
 	return count;
 }
@@ -172,16 +172,16 @@ static ssize_t store_up_threshold(struct cpufreq_policy *unused,
 	int ret;
 	ret = sscanf (buf, "%u", &input);
 
-	down(&dbs_sem);
+	mutex_lock(&dbs_mutex);
 	if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
 			input < MIN_FREQUENCY_UP_THRESHOLD ||
 			input <= dbs_tuners_ins.down_threshold) {
-		up(&dbs_sem);
+		mutex_unlock(&dbs_mutex);
 		return -EINVAL;
 	}
 
 	dbs_tuners_ins.up_threshold = input;
-	up(&dbs_sem);
+	mutex_unlock(&dbs_mutex);
 
 	return count;
 }
@@ -193,16 +193,16 @@ static ssize_t store_down_threshold(struct cpufreq_policy *unused,
 	int ret;
 	ret = sscanf (buf, "%u", &input);
 
-	down(&dbs_sem);
+	mutex_lock(&dbs_mutex);
 	if (ret != 1 || input > MAX_FREQUENCY_DOWN_THRESHOLD ||
 			input < MIN_FREQUENCY_DOWN_THRESHOLD ||
 			input >= dbs_tuners_ins.up_threshold) {
-		up(&dbs_sem);
+		mutex_unlock(&dbs_mutex);
 		return -EINVAL;
 	}
 
 	dbs_tuners_ins.down_threshold = input;
-	up(&dbs_sem);
+	mutex_unlock(&dbs_mutex);
 
 	return count;
 }
@@ -222,9 +222,9 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
 	if ( input > 1 )
 		input = 1;
 
-	down(&dbs_sem);
+	mutex_lock(&dbs_mutex);
 	if ( input == dbs_tuners_ins.ignore_nice ) { /* nothing to do */
-		up(&dbs_sem);
+		mutex_unlock(&dbs_mutex);
 		return count;
 	}
 	dbs_tuners_ins.ignore_nice = input;
@@ -236,7 +236,7 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
 		j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j);
 		j_dbs_info->prev_cpu_idle_down = j_dbs_info->prev_cpu_idle_up;
 	}
-	up(&dbs_sem);
+	mutex_unlock(&dbs_mutex);
 
 	return count;
 }
@@ -257,9 +257,9 @@ static ssize_t store_freq_step(struct cpufreq_policy *policy,
 
 	/* no need to test here if freq_step is zero as the user might actually
 	 * want this, they would be crazy though :) */
-	down(&dbs_sem);
+	mutex_lock(&dbs_mutex);
 	dbs_tuners_ins.freq_step = input;
-	up(&dbs_sem);
+	mutex_unlock(&dbs_mutex);
 
 	return count;
 }
@@ -444,12 +444,12 @@ static void dbs_check_cpu(int cpu)
 static void do_dbs_timer(void *data)
 {
 	int i;
-	down(&dbs_sem);
+	mutex_lock(&dbs_mutex);
 	for_each_online_cpu(i)
 		dbs_check_cpu(i);
 	schedule_delayed_work(&dbs_work,
 			usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
-	up(&dbs_sem);
+	mutex_unlock(&dbs_mutex);
 }
 
 static inline void dbs_timer_init(void)
@@ -487,7 +487,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		if (this_dbs_info->enable) /* Already enabled */
 			break;
 
-		down(&dbs_sem);
+		mutex_lock(&dbs_mutex);
 		for_each_cpu_mask(j, policy->cpus) {
 			struct cpu_dbs_info_s *j_dbs_info;
 			j_dbs_info = &per_cpu(cpu_dbs_info, j);
@@ -521,11 +521,11 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 			dbs_timer_init();
 		}
 
-		up(&dbs_sem);
+		mutex_unlock(&dbs_mutex);
 		break;
 
 	case CPUFREQ_GOV_STOP:
-		down(&dbs_sem);
+		mutex_lock(&dbs_mutex);
 		this_dbs_info->enable = 0;
 		sysfs_remove_group(&policy->kobj, &dbs_attr_group);
 		dbs_enable--;
@@ -536,12 +536,12 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		if (dbs_enable == 0)
 			dbs_timer_exit();
 
-		up(&dbs_sem);
+		mutex_unlock(&dbs_mutex);
 
 		break;
 
 	case CPUFREQ_GOV_LIMITS:
-		down(&dbs_sem);
+		mutex_lock(&dbs_mutex);
 		if (policy->max < this_dbs_info->cur_policy->cur)
 			__cpufreq_driver_target(
 					this_dbs_info->cur_policy,
@@ -550,7 +550,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 			__cpufreq_driver_target(
 					this_dbs_info->cur_policy,
 					policy->min, CPUFREQ_RELATION_L);
-		up(&dbs_sem);
+		mutex_unlock(&dbs_mutex);
 		break;
 	}
 	return 0;
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index e69fd8dd1f1c..9ee9411f186f 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -27,6 +27,7 @@
 #include <linux/jiffies.h>
 #include <linux/kernel_stat.h>
 #include <linux/percpu.h>
+#include <linux/mutex.h>
 
 /*
  * dbs is used in this file as a shortform for demandbased switching
@@ -70,7 +71,7 @@ static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
 
 static unsigned int dbs_enable;	/* number of CPUs using this policy */
 
-static DECLARE_MUTEX (dbs_sem);
+static DEFINE_MUTEX (dbs_mutex);
 static DECLARE_WORK (dbs_work, do_dbs_timer, NULL);
 
 struct dbs_tuners {
@@ -136,9 +137,9 @@ static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
 	if (input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
 		return -EINVAL;
 
-	down(&dbs_sem);
+	mutex_lock(&dbs_mutex);
 	dbs_tuners_ins.sampling_down_factor = input;
-	up(&dbs_sem);
+	mutex_unlock(&dbs_mutex);
 
 	return count;
 }
@@ -150,14 +151,14 @@ static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
 	int ret;
 	ret = sscanf (buf, "%u", &input);
 
-	down(&dbs_sem);
+	mutex_lock(&dbs_mutex);
 	if (ret != 1 || input > MAX_SAMPLING_RATE || input < MIN_SAMPLING_RATE) {
-		up(&dbs_sem);
+		mutex_unlock(&dbs_mutex);
 		return -EINVAL;
 	}
 
 	dbs_tuners_ins.sampling_rate = input;
-	up(&dbs_sem);
+	mutex_unlock(&dbs_mutex);
 
 	return count;
 }
@@ -169,15 +170,15 @@ static ssize_t store_up_threshold(struct cpufreq_policy *unused,
 	int ret;
 	ret = sscanf (buf, "%u", &input);
 
-	down(&dbs_sem);
+	mutex_lock(&dbs_mutex);
 	if (ret != 1 || input > MAX_FREQUENCY_UP_THRESHOLD ||
 			input < MIN_FREQUENCY_UP_THRESHOLD) {
-		up(&dbs_sem);
+		mutex_unlock(&dbs_mutex);
 		return -EINVAL;
 	}
 
 	dbs_tuners_ins.up_threshold = input;
-	up(&dbs_sem);
+	mutex_unlock(&dbs_mutex);
 
 	return count;
 }
@@ -197,9 +198,9 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
 	if ( input > 1 )
 		input = 1;
 
-	down(&dbs_sem);
+	mutex_lock(&dbs_mutex);
 	if ( input == dbs_tuners_ins.ignore_nice ) { /* nothing to do */
-		up(&dbs_sem);
+		mutex_unlock(&dbs_mutex);
 		return count;
 	}
 	dbs_tuners_ins.ignore_nice = input;
@@ -211,7 +212,7 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
 		j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j);
 		j_dbs_info->prev_cpu_idle_down = j_dbs_info->prev_cpu_idle_up;
 	}
-	up(&dbs_sem);
+	mutex_unlock(&dbs_mutex);
 
 	return count;
 }
@@ -356,12 +357,12 @@ static void dbs_check_cpu(int cpu)
 static void do_dbs_timer(void *data)
 {
 	int i;
-	down(&dbs_sem);
+	mutex_lock(&dbs_mutex);
 	for_each_online_cpu(i)
 		dbs_check_cpu(i);
 	schedule_delayed_work(&dbs_work,
 			usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
-	up(&dbs_sem);
+	mutex_unlock(&dbs_mutex);
 }
 
 static inline void dbs_timer_init(void)
@@ -399,7 +400,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		if (this_dbs_info->enable) /* Already enabled */
 			break;
 
-		down(&dbs_sem);
+		mutex_lock(&dbs_mutex);
 		for_each_cpu_mask(j, policy->cpus) {
 			struct cpu_dbs_info_s *j_dbs_info;
 			j_dbs_info = &per_cpu(cpu_dbs_info, j);
@@ -435,11 +436,11 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 			dbs_timer_init();
 		}
 
-		up(&dbs_sem);
+		mutex_unlock(&dbs_mutex);
 		break;
 
 	case CPUFREQ_GOV_STOP:
-		down(&dbs_sem);
+		mutex_lock(&dbs_mutex);
 		this_dbs_info->enable = 0;
 		sysfs_remove_group(&policy->kobj, &dbs_attr_group);
 		dbs_enable--;
@@ -450,12 +451,12 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		if (dbs_enable == 0)
 			dbs_timer_exit();
 
-		up(&dbs_sem);
+		mutex_unlock(&dbs_mutex);
 
 		break;
 
 	case CPUFREQ_GOV_LIMITS:
-		down(&dbs_sem);
+		mutex_lock(&dbs_mutex);
 		if (policy->max < this_dbs_info->cur_policy->cur)
 			__cpufreq_driver_target(
 					this_dbs_info->cur_policy,
@@ -464,7 +465,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 			__cpufreq_driver_target(
 					this_dbs_info->cur_policy,
 					policy->min, CPUFREQ_RELATION_L);
-		up(&dbs_sem);
+		mutex_unlock(&dbs_mutex);
 		break;
 	}
 	return 0;
diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c
index d32bf3593cd3..92a0be22a2a9 100644
--- a/drivers/cpufreq/cpufreq_userspace.c
+++ b/drivers/cpufreq/cpufreq_userspace.c
@@ -1,3 +1,4 @@
+
 /*
  * linux/drivers/cpufreq/cpufreq_userspace.c
  *
@@ -21,6 +22,7 @@
 #include <linux/types.h>
 #include <linux/fs.h>
 #include <linux/sysfs.h>
+#include <linux/mutex.h>
 
 #include <asm/uaccess.h>
 
@@ -33,9 +35,8 @@ static unsigned int cpu_min_freq[NR_CPUS];
 static unsigned int	cpu_cur_freq[NR_CPUS]; /* current CPU freq */
 static unsigned int	cpu_set_freq[NR_CPUS]; /* CPU freq desired by userspace */
 static unsigned int	cpu_is_managed[NR_CPUS];
-static struct cpufreq_policy current_policy[NR_CPUS];
 
-static DECLARE_MUTEX (userspace_sem);
+static DEFINE_MUTEX (userspace_mutex);
 
 #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_GOVERNOR, "userspace", msg)
 
@@ -64,35 +65,34 @@ static struct notifier_block userspace_cpufreq_notifier_block = {
 *
 * Sets the CPU frequency to freq.
 */
-static int cpufreq_set(unsigned int freq, unsigned int cpu)
+static int cpufreq_set(unsigned int freq, struct cpufreq_policy *policy)
 {
	int ret = -EINVAL;
 
-	dprintk("cpufreq_set for cpu %u, freq %u kHz\n", cpu, freq);
+	dprintk("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq);
 
-	down(&userspace_sem);
-	if (!cpu_is_managed[cpu])
+	mutex_lock(&userspace_mutex);
+	if (!cpu_is_managed[policy->cpu])
		goto err;
 
-	cpu_set_freq[cpu] = freq;
+	cpu_set_freq[policy->cpu] = freq;
 
-	if (freq < cpu_min_freq[cpu])
-		freq = cpu_min_freq[cpu];
-	if (freq > cpu_max_freq[cpu])
-		freq = cpu_max_freq[cpu];
+	if (freq < cpu_min_freq[policy->cpu])
+		freq = cpu_min_freq[policy->cpu];
+	if (freq > cpu_max_freq[policy->cpu])
+		freq = cpu_max_freq[policy->cpu];
 
	/*
	 * We're safe from concurrent calls to ->target() here
-	 * as we hold the userspace_sem lock. If we were calling
+	 * as we hold the userspace_mutex lock. If we were calling
	 * cpufreq_driver_target, a deadlock situation might occur:
-	 * A: cpufreq_set (lock userspace_sem) -> cpufreq_driver_target(lock policy->lock)
-	 * B: cpufreq_set_policy(lock policy->lock) -> __cpufreq_governor -> cpufreq_governor_userspace (lock userspace_sem)
+	 * A: cpufreq_set (lock userspace_mutex) -> cpufreq_driver_target(lock policy->lock)
+	 * B: cpufreq_set_policy(lock policy->lock) -> __cpufreq_governor -> cpufreq_governor_userspace (lock userspace_mutex)
	 */
-	ret = __cpufreq_driver_target(&current_policy[cpu], freq,
-	      CPUFREQ_RELATION_L);
+	ret = __cpufreq_driver_target(policy, freq, CPUFREQ_RELATION_L);
 
 err:
-	up(&userspace_sem);
+	mutex_unlock(&userspace_mutex);
	return ret;
 }
 
@@ -113,7 +113,7 @@ store_speed (struct cpufreq_policy *policy, const char *buf, size_t count)
	if (ret != 1)
		return -EINVAL;
 
-	cpufreq_set(freq, policy->cpu);
+	cpufreq_set(freq, policy);
 
	return count;
 }
@@ -134,44 +134,48 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
		if (!cpu_online(cpu))
			return -EINVAL;
		BUG_ON(!policy->cur);
-		down(&userspace_sem);
+		mutex_lock(&userspace_mutex);
		cpu_is_managed[cpu] = 1;
		cpu_min_freq[cpu] = policy->min;
		cpu_max_freq[cpu] = policy->max;
		cpu_cur_freq[cpu] = policy->cur;
		cpu_set_freq[cpu] = policy->cur;
		sysfs_create_file (&policy->kobj, &freq_attr_scaling_setspeed.attr);
-		memcpy (&current_policy[cpu], policy, sizeof(struct cpufreq_policy));
		dprintk("managing cpu %u started (%u - %u kHz, currently %u kHz)\n", cpu, cpu_min_freq[cpu], cpu_max_freq[cpu], cpu_cur_freq[cpu]);
-		up(&userspace_sem);
+		mutex_unlock(&userspace_mutex);
		break;
	case CPUFREQ_GOV_STOP:
-		down(&userspace_sem);
+		mutex_lock(&userspace_mutex);
		cpu_is_managed[cpu] = 0;
		cpu_min_freq[cpu] = 0;
		cpu_max_freq[cpu] = 0;
		cpu_set_freq[cpu] = 0;
		sysfs_remove_file (&policy->kobj, &freq_attr_scaling_setspeed.attr);
		dprintk("managing cpu %u stopped\n", cpu);
-		up(&userspace_sem);
+		mutex_unlock(&userspace_mutex);
		break;
	case CPUFREQ_GOV_LIMITS:
-		down(&userspace_sem);
-		cpu_min_freq[cpu] = policy->min;
-		cpu_max_freq[cpu] = policy->max;
-		dprintk("limit event for cpu %u: %u - %u kHz, currently %u kHz, last set to %u kHz\n", cpu, cpu_min_freq[cpu], cpu_max_freq[cpu], cpu_cur_freq[cpu], cpu_set_freq[cpu]);
+		mutex_lock(&userspace_mutex);
+		dprintk("limit event for cpu %u: %u - %u kHz,"
+			"currently %u kHz, last set to %u kHz\n",
+			cpu, policy->min, policy->max,
+			cpu_cur_freq[cpu], cpu_set_freq[cpu]);
		if (policy->max < cpu_set_freq[cpu]) {
-			__cpufreq_driver_target(&current_policy[cpu], policy->max,
+			__cpufreq_driver_target(policy, policy->max,
				CPUFREQ_RELATION_H);
-		} else if (policy->min > cpu_set_freq[cpu]) {
-			__cpufreq_driver_target(&current_policy[cpu], policy->min,
-				CPUFREQ_RELATION_L);
-		} else {
-			__cpufreq_driver_target(&current_policy[cpu], cpu_set_freq[cpu],
-				CPUFREQ_RELATION_L);
+		}
+		else if (policy->min > cpu_set_freq[cpu]) {
+			__cpufreq_driver_target(policy, policy->min,
+				CPUFREQ_RELATION_L);
		}
-		memcpy (&current_policy[cpu], policy, sizeof(struct cpufreq_policy));
-		up(&userspace_sem);
+		else {
+			__cpufreq_driver_target(policy, cpu_set_freq[cpu],
+				CPUFREQ_RELATION_L);
+		}
+		cpu_min_freq[cpu] = policy->min;
+		cpu_max_freq[cpu] = policy->max;
+		cpu_cur_freq[cpu] = policy->cur;
+		mutex_unlock(&userspace_mutex);
		break;
	}
	return 0;
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index c31650df9241..17866d7e2b71 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -14,6 +14,7 @@
 #ifndef _LINUX_CPUFREQ_H
 #define _LINUX_CPUFREQ_H
 
+#include <linux/mutex.h>
 #include <linux/config.h>
 #include <linux/notifier.h>
 #include <linux/threads.h>
@@ -82,7 +83,7 @@ struct cpufreq_policy {
	unsigned int		policy; /* see above */
	struct cpufreq_governor	*governor; /* see below */
 
-	struct semaphore	lock;   /* CPU ->setpolicy or ->target may
+	struct mutex		lock;   /* CPU ->setpolicy or ->target may
				   only be called once a time */
 
	struct work_struct	update; /* if update_policy() needs to be