Diffstat (limited to 'drivers/cpufreq')
-rw-r--r--  drivers/cpufreq/Kconfig                 |   2
-rw-r--r--  drivers/cpufreq/cpufreq.c               | 258
-rw-r--r--  drivers/cpufreq/cpufreq_conservative.c  |   2
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c      |  64
-rw-r--r--  drivers/cpufreq/cpufreq_stats.c         |   2
-rw-r--r--  drivers/cpufreq/cpufreq_userspace.c     |   2
6 files changed, 213 insertions(+), 117 deletions(-)
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index 491779af8d55..d155e81b5c97 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -16,7 +16,7 @@ config CPU_FREQ
 if CPU_FREQ
 
 config CPU_FREQ_TABLE
-        def_tristate m
+        tristate
 
 config CPU_FREQ_DEBUG
         bool "Enable CPUfreq debugging"
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index a45cc89e387a..f52facc570f5 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -41,8 +41,67 @@ static struct cpufreq_driver *cpufreq_driver;
 static struct cpufreq_policy *cpufreq_cpu_data[NR_CPUS];
 static DEFINE_SPINLOCK(cpufreq_driver_lock);
 
+/*
+ * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
+ * all cpufreq/hotplug/workqueue/etc related lock issues.
+ *
+ * The rules for this semaphore:
+ * - Any routine that wants to read from the policy structure will
+ *   do a down_read on this semaphore.
+ * - Any routine that will write to the policy structure and/or may take away
+ *   the policy altogether (eg. CPU hotplug), will hold this lock in write
+ *   mode before doing so.
+ *
+ * Additional rules:
+ * - All holders of the lock should check to make sure that the CPU they
+ *   are concerned with is online after they get the lock.
+ * - Governor routines that can be called in the cpufreq hotplug path should
+ *   not take this sem, as the top-level hotplug notifier handler takes it.
+ */
+static DEFINE_PER_CPU(int, policy_cpu);
+static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
+
+#define lock_policy_rwsem(mode, cpu) \
+int lock_policy_rwsem_##mode \
+(int cpu) \
+{ \
+        int policy_cpu = per_cpu(policy_cpu, cpu); \
+        BUG_ON(policy_cpu == -1); \
+        down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \
+        if (unlikely(!cpu_online(cpu))) { \
+                up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \
+                return -1; \
+        } \
+        \
+        return 0; \
+}
+
+lock_policy_rwsem(read, cpu);
+EXPORT_SYMBOL_GPL(lock_policy_rwsem_read);
+
+lock_policy_rwsem(write, cpu);
+EXPORT_SYMBOL_GPL(lock_policy_rwsem_write);
+
+void unlock_policy_rwsem_read(int cpu)
+{
+        int policy_cpu = per_cpu(policy_cpu, cpu);
+        BUG_ON(policy_cpu == -1);
+        up_read(&per_cpu(cpu_policy_rwsem, policy_cpu));
+}
+EXPORT_SYMBOL_GPL(unlock_policy_rwsem_read);
+
+void unlock_policy_rwsem_write(int cpu)
+{
+        int policy_cpu = per_cpu(policy_cpu, cpu);
+        BUG_ON(policy_cpu == -1);
+        up_write(&per_cpu(cpu_policy_rwsem, policy_cpu));
+}
+EXPORT_SYMBOL_GPL(unlock_policy_rwsem_write);
+
+
 /* internal prototypes */
 static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event);
+static unsigned int __cpufreq_get(unsigned int cpu);
 static void handle_update(struct work_struct *work);
 
 /**
@@ -415,12 +474,8 @@ static ssize_t store_##file_name \
         if (ret != 1) \
                 return -EINVAL; \
         \
-        lock_cpu_hotplug(); \
-        mutex_lock(&policy->lock); \
         ret = __cpufreq_set_policy(policy, &new_policy); \
         policy->user_policy.object = policy->object; \
-        mutex_unlock(&policy->lock); \
-        unlock_cpu_hotplug(); \
         \
         return ret ? ret : count; \
 }
@@ -434,7 +489,7 @@ store_one(scaling_max_freq,max);
 static ssize_t show_cpuinfo_cur_freq (struct cpufreq_policy * policy,
                                         char *buf)
 {
-        unsigned int cur_freq = cpufreq_get(policy->cpu);
+        unsigned int cur_freq = __cpufreq_get(policy->cpu);
         if (!cur_freq)
                 return sprintf(buf, "<unknown>");
         return sprintf(buf, "%u\n", cur_freq);
@@ -479,18 +534,12 @@ static ssize_t store_scaling_governor (struct cpufreq_policy * policy,
                                                 &new_policy.governor))
                 return -EINVAL;
 
-        lock_cpu_hotplug();
-
         /* Do not use cpufreq_set_policy here or the user_policy.max
            will be wrongly overridden */
-        mutex_lock(&policy->lock);
         ret = __cpufreq_set_policy(policy, &new_policy);
 
         policy->user_policy.policy = policy->policy;
         policy->user_policy.governor = policy->governor;
-        mutex_unlock(&policy->lock);
-
-        unlock_cpu_hotplug();
 
         if (ret)
                 return ret;
@@ -595,11 +644,17 @@ static ssize_t show(struct kobject * kobj, struct attribute * attr ,char * buf)
         policy = cpufreq_cpu_get(policy->cpu);
         if (!policy)
                 return -EINVAL;
+
+        if (lock_policy_rwsem_read(policy->cpu) < 0)
+                return -EINVAL;
+
         if (fattr->show)
                 ret = fattr->show(policy, buf);
         else
                 ret = -EIO;
 
+        unlock_policy_rwsem_read(policy->cpu);
+
         cpufreq_cpu_put(policy);
         return ret;
 }
@@ -613,11 +668,17 @@ static ssize_t store(struct kobject * kobj, struct attribute * attr,
         policy = cpufreq_cpu_get(policy->cpu);
         if (!policy)
                 return -EINVAL;
+
+        if (lock_policy_rwsem_write(policy->cpu) < 0)
+                return -EINVAL;
+
         if (fattr->store)
                 ret = fattr->store(policy, buf, count);
         else
                 ret = -EIO;
 
+        unlock_policy_rwsem_write(policy->cpu);
+
         cpufreq_cpu_put(policy);
         return ret;
 }
@@ -691,8 +752,10 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
         policy->cpu = cpu;
         policy->cpus = cpumask_of_cpu(cpu);
 
-        mutex_init(&policy->lock);
-        mutex_lock(&policy->lock);
+        /* Initially set CPU itself as the policy_cpu */
+        per_cpu(policy_cpu, cpu) = cpu;
+        lock_policy_rwsem_write(cpu);
+
         init_completion(&policy->kobj_unregister);
         INIT_WORK(&policy->update, handle_update);
 
@@ -702,7 +765,7 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
         ret = cpufreq_driver->init(policy);
         if (ret) {
                 dprintk("initialization failed\n");
-                mutex_unlock(&policy->lock);
+                unlock_policy_rwsem_write(cpu);
                 goto err_out;
         }
 
@@ -716,6 +779,14 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
                  */
                 managed_policy = cpufreq_cpu_get(j);
                 if (unlikely(managed_policy)) {
+
+                        /* Set proper policy_cpu */
+                        unlock_policy_rwsem_write(cpu);
+                        per_cpu(policy_cpu, cpu) = managed_policy->cpu;
+
+                        if (lock_policy_rwsem_write(cpu) < 0)
+                                goto err_out_driver_exit;
+
                         spin_lock_irqsave(&cpufreq_driver_lock, flags);
                         managed_policy->cpus = policy->cpus;
                         cpufreq_cpu_data[cpu] = managed_policy;
@@ -726,13 +797,13 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
                                                 &managed_policy->kobj,
                                                 "cpufreq");
                         if (ret) {
-                                mutex_unlock(&policy->lock);
+                                unlock_policy_rwsem_write(cpu);
                                 goto err_out_driver_exit;
                         }
 
                         cpufreq_debug_enable_ratelimit();
-                        mutex_unlock(&policy->lock);
                         ret = 0;
+                        unlock_policy_rwsem_write(cpu);
                         goto err_out_driver_exit; /* call driver->exit() */
                 }
         }
@@ -746,7 +817,7 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
 
         ret = kobject_register(&policy->kobj);
         if (ret) {
-                mutex_unlock(&policy->lock);
+                unlock_policy_rwsem_write(cpu);
                 goto err_out_driver_exit;
         }
         /* set up files for this cpu device */
@@ -761,8 +832,10 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
         sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
 
         spin_lock_irqsave(&cpufreq_driver_lock, flags);
-        for_each_cpu_mask(j, policy->cpus)
+        for_each_cpu_mask(j, policy->cpus) {
                 cpufreq_cpu_data[j] = policy;
+                per_cpu(policy_cpu, j) = policy->cpu;
+        }
         spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
         /* symlink affected CPUs */
@@ -778,14 +851,14 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
                 ret = sysfs_create_link(&cpu_sys_dev->kobj, &policy->kobj,
                                         "cpufreq");
                 if (ret) {
-                        mutex_unlock(&policy->lock);
+                        unlock_policy_rwsem_write(cpu);
                         goto err_out_unregister;
                 }
         }
 
         policy->governor = NULL; /* to assure that the starting sequence is
                                   * run in cpufreq_set_policy */
-        mutex_unlock(&policy->lock);
+        unlock_policy_rwsem_write(cpu);
 
         /* set default policy */
         ret = cpufreq_set_policy(&new_policy);
@@ -826,11 +899,13 @@ module_out:
 
 
 /**
- * cpufreq_remove_dev - remove a CPU device
+ * __cpufreq_remove_dev - remove a CPU device
  *
  * Removes the cpufreq interface for a CPU device.
+ * Caller should already hold policy_rwsem in write mode for this CPU.
+ * This routine releases the rwsem before returning.
  */
-static int cpufreq_remove_dev (struct sys_device * sys_dev)
+static int __cpufreq_remove_dev (struct sys_device * sys_dev)
 {
         unsigned int cpu = sys_dev->id;
         unsigned long flags;
@@ -849,6 +924,7 @@ static int cpufreq_remove_dev (struct sys_device * sys_dev)
         if (!data) {
                 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
                 cpufreq_debug_enable_ratelimit();
+                unlock_policy_rwsem_write(cpu);
                 return -EINVAL;
         }
         cpufreq_cpu_data[cpu] = NULL;
@@ -865,6 +941,7 @@ static int cpufreq_remove_dev (struct sys_device * sys_dev)
                 sysfs_remove_link(&sys_dev->kobj, "cpufreq");
                 cpufreq_cpu_put(data);
                 cpufreq_debug_enable_ratelimit();
+                unlock_policy_rwsem_write(cpu);
                 return 0;
         }
 #endif
@@ -873,6 +950,7 @@ static int cpufreq_remove_dev (struct sys_device * sys_dev)
         if (!kobject_get(&data->kobj)) {
                 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
                 cpufreq_debug_enable_ratelimit();
+                unlock_policy_rwsem_write(cpu);
                 return -EFAULT;
         }
 
@@ -906,10 +984,10 @@ static int cpufreq_remove_dev (struct sys_device * sys_dev)
         spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 #endif
 
-        mutex_lock(&data->lock);
         if (cpufreq_driver->target)
                 __cpufreq_governor(data, CPUFREQ_GOV_STOP);
-        mutex_unlock(&data->lock);
+
+        unlock_policy_rwsem_write(cpu);
 
         kobject_unregister(&data->kobj);
 
@@ -933,6 +1011,18 @@ static int cpufreq_remove_dev (struct sys_device * sys_dev)
 }
 
 
+static int cpufreq_remove_dev (struct sys_device * sys_dev)
+{
+        unsigned int cpu = sys_dev->id;
+        int retval;
+        if (unlikely(lock_policy_rwsem_write(cpu)))
+                BUG();
+
+        retval = __cpufreq_remove_dev(sys_dev);
+        return retval;
+}
+
+
 static void handle_update(struct work_struct *work)
 {
         struct cpufreq_policy *policy =
@@ -980,9 +1070,12 @@ unsigned int cpufreq_quick_get(unsigned int cpu)
         unsigned int ret_freq = 0;
 
         if (policy) {
-                mutex_lock(&policy->lock);
+                if (unlikely(lock_policy_rwsem_read(cpu)))
+                        return ret_freq;
+
                 ret_freq = policy->cur;
-                mutex_unlock(&policy->lock);
+
+                unlock_policy_rwsem_read(cpu);
                 cpufreq_cpu_put(policy);
         }
 
@@ -991,24 +1084,13 @@ unsigned int cpufreq_quick_get(unsigned int cpu)
 EXPORT_SYMBOL(cpufreq_quick_get);
 
 
-/**
- * cpufreq_get - get the current CPU frequency (in kHz)
- * @cpu: CPU number
- *
- * Get the CPU current (static) CPU frequency
- */
-unsigned int cpufreq_get(unsigned int cpu)
+static unsigned int __cpufreq_get(unsigned int cpu)
 {
-        struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+        struct cpufreq_policy *policy = cpufreq_cpu_data[cpu];
         unsigned int ret_freq = 0;
 
-        if (!policy)
-                return 0;
-
         if (!cpufreq_driver->get)
-                goto out;
-
-        mutex_lock(&policy->lock);
+                return (ret_freq);
 
         ret_freq = cpufreq_driver->get(cpu);
 
@@ -1022,11 +1104,33 @@ unsigned int cpufreq_get(unsigned int cpu)
                 }
         }
 
-        mutex_unlock(&policy->lock);
+        return (ret_freq);
+}
 
-out:
-        cpufreq_cpu_put(policy);
+/**
+ * cpufreq_get - get the current CPU frequency (in kHz)
+ * @cpu: CPU number
+ *
+ * Get the CPU current (static) CPU frequency
+ */
+unsigned int cpufreq_get(unsigned int cpu)
+{
+        unsigned int ret_freq = 0;
+        struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+
+        if (!policy)
+                goto out;
+
+        if (unlikely(lock_policy_rwsem_read(cpu)))
+                goto out_policy;
+
+        ret_freq = __cpufreq_get(cpu);
 
+        unlock_policy_rwsem_read(cpu);
+
+out_policy:
+        cpufreq_cpu_put(policy);
+out:
         return (ret_freq);
 }
 EXPORT_SYMBOL(cpufreq_get);
@@ -1278,7 +1382,6 @@ EXPORT_SYMBOL(cpufreq_unregister_notifier);
  *********************************************************************/
 
 
-/* Must be called with lock_cpu_hotplug held */
 int __cpufreq_driver_target(struct cpufreq_policy *policy,
                             unsigned int target_freq,
                             unsigned int relation)
@@ -1304,20 +1407,19 @@ int cpufreq_driver_target(struct cpufreq_policy *policy,
         if (!policy)
                 return -EINVAL;
 
-        lock_cpu_hotplug();
-        mutex_lock(&policy->lock);
+        if (unlikely(lock_policy_rwsem_write(policy->cpu)))
+                return -EINVAL;
 
         ret = __cpufreq_driver_target(policy, target_freq, relation);
 
-        mutex_unlock(&policy->lock);
-        unlock_cpu_hotplug();
+        unlock_policy_rwsem_write(policy->cpu);
 
         cpufreq_cpu_put(policy);
         return ret;
 }
 EXPORT_SYMBOL_GPL(cpufreq_driver_target);
 
-int cpufreq_driver_getavg(struct cpufreq_policy *policy)
+int __cpufreq_driver_getavg(struct cpufreq_policy *policy)
 {
         int ret = 0;
 
@@ -1325,20 +1427,15 @@ int cpufreq_driver_getavg(struct cpufreq_policy *policy)
         if (!policy)
                 return -EINVAL;
 
-        mutex_lock(&policy->lock);
-
         if (cpu_online(policy->cpu) && cpufreq_driver->getavg)
                 ret = cpufreq_driver->getavg(policy->cpu);
 
-        mutex_unlock(&policy->lock);
-
         cpufreq_cpu_put(policy);
         return ret;
 }
-EXPORT_SYMBOL_GPL(cpufreq_driver_getavg);
+EXPORT_SYMBOL_GPL(__cpufreq_driver_getavg);
 
 /*
- * Locking: Must be called with the lock_cpu_hotplug() lock held
  * when "event" is CPUFREQ_GOV_LIMITS
  */
 
@@ -1420,9 +1517,7 @@ int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
         if (!cpu_policy)
                 return -EINVAL;
 
-        mutex_lock(&cpu_policy->lock);
         memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy));
-        mutex_unlock(&cpu_policy->lock);
 
         cpufreq_cpu_put(cpu_policy);
         return 0;
@@ -1433,7 +1528,6 @@ EXPORT_SYMBOL(cpufreq_get_policy);
 /*
  * data : current policy.
  * policy : policy to be set.
- * Locking: Must be called with the lock_cpu_hotplug() lock held
  */
 static int __cpufreq_set_policy(struct cpufreq_policy *data,
                                 struct cpufreq_policy *policy)
@@ -1539,10 +1633,9 @@ int cpufreq_set_policy(struct cpufreq_policy *policy)
         if (!data)
                 return -EINVAL;
 
-        lock_cpu_hotplug();
+        if (unlikely(lock_policy_rwsem_write(policy->cpu)))
+                return -EINVAL;
 
-        /* lock this CPU */
-        mutex_lock(&data->lock);
 
         ret = __cpufreq_set_policy(data, policy);
         data->user_policy.min = data->min;
@@ -1550,9 +1643,8 @@ int cpufreq_set_policy(struct cpufreq_policy *policy)
         data->user_policy.policy = data->policy;
         data->user_policy.governor = data->governor;
 
-        mutex_unlock(&data->lock);
+        unlock_policy_rwsem_write(policy->cpu);
 
-        unlock_cpu_hotplug();
         cpufreq_cpu_put(data);
 
         return ret;
@@ -1576,8 +1668,8 @@ int cpufreq_update_policy(unsigned int cpu)
         if (!data)
                 return -ENODEV;
 
-        lock_cpu_hotplug();
-        mutex_lock(&data->lock);
+        if (unlikely(lock_policy_rwsem_write(cpu)))
+                return -EINVAL;
 
         dprintk("updating policy for CPU %u\n", cpu);
         memcpy(&policy, data, sizeof(struct cpufreq_policy));
@@ -1602,8 +1694,8 @@ int cpufreq_update_policy(unsigned int cpu)
 
         ret = __cpufreq_set_policy(data, &policy);
 
-        mutex_unlock(&data->lock);
-        unlock_cpu_hotplug();
+        unlock_policy_rwsem_write(cpu);
+
         cpufreq_cpu_put(data);
         return ret;
 }
@@ -1613,31 +1705,28 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
                                         unsigned long action, void *hcpu)
 {
         unsigned int cpu = (unsigned long)hcpu;
-        struct cpufreq_policy *policy;
         struct sys_device *sys_dev;
+        struct cpufreq_policy *policy;
 
         sys_dev = get_cpu_sysdev(cpu);
-
         if (sys_dev) {
                 switch (action) {
                 case CPU_ONLINE:
                         cpufreq_add_dev(sys_dev);
                         break;
                 case CPU_DOWN_PREPARE:
-                        /*
-                         * We attempt to put this cpu in lowest frequency
-                         * possible before going down. This will permit
-                         * hardware-managed P-State to switch other related
-                         * threads to min or higher speeds if possible.
-                         */
+                        if (unlikely(lock_policy_rwsem_write(cpu)))
+                                BUG();
+
                         policy = cpufreq_cpu_data[cpu];
                         if (policy) {
-                                cpufreq_driver_target(policy, policy->min,
+                                __cpufreq_driver_target(policy, policy->min,
                                                       CPUFREQ_RELATION_H);
                         }
+                        __cpufreq_remove_dev(sys_dev);
                         break;
-                case CPU_DEAD:
-                        cpufreq_remove_dev(sys_dev);
+                case CPU_DOWN_FAILED:
+                        cpufreq_add_dev(sys_dev);
                         break;
                 }
         }
@@ -1751,3 +1840,16 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver)
         return 0;
 }
 EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
+
+static int __init cpufreq_core_init(void)
+{
+        int cpu;
+
+        for_each_possible_cpu(cpu) {
+                per_cpu(policy_cpu, cpu) = -1;
+                init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
+        }
+        return 0;
+}
+
+core_initcall(cpufreq_core_init);
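
A usage sketch, not part of the patch: with the per-CPU rwsem scheme above, a reader that wants a stable view of a CPU's policy takes a refcount on the policy, takes the semaphore in read mode, and must cope with lock_policy_rwsem_read() failing once the CPU has gone offline. read_policy_cur() is a hypothetical caller that mirrors the cpufreq_quick_get() path in this patch.

static unsigned int read_policy_cur(unsigned int cpu)
{
        struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);   /* refcount */
        unsigned int cur = 0;

        if (!policy)
                return 0;

        /* Returns -1 (and drops the sem) if the CPU went offline. */
        if (lock_policy_rwsem_read(cpu) < 0)
                goto out_put;

        cur = policy->cur;                  /* stable under the read side */
        unlock_policy_rwsem_read(cpu);

out_put:
        cpufreq_cpu_put(policy);            /* drop the refcount */
        return cur;
}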
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 05d6c22ba07c..26f440ccc3fb 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -429,14 +429,12 @@ static void dbs_check_cpu(int cpu)
 static void do_dbs_timer(struct work_struct *work)
 {
         int i;
-        lock_cpu_hotplug();
         mutex_lock(&dbs_mutex);
         for_each_online_cpu(i)
                 dbs_check_cpu(i);
         schedule_delayed_work(&dbs_work,
                         usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
         mutex_unlock(&dbs_mutex);
-        unlock_cpu_hotplug();
 }
 
 static inline void dbs_timer_init(void)
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index f697449327c6..d60bcb9d14cc 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -52,19 +52,20 @@ static unsigned int def_sampling_rate;
 static void do_dbs_timer(struct work_struct *work);
 
 /* Sampling types */
-enum dbs_sample {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};
+enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};
 
 struct cpu_dbs_info_s {
         cputime64_t prev_cpu_idle;
         cputime64_t prev_cpu_wall;
         struct cpufreq_policy *cur_policy;
         struct delayed_work work;
-        enum dbs_sample sample_type;
-        unsigned int enable;
         struct cpufreq_frequency_table *freq_table;
         unsigned int freq_lo;
         unsigned int freq_lo_jiffies;
         unsigned int freq_hi_jiffies;
+        int cpu;
+        unsigned int enable:1,
+                sample_type:1;
 };
 static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
 
@@ -402,7 +403,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
         if (load < (dbs_tuners_ins.up_threshold - 10)) {
                 unsigned int freq_next, freq_cur;
 
-                freq_cur = cpufreq_driver_getavg(policy);
+                freq_cur = __cpufreq_driver_getavg(policy);
                 if (!freq_cur)
                         freq_cur = policy->cur;
 
@@ -423,9 +424,11 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 
 static void do_dbs_timer(struct work_struct *work)
 {
-        unsigned int cpu = smp_processor_id();
-        struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu);
-        enum dbs_sample sample_type = dbs_info->sample_type;
+        struct cpu_dbs_info_s *dbs_info =
+                container_of(work, struct cpu_dbs_info_s, work.work);
+        unsigned int cpu = dbs_info->cpu;
+        int sample_type = dbs_info->sample_type;
+
         /* We want all CPUs to do sampling nearly on same jiffy */
         int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
 
@@ -434,15 +437,19 @@ static void do_dbs_timer(struct work_struct *work)
 
         delay -= jiffies % delay;
 
-        if (!dbs_info->enable)
+        if (lock_policy_rwsem_write(cpu) < 0)
+                return;
+
+        if (!dbs_info->enable) {
+                unlock_policy_rwsem_write(cpu);
                 return;
+        }
+
         /* Common NORMAL_SAMPLE setup */
         dbs_info->sample_type = DBS_NORMAL_SAMPLE;
         if (!dbs_tuners_ins.powersave_bias ||
             sample_type == DBS_NORMAL_SAMPLE) {
-                lock_cpu_hotplug();
                 dbs_check_cpu(dbs_info);
-                unlock_cpu_hotplug();
                 if (dbs_info->freq_lo) {
                         /* Setup timer for SUB_SAMPLE */
                         dbs_info->sample_type = DBS_SUB_SAMPLE;
@@ -454,26 +461,27 @@ static void do_dbs_timer(struct work_struct *work)
                                         CPUFREQ_RELATION_H);
         }
         queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay);
+        unlock_policy_rwsem_write(cpu);
 }
 
-static inline void dbs_timer_init(unsigned int cpu)
+static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)
 {
-        struct cpu_dbs_info_s *dbs_info = &per_cpu(cpu_dbs_info, cpu);
         /* We want all CPUs to do sampling nearly on same jiffy */
         int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
         delay -= jiffies % delay;
 
+        dbs_info->enable = 1;
         ondemand_powersave_bias_init();
-        INIT_DELAYED_WORK_NAR(&dbs_info->work, do_dbs_timer);
         dbs_info->sample_type = DBS_NORMAL_SAMPLE;
-        queue_delayed_work_on(cpu, kondemand_wq, &dbs_info->work, delay);
+        INIT_DELAYED_WORK_NAR(&dbs_info->work, do_dbs_timer);
+        queue_delayed_work_on(dbs_info->cpu, kondemand_wq, &dbs_info->work,
+                              delay);
 }
 
 static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
 {
         dbs_info->enable = 0;
         cancel_delayed_work(&dbs_info->work);
-        flush_workqueue(kondemand_wq);
 }
 
 static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
@@ -502,21 +510,9 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 
         mutex_lock(&dbs_mutex);
         dbs_enable++;
-        if (dbs_enable == 1) {
-                kondemand_wq = create_workqueue("kondemand");
-                if (!kondemand_wq) {
-                        printk(KERN_ERR
-                                "Creation of kondemand failed\n");
-                        dbs_enable--;
-                        mutex_unlock(&dbs_mutex);
-                        return -ENOSPC;
-                }
-        }
 
         rc = sysfs_create_group(&policy->kobj, &dbs_attr_group);
         if (rc) {
-                if (dbs_enable == 1)
-                        destroy_workqueue(kondemand_wq);
                 dbs_enable--;
                 mutex_unlock(&dbs_mutex);
                 return rc;
@@ -530,7 +526,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                         j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j);
                         j_dbs_info->prev_cpu_wall = get_jiffies_64();
                 }
-                this_dbs_info->enable = 1;
+                this_dbs_info->cpu = cpu;
                 /*
                  * Start the timerschedule work, when this governor
                  * is used for first time
@@ -550,7 +546,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 
                         dbs_tuners_ins.sampling_rate = def_sampling_rate;
                 }
-                dbs_timer_init(policy->cpu);
+                dbs_timer_init(this_dbs_info);
 
                 mutex_unlock(&dbs_mutex);
                 break;
@@ -560,9 +556,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                 dbs_timer_exit(this_dbs_info);
                 sysfs_remove_group(&policy->kobj, &dbs_attr_group);
                 dbs_enable--;
-                if (dbs_enable == 0)
-                        destroy_workqueue(kondemand_wq);
-
                 mutex_unlock(&dbs_mutex);
 
                 break;
@@ -591,12 +584,18 @@ static struct cpufreq_governor cpufreq_gov_dbs = {
 
 static int __init cpufreq_gov_dbs_init(void)
 {
+        kondemand_wq = create_workqueue("kondemand");
+        if (!kondemand_wq) {
+                printk(KERN_ERR "Creation of kondemand failed\n");
+                return -EFAULT;
+        }
         return cpufreq_register_governor(&cpufreq_gov_dbs);
 }
 
 static void __exit cpufreq_gov_dbs_exit(void)
 {
         cpufreq_unregister_governor(&cpufreq_gov_dbs);
+        destroy_workqueue(kondemand_wq);
 }
 
 
@@ -608,3 +607,4 @@ MODULE_LICENSE("GPL");
 
 module_init(cpufreq_gov_dbs_init);
 module_exit(cpufreq_gov_dbs_exit);
+
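
A minimal sketch of the container_of() pattern the ondemand rework above relies on (illustrative, not part of the patch): the delayed-work callback recovers its owning per-CPU state from the embedded work_struct and a cpu field stored at setup time, instead of trusting smp_processor_id(). The names my_cpu_state and my_timer_fn are hypothetical.

struct my_cpu_state {
        struct delayed_work work;
        int cpu;                        /* recorded when the work is set up */
};

static void my_timer_fn(struct work_struct *work)
{
        /* 'work' points at work.work inside my_cpu_state; container_of()
         * walks back to the enclosing structure, so this stays correct no
         * matter which CPU actually runs the callback. */
        struct my_cpu_state *state =
                container_of(work, struct my_cpu_state, work.work);

        printk(KERN_INFO "timer for cpu %d\n", state->cpu);
}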
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index 91ad342a6051..d1c7cac9316c 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -370,12 +370,10 @@ __exit cpufreq_stats_exit(void)
         cpufreq_unregister_notifier(&notifier_trans_block,
                         CPUFREQ_TRANSITION_NOTIFIER);
         unregister_hotcpu_notifier(&cpufreq_stat_cpu_notifier);
-        lock_cpu_hotplug();
         for_each_online_cpu(cpu) {
                 cpufreq_stat_cpu_callback(&cpufreq_stat_cpu_notifier,
                         CPU_DEAD, (void *)(long)cpu);
         }
-        unlock_cpu_hotplug();
 }
 
 MODULE_AUTHOR ("Zou Nan hai <nanhai.zou@intel.com>");
diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c
index 2a4eb0bfaf30..860345c7799a 100644
--- a/drivers/cpufreq/cpufreq_userspace.c
+++ b/drivers/cpufreq/cpufreq_userspace.c
@@ -71,7 +71,6 @@ static int cpufreq_set(unsigned int freq, struct cpufreq_policy *policy)
 
         dprintk("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq);
 
-        lock_cpu_hotplug();
         mutex_lock(&userspace_mutex);
         if (!cpu_is_managed[policy->cpu])
                 goto err;
@@ -94,7 +93,6 @@ static int cpufreq_set(unsigned int freq, struct cpufreq_policy *policy)
 
  err:
         mutex_unlock(&userspace_mutex);
-        unlock_cpu_hotplug();
         return ret;
 }
 