Diffstat (limited to 'drivers/cpufreq/cpufreq.c')
-rw-r--r--	drivers/cpufreq/cpufreq.c | 258
1 file changed, 180 insertions, 78 deletions
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index a45cc89e387a..f52facc570f5 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -41,8 +41,67 @@ static struct cpufreq_driver *cpufreq_driver;
 static struct cpufreq_policy *cpufreq_cpu_data[NR_CPUS];
 static DEFINE_SPINLOCK(cpufreq_driver_lock);
 
+/*
+ * cpu_policy_rwsem is a per CPU reader-writer semaphore designed to cure
+ * all cpufreq/hotplug/workqueue/etc related lock issues.
+ *
+ * The rules for this semaphore:
+ * - Any routine that wants to read from the policy structure will
+ *   do a down_read on this semaphore.
+ * - Any routine that will write to the policy structure and/or may take away
+ *   the policy altogether (eg. CPU hotplug), will hold this lock in write
+ *   mode before doing so.
+ *
+ * Additional rules:
+ * - All holders of the lock should check to make sure that the CPU they
+ *   are concerned with are online after they get the lock.
+ * - Governor routines that can be called in cpufreq hotplug path should not
+ *   take this sem as top level hotplug notifier handler takes this.
+ */
+static DEFINE_PER_CPU(int, policy_cpu);
+static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
+
+#define lock_policy_rwsem(mode, cpu)					\
+int lock_policy_rwsem_##mode						\
+(int cpu)								\
+{									\
+	int policy_cpu = per_cpu(policy_cpu, cpu);			\
+	BUG_ON(policy_cpu == -1);					\
+	down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));		\
+	if (unlikely(!cpu_online(cpu))) {				\
+		up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu));	\
+		return -1;						\
+	}								\
+									\
+	return 0;							\
+}
+
+lock_policy_rwsem(read, cpu);
+EXPORT_SYMBOL_GPL(lock_policy_rwsem_read);
+
+lock_policy_rwsem(write, cpu);
+EXPORT_SYMBOL_GPL(lock_policy_rwsem_write);
+
+void unlock_policy_rwsem_read(int cpu)
+{
+	int policy_cpu = per_cpu(policy_cpu, cpu);
+	BUG_ON(policy_cpu == -1);
+	up_read(&per_cpu(cpu_policy_rwsem, policy_cpu));
+}
+EXPORT_SYMBOL_GPL(unlock_policy_rwsem_read);
+
+void unlock_policy_rwsem_write(int cpu)
+{
+	int policy_cpu = per_cpu(policy_cpu, cpu);
+	BUG_ON(policy_cpu == -1);
+	up_write(&per_cpu(cpu_policy_rwsem, policy_cpu));
+}
+EXPORT_SYMBOL_GPL(unlock_policy_rwsem_write);
+
+
 /* internal prototypes */
 static int __cpufreq_governor(struct cpufreq_policy *policy, unsigned int event);
+static unsigned int __cpufreq_get(unsigned int cpu);
 static void handle_update(struct work_struct *work);
 
 /**
@@ -415,12 +474,8 @@ static ssize_t store_##file_name					\
 	if (ret != 1)							\
 		return -EINVAL;						\
 									\
-	lock_cpu_hotplug();						\
-	mutex_lock(&policy->lock);					\
 	ret = __cpufreq_set_policy(policy, &new_policy);		\
 	policy->user_policy.object = policy->object;			\
-	mutex_unlock(&policy->lock);					\
-	unlock_cpu_hotplug();						\
 									\
 	return ret ? ret : count;					\
 }
@@ -434,7 +489,7 @@ store_one(scaling_max_freq,max);
 static ssize_t show_cpuinfo_cur_freq (struct cpufreq_policy * policy,
 					char *buf)
 {
-	unsigned int cur_freq = cpufreq_get(policy->cpu);
+	unsigned int cur_freq = __cpufreq_get(policy->cpu);
 	if (!cur_freq)
 		return sprintf(buf, "<unknown>");
 	return sprintf(buf, "%u\n", cur_freq);
@@ -479,18 +534,12 @@ static ssize_t store_scaling_governor (struct cpufreq_policy * policy,
 						&new_policy.governor))
 		return -EINVAL;
 
-	lock_cpu_hotplug();
-
 	/* Do not use cpufreq_set_policy here or the user_policy.max
 	   will be wrongly overridden */
-	mutex_lock(&policy->lock);
 	ret = __cpufreq_set_policy(policy, &new_policy);
 
 	policy->user_policy.policy = policy->policy;
 	policy->user_policy.governor = policy->governor;
-	mutex_unlock(&policy->lock);
-
-	unlock_cpu_hotplug();
 
 	if (ret)
 		return ret;
@@ -595,11 +644,17 @@ static ssize_t show(struct kobject * kobj, struct attribute * attr ,char * buf)
 	policy = cpufreq_cpu_get(policy->cpu);
 	if (!policy)
 		return -EINVAL;
+
+	if (lock_policy_rwsem_read(policy->cpu) < 0)
+		return -EINVAL;
+
 	if (fattr->show)
 		ret = fattr->show(policy, buf);
 	else
 		ret = -EIO;
 
+	unlock_policy_rwsem_read(policy->cpu);
+
 	cpufreq_cpu_put(policy);
 	return ret;
 }
@@ -613,11 +668,17 @@ static ssize_t store(struct kobject * kobj, struct attribute * attr,
 	policy = cpufreq_cpu_get(policy->cpu);
 	if (!policy)
 		return -EINVAL;
+
+	if (lock_policy_rwsem_write(policy->cpu) < 0)
+		return -EINVAL;
+
 	if (fattr->store)
 		ret = fattr->store(policy, buf, count);
 	else
 		ret = -EIO;
 
+	unlock_policy_rwsem_write(policy->cpu);
+
 	cpufreq_cpu_put(policy);
 	return ret;
 }
@@ -691,8 +752,10 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
 	policy->cpu = cpu;
 	policy->cpus = cpumask_of_cpu(cpu);
 
-	mutex_init(&policy->lock);
-	mutex_lock(&policy->lock);
+	/* Initially set CPU itself as the policy_cpu */
+	per_cpu(policy_cpu, cpu) = cpu;
+	lock_policy_rwsem_write(cpu);
+
 	init_completion(&policy->kobj_unregister);
 	INIT_WORK(&policy->update, handle_update);
 
@@ -702,7 +765,7 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
 	ret = cpufreq_driver->init(policy);
 	if (ret) {
 		dprintk("initialization failed\n");
-		mutex_unlock(&policy->lock);
+		unlock_policy_rwsem_write(cpu);
 		goto err_out;
 	}
 
@@ -716,6 +779,14 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
 		 */
 		managed_policy = cpufreq_cpu_get(j);
 		if (unlikely(managed_policy)) {
+
+			/* Set proper policy_cpu */
+			unlock_policy_rwsem_write(cpu);
+			per_cpu(policy_cpu, cpu) = managed_policy->cpu;
+
+			if (lock_policy_rwsem_write(cpu) < 0)
+				goto err_out_driver_exit;
+
 			spin_lock_irqsave(&cpufreq_driver_lock, flags);
 			managed_policy->cpus = policy->cpus;
 			cpufreq_cpu_data[cpu] = managed_policy;
@@ -726,13 +797,13 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
 						&managed_policy->kobj,
 						"cpufreq");
 			if (ret) {
-				mutex_unlock(&policy->lock);
+				unlock_policy_rwsem_write(cpu);
 				goto err_out_driver_exit;
 			}
 
 			cpufreq_debug_enable_ratelimit();
-			mutex_unlock(&policy->lock);
 			ret = 0;
+			unlock_policy_rwsem_write(cpu);
 			goto err_out_driver_exit; /* call driver->exit() */
 		}
 	}
@@ -746,7 +817,7 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
 
 	ret = kobject_register(&policy->kobj);
 	if (ret) {
-		mutex_unlock(&policy->lock);
+		unlock_policy_rwsem_write(cpu);
 		goto err_out_driver_exit;
 	}
 	/* set up files for this cpu device */
@@ -761,8 +832,10 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
 		sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
 
 	spin_lock_irqsave(&cpufreq_driver_lock, flags);
-	for_each_cpu_mask(j, policy->cpus)
+	for_each_cpu_mask(j, policy->cpus) {
 		cpufreq_cpu_data[j] = policy;
+		per_cpu(policy_cpu, j) = policy->cpu;
+	}
 	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
 	/* symlink affected CPUs */
@@ -778,14 +851,14 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
 		ret = sysfs_create_link(&cpu_sys_dev->kobj, &policy->kobj,
 					"cpufreq");
 		if (ret) {
-			mutex_unlock(&policy->lock);
+			unlock_policy_rwsem_write(cpu);
 			goto err_out_unregister;
 		}
 	}
 
 	policy->governor = NULL; /* to assure that the starting sequence is
 				  * run in cpufreq_set_policy */
-	mutex_unlock(&policy->lock);
+	unlock_policy_rwsem_write(cpu);
 
 	/* set default policy */
 	ret = cpufreq_set_policy(&new_policy);
@@ -826,11 +899,13 @@ module_out:
 
 
 /**
- * cpufreq_remove_dev - remove a CPU device
+ * __cpufreq_remove_dev - remove a CPU device
  *
  * Removes the cpufreq interface for a CPU device.
+ * Caller should already have policy_rwsem in write mode for this CPU.
+ * This routine frees the rwsem before returning.
  */
-static int cpufreq_remove_dev (struct sys_device * sys_dev)
+static int __cpufreq_remove_dev (struct sys_device * sys_dev)
 {
 	unsigned int cpu = sys_dev->id;
 	unsigned long flags;
@@ -849,6 +924,7 @@ static int cpufreq_remove_dev (struct sys_device * sys_dev)
 	if (!data) {
 		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 		cpufreq_debug_enable_ratelimit();
+		unlock_policy_rwsem_write(cpu);
 		return -EINVAL;
 	}
 	cpufreq_cpu_data[cpu] = NULL;
@@ -865,6 +941,7 @@ static int cpufreq_remove_dev (struct sys_device * sys_dev)
 		sysfs_remove_link(&sys_dev->kobj, "cpufreq");
 		cpufreq_cpu_put(data);
 		cpufreq_debug_enable_ratelimit();
+		unlock_policy_rwsem_write(cpu);
 		return 0;
 	}
 #endif
@@ -873,6 +950,7 @@ static int cpufreq_remove_dev (struct sys_device * sys_dev)
 	if (!kobject_get(&data->kobj)) {
 		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 		cpufreq_debug_enable_ratelimit();
+		unlock_policy_rwsem_write(cpu);
 		return -EFAULT;
 	}
 
@@ -906,10 +984,10 @@ static int cpufreq_remove_dev (struct sys_device * sys_dev)
 	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 #endif
 
-	mutex_lock(&data->lock);
 	if (cpufreq_driver->target)
 		__cpufreq_governor(data, CPUFREQ_GOV_STOP);
-	mutex_unlock(&data->lock);
+
+	unlock_policy_rwsem_write(cpu);
 
 	kobject_unregister(&data->kobj);
 
@@ -933,6 +1011,18 @@ static int cpufreq_remove_dev (struct sys_device * sys_dev)
 }
 
 
+static int cpufreq_remove_dev (struct sys_device * sys_dev)
+{
+	unsigned int cpu = sys_dev->id;
+	int retval;
+	if (unlikely(lock_policy_rwsem_write(cpu)))
+		BUG();
+
+	retval = __cpufreq_remove_dev(sys_dev);
+	return retval;
+}
+
+
 static void handle_update(struct work_struct *work)
 {
 	struct cpufreq_policy *policy =
@@ -980,9 +1070,12 @@ unsigned int cpufreq_quick_get(unsigned int cpu)
 	unsigned int ret_freq = 0;
 
 	if (policy) {
-		mutex_lock(&policy->lock);
+		if (unlikely(lock_policy_rwsem_read(cpu)))
+			return ret_freq;
+
 		ret_freq = policy->cur;
-		mutex_unlock(&policy->lock);
+
+		unlock_policy_rwsem_read(cpu);
 		cpufreq_cpu_put(policy);
 	}
 
@@ -991,24 +1084,13 @@ unsigned int cpufreq_quick_get(unsigned int cpu)
 EXPORT_SYMBOL(cpufreq_quick_get);
 
 
-/**
- * cpufreq_get - get the current CPU frequency (in kHz)
- * @cpu: CPU number
- *
- * Get the CPU current (static) CPU frequency
- */
-unsigned int cpufreq_get(unsigned int cpu)
+static unsigned int __cpufreq_get(unsigned int cpu)
 {
-	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+	struct cpufreq_policy *policy = cpufreq_cpu_data[cpu];
 	unsigned int ret_freq = 0;
 
-	if (!policy)
-		return 0;
-
 	if (!cpufreq_driver->get)
-		goto out;
-
-	mutex_lock(&policy->lock);
+		return (ret_freq);
 
 	ret_freq = cpufreq_driver->get(cpu);
 
@@ -1022,11 +1104,33 @@ unsigned int cpufreq_get(unsigned int cpu)
 		}
 	}
 
-	mutex_unlock(&policy->lock);
+	return (ret_freq);
+}
 
-out:
-	cpufreq_cpu_put(policy);
+/**
+ * cpufreq_get - get the current CPU frequency (in kHz)
+ * @cpu: CPU number
+ *
+ * Get the CPU current (static) CPU frequency
+ */
+unsigned int cpufreq_get(unsigned int cpu)
+{
+	unsigned int ret_freq = 0;
+	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+
+	if (!policy)
+		goto out;
+
+	if (unlikely(lock_policy_rwsem_read(cpu)))
+		goto out_policy;
+
+	ret_freq = __cpufreq_get(cpu);
 
+	unlock_policy_rwsem_read(cpu);
+
+out_policy:
+	cpufreq_cpu_put(policy);
+out:
 	return (ret_freq);
 }
 EXPORT_SYMBOL(cpufreq_get);
@@ -1278,7 +1382,6 @@ EXPORT_SYMBOL(cpufreq_unregister_notifier);
  *********************************************************************/
 
 
-/* Must be called with lock_cpu_hotplug held */
 int __cpufreq_driver_target(struct cpufreq_policy *policy,
 			    unsigned int target_freq,
 			    unsigned int relation)
@@ -1304,20 +1407,19 @@ int cpufreq_driver_target(struct cpufreq_policy *policy,
 	if (!policy)
 		return -EINVAL;
 
-	lock_cpu_hotplug();
-	mutex_lock(&policy->lock);
+	if (unlikely(lock_policy_rwsem_write(policy->cpu)))
+		return -EINVAL;
 
 	ret = __cpufreq_driver_target(policy, target_freq, relation);
 
-	mutex_unlock(&policy->lock);
-	unlock_cpu_hotplug();
+	unlock_policy_rwsem_write(policy->cpu);
 
 	cpufreq_cpu_put(policy);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(cpufreq_driver_target);
 
-int cpufreq_driver_getavg(struct cpufreq_policy *policy)
+int __cpufreq_driver_getavg(struct cpufreq_policy *policy)
 {
 	int ret = 0;
 
@@ -1325,20 +1427,15 @@ int cpufreq_driver_getavg(struct cpufreq_policy *policy)
 	if (!policy)
 		return -EINVAL;
 
-	mutex_lock(&policy->lock);
-
 	if (cpu_online(policy->cpu) && cpufreq_driver->getavg)
 		ret = cpufreq_driver->getavg(policy->cpu);
 
-	mutex_unlock(&policy->lock);
-
 	cpufreq_cpu_put(policy);
 	return ret;
 }
-EXPORT_SYMBOL_GPL(cpufreq_driver_getavg);
+EXPORT_SYMBOL_GPL(__cpufreq_driver_getavg);
 
 /*
- * Locking: Must be called with the lock_cpu_hotplug() lock held
  * when "event" is CPUFREQ_GOV_LIMITS
  */
 
@@ -1420,9 +1517,7 @@ int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
 	if (!cpu_policy)
 		return -EINVAL;
 
-	mutex_lock(&cpu_policy->lock);
 	memcpy(policy, cpu_policy, sizeof(struct cpufreq_policy));
-	mutex_unlock(&cpu_policy->lock);
 
 	cpufreq_cpu_put(cpu_policy);
 	return 0;
@@ -1433,7 +1528,6 @@ EXPORT_SYMBOL(cpufreq_get_policy);
 /*
  * data : current policy.
  * policy : policy to be set.
- * Locking: Must be called with the lock_cpu_hotplug() lock held
  */
 static int __cpufreq_set_policy(struct cpufreq_policy *data,
 				struct cpufreq_policy *policy)
@@ -1539,10 +1633,9 @@ int cpufreq_set_policy(struct cpufreq_policy *policy)
 	if (!data)
 		return -EINVAL;
 
-	lock_cpu_hotplug();
+	if (unlikely(lock_policy_rwsem_write(policy->cpu)))
+		return -EINVAL;
 
-	/* lock this CPU */
-	mutex_lock(&data->lock);
 
 	ret = __cpufreq_set_policy(data, policy);
 	data->user_policy.min = data->min;
@@ -1550,9 +1643,8 @@ int cpufreq_set_policy(struct cpufreq_policy *policy)
 	data->user_policy.policy = data->policy;
 	data->user_policy.governor = data->governor;
 
-	mutex_unlock(&data->lock);
+	unlock_policy_rwsem_write(policy->cpu);
 
-	unlock_cpu_hotplug();
 	cpufreq_cpu_put(data);
 
 	return ret;
@@ -1576,8 +1668,8 @@ int cpufreq_update_policy(unsigned int cpu)
 	if (!data)
 		return -ENODEV;
 
-	lock_cpu_hotplug();
-	mutex_lock(&data->lock);
+	if (unlikely(lock_policy_rwsem_write(cpu)))
+		return -EINVAL;
 
 	dprintk("updating policy for CPU %u\n", cpu);
 	memcpy(&policy, data, sizeof(struct cpufreq_policy));
@@ -1602,8 +1694,8 @@
 
 	ret = __cpufreq_set_policy(data, &policy);
 
-	mutex_unlock(&data->lock);
-	unlock_cpu_hotplug();
+	unlock_policy_rwsem_write(cpu);
+
 	cpufreq_cpu_put(data);
 	return ret;
 }
@@ -1613,31 +1705,28 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
 					unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (unsigned long)hcpu;
-	struct cpufreq_policy *policy;
 	struct sys_device *sys_dev;
+	struct cpufreq_policy *policy;
 
 	sys_dev = get_cpu_sysdev(cpu);
-
 	if (sys_dev) {
 		switch (action) {
 		case CPU_ONLINE:
 			cpufreq_add_dev(sys_dev);
 			break;
 		case CPU_DOWN_PREPARE:
-			/*
-			 * We attempt to put this cpu in lowest frequency
-			 * possible before going down. This will permit
-			 * hardware-managed P-State to switch other related
-			 * threads to min or higher speeds if possible.
-			 */
+			if (unlikely(lock_policy_rwsem_write(cpu)))
+				BUG();
+
 			policy = cpufreq_cpu_data[cpu];
 			if (policy) {
-				cpufreq_driver_target(policy, policy->min,
+				__cpufreq_driver_target(policy, policy->min,
 						CPUFREQ_RELATION_H);
 			}
+			__cpufreq_remove_dev(sys_dev);
 			break;
-		case CPU_DEAD:
-			cpufreq_remove_dev(sys_dev);
+		case CPU_DOWN_FAILED:
+			cpufreq_add_dev(sys_dev);
 			break;
 		}
 	}
@@ -1751,3 +1840,16 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver)
 	return 0;
 }
 EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
+
+static int __init cpufreq_core_init(void)
+{
+	int cpu;
+
+	for_each_possible_cpu(cpu) {
+		per_cpu(policy_cpu, cpu) = -1;
+		init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
+	}
+	return 0;
+}
+
+core_initcall(cpufreq_core_init);
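
Usage pattern for the new helpers, as a minimal sketch only (example_target() is a hypothetical caller, not code from this patch): take the per-CPU policy rwsem for the policy's CPU, treat a non-zero return as "the CPU went offline while we waited", and release the sem on every exit path, mirroring the cpufreq_driver_target() hunk above.

	/*
	 * Hypothetical caller following the locking rules introduced by this
	 * patch; illustration only, modelled on cpufreq_driver_target().
	 */
	static int example_target(struct cpufreq_policy *policy,
				  unsigned int target_freq)
	{
		int ret;

		/* Write mode: the call below may change the current frequency. */
		if (unlikely(lock_policy_rwsem_write(policy->cpu)))
			return -EINVAL;	/* CPU went offline while we waited */

		ret = __cpufreq_driver_target(policy, target_freq,
					      CPUFREQ_RELATION_L);

		unlock_policy_rwsem_write(policy->cpu);
		return ret;
	}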