Diffstat (limited to 'drivers/cpufreq/cpufreq.c')
-rw-r--r--  drivers/cpufreq/cpufreq.c  166
1 files changed, 90 insertions, 76 deletions
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 0ee008da46f2..845687884c1e 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -756,6 +756,75 @@ static struct kobj_type ktype_cpufreq = {
         .release        = cpufreq_sysfs_release,
 };
 
+
+int cpufreq_add_dev_policy(unsigned int cpu, struct cpufreq_policy *policy,
+                struct sys_device *sys_dev)
+{
+        int ret = 0;
+#ifdef CONFIG_SMP
+        unsigned long flags;
+        unsigned int j;
+
+#ifdef CONFIG_HOTPLUG_CPU
+        if (per_cpu(cpufreq_cpu_governor, cpu)) {
+                policy->governor = per_cpu(cpufreq_cpu_governor, cpu);
+                dprintk("Restoring governor %s for cpu %d\n",
+                       policy->governor->name, cpu);
+        }
+#endif
+
+        for_each_cpu(j, policy->cpus) {
+                struct cpufreq_policy *managed_policy;
+
+                if (cpu == j)
+                        continue;
+
+                /* Check for existing affected CPUs.
+                 * They may not be aware of it due to CPU Hotplug.
+                 * cpufreq_cpu_put is called when the device is removed
+                 * in __cpufreq_remove_dev()
+                 */
+                managed_policy = cpufreq_cpu_get(j);
+                if (unlikely(managed_policy)) {
+
+                        /* Set proper policy_cpu */
+                        unlock_policy_rwsem_write(cpu);
+                        per_cpu(policy_cpu, cpu) = managed_policy->cpu;
+
+                        if (lock_policy_rwsem_write(cpu) < 0) {
+                                /* Should not go through policy unlock path */
+                                if (cpufreq_driver->exit)
+                                        cpufreq_driver->exit(policy);
+                                cpufreq_cpu_put(managed_policy);
+                                return -EBUSY;
+                        }
+
+                        spin_lock_irqsave(&cpufreq_driver_lock, flags);
+                        cpumask_copy(managed_policy->cpus, policy->cpus);
+                        per_cpu(cpufreq_cpu_data, cpu) = managed_policy;
+                        spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
+                        dprintk("CPU already managed, adding link\n");
+                        ret = sysfs_create_link(&sys_dev->kobj,
+                                                &managed_policy->kobj,
+                                                "cpufreq");
+                        if (ret)
+                                cpufreq_cpu_put(managed_policy);
+                        /*
+                         * Success. We only needed to be added to the mask.
+                         * Call driver->exit() because only the cpu parent of
+                         * the kobj needed to call init().
+                         */
+                        if (cpufreq_driver->exit)
+                                cpufreq_driver->exit(policy);
+                        return ret;
+                }
+        }
+#endif
+        return ret;
+}
+
+
 /* symlink affected CPUs */
 int cpufreq_add_dev_symlink(unsigned int cpu, struct cpufreq_policy *policy)
 {
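The hunk above lifts the SMP/CPU-hotplug handling out of cpufreq_add_dev() into the new cpufreq_add_dev_policy() helper: restore a previously saved governor, then scan the policy's cpus mask for a sibling that already manages this CPU and, if one is found, just link the new CPU into that existing policy. Failures are now reported through the return value instead of jumps to labels in the caller. For orientation only, here is a stripped-down userspace model of the scan-and-link idea; struct fake_policy, cpu_policy[] and add_dev_policy() below are invented stand-ins, not the kernel API, and the return convention is simplified.

/* Illustration only -- not kernel code.  A stripped-down model of the
 * "adopt an already-managed sibling policy" pattern used by
 * cpufreq_add_dev_policy().  All names here are invented. */
#include <stdio.h>

#define NR_CPUS 4

struct fake_policy {
        int owner_cpu;              /* CPU that created the policy     */
        int cpus[NR_CPUS];          /* 1 if that CPU shares the policy */
};

/* Per-CPU policy pointers, loosely like per_cpu(cpufreq_cpu_data, cpu). */
static struct fake_policy *cpu_policy[NR_CPUS];

/* Returns 1 if the CPU could join an existing policy, 0 if the caller
 * still has to do the full initialisation (simplified convention). */
static int add_dev_policy(int cpu, struct fake_policy *policy)
{
        for (int j = 0; j < NR_CPUS; j++) {
                struct fake_policy *managed;

                if (j == cpu || !policy->cpus[j])
                        continue;

                managed = cpu_policy[j];    /* like cpufreq_cpu_get(j) */
                if (managed) {
                        /* Sibling already owns a policy: just link to it. */
                        managed->cpus[cpu] = 1;
                        cpu_policy[cpu] = managed;
                        printf("cpu%d: already managed by cpu%d, adding link\n",
                               cpu, managed->owner_cpu);
                        return 1;
                }
        }
        return 0;
}

int main(void)
{
        /* CPU 0 came up first and owns the shared policy ... */
        static struct fake_policy p0 = { .owner_cpu = 0, .cpus = { 1, 1, 0, 0 } };
        /* ... CPU 1 hotplugs in later with a freshly initialised policy
         * covering the same CPUs. */
        struct fake_policy p1 = { .owner_cpu = 1, .cpus = { 1, 1, 0, 0 } };

        cpu_policy[0] = &p0;
        if (!add_dev_policy(1, &p1))
                printf("cpu1: no sibling found, full setup needed\n");
        return 0;
}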
@@ -787,6 +856,7 @@ int cpufreq_add_dev_symlink(unsigned int cpu, struct cpufreq_policy *policy)
 int cpufreq_add_dev_interface(unsigned int cpu, struct cpufreq_policy *policy,
                 struct sys_device *sys_dev)
 {
+        struct cpufreq_policy new_policy;
         struct freq_attr **drv_attr;
         unsigned long flags;
         int ret = 0;
@@ -827,6 +897,23 @@ int cpufreq_add_dev_interface(unsigned int cpu, struct cpufreq_policy *policy,
         spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
         ret = cpufreq_add_dev_symlink(cpu, policy);
+        if (ret)
+                goto err_out_kobj_put;
+
+        memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
+        /* assure that the starting sequence is run in __cpufreq_set_policy */
+        policy->governor = NULL;
+
+        /* set default policy */
+        ret = __cpufreq_set_policy(policy, &new_policy);
+        policy->user_policy.policy = policy->policy;
+        policy->user_policy.governor = policy->governor;
+
+        if (ret) {
+                dprintk("setting policy failed\n");
+                if (cpufreq_driver->exit)
+                        cpufreq_driver->exit(policy);
+        }
         return ret;
 
 err_out_kobj_put:
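This hunk moves the default-policy step into cpufreq_add_dev_interface(): snapshot what the driver initialised into new_policy, clear policy->governor so that __cpufreq_set_policy() runs its governor start sequence, apply the snapshot, and record the result as the user policy; on failure the driver's exit() hook is now called here rather than in the caller. A minimal sketch of that snapshot/reset/reapply pattern follows, with invented stand-ins (struct toy_policy, apply_policy()) rather than the kernel types.

/* Illustration only -- not kernel code.  Models the snapshot/reset/reapply
 * step; struct toy_policy and apply_policy() are invented stand-ins. */
#include <stdio.h>
#include <string.h>

struct toy_policy {
        unsigned int min, max;
        const char *governor;       /* NULL: no governor running yet */
};

/* Stand-in for __cpufreq_set_policy(): starts a governor if none runs. */
static int apply_policy(struct toy_policy *cur, const struct toy_policy *req)
{
        if (!cur->governor)
                printf("starting governor %s\n", req->governor);
        *cur = *req;
        return 0;
}

int main(void)
{
        struct toy_policy policy = { 800000, 2000000, "ondemand" };
        struct toy_policy new_policy;

        /* Snapshot what the driver set up ... */
        memcpy(&new_policy, &policy, sizeof(new_policy));
        /* ... and clear the governor so apply_policy() takes its start path. */
        policy.governor = NULL;

        if (apply_policy(&policy, &new_policy))
                fprintf(stderr, "setting policy failed\n");
        return 0;
}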
@@ -849,7 +936,6 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
 {
         unsigned int cpu = sys_dev->id;
         int ret = 0;
-        struct cpufreq_policy new_policy;
         struct cpufreq_policy *policy;
         unsigned long flags;
         unsigned int j;
@@ -914,82 +1000,14 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
         blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
                                      CPUFREQ_START, policy);
 
-#ifdef CONFIG_SMP
-
-#ifdef CONFIG_HOTPLUG_CPU
-        if (per_cpu(cpufreq_cpu_governor, cpu)) {
-                policy->governor = per_cpu(cpufreq_cpu_governor, cpu);
-                dprintk("Restoring governor %s for cpu %d\n",
-                       policy->governor->name, cpu);
-        }
-#endif
-
-        for_each_cpu(j, policy->cpus) {
-                struct cpufreq_policy *managed_policy;
-
-                if (cpu == j)
-                        continue;
-
-                /* Check for existing affected CPUs.
-                 * They may not be aware of it due to CPU Hotplug.
-                 * cpufreq_cpu_put is called when the device is removed
-                 * in __cpufreq_remove_dev()
-                 */
-                managed_policy = cpufreq_cpu_get(j);
-                if (unlikely(managed_policy)) {
-
-                        /* Set proper policy_cpu */
-                        unlock_policy_rwsem_write(cpu);
-                        per_cpu(policy_cpu, cpu) = managed_policy->cpu;
-
-                        if (lock_policy_rwsem_write(cpu) < 0) {
-                                /* Should not go through policy unlock path */
-                                if (cpufreq_driver->exit)
-                                        cpufreq_driver->exit(policy);
-                                ret = -EBUSY;
-                                cpufreq_cpu_put(managed_policy);
-                                goto err_free_cpumask;
-                        }
-
-                        spin_lock_irqsave(&cpufreq_driver_lock, flags);
-                        cpumask_copy(managed_policy->cpus, policy->cpus);
-                        per_cpu(cpufreq_cpu_data, cpu) = managed_policy;
-                        spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
-
-                        dprintk("CPU already managed, adding link\n");
-                        ret = sysfs_create_link(&sys_dev->kobj,
-                                                &managed_policy->kobj,
-                                                "cpufreq");
-                        if (ret)
-                                cpufreq_cpu_put(managed_policy);
-                        /*
-                         * Success. We only needed to be added to the mask.
-                         * Call driver->exit() because only the cpu parent of
-                         * the kobj needed to call init().
-                         */
-                        goto out_driver_exit; /* call driver->exit() */
-                }
-        }
-#endif
-        memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
+        ret = cpufreq_add_dev_policy(cpu, policy, sys_dev);
+        if (ret)
+                goto err_unlock_policy;
 
         ret = cpufreq_add_dev_interface(cpu, policy, sys_dev);
         if (ret)
                 goto err_out_unregister;
 
-        policy->governor = NULL; /* to assure that the starting sequence is
-                                  * run in cpufreq_set_policy */
-
-        /* set default policy */
-        ret = __cpufreq_set_policy(policy, &new_policy);
-        policy->user_policy.policy = policy->policy;
-        policy->user_policy.governor = policy->governor;
-
-        if (ret) {
-                dprintk("setting policy failed\n");
-                goto err_out_unregister;
-        }
-
         unlock_policy_rwsem_write(cpu);
 
         kobject_uevent(&policy->kobj, KOBJ_ADD);
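After this hunk, the body of cpufreq_add_dev() is reduced to calling the two new helpers and keeping its original goto-based unwind. A compact sketch of that error-handling shape follows, assuming both helpers report failure purely through their return value; step_policy() and step_interface() are placeholders for this sketch, not kernel functions.

/* Illustration only -- the goto-based unwind shape kept by cpufreq_add_dev().
 * step_policy() and step_interface() are placeholders for the two helpers. */
#include <stdio.h>

static int step_policy(void)    { return 0; }
static int step_interface(void) { return 0; }

static int add_dev(void)
{
        int ret;

        ret = step_policy();
        if (ret)
                goto err_unlock;        /* nothing user-visible created yet */

        ret = step_interface();
        if (ret)
                goto err_unregister;    /* sysfs objects exist: unwind deeper */

        printf("device added\n");
        return 0;

err_unregister:
        puts("drop kobject and wait for its release");
        /* fall through to the shallower cleanup */
err_unlock:
        puts("unlock policy rwsem, free cpumasks");
        return ret;
}

int main(void)
{
        return add_dev();
}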
@@ -1009,10 +1027,6 @@ err_out_unregister:
         kobject_put(&policy->kobj);
         wait_for_completion(&policy->kobj_unregister);
 
-out_driver_exit:
-        if (cpufreq_driver->exit)
-                cpufreq_driver->exit(policy);
-
 err_unlock_policy:
         unlock_policy_rwsem_write(cpu);
 err_free_cpumask: