aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorViresh Kumar <viresh.kumar@linaro.org>2013-01-29 09:39:08 -0500
committerRafael J. Wysocki <rafael.j.wysocki@intel.com>2013-02-01 18:01:16 -0500
commitfcf8058296edbc3de43adf095824fc32b067b9f8 (patch)
tree7f290019ecffbe50af0f4012eeadf4f08e162bb3
parentb26f72042e433642787e51fb3f40dbdd9969f6e1 (diff)
cpufreq: Simplify cpufreq_add_dev()
Currently cpufreq_add_dev() first allocates policy, calls driver->init() and then checks if this CPU is already managed or not. And if it is already managed, its policy is freed. We can save all this if we somehow know that CPU is managed or not in advance. policy->related_cpus contains the list of all valid sibling CPUs of policy->cpu. We can check this to see if the current CPU is already managed. From now on, platforms don't really need to set related_cpus from their init() routines, as the same work is done by core too. If a platform driver needs to set the related_cpus mask with some additional CPUs, other than CPUs present in policy->cpus, they are free to do it, though, as we don't override anything. [rjw: Changelog] Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org> Tested-by: Shawn Guo <shawn.guo@linaro.org> Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
-rw-r--r--drivers/cpufreq/cpufreq.c158
-rw-r--r--drivers/cpufreq/spear-cpufreq.c1
2 files changed, 58 insertions, 101 deletions
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index d474421d219b..1cea7a1eac13 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -552,8 +552,6 @@ static ssize_t show_cpus(const struct cpumask *mask, char *buf)
552 */ 552 */
553static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf) 553static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
554{ 554{
555 if (cpumask_empty(policy->related_cpus))
556 return show_cpus(policy->cpus, buf);
557 return show_cpus(policy->related_cpus, buf); 555 return show_cpus(policy->related_cpus, buf);
558} 556}
559 557
@@ -709,92 +707,6 @@ static struct kobj_type ktype_cpufreq = {
709 .release = cpufreq_sysfs_release, 707 .release = cpufreq_sysfs_release,
710}; 708};
711 709
712/*
713 * Returns:
714 * Negative: Failure
715 * 0: Success
716 * Positive: When we have a managed CPU and the sysfs got symlinked
717 */
718static int cpufreq_add_dev_policy(unsigned int cpu,
719 struct cpufreq_policy *policy,
720 struct device *dev)
721{
722 int ret = 0;
723#ifdef CONFIG_SMP
724 unsigned long flags;
725 unsigned int j;
726#ifdef CONFIG_HOTPLUG_CPU
727 struct cpufreq_governor *gov;
728
729 gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
730 if (gov) {
731 policy->governor = gov;
732 pr_debug("Restoring governor %s for cpu %d\n",
733 policy->governor->name, cpu);
734 }
735#endif
736
737 for_each_cpu(j, policy->cpus) {
738 struct cpufreq_policy *managed_policy;
739
740 if (cpu == j)
741 continue;
742
743 /* Check for existing affected CPUs.
744 * They may not be aware of it due to CPU Hotplug.
745 * cpufreq_cpu_put is called when the device is removed
746 * in __cpufreq_remove_dev()
747 */
748 managed_policy = cpufreq_cpu_get(j);
749 if (unlikely(managed_policy)) {
750
751 /* Set proper policy_cpu */
752 unlock_policy_rwsem_write(cpu);
753 per_cpu(cpufreq_policy_cpu, cpu) = managed_policy->cpu;
754
755 if (lock_policy_rwsem_write(cpu) < 0) {
756 /* Should not go through policy unlock path */
757 if (cpufreq_driver->exit)
758 cpufreq_driver->exit(policy);
759 cpufreq_cpu_put(managed_policy);
760 return -EBUSY;
761 }
762
763 __cpufreq_governor(managed_policy, CPUFREQ_GOV_STOP);
764
765 spin_lock_irqsave(&cpufreq_driver_lock, flags);
766 cpumask_copy(managed_policy->cpus, policy->cpus);
767 per_cpu(cpufreq_cpu_data, cpu) = managed_policy;
768 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
769
770 __cpufreq_governor(managed_policy, CPUFREQ_GOV_START);
771 __cpufreq_governor(managed_policy, CPUFREQ_GOV_LIMITS);
772
773 pr_debug("CPU already managed, adding link\n");
774 ret = sysfs_create_link(&dev->kobj,
775 &managed_policy->kobj,
776 "cpufreq");
777 if (ret)
778 cpufreq_cpu_put(managed_policy);
779 /*
780 * Success. We only needed to be added to the mask.
781 * Call driver->exit() because only the cpu parent of
782 * the kobj needed to call init().
783 */
784 if (cpufreq_driver->exit)
785 cpufreq_driver->exit(policy);
786
787 if (!ret)
788 return 1;
789 else
790 return ret;
791 }
792 }
793#endif
794 return ret;
795}
796
797
798/* symlink affected CPUs */ 710/* symlink affected CPUs */
799static int cpufreq_add_dev_symlink(unsigned int cpu, 711static int cpufreq_add_dev_symlink(unsigned int cpu,
800 struct cpufreq_policy *policy) 712 struct cpufreq_policy *policy)
@@ -899,6 +811,42 @@ err_out_kobj_put:
899 return ret; 811 return ret;
900} 812}
901 813
814#ifdef CONFIG_HOTPLUG_CPU
815static int cpufreq_add_policy_cpu(unsigned int cpu, unsigned int sibling,
816 struct device *dev)
817{
818 struct cpufreq_policy *policy;
819 int ret = 0;
820 unsigned long flags;
821
822 policy = cpufreq_cpu_get(sibling);
823 WARN_ON(!policy);
824
825 per_cpu(cpufreq_policy_cpu, cpu) = policy->cpu;
826
827 lock_policy_rwsem_write(cpu);
828
829 __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
830
831 spin_lock_irqsave(&cpufreq_driver_lock, flags);
832 cpumask_set_cpu(cpu, policy->cpus);
833 per_cpu(cpufreq_cpu_data, cpu) = policy;
834 spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
835
836 __cpufreq_governor(policy, CPUFREQ_GOV_START);
837 __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
838
839 unlock_policy_rwsem_write(cpu);
840
841 ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
842 if (ret) {
843 cpufreq_cpu_put(policy);
844 return ret;
845 }
846
847 return 0;
848}
849#endif
902 850
903/** 851/**
904 * cpufreq_add_dev - add a CPU device 852 * cpufreq_add_dev - add a CPU device
@@ -911,12 +859,12 @@ err_out_kobj_put:
911 */ 859 */
912static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif) 860static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
913{ 861{
914 unsigned int cpu = dev->id; 862 unsigned int j, cpu = dev->id;
915 int ret = 0, found = 0; 863 int ret = -ENOMEM, found = 0;
916 struct cpufreq_policy *policy; 864 struct cpufreq_policy *policy;
917 unsigned long flags; 865 unsigned long flags;
918 unsigned int j;
919#ifdef CONFIG_HOTPLUG_CPU 866#ifdef CONFIG_HOTPLUG_CPU
867 struct cpufreq_governor *gov;
920 int sibling; 868 int sibling;
921#endif 869#endif
922 870
@@ -933,6 +881,15 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
933 cpufreq_cpu_put(policy); 881 cpufreq_cpu_put(policy);
934 return 0; 882 return 0;
935 } 883 }
884
885#ifdef CONFIG_HOTPLUG_CPU
886 /* Check if this cpu was hot-unplugged earlier and has siblings */
887 for_each_online_cpu(sibling) {
888 struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
889 if (cp && cpumask_test_cpu(cpu, cp->related_cpus))
890 return cpufreq_add_policy_cpu(cpu, sibling, dev);
891 }
892#endif
936#endif 893#endif
937 894
938 if (!try_module_get(cpufreq_driver->owner)) { 895 if (!try_module_get(cpufreq_driver->owner)) {
@@ -940,7 +897,6 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
940 goto module_out; 897 goto module_out;
941 } 898 }
942 899
943 ret = -ENOMEM;
944 policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL); 900 policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
945 if (!policy) 901 if (!policy)
946 goto nomem_out; 902 goto nomem_out;
@@ -985,6 +941,9 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
985 goto err_unlock_policy; 941 goto err_unlock_policy;
986 } 942 }
987 943
944 /* related cpus should atleast have policy->cpus */
945 cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
946
988 /* 947 /*
989 * affected cpus must always be the one, which are online. We aren't 948 * affected cpus must always be the one, which are online. We aren't
990 * managing offline cpus here. 949 * managing offline cpus here.
@@ -997,14 +956,14 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
997 blocking_notifier_call_chain(&cpufreq_policy_notifier_list, 956 blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
998 CPUFREQ_START, policy); 957 CPUFREQ_START, policy);
999 958
1000 ret = cpufreq_add_dev_policy(cpu, policy, dev); 959#ifdef CONFIG_HOTPLUG_CPU
1001 if (ret) { 960 gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
1002 if (ret > 0) 961 if (gov) {
1003 /* This is a managed cpu, symlink created, 962 policy->governor = gov;
1004 exit with 0 */ 963 pr_debug("Restoring governor %s for cpu %d\n",
1005 ret = 0; 964 policy->governor->name, cpu);
1006 goto err_unlock_policy;
1007 } 965 }
966#endif
1008 967
1009 ret = cpufreq_add_dev_interface(cpu, policy, dev); 968 ret = cpufreq_add_dev_interface(cpu, policy, dev);
1010 if (ret) 969 if (ret)
@@ -1018,7 +977,6 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1018 977
1019 return 0; 978 return 0;
1020 979
1021
1022err_out_unregister: 980err_out_unregister:
1023 spin_lock_irqsave(&cpufreq_driver_lock, flags); 981 spin_lock_irqsave(&cpufreq_driver_lock, flags);
1024 for_each_cpu(j, policy->cpus) 982 for_each_cpu(j, policy->cpus)
diff --git a/drivers/cpufreq/spear-cpufreq.c b/drivers/cpufreq/spear-cpufreq.c
index 8ff26af622ea..fc714a65fa15 100644
--- a/drivers/cpufreq/spear-cpufreq.c
+++ b/drivers/cpufreq/spear-cpufreq.c
@@ -189,7 +189,6 @@ static int spear_cpufreq_init(struct cpufreq_policy *policy)
189 policy->cur = spear_cpufreq_get(0); 189 policy->cur = spear_cpufreq_get(0);
190 190
191 cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu)); 191 cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu));
192 cpumask_copy(policy->related_cpus, policy->cpus);
193 192
194 return 0; 193 return 0;
195} 194}