author		Linus Torvalds <torvalds@linux-foundation.org>	2009-09-18 12:16:57 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-09-18 12:16:57 -0400
commit		714af0693863dfb6f075f4465053976d2d076a21 (patch)
tree		4da5efd5b229611cdee6a503dbae090adff3edf0 /drivers/cpufreq
parent		a03fdb7612874834d6847107198712d18b5242c7 (diff)
parent		f0adb134d8dc9993a9998dc50845ec4f6ff4fadc (diff)
Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/davej/cpufreq
* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/davej/cpufreq:
  [CPUFREQ] Fix NULL ptr regression in powernow-k8
  [CPUFREQ] Create a blacklist for processors that should not load the acpi-cpufreq module.
  [CPUFREQ] Powernow-k8: Enable more than 2 low P-states
  [CPUFREQ] remove rwsem lock from CPUFREQ_GOV_STOP call (second call site)
  [CPUFREQ] ondemand - Use global sysfs dir for tuning settings
  [CPUFREQ] Introduce global, not per core: /sys/devices/system/cpu/cpufreq
  [CPUFREQ] Bail out of cpufreq_add_dev if the link for a managed CPU got created
  [CPUFREQ] Factor out policy setting from cpufreq_add_dev
  [CPUFREQ] Factor out interface creation from cpufreq_add_dev
  [CPUFREQ] Factor out symlink creation from cpufreq_add_dev
  [CPUFREQ] cleanup up -ENOMEM handling in cpufreq_add_dev
  [CPUFREQ] Reduce scope of cpu_sys_dev in cpufreq_add_dev
  [CPUFREQ] update Doc for cpuinfo_cur_freq and scaling_cur_freq
Diffstat (limited to 'drivers/cpufreq')
-rw-r--r--	drivers/cpufreq/cpufreq.c		| 305
-rw-r--r--	drivers/cpufreq/cpufreq_ondemand.c	| 139
2 files changed, 300 insertions(+), 144 deletions(-)
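
The headline change in this merge is a shared sysfs directory, /sys/devices/system/cpu/cpufreq, backed by the newly exported cpufreq_global_kobject. As a minimal sketch of how a governor can publish a system-wide tunable there (not part of this diff; my_tunable, my_show, my_attrs and my_group are illustrative names only):

	/* Sketch only: assumes the cpufreq_global_kobject and struct
	 * global_attr introduced by this merge. */
	static ssize_t my_show(struct kobject *kobj, struct attribute *attr,
			       char *buf)
	{
		return sprintf(buf, "%u\n", 42);	/* placeholder value */
	}

	static struct global_attr my_attr = __ATTR(my_tunable, 0444, my_show, NULL);
	static struct attribute *my_attrs[] = { &my_attr.attr, NULL };
	static struct attribute_group my_group = {
		.attrs	= my_attrs,
		.name	= "my_governor",
	};

	/* In the governor's first-use path: */
	rc = sysfs_create_group(cpufreq_global_kobject, &my_group);

The files then appear once under /sys/devices/system/cpu/cpufreq/my_governor/ rather than once per core.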
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 2968ed6a9c4..3938c781709 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -61,6 +61,8 @@ static DEFINE_SPINLOCK(cpufreq_driver_lock);
  * are concerned with are online after they get the lock.
  * - Governor routines that can be called in cpufreq hotplug path should not
  *   take this sem as top level hotplug notifier handler takes this.
+ * - Lock should not be held across
+ *     __cpufreq_governor(data, CPUFREQ_GOV_STOP);
  */
 static DEFINE_PER_CPU(int, policy_cpu);
 static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
@@ -686,6 +688,9 @@ static struct attribute *default_attrs[] = {
 	NULL
 };
 
+struct kobject *cpufreq_global_kobject;
+EXPORT_SYMBOL(cpufreq_global_kobject);
+
 #define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
 #define to_attr(a) container_of(a, struct freq_attr, attr)
 
@@ -756,92 +761,20 @@ static struct kobj_type ktype_cpufreq = {
 	.release	= cpufreq_sysfs_release,
 };
 
-
-/**
- * cpufreq_add_dev - add a CPU device
- *
- * Adds the cpufreq interface for a CPU device.
- *
- * The Oracle says: try running cpufreq registration/unregistration concurrently
- * with with cpu hotplugging and all hell will break loose. Tried to clean this
- * mess up, but more thorough testing is needed. - Mathieu
+/*
+ * Returns:
+ *	Negative: Failure
+ *	0:        Success
+ *	Positive: When we have a managed CPU and the sysfs got symlinked
  */
-static int cpufreq_add_dev(struct sys_device *sys_dev)
+int cpufreq_add_dev_policy(unsigned int cpu, struct cpufreq_policy *policy,
+		struct sys_device *sys_dev)
 {
-	unsigned int cpu = sys_dev->id;
 	int ret = 0;
-	struct cpufreq_policy new_policy;
-	struct cpufreq_policy *policy;
-	struct freq_attr **drv_attr;
-	struct sys_device *cpu_sys_dev;
+#ifdef CONFIG_SMP
 	unsigned long flags;
 	unsigned int j;
 
-	if (cpu_is_offline(cpu))
-		return 0;
-
-	cpufreq_debug_disable_ratelimit();
-	dprintk("adding CPU %u\n", cpu);
-
-#ifdef CONFIG_SMP
-	/* check whether a different CPU already registered this
-	 * CPU because it is in the same boat. */
-	policy = cpufreq_cpu_get(cpu);
-	if (unlikely(policy)) {
-		cpufreq_cpu_put(policy);
-		cpufreq_debug_enable_ratelimit();
-		return 0;
-	}
-#endif
-
-	if (!try_module_get(cpufreq_driver->owner)) {
-		ret = -EINVAL;
-		goto module_out;
-	}
-
-	policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
-	if (!policy) {
-		ret = -ENOMEM;
-		goto nomem_out;
-	}
-	if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL)) {
-		ret = -ENOMEM;
-		goto err_free_policy;
-	}
-	if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL)) {
-		ret = -ENOMEM;
-		goto err_free_cpumask;
-	}
-
-	policy->cpu = cpu;
-	cpumask_copy(policy->cpus, cpumask_of(cpu));
-
-	/* Initially set CPU itself as the policy_cpu */
-	per_cpu(policy_cpu, cpu) = cpu;
-	ret = (lock_policy_rwsem_write(cpu) < 0);
-	WARN_ON(ret);
-
-	init_completion(&policy->kobj_unregister);
-	INIT_WORK(&policy->update, handle_update);
-
-	/* Set governor before ->init, so that driver could check it */
-	policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
-	/* call driver. From then on the cpufreq must be able
-	 * to accept all calls to ->verify and ->setpolicy for this CPU
-	 */
-	ret = cpufreq_driver->init(policy);
-	if (ret) {
-		dprintk("initialization failed\n");
-		goto err_unlock_policy;
-	}
-	policy->user_policy.min = policy->min;
-	policy->user_policy.max = policy->max;
-
-	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
-				     CPUFREQ_START, policy);
-
-#ifdef CONFIG_SMP
-
 #ifdef CONFIG_HOTPLUG_CPU
 	if (per_cpu(cpufreq_cpu_governor, cpu)) {
 		policy->governor = per_cpu(cpufreq_cpu_governor, cpu);
@@ -872,9 +805,8 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
 		/* Should not go through policy unlock path */
 		if (cpufreq_driver->exit)
 			cpufreq_driver->exit(policy);
-		ret = -EBUSY;
 		cpufreq_cpu_put(managed_policy);
-		goto err_free_cpumask;
+		return -EBUSY;
 	}
 
 	spin_lock_irqsave(&cpufreq_driver_lock, flags);
@@ -893,17 +825,62 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
 			 * Call driver->exit() because only the cpu parent of
 			 * the kobj needed to call init().
 			 */
-			goto out_driver_exit; /* call driver->exit() */
+			if (cpufreq_driver->exit)
+				cpufreq_driver->exit(policy);
+
+			if (!ret)
+				return 1;
+			else
+				return ret;
 		}
 	}
 #endif
-	memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
+	return ret;
+}
+
+
+/* symlink affected CPUs */
+int cpufreq_add_dev_symlink(unsigned int cpu, struct cpufreq_policy *policy)
+{
+	unsigned int j;
+	int ret = 0;
+
+	for_each_cpu(j, policy->cpus) {
+		struct cpufreq_policy *managed_policy;
+		struct sys_device *cpu_sys_dev;
+
+		if (j == cpu)
+			continue;
+		if (!cpu_online(j))
+			continue;
+
+		dprintk("CPU %u already managed, adding link\n", j);
+		managed_policy = cpufreq_cpu_get(cpu);
+		cpu_sys_dev = get_cpu_sysdev(j);
+		ret = sysfs_create_link(&cpu_sys_dev->kobj, &policy->kobj,
+					"cpufreq");
+		if (ret) {
+			cpufreq_cpu_put(managed_policy);
+			return ret;
+		}
+	}
+	return ret;
+}
+
+int cpufreq_add_dev_interface(unsigned int cpu, struct cpufreq_policy *policy,
+			      struct sys_device *sys_dev)
+{
+	struct cpufreq_policy new_policy;
+	struct freq_attr **drv_attr;
+	unsigned long flags;
+	int ret = 0;
+	unsigned int j;
 
 	/* prepare interface data */
-	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, &sys_dev->kobj,
-				   "cpufreq");
+	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
+				   &sys_dev->kobj, "cpufreq");
 	if (ret)
-		goto out_driver_exit;
+		return ret;
 
 	/* set up files for this cpu device */
 	drv_attr = cpufreq_driver->attr;
@@ -926,35 +903,20 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
 
 	spin_lock_irqsave(&cpufreq_driver_lock, flags);
 	for_each_cpu(j, policy->cpus) {
 		if (!cpu_online(j))
 			continue;
 		per_cpu(cpufreq_cpu_data, j) = policy;
 		per_cpu(policy_cpu, j) = policy->cpu;
 	}
 	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
-	/* symlink affected CPUs */
-	for_each_cpu(j, policy->cpus) {
-		struct cpufreq_policy *managed_policy;
-
-		if (j == cpu)
-			continue;
-		if (!cpu_online(j))
-			continue;
-
-		dprintk("CPU %u already managed, adding link\n", j);
-		managed_policy = cpufreq_cpu_get(cpu);
-		cpu_sys_dev = get_cpu_sysdev(j);
-		ret = sysfs_create_link(&cpu_sys_dev->kobj, &policy->kobj,
-					"cpufreq");
-		if (ret) {
-			cpufreq_cpu_put(managed_policy);
-			goto err_out_unregister;
-		}
-	}
+	ret = cpufreq_add_dev_symlink(cpu, policy);
+	if (ret)
+		goto err_out_kobj_put;
 
-	policy->governor = NULL; /* to assure that the starting sequence is
-				  * run in cpufreq_set_policy */
+	memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
+	/* assure that the starting sequence is run in __cpufreq_set_policy */
+	policy->governor = NULL;
 
 	/* set default policy */
 	ret = __cpufreq_set_policy(policy, &new_policy);
@@ -963,8 +925,107 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
 
 	if (ret) {
 		dprintk("setting policy failed\n");
-		goto err_out_unregister;
+		if (cpufreq_driver->exit)
+			cpufreq_driver->exit(policy);
+	}
+	return ret;
+
+err_out_kobj_put:
+	kobject_put(&policy->kobj);
+	wait_for_completion(&policy->kobj_unregister);
+	return ret;
+}
+
+
+/**
+ * cpufreq_add_dev - add a CPU device
+ *
+ * Adds the cpufreq interface for a CPU device.
+ *
+ * The Oracle says: try running cpufreq registration/unregistration concurrently
+ * with with cpu hotplugging and all hell will break loose. Tried to clean this
+ * mess up, but more thorough testing is needed. - Mathieu
+ */
+static int cpufreq_add_dev(struct sys_device *sys_dev)
+{
+	unsigned int cpu = sys_dev->id;
+	int ret = 0;
+	struct cpufreq_policy *policy;
+	unsigned long flags;
+	unsigned int j;
+
+	if (cpu_is_offline(cpu))
+		return 0;
+
+	cpufreq_debug_disable_ratelimit();
+	dprintk("adding CPU %u\n", cpu);
+
+#ifdef CONFIG_SMP
+	/* check whether a different CPU already registered this
+	 * CPU because it is in the same boat. */
+	policy = cpufreq_cpu_get(cpu);
+	if (unlikely(policy)) {
+		cpufreq_cpu_put(policy);
+		cpufreq_debug_enable_ratelimit();
+		return 0;
+	}
+#endif
+
+	if (!try_module_get(cpufreq_driver->owner)) {
+		ret = -EINVAL;
+		goto module_out;
+	}
+
+	ret = -ENOMEM;
+	policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
+	if (!policy)
+		goto nomem_out;
+
+	if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
+		goto err_free_policy;
+
+	if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
+		goto err_free_cpumask;
+
+	policy->cpu = cpu;
+	cpumask_copy(policy->cpus, cpumask_of(cpu));
+
+	/* Initially set CPU itself as the policy_cpu */
+	per_cpu(policy_cpu, cpu) = cpu;
+	ret = (lock_policy_rwsem_write(cpu) < 0);
+	WARN_ON(ret);
+
+	init_completion(&policy->kobj_unregister);
+	INIT_WORK(&policy->update, handle_update);
+
+	/* Set governor before ->init, so that driver could check it */
+	policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
+	/* call driver. From then on the cpufreq must be able
+	 * to accept all calls to ->verify and ->setpolicy for this CPU
+	 */
+	ret = cpufreq_driver->init(policy);
+	if (ret) {
+		dprintk("initialization failed\n");
+		goto err_unlock_policy;
 	}
+	policy->user_policy.min = policy->min;
+	policy->user_policy.max = policy->max;
+
+	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
+				     CPUFREQ_START, policy);
+
+	ret = cpufreq_add_dev_policy(cpu, policy, sys_dev);
+	if (ret) {
+		if (ret > 0)
+			/* This is a managed cpu, symlink created,
+			   exit with 0 */
+			ret = 0;
+		goto err_unlock_policy;
+	}
+
+	ret = cpufreq_add_dev_interface(cpu, policy, sys_dev);
+	if (ret)
+		goto err_out_unregister;
 
 	unlock_policy_rwsem_write(cpu);
 
@@ -982,14 +1043,9 @@ err_out_unregister:
 		per_cpu(cpufreq_cpu_data, j) = NULL;
 	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
-err_out_kobj_put:
 	kobject_put(&policy->kobj);
 	wait_for_completion(&policy->kobj_unregister);
 
-out_driver_exit:
-	if (cpufreq_driver->exit)
-		cpufreq_driver->exit(policy);
-
 err_unlock_policy:
 	unlock_policy_rwsem_write(cpu);
 err_free_cpumask:
@@ -1653,8 +1709,17 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data,
 		dprintk("governor switch\n");
 
 		/* end old governor */
-		if (data->governor)
+		if (data->governor) {
+			/*
+			 * Need to release the rwsem around governor
+			 * stop due to lock dependency between
+			 * cancel_delayed_work_sync and the read lock
+			 * taken in the delayed work handler.
+			 */
+			unlock_policy_rwsem_write(data->cpu);
 			__cpufreq_governor(data, CPUFREQ_GOV_STOP);
+			lock_policy_rwsem_write(data->cpu);
+		}
 
 		/* start new governor */
 		data->governor = policy->governor;
@@ -1884,7 +1949,11 @@ static int __init cpufreq_core_init(void)
 		per_cpu(policy_cpu, cpu) = -1;
 		init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
 	}
+
+	cpufreq_global_kobject = kobject_create_and_add("cpufreq",
+						&cpu_sysdev_class.kset.kobj);
+	BUG_ON(!cpufreq_global_kobject);
+
 	return 0;
 }
-
 core_initcall(cpufreq_core_init);
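
The __cpufreq_set_policy hunk above drops the per-CPU policy rwsem around CPUFREQ_GOV_STOP. A sketch of the deadlock it avoids (hypothetical names my_dbs_timer/my_dbs_work; ondemand's real handler is do_dbs_timer):

	/* Delayed-work handler: takes the policy lock for reading. */
	static void my_dbs_timer(struct work_struct *work)
	{
		lock_policy_rwsem_read(cpu);	/* blocks while a writer holds it */
		/* ... sample load, adjust frequency ... */
		unlock_policy_rwsem_read(cpu);
	}

	/* GOV_STOP path: cancel_delayed_work_sync() waits for my_dbs_timer()
	 * to finish; if we still held the write lock, my_dbs_timer() would
	 * in turn wait on us -- a deadlock. Hence: */
	unlock_policy_rwsem_write(data->cpu);
	__cpufreq_governor(data, CPUFREQ_GOV_STOP);	/* may cancel_delayed_work_sync() */
	lock_policy_rwsem_write(data->cpu);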
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index d7a528c80de..071699de50e 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -55,6 +55,18 @@ static unsigned int min_sampling_rate;
 #define TRANSITION_LATENCY_LIMIT		(10 * 1000 * 1000)
 
 static void do_dbs_timer(struct work_struct *work);
+static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
+				unsigned int event);
+
+#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
+static
+#endif
+struct cpufreq_governor cpufreq_gov_ondemand = {
+	.name			= "ondemand",
+	.governor		= cpufreq_governor_dbs,
+	.max_transition_latency	= TRANSITION_LATENCY_LIMIT,
+	.owner			= THIS_MODULE,
+};
 
 /* Sampling types */
 enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};
@@ -207,20 +219,23 @@ static void ondemand_powersave_bias_init(void)
 }
 
 /************************** sysfs interface ************************/
-static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf)
+
+static ssize_t show_sampling_rate_max(struct kobject *kobj,
+				      struct attribute *attr, char *buf)
 {
 	printk_once(KERN_INFO "CPUFREQ: ondemand sampling_rate_max "
 	       "sysfs file is deprecated - used by: %s\n", current->comm);
 	return sprintf(buf, "%u\n", -1U);
 }
 
-static ssize_t show_sampling_rate_min(struct cpufreq_policy *policy, char *buf)
+static ssize_t show_sampling_rate_min(struct kobject *kobj,
+				      struct attribute *attr, char *buf)
 {
 	return sprintf(buf, "%u\n", min_sampling_rate);
 }
 
 #define define_one_ro(_name) \
-static struct freq_attr _name = \
+static struct global_attr _name = \
 __ATTR(_name, 0444, show_##_name, NULL)
 
 define_one_ro(sampling_rate_max);
@@ -229,7 +244,7 @@ define_one_ro(sampling_rate_min);
 /* cpufreq_ondemand Governor Tunables */
 #define show_one(file_name, object)					\
 static ssize_t show_##file_name						\
-(struct cpufreq_policy *unused, char *buf)				\
+(struct kobject *kobj, struct attribute *attr, char *buf)		\
 {									\
 	return sprintf(buf, "%u\n", dbs_tuners_ins.object);		\
 }
@@ -238,8 +253,38 @@ show_one(up_threshold, up_threshold);
 show_one(ignore_nice_load, ignore_nice);
 show_one(powersave_bias, powersave_bias);
 
-static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
-		const char *buf, size_t count)
+/*** delete after deprecation time ***/
+
+#define DEPRECATION_MSG(file_name)					\
+	printk_once(KERN_INFO "CPUFREQ: Per core ondemand sysfs "	\
+		    "interface is deprecated - " #file_name "\n");
+
+#define show_one_old(file_name)						\
+static ssize_t show_##file_name##_old					\
+(struct cpufreq_policy *unused, char *buf)				\
+{									\
+	printk_once(KERN_INFO "CPUFREQ: Per core ondemand sysfs "	\
+		    "interface is deprecated - " #file_name "\n");	\
+	return show_##file_name(NULL, NULL, buf);			\
+}
+show_one_old(sampling_rate);
+show_one_old(up_threshold);
+show_one_old(ignore_nice_load);
+show_one_old(powersave_bias);
+show_one_old(sampling_rate_min);
+show_one_old(sampling_rate_max);
+
+#define define_one_ro_old(object, _name)	\
+static struct freq_attr object =		\
+__ATTR(_name, 0444, show_##_name##_old, NULL)
+
+define_one_ro_old(sampling_rate_min_old, sampling_rate_min);
+define_one_ro_old(sampling_rate_max_old, sampling_rate_max);
+
+/*** delete after deprecation time ***/
+
+static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
+				   const char *buf, size_t count)
 {
 	unsigned int input;
 	int ret;
@@ -254,8 +299,8 @@ static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
 	return count;
 }
 
-static ssize_t store_up_threshold(struct cpufreq_policy *unused,
-		const char *buf, size_t count)
+static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
+				  const char *buf, size_t count)
 {
 	unsigned int input;
 	int ret;
@@ -273,8 +318,8 @@ static ssize_t store_up_threshold(struct cpufreq_policy *unused,
 	return count;
 }
 
-static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
-		const char *buf, size_t count)
+static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
+				      const char *buf, size_t count)
 {
 	unsigned int input;
 	int ret;
@@ -310,8 +355,8 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
 	return count;
 }
 
-static ssize_t store_powersave_bias(struct cpufreq_policy *unused,
-		const char *buf, size_t count)
+static ssize_t store_powersave_bias(struct kobject *a, struct attribute *b,
+				    const char *buf, size_t count)
 {
 	unsigned int input;
 	int ret;
@@ -332,7 +377,7 @@ static ssize_t store_powersave_bias(struct cpufreq_policy *unused,
 }
 
 #define define_one_rw(_name) \
-static struct freq_attr _name = \
+static struct global_attr _name = \
 __ATTR(_name, 0644, show_##_name, store_##_name)
 
 define_one_rw(sampling_rate);
@@ -355,6 +400,47 @@ static struct attribute_group dbs_attr_group = {
 	.name = "ondemand",
 };
 
+/*** delete after deprecation time ***/
+
+#define write_one_old(file_name)					\
+static ssize_t store_##file_name##_old					\
+(struct cpufreq_policy *unused, const char *buf, size_t count)		\
+{									\
+	printk_once(KERN_INFO "CPUFREQ: Per core ondemand sysfs "	\
+		    "interface is deprecated - " #file_name "\n");	\
+	return store_##file_name(NULL, NULL, buf, count);		\
+}
+write_one_old(sampling_rate);
+write_one_old(up_threshold);
+write_one_old(ignore_nice_load);
+write_one_old(powersave_bias);
+
+#define define_one_rw_old(object, _name)	\
+static struct freq_attr object =		\
+__ATTR(_name, 0644, show_##_name##_old, store_##_name##_old)
+
+define_one_rw_old(sampling_rate_old, sampling_rate);
+define_one_rw_old(up_threshold_old, up_threshold);
+define_one_rw_old(ignore_nice_load_old, ignore_nice_load);
+define_one_rw_old(powersave_bias_old, powersave_bias);
+
+static struct attribute *dbs_attributes_old[] = {
+	&sampling_rate_max_old.attr,
+	&sampling_rate_min_old.attr,
+	&sampling_rate_old.attr,
+	&up_threshold_old.attr,
+	&ignore_nice_load_old.attr,
+	&powersave_bias_old.attr,
+	NULL
+};
+
+static struct attribute_group dbs_attr_group_old = {
+	.attrs = dbs_attributes_old,
+	.name = "ondemand",
+};
+
+/*** delete after deprecation time ***/
+
 /************************** sysfs end ************************/
 
 static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
@@ -545,7 +631,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 
 		mutex_lock(&dbs_mutex);
 
-		rc = sysfs_create_group(&policy->kobj, &dbs_attr_group);
+		rc = sysfs_create_group(&policy->kobj, &dbs_attr_group_old);
 		if (rc) {
 			mutex_unlock(&dbs_mutex);
 			return rc;
@@ -566,13 +652,20 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		}
 		this_dbs_info->cpu = cpu;
 		ondemand_powersave_bias_init_cpu(cpu);
-		mutex_init(&this_dbs_info->timer_mutex);
 		/*
 		 * Start the timerschedule work, when this governor
 		 * is used for first time
 		 */
 		if (dbs_enable == 1) {
 			unsigned int latency;
+
+			rc = sysfs_create_group(cpufreq_global_kobject,
+						&dbs_attr_group);
+			if (rc) {
+				mutex_unlock(&dbs_mutex);
+				return rc;
+			}
+
 			/* policy latency is in nS. Convert it to uS first */
 			latency = policy->cpuinfo.transition_latency / 1000;
 			if (latency == 0)
@@ -586,6 +679,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		}
 		mutex_unlock(&dbs_mutex);
 
+		mutex_init(&this_dbs_info->timer_mutex);
 		dbs_timer_init(this_dbs_info);
 		break;
 
@@ -593,10 +687,13 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		dbs_timer_exit(this_dbs_info);
 
 		mutex_lock(&dbs_mutex);
-		sysfs_remove_group(&policy->kobj, &dbs_attr_group);
+		sysfs_remove_group(&policy->kobj, &dbs_attr_group_old);
 		mutex_destroy(&this_dbs_info->timer_mutex);
 		dbs_enable--;
 		mutex_unlock(&dbs_mutex);
+		if (!dbs_enable)
+			sysfs_remove_group(cpufreq_global_kobject,
+					   &dbs_attr_group);
 
 		break;
 
@@ -614,16 +711,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 	return 0;
 }
 
-#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
-static
-#endif
-struct cpufreq_governor cpufreq_gov_ondemand = {
-	.name			= "ondemand",
-	.governor		= cpufreq_governor_dbs,
-	.max_transition_latency	= TRANSITION_LATENCY_LIMIT,
-	.owner			= THIS_MODULE,
-};
-
 static int __init cpufreq_gov_dbs_init(void)
 {
 	int err;
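
With the ondemand changes applied, the tunables move from a per-core directory to the shared one; the old per-policy files remain as deprecated wrappers that log a printk_once warning and forward to the new show/store handlers:

	old (deprecated):	/sys/devices/system/cpu/cpu0/cpufreq/ondemand/sampling_rate
	new (global):		/sys/devices/system/cpu/cpufreq/ondemand/sampling_rate

The global "ondemand" attribute group is created when the first policy starts the governor (dbs_enable reaches 1) and removed again when the last policy stops it (dbs_enable drops back to 0).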