author		Linus Torvalds <torvalds@linux-foundation.org>	2009-09-18 12:16:57 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-09-18 12:16:57 -0400
commit		714af0693863dfb6f075f4465053976d2d076a21 (patch)
tree		4da5efd5b229611cdee6a503dbae090adff3edf0
parent		a03fdb7612874834d6847107198712d18b5242c7 (diff)
parent		f0adb134d8dc9993a9998dc50845ec4f6ff4fadc (diff)
Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/davej/cpufreq
* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/davej/cpufreq:
[CPUFREQ] Fix NULL ptr regression in powernow-k8
[CPUFREQ] Create a blacklist for processors that should not load the acpi-cpufreq module.
[CPUFREQ] Powernow-k8: Enable more than 2 low P-states
[CPUFREQ] remove rwsem lock from CPUFREQ_GOV_STOP call (second call site)
[CPUFREQ] ondemand - Use global sysfs dir for tuning settings
[CPUFREQ] Introduce global, not per core: /sys/devices/system/cpu/cpufreq
[CPUFREQ] Bail out of cpufreq_add_dev if the link for a managed CPU got created
[CPUFREQ] Factor out policy setting from cpufreq_add_dev
[CPUFREQ] Factor out interface creation from cpufreq_add_dev
[CPUFREQ] Factor out symlink creation from cpufreq_add_dev
[CPUFREQ] cleanup up -ENOMEM handling in cpufreq_add_dev
[CPUFREQ] Reduce scope of cpu_sys_dev in cpufreq_add_dev
[CPUFREQ] update Doc for cpuinfo_cur_freq and scaling_cur_freq
Diffstat:
-rw-r--r--	Documentation/cpu-freq/user-guide.txt		 |   9
-rw-r--r--	arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c	 |  21
-rw-r--r--	arch/x86/kernel/cpu/cpufreq/powernow-k8.c	 |  44
-rw-r--r--	drivers/cpufreq/cpufreq.c			 | 305
-rw-r--r--	drivers/cpufreq/cpufreq_ondemand.c		 | 139
-rw-r--r--	include/linux/cpufreq.h				 |  10
6 files changed, 350 insertions(+), 178 deletions(-)
diff --git a/Documentation/cpu-freq/user-guide.txt b/Documentation/cpu-freq/user-guide.txt
index 5d5f5fadd1c2..2a5b850847c0 100644
--- a/Documentation/cpu-freq/user-guide.txt
+++ b/Documentation/cpu-freq/user-guide.txt
@@ -176,7 +176,9 @@ scaling_governor, and by "echoing" the name of another
 				work on some specific architectures or
 				processors.
 
-cpuinfo_cur_freq :		Current speed of the CPU, in KHz.
+cpuinfo_cur_freq :		Current frequency of the CPU as obtained from
+				the hardware, in KHz. This is the frequency
+				the CPU actually runs at.
 
 scaling_available_frequencies :	List of available frequencies, in KHz.
 
@@ -196,7 +198,10 @@ related_cpus : List of CPUs that need some sort of frequency
 
 scaling_driver :		Hardware driver for cpufreq.
 
-scaling_cur_freq :		Current frequency of the CPU, in KHz.
+scaling_cur_freq :		Current frequency of the CPU as determined by
+				the governor and cpufreq core, in KHz. This is
+				the frequency the kernel thinks the CPU runs
+				at.
 
 If you have selected the "userspace" governor which allows you to
 set the CPU operating frequency to a specific value, you can read out
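The distinction this documentation change draws can be checked from user space by reading the two files and comparing. A minimal C sketch, assuming the standard sysfs layout for cpu0 (cpuinfo_cur_freq is usually readable only by root; error handling kept trivial):

#include <stdio.h>

static long read_khz(const char *path)
{
	FILE *f = fopen(path, "r");
	long khz = -1;

	if (f) {
		if (fscanf(f, "%ld", &khz) != 1)
			khz = -1;
		fclose(f);
	}
	return khz;
}

int main(void)
{
	/* frequency as reported by the hardware */
	long hw = read_khz("/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_cur_freq");
	/* frequency as tracked by the governor and cpufreq core */
	long sw = read_khz("/sys/devices/system/cpu/cpu0/cpufreq/scaling_cur_freq");

	printf("hardware: %ld kHz, kernel: %ld kHz\n", hw, sw);
	return 0;
}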
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index 4109679863c1..7bb676c533aa 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -526,6 +526,21 @@ static const struct dmi_system_id sw_any_bug_dmi_table[] = {
 	},
 	{ }
 };
+
+static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
+{
+	/* http://www.intel.com/Assets/PDF/specupdate/314554.pdf
+	 * AL30: A Machine Check Exception (MCE) Occurring during an
+	 * Enhanced Intel SpeedStep Technology Ratio Change May Cause
+	 * Both Processor Cores to Lock Up when HT is enabled*/
+	if (c->x86_vendor == X86_VENDOR_INTEL) {
+		if ((c->x86 == 15) &&
+		    (c->x86_model == 6) &&
+		    (c->x86_mask == 8) && smt_capable())
+			return -ENODEV;
+	}
+	return 0;
+}
 #endif
 
 static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
@@ -540,6 +555,12 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 
 	dprintk("acpi_cpufreq_cpu_init\n");
 
+#ifdef CONFIG_SMP
+	result = acpi_cpufreq_blacklist(c);
+	if (result)
+		return result;
+#endif
+
 	data = kzalloc(sizeof(struct acpi_cpufreq_data), GFP_KERNEL);
 	if (!data)
 		return -ENOMEM;
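For reference, the blacklist added above keys on the vendor/family/model/stepping tuple called out in the erratum PDF, plus the SMT check. The same match shown in isolation, with hypothetical names (cpu_id and is_blacklisted are illustrative, not kernel API):

struct cpu_id {
	int vendor;	/* 0 == Intel in this sketch */
	int family;
	int model;
	int stepping;
};

static int is_blacklisted(const struct cpu_id *c, int smt_enabled)
{
	/* Intel family 15, model 6, stepping 8 with HT enabled (AL30) */
	if (c->vendor == 0 && c->family == 15 &&
	    c->model == 6 && c->stepping == 8 && smt_enabled)
		return 1;	/* caller translates this into -ENODEV */
	return 0;
}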
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index 2a50ef891000..6394aa5c7985 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -605,9 +605,10 @@ static int check_pst_table(struct powernow_k8_data *data, struct pst_s *pst,
 	return 0;
 }
 
-static void invalidate_entry(struct powernow_k8_data *data, unsigned int entry)
+static void invalidate_entry(struct cpufreq_frequency_table *powernow_table,
+		unsigned int entry)
 {
-	data->powernow_table[entry].frequency = CPUFREQ_ENTRY_INVALID;
+	powernow_table[entry].frequency = CPUFREQ_ENTRY_INVALID;
 }
 
 static void print_basics(struct powernow_k8_data *data)
@@ -854,6 +855,10 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
 		goto err_out;
 	}
 
+	/* fill in data */
+	data->numps = data->acpi_data.state_count;
+	powernow_k8_acpi_pst_values(data, 0);
+
 	if (cpu_family == CPU_HW_PSTATE)
 		ret_val = fill_powernow_table_pstate(data, powernow_table);
 	else
@@ -866,11 +871,8 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
 	powernow_table[data->acpi_data.state_count].index = 0;
 	data->powernow_table = powernow_table;
 
-	/* fill in data */
-	data->numps = data->acpi_data.state_count;
 	if (cpumask_first(cpu_core_mask(data->cpu)) == data->cpu)
 		print_basics(data);
-	powernow_k8_acpi_pst_values(data, 0);
 
 	/* notify BIOS that we exist */
 	acpi_processor_notify_smm(THIS_MODULE);
@@ -914,13 +916,13 @@ static int fill_powernow_table_pstate(struct powernow_k8_data *data,
 				"bad value %d.\n", i, index);
 			printk(KERN_ERR PFX "Please report to BIOS "
 				"manufacturer\n");
-			invalidate_entry(data, i);
+			invalidate_entry(powernow_table, i);
 			continue;
 		}
 		rdmsr(MSR_PSTATE_DEF_BASE + index, lo, hi);
 		if (!(hi & HW_PSTATE_VALID_MASK)) {
 			dprintk("invalid pstate %d, ignoring\n", index);
-			invalidate_entry(data, i);
+			invalidate_entry(powernow_table, i);
 			continue;
 		}
 
@@ -941,7 +943,6 @@ static int fill_powernow_table_fidvid(struct powernow_k8_data *data,
 		struct cpufreq_frequency_table *powernow_table)
 {
 	int i;
-	int cntlofreq = 0;
 
 	for (i = 0; i < data->acpi_data.state_count; i++) {
 		u32 fid;
@@ -970,7 +971,7 @@ static int fill_powernow_table_fidvid(struct powernow_k8_data *data,
 		/* verify frequency is OK */
 		if ((freq > (MAX_FREQ * 1000)) || (freq < (MIN_FREQ * 1000))) {
 			dprintk("invalid freq %u kHz, ignoring\n", freq);
-			invalidate_entry(data, i);
+			invalidate_entry(powernow_table, i);
 			continue;
 		}
 
@@ -978,38 +979,17 @@ static int fill_powernow_table_fidvid(struct powernow_k8_data *data,
 		 * BIOSs are using "off" to indicate invalid */
 		if (vid == VID_OFF) {
 			dprintk("invalid vid %u, ignoring\n", vid);
-			invalidate_entry(data, i);
+			invalidate_entry(powernow_table, i);
 			continue;
 		}
 
-		/* verify only 1 entry from the lo frequency table */
-		if (fid < HI_FID_TABLE_BOTTOM) {
-			if (cntlofreq) {
-				/* if both entries are the same,
-				 * ignore this one ... */
-				if ((freq != powernow_table[cntlofreq].frequency) ||
-				    (index != powernow_table[cntlofreq].index)) {
-					printk(KERN_ERR PFX
-						"Too many lo freq table "
-						"entries\n");
-					return 1;
-				}
-
-				dprintk("double low frequency table entry, "
-					"ignoring it.\n");
-				invalidate_entry(data, i);
-				continue;
-			} else
-				cntlofreq = i;
-		}
-
 		if (freq != (data->acpi_data.states[i].core_frequency * 1000)) {
 			printk(KERN_INFO PFX "invalid freq entries "
 				"%u kHz vs. %u kHz\n", freq,
 				(unsigned int)
 				(data->acpi_data.states[i].core_frequency
 				* 1000));
-			invalidate_entry(data, i);
+			invalidate_entry(powernow_table, i);
 			continue;
 		}
 	}
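The invalidate_entry() rework above relies on a cpufreq convention: frequency-table consumers skip entries whose frequency is CPUFREQ_ENTRY_INVALID rather than expecting the table to be compacted, so marking an entry is enough to drop it. A kernel-style sketch of a walker honoring that convention (count_valid is hypothetical, not part of this patch):

static unsigned int count_valid(struct cpufreq_frequency_table *table)
{
	unsigned int i, n = 0;

	for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
		if (table[i].frequency == CPUFREQ_ENTRY_INVALID)
			continue;	/* invalidated, e.g. bad fid/vid */
		n++;
	}
	return n;
}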
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 2968ed6a9c49..3938c7817095 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -61,6 +61,8 @@ static DEFINE_SPINLOCK(cpufreq_driver_lock);
  *   are concerned with are online after they get the lock.
  * - Governor routines that can be called in cpufreq hotplug path should not
  *   take this sem as top level hotplug notifier handler takes this.
+ * - Lock should not be held across
+ *     __cpufreq_governor(data, CPUFREQ_GOV_STOP);
  */
 static DEFINE_PER_CPU(int, policy_cpu);
 static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
@@ -686,6 +688,9 @@ static struct attribute *default_attrs[] = {
 	NULL
 };
 
+struct kobject *cpufreq_global_kobject;
+EXPORT_SYMBOL(cpufreq_global_kobject);
+
 #define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
 #define to_attr(a) container_of(a, struct freq_attr, attr)
 
@@ -756,92 +761,20 @@ static struct kobj_type ktype_cpufreq = {
 	.release	= cpufreq_sysfs_release,
 };
 
-
-/**
- * cpufreq_add_dev - add a CPU device
- *
- * Adds the cpufreq interface for a CPU device.
- *
- * The Oracle says: try running cpufreq registration/unregistration concurrently
- * with with cpu hotplugging and all hell will break loose. Tried to clean this
- * mess up, but more thorough testing is needed. - Mathieu
+/*
+ * Returns:
+ * Negative: Failure
+ * 0: Success
+ * Positive: When we have a managed CPU and the sysfs got symlinked
  */
-static int cpufreq_add_dev(struct sys_device *sys_dev)
+int cpufreq_add_dev_policy(unsigned int cpu, struct cpufreq_policy *policy,
+		struct sys_device *sys_dev)
 {
-	unsigned int cpu = sys_dev->id;
 	int ret = 0;
-	struct cpufreq_policy new_policy;
-	struct cpufreq_policy *policy;
-	struct freq_attr **drv_attr;
-	struct sys_device *cpu_sys_dev;
+#ifdef CONFIG_SMP
 	unsigned long flags;
 	unsigned int j;
 
-	if (cpu_is_offline(cpu))
-		return 0;
-
-	cpufreq_debug_disable_ratelimit();
-	dprintk("adding CPU %u\n", cpu);
-
-#ifdef CONFIG_SMP
-	/* check whether a different CPU already registered this
-	 * CPU because it is in the same boat. */
-	policy = cpufreq_cpu_get(cpu);
-	if (unlikely(policy)) {
-		cpufreq_cpu_put(policy);
-		cpufreq_debug_enable_ratelimit();
-		return 0;
-	}
-#endif
-
-	if (!try_module_get(cpufreq_driver->owner)) {
-		ret = -EINVAL;
-		goto module_out;
-	}
-
-	policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
-	if (!policy) {
-		ret = -ENOMEM;
-		goto nomem_out;
-	}
-	if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL)) {
-		ret = -ENOMEM;
-		goto err_free_policy;
-	}
-	if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL)) {
-		ret = -ENOMEM;
-		goto err_free_cpumask;
-	}
-
-	policy->cpu = cpu;
-	cpumask_copy(policy->cpus, cpumask_of(cpu));
-
-	/* Initially set CPU itself as the policy_cpu */
-	per_cpu(policy_cpu, cpu) = cpu;
-	ret = (lock_policy_rwsem_write(cpu) < 0);
-	WARN_ON(ret);
-
-	init_completion(&policy->kobj_unregister);
-	INIT_WORK(&policy->update, handle_update);
-
-	/* Set governor before ->init, so that driver could check it */
-	policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
-	/* call driver. From then on the cpufreq must be able
-	 * to accept all calls to ->verify and ->setpolicy for this CPU
-	 */
-	ret = cpufreq_driver->init(policy);
-	if (ret) {
-		dprintk("initialization failed\n");
-		goto err_unlock_policy;
-	}
-	policy->user_policy.min = policy->min;
-	policy->user_policy.max = policy->max;
-
-	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
-				     CPUFREQ_START, policy);
-
-#ifdef CONFIG_SMP
-
 #ifdef CONFIG_HOTPLUG_CPU
 	if (per_cpu(cpufreq_cpu_governor, cpu)) {
 		policy->governor = per_cpu(cpufreq_cpu_governor, cpu);
@@ -872,9 +805,8 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
 			/* Should not go through policy unlock path */
 			if (cpufreq_driver->exit)
 				cpufreq_driver->exit(policy);
-			ret = -EBUSY;
 			cpufreq_cpu_put(managed_policy);
-			goto err_free_cpumask;
+			return -EBUSY;
 		}
 
 		spin_lock_irqsave(&cpufreq_driver_lock, flags);
@@ -893,17 +825,62 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
 			 * Call driver->exit() because only the cpu parent of
 			 * the kobj needed to call init().
 			 */
-			goto out_driver_exit; /* call driver->exit() */
+			if (cpufreq_driver->exit)
+				cpufreq_driver->exit(policy);
+
+			if (!ret)
+				return 1;
+			else
+				return ret;
 		}
 	}
 #endif
-	memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
+	return ret;
+}
+
+
+/* symlink affected CPUs */
+int cpufreq_add_dev_symlink(unsigned int cpu, struct cpufreq_policy *policy)
+{
+	unsigned int j;
+	int ret = 0;
+
+	for_each_cpu(j, policy->cpus) {
+		struct cpufreq_policy *managed_policy;
+		struct sys_device *cpu_sys_dev;
+
+		if (j == cpu)
+			continue;
+		if (!cpu_online(j))
+			continue;
+
+		dprintk("CPU %u already managed, adding link\n", j);
+		managed_policy = cpufreq_cpu_get(cpu);
+		cpu_sys_dev = get_cpu_sysdev(j);
+		ret = sysfs_create_link(&cpu_sys_dev->kobj, &policy->kobj,
+					"cpufreq");
+		if (ret) {
+			cpufreq_cpu_put(managed_policy);
+			return ret;
+		}
+	}
+	return ret;
+}
+
+int cpufreq_add_dev_interface(unsigned int cpu, struct cpufreq_policy *policy,
+		struct sys_device *sys_dev)
+{
+	struct cpufreq_policy new_policy;
+	struct freq_attr **drv_attr;
+	unsigned long flags;
+	int ret = 0;
+	unsigned int j;
 
 	/* prepare interface data */
-	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, &sys_dev->kobj,
-				   "cpufreq");
+	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
+				   &sys_dev->kobj, "cpufreq");
 	if (ret)
-		goto out_driver_exit;
+		return ret;
 
 	/* set up files for this cpu device */
 	drv_attr = cpufreq_driver->attr;
@@ -926,35 +903,20 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
 
 	spin_lock_irqsave(&cpufreq_driver_lock, flags);
 	for_each_cpu(j, policy->cpus) {
 		if (!cpu_online(j))
 			continue;
 		per_cpu(cpufreq_cpu_data, j) = policy;
 		per_cpu(policy_cpu, j) = policy->cpu;
 	}
 	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
-	/* symlink affected CPUs */
-	for_each_cpu(j, policy->cpus) {
-		struct cpufreq_policy *managed_policy;
-
-		if (j == cpu)
-			continue;
-		if (!cpu_online(j))
-			continue;
-
-		dprintk("CPU %u already managed, adding link\n", j);
-		managed_policy = cpufreq_cpu_get(cpu);
-		cpu_sys_dev = get_cpu_sysdev(j);
-		ret = sysfs_create_link(&cpu_sys_dev->kobj, &policy->kobj,
-					"cpufreq");
-		if (ret) {
-			cpufreq_cpu_put(managed_policy);
-			goto err_out_unregister;
-		}
-	}
+	ret = cpufreq_add_dev_symlink(cpu, policy);
+	if (ret)
+		goto err_out_kobj_put;
 
-	policy->governor = NULL; /* to assure that the starting sequence is
-				  * run in cpufreq_set_policy */
+	memcpy(&new_policy, policy, sizeof(struct cpufreq_policy));
+	/* assure that the starting sequence is run in __cpufreq_set_policy */
+	policy->governor = NULL;
 
 	/* set default policy */
 	ret = __cpufreq_set_policy(policy, &new_policy);
@@ -963,8 +925,107 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
 
 	if (ret) {
 		dprintk("setting policy failed\n");
-		goto err_out_unregister;
+		if (cpufreq_driver->exit)
+			cpufreq_driver->exit(policy);
+	}
+	return ret;
+
+err_out_kobj_put:
+	kobject_put(&policy->kobj);
+	wait_for_completion(&policy->kobj_unregister);
+	return ret;
+}
+
+
+/**
+ * cpufreq_add_dev - add a CPU device
+ *
+ * Adds the cpufreq interface for a CPU device.
+ *
+ * The Oracle says: try running cpufreq registration/unregistration concurrently
+ * with with cpu hotplugging and all hell will break loose. Tried to clean this
+ * mess up, but more thorough testing is needed. - Mathieu
+ */
+static int cpufreq_add_dev(struct sys_device *sys_dev)
+{
+	unsigned int cpu = sys_dev->id;
+	int ret = 0;
+	struct cpufreq_policy *policy;
+	unsigned long flags;
+	unsigned int j;
+
+	if (cpu_is_offline(cpu))
+		return 0;
+
+	cpufreq_debug_disable_ratelimit();
+	dprintk("adding CPU %u\n", cpu);
+
+#ifdef CONFIG_SMP
+	/* check whether a different CPU already registered this
+	 * CPU because it is in the same boat. */
+	policy = cpufreq_cpu_get(cpu);
+	if (unlikely(policy)) {
+		cpufreq_cpu_put(policy);
+		cpufreq_debug_enable_ratelimit();
+		return 0;
+	}
+#endif
+
+	if (!try_module_get(cpufreq_driver->owner)) {
+		ret = -EINVAL;
+		goto module_out;
+	}
+
+	ret = -ENOMEM;
+	policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
+	if (!policy)
+		goto nomem_out;
+
+	if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
+		goto err_free_policy;
+
+	if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
+		goto err_free_cpumask;
+
+	policy->cpu = cpu;
+	cpumask_copy(policy->cpus, cpumask_of(cpu));
+
+	/* Initially set CPU itself as the policy_cpu */
+	per_cpu(policy_cpu, cpu) = cpu;
+	ret = (lock_policy_rwsem_write(cpu) < 0);
+	WARN_ON(ret);
+
+	init_completion(&policy->kobj_unregister);
+	INIT_WORK(&policy->update, handle_update);
+
+	/* Set governor before ->init, so that driver could check it */
+	policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
+	/* call driver. From then on the cpufreq must be able
+	 * to accept all calls to ->verify and ->setpolicy for this CPU
+	 */
+	ret = cpufreq_driver->init(policy);
+	if (ret) {
+		dprintk("initialization failed\n");
+		goto err_unlock_policy;
 	}
+	policy->user_policy.min = policy->min;
+	policy->user_policy.max = policy->max;
+
+	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
+				     CPUFREQ_START, policy);
+
+	ret = cpufreq_add_dev_policy(cpu, policy, sys_dev);
+	if (ret) {
+		if (ret > 0)
+			/* This is a managed cpu, symlink created,
+			   exit with 0 */
+			ret = 0;
+		goto err_unlock_policy;
+	}
+
+	ret = cpufreq_add_dev_interface(cpu, policy, sys_dev);
+	if (ret)
+		goto err_out_unregister;
 
 	unlock_policy_rwsem_write(cpu);
 
@@ -982,14 +1043,9 @@ err_out_unregister:
 		per_cpu(cpufreq_cpu_data, j) = NULL;
 	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
-err_out_kobj_put:
 	kobject_put(&policy->kobj);
 	wait_for_completion(&policy->kobj_unregister);
 
-out_driver_exit:
-	if (cpufreq_driver->exit)
-		cpufreq_driver->exit(policy);
-
 err_unlock_policy:
 	unlock_policy_rwsem_write(cpu);
 err_free_cpumask:
@@ -1653,8 +1709,17 @@ static int __cpufreq_set_policy(struct cpufreq_policy *data,
 			dprintk("governor switch\n");
 
 			/* end old governor */
-			if (data->governor)
+			if (data->governor) {
+				/*
+				 * Need to release the rwsem around governor
+				 * stop due to lock dependency between
+				 * cancel_delayed_work_sync and the read lock
+				 * taken in the delayed work handler.
+				 */
+				unlock_policy_rwsem_write(data->cpu);
 				__cpufreq_governor(data, CPUFREQ_GOV_STOP);
+				lock_policy_rwsem_write(data->cpu);
+			}
 
 			/* start new governor */
 			data->governor = policy->governor;
@@ -1884,7 +1949,11 @@ static int __init cpufreq_core_init(void)
 		per_cpu(policy_cpu, cpu) = -1;
 		init_rwsem(&per_cpu(cpu_policy_rwsem, cpu));
 	}
+
+	cpufreq_global_kobject = kobject_create_and_add("cpufreq",
+						&cpu_sysdev_class.kset.kobj);
+	BUG_ON(!cpufreq_global_kobject);
+
 	return 0;
 }
-
 core_initcall(cpufreq_core_init);
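The rwsem release around CPUFREQ_GOV_STOP above (this is the second call site; the first was fixed earlier in the series) breaks a wait cycle rather than a simple lock-order problem: per the in-diff comment, the governor's delayed work handler (do_dbs_timer in ondemand) takes the policy rwsem for reading, while GOV_STOP ends up in cancel_delayed_work_sync(), which waits for that work to finish. Sketched with the names used in the diff:

/*
 * writer:  holds the policy rwsem for writing
 *          -> GOV_STOP -> cancel_delayed_work_sync()   (waits for the work)
 * work:    delayed work handler -> read lock on rwsem  (waits for the writer)
 *
 * Hence the drop/call/reacquire in __cpufreq_set_policy():
 */
unlock_policy_rwsem_write(data->cpu);
__cpufreq_governor(data, CPUFREQ_GOV_STOP);
lock_policy_rwsem_write(data->cpu);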
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index d7a528c80de8..071699de50ee 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -55,6 +55,18 @@ static unsigned int min_sampling_rate;
 #define TRANSITION_LATENCY_LIMIT	(10 * 1000 * 1000)
 
 static void do_dbs_timer(struct work_struct *work);
+static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
+				unsigned int event);
+
+#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
+static
+#endif
+struct cpufreq_governor cpufreq_gov_ondemand = {
+	.name			= "ondemand",
+	.governor		= cpufreq_governor_dbs,
+	.max_transition_latency	= TRANSITION_LATENCY_LIMIT,
+	.owner			= THIS_MODULE,
+};
 
 /* Sampling types */
 enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};
@@ -207,20 +219,23 @@ static void ondemand_powersave_bias_init(void)
 }
 
 /************************** sysfs interface ************************/
-static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf)
+
+static ssize_t show_sampling_rate_max(struct kobject *kobj,
+				      struct attribute *attr, char *buf)
 {
 	printk_once(KERN_INFO "CPUFREQ: ondemand sampling_rate_max "
 	       "sysfs file is deprecated - used by: %s\n", current->comm);
 	return sprintf(buf, "%u\n", -1U);
 }
 
-static ssize_t show_sampling_rate_min(struct cpufreq_policy *policy, char *buf)
+static ssize_t show_sampling_rate_min(struct kobject *kobj,
+				      struct attribute *attr, char *buf)
 {
 	return sprintf(buf, "%u\n", min_sampling_rate);
 }
 
 #define define_one_ro(_name) \
-static struct freq_attr _name = \
+static struct global_attr _name = \
 __ATTR(_name, 0444, show_##_name, NULL)
 
 define_one_ro(sampling_rate_max);
@@ -229,7 +244,7 @@ define_one_ro(sampling_rate_min);
 /* cpufreq_ondemand Governor Tunables */
 #define show_one(file_name, object)					\
 static ssize_t show_##file_name						\
-(struct cpufreq_policy *unused, char *buf)				\
+(struct kobject *kobj, struct attribute *attr, char *buf)		\
 {									\
 	return sprintf(buf, "%u\n", dbs_tuners_ins.object);		\
 }
@@ -238,8 +253,38 @@ show_one(up_threshold, up_threshold);
 show_one(ignore_nice_load, ignore_nice);
 show_one(powersave_bias, powersave_bias);
 
-static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
-		const char *buf, size_t count)
+/*** delete after deprecation time ***/
+
+#define DEPRECATION_MSG(file_name)					\
+	printk_once(KERN_INFO "CPUFREQ: Per core ondemand sysfs "	\
+		    "interface is deprecated - " #file_name "\n");
+
+#define show_one_old(file_name)						\
+static ssize_t show_##file_name##_old					\
+(struct cpufreq_policy *unused, char *buf)				\
+{									\
+	printk_once(KERN_INFO "CPUFREQ: Per core ondemand sysfs "	\
+		    "interface is deprecated - " #file_name "\n");	\
+	return show_##file_name(NULL, NULL, buf);			\
+}
+show_one_old(sampling_rate);
+show_one_old(up_threshold);
+show_one_old(ignore_nice_load);
+show_one_old(powersave_bias);
+show_one_old(sampling_rate_min);
+show_one_old(sampling_rate_max);
+
+#define define_one_ro_old(object, _name)				\
+static struct freq_attr object =					\
+__ATTR(_name, 0444, show_##_name##_old, NULL)
+
+define_one_ro_old(sampling_rate_min_old, sampling_rate_min);
+define_one_ro_old(sampling_rate_max_old, sampling_rate_max);
+
+/*** delete after deprecation time ***/
+
+static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
+				   const char *buf, size_t count)
 {
 	unsigned int input;
 	int ret;
@@ -254,8 +299,8 @@ static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
 	return count;
 }
 
-static ssize_t store_up_threshold(struct cpufreq_policy *unused,
-		const char *buf, size_t count)
+static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
+				  const char *buf, size_t count)
 {
 	unsigned int input;
 	int ret;
@@ -273,8 +318,8 @@ static ssize_t store_up_threshold(struct cpufreq_policy *unused,
 	return count;
 }
 
-static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
-		const char *buf, size_t count)
+static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
+				      const char *buf, size_t count)
 {
 	unsigned int input;
 	int ret;
@@ -310,8 +355,8 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
 	return count;
 }
 
-static ssize_t store_powersave_bias(struct cpufreq_policy *unused,
-		const char *buf, size_t count)
+static ssize_t store_powersave_bias(struct kobject *a, struct attribute *b,
+				    const char *buf, size_t count)
 {
 	unsigned int input;
 	int ret;
@@ -332,7 +377,7 @@ static ssize_t store_powersave_bias(struct cpufreq_policy *unused,
 }
 
 #define define_one_rw(_name) \
-static struct freq_attr _name = \
+static struct global_attr _name = \
 __ATTR(_name, 0644, show_##_name, store_##_name)
 
 define_one_rw(sampling_rate);
@@ -355,6 +400,47 @@ static struct attribute_group dbs_attr_group = {
 	.name = "ondemand",
 };
 
+/*** delete after deprecation time ***/
+
+#define write_one_old(file_name)					\
+static ssize_t store_##file_name##_old					\
+(struct cpufreq_policy *unused, const char *buf, size_t count)		\
+{									\
+	printk_once(KERN_INFO "CPUFREQ: Per core ondemand sysfs "	\
+		    "interface is deprecated - " #file_name "\n");	\
+	return store_##file_name(NULL, NULL, buf, count);		\
+}
+write_one_old(sampling_rate);
+write_one_old(up_threshold);
+write_one_old(ignore_nice_load);
+write_one_old(powersave_bias);
+
+#define define_one_rw_old(object, _name)				\
+static struct freq_attr object =					\
+__ATTR(_name, 0644, show_##_name##_old, store_##_name##_old)
+
+define_one_rw_old(sampling_rate_old, sampling_rate);
+define_one_rw_old(up_threshold_old, up_threshold);
+define_one_rw_old(ignore_nice_load_old, ignore_nice_load);
+define_one_rw_old(powersave_bias_old, powersave_bias);
+
+static struct attribute *dbs_attributes_old[] = {
+	&sampling_rate_max_old.attr,
+	&sampling_rate_min_old.attr,
+	&sampling_rate_old.attr,
+	&up_threshold_old.attr,
+	&ignore_nice_load_old.attr,
+	&powersave_bias_old.attr,
+	NULL
+};
+
+static struct attribute_group dbs_attr_group_old = {
+	.attrs = dbs_attributes_old,
+	.name = "ondemand",
+};
+
+/*** delete after deprecation time ***/
+
 /************************** sysfs end ************************/
 
 static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
@@ -545,7 +631,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 
 		mutex_lock(&dbs_mutex);
 
-		rc = sysfs_create_group(&policy->kobj, &dbs_attr_group);
+		rc = sysfs_create_group(&policy->kobj, &dbs_attr_group_old);
 		if (rc) {
 			mutex_unlock(&dbs_mutex);
 			return rc;
@@ -566,13 +652,20 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		}
 		this_dbs_info->cpu = cpu;
 		ondemand_powersave_bias_init_cpu(cpu);
-		mutex_init(&this_dbs_info->timer_mutex);
 		/*
 		 * Start the timerschedule work, when this governor
 		 * is used for first time
 		 */
 		if (dbs_enable == 1) {
 			unsigned int latency;
+
+			rc = sysfs_create_group(cpufreq_global_kobject,
+						&dbs_attr_group);
+			if (rc) {
+				mutex_unlock(&dbs_mutex);
+				return rc;
+			}
+
 			/* policy latency is in nS. Convert it to uS first */
 			latency = policy->cpuinfo.transition_latency / 1000;
 			if (latency == 0)
@@ -586,6 +679,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		}
 		mutex_unlock(&dbs_mutex);
 
+		mutex_init(&this_dbs_info->timer_mutex);
 		dbs_timer_init(this_dbs_info);
 		break;
 
@@ -593,10 +687,13 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		dbs_timer_exit(this_dbs_info);
 
 		mutex_lock(&dbs_mutex);
-		sysfs_remove_group(&policy->kobj, &dbs_attr_group);
+		sysfs_remove_group(&policy->kobj, &dbs_attr_group_old);
 		mutex_destroy(&this_dbs_info->timer_mutex);
 		dbs_enable--;
 		mutex_unlock(&dbs_mutex);
+		if (!dbs_enable)
+			sysfs_remove_group(cpufreq_global_kobject,
+					   &dbs_attr_group);
 
 		break;
 
@@ -614,16 +711,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 	return 0;
 }
 
-#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
-static
-#endif
-struct cpufreq_governor cpufreq_gov_ondemand = {
-	.name			= "ondemand",
-	.governor		= cpufreq_governor_dbs,
-	.max_transition_latency	= TRANSITION_LATENCY_LIMIT,
-	.owner			= THIS_MODULE,
-};
-
 static int __init cpufreq_gov_dbs_init(void)
 {
 	int err;
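Together, the global_attr type, cpufreq_global_kobject, and the dbs_attr_group/dbs_attr_group_old split above give the pattern for moving a governor tunable from per-policy sysfs to the new global directory. A condensed sketch of a new global tunable using this interface (my_tunable and the "mygov" group name are made up for illustration):

static unsigned int my_tunable;

static ssize_t show_my_tunable(struct kobject *kobj,
			       struct attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", my_tunable);
}

static ssize_t store_my_tunable(struct kobject *a, struct attribute *b,
				const char *buf, size_t count)
{
	unsigned int input;

	if (sscanf(buf, "%u", &input) != 1)
		return -EINVAL;
	my_tunable = input;
	return count;
}

static struct global_attr my_tunable_attr =
	__ATTR(my_tunable, 0644, show_my_tunable, store_my_tunable);

static struct attribute *my_attrs[] = {
	&my_tunable_attr.attr,
	NULL
};

static struct attribute_group my_attr_group = {
	.attrs = my_attrs,
	/* shows up as /sys/devices/system/cpu/cpufreq/mygov/my_tunable */
	.name = "mygov",
};

/* register once, e.g. when the first policy starts the governor:
 *	rc = sysfs_create_group(cpufreq_global_kobject, &my_attr_group);
 * and remove it when the last user goes away, mirroring the diff above. */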
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 161042746afc..44717eb47639 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -65,6 +65,9 @@ static inline int cpufreq_unregister_notifier(struct notifier_block *nb,
 
 struct cpufreq_governor;
 
+/* /sys/devices/system/cpu/cpufreq: entry point for global variables */
+extern struct kobject *cpufreq_global_kobject;
+
 #define CPUFREQ_ETERNAL			(-1)
 struct cpufreq_cpuinfo {
 	unsigned int		max_freq;
@@ -274,6 +277,13 @@ struct freq_attr {
 	ssize_t (*store)(struct cpufreq_policy *, const char *, size_t count);
 };
 
+struct global_attr {
+	struct attribute attr;
+	ssize_t (*show)(struct kobject *kobj,
+			struct attribute *attr, char *buf);
+	ssize_t (*store)(struct kobject *a, struct attribute *b,
+			 const char *c, size_t count);
+};
 
 /*********************************************************************
  *                        CPUFREQ 2.6. INTERFACE                     *
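Since cpufreq_global_kobject is exported (EXPORT_SYMBOL in cpufreq.c above), modular code can also parent its own kobject under /sys/devices/system/cpu/cpufreq. A hypothetical minimal module, assuming this header is in scope:

#include <linux/module.h>
#include <linux/kobject.h>
#include <linux/cpufreq.h>

static struct kobject *example_kobj;

static int __init example_init(void)
{
	/* creates /sys/devices/system/cpu/cpufreq/example */
	example_kobj = kobject_create_and_add("example",
					      cpufreq_global_kobject);
	return example_kobj ? 0 : -ENOMEM;
}

static void __exit example_exit(void)
{
	kobject_put(example_kobj);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");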