author     Rafael J. Wysocki <rafael.j.wysocki@intel.com>	2013-02-15 07:59:07 -0500
committer  Rafael J. Wysocki <rafael.j.wysocki@intel.com>	2013-02-15 07:59:07 -0500
commit     4419fbd4b408c3a8634b3a8dd952055d0f0b601f
tree       dfa48db949d2b092a92a5adb3c070db6287a37be /drivers/cpufreq
parent     95ecb407699825278f4031f153dbbe0f0713ff28
parent     191e5edf96dc4939f5db0605cc65de9f4d88d155
Merge branch 'pm-cpufreq'
* pm-cpufreq: (55 commits)
cpufreq / intel_pstate: Fix 32 bit build
cpufreq: conservative: Fix typos in comments
cpufreq: ondemand: Fix typos in comments
cpufreq: exynos: simplify .init() for setting policy->cpus
cpufreq: kirkwood: Add a cpufreq driver for Marvell Kirkwood SoCs
cpufreq/x86: Add P-state driver for sandy bridge.
cpufreq_stats: do not remove sysfs files if frequency table is not present
cpufreq: Do not track governor name for scaling drivers with internal governors.
cpufreq: Only call cpufreq_out_of_sync() for driver that implement cpufreq_driver.target()
cpufreq: Retrieve current frequency from scaling drivers with internal governors
cpufreq: Fix locking issues
cpufreq: Create a macro for unlock_policy_rwsem{read,write}
cpufreq: Remove unused HOTPLUG_CPU code
cpufreq: governors: Fix WARN_ON() for multi-policy platforms
cpufreq: ondemand: Replace down_differential tuner with adj_up_threshold
cpufreq / stats: Get rid of CPUFREQ_STATDEVICE_ATTR
cpufreq: Don't check cpu_online(policy->cpu)
cpufreq: add imx6q-cpufreq driver
cpufreq: Don't remove sysfs link for policy->cpu
cpufreq: Remove unnecessary use of policy->shared_type
...
Diffstat (limited to 'drivers/cpufreq')
24 files changed, 2051 insertions, 415 deletions
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index e0a899f25e37..cbcb21e32771 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -185,7 +185,7 @@ config CPU_FREQ_GOV_CONSERVATIVE
 	  If in doubt, say N.
 
 config GENERIC_CPUFREQ_CPU0
-	bool "Generic CPU0 cpufreq driver"
+	tristate "Generic CPU0 cpufreq driver"
 	depends on HAVE_CLK && REGULATOR && PM_OPP && OF
 	select CPU_FREQ_TABLE
 	help
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index a0b3661d90b0..7f333af1c059 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -77,9 +77,39 @@ config ARM_EXYNOS5250_CPUFREQ
 	  This adds the CPUFreq driver for Samsung EXYNOS5250
 	  SoC.
 
+config ARM_KIRKWOOD_CPUFREQ
+	def_bool ARCH_KIRKWOOD && OF
+	help
+	  This adds the CPUFreq driver for Marvell Kirkwood
+	  SoCs.
+
+config ARM_IMX6Q_CPUFREQ
+	tristate "Freescale i.MX6Q cpufreq support"
+	depends on SOC_IMX6Q
+	depends on REGULATOR_ANATOP
+	help
+	  This adds cpufreq driver support for Freescale i.MX6Q SOC.
+
+	  If in doubt, say N.
+
 config ARM_SPEAR_CPUFREQ
 	bool "SPEAr CPUFreq support"
 	depends on PLAT_SPEAR
 	default y
 	help
 	  This adds the CPUFreq driver support for SPEAr SOCs.
+
+config ARM_HIGHBANK_CPUFREQ
+	tristate "Calxeda Highbank-based"
+	depends on ARCH_HIGHBANK
+	select CPU_FREQ_TABLE
+	select GENERIC_CPUFREQ_CPU0
+	select PM_OPP
+	select REGULATOR
+
+	default m
+	help
+	  This adds the CPUFreq driver for Calxeda Highbank SoC
+	  based boards.
+
+	  If in doubt, say N.
diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86
index 7227cd734042..6aa7053ce2ef 100644
--- a/drivers/cpufreq/Kconfig.x86
+++ b/drivers/cpufreq/Kconfig.x86
@@ -2,6 +2,24 @@
 # x86 CPU Frequency scaling drivers
 #
 
+config X86_INTEL_PSTATE
+	tristate "Intel P state control"
+	depends on X86
+	help
+	  This driver provides a P state for Intel core processors.
+	  The driver implements an internal governor and will become
+	  the scaling driver and governor for Sandy bridge processors.
+
+	  When this driver is enabled it will become the preferred
+	  scaling driver for Sandy bridge processors.
+
+	  Note: This driver should be built with the same settings as
+	  the other scaling drivers configured into the system
+	  (module/built-in) in order for the driver to register itself
+	  as the scaling driver on the system.
+
+	  If in doubt, say N.
+
 config X86_PCC_CPUFREQ
 	tristate "Processor Clocking Control interface driver"
 	depends on ACPI && ACPI_PROCESSOR
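The "internal governor" note above is the key design point: intel_pstate fills in cpufreq_driver->setpolicy() instead of ->target(), so the core never layers ondemand or conservative on top of it. A minimal sketch of that driver shape, with hypothetical names (not the actual intel_pstate source; a real driver also supplies ->init and ->verify):

	static int example_set_policy(struct cpufreq_policy *policy)
	{
		/* pick P-states internally, honouring policy->min..policy->max */
		return 0;
	}

	static unsigned int example_get(unsigned int cpu)
	{
		/* report the current frequency; cpufreq_quick_get() calls this */
		return 0;
	}

	static struct cpufreq_driver example_pstate_driver = {
		.name		= "example_pstate",	/* hypothetical name */
		.setpolicy	= example_set_policy,	/* internal governor hook */
		.get		= example_get,
	};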
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index fadc4d496e2f..5399c45ac311 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -19,11 +19,12 @@ obj-$(CONFIG_GENERIC_CPUFREQ_CPU0)	+= cpufreq-cpu0.o
 ##################################################################################
 # x86 drivers.
 # Link order matters. K8 is preferred to ACPI because of firmware bugs in early
-# K8 systems. ACPI is preferred to all other hardware-specific drivers.
+# K8 systems. This is still the case but acpi-cpufreq errors out so that
+# powernow-k8 can then load. ACPI is preferred to all other hardware-specific drivers.
 # speedstep-* is preferred over p4-clockmod.
 
-obj-$(CONFIG_X86_POWERNOW_K8)		+= powernow-k8.o
 obj-$(CONFIG_X86_ACPI_CPUFREQ)		+= acpi-cpufreq.o mperf.o
+obj-$(CONFIG_X86_POWERNOW_K8)		+= powernow-k8.o
 obj-$(CONFIG_X86_PCC_CPUFREQ)		+= pcc-cpufreq.o
 obj-$(CONFIG_X86_POWERNOW_K6)		+= powernow-k6.o
 obj-$(CONFIG_X86_POWERNOW_K7)		+= powernow-k7.o
@@ -39,6 +40,7 @@ obj-$(CONFIG_X86_SPEEDSTEP_SMI)		+= speedstep-smi.o
 obj-$(CONFIG_X86_SPEEDSTEP_CENTRINO)	+= speedstep-centrino.o
 obj-$(CONFIG_X86_P4_CLOCKMOD)		+= p4-clockmod.o
 obj-$(CONFIG_X86_CPUFREQ_NFORCE2)	+= cpufreq-nforce2.o
+obj-$(CONFIG_X86_INTEL_PSTATE)		+= intel_pstate.o
 
 ##################################################################################
 # ARM SoC drivers
@@ -50,8 +52,11 @@ obj-$(CONFIG_ARM_EXYNOS_CPUFREQ)	+= exynos-cpufreq.o
 obj-$(CONFIG_ARM_EXYNOS4210_CPUFREQ)	+= exynos4210-cpufreq.o
 obj-$(CONFIG_ARM_EXYNOS4X12_CPUFREQ)	+= exynos4x12-cpufreq.o
 obj-$(CONFIG_ARM_EXYNOS5250_CPUFREQ)	+= exynos5250-cpufreq.o
-obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ)	+= omap-cpufreq.o
+obj-$(CONFIG_ARM_KIRKWOOD_CPUFREQ)	+= kirkwood-cpufreq.o
+obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ)	+= omap-cpufreq.o
 obj-$(CONFIG_ARM_SPEAR_CPUFREQ)		+= spear-cpufreq.o
+obj-$(CONFIG_ARM_HIGHBANK_CPUFREQ)	+= highbank-cpufreq.o
+obj-$(CONFIG_ARM_IMX6Q_CPUFREQ)		+= imx6q-cpufreq.o
 
 ##################################################################################
 # PowerPC platform drivers
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 7b0d49d78c61..937bc286591f 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -734,7 +734,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 
 #ifdef CONFIG_SMP
 	dmi_check_system(sw_any_bug_dmi_table);
-	if (bios_with_sw_any_bug && cpumask_weight(policy->cpus) == 1) {
+	if (bios_with_sw_any_bug && !policy_is_shared(policy)) {
 		policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
 		cpumask_copy(policy->cpus, cpu_core_mask(cpu));
 	}
@@ -762,6 +762,12 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 
 	switch (perf->control_register.space_id) {
 	case ACPI_ADR_SPACE_SYSTEM_IO:
+		if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
+		    boot_cpu_data.x86 == 0xf) {
+			pr_debug("AMD K8 systems must use native drivers.\n");
+			result = -ENODEV;
+			goto err_unreg;
+		}
 		pr_debug("SYSTEM IO addr space\n");
 		data->cpu_feature = SYSTEM_IO_CAPABLE;
 		break;
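The first hunk above swaps an open-coded cpumask test for the policy_is_shared() helper this series introduces in <linux/cpufreq.h>; it is essentially:

	static inline bool policy_is_shared(struct cpufreq_policy *policy)
	{
		return cpumask_weight(policy->cpus) > 1;
	}

so !policy_is_shared(policy) reads as "this policy manages exactly one CPU", matching the old cpumask_weight(policy->cpus) == 1.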
diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c
index debc5a7c8db6..4e5b7fb8927c 100644
--- a/drivers/cpufreq/cpufreq-cpu0.c
+++ b/drivers/cpufreq/cpufreq-cpu0.c
@@ -12,12 +12,12 @@
 #define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
 
 #include <linux/clk.h>
-#include <linux/cpu.h>
 #include <linux/cpufreq.h>
 #include <linux/err.h>
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/opp.h>
+#include <linux/platform_device.h>
 #include <linux/regulator/consumer.h>
 #include <linux/slab.h>
 
@@ -146,7 +146,6 @@ static int cpu0_cpufreq_init(struct cpufreq_policy *policy)
 	 * share the clock and voltage and clock. Use cpufreq affected_cpus
 	 * interface to have all CPUs scaled together.
 	 */
-	policy->shared_type = CPUFREQ_SHARED_TYPE_ANY;
 	cpumask_setall(policy->cpus);
 
 	cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
@@ -177,34 +176,32 @@ static struct cpufreq_driver cpu0_cpufreq_driver = {
 	.attr = cpu0_cpufreq_attr,
 };
 
-static int cpu0_cpufreq_driver_init(void)
+static int cpu0_cpufreq_probe(struct platform_device *pdev)
 {
 	struct device_node *np;
 	int ret;
 
-	np = of_find_node_by_path("/cpus/cpu@0");
+	for_each_child_of_node(of_find_node_by_path("/cpus"), np) {
+		if (of_get_property(np, "operating-points", NULL))
+			break;
+	}
+
 	if (!np) {
 		pr_err("failed to find cpu0 node\n");
 		return -ENOENT;
 	}
 
-	cpu_dev = get_cpu_device(0);
-	if (!cpu_dev) {
-		pr_err("failed to get cpu0 device\n");
-		ret = -ENODEV;
-		goto out_put_node;
-	}
-
+	cpu_dev = &pdev->dev;
 	cpu_dev->of_node = np;
 
-	cpu_clk = clk_get(cpu_dev, NULL);
+	cpu_clk = devm_clk_get(cpu_dev, NULL);
 	if (IS_ERR(cpu_clk)) {
 		ret = PTR_ERR(cpu_clk);
 		pr_err("failed to get cpu0 clock: %d\n", ret);
 		goto out_put_node;
 	}
 
-	cpu_reg = regulator_get(cpu_dev, "cpu0");
+	cpu_reg = devm_regulator_get(cpu_dev, "cpu0");
 	if (IS_ERR(cpu_reg)) {
 		pr_warn("failed to get cpu0 regulator\n");
 		cpu_reg = NULL;
@@ -267,7 +264,24 @@ out_put_node:
 	of_node_put(np);
 	return ret;
 }
-late_initcall(cpu0_cpufreq_driver_init);
+
+static int cpu0_cpufreq_remove(struct platform_device *pdev)
+{
+	cpufreq_unregister_driver(&cpu0_cpufreq_driver);
+	opp_free_cpufreq_table(cpu_dev, &freq_table);
+
+	return 0;
+}
+
+static struct platform_driver cpu0_cpufreq_platdrv = {
+	.driver = {
+		.name	= "cpufreq-cpu0",
+		.owner	= THIS_MODULE,
+	},
+	.probe		= cpu0_cpufreq_probe,
+	.remove		= cpu0_cpufreq_remove,
+};
+module_platform_driver(cpu0_cpufreq_platdrv);
 
 MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>");
 MODULE_DESCRIPTION("Generic CPU0 cpufreq driver");
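With the late_initcall() gone, cpu0_cpufreq_probe() only runs once some platform code instantiates a matching "cpufreq-cpu0" platform device. A hedged sketch of what a SoC init file would do (hypothetical function name; the highbank-cpufreq driver added later in this series performs this kind of registration):

	static int __init example_soc_cpufreq_init(void)
	{
		/* name must match cpu0_cpufreq_platdrv.driver.name */
		platform_device_register_simple("cpufreq-cpu0", -1, NULL, 0);
		return 0;
	}
	late_initcall(example_soc_cpufreq_init);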
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 99faadf454ec..b02824d092e7 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -59,8 +59,6 @@ static DEFINE_SPINLOCK(cpufreq_driver_lock);
  * mode before doing so.
  *
  * Additional rules:
- * - All holders of the lock should check to make sure that the CPU they
- *   are concerned with are online after they get the lock.
  * - Governor routines that can be called in cpufreq hotplug path should not
  *   take this sem as top level hotplug notifier handler takes this.
  * - Lock should not be held across
@@ -70,38 +68,28 @@ static DEFINE_PER_CPU(int, cpufreq_policy_cpu);
 static DEFINE_PER_CPU(struct rw_semaphore, cpu_policy_rwsem);
 
 #define lock_policy_rwsem(mode, cpu) \
-static int lock_policy_rwsem_##mode \
-(int cpu) \
+static int lock_policy_rwsem_##mode(int cpu) \
 { \
 	int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); \
 	BUG_ON(policy_cpu == -1); \
 	down_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \
-	if (unlikely(!cpu_online(cpu))) { \
-		up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \
-		return -1; \
-	} \
 	\
 	return 0; \
 }
 
 lock_policy_rwsem(read, cpu);
-
 lock_policy_rwsem(write, cpu);
 
-static void unlock_policy_rwsem_read(int cpu)
-{
-	int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);
-	BUG_ON(policy_cpu == -1);
-	up_read(&per_cpu(cpu_policy_rwsem, policy_cpu));
-}
-
-static void unlock_policy_rwsem_write(int cpu)
-{
-	int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);
-	BUG_ON(policy_cpu == -1);
-	up_write(&per_cpu(cpu_policy_rwsem, policy_cpu));
+#define unlock_policy_rwsem(mode, cpu) \
+static void unlock_policy_rwsem_##mode(int cpu) \
+{ \
+	int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu); \
+	BUG_ON(policy_cpu == -1); \
+	up_##mode(&per_cpu(cpu_policy_rwsem, policy_cpu)); \
 }
 
+unlock_policy_rwsem(read, cpu);
+unlock_policy_rwsem(write, cpu);
 
 /* internal prototypes */
 static int __cpufreq_governor(struct cpufreq_policy *policy,
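For readers unpacking the macro: after preprocessing, lock_policy_rwsem(read, cpu) expands to exactly the function it replaces, minus the dropped cpu_online() check:

	static int lock_policy_rwsem_read(int cpu)
	{
		int policy_cpu = per_cpu(cpufreq_policy_cpu, cpu);
		BUG_ON(policy_cpu == -1);
		down_read(&per_cpu(cpu_policy_rwsem, policy_cpu));

		return 0;
	}

The unlock_policy_rwsem() macro expands the same way for up_read()/up_write().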
@@ -180,6 +168,9 @@ err_out:
 
 struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
 {
+	if (cpufreq_disabled())
+		return NULL;
+
 	return __cpufreq_cpu_get(cpu, false);
 }
 EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
@@ -198,6 +189,9 @@ static void __cpufreq_cpu_put(struct cpufreq_policy *data, bool sysfs)
 
 void cpufreq_cpu_put(struct cpufreq_policy *data)
 {
+	if (cpufreq_disabled())
+		return;
+
 	__cpufreq_cpu_put(data, false);
 }
 EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
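cpufreq_disabled(), tested by both new guards above, is the existing flag that architectures set via disable_cpufreq(); in cpufreq.c it is roughly:

	static int off __read_mostly;

	static int cpufreq_disabled(void)
	{
		return off;
	}

	void disable_cpufreq(void)
	{
		off = 1;
	}

so every public entry point can now bail out early on such systems instead of touching an unregistered driver.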
@@ -261,14 +255,21 @@ static inline void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
 void cpufreq_notify_transition(struct cpufreq_freqs *freqs, unsigned int state)
 {
 	struct cpufreq_policy *policy;
+	unsigned long flags;
 
 	BUG_ON(irqs_disabled());
 
+	if (cpufreq_disabled())
+		return;
+
 	freqs->flags = cpufreq_driver->flags;
 	pr_debug("notification %u of frequency transition to %u kHz\n",
 		state, freqs->new);
 
+	spin_lock_irqsave(&cpufreq_driver_lock, flags);
 	policy = per_cpu(cpufreq_cpu_data, freqs->cpu);
+	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
 	switch (state) {
 
 	case CPUFREQ_PRECHANGE:
@@ -542,8 +543,6 @@ static ssize_t show_cpus(const struct cpumask *mask, char *buf)
  */
 static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
 {
-	if (cpumask_empty(policy->related_cpus))
-		return show_cpus(policy->cpus, buf);
 	return show_cpus(policy->related_cpus, buf);
 }
 
@@ -699,87 +698,6 @@ static struct kobj_type ktype_cpufreq = {
 	.release	= cpufreq_sysfs_release,
 };
 
-/*
- * Returns:
- *   Negative: Failure
- *   0:        Success
- *   Positive: When we have a managed CPU and the sysfs got symlinked
- */
-static int cpufreq_add_dev_policy(unsigned int cpu,
-				  struct cpufreq_policy *policy,
-				  struct device *dev)
-{
-	int ret = 0;
-#ifdef CONFIG_SMP
-	unsigned long flags;
-	unsigned int j;
-#ifdef CONFIG_HOTPLUG_CPU
-	struct cpufreq_governor *gov;
-
-	gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
-	if (gov) {
-		policy->governor = gov;
-		pr_debug("Restoring governor %s for cpu %d\n",
-			 policy->governor->name, cpu);
-	}
-#endif
-
-	for_each_cpu(j, policy->cpus) {
-		struct cpufreq_policy *managed_policy;
-
-		if (cpu == j)
-			continue;
-
-		/* Check for existing affected CPUs.
-		 * They may not be aware of it due to CPU Hotplug.
-		 * cpufreq_cpu_put is called when the device is removed
-		 * in __cpufreq_remove_dev()
-		 */
-		managed_policy = cpufreq_cpu_get(j);
-		if (unlikely(managed_policy)) {
-
-			/* Set proper policy_cpu */
-			unlock_policy_rwsem_write(cpu);
-			per_cpu(cpufreq_policy_cpu, cpu) = managed_policy->cpu;
-
-			if (lock_policy_rwsem_write(cpu) < 0) {
-				/* Should not go through policy unlock path */
-				if (cpufreq_driver->exit)
-					cpufreq_driver->exit(policy);
-				cpufreq_cpu_put(managed_policy);
-				return -EBUSY;
-			}
-
-			spin_lock_irqsave(&cpufreq_driver_lock, flags);
-			cpumask_copy(managed_policy->cpus, policy->cpus);
-			per_cpu(cpufreq_cpu_data, cpu) = managed_policy;
-			spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
-
-			pr_debug("CPU already managed, adding link\n");
-			ret = sysfs_create_link(&dev->kobj,
-						&managed_policy->kobj,
-						"cpufreq");
-			if (ret)
-				cpufreq_cpu_put(managed_policy);
-			/*
-			 * Success. We only needed to be added to the mask.
-			 * Call driver->exit() because only the cpu parent of
-			 * the kobj needed to call init().
-			 */
-			if (cpufreq_driver->exit)
-				cpufreq_driver->exit(policy);
-
-			if (!ret)
-				return 1;
-			else
-				return ret;
-		}
-	}
-#endif
-	return ret;
-}
-
-
 /* symlink affected CPUs */
 static int cpufreq_add_dev_symlink(unsigned int cpu,
 				   struct cpufreq_policy *policy)
@@ -793,8 +711,6 @@ static int cpufreq_add_dev_symlink(unsigned int cpu,
 
 		if (j == cpu)
 			continue;
-		if (!cpu_online(j))
-			continue;
 
 		pr_debug("CPU %u already managed, adding link\n", j);
 		managed_policy = cpufreq_cpu_get(cpu);
@@ -851,8 +767,6 @@ static int cpufreq_add_dev_interface(unsigned int cpu,
 
 	spin_lock_irqsave(&cpufreq_driver_lock, flags);
 	for_each_cpu(j, policy->cpus) {
-		if (!cpu_online(j))
-			continue;
 		per_cpu(cpufreq_cpu_data, j) = policy;
 		per_cpu(cpufreq_policy_cpu, j) = policy->cpu;
 	}
@@ -884,6 +798,42 @@ err_out_kobj_put:
 	return ret;
 }
 
+#ifdef CONFIG_HOTPLUG_CPU
+static int cpufreq_add_policy_cpu(unsigned int cpu, unsigned int sibling,
+				  struct device *dev)
+{
+	struct cpufreq_policy *policy;
+	int ret = 0;
+	unsigned long flags;
+
+	policy = cpufreq_cpu_get(sibling);
+	WARN_ON(!policy);
+
+	__cpufreq_governor(policy, CPUFREQ_GOV_STOP);
+
+	lock_policy_rwsem_write(sibling);
+
+	spin_lock_irqsave(&cpufreq_driver_lock, flags);
+
+	cpumask_set_cpu(cpu, policy->cpus);
+	per_cpu(cpufreq_policy_cpu, cpu) = policy->cpu;
+	per_cpu(cpufreq_cpu_data, cpu) = policy;
+	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+
+	unlock_policy_rwsem_write(sibling);
+
+	__cpufreq_governor(policy, CPUFREQ_GOV_START);
+	__cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
+
+	ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
+	if (ret) {
+		cpufreq_cpu_put(policy);
+		return ret;
+	}
+
+	return 0;
+}
+#endif
 
 /**
  * cpufreq_add_dev - add a CPU device
@@ -896,12 +846,12 @@ err_out_kobj_put:
  */
 static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
 {
-	unsigned int cpu = dev->id;
-	int ret = 0, found = 0;
+	unsigned int j, cpu = dev->id;
+	int ret = -ENOMEM;
 	struct cpufreq_policy *policy;
 	unsigned long flags;
-	unsigned int j;
 #ifdef CONFIG_HOTPLUG_CPU
+	struct cpufreq_governor *gov;
 	int sibling;
 #endif
 
@@ -918,6 +868,19 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
 		cpufreq_cpu_put(policy);
 		return 0;
 	}
+
+#ifdef CONFIG_HOTPLUG_CPU
+	/* Check if this cpu was hot-unplugged earlier and has siblings */
+	spin_lock_irqsave(&cpufreq_driver_lock, flags);
+	for_each_online_cpu(sibling) {
+		struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
+		if (cp && cpumask_test_cpu(cpu, cp->related_cpus)) {
+			spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+			return cpufreq_add_policy_cpu(cpu, sibling, dev);
+		}
+	}
+	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+#endif
 #endif
 
 	if (!try_module_get(cpufreq_driver->owner)) {
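The effect of the new HOTPLUG_CPU block: re-onlining a CPU whose siblings kept the policy alive no longer re-runs the driver's ->init(). A hedged walk-through, assuming CPUs 0 and 1 share one policy:

	/* CPU 1 goes offline: __cpufreq_remove_dev() clears it from
	 * policy->cpus, but CPU 0 keeps the policy and its sysfs directory.
	 * CPU 1 comes back: the sibling loop above finds cpu 1 in CPU 0's
	 * related_cpus and short-circuits into
	 * cpufreq_add_policy_cpu(1, 0, dev), which only restarts the
	 * governor and re-creates the "cpufreq" symlink. */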
@@ -925,7 +888,6 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
 		goto module_out;
 	}
 
-	ret = -ENOMEM;
 	policy = kzalloc(sizeof(struct cpufreq_policy), GFP_KERNEL);
 	if (!policy)
 		goto nomem_out;
@@ -937,66 +899,58 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
 		goto err_free_cpumask;
 
 	policy->cpu = cpu;
+	policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
 	cpumask_copy(policy->cpus, cpumask_of(cpu));
 
 	/* Initially set CPU itself as the policy_cpu */
 	per_cpu(cpufreq_policy_cpu, cpu) = cpu;
-	ret = (lock_policy_rwsem_write(cpu) < 0);
-	WARN_ON(ret);
 
 	init_completion(&policy->kobj_unregister);
 	INIT_WORK(&policy->update, handle_update);
 
-	/* Set governor before ->init, so that driver could check it */
-#ifdef CONFIG_HOTPLUG_CPU
-	for_each_online_cpu(sibling) {
-		struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
-		if (cp && cp->governor &&
-		    (cpumask_test_cpu(cpu, cp->related_cpus))) {
-			policy->governor = cp->governor;
-			found = 1;
-			break;
-		}
-	}
-#endif
-	if (!found)
-		policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
 	/* call driver. From then on the cpufreq must be able
 	 * to accept all calls to ->verify and ->setpolicy for this CPU
 	 */
 	ret = cpufreq_driver->init(policy);
 	if (ret) {
 		pr_debug("initialization failed\n");
-		goto err_unlock_policy;
+		goto err_set_policy_cpu;
 	}
+
+	/* related cpus should at least have policy->cpus */
+	cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
+
+	/*
+	 * affected cpus must always be the ones which are online. We aren't
+	 * managing offline cpus here.
+	 */
+	cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
+
 	policy->user_policy.min = policy->min;
 	policy->user_policy.max = policy->max;
 
 	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
 				     CPUFREQ_START, policy);
 
-	ret = cpufreq_add_dev_policy(cpu, policy, dev);
-	if (ret) {
-		if (ret > 0)
-			/* This is a managed cpu, symlink created,
-			   exit with 0 */
-			ret = 0;
-		goto err_unlock_policy;
+#ifdef CONFIG_HOTPLUG_CPU
+	gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
+	if (gov) {
+		policy->governor = gov;
+		pr_debug("Restoring governor %s for cpu %d\n",
+			 policy->governor->name, cpu);
 	}
+#endif
 
 	ret = cpufreq_add_dev_interface(cpu, policy, dev);
 	if (ret)
 		goto err_out_unregister;
 
-	unlock_policy_rwsem_write(cpu);
-
 	kobject_uevent(&policy->kobj, KOBJ_ADD);
 	module_put(cpufreq_driver->owner);
 	pr_debug("initialization complete\n");
 
 	return 0;
 
-
 err_out_unregister:
 	spin_lock_irqsave(&cpufreq_driver_lock, flags);
 	for_each_cpu(j, policy->cpus)
@@ -1006,8 +960,8 @@ err_out_unregister:
 	kobject_put(&policy->kobj);
 	wait_for_completion(&policy->kobj_unregister);
 
-err_unlock_policy:
-	unlock_policy_rwsem_write(cpu);
+err_set_policy_cpu:
+	per_cpu(cpufreq_policy_cpu, cpu) = -1;
 	free_cpumask_var(policy->related_cpus);
 err_free_cpumask:
 	free_cpumask_var(policy->cpus);
@@ -1019,6 +973,22 @@ module_out:
 	return ret;
 }
 
+static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
+{
+	int j;
+
+	policy->last_cpu = policy->cpu;
+	policy->cpu = cpu;
+
+	for_each_cpu(j, policy->cpus)
+		per_cpu(cpufreq_policy_cpu, j) = cpu;
+
+#ifdef CONFIG_CPU_FREQ_TABLE
+	cpufreq_frequency_table_update_policy_cpu(policy);
+#endif
+	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
+			CPUFREQ_UPDATE_POLICY_CPU, policy);
+}
 
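update_policy_cpu() broadcasts the new CPUFREQ_UPDATE_POLICY_CPU event so that users of policy->cpu (cpufreq_stats, for one) can migrate their per-policy state when the owning CPU goes away. A hedged sketch of a consumer, with a hypothetical function name:

	static int example_policy_notifier(struct notifier_block *nb,
					   unsigned long event, void *data)
	{
		struct cpufreq_policy *policy = data;

		if (event == CPUFREQ_UPDATE_POLICY_CPU)
			pr_debug("policy owner moved from cpu%u to cpu%u\n",
				 policy->last_cpu, policy->cpu);

		return 0;
	}

Such a block would be registered with cpufreq_register_notifier(..., CPUFREQ_POLICY_NOTIFIER).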
 /**
  * __cpufreq_remove_dev - remove a CPU device
@@ -1029,129 +999,103 @@
  */
 static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
 {
-	unsigned int cpu = dev->id;
+	unsigned int cpu = dev->id, ret, cpus;
 	unsigned long flags;
 	struct cpufreq_policy *data;
 	struct kobject *kobj;
 	struct completion *cmp;
-#ifdef CONFIG_SMP
 	struct device *cpu_dev;
-	unsigned int j;
-#endif
 
-	pr_debug("unregistering CPU %u\n", cpu);
+	pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
 
 	spin_lock_irqsave(&cpufreq_driver_lock, flags);
+
 	data = per_cpu(cpufreq_cpu_data, cpu);
+	per_cpu(cpufreq_cpu_data, cpu) = NULL;
+
+	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
 	if (!data) {
-		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
-		unlock_policy_rwsem_write(cpu);
+		pr_debug("%s: No cpu_data found\n", __func__);
 		return -EINVAL;
 	}
-	per_cpu(cpufreq_cpu_data, cpu) = NULL;
 
+	if (cpufreq_driver->target)
+		__cpufreq_governor(data, CPUFREQ_GOV_STOP);
 
-#ifdef CONFIG_SMP
-	/* if this isn't the CPU which is the parent of the kobj, we
-	 * only need to unlink, put and exit
-	 */
-	if (unlikely(cpu != data->cpu)) {
-		pr_debug("removing link\n");
-		cpumask_clear_cpu(cpu, data->cpus);
-		spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
-		kobj = &dev->kobj;
-		cpufreq_cpu_put(data);
-		unlock_policy_rwsem_write(cpu);
-		sysfs_remove_link(kobj, "cpufreq");
-		return 0;
-	}
+#ifdef CONFIG_HOTPLUG_CPU
+	if (!cpufreq_driver->setpolicy)
+		strncpy(per_cpu(cpufreq_cpu_governor, cpu),
+			data->governor->name, CPUFREQ_NAME_LEN);
 #endif
 
-#ifdef CONFIG_SMP
+	WARN_ON(lock_policy_rwsem_write(cpu));
+	cpus = cpumask_weight(data->cpus);
+	cpumask_clear_cpu(cpu, data->cpus);
+	unlock_policy_rwsem_write(cpu);
 
-#ifdef CONFIG_HOTPLUG_CPU
-	strncpy(per_cpu(cpufreq_cpu_governor, cpu), data->governor->name,
-			CPUFREQ_NAME_LEN);
-#endif
+	if (cpu != data->cpu) {
+		sysfs_remove_link(&dev->kobj, "cpufreq");
+	} else if (cpus > 1) {
+		/* first sibling now owns the new sysfs dir */
+		cpu_dev = get_cpu_device(cpumask_first(data->cpus));
+		sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
+		ret = kobject_move(&data->kobj, &cpu_dev->kobj);
+		if (ret) {
+			pr_err("%s: Failed to move kobj: %d", __func__, ret);
 
-	/* if we have other CPUs still registered, we need to unlink them,
-	 * or else wait_for_completion below will lock up. Clean the
-	 * per_cpu(cpufreq_cpu_data) while holding the lock, and remove
-	 * the sysfs links afterwards.
-	 */
-	if (unlikely(cpumask_weight(data->cpus) > 1)) {
-		for_each_cpu(j, data->cpus) {
-			if (j == cpu)
-				continue;
-			per_cpu(cpufreq_cpu_data, j) = NULL;
-		}
-	}
+			WARN_ON(lock_policy_rwsem_write(cpu));
+			cpumask_set_cpu(cpu, data->cpus);
 
-	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
+			spin_lock_irqsave(&cpufreq_driver_lock, flags);
+			per_cpu(cpufreq_cpu_data, cpu) = data;
+			spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
 
-	if (unlikely(cpumask_weight(data->cpus) > 1)) {
-		for_each_cpu(j, data->cpus) {
-			if (j == cpu)
-				continue;
-			pr_debug("removing link for cpu %u\n", j);
-#ifdef CONFIG_HOTPLUG_CPU
-			strncpy(per_cpu(cpufreq_cpu_governor, j),
-				data->governor->name, CPUFREQ_NAME_LEN);
-#endif
-			cpu_dev = get_cpu_device(j);
-			kobj = &cpu_dev->kobj;
 			unlock_policy_rwsem_write(cpu);
-			sysfs_remove_link(kobj, "cpufreq");
-			lock_policy_rwsem_write(cpu);
-			cpufreq_cpu_put(data);
-		}
-	}
-#else
-	spin_unlock_irqrestore(&cpufreq_driver_lock, flags);
-#endif
 
-	if (cpufreq_driver->target)
-		__cpufreq_governor(data, CPUFREQ_GOV_STOP);
+			ret = sysfs_create_link(&cpu_dev->kobj, &data->kobj,
+					"cpufreq");
+			return -EINVAL;
+		}
 
-	kobj = &data->kobj;
-	cmp = &data->kobj_unregister;
-	unlock_policy_rwsem_write(cpu);
-	kobject_put(kobj);
+		WARN_ON(lock_policy_rwsem_write(cpu));
+		update_policy_cpu(data, cpu_dev->id);
+		unlock_policy_rwsem_write(cpu);
+		pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
+				__func__, cpu_dev->id, cpu);
+	}
 
-	/* we need to make sure that the underlying kobj is actually
-	 * not referenced anymore by anybody before we proceed with
-	 * unloading.
-	 */
-	pr_debug("waiting for dropping of refcount\n");
-	wait_for_completion(cmp);
-	pr_debug("wait complete\n");
+	pr_debug("%s: removing link, cpu: %d\n", __func__, cpu);
+	cpufreq_cpu_put(data);
 
-	lock_policy_rwsem_write(cpu);
-	if (cpufreq_driver->exit)
-		cpufreq_driver->exit(data);
-	unlock_policy_rwsem_write(cpu);
+	/* If cpu is last user of policy, free policy */
+	if (cpus == 1) {
+		lock_policy_rwsem_read(cpu);
+		kobj = &data->kobj;
+		cmp = &data->kobj_unregister;
+		unlock_policy_rwsem_read(cpu);
+		kobject_put(kobj);
+
+		/* we need to make sure that the underlying kobj is actually
+		 * not referenced anymore by anybody before we proceed with
+		 * unloading.
+		 */
+		pr_debug("waiting for dropping of refcount\n");
+		wait_for_completion(cmp);
+		pr_debug("wait complete\n");
 
-#ifdef CONFIG_HOTPLUG_CPU
-	/* when the CPU which is the parent of the kobj is hotplugged
-	 * offline, check for siblings, and create cpufreq sysfs interface
-	 * and symlinks
-	 */
-	if (unlikely(cpumask_weight(data->cpus) > 1)) {
-		/* first sibling now owns the new sysfs dir */
-		cpumask_clear_cpu(cpu, data->cpus);
-		cpufreq_add_dev(get_cpu_device(cpumask_first(data->cpus)), NULL);
+		if (cpufreq_driver->exit)
+			cpufreq_driver->exit(data);
 
-		/* finally remove our own symlink */
-		lock_policy_rwsem_write(cpu);
-		__cpufreq_remove_dev(dev, sif);
+		free_cpumask_var(data->related_cpus);
+		free_cpumask_var(data->cpus);
+		kfree(data);
+	} else if (cpufreq_driver->target) {
+		__cpufreq_governor(data, CPUFREQ_GOV_START);
+		__cpufreq_governor(data, CPUFREQ_GOV_LIMITS);
 	}
-#endif
-
-	free_cpumask_var(data->related_cpus);
-	free_cpumask_var(data->cpus);
-	kfree(data);
 
+	per_cpu(cpufreq_policy_cpu, cpu) = -1;
 	return 0;
 }
 
@@ -1164,9 +1108,6 @@ static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
 	if (cpu_is_offline(cpu))
 		return 0;
 
-	if (unlikely(lock_policy_rwsem_write(cpu)))
-		BUG();
-
 	retval = __cpufreq_remove_dev(dev, sif);
 	return retval;
 }
@@ -1215,9 +1156,13 @@ static void cpufreq_out_of_sync(unsigned int cpu, unsigned int old_freq,
  */
 unsigned int cpufreq_quick_get(unsigned int cpu)
 {
-	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+	struct cpufreq_policy *policy;
 	unsigned int ret_freq = 0;
 
+	if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
+		return cpufreq_driver->get(cpu);
+
+	policy = cpufreq_cpu_get(cpu);
 	if (policy) {
 		ret_freq = policy->cur;
 		cpufreq_cpu_put(policy);
@@ -1385,6 +1330,20 @@ static struct syscore_ops cpufreq_syscore_ops = {
 	.resume		= cpufreq_bp_resume,
 };
 
+/**
+ * cpufreq_get_current_driver - return current driver's name
+ *
+ * Return the name string of the currently loaded cpufreq driver
+ * or NULL, if none.
+ */
+const char *cpufreq_get_current_driver(void)
+{
+	if (cpufreq_driver)
+		return cpufreq_driver->name;
+
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
 
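A hedged usage sketch for the new export (cpufreq_stats in this series calls it to bail out when no scaling driver is registered); hypothetical caller:

	static int __init example_client_init(void)
	{
		const char *name = cpufreq_get_current_driver();

		if (!name)
			return -EINVAL;	/* no scaling driver loaded */

		pr_info("active cpufreq driver: %s\n", name);
		return 0;
	}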
 /*********************************************************************
  *                     NOTIFIER LISTS INTERFACE                      *
@@ -1407,6 +1366,9 @@ int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
 {
 	int ret;
 
+	if (cpufreq_disabled())
+		return -EINVAL;
+
 	WARN_ON(!init_cpufreq_transition_notifier_list_called);
 
 	switch (list) {
@@ -1441,6 +1403,9 @@ int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
 {
 	int ret;
 
+	if (cpufreq_disabled())
+		return -EINVAL;
+
 	switch (list) {
 	case CPUFREQ_TRANSITION_NOTIFIER:
 		ret = srcu_notifier_chain_unregister(
@@ -1486,7 +1451,7 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
 	if (target_freq == policy->cur)
 		return 0;
 
-	if (cpu_online(policy->cpu) && cpufreq_driver->target)
+	if (cpufreq_driver->target)
 		retval = cpufreq_driver->target(policy, target_freq, relation);
 
 	return retval;
@@ -1521,7 +1486,10 @@ int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu)
 {
 	int ret = 0;
 
-	if (!(cpu_online(cpu) && cpufreq_driver->getavg))
+	if (cpufreq_disabled())
+		return ret;
+
+	if (!cpufreq_driver->getavg)
 		return 0;
 
 	policy = cpufreq_cpu_get(policy->cpu);
@@ -1576,6 +1544,11 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
 						policy->cpu, event);
 	ret = policy->governor->governor(policy, event);
 
+	if (event == CPUFREQ_GOV_START)
+		policy->governor->initialized++;
+	else if (event == CPUFREQ_GOV_STOP)
+		policy->governor->initialized--;
+
 	/* we keep one module reference alive for
 			each CPU governed by this CPU */
 	if ((event != CPUFREQ_GOV_START) || ret)
@@ -1599,6 +1572,7 @@ int cpufreq_register_governor(struct cpufreq_governor *governor)
 
 	mutex_lock(&cpufreq_governor_mutex);
 
+	governor->initialized = 0;
 	err = -EBUSY;
 	if (__find_governor(governor->name) == NULL) {
 		err = 0;
@@ -1796,7 +1770,7 @@ int cpufreq_update_policy(unsigned int cpu)
 			pr_debug("Driver did not initialize current freq");
 			data->cur = policy.cur;
 		} else {
-			if (data->cur != policy.cur)
+			if (data->cur != policy.cur && cpufreq_driver->target)
 				cpufreq_out_of_sync(cpu, data->cur,
 								policy.cur);
 		}
@@ -1828,9 +1802,6 @@ static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
 			break;
 		case CPU_DOWN_PREPARE:
 		case CPU_DOWN_PREPARE_FROZEN:
-			if (unlikely(lock_policy_rwsem_write(cpu)))
-				BUG();
-
 			__cpufreq_remove_dev(dev, NULL);
 			break;
 		case CPU_DOWN_FAILED:
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 64ef737e7e72..4fd0006b1291 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -25,7 +25,7 @@
 
 #include "cpufreq_governor.h"
 
-/* Conservative governor macors */
+/* Conservative governor macros */
 #define DEF_FREQUENCY_UP_THRESHOLD		(80)
 #define DEF_FREQUENCY_DOWN_THRESHOLD		(20)
 #define DEF_SAMPLING_DOWN_FACTOR		(1)
@@ -113,17 +113,20 @@ static void cs_check_cpu(int cpu, unsigned int load)
 
 static void cs_dbs_timer(struct work_struct *work)
 {
+	struct delayed_work *dw = to_delayed_work(work);
 	struct cs_cpu_dbs_info_s *dbs_info = container_of(work,
 			struct cs_cpu_dbs_info_s, cdbs.work.work);
-	unsigned int cpu = dbs_info->cdbs.cpu;
+	unsigned int cpu = dbs_info->cdbs.cur_policy->cpu;
+	struct cs_cpu_dbs_info_s *core_dbs_info = &per_cpu(cs_cpu_dbs_info,
+			cpu);
 	int delay = delay_for_sampling_rate(cs_tuners.sampling_rate);
 
-	mutex_lock(&dbs_info->cdbs.timer_mutex);
+	mutex_lock(&core_dbs_info->cdbs.timer_mutex);
+	if (need_load_eval(&core_dbs_info->cdbs, cs_tuners.sampling_rate))
+		dbs_check_cpu(&cs_dbs_data, cpu);
 
-	dbs_check_cpu(&cs_dbs_data, cpu);
-
-	schedule_delayed_work_on(cpu, &dbs_info->cdbs.work, delay);
-	mutex_unlock(&dbs_info->cdbs.timer_mutex);
+	schedule_delayed_work_on(smp_processor_id(), dw, delay);
+	mutex_unlock(&core_dbs_info->cdbs.timer_mutex);
 }
 
 static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
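Why the re-arm now uses smp_processor_id(): with a shared policy, every CPU keeps its own delayed work, but all of them serialize on the policy owner's timer_mutex and only one actually samples per period, via need_load_eval() (added in cpufreq_governor.c below). Schematically:

	/* Per sampling period, two CPUs sharing one policy:
	 *   CPU0 timer fires -> need_load_eval() true  -> dbs_check_cpu()
	 *   CPU1 timer fires -> sampled too recently   -> skip evaluation
	 * Each handler re-arms only its own work via smp_processor_id(). */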
@@ -141,7 +144,7 @@ static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
 
 	/*
 	 * we only care if our internally tracked freq moves outside the 'valid'
-	 * ranges of freqency available to us otherwise we do not change it
+	 * ranges of frequency available to us otherwise we do not change it
 	 */
 	if (dbs_info->requested_freq > policy->max
 			|| dbs_info->requested_freq < policy->min)
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 6c5f1d383cdc..5a76086ff09b 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -161,25 +161,48 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
 }
 EXPORT_SYMBOL_GPL(dbs_check_cpu);
 
-static inline void dbs_timer_init(struct dbs_data *dbs_data,
-		struct cpu_dbs_common_info *cdbs, unsigned int sampling_rate)
+static inline void dbs_timer_init(struct dbs_data *dbs_data, int cpu,
+		unsigned int sampling_rate)
 {
 	int delay = delay_for_sampling_rate(sampling_rate);
+	struct cpu_dbs_common_info *cdbs = dbs_data->get_cpu_cdbs(cpu);
 
-	INIT_DEFERRABLE_WORK(&cdbs->work, dbs_data->gov_dbs_timer);
-	schedule_delayed_work_on(cdbs->cpu, &cdbs->work, delay);
+	schedule_delayed_work_on(cpu, &cdbs->work, delay);
 }
 
-static inline void dbs_timer_exit(struct cpu_dbs_common_info *cdbs)
+static inline void dbs_timer_exit(struct dbs_data *dbs_data, int cpu)
 {
+	struct cpu_dbs_common_info *cdbs = dbs_data->get_cpu_cdbs(cpu);
+
 	cancel_delayed_work_sync(&cdbs->work);
 }
 
+/* Will return if we need to evaluate cpu load again or not */
+bool need_load_eval(struct cpu_dbs_common_info *cdbs,
+		unsigned int sampling_rate)
+{
+	if (policy_is_shared(cdbs->cur_policy)) {
+		ktime_t time_now = ktime_get();
+		s64 delta_us = ktime_us_delta(time_now, cdbs->time_stamp);
+
+		/* Do nothing if we recently have sampled */
+		if (delta_us < (s64)(sampling_rate / 2))
+			return false;
+		else
+			cdbs->time_stamp = time_now;
+	}
+
+	return true;
+}
+EXPORT_SYMBOL_GPL(need_load_eval);
+
 int cpufreq_governor_dbs(struct dbs_data *dbs_data,
 		struct cpufreq_policy *policy, unsigned int event)
 {
 	struct od_cpu_dbs_info_s *od_dbs_info = NULL;
 	struct cs_cpu_dbs_info_s *cs_dbs_info = NULL;
+	struct cs_ops *cs_ops = NULL;
+	struct od_ops *od_ops = NULL;
 	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
 	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
 	struct cpu_dbs_common_info *cpu_cdbs;
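A worked example of the rate limiting in need_load_eval(), assuming sampling_rate = 10000 us and a policy shared by CPUs 0 and 1:

	/* t =     0 us: CPU0 fires, delta >= 5000 -> update time_stamp, evaluate
	 * t =   400 us: CPU1 fires, delta = 400 < 10000/2 -> return false, skip
	 * t = 10000 us: CPU0 fires, delta = 10000 >= 5000 -> evaluate again
	 * Policies owned by a single CPU skip the check and always evaluate. */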
@@ -192,109 +215,111 @@ int cpufreq_governor_dbs(struct dbs_data *dbs_data, | |||
192 | cs_dbs_info = dbs_data->get_cpu_dbs_info_s(cpu); | 215 | cs_dbs_info = dbs_data->get_cpu_dbs_info_s(cpu); |
193 | sampling_rate = &cs_tuners->sampling_rate; | 216 | sampling_rate = &cs_tuners->sampling_rate; |
194 | ignore_nice = cs_tuners->ignore_nice; | 217 | ignore_nice = cs_tuners->ignore_nice; |
218 | cs_ops = dbs_data->gov_ops; | ||
195 | } else { | 219 | } else { |
196 | od_dbs_info = dbs_data->get_cpu_dbs_info_s(cpu); | 220 | od_dbs_info = dbs_data->get_cpu_dbs_info_s(cpu); |
197 | sampling_rate = &od_tuners->sampling_rate; | 221 | sampling_rate = &od_tuners->sampling_rate; |
198 | ignore_nice = od_tuners->ignore_nice; | 222 | ignore_nice = od_tuners->ignore_nice; |
223 | od_ops = dbs_data->gov_ops; | ||
199 | } | 224 | } |
200 | 225 | ||
201 | switch (event) { | 226 | switch (event) { |
202 | case CPUFREQ_GOV_START: | 227 | case CPUFREQ_GOV_START: |
203 | if ((!cpu_online(cpu)) || (!policy->cur)) | 228 | if (!policy->cur) |
204 | return -EINVAL; | 229 | return -EINVAL; |
205 | 230 | ||
206 | mutex_lock(&dbs_data->mutex); | 231 | mutex_lock(&dbs_data->mutex); |
207 | 232 | ||
208 | dbs_data->enable++; | ||
209 | cpu_cdbs->cpu = cpu; | ||
210 | for_each_cpu(j, policy->cpus) { | 233 | for_each_cpu(j, policy->cpus) { |
211 | struct cpu_dbs_common_info *j_cdbs; | 234 | struct cpu_dbs_common_info *j_cdbs = |
212 | j_cdbs = dbs_data->get_cpu_cdbs(j); | 235 | dbs_data->get_cpu_cdbs(j); |
213 | 236 | ||
237 | j_cdbs->cpu = j; | ||
214 | j_cdbs->cur_policy = policy; | 238 | j_cdbs->cur_policy = policy; |
215 | j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, | 239 | j_cdbs->prev_cpu_idle = get_cpu_idle_time(j, |
216 | &j_cdbs->prev_cpu_wall); | 240 | &j_cdbs->prev_cpu_wall); |
217 | if (ignore_nice) | 241 | if (ignore_nice) |
218 | j_cdbs->prev_cpu_nice = | 242 | j_cdbs->prev_cpu_nice = |
219 | kcpustat_cpu(j).cpustat[CPUTIME_NICE]; | 243 | kcpustat_cpu(j).cpustat[CPUTIME_NICE]; |
220 | } | ||
221 | 244 | ||
222 | /* | 245 | mutex_init(&j_cdbs->timer_mutex); |
223 | * Start the timerschedule work, when this governor is used for | 246 | INIT_DEFERRABLE_WORK(&j_cdbs->work, |
224 | * first time | 247 | dbs_data->gov_dbs_timer); |
225 | */ | ||
226 | if (dbs_data->enable != 1) | ||
227 | goto second_time; | ||
228 | |||
229 | rc = sysfs_create_group(cpufreq_global_kobject, | ||
230 | dbs_data->attr_group); | ||
231 | if (rc) { | ||
232 | mutex_unlock(&dbs_data->mutex); | ||
233 | return rc; | ||
234 | } | 248 | } |
235 | 249 | ||
236 | /* policy latency is in nS. Convert it to uS first */ | 250 | if (!policy->governor->initialized) { |
237 | latency = policy->cpuinfo.transition_latency / 1000; | 251 | rc = sysfs_create_group(cpufreq_global_kobject, |
238 | if (latency == 0) | 252 | dbs_data->attr_group); |
239 | latency = 1; | 253 | if (rc) { |
254 | mutex_unlock(&dbs_data->mutex); | ||
255 | return rc; | ||
256 | } | ||
257 | } | ||
240 | 258 | ||
241 | /* | 259 | /* |
242 | * conservative does not implement micro accounting like the ondemand | 260 | * conservative does not implement micro accounting like the ondemand |
243 | * governor, thus we are bound to jiffies/HZ | 261 | * governor, thus we are bound to jiffies/HZ |
244 | */ | 262 | */ |
245 | if (dbs_data->governor == GOV_CONSERVATIVE) { | 263 | if (dbs_data->governor == GOV_CONSERVATIVE) { |
246 | struct cs_ops *ops = dbs_data->gov_ops; | 264 | cs_dbs_info->down_skip = 0; |
265 | cs_dbs_info->enable = 1; | ||
266 | cs_dbs_info->requested_freq = policy->cur; | ||
247 | 267 | ||
248 | cpufreq_register_notifier(ops->notifier_block, | 268 | if (!policy->governor->initialized) { |
249 | CPUFREQ_TRANSITION_NOTIFIER); | 269 | cpufreq_register_notifier(cs_ops->notifier_block, |
270 | CPUFREQ_TRANSITION_NOTIFIER); | ||
250 | 271 | ||
251 | dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO * | 272 | dbs_data->min_sampling_rate = |
252 | jiffies_to_usecs(10); | 273 | MIN_SAMPLING_RATE_RATIO * |
274 | jiffies_to_usecs(10); | ||
275 | } | ||
253 | } else { | 276 | } else { |
254 | struct od_ops *ops = dbs_data->gov_ops; | 277 | od_dbs_info->rate_mult = 1; |
278 | od_dbs_info->sample_type = OD_NORMAL_SAMPLE; | ||
279 | od_ops->powersave_bias_init_cpu(cpu); | ||
255 | 280 | ||
256 | od_tuners->io_is_busy = ops->io_busy(); | 281 | if (!policy->governor->initialized) |
282 | od_tuners->io_is_busy = od_ops->io_busy(); | ||
257 | } | 283 | } |
258 | 284 | ||
285 | if (policy->governor->initialized) | ||
286 | goto unlock; | ||
287 | |||
288 | /* policy latency is in nS. Convert it to uS first */ | ||
289 | latency = policy->cpuinfo.transition_latency / 1000; | ||
290 | if (latency == 0) | ||
291 | latency = 1; | ||
292 | |||
259 | /* Bring kernel and HW constraints together */ | 293 | /* Bring kernel and HW constraints together */ |
260 | dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate, | 294 | dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate, |
261 | MIN_LATENCY_MULTIPLIER * latency); | 295 | MIN_LATENCY_MULTIPLIER * latency); |
262 | *sampling_rate = max(dbs_data->min_sampling_rate, latency * | 296 | *sampling_rate = max(dbs_data->min_sampling_rate, latency * |
263 | LATENCY_MULTIPLIER); | 297 | LATENCY_MULTIPLIER); |
264 | 298 | unlock: | |
265 | second_time: | ||
266 | if (dbs_data->governor == GOV_CONSERVATIVE) { | ||
267 | cs_dbs_info->down_skip = 0; | ||
268 | cs_dbs_info->enable = 1; | ||
269 | cs_dbs_info->requested_freq = policy->cur; | ||
270 | } else { | ||
271 | struct od_ops *ops = dbs_data->gov_ops; | ||
272 | od_dbs_info->rate_mult = 1; | ||
273 | od_dbs_info->sample_type = OD_NORMAL_SAMPLE; | ||
274 | ops->powersave_bias_init_cpu(cpu); | ||
275 | } | ||
276 | mutex_unlock(&dbs_data->mutex); | 299 | mutex_unlock(&dbs_data->mutex); |
277 | 300 | ||
278 | mutex_init(&cpu_cdbs->timer_mutex); | 301 | /* Initiate timer time stamp */ |
279 | dbs_timer_init(dbs_data, cpu_cdbs, *sampling_rate); | 302 | cpu_cdbs->time_stamp = ktime_get(); |
303 | |||
304 | for_each_cpu(j, policy->cpus) | ||
305 | dbs_timer_init(dbs_data, j, *sampling_rate); | ||
280 | break; | 306 | break; |
281 | 307 | ||
282 | case CPUFREQ_GOV_STOP: | 308 | case CPUFREQ_GOV_STOP: |
283 | if (dbs_data->governor == GOV_CONSERVATIVE) | 309 | if (dbs_data->governor == GOV_CONSERVATIVE) |
284 | cs_dbs_info->enable = 0; | 310 | cs_dbs_info->enable = 0; |
285 | 311 | ||
286 | dbs_timer_exit(cpu_cdbs); | 312 | for_each_cpu(j, policy->cpus) |
313 | dbs_timer_exit(dbs_data, j); | ||
287 | 314 | ||
288 | mutex_lock(&dbs_data->mutex); | 315 | mutex_lock(&dbs_data->mutex); |
289 | mutex_destroy(&cpu_cdbs->timer_mutex); | 316 | mutex_destroy(&cpu_cdbs->timer_mutex); |
290 | dbs_data->enable--; | ||
291 | if (!dbs_data->enable) { | ||
292 | struct cs_ops *ops = dbs_data->gov_ops; | ||
293 | 317 | ||
318 | if (policy->governor->initialized == 1) { | ||
294 | sysfs_remove_group(cpufreq_global_kobject, | 319 | sysfs_remove_group(cpufreq_global_kobject, |
295 | dbs_data->attr_group); | 320 | dbs_data->attr_group); |
296 | if (dbs_data->governor == GOV_CONSERVATIVE) | 321 | if (dbs_data->governor == GOV_CONSERVATIVE) |
297 | cpufreq_unregister_notifier(ops->notifier_block, | 322 | cpufreq_unregister_notifier(cs_ops->notifier_block, |
298 | CPUFREQ_TRANSITION_NOTIFIER); | 323 | CPUFREQ_TRANSITION_NOTIFIER); |
299 | } | 324 | } |
300 | mutex_unlock(&dbs_data->mutex); | 325 | mutex_unlock(&dbs_data->mutex); |
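The new need_load_eval() helper above is what keeps a shared policy from being re-evaluated once per CPU per sampling period now that every CPU in policy->cpus arms its own timer: only the first timer to fire in a half-period window actually runs the load check. A minimal plain-C sketch of the same rate-limiting idea, with illustrative names (this is not kernel API):

#include <stdbool.h>
#include <stdint.h>

static int64_t last_eval_us;	/* analogue of cdbs->time_stamp */

static bool should_eval(int64_t now_us, unsigned int sampling_rate_us,
			bool shared_policy)
{
	if (shared_policy) {
		/* Another CPU sampled less than half a period ago:
		 * skip this round rather than re-run the load check. */
		if (now_us - last_eval_us < (int64_t)(sampling_rate_us / 2))
			return false;
		last_eval_us = now_us;	/* claim this sampling period */
	}
	return true;
}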
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h index f6616540c53d..d2ac91150600 100644 --- a/drivers/cpufreq/cpufreq_governor.h +++ b/drivers/cpufreq/cpufreq_governor.h | |||
@@ -82,6 +82,7 @@ struct cpu_dbs_common_info { | |||
82 | * the governor or limits. | 82 | * the governor or limits. |
83 | */ | 83 | */ |
84 | struct mutex timer_mutex; | 84 | struct mutex timer_mutex; |
85 | ktime_t time_stamp; | ||
85 | }; | 86 | }; |
86 | 87 | ||
87 | struct od_cpu_dbs_info_s { | 88 | struct od_cpu_dbs_info_s { |
@@ -108,7 +109,7 @@ struct od_dbs_tuners { | |||
108 | unsigned int sampling_rate; | 109 | unsigned int sampling_rate; |
109 | unsigned int sampling_down_factor; | 110 | unsigned int sampling_down_factor; |
110 | unsigned int up_threshold; | 111 | unsigned int up_threshold; |
111 | unsigned int down_differential; | 112 | unsigned int adj_up_threshold; |
112 | unsigned int powersave_bias; | 113 | unsigned int powersave_bias; |
113 | unsigned int io_is_busy; | 114 | unsigned int io_is_busy; |
114 | }; | 115 | }; |
@@ -129,7 +130,6 @@ struct dbs_data { | |||
129 | #define GOV_CONSERVATIVE 1 | 130 | #define GOV_CONSERVATIVE 1 |
130 | int governor; | 131 | int governor; |
131 | unsigned int min_sampling_rate; | 132 | unsigned int min_sampling_rate; |
132 | unsigned int enable; /* number of CPUs using this policy */ | ||
133 | struct attribute_group *attr_group; | 133 | struct attribute_group *attr_group; |
134 | void *tuners; | 134 | void *tuners; |
135 | 135 | ||
@@ -171,6 +171,8 @@ static inline int delay_for_sampling_rate(unsigned int sampling_rate) | |||
171 | 171 | ||
172 | u64 get_cpu_idle_time(unsigned int cpu, u64 *wall); | 172 | u64 get_cpu_idle_time(unsigned int cpu, u64 *wall); |
173 | void dbs_check_cpu(struct dbs_data *dbs_data, int cpu); | 173 | void dbs_check_cpu(struct dbs_data *dbs_data, int cpu); |
174 | bool need_load_eval(struct cpu_dbs_common_info *cdbs, | ||
175 | unsigned int sampling_rate); | ||
174 | int cpufreq_governor_dbs(struct dbs_data *dbs_data, | 176 | int cpufreq_governor_dbs(struct dbs_data *dbs_data, |
175 | struct cpufreq_policy *policy, unsigned int event); | 177 | struct cpufreq_policy *policy, unsigned int event); |
176 | #endif /* _CPUFREQ_GOVERNER_H */ | 178 | #endif /* _CPUFREQ_GOVERNER_H */ |
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c index 7731f7c7e79a..f3eb26cd848f 100644 --- a/drivers/cpufreq/cpufreq_ondemand.c +++ b/drivers/cpufreq/cpufreq_ondemand.c | |||
@@ -26,7 +26,7 @@ | |||
26 | 26 | ||
27 | #include "cpufreq_governor.h" | 27 | #include "cpufreq_governor.h" |
28 | 28 | ||
29 | /* On-demand governor macors */ | 29 | /* On-demand governor macros */ |
30 | #define DEF_FREQUENCY_DOWN_DIFFERENTIAL (10) | 30 | #define DEF_FREQUENCY_DOWN_DIFFERENTIAL (10) |
31 | #define DEF_FREQUENCY_UP_THRESHOLD (80) | 31 | #define DEF_FREQUENCY_UP_THRESHOLD (80) |
32 | #define DEF_SAMPLING_DOWN_FACTOR (1) | 32 | #define DEF_SAMPLING_DOWN_FACTOR (1) |
@@ -47,7 +47,8 @@ static struct cpufreq_governor cpufreq_gov_ondemand; | |||
47 | static struct od_dbs_tuners od_tuners = { | 47 | static struct od_dbs_tuners od_tuners = { |
48 | .up_threshold = DEF_FREQUENCY_UP_THRESHOLD, | 48 | .up_threshold = DEF_FREQUENCY_UP_THRESHOLD, |
49 | .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR, | 49 | .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR, |
50 | .down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL, | 50 | .adj_up_threshold = DEF_FREQUENCY_UP_THRESHOLD - |
51 | DEF_FREQUENCY_DOWN_DIFFERENTIAL, | ||
51 | .ignore_nice = 0, | 52 | .ignore_nice = 0, |
52 | .powersave_bias = 0, | 53 | .powersave_bias = 0, |
53 | }; | 54 | }; |
@@ -65,7 +66,7 @@ static void ondemand_powersave_bias_init_cpu(int cpu) | |||
65 | * efficient idling at a higher frequency/voltage is. | 66 | * efficient idling at a higher frequency/voltage is. |
66 | * Pavel Machek says this is not so for various generations of AMD and old | 67 | * Pavel Machek says this is not so for various generations of AMD and old |
67 | * Intel systems. | 68 | * Intel systems. |
68 | * Mike Chan (androidlcom) calis this is also not true for ARM. | 69 | * Mike Chan (android.com) claims this is also not true for ARM. |
69 | * Because of this, whitelist specific known (series) of CPUs by default, and | 70 | * Because of this, whitelist specific known (series) of CPUs by default, and |
70 | * leave all others up to the user. | 71 | * leave all others up to the user. |
71 | */ | 72 | */ |
@@ -73,7 +74,7 @@ static int should_io_be_busy(void) | |||
73 | { | 74 | { |
74 | #if defined(CONFIG_X86) | 75 | #if defined(CONFIG_X86) |
75 | /* | 76 | /* |
76 | * For Intel, Core 2 (model 15) andl later have an efficient idle. | 77 | * For Intel, Core 2 (model 15) and later have an efficient idle. |
77 | */ | 78 | */ |
78 | if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && | 79 | if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && |
79 | boot_cpu_data.x86 == 6 && | 80 | boot_cpu_data.x86 == 6 && |
@@ -158,8 +159,8 @@ static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq) | |||
158 | 159 | ||
159 | /* | 160 | /* |
160 | * Every sampling_rate, we check, if current idle time is less than 20% | 161 | * Every sampling_rate, we check, if current idle time is less than 20% |
161 | * (default), then we try to increase frequency Every sampling_rate, we look for | 162 | * (default), then we try to increase frequency. Every sampling_rate, we look |
162 | * a the lowest frequency which can sustain the load while keeping idle time | 163 | * for the lowest frequency which can sustain the load while keeping idle time |
163 | * over 30%. If such a frequency exists, we try to decrease to this frequency. | 164 | * over 30%. If such a frequency exists, we try to decrease to this frequency. |
164 | * | 165 | * |
165 | * Any frequency increase takes it to the maximum frequency. Frequency reduction | 166 | * Any frequency increase takes it to the maximum frequency. Frequency reduction |
@@ -192,11 +193,9 @@ static void od_check_cpu(int cpu, unsigned int load_freq) | |||
192 | * support the current CPU usage without triggering the up policy. To be | 193 | * support the current CPU usage without triggering the up policy. To be |
193 | * safe, we focus 10 points under the threshold. | 194 | * safe, we focus 10 points under the threshold. |
194 | */ | 195 | */ |
195 | if (load_freq < (od_tuners.up_threshold - od_tuners.down_differential) * | 196 | if (load_freq < od_tuners.adj_up_threshold * policy->cur) { |
196 | policy->cur) { | ||
197 | unsigned int freq_next; | 197 | unsigned int freq_next; |
198 | freq_next = load_freq / (od_tuners.up_threshold - | 198 | freq_next = load_freq / od_tuners.adj_up_threshold; |
199 | od_tuners.down_differential); | ||
200 | 199 | ||
201 | /* No longer fully busy, reset rate_mult */ | 200 | /* No longer fully busy, reset rate_mult */ |
202 | dbs_info->rate_mult = 1; | 201 | dbs_info->rate_mult = 1; |
@@ -218,33 +217,42 @@ static void od_check_cpu(int cpu, unsigned int load_freq) | |||
218 | 217 | ||
219 | static void od_dbs_timer(struct work_struct *work) | 218 | static void od_dbs_timer(struct work_struct *work) |
220 | { | 219 | { |
220 | struct delayed_work *dw = to_delayed_work(work); | ||
221 | struct od_cpu_dbs_info_s *dbs_info = | 221 | struct od_cpu_dbs_info_s *dbs_info = |
222 | container_of(work, struct od_cpu_dbs_info_s, cdbs.work.work); | 222 | container_of(work, struct od_cpu_dbs_info_s, cdbs.work.work); |
223 | unsigned int cpu = dbs_info->cdbs.cpu; | 223 | unsigned int cpu = dbs_info->cdbs.cur_policy->cpu; |
224 | int delay, sample_type = dbs_info->sample_type; | 224 | struct od_cpu_dbs_info_s *core_dbs_info = &per_cpu(od_cpu_dbs_info, |
225 | cpu); | ||
226 | int delay, sample_type = core_dbs_info->sample_type; | ||
227 | bool eval_load; | ||
225 | 228 | ||
226 | mutex_lock(&dbs_info->cdbs.timer_mutex); | 229 | mutex_lock(&core_dbs_info->cdbs.timer_mutex); |
230 | eval_load = need_load_eval(&core_dbs_info->cdbs, | ||
231 | od_tuners.sampling_rate); | ||
227 | 232 | ||
228 | /* Common NORMAL_SAMPLE setup */ | 233 | /* Common NORMAL_SAMPLE setup */ |
229 | dbs_info->sample_type = OD_NORMAL_SAMPLE; | 234 | core_dbs_info->sample_type = OD_NORMAL_SAMPLE; |
230 | if (sample_type == OD_SUB_SAMPLE) { | 235 | if (sample_type == OD_SUB_SAMPLE) { |
231 | delay = dbs_info->freq_lo_jiffies; | 236 | delay = core_dbs_info->freq_lo_jiffies; |
232 | __cpufreq_driver_target(dbs_info->cdbs.cur_policy, | 237 | if (eval_load) |
233 | dbs_info->freq_lo, CPUFREQ_RELATION_H); | 238 | __cpufreq_driver_target(core_dbs_info->cdbs.cur_policy, |
239 | core_dbs_info->freq_lo, | ||
240 | CPUFREQ_RELATION_H); | ||
234 | } else { | 241 | } else { |
235 | dbs_check_cpu(&od_dbs_data, cpu); | 242 | if (eval_load) |
236 | if (dbs_info->freq_lo) { | 243 | dbs_check_cpu(&od_dbs_data, cpu); |
244 | if (core_dbs_info->freq_lo) { | ||
237 | /* Setup timer for SUB_SAMPLE */ | 245 | /* Setup timer for SUB_SAMPLE */ |
238 | dbs_info->sample_type = OD_SUB_SAMPLE; | 246 | core_dbs_info->sample_type = OD_SUB_SAMPLE; |
239 | delay = dbs_info->freq_hi_jiffies; | 247 | delay = core_dbs_info->freq_hi_jiffies; |
240 | } else { | 248 | } else { |
241 | delay = delay_for_sampling_rate(od_tuners.sampling_rate | 249 | delay = delay_for_sampling_rate(od_tuners.sampling_rate |
242 | * dbs_info->rate_mult); | 250 | * core_dbs_info->rate_mult); |
243 | } | 251 | } |
244 | } | 252 | } |
245 | 253 | ||
246 | schedule_delayed_work_on(cpu, &dbs_info->cdbs.work, delay); | 254 | schedule_delayed_work_on(smp_processor_id(), dw, delay); |
247 | mutex_unlock(&dbs_info->cdbs.timer_mutex); | 255 | mutex_unlock(&core_dbs_info->cdbs.timer_mutex); |
248 | } | 256 | } |
249 | 257 | ||
250 | /************************** sysfs interface ************************/ | 258 | /************************** sysfs interface ************************/ |
@@ -259,7 +267,7 @@ static ssize_t show_sampling_rate_min(struct kobject *kobj, | |||
259 | * update_sampling_rate - update sampling rate effective immediately if needed. | 267 | * update_sampling_rate - update sampling rate effective immediately if needed. |
260 | * @new_rate: new sampling rate | 268 | * @new_rate: new sampling rate |
261 | * | 269 | * |
262 | * If new rate is smaller than the old, simply updaing | 270 | * If new rate is smaller than the old, simply updating |
263 | * dbs_tuners_int.sampling_rate might not be appropriate. For example, if the | 271 | * dbs_tuners_int.sampling_rate might not be appropriate. For example, if the |
264 | * original sampling_rate was 1 second and the requested new sampling rate is 10 | 272 | * original sampling_rate was 1 second and the requested new sampling rate is 10 |
265 | * ms because the user needs immediate reaction from ondemand governor, but not | 273 | * ms because the user needs immediate reaction from ondemand governor, but not |
@@ -287,7 +295,7 @@ static void update_sampling_rate(unsigned int new_rate) | |||
287 | cpufreq_cpu_put(policy); | 295 | cpufreq_cpu_put(policy); |
288 | continue; | 296 | continue; |
289 | } | 297 | } |
290 | dbs_info = &per_cpu(od_cpu_dbs_info, policy->cpu); | 298 | dbs_info = &per_cpu(od_cpu_dbs_info, cpu); |
291 | cpufreq_cpu_put(policy); | 299 | cpufreq_cpu_put(policy); |
292 | 300 | ||
293 | mutex_lock(&dbs_info->cdbs.timer_mutex); | 301 | mutex_lock(&dbs_info->cdbs.timer_mutex); |
@@ -306,8 +314,7 @@ static void update_sampling_rate(unsigned int new_rate) | |||
306 | cancel_delayed_work_sync(&dbs_info->cdbs.work); | 314 | cancel_delayed_work_sync(&dbs_info->cdbs.work); |
307 | mutex_lock(&dbs_info->cdbs.timer_mutex); | 315 | mutex_lock(&dbs_info->cdbs.timer_mutex); |
308 | 316 | ||
309 | schedule_delayed_work_on(dbs_info->cdbs.cpu, | 317 | schedule_delayed_work_on(cpu, &dbs_info->cdbs.work, |
310 | &dbs_info->cdbs.work, | ||
311 | usecs_to_jiffies(new_rate)); | 318 | usecs_to_jiffies(new_rate)); |
312 | 319 | ||
313 | } | 320 | } |
@@ -351,6 +358,10 @@ static ssize_t store_up_threshold(struct kobject *a, struct attribute *b, | |||
351 | input < MIN_FREQUENCY_UP_THRESHOLD) { | 358 | input < MIN_FREQUENCY_UP_THRESHOLD) { |
352 | return -EINVAL; | 359 | return -EINVAL; |
353 | } | 360 | } |
361 | /* Calculate the new adj_up_threshold */ | ||
362 | od_tuners.adj_up_threshold += input; | ||
363 | od_tuners.adj_up_threshold -= od_tuners.up_threshold; | ||
364 | |||
354 | od_tuners.up_threshold = input; | 365 | od_tuners.up_threshold = input; |
355 | return count; | 366 | return count; |
356 | } | 367 | } |
@@ -507,7 +518,8 @@ static int __init cpufreq_gov_dbs_init(void) | |||
507 | if (idle_time != -1ULL) { | 518 | if (idle_time != -1ULL) { |
508 | /* Idle micro accounting is supported. Use finer thresholds */ | 519 | /* Idle micro accounting is supported. Use finer thresholds */ |
509 | od_tuners.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD; | 520 | od_tuners.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD; |
510 | od_tuners.down_differential = MICRO_FREQUENCY_DOWN_DIFFERENTIAL; | 521 | od_tuners.adj_up_threshold = MICRO_FREQUENCY_UP_THRESHOLD - |
522 | MICRO_FREQUENCY_DOWN_DIFFERENTIAL; | ||
511 | /* | 523 | /* |
512 | * In nohz/micro accounting case we set the minimum frequency | 524 | * In nohz/micro accounting case we set the minimum frequency |
513 | * not depending on HZ, but fixed (very low). The deferred | 525 | * not depending on HZ, but fixed (very low). The deferred |
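The adj_up_threshold tuner introduced here caches up_threshold - down_differential so that od_check_cpu() divides by one precomputed value; store_up_threshold() keeps the cache consistent by adding the new threshold and subtracting the old one. A worked sketch of the scale-down arithmetic with the default tuners (illustrative numbers, plain C):

#include <stdio.h>

int main(void)
{
	unsigned int up_threshold = 80;		/* DEF_FREQUENCY_UP_THRESHOLD */
	unsigned int down_differential = 10;	/* DEF_FREQUENCY_DOWN_DIFFERENTIAL */
	unsigned int adj_up_threshold = up_threshold - down_differential;

	unsigned int cur = 1000000;		/* current frequency, kHz */
	unsigned int load_freq = 50 * cur;	/* 50% load at cur */

	/* Scale down only while load stays below adj_up_threshold% of cur */
	if (load_freq < adj_up_threshold * cur)
		/* lowest frequency that sustains the load with ~30% idle:
		 * 50 * cur / 70 = 714285 kHz here */
		printf("freq_next = %u kHz\n", load_freq / adj_up_threshold);
	return 0;
}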
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c index 9d7732b81044..2fd779eb1ed1 100644 --- a/drivers/cpufreq/cpufreq_stats.c +++ b/drivers/cpufreq/cpufreq_stats.c | |||
@@ -24,12 +24,6 @@ | |||
24 | 24 | ||
25 | static spinlock_t cpufreq_stats_lock; | 25 | static spinlock_t cpufreq_stats_lock; |
26 | 26 | ||
27 | #define CPUFREQ_STATDEVICE_ATTR(_name, _mode, _show) \ | ||
28 | static struct freq_attr _attr_##_name = {\ | ||
29 | .attr = {.name = __stringify(_name), .mode = _mode, }, \ | ||
30 | .show = _show,\ | ||
31 | }; | ||
32 | |||
33 | struct cpufreq_stats { | 27 | struct cpufreq_stats { |
34 | unsigned int cpu; | 28 | unsigned int cpu; |
35 | unsigned int total_trans; | 29 | unsigned int total_trans; |
@@ -136,17 +130,17 @@ static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf) | |||
136 | return PAGE_SIZE; | 130 | return PAGE_SIZE; |
137 | return len; | 131 | return len; |
138 | } | 132 | } |
139 | CPUFREQ_STATDEVICE_ATTR(trans_table, 0444, show_trans_table); | 133 | cpufreq_freq_attr_ro(trans_table); |
140 | #endif | 134 | #endif |
141 | 135 | ||
142 | CPUFREQ_STATDEVICE_ATTR(total_trans, 0444, show_total_trans); | 136 | cpufreq_freq_attr_ro(total_trans); |
143 | CPUFREQ_STATDEVICE_ATTR(time_in_state, 0444, show_time_in_state); | 137 | cpufreq_freq_attr_ro(time_in_state); |
144 | 138 | ||
145 | static struct attribute *default_attrs[] = { | 139 | static struct attribute *default_attrs[] = { |
146 | &_attr_total_trans.attr, | 140 | &total_trans.attr, |
147 | &_attr_time_in_state.attr, | 141 | &time_in_state.attr, |
148 | #ifdef CONFIG_CPU_FREQ_STAT_DETAILS | 142 | #ifdef CONFIG_CPU_FREQ_STAT_DETAILS |
149 | &_attr_trans_table.attr, | 143 | &trans_table.attr, |
150 | #endif | 144 | #endif |
151 | NULL | 145 | NULL |
152 | }; | 146 | }; |
@@ -170,11 +164,13 @@ static int freq_table_get_index(struct cpufreq_stats *stat, unsigned int freq) | |||
170 | static void cpufreq_stats_free_table(unsigned int cpu) | 164 | static void cpufreq_stats_free_table(unsigned int cpu) |
171 | { | 165 | { |
172 | struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, cpu); | 166 | struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, cpu); |
167 | |||
173 | if (stat) { | 168 | if (stat) { |
169 | pr_debug("%s: Free stat table\n", __func__); | ||
174 | kfree(stat->time_in_state); | 170 | kfree(stat->time_in_state); |
175 | kfree(stat); | 171 | kfree(stat); |
172 | per_cpu(cpufreq_stats_table, cpu) = NULL; | ||
176 | } | 173 | } |
177 | per_cpu(cpufreq_stats_table, cpu) = NULL; | ||
178 | } | 174 | } |
179 | 175 | ||
180 | /* must be called early in the CPU removal sequence (before | 176 | /* must be called early in the CPU removal sequence (before |
@@ -183,8 +179,14 @@ static void cpufreq_stats_free_table(unsigned int cpu) | |||
183 | static void cpufreq_stats_free_sysfs(unsigned int cpu) | 179 | static void cpufreq_stats_free_sysfs(unsigned int cpu) |
184 | { | 180 | { |
185 | struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); | 181 | struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); |
186 | if (policy && policy->cpu == cpu) | 182 | |
183 | if (!cpufreq_frequency_get_table(cpu)) | ||
184 | return; | ||
185 | |||
186 | if (policy && !policy_is_shared(policy)) { | ||
187 | pr_debug("%s: Free sysfs stat\n", __func__); | ||
187 | sysfs_remove_group(&policy->kobj, &stats_attr_group); | 188 | sysfs_remove_group(&policy->kobj, &stats_attr_group); |
189 | } | ||
188 | if (policy) | 190 | if (policy) |
189 | cpufreq_cpu_put(policy); | 191 | cpufreq_cpu_put(policy); |
190 | } | 192 | } |
@@ -262,6 +264,19 @@ error_get_fail: | |||
262 | return ret; | 264 | return ret; |
263 | } | 265 | } |
264 | 266 | ||
267 | static void cpufreq_stats_update_policy_cpu(struct cpufreq_policy *policy) | ||
268 | { | ||
269 | struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, | ||
270 | policy->last_cpu); | ||
271 | |||
272 | pr_debug("Updating stats_table for new_cpu %u from last_cpu %u\n", | ||
273 | policy->cpu, policy->last_cpu); | ||
274 | per_cpu(cpufreq_stats_table, policy->cpu) = per_cpu(cpufreq_stats_table, | ||
275 | policy->last_cpu); | ||
276 | per_cpu(cpufreq_stats_table, policy->last_cpu) = NULL; | ||
277 | stat->cpu = policy->cpu; | ||
278 | } | ||
279 | |||
265 | static int cpufreq_stat_notifier_policy(struct notifier_block *nb, | 280 | static int cpufreq_stat_notifier_policy(struct notifier_block *nb, |
266 | unsigned long val, void *data) | 281 | unsigned long val, void *data) |
267 | { | 282 | { |
@@ -269,6 +284,12 @@ static int cpufreq_stat_notifier_policy(struct notifier_block *nb, | |||
269 | struct cpufreq_policy *policy = data; | 284 | struct cpufreq_policy *policy = data; |
270 | struct cpufreq_frequency_table *table; | 285 | struct cpufreq_frequency_table *table; |
271 | unsigned int cpu = policy->cpu; | 286 | unsigned int cpu = policy->cpu; |
287 | |||
288 | if (val == CPUFREQ_UPDATE_POLICY_CPU) { | ||
289 | cpufreq_stats_update_policy_cpu(policy); | ||
290 | return 0; | ||
291 | } | ||
292 | |||
272 | if (val != CPUFREQ_NOTIFY) | 293 | if (val != CPUFREQ_NOTIFY) |
273 | return 0; | 294 | return 0; |
274 | table = cpufreq_frequency_get_table(cpu); | 295 | table = cpufreq_frequency_get_table(cpu); |
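The new CPUFREQ_UPDATE_POLICY_CPU case hands the per-CPU stats table over from policy->last_cpu to policy->cpu instead of freeing and rebuilding it. The pattern, sketched with a plain array standing in for the per_cpu() accessors (illustrative, not the kernel helpers):

#include <stddef.h>

#define MAX_CPUS 8			/* illustrative bound */

struct stats { unsigned int cpu; };

static struct stats *stats_table[MAX_CPUS];

static void update_policy_cpu(unsigned int new_cpu, unsigned int last_cpu)
{
	struct stats *stat = stats_table[last_cpu];

	stats_table[new_cpu] = stat;	/* new owner takes the table */
	stats_table[last_cpu] = NULL;	/* old slot cleared, nothing freed */
	stat->cpu = new_cpu;		/* record the owning CPU */
}

cpufreq_frequency_table_update_policy_cpu() in freq_table.c below applies the same hand-over to cpufreq_show_table.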
diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c index c8c3d293cc57..bbeb9c0720a6 100644 --- a/drivers/cpufreq/cpufreq_userspace.c +++ b/drivers/cpufreq/cpufreq_userspace.c | |||
@@ -118,8 +118,6 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy, | |||
118 | 118 | ||
119 | switch (event) { | 119 | switch (event) { |
120 | case CPUFREQ_GOV_START: | 120 | case CPUFREQ_GOV_START: |
121 | if (!cpu_online(cpu)) | ||
122 | return -EINVAL; | ||
123 | BUG_ON(!policy->cur); | 121 | BUG_ON(!policy->cur); |
124 | mutex_lock(&userspace_mutex); | 122 | mutex_lock(&userspace_mutex); |
125 | 123 | ||
diff --git a/drivers/cpufreq/db8500-cpufreq.c b/drivers/cpufreq/db8500-cpufreq.c index 4f154bc0ebe4..79a84860ea56 100644 --- a/drivers/cpufreq/db8500-cpufreq.c +++ b/drivers/cpufreq/db8500-cpufreq.c | |||
@@ -128,9 +128,7 @@ static int __cpuinit db8500_cpufreq_init(struct cpufreq_policy *policy) | |||
128 | policy->cpuinfo.transition_latency = 20 * 1000; /* in ns */ | 128 | policy->cpuinfo.transition_latency = 20 * 1000; /* in ns */ |
129 | 129 | ||
130 | /* policy sharing between dual CPUs */ | 130 | /* policy sharing between dual CPUs */ |
131 | cpumask_copy(policy->cpus, cpu_present_mask); | 131 | cpumask_setall(policy->cpus); |
132 | |||
133 | policy->shared_type = CPUFREQ_SHARED_TYPE_ALL; | ||
134 | 132 | ||
135 | return 0; | 133 | return 0; |
136 | } | 134 | } |
diff --git a/drivers/cpufreq/exynos-cpufreq.c b/drivers/cpufreq/exynos-cpufreq.c index 7012ea8bf1e7..81eb84a24fa7 100644 --- a/drivers/cpufreq/exynos-cpufreq.c +++ b/drivers/cpufreq/exynos-cpufreq.c | |||
@@ -227,19 +227,7 @@ static int exynos_cpufreq_cpu_init(struct cpufreq_policy *policy) | |||
227 | /* set the transition latency value */ | 227 | /* set the transition latency value */ |
228 | policy->cpuinfo.transition_latency = 100000; | 228 | policy->cpuinfo.transition_latency = 100000; |
229 | 229 | ||
230 | /* | 230 | cpumask_setall(policy->cpus); |
231 | * EXYNOS4 multi-core processors has 2 cores | ||
232 | * that the frequency cannot be set independently. | ||
233 | * Each cpu is bound to the same speed. | ||
234 | * So the affected cpu is all of the cpus. | ||
235 | */ | ||
236 | if (num_online_cpus() == 1) { | ||
237 | cpumask_copy(policy->related_cpus, cpu_possible_mask); | ||
238 | cpumask_copy(policy->cpus, cpu_online_mask); | ||
239 | } else { | ||
240 | policy->shared_type = CPUFREQ_SHARED_TYPE_ANY; | ||
241 | cpumask_setall(policy->cpus); | ||
242 | } | ||
243 | 231 | ||
244 | return cpufreq_frequency_table_cpuinfo(policy, exynos_info->freq_table); | 232 | return cpufreq_frequency_table_cpuinfo(policy, exynos_info->freq_table); |
245 | } | 233 | } |
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c index 49cda256efb2..d7a79662e24c 100644 --- a/drivers/cpufreq/freq_table.c +++ b/drivers/cpufreq/freq_table.c | |||
@@ -63,9 +63,6 @@ int cpufreq_frequency_table_verify(struct cpufreq_policy *policy, | |||
63 | pr_debug("request for verification of policy (%u - %u kHz) for cpu %u\n", | 63 | pr_debug("request for verification of policy (%u - %u kHz) for cpu %u\n", |
64 | policy->min, policy->max, policy->cpu); | 64 | policy->min, policy->max, policy->cpu); |
65 | 65 | ||
66 | if (!cpu_online(policy->cpu)) | ||
67 | return -EINVAL; | ||
68 | |||
69 | cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, | 66 | cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, |
70 | policy->cpuinfo.max_freq); | 67 | policy->cpuinfo.max_freq); |
71 | 68 | ||
@@ -121,9 +118,6 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy, | |||
121 | break; | 118 | break; |
122 | } | 119 | } |
123 | 120 | ||
124 | if (!cpu_online(policy->cpu)) | ||
125 | return -EINVAL; | ||
126 | |||
127 | for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) { | 121 | for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) { |
128 | unsigned int freq = table[i].frequency; | 122 | unsigned int freq = table[i].frequency; |
129 | if (freq == CPUFREQ_ENTRY_INVALID) | 123 | if (freq == CPUFREQ_ENTRY_INVALID) |
@@ -227,6 +221,15 @@ void cpufreq_frequency_table_put_attr(unsigned int cpu) | |||
227 | } | 221 | } |
228 | EXPORT_SYMBOL_GPL(cpufreq_frequency_table_put_attr); | 222 | EXPORT_SYMBOL_GPL(cpufreq_frequency_table_put_attr); |
229 | 223 | ||
224 | void cpufreq_frequency_table_update_policy_cpu(struct cpufreq_policy *policy) | ||
225 | { | ||
226 | pr_debug("Updating show_table for new_cpu %u from last_cpu %u\n", | ||
227 | policy->cpu, policy->last_cpu); | ||
228 | per_cpu(cpufreq_show_table, policy->cpu) = per_cpu(cpufreq_show_table, | ||
229 | policy->last_cpu); | ||
230 | per_cpu(cpufreq_show_table, policy->last_cpu) = NULL; | ||
231 | } | ||
232 | |||
230 | struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu) | 233 | struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu) |
231 | { | 234 | { |
232 | return per_cpu(cpufreq_show_table, cpu); | 235 | return per_cpu(cpufreq_show_table, cpu); |
diff --git a/drivers/cpufreq/highbank-cpufreq.c b/drivers/cpufreq/highbank-cpufreq.c new file mode 100644 index 000000000000..66e3a71b81a3 --- /dev/null +++ b/drivers/cpufreq/highbank-cpufreq.c | |||
@@ -0,0 +1,120 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012 Calxeda, Inc. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 as | ||
6 | * published by the Free Software Foundation. | ||
7 | * | ||
8 | * This driver provides the clk notifier callbacks that are used when | ||
9 | * the cpufreq-cpu0 driver changes frequency, to alert the highbank | ||
10 | * EnergyCore Management Engine (ECME) about the need to change | ||
11 | * voltage. The ECME interfaces with the actual voltage regulators. | ||
12 | */ | ||
13 | |||
14 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
15 | |||
16 | #include <linux/kernel.h> | ||
17 | #include <linux/module.h> | ||
18 | #include <linux/clk.h> | ||
19 | #include <linux/cpu.h> | ||
20 | #include <linux/err.h> | ||
21 | #include <linux/of.h> | ||
22 | #include <linux/mailbox.h> | ||
23 | #include <linux/platform_device.h> | ||
24 | |||
25 | #define HB_CPUFREQ_CHANGE_NOTE 0x80000001 | ||
26 | #define HB_CPUFREQ_IPC_LEN 7 | ||
27 | #define HB_CPUFREQ_VOLT_RETRIES 15 | ||
28 | |||
29 | static int hb_voltage_change(unsigned int freq) | ||
30 | { | ||
31 | int i; | ||
32 | u32 msg[HB_CPUFREQ_IPC_LEN]; | ||
33 | |||
34 | msg[0] = HB_CPUFREQ_CHANGE_NOTE; | ||
35 | msg[1] = freq / 1000000; | ||
36 | for (i = 2; i < HB_CPUFREQ_IPC_LEN; i++) | ||
37 | msg[i] = 0; | ||
38 | |||
39 | return pl320_ipc_transmit(msg); | ||
40 | } | ||
41 | |||
42 | static int hb_cpufreq_clk_notify(struct notifier_block *nb, | ||
43 | unsigned long action, void *hclk) | ||
44 | { | ||
45 | struct clk_notifier_data *clk_data = hclk; | ||
46 | int i = 0; | ||
47 | |||
48 | if (action == PRE_RATE_CHANGE) { | ||
49 | if (clk_data->new_rate > clk_data->old_rate) | ||
50 | while (hb_voltage_change(clk_data->new_rate)) | ||
51 | if (i++ > HB_CPUFREQ_VOLT_RETRIES) | ||
52 | return NOTIFY_BAD; | ||
53 | } else if (action == POST_RATE_CHANGE) { | ||
54 | if (clk_data->new_rate < clk_data->old_rate) | ||
55 | while (hb_voltage_change(clk_data->new_rate)) | ||
56 | if (i++ > HB_CPUFREQ_VOLT_RETRIES) | ||
57 | return NOTIFY_BAD; | ||
58 | } | ||
59 | |||
60 | return NOTIFY_DONE; | ||
61 | } | ||
62 | |||
63 | static struct notifier_block hb_cpufreq_clk_nb = { | ||
64 | .notifier_call = hb_cpufreq_clk_notify, | ||
65 | }; | ||
66 | |||
67 | static int hb_cpufreq_driver_init(void) | ||
68 | { | ||
69 | struct platform_device_info devinfo = { .name = "cpufreq-cpu0", }; | ||
70 | struct device *cpu_dev; | ||
71 | struct clk *cpu_clk; | ||
72 | struct device_node *np; | ||
73 | int ret; | ||
74 | |||
75 | if (!of_machine_is_compatible("calxeda,highbank")) | ||
76 | return -ENODEV; | ||
77 | |||
78 | for_each_child_of_node(of_find_node_by_path("/cpus"), np) | ||
79 | if (of_get_property(np, "operating-points", NULL)) | ||
80 | break; | ||
81 | |||
82 | if (!np) { | ||
83 | pr_err("failed to find highbank cpufreq node\n"); | ||
84 | return -ENOENT; | ||
85 | } | ||
86 | |||
87 | cpu_dev = get_cpu_device(0); | ||
88 | if (!cpu_dev) { | ||
89 | pr_err("failed to get highbank cpufreq device\n"); | ||
90 | ret = -ENODEV; | ||
91 | goto out_put_node; | ||
92 | } | ||
93 | |||
94 | cpu_dev->of_node = np; | ||
95 | |||
96 | cpu_clk = clk_get(cpu_dev, NULL); | ||
97 | if (IS_ERR(cpu_clk)) { | ||
98 | ret = PTR_ERR(cpu_clk); | ||
99 | pr_err("failed to get cpu0 clock: %d\n", ret); | ||
100 | goto out_put_node; | ||
101 | } | ||
102 | |||
103 | ret = clk_notifier_register(cpu_clk, &hb_cpufreq_clk_nb); | ||
104 | if (ret) { | ||
105 | pr_err("failed to register clk notifier: %d\n", ret); | ||
106 | goto out_put_node; | ||
107 | } | ||
108 | |||
109 | /* Instantiate cpufreq-cpu0 */ | ||
110 | platform_device_register_full(&devinfo); | ||
111 | |||
112 | out_put_node: | ||
113 | of_node_put(np); | ||
114 | return ret; | ||
115 | } | ||
116 | module_init(hb_cpufreq_driver_init); | ||
117 | |||
118 | MODULE_AUTHOR("Mark Langsdorf <mark.langsdorf@calxeda.com>"); | ||
119 | MODULE_DESCRIPTION("Calxeda Highbank cpufreq driver"); | ||
120 | MODULE_LICENSE("GPL"); | ||
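The ordering in hb_cpufreq_clk_notify() is the essential part: voltage is raised before a rate increase (PRE_RATE_CHANGE) and lowered only after a rate decrease (POST_RATE_CHANGE), and a pre-change request that cannot secure the voltage within HB_CPUFREQ_VOLT_RETRIES attempts vetoes the transition with NOTIFY_BAD. A condensed sketch of that decision (illustrative helper, mirrors the notifier above):

static int voltage_change_needed(int pre_rate_change,
				 unsigned long old_hz, unsigned long new_hz)
{
	if (pre_rate_change)
		return new_hz > old_hz;	/* going faster: raise volts first */
	return new_hz < old_hz;		/* going slower: drop volts after */
}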
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c new file mode 100644 index 000000000000..d6b6ef350cb6 --- /dev/null +++ b/drivers/cpufreq/imx6q-cpufreq.c | |||
@@ -0,0 +1,336 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2013 Freescale Semiconductor, Inc. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 as | ||
6 | * published by the Free Software Foundation. | ||
7 | */ | ||
8 | |||
9 | #include <linux/clk.h> | ||
10 | #include <linux/cpufreq.h> | ||
11 | #include <linux/delay.h> | ||
12 | #include <linux/err.h> | ||
13 | #include <linux/module.h> | ||
14 | #include <linux/of.h> | ||
15 | #include <linux/opp.h> | ||
16 | #include <linux/platform_device.h> | ||
17 | #include <linux/regulator/consumer.h> | ||
18 | |||
19 | #define PU_SOC_VOLTAGE_NORMAL 1250000 | ||
20 | #define PU_SOC_VOLTAGE_HIGH 1275000 | ||
21 | #define FREQ_1P2_GHZ 1200000000 | ||
22 | |||
23 | static struct regulator *arm_reg; | ||
24 | static struct regulator *pu_reg; | ||
25 | static struct regulator *soc_reg; | ||
26 | |||
27 | static struct clk *arm_clk; | ||
28 | static struct clk *pll1_sys_clk; | ||
29 | static struct clk *pll1_sw_clk; | ||
30 | static struct clk *step_clk; | ||
31 | static struct clk *pll2_pfd2_396m_clk; | ||
32 | |||
33 | static struct device *cpu_dev; | ||
34 | static struct cpufreq_frequency_table *freq_table; | ||
35 | static unsigned int transition_latency; | ||
36 | |||
37 | static int imx6q_verify_speed(struct cpufreq_policy *policy) | ||
38 | { | ||
39 | return cpufreq_frequency_table_verify(policy, freq_table); | ||
40 | } | ||
41 | |||
42 | static unsigned int imx6q_get_speed(unsigned int cpu) | ||
43 | { | ||
44 | return clk_get_rate(arm_clk) / 1000; | ||
45 | } | ||
46 | |||
47 | static int imx6q_set_target(struct cpufreq_policy *policy, | ||
48 | unsigned int target_freq, unsigned int relation) | ||
49 | { | ||
50 | struct cpufreq_freqs freqs; | ||
51 | struct opp *opp; | ||
52 | unsigned long freq_hz, volt, volt_old; | ||
53 | unsigned int index, cpu; | ||
54 | int ret; | ||
55 | |||
56 | ret = cpufreq_frequency_table_target(policy, freq_table, target_freq, | ||
57 | relation, &index); | ||
58 | if (ret) { | ||
59 | dev_err(cpu_dev, "failed to match target frequency %d: %d\n", | ||
60 | target_freq, ret); | ||
61 | return ret; | ||
62 | } | ||
63 | |||
64 | freqs.new = freq_table[index].frequency; | ||
65 | freq_hz = freqs.new * 1000; | ||
66 | freqs.old = clk_get_rate(arm_clk) / 1000; | ||
67 | |||
68 | if (freqs.old == freqs.new) | ||
69 | return 0; | ||
70 | |||
71 | for_each_online_cpu(cpu) { | ||
72 | freqs.cpu = cpu; | ||
73 | cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); | ||
74 | } | ||
75 | |||
76 | rcu_read_lock(); | ||
77 | opp = opp_find_freq_ceil(cpu_dev, &freq_hz); | ||
78 | if (IS_ERR(opp)) { | ||
79 | rcu_read_unlock(); | ||
80 | dev_err(cpu_dev, "failed to find OPP for %ld\n", freq_hz); | ||
81 | return PTR_ERR(opp); | ||
82 | } | ||
83 | |||
84 | volt = opp_get_voltage(opp); | ||
85 | rcu_read_unlock(); | ||
86 | volt_old = regulator_get_voltage(arm_reg); | ||
87 | |||
88 | dev_dbg(cpu_dev, "%u MHz, %ld mV --> %u MHz, %ld mV\n", | ||
89 | freqs.old / 1000, volt_old / 1000, | ||
90 | freqs.new / 1000, volt / 1000); | ||
91 | |||
92 | /* scaling up? scale voltage before frequency */ | ||
93 | if (freqs.new > freqs.old) { | ||
94 | ret = regulator_set_voltage_tol(arm_reg, volt, 0); | ||
95 | if (ret) { | ||
96 | dev_err(cpu_dev, | ||
97 | "failed to scale vddarm up: %d\n", ret); | ||
98 | return ret; | ||
99 | } | ||
100 | |||
101 | /* | ||
102 | * Need to increase vddpu and vddsoc for safety | ||
103 | * if we are about to run at 1.2 GHz. | ||
104 | */ | ||
105 | if (freqs.new == FREQ_1P2_GHZ / 1000) { | ||
106 | regulator_set_voltage_tol(pu_reg, | ||
107 | PU_SOC_VOLTAGE_HIGH, 0); | ||
108 | regulator_set_voltage_tol(soc_reg, | ||
109 | PU_SOC_VOLTAGE_HIGH, 0); | ||
110 | } | ||
111 | } | ||
112 | |||
113 | /* | ||
114 | * The setpoints are selected per PLL/PDF frequencies, so we need to | ||
115 | * reprogram PLL for frequency scaling. The procedure of reprogramming | ||
116 | * PLL1 is as below. | ||
117 | * | ||
118 | * - Enable pll2_pfd2_396m_clk and reparent pll1_sw_clk to it | ||
119 | * - Reprogram pll1_sys_clk and reparent pll1_sw_clk back to it | ||
120 | * - Disable pll2_pfd2_396m_clk | ||
121 | */ | ||
122 | clk_prepare_enable(pll2_pfd2_396m_clk); | ||
123 | clk_set_parent(step_clk, pll2_pfd2_396m_clk); | ||
124 | clk_set_parent(pll1_sw_clk, step_clk); | ||
125 | if (freq_hz > clk_get_rate(pll2_pfd2_396m_clk)) { | ||
126 | clk_set_rate(pll1_sys_clk, freqs.new * 1000); | ||
127 | /* | ||
128 | * If we are leaving 396 MHz set-point, we need to enable | ||
129 | * pll1_sys_clk and disable pll2_pfd2_396m_clk to keep | ||
130 | * their use count correct. | ||
131 | */ | ||
132 | if (freqs.old * 1000 <= clk_get_rate(pll2_pfd2_396m_clk)) { | ||
133 | clk_prepare_enable(pll1_sys_clk); | ||
134 | clk_disable_unprepare(pll2_pfd2_396m_clk); | ||
135 | } | ||
136 | clk_set_parent(pll1_sw_clk, pll1_sys_clk); | ||
137 | clk_disable_unprepare(pll2_pfd2_396m_clk); | ||
138 | } else { | ||
139 | /* | ||
140 | * Disable pll1_sys_clk if pll2_pfd2_396m_clk is sufficient | ||
141 | * to provide the frequency. | ||
142 | */ | ||
143 | clk_disable_unprepare(pll1_sys_clk); | ||
144 | } | ||
145 | |||
146 | /* Ensure the arm clock divider is what we expect */ | ||
147 | ret = clk_set_rate(arm_clk, freqs.new * 1000); | ||
148 | if (ret) { | ||
149 | dev_err(cpu_dev, "failed to set clock rate: %d\n", ret); | ||
150 | regulator_set_voltage_tol(arm_reg, volt_old, 0); | ||
151 | return ret; | ||
152 | } | ||
153 | |||
154 | /* scaling down? scale voltage after frequency */ | ||
155 | if (freqs.new < freqs.old) { | ||
156 | ret = regulator_set_voltage_tol(arm_reg, volt, 0); | ||
157 | if (ret) | ||
158 | dev_warn(cpu_dev, | ||
159 | "failed to scale vddarm down: %d\n", ret); | ||
160 | |||
161 | if (freqs.old == FREQ_1P2_GHZ / 1000) { | ||
162 | regulator_set_voltage_tol(pu_reg, | ||
163 | PU_SOC_VOLTAGE_NORMAL, 0); | ||
164 | regulator_set_voltage_tol(soc_reg, | ||
165 | PU_SOC_VOLTAGE_NORMAL, 0); | ||
166 | } | ||
167 | } | ||
168 | |||
169 | for_each_online_cpu(cpu) { | ||
170 | freqs.cpu = cpu; | ||
171 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); | ||
172 | } | ||
173 | |||
174 | return 0; | ||
175 | } | ||
176 | |||
177 | static int imx6q_cpufreq_init(struct cpufreq_policy *policy) | ||
178 | { | ||
179 | int ret; | ||
180 | |||
181 | ret = cpufreq_frequency_table_cpuinfo(policy, freq_table); | ||
182 | if (ret) { | ||
183 | dev_err(cpu_dev, "invalid frequency table: %d\n", ret); | ||
184 | return ret; | ||
185 | } | ||
186 | |||
187 | policy->cpuinfo.transition_latency = transition_latency; | ||
188 | policy->cur = clk_get_rate(arm_clk) / 1000; | ||
189 | cpumask_setall(policy->cpus); | ||
190 | cpufreq_frequency_table_get_attr(freq_table, policy->cpu); | ||
191 | |||
192 | return 0; | ||
193 | } | ||
194 | |||
195 | static int imx6q_cpufreq_exit(struct cpufreq_policy *policy) | ||
196 | { | ||
197 | cpufreq_frequency_table_put_attr(policy->cpu); | ||
198 | return 0; | ||
199 | } | ||
200 | |||
201 | static struct freq_attr *imx6q_cpufreq_attr[] = { | ||
202 | &cpufreq_freq_attr_scaling_available_freqs, | ||
203 | NULL, | ||
204 | }; | ||
205 | |||
206 | static struct cpufreq_driver imx6q_cpufreq_driver = { | ||
207 | .verify = imx6q_verify_speed, | ||
208 | .target = imx6q_set_target, | ||
209 | .get = imx6q_get_speed, | ||
210 | .init = imx6q_cpufreq_init, | ||
211 | .exit = imx6q_cpufreq_exit, | ||
212 | .name = "imx6q-cpufreq", | ||
213 | .attr = imx6q_cpufreq_attr, | ||
214 | }; | ||
215 | |||
216 | static int imx6q_cpufreq_probe(struct platform_device *pdev) | ||
217 | { | ||
218 | struct device_node *np; | ||
219 | struct opp *opp; | ||
220 | unsigned long min_volt, max_volt; | ||
221 | int num, ret; | ||
222 | |||
223 | cpu_dev = &pdev->dev; | ||
224 | |||
225 | np = of_find_node_by_path("/cpus/cpu@0"); | ||
226 | if (!np) { | ||
227 | dev_err(cpu_dev, "failed to find cpu0 node\n"); | ||
228 | return -ENOENT; | ||
229 | } | ||
230 | |||
231 | cpu_dev->of_node = np; | ||
232 | |||
233 | arm_clk = devm_clk_get(cpu_dev, "arm"); | ||
234 | pll1_sys_clk = devm_clk_get(cpu_dev, "pll1_sys"); | ||
235 | pll1_sw_clk = devm_clk_get(cpu_dev, "pll1_sw"); | ||
236 | step_clk = devm_clk_get(cpu_dev, "step"); | ||
237 | pll2_pfd2_396m_clk = devm_clk_get(cpu_dev, "pll2_pfd2_396m"); | ||
238 | if (IS_ERR(arm_clk) || IS_ERR(pll1_sys_clk) || IS_ERR(pll1_sw_clk) || | ||
239 | IS_ERR(step_clk) || IS_ERR(pll2_pfd2_396m_clk)) { | ||
240 | dev_err(cpu_dev, "failed to get clocks\n"); | ||
241 | ret = -ENOENT; | ||
242 | goto put_node; | ||
243 | } | ||
244 | |||
245 | arm_reg = devm_regulator_get(cpu_dev, "arm"); | ||
246 | pu_reg = devm_regulator_get(cpu_dev, "pu"); | ||
247 | soc_reg = devm_regulator_get(cpu_dev, "soc"); | ||
248 | if (!arm_reg || !pu_reg || !soc_reg) { | ||
249 | dev_err(cpu_dev, "failed to get regulators\n"); | ||
250 | ret = -ENOENT; | ||
251 | goto put_node; | ||
252 | } | ||
253 | |||
254 | /* We expect an OPP table supplied by the platform */ | ||
255 | num = opp_get_opp_count(cpu_dev); | ||
256 | if (num < 0) { | ||
257 | ret = num; | ||
258 | dev_err(cpu_dev, "no OPP table is found: %d\n", ret); | ||
259 | goto put_node; | ||
260 | } | ||
261 | |||
262 | ret = opp_init_cpufreq_table(cpu_dev, &freq_table); | ||
263 | if (ret) { | ||
264 | dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret); | ||
265 | goto put_node; | ||
266 | } | ||
267 | |||
268 | if (of_property_read_u32(np, "clock-latency", &transition_latency)) | ||
269 | transition_latency = CPUFREQ_ETERNAL; | ||
270 | |||
271 | /* | ||
272 | * OPP is maintained in order of increasing frequency, and | ||
273 | * freq_table initialised from OPP is therefore sorted in the | ||
274 | * same order. | ||
275 | */ | ||
276 | rcu_read_lock(); | ||
277 | opp = opp_find_freq_exact(cpu_dev, | ||
278 | freq_table[0].frequency * 1000, true); | ||
279 | min_volt = opp_get_voltage(opp); | ||
280 | opp = opp_find_freq_exact(cpu_dev, | ||
281 | freq_table[--num].frequency * 1000, true); | ||
282 | max_volt = opp_get_voltage(opp); | ||
283 | rcu_read_unlock(); | ||
284 | ret = regulator_set_voltage_time(arm_reg, min_volt, max_volt); | ||
285 | if (ret > 0) | ||
286 | transition_latency += ret * 1000; | ||
287 | |||
288 | /* Count vddpu and vddsoc latency in for 1.2 GHz support */ | ||
289 | if (freq_table[num].frequency == FREQ_1P2_GHZ / 1000) { | ||
290 | ret = regulator_set_voltage_time(pu_reg, PU_SOC_VOLTAGE_NORMAL, | ||
291 | PU_SOC_VOLTAGE_HIGH); | ||
292 | if (ret > 0) | ||
293 | transition_latency += ret * 1000; | ||
294 | ret = regulator_set_voltage_time(soc_reg, PU_SOC_VOLTAGE_NORMAL, | ||
295 | PU_SOC_VOLTAGE_HIGH); | ||
296 | if (ret > 0) | ||
297 | transition_latency += ret * 1000; | ||
298 | } | ||
299 | |||
300 | ret = cpufreq_register_driver(&imx6q_cpufreq_driver); | ||
301 | if (ret) { | ||
302 | dev_err(cpu_dev, "failed to register driver: %d\n", ret); | ||
303 | goto free_freq_table; | ||
304 | } | ||
305 | |||
306 | of_node_put(np); | ||
307 | return 0; | ||
308 | |||
309 | free_freq_table: | ||
310 | opp_free_cpufreq_table(cpu_dev, &freq_table); | ||
311 | put_node: | ||
312 | of_node_put(np); | ||
313 | return ret; | ||
314 | } | ||
315 | |||
316 | static int imx6q_cpufreq_remove(struct platform_device *pdev) | ||
317 | { | ||
318 | cpufreq_unregister_driver(&imx6q_cpufreq_driver); | ||
319 | opp_free_cpufreq_table(cpu_dev, &freq_table); | ||
320 | |||
321 | return 0; | ||
322 | } | ||
323 | |||
324 | static struct platform_driver imx6q_cpufreq_platdrv = { | ||
325 | .driver = { | ||
326 | .name = "imx6q-cpufreq", | ||
327 | .owner = THIS_MODULE, | ||
328 | }, | ||
329 | .probe = imx6q_cpufreq_probe, | ||
330 | .remove = imx6q_cpufreq_remove, | ||
331 | }; | ||
332 | module_platform_driver(imx6q_cpufreq_platdrv); | ||
333 | |||
334 | MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>"); | ||
335 | MODULE_DESCRIPTION("Freescale i.MX6Q cpufreq driver"); | ||
336 | MODULE_LICENSE("GPL"); | ||
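imx6q_cpufreq_probe() folds the regulator ramp times into the advertised transition latency on top of the device tree "clock-latency" value; regulator_set_voltage_time() returns microseconds while the latency is kept in nanoseconds, hence the * 1000. A sketch of the accounting with made-up numbers:

#include <stdio.h>

int main(void)
{
	/* all figures below are illustrative */
	unsigned int latency = 61036;	/* ns, from the "clock-latency" property */
	int arm_ramp = 150;		/* us, vddarm min -> max voltage ramp */
	int pu_ramp = 20;		/* us, vddpu normal -> high ramp */
	int soc_ramp = 20;		/* us, vddsoc normal -> high ramp */

	latency += arm_ramp * 1000;	/* us -> ns */
	/* vddpu/vddsoc only count when the 1.2 GHz OPP is present */
	latency += pu_ramp * 1000;
	latency += soc_ramp * 1000;

	printf("transition_latency = %u ns\n", latency);
	return 0;
}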
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c new file mode 100644 index 000000000000..e87996355da0 --- /dev/null +++ b/drivers/cpufreq/intel_pstate.c | |||
@@ -0,0 +1,807 @@ | |||
1 | /* | ||
2 | * cpufreq_snb.c: Native P state management for Intel processors | ||
3 | * | ||
4 | * (C) Copyright 2012 Intel Corporation | ||
5 | * Author: Dirk Brandewie <dirk.j.brandewie@intel.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License | ||
9 | * as published by the Free Software Foundation; version 2 | ||
10 | * of the License. | ||
11 | */ | ||
12 | |||
13 | #include <linux/kernel.h> | ||
14 | #include <linux/kernel_stat.h> | ||
15 | #include <linux/module.h> | ||
16 | #include <linux/ktime.h> | ||
17 | #include <linux/hrtimer.h> | ||
18 | #include <linux/tick.h> | ||
19 | #include <linux/slab.h> | ||
20 | #include <linux/sched.h> | ||
21 | #include <linux/list.h> | ||
22 | #include <linux/cpu.h> | ||
23 | #include <linux/cpufreq.h> | ||
24 | #include <linux/sysfs.h> | ||
25 | #include <linux/types.h> | ||
26 | #include <linux/fs.h> | ||
27 | #include <linux/debugfs.h> | ||
28 | #include <trace/events/power.h> | ||
29 | |||
30 | #include <asm/div64.h> | ||
31 | #include <asm/msr.h> | ||
32 | #include <asm/cpu_device_id.h> | ||
33 | |||
34 | #define SAMPLE_COUNT 3 | ||
35 | |||
36 | #define FRAC_BITS 8 | ||
37 | #define int_tofp(X) ((int64_t)(X) << FRAC_BITS) | ||
38 | #define fp_toint(X) ((X) >> FRAC_BITS) | ||
39 | |||
40 | static inline int32_t mul_fp(int32_t x, int32_t y) | ||
41 | { | ||
42 | return ((int64_t)x * (int64_t)y) >> FRAC_BITS; | ||
43 | } | ||
44 | |||
45 | static inline int32_t div_fp(int32_t x, int32_t y) | ||
46 | { | ||
47 | return div_s64((int64_t)x << FRAC_BITS, (int64_t)y); | ||
48 | } | ||
49 | |||
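FRAC_BITS = 8 makes these helpers Q24.8 fixed point: one unit is 1/256, int_tofp()/fp_toint() convert by shifting, and mul_fp()/div_fp() rescale after multiplying or before dividing. A standalone worked example (sketch; the driver's div_fp() uses div_s64(), plain 64-bit division is substituted here):

#include <stdint.h>
#include <stdio.h>

#define FRAC_BITS 8
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)

static int32_t mul_fp(int32_t x, int32_t y)
{
	return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
}

static int32_t div_fp(int32_t x, int32_t y)
{
	return (int32_t)(((int64_t)x << FRAC_BITS) / y);
}

int main(void)
{
	/* 17% as a fixed-point gain: 43/256 ~ 0.168 */
	int32_t gain = div_fp(int_tofp(17), int_tofp(100));

	/* 0.168 * 200 = 33.59..., truncated to 33 by the final shift */
	printf("%d\n", (int)fp_toint(mul_fp(gain, int_tofp(200))));
	return 0;
}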
50 | struct sample { | ||
51 | ktime_t start_time; | ||
52 | ktime_t end_time; | ||
53 | int core_pct_busy; | ||
54 | int pstate_pct_busy; | ||
55 | u64 duration_us; | ||
56 | u64 idletime_us; | ||
57 | u64 aperf; | ||
58 | u64 mperf; | ||
59 | int freq; | ||
60 | }; | ||
61 | |||
62 | struct pstate_data { | ||
63 | int current_pstate; | ||
64 | int min_pstate; | ||
65 | int max_pstate; | ||
66 | int turbo_pstate; | ||
67 | }; | ||
68 | |||
69 | struct _pid { | ||
70 | int setpoint; | ||
71 | int32_t integral; | ||
72 | int32_t p_gain; | ||
73 | int32_t i_gain; | ||
74 | int32_t d_gain; | ||
75 | int deadband; | ||
76 | int last_err; | ||
77 | }; | ||
78 | |||
79 | struct cpudata { | ||
80 | int cpu; | ||
81 | |||
82 | char name[64]; | ||
83 | |||
84 | struct timer_list timer; | ||
85 | |||
86 | struct pstate_adjust_policy *pstate_policy; | ||
87 | struct pstate_data pstate; | ||
88 | struct _pid pid; | ||
89 | struct _pid idle_pid; | ||
90 | |||
91 | int min_pstate_count; | ||
92 | int idle_mode; | ||
93 | |||
94 | ktime_t prev_sample; | ||
95 | u64 prev_idle_time_us; | ||
96 | u64 prev_aperf; | ||
97 | u64 prev_mperf; | ||
98 | int sample_ptr; | ||
99 | struct sample samples[SAMPLE_COUNT]; | ||
100 | }; | ||
101 | |||
102 | static struct cpudata **all_cpu_data; | ||
103 | struct pstate_adjust_policy { | ||
104 | int sample_rate_ms; | ||
105 | int deadband; | ||
106 | int setpoint; | ||
107 | int p_gain_pct; | ||
108 | int d_gain_pct; | ||
109 | int i_gain_pct; | ||
110 | }; | ||
111 | |||
112 | static struct pstate_adjust_policy default_policy = { | ||
113 | .sample_rate_ms = 10, | ||
114 | .deadband = 0, | ||
115 | .setpoint = 109, | ||
116 | .p_gain_pct = 17, | ||
117 | .d_gain_pct = 0, | ||
118 | .i_gain_pct = 4, | ||
119 | }; | ||
120 | |||
121 | struct perf_limits { | ||
122 | int no_turbo; | ||
123 | int max_perf_pct; | ||
124 | int min_perf_pct; | ||
125 | int32_t max_perf; | ||
126 | int32_t min_perf; | ||
127 | }; | ||
128 | |||
129 | static struct perf_limits limits = { | ||
130 | .no_turbo = 0, | ||
131 | .max_perf_pct = 100, | ||
132 | .max_perf = int_tofp(1), | ||
133 | .min_perf_pct = 0, | ||
134 | .min_perf = 0, | ||
135 | }; | ||
136 | |||
137 | static inline void pid_reset(struct _pid *pid, int setpoint, int busy, | ||
138 | int deadband, int integral) { | ||
139 | pid->setpoint = setpoint; | ||
140 | pid->deadband = deadband; | ||
141 | pid->integral = int_tofp(integral); | ||
142 | pid->last_err = setpoint - busy; | ||
143 | } | ||
144 | |||
145 | static inline void pid_p_gain_set(struct _pid *pid, int percent) | ||
146 | { | ||
147 | pid->p_gain = div_fp(int_tofp(percent), int_tofp(100)); | ||
148 | } | ||
149 | |||
150 | static inline void pid_i_gain_set(struct _pid *pid, int percent) | ||
151 | { | ||
152 | pid->i_gain = div_fp(int_tofp(percent), int_tofp(100)); | ||
153 | } | ||
154 | |||
155 | static inline void pid_d_gain_set(struct _pid *pid, int percent) | ||
156 | { | ||
157 | |||
158 | pid->d_gain = div_fp(int_tofp(percent), int_tofp(100)); | ||
159 | } | ||
160 | |||
161 | static signed int pid_calc(struct _pid *pid, int busy) | ||
162 | { | ||
163 | signed int err, result; | ||
164 | int32_t pterm, dterm, fp_error; | ||
165 | int32_t integral_limit; | ||
166 | |||
167 | err = pid->setpoint - busy; | ||
168 | fp_error = int_tofp(err); | ||
169 | |||
170 | if (abs(err) <= pid->deadband) | ||
171 | return 0; | ||
172 | |||
173 | pterm = mul_fp(pid->p_gain, fp_error); | ||
174 | |||
175 | pid->integral += fp_error; | ||
176 | |||
177 | /* limit the integral term */ | ||
178 | integral_limit = int_tofp(30); | ||
179 | if (pid->integral > integral_limit) | ||
180 | pid->integral = integral_limit; | ||
181 | if (pid->integral < -integral_limit) | ||
182 | pid->integral = -integral_limit; | ||
183 | |||
184 | dterm = mul_fp(pid->d_gain, (err - pid->last_err)); | ||
185 | pid->last_err = err; | ||
186 | |||
187 | result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm; | ||
188 | |||
189 | return (signed int)fp_toint(result); | ||
190 | } | ||
191 | |||
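Plugging the default gains (p_gain_pct = 17, i_gain_pct = 4, d_gain_pct = 0) and setpoint 109 into one pid_calc() step for busy = 125, starting from a freshly reset integral, works out as follows (Q24.8 arithmetic, truncating exactly as the shifts do):

	err      = 109 - 125                  = -16
	p_gain   = div_fp(17 << 8, 100 << 8)  = 43      (~0.168)
	pterm    = mul_fp(43, -16 << 8)       = -688    (~-2.69)
	integral = -16 << 8                   = -4096   (inside the +/-30 clamp)
	i_gain   = div_fp(4 << 8, 100 << 8)   = 10      (~0.039)
	iterm    = mul_fp(-4096, 10)          = -160    (~-0.63)
	result   = -688 + (-160) + 0          = -848 -> fp_toint() = -4

A negative return means the core is busier than the setpoint (err < 0); how the caller turns that into P-state steps is outside this hunk.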
192 | static inline void intel_pstate_busy_pid_reset(struct cpudata *cpu) | ||
193 | { | ||
194 | pid_p_gain_set(&cpu->pid, cpu->pstate_policy->p_gain_pct); | ||
195 | pid_d_gain_set(&cpu->pid, cpu->pstate_policy->d_gain_pct); | ||
196 | pid_i_gain_set(&cpu->pid, cpu->pstate_policy->i_gain_pct); | ||
197 | |||
198 | pid_reset(&cpu->pid, | ||
199 | cpu->pstate_policy->setpoint, | ||
200 | 100, | ||
201 | cpu->pstate_policy->deadband, | ||
202 | 0); | ||
203 | } | ||
204 | |||
205 | static inline void intel_pstate_idle_pid_reset(struct cpudata *cpu) | ||
206 | { | ||
207 | pid_p_gain_set(&cpu->idle_pid, cpu->pstate_policy->p_gain_pct); | ||
208 | pid_d_gain_set(&cpu->idle_pid, cpu->pstate_policy->d_gain_pct); | ||
209 | pid_i_gain_set(&cpu->idle_pid, cpu->pstate_policy->i_gain_pct); | ||
210 | |||
211 | pid_reset(&cpu->idle_pid, | ||
212 | 75, | ||
213 | 50, | ||
214 | cpu->pstate_policy->deadband, | ||
215 | 0); | ||
216 | } | ||
217 | |||
218 | static inline void intel_pstate_reset_all_pid(void) | ||
219 | { | ||
220 | unsigned int cpu; | ||
221 | for_each_online_cpu(cpu) { | ||
222 | if (all_cpu_data[cpu]) | ||
223 | intel_pstate_busy_pid_reset(all_cpu_data[cpu]); | ||
224 | } | ||
225 | } | ||
226 | |||
227 | /************************** debugfs begin ************************/ | ||
228 | static int pid_param_set(void *data, u64 val) | ||
229 | { | ||
230 | *(u32 *)data = val; | ||
231 | intel_pstate_reset_all_pid(); | ||
232 | return 0; | ||
233 | } | ||
234 | static int pid_param_get(void *data, u64 *val) | ||
235 | { | ||
236 | *val = *(u32 *)data; | ||
237 | return 0; | ||
238 | } | ||
239 | DEFINE_SIMPLE_ATTRIBUTE(fops_pid_param, pid_param_get, | ||
240 | pid_param_set, "%llu\n"); | ||
241 | |||
242 | struct pid_param { | ||
243 | char *name; | ||
244 | void *value; | ||
245 | }; | ||
246 | |||
247 | static struct pid_param pid_files[] = { | ||
248 | {"sample_rate_ms", &default_policy.sample_rate_ms}, | ||
249 | {"d_gain_pct", &default_policy.d_gain_pct}, | ||
250 | {"i_gain_pct", &default_policy.i_gain_pct}, | ||
251 | {"deadband", &default_policy.deadband}, | ||
252 | {"setpoint", &default_policy.setpoint}, | ||
253 | {"p_gain_pct", &default_policy.p_gain_pct}, | ||
254 | {NULL, NULL} | ||
255 | }; | ||
256 | |||
257 | static struct dentry *debugfs_parent; | ||
258 | static void intel_pstate_debug_expose_params(void) | ||
259 | { | ||
260 | int i = 0; | ||
261 | |||
262 | debugfs_parent = debugfs_create_dir("pstate_snb", NULL); | ||
263 | if (IS_ERR_OR_NULL(debugfs_parent)) | ||
264 | return; | ||
265 | while (pid_files[i].name) { | ||
266 | debugfs_create_file(pid_files[i].name, 0660, | ||
267 | debugfs_parent, pid_files[i].value, | ||
268 | &fops_pid_param); | ||
269 | i++; | ||
270 | } | ||
271 | } | ||
272 | |||
273 | /************************** debugfs end ************************/ | ||
274 | |||
275 | /************************** sysfs begin ************************/ | ||
276 | #define show_one(file_name, object) \ | ||
277 | static ssize_t show_##file_name \ | ||
278 | (struct kobject *kobj, struct attribute *attr, char *buf) \ | ||
279 | { \ | ||
280 | return sprintf(buf, "%u\n", limits.object); \ | ||
281 | } | ||
282 | |||
283 | static ssize_t store_no_turbo(struct kobject *a, struct attribute *b, | ||
284 | const char *buf, size_t count) | ||
285 | { | ||
286 | unsigned int input; | ||
287 | int ret; | ||
288 | ret = sscanf(buf, "%u", &input); | ||
289 | if (ret != 1) | ||
290 | return -EINVAL; | ||
291 | limits.no_turbo = clamp_t(int, input, 0 , 1); | ||
292 | |||
293 | return count; | ||
294 | } | ||
295 | |||
296 | static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b, | ||
297 | const char *buf, size_t count) | ||
298 | { | ||
299 | unsigned int input; | ||
300 | int ret; | ||
301 | ret = sscanf(buf, "%u", &input); | ||
302 | if (ret != 1) | ||
303 | return -EINVAL; | ||
304 | |||
305 | limits.max_perf_pct = clamp_t(int, input, 0 , 100); | ||
306 | limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100)); | ||
307 | return count; | ||
308 | } | ||
309 | |||
310 | static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b, | ||
311 | const char *buf, size_t count) | ||
312 | { | ||
313 | unsigned int input; | ||
314 | int ret; | ||
315 | ret = sscanf(buf, "%u", &input); | ||
316 | if (ret != 1) | ||
317 | return -EINVAL; | ||
318 | limits.min_perf_pct = clamp_t(int, input, 0 , 100); | ||
319 | limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100)); | ||
320 | |||
321 | return count; | ||
322 | } | ||
323 | |||
324 | show_one(no_turbo, no_turbo); | ||
325 | show_one(max_perf_pct, max_perf_pct); | ||
326 | show_one(min_perf_pct, min_perf_pct); | ||
327 | |||
328 | define_one_global_rw(no_turbo); | ||
329 | define_one_global_rw(max_perf_pct); | ||
330 | define_one_global_rw(min_perf_pct); | ||
331 | |||
332 | static struct attribute *intel_pstate_attributes[] = { | ||
333 | &no_turbo.attr, | ||
334 | &max_perf_pct.attr, | ||
335 | &min_perf_pct.attr, | ||
336 | NULL | ||
337 | }; | ||
338 | |||
339 | static struct attribute_group intel_pstate_attr_group = { | ||
340 | .attrs = intel_pstate_attributes, | ||
341 | }; | ||
342 | static struct kobject *intel_pstate_kobject; | ||
343 | |||
344 | static void intel_pstate_sysfs_expose_params(void) | ||
345 | { | ||
346 | int rc; | ||
347 | |||
348 | intel_pstate_kobject = kobject_create_and_add("intel_pstate", | ||
349 | &cpu_subsys.dev_root->kobj); | ||
350 | BUG_ON(!intel_pstate_kobject); | ||
351 | rc = sysfs_create_group(intel_pstate_kobject, | ||
352 | &intel_pstate_attr_group); | ||
353 | BUG_ON(rc); | ||
354 | } | ||
355 | |||
356 | /************************** sysfs end ************************/ | ||
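
Because the kobject hangs off the cpu subsystem's root device, the attributes appear as /sys/devices/system/cpu/intel_pstate/{no_turbo,max_perf_pct,min_perf_pct}. A small userspace sketch capping performance at 80% (hypothetical value; assumes sufficient privileges):

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/sys/devices/system/cpu/intel_pstate/max_perf_pct",
                      O_WRONLY);

        if (fd < 0)
                return 1;
        if (write(fd, "80", 2) != 2) {  /* store_max_perf_pct() clamps 0..100 */
                close(fd);
                return 1;
        }
        return close(fd) != 0;
}
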
357 | |||
358 | static int intel_pstate_min_pstate(void) | ||
359 | { | ||
360 | u64 value; | ||
361 | rdmsrl(0xCE, value); | ||
362 | return (value >> 40) & 0xFF; | ||
363 | } | ||
364 | |||
365 | static int intel_pstate_max_pstate(void) | ||
366 | { | ||
367 | u64 value; | ||
368 | rdmsrl(0xCE, value); | ||
369 | return (value >> 8) & 0xFF; | ||
370 | } | ||
371 | |||
372 | static int intel_pstate_turbo_pstate(void) | ||
373 | { | ||
374 | u64 value; | ||
375 | int nont, ret; | ||
376 | rdmsrl(0x1AD, value); | ||
377 | nont = intel_pstate_max_pstate(); | ||
378 | ret = value & 0xFF; | ||
379 | if (ret <= nont) | ||
380 | ret = nont; | ||
381 | return ret; | ||
382 | } | ||
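
The raw MSR numbers decoded above are MSR_PLATFORM_INFO (0xCE) and MSR_TURBO_RATIO_LIMIT (0x1AD). A kernel-style naming sketch of the fields, using the same shifts and masks (field positions per Intel's SDM for these parts):

#include <linux/types.h>

#define MSR_PLATFORM_INFO       0xCE
#define MSR_TURBO_RATIO_LIMIT   0x1AD

static int max_efficiency_ratio(u64 plat_info)
{
        return (plat_info >> 40) & 0xFF;        /* intel_pstate_min_pstate() */
}

static int max_non_turbo_ratio(u64 plat_info)
{
        return (plat_info >> 8) & 0xFF;         /* intel_pstate_max_pstate() */
}

static int max_1core_turbo_ratio(u64 turbo_limit)
{
        return turbo_limit & 0xFF;      /* turbo helper, before its floor */
}
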
383 | |||
384 | static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max) | ||
385 | { | ||
386 | int max_perf = cpu->pstate.turbo_pstate; | ||
387 | int min_perf; | ||
388 | if (limits.no_turbo) | ||
389 | max_perf = cpu->pstate.max_pstate; | ||
390 | |||
391 | max_perf = fp_toint(mul_fp(int_tofp(max_perf), limits.max_perf)); | ||
392 | *max = clamp_t(int, max_perf, | ||
393 | cpu->pstate.min_pstate, cpu->pstate.turbo_pstate); | ||
394 | |||
395 | min_perf = fp_toint(mul_fp(int_tofp(max_perf), limits.min_perf)); | ||
396 | *min = clamp_t(int, min_perf, | ||
397 | cpu->pstate.min_pstate, max_perf); | ||
398 | } | ||
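
A worked example of the clamp math in plain C, assuming this version's 8.8 fixed point (FRAC_BITS == 8): a user cap of max_perf_pct = 80 on a part whose turbo ratio is 38 yields a ceiling of P-state 30.

#include <stdint.h>
#include <stdio.h>

#define FRAC_BITS 8
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)

static int64_t mul_fp(int64_t x, int64_t y) { return (x * y) >> FRAC_BITS; }
static int64_t div_fp(int64_t x, int64_t y) { return (x << FRAC_BITS) / y; }

int main(void)
{
        /* store_max_perf_pct(): 80% becomes 204/256, about 0.797 */
        int64_t max_perf = div_fp(int_tofp(80), int_tofp(100));
        /* intel_pstate_get_min_max(): scale the turbo ratio by the cap */
        int max = fp_toint(mul_fp(int_tofp(38), max_perf));

        printf("max pstate = %d\n", max);       /* prints 30 */
        return 0;
}
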
399 | |||
400 | static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate) | ||
401 | { | ||
402 | int max_perf, min_perf; | ||
403 | |||
404 | intel_pstate_get_min_max(cpu, &min_perf, &max_perf); | ||
405 | |||
406 | pstate = clamp_t(int, pstate, min_perf, max_perf); | ||
407 | |||
408 | if (pstate == cpu->pstate.current_pstate) | ||
409 | return; | ||
410 | |||
411 | #ifndef MODULE | ||
412 | trace_cpu_frequency(pstate * 100000, cpu->cpu); | ||
413 | #endif | ||
414 | cpu->pstate.current_pstate = pstate; | ||
415 | wrmsrl(MSR_IA32_PERF_CTL, pstate << 8); | ||
416 | |||
417 | } | ||
418 | |||
419 | static inline void intel_pstate_pstate_increase(struct cpudata *cpu, int steps) | ||
420 | { | ||
421 | int target; | ||
422 | target = cpu->pstate.current_pstate + steps; | ||
423 | |||
424 | intel_pstate_set_pstate(cpu, target); | ||
425 | } | ||
426 | |||
427 | static inline void intel_pstate_pstate_decrease(struct cpudata *cpu, int steps) | ||
428 | { | ||
429 | int target; | ||
430 | target = cpu->pstate.current_pstate - steps; | ||
431 | intel_pstate_set_pstate(cpu, target); | ||
432 | } | ||
433 | |||
434 | static void intel_pstate_get_cpu_pstates(struct cpudata *cpu) | ||
435 | { | ||
436 | sprintf(cpu->name, "Intel 2nd generation core"); | ||
437 | |||
438 | cpu->pstate.min_pstate = intel_pstate_min_pstate(); | ||
439 | cpu->pstate.max_pstate = intel_pstate_max_pstate(); | ||
440 | cpu->pstate.turbo_pstate = intel_pstate_turbo_pstate(); | ||
441 | |||
442 | /* | ||
443 | * Go to max pstate so we don't slow down boot if we are built in; | ||
444 | * if we are a module we will take care of it during normal operation. | ||
445 | */ | ||
446 | intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate); | ||
447 | } | ||
448 | |||
449 | static inline void intel_pstate_calc_busy(struct cpudata *cpu, | ||
450 | struct sample *sample) | ||
451 | { | ||
452 | u64 core_pct; | ||
453 | sample->pstate_pct_busy = 100 - div64_u64( | ||
454 | sample->idletime_us * 100, | ||
455 | sample->duration_us); | ||
456 | core_pct = div64_u64(sample->aperf * 100, sample->mperf); | ||
457 | sample->freq = cpu->pstate.turbo_pstate * core_pct * 1000; | ||
458 | |||
459 | sample->core_pct_busy = div_s64((sample->pstate_pct_busy * core_pct), | ||
460 | 100); | ||
461 | } | ||
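
Plugging hypothetical numbers through the arithmetic above: 4 ms of idle in a 10 ms window gives pstate_pct_busy = 60; an APERF/MPERF delta ratio of 0.8 gives core_pct = 80; with a turbo ratio of 38 the reported frequency is about 3.04 GHz and the combined core_pct_busy is 48.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t duration_us = 10000, idletime_us = 4000;  /* hypothetical */
        uint64_t aperf = 8000, mperf = 10000;              /* MSR deltas */
        uint64_t turbo_pstate = 38;                        /* 3.8 GHz part */

        uint64_t pstate_pct_busy = 100 - idletime_us * 100 / duration_us;
        uint64_t core_pct = aperf * 100 / mperf;
        uint64_t freq_khz = turbo_pstate * core_pct * 1000;
        uint64_t core_pct_busy = pstate_pct_busy * core_pct / 100;

        printf("%llu%% busy, %llu kHz\n",       /* 48% busy, 3040000 kHz */
               (unsigned long long)core_pct_busy,
               (unsigned long long)freq_khz);
        return 0;
}
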
462 | |||
463 | static inline void intel_pstate_sample(struct cpudata *cpu) | ||
464 | { | ||
465 | ktime_t now; | ||
466 | u64 idle_time_us; | ||
467 | u64 aperf, mperf; | ||
468 | |||
469 | now = ktime_get(); | ||
470 | idle_time_us = get_cpu_idle_time_us(cpu->cpu, NULL); | ||
471 | |||
472 | rdmsrl(MSR_IA32_APERF, aperf); | ||
473 | rdmsrl(MSR_IA32_MPERF, mperf); | ||
474 | /* For the first sample, don't actually record anything; just | ||
475 | * set the baseline. */ | ||
476 | if (cpu->prev_idle_time_us > 0) { | ||
477 | cpu->sample_ptr = (cpu->sample_ptr + 1) % SAMPLE_COUNT; | ||
478 | cpu->samples[cpu->sample_ptr].start_time = cpu->prev_sample; | ||
479 | cpu->samples[cpu->sample_ptr].end_time = now; | ||
480 | cpu->samples[cpu->sample_ptr].duration_us = | ||
481 | ktime_us_delta(now, cpu->prev_sample); | ||
482 | cpu->samples[cpu->sample_ptr].idletime_us = | ||
483 | idle_time_us - cpu->prev_idle_time_us; | ||
484 | |||
485 | cpu->samples[cpu->sample_ptr].aperf = aperf; | ||
486 | cpu->samples[cpu->sample_ptr].mperf = mperf; | ||
487 | cpu->samples[cpu->sample_ptr].aperf -= cpu->prev_aperf; | ||
488 | cpu->samples[cpu->sample_ptr].mperf -= cpu->prev_mperf; | ||
489 | |||
490 | intel_pstate_calc_busy(cpu, &cpu->samples[cpu->sample_ptr]); | ||
491 | } | ||
492 | |||
493 | cpu->prev_sample = now; | ||
494 | cpu->prev_idle_time_us = idle_time_us; | ||
495 | cpu->prev_aperf = aperf; | ||
496 | cpu->prev_mperf = mperf; | ||
497 | } | ||
498 | |||
499 | static inline void intel_pstate_set_sample_time(struct cpudata *cpu) | ||
500 | { | ||
501 | int sample_time, delay; | ||
502 | |||
503 | sample_time = cpu->pstate_policy->sample_rate_ms; | ||
504 | delay = msecs_to_jiffies(sample_time); | ||
505 | delay -= jiffies % delay; | ||
506 | mod_timer_pinned(&cpu->timer, jiffies + delay); | ||
507 | } | ||
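
The delay -= jiffies % delay adjustment phase-aligns the per-CPU timers: every CPU sampling at the same rate expires on the same jiffy boundary, so their wakeups batch. A plain-C sketch, assuming HZ == 1000 (where msecs_to_jiffies(ms) == ms):

#include <stdio.h>

static unsigned long next_expiry(unsigned long jiffies, unsigned long delay)
{
        delay -= jiffies % delay;       /* round up to the next boundary */
        return jiffies + delay;
}

int main(void)
{
        /* jiffies 1007 with a 10 ms sample rate expires at 1010, the
         * same boundary every other sampling CPU lands on. */
        printf("%lu\n", next_expiry(1007, 10));
        return 0;
}
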
508 | |||
509 | static inline void intel_pstate_idle_mode(struct cpudata *cpu) | ||
510 | { | ||
511 | cpu->idle_mode = 1; | ||
512 | } | ||
513 | |||
514 | static inline void intel_pstate_normal_mode(struct cpudata *cpu) | ||
515 | { | ||
516 | cpu->idle_mode = 0; | ||
517 | } | ||
518 | |||
519 | static inline int intel_pstate_get_scaled_busy(struct cpudata *cpu) | ||
520 | { | ||
521 | int32_t busy_scaled; | ||
522 | int32_t core_busy, turbo_pstate, current_pstate; | ||
523 | |||
524 | core_busy = int_tofp(cpu->samples[cpu->sample_ptr].core_pct_busy); | ||
525 | turbo_pstate = int_tofp(cpu->pstate.turbo_pstate); | ||
526 | current_pstate = int_tofp(cpu->pstate.current_pstate); | ||
527 | busy_scaled = mul_fp(core_busy, div_fp(turbo_pstate, current_pstate)); | ||
528 | |||
529 | return fp_toint(busy_scaled); | ||
530 | } | ||
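
Continuing the sampling example: 48% core-busy measured while running at P-state 30 on a 38-turbo part rescales to 48 * 38 / 30, roughly 60: the load expressed as a share of full turbo rather than of the current clock. In plain integers:

static int scaled_busy_example(void)
{
        int core_pct_busy = 48, turbo = 38, cur = 30;   /* hypothetical */

        return core_pct_busy * turbo / cur;             /* 60 */
}
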
531 | |||
532 | static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu) | ||
533 | { | ||
534 | int busy_scaled; | ||
535 | struct _pid *pid; | ||
536 | signed int ctl = 0; | ||
537 | int steps; | ||
538 | |||
539 | pid = &cpu->pid; | ||
540 | busy_scaled = intel_pstate_get_scaled_busy(cpu); | ||
541 | |||
542 | ctl = pid_calc(pid, busy_scaled); | ||
543 | |||
544 | steps = abs(ctl); | ||
545 | if (ctl < 0) | ||
546 | intel_pstate_pstate_increase(cpu, steps); | ||
547 | else | ||
548 | intel_pstate_pstate_decrease(cpu, steps); | ||
549 | } | ||
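
The branch direction follows from pid_calc() earlier in the file, whose error term in this version is the setpoint minus the measured busy: running hotter than the setpoint drives ctl negative, so a negative ctl means "step the P-state up". A sketch with a hypothetical controller output:

static void control_direction_example(struct cpudata *cpu)
{
        int ctl = -2;   /* hypothetical: busy_scaled above the setpoint */

        if (ctl < 0)
                intel_pstate_pstate_increase(cpu, abs(ctl));    /* 2 up */
        else
                intel_pstate_pstate_decrease(cpu, abs(ctl));
}
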
550 | |||
551 | static inline void intel_pstate_adjust_idle_pstate(struct cpudata *cpu) | ||
552 | { | ||
553 | int busy_scaled; | ||
554 | struct _pid *pid; | ||
555 | int ctl = 0; | ||
556 | int steps; | ||
557 | |||
558 | pid = &cpu->idle_pid; | ||
559 | |||
560 | busy_scaled = intel_pstate_get_scaled_busy(cpu); | ||
561 | |||
562 | ctl = pid_calc(pid, 100 - busy_scaled); | ||
563 | |||
564 | steps = abs(ctl); | ||
565 | if (ctl < 0) | ||
566 | intel_pstate_pstate_decrease(cpu, steps); | ||
567 | else | ||
568 | intel_pstate_pstate_increase(cpu, steps); | ||
569 | |||
570 | if (cpu->pstate.current_pstate == cpu->pstate.min_pstate) | ||
571 | intel_pstate_normal_mode(cpu); | ||
572 | } | ||
573 | |||
574 | static void intel_pstate_timer_func(unsigned long __data) | ||
575 | { | ||
576 | struct cpudata *cpu = (struct cpudata *) __data; | ||
577 | |||
578 | intel_pstate_sample(cpu); | ||
579 | |||
580 | if (!cpu->idle_mode) | ||
581 | intel_pstate_adjust_busy_pstate(cpu); | ||
582 | else | ||
583 | intel_pstate_adjust_idle_pstate(cpu); | ||
584 | |||
585 | #if defined(XPERF_FIX) | ||
586 | if (cpu->pstate.current_pstate == cpu->pstate.min_pstate) { | ||
587 | cpu->min_pstate_count++; | ||
588 | if (!(cpu->min_pstate_count % 5)) { | ||
589 | intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate); | ||
590 | intel_pstate_idle_mode(cpu); | ||
591 | } | ||
592 | } else | ||
593 | cpu->min_pstate_count = 0; | ||
594 | #endif | ||
595 | intel_pstate_set_sample_time(cpu); | ||
596 | } | ||
597 | |||
598 | #define ICPU(model, policy) \ | ||
599 | { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, (unsigned long)&policy } | ||
600 | |||
601 | static const struct x86_cpu_id intel_pstate_cpu_ids[] = { | ||
602 | ICPU(0x2a, default_policy), | ||
603 | ICPU(0x2d, default_policy), | ||
604 | {} | ||
605 | }; | ||
606 | MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids); | ||
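
Model 0x2a is Sandy Bridge client silicon and 0x2d its E/EP server sibling. The ICPU() wrapper is positional; ICPU(0x2a, default_policy) expands to the initializer below (wrapped in an illustrative variable here):

static const struct x86_cpu_id sandy_bridge_entry = {
        X86_VENDOR_INTEL, 6, 0x2a, X86_FEATURE_ANY,
        (unsigned long)&default_policy
};
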
607 | |||
608 | static int intel_pstate_init_cpu(unsigned int cpunum) | ||
609 | { | ||
610 | |||
611 | const struct x86_cpu_id *id; | ||
612 | struct cpudata *cpu; | ||
613 | |||
614 | id = x86_match_cpu(intel_pstate_cpu_ids); | ||
615 | if (!id) | ||
616 | return -ENODEV; | ||
617 | |||
618 | all_cpu_data[cpunum] = kzalloc(sizeof(struct cpudata), GFP_KERNEL); | ||
619 | if (!all_cpu_data[cpunum]) | ||
620 | return -ENOMEM; | ||
621 | |||
622 | cpu = all_cpu_data[cpunum]; | ||
623 | |||
624 | intel_pstate_get_cpu_pstates(cpu); | ||
625 | |||
626 | cpu->cpu = cpunum; | ||
627 | cpu->pstate_policy = | ||
628 | (struct pstate_adjust_policy *)id->driver_data; | ||
629 | init_timer_deferrable(&cpu->timer); | ||
630 | cpu->timer.function = intel_pstate_timer_func; | ||
631 | cpu->timer.data = | ||
632 | (unsigned long)cpu; | ||
633 | cpu->timer.expires = jiffies + HZ/100; | ||
634 | intel_pstate_busy_pid_reset(cpu); | ||
635 | intel_pstate_idle_pid_reset(cpu); | ||
636 | intel_pstate_sample(cpu); | ||
637 | intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate); | ||
638 | |||
639 | add_timer_on(&cpu->timer, cpunum); | ||
640 | |||
641 | pr_info("Intel pstate controlling cpu %d\n", cpunum); | ||
642 | |||
643 | return 0; | ||
644 | } | ||
645 | |||
646 | static unsigned int intel_pstate_get(unsigned int cpu_num) | ||
647 | { | ||
648 | struct sample *sample; | ||
649 | struct cpudata *cpu; | ||
650 | |||
651 | cpu = all_cpu_data[cpu_num]; | ||
652 | if (!cpu) | ||
653 | return 0; | ||
654 | sample = &cpu->samples[cpu->sample_ptr]; | ||
655 | return sample->freq; | ||
656 | } | ||
657 | |||
658 | static int intel_pstate_set_policy(struct cpufreq_policy *policy) | ||
659 | { | ||
660 | struct cpudata *cpu; | ||
661 | int min, max; | ||
662 | |||
663 | cpu = all_cpu_data[policy->cpu]; | ||
664 | |||
665 | intel_pstate_get_min_max(cpu, &min, &max); | ||
666 | |||
667 | limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq; | ||
668 | limits.min_perf_pct = clamp_t(int, limits.min_perf_pct, 0, 100); | ||
669 | limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100)); | ||
670 | |||
671 | limits.max_perf_pct = policy->max * 100 / policy->cpuinfo.max_freq; | ||
672 | limits.max_perf_pct = clamp_t(int, limits.max_perf_pct, 0, 100); | ||
673 | limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100)); | ||
674 | |||
675 | if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) { | ||
676 | limits.min_perf_pct = 100; | ||
677 | limits.min_perf = int_tofp(1); | ||
678 | limits.max_perf_pct = 100; | ||
679 | limits.max_perf = int_tofp(1); | ||
680 | limits.no_turbo = 0; | ||
681 | } | ||
682 | |||
683 | return 0; | ||
684 | } | ||
685 | |||
686 | static int intel_pstate_verify_policy(struct cpufreq_policy *policy) | ||
687 | { | ||
688 | cpufreq_verify_within_limits(policy, | ||
689 | policy->cpuinfo.min_freq, | ||
690 | policy->cpuinfo.max_freq); | ||
691 | |||
692 | if ((policy->policy != CPUFREQ_POLICY_POWERSAVE) && | ||
693 | (policy->policy != CPUFREQ_POLICY_PERFORMANCE)) | ||
694 | return -EINVAL; | ||
695 | |||
696 | return 0; | ||
697 | } | ||
698 | |||
699 | static int __cpuinit intel_pstate_cpu_exit(struct cpufreq_policy *policy) | ||
700 | { | ||
701 | int cpu = policy->cpu; | ||
702 | |||
703 | del_timer(&all_cpu_data[cpu]->timer); | ||
704 | kfree(all_cpu_data[cpu]); | ||
705 | all_cpu_data[cpu] = NULL; | ||
706 | return 0; | ||
707 | } | ||
708 | |||
709 | static int __cpuinit intel_pstate_cpu_init(struct cpufreq_policy *policy) | ||
710 | { | ||
711 | int rc, min_pstate, max_pstate; | ||
712 | struct cpudata *cpu; | ||
713 | |||
714 | rc = intel_pstate_init_cpu(policy->cpu); | ||
715 | if (rc) | ||
716 | return rc; | ||
717 | |||
718 | cpu = all_cpu_data[policy->cpu]; | ||
719 | |||
720 | if (!limits.no_turbo && | ||
721 | limits.min_perf_pct == 100 && limits.max_perf_pct == 100) | ||
722 | policy->policy = CPUFREQ_POLICY_PERFORMANCE; | ||
723 | else | ||
724 | policy->policy = CPUFREQ_POLICY_POWERSAVE; | ||
725 | |||
726 | intel_pstate_get_min_max(cpu, &min_pstate, &max_pstate); | ||
727 | policy->min = min_pstate * 100000; | ||
728 | policy->max = max_pstate * 100000; | ||
729 | |||
730 | /* cpuinfo and default policy values */ | ||
731 | policy->cpuinfo.min_freq = cpu->pstate.min_pstate * 100000; | ||
732 | policy->cpuinfo.max_freq = cpu->pstate.turbo_pstate * 100000; | ||
733 | policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; | ||
734 | cpumask_set_cpu(policy->cpu, policy->cpus); | ||
735 | |||
736 | return 0; | ||
737 | } | ||
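
The * 100000 scaling reflects that P-state ratios on these parts are multiples of the 100 MHz bus clock, while cpufreq wants kHz. With hypothetical ratios min = 16 and turbo = 38:

static void ratio_to_khz_example(void)
{
        unsigned int min_khz   = 16 * 100000;   /* 1,600,000 kHz = 1.6 GHz */
        unsigned int turbo_khz = 38 * 100000;   /* 3,800,000 kHz = 3.8 GHz */

        (void)min_khz;
        (void)turbo_khz;
}
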
738 | |||
739 | static struct cpufreq_driver intel_pstate_driver = { | ||
740 | .flags = CPUFREQ_CONST_LOOPS, | ||
741 | .verify = intel_pstate_verify_policy, | ||
742 | .setpolicy = intel_pstate_set_policy, | ||
743 | .get = intel_pstate_get, | ||
744 | .init = intel_pstate_cpu_init, | ||
745 | .exit = intel_pstate_cpu_exit, | ||
746 | .name = "intel_pstate", | ||
747 | .owner = THIS_MODULE, | ||
748 | }; | ||
749 | |||
750 | static void intel_pstate_exit(void) | ||
751 | { | ||
752 | int cpu; | ||
753 | |||
754 | sysfs_remove_group(intel_pstate_kobject, | ||
755 | &intel_pstate_attr_group); | ||
756 | debugfs_remove_recursive(debugfs_parent); | ||
757 | |||
758 | cpufreq_unregister_driver(&intel_pstate_driver); | ||
759 | |||
760 | if (!all_cpu_data) | ||
761 | return; | ||
762 | |||
763 | get_online_cpus(); | ||
764 | for_each_online_cpu(cpu) { | ||
765 | if (all_cpu_data[cpu]) { | ||
766 | del_timer_sync(&all_cpu_data[cpu]->timer); | ||
767 | kfree(all_cpu_data[cpu]); | ||
768 | } | ||
769 | } | ||
770 | |||
771 | put_online_cpus(); | ||
772 | vfree(all_cpu_data); | ||
773 | } | ||
774 | module_exit(intel_pstate_exit); | ||
775 | |||
776 | static int __init intel_pstate_init(void) | ||
777 | { | ||
778 | int rc = 0; | ||
779 | const struct x86_cpu_id *id; | ||
780 | |||
781 | id = x86_match_cpu(intel_pstate_cpu_ids); | ||
782 | if (!id) | ||
783 | return -ENODEV; | ||
784 | |||
785 | pr_info("Intel P-state driver initializing.\n"); | ||
786 | |||
787 | all_cpu_data = vmalloc(sizeof(void *) * num_possible_cpus()); | ||
788 | if (!all_cpu_data) | ||
789 | return -ENOMEM; | ||
790 | memset(all_cpu_data, 0, sizeof(void *) * num_possible_cpus()); | ||
791 | |||
792 | rc = cpufreq_register_driver(&intel_pstate_driver); | ||
793 | if (rc) | ||
794 | goto out; | ||
795 | |||
796 | intel_pstate_debug_expose_params(); | ||
797 | intel_pstate_sysfs_expose_params(); | ||
798 | return rc; | ||
799 | out: | ||
800 | intel_pstate_exit(); | ||
801 | return -ENODEV; | ||
802 | } | ||
803 | device_initcall(intel_pstate_init); | ||
804 | |||
805 | MODULE_AUTHOR("Dirk Brandewie <dirk.j.brandewie@intel.com>"); | ||
806 | MODULE_DESCRIPTION("'intel_pstate' - P-state driver for Intel Core processors"); | ||
807 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/cpufreq/kirkwood-cpufreq.c b/drivers/cpufreq/kirkwood-cpufreq.c new file mode 100644 index 000000000000..0e83e3c24f5b --- /dev/null +++ b/drivers/cpufreq/kirkwood-cpufreq.c | |||
@@ -0,0 +1,259 @@ | |||
1 | /* | ||
2 | * kirkwood-cpufreq.c: cpufreq driver for Marvell Kirkwood SoCs | ||
3 | * | ||
4 | * Copyright (C) 2013 Andrew Lunn <andrew@lunn.ch> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/kernel.h> | ||
13 | #include <linux/module.h> | ||
14 | #include <linux/clk.h> | ||
15 | #include <linux/clk-provider.h> | ||
16 | #include <linux/cpufreq.h> | ||
17 | #include <linux/of.h> | ||
18 | #include <linux/platform_device.h> | ||
19 | #include <linux/io.h> | ||
20 | #include <asm/proc-fns.h> | ||
21 | |||
22 | #define CPU_SW_INT_BLK BIT(28) | ||
23 | |||
24 | static struct priv | ||
25 | { | ||
26 | struct clk *cpu_clk; | ||
27 | struct clk *ddr_clk; | ||
28 | struct clk *powersave_clk; | ||
29 | struct device *dev; | ||
30 | void __iomem *base; | ||
31 | } priv; | ||
32 | |||
33 | #define STATE_CPU_FREQ 0x01 | ||
34 | #define STATE_DDR_FREQ 0x02 | ||
35 | |||
36 | /* | ||
37 | * Kirkwood can swap the clock to the CPU between two clocks: | ||
38 | * | ||
39 | * - cpu clk | ||
40 | * - ddr clk | ||
41 | * | ||
42 | * The frequencies are set at runtime before registering this | ||
43 | * table. | ||
44 | */ | ||
45 | static struct cpufreq_frequency_table kirkwood_freq_table[] = { | ||
46 | {STATE_CPU_FREQ, 0}, /* CPU uses cpuclk */ | ||
47 | {STATE_DDR_FREQ, 0}, /* CPU uses ddrclk */ | ||
48 | {0, CPUFREQ_TABLE_END}, | ||
49 | }; | ||
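
In cpufreq frequency tables of this era, .index carries driver-private data (here, which clock feeds the CPU), .frequency is in kHz, and CPUFREQ_TABLE_END terminates the walk. The zero frequencies are placeholders; the probe routine below fills them from the clock tree, roughly:

static void kirkwood_fill_table(void)
{
        /* As done in kirkwood_cpufreq_probe(): clk rates in Hz, table in kHz */
        kirkwood_freq_table[0].frequency = clk_get_rate(priv.cpu_clk) / 1000;
        kirkwood_freq_table[1].frequency = clk_get_rate(priv.ddr_clk) / 1000;
}
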
50 | |||
51 | static unsigned int kirkwood_cpufreq_get_cpu_frequency(unsigned int cpu) | ||
52 | { | ||
53 | if (__clk_is_enabled(priv.powersave_clk)) | ||
54 | return kirkwood_freq_table[1].frequency; | ||
55 | return kirkwood_freq_table[0].frequency; | ||
56 | } | ||
57 | |||
58 | static void kirkwood_cpufreq_set_cpu_state(unsigned int index) | ||
59 | { | ||
60 | struct cpufreq_freqs freqs; | ||
61 | unsigned int state = kirkwood_freq_table[index].index; | ||
62 | unsigned long reg; | ||
63 | |||
64 | freqs.old = kirkwood_cpufreq_get_cpu_frequency(0); | ||
65 | freqs.new = kirkwood_freq_table[index].frequency; | ||
66 | freqs.cpu = 0; /* Kirkwood is UP */ | ||
67 | |||
68 | cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); | ||
69 | |||
70 | dev_dbg(priv.dev, "Attempting to set frequency to %i kHz\n", | ||
71 | kirkwood_freq_table[index].frequency); | ||
72 | dev_dbg(priv.dev, "old frequency was %i kHz\n", | ||
73 | kirkwood_cpufreq_get_cpu_frequency(0)); | ||
74 | |||
75 | if (freqs.old != freqs.new) { | ||
76 | local_irq_disable(); | ||
77 | |||
78 | /* Disable interrupts to the CPU */ | ||
79 | reg = readl_relaxed(priv.base); | ||
80 | reg |= CPU_SW_INT_BLK; | ||
81 | writel_relaxed(reg, priv.base); | ||
82 | |||
83 | switch (state) { | ||
84 | case STATE_CPU_FREQ: | ||
85 | clk_disable(priv.powersave_clk); | ||
86 | break; | ||
87 | case STATE_DDR_FREQ: | ||
88 | clk_enable(priv.powersave_clk); | ||
89 | break; | ||
90 | } | ||
91 | |||
92 | /* Wait-for-Interrupt, while the hardware changes frequency */ | ||
93 | cpu_do_idle(); | ||
94 | |||
95 | /* Enable interrupts to the CPU */ | ||
96 | reg = readl_relaxed(priv.base); | ||
97 | reg &= ~CPU_SW_INT_BLK; | ||
98 | writel_relaxed(reg, priv.base); | ||
99 | |||
100 | local_irq_enable(); | ||
101 | } | ||
102 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); | ||
103 | } | ||
104 | |||
105 | static int kirkwood_cpufreq_verify(struct cpufreq_policy *policy) | ||
106 | { | ||
107 | return cpufreq_frequency_table_verify(policy, kirkwood_freq_table); | ||
108 | } | ||
109 | |||
110 | static int kirkwood_cpufreq_target(struct cpufreq_policy *policy, | ||
111 | unsigned int target_freq, | ||
112 | unsigned int relation) | ||
113 | { | ||
114 | unsigned int index = 0; | ||
115 | |||
116 | if (cpufreq_frequency_table_target(policy, kirkwood_freq_table, | ||
117 | target_freq, relation, &index)) | ||
118 | return -EINVAL; | ||
119 | |||
120 | kirkwood_cpufreq_set_cpu_state(index); | ||
121 | |||
122 | return 0; | ||
123 | } | ||
124 | |||
125 | /* Module init and exit code */ | ||
126 | static int kirkwood_cpufreq_cpu_init(struct cpufreq_policy *policy) | ||
127 | { | ||
128 | int result; | ||
129 | |||
130 | /* cpuinfo and default policy values */ | ||
131 | policy->cpuinfo.transition_latency = 5000; /* 5 us */ | ||
132 | policy->cur = kirkwood_cpufreq_get_cpu_frequency(0); | ||
133 | |||
134 | result = cpufreq_frequency_table_cpuinfo(policy, kirkwood_freq_table); | ||
135 | if (result) | ||
136 | return result; | ||
137 | |||
138 | cpufreq_frequency_table_get_attr(kirkwood_freq_table, policy->cpu); | ||
139 | |||
140 | return 0; | ||
141 | } | ||
142 | |||
143 | static int kirkwood_cpufreq_cpu_exit(struct cpufreq_policy *policy) | ||
144 | { | ||
145 | cpufreq_frequency_table_put_attr(policy->cpu); | ||
146 | return 0; | ||
147 | } | ||
148 | |||
149 | static struct freq_attr *kirkwood_cpufreq_attr[] = { | ||
150 | &cpufreq_freq_attr_scaling_available_freqs, | ||
151 | NULL, | ||
152 | }; | ||
153 | |||
154 | static struct cpufreq_driver kirkwood_cpufreq_driver = { | ||
155 | .get = kirkwood_cpufreq_get_cpu_frequency, | ||
156 | .verify = kirkwood_cpufreq_verify, | ||
157 | .target = kirkwood_cpufreq_target, | ||
158 | .init = kirkwood_cpufreq_cpu_init, | ||
159 | .exit = kirkwood_cpufreq_cpu_exit, | ||
160 | .name = "kirkwood-cpufreq", | ||
161 | .owner = THIS_MODULE, | ||
162 | .attr = kirkwood_cpufreq_attr, | ||
163 | }; | ||
164 | |||
165 | static int kirkwood_cpufreq_probe(struct platform_device *pdev) | ||
166 | { | ||
167 | struct device_node *np; | ||
168 | struct resource *res; | ||
169 | int err; | ||
170 | |||
171 | priv.dev = &pdev->dev; | ||
172 | |||
173 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
174 | if (!res) { | ||
175 | dev_err(&pdev->dev, "Cannot get memory resource\n"); | ||
176 | return -ENODEV; | ||
177 | } | ||
178 | priv.base = devm_request_and_ioremap(&pdev->dev, res); | ||
179 | if (!priv.base) { | ||
180 | dev_err(&pdev->dev, "Cannot ioremap\n"); | ||
181 | return -EADDRNOTAVAIL; | ||
182 | } | ||
183 | |||
184 | np = of_find_node_by_path("/cpus/cpu@0"); | ||
185 | if (!np) | ||
186 | return -ENODEV; | ||
187 | |||
188 | priv.cpu_clk = of_clk_get_by_name(np, "cpu_clk"); | ||
189 | if (IS_ERR(priv.cpu_clk)) { | ||
190 | dev_err(priv.dev, "Unable to get cpuclk"); | ||
191 | return PTR_ERR(priv.cpu_clk); | ||
192 | } | ||
193 | |||
194 | clk_prepare_enable(priv.cpu_clk); | ||
195 | kirkwood_freq_table[0].frequency = clk_get_rate(priv.cpu_clk) / 1000; | ||
196 | |||
197 | priv.ddr_clk = of_clk_get_by_name(np, "ddrclk"); | ||
198 | if (IS_ERR(priv.ddr_clk)) { | ||
199 | dev_err(priv.dev, "Unable to get ddrclk"); | ||
200 | err = PTR_ERR(priv.ddr_clk); | ||
201 | goto out_cpu; | ||
202 | } | ||
203 | |||
204 | clk_prepare_enable(priv.ddr_clk); | ||
205 | kirkwood_freq_table[1].frequency = clk_get_rate(priv.ddr_clk) / 1000; | ||
206 | |||
207 | priv.powersave_clk = of_clk_get_by_name(np, "powersave"); | ||
208 | if (IS_ERR(priv.powersave_clk)) { | ||
209 | dev_err(priv.dev, "Unable to get powersave"); | ||
210 | err = PTR_ERR(priv.powersave_clk); | ||
211 | goto out_ddr; | ||
212 | } | ||
213 | clk_prepare(priv.powersave_clk); | ||
214 | |||
215 | of_node_put(np); | ||
216 | np = NULL; | ||
217 | |||
218 | err = cpufreq_register_driver(&kirkwood_cpufreq_driver); | ||
219 | if (!err) | ||
220 | return 0; | ||
221 | |||
222 | dev_err(priv.dev, "Failed to register cpufreq driver"); | ||
223 | |||
224 | clk_disable_unprepare(priv.powersave_clk); | ||
225 | out_ddr: | ||
226 | clk_disable_unprepare(priv.ddr_clk); | ||
227 | out_cpu: | ||
228 | clk_disable_unprepare(priv.cpu_clk); | ||
229 | of_node_put(np); | ||
230 | |||
231 | return err; | ||
232 | } | ||
233 | |||
234 | static int kirkwood_cpufreq_remove(struct platform_device *pdev) | ||
235 | { | ||
236 | cpufreq_unregister_driver(&kirkwood_cpufreq_driver); | ||
237 | |||
238 | clk_disable_unprepare(priv.powersave_clk); | ||
239 | clk_disable_unprepare(priv.ddr_clk); | ||
240 | clk_disable_unprepare(priv.cpu_clk); | ||
241 | |||
242 | return 0; | ||
243 | } | ||
244 | |||
245 | static struct platform_driver kirkwood_cpufreq_platform_driver = { | ||
246 | .probe = kirkwood_cpufreq_probe, | ||
247 | .remove = kirkwood_cpufreq_remove, | ||
248 | .driver = { | ||
249 | .name = "kirkwood-cpufreq", | ||
250 | .owner = THIS_MODULE, | ||
251 | }, | ||
252 | }; | ||
253 | |||
254 | module_platform_driver(kirkwood_cpufreq_platform_driver); | ||
255 | |||
256 | MODULE_LICENSE("GPL v2"); | ||
257 | MODULE_AUTHOR("Andrew Lunn <andrew@lunn.ch>"); | ||
258 | MODULE_DESCRIPTION("cpufreq driver for Marvell's Kirkwood CPU"); | ||
259 | MODULE_ALIAS("platform:kirkwood-cpufreq"); | ||
diff --git a/drivers/cpufreq/maple-cpufreq.c b/drivers/cpufreq/maple-cpufreq.c index 89b178a3f849..d4c4989823dc 100644 --- a/drivers/cpufreq/maple-cpufreq.c +++ b/drivers/cpufreq/maple-cpufreq.c | |||
@@ -181,7 +181,7 @@ static int maple_cpufreq_cpu_init(struct cpufreq_policy *policy) | |||
181 | /* secondary CPUs are tied to the primary one by the | 181 | /* secondary CPUs are tied to the primary one by the |
182 | * cpufreq core if in the secondary policy we tell it that | 182 | * cpufreq core if in the secondary policy we tell it that |
183 | * it actually must be one policy together with all others. */ | 183 | * it actually must be one policy together with all others. */ |
184 | cpumask_copy(policy->cpus, cpu_online_mask); | 184 | cpumask_setall(policy->cpus); |
185 | cpufreq_frequency_table_get_attr(maple_cpu_freqs, policy->cpu); | 185 | cpufreq_frequency_table_get_attr(maple_cpu_freqs, policy->cpu); |
186 | 186 | ||
187 | return cpufreq_frequency_table_cpuinfo(policy, | 187 | return cpufreq_frequency_table_cpuinfo(policy, |
diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c index 97102b05843f..9128c07bafba 100644 --- a/drivers/cpufreq/omap-cpufreq.c +++ b/drivers/cpufreq/omap-cpufreq.c | |||
@@ -214,10 +214,8 @@ static int __cpuinit omap_cpu_init(struct cpufreq_policy *policy) | |||
214 | * interface to handle this scenario. Additional is_smp() check | 214 | * interface to handle this scenario. Additional is_smp() check |
215 | * is to keep SMP_ON_UP build working. | 215 | * is to keep SMP_ON_UP build working. |
216 | */ | 216 | */ |
217 | if (is_smp()) { | 217 | if (is_smp()) |
218 | policy->shared_type = CPUFREQ_SHARED_TYPE_ANY; | ||
219 | cpumask_setall(policy->cpus); | 218 | cpumask_setall(policy->cpus); |
220 | } | ||
221 | 219 | ||
222 | /* FIXME: what's the actual transition time? */ | 220 | /* FIXME: what's the actual transition time? */ |
223 | policy->cpuinfo.transition_latency = 300 * 1000; | 221 | policy->cpuinfo.transition_latency = 300 * 1000; |
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c index 056faf6af1a9..d13a13678b5f 100644 --- a/drivers/cpufreq/powernow-k8.c +++ b/drivers/cpufreq/powernow-k8.c | |||
@@ -1249,39 +1249,59 @@ static struct cpufreq_driver cpufreq_amd64_driver = { | |||
1249 | .attr = powernow_k8_attr, | 1249 | .attr = powernow_k8_attr, |
1250 | }; | 1250 | }; |
1251 | 1251 | ||
1252 | static void __request_acpi_cpufreq(void) | ||
1253 | { | ||
1254 | const char *cur_drv, *drv = "acpi-cpufreq"; | ||
1255 | |||
1256 | cur_drv = cpufreq_get_current_driver(); | ||
1257 | if (!cur_drv) | ||
1258 | goto request; | ||
1259 | |||
1260 | if (strncmp(cur_drv, drv, min_t(size_t, strlen(cur_drv), strlen(drv)))) | ||
1261 | pr_warn(PFX "unexpected scaling driver loaded: %s\n", cur_drv); | ||
1262 | |||
1263 | return; | ||
1264 | |||
1265 | request: | ||
1266 | pr_warn(PFX "This CPU is not supported anymore, using acpi-cpufreq instead.\n"); | ||
1267 | request_module(drv); | ||
1268 | } | ||
1269 | |||
1252 | /* driver entry point for init */ | 1270 | /* driver entry point for init */ |
1253 | static int __cpuinit powernowk8_init(void) | 1271 | static int __cpuinit powernowk8_init(void) |
1254 | { | 1272 | { |
1255 | unsigned int i, supported_cpus = 0; | 1273 | unsigned int i, supported_cpus = 0; |
1256 | int rv; | 1274 | int ret; |
1257 | 1275 | ||
1258 | if (static_cpu_has(X86_FEATURE_HW_PSTATE)) { | 1276 | if (static_cpu_has(X86_FEATURE_HW_PSTATE)) { |
1259 | pr_warn(PFX "this CPU is not supported anymore, using acpi-cpufreq instead.\n"); | 1277 | __request_acpi_cpufreq(); |
1260 | request_module("acpi-cpufreq"); | ||
1261 | return -ENODEV; | 1278 | return -ENODEV; |
1262 | } | 1279 | } |
1263 | 1280 | ||
1264 | if (!x86_match_cpu(powernow_k8_ids)) | 1281 | if (!x86_match_cpu(powernow_k8_ids)) |
1265 | return -ENODEV; | 1282 | return -ENODEV; |
1266 | 1283 | ||
1284 | get_online_cpus(); | ||
1267 | for_each_online_cpu(i) { | 1285 | for_each_online_cpu(i) { |
1268 | int rc; | 1286 | smp_call_function_single(i, check_supported_cpu, &ret, 1); |
1269 | smp_call_function_single(i, check_supported_cpu, &rc, 1); | 1287 | if (!ret) |
1270 | if (rc == 0) | ||
1271 | supported_cpus++; | 1288 | supported_cpus++; |
1272 | } | 1289 | } |
1273 | 1290 | ||
1274 | if (supported_cpus != num_online_cpus()) | 1291 | if (supported_cpus != num_online_cpus()) { |
1292 | put_online_cpus(); | ||
1275 | return -ENODEV; | 1293 | return -ENODEV; |
1294 | } | ||
1295 | put_online_cpus(); | ||
1276 | 1296 | ||
1277 | rv = cpufreq_register_driver(&cpufreq_amd64_driver); | 1297 | ret = cpufreq_register_driver(&cpufreq_amd64_driver); |
1298 | if (ret) | ||
1299 | return ret; | ||
1278 | 1300 | ||
1279 | if (!rv) | 1301 | pr_info(PFX "Found %d %s (%d cpu cores) (" VERSION ")\n", |
1280 | pr_info(PFX "Found %d %s (%d cpu cores) (" VERSION ")\n", | 1302 | num_online_nodes(), boot_cpu_data.x86_model_id, supported_cpus); |
1281 | num_online_nodes(), boot_cpu_data.x86_model_id, | ||
1282 | supported_cpus); | ||
1283 | 1303 | ||
1284 | return rv; | 1304 | return ret; |
1285 | } | 1305 | } |
1286 | 1306 | ||
1287 | /* driver entry point for term */ | 1307 | /* driver entry point for term */ |
diff --git a/drivers/cpufreq/spear-cpufreq.c b/drivers/cpufreq/spear-cpufreq.c index 4575cfe41755..7e4d77327957 100644 --- a/drivers/cpufreq/spear-cpufreq.c +++ b/drivers/cpufreq/spear-cpufreq.c | |||
@@ -30,7 +30,7 @@ static struct { | |||
30 | u32 cnt; | 30 | u32 cnt; |
31 | } spear_cpufreq; | 31 | } spear_cpufreq; |
32 | 32 | ||
33 | int spear_cpufreq_verify(struct cpufreq_policy *policy) | 33 | static int spear_cpufreq_verify(struct cpufreq_policy *policy) |
34 | { | 34 | { |
35 | return cpufreq_frequency_table_verify(policy, spear_cpufreq.freq_tbl); | 35 | return cpufreq_frequency_table_verify(policy, spear_cpufreq.freq_tbl); |
36 | } | 36 | } |
@@ -157,7 +157,9 @@ static int spear_cpufreq_target(struct cpufreq_policy *policy, | |||
157 | 157 | ||
158 | freqs.new = newfreq / 1000; | 158 | freqs.new = newfreq / 1000; |
159 | freqs.new /= mult; | 159 | freqs.new /= mult; |
160 | cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); | 160 | |
161 | for_each_cpu(freqs.cpu, policy->cpus) | ||
162 | cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); | ||
161 | 163 | ||
162 | if (mult == 2) | 164 | if (mult == 2) |
163 | ret = spear1340_set_cpu_rate(srcclk, newfreq); | 165 | ret = spear1340_set_cpu_rate(srcclk, newfreq); |
@@ -170,7 +172,8 @@ static int spear_cpufreq_target(struct cpufreq_policy *policy, | |||
170 | freqs.new = clk_get_rate(spear_cpufreq.clk) / 1000; | 172 | freqs.new = clk_get_rate(spear_cpufreq.clk) / 1000; |
171 | } | 173 | } |
172 | 174 | ||
173 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); | 175 | for_each_cpu(freqs.cpu, policy->cpus) |
176 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); | ||
174 | return ret; | 177 | return ret; |
175 | } | 178 | } |
176 | 179 | ||
@@ -188,8 +191,7 @@ static int spear_cpufreq_init(struct cpufreq_policy *policy) | |||
188 | policy->cpuinfo.transition_latency = spear_cpufreq.transition_latency; | 191 | policy->cpuinfo.transition_latency = spear_cpufreq.transition_latency; |
189 | policy->cur = spear_cpufreq_get(0); | 192 | policy->cur = spear_cpufreq_get(0); |
190 | 193 | ||
191 | cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu)); | 194 | cpumask_setall(policy->cpus); |
192 | cpumask_copy(policy->related_cpus, policy->cpus); | ||
193 | 195 | ||
194 | return 0; | 196 | return 0; |
195 | } | 197 | } |