author     Rafael J. Wysocki <rafael.j.wysocki@intel.com>   2013-08-30 08:16:03 -0400
committer  Rafael J. Wysocki <rafael.j.wysocki@intel.com>   2013-08-30 08:16:03 -0400
commit     f41b83126cba53849dd2353476a7715613af648f (patch)
tree       582b81ee2f4a8de39db7c58e12dd09a57f62b208
parent     499aa70a016591e4db5f51745e554aae4471c12a (diff)
parent     6932078376e2c1fd49b6c4aa41cc5e162ee83d8a (diff)
Merge branch 'pm-cpufreq'
* pm-cpufreq:
cpufreq: Don't use smp_processor_id() in preemptible context
cpufreq: governor: Fix typos in comments
cpufreq: governors: Remove duplicate check of target freq in supported range
cpufreq: Fix timer/workqueue corruption due to double queueing
cpufreq: imx6q: Fix clock enable balance
cpufreq: tegra: fix the wrong clock name
-rw-r--r--  drivers/cpufreq/cpufreq_conservative.c |  4
-rw-r--r--  drivers/cpufreq/cpufreq_governor.c     | 14
-rw-r--r--  drivers/cpufreq/cpufreq_governor.h     | 12
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c     |  3
-rw-r--r--  drivers/cpufreq/imx6q-cpufreq.c        | 17
-rw-r--r--  drivers/cpufreq/tegra-cpufreq.c        |  4
6 files changed, 20 insertions(+), 34 deletions(-)
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 7f67a75b3c3c..f62d822048e6 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -67,8 +67,6 @@ static void cs_check_cpu(int cpu, unsigned int load)
 			return;
 
 		dbs_info->requested_freq += get_freq_target(cs_tuners, policy);
-		if (dbs_info->requested_freq > policy->max)
-			dbs_info->requested_freq = policy->max;
 
 		__cpufreq_driver_target(policy, dbs_info->requested_freq,
 			CPUFREQ_RELATION_H);
@@ -89,8 +87,6 @@ static void cs_check_cpu(int cpu, unsigned int load)
 			return;
 
 		dbs_info->requested_freq -= get_freq_target(cs_tuners, policy);
-		if (dbs_info->requested_freq < policy->min)
-			dbs_info->requested_freq = policy->min;
 
 		__cpufreq_driver_target(policy, dbs_info->requested_freq,
 			CPUFREQ_RELATION_L);
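Note on the conservative (and ondemand) change: the governor-side clamping removed here duplicates a bound check that the cpufreq core applies anyway before calling into the driver. A minimal sketch of that core-side check, paraphrased rather than copied from cpufreq.c:

	/*
	 * Sketch only (paraphrase of the core-side check in
	 * __cpufreq_driver_target(), not verbatim kernel code): the requested
	 * frequency is bounded to the policy limits before the driver sees it,
	 * so clamping again in the governor adds nothing.
	 */
	if (target_freq > policy->max)
		target_freq = policy->max;
	if (target_freq < policy->min)
		target_freq = policy->min;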
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 87427360c77f..0806c31e5764 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -119,8 +119,18 @@ void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy,
 {
 	int i;
 
+	if (!policy->governor_enabled)
+		return;
+
 	if (!all_cpus) {
-		__gov_queue_work(smp_processor_id(), dbs_data, delay);
+		/*
+		 * Use raw_smp_processor_id() to avoid preemptible warnings.
+		 * We know that this is only called with all_cpus == false from
+		 * works that have been queued with *_work_on() functions and
+		 * those works are canceled during CPU_DOWN_PREPARE so they
+		 * can't possibly run on any other CPU.
+		 */
+		__gov_queue_work(raw_smp_processor_id(), dbs_data, delay);
 	} else {
 		for_each_cpu(i, policy->cpus)
 			__gov_queue_work(i, dbs_data, delay);
@@ -230,7 +240,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 
 	policy->governor_data = dbs_data;
 
-	/* policy latency is in nS. Convert it to uS first */
+	/* policy latency is in ns. Convert it to us first */
 	latency = policy->cpuinfo.transition_latency / 1000;
 	if (latency == 0)
 		latency = 1;
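Two of the listed fixes land in this file: the governor_enabled bail-out stops gov_queue_work() from re-queueing a delayed work that the governor-stop path has already cancelled (the timer/workqueue double-queueing corruption), and the raw_smp_processor_id() switch avoids the warning that the checked accessor emits under CONFIG_DEBUG_PREEMPT when called from preemptible context. A minimal sketch of the difference between the two accessors; the helper name is hypothetical and not part of this patch:

	#include <linux/preempt.h>
	#include <linux/printk.h>
	#include <linux/smp.h>

	/*
	 * Sketch only: smp_processor_id() is the checked accessor and warns
	 * (with CONFIG_DEBUG_PREEMPT) when the caller is preemptible, since
	 * the task could migrate and the value go stale.
	 * raw_smp_processor_id() skips the check; it is acceptable when the
	 * caller knows it cannot run on another CPU, as gov_queue_work() does
	 * for works queued with the *_work_on() helpers.
	 */
	static void example_cpu_id_usage(void)
	{
		unsigned int cpu;

		preempt_disable();
		cpu = smp_processor_id();	/* safe: preemption is off */
		pr_debug("running on CPU %u\n", cpu);
		preempt_enable();

		cpu = raw_smp_processor_id();	/* unchecked; caller guarantees pinning */
		pr_debug("still expected on CPU %u\n", cpu);
	}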
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
index a02d78b25898..88cd39f7b0e9 100644
--- a/drivers/cpufreq/cpufreq_governor.h
+++ b/drivers/cpufreq/cpufreq_governor.h
@@ -25,11 +25,11 @@
 /*
  * The polling frequency depends on the capability of the processor. Default
  * polling frequency is 1000 times the transition latency of the processor. The
- * governor will work on any processor with transition latency <= 10mS, using
+ * governor will work on any processor with transition latency <= 10ms, using
  * appropriate sampling rate.
  *
- * For CPUs with transition latency > 10mS (mostly drivers with CPUFREQ_ETERNAL)
- * this governor will not work. All times here are in uS.
+ * For CPUs with transition latency > 10ms (mostly drivers with CPUFREQ_ETERNAL)
+ * this governor will not work. All times here are in us (micro seconds).
  */
 #define MIN_SAMPLING_RATE_RATIO		(2)
 #define LATENCY_MULTIPLIER		(1000)
@@ -162,7 +162,7 @@ struct cs_cpu_dbs_info_s {
 	unsigned int enable:1;
 };
 
-/* Per policy Governers sysfs tunables */
+/* Per policy Governors sysfs tunables */
 struct od_dbs_tuners {
 	unsigned int ignore_nice_load;
 	unsigned int sampling_rate;
@@ -181,7 +181,7 @@ struct cs_dbs_tuners {
 	unsigned int freq_step;
 };
 
-/* Common Governer data across policies */
+/* Common Governor data across policies */
 struct dbs_data;
 struct common_dbs_data {
 	/* Common across governors */
@@ -205,7 +205,7 @@ struct common_dbs_data {
 	void *gov_ops;
 };
 
-/* Governer Per policy data */
+/* Governor Per policy data */
 struct dbs_data {
 	struct common_dbs_data *cdata;
 	unsigned int min_sampling_rate;
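A concrete reading of the comment fixed above: with LATENCY_MULTIPLIER set to 1000 and all governor times kept in microseconds, the default sampling period follows directly from the hardware transition latency. The values below are illustrative only, not taken from the patch:

	/* Worked example (illustrative numbers): a 10,000 ns transition latency
	 * gives a 10 ms default sampling period, i.e. the governor re-evaluates
	 * the load roughly 100 times per second. */
	unsigned int transition_latency_ns = 10000;			/* from policy->cpuinfo */
	unsigned int latency_us = transition_latency_ns / 1000;	/* ns -> us, = 10 */
	unsigned int sampling_rate_us = latency_us * LATENCY_MULTIPLIER; /* 10 * 1000 = 10,000 us */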
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 87f3305e80a6..32f26f6e17c5 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -177,9 +177,6 @@ static void od_check_cpu(int cpu, unsigned int load)
 		/* No longer fully busy, reset rate_mult */
 		dbs_info->rate_mult = 1;
 
-		if (freq_next < policy->min)
-			freq_next = policy->min;
-
 		if (!od_tuners->powersave_bias) {
 			__cpufreq_driver_target(policy, freq_next,
 					CPUFREQ_RELATION_L);
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index b16632bb5a56..3e396543aea4 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -117,28 +117,11 @@ static int imx6q_set_target(struct cpufreq_policy *policy,
 	 *  - Reprogram pll1_sys_clk and reparent pll1_sw_clk back to it
 	 *  - Disable pll2_pfd2_396m_clk
 	 */
-	clk_prepare_enable(pll2_pfd2_396m_clk);
 	clk_set_parent(step_clk, pll2_pfd2_396m_clk);
 	clk_set_parent(pll1_sw_clk, step_clk);
 	if (freq_hz > clk_get_rate(pll2_pfd2_396m_clk)) {
 		clk_set_rate(pll1_sys_clk, freqs.new * 1000);
-		/*
-		 * If we are leaving 396 MHz set-point, we need to enable
-		 * pll1_sys_clk and disable pll2_pfd2_396m_clk to keep
-		 * their use count correct.
-		 */
-		if (freqs.old * 1000 <= clk_get_rate(pll2_pfd2_396m_clk)) {
-			clk_prepare_enable(pll1_sys_clk);
-			clk_disable_unprepare(pll2_pfd2_396m_clk);
-		}
 		clk_set_parent(pll1_sw_clk, pll1_sys_clk);
-		clk_disable_unprepare(pll2_pfd2_396m_clk);
-	} else {
-		/*
-		 * Disable pll1_sys_clk if pll2_pfd2_396m_clk is sufficient
-		 * to provide the frequency.
-		 */
-		clk_disable_unprepare(pll1_sys_clk);
 	}
 
 	/* Ensure the arm clock divider is what we expect */
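The imx6q change drops the per-transition enable/disable calls whose prepare/enable counts could drift out of balance across set-point changes; in the common clock framework every clk_prepare_enable() must be matched by exactly one clk_disable_unprepare(), otherwise a PLL is either left running forever or switched off under an active user. A minimal sketch of the balanced pattern, with a hypothetical clock and helper name:

	#include <linux/clk.h>

	/* Sketch only: balanced use of the clk enable API (hypothetical clock). */
	static int example_run_on_pll(struct clk *pll)
	{
		int ret;

		ret = clk_prepare_enable(pll);	/* prepare/enable count +1 */
		if (ret)
			return ret;

		/* ... do work with the PLL as the active source ... */

		clk_disable_unprepare(pll);	/* count -1, back to the caller's state */
		return 0;
	}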
diff --git a/drivers/cpufreq/tegra-cpufreq.c b/drivers/cpufreq/tegra-cpufreq.c
index cd66b85d927c..a7b876fdc1d8 100644
--- a/drivers/cpufreq/tegra-cpufreq.c
+++ b/drivers/cpufreq/tegra-cpufreq.c
@@ -255,7 +255,7 @@ static struct cpufreq_driver tegra_cpufreq_driver = {
 
 static int __init tegra_cpufreq_init(void)
 {
-	cpu_clk = clk_get_sys(NULL, "cpu");
+	cpu_clk = clk_get_sys(NULL, "cclk");
 	if (IS_ERR(cpu_clk))
 		return PTR_ERR(cpu_clk);
 
@@ -263,7 +263,7 @@ static int __init tegra_cpufreq_init(void)
 	if (IS_ERR(pll_x_clk))
 		return PTR_ERR(pll_x_clk);
 
-	pll_p_clk = clk_get_sys(NULL, "pll_p_cclk");
+	pll_p_clk = clk_get_sys(NULL, "pll_p");
 	if (IS_ERR(pll_p_clk))
 		return PTR_ERR(pll_p_clk);
 