author    Rafael J. Wysocki <rafael.j.wysocki@intel.com>  2013-05-14 09:11:55 -0400
committer Rafael J. Wysocki <rafael.j.wysocki@intel.com>  2013-05-14 09:11:55 -0400
commit    d5a2fa27ecd772b51d93a91209205ec4d6ea2da1 (patch)
tree      2bf1b2308e05ba9566cf61091e72167708d5a365
parent    f722406faae2d073cc1d01063d1123c35425939e (diff)
parent    b57ffac5e57bff33dde3cff35dff5c41876a6d12 (diff)
Merge branch 'pm-cpufreq'
* pm-cpufreq:
  cpufreq / intel_pstate: use vzalloc() instead of vmalloc()/memset(0)
  cpufreq, ondemand: Remove leftover debug line
  cpufreq / kirkwood: don't check resource with devm_ioremap_resource
  cpufreq / intel_pstate: remove #ifdef MODULE compile fence
  cpufreq / intel_pstate: Remove idle mode PID
  cpufreq / intel_pstate: fix ffmpeg regression
  cpufreq / intel_pstate: use lowest requested max performance
  cpufreq / intel_pstate: remove idle time and duration from sample and calculations
  cpufreq: Fix incorrect dependecies for ARM SA11xx drivers
  cpufreq: ARM big LITTLE: Fix Kconfig entries
  cpufreq: cpufreq-cpu0: Free parent node for error cases
  cpufreq: cpufreq-cpu0: defer probe when regulator is not ready
  cpufreq: Issue CPUFREQ_GOV_POLICY_EXIT notifier before dropping policy refcount
  cpufreq: governors: Fix CPUFREQ_GOV_POLICY_{INIT|EXIT} notifiers
  cpufreq: ARM big LITTLE: Improve print message
  cpufreq: ARM big LITTLE: Move cpu_to_cluster() to arm_big_little.h
  cpufreq: ARM big LITTLE DT: Return CPUFREQ_ETERNAL if clock-latency isn't found
  cpufreq: ARM big LITTLE DT: Return correct transition latency
  cpufreq: ARM big LITTLE: Select PM_OPP
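Of the changes merged above, the intel_pstate vzalloc() conversion is the simplest to restate in isolation: vzalloc() returns memory that is already zeroed, so the separate memset(0) call can be dropped. A minimal, hypothetical sketch of that pattern follows (illustrative only, not the driver code; the real hunk is in the intel_pstate diff below):

    #include <linux/vmalloc.h>

    /* Hypothetical helper showing the vmalloc()+memset(0) -> vzalloc() swap. */
    static void **alloc_cpu_ptrs(unsigned int nr_cpus)
    {
            /* Before: ptrs = vmalloc(size); ...; memset(ptrs, 0, size); */
            void **ptrs = vzalloc(sizeof(void *) * nr_cpus);

            return ptrs;    /* NULL on failure, otherwise zero-filled */
    }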
-rw-r--r--  drivers/cpufreq/Kconfig              |   2
-rw-r--r--  drivers/cpufreq/Kconfig.arm          |  15
-rw-r--r--  drivers/cpufreq/arm_big_little.c     |   7
-rw-r--r--  drivers/cpufreq/arm_big_little.h     |   5
-rw-r--r--  drivers/cpufreq/arm_big_little_dt.c  |   9
-rw-r--r--  drivers/cpufreq/cpufreq-cpu0.c       |  27
-rw-r--r--  drivers/cpufreq/cpufreq.c            |   6
-rw-r--r--  drivers/cpufreq/cpufreq_governor.c   |  11
-rw-r--r--  drivers/cpufreq/cpufreq_governor.h   |   1
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c   |   1
-rw-r--r--  drivers/cpufreq/intel_pstate.c       | 122
-rw-r--r--  drivers/cpufreq/kirkwood-cpufreq.c   |   4
12 files changed, 72 insertions(+), 138 deletions(-)
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index a1488f58f6ca..534fcb825153 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -47,7 +47,7 @@ config CPU_FREQ_STAT_DETAILS
 
 choice
         prompt "Default CPUFreq governor"
-        default CPU_FREQ_DEFAULT_GOV_USERSPACE if CPU_FREQ_SA1100 || CPU_FREQ_SA1110
+        default CPU_FREQ_DEFAULT_GOV_USERSPACE if ARM_SA1100_CPUFREQ || ARM_SA1110_CPUFREQ
         default CPU_FREQ_DEFAULT_GOV_PERFORMANCE
         help
           This option sets which CPUFreq governor shall be loaded at
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index f3af18b9acc5..6e57543fe0b9 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -3,16 +3,17 @@
 #
 
 config ARM_BIG_LITTLE_CPUFREQ
-        tristate
-        depends on ARM_CPU_TOPOLOGY
+        tristate "Generic ARM big LITTLE CPUfreq driver"
+        depends on ARM_CPU_TOPOLOGY && PM_OPP && HAVE_CLK
+        help
+          This enables the Generic CPUfreq driver for ARM big.LITTLE platforms.
 
 config ARM_DT_BL_CPUFREQ
-        tristate "Generic ARM big LITTLE CPUfreq driver probed via DT"
-        select ARM_BIG_LITTLE_CPUFREQ
-        depends on OF && HAVE_CLK
+        tristate "Generic probing via DT for ARM big LITTLE CPUfreq driver"
+        depends on ARM_BIG_LITTLE_CPUFREQ && OF
         help
-          This enables the Generic CPUfreq driver for ARM big.LITTLE platform.
-          This gets frequency tables from DT.
+          This enables probing via DT for Generic CPUfreq driver for ARM
+          big.LITTLE platform. This gets frequency tables from DT.
 
 config ARM_EXYNOS_CPUFREQ
         bool "SAMSUNG EXYNOS SoCs"
diff --git a/drivers/cpufreq/arm_big_little.c b/drivers/cpufreq/arm_big_little.c
index dbdf677d2f36..5d7f53fcd6f5 100644
--- a/drivers/cpufreq/arm_big_little.c
+++ b/drivers/cpufreq/arm_big_little.c
@@ -40,11 +40,6 @@ static struct clk *clk[MAX_CLUSTERS];
 static struct cpufreq_frequency_table *freq_table[MAX_CLUSTERS];
 static atomic_t cluster_usage[MAX_CLUSTERS] = {ATOMIC_INIT(0), ATOMIC_INIT(0)};
 
-static int cpu_to_cluster(int cpu)
-{
-        return topology_physical_package_id(cpu);
-}
-
 static unsigned int bL_cpufreq_get(unsigned int cpu)
 {
         u32 cur_cluster = cpu_to_cluster(cpu);
@@ -192,7 +187,7 @@ static int bL_cpufreq_init(struct cpufreq_policy *policy)
 
         cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu));
 
-        dev_info(cpu_dev, "CPU %d initialized\n", policy->cpu);
+        dev_info(cpu_dev, "%s: CPU %d initialized\n", __func__, policy->cpu);
         return 0;
 }
 
diff --git a/drivers/cpufreq/arm_big_little.h b/drivers/cpufreq/arm_big_little.h
index 70f18fc12d4a..79b2ce17884d 100644
--- a/drivers/cpufreq/arm_big_little.h
+++ b/drivers/cpufreq/arm_big_little.h
@@ -34,6 +34,11 @@ struct cpufreq_arm_bL_ops {
         int (*init_opp_table)(struct device *cpu_dev);
 };
 
+static inline int cpu_to_cluster(int cpu)
+{
+        return topology_physical_package_id(cpu);
+}
+
 int bL_cpufreq_register(struct cpufreq_arm_bL_ops *ops);
 void bL_cpufreq_unregister(struct cpufreq_arm_bL_ops *ops);
 
diff --git a/drivers/cpufreq/arm_big_little_dt.c b/drivers/cpufreq/arm_big_little_dt.c
index 44be3115375c..173ed059d95f 100644
--- a/drivers/cpufreq/arm_big_little_dt.c
+++ b/drivers/cpufreq/arm_big_little_dt.c
@@ -66,8 +66,8 @@ static int dt_get_transition_latency(struct device *cpu_dev)
 
         parent = of_find_node_by_path("/cpus");
         if (!parent) {
-                pr_err("failed to find OF /cpus\n");
-                return -ENOENT;
+                pr_info("Failed to find OF /cpus. Use CPUFREQ_ETERNAL transition latency\n");
+                return CPUFREQ_ETERNAL;
         }
 
         for_each_child_of_node(parent, np) {
@@ -78,10 +78,11 @@ static int dt_get_transition_latency(struct device *cpu_dev)
                 of_node_put(np);
                 of_node_put(parent);
 
-                return 0;
+                return transition_latency;
         }
 
-        return -ENODEV;
+        pr_info("clock-latency isn't found, use CPUFREQ_ETERNAL transition latency\n");
+        return CPUFREQ_ETERNAL;
 }
 
 static struct cpufreq_arm_bL_ops dt_bL_ops = {
diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c
index 3ab8294eab04..a64eb8b70444 100644
--- a/drivers/cpufreq/cpufreq-cpu0.c
+++ b/drivers/cpufreq/cpufreq-cpu0.c
@@ -189,12 +189,29 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
 
         if (!np) {
                 pr_err("failed to find cpu0 node\n");
-                return -ENOENT;
+                ret = -ENOENT;
+                goto out_put_parent;
         }
 
         cpu_dev = &pdev->dev;
         cpu_dev->of_node = np;
 
+        cpu_reg = devm_regulator_get(cpu_dev, "cpu0");
+        if (IS_ERR(cpu_reg)) {
+                /*
+                 * If cpu0 regulator supply node is present, but regulator is
+                 * not yet registered, we should try defering probe.
+                 */
+                if (PTR_ERR(cpu_reg) == -EPROBE_DEFER) {
+                        dev_err(cpu_dev, "cpu0 regulator not ready, retry\n");
+                        ret = -EPROBE_DEFER;
+                        goto out_put_node;
+                }
+                pr_warn("failed to get cpu0 regulator: %ld\n",
+                        PTR_ERR(cpu_reg));
+                cpu_reg = NULL;
+        }
+
         cpu_clk = devm_clk_get(cpu_dev, NULL);
         if (IS_ERR(cpu_clk)) {
                 ret = PTR_ERR(cpu_clk);
@@ -202,12 +219,6 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
                 goto out_put_node;
         }
 
-        cpu_reg = devm_regulator_get(cpu_dev, "cpu0");
-        if (IS_ERR(cpu_reg)) {
-                pr_warn("failed to get cpu0 regulator\n");
-                cpu_reg = NULL;
-        }
-
         ret = of_init_opp_table(cpu_dev);
         if (ret) {
                 pr_err("failed to init OPP table: %d\n", ret);
@@ -264,6 +275,8 @@ out_free_table:
         opp_free_cpufreq_table(cpu_dev, &freq_table);
 out_put_node:
         of_node_put(np);
+out_put_parent:
+        of_node_put(parent);
         return ret;
 }
 
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 1b8a48eaf90f..b7acfd153bf9 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1075,14 +1075,14 @@ static int __cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif
                         __func__, cpu_dev->id, cpu);
         }
 
+        if ((cpus == 1) && (cpufreq_driver->target))
+                __cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);
+
         pr_debug("%s: removing link, cpu: %d\n", __func__, cpu);
         cpufreq_cpu_put(data);
 
         /* If cpu is last user of policy, free policy */
         if (cpus == 1) {
-                if (cpufreq_driver->target)
-                        __cpufreq_governor(data, CPUFREQ_GOV_POLICY_EXIT);
-
                 lock_policy_rwsem_read(cpu);
                 kobj = &data->kobj;
                 cmp = &data->kobj_unregister;
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 443442df113b..5af40ad82d23 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -255,6 +255,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                 if (have_governor_per_policy()) {
                         WARN_ON(dbs_data);
                 } else if (dbs_data) {
+                        dbs_data->usage_count++;
                         policy->governor_data = dbs_data;
                         return 0;
                 }
@@ -266,6 +267,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                 }
 
                 dbs_data->cdata = cdata;
+                dbs_data->usage_count = 1;
                 rc = cdata->init(dbs_data);
                 if (rc) {
                         pr_err("%s: POLICY_INIT: init() failed\n", __func__);
@@ -294,7 +296,8 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                 set_sampling_rate(dbs_data, max(dbs_data->min_sampling_rate,
                                         latency * LATENCY_MULTIPLIER));
 
-                if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
+                if ((cdata->governor == GOV_CONSERVATIVE) &&
+                                (!policy->governor->initialized)) {
                         struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
 
                         cpufreq_register_notifier(cs_ops->notifier_block,
@@ -306,12 +309,12 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 
                 return 0;
         case CPUFREQ_GOV_POLICY_EXIT:
-                if ((policy->governor->initialized == 1) ||
-                                have_governor_per_policy()) {
+                if (!--dbs_data->usage_count) {
                         sysfs_remove_group(get_governor_parent_kobj(policy),
                                         get_sysfs_attr(dbs_data));
 
-                        if (dbs_data->cdata->governor == GOV_CONSERVATIVE) {
+                        if ((dbs_data->cdata->governor == GOV_CONSERVATIVE) &&
+                                (policy->governor->initialized == 1)) {
                                 struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
 
                                 cpufreq_unregister_notifier(cs_ops->notifier_block,
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
index 8ac33538d0bd..e16a96130cb3 100644
--- a/drivers/cpufreq/cpufreq_governor.h
+++ b/drivers/cpufreq/cpufreq_governor.h
@@ -211,6 +211,7 @@ struct common_dbs_data {
 struct dbs_data {
         struct common_dbs_data *cdata;
         unsigned int min_sampling_rate;
+        int usage_count;
         void *tuners;
 
         /* dbs_mutex protects dbs_enable in governor start/stop */
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index b0ffef96bf77..4b9bb5def6f1 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -547,7 +547,6 @@ static int od_init(struct dbs_data *dbs_data)
         tuners->io_is_busy = should_io_be_busy();
 
         dbs_data->tuners = tuners;
-        pr_info("%s: tuners %p\n", __func__, tuners);
         mutex_init(&dbs_data->mutex);
         return 0;
 }
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index cc3a8e6c92be..9c36ace92a39 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -48,12 +48,7 @@ static inline int32_t div_fp(int32_t x, int32_t y)
 }
 
 struct sample {
-        ktime_t start_time;
-        ktime_t end_time;
         int core_pct_busy;
-        int pstate_pct_busy;
-        u64 duration_us;
-        u64 idletime_us;
         u64 aperf;
         u64 mperf;
         int freq;
@@ -86,13 +81,9 @@ struct cpudata {
         struct pstate_adjust_policy *pstate_policy;
         struct pstate_data pstate;
         struct _pid pid;
-        struct _pid idle_pid;
 
         int min_pstate_count;
-        int idle_mode;
 
-        ktime_t prev_sample;
-        u64 prev_idle_time_us;
         u64 prev_aperf;
         u64 prev_mperf;
         int sample_ptr;
@@ -124,6 +115,8 @@ struct perf_limits {
         int min_perf_pct;
         int32_t max_perf;
         int32_t min_perf;
+        int max_policy_pct;
+        int max_sysfs_pct;
 };
 
 static struct perf_limits limits = {
@@ -132,6 +125,8 @@ static struct perf_limits limits = {
         .max_perf = int_tofp(1),
         .min_perf_pct = 0,
         .min_perf = 0,
+        .max_policy_pct = 100,
+        .max_sysfs_pct = 100,
 };
 
 static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
@@ -202,19 +197,6 @@ static inline void intel_pstate_busy_pid_reset(struct cpudata *cpu)
                 0);
 }
 
-static inline void intel_pstate_idle_pid_reset(struct cpudata *cpu)
-{
-        pid_p_gain_set(&cpu->idle_pid, cpu->pstate_policy->p_gain_pct);
-        pid_d_gain_set(&cpu->idle_pid, cpu->pstate_policy->d_gain_pct);
-        pid_i_gain_set(&cpu->idle_pid, cpu->pstate_policy->i_gain_pct);
-
-        pid_reset(&cpu->idle_pid,
-                75,
-                50,
-                cpu->pstate_policy->deadband,
-                0);
-}
-
 static inline void intel_pstate_reset_all_pid(void)
 {
         unsigned int cpu;
@@ -302,7 +284,8 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
         if (ret != 1)
                 return -EINVAL;
 
-        limits.max_perf_pct = clamp_t(int, input, 0 , 100);
+        limits.max_sysfs_pct = clamp_t(int, input, 0 , 100);
+        limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
         limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
         return count;
 }
@@ -408,9 +391,8 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
         if (pstate == cpu->pstate.current_pstate)
                 return;
 
-#ifndef MODULE
         trace_cpu_frequency(pstate * 100000, cpu->cpu);
-#endif
+
         cpu->pstate.current_pstate = pstate;
         wrmsrl(MSR_IA32_PERF_CTL, pstate << 8);
 
@@ -450,48 +432,26 @@ static inline void intel_pstate_calc_busy(struct cpudata *cpu,
                                         struct sample *sample)
 {
         u64 core_pct;
-        sample->pstate_pct_busy = 100 - div64_u64(
-                                        sample->idletime_us * 100,
-                                        sample->duration_us);
         core_pct = div64_u64(sample->aperf * 100, sample->mperf);
         sample->freq = cpu->pstate.max_pstate * core_pct * 1000;
 
-        sample->core_pct_busy = div_s64((sample->pstate_pct_busy * core_pct),
-                                        100);
+        sample->core_pct_busy = core_pct;
 }
 
 static inline void intel_pstate_sample(struct cpudata *cpu)
 {
-        ktime_t now;
-        u64 idle_time_us;
         u64 aperf, mperf;
 
-        now = ktime_get();
-        idle_time_us = get_cpu_idle_time_us(cpu->cpu, NULL);
-
         rdmsrl(MSR_IA32_APERF, aperf);
         rdmsrl(MSR_IA32_MPERF, mperf);
-        /* for the first sample, don't actually record a sample, just
-         * set the baseline */
-        if (cpu->prev_idle_time_us > 0) {
-                cpu->sample_ptr = (cpu->sample_ptr + 1) % SAMPLE_COUNT;
-                cpu->samples[cpu->sample_ptr].start_time = cpu->prev_sample;
-                cpu->samples[cpu->sample_ptr].end_time = now;
-                cpu->samples[cpu->sample_ptr].duration_us =
-                        ktime_us_delta(now, cpu->prev_sample);
-                cpu->samples[cpu->sample_ptr].idletime_us =
-                        idle_time_us - cpu->prev_idle_time_us;
-
-                cpu->samples[cpu->sample_ptr].aperf = aperf;
-                cpu->samples[cpu->sample_ptr].mperf = mperf;
-                cpu->samples[cpu->sample_ptr].aperf -= cpu->prev_aperf;
-                cpu->samples[cpu->sample_ptr].mperf -= cpu->prev_mperf;
-
-                intel_pstate_calc_busy(cpu, &cpu->samples[cpu->sample_ptr]);
-        }
+        cpu->sample_ptr = (cpu->sample_ptr + 1) % SAMPLE_COUNT;
+        cpu->samples[cpu->sample_ptr].aperf = aperf;
+        cpu->samples[cpu->sample_ptr].mperf = mperf;
+        cpu->samples[cpu->sample_ptr].aperf -= cpu->prev_aperf;
+        cpu->samples[cpu->sample_ptr].mperf -= cpu->prev_mperf;
+
+        intel_pstate_calc_busy(cpu, &cpu->samples[cpu->sample_ptr]);
 
-        cpu->prev_sample = now;
-        cpu->prev_idle_time_us = idle_time_us;
         cpu->prev_aperf = aperf;
         cpu->prev_mperf = mperf;
 }
@@ -505,16 +465,6 @@ static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
         mod_timer_pinned(&cpu->timer, jiffies + delay);
 }
 
-static inline void intel_pstate_idle_mode(struct cpudata *cpu)
-{
-        cpu->idle_mode = 1;
-}
-
-static inline void intel_pstate_normal_mode(struct cpudata *cpu)
-{
-        cpu->idle_mode = 0;
-}
-
 static inline int intel_pstate_get_scaled_busy(struct cpudata *cpu)
 {
         int32_t busy_scaled;
@@ -547,50 +497,21 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
                 intel_pstate_pstate_decrease(cpu, steps);
 }
 
-static inline void intel_pstate_adjust_idle_pstate(struct cpudata *cpu)
-{
-        int busy_scaled;
-        struct _pid *pid;
-        int ctl = 0;
-        int steps;
-
-        pid = &cpu->idle_pid;
-
-        busy_scaled = intel_pstate_get_scaled_busy(cpu);
-
-        ctl = pid_calc(pid, 100 - busy_scaled);
-
-        steps = abs(ctl);
-        if (ctl < 0)
-                intel_pstate_pstate_decrease(cpu, steps);
-        else
-                intel_pstate_pstate_increase(cpu, steps);
-
-        if (cpu->pstate.current_pstate == cpu->pstate.min_pstate)
-                intel_pstate_normal_mode(cpu);
-}
-
 static void intel_pstate_timer_func(unsigned long __data)
 {
         struct cpudata *cpu = (struct cpudata *) __data;
 
         intel_pstate_sample(cpu);
+        intel_pstate_adjust_busy_pstate(cpu);
 
-        if (!cpu->idle_mode)
-                intel_pstate_adjust_busy_pstate(cpu);
-        else
-                intel_pstate_adjust_idle_pstate(cpu);
-
-#if defined(XPERF_FIX)
         if (cpu->pstate.current_pstate == cpu->pstate.min_pstate) {
                 cpu->min_pstate_count++;
                 if (!(cpu->min_pstate_count % 5)) {
                         intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate);
-                        intel_pstate_idle_mode(cpu);
                 }
         } else
                 cpu->min_pstate_count = 0;
-#endif
+
         intel_pstate_set_sample_time(cpu);
 }
 
@@ -631,7 +552,6 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
                 (unsigned long)cpu;
         cpu->timer.expires = jiffies + HZ/100;
         intel_pstate_busy_pid_reset(cpu);
-        intel_pstate_idle_pid_reset(cpu);
         intel_pstate_sample(cpu);
         intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate);
 
@@ -675,8 +595,9 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
         limits.min_perf_pct = clamp_t(int, limits.min_perf_pct, 0 , 100);
         limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
 
-        limits.max_perf_pct = policy->max * 100 / policy->cpuinfo.max_freq;
-        limits.max_perf_pct = clamp_t(int, limits.max_perf_pct, 0 , 100);
+        limits.max_policy_pct = policy->max * 100 / policy->cpuinfo.max_freq;
+        limits.max_policy_pct = clamp_t(int, limits.max_policy_pct, 0 , 100);
+        limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
         limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
 
         return 0;
@@ -788,10 +709,9 @@ static int __init intel_pstate_init(void)
 
         pr_info("Intel P-state driver initializing.\n");
 
-        all_cpu_data = vmalloc(sizeof(void *) * num_possible_cpus());
+        all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus());
         if (!all_cpu_data)
                 return -ENOMEM;
-        memset(all_cpu_data, 0, sizeof(void *) * num_possible_cpus());
 
         rc = cpufreq_register_driver(&intel_pstate_driver);
         if (rc)
diff --git a/drivers/cpufreq/kirkwood-cpufreq.c b/drivers/cpufreq/kirkwood-cpufreq.c
index d36ea8dc96eb..b2644af985ec 100644
--- a/drivers/cpufreq/kirkwood-cpufreq.c
+++ b/drivers/cpufreq/kirkwood-cpufreq.c
@@ -171,10 +171,6 @@ static int kirkwood_cpufreq_probe(struct platform_device *pdev)
         priv.dev = &pdev->dev;
 
         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-        if (!res) {
-                dev_err(&pdev->dev, "Cannot get memory resource\n");
-                return -ENODEV;
-        }
         priv.base = devm_ioremap_resource(&pdev->dev, res);
         if (IS_ERR(priv.base))
                 return PTR_ERR(priv.base);