-rw-r--r--  drivers/cpufreq/cpufreq.c           |  74
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c  |  11
-rw-r--r--  drivers/cpufreq/freq_table.c        |  12
-rw-r--r--  drivers/cpufreq/imx6q-cpufreq.c     |  35
-rw-r--r--  drivers/cpufreq/intel_pstate.c      | 131
-rw-r--r--  drivers/cpufreq/powernow-k6.c       |   1
-rw-r--r--  include/linux/cpufreq.h             |   1
7 files changed, 127 insertions(+), 138 deletions(-)
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 6f024852c6fb..d9fdeddcef96 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1076,10 +1076,20 @@ static void cpufreq_policy_free(struct cpufreq_policy *policy)
 	kfree(policy);
 }
 
-static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
+static int update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu,
+			     struct device *cpu_dev)
 {
+	int ret;
+
 	if (WARN_ON(cpu == policy->cpu))
-		return;
+		return 0;
+
+	/* Move kobject to the new policy->cpu */
+	ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
+	if (ret) {
+		pr_err("%s: Failed to move kobj: %d\n", __func__, ret);
+		return ret;
+	}
 
 	down_write(&policy->rwsem);
 
@@ -1090,6 +1100,8 @@ static void update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
 
 	blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
 			CPUFREQ_UPDATE_POLICY_CPU, policy);
+
+	return 0;
 }
 
 static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1095static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif) 1107static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
@@ -1153,12 +1165,10 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
 	 * the creation of a brand new one. So we need to perform this update
 	 * by invoking update_policy_cpu().
 	 */
-	if (recover_policy && cpu != policy->cpu) {
-		update_policy_cpu(policy, cpu);
-		WARN_ON(kobject_move(&policy->kobj, &dev->kobj));
-	} else {
+	if (recover_policy && cpu != policy->cpu)
+		WARN_ON(update_policy_cpu(policy, cpu, dev));
+	else
 		policy->cpu = cpu;
-	}
 
 	cpumask_copy(policy->cpus, cpumask_of(cpu));
 
@@ -1309,38 +1319,11 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
 	return __cpufreq_add_dev(dev, sif);
 }
 
-static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
-					   unsigned int old_cpu)
-{
-	struct device *cpu_dev;
-	int ret;
-
-	/* first sibling now owns the new sysfs dir */
-	cpu_dev = get_cpu_device(cpumask_any_but(policy->cpus, old_cpu));
-
-	sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
-	ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
-	if (ret) {
-		pr_err("%s: Failed to move kobj: %d\n", __func__, ret);
-
-		down_write(&policy->rwsem);
-		cpumask_set_cpu(old_cpu, policy->cpus);
-		up_write(&policy->rwsem);
-
-		ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
-					"cpufreq");
-
-		return -EINVAL;
-	}
-
-	return cpu_dev->id;
-}
-
 static int __cpufreq_remove_dev_prepare(struct device *dev,
 					struct subsys_interface *sif)
 {
 	unsigned int cpu = dev->id, cpus;
-	int new_cpu, ret;
+	int ret;
 	unsigned long flags;
 	struct cpufreq_policy *policy;
 
@@ -1380,14 +1363,23 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
 	if (cpu != policy->cpu) {
 		sysfs_remove_link(&dev->kobj, "cpufreq");
 	} else if (cpus > 1) {
-		new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu);
-		if (new_cpu >= 0) {
-			update_policy_cpu(policy, new_cpu);
-
-			if (!cpufreq_suspended)
-				pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
-					 __func__, new_cpu, cpu);
+		/* Nominate new CPU */
+		int new_cpu = cpumask_any_but(policy->cpus, cpu);
+		struct device *cpu_dev = get_cpu_device(new_cpu);
+
+		sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
+		ret = update_policy_cpu(policy, new_cpu, cpu_dev);
+		if (ret) {
+			if (sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
+					      "cpufreq"))
+				pr_err("%s: Failed to restore kobj link to cpu:%d\n",
+				       __func__, cpu_dev->id);
+			return ret;
 		}
+
+		if (!cpufreq_suspended)
+			pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
+				 __func__, new_cpu, cpu);
 	} else if (cpufreq_driver->stop_cpu && cpufreq_driver->setpolicy) {
 		cpufreq_driver->stop_cpu(policy);
 	}
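
A note on the consolidation above: update_policy_cpu() now owns the kobject_move() and reports its result, so each caller decides how to recover. A caller-side sketch of the contract (migrate_policy() is hypothetical, not part of the patch; on failure the kobject stays on the old CPU, so the sysfs link removed in anticipation of the move must be restored):

	/* Sketch only: expected usage of the reworked helper. */
	static int migrate_policy(struct cpufreq_policy *policy, unsigned int new_cpu)
	{
		struct device *cpu_dev = get_cpu_device(new_cpu);
		int ret;

		sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
		ret = update_policy_cpu(policy, new_cpu, cpu_dev);
		if (ret)	/* move failed: put the link back */
			sysfs_create_link(&cpu_dev->kobj, &policy->kobj, "cpufreq");
		return ret;
	}
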
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 18d409189092..ad3f38fd3eb9 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -170,21 +170,24 @@ static void od_check_cpu(int cpu, unsigned int load)
 		dbs_freq_increase(policy, policy->max);
 	} else {
 		/* Calculate the next frequency proportional to load */
-		unsigned int freq_next;
-		freq_next = load * policy->cpuinfo.max_freq / 100;
+		unsigned int freq_next, min_f, max_f;
+
+		min_f = policy->cpuinfo.min_freq;
+		max_f = policy->cpuinfo.max_freq;
+		freq_next = min_f + load * (max_f - min_f) / 100;
 
 		/* No longer fully busy, reset rate_mult */
 		dbs_info->rate_mult = 1;
 
 		if (!od_tuners->powersave_bias) {
 			__cpufreq_driver_target(policy, freq_next,
-					CPUFREQ_RELATION_L);
+					CPUFREQ_RELATION_C);
 			return;
 		}
 
 		freq_next = od_ops.powersave_bias_target(policy, freq_next,
 					CPUFREQ_RELATION_L);
-		__cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_L);
+		__cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_C);
 	}
 }
 
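
The new formula maps load onto the [min_freq, max_freq] span instead of scaling against max_freq alone, so low loads no longer request frequencies below the hardware minimum. A standalone sketch with assumed example numbers (800 MHz min, 2 GHz max, 50% load):

	#include <stdio.h>

	/* Sketch: old vs. new ondemand target-frequency formulas. */
	int main(void)
	{
		unsigned int min_f = 800000, max_f = 2000000;	/* kHz, assumed */
		unsigned int load = 50;				/* percent */

		unsigned int old_next = load * max_f / 100;
		unsigned int new_next = min_f + load * (max_f - min_f) / 100;

		printf("old: %u kHz, new: %u kHz\n", old_next, new_next);
		return 0;
	}

With these numbers the old formula asks for 1 GHz, the new one for 1.4 GHz; the switch to CPUFREQ_RELATION_C (added below) then picks the table entry closest to that value rather than rounding up.
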
diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c
index 1632981c4b25..df14766a8e06 100644
--- a/drivers/cpufreq/freq_table.c
+++ b/drivers/cpufreq/freq_table.c
@@ -117,7 +117,7 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
 		.frequency = 0,
 	};
 	struct cpufreq_frequency_table *pos;
-	unsigned int freq, i = 0;
+	unsigned int freq, diff, i = 0;
 
 	pr_debug("request for target %u kHz (relation: %u) for cpu %u\n",
 		 target_freq, relation, policy->cpu);
@@ -127,6 +127,7 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
 		suboptimal.frequency = ~0;
 		break;
 	case CPUFREQ_RELATION_L:
+	case CPUFREQ_RELATION_C:
 		optimal.frequency = ~0;
 		break;
 	}
@@ -168,6 +169,15 @@ int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
 				}
 			}
 			break;
+		case CPUFREQ_RELATION_C:
+			diff = abs(freq - target_freq);
+			if (diff < optimal.frequency ||
+			    (diff == optimal.frequency &&
+			     freq > table[optimal.driver_data].frequency)) {
+				optimal.frequency = diff;
+				optimal.driver_data = i;
+			}
+			break;
 		}
 	}
 	if (optimal.driver_data > i) {
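
For illustration, a minimal userspace model of the CPUFREQ_RELATION_C rule added above, over an assumed three-entry table; the tie-break prefers the higher frequency, mirroring the `freq > table[optimal.driver_data].frequency` test:

	#include <stdio.h>

	/* Sketch: index of the entry closest to target, higher wins a tie. */
	static int closest(const unsigned int *table, int n, unsigned int target)
	{
		unsigned int best_diff = ~0u;
		int i, best = 0;

		for (i = 0; i < n; i++) {
			unsigned int diff = table[i] > target ?
					table[i] - target : target - table[i];
			if (diff < best_diff ||
			    (diff == best_diff && table[i] > table[best])) {
				best_diff = diff;
				best = i;
			}
		}
		return best;
	}

	int main(void)
	{
		unsigned int table[] = { 400000, 800000, 1200000 };	/* kHz, assumed */

		printf("%u\n", table[closest(table, 3, 900000)]);	/* 800000 */
		printf("%u\n", table[closest(table, 3, 1000000)]);	/* 1200000 (tie) */
		return 0;
	}
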
diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c
index af366c21d4b4..c2d30765bf3d 100644
--- a/drivers/cpufreq/imx6q-cpufreq.c
+++ b/drivers/cpufreq/imx6q-cpufreq.c
@@ -66,10 +66,12 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
 
 	/* scaling up? scale voltage before frequency */
 	if (new_freq > old_freq) {
-		ret = regulator_set_voltage_tol(pu_reg, imx6_soc_volt[index], 0);
-		if (ret) {
-			dev_err(cpu_dev, "failed to scale vddpu up: %d\n", ret);
-			return ret;
+		if (!IS_ERR(pu_reg)) {
+			ret = regulator_set_voltage_tol(pu_reg, imx6_soc_volt[index], 0);
+			if (ret) {
+				dev_err(cpu_dev, "failed to scale vddpu up: %d\n", ret);
+				return ret;
+			}
 		}
 		ret = regulator_set_voltage_tol(soc_reg, imx6_soc_volt[index], 0);
 		if (ret) {
@@ -121,10 +123,12 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index)
 		dev_warn(cpu_dev, "failed to scale vddsoc down: %d\n", ret);
 		ret = 0;
 	}
-	ret = regulator_set_voltage_tol(pu_reg, imx6_soc_volt[index], 0);
-	if (ret) {
-		dev_warn(cpu_dev, "failed to scale vddpu down: %d\n", ret);
-		ret = 0;
+	if (!IS_ERR(pu_reg)) {
+		ret = regulator_set_voltage_tol(pu_reg, imx6_soc_volt[index], 0);
+		if (ret) {
+			dev_warn(cpu_dev, "failed to scale vddpu down: %d\n", ret);
+			ret = 0;
+		}
 	}
 }
 
@@ -182,9 +186,9 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev)
 	}
 
 	arm_reg = regulator_get(cpu_dev, "arm");
-	pu_reg = regulator_get(cpu_dev, "pu");
+	pu_reg = regulator_get_optional(cpu_dev, "pu");
 	soc_reg = regulator_get(cpu_dev, "soc");
-	if (IS_ERR(arm_reg) || IS_ERR(pu_reg) || IS_ERR(soc_reg)) {
+	if (IS_ERR(arm_reg) || IS_ERR(soc_reg)) {
 		dev_err(cpu_dev, "failed to get regulators\n");
 		ret = -ENOENT;
 		goto put_reg;
@@ -268,9 +272,11 @@ soc_opp_out:
 	ret = regulator_set_voltage_time(soc_reg, imx6_soc_volt[0], imx6_soc_volt[num - 1]);
 	if (ret > 0)
 		transition_latency += ret * 1000;
-	ret = regulator_set_voltage_time(pu_reg, imx6_soc_volt[0], imx6_soc_volt[num - 1]);
-	if (ret > 0)
-		transition_latency += ret * 1000;
+	if (!IS_ERR(pu_reg)) {
+		ret = regulator_set_voltage_time(pu_reg, imx6_soc_volt[0], imx6_soc_volt[num - 1]);
+		if (ret > 0)
+			transition_latency += ret * 1000;
+	}
 
 	/*
 	 * OPP is maintained in order of increasing frequency, and
@@ -327,7 +333,8 @@ static int imx6q_cpufreq_remove(struct platform_device *pdev)
 	cpufreq_unregister_driver(&imx6q_cpufreq_driver);
 	dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
 	regulator_put(arm_reg);
-	regulator_put(pu_reg);
+	if (!IS_ERR(pu_reg))
+		regulator_put(pu_reg);
 	regulator_put(soc_reg);
 	clk_put(arm_clk);
 	clk_put(pll1_sys_clk);
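
regulator_get_optional() returns an ERR_PTR when the "pu" supply is simply not described, so probe no longer fails on SoC variants without a separate VDDPU rail; the cost is that every later use of pu_reg must be IS_ERR()-guarded, as the hunks above do. A condensed sketch of the pattern (scale_pu() is illustrative only, not in the patch):

	/* Sketch: the optional-supply pattern used throughout the driver.
	 * pu_reg may hold an ERR_PTR for the whole driver lifetime. */
	static int scale_pu(struct regulator *pu_reg, int uV)
	{
		if (IS_ERR(pu_reg))	/* supply not present on this SoC */
			return 0;
		return regulator_set_voltage_tol(pu_reg, uV, 0);
	}
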
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 86631cb6f7de..c5eac949760d 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -37,7 +37,6 @@
 #define BYT_TURBO_RATIOS	0x66c
 #define BYT_TURBO_VIDS		0x66d
 
-
 #define FRAC_BITS 8
 #define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
 #define fp_toint(X) ((X) >> FRAC_BITS)
@@ -50,7 +49,7 @@ static inline int32_t mul_fp(int32_t x, int32_t y)
 
 static inline int32_t div_fp(int32_t x, int32_t y)
 {
-	return div_s64((int64_t)x << FRAC_BITS, (int64_t)y);
+	return div_s64((int64_t)x << FRAC_BITS, y);
 }
 
 struct sample {
@@ -148,7 +147,7 @@ static struct perf_limits limits = {
 };
 
 static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
-		int deadband, int integral) {
+			     int deadband, int integral) {
 	pid->setpoint = setpoint;
 	pid->deadband = deadband;
 	pid->integral = int_tofp(integral);
@@ -167,7 +166,6 @@ static inline void pid_i_gain_set(struct _pid *pid, int percent)
 
 static inline void pid_d_gain_set(struct _pid *pid, int percent)
 {
-
 	pid->d_gain = div_fp(int_tofp(percent), int_tofp(100));
 }
 
@@ -207,16 +205,13 @@ static inline void intel_pstate_busy_pid_reset(struct cpudata *cpu)
 	pid_d_gain_set(&cpu->pid, pid_params.d_gain_pct);
 	pid_i_gain_set(&cpu->pid, pid_params.i_gain_pct);
 
-	pid_reset(&cpu->pid,
-		pid_params.setpoint,
-		100,
-		pid_params.deadband,
-		0);
+	pid_reset(&cpu->pid, pid_params.setpoint, 100, pid_params.deadband, 0);
 }
 
 static inline void intel_pstate_reset_all_pid(void)
 {
 	unsigned int cpu;
+
 	for_each_online_cpu(cpu) {
 		if (all_cpu_data[cpu])
 			intel_pstate_busy_pid_reset(all_cpu_data[cpu]);
@@ -230,13 +225,13 @@ static int pid_param_set(void *data, u64 val)
 	intel_pstate_reset_all_pid();
 	return 0;
 }
+
 static int pid_param_get(void *data, u64 *val)
 {
 	*val = *(u32 *)data;
 	return 0;
 }
-DEFINE_SIMPLE_ATTRIBUTE(fops_pid_param, pid_param_get,
-			pid_param_set, "%llu\n");
+DEFINE_SIMPLE_ATTRIBUTE(fops_pid_param, pid_param_get, pid_param_set, "%llu\n");
 
 struct pid_param {
 	char *name;
@@ -253,9 +248,9 @@ static struct pid_param pid_files[] = {
 	{NULL, NULL}
 };
 
-static struct dentry *debugfs_parent;
-static void intel_pstate_debug_expose_params(void)
+static void __init intel_pstate_debug_expose_params(void)
 {
+	struct dentry *debugfs_parent;
 	int i = 0;
 
 	debugfs_parent = debugfs_create_dir("pstate_snb", NULL);
@@ -263,8 +258,8 @@ static void intel_pstate_debug_expose_params(void)
 		return;
 	while (pid_files[i].name) {
 		debugfs_create_file(pid_files[i].name, 0660,
-				debugfs_parent, pid_files[i].value,
-				&fops_pid_param);
+				    debugfs_parent, pid_files[i].value,
+				    &fops_pid_param);
 		i++;
 	}
 }
@@ -280,10 +275,11 @@ static void intel_pstate_debug_expose_params(void)
 }
 
 static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
-				const char *buf, size_t count)
+			      const char *buf, size_t count)
 {
 	unsigned int input;
 	int ret;
+
 	ret = sscanf(buf, "%u", &input);
 	if (ret != 1)
 		return -EINVAL;
@@ -296,10 +292,11 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
 }
 
 static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
-				const char *buf, size_t count)
+				  const char *buf, size_t count)
 {
 	unsigned int input;
 	int ret;
+
 	ret = sscanf(buf, "%u", &input);
 	if (ret != 1)
 		return -EINVAL;
@@ -307,14 +304,16 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
 	limits.max_sysfs_pct = clamp_t(int, input, 0 , 100);
 	limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
 	limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
+
 	return count;
 }
 
 static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
-				const char *buf, size_t count)
+				  const char *buf, size_t count)
 {
 	unsigned int input;
 	int ret;
+
 	ret = sscanf(buf, "%u", &input);
 	if (ret != 1)
 		return -EINVAL;
@@ -342,17 +341,16 @@ static struct attribute *intel_pstate_attributes[] = {
 static struct attribute_group intel_pstate_attr_group = {
 	.attrs = intel_pstate_attributes,
 };
-static struct kobject *intel_pstate_kobject;
 
-static void intel_pstate_sysfs_expose_params(void)
+static void __init intel_pstate_sysfs_expose_params(void)
 {
+	struct kobject *intel_pstate_kobject;
 	int rc;
 
 	intel_pstate_kobject = kobject_create_and_add("intel_pstate",
 						&cpu_subsys.dev_root->kobj);
 	BUG_ON(!intel_pstate_kobject);
-	rc = sysfs_create_group(intel_pstate_kobject,
-				&intel_pstate_attr_group);
+	rc = sysfs_create_group(intel_pstate_kobject, &intel_pstate_attr_group);
 	BUG_ON(rc);
 }
 
@@ -360,6 +358,7 @@ static void intel_pstate_sysfs_expose_params(void)
 static int byt_get_min_pstate(void)
 {
 	u64 value;
+
 	rdmsrl(BYT_RATIOS, value);
 	return (value >> 8) & 0x7F;
 }
@@ -367,6 +366,7 @@ static int byt_get_min_pstate(void)
 static int byt_get_max_pstate(void)
 {
 	u64 value;
+
 	rdmsrl(BYT_RATIOS, value);
 	return (value >> 16) & 0x7F;
 }
@@ -374,6 +374,7 @@ static int byt_get_max_pstate(void)
 static int byt_get_turbo_pstate(void)
 {
 	u64 value;
+
 	rdmsrl(BYT_TURBO_RATIOS, value);
 	return value & 0x7F;
 }
@@ -407,7 +408,6 @@ static void byt_get_vid(struct cpudata *cpudata)
 {
 	u64 value;
 
-
 	rdmsrl(BYT_VIDS, value);
 	cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
 	cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
@@ -420,10 +420,10 @@ static void byt_get_vid(struct cpudata *cpudata)
 	cpudata->vid.turbo = value & 0x7f;
 }
 
-
 static int core_get_min_pstate(void)
 {
 	u64 value;
+
 	rdmsrl(MSR_PLATFORM_INFO, value);
 	return (value >> 40) & 0xFF;
 }
@@ -431,6 +431,7 @@ static int core_get_min_pstate(void)
 static int core_get_max_pstate(void)
 {
 	u64 value;
+
 	rdmsrl(MSR_PLATFORM_INFO, value);
 	return (value >> 8) & 0xFF;
 }
@@ -439,9 +440,10 @@ static int core_get_turbo_pstate(void)
 {
 	u64 value;
 	int nont, ret;
+
 	rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value);
 	nont = core_get_max_pstate();
-	ret = ((value) & 255);
+	ret = (value) & 255;
 	if (ret <= nont)
 		ret = nont;
 	return ret;
@@ -493,12 +495,12 @@ static struct cpu_defaults byt_params = {
 	},
 };
 
-
 static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
 {
 	int max_perf = cpu->pstate.turbo_pstate;
 	int max_perf_adj;
 	int min_perf;
+
 	if (limits.no_turbo)
 		max_perf = cpu->pstate.max_pstate;
 
@@ -507,8 +509,7 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
 		 cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);
 
 	min_perf = fp_toint(mul_fp(int_tofp(max_perf), limits.min_perf));
-	*min = clamp_t(int, min_perf,
-			cpu->pstate.min_pstate, max_perf);
+	*min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
 }
 
 static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
@@ -529,21 +530,6 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
 	pstate_funcs.set(cpu, pstate);
 }
 
-static inline void intel_pstate_pstate_increase(struct cpudata *cpu, int steps)
-{
-	int target;
-	target = cpu->pstate.current_pstate + steps;
-
-	intel_pstate_set_pstate(cpu, target);
-}
-
-static inline void intel_pstate_pstate_decrease(struct cpudata *cpu, int steps)
-{
-	int target;
-	target = cpu->pstate.current_pstate - steps;
-	intel_pstate_set_pstate(cpu, target);
-}
-
 static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
 {
 	cpu->pstate.min_pstate = pstate_funcs.get_min();
@@ -559,13 +545,9 @@ static inline void intel_pstate_calc_busy(struct cpudata *cpu)
 {
 	struct sample *sample = &cpu->sample;
 	int64_t core_pct;
-	int32_t rem;
 
 	core_pct = int_tofp(sample->aperf) * int_tofp(100);
-	core_pct = div_u64_rem(core_pct, int_tofp(sample->mperf), &rem);
-
-	if ((rem << 1) >= int_tofp(sample->mperf))
-		core_pct += 1;
+	core_pct = div64_u64(core_pct, int_tofp(sample->mperf));
 
 	sample->freq = fp_toint(
 		mul_fp(int_tofp(cpu->pstate.max_pstate * 1000), core_pct));
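
The explicit round-to-nearest via div_u64_rem() goes away: with aperf/mperf no longer pre-shifted (see the sampling hunk below), div64_u64() already keeps FRAC_BITS of fractional precision in the quotient. A userspace sketch of the 24.8 fixed-point percentage, with made-up counter deltas:

	#include <stdio.h>
	#include <stdint.h>

	#define FRAC_BITS 8
	#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
	#define fp_toint(X) ((X) >> FRAC_BITS)

	/* Sketch: core_pct as computed above, assumed APERF/MPERF deltas. */
	int main(void)
	{
		uint64_t aperf = 180000, mperf = 200000;	/* assumed */
		int64_t core_pct = int_tofp(aperf) * int_tofp(100);

		core_pct /= int_tofp(mperf);	/* stands in for div64_u64() */
		printf("core busy: %lld.%02lld%%\n", (long long)fp_toint(core_pct),
		       (long long)((core_pct & 0xff) * 100 / 256));
		return 0;
	}
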
@@ -576,12 +558,12 @@ static inline void intel_pstate_calc_busy(struct cpudata *cpu)
 static inline void intel_pstate_sample(struct cpudata *cpu)
 {
 	u64 aperf, mperf;
+	unsigned long flags;
 
+	local_irq_save(flags);
 	rdmsrl(MSR_IA32_APERF, aperf);
 	rdmsrl(MSR_IA32_MPERF, mperf);
-
-	aperf = aperf >> FRAC_BITS;
-	mperf = mperf >> FRAC_BITS;
+	local_irq_restore(flags);
 
 	cpu->last_sample_time = cpu->sample.time;
 	cpu->sample.time = ktime_get();
@@ -598,10 +580,9 @@ static inline void intel_pstate_sample(struct cpudata *cpu)
 
 static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
 {
-	int sample_time, delay;
+	int delay;
 
-	sample_time = pid_params.sample_rate_ms;
-	delay = msecs_to_jiffies(sample_time);
+	delay = msecs_to_jiffies(pid_params.sample_rate_ms);
 	mod_timer_pinned(&cpu->timer, jiffies + delay);
 }
 
607 588
@@ -616,12 +597,12 @@ static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu)
616 current_pstate = int_tofp(cpu->pstate.current_pstate); 597 current_pstate = int_tofp(cpu->pstate.current_pstate);
617 core_busy = mul_fp(core_busy, div_fp(max_pstate, current_pstate)); 598 core_busy = mul_fp(core_busy, div_fp(max_pstate, current_pstate));
618 599
619 sample_time = (pid_params.sample_rate_ms * USEC_PER_MSEC); 600 sample_time = pid_params.sample_rate_ms * USEC_PER_MSEC;
620 duration_us = (u32) ktime_us_delta(cpu->sample.time, 601 duration_us = (u32) ktime_us_delta(cpu->sample.time,
621 cpu->last_sample_time); 602 cpu->last_sample_time);
622 if (duration_us > sample_time * 3) { 603 if (duration_us > sample_time * 3) {
623 sample_ratio = div_fp(int_tofp(sample_time), 604 sample_ratio = div_fp(int_tofp(sample_time),
624 int_tofp(duration_us)); 605 int_tofp(duration_us));
625 core_busy = mul_fp(core_busy, sample_ratio); 606 core_busy = mul_fp(core_busy, sample_ratio);
626 } 607 }
627 608
@@ -632,20 +613,15 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
632{ 613{
633 int32_t busy_scaled; 614 int32_t busy_scaled;
634 struct _pid *pid; 615 struct _pid *pid;
635 signed int ctl = 0; 616 signed int ctl;
636 int steps;
637 617
638 pid = &cpu->pid; 618 pid = &cpu->pid;
639 busy_scaled = intel_pstate_get_scaled_busy(cpu); 619 busy_scaled = intel_pstate_get_scaled_busy(cpu);
640 620
641 ctl = pid_calc(pid, busy_scaled); 621 ctl = pid_calc(pid, busy_scaled);
642 622
643 steps = abs(ctl); 623 /* Negative values of ctl increase the pstate and vice versa */
644 624 intel_pstate_set_pstate(cpu, cpu->pstate.current_pstate - ctl);
645 if (ctl < 0)
646 intel_pstate_pstate_increase(cpu, steps);
647 else
648 intel_pstate_pstate_decrease(cpu, steps);
649} 625}
650 626
651static void intel_pstate_timer_func(unsigned long __data) 627static void intel_pstate_timer_func(unsigned long __data)
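
The increase/decrease helper pair collapses into one call because the PID sign already encodes direction: pid_calc() returns a negative ctl when measured busyness exceeds the setpoint, so current_pstate - ctl rises under load and falls when idle, and intel_pstate_set_pstate() clamps the result to the valid range. A toy illustration with assumed values:

	#include <stdio.h>

	int main(void)
	{
		int current_pstate = 16;	/* assumed */

		/* busyness above the setpoint yields a negative ctl ... */
		printf("%d\n", current_pstate - (-4));	/* 20: P-state raised */
		/* ... and below the setpoint a positive one */
		printf("%d\n", current_pstate - 3);	/* 13: P-state lowered */
		return 0;
	}
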
@@ -705,8 +681,7 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
 
 	init_timer_deferrable(&cpu->timer);
 	cpu->timer.function = intel_pstate_timer_func;
-	cpu->timer.data =
-		(unsigned long)cpu;
+	cpu->timer.data = (unsigned long)cpu;
 	cpu->timer.expires = jiffies + HZ/100;
 	intel_pstate_busy_pid_reset(cpu);
 	intel_pstate_sample(cpu);
@@ -751,7 +726,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 	limits.min_perf_pct = clamp_t(int, limits.min_perf_pct, 0 , 100);
 	limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
 
-	limits.max_policy_pct = policy->max * 100 / policy->cpuinfo.max_freq;
+	limits.max_policy_pct = (policy->max * 100) / policy->cpuinfo.max_freq;
 	limits.max_policy_pct = clamp_t(int, limits.max_policy_pct, 0 , 100);
 	limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
 	limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
@@ -763,8 +738,8 @@ static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
 {
 	cpufreq_verify_within_cpu_limits(policy);
 
-	if ((policy->policy != CPUFREQ_POLICY_POWERSAVE) &&
-	    (policy->policy != CPUFREQ_POLICY_PERFORMANCE))
+	if (policy->policy != CPUFREQ_POLICY_POWERSAVE &&
+	    policy->policy != CPUFREQ_POLICY_PERFORMANCE)
 		return -EINVAL;
 
 	return 0;
@@ -797,7 +772,7 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
 
 	rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
 	if (misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
-			cpu->pstate.max_pstate == cpu->pstate.turbo_pstate) {
+	    cpu->pstate.max_pstate == cpu->pstate.turbo_pstate) {
 		limits.turbo_disabled = 1;
 		limits.no_turbo = 1;
 	}
@@ -839,8 +814,8 @@ static int intel_pstate_msrs_not_valid(void)
 	rdmsrl(MSR_IA32_MPERF, mperf);
 
 	if (!pstate_funcs.get_max() ||
-		!pstate_funcs.get_min() ||
-		!pstate_funcs.get_turbo())
+	    !pstate_funcs.get_min() ||
+	    !pstate_funcs.get_turbo())
 		return -ENODEV;
 
 	rdmsrl(MSR_IA32_APERF, tmp);
@@ -922,14 +897,14 @@ static bool intel_pstate_platform_pwr_mgmt_exists(void)
 	struct acpi_table_header hdr;
 	struct hw_vendor_info *v_info;
 
-	if (acpi_disabled
-	    || ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_FADT, 0, &hdr)))
+	if (acpi_disabled ||
+	    ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_FADT, 0, &hdr)))
 		return false;
 
 	for (v_info = vendor_info; v_info->valid; v_info++) {
-		if (!strncmp(hdr.oem_id, v_info->oem_id, ACPI_OEM_ID_SIZE)
-		    && !strncmp(hdr.oem_table_id, v_info->oem_table_id, ACPI_OEM_TABLE_ID_SIZE)
-		    && intel_pstate_no_acpi_pss())
+		if (!strncmp(hdr.oem_id, v_info->oem_id, ACPI_OEM_ID_SIZE) &&
+		    !strncmp(hdr.oem_table_id, v_info->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) &&
+		    intel_pstate_no_acpi_pss())
 			return true;
 	}
 
diff --git a/drivers/cpufreq/powernow-k6.c b/drivers/cpufreq/powernow-k6.c
index c8012bc86910..f91027259c3c 100644
--- a/drivers/cpufreq/powernow-k6.c
+++ b/drivers/cpufreq/powernow-k6.c
@@ -55,6 +55,7 @@ static const struct {
 	unsigned freq;
 	unsigned mult;
 } usual_frequency_table[] = {
+	{ 350000, 35 },	// 100 * 3.5
 	{ 400000, 40 },	// 100 * 4
 	{ 450000, 45 },	// 100 * 4.5
 	{ 475000, 50 },	// 95 * 5
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 8f8ae95c6e27..7d1955afa62c 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -176,6 +176,7 @@ static inline void disable_cpufreq(void) { }
 
 #define CPUFREQ_RELATION_L 0 /* lowest frequency at or above target */
 #define CPUFREQ_RELATION_H 1 /* highest frequency below or at target */
+#define CPUFREQ_RELATION_C 2 /* closest frequency to target */
 
 struct freq_attr {
 	struct attribute attr;