about summary refs log tree commit diff stats
path: root/drivers/cpufreq/intel_pstate.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/cpufreq/intel_pstate.c')
-rw-r--r--drivers/cpufreq/intel_pstate.c169
1 file changed, 75 insertions, 94 deletions
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 4e7f492ad583..c5eac949760d 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -37,7 +37,6 @@
37#define BYT_TURBO_RATIOS 0x66c 37#define BYT_TURBO_RATIOS 0x66c
38#define BYT_TURBO_VIDS 0x66d 38#define BYT_TURBO_VIDS 0x66d
39 39
40
41#define FRAC_BITS 8 40#define FRAC_BITS 8
42#define int_tofp(X) ((int64_t)(X) << FRAC_BITS) 41#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
43#define fp_toint(X) ((X) >> FRAC_BITS) 42#define fp_toint(X) ((X) >> FRAC_BITS)
@@ -50,7 +49,7 @@ static inline int32_t mul_fp(int32_t x, int32_t y)
50 49
51static inline int32_t div_fp(int32_t x, int32_t y) 50static inline int32_t div_fp(int32_t x, int32_t y)
52{ 51{
53 return div_s64((int64_t)x << FRAC_BITS, (int64_t)y); 52 return div_s64((int64_t)x << FRAC_BITS, y);
54} 53}
55 54
56struct sample { 55struct sample {
@@ -128,6 +127,7 @@ static struct pstate_funcs pstate_funcs;
128 127
129struct perf_limits { 128struct perf_limits {
130 int no_turbo; 129 int no_turbo;
130 int turbo_disabled;
131 int max_perf_pct; 131 int max_perf_pct;
132 int min_perf_pct; 132 int min_perf_pct;
133 int32_t max_perf; 133 int32_t max_perf;
@@ -147,7 +147,7 @@ static struct perf_limits limits = {
147}; 147};
148 148
149static inline void pid_reset(struct _pid *pid, int setpoint, int busy, 149static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
150 int deadband, int integral) { 150 int deadband, int integral) {
151 pid->setpoint = setpoint; 151 pid->setpoint = setpoint;
152 pid->deadband = deadband; 152 pid->deadband = deadband;
153 pid->integral = int_tofp(integral); 153 pid->integral = int_tofp(integral);
@@ -166,7 +166,6 @@ static inline void pid_i_gain_set(struct _pid *pid, int percent)
166 166
167static inline void pid_d_gain_set(struct _pid *pid, int percent) 167static inline void pid_d_gain_set(struct _pid *pid, int percent)
168{ 168{
169
170 pid->d_gain = div_fp(int_tofp(percent), int_tofp(100)); 169 pid->d_gain = div_fp(int_tofp(percent), int_tofp(100));
171} 170}
172 171
@@ -196,10 +195,7 @@ static signed int pid_calc(struct _pid *pid, int32_t busy)
196 pid->last_err = fp_error; 195 pid->last_err = fp_error;
197 196
198 result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm; 197 result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm;
199 if (result >= 0) 198 result = result + (1 << (FRAC_BITS-1));
200 result = result + (1 << (FRAC_BITS-1));
201 else
202 result = result - (1 << (FRAC_BITS-1));
203 return (signed int)fp_toint(result); 199 return (signed int)fp_toint(result);
204} 200}
205 201
@@ -209,16 +205,13 @@ static inline void intel_pstate_busy_pid_reset(struct cpudata *cpu)
209 pid_d_gain_set(&cpu->pid, pid_params.d_gain_pct); 205 pid_d_gain_set(&cpu->pid, pid_params.d_gain_pct);
210 pid_i_gain_set(&cpu->pid, pid_params.i_gain_pct); 206 pid_i_gain_set(&cpu->pid, pid_params.i_gain_pct);
211 207
212 pid_reset(&cpu->pid, 208 pid_reset(&cpu->pid, pid_params.setpoint, 100, pid_params.deadband, 0);
213 pid_params.setpoint,
214 100,
215 pid_params.deadband,
216 0);
217} 209}
218 210
219static inline void intel_pstate_reset_all_pid(void) 211static inline void intel_pstate_reset_all_pid(void)
220{ 212{
221 unsigned int cpu; 213 unsigned int cpu;
214
222 for_each_online_cpu(cpu) { 215 for_each_online_cpu(cpu) {
223 if (all_cpu_data[cpu]) 216 if (all_cpu_data[cpu])
224 intel_pstate_busy_pid_reset(all_cpu_data[cpu]); 217 intel_pstate_busy_pid_reset(all_cpu_data[cpu]);
@@ -232,13 +225,13 @@ static int pid_param_set(void *data, u64 val)
232 intel_pstate_reset_all_pid(); 225 intel_pstate_reset_all_pid();
233 return 0; 226 return 0;
234} 227}
228
235static int pid_param_get(void *data, u64 *val) 229static int pid_param_get(void *data, u64 *val)
236{ 230{
237 *val = *(u32 *)data; 231 *val = *(u32 *)data;
238 return 0; 232 return 0;
239} 233}
240DEFINE_SIMPLE_ATTRIBUTE(fops_pid_param, pid_param_get, 234DEFINE_SIMPLE_ATTRIBUTE(fops_pid_param, pid_param_get, pid_param_set, "%llu\n");
241 pid_param_set, "%llu\n");
242 235
243struct pid_param { 236struct pid_param {
244 char *name; 237 char *name;
@@ -255,9 +248,9 @@ static struct pid_param pid_files[] = {
255 {NULL, NULL} 248 {NULL, NULL}
256}; 249};
257 250
258static struct dentry *debugfs_parent; 251static void __init intel_pstate_debug_expose_params(void)
259static void intel_pstate_debug_expose_params(void)
260{ 252{
253 struct dentry *debugfs_parent;
261 int i = 0; 254 int i = 0;
262 255
263 debugfs_parent = debugfs_create_dir("pstate_snb", NULL); 256 debugfs_parent = debugfs_create_dir("pstate_snb", NULL);
@@ -265,8 +258,8 @@ static void intel_pstate_debug_expose_params(void)
265 return; 258 return;
266 while (pid_files[i].name) { 259 while (pid_files[i].name) {
267 debugfs_create_file(pid_files[i].name, 0660, 260 debugfs_create_file(pid_files[i].name, 0660,
268 debugfs_parent, pid_files[i].value, 261 debugfs_parent, pid_files[i].value,
269 &fops_pid_param); 262 &fops_pid_param);
270 i++; 263 i++;
271 } 264 }
272} 265}
@@ -282,23 +275,28 @@ static void intel_pstate_debug_expose_params(void)
282 } 275 }
283 276
284static ssize_t store_no_turbo(struct kobject *a, struct attribute *b, 277static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
285 const char *buf, size_t count) 278 const char *buf, size_t count)
286{ 279{
287 unsigned int input; 280 unsigned int input;
288 int ret; 281 int ret;
282
289 ret = sscanf(buf, "%u", &input); 283 ret = sscanf(buf, "%u", &input);
290 if (ret != 1) 284 if (ret != 1)
291 return -EINVAL; 285 return -EINVAL;
292 limits.no_turbo = clamp_t(int, input, 0 , 1); 286 limits.no_turbo = clamp_t(int, input, 0 , 1);
293 287 if (limits.turbo_disabled) {
288 pr_warn("Turbo disabled by BIOS or unavailable on processor\n");
289 limits.no_turbo = limits.turbo_disabled;
290 }
294 return count; 291 return count;
295} 292}
296 293
297static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b, 294static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
298 const char *buf, size_t count) 295 const char *buf, size_t count)
299{ 296{
300 unsigned int input; 297 unsigned int input;
301 int ret; 298 int ret;
299
302 ret = sscanf(buf, "%u", &input); 300 ret = sscanf(buf, "%u", &input);
303 if (ret != 1) 301 if (ret != 1)
304 return -EINVAL; 302 return -EINVAL;
@@ -306,14 +304,16 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
306 limits.max_sysfs_pct = clamp_t(int, input, 0 , 100); 304 limits.max_sysfs_pct = clamp_t(int, input, 0 , 100);
307 limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct); 305 limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
308 limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100)); 306 limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
307
309 return count; 308 return count;
310} 309}
311 310
312static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b, 311static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
313 const char *buf, size_t count) 312 const char *buf, size_t count)
314{ 313{
315 unsigned int input; 314 unsigned int input;
316 int ret; 315 int ret;
316
317 ret = sscanf(buf, "%u", &input); 317 ret = sscanf(buf, "%u", &input);
318 if (ret != 1) 318 if (ret != 1)
319 return -EINVAL; 319 return -EINVAL;
@@ -341,17 +341,16 @@ static struct attribute *intel_pstate_attributes[] = {
341static struct attribute_group intel_pstate_attr_group = { 341static struct attribute_group intel_pstate_attr_group = {
342 .attrs = intel_pstate_attributes, 342 .attrs = intel_pstate_attributes,
343}; 343};
344static struct kobject *intel_pstate_kobject;
345 344
346static void intel_pstate_sysfs_expose_params(void) 345static void __init intel_pstate_sysfs_expose_params(void)
347{ 346{
347 struct kobject *intel_pstate_kobject;
348 int rc; 348 int rc;
349 349
350 intel_pstate_kobject = kobject_create_and_add("intel_pstate", 350 intel_pstate_kobject = kobject_create_and_add("intel_pstate",
351 &cpu_subsys.dev_root->kobj); 351 &cpu_subsys.dev_root->kobj);
352 BUG_ON(!intel_pstate_kobject); 352 BUG_ON(!intel_pstate_kobject);
353 rc = sysfs_create_group(intel_pstate_kobject, 353 rc = sysfs_create_group(intel_pstate_kobject, &intel_pstate_attr_group);
354 &intel_pstate_attr_group);
355 BUG_ON(rc); 354 BUG_ON(rc);
356} 355}
357 356
@@ -359,22 +358,25 @@ static void intel_pstate_sysfs_expose_params(void)
359static int byt_get_min_pstate(void) 358static int byt_get_min_pstate(void)
360{ 359{
361 u64 value; 360 u64 value;
361
362 rdmsrl(BYT_RATIOS, value); 362 rdmsrl(BYT_RATIOS, value);
363 return (value >> 8) & 0x3F; 363 return (value >> 8) & 0x7F;
364} 364}
365 365
366static int byt_get_max_pstate(void) 366static int byt_get_max_pstate(void)
367{ 367{
368 u64 value; 368 u64 value;
369
369 rdmsrl(BYT_RATIOS, value); 370 rdmsrl(BYT_RATIOS, value);
370 return (value >> 16) & 0x3F; 371 return (value >> 16) & 0x7F;
371} 372}
372 373
373static int byt_get_turbo_pstate(void) 374static int byt_get_turbo_pstate(void)
374{ 375{
375 u64 value; 376 u64 value;
377
376 rdmsrl(BYT_TURBO_RATIOS, value); 378 rdmsrl(BYT_TURBO_RATIOS, value);
377 return value & 0x3F; 379 return value & 0x7F;
378} 380}
379 381
380static void byt_set_pstate(struct cpudata *cpudata, int pstate) 382static void byt_set_pstate(struct cpudata *cpudata, int pstate)
@@ -384,7 +386,7 @@ static void byt_set_pstate(struct cpudata *cpudata, int pstate)
384 u32 vid; 386 u32 vid;
385 387
386 val = pstate << 8; 388 val = pstate << 8;
387 if (limits.no_turbo) 389 if (limits.no_turbo && !limits.turbo_disabled)
388 val |= (u64)1 << 32; 390 val |= (u64)1 << 32;
389 391
390 vid_fp = cpudata->vid.min + mul_fp( 392 vid_fp = cpudata->vid.min + mul_fp(
@@ -406,10 +408,9 @@ static void byt_get_vid(struct cpudata *cpudata)
406{ 408{
407 u64 value; 409 u64 value;
408 410
409
410 rdmsrl(BYT_VIDS, value); 411 rdmsrl(BYT_VIDS, value);
411 cpudata->vid.min = int_tofp((value >> 8) & 0x3f); 412 cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
412 cpudata->vid.max = int_tofp((value >> 16) & 0x3f); 413 cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
413 cpudata->vid.ratio = div_fp( 414 cpudata->vid.ratio = div_fp(
414 cpudata->vid.max - cpudata->vid.min, 415 cpudata->vid.max - cpudata->vid.min,
415 int_tofp(cpudata->pstate.max_pstate - 416 int_tofp(cpudata->pstate.max_pstate -
@@ -419,10 +420,10 @@ static void byt_get_vid(struct cpudata *cpudata)
419 cpudata->vid.turbo = value & 0x7f; 420 cpudata->vid.turbo = value & 0x7f;
420} 421}
421 422
422
423static int core_get_min_pstate(void) 423static int core_get_min_pstate(void)
424{ 424{
425 u64 value; 425 u64 value;
426
426 rdmsrl(MSR_PLATFORM_INFO, value); 427 rdmsrl(MSR_PLATFORM_INFO, value);
427 return (value >> 40) & 0xFF; 428 return (value >> 40) & 0xFF;
428} 429}
@@ -430,6 +431,7 @@ static int core_get_min_pstate(void)
430static int core_get_max_pstate(void) 431static int core_get_max_pstate(void)
431{ 432{
432 u64 value; 433 u64 value;
434
433 rdmsrl(MSR_PLATFORM_INFO, value); 435 rdmsrl(MSR_PLATFORM_INFO, value);
434 return (value >> 8) & 0xFF; 436 return (value >> 8) & 0xFF;
435} 437}
@@ -438,9 +440,10 @@ static int core_get_turbo_pstate(void)
438{ 440{
439 u64 value; 441 u64 value;
440 int nont, ret; 442 int nont, ret;
443
441 rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value); 444 rdmsrl(MSR_NHM_TURBO_RATIO_LIMIT, value);
442 nont = core_get_max_pstate(); 445 nont = core_get_max_pstate();
443 ret = ((value) & 255); 446 ret = (value) & 255;
444 if (ret <= nont) 447 if (ret <= nont)
445 ret = nont; 448 ret = nont;
446 return ret; 449 return ret;
@@ -451,7 +454,7 @@ static void core_set_pstate(struct cpudata *cpudata, int pstate)
451 u64 val; 454 u64 val;
452 455
453 val = pstate << 8; 456 val = pstate << 8;
454 if (limits.no_turbo) 457 if (limits.no_turbo && !limits.turbo_disabled)
455 val |= (u64)1 << 32; 458 val |= (u64)1 << 32;
456 459
457 wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val); 460 wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val);
@@ -492,12 +495,12 @@ static struct cpu_defaults byt_params = {
492 }, 495 },
493}; 496};
494 497
495
496static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max) 498static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
497{ 499{
498 int max_perf = cpu->pstate.turbo_pstate; 500 int max_perf = cpu->pstate.turbo_pstate;
499 int max_perf_adj; 501 int max_perf_adj;
500 int min_perf; 502 int min_perf;
503
501 if (limits.no_turbo) 504 if (limits.no_turbo)
502 max_perf = cpu->pstate.max_pstate; 505 max_perf = cpu->pstate.max_pstate;
503 506
@@ -506,8 +509,7 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
506 cpu->pstate.min_pstate, cpu->pstate.turbo_pstate); 509 cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);
507 510
508 min_perf = fp_toint(mul_fp(int_tofp(max_perf), limits.min_perf)); 511 min_perf = fp_toint(mul_fp(int_tofp(max_perf), limits.min_perf));
509 *min = clamp_t(int, min_perf, 512 *min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
510 cpu->pstate.min_pstate, max_perf);
511} 513}
512 514
513static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate) 515static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
@@ -528,21 +530,6 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
528 pstate_funcs.set(cpu, pstate); 530 pstate_funcs.set(cpu, pstate);
529} 531}
530 532
531static inline void intel_pstate_pstate_increase(struct cpudata *cpu, int steps)
532{
533 int target;
534 target = cpu->pstate.current_pstate + steps;
535
536 intel_pstate_set_pstate(cpu, target);
537}
538
539static inline void intel_pstate_pstate_decrease(struct cpudata *cpu, int steps)
540{
541 int target;
542 target = cpu->pstate.current_pstate - steps;
543 intel_pstate_set_pstate(cpu, target);
544}
545
546static void intel_pstate_get_cpu_pstates(struct cpudata *cpu) 533static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
547{ 534{
548 cpu->pstate.min_pstate = pstate_funcs.get_min(); 535 cpu->pstate.min_pstate = pstate_funcs.get_min();
@@ -558,13 +545,9 @@ static inline void intel_pstate_calc_busy(struct cpudata *cpu)
558{ 545{
559 struct sample *sample = &cpu->sample; 546 struct sample *sample = &cpu->sample;
560 int64_t core_pct; 547 int64_t core_pct;
561 int32_t rem;
562 548
563 core_pct = int_tofp(sample->aperf) * int_tofp(100); 549 core_pct = int_tofp(sample->aperf) * int_tofp(100);
564 core_pct = div_u64_rem(core_pct, int_tofp(sample->mperf), &rem); 550 core_pct = div64_u64(core_pct, int_tofp(sample->mperf));
565
566 if ((rem << 1) >= int_tofp(sample->mperf))
567 core_pct += 1;
568 551
569 sample->freq = fp_toint( 552 sample->freq = fp_toint(
570 mul_fp(int_tofp(cpu->pstate.max_pstate * 1000), core_pct)); 553 mul_fp(int_tofp(cpu->pstate.max_pstate * 1000), core_pct));
@@ -575,12 +558,12 @@ static inline void intel_pstate_calc_busy(struct cpudata *cpu)
575static inline void intel_pstate_sample(struct cpudata *cpu) 558static inline void intel_pstate_sample(struct cpudata *cpu)
576{ 559{
577 u64 aperf, mperf; 560 u64 aperf, mperf;
561 unsigned long flags;
578 562
563 local_irq_save(flags);
579 rdmsrl(MSR_IA32_APERF, aperf); 564 rdmsrl(MSR_IA32_APERF, aperf);
580 rdmsrl(MSR_IA32_MPERF, mperf); 565 rdmsrl(MSR_IA32_MPERF, mperf);
581 566 local_irq_restore(flags);
582 aperf = aperf >> FRAC_BITS;
583 mperf = mperf >> FRAC_BITS;
584 567
585 cpu->last_sample_time = cpu->sample.time; 568 cpu->last_sample_time = cpu->sample.time;
586 cpu->sample.time = ktime_get(); 569 cpu->sample.time = ktime_get();
@@ -597,10 +580,9 @@ static inline void intel_pstate_sample(struct cpudata *cpu)
597 580
598static inline void intel_pstate_set_sample_time(struct cpudata *cpu) 581static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
599{ 582{
600 int sample_time, delay; 583 int delay;
601 584
602 sample_time = pid_params.sample_rate_ms; 585 delay = msecs_to_jiffies(pid_params.sample_rate_ms);
603 delay = msecs_to_jiffies(sample_time);
604 mod_timer_pinned(&cpu->timer, jiffies + delay); 586 mod_timer_pinned(&cpu->timer, jiffies + delay);
605} 587}
606 588
@@ -615,12 +597,12 @@ static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu)
615 current_pstate = int_tofp(cpu->pstate.current_pstate); 597 current_pstate = int_tofp(cpu->pstate.current_pstate);
616 core_busy = mul_fp(core_busy, div_fp(max_pstate, current_pstate)); 598 core_busy = mul_fp(core_busy, div_fp(max_pstate, current_pstate));
617 599
618 sample_time = (pid_params.sample_rate_ms * USEC_PER_MSEC); 600 sample_time = pid_params.sample_rate_ms * USEC_PER_MSEC;
619 duration_us = (u32) ktime_us_delta(cpu->sample.time, 601 duration_us = (u32) ktime_us_delta(cpu->sample.time,
620 cpu->last_sample_time); 602 cpu->last_sample_time);
621 if (duration_us > sample_time * 3) { 603 if (duration_us > sample_time * 3) {
622 sample_ratio = div_fp(int_tofp(sample_time), 604 sample_ratio = div_fp(int_tofp(sample_time),
623 int_tofp(duration_us)); 605 int_tofp(duration_us));
624 core_busy = mul_fp(core_busy, sample_ratio); 606 core_busy = mul_fp(core_busy, sample_ratio);
625 } 607 }
626 608
@@ -631,20 +613,15 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
631{ 613{
632 int32_t busy_scaled; 614 int32_t busy_scaled;
633 struct _pid *pid; 615 struct _pid *pid;
634 signed int ctl = 0; 616 signed int ctl;
635 int steps;
636 617
637 pid = &cpu->pid; 618 pid = &cpu->pid;
638 busy_scaled = intel_pstate_get_scaled_busy(cpu); 619 busy_scaled = intel_pstate_get_scaled_busy(cpu);
639 620
640 ctl = pid_calc(pid, busy_scaled); 621 ctl = pid_calc(pid, busy_scaled);
641 622
642 steps = abs(ctl); 623 /* Negative values of ctl increase the pstate and vice versa */
643 624 intel_pstate_set_pstate(cpu, cpu->pstate.current_pstate - ctl);
644 if (ctl < 0)
645 intel_pstate_pstate_increase(cpu, steps);
646 else
647 intel_pstate_pstate_decrease(cpu, steps);
648} 625}
649 626
650static void intel_pstate_timer_func(unsigned long __data) 627static void intel_pstate_timer_func(unsigned long __data)
@@ -699,14 +676,12 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
699 676
700 cpu = all_cpu_data[cpunum]; 677 cpu = all_cpu_data[cpunum];
701 678
702 intel_pstate_get_cpu_pstates(cpu);
703
704 cpu->cpu = cpunum; 679 cpu->cpu = cpunum;
680 intel_pstate_get_cpu_pstates(cpu);
705 681
706 init_timer_deferrable(&cpu->timer); 682 init_timer_deferrable(&cpu->timer);
707 cpu->timer.function = intel_pstate_timer_func; 683 cpu->timer.function = intel_pstate_timer_func;
708 cpu->timer.data = 684 cpu->timer.data = (unsigned long)cpu;
709 (unsigned long)cpu;
710 cpu->timer.expires = jiffies + HZ/100; 685 cpu->timer.expires = jiffies + HZ/100;
711 intel_pstate_busy_pid_reset(cpu); 686 intel_pstate_busy_pid_reset(cpu);
712 intel_pstate_sample(cpu); 687 intel_pstate_sample(cpu);
@@ -744,14 +719,14 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
744 limits.min_perf = int_tofp(1); 719 limits.min_perf = int_tofp(1);
745 limits.max_perf_pct = 100; 720 limits.max_perf_pct = 100;
746 limits.max_perf = int_tofp(1); 721 limits.max_perf = int_tofp(1);
747 limits.no_turbo = 0; 722 limits.no_turbo = limits.turbo_disabled;
748 return 0; 723 return 0;
749 } 724 }
750 limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq; 725 limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
751 limits.min_perf_pct = clamp_t(int, limits.min_perf_pct, 0 , 100); 726 limits.min_perf_pct = clamp_t(int, limits.min_perf_pct, 0 , 100);
752 limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100)); 727 limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
753 728
754 limits.max_policy_pct = policy->max * 100 / policy->cpuinfo.max_freq; 729 limits.max_policy_pct = (policy->max * 100) / policy->cpuinfo.max_freq;
755 limits.max_policy_pct = clamp_t(int, limits.max_policy_pct, 0 , 100); 730 limits.max_policy_pct = clamp_t(int, limits.max_policy_pct, 0 , 100);
756 limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct); 731 limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
757 limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100)); 732 limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
@@ -763,8 +738,8 @@ static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
763{ 738{
764 cpufreq_verify_within_cpu_limits(policy); 739 cpufreq_verify_within_cpu_limits(policy);
765 740
766 if ((policy->policy != CPUFREQ_POLICY_POWERSAVE) && 741 if (policy->policy != CPUFREQ_POLICY_POWERSAVE &&
767 (policy->policy != CPUFREQ_POLICY_PERFORMANCE)) 742 policy->policy != CPUFREQ_POLICY_PERFORMANCE)
768 return -EINVAL; 743 return -EINVAL;
769 744
770 return 0; 745 return 0;
@@ -787,6 +762,7 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
787{ 762{
788 struct cpudata *cpu; 763 struct cpudata *cpu;
789 int rc; 764 int rc;
765 u64 misc_en;
790 766
791 rc = intel_pstate_init_cpu(policy->cpu); 767 rc = intel_pstate_init_cpu(policy->cpu);
792 if (rc) 768 if (rc)
@@ -794,8 +770,13 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
794 770
795 cpu = all_cpu_data[policy->cpu]; 771 cpu = all_cpu_data[policy->cpu];
796 772
797 if (!limits.no_turbo && 773 rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
798 limits.min_perf_pct == 100 && limits.max_perf_pct == 100) 774 if (misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
775 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate) {
776 limits.turbo_disabled = 1;
777 limits.no_turbo = 1;
778 }
779 if (limits.min_perf_pct == 100 && limits.max_perf_pct == 100)
799 policy->policy = CPUFREQ_POLICY_PERFORMANCE; 780 policy->policy = CPUFREQ_POLICY_PERFORMANCE;
800 else 781 else
801 policy->policy = CPUFREQ_POLICY_POWERSAVE; 782 policy->policy = CPUFREQ_POLICY_POWERSAVE;
@@ -833,8 +814,8 @@ static int intel_pstate_msrs_not_valid(void)
833 rdmsrl(MSR_IA32_MPERF, mperf); 814 rdmsrl(MSR_IA32_MPERF, mperf);
834 815
835 if (!pstate_funcs.get_max() || 816 if (!pstate_funcs.get_max() ||
836 !pstate_funcs.get_min() || 817 !pstate_funcs.get_min() ||
837 !pstate_funcs.get_turbo()) 818 !pstate_funcs.get_turbo())
838 return -ENODEV; 819 return -ENODEV;
839 820
840 rdmsrl(MSR_IA32_APERF, tmp); 821 rdmsrl(MSR_IA32_APERF, tmp);
@@ -916,14 +897,14 @@ static bool intel_pstate_platform_pwr_mgmt_exists(void)
916 struct acpi_table_header hdr; 897 struct acpi_table_header hdr;
917 struct hw_vendor_info *v_info; 898 struct hw_vendor_info *v_info;
918 899
919 if (acpi_disabled 900 if (acpi_disabled ||
920 || ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_FADT, 0, &hdr))) 901 ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_FADT, 0, &hdr)))
921 return false; 902 return false;
922 903
923 for (v_info = vendor_info; v_info->valid; v_info++) { 904 for (v_info = vendor_info; v_info->valid; v_info++) {
924 if (!strncmp(hdr.oem_id, v_info->oem_id, ACPI_OEM_ID_SIZE) 905 if (!strncmp(hdr.oem_id, v_info->oem_id, ACPI_OEM_ID_SIZE) &&
925 && !strncmp(hdr.oem_table_id, v_info->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) 906 !strncmp(hdr.oem_table_id, v_info->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) &&
926 && intel_pstate_no_acpi_pss()) 907 intel_pstate_no_acpi_pss())
927 return true; 908 return true;
928 } 909 }
929 910