Diffstat (limited to 'drivers/cpufreq/intel_pstate.c')
 -rw-r--r--  drivers/cpufreq/intel_pstate.c | 122
 1 file changed, 21 insertions(+), 101 deletions(-)
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index cc3a8e6c92be..9c36ace92a39 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -48,12 +48,7 @@ static inline int32_t div_fp(int32_t x, int32_t y)
 }
 
 struct sample {
-	ktime_t start_time;
-	ktime_t end_time;
 	int core_pct_busy;
-	int pstate_pct_busy;
-	u64 duration_us;
-	u64 idletime_us;
 	u64 aperf;
 	u64 mperf;
 	int freq;
@@ -86,13 +81,9 @@ struct cpudata {
 	struct pstate_adjust_policy *pstate_policy;
 	struct pstate_data pstate;
 	struct _pid pid;
-	struct _pid idle_pid;
 
 	int min_pstate_count;
-	int idle_mode;
 
-	ktime_t prev_sample;
-	u64 prev_idle_time_us;
 	u64 prev_aperf;
 	u64 prev_mperf;
 	int sample_ptr;
@@ -124,6 +115,8 @@ struct perf_limits {
 	int min_perf_pct;
 	int32_t max_perf;
 	int32_t min_perf;
+	int max_policy_pct;
+	int max_sysfs_pct;
 };
 
 static struct perf_limits limits = {
@@ -132,6 +125,8 @@ static struct perf_limits limits = {
 	.max_perf = int_tofp(1),
 	.min_perf_pct = 0,
 	.min_perf = 0,
+	.max_policy_pct = 100,
+	.max_sysfs_pct = 100,
 };
 
 static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
@@ -202,19 +197,6 @@ static inline void intel_pstate_busy_pid_reset(struct cpudata *cpu)
 		0);
 }
 
-static inline void intel_pstate_idle_pid_reset(struct cpudata *cpu)
-{
-	pid_p_gain_set(&cpu->idle_pid, cpu->pstate_policy->p_gain_pct);
-	pid_d_gain_set(&cpu->idle_pid, cpu->pstate_policy->d_gain_pct);
-	pid_i_gain_set(&cpu->idle_pid, cpu->pstate_policy->i_gain_pct);
-
-	pid_reset(&cpu->idle_pid,
-		75,
-		50,
-		cpu->pstate_policy->deadband,
-		0);
-}
-
 static inline void intel_pstate_reset_all_pid(void)
 {
 	unsigned int cpu;
@@ -302,7 +284,8 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
 	if (ret != 1)
 		return -EINVAL;
 
-	limits.max_perf_pct = clamp_t(int, input, 0 , 100);
+	limits.max_sysfs_pct = clamp_t(int, input, 0 , 100);
+	limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
 	limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
 	return count;
 }
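The sysfs store path above now records the user's request in its own field and derives the effective cap from the smaller of the two limits. A minimal standalone C sketch of that arithmetic (userspace stand-ins for the kernel's clamp_t() and min(); the field names only mirror the driver's struct perf_limits, this is not driver code):

/*
 * Sketch of the two-field limit bookkeeping: the sysfs knob and the
 * cpufreq policy each keep their own percentage, and the effective
 * max_perf_pct is always the smaller of the two.
 */
#include <stdio.h>

static int clamp_pct(int v) { return v < 0 ? 0 : (v > 100 ? 100 : v); }
static int min_int(int a, int b) { return a < b ? a : b; }

struct limits { int max_policy_pct, max_sysfs_pct, max_perf_pct; };

static void store_max_perf_pct(struct limits *l, int input)
{
	l->max_sysfs_pct = clamp_pct(input);
	l->max_perf_pct = min_int(l->max_policy_pct, l->max_sysfs_pct);
}

int main(void)
{
	struct limits l = { .max_policy_pct = 100, .max_sysfs_pct = 100 };

	store_max_perf_pct(&l, 80);	/* user writes 80 to sysfs */
	l.max_policy_pct = 90;		/* later, a policy update caps at 90% */
	l.max_perf_pct = min_int(l.max_policy_pct, l.max_sysfs_pct);
	printf("effective max_perf_pct = %d\n", l.max_perf_pct);	/* 80 */
	return 0;
}

Keeping the two percentages separate means a later policy update can no longer silently discard a stricter sysfs setting, and vice versa.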
@@ -408,9 +391,8 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
 	if (pstate == cpu->pstate.current_pstate)
 		return;
 
-#ifndef MODULE
 	trace_cpu_frequency(pstate * 100000, cpu->cpu);
-#endif
+
 	cpu->pstate.current_pstate = pstate;
 	wrmsrl(MSR_IA32_PERF_CTL, pstate << 8);
 
@@ -450,48 +432,26 @@ static inline void intel_pstate_calc_busy(struct cpudata *cpu,
 					struct sample *sample)
 {
 	u64 core_pct;
-	sample->pstate_pct_busy = 100 - div64_u64(
-					sample->idletime_us * 100,
-					sample->duration_us);
 	core_pct = div64_u64(sample->aperf * 100, sample->mperf);
 	sample->freq = cpu->pstate.max_pstate * core_pct * 1000;
 
-	sample->core_pct_busy = div_s64((sample->pstate_pct_busy * core_pct),
-					100);
+	sample->core_pct_busy = core_pct;
 }
 
 static inline void intel_pstate_sample(struct cpudata *cpu)
 {
-	ktime_t now;
-	u64 idle_time_us;
 	u64 aperf, mperf;
 
-	now = ktime_get();
-	idle_time_us = get_cpu_idle_time_us(cpu->cpu, NULL);
-
 	rdmsrl(MSR_IA32_APERF, aperf);
 	rdmsrl(MSR_IA32_MPERF, mperf);
-	/* for the first sample, don't actually record a sample, just
-	 * set the baseline */
-	if (cpu->prev_idle_time_us > 0) {
-		cpu->sample_ptr = (cpu->sample_ptr + 1) % SAMPLE_COUNT;
-		cpu->samples[cpu->sample_ptr].start_time = cpu->prev_sample;
-		cpu->samples[cpu->sample_ptr].end_time = now;
-		cpu->samples[cpu->sample_ptr].duration_us =
-			ktime_us_delta(now, cpu->prev_sample);
-		cpu->samples[cpu->sample_ptr].idletime_us =
-			idle_time_us - cpu->prev_idle_time_us;
-
-		cpu->samples[cpu->sample_ptr].aperf = aperf;
-		cpu->samples[cpu->sample_ptr].mperf = mperf;
-		cpu->samples[cpu->sample_ptr].aperf -= cpu->prev_aperf;
-		cpu->samples[cpu->sample_ptr].mperf -= cpu->prev_mperf;
-
-		intel_pstate_calc_busy(cpu, &cpu->samples[cpu->sample_ptr]);
-	}
+	cpu->sample_ptr = (cpu->sample_ptr + 1) % SAMPLE_COUNT;
+	cpu->samples[cpu->sample_ptr].aperf = aperf;
+	cpu->samples[cpu->sample_ptr].mperf = mperf;
+	cpu->samples[cpu->sample_ptr].aperf -= cpu->prev_aperf;
+	cpu->samples[cpu->sample_ptr].mperf -= cpu->prev_mperf;
+
+	intel_pstate_calc_busy(cpu, &cpu->samples[cpu->sample_ptr]);
 
-	cpu->prev_sample = now;
-	cpu->prev_idle_time_us = idle_time_us;
 	cpu->prev_aperf = aperf;
 	cpu->prev_mperf = mperf;
 }
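With the idle-time bookkeeping removed, busy tracking above reduces to the APERF/MPERF delta ratio. A small userspace sketch of the same arithmetic (example delta values stand in for the MSR reads, and max_pstate 26 is an assumed figure; this is not driver code):

/*
 * core_pct_busy is simply the APERF/MPERF ratio in percent, and the
 * estimated frequency scales the maximum P-state by that ratio.
 */
#include <stdint.h>
#include <stdio.h>

struct sample { uint64_t aperf, mperf; int core_pct_busy, freq; };

static void calc_busy(struct sample *s, int max_pstate)
{
	uint64_t core_pct = s->aperf * 100 / s->mperf;	/* percent of max clock */

	s->freq = max_pstate * (int)core_pct * 1000;	/* rough kHz estimate */
	s->core_pct_busy = (int)core_pct;		/* no idle-time scaling anymore */
}

int main(void)
{
	/* e.g. a part with max_pstate 26 running at ~73% of its max clock */
	struct sample s = { .aperf = 730000, .mperf = 1000000 };

	calc_busy(&s, 26);
	printf("busy=%d%% freq=%d kHz\n", s.core_pct_busy, s.freq);
	return 0;
}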
@@ -505,16 +465,6 @@ static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
 	mod_timer_pinned(&cpu->timer, jiffies + delay);
 }
 
-static inline void intel_pstate_idle_mode(struct cpudata *cpu)
-{
-	cpu->idle_mode = 1;
-}
-
-static inline void intel_pstate_normal_mode(struct cpudata *cpu)
-{
-	cpu->idle_mode = 0;
-}
-
 static inline int intel_pstate_get_scaled_busy(struct cpudata *cpu)
 {
 	int32_t busy_scaled;
@@ -547,50 +497,21 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
 		intel_pstate_pstate_decrease(cpu, steps);
 }
 
-static inline void intel_pstate_adjust_idle_pstate(struct cpudata *cpu)
-{
-	int busy_scaled;
-	struct _pid *pid;
-	int ctl = 0;
-	int steps;
-
-	pid = &cpu->idle_pid;
-
-	busy_scaled = intel_pstate_get_scaled_busy(cpu);
-
-	ctl = pid_calc(pid, 100 - busy_scaled);
-
-	steps = abs(ctl);
-	if (ctl < 0)
-		intel_pstate_pstate_decrease(cpu, steps);
-	else
-		intel_pstate_pstate_increase(cpu, steps);
-
-	if (cpu->pstate.current_pstate == cpu->pstate.min_pstate)
-		intel_pstate_normal_mode(cpu);
-}
-
 static void intel_pstate_timer_func(unsigned long __data)
 {
 	struct cpudata *cpu = (struct cpudata *) __data;
 
 	intel_pstate_sample(cpu);
+	intel_pstate_adjust_busy_pstate(cpu);
 
-	if (!cpu->idle_mode)
-		intel_pstate_adjust_busy_pstate(cpu);
-	else
-		intel_pstate_adjust_idle_pstate(cpu);
-
-#if defined(XPERF_FIX)
 	if (cpu->pstate.current_pstate == cpu->pstate.min_pstate) {
 		cpu->min_pstate_count++;
 		if (!(cpu->min_pstate_count % 5)) {
 			intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate);
-			intel_pstate_idle_mode(cpu);
 		}
 	} else
 		cpu->min_pstate_count = 0;
-#endif
+
 	intel_pstate_set_sample_time(cpu);
 }
 
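The timer now always runs the busy PID, and the block formerly guarded by XPERF_FIX applies unconditionally: every fifth consecutive sample taken at the minimum P-state forces a jump to the maximum P-state, presumably so the next APERF/MPERF window can observe demand at full speed. A toy simulation of just that counter logic (plain C; the P-state numbers and the load pattern are invented):

/* Counts samples stuck at the lowest P-state and kicks every fifth one. */
#include <stdio.h>

int main(void)
{
	int min_pstate_count = 0;
	int current_pstate = 8, min_pstate = 8, max_pstate = 26;

	for (int sample = 1; sample <= 12; sample++) {
		if (current_pstate == min_pstate) {
			min_pstate_count++;
			if (!(min_pstate_count % 5)) {
				current_pstate = max_pstate;	/* periodic kick */
				printf("sample %d: kick to max P-state\n", sample);
			}
		} else {
			min_pstate_count = 0;
			current_pstate = min_pstate;	/* pretend load stayed low */
		}
	}
	return 0;
}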
@@ -631,7 +552,6 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
 		(unsigned long)cpu;
 	cpu->timer.expires = jiffies + HZ/100;
 	intel_pstate_busy_pid_reset(cpu);
-	intel_pstate_idle_pid_reset(cpu);
 	intel_pstate_sample(cpu);
 	intel_pstate_set_pstate(cpu, cpu->pstate.max_pstate);
 
@@ -675,8 +595,9 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 	limits.min_perf_pct = clamp_t(int, limits.min_perf_pct, 0 , 100);
 	limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
 
-	limits.max_perf_pct = policy->max * 100 / policy->cpuinfo.max_freq;
-	limits.max_perf_pct = clamp_t(int, limits.max_perf_pct, 0 , 100);
+	limits.max_policy_pct = policy->max * 100 / policy->cpuinfo.max_freq;
+	limits.max_policy_pct = clamp_t(int, limits.max_policy_pct, 0 , 100);
+	limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
 	limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
 
 	return 0;
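The policy path mirrors the sysfs path: the kHz cap from cpufreq is converted to a percentage of the hardware maximum, clamped, and then combined with the sysfs percentage. A short standalone sketch of that conversion with example frequencies (a 2.4 GHz cap on a 3.2 GHz part; the numbers are illustrative only, not from the source):

/* Converts a policy kHz cap into max_policy_pct and combines the limits. */
#include <stdio.h>

int main(void)
{
	int policy_max = 2400000;	/* kHz cap requested by the policy */
	int cpuinfo_max_freq = 3200000;	/* kHz hardware maximum */
	int max_sysfs_pct = 100;	/* untouched sysfs knob */

	int max_policy_pct = policy_max * 100 / cpuinfo_max_freq;	/* 75 */
	if (max_policy_pct > 100)
		max_policy_pct = 100;
	int max_perf_pct = max_policy_pct < max_sysfs_pct ?
				max_policy_pct : max_sysfs_pct;

	printf("max_policy_pct=%d max_perf_pct=%d\n",
	       max_policy_pct, max_perf_pct);
	return 0;
}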
@@ -788,10 +709,9 @@ static int __init intel_pstate_init(void)
 
 	pr_info("Intel P-state driver initializing.\n");
 
-	all_cpu_data = vmalloc(sizeof(void *) * num_possible_cpus());
+	all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus());
 	if (!all_cpu_data)
 		return -ENOMEM;
-	memset(all_cpu_data, 0, sizeof(void *) * num_possible_cpus());
 
 	rc = cpufreq_register_driver(&intel_pstate_driver);
 	if (rc)
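The vmalloc()+memset() pair collapses into a single vzalloc(), which returns already-zeroed memory. A userspace analogy with malloc()/calloc() (not kernel code) shows the same shape of change:

/* Old pattern: allocate, then zero by hand.  New pattern: one zeroing call. */
#include <stdlib.h>
#include <string.h>

int main(void)
{
	size_t n = 64, sz = sizeof(void *);

	void **a = malloc(n * sz);	/* old: allocate... */
	if (!a)
		return 1;
	memset(a, 0, n * sz);		/* ...then clear */

	void **b = calloc(n, sz);	/* new: allocation and zeroing in one step */
	if (!b) {
		free(a);
		return 1;
	}

	free(a);
	free(b);
	return 0;
}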