aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/cpufreq/intel_pstate.c
diff options
context:
space:
mode:
authorDirk Brandewie <dirk.j.brandewie@intel.com>2014-11-06 12:40:47 -0500
committerRafael J. Wysocki <rafael.j.wysocki@intel.com>2014-11-11 18:04:38 -0500
commit2f86dc4cddcb21290ca099e1dce2a53533c86e0b (patch)
tree4f713a464f066752b8f190fafe1d23bc37118d59 /drivers/cpufreq/intel_pstate.c
parent77873887729aaddec5cd27203a6ce8c4987733e4 (diff)
intel_pstate: Add support for HWP
Add support of Hardware Managed Performance States (HWP) described in Volume 3 section 14.4 of the SDM. With HWP enabled intel_pstate will no longer be responsible for selecting P states for the processor. intel_pstate will continue to register to the cpufreq core as the scaling driver for CPUs implementing HWP. In HWP mode intel_pstate provides three functions reporting frequency to the cpufreq core, support for the set_policy() interface from the core and maintaining the intel_pstate sysfs interface in /sys/devices/system/cpu/intel_pstate. User preferences expressed via the set_policy() interface or the sysfs interface are forwarded to the CPU via the HWP MSR interface. Signed-off-by: Dirk Brandewie <dirk.j.brandewie@intel.com> Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Diffstat (limited to 'drivers/cpufreq/intel_pstate.c')
-rw-r--r--drivers/cpufreq/intel_pstate.c100
1 files changed, 98 insertions, 2 deletions
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 27bb6d3877ed..ba35db092239 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -137,6 +137,7 @@ struct cpu_defaults {
137 137
138static struct pstate_adjust_policy pid_params; 138static struct pstate_adjust_policy pid_params;
139static struct pstate_funcs pstate_funcs; 139static struct pstate_funcs pstate_funcs;
140static int hwp_active;
140 141
141struct perf_limits { 142struct perf_limits {
142 int no_turbo; 143 int no_turbo;
@@ -244,6 +245,34 @@ static inline void update_turbo_state(void)
244 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate); 245 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
245} 246}
246 247
248#define PCT_TO_HWP(x) (x * 255 / 100)
249static void intel_pstate_hwp_set(void)
250{
251 int min, max, cpu;
252 u64 value, freq;
253
254 get_online_cpus();
255
256 for_each_online_cpu(cpu) {
257 rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
258 min = PCT_TO_HWP(limits.min_perf_pct);
259 value &= ~HWP_MIN_PERF(~0L);
260 value |= HWP_MIN_PERF(min);
261
262 max = PCT_TO_HWP(limits.max_perf_pct);
263 if (limits.no_turbo) {
264 rdmsrl( MSR_HWP_CAPABILITIES, freq);
265 max = HWP_GUARANTEED_PERF(freq);
266 }
267
268 value &= ~HWP_MAX_PERF(~0L);
269 value |= HWP_MAX_PERF(max);
270 wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
271 }
272
273 put_online_cpus();
274}
275
247/************************** debugfs begin ************************/ 276/************************** debugfs begin ************************/
248static int pid_param_set(void *data, u64 val) 277static int pid_param_set(void *data, u64 val)
249{ 278{
@@ -279,6 +308,8 @@ static void __init intel_pstate_debug_expose_params(void)
279 struct dentry *debugfs_parent; 308 struct dentry *debugfs_parent;
280 int i = 0; 309 int i = 0;
281 310
311 if (hwp_active)
312 return;
282 debugfs_parent = debugfs_create_dir("pstate_snb", NULL); 313 debugfs_parent = debugfs_create_dir("pstate_snb", NULL);
283 if (IS_ERR_OR_NULL(debugfs_parent)) 314 if (IS_ERR_OR_NULL(debugfs_parent))
284 return; 315 return;
@@ -329,8 +360,12 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
329 pr_warn("Turbo disabled by BIOS or unavailable on processor\n"); 360 pr_warn("Turbo disabled by BIOS or unavailable on processor\n");
330 return -EPERM; 361 return -EPERM;
331 } 362 }
363
332 limits.no_turbo = clamp_t(int, input, 0, 1); 364 limits.no_turbo = clamp_t(int, input, 0, 1);
333 365
366 if (hwp_active)
367 intel_pstate_hwp_set();
368
334 return count; 369 return count;
335} 370}
336 371
@@ -348,6 +383,8 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
348 limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct); 383 limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
349 limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100)); 384 limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
350 385
386 if (hwp_active)
387 intel_pstate_hwp_set();
351 return count; 388 return count;
352} 389}
353 390
@@ -363,6 +400,8 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
363 limits.min_perf_pct = clamp_t(int, input, 0 , 100); 400 limits.min_perf_pct = clamp_t(int, input, 0 , 100);
364 limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100)); 401 limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
365 402
403 if (hwp_active)
404 intel_pstate_hwp_set();
366 return count; 405 return count;
367} 406}
368 407
@@ -395,8 +434,16 @@ static void __init intel_pstate_sysfs_expose_params(void)
395 rc = sysfs_create_group(intel_pstate_kobject, &intel_pstate_attr_group); 434 rc = sysfs_create_group(intel_pstate_kobject, &intel_pstate_attr_group);
396 BUG_ON(rc); 435 BUG_ON(rc);
397} 436}
398
399/************************** sysfs end ************************/ 437/************************** sysfs end ************************/
438
439static void intel_pstate_hwp_enable(void)
440{
441 hwp_active++;
442 pr_info("intel_pstate HWP enabled\n");
443
444 wrmsrl( MSR_PM_ENABLE, 0x1);
445}
446
400static int byt_get_min_pstate(void) 447static int byt_get_min_pstate(void)
401{ 448{
402 u64 value; 449 u64 value;
@@ -648,6 +695,14 @@ static inline void intel_pstate_sample(struct cpudata *cpu)
648 cpu->prev_mperf = mperf; 695 cpu->prev_mperf = mperf;
649} 696}
650 697
698static inline void intel_hwp_set_sample_time(struct cpudata *cpu)
699{
700 int delay;
701
702 delay = msecs_to_jiffies(50);
703 mod_timer_pinned(&cpu->timer, jiffies + delay);
704}
705
651static inline void intel_pstate_set_sample_time(struct cpudata *cpu) 706static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
652{ 707{
653 int delay; 708 int delay;
@@ -694,6 +749,14 @@ static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
694 intel_pstate_set_pstate(cpu, cpu->pstate.current_pstate - ctl); 749 intel_pstate_set_pstate(cpu, cpu->pstate.current_pstate - ctl);
695} 750}
696 751
752static void intel_hwp_timer_func(unsigned long __data)
753{
754 struct cpudata *cpu = (struct cpudata *) __data;
755
756 intel_pstate_sample(cpu);
757 intel_hwp_set_sample_time(cpu);
758}
759
697static void intel_pstate_timer_func(unsigned long __data) 760static void intel_pstate_timer_func(unsigned long __data)
698{ 761{
699 struct cpudata *cpu = (struct cpudata *) __data; 762 struct cpudata *cpu = (struct cpudata *) __data;
@@ -737,6 +800,11 @@ static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
737}; 800};
738MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids); 801MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
739 802
803static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] = {
804 ICPU(0x56, core_params),
805 {}
806};
807
740static int intel_pstate_init_cpu(unsigned int cpunum) 808static int intel_pstate_init_cpu(unsigned int cpunum)
741{ 809{
742 struct cpudata *cpu; 810 struct cpudata *cpu;
@@ -753,9 +821,14 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
753 intel_pstate_get_cpu_pstates(cpu); 821 intel_pstate_get_cpu_pstates(cpu);
754 822
755 init_timer_deferrable(&cpu->timer); 823 init_timer_deferrable(&cpu->timer);
756 cpu->timer.function = intel_pstate_timer_func;
757 cpu->timer.data = (unsigned long)cpu; 824 cpu->timer.data = (unsigned long)cpu;
758 cpu->timer.expires = jiffies + HZ/100; 825 cpu->timer.expires = jiffies + HZ/100;
826
827 if (!hwp_active)
828 cpu->timer.function = intel_pstate_timer_func;
829 else
830 cpu->timer.function = intel_hwp_timer_func;
831
759 intel_pstate_busy_pid_reset(cpu); 832 intel_pstate_busy_pid_reset(cpu);
760 intel_pstate_sample(cpu); 833 intel_pstate_sample(cpu);
761 834
@@ -792,6 +865,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
792 limits.no_turbo = 0; 865 limits.no_turbo = 0;
793 return 0; 866 return 0;
794 } 867 }
868
795 limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq; 869 limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
796 limits.min_perf_pct = clamp_t(int, limits.min_perf_pct, 0 , 100); 870 limits.min_perf_pct = clamp_t(int, limits.min_perf_pct, 0 , 100);
797 limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100)); 871 limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
@@ -801,6 +875,9 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
801 limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct); 875 limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
802 limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100)); 876 limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
803 877
878 if (hwp_active)
879 intel_pstate_hwp_set();
880
804 return 0; 881 return 0;
805} 882}
806 883
@@ -823,6 +900,9 @@ static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
823 pr_info("intel_pstate CPU %d exiting\n", cpu_num); 900 pr_info("intel_pstate CPU %d exiting\n", cpu_num);
824 901
825 del_timer_sync(&all_cpu_data[cpu_num]->timer); 902 del_timer_sync(&all_cpu_data[cpu_num]->timer);
903 if (hwp_active)
904 return;
905
826 intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate); 906 intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
827} 907}
828 908
@@ -866,6 +946,7 @@ static struct cpufreq_driver intel_pstate_driver = {
866}; 946};
867 947
868static int __initdata no_load; 948static int __initdata no_load;
949static int __initdata no_hwp;
869 950
870static int intel_pstate_msrs_not_valid(void) 951static int intel_pstate_msrs_not_valid(void)
871{ 952{
@@ -959,6 +1040,15 @@ static bool intel_pstate_platform_pwr_mgmt_exists(void)
959{ 1040{
960 struct acpi_table_header hdr; 1041 struct acpi_table_header hdr;
961 struct hw_vendor_info *v_info; 1042 struct hw_vendor_info *v_info;
1043 const struct x86_cpu_id *id;
1044 u64 misc_pwr;
1045
1046 id = x86_match_cpu(intel_pstate_cpu_oob_ids);
1047 if (id) {
1048 rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr);
1049 if ( misc_pwr & (1 << 8))
1050 return true;
1051 }
962 1052
963 if (acpi_disabled || 1053 if (acpi_disabled ||
964 ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_FADT, 0, &hdr))) 1054 ACPI_FAILURE(acpi_get_table_header(ACPI_SIG_FADT, 0, &hdr)))
@@ -982,6 +1072,7 @@ static int __init intel_pstate_init(void)
982 int cpu, rc = 0; 1072 int cpu, rc = 0;
983 const struct x86_cpu_id *id; 1073 const struct x86_cpu_id *id;
984 struct cpu_defaults *cpu_info; 1074 struct cpu_defaults *cpu_info;
1075 struct cpuinfo_x86 *c = &boot_cpu_data;
985 1076
986 if (no_load) 1077 if (no_load)
987 return -ENODEV; 1078 return -ENODEV;
@@ -1011,6 +1102,9 @@ static int __init intel_pstate_init(void)
1011 if (!all_cpu_data) 1102 if (!all_cpu_data)
1012 return -ENOMEM; 1103 return -ENOMEM;
1013 1104
1105 if (cpu_has(c,X86_FEATURE_HWP) && !no_hwp)
1106 intel_pstate_hwp_enable();
1107
1014 rc = cpufreq_register_driver(&intel_pstate_driver); 1108 rc = cpufreq_register_driver(&intel_pstate_driver);
1015 if (rc) 1109 if (rc)
1016 goto out; 1110 goto out;
@@ -1041,6 +1135,8 @@ static int __init intel_pstate_setup(char *str)
1041 1135
1042 if (!strcmp(str, "disable")) 1136 if (!strcmp(str, "disable"))
1043 no_load = 1; 1137 no_load = 1;
1138 if (!strcmp(str, "no_hwp"))
1139 no_hwp = 1;
1044 return 0; 1140 return 0;
1045} 1141}
1046early_param("intel_pstate", intel_pstate_setup); 1142early_param("intel_pstate", intel_pstate_setup);