about summary refs log tree commit diff stats
path: root/drivers/cpufreq/acpi-cpufreq.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/cpufreq/acpi-cpufreq.c')
-rw-r--r--  drivers/cpufreq/acpi-cpufreq.c  272
1 files changed, 266 insertions, 6 deletions
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 56c6c6b4eb4d..0d048f6a2b23 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -51,13 +51,19 @@ MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski");
51MODULE_DESCRIPTION("ACPI Processor P-States Driver"); 51MODULE_DESCRIPTION("ACPI Processor P-States Driver");
52MODULE_LICENSE("GPL"); 52MODULE_LICENSE("GPL");
53 53
54#define PFX "acpi-cpufreq: "
55
54enum { 56enum {
55 UNDEFINED_CAPABLE = 0, 57 UNDEFINED_CAPABLE = 0,
56 SYSTEM_INTEL_MSR_CAPABLE, 58 SYSTEM_INTEL_MSR_CAPABLE,
59 SYSTEM_AMD_MSR_CAPABLE,
57 SYSTEM_IO_CAPABLE, 60 SYSTEM_IO_CAPABLE,
58}; 61};
59 62
60#define INTEL_MSR_RANGE (0xffff) 63#define INTEL_MSR_RANGE (0xffff)
64#define AMD_MSR_RANGE (0x7)
65
66#define MSR_K7_HWCR_CPB_DIS (1ULL << 25)
61 67
62struct acpi_cpufreq_data { 68struct acpi_cpufreq_data {
63 struct acpi_processor_performance *acpi_data; 69 struct acpi_processor_performance *acpi_data;
@@ -74,6 +80,116 @@ static struct acpi_processor_performance __percpu *acpi_perf_data;
74static struct cpufreq_driver acpi_cpufreq_driver; 80static struct cpufreq_driver acpi_cpufreq_driver;
75 81
76static unsigned int acpi_pstate_strict; 82static unsigned int acpi_pstate_strict;
83static bool boost_enabled, boost_supported;
84static struct msr __percpu *msrs;
85
86static bool boost_state(unsigned int cpu)
87{
88 u32 lo, hi;
89 u64 msr;
90
91 switch (boot_cpu_data.x86_vendor) {
92 case X86_VENDOR_INTEL:
93 rdmsr_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &lo, &hi);
94 msr = lo | ((u64)hi << 32);
95 return !(msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
96 case X86_VENDOR_AMD:
97 rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
98 msr = lo | ((u64)hi << 32);
99 return !(msr & MSR_K7_HWCR_CPB_DIS);
100 }
101 return false;
102}
103
104static void boost_set_msrs(bool enable, const struct cpumask *cpumask)
105{
106 u32 cpu;
107 u32 msr_addr;
108 u64 msr_mask;
109
110 switch (boot_cpu_data.x86_vendor) {
111 case X86_VENDOR_INTEL:
112 msr_addr = MSR_IA32_MISC_ENABLE;
113 msr_mask = MSR_IA32_MISC_ENABLE_TURBO_DISABLE;
114 break;
115 case X86_VENDOR_AMD:
116 msr_addr = MSR_K7_HWCR;
117 msr_mask = MSR_K7_HWCR_CPB_DIS;
118 break;
119 default:
120 return;
121 }
122
123 rdmsr_on_cpus(cpumask, msr_addr, msrs);
124
125 for_each_cpu(cpu, cpumask) {
126 struct msr *reg = per_cpu_ptr(msrs, cpu);
127 if (enable)
128 reg->q &= ~msr_mask;
129 else
130 reg->q |= msr_mask;
131 }
132
133 wrmsr_on_cpus(cpumask, msr_addr, msrs);
134}
135
136static ssize_t _store_boost(const char *buf, size_t count)
137{
138 int ret;
139 unsigned long val = 0;
140
141 if (!boost_supported)
142 return -EINVAL;
143
144 ret = kstrtoul(buf, 10, &val);
145 if (ret || (val > 1))
146 return -EINVAL;
147
148 if ((val && boost_enabled) || (!val && !boost_enabled))
149 return count;
150
151 get_online_cpus();
152
153 boost_set_msrs(val, cpu_online_mask);
154
155 put_online_cpus();
156
157 boost_enabled = val;
158 pr_debug("Core Boosting %sabled.\n", val ? "en" : "dis");
159
160 return count;
161}
162
163static ssize_t store_global_boost(struct kobject *kobj, struct attribute *attr,
164 const char *buf, size_t count)
165{
166 return _store_boost(buf, count);
167}
168
169static ssize_t show_global_boost(struct kobject *kobj,
170 struct attribute *attr, char *buf)
171{
172 return sprintf(buf, "%u\n", boost_enabled);
173}
174
175static struct global_attr global_boost = __ATTR(boost, 0644,
176 show_global_boost,
177 store_global_boost);
178
179#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
180static ssize_t store_cpb(struct cpufreq_policy *policy, const char *buf,
181 size_t count)
182{
183 return _store_boost(buf, count);
184}
185
186static ssize_t show_cpb(struct cpufreq_policy *policy, char *buf)
187{
188 return sprintf(buf, "%u\n", boost_enabled);
189}
190
191static struct freq_attr cpb = __ATTR(cpb, 0644, show_cpb, store_cpb);
192#endif
77 193
78static int check_est_cpu(unsigned int cpuid) 194static int check_est_cpu(unsigned int cpuid)
79{ 195{
@@ -82,6 +198,13 @@ static int check_est_cpu(unsigned int cpuid)
82 return cpu_has(cpu, X86_FEATURE_EST); 198 return cpu_has(cpu, X86_FEATURE_EST);
83} 199}
84 200
201static int check_amd_hwpstate_cpu(unsigned int cpuid)
202{
203 struct cpuinfo_x86 *cpu = &cpu_data(cpuid);
204
205 return cpu_has(cpu, X86_FEATURE_HW_PSTATE);
206}
207
85static unsigned extract_io(u32 value, struct acpi_cpufreq_data *data) 208static unsigned extract_io(u32 value, struct acpi_cpufreq_data *data)
86{ 209{
87 struct acpi_processor_performance *perf; 210 struct acpi_processor_performance *perf;
@@ -101,7 +224,11 @@ static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data)
101 int i; 224 int i;
102 struct acpi_processor_performance *perf; 225 struct acpi_processor_performance *perf;
103 226
104 msr &= INTEL_MSR_RANGE; 227 if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
228 msr &= AMD_MSR_RANGE;
229 else
230 msr &= INTEL_MSR_RANGE;
231
105 perf = data->acpi_data; 232 perf = data->acpi_data;
106 233
107 for (i = 0; data->freq_table[i].frequency != CPUFREQ_TABLE_END; i++) { 234 for (i = 0; data->freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
@@ -115,6 +242,7 @@ static unsigned extract_freq(u32 val, struct acpi_cpufreq_data *data)
115{ 242{
116 switch (data->cpu_feature) { 243 switch (data->cpu_feature) {
117 case SYSTEM_INTEL_MSR_CAPABLE: 244 case SYSTEM_INTEL_MSR_CAPABLE:
245 case SYSTEM_AMD_MSR_CAPABLE:
118 return extract_msr(val, data); 246 return extract_msr(val, data);
119 case SYSTEM_IO_CAPABLE: 247 case SYSTEM_IO_CAPABLE:
120 return extract_io(val, data); 248 return extract_io(val, data);
@@ -150,6 +278,7 @@ static void do_drv_read(void *_cmd)
150 278
151 switch (cmd->type) { 279 switch (cmd->type) {
152 case SYSTEM_INTEL_MSR_CAPABLE: 280 case SYSTEM_INTEL_MSR_CAPABLE:
281 case SYSTEM_AMD_MSR_CAPABLE:
153 rdmsr(cmd->addr.msr.reg, cmd->val, h); 282 rdmsr(cmd->addr.msr.reg, cmd->val, h);
154 break; 283 break;
155 case SYSTEM_IO_CAPABLE: 284 case SYSTEM_IO_CAPABLE:
@@ -174,6 +303,9 @@ static void do_drv_write(void *_cmd)
174 lo = (lo & ~INTEL_MSR_RANGE) | (cmd->val & INTEL_MSR_RANGE); 303 lo = (lo & ~INTEL_MSR_RANGE) | (cmd->val & INTEL_MSR_RANGE);
175 wrmsr(cmd->addr.msr.reg, lo, hi); 304 wrmsr(cmd->addr.msr.reg, lo, hi);
176 break; 305 break;
306 case SYSTEM_AMD_MSR_CAPABLE:
307 wrmsr(cmd->addr.msr.reg, cmd->val, 0);
308 break;
177 case SYSTEM_IO_CAPABLE: 309 case SYSTEM_IO_CAPABLE:
178 acpi_os_write_port((acpi_io_address)cmd->addr.io.port, 310 acpi_os_write_port((acpi_io_address)cmd->addr.io.port,
179 cmd->val, 311 cmd->val,
@@ -217,6 +349,10 @@ static u32 get_cur_val(const struct cpumask *mask)
217 cmd.type = SYSTEM_INTEL_MSR_CAPABLE; 349 cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
218 cmd.addr.msr.reg = MSR_IA32_PERF_STATUS; 350 cmd.addr.msr.reg = MSR_IA32_PERF_STATUS;
219 break; 351 break;
352 case SYSTEM_AMD_MSR_CAPABLE:
353 cmd.type = SYSTEM_AMD_MSR_CAPABLE;
354 cmd.addr.msr.reg = MSR_AMD_PERF_STATUS;
355 break;
220 case SYSTEM_IO_CAPABLE: 356 case SYSTEM_IO_CAPABLE:
221 cmd.type = SYSTEM_IO_CAPABLE; 357 cmd.type = SYSTEM_IO_CAPABLE;
222 perf = per_cpu(acfreq_data, cpumask_first(mask))->acpi_data; 358 perf = per_cpu(acfreq_data, cpumask_first(mask))->acpi_data;
@@ -326,6 +462,11 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
326 cmd.addr.msr.reg = MSR_IA32_PERF_CTL; 462 cmd.addr.msr.reg = MSR_IA32_PERF_CTL;
327 cmd.val = (u32) perf->states[next_perf_state].control; 463 cmd.val = (u32) perf->states[next_perf_state].control;
328 break; 464 break;
465 case SYSTEM_AMD_MSR_CAPABLE:
466 cmd.type = SYSTEM_AMD_MSR_CAPABLE;
467 cmd.addr.msr.reg = MSR_AMD_PERF_CTL;
468 cmd.val = (u32) perf->states[next_perf_state].control;
469 break;
329 case SYSTEM_IO_CAPABLE: 470 case SYSTEM_IO_CAPABLE:
330 cmd.type = SYSTEM_IO_CAPABLE; 471 cmd.type = SYSTEM_IO_CAPABLE;
331 cmd.addr.io.port = perf->control_register.address; 472 cmd.addr.io.port = perf->control_register.address;
@@ -419,6 +560,44 @@ static void free_acpi_perf_data(void)
419 free_percpu(acpi_perf_data); 560 free_percpu(acpi_perf_data);
420} 561}
421 562
563static int boost_notify(struct notifier_block *nb, unsigned long action,
564 void *hcpu)
565{
566 unsigned cpu = (long)hcpu;
567 const struct cpumask *cpumask;
568
569 cpumask = get_cpu_mask(cpu);
570
571 /*
572 * Clear the boost-disable bit on the CPU_DOWN path so that
573 * this cpu cannot block the remaining ones from boosting. On
574 * the CPU_UP path we simply keep the boost-disable flag in
575 * sync with the current global state.
576 */
577
578 switch (action) {
579 case CPU_UP_PREPARE:
580 case CPU_UP_PREPARE_FROZEN:
581 boost_set_msrs(boost_enabled, cpumask);
582 break;
583
584 case CPU_DOWN_PREPARE:
585 case CPU_DOWN_PREPARE_FROZEN:
586 boost_set_msrs(1, cpumask);
587 break;
588
589 default:
590 break;
591 }
592
593 return NOTIFY_OK;
594}
595
596
597static struct notifier_block boost_nb = {
598 .notifier_call = boost_notify,
599};
600
422/* 601/*
423 * acpi_cpufreq_early_init - initialize ACPI P-States library 602 * acpi_cpufreq_early_init - initialize ACPI P-States library
424 * 603 *
@@ -559,6 +738,14 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
559 policy->shared_type = CPUFREQ_SHARED_TYPE_ALL; 738 policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
560 cpumask_copy(policy->cpus, cpu_core_mask(cpu)); 739 cpumask_copy(policy->cpus, cpu_core_mask(cpu));
561 } 740 }
741
742 if (check_amd_hwpstate_cpu(cpu) && !acpi_pstate_strict) {
743 cpumask_clear(policy->cpus);
744 cpumask_set_cpu(cpu, policy->cpus);
745 cpumask_copy(policy->related_cpus, cpu_sibling_mask(cpu));
746 policy->shared_type = CPUFREQ_SHARED_TYPE_HW;
747 pr_info_once(PFX "overriding BIOS provided _PSD data\n");
748 }
562#endif 749#endif
563 750
564 /* capability check */ 751 /* capability check */
@@ -580,12 +767,16 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
580 break; 767 break;
581 case ACPI_ADR_SPACE_FIXED_HARDWARE: 768 case ACPI_ADR_SPACE_FIXED_HARDWARE:
582 pr_debug("HARDWARE addr space\n"); 769 pr_debug("HARDWARE addr space\n");
583 if (!check_est_cpu(cpu)) { 770 if (check_est_cpu(cpu)) {
584 result = -ENODEV; 771 data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE;
585 goto err_unreg; 772 break;
586 } 773 }
587 data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE; 774 if (check_amd_hwpstate_cpu(cpu)) {
588 break; 775 data->cpu_feature = SYSTEM_AMD_MSR_CAPABLE;
776 break;
777 }
778 result = -ENODEV;
779 goto err_unreg;
589 default: 780 default:
590 pr_debug("Unknown addr space %d\n", 781 pr_debug("Unknown addr space %d\n",
591 (u32) (perf->control_register.space_id)); 782 (u32) (perf->control_register.space_id));
@@ -718,6 +909,7 @@ static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
718 909
719static struct freq_attr *acpi_cpufreq_attr[] = { 910static struct freq_attr *acpi_cpufreq_attr[] = {
720 &cpufreq_freq_attr_scaling_available_freqs, 911 &cpufreq_freq_attr_scaling_available_freqs,
912 NULL, /* this is a placeholder for cpb, do not remove */
721 NULL, 913 NULL,
722}; 914};
723 915
@@ -733,6 +925,49 @@ static struct cpufreq_driver acpi_cpufreq_driver = {
733 .attr = acpi_cpufreq_attr, 925 .attr = acpi_cpufreq_attr,
734}; 926};
735 927
928static void __init acpi_cpufreq_boost_init(void)
929{
930 if (boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA)) {
931 msrs = msrs_alloc();
932
933 if (!msrs)
934 return;
935
936 boost_supported = true;
937 boost_enabled = boost_state(0);
938
939 get_online_cpus();
940
941 /* Force all MSRs to the same value */
942 boost_set_msrs(boost_enabled, cpu_online_mask);
943
944 register_cpu_notifier(&boost_nb);
945
946 put_online_cpus();
947 } else
948 global_boost.attr.mode = 0444;
949
950 /* We create the boost file in any case, though for systems without
951 * hardware support it will be read-only and hardwired to return 0.
952 */
953 if (sysfs_create_file(cpufreq_global_kobject, &(global_boost.attr)))
954 pr_warn(PFX "could not register global boost sysfs file\n");
955 else
956 pr_debug("registered global boost sysfs file\n");
957}
958
959static void __exit acpi_cpufreq_boost_exit(void)
960{
961 sysfs_remove_file(cpufreq_global_kobject, &(global_boost.attr));
962
963 if (msrs) {
964 unregister_cpu_notifier(&boost_nb);
965
966 msrs_free(msrs);
967 msrs = NULL;
968 }
969}
970
736static int __init acpi_cpufreq_init(void) 971static int __init acpi_cpufreq_init(void)
737{ 972{
738 int ret; 973 int ret;
@@ -746,9 +981,32 @@ static int __init acpi_cpufreq_init(void)
746 if (ret) 981 if (ret)
747 return ret; 982 return ret;
748 983
984#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
985 /* this is a sysfs file with a strange name and an even stranger
986 * semantic - per CPU instantiation, but system global effect.
987 * Lets enable it only on AMD CPUs for compatibility reasons and
988 * only if configured. This is considered legacy code, which
989 * will probably be removed at some point in the future.
990 */
991 if (check_amd_hwpstate_cpu(0)) {
992 struct freq_attr **iter;
993
994 pr_debug("adding sysfs entry for cpb\n");
995
996 for (iter = acpi_cpufreq_attr; *iter != NULL; iter++)
997 ;
998
999 /* make sure there is a terminator behind it */
1000 if (iter[1] == NULL)
1001 *iter = &cpb;
1002 }
1003#endif
1004
749 ret = cpufreq_register_driver(&acpi_cpufreq_driver); 1005 ret = cpufreq_register_driver(&acpi_cpufreq_driver);
750 if (ret) 1006 if (ret)
751 free_acpi_perf_data(); 1007 free_acpi_perf_data();
1008 else
1009 acpi_cpufreq_boost_init();
752 1010
753 return ret; 1011 return ret;
754} 1012}
@@ -757,6 +1015,8 @@ static void __exit acpi_cpufreq_exit(void)
757{ 1015{
758 pr_debug("acpi_cpufreq_exit\n"); 1016 pr_debug("acpi_cpufreq_exit\n");
759 1017
1018 acpi_cpufreq_boost_exit();
1019
760 cpufreq_unregister_driver(&acpi_cpufreq_driver); 1020 cpufreq_unregister_driver(&acpi_cpufreq_driver);
761 1021
762 free_acpi_perf_data(); 1022 free_acpi_perf_data();