author	Andre Przywara <andre.przywara@amd.com>	2012-09-04 04:28:07 -0400
committer	Rafael J. Wysocki <rjw@sisk.pl>	2012-09-09 16:05:12 -0400
commit	615b7300717b9ad5c23d1f391843484fe30f6c12 (patch)
tree	a9773a2bb6ce4d411485402af45dcadf7fbefe28 /drivers/cpufreq
parent	f594065faf4f9067c2283a34619fc0714e79a98d (diff)
acpi-cpufreq: Add support for disabling dynamic overclocking
One feature present in powernow-k8 that isn't present in acpi-cpufreq
is support for enabling or disabling AMD's core performance boost
technology. This patch adds that support to acpi-cpufreq and also
covers Intel's dynamic acceleration.
The original boost-disabling sysfs file was per-CPU, but acted
globally. Also, the naming (cpb) was not intuitive.
So let's introduce a single file simply called "boost", which sits
once in /sys/devices/system/cpu/cpufreq.
This should be the only way of using this feature, so add
documentation about the rationale and the usage.
A following patch will re-introduce the cpb knob for compatibility
reasons on AMD CPUs.
Per-CPU boost switching is possible, but not trivial and is thus
postponed to a later patch series.
Signed-off-by: Andre Przywara <andre.przywara@amd.com>
Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
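
For illustration, the new global knob can be exercised from userspace as shown below. This is only a sketch, not part of the patch; it assumes nothing beyond the /sys/devices/system/cpu/cpufreq/boost file described above:

/* Illustrative sketch only: toggle the global "boost" knob described
 * in the commit message. Assumes the sysfs file added by this patch
 * exists and that the caller has permission to write it.
 */
#include <stdio.h>

#define BOOST_PATH "/sys/devices/system/cpu/cpufreq/boost"

static int set_boost(int enable)
{
	FILE *f = fopen(BOOST_PATH, "w");

	if (!f)
		return -1;	/* no cpufreq, no boost file, or no permission */

	/* store_global_boost() accepts only "0" or "1" */
	fprintf(f, "%d\n", enable ? 1 : 0);
	return fclose(f);
}

int main(void)
{
	int boost;
	FILE *f = fopen(BOOST_PATH, "r");

	if (!f || fscanf(f, "%d", &boost) != 1)
		return 1;
	fclose(f);

	printf("boost is currently %s\n", boost ? "enabled" : "disabled");

	/* Writing 0 disables dynamic overclocking on all CPUs at once */
	return set_boost(0) ? 1 : 0;
}

Writing "1" re-enables boosting; on systems without hardware support the file is read-only and always reads back 0, matching the fallback path in acpi_cpufreq_boost_init() in the diff below.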
Diffstat (limited to 'drivers/cpufreq')
-rw-r--r--	drivers/cpufreq/acpi-cpufreq.c	177
1 file changed, 177 insertions, 0 deletions
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 70e717305c29..dffa7af1db71 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -63,6 +63,8 @@ enum {
 #define INTEL_MSR_RANGE		(0xffff)
 #define AMD_MSR_RANGE		(0x7)
 
+#define MSR_K7_HWCR_CPB_DIS	(1ULL << 25)
+
 struct acpi_cpufreq_data {
 	struct acpi_processor_performance *acpi_data;
 	struct cpufreq_frequency_table *freq_table;
@@ -78,6 +80,96 @@ static struct acpi_processor_performance __percpu *acpi_perf_data;
 static struct cpufreq_driver acpi_cpufreq_driver;
 
 static unsigned int acpi_pstate_strict;
+static bool boost_enabled, boost_supported;
+static struct msr __percpu *msrs;
+
+static bool boost_state(unsigned int cpu)
+{
+	u32 lo, hi;
+	u64 msr;
+
+	switch (boot_cpu_data.x86_vendor) {
+	case X86_VENDOR_INTEL:
+		rdmsr_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &lo, &hi);
+		msr = lo | ((u64)hi << 32);
+		return !(msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
+	case X86_VENDOR_AMD:
+		rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
+		msr = lo | ((u64)hi << 32);
+		return !(msr & MSR_K7_HWCR_CPB_DIS);
+	}
+	return false;
+}
+
+static void boost_set_msrs(bool enable, const struct cpumask *cpumask)
+{
+	u32 cpu;
+	u32 msr_addr;
+	u64 msr_mask;
+
+	switch (boot_cpu_data.x86_vendor) {
+	case X86_VENDOR_INTEL:
+		msr_addr = MSR_IA32_MISC_ENABLE;
+		msr_mask = MSR_IA32_MISC_ENABLE_TURBO_DISABLE;
+		break;
+	case X86_VENDOR_AMD:
+		msr_addr = MSR_K7_HWCR;
+		msr_mask = MSR_K7_HWCR_CPB_DIS;
+		break;
+	default:
+		return;
+	}
+
+	rdmsr_on_cpus(cpumask, msr_addr, msrs);
+
+	for_each_cpu(cpu, cpumask) {
+		struct msr *reg = per_cpu_ptr(msrs, cpu);
+		if (enable)
+			reg->q &= ~msr_mask;
+		else
+			reg->q |= msr_mask;
+	}
+
+	wrmsr_on_cpus(cpumask, msr_addr, msrs);
+}
+
+static ssize_t store_global_boost(struct kobject *kobj, struct attribute *attr,
+				  const char *buf, size_t count)
+{
+	int ret;
+	unsigned long val = 0;
+
+	if (!boost_supported)
+		return -EINVAL;
+
+	ret = kstrtoul(buf, 10, &val);
+	if (ret || (val > 1))
+		return -EINVAL;
+
+	if ((val && boost_enabled) || (!val && !boost_enabled))
+		return count;
+
+	get_online_cpus();
+
+	boost_set_msrs(val, cpu_online_mask);
+
+	put_online_cpus();
+
+	boost_enabled = val;
+	pr_debug("Core Boosting %sabled.\n", val ? "en" : "dis");
+
+	return count;
+}
+
+static ssize_t show_global_boost(struct kobject *kobj,
+				 struct attribute *attr, char *buf)
+{
+	return sprintf(buf, "%u\n", boost_enabled);
+}
+
+static struct global_attr global_boost = __ATTR(boost, 0644,
+						show_global_boost,
+						store_global_boost);
 
 static int check_est_cpu(unsigned int cpuid)
 {
@@ -448,6 +540,44 @@ static void free_acpi_perf_data(void)
 	free_percpu(acpi_perf_data);
 }
 
+static int boost_notify(struct notifier_block *nb, unsigned long action,
+			void *hcpu)
+{
+	unsigned cpu = (long)hcpu;
+	const struct cpumask *cpumask;
+
+	cpumask = get_cpu_mask(cpu);
+
+	/*
+	 * Clear the boost-disable bit on the CPU_DOWN path so that
+	 * this cpu cannot block the remaining ones from boosting. On
+	 * the CPU_UP path we simply keep the boost-disable flag in
+	 * sync with the current global state.
+	 */
+
+	switch (action) {
+	case CPU_UP_PREPARE:
+	case CPU_UP_PREPARE_FROZEN:
+		boost_set_msrs(boost_enabled, cpumask);
+		break;
+
+	case CPU_DOWN_PREPARE:
+	case CPU_DOWN_PREPARE_FROZEN:
+		boost_set_msrs(1, cpumask);
+		break;
+
+	default:
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+
+static struct notifier_block boost_nb = {
+	.notifier_call = boost_notify,
+};
+
 /*
  * acpi_cpufreq_early_init - initialize ACPI P-States library
  *
@@ -774,6 +904,49 @@ static struct cpufreq_driver acpi_cpufreq_driver = {
 	.attr		= acpi_cpufreq_attr,
 };
 
+static void __init acpi_cpufreq_boost_init(void)
+{
+	if (boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA)) {
+		msrs = msrs_alloc();
+
+		if (!msrs)
+			return;
+
+		boost_supported = true;
+		boost_enabled = boost_state(0);
+
+		get_online_cpus();
+
+		/* Force all MSRs to the same value */
+		boost_set_msrs(boost_enabled, cpu_online_mask);
+
+		register_cpu_notifier(&boost_nb);
+
+		put_online_cpus();
+	} else
+		global_boost.attr.mode = 0444;
+
+	/* We create the boost file in any case, though for systems without
+	 * hardware support it will be read-only and hardwired to return 0.
+	 */
+	if (sysfs_create_file(cpufreq_global_kobject, &(global_boost.attr)))
+		pr_warn(PFX "could not register global boost sysfs file\n");
+	else
+		pr_debug("registered global boost sysfs file\n");
+}
+
+static void __exit acpi_cpufreq_boost_exit(void)
+{
+	sysfs_remove_file(cpufreq_global_kobject, &(global_boost.attr));
+
+	if (msrs) {
+		unregister_cpu_notifier(&boost_nb);
+
+		msrs_free(msrs);
+		msrs = NULL;
+	}
+}
+
 static int __init acpi_cpufreq_init(void)
 {
 	int ret;
@@ -790,6 +963,8 @@ static int __init acpi_cpufreq_init(void)
 	ret = cpufreq_register_driver(&acpi_cpufreq_driver);
 	if (ret)
 		free_acpi_perf_data();
+	else
+		acpi_cpufreq_boost_init();
 
 	return ret;
 }
@@ -798,6 +973,8 @@ static void __exit acpi_cpufreq_exit(void)
 {
 	pr_debug("acpi_cpufreq_exit\n");
 
+	acpi_cpufreq_boost_exit();
+
 	cpufreq_unregister_driver(&acpi_cpufreq_driver);
 
 	free_acpi_perf_data();
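
As a footnote, the boost-disable bit that boost_state() checks can also be inspected from userspace through the msr character device. The snippet below is only an illustrative sketch, not part of the patch; it assumes an AMD CPU with the msr driver loaded, and reuses the MSR_K7_HWCR address (0xc0010015) together with the CPB_DIS bit defined in the patch:

/* Illustrative sketch only: read the AMD boost-disable bit on CPU 0
 * via /dev/cpu/0/msr. Requires the msr driver and root privileges.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define MSR_K7_HWCR		0xc0010015
#define MSR_K7_HWCR_CPB_DIS	(1ULL << 25)

int main(void)
{
	uint64_t hwcr;
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	if (fd < 0 || pread(fd, &hwcr, sizeof(hwcr), MSR_K7_HWCR) != sizeof(hwcr))
		return 1;
	close(fd);

	/* boost_state() in the patch returns the inverse of this bit */
	printf("boost %s on CPU 0\n",
	       (hwcr & MSR_K7_HWCR_CPB_DIS) ? "disabled" : "enabled");
	return 0;
}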