author		Matthew Garrett <mjg@redhat.com>	2012-09-04 04:28:09 -0400
committer	Rafael J. Wysocki <rjw@sisk.pl>	2012-09-09 16:05:30 -0400
commit		e1f0b8e9b04a262834ed111e605e5d215685dfab (patch)
tree		13319110b77c21754b89ca7e9b71842e459c40a2
parent		11269ff506888a06b19c8c7a3297114f30673973 (diff)
cpufreq: Remove support for hardware P-state chips from powernow-k8
These chips are now supported by acpi-cpufreq, so we can delete all the
code handling them.

Andre: Tighten the deprecation warning message. Trigger the load of
acpi-cpufreq and let the load of the powernow-k8 module finally fail.
This avoids the problem of users ending up without any cpufreq support
after the transition.

Signed-off-by: Matthew Garrett <mjg@redhat.com>
Signed-off-by: Andre Przywara <andre.przywara@amd.com>
Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
-rw-r--r--	drivers/cpufreq/Makefile	2
-rw-r--r--	drivers/cpufreq/powernow-k8.c	392
-rw-r--r--	drivers/cpufreq/powernow-k8.h	32
3 files changed, 29 insertions(+), 397 deletions(-)
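The behaviour described in the commit message is easiest to see in isolation: on a CPU with hardware P-state support, powernow-k8 now pulls in acpi-cpufreq and refuses to load, instead of merely printing a deprecation warning. The sketch below is condensed from the powernowk8_init() hunk further down in this patch; the trailing comment stands in for the unchanged remainder of the function.

static int __cpuinit powernowk8_init(void)
{
	unsigned int i, supported_cpus = 0;
	int rv;

	/* Hardware P-state CPUs are acpi-cpufreq's job now: request that
	 * driver and make this module's load fail, so the user is never
	 * left without cpufreq support after the transition. */
	if (static_cpu_has(X86_FEATURE_HW_PSTATE)) {
		pr_warn(PFX "this CPU is not supported anymore, using acpi-cpufreq instead.\n");
		request_module("acpi-cpufreq");
		return -ENODEV;
	}

	/* Only fid/vid (family 0xf) parts remain in powernow_k8_ids. */
	if (!x86_match_cpu(powernow_k8_ids))
		return -ENODEV;

	/* ... remainder unchanged: per-CPU capability check and
	 * cpufreq_register_driver(&cpufreq_amd64_driver) ... */
}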
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 9531fc2eda22..b99790f400c4 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -19,7 +19,7 @@ obj-$(CONFIG_CPU_FREQ_TABLE) += freq_table.o
 # K8 systems. ACPI is preferred to all other hardware-specific drivers.
 # speedstep-* is preferred over p4-clockmod.
 
-obj-$(CONFIG_X86_POWERNOW_K8)	+= powernow-k8.o mperf.o
+obj-$(CONFIG_X86_POWERNOW_K8)	+= powernow-k8.o
 obj-$(CONFIG_X86_ACPI_CPUFREQ)	+= acpi-cpufreq.o mperf.o
 obj-$(CONFIG_X86_PCC_CPUFREQ)	+= pcc-cpufreq.o
 obj-$(CONFIG_X86_POWERNOW_K6)	+= powernow-k6.o
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
index f1035a920b0a..0b19faf002ee 100644
--- a/drivers/cpufreq/powernow-k8.c
+++ b/drivers/cpufreq/powernow-k8.c
@@ -49,22 +49,12 @@
 #define PFX "powernow-k8: "
 #define VERSION "version 2.20.00"
 #include "powernow-k8.h"
-#include "mperf.h"
 
 /* serialize freq changes */
 static DEFINE_MUTEX(fidvid_mutex);
 
 static DEFINE_PER_CPU(struct powernow_k8_data *, powernow_data);
 
-static int cpu_family = CPU_OPTERON;
-
-/* array to map SW pstate number to acpi state */
-static u32 ps_to_as[8];
-
-/* core performance boost */
-static bool cpb_capable, cpb_enabled;
-static struct msr __percpu *msrs;
-
 static struct cpufreq_driver cpufreq_amd64_driver;
 
 #ifndef CONFIG_SMP
@@ -86,12 +76,6 @@ static u32 find_khz_freq_from_fid(u32 fid)
 	return 1000 * find_freq_from_fid(fid);
 }
 
-static u32 find_khz_freq_from_pstate(struct cpufreq_frequency_table *data,
-		u32 pstate)
-{
-	return data[ps_to_as[pstate]].frequency;
-}
-
 /* Return the vco fid for an input fid
  *
  * Each "low" fid has corresponding "high" fid, and you can get to "low" fids
@@ -114,9 +98,6 @@ static int pending_bit_stuck(void)
 {
 	u32 lo, hi;
 
-	if (cpu_family == CPU_HW_PSTATE)
-		return 0;
-
 	rdmsr(MSR_FIDVID_STATUS, lo, hi);
 	return lo & MSR_S_LO_CHANGE_PENDING ? 1 : 0;
 }
@@ -130,20 +111,6 @@ static int query_current_values_with_pending_wait(struct powernow_k8_data *data)
 	u32 lo, hi;
 	u32 i = 0;
 
-	if (cpu_family == CPU_HW_PSTATE) {
-		rdmsr(MSR_PSTATE_STATUS, lo, hi);
-		i = lo & HW_PSTATE_MASK;
-		data->currpstate = i;
-
-		/*
-		 * a workaround for family 11h erratum 311 might cause
-		 * an "out-of-range Pstate if the core is in Pstate-0
-		 */
-		if ((boot_cpu_data.x86 == 0x11) && (i >= data->numps))
-			data->currpstate = HW_PSTATE_0;
-
-		return 0;
-	}
 	do {
 		if (i++ > 10000) {
 			pr_debug("detected change pending stuck\n");
@@ -300,14 +267,6 @@ static int decrease_vid_code_by_step(struct powernow_k8_data *data,
 	return 0;
 }
 
-/* Change hardware pstate by single MSR write */
-static int transition_pstate(struct powernow_k8_data *data, u32 pstate)
-{
-	wrmsr(MSR_PSTATE_CTRL, pstate, 0);
-	data->currpstate = pstate;
-	return 0;
-}
-
 /* Change Opteron/Athlon64 fid and vid, by the 3 phases. */
 static int transition_fid_vid(struct powernow_k8_data *data,
 		u32 reqfid, u32 reqvid)
@@ -524,8 +483,6 @@ static int core_voltage_post_transition(struct powernow_k8_data *data,
 static const struct x86_cpu_id powernow_k8_ids[] = {
 	/* IO based frequency switching */
 	{ X86_VENDOR_AMD, 0xf },
-	/* MSR based frequency switching supported */
-	X86_FEATURE_MATCH(X86_FEATURE_HW_PSTATE),
 	{}
 };
 MODULE_DEVICE_TABLE(x86cpu, powernow_k8_ids);
@@ -561,15 +518,8 @@ static void check_supported_cpu(void *_rc)
561 "Power state transitions not supported\n"); 518 "Power state transitions not supported\n");
562 return; 519 return;
563 } 520 }
564 } else { /* must be a HW Pstate capable processor */ 521 *rc = 0;
565 cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx);
566 if ((edx & USE_HW_PSTATE) == USE_HW_PSTATE)
567 cpu_family = CPU_HW_PSTATE;
568 else
569 return;
570 } 522 }
571
572 *rc = 0;
573} 523}
574 524
575static int check_pst_table(struct powernow_k8_data *data, struct pst_s *pst, 525static int check_pst_table(struct powernow_k8_data *data, struct pst_s *pst,
@@ -633,18 +583,11 @@ static void print_basics(struct powernow_k8_data *data)
 	for (j = 0; j < data->numps; j++) {
 		if (data->powernow_table[j].frequency !=
 				CPUFREQ_ENTRY_INVALID) {
-			if (cpu_family == CPU_HW_PSTATE) {
-				printk(KERN_INFO PFX
-					" %d : pstate %d (%d MHz)\n", j,
-					data->powernow_table[j].index,
-					data->powernow_table[j].frequency/1000);
-			} else {
 				printk(KERN_INFO PFX
 					"fid 0x%x (%d MHz), vid 0x%x\n",
 					data->powernow_table[j].index & 0xff,
 					data->powernow_table[j].frequency/1000,
 					data->powernow_table[j].index >> 8);
-			}
 		}
 	}
 	if (data->batps)
@@ -652,20 +595,6 @@ static void print_basics(struct powernow_k8_data *data)
 				data->batps);
 }
 
-static u32 freq_from_fid_did(u32 fid, u32 did)
-{
-	u32 mhz = 0;
-
-	if (boot_cpu_data.x86 == 0x10)
-		mhz = (100 * (fid + 0x10)) >> did;
-	else if (boot_cpu_data.x86 == 0x11)
-		mhz = (100 * (fid + 8)) >> did;
-	else
-		BUG();
-
-	return mhz * 1000;
-}
-
 static int fill_powernow_table(struct powernow_k8_data *data,
 		struct pst_s *pst, u8 maxvid)
 {
@@ -825,7 +754,7 @@ static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data,
 {
 	u64 control;
 
-	if (!data->acpi_data.state_count || (cpu_family == CPU_HW_PSTATE))
+	if (!data->acpi_data.state_count)
 		return;
 
 	control = data->acpi_data.states[index].control;
@@ -876,10 +805,7 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
 	data->numps = data->acpi_data.state_count;
 	powernow_k8_acpi_pst_values(data, 0);
 
-	if (cpu_family == CPU_HW_PSTATE)
-		ret_val = fill_powernow_table_pstate(data, powernow_table);
-	else
-		ret_val = fill_powernow_table_fidvid(data, powernow_table);
+	ret_val = fill_powernow_table_fidvid(data, powernow_table);
 	if (ret_val)
 		goto err_out_mem;
 
@@ -916,51 +842,6 @@ err_out:
 	return ret_val;
 }
 
-static int fill_powernow_table_pstate(struct powernow_k8_data *data,
-		struct cpufreq_frequency_table *powernow_table)
-{
-	int i;
-	u32 hi = 0, lo = 0;
-	rdmsr(MSR_PSTATE_CUR_LIMIT, lo, hi);
-	data->max_hw_pstate = (lo & HW_PSTATE_MAX_MASK) >> HW_PSTATE_MAX_SHIFT;
-
-	for (i = 0; i < data->acpi_data.state_count; i++) {
-		u32 index;
-
-		index = data->acpi_data.states[i].control & HW_PSTATE_MASK;
-		if (index > data->max_hw_pstate) {
-			printk(KERN_ERR PFX "invalid pstate %d - "
-					"bad value %d.\n", i, index);
-			printk(KERN_ERR PFX "Please report to BIOS "
-					"manufacturer\n");
-			invalidate_entry(powernow_table, i);
-			continue;
-		}
-
-		ps_to_as[index] = i;
-
-		/* Frequency may be rounded for these */
-		if ((boot_cpu_data.x86 == 0x10 && boot_cpu_data.x86_model < 10)
-			|| boot_cpu_data.x86 == 0x11) {
-
-			rdmsr(MSR_PSTATE_DEF_BASE + index, lo, hi);
-			if (!(hi & HW_PSTATE_VALID_MASK)) {
-				pr_debug("invalid pstate %d, ignoring\n", index);
-				invalidate_entry(powernow_table, i);
-				continue;
-			}
-
-			powernow_table[i].frequency =
-				freq_from_fid_did(lo & 0x3f, (lo >> 6) & 7);
-		} else
-			powernow_table[i].frequency =
-				data->acpi_data.states[i].core_frequency * 1000;
-
-		powernow_table[i].index = index;
-	}
-	return 0;
-}
-
 static int fill_powernow_table_fidvid(struct powernow_k8_data *data,
 		struct cpufreq_frequency_table *powernow_table)
 {
@@ -1037,15 +918,7 @@ static int get_transition_latency(struct powernow_k8_data *data)
 			max_latency = cur_latency;
 	}
 	if (max_latency == 0) {
-		/*
-		 * Fam 11h and later may return 0 as transition latency. This
-		 * is intended and means "very fast". While cpufreq core and
-		 * governors currently can handle that gracefully, better set it
-		 * to 1 to avoid problems in the future.
-		 */
-		if (boot_cpu_data.x86 < 0x11)
-			printk(KERN_ERR FW_WARN PFX "Invalid zero transition "
-				"latency\n");
+		pr_err(FW_WARN PFX "Invalid zero transition latency\n");
 		max_latency = 1;
 	}
 	/* value in usecs, needs to be in nanoseconds */
@@ -1105,40 +978,6 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data,
 	return res;
 }
 
-/* Take a frequency, and issue the hardware pstate transition command */
-static int transition_frequency_pstate(struct powernow_k8_data *data,
-		unsigned int index)
-{
-	u32 pstate = 0;
-	int res, i;
-	struct cpufreq_freqs freqs;
-
-	pr_debug("cpu %d transition to index %u\n", smp_processor_id(), index);
-
-	/* get MSR index for hardware pstate transition */
-	pstate = index & HW_PSTATE_MASK;
-	if (pstate > data->max_hw_pstate)
-		return -EINVAL;
-
-	freqs.old = find_khz_freq_from_pstate(data->powernow_table,
-			data->currpstate);
-	freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate);
-
-	for_each_cpu(i, data->available_cores) {
-		freqs.cpu = i;
-		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
-	}
-
-	res = transition_pstate(data, pstate);
-	freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate);
-
-	for_each_cpu(i, data->available_cores) {
-		freqs.cpu = i;
-		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
-	}
-	return res;
-}
-
 /* Driver entry point to switch to the target frequency */
 static int powernowk8_target(struct cpufreq_policy *pol,
 		unsigned targfreq, unsigned relation)
@@ -1180,18 +1019,15 @@ static int powernowk8_target(struct cpufreq_policy *pol,
 	if (query_current_values_with_pending_wait(data))
 		goto err_out;
 
-	if (cpu_family != CPU_HW_PSTATE) {
-		pr_debug("targ: curr fid 0x%x, vid 0x%x\n",
-		data->currfid, data->currvid);
+	pr_debug("targ: curr fid 0x%x, vid 0x%x\n",
+		data->currfid, data->currvid);
 
-		if ((checkvid != data->currvid) ||
-		    (checkfid != data->currfid)) {
-			printk(KERN_INFO PFX
-				"error - out of sync, fix 0x%x 0x%x, "
-				"vid 0x%x 0x%x\n",
-				checkfid, data->currfid,
-				checkvid, data->currvid);
-		}
+	if ((checkvid != data->currvid) ||
+	    (checkfid != data->currfid)) {
+		pr_info(PFX
+		       "error - out of sync, fix 0x%x 0x%x, vid 0x%x 0x%x\n",
+		       checkfid, data->currfid,
+		       checkvid, data->currvid);
 	}
 
 	if (cpufreq_frequency_table_target(pol, data->powernow_table,
@@ -1202,11 +1038,8 @@ static int powernowk8_target(struct cpufreq_policy *pol,
 
 	powernow_k8_acpi_pst_values(data, newstate);
 
-	if (cpu_family == CPU_HW_PSTATE)
-		ret = transition_frequency_pstate(data,
-			data->powernow_table[newstate].index);
-	else
-		ret = transition_frequency_fidvid(data, newstate);
+	ret = transition_frequency_fidvid(data, newstate);
+
 	if (ret) {
 		printk(KERN_ERR PFX "transition frequency failed\n");
 		ret = 1;
@@ -1215,11 +1048,7 @@ static int powernowk8_target(struct cpufreq_policy *pol,
 	}
 	mutex_unlock(&fidvid_mutex);
 
-	if (cpu_family == CPU_HW_PSTATE)
-		pol->cur = find_khz_freq_from_pstate(data->powernow_table,
-				data->powernow_table[newstate].index);
-	else
-		pol->cur = find_khz_freq_from_fid(data->currfid);
+	pol->cur = find_khz_freq_from_fid(data->currfid);
 	ret = 0;
 
 err_out:
@@ -1259,8 +1088,7 @@ static void __cpuinit powernowk8_cpu_init_on_cpu(void *_init_on_cpu)
 		return;
 	}
 
-	if (cpu_family == CPU_OPTERON)
-		fidvid_msr_init();
+	fidvid_msr_init();
 
 	init_on_cpu->rc = 0;
 }
@@ -1277,7 +1105,6 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 	struct powernow_k8_data *data;
 	struct init_on_cpu init_on_cpu;
 	int rc;
-	struct cpuinfo_x86 *c = &cpu_data(pol->cpu);
 
 	if (!cpu_online(pol->cpu))
 		return -ENODEV;
@@ -1293,7 +1120,6 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 	}
 
 	data->cpu = pol->cpu;
-	data->currpstate = HW_PSTATE_INVALID;
 
 	if (powernow_k8_cpu_init_acpi(data)) {
 		/*
@@ -1330,17 +1156,10 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 	if (rc != 0)
 		goto err_out_exit_acpi;
 
-	if (cpu_family == CPU_HW_PSTATE)
-		cpumask_copy(pol->cpus, cpumask_of(pol->cpu));
-	else
-		cpumask_copy(pol->cpus, cpu_core_mask(pol->cpu));
+	cpumask_copy(pol->cpus, cpu_core_mask(pol->cpu));
 	data->available_cores = pol->cpus;
 
-	if (cpu_family == CPU_HW_PSTATE)
-		pol->cur = find_khz_freq_from_pstate(data->powernow_table,
-				data->currpstate);
-	else
-		pol->cur = find_khz_freq_from_fid(data->currfid);
+	pol->cur = find_khz_freq_from_fid(data->currfid);
 	pr_debug("policy current frequency %d kHz\n", pol->cur);
 
 	/* min/max the cpu is capable of */
@@ -1352,18 +1171,10 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 		return -EINVAL;
 	}
 
-	/* Check for APERF/MPERF support in hardware */
-	if (cpu_has(c, X86_FEATURE_APERFMPERF))
-		cpufreq_amd64_driver.getavg = cpufreq_get_measured_perf;
-
 	cpufreq_frequency_table_get_attr(data->powernow_table, pol->cpu);
 
-	if (cpu_family == CPU_HW_PSTATE)
-		pr_debug("cpu_init done, current pstate 0x%x\n",
-				data->currpstate);
-	else
-		pr_debug("cpu_init done, current fid 0x%x, vid 0x%x\n",
-			data->currfid, data->currvid);
+	pr_debug("cpu_init done, current fid 0x%x, vid 0x%x\n",
+		data->currfid, data->currvid);
 
 	per_cpu(powernow_data, pol->cpu) = data;
 
@@ -1416,88 +1227,15 @@ static unsigned int powernowk8_get(unsigned int cpu)
 	if (err)
 		goto out;
 
-	if (cpu_family == CPU_HW_PSTATE)
-		khz = find_khz_freq_from_pstate(data->powernow_table,
-				data->currpstate);
-	else
-		khz = find_khz_freq_from_fid(data->currfid);
+	khz = find_khz_freq_from_fid(data->currfid);
 
 
 out:
 	return khz;
 }
 
-static void _cpb_toggle_msrs(bool t)
-{
-	int cpu;
-
-	get_online_cpus();
-
-	rdmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs);
-
-	for_each_cpu(cpu, cpu_online_mask) {
-		struct msr *reg = per_cpu_ptr(msrs, cpu);
-		if (t)
-			reg->l &= ~BIT(25);
-		else
-			reg->l |= BIT(25);
-	}
-	wrmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs);
-
-	put_online_cpus();
-}
-
-/*
- * Switch on/off core performance boosting.
- *
- * 0=disable
- * 1=enable.
- */
-static void cpb_toggle(bool t)
-{
-	if (!cpb_capable)
-		return;
-
-	if (t && !cpb_enabled) {
-		cpb_enabled = true;
-		_cpb_toggle_msrs(t);
-		printk(KERN_INFO PFX "Core Boosting enabled.\n");
-	} else if (!t && cpb_enabled) {
-		cpb_enabled = false;
-		_cpb_toggle_msrs(t);
-		printk(KERN_INFO PFX "Core Boosting disabled.\n");
-	}
-}
-
-static ssize_t store_cpb(struct cpufreq_policy *policy, const char *buf,
-				 size_t count)
-{
-	int ret = -EINVAL;
-	unsigned long val = 0;
-
-	ret = strict_strtoul(buf, 10, &val);
-	if (!ret && (val == 0 || val == 1) && cpb_capable)
-		cpb_toggle(val);
-	else
-		return -EINVAL;
-
-	return count;
-}
-
-static ssize_t show_cpb(struct cpufreq_policy *policy, char *buf)
-{
-	return sprintf(buf, "%u\n", cpb_enabled);
-}
-
-#define define_one_rw(_name) \
-static struct freq_attr _name = \
-__ATTR(_name, 0644, show_##_name, store_##_name)
-
-define_one_rw(cpb);
-
 static struct freq_attr *powernow_k8_attr[] = {
 	&cpufreq_freq_attr_scaling_available_freqs,
-	&cpb,
 	NULL,
 };
 
@@ -1513,58 +1251,20 @@ static struct cpufreq_driver cpufreq_amd64_driver = {
 	.attr		= powernow_k8_attr,
 };
 
-/*
- * Clear the boost-disable flag on the CPU_DOWN path so that this cpu
- * cannot block the remaining ones from boosting. On the CPU_UP path we
- * simply keep the boost-disable flag in sync with the current global
- * state.
- */
-static int cpb_notify(struct notifier_block *nb, unsigned long action,
-			void *hcpu)
-{
-	unsigned cpu = (long)hcpu;
-	u32 lo, hi;
-
-	switch (action) {
-	case CPU_UP_PREPARE:
-	case CPU_UP_PREPARE_FROZEN:
-
-		if (!cpb_enabled) {
-			rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
-			lo |= BIT(25);
-			wrmsr_on_cpu(cpu, MSR_K7_HWCR, lo, hi);
-		}
-		break;
-
-	case CPU_DOWN_PREPARE:
-	case CPU_DOWN_PREPARE_FROZEN:
-		rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
-		lo &= ~BIT(25);
-		wrmsr_on_cpu(cpu, MSR_K7_HWCR, lo, hi);
-		break;
-
-	default:
-		break;
-	}
-
-	return NOTIFY_OK;
-}
-
-static struct notifier_block cpb_nb = {
-	.notifier_call		= cpb_notify,
-};
-
 /* driver entry point for init */
 static int __cpuinit powernowk8_init(void)
 {
-	unsigned int i, supported_cpus = 0, cpu;
+	unsigned int i, supported_cpus = 0;
 	int rv;
 
-	if (!x86_match_cpu(powernow_k8_ids))
+	if (static_cpu_has(X86_FEATURE_HW_PSTATE)) {
+		pr_warn(PFX "this CPU is not supported anymore, using acpi-cpufreq instead.\n");
+		request_module("acpi-cpufreq");
 		return -ENODEV;
+	}
 
-	if (static_cpu_has(X86_FEATURE_HW_PSTATE))
-		pr_warn(PFX "support for this CPU is deprecated, use acpi-cpufreq instead.\n");
+	if (!x86_match_cpu(powernow_k8_ids))
+		return -ENODEV;
 
 	for_each_online_cpu(i) {
 		int rc;
@@ -1576,26 +1276,6 @@ static int __cpuinit powernowk8_init(void)
 	if (supported_cpus != num_online_cpus())
 		return -ENODEV;
 
-	if (boot_cpu_has(X86_FEATURE_CPB)) {
-
-		cpb_capable = true;
-
-		msrs = msrs_alloc();
-		if (!msrs) {
-			printk(KERN_ERR "%s: Error allocating msrs!\n", __func__);
-			return -ENOMEM;
-		}
-
-		register_cpu_notifier(&cpb_nb);
-
-		rdmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs);
-
-		for_each_cpu(cpu, cpu_online_mask) {
-			struct msr *reg = per_cpu_ptr(msrs, cpu);
-			cpb_enabled |= !(!!(reg->l & BIT(25)));
-		}
-	}
-
 	rv = cpufreq_register_driver(&cpufreq_amd64_driver);
 
 	if (!rv)
@@ -1603,15 +1283,6 @@ static int __cpuinit powernowk8_init(void)
 			num_online_nodes(), boot_cpu_data.x86_model_id,
 			supported_cpus);
 
-	if (boot_cpu_has(X86_FEATURE_CPB)) {
-		if (rv < 0) {
-			unregister_cpu_notifier(&cpb_nb);
-			msrs_free(msrs);
-			msrs = NULL;
-		} else
-			pr_info(PFX "Core Performance Boosting: %s.\n",
-				(cpb_enabled ? "on" : "off"));
-	}
-
 	return rv;
 }
 
@@ -1620,13 +1291,6 @@ static void __exit powernowk8_exit(void)
 {
 	pr_debug("exit\n");
 
-	if (boot_cpu_has(X86_FEATURE_CPB)) {
-		msrs_free(msrs);
-		msrs = NULL;
-
-		unregister_cpu_notifier(&cpb_nb);
-	}
-
 	cpufreq_unregister_driver(&cpufreq_amd64_driver);
 }
 
diff --git a/drivers/cpufreq/powernow-k8.h b/drivers/cpufreq/powernow-k8.h
index 3744d26cdc2b..79329d4d5abe 100644
--- a/drivers/cpufreq/powernow-k8.h
+++ b/drivers/cpufreq/powernow-k8.h
@@ -5,24 +5,11 @@
  * http://www.gnu.org/licenses/gpl.html
  */
 
-enum pstate {
-	HW_PSTATE_INVALID = 0xff,
-	HW_PSTATE_0 = 0,
-	HW_PSTATE_1 = 1,
-	HW_PSTATE_2 = 2,
-	HW_PSTATE_3 = 3,
-	HW_PSTATE_4 = 4,
-	HW_PSTATE_5 = 5,
-	HW_PSTATE_6 = 6,
-	HW_PSTATE_7 = 7,
-};
-
 struct powernow_k8_data {
 	unsigned int cpu;
 
 	u32 numps;	/* number of p-states */
 	u32 batps;	/* number of p-states supported on battery */
-	u32 max_hw_pstate;	/* maximum legal hardware pstate */
 
 	/* these values are constant when the PSB is used to determine
 	 * vid/fid pairings, but are modified during the ->target() call
@@ -37,7 +24,6 @@ struct powernow_k8_data {
 	/* keep track of the current fid / vid or pstate */
 	u32 currvid;
 	u32 currfid;
-	enum pstate currpstate;
 
 	/* the powernow_table includes all frequency and vid/fid pairings:
 	 * fid are the lower 8 bits of the index, vid are the upper 8 bits.
@@ -97,23 +83,6 @@ struct powernow_k8_data {
 #define MSR_S_HI_CURRENT_VID	0x0000003f
 #define MSR_C_HI_STP_GNT_BENIGN	0x00000001
 
-
-/* Hardware Pstate _PSS and MSR definitions */
-#define USE_HW_PSTATE		0x00000080
-#define HW_PSTATE_MASK		0x00000007
-#define HW_PSTATE_VALID_MASK	0x80000000
-#define HW_PSTATE_MAX_MASK	0x000000f0
-#define HW_PSTATE_MAX_SHIFT	4
-#define MSR_PSTATE_DEF_BASE	0xc0010064 /* base of Pstate MSRs */
-#define MSR_PSTATE_STATUS	0xc0010063 /* Pstate Status MSR */
-#define MSR_PSTATE_CTRL		0xc0010062 /* Pstate control MSR */
-#define MSR_PSTATE_CUR_LIMIT	0xc0010061 /* pstate current limit MSR */
-
-/* define the two driver architectures */
-#define CPU_OPTERON 0
-#define CPU_HW_PSTATE 1
-
-
 /*
  * There are restrictions frequencies have to follow:
  * - only 1 entry in the low fid table ( <=1.4GHz )
@@ -218,5 +187,4 @@ static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid);
 
 static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index);
 
-static int fill_powernow_table_pstate(struct powernow_k8_data *data, struct cpufreq_frequency_table *powernow_table);
 static int fill_powernow_table_fidvid(struct powernow_k8_data *data, struct cpufreq_frequency_table *powernow_table);