path: root/drivers/cpufreq
author    Linus Torvalds <torvalds@linux-foundation.org>  2012-10-02 21:32:35 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2012-10-02 21:32:35 -0400
commit    16642a2e7be23bbda013fc32d8f6c68982eab603 (patch)
tree      346ae485f485f6901e5d8150f0d34d178a7dd448 /drivers/cpufreq
parent    51562cba98939da0a1d10fe7c25359b77a069033 (diff)
parent    b9142167a2bb979b58b98ffcd928a311b55cbd9f (diff)
Merge tag 'pm-for-3.7-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
Pull power management updates from Rafael J Wysocki:

 - Improved system suspend/resume and runtime PM handling for the SH
   TMU, CMT and MTU2 clock event devices (also used by ARM/shmobile).

 - Generic PM domains framework extensions related to cpuidle support
   and domain objects lookup using names.

 - ARM/shmobile power management updates including improved support for
   the SH7372's A4S power domain containing the CPU core.

 - cpufreq changes related to AMD CPUs support from Matthew Garrett,
   Andre Przywara and Borislav Petkov.

 - cpu0 cpufreq driver from Shawn Guo.

 - cpufreq governor fixes related to the relaxing of limits from Michal
   Pecio.

 - OMAP cpufreq updates from Axel Lin and Richard Zhao.

 - cpuidle ladder governor fixes related to the disabling of states from
   Carsten Emde and me.

 - Runtime PM core updates related to the interactions with the system
   suspend core from Alan Stern and Kevin Hilman.

 - Wakeup sources modification allowing more helper functions to be
   called from interrupt context from John Stultz and additional
   diagnostic code from Todd Poynor.

 - System suspend error code path fix from Feng Hong.

Fixed up conflicts in cpufreq/powernow-k8 that stemmed from the
workqueue fixes conflicting fairly badly with the removal of support
for hardware P-state chips. The changes were independent but somewhat
intertwined.

* tag 'pm-for-3.7-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm: (76 commits)
  Revert "PM QoS: Use spinlock in the per-device PM QoS constraints code"
  PM / Runtime: let rpm_resume() succeed if RPM_ACTIVE, even when disabled, v2
  cpuidle: rename function name "__cpuidle_register_driver", v2
  cpufreq: OMAP: Check IS_ERR() instead of NULL for omap_device_get_by_hwmod_name
  cpuidle: remove some empty lines
  PM: Prevent runtime suspend during system resume
  PM QoS: Use spinlock in the per-device PM QoS constraints code
  PM / Sleep: use resume event when call dpm_resume_early
  cpuidle / ACPI : move cpuidle_device field out of the acpi_processor_power structure
  ACPI / processor: remove pointless variable initialization
  ACPI / processor: remove unused function parameter
  cpufreq: OMAP: remove loops_per_jiffy recalculate for smp
  sections: fix section conflicts in drivers/cpufreq
  cpufreq: conservative: update frequency when limits are relaxed
  cpufreq / ondemand: update frequency when limits are relaxed properly
  __init-annotate pm_sysrq_init()
  cpufreq: Add a generic cpufreq-cpu0 driver
  PM / OPP: Initialize OPP table from device tree
  ARM: add cpufreq transiton notifier to adjust loops_per_jiffy for smp
  cpufreq: Remove support for hardware P-state chips from powernow-k8
  ...
Diffstat (limited to 'drivers/cpufreq')
-rw-r--r--  drivers/cpufreq/Kconfig                 |  11
-rw-r--r--  drivers/cpufreq/Kconfig.x86             |  18
-rw-r--r--  drivers/cpufreq/Makefile                |   4
-rw-r--r--  drivers/cpufreq/acpi-cpufreq.c          | 272
-rw-r--r--  drivers/cpufreq/cpufreq-cpu0.c          | 269
-rw-r--r--  drivers/cpufreq/cpufreq_conservative.c  |   2
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c      |   1
-rw-r--r--  drivers/cpufreq/longhaul.h              |  26
-rw-r--r--  drivers/cpufreq/omap-cpufreq.c          |  39
-rw-r--r--  drivers/cpufreq/powernow-k8.c           | 406
-rw-r--r--  drivers/cpufreq/powernow-k8.h           |  32
11 files changed, 623 insertions, 457 deletions
diff --git a/drivers/cpufreq/Kconfig b/drivers/cpufreq/Kconfig
index e24a2a1b6666..ea512f47b789 100644
--- a/drivers/cpufreq/Kconfig
+++ b/drivers/cpufreq/Kconfig
@@ -179,6 +179,17 @@ config CPU_FREQ_GOV_CONSERVATIVE
 
 	  If in doubt, say N.
 
+config GENERIC_CPUFREQ_CPU0
+	bool "Generic CPU0 cpufreq driver"
+	depends on HAVE_CLK && REGULATOR && PM_OPP && OF
+	select CPU_FREQ_TABLE
+	help
+	  This adds a generic cpufreq driver for CPU0 frequency management.
+	  It supports both uniprocessor (UP) and symmetric multiprocessor (SMP)
+	  systems which share clock and voltage across all CPUs.
+
+	  If in doubt, say N.
+
 menu "x86 CPU frequency scaling drivers"
 depends on X86
 source "drivers/cpufreq/Kconfig.x86"
diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86
index 78ff7ee48951..934854ae5eb4 100644
--- a/drivers/cpufreq/Kconfig.x86
+++ b/drivers/cpufreq/Kconfig.x86
@@ -23,7 +23,8 @@ config X86_ACPI_CPUFREQ
 	help
 	  This driver adds a CPUFreq driver which utilizes the ACPI
 	  Processor Performance States.
-	  This driver also supports Intel Enhanced Speedstep.
+	  This driver also supports Intel Enhanced Speedstep and newer
+	  AMD CPUs.
 
 	  To compile this driver as a module, choose M here: the
 	  module will be called acpi-cpufreq.
@@ -32,6 +33,18 @@ config X86_ACPI_CPUFREQ
 
 	  If in doubt, say N.
 
+config X86_ACPI_CPUFREQ_CPB
+	default y
+	bool "Legacy cpb sysfs knob support for AMD CPUs"
+	depends on X86_ACPI_CPUFREQ && CPU_SUP_AMD
+	help
+	  The powernow-k8 driver used to provide a sysfs knob called "cpb"
+	  to disable the Core Performance Boosting feature of AMD CPUs. This
+	  file has now been superseded by the more generic "boost" entry.
+
+	  By enabling this option the acpi_cpufreq driver provides the old
+	  entry in addition to the new boost ones, for compatibility reasons.
+
 config ELAN_CPUFREQ
 	tristate "AMD Elan SC400 and SC410"
 	select CPU_FREQ_TABLE
@@ -95,7 +108,8 @@ config X86_POWERNOW_K8
 	select CPU_FREQ_TABLE
 	depends on ACPI && ACPI_PROCESSOR
 	help
-	  This adds the CPUFreq driver for K8/K10 Opteron/Athlon64 processors.
+	  This adds the CPUFreq driver for K8/early Opteron/Athlon64 processors.
+	  Support for K10 and newer processors is now in acpi-cpufreq.
 
 	  To compile this driver as a module, choose M here: the
 	  module will be called powernow-k8.
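The Kconfig split above hinges on one CPUID bit: the new check_amd_hwpstate_cpu() helper in acpi-cpufreq tests X86_FEATURE_HW_PSTATE, which corresponds to CPUID leaf 0x80000007, EDX bit 7 ("HwPstate"). As a rough illustration only (not part of this merge), the same capability can be probed from userspace to predict which driver will claim a given AMD CPU:

/*
 * Standalone sketch, not kernel code: probe the hardware P-state CPUID
 * bit that decides the powernow-k8 vs. acpi-cpufreq split above.
 * Build with: gcc -o hwpstate hwpstate.c
 */
#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* Leaf 0x80000007: Advanced Power Management Information */
	if (!__get_cpuid(0x80000007, &eax, &ebx, &ecx, &edx)) {
		fprintf(stderr, "CPUID leaf 0x80000007 not supported\n");
		return 1;
	}
	printf("hardware P-states: %s\n",
	       (edx & (1u << 7)) ? "yes (acpi-cpufreq)" : "no (powernow-k8)");
	return 0;
}

On AMD family 0x10 and newer parts the bit should read as set, matching the new acpi-cpufreq path.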
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 9531fc2eda22..1bc90e1306d8 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -13,13 +13,15 @@ obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o
 # CPUfreq cross-arch helpers
 obj-$(CONFIG_CPU_FREQ_TABLE)		+= freq_table.o
 
+obj-$(CONFIG_GENERIC_CPUFREQ_CPU0)	+= cpufreq-cpu0.o
+
 ##################################################################################
 # x86 drivers.
 # Link order matters. K8 is preferred to ACPI because of firmware bugs in early
 # K8 systems. ACPI is preferred to all other hardware-specific drivers.
 # speedstep-* is preferred over p4-clockmod.
 
-obj-$(CONFIG_X86_POWERNOW_K8)		+= powernow-k8.o mperf.o
+obj-$(CONFIG_X86_POWERNOW_K8)		+= powernow-k8.o
 obj-$(CONFIG_X86_ACPI_CPUFREQ)		+= acpi-cpufreq.o mperf.o
 obj-$(CONFIG_X86_PCC_CPUFREQ)		+= pcc-cpufreq.o
 obj-$(CONFIG_X86_POWERNOW_K6)		+= powernow-k6.o
diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
index 56c6c6b4eb4d..0d048f6a2b23 100644
--- a/drivers/cpufreq/acpi-cpufreq.c
+++ b/drivers/cpufreq/acpi-cpufreq.c
@@ -51,13 +51,19 @@ MODULE_AUTHOR("Paul Diefenbaugh, Dominik Brodowski");
 MODULE_DESCRIPTION("ACPI Processor P-States Driver");
 MODULE_LICENSE("GPL");
 
+#define PFX "acpi-cpufreq: "
+
 enum {
 	UNDEFINED_CAPABLE = 0,
 	SYSTEM_INTEL_MSR_CAPABLE,
+	SYSTEM_AMD_MSR_CAPABLE,
 	SYSTEM_IO_CAPABLE,
 };
 
 #define INTEL_MSR_RANGE		(0xffff)
+#define AMD_MSR_RANGE		(0x7)
+
+#define MSR_K7_HWCR_CPB_DIS	(1ULL << 25)
 
 struct acpi_cpufreq_data {
 	struct acpi_processor_performance *acpi_data;
@@ -74,6 +80,116 @@ static struct acpi_processor_performance __percpu *acpi_perf_data;
 static struct cpufreq_driver acpi_cpufreq_driver;
 
 static unsigned int acpi_pstate_strict;
+static bool boost_enabled, boost_supported;
+static struct msr __percpu *msrs;
+
+static bool boost_state(unsigned int cpu)
+{
+	u32 lo, hi;
+	u64 msr;
+
+	switch (boot_cpu_data.x86_vendor) {
+	case X86_VENDOR_INTEL:
+		rdmsr_on_cpu(cpu, MSR_IA32_MISC_ENABLE, &lo, &hi);
+		msr = lo | ((u64)hi << 32);
+		return !(msr & MSR_IA32_MISC_ENABLE_TURBO_DISABLE);
+	case X86_VENDOR_AMD:
+		rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
+		msr = lo | ((u64)hi << 32);
+		return !(msr & MSR_K7_HWCR_CPB_DIS);
+	}
+	return false;
+}
+
+static void boost_set_msrs(bool enable, const struct cpumask *cpumask)
+{
+	u32 cpu;
+	u32 msr_addr;
+	u64 msr_mask;
+
+	switch (boot_cpu_data.x86_vendor) {
+	case X86_VENDOR_INTEL:
+		msr_addr = MSR_IA32_MISC_ENABLE;
+		msr_mask = MSR_IA32_MISC_ENABLE_TURBO_DISABLE;
+		break;
+	case X86_VENDOR_AMD:
+		msr_addr = MSR_K7_HWCR;
+		msr_mask = MSR_K7_HWCR_CPB_DIS;
+		break;
+	default:
+		return;
+	}
+
+	rdmsr_on_cpus(cpumask, msr_addr, msrs);
+
+	for_each_cpu(cpu, cpumask) {
+		struct msr *reg = per_cpu_ptr(msrs, cpu);
+		if (enable)
+			reg->q &= ~msr_mask;
+		else
+			reg->q |= msr_mask;
+	}
+
+	wrmsr_on_cpus(cpumask, msr_addr, msrs);
+}
+
+static ssize_t _store_boost(const char *buf, size_t count)
+{
+	int ret;
+	unsigned long val = 0;
+
+	if (!boost_supported)
+		return -EINVAL;
+
+	ret = kstrtoul(buf, 10, &val);
+	if (ret || (val > 1))
+		return -EINVAL;
+
+	if ((val && boost_enabled) || (!val && !boost_enabled))
+		return count;
+
+	get_online_cpus();
+
+	boost_set_msrs(val, cpu_online_mask);
+
+	put_online_cpus();
+
+	boost_enabled = val;
+	pr_debug("Core Boosting %sabled.\n", val ? "en" : "dis");
+
+	return count;
+}
+
+static ssize_t store_global_boost(struct kobject *kobj, struct attribute *attr,
+				  const char *buf, size_t count)
+{
+	return _store_boost(buf, count);
+}
+
+static ssize_t show_global_boost(struct kobject *kobj,
+				 struct attribute *attr, char *buf)
+{
+	return sprintf(buf, "%u\n", boost_enabled);
+}
+
+static struct global_attr global_boost = __ATTR(boost, 0644,
+						show_global_boost,
+						store_global_boost);
+
+#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
+static ssize_t store_cpb(struct cpufreq_policy *policy, const char *buf,
+			 size_t count)
+{
+	return _store_boost(buf, count);
+}
+
+static ssize_t show_cpb(struct cpufreq_policy *policy, char *buf)
+{
+	return sprintf(buf, "%u\n", boost_enabled);
+}
+
+static struct freq_attr cpb = __ATTR(cpb, 0644, show_cpb, store_cpb);
+#endif
 
 static int check_est_cpu(unsigned int cpuid)
 {
@@ -82,6 +198,13 @@ static int check_est_cpu(unsigned int cpuid)
 	return cpu_has(cpu, X86_FEATURE_EST);
 }
 
+static int check_amd_hwpstate_cpu(unsigned int cpuid)
+{
+	struct cpuinfo_x86 *cpu = &cpu_data(cpuid);
+
+	return cpu_has(cpu, X86_FEATURE_HW_PSTATE);
+}
+
 static unsigned extract_io(u32 value, struct acpi_cpufreq_data *data)
 {
 	struct acpi_processor_performance *perf;
@@ -101,7 +224,11 @@ static unsigned extract_msr(u32 msr, struct acpi_cpufreq_data *data)
 	int i;
 	struct acpi_processor_performance *perf;
 
-	msr &= INTEL_MSR_RANGE;
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
+		msr &= AMD_MSR_RANGE;
+	else
+		msr &= INTEL_MSR_RANGE;
+
 	perf = data->acpi_data;
 
 	for (i = 0; data->freq_table[i].frequency != CPUFREQ_TABLE_END; i++) {
@@ -115,6 +242,7 @@ static unsigned extract_freq(u32 val, struct acpi_cpufreq_data *data)
 {
 	switch (data->cpu_feature) {
 	case SYSTEM_INTEL_MSR_CAPABLE:
+	case SYSTEM_AMD_MSR_CAPABLE:
 		return extract_msr(val, data);
 	case SYSTEM_IO_CAPABLE:
 		return extract_io(val, data);
@@ -150,6 +278,7 @@ static void do_drv_read(void *_cmd)
 
 	switch (cmd->type) {
 	case SYSTEM_INTEL_MSR_CAPABLE:
+	case SYSTEM_AMD_MSR_CAPABLE:
 		rdmsr(cmd->addr.msr.reg, cmd->val, h);
 		break;
 	case SYSTEM_IO_CAPABLE:
@@ -174,6 +303,9 @@ static void do_drv_write(void *_cmd)
 		lo = (lo & ~INTEL_MSR_RANGE) | (cmd->val & INTEL_MSR_RANGE);
 		wrmsr(cmd->addr.msr.reg, lo, hi);
 		break;
+	case SYSTEM_AMD_MSR_CAPABLE:
+		wrmsr(cmd->addr.msr.reg, cmd->val, 0);
+		break;
 	case SYSTEM_IO_CAPABLE:
 		acpi_os_write_port((acpi_io_address)cmd->addr.io.port,
 				cmd->val,
@@ -217,6 +349,10 @@ static u32 get_cur_val(const struct cpumask *mask)
 		cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
 		cmd.addr.msr.reg = MSR_IA32_PERF_STATUS;
 		break;
+	case SYSTEM_AMD_MSR_CAPABLE:
+		cmd.type = SYSTEM_AMD_MSR_CAPABLE;
+		cmd.addr.msr.reg = MSR_AMD_PERF_STATUS;
+		break;
 	case SYSTEM_IO_CAPABLE:
 		cmd.type = SYSTEM_IO_CAPABLE;
 		perf = per_cpu(acfreq_data, cpumask_first(mask))->acpi_data;
@@ -326,6 +462,11 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
 		cmd.addr.msr.reg = MSR_IA32_PERF_CTL;
 		cmd.val = (u32) perf->states[next_perf_state].control;
 		break;
+	case SYSTEM_AMD_MSR_CAPABLE:
+		cmd.type = SYSTEM_AMD_MSR_CAPABLE;
+		cmd.addr.msr.reg = MSR_AMD_PERF_CTL;
+		cmd.val = (u32) perf->states[next_perf_state].control;
+		break;
 	case SYSTEM_IO_CAPABLE:
 		cmd.type = SYSTEM_IO_CAPABLE;
 		cmd.addr.io.port = perf->control_register.address;
@@ -419,6 +560,44 @@ static void free_acpi_perf_data(void)
 	free_percpu(acpi_perf_data);
 }
 
+static int boost_notify(struct notifier_block *nb, unsigned long action,
+		      void *hcpu)
+{
+	unsigned cpu = (long)hcpu;
+	const struct cpumask *cpumask;
+
+	cpumask = get_cpu_mask(cpu);
+
+	/*
+	 * Clear the boost-disable bit on the CPU_DOWN path so that
+	 * this cpu cannot block the remaining ones from boosting. On
+	 * the CPU_UP path we simply keep the boost-disable flag in
+	 * sync with the current global state.
+	 */
+
+	switch (action) {
+	case CPU_UP_PREPARE:
+	case CPU_UP_PREPARE_FROZEN:
+		boost_set_msrs(boost_enabled, cpumask);
+		break;
+
+	case CPU_DOWN_PREPARE:
+	case CPU_DOWN_PREPARE_FROZEN:
+		boost_set_msrs(1, cpumask);
+		break;
+
+	default:
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+
+static struct notifier_block boost_nb = {
+	.notifier_call          = boost_notify,
+};
+
 /*
  * acpi_cpufreq_early_init - initialize ACPI P-States library
  *
@@ -559,6 +738,14 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 		policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
 		cpumask_copy(policy->cpus, cpu_core_mask(cpu));
 	}
+
+	if (check_amd_hwpstate_cpu(cpu) && !acpi_pstate_strict) {
+		cpumask_clear(policy->cpus);
+		cpumask_set_cpu(cpu, policy->cpus);
+		cpumask_copy(policy->related_cpus, cpu_sibling_mask(cpu));
+		policy->shared_type = CPUFREQ_SHARED_TYPE_HW;
+		pr_info_once(PFX "overriding BIOS provided _PSD data\n");
+	}
 #endif
 
 	/* capability check */
@@ -580,12 +767,16 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 		break;
 	case ACPI_ADR_SPACE_FIXED_HARDWARE:
 		pr_debug("HARDWARE addr space\n");
-		if (!check_est_cpu(cpu)) {
-			result = -ENODEV;
-			goto err_unreg;
-		}
-		data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE;
-		break;
+		if (check_est_cpu(cpu)) {
+			data->cpu_feature = SYSTEM_INTEL_MSR_CAPABLE;
+			break;
+		}
+		if (check_amd_hwpstate_cpu(cpu)) {
+			data->cpu_feature = SYSTEM_AMD_MSR_CAPABLE;
+			break;
+		}
+		result = -ENODEV;
+		goto err_unreg;
 	default:
 		pr_debug("Unknown addr space %d\n",
 			(u32) (perf->control_register.space_id));
@@ -718,6 +909,7 @@ static int acpi_cpufreq_resume(struct cpufreq_policy *policy)
 
 static struct freq_attr *acpi_cpufreq_attr[] = {
 	&cpufreq_freq_attr_scaling_available_freqs,
+	NULL,	/* this is a placeholder for cpb, do not remove */
 	NULL,
 };
 
@@ -733,6 +925,49 @@ static struct cpufreq_driver acpi_cpufreq_driver = {
 	.attr		= acpi_cpufreq_attr,
 };
 
+static void __init acpi_cpufreq_boost_init(void)
+{
+	if (boot_cpu_has(X86_FEATURE_CPB) || boot_cpu_has(X86_FEATURE_IDA)) {
+		msrs = msrs_alloc();
+
+		if (!msrs)
+			return;
+
+		boost_supported = true;
+		boost_enabled = boost_state(0);
+
+		get_online_cpus();
+
+		/* Force all MSRs to the same value */
+		boost_set_msrs(boost_enabled, cpu_online_mask);
+
+		register_cpu_notifier(&boost_nb);
+
+		put_online_cpus();
+	} else
+		global_boost.attr.mode = 0444;
+
+	/* We create the boost file in any case, though for systems without
+	 * hardware support it will be read-only and hardwired to return 0.
+	 */
+	if (sysfs_create_file(cpufreq_global_kobject, &(global_boost.attr)))
+		pr_warn(PFX "could not register global boost sysfs file\n");
+	else
+		pr_debug("registered global boost sysfs file\n");
+}
+
+static void __exit acpi_cpufreq_boost_exit(void)
+{
+	sysfs_remove_file(cpufreq_global_kobject, &(global_boost.attr));
+
+	if (msrs) {
+		unregister_cpu_notifier(&boost_nb);
+
+		msrs_free(msrs);
+		msrs = NULL;
+	}
+}
+
 static int __init acpi_cpufreq_init(void)
 {
 	int ret;
@@ -746,9 +981,32 @@ static int __init acpi_cpufreq_init(void)
 	if (ret)
 		return ret;
 
+#ifdef CONFIG_X86_ACPI_CPUFREQ_CPB
+	/* this is a sysfs file with a strange name and an even stranger
+	 * semantic - per CPU instantiation, but system global effect.
+	 * Lets enable it only on AMD CPUs for compatibility reasons and
+	 * only if configured. This is considered legacy code, which
+	 * will probably be removed at some point in the future.
+	 */
+	if (check_amd_hwpstate_cpu(0)) {
+		struct freq_attr **iter;
+
+		pr_debug("adding sysfs entry for cpb\n");
+
+		for (iter = acpi_cpufreq_attr; *iter != NULL; iter++)
+			;
+
+		/* make sure there is a terminator behind it */
+		if (iter[1] == NULL)
+			*iter = &cpb;
+	}
+#endif
+
 	ret = cpufreq_register_driver(&acpi_cpufreq_driver);
 	if (ret)
 		free_acpi_perf_data();
+	else
+		acpi_cpufreq_boost_init();
 
 	return ret;
 }
@@ -757,6 +1015,8 @@ static void __exit acpi_cpufreq_exit(void)
 {
 	pr_debug("acpi_cpufreq_exit\n");
 
+	acpi_cpufreq_boost_exit();
+
 	cpufreq_unregister_driver(&acpi_cpufreq_driver);
 
 	free_acpi_perf_data();
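The new global boost attribute is registered on cpufreq_global_kobject, so it surfaces as /sys/devices/system/cpu/cpufreq/boost; with CONFIG_X86_ACPI_CPUFREQ_CPB the legacy per-policy cpb file appears alongside it and has the same system-wide effect. A minimal userspace sketch for flipping the knob (assumed path as above, not from this commit; needs root):

/*
 * Standalone sketch, not kernel code: write "0" or "1" to the global
 * boost file created by acpi_cpufreq_boost_init().
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	const char *path = "/sys/devices/system/cpu/cpufreq/boost";
	const char *val = (argc > 1) ? argv[1] : "1";	/* "0" or "1" */
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror(path);
		return 1;
	}
	if (write(fd, val, 1) != 1)
		perror("write");
	close(fd);
	return 0;
}

Note that writing "0" disables boosting on every CPU at once; the per-CPU cpb file behaves the same way, which is why the Kconfig help text above calls it legacy.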
diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c
new file mode 100644
index 000000000000..e9158278c71d
--- /dev/null
+++ b/drivers/cpufreq/cpufreq-cpu0.c
@@ -0,0 +1,269 @@
+/*
+ * Copyright (C) 2012 Freescale Semiconductor, Inc.
+ *
+ * The OPP code in function cpu0_set_target() is reused from
+ * drivers/cpufreq/omap-cpufreq.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt
+
+#include <linux/clk.h>
+#include <linux/cpu.h>
+#include <linux/cpufreq.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/opp.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+
+static unsigned int transition_latency;
+static unsigned int voltage_tolerance; /* in percentage */
+
+static struct device *cpu_dev;
+static struct clk *cpu_clk;
+static struct regulator *cpu_reg;
+static struct cpufreq_frequency_table *freq_table;
+
+static int cpu0_verify_speed(struct cpufreq_policy *policy)
+{
+	return cpufreq_frequency_table_verify(policy, freq_table);
+}
+
+static unsigned int cpu0_get_speed(unsigned int cpu)
+{
+	return clk_get_rate(cpu_clk) / 1000;
+}
+
+static int cpu0_set_target(struct cpufreq_policy *policy,
+			   unsigned int target_freq, unsigned int relation)
+{
+	struct cpufreq_freqs freqs;
+	struct opp *opp;
+	unsigned long freq_Hz, volt = 0, volt_old = 0, tol = 0;
+	unsigned int index, cpu;
+	int ret;
+
+	ret = cpufreq_frequency_table_target(policy, freq_table, target_freq,
+					     relation, &index);
+	if (ret) {
+		pr_err("failed to match target frequency %d: %d\n",
+		       target_freq, ret);
+		return ret;
+	}
+
+	freq_Hz = clk_round_rate(cpu_clk, freq_table[index].frequency * 1000);
+	if ((long)freq_Hz < 0)
+		freq_Hz = freq_table[index].frequency * 1000;
+	freqs.new = freq_Hz / 1000;
+	freqs.old = clk_get_rate(cpu_clk) / 1000;
+
+	if (freqs.old == freqs.new)
+		return 0;
+
+	for_each_online_cpu(cpu) {
+		freqs.cpu = cpu;
+		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
+	}
+
+	if (cpu_reg) {
+		opp = opp_find_freq_ceil(cpu_dev, &freq_Hz);
+		if (IS_ERR(opp)) {
+			pr_err("failed to find OPP for %ld\n", freq_Hz);
+			return PTR_ERR(opp);
+		}
+		volt = opp_get_voltage(opp);
+		tol = volt * voltage_tolerance / 100;
+		volt_old = regulator_get_voltage(cpu_reg);
+	}
+
+	pr_debug("%u MHz, %ld mV --> %u MHz, %ld mV\n",
+		 freqs.old / 1000, volt_old ? volt_old / 1000 : -1,
+		 freqs.new / 1000, volt ? volt / 1000 : -1);
+
+	/* scaling up? scale voltage before frequency */
+	if (cpu_reg && freqs.new > freqs.old) {
+		ret = regulator_set_voltage_tol(cpu_reg, volt, tol);
+		if (ret) {
+			pr_err("failed to scale voltage up: %d\n", ret);
+			freqs.new = freqs.old;
+			return ret;
+		}
+	}
+
+	ret = clk_set_rate(cpu_clk, freqs.new * 1000);
+	if (ret) {
+		pr_err("failed to set clock rate: %d\n", ret);
+		if (cpu_reg)
+			regulator_set_voltage_tol(cpu_reg, volt_old, tol);
+		return ret;
+	}
+
+	/* scaling down? scale voltage after frequency */
+	if (cpu_reg && freqs.new < freqs.old) {
+		ret = regulator_set_voltage_tol(cpu_reg, volt, tol);
+		if (ret) {
+			pr_err("failed to scale voltage down: %d\n", ret);
+			clk_set_rate(cpu_clk, freqs.old * 1000);
+			freqs.new = freqs.old;
+			return ret;
+		}
+	}
+
+	for_each_online_cpu(cpu) {
+		freqs.cpu = cpu;
+		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+	}
+
+	return 0;
+}
+
+static int cpu0_cpufreq_init(struct cpufreq_policy *policy)
+{
+	int ret;
+
+	if (policy->cpu != 0)
+		return -EINVAL;
+
+	ret = cpufreq_frequency_table_cpuinfo(policy, freq_table);
+	if (ret) {
+		pr_err("invalid frequency table: %d\n", ret);
+		return ret;
+	}
+
+	policy->cpuinfo.transition_latency = transition_latency;
+	policy->cur = clk_get_rate(cpu_clk) / 1000;
+
+	/*
+	 * The driver only supports the SMP configuration where all processors
+	 * share the clock and voltage. Use the cpufreq affected_cpus
+	 * interface to have all CPUs scaled together.
+	 */
+	policy->shared_type = CPUFREQ_SHARED_TYPE_ANY;
+	cpumask_setall(policy->cpus);
+
+	cpufreq_frequency_table_get_attr(freq_table, policy->cpu);
+
+	return 0;
+}
+
+static int cpu0_cpufreq_exit(struct cpufreq_policy *policy)
+{
+	cpufreq_frequency_table_put_attr(policy->cpu);
+
+	return 0;
+}
+
+static struct freq_attr *cpu0_cpufreq_attr[] = {
+	&cpufreq_freq_attr_scaling_available_freqs,
+	NULL,
+};
+
+static struct cpufreq_driver cpu0_cpufreq_driver = {
+	.flags = CPUFREQ_STICKY,
+	.verify = cpu0_verify_speed,
+	.target = cpu0_set_target,
+	.get = cpu0_get_speed,
+	.init = cpu0_cpufreq_init,
+	.exit = cpu0_cpufreq_exit,
+	.name = "generic_cpu0",
+	.attr = cpu0_cpufreq_attr,
+};
+
+static int __devinit cpu0_cpufreq_driver_init(void)
+{
+	struct device_node *np;
+	int ret;
+
+	np = of_find_node_by_path("/cpus/cpu@0");
+	if (!np) {
+		pr_err("failed to find cpu0 node\n");
+		return -ENOENT;
+	}
+
+	cpu_dev = get_cpu_device(0);
+	if (!cpu_dev) {
+		pr_err("failed to get cpu0 device\n");
+		ret = -ENODEV;
+		goto out_put_node;
+	}
+
+	cpu_dev->of_node = np;
+
+	cpu_clk = clk_get(cpu_dev, NULL);
+	if (IS_ERR(cpu_clk)) {
+		ret = PTR_ERR(cpu_clk);
+		pr_err("failed to get cpu0 clock: %d\n", ret);
+		goto out_put_node;
+	}
+
+	cpu_reg = regulator_get(cpu_dev, "cpu0");
+	if (IS_ERR(cpu_reg)) {
+		pr_warn("failed to get cpu0 regulator\n");
+		cpu_reg = NULL;
+	}
+
+	ret = of_init_opp_table(cpu_dev);
+	if (ret) {
+		pr_err("failed to init OPP table: %d\n", ret);
+		goto out_put_node;
+	}
+
+	ret = opp_init_cpufreq_table(cpu_dev, &freq_table);
+	if (ret) {
+		pr_err("failed to init cpufreq table: %d\n", ret);
+		goto out_put_node;
+	}
+
+	of_property_read_u32(np, "voltage-tolerance", &voltage_tolerance);
+
+	if (of_property_read_u32(np, "clock-latency", &transition_latency))
+		transition_latency = CPUFREQ_ETERNAL;
+
+	if (cpu_reg) {
+		struct opp *opp;
+		unsigned long min_uV, max_uV;
+		int i;
+
+		/*
+		 * OPP is maintained in order of increasing frequency, and
+		 * freq_table initialised from OPP is therefore sorted in the
+		 * same order.
+		 */
+		for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++)
+			;
+		opp = opp_find_freq_exact(cpu_dev,
+				freq_table[0].frequency * 1000, true);
+		min_uV = opp_get_voltage(opp);
+		opp = opp_find_freq_exact(cpu_dev,
+				freq_table[i-1].frequency * 1000, true);
+		max_uV = opp_get_voltage(opp);
+		ret = regulator_set_voltage_time(cpu_reg, min_uV, max_uV);
+		if (ret > 0)
+			transition_latency += ret * 1000;
+	}
+
+	ret = cpufreq_register_driver(&cpu0_cpufreq_driver);
+	if (ret) {
+		pr_err("failed register driver: %d\n", ret);
+		goto out_free_table;
+	}
+
+	of_node_put(np);
+	return 0;
+
+out_free_table:
+	opp_free_cpufreq_table(cpu_dev, &freq_table);
+out_put_node:
+	of_node_put(np);
+	return ret;
+}
+late_initcall(cpu0_cpufreq_driver_init);
+
+MODULE_AUTHOR("Shawn Guo <shawn.guo@linaro.org>");
+MODULE_DESCRIPTION("Generic CPU0 cpufreq driver");
+MODULE_LICENSE("GPL");
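Since cpu0_cpufreq_init() does cpumask_setall(policy->cpus), one policy covers every CPU and they all scale together. A quick sanity check of that, as a hypothetical userspace sketch reading the standard cpufreq sysfs layout (not part of the driver):

/*
 * Standalone sketch, not kernel code: on a system driven by
 * cpufreq-cpu0, affected_cpus under cpu0's policy should list all CPUs.
 */
#include <stdio.h>

int main(void)
{
	char buf[256];
	FILE *f = fopen("/sys/devices/system/cpu/cpu0/cpufreq/affected_cpus",
			"r");

	if (!f) {
		perror("affected_cpus");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("CPUs scaled together with cpu0: %s", buf);
	fclose(f);
	return 0;
}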
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 55f0354864e2..a152af7e1991 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -504,6 +504,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 			j_dbs_info->prev_cpu_nice =
 				kcpustat_cpu(j).cpustat[CPUTIME_NICE];
 		}
+		this_dbs_info->cpu = cpu;
 		this_dbs_info->down_skip = 0;
 		this_dbs_info->requested_freq = policy->cur;
 
@@ -583,6 +584,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 			__cpufreq_driver_target(
 					this_dbs_info->cur_policy,
 					policy->min, CPUFREQ_RELATION_L);
+			dbs_check_cpu(this_dbs_info);
 		mutex_unlock(&this_dbs_info->timer_mutex);
 
 		break;
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 14c1af5a264f..396322f2a83f 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -761,6 +761,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		else if (policy->min > this_dbs_info->cur_policy->cur)
 			__cpufreq_driver_target(this_dbs_info->cur_policy,
 				policy->min, CPUFREQ_RELATION_L);
+		dbs_check_cpu(this_dbs_info);
 		mutex_unlock(&this_dbs_info->timer_mutex);
 		break;
 	}
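Both governor fixes close the same gap: on CPUFREQ_GOV_LIMITS the old code only clamped the running frequency into the new [min, max] window, so after a limit was relaxed the CPU stayed at the clamped frequency until the next sampling interval; the added dbs_check_cpu() call re-evaluates the load-based target immediately. A toy model of the difference (standalone sketch, not kernel code):

/*
 * Sketch: re-evaluating the target after a limits change, instead of
 * only clamping the current frequency. Values in kHz.
 */
#include <stdio.h>

static unsigned int clamp_freq(unsigned int f, unsigned int min,
			       unsigned int max)
{
	if (f > max)
		return max;
	if (f < min)
		return min;
	return f;
}

/* stand-in for what dbs_check_cpu() does: pick a frequency from load */
static unsigned int target_for_load(unsigned int load, unsigned int max)
{
	return max * load / 100;
}

int main(void)
{
	unsigned int cur = 800000, min = 300000, max = 1200000, load = 90;

	cur = clamp_freq(cur, min, max);	/* old behavior stopped here */
	cur = clamp_freq(target_for_load(load, max), min, max); /* the fix */
	printf("%u kHz\n", cur);
	return 0;
}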
diff --git a/drivers/cpufreq/longhaul.h b/drivers/cpufreq/longhaul.h
index cbf48fbca881..e2dc436099d1 100644
--- a/drivers/cpufreq/longhaul.h
+++ b/drivers/cpufreq/longhaul.h
@@ -56,7 +56,7 @@ union msr_longhaul {
 /*
  * VIA C3 Samuel 1 & Samuel 2 (stepping 0)
  */
-static const int __cpuinitdata samuel1_mults[16] = {
+static const int __cpuinitconst samuel1_mults[16] = {
 	-1, /* 0000 -> RESERVED */
 	30, /* 0001 -> 3.0x */
 	40, /* 0010 -> 4.0x */
@@ -75,7 +75,7 @@ static const int __cpuinitdata samuel1_mults[16] = {
 	-1, /* 1111 -> RESERVED */
 };
 
-static const int __cpuinitdata samuel1_eblcr[16] = {
+static const int __cpuinitconst samuel1_eblcr[16] = {
 	50, /* 0000 -> RESERVED */
 	30, /* 0001 -> 3.0x */
 	40, /* 0010 -> 4.0x */
@@ -97,7 +97,7 @@ static const int __cpuinitdata samuel1_eblcr[16] = {
 /*
  * VIA C3 Samuel2 Stepping 1->15
  */
-static const int __cpuinitdata samuel2_eblcr[16] = {
+static const int __cpuinitconst samuel2_eblcr[16] = {
 	50, /* 0000 -> 5.0x */
 	30, /* 0001 -> 3.0x */
 	40, /* 0010 -> 4.0x */
@@ -119,7 +119,7 @@ static const int __cpuinitdata samuel2_eblcr[16] = {
 /*
  * VIA C3 Ezra
  */
-static const int __cpuinitdata ezra_mults[16] = {
+static const int __cpuinitconst ezra_mults[16] = {
 	100, /* 0000 -> 10.0x */
 	30, /* 0001 -> 3.0x */
 	40, /* 0010 -> 4.0x */
@@ -138,7 +138,7 @@ static const int __cpuinitdata ezra_mults[16] = {
 	120, /* 1111 -> 12.0x */
 };
 
-static const int __cpuinitdata ezra_eblcr[16] = {
+static const int __cpuinitconst ezra_eblcr[16] = {
 	50, /* 0000 -> 5.0x */
 	30, /* 0001 -> 3.0x */
 	40, /* 0010 -> 4.0x */
@@ -160,7 +160,7 @@ static const int __cpuinitdata ezra_eblcr[16] = {
 /*
  * VIA C3 (Ezra-T) [C5M].
  */
-static const int __cpuinitdata ezrat_mults[32] = {
+static const int __cpuinitconst ezrat_mults[32] = {
 	100, /* 0000 -> 10.0x */
 	30, /* 0001 -> 3.0x */
 	40, /* 0010 -> 4.0x */
@@ -196,7 +196,7 @@ static const int __cpuinitdata ezrat_mults[32] = {
 	-1, /* 1111 -> RESERVED (12.0x) */
 };
 
-static const int __cpuinitdata ezrat_eblcr[32] = {
+static const int __cpuinitconst ezrat_eblcr[32] = {
 	50, /* 0000 -> 5.0x */
 	30, /* 0001 -> 3.0x */
 	40, /* 0010 -> 4.0x */
@@ -235,7 +235,7 @@ static const int __cpuinitdata ezrat_eblcr[32] = {
 /*
  * VIA C3 Nehemiah */
 
-static const int __cpuinitdata nehemiah_mults[32] = {
+static const int __cpuinitconst nehemiah_mults[32] = {
 	100, /* 0000 -> 10.0x */
 	-1, /* 0001 -> 16.0x */
 	40, /* 0010 -> 4.0x */
@@ -270,7 +270,7 @@ static const int __cpuinitdata nehemiah_mults[32] = {
 	-1, /* 1111 -> 12.0x */
 };
 
-static const int __cpuinitdata nehemiah_eblcr[32] = {
+static const int __cpuinitconst nehemiah_eblcr[32] = {
 	50, /* 0000 -> 5.0x */
 	160, /* 0001 -> 16.0x */
 	40, /* 0010 -> 4.0x */
@@ -315,7 +315,7 @@ struct mV_pos {
 	unsigned short pos;
 };
 
-static const struct mV_pos __cpuinitdata vrm85_mV[32] = {
+static const struct mV_pos __cpuinitconst vrm85_mV[32] = {
 	{1250, 8}, {1200, 6}, {1150, 4}, {1100, 2},
 	{1050, 0}, {1800, 30}, {1750, 28}, {1700, 26},
 	{1650, 24}, {1600, 22}, {1550, 20}, {1500, 18},
@@ -326,14 +326,14 @@ static const struct mV_pos __cpuinitdata vrm85_mV[32] = {
 	{1475, 17}, {1425, 15}, {1375, 13}, {1325, 11}
 };
 
-static const unsigned char __cpuinitdata mV_vrm85[32] = {
+static const unsigned char __cpuinitconst mV_vrm85[32] = {
 	0x04, 0x14, 0x03, 0x13, 0x02, 0x12, 0x01, 0x11,
 	0x00, 0x10, 0x0f, 0x1f, 0x0e, 0x1e, 0x0d, 0x1d,
 	0x0c, 0x1c, 0x0b, 0x1b, 0x0a, 0x1a, 0x09, 0x19,
 	0x08, 0x18, 0x07, 0x17, 0x06, 0x16, 0x05, 0x15
 };
 
-static const struct mV_pos __cpuinitdata mobilevrm_mV[32] = {
+static const struct mV_pos __cpuinitconst mobilevrm_mV[32] = {
 	{1750, 31}, {1700, 30}, {1650, 29}, {1600, 28},
 	{1550, 27}, {1500, 26}, {1450, 25}, {1400, 24},
 	{1350, 23}, {1300, 22}, {1250, 21}, {1200, 20},
@@ -344,7 +344,7 @@ static const struct mV_pos __cpuinitdata mobilevrm_mV[32] = {
 	{675, 3}, {650, 2}, {625, 1}, {600, 0}
 };
 
-static const unsigned char __cpuinitdata mV_mobilevrm[32] = {
+static const unsigned char __cpuinitconst mV_mobilevrm[32] = {
 	0x1f, 0x1e, 0x1d, 0x1c, 0x1b, 0x1a, 0x19, 0x18,
 	0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, 0x10,
 	0x0f, 0x0e, 0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x08,
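All the longhaul.h changes swap __cpuinitdata for __cpuinitconst on const-qualified tables; per the shortlog entry "sections: fix section conflicts in drivers/cpufreq", gcc refuses to mix const and non-const objects in one named section. A standalone sketch of the rule, with hypothetical macro names mirroring the kernel ones:

/*
 * Standalone sketch, not kernel code: const data must go in its own
 * (read-only) named section. Uncommenting 'bad' below makes gcc fail
 * with "causes a section type conflict", which is the error this
 * commit fixes by using __cpuinitconst for const tables.
 */
#define my_initdata  __attribute__((__section__(".my.init.data")))
#define my_initconst __attribute__((__section__(".my.init.rodata")))

static int my_initdata counter;				/* writable section */
static const int my_initconst table[2] = { 1, 2 };	/* read-only section */

/* static const int my_initdata bad = 3;  -- section type conflict */

int main(void)
{
	return counter + table[0];
}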
diff --git a/drivers/cpufreq/omap-cpufreq.c b/drivers/cpufreq/omap-cpufreq.c
index b47034e650a5..65f8e9a54975 100644
--- a/drivers/cpufreq/omap-cpufreq.c
+++ b/drivers/cpufreq/omap-cpufreq.c
@@ -40,16 +40,6 @@
 /* OPP tolerance in percentage */
 #define	OPP_TOLERANCE	4
 
-#ifdef CONFIG_SMP
-struct lpj_info {
-	unsigned long	ref;
-	unsigned int	freq;
-};
-
-static DEFINE_PER_CPU(struct lpj_info, lpj_ref);
-static struct lpj_info global_lpj_ref;
-#endif
-
 static struct cpufreq_frequency_table *freq_table;
 static atomic_t freq_table_users = ATOMIC_INIT(0);
 static struct clk *mpu_clk;
@@ -161,31 +151,6 @@ static int omap_target(struct cpufreq_policy *policy,
 	}
 
 	freqs.new = omap_getspeed(policy->cpu);
-#ifdef CONFIG_SMP
-	/*
-	 * Note that loops_per_jiffy is not updated on SMP systems in
-	 * cpufreq driver. So, update the per-CPU loops_per_jiffy value
-	 * on frequency transition. We need to update all dependent CPUs.
-	 */
-	for_each_cpu(i, policy->cpus) {
-		struct lpj_info *lpj = &per_cpu(lpj_ref, i);
-		if (!lpj->freq) {
-			lpj->ref = per_cpu(cpu_data, i).loops_per_jiffy;
-			lpj->freq = freqs.old;
-		}
-
-		per_cpu(cpu_data, i).loops_per_jiffy =
-			cpufreq_scale(lpj->ref, lpj->freq, freqs.new);
-	}
-
-	/* And don't forget to adjust the global one */
-	if (!global_lpj_ref.freq) {
-		global_lpj_ref.ref = loops_per_jiffy;
-		global_lpj_ref.freq = freqs.old;
-	}
-	loops_per_jiffy = cpufreq_scale(global_lpj_ref.ref, global_lpj_ref.freq,
-					freqs.new);
-#endif
 
 done:
 	/* notifiers */
@@ -301,9 +266,9 @@ static int __init omap_cpufreq_init(void)
 	}
 
 	mpu_dev = omap_device_get_by_hwmod_name("mpu");
-	if (!mpu_dev) {
+	if (IS_ERR(mpu_dev)) {
 		pr_warning("%s: unable to get the mpu device\n", __func__);
-		return -EINVAL;
+		return PTR_ERR(mpu_dev);
 	}
 
 	mpu_reg = regulator_get(mpu_dev, "vcc");
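The deleted OMAP code rescaled loops_per_jiffy with cpufreq_scale() on every transition; per the merge message, that duty moved into a generic ARM cpufreq transition notifier so individual drivers no longer carry it. The arithmetic being moved is a linear rescale of the calibrated value, sketched standalone below (hypothetical helper name, not the kernel implementation):

/*
 * Standalone sketch, not kernel code: rescale a reference
 * loops_per_jiffy value linearly with CPU frequency, mirroring the
 * arithmetic the removed block performed via cpufreq_scale().
 */
#include <stdio.h>

static unsigned long lpj_rescale(unsigned long ref_lpj,
				 unsigned int ref_khz, unsigned int new_khz)
{
	return (unsigned long)((unsigned long long)ref_lpj * new_khz
			       / ref_khz);
}

int main(void)
{
	/* e.g. calibrated at 600 MHz, now running at 1 GHz */
	printf("%lu\n", lpj_rescale(2994176UL, 600000U, 1000000U));
	return 0;
}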
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
index 1a40935c85fd..129e80bfff22 100644
--- a/drivers/cpufreq/powernow-k8.c
+++ b/drivers/cpufreq/powernow-k8.c
@@ -48,22 +48,12 @@
 #define PFX "powernow-k8: "
 #define VERSION "version 2.20.00"
 #include "powernow-k8.h"
-#include "mperf.h"
 
 /* serialize freq changes */
 static DEFINE_MUTEX(fidvid_mutex);
 
 static DEFINE_PER_CPU(struct powernow_k8_data *, powernow_data);
 
-static int cpu_family = CPU_OPTERON;
-
-/* array to map SW pstate number to acpi state */
-static u32 ps_to_as[8];
-
-/* core performance boost */
-static bool cpb_capable, cpb_enabled;
-static struct msr __percpu *msrs;
-
 static struct cpufreq_driver cpufreq_amd64_driver;
 
 #ifndef CONFIG_SMP
@@ -85,12 +75,6 @@ static u32 find_khz_freq_from_fid(u32 fid)
 	return 1000 * find_freq_from_fid(fid);
 }
 
-static u32 find_khz_freq_from_pstate(struct cpufreq_frequency_table *data,
-		u32 pstate)
-{
-	return data[ps_to_as[pstate]].frequency;
-}
-
 /* Return the vco fid for an input fid
  *
  * Each "low" fid has corresponding "high" fid, and you can get to "low" fids
@@ -113,9 +97,6 @@ static int pending_bit_stuck(void)
 {
 	u32 lo, hi;
 
-	if (cpu_family == CPU_HW_PSTATE)
-		return 0;
-
 	rdmsr(MSR_FIDVID_STATUS, lo, hi);
 	return lo & MSR_S_LO_CHANGE_PENDING ? 1 : 0;
 }
@@ -129,20 +110,6 @@ static int query_current_values_with_pending_wait(struct powernow_k8_data *data)
 	u32 lo, hi;
 	u32 i = 0;
 
-	if (cpu_family == CPU_HW_PSTATE) {
-		rdmsr(MSR_PSTATE_STATUS, lo, hi);
-		i = lo & HW_PSTATE_MASK;
-		data->currpstate = i;
-
-		/*
-		 * a workaround for family 11h erratum 311 might cause
-		 * an "out-of-range Pstate if the core is in Pstate-0
-		 */
-		if ((boot_cpu_data.x86 == 0x11) && (i >= data->numps))
-			data->currpstate = HW_PSTATE_0;
-
-		return 0;
-	}
 	do {
 		if (i++ > 10000) {
 			pr_debug("detected change pending stuck\n");
@@ -299,14 +266,6 @@ static int decrease_vid_code_by_step(struct powernow_k8_data *data,
 	return 0;
 }
 
-/* Change hardware pstate by single MSR write */
-static int transition_pstate(struct powernow_k8_data *data, u32 pstate)
-{
-	wrmsr(MSR_PSTATE_CTRL, pstate, 0);
-	data->currpstate = pstate;
-	return 0;
-}
-
 /* Change Opteron/Athlon64 fid and vid, by the 3 phases. */
 static int transition_fid_vid(struct powernow_k8_data *data,
 		u32 reqfid, u32 reqvid)
@@ -523,8 +482,6 @@ static int core_voltage_post_transition(struct powernow_k8_data *data,
 static const struct x86_cpu_id powernow_k8_ids[] = {
 	/* IO based frequency switching */
 	{ X86_VENDOR_AMD, 0xf },
-	/* MSR based frequency switching supported */
-	X86_FEATURE_MATCH(X86_FEATURE_HW_PSTATE),
 	{}
 };
 MODULE_DEVICE_TABLE(x86cpu, powernow_k8_ids);
@@ -560,15 +517,8 @@ static void check_supported_cpu(void *_rc)
560 "Power state transitions not supported\n"); 517 "Power state transitions not supported\n");
561 return; 518 return;
562 } 519 }
563 } else { /* must be a HW Pstate capable processor */ 520 *rc = 0;
564 cpuid(CPUID_FREQ_VOLT_CAPABILITIES, &eax, &ebx, &ecx, &edx);
565 if ((edx & USE_HW_PSTATE) == USE_HW_PSTATE)
566 cpu_family = CPU_HW_PSTATE;
567 else
568 return;
569 } 521 }
570
571 *rc = 0;
572} 522}
573 523
574static int check_pst_table(struct powernow_k8_data *data, struct pst_s *pst, 524static int check_pst_table(struct powernow_k8_data *data, struct pst_s *pst,
@@ -632,18 +582,11 @@ static void print_basics(struct powernow_k8_data *data)
632 for (j = 0; j < data->numps; j++) { 582 for (j = 0; j < data->numps; j++) {
633 if (data->powernow_table[j].frequency != 583 if (data->powernow_table[j].frequency !=
634 CPUFREQ_ENTRY_INVALID) { 584 CPUFREQ_ENTRY_INVALID) {
635 if (cpu_family == CPU_HW_PSTATE) {
636 printk(KERN_INFO PFX
637 " %d : pstate %d (%d MHz)\n", j,
638 data->powernow_table[j].index,
639 data->powernow_table[j].frequency/1000);
640 } else {
641 printk(KERN_INFO PFX 585 printk(KERN_INFO PFX
642 "fid 0x%x (%d MHz), vid 0x%x\n", 586 "fid 0x%x (%d MHz), vid 0x%x\n",
643 data->powernow_table[j].index & 0xff, 587 data->powernow_table[j].index & 0xff,
644 data->powernow_table[j].frequency/1000, 588 data->powernow_table[j].frequency/1000,
645 data->powernow_table[j].index >> 8); 589 data->powernow_table[j].index >> 8);
646 }
647 } 590 }
648 } 591 }
649 if (data->batps) 592 if (data->batps)
@@ -651,20 +594,6 @@ static void print_basics(struct powernow_k8_data *data)
651 data->batps); 594 data->batps);
652} 595}
653 596
654static u32 freq_from_fid_did(u32 fid, u32 did)
655{
656 u32 mhz = 0;
657
658 if (boot_cpu_data.x86 == 0x10)
659 mhz = (100 * (fid + 0x10)) >> did;
660 else if (boot_cpu_data.x86 == 0x11)
661 mhz = (100 * (fid + 8)) >> did;
662 else
663 BUG();
664
665 return mhz * 1000;
666}
667
668static int fill_powernow_table(struct powernow_k8_data *data, 597static int fill_powernow_table(struct powernow_k8_data *data,
669 struct pst_s *pst, u8 maxvid) 598 struct pst_s *pst, u8 maxvid)
670{ 599{
@@ -824,7 +753,7 @@ static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data,
824{ 753{
825 u64 control; 754 u64 control;
826 755
827 if (!data->acpi_data.state_count || (cpu_family == CPU_HW_PSTATE)) 756 if (!data->acpi_data.state_count)
828 return; 757 return;
829 758
830 control = data->acpi_data.states[index].control; 759 control = data->acpi_data.states[index].control;
@@ -875,10 +804,7 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
875 data->numps = data->acpi_data.state_count; 804 data->numps = data->acpi_data.state_count;
876 powernow_k8_acpi_pst_values(data, 0); 805 powernow_k8_acpi_pst_values(data, 0);
877 806
878 if (cpu_family == CPU_HW_PSTATE) 807 ret_val = fill_powernow_table_fidvid(data, powernow_table);
879 ret_val = fill_powernow_table_pstate(data, powernow_table);
880 else
881 ret_val = fill_powernow_table_fidvid(data, powernow_table);
882 if (ret_val) 808 if (ret_val)
883 goto err_out_mem; 809 goto err_out_mem;
884 810
@@ -915,51 +841,6 @@ err_out:
915 return ret_val; 841 return ret_val;
916} 842}
917 843
918static int fill_powernow_table_pstate(struct powernow_k8_data *data,
919 struct cpufreq_frequency_table *powernow_table)
920{
921 int i;
922 u32 hi = 0, lo = 0;
923 rdmsr(MSR_PSTATE_CUR_LIMIT, lo, hi);
924 data->max_hw_pstate = (lo & HW_PSTATE_MAX_MASK) >> HW_PSTATE_MAX_SHIFT;
925
926 for (i = 0; i < data->acpi_data.state_count; i++) {
927 u32 index;
928
929 index = data->acpi_data.states[i].control & HW_PSTATE_MASK;
930 if (index > data->max_hw_pstate) {
931 printk(KERN_ERR PFX "invalid pstate %d - "
932 "bad value %d.\n", i, index);
933 printk(KERN_ERR PFX "Please report to BIOS "
934 "manufacturer\n");
935 invalidate_entry(powernow_table, i);
936 continue;
937 }
938
939 ps_to_as[index] = i;
940
941 /* Frequency may be rounded for these */
942 if ((boot_cpu_data.x86 == 0x10 && boot_cpu_data.x86_model < 10)
943 || boot_cpu_data.x86 == 0x11) {
944
945 rdmsr(MSR_PSTATE_DEF_BASE + index, lo, hi);
946 if (!(hi & HW_PSTATE_VALID_MASK)) {
947 pr_debug("invalid pstate %d, ignoring\n", index);
948 invalidate_entry(powernow_table, i);
949 continue;
950 }
951
952 powernow_table[i].frequency =
953 freq_from_fid_did(lo & 0x3f, (lo >> 6) & 7);
954 } else
955 powernow_table[i].frequency =
956 data->acpi_data.states[i].core_frequency * 1000;
957
958 powernow_table[i].index = index;
959 }
960 return 0;
961}
962
963static int fill_powernow_table_fidvid(struct powernow_k8_data *data, 844static int fill_powernow_table_fidvid(struct powernow_k8_data *data,
964 struct cpufreq_frequency_table *powernow_table) 845 struct cpufreq_frequency_table *powernow_table)
965{ 846{
@@ -1036,15 +917,7 @@ static int get_transition_latency(struct powernow_k8_data *data)
 			max_latency = cur_latency;
 	}
 	if (max_latency == 0) {
-		/*
-		 * Fam 11h and later may return 0 as transition latency. This
-		 * is intended and means "very fast". While cpufreq core and
-		 * governors currently can handle that gracefully, better set it
-		 * to 1 to avoid problems in the future.
-		 */
-		if (boot_cpu_data.x86 < 0x11)
-			printk(KERN_ERR FW_WARN PFX "Invalid zero transition "
-				"latency\n");
+		pr_err(FW_WARN PFX "Invalid zero transition latency\n");
 		max_latency = 1;
 	}
 	/* value in usecs, needs to be in nanoseconds */
@@ -1104,40 +977,6 @@ static int transition_frequency_fidvid(struct powernow_k8_data *data,
 	return res;
 }
 
-/* Take a frequency, and issue the hardware pstate transition command */
-static int transition_frequency_pstate(struct powernow_k8_data *data,
-		unsigned int index)
-{
-	u32 pstate = 0;
-	int res, i;
-	struct cpufreq_freqs freqs;
-
-	pr_debug("cpu %d transition to index %u\n", smp_processor_id(), index);
-
-	/* get MSR index for hardware pstate transition */
-	pstate = index & HW_PSTATE_MASK;
-	if (pstate > data->max_hw_pstate)
-		return -EINVAL;
-
-	freqs.old = find_khz_freq_from_pstate(data->powernow_table,
-			data->currpstate);
-	freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate);
-
-	for_each_cpu(i, data->available_cores) {
-		freqs.cpu = i;
-		cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
-	}
-
-	res = transition_pstate(data, pstate);
-	freqs.new = find_khz_freq_from_pstate(data->powernow_table, pstate);
-
-	for_each_cpu(i, data->available_cores) {
-		freqs.cpu = i;
-		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
-	}
-	return res;
-}
-
 struct powernowk8_target_arg {
 	struct cpufreq_policy *pol;
 	unsigned targfreq;
@@ -1173,18 +1012,15 @@ static long powernowk8_target_fn(void *arg)
 	if (query_current_values_with_pending_wait(data))
 		return -EIO;
 
-	if (cpu_family != CPU_HW_PSTATE) {
-		pr_debug("targ: curr fid 0x%x, vid 0x%x\n",
-		data->currfid, data->currvid);
+	pr_debug("targ: curr fid 0x%x, vid 0x%x\n",
+		data->currfid, data->currvid);
 
-		if ((checkvid != data->currvid) ||
-		    (checkfid != data->currfid)) {
-			printk(KERN_INFO PFX
-				"error - out of sync, fix 0x%x 0x%x, "
-				"vid 0x%x 0x%x\n",
-				checkfid, data->currfid,
-				checkvid, data->currvid);
-		}
+	if ((checkvid != data->currvid) ||
+	    (checkfid != data->currfid)) {
+		pr_info(PFX
+			"error - out of sync, fix 0x%x 0x%x, vid 0x%x 0x%x\n",
+			checkfid, data->currfid,
+			checkvid, data->currvid);
 	}
 
 	if (cpufreq_frequency_table_target(pol, data->powernow_table,
@@ -1195,11 +1031,8 @@ static long powernowk8_target_fn(void *arg)
 
 	powernow_k8_acpi_pst_values(data, newstate);
 
-	if (cpu_family == CPU_HW_PSTATE)
-		ret = transition_frequency_pstate(data,
-			data->powernow_table[newstate].index);
-	else
-		ret = transition_frequency_fidvid(data, newstate);
+	ret = transition_frequency_fidvid(data, newstate);
+
 	if (ret) {
 		printk(KERN_ERR PFX "transition frequency failed\n");
 		mutex_unlock(&fidvid_mutex);
@@ -1207,11 +1040,7 @@ static long powernowk8_target_fn(void *arg)
 	}
 	mutex_unlock(&fidvid_mutex);
 
-	if (cpu_family == CPU_HW_PSTATE)
-		pol->cur = find_khz_freq_from_pstate(data->powernow_table,
-				data->powernow_table[newstate].index);
-	else
-		pol->cur = find_khz_freq_from_fid(data->currfid);
+	pol->cur = find_khz_freq_from_fid(data->currfid);
 
 	return 0;
 }
@@ -1264,22 +1093,23 @@ static void __cpuinit powernowk8_cpu_init_on_cpu(void *_init_on_cpu)
 		return;
 	}
 
-	if (cpu_family == CPU_OPTERON)
-		fidvid_msr_init();
+	fidvid_msr_init();
 
 	init_on_cpu->rc = 0;
 }
 
+static const char missing_pss_msg[] =
+	KERN_ERR
+	FW_BUG PFX "No compatible ACPI _PSS objects found.\n"
+	FW_BUG PFX "First, make sure Cool'N'Quiet is enabled in the BIOS.\n"
+	FW_BUG PFX "If that doesn't help, try upgrading your BIOS.\n";
+
 /* per CPU init entry point to the driver */
 static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 {
-	static const char ACPI_PSS_BIOS_BUG_MSG[] =
-		KERN_ERR FW_BUG PFX "No compatible ACPI _PSS objects found.\n"
-		FW_BUG PFX "Try again with latest BIOS.\n";
 	struct powernow_k8_data *data;
 	struct init_on_cpu init_on_cpu;
 	int rc;
-	struct cpuinfo_x86 *c = &cpu_data(pol->cpu);
 
 	if (!cpu_online(pol->cpu))
 		return -ENODEV;
@@ -1295,7 +1125,6 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 	}
 
 	data->cpu = pol->cpu;
-	data->currpstate = HW_PSTATE_INVALID;
 
 	if (powernow_k8_cpu_init_acpi(data)) {
 		/*
@@ -1303,7 +1132,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 		 * an UP version, and is deprecated by AMD.
 		 */
 		if (num_online_cpus() != 1) {
-			printk_once(ACPI_PSS_BIOS_BUG_MSG);
+			printk_once(missing_pss_msg);
 			goto err_out;
 		}
 		if (pol->cpu != 0) {
@@ -1332,17 +1161,10 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 	if (rc != 0)
 		goto err_out_exit_acpi;
 
-	if (cpu_family == CPU_HW_PSTATE)
-		cpumask_copy(pol->cpus, cpumask_of(pol->cpu));
-	else
-		cpumask_copy(pol->cpus, cpu_core_mask(pol->cpu));
+	cpumask_copy(pol->cpus, cpu_core_mask(pol->cpu));
 	data->available_cores = pol->cpus;
 
-	if (cpu_family == CPU_HW_PSTATE)
-		pol->cur = find_khz_freq_from_pstate(data->powernow_table,
-				data->currpstate);
-	else
-		pol->cur = find_khz_freq_from_fid(data->currfid);
+	pol->cur = find_khz_freq_from_fid(data->currfid);
 	pr_debug("policy current frequency %d kHz\n", pol->cur);
 
 	/* min/max the cpu is capable of */
@@ -1354,18 +1176,10 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 		return -EINVAL;
 	}
 
-	/* Check for APERF/MPERF support in hardware */
-	if (cpu_has(c, X86_FEATURE_APERFMPERF))
-		cpufreq_amd64_driver.getavg = cpufreq_get_measured_perf;
-
 	cpufreq_frequency_table_get_attr(data->powernow_table, pol->cpu);
 
-	if (cpu_family == CPU_HW_PSTATE)
-		pr_debug("cpu_init done, current pstate 0x%x\n",
-				data->currpstate);
-	else
-		pr_debug("cpu_init done, current fid 0x%x, vid 0x%x\n",
-				data->currfid, data->currvid);
+	pr_debug("cpu_init done, current fid 0x%x, vid 0x%x\n",
+		data->currfid, data->currvid);
 
 	per_cpu(powernow_data, pol->cpu) = data;
 
@@ -1418,88 +1232,15 @@ static unsigned int powernowk8_get(unsigned int cpu)
 	if (err)
 		goto out;
 
-	if (cpu_family == CPU_HW_PSTATE)
-		khz = find_khz_freq_from_pstate(data->powernow_table,
-					data->currpstate);
-	else
-		khz = find_khz_freq_from_fid(data->currfid);
+	khz = find_khz_freq_from_fid(data->currfid);
 
 
 out:
 	return khz;
 }
 
-static void _cpb_toggle_msrs(bool t)
-{
-	int cpu;
-
-	get_online_cpus();
-
-	rdmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs);
-
-	for_each_cpu(cpu, cpu_online_mask) {
-		struct msr *reg = per_cpu_ptr(msrs, cpu);
-		if (t)
-			reg->l &= ~BIT(25);
-		else
-			reg->l |= BIT(25);
-	}
-	wrmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs);
-
-	put_online_cpus();
-}
-
-/*
- * Switch on/off core performance boosting.
- *
- * 0=disable
- * 1=enable.
- */
-static void cpb_toggle(bool t)
-{
-	if (!cpb_capable)
-		return;
-
-	if (t && !cpb_enabled) {
-		cpb_enabled = true;
-		_cpb_toggle_msrs(t);
-		printk(KERN_INFO PFX "Core Boosting enabled.\n");
-	} else if (!t && cpb_enabled) {
-		cpb_enabled = false;
-		_cpb_toggle_msrs(t);
-		printk(KERN_INFO PFX "Core Boosting disabled.\n");
-	}
-}
-
-static ssize_t store_cpb(struct cpufreq_policy *policy, const char *buf,
-				 size_t count)
-{
-	int ret = -EINVAL;
-	unsigned long val = 0;
-
-	ret = strict_strtoul(buf, 10, &val);
-	if (!ret && (val == 0 || val == 1) && cpb_capable)
-		cpb_toggle(val);
-	else
-		return -EINVAL;
-
-	return count;
-}
-
-static ssize_t show_cpb(struct cpufreq_policy *policy, char *buf)
-{
-	return sprintf(buf, "%u\n", cpb_enabled);
-}
-
-#define define_one_rw(_name) \
-static struct freq_attr _name = \
-__ATTR(_name, 0644, show_##_name, store_##_name)
-
-define_one_rw(cpb);
-
 static struct freq_attr *powernow_k8_attr[] = {
 	&cpufreq_freq_attr_scaling_available_freqs,
-	&cpb,
 	NULL,
 };
 
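The cpb switch deleted in this hunk is an instance of the standard cpufreq sysfs attribute pattern: a show/store pair wrapped into a struct freq_attr and listed in the driver's ->attr table, exactly as powernow_k8_attr does above. A minimal sketch of that pattern with a hypothetical boolean attribute (names are illustrative; kstrtoul is shown where the era's strict_strtoul appeared):

#include <linux/cpufreq.h>
#include <linux/kernel.h>

static bool example_enabled;

static ssize_t show_example(struct cpufreq_policy *policy, char *buf)
{
	/* Report the current state as "0" or "1". */
	return sprintf(buf, "%u\n", example_enabled);
}

static ssize_t store_example(struct cpufreq_policy *policy,
			     const char *buf, size_t count)
{
	unsigned long val;

	/* Accept only "0" or "1"; reject everything else. */
	if (kstrtoul(buf, 10, &val) || val > 1)
		return -EINVAL;

	example_enabled = val;
	return count;
}

static struct freq_attr example =
	__ATTR(example, 0644, show_example, store_example);

The attribute becomes visible under each policy's sysfs directory once a pointer to it is added to the driver's NULL-terminated freq_attr array.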
@@ -1515,53 +1256,18 @@ static struct cpufreq_driver cpufreq_amd64_driver = {
 	.attr = powernow_k8_attr,
 };
 
-/*
- * Clear the boost-disable flag on the CPU_DOWN path so that this cpu
- * cannot block the remaining ones from boosting. On the CPU_UP path we
- * simply keep the boost-disable flag in sync with the current global
- * state.
- */
-static int cpb_notify(struct notifier_block *nb, unsigned long action,
-		      void *hcpu)
-{
-	unsigned cpu = (long)hcpu;
-	u32 lo, hi;
-
-	switch (action) {
-	case CPU_UP_PREPARE:
-	case CPU_UP_PREPARE_FROZEN:
-
-		if (!cpb_enabled) {
-			rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
-			lo |= BIT(25);
-			wrmsr_on_cpu(cpu, MSR_K7_HWCR, lo, hi);
-		}
-		break;
-
-	case CPU_DOWN_PREPARE:
-	case CPU_DOWN_PREPARE_FROZEN:
-		rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
-		lo &= ~BIT(25);
-		wrmsr_on_cpu(cpu, MSR_K7_HWCR, lo, hi);
-		break;
-
-	default:
-		break;
-	}
-
-	return NOTIFY_OK;
-}
-
-static struct notifier_block cpb_nb = {
-	.notifier_call = cpb_notify,
-};
-
 /* driver entry point for init */
 static int __cpuinit powernowk8_init(void)
 {
-	unsigned int i, supported_cpus = 0, cpu;
+	unsigned int i, supported_cpus = 0;
 	int rv;
 
+	if (static_cpu_has(X86_FEATURE_HW_PSTATE)) {
+		pr_warn(PFX "this CPU is not supported anymore, using acpi-cpufreq instead.\n");
+		request_module("acpi-cpufreq");
+		return -ENODEV;
+	}
+
 	if (!x86_match_cpu(powernow_k8_ids))
 		return -ENODEV;
 
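The removed cpb_notify is the CPU hotplug notifier idiom of this era (predating the cpuhp state machine): a notifier_block registered with register_cpu_notifier() whose callback keys off the CPU_UP_PREPARE / CPU_DOWN_PREPARE actions, with the _FROZEN suspend/resume variants handled the same way. A stripped-down sketch of that idiom, with a hypothetical callback in place of the MSR work:

#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/notifier.h>

static int example_cpu_notify(struct notifier_block *nb,
			      unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	/* Treat the _FROZEN (suspend/resume) variants like the plain ones. */
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		pr_debug("cpu %u is coming up\n", cpu);
		break;
	case CPU_DOWN_PREPARE:
		pr_debug("cpu %u is going down\n", cpu);
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block example_cpu_nb = {
	.notifier_call = example_cpu_notify,
};

/* Paired with register_cpu_notifier(&example_cpu_nb) at init and
 * unregister_cpu_notifier(&example_cpu_nb) at exit, as the removed
 * driver code did for cpb_nb. */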
@@ -1575,38 +1281,13 @@ static int __cpuinit powernowk8_init(void)
 	if (supported_cpus != num_online_cpus())
 		return -ENODEV;
 
-	printk(KERN_INFO PFX "Found %d %s (%d cpu cores) (" VERSION ")\n",
-		num_online_nodes(), boot_cpu_data.x86_model_id, supported_cpus);
-
-	if (boot_cpu_has(X86_FEATURE_CPB)) {
-
-		cpb_capable = true;
-
-		msrs = msrs_alloc();
-		if (!msrs) {
-			printk(KERN_ERR "%s: Error allocating msrs!\n", __func__);
-			return -ENOMEM;
-		}
-
-		register_cpu_notifier(&cpb_nb);
-
-		rdmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs);
+	rv = cpufreq_register_driver(&cpufreq_amd64_driver);
 
-		for_each_cpu(cpu, cpu_online_mask) {
-			struct msr *reg = per_cpu_ptr(msrs, cpu);
-			cpb_enabled |= !(!!(reg->l & BIT(25)));
-		}
+	if (!rv)
+		pr_info(PFX "Found %d %s (%d cpu cores) (" VERSION ")\n",
+			num_online_nodes(), boot_cpu_data.x86_model_id,
+			supported_cpus);
 
-		printk(KERN_INFO PFX "Core Performance Boosting: %s.\n",
-			(cpb_enabled ? "on" : "off"));
-	}
-
-	rv = cpufreq_register_driver(&cpufreq_amd64_driver);
-	if (rv < 0 && boot_cpu_has(X86_FEATURE_CPB)) {
-		unregister_cpu_notifier(&cpb_nb);
-		msrs_free(msrs);
-		msrs = NULL;
-	}
 	return rv;
 }
 
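All of the boosting code dropped from this file revolves around one hardware detail: bit 25 of MSR_K7_HWCR is a per-core boost-disable flag, so enabling Core Performance Boost means clearing that bit on every online CPU; equivalent boost control for these CPUs lives outside powernow-k8 after this series. A condensed, hypothetical sketch of the per-CPU toggle that the removed helpers performed across the whole online mask:

#include <linux/bitops.h>
#include <asm/msr.h>

#define HWCR_BOOST_DISABLE	BIT(25)	/* boost-disable bit in MSR_K7_HWCR */

/* Hypothetical helper: enable/disable boosting on one CPU by
 * clearing/setting the boost-disable bit, as the removed
 * _cpb_toggle_msrs() did for every online CPU at once. */
static void example_set_boost(unsigned int cpu, bool enable)
{
	u32 lo, hi;

	rdmsr_on_cpu(cpu, MSR_K7_HWCR, &lo, &hi);
	if (enable)
		lo &= ~HWCR_BOOST_DISABLE;
	else
		lo |= HWCR_BOOST_DISABLE;
	wrmsr_on_cpu(cpu, MSR_K7_HWCR, lo, hi);
}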
@@ -1615,13 +1296,6 @@ static void __exit powernowk8_exit(void)
 {
 	pr_debug("exit\n");
 
-	if (boot_cpu_has(X86_FEATURE_CPB)) {
-		msrs_free(msrs);
-		msrs = NULL;
-
-		unregister_cpu_notifier(&cpb_nb);
-	}
-
 	cpufreq_unregister_driver(&cpufreq_amd64_driver);
 }
 
diff --git a/drivers/cpufreq/powernow-k8.h b/drivers/cpufreq/powernow-k8.h
index 3744d26cdc2b..79329d4d5abe 100644
--- a/drivers/cpufreq/powernow-k8.h
+++ b/drivers/cpufreq/powernow-k8.h
@@ -5,24 +5,11 @@
  * http://www.gnu.org/licenses/gpl.html
  */
 
-enum pstate {
-	HW_PSTATE_INVALID = 0xff,
-	HW_PSTATE_0 = 0,
-	HW_PSTATE_1 = 1,
-	HW_PSTATE_2 = 2,
-	HW_PSTATE_3 = 3,
-	HW_PSTATE_4 = 4,
-	HW_PSTATE_5 = 5,
-	HW_PSTATE_6 = 6,
-	HW_PSTATE_7 = 7,
-};
-
 struct powernow_k8_data {
 	unsigned int cpu;
 
 	u32 numps;	/* number of p-states */
 	u32 batps;	/* number of p-states supported on battery */
-	u32 max_hw_pstate; /* maximum legal hardware pstate */
 
 	/* these values are constant when the PSB is used to determine
 	 * vid/fid pairings, but are modified during the ->target() call
@@ -37,7 +24,6 @@ struct powernow_k8_data {
 	/* keep track of the current fid / vid or pstate */
 	u32 currvid;
 	u32 currfid;
-	enum pstate currpstate;
 
 	/* the powernow_table includes all frequency and vid/fid pairings:
 	 * fid are the lower 8 bits of the index, vid are the upper 8 bits.
@@ -97,23 +83,6 @@ struct powernow_k8_data {
 #define MSR_S_HI_CURRENT_VID	0x0000003f
 #define MSR_C_HI_STP_GNT_BENIGN	0x00000001
 
-
-/* Hardware Pstate _PSS and MSR definitions */
-#define USE_HW_PSTATE		0x00000080
-#define HW_PSTATE_MASK		0x00000007
-#define HW_PSTATE_VALID_MASK	0x80000000
-#define HW_PSTATE_MAX_MASK	0x000000f0
-#define HW_PSTATE_MAX_SHIFT	4
-#define MSR_PSTATE_DEF_BASE	0xc0010064 /* base of Pstate MSRs */
-#define MSR_PSTATE_STATUS	0xc0010063 /* Pstate Status MSR */
-#define MSR_PSTATE_CTRL		0xc0010062 /* Pstate control MSR */
-#define MSR_PSTATE_CUR_LIMIT	0xc0010061 /* pstate current limit MSR */
-
-/* define the two driver architectures */
-#define CPU_OPTERON 0
-#define CPU_HW_PSTATE 1
-
-
 /*
  * There are restrictions frequencies have to follow:
  * - only 1 entry in the low fid table ( <=1.4GHz )
@@ -218,5 +187,4 @@ static int core_frequency_transition(struct powernow_k8_data *data, u32 reqfid);
 
 static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index);
 
-static int fill_powernow_table_pstate(struct powernow_k8_data *data, struct cpufreq_frequency_table *powernow_table);
 static int fill_powernow_table_fidvid(struct powernow_k8_data *data, struct cpufreq_frequency_table *powernow_table);